405 files changed, 5359 insertions, 3972 deletions
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 1423d2570d78..44c6dcc93d6d 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector | |||
3 | The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables | 3 | The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables |
4 | RCU's CPU stall detector, which detects conditions that unduly delay | 4 | RCU's CPU stall detector, which detects conditions that unduly delay |
5 | RCU grace periods. The stall detector's idea of what constitutes | 5 | RCU grace periods. The stall detector's idea of what constitutes |
6 | "unduly delayed" is controlled by a pair of C preprocessor macros: | 6 | "unduly delayed" is controlled by a set of C preprocessor macros: |
7 | 7 | ||
8 | RCU_SECONDS_TILL_STALL_CHECK | 8 | RCU_SECONDS_TILL_STALL_CHECK |
9 | 9 | ||
10 | This macro defines the period of time that RCU will wait from | 10 | This macro defines the period of time that RCU will wait from |
11 | the beginning of a grace period until it issues an RCU CPU | 11 | the beginning of a grace period until it issues an RCU CPU |
12 | stall warning. It is normally ten seconds. | 12 | stall warning. This time period is normally ten seconds. |
13 | 13 | ||
14 | RCU_SECONDS_TILL_STALL_RECHECK | 14 | RCU_SECONDS_TILL_STALL_RECHECK |
15 | 15 | ||
16 | This macro defines the period of time that RCU will wait after | 16 | This macro defines the period of time that RCU will wait after |
17 | issuing a stall warning until it issues another stall warning. | 17 | issuing a stall warning until it issues another stall warning |
18 | It is normally set to thirty seconds. | 18 | for the same stall. This time period is normally set to thirty |
19 | seconds. | ||
19 | 20 | ||
20 | RCU_STALL_RAT_DELAY | 21 | RCU_STALL_RAT_DELAY |
21 | 22 | ||
22 | The CPU stall detector tries to make the offending CPU rat on itself, | 23 | The CPU stall detector tries to make the offending CPU print its |
23 | as this often gives better-quality stack traces. However, if | 24 | own warnings, as this often gives better-quality stack traces. |
24 | the offending CPU does not detect its own stall in the number | 25 | However, if the offending CPU does not detect its own stall in |
25 | of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will | 26 | the number of jiffies specified by RCU_STALL_RAT_DELAY, then |
26 | complain. This is normally set to two jiffies. | 27 | some other CPU will complain. This delay is normally set to |
28 | two jiffies. | ||
27 | 29 | ||
28 | The following problems can result in an RCU CPU stall warning: | 30 | When a CPU detects that it is stalling, it will print a message similar |
31 | to the following: | ||
32 | |||
33 | INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies) | ||
34 | |||
35 | This message indicates that CPU 5 detected that it was causing a stall, | ||
36 | and that the stall was affecting RCU-sched. This message will normally be | ||
37 | followed by a stack dump of the offending CPU. On TREE_RCU kernel builds, | ||
38 | RCU and RCU-sched are implemented by the same underlying mechanism, | ||
39 | while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented | ||
40 | by rcu_preempt_state. | ||
41 | |||
42 | On the other hand, if the offending CPU fails to print out a stall-warning | ||
43 | message quickly enough, some other CPU will print a message similar to | ||
44 | the following: | ||
45 | |||
46 | INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies) | ||
47 | |||
48 | This message indicates that CPU 2 detected that CPUs 3 and 5 were both | ||
49 | causing stalls, and that the stall was affecting RCU-bh. This message | ||
50 | will normally be followed by stack dumps for each CPU. Please note that | ||
51 | TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, | ||
52 | and that the tasks will be indicated by PID, for example, "P3421". | ||
53 | It is even possible for a rcu_preempt_state stall to be caused by both | ||
54 | CPUs -and- tasks, in which case the offending CPUs and tasks will all | ||
55 | be called out in the list. | ||
56 | |||
57 | Finally, if the grace period ends just as the stall warning starts | ||
58 | printing, there will be a spurious stall-warning message: | ||
59 | |||
60 | INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies) | ||
61 | |||
62 | This is rare, but does happen from time to time in real life. | ||
63 | |||
64 | So your kernel printed an RCU CPU stall warning. The next question is | ||
65 | "What caused it?" The following problems can result in RCU CPU stall | ||
66 | warnings: | ||
29 | 67 | ||
30 | o A CPU looping in an RCU read-side critical section. | 68 | o A CPU looping in an RCU read-side critical section. |
31 | 69 | ||
32 | o A CPU looping with interrupts disabled. | 70 | o A CPU looping with interrupts disabled. This condition can |
71 | result in RCU-sched and RCU-bh stalls. | ||
33 | 72 | ||
34 | o A CPU looping with preemption disabled. | 73 | o A CPU looping with preemption disabled. This condition can |
74 | result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh | ||
75 | stalls. | ||
76 | |||
77 | o A CPU looping with bottom halves disabled. This condition can | ||
78 | result in RCU-sched and RCU-bh stalls. | ||
35 | 79 | ||
36 | o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel | 80 | o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel |
37 | without invoking schedule(). | 81 | without invoking schedule(). |
@@ -39,20 +83,24 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel | |||
39 | o A bug in the RCU implementation. | 83 | o A bug in the RCU implementation. |
40 | 84 | ||
41 | o A hardware failure. This is quite unlikely, but has occurred | 85 | o A hardware failure. This is quite unlikely, but has occurred |
42 | at least once in a former life. A CPU failed in a running system, | 86 | at least once in real life. A CPU failed in a running system, |
43 | becoming unresponsive, but not causing an immediate crash. | 87 | becoming unresponsive, but not causing an immediate crash. |
44 | This resulted in a series of RCU CPU stall warnings, eventually | 88 | This resulted in a series of RCU CPU stall warnings, eventually |
45 | leading the realization that the CPU had failed. | 89 | leading the realization that the CPU had failed. |
46 | 90 | ||
47 | The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning. | 91 | The RCU, RCU-sched, and RCU-bh implementations have CPU stall |
48 | SRCU does not do so directly, but its calls to synchronize_sched() will | 92 | warning. SRCU does not have its own CPU stall warnings, but its |
49 | result in RCU-sched detecting any CPU stalls that might be occurring. | 93 | calls to synchronize_sched() will result in RCU-sched detecting |
50 | 94 | RCU-sched-related CPU stalls. Please note that RCU only detects | |
51 | To diagnose the cause of the stall, inspect the stack traces. The offending | 95 | CPU stalls when there is a grace period in progress. No grace period, |
52 | function will usually be near the top of the stack. If you have a series | 96 | no CPU stall warnings. |
53 | of stall warnings from a single extended stall, comparing the stack traces | 97 | |
54 | can often help determine where the stall is occurring, which will usually | 98 | To diagnose the cause of the stall, inspect the stack traces. |
55 | be in the function nearest the top of the stack that stays the same from | 99 | The offending function will usually be near the top of the stack. |
56 | trace to trace. | 100 | If you have a series of stall warnings from a single extended stall, |
101 | comparing the stack traces can often help determine where the stall | ||
102 | is occurring, which will usually be in the function nearest the top of | ||
103 | that portion of the stack which remains the same from trace to trace. | ||
104 | If you can reliably trigger the stall, ftrace can be quite helpful. | ||
57 | 105 | ||
58 | RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. | 106 | RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. |
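The stallwarn.txt text above lists "a CPU looping in an RCU read-side critical section" as the first cause of a stall warning. As a minimal, hypothetical sketch of that failure mode (the names my_dev, my_dev_list and buggy_lookup are invented for illustration and are not part of this commit):

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/list.h>

struct my_dev {
	struct list_head list;
	int id;
};

static LIST_HEAD(my_dev_list);

/* BUG (deliberate): the outer loop never leaves the read-side critical
 * section, so this CPU never passes through a quiescent state.  Once a
 * grace period starts, the stall detector fires after roughly
 * RCU_SECONDS_TILL_STALL_CHECK (ten seconds) and prints
 * "INFO: ... detected stall on CPU N", as described above.
 */
static void buggy_lookup(int id)
{
	struct my_dev *d;

	rcu_read_lock();
	for (;;) {
		list_for_each_entry_rcu(d, &my_dev_list, list)
			if (d->id == id)
				break;
		/* no exit condition and no rcu_read_unlock() on this path */
	}
	rcu_read_unlock();	/* never reached */
}

Comparing successive stack dumps from such a stall would show buggy_lookup() near the top of each trace, which is exactly the diagnostic pattern the documentation above recommends looking for.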
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 8608fd85e921..efd8cc95c06b 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -256,23 +256,23 @@ o Each element of the form "1/1 0:127 ^0" represents one struct | |||
256 | The output of "cat rcu/rcu_pending" looks as follows: | 256 | The output of "cat rcu/rcu_pending" looks as follows: |
257 | 257 | ||
258 | rcu_sched: | 258 | rcu_sched: |
259 | 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 | 259 | 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 |
260 | 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 | 260 | 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 |
261 | 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 | 261 | 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 |
262 | 3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 | 262 | 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 |
263 | 4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 | 263 | 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 |
264 | 5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 | 264 | 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 |
265 | 6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 | 265 | 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 |
266 | 7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 | 266 | 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 |
267 | rcu_bh: | 267 | rcu_bh: |
268 | 0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 | 268 | 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 |
269 | 1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 | 269 | 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 |
270 | 2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 | 270 | 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 |
271 | 3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 | 271 | 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 |
272 | 4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 | 272 | 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 |
273 | 5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 | 273 | 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 |
274 | 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 | 274 | 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 |
275 | 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 | 275 | 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 |
276 | 276 | ||
277 | As always, this is once again split into "rcu_sched" and "rcu_bh" | 277 | As always, this is once again split into "rcu_sched" and "rcu_bh" |
278 | portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional | 278 | portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional |
@@ -284,6 +284,9 @@ o "np" is the number of times that __rcu_pending() has been invoked | |||
284 | o "qsp" is the number of times that the RCU was waiting for a | 284 | o "qsp" is the number of times that the RCU was waiting for a |
285 | quiescent state from this CPU. | 285 | quiescent state from this CPU. |
286 | 286 | ||
287 | o "rpq" is the number of times that the CPU had passed through | ||
288 | a quiescent state, but not yet reported it to RCU. | ||
289 | |||
287 | o "cbr" is the number of times that this CPU had RCU callbacks | 290 | o "cbr" is the number of times that this CPU had RCU callbacks |
288 | that had passed through a grace period, and were thus ready | 291 | that had passed through a grace period, and were thus ready |
289 | to be invoked. | 292 | to be invoked. |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ed511af0f79a..05df0b7514b6 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -589,3 +589,26 @@ Why: Useful in 2003, implementation is a hack. | |||
589 | Generally invoked by accident today. | 589 | Generally invoked by accident today. |
590 | Seen as doing more harm than good. | 590 | Seen as doing more harm than good. |
591 | Who: Len Brown <len.brown@intel.com> | 591 | Who: Len Brown <len.brown@intel.com> |
592 | |||
593 | ---------------------------- | ||
594 | |||
595 | What: video4linux /dev/vtx teletext API support | ||
596 | When: 2.6.35 | ||
597 | Files: drivers/media/video/saa5246a.c drivers/media/video/saa5249.c | ||
598 | include/linux/videotext.h | ||
599 | Why: The vtx device nodes have been superseded by vbi device nodes | ||
600 | for many years. No applications exist that use the vtx support. | ||
601 | Of the two i2c drivers that actually support this API the saa5249 | ||
602 | has been impossible to use for a year now and no known hardware | ||
603 | that supports this device exists. The saa5246a is theoretically | ||
604 | supported by the old mxb boards, but it never actually worked. | ||
605 | |||
606 | In summary: there is no hardware that can use this API and there | ||
607 | are no applications actually implementing this API. | ||
608 | |||
609 | The vtx support still reserves minors 192-223 and we would really | ||
610 | like to reuse those for upcoming new functionality. In the unlikely | ||
611 | event that new hardware appears that wants to use the functionality | ||
612 | provided by the vtx API, then that functionality should be build | ||
613 | around the sliced VBI API instead. | ||
614 | Who: Hans Verkuil <hverkuil@xs4all.nl> | ||
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a4f30faa4f1f..1e359b62c40a 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -316,7 +316,7 @@ address perms offset dev inode pathname | |||
316 | 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test | 316 | 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test |
317 | 0804a000-0806b000 rw-p 00000000 00:00 0 [heap] | 317 | 0804a000-0806b000 rw-p 00000000 00:00 0 [heap] |
318 | a7cb1000-a7cb2000 ---p 00000000 00:00 0 | 318 | a7cb1000-a7cb2000 ---p 00000000 00:00 0 |
319 | a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4] | 319 | a7cb2000-a7eb2000 rw-p 00000000 00:00 0 |
320 | a7eb2000-a7eb3000 ---p 00000000 00:00 0 | 320 | a7eb2000-a7eb3000 ---p 00000000 00:00 0 |
321 | a7eb3000-a7ed5000 rw-p 00000000 00:00 0 | 321 | a7eb3000-a7ed5000 rw-p 00000000 00:00 0 |
322 | a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 | 322 | a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 |
@@ -352,7 +352,6 @@ is not associated with a file: | |||
352 | [stack] = the stack of the main process | 352 | [stack] = the stack of the main process |
353 | [vdso] = the "virtual dynamic shared object", | 353 | [vdso] = the "virtual dynamic shared object", |
354 | the kernel system call handler | 354 | the kernel system call handler |
355 | [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size | ||
356 | 355 | ||
357 | or if empty, the mapping is anonymous. | 356 | or if empty, the mapping is anonymous. |
358 | 357 | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 839b21b0699a..0c6c56076d19 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -324,6 +324,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
324 | they are unmapped. Otherwise they are | 324 | they are unmapped. Otherwise they are |
325 | flushed before they will be reused, which | 325 | flushed before they will be reused, which |
326 | is a lot of faster | 326 | is a lot of faster |
327 | off - do not initialize any AMD IOMMU found in | ||
328 | the system | ||
327 | 329 | ||
328 | amijoy.map= [HW,JOY] Amiga joystick support | 330 | amijoy.map= [HW,JOY] Amiga joystick support |
329 | Map of devices attached to JOY0DAT and JOY1DAT | 331 | Map of devices attached to JOY0DAT and JOY1DAT |
diff --git a/MAINTAINERS b/MAINTAINERS
index 5085c90a6ec8..28332e1b0863 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4165,6 +4165,7 @@ OPROFILE | |||
4165 | M: Robert Richter <robert.richter@amd.com> | 4165 | M: Robert Richter <robert.richter@amd.com> |
4166 | L: oprofile-list@lists.sf.net | 4166 | L: oprofile-list@lists.sf.net |
4167 | S: Maintained | 4167 | S: Maintained |
4168 | F: arch/*/include/asm/oprofile*.h | ||
4168 | F: arch/*/oprofile/ | 4169 | F: arch/*/oprofile/ |
4169 | F: drivers/oprofile/ | 4170 | F: drivers/oprofile/ |
4170 | F: include/linux/oprofile.h | 4171 | F: include/linux/oprofile.h |
@@ -5492,7 +5493,7 @@ S: Maintained | |||
5492 | F: drivers/mmc/host/tmio_mmc.* | 5493 | F: drivers/mmc/host/tmio_mmc.* |
5493 | 5494 | ||
5494 | TMPFS (SHMEM FILESYSTEM) | 5495 | TMPFS (SHMEM FILESYSTEM) |
5495 | M: Hugh Dickins <hugh.dickins@tiscali.co.uk> | 5496 | M: Hugh Dickins <hughd@google.com> |
5496 | L: linux-mm@kvack.org | 5497 | L: linux-mm@kvack.org |
5497 | S: Maintained | 5498 | S: Maintained |
5498 | F: include/linux/shmem_fs.h | 5499 | F: include/linux/shmem_fs.h |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 34 | 3 | SUBLEVEL = 34 |
4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = |
5 | NAME = Sheep on Meth | 5 | NAME = Sheep on Meth |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 610dff44d94b..e756d04b6cd5 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,8 +17,8 @@ | |||
17 | #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) | 17 | #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) |
18 | #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) | 18 | #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) |
19 | 19 | ||
20 | #define atomic_read(v) ((v)->counter + 0) | 20 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
21 | #define atomic64_read(v) ((v)->counter + 0) | 21 | #define atomic64_read(v) (*(volatile long *)&(v)->counter) |
22 | 22 | ||
23 | #define atomic_set(v,i) ((v)->counter = (i)) | 23 | #define atomic_set(v,i) ((v)->counter = (i)) |
24 | #define atomic64_set(v,i) ((v)->counter = (i)) | 24 | #define atomic64_set(v,i) ((v)->counter = (i)) |
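The atomic_read()/atomic64_read() change above (and the matching ARM change further down) replaces a plain access with a read through a cast to volatile, so the compiler must reload the counter from memory on every use rather than caching it in a register across, say, a busy-wait loop. A rough userspace illustration of the difference, not taken from the kernel (fake_atomic, plain_read and volatile_read are invented names for this sketch):

#include <stdio.h>

struct fake_atomic { int counter; };

/* old style: an ordinary access the optimizer may hoist out of a loop */
#define plain_read(v)    ((v)->counter + 0)

/* new style: the volatile cast forces a fresh load on every evaluation */
#define volatile_read(v) (*(volatile int *)&(v)->counter)

static struct fake_atomic flag = { 0 };

/* With plain_read() and optimization enabled, the compiler is free to
 * read flag.counter once and spin on the cached value forever, even
 * after another thread stores 1 to it.  volatile_read() re-reads
 * memory on each iteration, so the loop can actually terminate.
 */
static int wait_for_flag(void)
{
	while (volatile_read(&flag) == 0)
		;
	return volatile_read(&flag);
}

int main(void)
{
	(void)wait_for_flag;	/* referenced but not called: it would spin here */
	printf("flag starts at %d\n", plain_read(&flag));
	return 0;
}

Note that the volatile cast only defeats compiler caching; it adds no CPU memory barriers, which callers still have to supply where ordering matters.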
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6ab6b337a913..c5191b1532e8 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -685,8 +685,8 @@ proc_types: | |||
685 | W(b) __armv4_mmu_cache_off | 685 | W(b) __armv4_mmu_cache_off |
686 | W(b) __armv4_mmu_cache_flush | 686 | W(b) __armv4_mmu_cache_flush |
687 | 687 | ||
688 | .word 0x56056930 | 688 | .word 0x56056900 |
689 | .word 0xff0ffff0 @ PXA935 | 689 | .word 0xffffff00 @ PXA9xx |
690 | W(b) __armv4_mmu_cache_on | 690 | W(b) __armv4_mmu_cache_on |
691 | W(b) __armv4_mmu_cache_off | 691 | W(b) __armv4_mmu_cache_off |
692 | W(b) __armv4_mmu_cache_flush | 692 | W(b) __armv4_mmu_cache_flush |
@@ -697,12 +697,6 @@ proc_types: | |||
697 | W(b) __armv4_mmu_cache_off | 697 | W(b) __armv4_mmu_cache_off |
698 | W(b) __armv5tej_mmu_cache_flush | 698 | W(b) __armv5tej_mmu_cache_flush |
699 | 699 | ||
700 | .word 0x56056930 | ||
701 | .word 0xff0ffff0 @ PXA935 | ||
702 | W(b) __armv4_mmu_cache_on | ||
703 | W(b) __armv4_mmu_cache_off | ||
704 | W(b) __armv4_mmu_cache_flush | ||
705 | |||
706 | .word 0x56050000 @ Feroceon | 700 | .word 0x56050000 @ Feroceon |
707 | .word 0xff0f0000 | 701 | .word 0xff0f0000 |
708 | W(b) __armv4_mmu_cache_on | 702 | W(b) __armv4_mmu_cache_on |
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 95d2becfc664..21f2bff8a363 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -1,13 +1,14 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.33-rc8 | 3 | # Linux kernel version: 2.6.34-rc2 |
4 | # Sat Feb 13 21:48:53 2010 | 4 | # Thu Apr 8 14:49:08 2010 |
5 | # | 5 | # |
6 | CONFIG_ARM=y | 6 | CONFIG_ARM=y |
7 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y | 7 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y |
8 | CONFIG_GENERIC_GPIO=y | 8 | CONFIG_GENERIC_GPIO=y |
9 | CONFIG_GENERIC_TIME=y | 9 | CONFIG_GENERIC_TIME=y |
10 | CONFIG_GENERIC_CLOCKEVENTS=y | 10 | CONFIG_GENERIC_CLOCKEVENTS=y |
11 | CONFIG_HAVE_PROC_CPU=y | ||
11 | CONFIG_GENERIC_HARDIRQS=y | 12 | CONFIG_GENERIC_HARDIRQS=y |
12 | CONFIG_STACKTRACE_SUPPORT=y | 13 | CONFIG_STACKTRACE_SUPPORT=y |
13 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 14 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
@@ -19,6 +20,7 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y | |||
19 | CONFIG_ARCH_HAS_CPUFREQ=y | 20 | CONFIG_ARCH_HAS_CPUFREQ=y |
20 | CONFIG_GENERIC_HWEIGHT=y | 21 | CONFIG_GENERIC_HWEIGHT=y |
21 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 22 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
23 | CONFIG_NEED_DMA_MAP_STATE=y | ||
22 | CONFIG_ARCH_MTD_XIP=y | 24 | CONFIG_ARCH_MTD_XIP=y |
23 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | 25 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
24 | CONFIG_VECTORS_BASE=0xffff0000 | 26 | CONFIG_VECTORS_BASE=0xffff0000 |
@@ -60,11 +62,6 @@ CONFIG_RCU_FANOUT=32 | |||
60 | # CONFIG_TREE_RCU_TRACE is not set | 62 | # CONFIG_TREE_RCU_TRACE is not set |
61 | # CONFIG_IKCONFIG is not set | 63 | # CONFIG_IKCONFIG is not set |
62 | CONFIG_LOG_BUF_SHIFT=14 | 64 | CONFIG_LOG_BUF_SHIFT=14 |
63 | CONFIG_GROUP_SCHED=y | ||
64 | CONFIG_FAIR_GROUP_SCHED=y | ||
65 | # CONFIG_RT_GROUP_SCHED is not set | ||
66 | CONFIG_USER_SCHED=y | ||
67 | # CONFIG_CGROUP_SCHED is not set | ||
68 | # CONFIG_CGROUPS is not set | 65 | # CONFIG_CGROUPS is not set |
69 | CONFIG_SYSFS_DEPRECATED=y | 66 | CONFIG_SYSFS_DEPRECATED=y |
70 | CONFIG_SYSFS_DEPRECATED_V2=y | 67 | CONFIG_SYSFS_DEPRECATED_V2=y |
@@ -97,10 +94,14 @@ CONFIG_TIMERFD=y | |||
97 | CONFIG_EVENTFD=y | 94 | CONFIG_EVENTFD=y |
98 | CONFIG_SHMEM=y | 95 | CONFIG_SHMEM=y |
99 | CONFIG_AIO=y | 96 | CONFIG_AIO=y |
97 | CONFIG_HAVE_PERF_EVENTS=y | ||
98 | CONFIG_PERF_USE_VMALLOC=y | ||
100 | 99 | ||
101 | # | 100 | # |
102 | # Kernel Performance Events And Counters | 101 | # Kernel Performance Events And Counters |
103 | # | 102 | # |
103 | # CONFIG_PERF_EVENTS is not set | ||
104 | # CONFIG_PERF_COUNTERS is not set | ||
104 | CONFIG_VM_EVENT_COUNTERS=y | 105 | CONFIG_VM_EVENT_COUNTERS=y |
105 | # CONFIG_COMPAT_BRK is not set | 106 | # CONFIG_COMPAT_BRK is not set |
106 | CONFIG_SLAB=y | 107 | CONFIG_SLAB=y |
@@ -184,6 +185,7 @@ CONFIG_MMU=y | |||
184 | # CONFIG_ARCH_REALVIEW is not set | 185 | # CONFIG_ARCH_REALVIEW is not set |
185 | # CONFIG_ARCH_VERSATILE is not set | 186 | # CONFIG_ARCH_VERSATILE is not set |
186 | # CONFIG_ARCH_AT91 is not set | 187 | # CONFIG_ARCH_AT91 is not set |
188 | # CONFIG_ARCH_BCMRING is not set | ||
187 | # CONFIG_ARCH_CLPS711X is not set | 189 | # CONFIG_ARCH_CLPS711X is not set |
188 | # CONFIG_ARCH_GEMINI is not set | 190 | # CONFIG_ARCH_GEMINI is not set |
189 | # CONFIG_ARCH_EBSA110 is not set | 191 | # CONFIG_ARCH_EBSA110 is not set |
@@ -193,7 +195,6 @@ CONFIG_MMU=y | |||
193 | # CONFIG_ARCH_STMP3XXX is not set | 195 | # CONFIG_ARCH_STMP3XXX is not set |
194 | # CONFIG_ARCH_NETX is not set | 196 | # CONFIG_ARCH_NETX is not set |
195 | # CONFIG_ARCH_H720X is not set | 197 | # CONFIG_ARCH_H720X is not set |
196 | # CONFIG_ARCH_NOMADIK is not set | ||
197 | # CONFIG_ARCH_IOP13XX is not set | 198 | # CONFIG_ARCH_IOP13XX is not set |
198 | # CONFIG_ARCH_IOP32X is not set | 199 | # CONFIG_ARCH_IOP32X is not set |
199 | # CONFIG_ARCH_IOP33X is not set | 200 | # CONFIG_ARCH_IOP33X is not set |
@@ -210,21 +211,26 @@ CONFIG_MMU=y | |||
210 | # CONFIG_ARCH_KS8695 is not set | 211 | # CONFIG_ARCH_KS8695 is not set |
211 | # CONFIG_ARCH_NS9XXX is not set | 212 | # CONFIG_ARCH_NS9XXX is not set |
212 | # CONFIG_ARCH_W90X900 is not set | 213 | # CONFIG_ARCH_W90X900 is not set |
214 | # CONFIG_ARCH_NUC93X is not set | ||
213 | # CONFIG_ARCH_PNX4008 is not set | 215 | # CONFIG_ARCH_PNX4008 is not set |
214 | CONFIG_ARCH_PXA=y | 216 | CONFIG_ARCH_PXA=y |
215 | # CONFIG_ARCH_MSM is not set | 217 | # CONFIG_ARCH_MSM is not set |
218 | # CONFIG_ARCH_SHMOBILE is not set | ||
216 | # CONFIG_ARCH_RPC is not set | 219 | # CONFIG_ARCH_RPC is not set |
217 | # CONFIG_ARCH_SA1100 is not set | 220 | # CONFIG_ARCH_SA1100 is not set |
218 | # CONFIG_ARCH_S3C2410 is not set | 221 | # CONFIG_ARCH_S3C2410 is not set |
219 | # CONFIG_ARCH_S3C64XX is not set | 222 | # CONFIG_ARCH_S3C64XX is not set |
223 | # CONFIG_ARCH_S5P6440 is not set | ||
224 | # CONFIG_ARCH_S5P6442 is not set | ||
220 | # CONFIG_ARCH_S5PC1XX is not set | 225 | # CONFIG_ARCH_S5PC1XX is not set |
226 | # CONFIG_ARCH_S5PV210 is not set | ||
221 | # CONFIG_ARCH_SHARK is not set | 227 | # CONFIG_ARCH_SHARK is not set |
222 | # CONFIG_ARCH_LH7A40X is not set | 228 | # CONFIG_ARCH_LH7A40X is not set |
223 | # CONFIG_ARCH_U300 is not set | 229 | # CONFIG_ARCH_U300 is not set |
230 | # CONFIG_ARCH_U8500 is not set | ||
231 | # CONFIG_ARCH_NOMADIK is not set | ||
224 | # CONFIG_ARCH_DAVINCI is not set | 232 | # CONFIG_ARCH_DAVINCI is not set |
225 | # CONFIG_ARCH_OMAP is not set | 233 | # CONFIG_ARCH_OMAP is not set |
226 | # CONFIG_ARCH_BCMRING is not set | ||
227 | # CONFIG_ARCH_U8500 is not set | ||
228 | 234 | ||
229 | # | 235 | # |
230 | # Intel PXA2xx/PXA3xx Implementations | 236 | # Intel PXA2xx/PXA3xx Implementations |
@@ -253,6 +259,7 @@ CONFIG_ARCH_PXA=y | |||
253 | # CONFIG_MACH_EM_X270 is not set | 259 | # CONFIG_MACH_EM_X270 is not set |
254 | # CONFIG_MACH_EXEDA is not set | 260 | # CONFIG_MACH_EXEDA is not set |
255 | # CONFIG_MACH_CM_X300 is not set | 261 | # CONFIG_MACH_CM_X300 is not set |
262 | # CONFIG_MACH_CAPC7117 is not set | ||
256 | # CONFIG_ARCH_GUMSTIX is not set | 263 | # CONFIG_ARCH_GUMSTIX is not set |
257 | CONFIG_MACH_INTELMOTE2=y | 264 | CONFIG_MACH_INTELMOTE2=y |
258 | # CONFIG_MACH_STARGATE2 is not set | 265 | # CONFIG_MACH_STARGATE2 is not set |
@@ -275,7 +282,11 @@ CONFIG_MACH_INTELMOTE2=y | |||
275 | # CONFIG_PXA_EZX is not set | 282 | # CONFIG_PXA_EZX is not set |
276 | # CONFIG_MACH_MP900C is not set | 283 | # CONFIG_MACH_MP900C is not set |
277 | # CONFIG_ARCH_PXA_PALM is not set | 284 | # CONFIG_ARCH_PXA_PALM is not set |
285 | # CONFIG_MACH_RAUMFELD_RC is not set | ||
286 | # CONFIG_MACH_RAUMFELD_CONNECTOR is not set | ||
287 | # CONFIG_MACH_RAUMFELD_SPEAKER is not set | ||
278 | # CONFIG_PXA_SHARPSL is not set | 288 | # CONFIG_PXA_SHARPSL is not set |
289 | # CONFIG_MACH_ICONTROL is not set | ||
279 | # CONFIG_ARCH_PXA_ESERIES is not set | 290 | # CONFIG_ARCH_PXA_ESERIES is not set |
280 | CONFIG_PXA27x=y | 291 | CONFIG_PXA27x=y |
281 | CONFIG_PXA_SSP=y | 292 | CONFIG_PXA_SSP=y |
@@ -302,6 +313,7 @@ CONFIG_ARM_THUMB=y | |||
302 | CONFIG_ARM_L1_CACHE_SHIFT=5 | 313 | CONFIG_ARM_L1_CACHE_SHIFT=5 |
303 | CONFIG_IWMMXT=y | 314 | CONFIG_IWMMXT=y |
304 | CONFIG_XSCALE_PMU=y | 315 | CONFIG_XSCALE_PMU=y |
316 | CONFIG_CPU_HAS_PMU=y | ||
305 | CONFIG_COMMON_CLKDEV=y | 317 | CONFIG_COMMON_CLKDEV=y |
306 | 318 | ||
307 | # | 319 | # |
@@ -352,7 +364,7 @@ CONFIG_ALIGNMENT_TRAP=y | |||
352 | # | 364 | # |
353 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 365 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
354 | CONFIG_ZBOOT_ROM_BSS=0x0 | 366 | CONFIG_ZBOOT_ROM_BSS=0x0 |
355 | CONFIG_CMDLINE="console=tty1 root=/dev/mmcblk0p2 rootfstype=ext2 rootdelay=3 ip=192.168.0.202:192.168.0.200:192.168.0.200:255.255.255.0 debug" | 367 | CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=jffs2 console=ttyS2,115200 mem=32M" |
356 | # CONFIG_XIP_KERNEL is not set | 368 | # CONFIG_XIP_KERNEL is not set |
357 | CONFIG_KEXEC=y | 369 | CONFIG_KEXEC=y |
358 | CONFIG_ATAGS_PROC=y | 370 | CONFIG_ATAGS_PROC=y |
@@ -360,24 +372,8 @@ CONFIG_ATAGS_PROC=y | |||
360 | # | 372 | # |
361 | # CPU Power Management | 373 | # CPU Power Management |
362 | # | 374 | # |
363 | CONFIG_CPU_FREQ=y | 375 | # CONFIG_CPU_FREQ is not set |
364 | CONFIG_CPU_FREQ_TABLE=y | 376 | # CONFIG_CPU_IDLE is not set |
365 | CONFIG_CPU_FREQ_DEBUG=y | ||
366 | CONFIG_CPU_FREQ_STAT=y | ||
367 | # CONFIG_CPU_FREQ_STAT_DETAILS is not set | ||
368 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
369 | # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set | ||
370 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
371 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
372 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
373 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
374 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | ||
375 | CONFIG_CPU_FREQ_GOV_USERSPACE=m | ||
376 | CONFIG_CPU_FREQ_GOV_ONDEMAND=m | ||
377 | CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m | ||
378 | CONFIG_CPU_IDLE=y | ||
379 | CONFIG_CPU_IDLE_GOV_LADDER=y | ||
380 | CONFIG_CPU_IDLE_GOV_MENU=y | ||
381 | 377 | ||
382 | # | 378 | # |
383 | # Floating point emulation | 379 | # Floating point emulation |
@@ -409,6 +405,7 @@ CONFIG_SUSPEND=y | |||
409 | CONFIG_SUSPEND_FREEZER=y | 405 | CONFIG_SUSPEND_FREEZER=y |
410 | CONFIG_APM_EMULATION=y | 406 | CONFIG_APM_EMULATION=y |
411 | CONFIG_PM_RUNTIME=y | 407 | CONFIG_PM_RUNTIME=y |
408 | CONFIG_PM_OPS=y | ||
412 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | 409 | CONFIG_ARCH_SUSPEND_POSSIBLE=y |
413 | CONFIG_NET=y | 410 | CONFIG_NET=y |
414 | 411 | ||
@@ -416,7 +413,6 @@ CONFIG_NET=y | |||
416 | # Networking options | 413 | # Networking options |
417 | # | 414 | # |
418 | CONFIG_PACKET=y | 415 | CONFIG_PACKET=y |
419 | CONFIG_PACKET_MMAP=y | ||
420 | CONFIG_UNIX=y | 416 | CONFIG_UNIX=y |
421 | CONFIG_XFRM=y | 417 | CONFIG_XFRM=y |
422 | # CONFIG_XFRM_USER is not set | 418 | # CONFIG_XFRM_USER is not set |
@@ -506,6 +502,7 @@ CONFIG_NF_CT_NETLINK=m | |||
506 | CONFIG_NETFILTER_XTABLES=m | 502 | CONFIG_NETFILTER_XTABLES=m |
507 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 503 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
508 | # CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set | 504 | # CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set |
505 | # CONFIG_NETFILTER_XT_TARGET_CT is not set | ||
509 | # CONFIG_NETFILTER_XT_TARGET_DSCP is not set | 506 | # CONFIG_NETFILTER_XT_TARGET_DSCP is not set |
510 | CONFIG_NETFILTER_XT_TARGET_HL=m | 507 | CONFIG_NETFILTER_XT_TARGET_HL=m |
511 | CONFIG_NETFILTER_XT_TARGET_LED=m | 508 | CONFIG_NETFILTER_XT_TARGET_LED=m |
@@ -622,6 +619,7 @@ CONFIG_IP6_NF_RAW=m | |||
622 | # CONFIG_ATM is not set | 619 | # CONFIG_ATM is not set |
623 | CONFIG_STP=m | 620 | CONFIG_STP=m |
624 | CONFIG_BRIDGE=m | 621 | CONFIG_BRIDGE=m |
622 | # CONFIG_BRIDGE_IGMP_SNOOPING is not set | ||
625 | # CONFIG_NET_DSA is not set | 623 | # CONFIG_NET_DSA is not set |
626 | # CONFIG_VLAN_8021Q is not set | 624 | # CONFIG_VLAN_8021Q is not set |
627 | # CONFIG_DECNET is not set | 625 | # CONFIG_DECNET is not set |
@@ -646,32 +644,7 @@ CONFIG_NET_CLS_ROUTE=y | |||
646 | # CONFIG_HAMRADIO is not set | 644 | # CONFIG_HAMRADIO is not set |
647 | # CONFIG_CAN is not set | 645 | # CONFIG_CAN is not set |
648 | # CONFIG_IRDA is not set | 646 | # CONFIG_IRDA is not set |
649 | CONFIG_BT=y | 647 | # CONFIG_BT is not set |
650 | CONFIG_BT_L2CAP=y | ||
651 | CONFIG_BT_SCO=y | ||
652 | CONFIG_BT_RFCOMM=y | ||
653 | CONFIG_BT_RFCOMM_TTY=y | ||
654 | CONFIG_BT_BNEP=y | ||
655 | CONFIG_BT_BNEP_MC_FILTER=y | ||
656 | CONFIG_BT_BNEP_PROTO_FILTER=y | ||
657 | CONFIG_BT_HIDP=y | ||
658 | |||
659 | # | ||
660 | # Bluetooth device drivers | ||
661 | # | ||
662 | CONFIG_BT_HCIBTUSB=m | ||
663 | CONFIG_BT_HCIBTSDIO=m | ||
664 | CONFIG_BT_HCIUART=y | ||
665 | CONFIG_BT_HCIUART_H4=y | ||
666 | # CONFIG_BT_HCIUART_BCSP is not set | ||
667 | # CONFIG_BT_HCIUART_LL is not set | ||
668 | CONFIG_BT_HCIBCM203X=m | ||
669 | CONFIG_BT_HCIBPA10X=m | ||
670 | CONFIG_BT_HCIBFUSB=m | ||
671 | CONFIG_BT_HCIVHCI=m | ||
672 | CONFIG_BT_MRVL=m | ||
673 | CONFIG_BT_MRVL_SDIO=m | ||
674 | # CONFIG_BT_ATH3K is not set | ||
675 | # CONFIG_AF_RXRPC is not set | 648 | # CONFIG_AF_RXRPC is not set |
676 | CONFIG_FIB_RULES=y | 649 | CONFIG_FIB_RULES=y |
677 | # CONFIG_WIRELESS is not set | 650 | # CONFIG_WIRELESS is not set |
@@ -687,7 +660,8 @@ CONFIG_FIB_RULES=y | |||
687 | # Generic Driver Options | 660 | # Generic Driver Options |
688 | # | 661 | # |
689 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 662 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
690 | # CONFIG_DEVTMPFS is not set | 663 | CONFIG_DEVTMPFS=y |
664 | CONFIG_DEVTMPFS_MOUNT=y | ||
691 | CONFIG_STANDALONE=y | 665 | CONFIG_STANDALONE=y |
692 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 666 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
693 | CONFIG_FW_LOADER=m | 667 | CONFIG_FW_LOADER=m |
@@ -703,9 +677,9 @@ CONFIG_MTD=y | |||
703 | # CONFIG_MTD_CONCAT is not set | 677 | # CONFIG_MTD_CONCAT is not set |
704 | CONFIG_MTD_PARTITIONS=y | 678 | CONFIG_MTD_PARTITIONS=y |
705 | # CONFIG_MTD_REDBOOT_PARTS is not set | 679 | # CONFIG_MTD_REDBOOT_PARTS is not set |
706 | # CONFIG_MTD_CMDLINE_PARTS is not set | 680 | CONFIG_MTD_CMDLINE_PARTS=y |
707 | # CONFIG_MTD_AFS_PARTS is not set | 681 | CONFIG_MTD_AFS_PARTS=y |
708 | # CONFIG_MTD_AR7_PARTS is not set | 682 | CONFIG_MTD_AR7_PARTS=y |
709 | 683 | ||
710 | # | 684 | # |
711 | # User Modules And Translation Layers | 685 | # User Modules And Translation Layers |
@@ -812,6 +786,7 @@ CONFIG_HAVE_IDE=y | |||
812 | # | 786 | # |
813 | # SCSI device support | 787 | # SCSI device support |
814 | # | 788 | # |
789 | CONFIG_SCSI_MOD=y | ||
815 | # CONFIG_RAID_ATTRS is not set | 790 | # CONFIG_RAID_ATTRS is not set |
816 | # CONFIG_SCSI is not set | 791 | # CONFIG_SCSI is not set |
817 | # CONFIG_SCSI_DMA is not set | 792 | # CONFIG_SCSI_DMA is not set |
@@ -965,6 +940,7 @@ CONFIG_SERIAL_PXA=y | |||
965 | CONFIG_SERIAL_PXA_CONSOLE=y | 940 | CONFIG_SERIAL_PXA_CONSOLE=y |
966 | CONFIG_SERIAL_CORE=y | 941 | CONFIG_SERIAL_CORE=y |
967 | CONFIG_SERIAL_CORE_CONSOLE=y | 942 | CONFIG_SERIAL_CORE_CONSOLE=y |
943 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
968 | CONFIG_UNIX98_PTYS=y | 944 | CONFIG_UNIX98_PTYS=y |
969 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 945 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
970 | CONFIG_LEGACY_PTYS=y | 946 | CONFIG_LEGACY_PTYS=y |
@@ -993,6 +969,7 @@ CONFIG_I2C_HELPER_AUTO=y | |||
993 | CONFIG_I2C_PXA=y | 969 | CONFIG_I2C_PXA=y |
994 | # CONFIG_I2C_PXA_SLAVE is not set | 970 | # CONFIG_I2C_PXA_SLAVE is not set |
995 | # CONFIG_I2C_SIMTEC is not set | 971 | # CONFIG_I2C_SIMTEC is not set |
972 | # CONFIG_I2C_XILINX is not set | ||
996 | 973 | ||
997 | # | 974 | # |
998 | # External I2C/SMBus adapter drivers | 975 | # External I2C/SMBus adapter drivers |
@@ -1006,15 +983,9 @@ CONFIG_I2C_PXA=y | |||
1006 | # | 983 | # |
1007 | # CONFIG_I2C_PCA_PLATFORM is not set | 984 | # CONFIG_I2C_PCA_PLATFORM is not set |
1008 | # CONFIG_I2C_STUB is not set | 985 | # CONFIG_I2C_STUB is not set |
1009 | |||
1010 | # | ||
1011 | # Miscellaneous I2C Chip support | ||
1012 | # | ||
1013 | # CONFIG_SENSORS_TSL2550 is not set | ||
1014 | # CONFIG_I2C_DEBUG_CORE is not set | 986 | # CONFIG_I2C_DEBUG_CORE is not set |
1015 | # CONFIG_I2C_DEBUG_ALGO is not set | 987 | # CONFIG_I2C_DEBUG_ALGO is not set |
1016 | # CONFIG_I2C_DEBUG_BUS is not set | 988 | # CONFIG_I2C_DEBUG_BUS is not set |
1017 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
1018 | CONFIG_SPI=y | 989 | CONFIG_SPI=y |
1019 | # CONFIG_SPI_DEBUG is not set | 990 | # CONFIG_SPI_DEBUG is not set |
1020 | CONFIG_SPI_MASTER=y | 991 | CONFIG_SPI_MASTER=y |
@@ -1046,10 +1017,12 @@ CONFIG_GPIO_SYSFS=y | |||
1046 | # | 1017 | # |
1047 | # Memory mapped GPIO expanders: | 1018 | # Memory mapped GPIO expanders: |
1048 | # | 1019 | # |
1020 | # CONFIG_GPIO_IT8761E is not set | ||
1049 | 1021 | ||
1050 | # | 1022 | # |
1051 | # I2C GPIO expanders: | 1023 | # I2C GPIO expanders: |
1052 | # | 1024 | # |
1025 | # CONFIG_GPIO_MAX7300 is not set | ||
1053 | # CONFIG_GPIO_MAX732X is not set | 1026 | # CONFIG_GPIO_MAX732X is not set |
1054 | # CONFIG_GPIO_PCA953X is not set | 1027 | # CONFIG_GPIO_PCA953X is not set |
1055 | # CONFIG_GPIO_PCF857X is not set | 1028 | # CONFIG_GPIO_PCF857X is not set |
@@ -1093,10 +1066,12 @@ CONFIG_SSB_POSSIBLE=y | |||
1093 | # Multifunction device drivers | 1066 | # Multifunction device drivers |
1094 | # | 1067 | # |
1095 | # CONFIG_MFD_CORE is not set | 1068 | # CONFIG_MFD_CORE is not set |
1069 | # CONFIG_MFD_88PM860X is not set | ||
1096 | # CONFIG_MFD_SM501 is not set | 1070 | # CONFIG_MFD_SM501 is not set |
1097 | # CONFIG_MFD_ASIC3 is not set | 1071 | # CONFIG_MFD_ASIC3 is not set |
1098 | # CONFIG_HTC_EGPIO is not set | 1072 | # CONFIG_HTC_EGPIO is not set |
1099 | # CONFIG_HTC_PASIC3 is not set | 1073 | # CONFIG_HTC_PASIC3 is not set |
1074 | # CONFIG_HTC_I2CPLD is not set | ||
1100 | # CONFIG_TPS65010 is not set | 1075 | # CONFIG_TPS65010 is not set |
1101 | # CONFIG_TWL4030_CORE is not set | 1076 | # CONFIG_TWL4030_CORE is not set |
1102 | # CONFIG_MFD_TMIO is not set | 1077 | # CONFIG_MFD_TMIO is not set |
@@ -1105,22 +1080,25 @@ CONFIG_SSB_POSSIBLE=y | |||
1105 | # CONFIG_MFD_TC6393XB is not set | 1080 | # CONFIG_MFD_TC6393XB is not set |
1106 | CONFIG_PMIC_DA903X=y | 1081 | CONFIG_PMIC_DA903X=y |
1107 | # CONFIG_PMIC_ADP5520 is not set | 1082 | # CONFIG_PMIC_ADP5520 is not set |
1083 | # CONFIG_MFD_MAX8925 is not set | ||
1108 | # CONFIG_MFD_WM8400 is not set | 1084 | # CONFIG_MFD_WM8400 is not set |
1109 | # CONFIG_MFD_WM831X is not set | 1085 | # CONFIG_MFD_WM831X is not set |
1110 | # CONFIG_MFD_WM8350_I2C is not set | 1086 | # CONFIG_MFD_WM8350_I2C is not set |
1087 | # CONFIG_MFD_WM8994 is not set | ||
1111 | # CONFIG_MFD_PCF50633 is not set | 1088 | # CONFIG_MFD_PCF50633 is not set |
1112 | # CONFIG_MFD_MC13783 is not set | 1089 | # CONFIG_MFD_MC13783 is not set |
1113 | # CONFIG_AB3100_CORE is not set | 1090 | # CONFIG_AB3100_CORE is not set |
1114 | # CONFIG_EZX_PCAP is not set | 1091 | # CONFIG_EZX_PCAP is not set |
1115 | # CONFIG_MFD_88PM8607 is not set | ||
1116 | # CONFIG_AB4500_CORE is not set | 1092 | # CONFIG_AB4500_CORE is not set |
1117 | CONFIG_REGULATOR=y | 1093 | CONFIG_REGULATOR=y |
1118 | CONFIG_REGULATOR_DEBUG=y | 1094 | CONFIG_REGULATOR_DEBUG=y |
1095 | # CONFIG_REGULATOR_DUMMY is not set | ||
1119 | # CONFIG_REGULATOR_FIXED_VOLTAGE is not set | 1096 | # CONFIG_REGULATOR_FIXED_VOLTAGE is not set |
1120 | CONFIG_REGULATOR_VIRTUAL_CONSUMER=y | 1097 | CONFIG_REGULATOR_VIRTUAL_CONSUMER=y |
1121 | CONFIG_REGULATOR_USERSPACE_CONSUMER=y | 1098 | CONFIG_REGULATOR_USERSPACE_CONSUMER=y |
1122 | # CONFIG_REGULATOR_BQ24022 is not set | 1099 | # CONFIG_REGULATOR_BQ24022 is not set |
1123 | # CONFIG_REGULATOR_MAX1586 is not set | 1100 | # CONFIG_REGULATOR_MAX1586 is not set |
1101 | # CONFIG_REGULATOR_MAX8649 is not set | ||
1124 | # CONFIG_REGULATOR_MAX8660 is not set | 1102 | # CONFIG_REGULATOR_MAX8660 is not set |
1125 | CONFIG_REGULATOR_DA903X=y | 1103 | CONFIG_REGULATOR_DA903X=y |
1126 | # CONFIG_REGULATOR_LP3971 is not set | 1104 | # CONFIG_REGULATOR_LP3971 is not set |
@@ -1218,6 +1196,7 @@ CONFIG_VIDEO_IR_I2C=y | |||
1218 | # CONFIG_VIDEO_SAA7191 is not set | 1196 | # CONFIG_VIDEO_SAA7191 is not set |
1219 | # CONFIG_VIDEO_TVP514X is not set | 1197 | # CONFIG_VIDEO_TVP514X is not set |
1220 | # CONFIG_VIDEO_TVP5150 is not set | 1198 | # CONFIG_VIDEO_TVP5150 is not set |
1199 | # CONFIG_VIDEO_TVP7002 is not set | ||
1221 | # CONFIG_VIDEO_VPX3220 is not set | 1200 | # CONFIG_VIDEO_VPX3220 is not set |
1222 | 1201 | ||
1223 | # | 1202 | # |
@@ -1264,15 +1243,7 @@ CONFIG_SOC_CAMERA_MT9M111=y | |||
1264 | CONFIG_VIDEO_PXA27x=y | 1243 | CONFIG_VIDEO_PXA27x=y |
1265 | # CONFIG_VIDEO_SH_MOBILE_CEU is not set | 1244 | # CONFIG_VIDEO_SH_MOBILE_CEU is not set |
1266 | # CONFIG_V4L_USB_DRIVERS is not set | 1245 | # CONFIG_V4L_USB_DRIVERS is not set |
1267 | CONFIG_RADIO_ADAPTERS=y | 1246 | # CONFIG_RADIO_ADAPTERS is not set |
1268 | # CONFIG_I2C_SI4713 is not set | ||
1269 | # CONFIG_RADIO_SI4713 is not set | ||
1270 | # CONFIG_USB_DSBR is not set | ||
1271 | # CONFIG_RADIO_SI470X is not set | ||
1272 | # CONFIG_USB_MR800 is not set | ||
1273 | CONFIG_RADIO_TEA5764=y | ||
1274 | CONFIG_RADIO_TEA5764_XTAL=y | ||
1275 | # CONFIG_RADIO_TEF6862 is not set | ||
1276 | # CONFIG_DAB is not set | 1247 | # CONFIG_DAB is not set |
1277 | 1248 | ||
1278 | # | 1249 | # |
@@ -1398,8 +1369,6 @@ CONFIG_HID=y | |||
1398 | # | 1369 | # |
1399 | # Special HID drivers | 1370 | # Special HID drivers |
1400 | # | 1371 | # |
1401 | CONFIG_HID_APPLE=m | ||
1402 | # CONFIG_HID_WACOM is not set | ||
1403 | CONFIG_USB_SUPPORT=y | 1372 | CONFIG_USB_SUPPORT=y |
1404 | CONFIG_USB_ARCH_HAS_HCD=y | 1373 | CONFIG_USB_ARCH_HAS_HCD=y |
1405 | CONFIG_USB_ARCH_HAS_OHCI=y | 1374 | CONFIG_USB_ARCH_HAS_OHCI=y |
@@ -1477,7 +1446,6 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y | |||
1477 | # CONFIG_USB_RIO500 is not set | 1446 | # CONFIG_USB_RIO500 is not set |
1478 | # CONFIG_USB_LEGOTOWER is not set | 1447 | # CONFIG_USB_LEGOTOWER is not set |
1479 | # CONFIG_USB_LCD is not set | 1448 | # CONFIG_USB_LCD is not set |
1480 | # CONFIG_USB_BERRY_CHARGE is not set | ||
1481 | # CONFIG_USB_LED is not set | 1449 | # CONFIG_USB_LED is not set |
1482 | # CONFIG_USB_CYPRESS_CY7C63 is not set | 1450 | # CONFIG_USB_CYPRESS_CY7C63 is not set |
1483 | # CONFIG_USB_CYTHERM is not set | 1451 | # CONFIG_USB_CYTHERM is not set |
@@ -1489,7 +1457,6 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y | |||
1489 | # CONFIG_USB_IOWARRIOR is not set | 1457 | # CONFIG_USB_IOWARRIOR is not set |
1490 | # CONFIG_USB_TEST is not set | 1458 | # CONFIG_USB_TEST is not set |
1491 | # CONFIG_USB_ISIGHTFW is not set | 1459 | # CONFIG_USB_ISIGHTFW is not set |
1492 | # CONFIG_USB_VST is not set | ||
1493 | CONFIG_USB_GADGET=y | 1460 | CONFIG_USB_GADGET=y |
1494 | # CONFIG_USB_GADGET_DEBUG is not set | 1461 | # CONFIG_USB_GADGET_DEBUG is not set |
1495 | # CONFIG_USB_GADGET_DEBUG_FILES is not set | 1462 | # CONFIG_USB_GADGET_DEBUG_FILES is not set |
@@ -1529,6 +1496,7 @@ CONFIG_USB_ETH=y | |||
1529 | # CONFIG_USB_MIDI_GADGET is not set | 1496 | # CONFIG_USB_MIDI_GADGET is not set |
1530 | # CONFIG_USB_G_PRINTER is not set | 1497 | # CONFIG_USB_G_PRINTER is not set |
1531 | # CONFIG_USB_CDC_COMPOSITE is not set | 1498 | # CONFIG_USB_CDC_COMPOSITE is not set |
1499 | # CONFIG_USB_G_NOKIA is not set | ||
1532 | # CONFIG_USB_G_MULTI is not set | 1500 | # CONFIG_USB_G_MULTI is not set |
1533 | 1501 | ||
1534 | # | 1502 | # |
@@ -1555,8 +1523,6 @@ CONFIG_SDIO_UART=m | |||
1555 | # | 1523 | # |
1556 | CONFIG_MMC_PXA=y | 1524 | CONFIG_MMC_PXA=y |
1557 | # CONFIG_MMC_SDHCI is not set | 1525 | # CONFIG_MMC_SDHCI is not set |
1558 | # CONFIG_MMC_AT91 is not set | ||
1559 | # CONFIG_MMC_ATMELMCI is not set | ||
1560 | CONFIG_MMC_SPI=y | 1526 | CONFIG_MMC_SPI=y |
1561 | # CONFIG_MEMSTICK is not set | 1527 | # CONFIG_MEMSTICK is not set |
1562 | CONFIG_NEW_LEDS=y | 1528 | CONFIG_NEW_LEDS=y |
@@ -1574,11 +1540,11 @@ CONFIG_LEDS_LP3944=y | |||
1574 | # CONFIG_LEDS_REGULATOR is not set | 1540 | # CONFIG_LEDS_REGULATOR is not set |
1575 | # CONFIG_LEDS_BD2802 is not set | 1541 | # CONFIG_LEDS_BD2802 is not set |
1576 | # CONFIG_LEDS_LT3593 is not set | 1542 | # CONFIG_LEDS_LT3593 is not set |
1543 | CONFIG_LEDS_TRIGGERS=y | ||
1577 | 1544 | ||
1578 | # | 1545 | # |
1579 | # LED Triggers | 1546 | # LED Triggers |
1580 | # | 1547 | # |
1581 | CONFIG_LEDS_TRIGGERS=y | ||
1582 | CONFIG_LEDS_TRIGGER_TIMER=y | 1548 | CONFIG_LEDS_TRIGGER_TIMER=y |
1583 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 1549 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
1584 | CONFIG_LEDS_TRIGGER_BACKLIGHT=y | 1550 | CONFIG_LEDS_TRIGGER_BACKLIGHT=y |
@@ -1656,7 +1622,7 @@ CONFIG_RTC_INTF_DEV=y | |||
1656 | # on-CPU RTC drivers | 1622 | # on-CPU RTC drivers |
1657 | # | 1623 | # |
1658 | # CONFIG_RTC_DRV_SA1100 is not set | 1624 | # CONFIG_RTC_DRV_SA1100 is not set |
1659 | # CONFIG_RTC_DRV_PXA is not set | 1625 | CONFIG_RTC_DRV_PXA=y |
1660 | # CONFIG_DMADEVICES is not set | 1626 | # CONFIG_DMADEVICES is not set |
1661 | # CONFIG_AUXDISPLAY is not set | 1627 | # CONFIG_AUXDISPLAY is not set |
1662 | # CONFIG_UIO is not set | 1628 | # CONFIG_UIO is not set |
@@ -1681,19 +1647,10 @@ CONFIG_EXT3_FS_XATTR=y | |||
1681 | CONFIG_JBD=m | 1647 | CONFIG_JBD=m |
1682 | # CONFIG_JBD_DEBUG is not set | 1648 | # CONFIG_JBD_DEBUG is not set |
1683 | CONFIG_FS_MBCACHE=m | 1649 | CONFIG_FS_MBCACHE=m |
1684 | CONFIG_REISERFS_FS=m | 1650 | # CONFIG_REISERFS_FS is not set |
1685 | # CONFIG_REISERFS_CHECK is not set | ||
1686 | # CONFIG_REISERFS_PROC_INFO is not set | ||
1687 | CONFIG_REISERFS_FS_XATTR=y | ||
1688 | CONFIG_REISERFS_FS_POSIX_ACL=y | ||
1689 | CONFIG_REISERFS_FS_SECURITY=y | ||
1690 | # CONFIG_JFS_FS is not set | 1651 | # CONFIG_JFS_FS is not set |
1691 | CONFIG_FS_POSIX_ACL=y | 1652 | CONFIG_FS_POSIX_ACL=y |
1692 | CONFIG_XFS_FS=m | 1653 | # CONFIG_XFS_FS is not set |
1693 | # CONFIG_XFS_QUOTA is not set | ||
1694 | # CONFIG_XFS_POSIX_ACL is not set | ||
1695 | # CONFIG_XFS_RT is not set | ||
1696 | # CONFIG_XFS_DEBUG is not set | ||
1697 | # CONFIG_OCFS2_FS is not set | 1654 | # CONFIG_OCFS2_FS is not set |
1698 | # CONFIG_BTRFS_FS is not set | 1655 | # CONFIG_BTRFS_FS is not set |
1699 | # CONFIG_NILFS2_FS is not set | 1656 | # CONFIG_NILFS2_FS is not set |
@@ -1716,9 +1673,7 @@ CONFIG_CUSE=m | |||
1716 | # | 1673 | # |
1717 | # CD-ROM/DVD Filesystems | 1674 | # CD-ROM/DVD Filesystems |
1718 | # | 1675 | # |
1719 | CONFIG_ISO9660_FS=m | 1676 | # CONFIG_ISO9660_FS is not set |
1720 | CONFIG_JOLIET=y | ||
1721 | CONFIG_ZISOFS=y | ||
1722 | # CONFIG_UDF_FS is not set | 1677 | # CONFIG_UDF_FS is not set |
1723 | 1678 | ||
1724 | # | 1679 | # |
@@ -1750,12 +1705,14 @@ CONFIG_MISC_FILESYSTEMS=y | |||
1750 | # CONFIG_BEFS_FS is not set | 1705 | # CONFIG_BEFS_FS is not set |
1751 | # CONFIG_BFS_FS is not set | 1706 | # CONFIG_BFS_FS is not set |
1752 | # CONFIG_EFS_FS is not set | 1707 | # CONFIG_EFS_FS is not set |
1753 | CONFIG_JFFS2_FS=m | 1708 | CONFIG_JFFS2_FS=y |
1754 | CONFIG_JFFS2_FS_DEBUG=0 | 1709 | CONFIG_JFFS2_FS_DEBUG=0 |
1755 | CONFIG_JFFS2_FS_WRITEBUFFER=y | 1710 | CONFIG_JFFS2_FS_WRITEBUFFER=y |
1756 | # CONFIG_JFFS2_FS_WBUF_VERIFY is not set | 1711 | CONFIG_JFFS2_FS_WBUF_VERIFY=y |
1757 | # CONFIG_JFFS2_SUMMARY is not set | 1712 | CONFIG_JFFS2_SUMMARY=y |
1758 | # CONFIG_JFFS2_FS_XATTR is not set | 1713 | CONFIG_JFFS2_FS_XATTR=y |
1714 | CONFIG_JFFS2_FS_POSIX_ACL=y | ||
1715 | CONFIG_JFFS2_FS_SECURITY=y | ||
1759 | CONFIG_JFFS2_COMPRESSION_OPTIONS=y | 1716 | CONFIG_JFFS2_COMPRESSION_OPTIONS=y |
1760 | CONFIG_JFFS2_ZLIB=y | 1717 | CONFIG_JFFS2_ZLIB=y |
1761 | CONFIG_JFFS2_LZO=y | 1718 | CONFIG_JFFS2_LZO=y |
@@ -1765,6 +1722,7 @@ CONFIG_JFFS2_RUBIN=y | |||
1765 | CONFIG_JFFS2_CMODE_PRIORITY=y | 1722 | CONFIG_JFFS2_CMODE_PRIORITY=y |
1766 | # CONFIG_JFFS2_CMODE_SIZE is not set | 1723 | # CONFIG_JFFS2_CMODE_SIZE is not set |
1767 | # CONFIG_JFFS2_CMODE_FAVOURLZO is not set | 1724 | # CONFIG_JFFS2_CMODE_FAVOURLZO is not set |
1725 | # CONFIG_LOGFS is not set | ||
1768 | CONFIG_CRAMFS=m | 1726 | CONFIG_CRAMFS=m |
1769 | CONFIG_SQUASHFS=m | 1727 | CONFIG_SQUASHFS=m |
1770 | # CONFIG_SQUASHFS_EMBEDDED is not set | 1728 | # CONFIG_SQUASHFS_EMBEDDED is not set |
@@ -1802,6 +1760,7 @@ CONFIG_SUNRPC=y | |||
1802 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 1760 | # CONFIG_RPCSEC_GSS_SPKM3 is not set |
1803 | CONFIG_SMB_FS=m | 1761 | CONFIG_SMB_FS=m |
1804 | # CONFIG_SMB_NLS_DEFAULT is not set | 1762 | # CONFIG_SMB_NLS_DEFAULT is not set |
1763 | # CONFIG_CEPH_FS is not set | ||
1805 | CONFIG_CIFS=m | 1764 | CONFIG_CIFS=m |
1806 | CONFIG_CIFS_STATS=y | 1765 | CONFIG_CIFS_STATS=y |
1807 | # CONFIG_CIFS_STATS2 is not set | 1766 | # CONFIG_CIFS_STATS2 is not set |
@@ -1895,6 +1854,7 @@ CONFIG_DEBUG_SPINLOCK=y | |||
1895 | CONFIG_DEBUG_MUTEXES=y | 1854 | CONFIG_DEBUG_MUTEXES=y |
1896 | CONFIG_DEBUG_LOCK_ALLOC=y | 1855 | CONFIG_DEBUG_LOCK_ALLOC=y |
1897 | CONFIG_PROVE_LOCKING=y | 1856 | CONFIG_PROVE_LOCKING=y |
1857 | # CONFIG_PROVE_RCU is not set | ||
1898 | CONFIG_LOCKDEP=y | 1858 | CONFIG_LOCKDEP=y |
1899 | # CONFIG_LOCK_STAT is not set | 1859 | # CONFIG_LOCK_STAT is not set |
1900 | # CONFIG_DEBUG_LOCKDEP is not set | 1860 | # CONFIG_DEBUG_LOCKDEP is not set |
@@ -1918,6 +1878,7 @@ CONFIG_DEBUG_BUGVERBOSE=y | |||
1918 | # CONFIG_BACKTRACE_SELF_TEST is not set | 1878 | # CONFIG_BACKTRACE_SELF_TEST is not set |
1919 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | 1879 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set |
1920 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | 1880 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set |
1881 | # CONFIG_LKDTM is not set | ||
1921 | # CONFIG_FAULT_INJECTION is not set | 1882 | # CONFIG_FAULT_INJECTION is not set |
1922 | # CONFIG_LATENCYTOP is not set | 1883 | # CONFIG_LATENCYTOP is not set |
1923 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | 1884 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set |
@@ -2061,9 +2022,9 @@ CONFIG_CRC32=y | |||
2061 | CONFIG_CRC7=y | 2022 | CONFIG_CRC7=y |
2062 | CONFIG_LIBCRC32C=m | 2023 | CONFIG_LIBCRC32C=m |
2063 | CONFIG_ZLIB_INFLATE=y | 2024 | CONFIG_ZLIB_INFLATE=y |
2064 | CONFIG_ZLIB_DEFLATE=m | 2025 | CONFIG_ZLIB_DEFLATE=y |
2065 | CONFIG_LZO_COMPRESS=m | 2026 | CONFIG_LZO_COMPRESS=y |
2066 | CONFIG_LZO_DECOMPRESS=m | 2027 | CONFIG_LZO_DECOMPRESS=y |
2067 | CONFIG_DECOMPRESS_GZIP=y | 2028 | CONFIG_DECOMPRESS_GZIP=y |
2068 | CONFIG_DECOMPRESS_BZIP2=y | 2029 | CONFIG_DECOMPRESS_BZIP2=y |
2069 | CONFIG_DECOMPRESS_LZMA=y | 2030 | CONFIG_DECOMPRESS_LZMA=y |
@@ -2075,3 +2036,4 @@ CONFIG_HAS_IOMEM=y | |||
2075 | CONFIG_HAS_IOPORT=y | 2036 | CONFIG_HAS_IOPORT=y |
2076 | CONFIG_HAS_DMA=y | 2037 | CONFIG_HAS_DMA=y |
2077 | CONFIG_NLATTR=y | 2038 | CONFIG_NLATTR=y |
2039 | CONFIG_GENERIC_ATOMIC64=y | ||
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e8ddec2cb158..a0162fa94564 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -24,7 +24,7 @@ | |||
24 | * strex/ldrex monitor on some implementations. The reason we can use it for | 24 | * strex/ldrex monitor on some implementations. The reason we can use it for |
25 | * atomic_set() is the clrex or dummy strex done on every exception return. | 25 | * atomic_set() is the clrex or dummy strex done on every exception return. |
26 | */ | 26 | */ |
27 | #define atomic_read(v) ((v)->counter) | 27 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
28 | #define atomic_set(v,i) (((v)->counter) = (i)) | 28 | #define atomic_set(v,i) (((v)->counter) = (i)) |
29 | 29 | ||
30 | #if __LINUX_ARM_ARCH__ >= 6 | 30 | #if __LINUX_ARM_ARCH__ >= 6 |
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 0d08d4170b64..4656a24058d2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -371,6 +371,10 @@ static inline void __flush_icache_all(void) | |||
371 | #ifdef CONFIG_ARM_ERRATA_411920 | 371 | #ifdef CONFIG_ARM_ERRATA_411920 |
372 | extern void v6_icache_inval_all(void); | 372 | extern void v6_icache_inval_all(void); |
373 | v6_icache_inval_all(); | 373 | v6_icache_inval_all(); |
374 | #elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7 | ||
375 | asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n" | ||
376 | : | ||
377 | : "r" (0)); | ||
374 | #else | 378 | #else |
375 | asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" | 379 | asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" |
376 | : | 380 | : |
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index bff056489cc1..51662feb9f1d 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/ptrace.h> | 9 | #include <asm/ptrace.h> |
10 | #include <asm/user.h> | 10 | #include <asm/user.h> |
11 | 11 | ||
12 | struct task_struct; | ||
13 | |||
12 | typedef unsigned long elf_greg_t; | 14 | typedef unsigned long elf_greg_t; |
13 | typedef unsigned long elf_freg_t[3]; | 15 | typedef unsigned long elf_freg_t[3]; |
14 | 16 | ||
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 7be0978b2625..634f357be6bb 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -1,6 +1,23 @@ | |||
1 | #ifndef __ASMARM_SMP_TWD_H | 1 | #ifndef __ASMARM_SMP_TWD_H |
2 | #define __ASMARM_SMP_TWD_H | 2 | #define __ASMARM_SMP_TWD_H |
3 | 3 | ||
4 | #define TWD_TIMER_LOAD 0x00 | ||
5 | #define TWD_TIMER_COUNTER 0x04 | ||
6 | #define TWD_TIMER_CONTROL 0x08 | ||
7 | #define TWD_TIMER_INTSTAT 0x0C | ||
8 | |||
9 | #define TWD_WDOG_LOAD 0x20 | ||
10 | #define TWD_WDOG_COUNTER 0x24 | ||
11 | #define TWD_WDOG_CONTROL 0x28 | ||
12 | #define TWD_WDOG_INTSTAT 0x2C | ||
13 | #define TWD_WDOG_RESETSTAT 0x30 | ||
14 | #define TWD_WDOG_DISABLE 0x34 | ||
15 | |||
16 | #define TWD_TIMER_CONTROL_ENABLE (1 << 0) | ||
17 | #define TWD_TIMER_CONTROL_ONESHOT (0 << 1) | ||
18 | #define TWD_TIMER_CONTROL_PERIODIC (1 << 1) | ||
19 | #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) | ||
20 | |||
4 | struct clock_event_device; | 21 | struct clock_event_device; |
5 | 22 | ||
6 | extern void __iomem *twd_base; | 23 | extern void __iomem *twd_base; |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index e085e2c545eb..bd863d8608cd 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -46,6 +46,9 @@ | |||
46 | #define TLB_V7_UIS_FULL (1 << 20) | 46 | #define TLB_V7_UIS_FULL (1 << 20) |
47 | #define TLB_V7_UIS_ASID (1 << 21) | 47 | #define TLB_V7_UIS_ASID (1 << 21) |
48 | 48 | ||
49 | /* Inner Shareable BTB operation (ARMv7 MP extensions) */ | ||
50 | #define TLB_V7_IS_BTB (1 << 22) | ||
51 | |||
49 | #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ | 52 | #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ |
50 | #define TLB_DCLEAN (1 << 30) | 53 | #define TLB_DCLEAN (1 << 30) |
51 | #define TLB_WB (1 << 31) | 54 | #define TLB_WB (1 << 31) |
@@ -183,7 +186,7 @@ | |||
183 | #endif | 186 | #endif |
184 | 187 | ||
185 | #ifdef CONFIG_SMP | 188 | #ifdef CONFIG_SMP |
186 | #define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ | 189 | #define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ |
187 | TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) | 190 | TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) |
188 | #else | 191 | #else |
189 | #define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ | 192 | #define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ |
@@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void) | |||
339 | dsb(); | 342 | dsb(); |
340 | isb(); | 343 | isb(); |
341 | } | 344 | } |
345 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
346 | /* flush the branch target cache */ | ||
347 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
348 | dsb(); | ||
349 | isb(); | ||
350 | } | ||
342 | } | 351 | } |
343 | 352 | ||
344 | static inline void local_flush_tlb_mm(struct mm_struct *mm) | 353 | static inline void local_flush_tlb_mm(struct mm_struct *mm) |
@@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) | |||
376 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); | 385 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); |
377 | dsb(); | 386 | dsb(); |
378 | } | 387 | } |
388 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
389 | /* flush the branch target cache */ | ||
390 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
391 | dsb(); | ||
392 | isb(); | ||
393 | } | ||
379 | } | 394 | } |
380 | 395 | ||
381 | static inline void | 396 | static inline void |
@@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | |||
416 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); | 431 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); |
417 | dsb(); | 432 | dsb(); |
418 | } | 433 | } |
434 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
435 | /* flush the branch target cache */ | ||
436 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
437 | dsb(); | ||
438 | isb(); | ||
439 | } | ||
419 | } | 440 | } |
420 | 441 | ||
421 | static inline void local_flush_tlb_kernel_page(unsigned long kaddr) | 442 | static inline void local_flush_tlb_kernel_page(unsigned long kaddr) |
@@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) | |||
454 | dsb(); | 475 | dsb(); |
455 | isb(); | 476 | isb(); |
456 | } | 477 | } |
478 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
479 | /* flush the branch target cache */ | ||
480 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
481 | dsb(); | ||
482 | isb(); | ||
483 | } | ||
457 | } | 484 | } |
458 | 485 | ||
459 | /* | 486 | /* |
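The tlbflush.h hunks above add a TLB_V7_IS_BTB flag so that, on SMP, the branch target buffer is invalidated with the Inner Shareable operation (c7, c1, 6), which the ARMv7 MP extensions broadcast to the other cores, instead of the CPU-local operation (c7, c5, 6). A minimal sketch of the flag-gated pattern, using a hypothetical helper name (the patch itself open-codes this in each local_flush_tlb_* routine); dsb()/isb() and the TLB_* flags are the ones visible in the hunks:

/* Hypothetical helper, not the kernel code. */
static inline void flush_btb(unsigned int tlb_flags)
{
	const unsigned int zero = 0;

	if (tlb_flags & TLB_V7_IS_BTB) {
		/* broadcast invalidate, Inner Shareable domain */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	} else if (tlb_flags & TLB_BTB) {
		/* invalidate on this CPU only */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}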
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index e6a0fb0f392e..7ee48e7f8f31 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -676,10 +676,10 @@ do_fpe: | |||
676 | * lr = unrecognised FP instruction return address | 676 | * lr = unrecognised FP instruction return address |
677 | */ | 677 | */ |
678 | 678 | ||
679 | .data | 679 | .pushsection .data |
680 | ENTRY(fp_enter) | 680 | ENTRY(fp_enter) |
681 | .word no_fp | 681 | .word no_fp |
682 | .text | 682 | .popsection |
683 | 683 | ||
684 | ENTRY(no_fp) | 684 | ENTRY(no_fp) |
685 | mov pc, lr | 685 | mov pc, lr |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 577543f3857f..a01194e583ff 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -86,6 +86,12 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
86 | return PTR_ERR(idle); | 86 | return PTR_ERR(idle); |
87 | } | 87 | } |
88 | ci->idle = idle; | 88 | ci->idle = idle; |
89 | } else { | ||
90 | /* | ||
91 | * Since this idle thread is being re-used, call | ||
92 | * init_idle() to reinitialize the thread structure. | ||
93 | */ | ||
94 | init_idle(idle, cpu); | ||
89 | } | 95 | } |
90 | 96 | ||
91 | /* | 97 | /* |
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index ea02a7b1c244..7c5f0c024db7 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -21,23 +21,6 @@ | |||
21 | #include <asm/smp_twd.h> | 21 | #include <asm/smp_twd.h> |
22 | #include <asm/hardware/gic.h> | 22 | #include <asm/hardware/gic.h> |
23 | 23 | ||
24 | #define TWD_TIMER_LOAD 0x00 | ||
25 | #define TWD_TIMER_COUNTER 0x04 | ||
26 | #define TWD_TIMER_CONTROL 0x08 | ||
27 | #define TWD_TIMER_INTSTAT 0x0C | ||
28 | |||
29 | #define TWD_WDOG_LOAD 0x20 | ||
30 | #define TWD_WDOG_COUNTER 0x24 | ||
31 | #define TWD_WDOG_CONTROL 0x28 | ||
32 | #define TWD_WDOG_INTSTAT 0x2C | ||
33 | #define TWD_WDOG_RESETSTAT 0x30 | ||
34 | #define TWD_WDOG_DISABLE 0x34 | ||
35 | |||
36 | #define TWD_TIMER_CONTROL_ENABLE (1 << 0) | ||
37 | #define TWD_TIMER_CONTROL_ONESHOT (0 << 1) | ||
38 | #define TWD_TIMER_CONTROL_PERIODIC (1 << 1) | ||
39 | #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) | ||
40 | |||
41 | /* set up by the platform code */ | 24 | /* set up by the platform code */ |
42 | void __iomem *twd_base; | 25 | void __iomem *twd_base; |
43 | 26 | ||
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index 5e3f99620c04..14a0d988c82c 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S | |||
@@ -45,6 +45,7 @@ USER( strnebt r2, [r0]) | |||
45 | mov r0, #0 | 45 | mov r0, #0 |
46 | ldmfd sp!, {r1, pc} | 46 | ldmfd sp!, {r1, pc} |
47 | ENDPROC(__clear_user) | 47 | ENDPROC(__clear_user) |
48 | ENDPROC(__clear_user_std) | ||
48 | 49 | ||
49 | .pushsection .fixup,"ax" | 50 | .pushsection .fixup,"ax" |
50 | .align 0 | 51 | .align 0 |
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 027b69bdbad1..d066df686e17 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S | |||
@@ -93,6 +93,7 @@ WEAK(__copy_to_user) | |||
93 | #include "copy_template.S" | 93 | #include "copy_template.S" |
94 | 94 | ||
95 | ENDPROC(__copy_to_user) | 95 | ENDPROC(__copy_to_user) |
96 | ENDPROC(__copy_to_user_std) | ||
96 | 97 | ||
97 | .pushsection .fixup,"ax" | 98 | .pushsection .fixup,"ax" |
98 | .align 0 | 99 | .align 0 |
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c index 122e61a9f505..e8cb982f5e8e 100644 --- a/arch/arm/mach-davinci/da830.c +++ b/arch/arm/mach-davinci/da830.c | |||
@@ -410,7 +410,7 @@ static struct clk_lookup da830_clks[] = { | |||
410 | CLK("davinci-mcasp.0", NULL, &mcasp0_clk), | 410 | CLK("davinci-mcasp.0", NULL, &mcasp0_clk), |
411 | CLK("davinci-mcasp.1", NULL, &mcasp1_clk), | 411 | CLK("davinci-mcasp.1", NULL, &mcasp1_clk), |
412 | CLK("davinci-mcasp.2", NULL, &mcasp2_clk), | 412 | CLK("davinci-mcasp.2", NULL, &mcasp2_clk), |
413 | CLK("musb_hdrc", NULL, &usb20_clk), | 413 | CLK(NULL, "usb20", &usb20_clk), |
414 | CLK(NULL, "aemif", &aemif_clk), | 414 | CLK(NULL, "aemif", &aemif_clk), |
415 | CLK(NULL, "aintc", &aintc_clk), | 415 | CLK(NULL, "aintc", &aintc_clk), |
416 | CLK(NULL, "secu_mgr", &secu_mgr_clk), | 416 | CLK(NULL, "secu_mgr", &secu_mgr_clk), |
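The da830 clock table above stops keying the USB 2.0 clock on the "musb_hdrc" device name and registers it under the connection id "usb20" instead, so it can be looked up by id rather than by device. A hypothetical consumer under that assumption:

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical helper: fetch the clock by connection id and enable it. */
static int example_enable_usb20_clk(void)
{
	struct clk *clk = clk_get(NULL, "usb20");

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return clk_enable(clk);
}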
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c index 8f85f73b83a8..1ee6ce4087b8 100644 --- a/arch/arm/mach-mx5/clock-mx51.c +++ b/arch/arm/mach-mx5/clock-mx51.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | 17 | ||
18 | #include <asm/clkdev.h> | 18 | #include <asm/clkdev.h> |
19 | #include <asm/div64.h> | ||
19 | 20 | ||
20 | #include <mach/hardware.h> | 21 | #include <mach/hardware.h> |
21 | #include <mach/common.h> | 22 | #include <mach/common.h> |
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h index 811743c56147..5f2ba8d9015c 100644 --- a/arch/arm/mach-pxa/include/mach/colibri.h +++ b/arch/arm/mach-pxa/include/mach/colibri.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _COLIBRI_H_ | 2 | #define _COLIBRI_H_ |
3 | 3 | ||
4 | #include <net/ax88796.h> | 4 | #include <net/ax88796.h> |
5 | #include <mach/mfp.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * common settings for all modules | 8 | * common settings for all modules |
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h index 7515757d6911..3d8d8cb09685 100644 --- a/arch/arm/mach-pxa/include/mach/hardware.h +++ b/arch/arm/mach-pxa/include/mach/hardware.h | |||
@@ -202,7 +202,7 @@ | |||
202 | #define __cpu_is_pxa950(id) \ | 202 | #define __cpu_is_pxa950(id) \ |
203 | ({ \ | 203 | ({ \ |
204 | unsigned int _id = (id) >> 4 & 0xfff; \ | 204 | unsigned int _id = (id) >> 4 & 0xfff; \ |
205 | id == 0x697; \ | 205 | _id == 0x697; \ |
206 | }) | 206 | }) |
207 | #else | 207 | #else |
208 | #define __cpu_is_pxa950(id) (0) | 208 | #define __cpu_is_pxa950(id) (0) |
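The one-character pxa950 fix above is easy to miss: the macro computed the masked part number into _id but then compared the unmasked id, so __cpu_is_pxa950() could only match a raw value of exactly 0x697 and in practice always evaluated false. A standalone illustration (GCC statement expression, as in the original header; the CPUID value is made up purely to exercise the macro):

#include <stdio.h>

#define __cpu_is_pxa950(id)			\
({						\
	unsigned int _id = (id) >> 4 & 0xfff;	\
	_id == 0x697;				\
})

int main(void)
{
	unsigned int cpuid = 0x56906970;	/* hypothetical: part field is 0x697 */

	/* prints 1 with the fixed macro; the old "id == 0x697" test
	 * would print 0 for any realistic CPUID value */
	printf("%d\n", __cpu_is_pxa950(cpuid));
	return 0;
}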
diff --git a/arch/arm/mach-pxa/include/mach/regs-u2d.h b/arch/arm/mach-pxa/include/mach/regs-u2d.h index 44b0b20b69a4..c15c0c57de08 100644 --- a/arch/arm/mach-pxa/include/mach/regs-u2d.h +++ b/arch/arm/mach-pxa/include/mach/regs-u2d.h | |||
@@ -166,7 +166,8 @@ | |||
166 | #define U2DMACSR_BUSERRTYPE (7 << 10) /* PX Bus Error Type */ | 166 | #define U2DMACSR_BUSERRTYPE (7 << 10) /* PX Bus Error Type */ |
167 | #define U2DMACSR_EORINTR (1 << 9) /* End Of Receive */ | 167 | #define U2DMACSR_EORINTR (1 << 9) /* End Of Receive */ |
168 | #define U2DMACSR_REQPEND (1 << 8) /* Request Pending */ | 168 | #define U2DMACSR_REQPEND (1 << 8) /* Request Pending */ |
169 | #define U2DMACSR_RASINTR (1 << 4) /* Request After Channel Stopped (read / write 1 clear) */#define U2DMACSR_STOPINTR (1 << 3) /* Stop Interrupt (read only) */ | 169 | #define U2DMACSR_RASINTR (1 << 4) /* Request After Channel Stopped (read / write 1 clear) */ |
170 | #define U2DMACSR_STOPINTR (1 << 3) /* Stop Interrupt (read only) */ | ||
170 | #define U2DMACSR_ENDINTR (1 << 2) /* End Interrupt (read / write 1 clear) */ | 171 | #define U2DMACSR_ENDINTR (1 << 2) /* End Interrupt (read / write 1 clear) */ |
171 | #define U2DMACSR_STARTINTR (1 << 1) /* Start Interrupt (read / write 1 clear) */ | 172 | #define U2DMACSR_STARTINTR (1 << 1) /* Start Interrupt (read / write 1 clear) */ |
172 | #define U2DMACSR_BUSERRINTR (1 << 0) /* Bus Error Interrupt (read / write 1 clear) */ | 173 | #define U2DMACSR_BUSERRINTR (1 << 0) /* Bus Error Interrupt (read / write 1 clear) */ |
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 44bb675e47f1..d12667bd9ebe 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c | |||
@@ -983,7 +983,7 @@ static void __init raumfeld_common_init(void) | |||
983 | int i; | 983 | int i; |
984 | 984 | ||
985 | for (i = 0; i < ARRAY_SIZE(gpio_keys_button); i++) | 985 | for (i = 0; i < ARRAY_SIZE(gpio_keys_button); i++) |
986 | if (!strcmp(gpio_keys_button[i].desc, "on/off button")) | 986 | if (!strcmp(gpio_keys_button[i].desc, "on_off button")) |
987 | gpio_keys_button[i].active_low = 1; | 987 | gpio_keys_button[i].active_low = 1; |
988 | } | 988 | } |
989 | 989 | ||
@@ -1009,8 +1009,7 @@ static void __init raumfeld_common_init(void) | |||
1009 | gpio_direction_output(GPIO_W2W_PDN, 0); | 1009 | gpio_direction_output(GPIO_W2W_PDN, 0); |
1010 | 1010 | ||
1011 | /* this can be used to switch off the device */ | 1011 | /* this can be used to switch off the device */ |
1012 | ret = gpio_request(GPIO_SHUTDOWN_SUPPLY, | 1012 | ret = gpio_request(GPIO_SHUTDOWN_SUPPLY, "supply shutdown"); |
1013 | "supply shutdown"); | ||
1014 | if (ret < 0) | 1013 | if (ret < 0) |
1015 | pr_warning("Unable to request GPIO_SHUTDOWN_SUPPLY\n"); | 1014 | pr_warning("Unable to request GPIO_SHUTDOWN_SUPPLY\n"); |
1016 | else | 1015 | else |
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 19b5109d9808..01bdd7500df4 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c | |||
@@ -363,7 +363,7 @@ static struct gpio_keys_button spitz_gpio_keys[] = { | |||
363 | .type = EV_PWR, | 363 | .type = EV_PWR, |
364 | .code = KEY_SUSPEND, | 364 | .code = KEY_SUSPEND, |
365 | .gpio = SPITZ_GPIO_ON_KEY, | 365 | .gpio = SPITZ_GPIO_ON_KEY, |
366 | .desc = "On/Off", | 366 | .desc = "On Off", |
367 | .wakeup = 1, | 367 | .wakeup = 1, |
368 | }, | 368 | }, |
369 | /* Two buttons detecting the lid state */ | 369 | /* Two buttons detecting the lid state */ |
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 9e0c5c3988a1..e90114a7e246 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/pm.h> | 34 | #include <linux/pm.h> |
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/gpio.h> | 36 | #include <linux/gpio.h> |
37 | #include <linux/jiffies.h> | ||
37 | #include <linux/i2c-gpio.h> | 38 | #include <linux/i2c-gpio.h> |
38 | #include <linux/serial_8250.h> | 39 | #include <linux/serial_8250.h> |
39 | #include <linux/smc91x.h> | 40 | #include <linux/smc91x.h> |
@@ -454,7 +455,7 @@ static struct i2c_gpio_platform_data i2c_bus_data = { | |||
454 | .sda_pin = VIPER_RTC_I2C_SDA_GPIO, | 455 | .sda_pin = VIPER_RTC_I2C_SDA_GPIO, |
455 | .scl_pin = VIPER_RTC_I2C_SCL_GPIO, | 456 | .scl_pin = VIPER_RTC_I2C_SCL_GPIO, |
456 | .udelay = 10, | 457 | .udelay = 10, |
457 | .timeout = 100, | 458 | .timeout = HZ, |
458 | }; | 459 | }; |
459 | 460 | ||
460 | static struct platform_device i2c_bus_device = { | 461 | static struct platform_device i2c_bus_device = { |
@@ -779,7 +780,7 @@ static void __init viper_tpm_init(void) | |||
779 | .sda_pin = VIPER_TPM_I2C_SDA_GPIO, | 780 | .sda_pin = VIPER_TPM_I2C_SDA_GPIO, |
780 | .scl_pin = VIPER_TPM_I2C_SCL_GPIO, | 781 | .scl_pin = VIPER_TPM_I2C_SCL_GPIO, |
781 | .udelay = 10, | 782 | .udelay = 10, |
782 | .timeout = 100, | 783 | .timeout = HZ, |
783 | }; | 784 | }; |
784 | char *errstr; | 785 | char *errstr; |
785 | 786 | ||
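Both i2c-gpio timeouts above change from the literal 100 to HZ because the field is expressed in jiffies: 100 jiffies is one second only on a HZ=100 kernel, whereas HZ is always one second of wall-clock time (hence the new <linux/jiffies.h> include). A short sketch with hypothetical GPIO numbers:

#include <linux/jiffies.h>
#include <linux/i2c-gpio.h>

#define EXAMPLE_SDA_GPIO	10	/* hypothetical pins */
#define EXAMPLE_SCL_GPIO	11

static struct i2c_gpio_platform_data example_i2c_data = {
	.sda_pin = EXAMPLE_SDA_GPIO,
	.scl_pin = EXAMPLE_SCL_GPIO,
	.udelay  = 10,
	.timeout = HZ,		/* one second regardless of CONFIG_HZ */
};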
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index b17d52f7cc48..fd4c52b7ccb6 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig | |||
@@ -57,7 +57,7 @@ config SA1100_COLLIE | |||
57 | config SA1100_H3100 | 57 | config SA1100_H3100 |
58 | bool "Compaq iPAQ H3100" | 58 | bool "Compaq iPAQ H3100" |
59 | select HTC_EGPIO | 59 | select HTC_EGPIO |
60 | select CPU_FREQ_SA1100 | 60 | select CPU_FREQ_SA1110 |
61 | help | 61 | help |
62 | Say Y here if you intend to run this kernel on the Compaq iPAQ | 62 | Say Y here if you intend to run this kernel on the Compaq iPAQ |
63 | H3100 handheld computer. Information about this machine and the | 63 | H3100 handheld computer. Information about this machine and the |
@@ -68,7 +68,7 @@ config SA1100_H3100 | |||
68 | config SA1100_H3600 | 68 | config SA1100_H3600 |
69 | bool "Compaq iPAQ H3600/H3700" | 69 | bool "Compaq iPAQ H3600/H3700" |
70 | select HTC_EGPIO | 70 | select HTC_EGPIO |
71 | select CPU_FREQ_SA1100 | 71 | select CPU_FREQ_SA1110 |
72 | help | 72 | help |
73 | Say Y here if you intend to run this kernel on the Compaq iPAQ | 73 | Say Y here if you intend to run this kernel on the Compaq iPAQ |
74 | H3600 handheld computer. Information about this machine and the | 74 | H3600 handheld computer. Information about this machine and the |
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c index 63b32b68b296..7252874d328b 100644 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ b/arch/arm/mach-sa1100/cpu-sa1110.c | |||
@@ -363,6 +363,9 @@ static int __init sa1110_clk_init(void) | |||
363 | struct sdram_params *sdram; | 363 | struct sdram_params *sdram; |
364 | const char *name = sdram_name; | 364 | const char *name = sdram_name; |
365 | 365 | ||
366 | if (!cpu_is_sa1110()) | ||
367 | return -ENODEV; | ||
368 | |||
366 | if (!name[0]) { | 369 | if (!name[0]) { |
367 | if (machine_is_assabet()) | 370 | if (machine_is_assabet()) |
368 | name = "TC59SM716-CL3"; | 371 | name = "TC59SM716-CL3"; |
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 9d89c67a1cc3..e46ecd847138 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S | |||
@@ -211,6 +211,9 @@ v6_dma_inv_range: | |||
211 | mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line | 211 | mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line |
212 | #endif | 212 | #endif |
213 | 1: | 213 | 1: |
214 | #ifdef CONFIG_SMP | ||
215 | str r0, [r0] @ write for ownership | ||
216 | #endif | ||
214 | #ifdef HARVARD_CACHE | 217 | #ifdef HARVARD_CACHE |
215 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D line | 218 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D line |
216 | #else | 219 | #else |
@@ -231,6 +234,9 @@ v6_dma_inv_range: | |||
231 | v6_dma_clean_range: | 234 | v6_dma_clean_range: |
232 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | 235 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 |
233 | 1: | 236 | 1: |
237 | #ifdef CONFIG_SMP | ||
238 | ldr r2, [r0] @ read for ownership | ||
239 | #endif | ||
234 | #ifdef HARVARD_CACHE | 240 | #ifdef HARVARD_CACHE |
235 | mcr p15, 0, r0, c7, c10, 1 @ clean D line | 241 | mcr p15, 0, r0, c7, c10, 1 @ clean D line |
236 | #else | 242 | #else |
@@ -251,6 +257,10 @@ v6_dma_clean_range: | |||
251 | ENTRY(v6_dma_flush_range) | 257 | ENTRY(v6_dma_flush_range) |
252 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | 258 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 |
253 | 1: | 259 | 1: |
260 | #ifdef CONFIG_SMP | ||
261 | ldr r2, [r0] @ read for ownership | ||
262 | str r2, [r0] @ write for ownership | ||
263 | #endif | ||
254 | #ifdef HARVARD_CACHE | 264 | #ifdef HARVARD_CACHE |
255 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line | 265 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line |
256 | #else | 266 | #else |
@@ -273,7 +283,9 @@ ENTRY(v6_dma_map_area) | |||
273 | add r1, r1, r0 | 283 | add r1, r1, r0 |
274 | teq r2, #DMA_FROM_DEVICE | 284 | teq r2, #DMA_FROM_DEVICE |
275 | beq v6_dma_inv_range | 285 | beq v6_dma_inv_range |
276 | b v6_dma_clean_range | 286 | teq r2, #DMA_TO_DEVICE |
287 | beq v6_dma_clean_range | ||
288 | b v6_dma_flush_range | ||
277 | ENDPROC(v6_dma_map_area) | 289 | ENDPROC(v6_dma_map_area) |
278 | 290 | ||
279 | /* | 291 | /* |
@@ -283,9 +295,6 @@ ENDPROC(v6_dma_map_area) | |||
283 | * - dir - DMA direction | 295 | * - dir - DMA direction |
284 | */ | 296 | */ |
285 | ENTRY(v6_dma_unmap_area) | 297 | ENTRY(v6_dma_unmap_area) |
286 | add r1, r1, r0 | ||
287 | teq r2, #DMA_TO_DEVICE | ||
288 | bne v6_dma_inv_range | ||
289 | mov pc, lr | 298 | mov pc, lr |
290 | ENDPROC(v6_dma_unmap_area) | 299 | ENDPROC(v6_dma_unmap_area) |
291 | 300 | ||
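Two things change in cache-v6.S: on SMP each maintenance loop first touches the line (a read and/or write "for ownership") so the operation acts on a line this CPU owns, and v6_dma_map_area now dispatches on the full DMA direction, cleaning for DMA_TO_DEVICE, invalidating for DMA_FROM_DEVICE and doing both otherwise, while v6_dma_unmap_area no longer performs any maintenance. Seen from a driver, this is the ordinary streaming-DMA contract; a minimal, hypothetical helper:

#include <linux/dma-mapping.h>

/* The direction passed here is what selects v6_dma_clean_range
 * (DMA_TO_DEVICE), v6_dma_inv_range (DMA_FROM_DEVICE) or
 * v6_dma_flush_range (any other direction) in the map path above. */
static dma_addr_t example_map_for_tx(struct device *dev, void *buf,
				     size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}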
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index bcd64f265870..06a90dcfc60a 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range) | |||
167 | cmp r0, r1 | 167 | cmp r0, r1 |
168 | blo 1b | 168 | blo 1b |
169 | mov r0, #0 | 169 | mov r0, #0 |
170 | #ifdef CONFIG_SMP | ||
171 | mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable | ||
172 | #else | ||
170 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB | 173 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB |
174 | #endif | ||
171 | dsb | 175 | dsb |
172 | isb | 176 | isb |
173 | mov pc, lr | 177 | mov pc, lr |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 83db12a68d56..0ed29bfeba1c 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -86,9 +86,6 @@ void show_mem(void) | |||
86 | printk("Mem-info:\n"); | 86 | printk("Mem-info:\n"); |
87 | show_free_areas(); | 87 | show_free_areas(); |
88 | for_each_online_node(node) { | 88 | for_each_online_node(node) { |
89 | pg_data_t *n = NODE_DATA(node); | ||
90 | struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; | ||
91 | |||
92 | for_each_nodebank (i,mi,node) { | 89 | for_each_nodebank (i,mi,node) { |
93 | struct membank *bank = &mi->bank[i]; | 90 | struct membank *bank = &mi->bank[i]; |
94 | unsigned int pfn1, pfn2; | 91 | unsigned int pfn1, pfn2; |
@@ -97,8 +94,8 @@ void show_mem(void) | |||
97 | pfn1 = bank_pfn_start(bank); | 94 | pfn1 = bank_pfn_start(bank); |
98 | pfn2 = bank_pfn_end(bank); | 95 | pfn2 = bank_pfn_end(bank); |
99 | 96 | ||
100 | page = map + pfn1; | 97 | page = pfn_to_page(pfn1); |
101 | end = map + pfn2; | 98 | end = pfn_to_page(pfn2 - 1) + 1; |
102 | 99 | ||
103 | do { | 100 | do { |
104 | total++; | 101 | total++; |
@@ -603,9 +600,6 @@ void __init mem_init(void) | |||
603 | reserved_pages = free_pages = 0; | 600 | reserved_pages = free_pages = 0; |
604 | 601 | ||
605 | for_each_online_node(node) { | 602 | for_each_online_node(node) { |
606 | pg_data_t *n = NODE_DATA(node); | ||
607 | struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; | ||
608 | |||
609 | for_each_nodebank(i, &meminfo, node) { | 603 | for_each_nodebank(i, &meminfo, node) { |
610 | struct membank *bank = &meminfo.bank[i]; | 604 | struct membank *bank = &meminfo.bank[i]; |
611 | unsigned int pfn1, pfn2; | 605 | unsigned int pfn1, pfn2; |
@@ -614,8 +608,8 @@ void __init mem_init(void) | |||
614 | pfn1 = bank_pfn_start(bank); | 608 | pfn1 = bank_pfn_start(bank); |
615 | pfn2 = bank_pfn_end(bank); | 609 | pfn2 = bank_pfn_end(bank); |
616 | 610 | ||
617 | page = map + pfn1; | 611 | page = pfn_to_page(pfn1); |
618 | end = map + pfn2; | 612 | end = pfn_to_page(pfn2 - 1) + 1; |
619 | 613 | ||
620 | do { | 614 | do { |
621 | if (PageReserved(page)) | 615 | if (PageReserved(page)) |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 9bfeb6b9509a..33b327379f07 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -65,6 +65,15 @@ void flush_dcache_page(struct page *page) | |||
65 | } | 65 | } |
66 | EXPORT_SYMBOL(flush_dcache_page); | 66 | EXPORT_SYMBOL(flush_dcache_page); |
67 | 67 | ||
68 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
69 | unsigned long uaddr, void *dst, const void *src, | ||
70 | unsigned long len) | ||
71 | { | ||
72 | memcpy(dst, src, len); | ||
73 | if (vma->vm_flags & VM_EXEC) | ||
74 | __cpuc_coherent_user_range(uaddr, uaddr + len); | ||
75 | } | ||
76 | |||
68 | void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, | 77 | void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, |
69 | size_t size, unsigned int mtype) | 78 | size_t size, unsigned int mtype) |
70 | { | 79 | { |
@@ -87,8 +96,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | |||
87 | } | 96 | } |
88 | EXPORT_SYMBOL(__arm_ioremap); | 97 | EXPORT_SYMBOL(__arm_ioremap); |
89 | 98 | ||
90 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 99 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, |
91 | unsigned int mtype, void *caller) | 100 | unsigned int mtype, void *caller) |
92 | { | 101 | { |
93 | return __arm_ioremap(phys_addr, size, mtype); | 102 | return __arm_ioremap(phys_addr, size, mtype); |
94 | } | 103 | } |
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 0cb1848bd876..f3f288a9546d 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S | |||
@@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
50 | cmp r0, r1 | 50 | cmp r0, r1 |
51 | blo 1b | 51 | blo 1b |
52 | mov ip, #0 | 52 | mov ip, #0 |
53 | #ifdef CONFIG_SMP | ||
54 | mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable | ||
55 | #else | ||
53 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB | 56 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB |
57 | #endif | ||
54 | dsb | 58 | dsb |
55 | mov pc, lr | 59 | mov pc, lr |
56 | ENDPROC(v7wbi_flush_user_tlb_range) | 60 | ENDPROC(v7wbi_flush_user_tlb_range) |
@@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) | |||
79 | cmp r0, r1 | 83 | cmp r0, r1 |
80 | blo 1b | 84 | blo 1b |
81 | mov r2, #0 | 85 | mov r2, #0 |
86 | #ifdef CONFIG_SMP | ||
87 | mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable | ||
88 | #else | ||
82 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 89 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
90 | #endif | ||
83 | dsb | 91 | dsb |
84 | isb | 92 | isb |
85 | mov pc, lr | 93 | mov pc, lr |
diff --git a/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h b/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h index 07be8ad7ec37..7c4870bd5a21 100644 --- a/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h +++ b/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h | |||
@@ -31,7 +31,13 @@ | |||
31 | #define DMA_MODE_WRITE 1 | 31 | #define DMA_MODE_WRITE 1 |
32 | #define DMA_MODE_MASK 1 | 32 | #define DMA_MODE_MASK 1 |
33 | 33 | ||
34 | #define DMA_BASE IO_ADDRESS(DMA_BASE_ADDR) | 34 | #define MX1_DMA_REG(offset) MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR + (offset)) |
35 | |||
36 | /* DMA Interrupt Mask Register */ | ||
37 | #define MX1_DMA_DIMR MX1_DMA_REG(0x08) | ||
38 | |||
39 | /* Channel Control Register */ | ||
40 | #define MX1_DMA_CCR(x) MX1_DMA_REG(0x8c + ((x) << 6)) | ||
35 | 41 | ||
36 | #define IMX_DMA_MEMSIZE_32 (0 << 4) | 42 | #define IMX_DMA_MEMSIZE_32 (0 << 4) |
37 | #define IMX_DMA_MEMSIZE_8 (1 << 4) | 43 | #define IMX_DMA_MEMSIZE_8 (1 << 4) |
diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c index 742350e0f2a7..2d3c19d7c7b1 100644 --- a/arch/arm/plat-pxa/dma.c +++ b/arch/arm/plat-pxa/dma.c | |||
@@ -245,7 +245,7 @@ static void pxa_dma_init_debugfs(void) | |||
245 | 245 | ||
246 | dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels, | 246 | dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels, |
247 | GFP_KERNEL); | 247 | GFP_KERNEL); |
248 | if (!dbgfs_state) | 248 | if (!dbgfs_chan) |
249 | goto err_alloc; | 249 | goto err_alloc; |
250 | 250 | ||
251 | chandir = debugfs_create_dir("channels", dbgfs_root); | 251 | chandir = debugfs_create_dir("channels", dbgfs_root); |
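The debugfs fix above is a classic wrong-variable check: the allocation is stored in dbgfs_chan but the NULL test read dbgfs_state, so a failed allocation would slip past the err_alloc path. The pattern it restores, with hypothetical names:

#include <linux/slab.h>
#include <linux/debugfs.h>

/* Hypothetical: always test the pointer the allocation was assigned to. */
static struct dentry **example_alloc_chan_dirs(unsigned int nr_channels)
{
	struct dentry **dirs;

	dirs = kmalloc(sizeof(*dirs) * nr_channels, GFP_KERNEL);
	if (!dirs)		/* not some other, unrelated pointer */
		return NULL;
	return dirs;
}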
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 1536f1784cac..8f10d24ae625 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -12,7 +12,7 @@ | |||
12 | # | 12 | # |
13 | # http://www.arm.linux.org.uk/developer/machines/?action=new | 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new |
14 | # | 14 | # |
15 | # Last update: Sat Mar 20 15:35:41 2010 | 15 | # Last update: Sat May 1 10:36:42 2010 |
16 | # | 16 | # |
17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
18 | # | 18 | # |
@@ -2749,3 +2749,58 @@ stamp9g45 MACH_STAMP9G45 STAMP9G45 2761 | |||
2749 | h6053 MACH_H6053 H6053 2762 | 2749 | h6053 MACH_H6053 H6053 2762 |
2750 | smint01 MACH_SMINT01 SMINT01 2763 | 2750 | smint01 MACH_SMINT01 SMINT01 2763 |
2751 | prtlvt2 MACH_PRTLVT2 PRTLVT2 2764 | 2751 | prtlvt2 MACH_PRTLVT2 PRTLVT2 2764 |
2752 | ap420 MACH_AP420 AP420 2765 | ||
2753 | htcshift MACH_HTCSHIFT HTCSHIFT 2766 | ||
2754 | davinci_dm365_fc MACH_DAVINCI_DM365_FC DAVINCI_DM365_FC 2767 | ||
2755 | msm8x55_surf MACH_MSM8X55_SURF MSM8X55_SURF 2768 | ||
2756 | msm8x55_ffa MACH_MSM8X55_FFA MSM8X55_FFA 2769 | ||
2757 | esl_vamana MACH_ESL_VAMANA ESL_VAMANA 2770 | ||
2758 | sbc35 MACH_SBC35 SBC35 2771 | ||
2759 | mpx6446 MACH_MPX6446 MPX6446 2772 | ||
2760 | oreo_controller MACH_OREO_CONTROLLER OREO_CONTROLLER 2773 | ||
2761 | kopin_models MACH_KOPIN_MODELS KOPIN_MODELS 2774 | ||
2762 | ttc_vision2 MACH_TTC_VISION2 TTC_VISION2 2775 | ||
2763 | cns3420vb MACH_CNS3420VB CNS3420VB 2776 | ||
2764 | lpc2 MACH_LPC2 LPC2 2777 | ||
2765 | olympus MACH_OLYMPUS OLYMPUS 2778 | ||
2766 | vortex MACH_VORTEX VORTEX 2779 | ||
2767 | s5pc200 MACH_S5PC200 S5PC200 2780 | ||
2768 | ecucore_9263 MACH_ECUCORE_9263 ECUCORE_9263 2781 | ||
2769 | smdkc200 MACH_SMDKC200 SMDKC200 2782 | ||
2770 | emsiso_sx27 MACH_EMSISO_SX27 EMSISO_SX27 2783 | ||
2771 | apx_som9g45_ek MACH_APX_SOM9G45_EK APX_SOM9G45_EK 2784 | ||
2772 | songshan MACH_SONGSHAN SONGSHAN 2785 | ||
2773 | tianshan MACH_TIANSHAN TIANSHAN 2786 | ||
2774 | vpx500 MACH_VPX500 VPX500 2787 | ||
2775 | am3517sam MACH_AM3517SAM AM3517SAM 2788 | ||
2776 | skat91_sim508 MACH_SKAT91_SIM508 SKAT91_SIM508 2789 | ||
2777 | skat91_s3e MACH_SKAT91_S3E SKAT91_S3E 2790 | ||
2778 | omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791 | ||
2779 | df7220 MACH_DF7220 DF7220 2792 | ||
2780 | nemini MACH_NEMINI NEMINI 2793 | ||
2781 | t8200 MACH_T8200 T8200 2794 | ||
2782 | apf51 MACH_APF51 APF51 2795 | ||
2783 | dr_rc_unit MACH_DR_RC_UNIT DR_RC_UNIT 2796 | ||
2784 | bordeaux MACH_BORDEAUX BORDEAUX 2797 | ||
2785 | catania_b MACH_CATANIA_B CATANIA_B 2798 | ||
2786 | mx51_ocean MACH_MX51_OCEAN MX51_OCEAN 2799 | ||
2787 | ti8168evm MACH_TI8168EVM TI8168EVM 2800 | ||
2788 | neocoreomap MACH_NEOCOREOMAP NEOCOREOMAP 2801 | ||
2789 | withings_wbp MACH_WITHINGS_WBP WITHINGS_WBP 2802 | ||
2790 | dbps MACH_DBPS DBPS 2803 | ||
2791 | sbc9261 MACH_SBC9261 SBC9261 2804 | ||
2792 | pcbfp0001 MACH_PCBFP0001 PCBFP0001 2805 | ||
2793 | speedy MACH_SPEEDY SPEEDY 2806 | ||
2794 | chrysaor MACH_CHRYSAOR CHRYSAOR 2807 | ||
2795 | tango MACH_TANGO TANGO 2808 | ||
2796 | synology_dsx11 MACH_SYNOLOGY_DSX11 SYNOLOGY_DSX11 2809 | ||
2797 | hanlin_v3ext MACH_HANLIN_V3EXT HANLIN_V3EXT 2810 | ||
2798 | hanlin_v5 MACH_HANLIN_V5 HANLIN_V5 2811 | ||
2799 | hanlin_v3plus MACH_HANLIN_V3PLUS HANLIN_V3PLUS 2812 | ||
2800 | iriver_story MACH_IRIVER_STORY IRIVER_STORY 2813 | ||
2801 | irex_iliad MACH_IREX_ILIAD IREX_ILIAD 2814 | ||
2802 | irex_dr1000 MACH_IREX_DR1000 IREX_DR1000 2815 | ||
2803 | teton_bga MACH_TETON_BGA TETON_BGA 2816 | ||
2804 | snapper9g45 MACH_SNAPPER9G45 SNAPPER9G45 2817 | ||
2805 | tam3517 MACH_TAM3517 TAM3517 2818 | ||
2806 | pdc100 MACH_PDC100 PDC100 2819 | ||
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index b131c27ddf57..bbce6a1c6bb6 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | #define ATOMIC_INIT(i) { (i) } | 20 | #define ATOMIC_INIT(i) { (i) } |
21 | 21 | ||
22 | #define atomic_read(v) ((v)->counter) | 22 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
23 | #define atomic_set(v, i) (((v)->counter) = i) | 23 | #define atomic_set(v, i) (((v)->counter) = i) |
24 | 24 | ||
25 | /* | 25 | /* |
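This is the first of a series of identical one-liners below (cris, frv, h8300, ia64, m32r and m68k get the same change): atomic_read() now reads the counter through a volatile pointer, which forces the compiler to reload it from memory on every call instead of reusing a value cached in a register. A standalone sketch of the failure mode the cast guards against, with a hypothetical stand-in for atomic_t:

/* Hypothetical type and macros mirroring the two forms in the hunk above. */
typedef struct { int counter; } example_atomic_t;

#define plain_read(v)		((v)->counter)
#define volatile_read(v)	(*(volatile int *)&(v)->counter)

static void wait_until_zero(example_atomic_t *v)
{
	/* With plain_read() an optimizing compiler may hoist the load out
	 * of the loop and spin forever on a stale value; the volatile
	 * cast makes each iteration load from memory again. */
	while (volatile_read(v) != 0)
		;
}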
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index a6aca819e9f3..88dc9b9c4ba0 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #define ATOMIC_INIT(i) { (i) } | 16 | #define ATOMIC_INIT(i) { (i) } |
17 | 17 | ||
18 | #define atomic_read(v) ((v)->counter) | 18 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
19 | #define atomic_set(v,i) (((v)->counter) = (i)) | 19 | #define atomic_set(v,i) (((v)->counter) = (i)) |
20 | 20 | ||
21 | /* These should be written in asm but we do it in C for now. */ | 21 | /* These should be written in asm but we do it in C for now. */ |
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 00a57af79afc..fae32c7fdcb6 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h | |||
@@ -36,7 +36,7 @@ | |||
36 | #define smp_mb__after_atomic_inc() barrier() | 36 | #define smp_mb__after_atomic_inc() barrier() |
37 | 37 | ||
38 | #define ATOMIC_INIT(i) { (i) } | 38 | #define ATOMIC_INIT(i) { (i) } |
39 | #define atomic_read(v) ((v)->counter) | 39 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
40 | #define atomic_set(v, i) (((v)->counter) = (i)) | 40 | #define atomic_set(v, i) (((v)->counter) = (i)) |
41 | 41 | ||
42 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS | 42 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS |
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 33c8c0fa9583..e936804b7508 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #define ATOMIC_INIT(i) { (i) } | 11 | #define ATOMIC_INIT(i) { (i) } |
12 | 12 | ||
13 | #define atomic_read(v) ((v)->counter) | 13 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
14 | #define atomic_set(v, i) (((v)->counter) = i) | 14 | #define atomic_set(v, i) (((v)->counter) = i) |
15 | 15 | ||
16 | #include <asm/system.h> | 16 | #include <asm/system.h> |
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 88405cb0832a..4e1948447a00 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h | |||
@@ -21,8 +21,8 @@ | |||
21 | #define ATOMIC_INIT(i) ((atomic_t) { (i) }) | 21 | #define ATOMIC_INIT(i) ((atomic_t) { (i) }) |
22 | #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) | 22 | #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) |
23 | 23 | ||
24 | #define atomic_read(v) ((v)->counter) | 24 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
25 | #define atomic64_read(v) ((v)->counter) | 25 | #define atomic64_read(v) (*(volatile long *)&(v)->counter) |
26 | 26 | ||
27 | #define atomic_set(v,i) (((v)->counter) = (i)) | 27 | #define atomic_set(v,i) (((v)->counter) = (i)) |
28 | #define atomic64_set(v,i) (((v)->counter) = (i)) | 28 | #define atomic64_set(v,i) (((v)->counter) = (i)) |
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 63f0cf0f50dd..d44a51e5271b 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h | |||
@@ -26,7 +26,7 @@ | |||
26 | * | 26 | * |
27 | * Atomically reads the value of @v. | 27 | * Atomically reads the value of @v. |
28 | */ | 28 | */ |
29 | #define atomic_read(v) ((v)->counter) | 29 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
30 | 30 | ||
31 | /** | 31 | /** |
32 | * atomic_set - set atomic variable | 32 | * atomic_set - set atomic variable |
diff --git a/arch/m68k/amiga/Makefile b/arch/m68k/amiga/Makefile index 6a0d7650f980..11dd30b16b3b 100644 --- a/arch/m68k/amiga/Makefile +++ b/arch/m68k/amiga/Makefile | |||
@@ -2,6 +2,6 @@ | |||
2 | # Makefile for Linux arch/m68k/amiga source directory | 2 | # Makefile for Linux arch/m68k/amiga source directory |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := config.o amiints.o cia.o chipram.o amisound.o | 5 | obj-y := config.o amiints.o cia.o chipram.o amisound.o platform.o |
6 | 6 | ||
7 | obj-$(CONFIG_AMIGA_PCMCIA) += pcmcia.o | 7 | obj-$(CONFIG_AMIGA_PCMCIA) += pcmcia.o |
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c new file mode 100644 index 000000000000..38f18bf14737 --- /dev/null +++ b/arch/m68k/amiga/platform.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007-2009 Geert Uytterhoeven | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file COPYING in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/zorro.h> | ||
12 | |||
13 | #include <asm/amigahw.h> | ||
14 | |||
15 | |||
16 | #ifdef CONFIG_ZORRO | ||
17 | |||
18 | static const struct resource zorro_resources[] __initconst = { | ||
19 | /* Zorro II regions (on Zorro II/III) */ | ||
20 | { | ||
21 | .name = "Zorro II exp", | ||
22 | .start = 0x00e80000, | ||
23 | .end = 0x00efffff, | ||
24 | .flags = IORESOURCE_MEM, | ||
25 | }, { | ||
26 | .name = "Zorro II mem", | ||
27 | .start = 0x00200000, | ||
28 | .end = 0x009fffff, | ||
29 | .flags = IORESOURCE_MEM, | ||
30 | }, | ||
31 | /* Zorro III regions (on Zorro III only) */ | ||
32 | { | ||
33 | .name = "Zorro III exp", | ||
34 | .start = 0xff000000, | ||
35 | .end = 0xffffffff, | ||
36 | .flags = IORESOURCE_MEM, | ||
37 | }, { | ||
38 | .name = "Zorro III cfg", | ||
39 | .start = 0x40000000, | ||
40 | .end = 0x7fffffff, | ||
41 | .flags = IORESOURCE_MEM, | ||
42 | } | ||
43 | }; | ||
44 | |||
45 | |||
46 | static int __init amiga_init_bus(void) | ||
47 | { | ||
48 | if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) | ||
49 | return -ENODEV; | ||
50 | |||
51 | platform_device_register_simple("amiga-zorro", -1, zorro_resources, | ||
52 | AMIGAHW_PRESENT(ZORRO3) ? 4 : 2); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | subsys_initcall(amiga_init_bus); | ||
57 | |||
58 | #endif /* CONFIG_ZORRO */ | ||
59 | |||
60 | |||
61 | static int __init amiga_init_devices(void) | ||
62 | { | ||
63 | if (!MACH_IS_AMIGA) | ||
64 | return -ENODEV; | ||
65 | |||
66 | /* video hardware */ | ||
67 | if (AMIGAHW_PRESENT(AMI_VIDEO)) | ||
68 | platform_device_register_simple("amiga-video", -1, NULL, 0); | ||
69 | |||
70 | |||
71 | /* sound hardware */ | ||
72 | if (AMIGAHW_PRESENT(AMI_AUDIO)) | ||
73 | platform_device_register_simple("amiga-audio", -1, NULL, 0); | ||
74 | |||
75 | |||
76 | /* storage interfaces */ | ||
77 | if (AMIGAHW_PRESENT(AMI_FLOPPY)) | ||
78 | platform_device_register_simple("amiga-floppy", -1, NULL, 0); | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | device_initcall(amiga_init_devices); | ||
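The new platform.c registers bare platform devices ("amiga-zorro", "amiga-video", "amiga-audio", "amiga-floppy") and leaves it to drivers to bind to them by name. A hypothetical driver-side counterpart, only to show how the name-based match works:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_floppy_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound to %s\n", pdev->name);
	return 0;
}

static struct platform_driver example_floppy_driver = {
	.probe	= example_floppy_probe,
	.driver	= {
		.name	= "amiga-floppy",	/* matches the device registered above */
		.owner	= THIS_MODULE,
	},
};

static int __init example_floppy_init(void)
{
	return platform_driver_register(&example_floppy_driver);
}
module_init(example_floppy_init);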
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c index b46ea1714a89..cb8617bb194b 100644 --- a/arch/m68k/bvme6000/rtc.c +++ b/arch/m68k/bvme6000/rtc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/miscdevice.h> | 11 | #include <linux/miscdevice.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/ioport.h> | 12 | #include <linux/ioport.h> |
14 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
15 | #include <linux/fcntl.h> | 14 | #include <linux/fcntl.h> |
@@ -35,10 +34,9 @@ | |||
35 | static unsigned char days_in_mo[] = | 34 | static unsigned char days_in_mo[] = |
36 | {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; | 35 | {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; |
37 | 36 | ||
38 | static char rtc_status; | 37 | static atomic_t rtc_status = ATOMIC_INIT(1); |
39 | 38 | ||
40 | static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | 39 | static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
41 | unsigned long arg) | ||
42 | { | 40 | { |
43 | volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; | 41 | volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE; |
44 | unsigned char msr; | 42 | unsigned char msr; |
@@ -132,29 +130,20 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | |||
132 | } | 130 | } |
133 | 131 | ||
134 | /* | 132 | /* |
135 | * We enforce only one user at a time here with the open/close. | 133 | * We enforce only one user at a time here with the open/close. |
136 | * Also clear the previous interrupt data on an open, and clean | ||
137 | * up things on a close. | ||
138 | */ | 134 | */ |
139 | |||
140 | static int rtc_open(struct inode *inode, struct file *file) | 135 | static int rtc_open(struct inode *inode, struct file *file) |
141 | { | 136 | { |
142 | lock_kernel(); | 137 | if (!atomic_dec_and_test(&rtc_status)) { |
143 | if(rtc_status) { | 138 | atomic_inc(&rtc_status); |
144 | unlock_kernel(); | ||
145 | return -EBUSY; | 139 | return -EBUSY; |
146 | } | 140 | } |
147 | |||
148 | rtc_status = 1; | ||
149 | unlock_kernel(); | ||
150 | return 0; | 141 | return 0; |
151 | } | 142 | } |
152 | 143 | ||
153 | static int rtc_release(struct inode *inode, struct file *file) | 144 | static int rtc_release(struct inode *inode, struct file *file) |
154 | { | 145 | { |
155 | lock_kernel(); | 146 | atomic_inc(&rtc_status); |
156 | rtc_status = 0; | ||
157 | unlock_kernel(); | ||
158 | return 0; | 147 | return 0; |
159 | } | 148 | } |
160 | 149 | ||
@@ -163,9 +152,9 @@ static int rtc_release(struct inode *inode, struct file *file) | |||
163 | */ | 152 | */ |
164 | 153 | ||
165 | static const struct file_operations rtc_fops = { | 154 | static const struct file_operations rtc_fops = { |
166 | .ioctl = rtc_ioctl, | 155 | .unlocked_ioctl = rtc_ioctl, |
167 | .open = rtc_open, | 156 | .open = rtc_open, |
168 | .release = rtc_release, | 157 | .release = rtc_release, |
169 | }; | 158 | }; |
170 | 159 | ||
171 | static struct miscdevice rtc_dev = { | 160 | static struct miscdevice rtc_dev = { |
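The bvme6000 rtc driver above (and the mvme16x one further down) sheds the big kernel lock: the handler moves from .ioctl to .unlocked_ioctl with the (file, cmd, arg) prototype, and the "only one opener at a time" rule is enforced by an atomic counter instead of lock_kernel() around a flag. The counter idiom in isolation, with hypothetical names:

#include <linux/fs.h>
#include <asm/atomic.h>

/* Starts at 1: the first open takes it to 0; a second, concurrent open
 * fails the test, undoes its decrement and backs off with -EBUSY. */
static atomic_t example_in_use = ATOMIC_INIT(1);

static int example_open(struct inode *inode, struct file *file)
{
	if (!atomic_dec_and_test(&example_in_use)) {
		atomic_inc(&example_in_use);
		return -EBUSY;
	}
	return 0;
}

static int example_release(struct inode *inode, struct file *file)
{
	atomic_inc(&example_in_use);
	return 0;
}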
diff --git a/arch/m68k/hp300/time.h b/arch/m68k/hp300/time.h index f5b3d098b0f5..7b98242960de 100644 --- a/arch/m68k/hp300/time.h +++ b/arch/m68k/hp300/time.h | |||
@@ -1,4 +1,2 @@ | |||
1 | extern void hp300_sched_init(irq_handler_t vector); | 1 | extern void hp300_sched_init(irq_handler_t vector); |
2 | extern unsigned long hp300_gettimeoffset (void); | 2 | extern unsigned long hp300_gettimeoffset(void); |
3 | |||
4 | |||
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index d9d2ed647435..6a223b3f7e74 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #define ATOMIC_INIT(i) { (i) } | 16 | #define ATOMIC_INIT(i) { (i) } |
17 | 17 | ||
18 | #define atomic_read(v) ((v)->counter) | 18 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
19 | #define atomic_set(v, i) (((v)->counter) = i) | 19 | #define atomic_set(v, i) (((v)->counter) = i) |
20 | 20 | ||
21 | static inline void atomic_add(int i, atomic_t *v) | 21 | static inline void atomic_add(int i, atomic_t *v) |
diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h index 5674cb9449bd..289310c63a8a 100644 --- a/arch/m68k/include/asm/atomic_no.h +++ b/arch/m68k/include/asm/atomic_no.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #define ATOMIC_INIT(i) { (i) } | 16 | #define ATOMIC_INIT(i) { (i) } |
17 | 17 | ||
18 | #define atomic_read(v) ((v)->counter) | 18 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
19 | #define atomic_set(v, i) (((v)->counter) = i) | 19 | #define atomic_set(v, i) (((v)->counter) = i) |
20 | 20 | ||
21 | static __inline__ void atomic_add(int i, atomic_t *v) | 21 | static __inline__ void atomic_add(int i, atomic_t *v) |
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h index 9bde784e7bad..b4ecdaada520 100644 --- a/arch/m68k/include/asm/bitops_mm.h +++ b/arch/m68k/include/asm/bitops_mm.h | |||
@@ -365,6 +365,10 @@ static inline int minix_test_bit(int nr, const void *vaddr) | |||
365 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 365 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
366 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 366 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
367 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 367 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
368 | #define ext2_find_next_zero_bit(addr, size, offset) \ | ||
369 | generic_find_next_zero_le_bit((unsigned long *)addr, size, offset) | ||
370 | #define ext2_find_next_bit(addr, size, offset) \ | ||
371 | generic_find_next_le_bit((unsigned long *)addr, size, offset) | ||
368 | 372 | ||
369 | static inline int ext2_test_bit(int nr, const void *vaddr) | 373 | static inline int ext2_test_bit(int nr, const void *vaddr) |
370 | { | 374 | { |
@@ -394,10 +398,9 @@ static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size) | |||
394 | return (p - addr) * 32 + res; | 398 | return (p - addr) * 32 + res; |
395 | } | 399 | } |
396 | 400 | ||
397 | static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size, | 401 | static inline unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, |
398 | unsigned offset) | 402 | unsigned long size, unsigned long offset) |
399 | { | 403 | { |
400 | const unsigned long *addr = vaddr; | ||
401 | const unsigned long *p = addr + (offset >> 5); | 404 | const unsigned long *p = addr + (offset >> 5); |
402 | int bit = offset & 31UL, res; | 405 | int bit = offset & 31UL, res; |
403 | 406 | ||
@@ -437,10 +440,9 @@ static inline int ext2_find_first_bit(const void *vaddr, unsigned size) | |||
437 | return (p - addr) * 32 + res; | 440 | return (p - addr) * 32 + res; |
438 | } | 441 | } |
439 | 442 | ||
440 | static inline int ext2_find_next_bit(const void *vaddr, unsigned size, | 443 | static inline unsigned long generic_find_next_le_bit(const unsigned long *addr, |
441 | unsigned offset) | 444 | unsigned long size, unsigned long offset) |
442 | { | 445 | { |
443 | const unsigned long *addr = vaddr; | ||
444 | const unsigned long *p = addr + (offset >> 5); | 446 | const unsigned long *p = addr + (offset >> 5); |
445 | int bit = offset & 31UL, res; | 447 | int bit = offset & 31UL, res; |
446 | 448 | ||
diff --git a/arch/m68k/include/asm/param.h b/arch/m68k/include/asm/param.h index 85c41b75aa78..36265ccf5c7b 100644 --- a/arch/m68k/include/asm/param.h +++ b/arch/m68k/include/asm/param.h | |||
@@ -1,26 +1,12 @@ | |||
1 | #ifndef _M68K_PARAM_H | 1 | #ifndef _M68K_PARAM_H |
2 | #define _M68K_PARAM_H | 2 | #define _M68K_PARAM_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #ifdef __uClinux__ | 4 | #ifdef __uClinux__ |
15 | #define EXEC_PAGESIZE 4096 | 5 | #define EXEC_PAGESIZE 4096 |
16 | #else | 6 | #else |
17 | #define EXEC_PAGESIZE 8192 | 7 | #define EXEC_PAGESIZE 8192 |
18 | #endif | 8 | #endif |
19 | 9 | ||
20 | #ifndef NOGROUP | 10 | #include <asm-generic/param.h> |
21 | #define NOGROUP (-1) | ||
22 | #endif | ||
23 | |||
24 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
25 | 11 | ||
26 | #endif /* _M68K_PARAM_H */ | 12 | #endif /* _M68K_PARAM_H */ |
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index aacd6d17b833..ada4f4cca811 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c | |||
@@ -455,7 +455,7 @@ static inline void access_error040(struct frame *fp) | |||
455 | 455 | ||
456 | if (do_page_fault(&fp->ptregs, addr, errorcode)) { | 456 | if (do_page_fault(&fp->ptregs, addr, errorcode)) { |
457 | #ifdef DEBUG | 457 | #ifdef DEBUG |
458 | printk("do_page_fault() !=0 \n"); | 458 | printk("do_page_fault() !=0\n"); |
459 | #endif | 459 | #endif |
460 | if (user_mode(&fp->ptregs)){ | 460 | if (user_mode(&fp->ptregs)){ |
461 | /* delay writebacks after signal delivery */ | 461 | /* delay writebacks after signal delivery */ |
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 0356da9bf763..1c16b1baf8db 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c | |||
@@ -148,7 +148,7 @@ static void mac_cache_card_flush(int writeback) | |||
148 | void __init config_mac(void) | 148 | void __init config_mac(void) |
149 | { | 149 | { |
150 | if (!MACH_IS_MAC) | 150 | if (!MACH_IS_MAC) |
151 | printk(KERN_ERR "ERROR: no Mac, but config_mac() called!! \n"); | 151 | printk(KERN_ERR "ERROR: no Mac, but config_mac() called!!\n"); |
152 | 152 | ||
153 | mach_sched_init = mac_sched_init; | 153 | mach_sched_init = mac_sched_init; |
154 | mach_init_IRQ = mac_init_IRQ; | 154 | mach_init_IRQ = mac_init_IRQ; |
@@ -867,7 +867,7 @@ static void __init mac_identify(void) | |||
867 | */ | 867 | */ |
868 | iop_preinit(); | 868 | iop_preinit(); |
869 | 869 | ||
870 | printk(KERN_INFO "Detected Macintosh model: %d \n", model); | 870 | printk(KERN_INFO "Detected Macintosh model: %d\n", model); |
871 | 871 | ||
872 | /* | 872 | /* |
873 | * Report booter data: | 873 | * Report booter data: |
@@ -878,12 +878,12 @@ static void __init mac_identify(void) | |||
878 | mac_bi_data.videoaddr, mac_bi_data.videorow, | 878 | mac_bi_data.videoaddr, mac_bi_data.videorow, |
879 | mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF, | 879 | mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF, |
880 | mac_bi_data.dimensions >> 16); | 880 | mac_bi_data.dimensions >> 16); |
881 | printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx \n", | 881 | printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx\n", |
882 | mac_bi_data.videological, mac_orig_videoaddr, | 882 | mac_bi_data.videological, mac_orig_videoaddr, |
883 | mac_bi_data.sccbase); | 883 | mac_bi_data.sccbase); |
884 | printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx \n", | 884 | printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx\n", |
885 | mac_bi_data.boottime, mac_bi_data.gmtbias); | 885 | mac_bi_data.boottime, mac_bi_data.gmtbias); |
886 | printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n", | 886 | printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx\n", |
887 | mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize); | 887 | mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize); |
888 | 888 | ||
889 | iop_init(); | 889 | iop_init(); |
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index d0e35cf99fc6..a96394a0333d 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c | |||
@@ -154,7 +154,6 @@ good_area: | |||
154 | * the fault. | 154 | * the fault. |
155 | */ | 155 | */ |
156 | 156 | ||
157 | survive: | ||
158 | fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); | 157 | fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); |
159 | #ifdef DEBUG | 158 | #ifdef DEBUG |
160 | printk("handle_mm_fault returns %d\n",fault); | 159 | printk("handle_mm_fault returns %d\n",fault); |
@@ -180,15 +179,10 @@ good_area: | |||
180 | */ | 179 | */ |
181 | out_of_memory: | 180 | out_of_memory: |
182 | up_read(&mm->mmap_sem); | 181 | up_read(&mm->mmap_sem); |
183 | if (is_global_init(current)) { | 182 | if (!user_mode(regs)) |
184 | yield(); | 183 | goto no_context; |
185 | down_read(&mm->mmap_sem); | 184 | pagefault_out_of_memory(); |
186 | goto survive; | 185 | return 0; |
187 | } | ||
188 | |||
189 | printk("VM: killing process %s\n", current->comm); | ||
190 | if (user_mode(regs)) | ||
191 | do_group_exit(SIGKILL); | ||
192 | 186 | ||
193 | no_context: | 187 | no_context: |
194 | current->thread.signo = SIGBUS; | 188 | current->thread.signo = SIGBUS; |
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c index 8da9c250d3e1..11ac6f63967a 100644 --- a/arch/m68k/mvme16x/rtc.c +++ b/arch/m68k/mvme16x/rtc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/miscdevice.h> | 11 | #include <linux/miscdevice.h> |
12 | #include <linux/smp_lock.h> | ||
13 | #include <linux/ioport.h> | 12 | #include <linux/ioport.h> |
14 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
15 | #include <linux/fcntl.h> | 14 | #include <linux/fcntl.h> |
@@ -36,8 +35,7 @@ static const unsigned char days_in_mo[] = | |||
36 | 35 | ||
37 | static atomic_t rtc_ready = ATOMIC_INIT(1); | 36 | static atomic_t rtc_ready = ATOMIC_INIT(1); |
38 | 37 | ||
39 | static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | 38 | static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
40 | unsigned long arg) | ||
41 | { | 39 | { |
42 | volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE; | 40 | volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE; |
43 | unsigned long flags; | 41 | unsigned long flags; |
@@ -120,22 +118,15 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | |||
120 | } | 118 | } |
121 | 119 | ||
122 | /* | 120 | /* |
123 | * We enforce only one user at a time here with the open/close. | 121 | * We enforce only one user at a time here with the open/close. |
124 | * Also clear the previous interrupt data on an open, and clean | ||
125 | * up things on a close. | ||
126 | */ | 122 | */ |
127 | |||
128 | static int rtc_open(struct inode *inode, struct file *file) | 123 | static int rtc_open(struct inode *inode, struct file *file) |
129 | { | 124 | { |
130 | lock_kernel(); | ||
131 | if( !atomic_dec_and_test(&rtc_ready) ) | 125 | if( !atomic_dec_and_test(&rtc_ready) ) |
132 | { | 126 | { |
133 | atomic_inc( &rtc_ready ); | 127 | atomic_inc( &rtc_ready ); |
134 | unlock_kernel(); | ||
135 | return -EBUSY; | 128 | return -EBUSY; |
136 | } | 129 | } |
137 | unlock_kernel(); | ||
138 | |||
139 | return 0; | 130 | return 0; |
140 | } | 131 | } |
141 | 132 | ||
@@ -150,9 +141,9 @@ static int rtc_release(struct inode *inode, struct file *file) | |||
150 | */ | 141 | */ |
151 | 142 | ||
152 | static const struct file_operations rtc_fops = { | 143 | static const struct file_operations rtc_fops = { |
153 | .ioctl = rtc_ioctl, | 144 | .unlocked_ioctl = rtc_ioctl, |
154 | .open = rtc_open, | 145 | .open = rtc_open, |
155 | .release = rtc_release, | 146 | .release = rtc_release, |
156 | }; | 147 | }; |
157 | 148 | ||
158 | static struct miscdevice rtc_dev= | 149 | static struct miscdevice rtc_dev= |
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 31ab3f08bbda..ad10fecec2fe 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c | |||
@@ -126,7 +126,7 @@ static void q40_reset(void) | |||
126 | { | 126 | { |
127 | halted = 1; | 127 | halted = 1; |
128 | printk("\n\n*******************************************\n" | 128 | printk("\n\n*******************************************\n" |
129 | "Called q40_reset : press the RESET button!! \n" | 129 | "Called q40_reset : press the RESET button!!\n" |
130 | "*******************************************\n"); | 130 | "*******************************************\n"); |
131 | Q40_LED_ON(); | 131 | Q40_LED_ON(); |
132 | while (1) | 132 | while (1) |
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index 6fced1fe3bf0..3c91cf6192c6 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.33-rc6 | 3 | # Linux kernel version: 2.6.34-rc6 |
4 | # Wed Feb 3 10:02:59 2010 | 4 | # Thu May 6 11:22:14 2010 |
5 | # | 5 | # |
6 | CONFIG_MICROBLAZE=y | 6 | CONFIG_MICROBLAZE=y |
7 | # CONFIG_SWAP is not set | 7 | # CONFIG_SWAP is not set |
@@ -22,8 +22,6 @@ CONFIG_GENERIC_CSUM=y | |||
22 | CONFIG_STACKTRACE_SUPPORT=y | 22 | CONFIG_STACKTRACE_SUPPORT=y |
23 | CONFIG_LOCKDEP_SUPPORT=y | 23 | CONFIG_LOCKDEP_SUPPORT=y |
24 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 24 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
25 | # CONFIG_PCI is not set | ||
26 | CONFIG_NO_DMA=y | ||
27 | CONFIG_DTC=y | 25 | CONFIG_DTC=y |
28 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 26 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
29 | CONFIG_CONSTRUCTORS=y | 27 | CONFIG_CONSTRUCTORS=y |
@@ -56,7 +54,6 @@ CONFIG_RCU_FANOUT=32 | |||
56 | CONFIG_IKCONFIG=y | 54 | CONFIG_IKCONFIG=y |
57 | CONFIG_IKCONFIG_PROC=y | 55 | CONFIG_IKCONFIG_PROC=y |
58 | CONFIG_LOG_BUF_SHIFT=17 | 56 | CONFIG_LOG_BUF_SHIFT=17 |
59 | # CONFIG_GROUP_SCHED is not set | ||
60 | # CONFIG_CGROUPS is not set | 57 | # CONFIG_CGROUPS is not set |
61 | CONFIG_SYSFS_DEPRECATED=y | 58 | CONFIG_SYSFS_DEPRECATED=y |
62 | CONFIG_SYSFS_DEPRECATED_V2=y | 59 | CONFIG_SYSFS_DEPRECATED_V2=y |
@@ -106,6 +103,8 @@ CONFIG_SLAB=y | |||
106 | # CONFIG_SLOB is not set | 103 | # CONFIG_SLOB is not set |
107 | # CONFIG_PROFILING is not set | 104 | # CONFIG_PROFILING is not set |
108 | CONFIG_HAVE_OPROFILE=y | 105 | CONFIG_HAVE_OPROFILE=y |
106 | CONFIG_HAVE_DMA_ATTRS=y | ||
107 | CONFIG_HAVE_DMA_API_DEBUG=y | ||
109 | 108 | ||
110 | # | 109 | # |
111 | # GCOV-based kernel profiling | 110 | # GCOV-based kernel profiling |
@@ -245,13 +244,20 @@ CONFIG_BINFMT_ELF=y | |||
245 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 244 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
246 | # CONFIG_HAVE_AOUT is not set | 245 | # CONFIG_HAVE_AOUT is not set |
247 | # CONFIG_BINFMT_MISC is not set | 246 | # CONFIG_BINFMT_MISC is not set |
247 | |||
248 | # | ||
249 | # Bus Options | ||
250 | # | ||
251 | # CONFIG_PCI is not set | ||
252 | # CONFIG_PCI_DOMAINS is not set | ||
253 | # CONFIG_PCI_SYSCALL is not set | ||
254 | # CONFIG_ARCH_SUPPORTS_MSI is not set | ||
248 | CONFIG_NET=y | 255 | CONFIG_NET=y |
249 | 256 | ||
250 | # | 257 | # |
251 | # Networking options | 258 | # Networking options |
252 | # | 259 | # |
253 | CONFIG_PACKET=y | 260 | CONFIG_PACKET=y |
254 | # CONFIG_PACKET_MMAP is not set | ||
255 | CONFIG_UNIX=y | 261 | CONFIG_UNIX=y |
256 | CONFIG_XFRM=y | 262 | CONFIG_XFRM=y |
257 | # CONFIG_XFRM_USER is not set | 263 | # CONFIG_XFRM_USER is not set |
@@ -341,7 +347,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y | |||
341 | # CONFIG_SYS_HYPERVISOR is not set | 347 | # CONFIG_SYS_HYPERVISOR is not set |
342 | # CONFIG_CONNECTOR is not set | 348 | # CONFIG_CONNECTOR is not set |
343 | # CONFIG_MTD is not set | 349 | # CONFIG_MTD is not set |
350 | CONFIG_OF_FLATTREE=y | ||
344 | CONFIG_OF_DEVICE=y | 351 | CONFIG_OF_DEVICE=y |
352 | CONFIG_OF_MDIO=y | ||
345 | # CONFIG_PARPORT is not set | 353 | # CONFIG_PARPORT is not set |
346 | CONFIG_BLK_DEV=y | 354 | CONFIG_BLK_DEV=y |
347 | # CONFIG_BLK_DEV_COW_COMMON is not set | 355 | # CONFIG_BLK_DEV_COW_COMMON is not set |
@@ -370,6 +378,7 @@ CONFIG_MISC_DEVICES=y | |||
370 | # | 378 | # |
371 | # SCSI device support | 379 | # SCSI device support |
372 | # | 380 | # |
381 | CONFIG_SCSI_MOD=y | ||
373 | # CONFIG_RAID_ATTRS is not set | 382 | # CONFIG_RAID_ATTRS is not set |
374 | # CONFIG_SCSI is not set | 383 | # CONFIG_SCSI is not set |
375 | # CONFIG_SCSI_DMA is not set | 384 | # CONFIG_SCSI_DMA is not set |
@@ -383,9 +392,30 @@ CONFIG_NETDEVICES=y | |||
383 | # CONFIG_EQUALIZER is not set | 392 | # CONFIG_EQUALIZER is not set |
384 | # CONFIG_TUN is not set | 393 | # CONFIG_TUN is not set |
385 | # CONFIG_VETH is not set | 394 | # CONFIG_VETH is not set |
386 | # CONFIG_PHYLIB is not set | 395 | CONFIG_PHYLIB=y |
396 | |||
397 | # | ||
398 | # MII PHY device drivers | ||
399 | # | ||
400 | # CONFIG_MARVELL_PHY is not set | ||
401 | # CONFIG_DAVICOM_PHY is not set | ||
402 | # CONFIG_QSEMI_PHY is not set | ||
403 | # CONFIG_LXT_PHY is not set | ||
404 | # CONFIG_CICADA_PHY is not set | ||
405 | # CONFIG_VITESSE_PHY is not set | ||
406 | # CONFIG_SMSC_PHY is not set | ||
407 | # CONFIG_BROADCOM_PHY is not set | ||
408 | # CONFIG_ICPLUS_PHY is not set | ||
409 | # CONFIG_REALTEK_PHY is not set | ||
410 | # CONFIG_NATIONAL_PHY is not set | ||
411 | # CONFIG_STE10XP is not set | ||
412 | # CONFIG_LSI_ET1011C_PHY is not set | ||
413 | # CONFIG_MICREL_PHY is not set | ||
414 | # CONFIG_FIXED_PHY is not set | ||
415 | # CONFIG_MDIO_BITBANG is not set | ||
387 | CONFIG_NET_ETHERNET=y | 416 | CONFIG_NET_ETHERNET=y |
388 | # CONFIG_MII is not set | 417 | # CONFIG_MII is not set |
418 | # CONFIG_ETHOC is not set | ||
389 | # CONFIG_DNET is not set | 419 | # CONFIG_DNET is not set |
390 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | 420 | # CONFIG_IBM_NEW_EMAC_ZMII is not set |
391 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | 421 | # CONFIG_IBM_NEW_EMAC_RGMII is not set |
@@ -394,6 +424,7 @@ CONFIG_NET_ETHERNET=y | |||
394 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | 424 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set |
395 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 425 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
396 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 426 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
427 | # CONFIG_B44 is not set | ||
397 | # CONFIG_KS8842 is not set | 428 | # CONFIG_KS8842 is not set |
398 | # CONFIG_KS8851_MLL is not set | 429 | # CONFIG_KS8851_MLL is not set |
399 | CONFIG_XILINX_EMACLITE=y | 430 | CONFIG_XILINX_EMACLITE=y |
@@ -444,6 +475,7 @@ CONFIG_SERIAL_UARTLITE=y | |||
444 | CONFIG_SERIAL_UARTLITE_CONSOLE=y | 475 | CONFIG_SERIAL_UARTLITE_CONSOLE=y |
445 | CONFIG_SERIAL_CORE=y | 476 | CONFIG_SERIAL_CORE=y |
446 | CONFIG_SERIAL_CORE_CONSOLE=y | 477 | CONFIG_SERIAL_CORE_CONSOLE=y |
478 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
447 | # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set | 479 | # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set |
448 | CONFIG_UNIX98_PTYS=y | 480 | CONFIG_UNIX98_PTYS=y |
449 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 481 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
@@ -471,6 +503,12 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | |||
471 | # CONFIG_HWMON is not set | 503 | # CONFIG_HWMON is not set |
472 | # CONFIG_THERMAL is not set | 504 | # CONFIG_THERMAL is not set |
473 | # CONFIG_WATCHDOG is not set | 505 | # CONFIG_WATCHDOG is not set |
506 | CONFIG_SSB_POSSIBLE=y | ||
507 | |||
508 | # | ||
509 | # Sonics Silicon Backplane | ||
510 | # | ||
511 | # CONFIG_SSB is not set | ||
474 | 512 | ||
475 | # | 513 | # |
476 | # Multifunction device drivers | 514 | # Multifunction device drivers |
@@ -502,6 +540,7 @@ CONFIG_USB_ARCH_HAS_EHCI=y | |||
502 | # CONFIG_NEW_LEDS is not set | 540 | # CONFIG_NEW_LEDS is not set |
503 | # CONFIG_ACCESSIBILITY is not set | 541 | # CONFIG_ACCESSIBILITY is not set |
504 | # CONFIG_RTC_CLASS is not set | 542 | # CONFIG_RTC_CLASS is not set |
543 | # CONFIG_DMADEVICES is not set | ||
505 | # CONFIG_AUXDISPLAY is not set | 544 | # CONFIG_AUXDISPLAY is not set |
506 | # CONFIG_UIO is not set | 545 | # CONFIG_UIO is not set |
507 | 546 | ||
@@ -572,6 +611,7 @@ CONFIG_MISC_FILESYSTEMS=y | |||
572 | # CONFIG_BEFS_FS is not set | 611 | # CONFIG_BEFS_FS is not set |
573 | # CONFIG_BFS_FS is not set | 612 | # CONFIG_BFS_FS is not set |
574 | # CONFIG_EFS_FS is not set | 613 | # CONFIG_EFS_FS is not set |
614 | # CONFIG_LOGFS is not set | ||
575 | # CONFIG_CRAMFS is not set | 615 | # CONFIG_CRAMFS is not set |
576 | # CONFIG_SQUASHFS is not set | 616 | # CONFIG_SQUASHFS is not set |
577 | # CONFIG_VXFS_FS is not set | 617 | # CONFIG_VXFS_FS is not set |
@@ -595,6 +635,7 @@ CONFIG_SUNRPC=y | |||
595 | # CONFIG_RPCSEC_GSS_KRB5 is not set | 635 | # CONFIG_RPCSEC_GSS_KRB5 is not set |
596 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 636 | # CONFIG_RPCSEC_GSS_SPKM3 is not set |
597 | # CONFIG_SMB_FS is not set | 637 | # CONFIG_SMB_FS is not set |
638 | # CONFIG_CEPH_FS is not set | ||
598 | CONFIG_CIFS=y | 639 | CONFIG_CIFS=y |
599 | CONFIG_CIFS_STATS=y | 640 | CONFIG_CIFS_STATS=y |
600 | CONFIG_CIFS_STATS2=y | 641 | CONFIG_CIFS_STATS2=y |
@@ -696,6 +737,7 @@ CONFIG_SCHED_DEBUG=y | |||
696 | # CONFIG_DEBUG_OBJECTS is not set | 737 | # CONFIG_DEBUG_OBJECTS is not set |
697 | CONFIG_DEBUG_SLAB=y | 738 | CONFIG_DEBUG_SLAB=y |
698 | # CONFIG_DEBUG_SLAB_LEAK is not set | 739 | # CONFIG_DEBUG_SLAB_LEAK is not set |
740 | # CONFIG_DEBUG_KMEMLEAK is not set | ||
699 | CONFIG_DEBUG_SPINLOCK=y | 741 | CONFIG_DEBUG_SPINLOCK=y |
700 | # CONFIG_DEBUG_MUTEXES is not set | 742 | # CONFIG_DEBUG_MUTEXES is not set |
701 | # CONFIG_DEBUG_LOCK_ALLOC is not set | 743 | # CONFIG_DEBUG_LOCK_ALLOC is not set |
@@ -741,6 +783,7 @@ CONFIG_BRANCH_PROFILE_NONE=y | |||
741 | # CONFIG_KMEMTRACE is not set | 783 | # CONFIG_KMEMTRACE is not set |
742 | # CONFIG_WORKQUEUE_TRACER is not set | 784 | # CONFIG_WORKQUEUE_TRACER is not set |
743 | # CONFIG_BLK_DEV_IO_TRACE is not set | 785 | # CONFIG_BLK_DEV_IO_TRACE is not set |
786 | # CONFIG_DMA_API_DEBUG is not set | ||
744 | # CONFIG_SAMPLES is not set | 787 | # CONFIG_SAMPLES is not set |
745 | CONFIG_EARLY_PRINTK=y | 788 | CONFIG_EARLY_PRINTK=y |
746 | # CONFIG_HEART_BEAT is not set | 789 | # CONFIG_HEART_BEAT is not set |
@@ -862,5 +905,6 @@ CONFIG_ZLIB_INFLATE=y | |||
862 | CONFIG_DECOMPRESS_GZIP=y | 905 | CONFIG_DECOMPRESS_GZIP=y |
863 | CONFIG_HAS_IOMEM=y | 906 | CONFIG_HAS_IOMEM=y |
864 | CONFIG_HAS_IOPORT=y | 907 | CONFIG_HAS_IOPORT=y |
908 | CONFIG_HAS_DMA=y | ||
865 | CONFIG_HAVE_LMB=y | 909 | CONFIG_HAVE_LMB=y |
866 | CONFIG_NLATTR=y | 910 | CONFIG_NLATTR=y |
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig index ce2da535246a..dd3a494257f4 100644 --- a/arch/microblaze/configs/nommu_defconfig +++ b/arch/microblaze/configs/nommu_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.33-rc6 | 3 | # Linux kernel version: 2.6.34-rc6 |
4 | # Wed Feb 3 10:03:21 2010 | 4 | # Thu May 6 11:25:12 2010 |
5 | # | 5 | # |
6 | CONFIG_MICROBLAZE=y | 6 | CONFIG_MICROBLAZE=y |
7 | # CONFIG_SWAP is not set | 7 | # CONFIG_SWAP is not set |
@@ -22,8 +22,6 @@ CONFIG_GENERIC_CSUM=y | |||
22 | CONFIG_STACKTRACE_SUPPORT=y | 22 | CONFIG_STACKTRACE_SUPPORT=y |
23 | CONFIG_LOCKDEP_SUPPORT=y | 23 | CONFIG_LOCKDEP_SUPPORT=y |
24 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 24 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
25 | # CONFIG_PCI is not set | ||
26 | CONFIG_NO_DMA=y | ||
27 | CONFIG_DTC=y | 25 | CONFIG_DTC=y |
28 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 26 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
29 | CONFIG_CONSTRUCTORS=y | 27 | CONFIG_CONSTRUCTORS=y |
@@ -58,7 +56,6 @@ CONFIG_RCU_FANOUT=32 | |||
58 | CONFIG_IKCONFIG=y | 56 | CONFIG_IKCONFIG=y |
59 | CONFIG_IKCONFIG_PROC=y | 57 | CONFIG_IKCONFIG_PROC=y |
60 | CONFIG_LOG_BUF_SHIFT=17 | 58 | CONFIG_LOG_BUF_SHIFT=17 |
61 | # CONFIG_GROUP_SCHED is not set | ||
62 | # CONFIG_CGROUPS is not set | 59 | # CONFIG_CGROUPS is not set |
63 | CONFIG_SYSFS_DEPRECATED=y | 60 | CONFIG_SYSFS_DEPRECATED=y |
64 | CONFIG_SYSFS_DEPRECATED_V2=y | 61 | CONFIG_SYSFS_DEPRECATED_V2=y |
@@ -96,6 +93,8 @@ CONFIG_SLAB=y | |||
96 | # CONFIG_MMAP_ALLOW_UNINITIALIZED is not set | 93 | # CONFIG_MMAP_ALLOW_UNINITIALIZED is not set |
97 | # CONFIG_PROFILING is not set | 94 | # CONFIG_PROFILING is not set |
98 | CONFIG_HAVE_OPROFILE=y | 95 | CONFIG_HAVE_OPROFILE=y |
96 | CONFIG_HAVE_DMA_ATTRS=y | ||
97 | CONFIG_HAVE_DMA_API_DEBUG=y | ||
99 | 98 | ||
100 | # | 99 | # |
101 | # GCOV-based kernel profiling | 100 | # GCOV-based kernel profiling |
@@ -209,11 +208,14 @@ CONFIG_PROC_DEVICETREE=y | |||
209 | # | 208 | # |
210 | # Advanced setup | 209 | # Advanced setup |
211 | # | 210 | # |
211 | # CONFIG_ADVANCED_OPTIONS is not set | ||
212 | 212 | ||
213 | # | 213 | # |
214 | # Default settings for advanced configuration options are used | 214 | # Default settings for advanced configuration options are used |
215 | # | 215 | # |
216 | CONFIG_LOWMEM_SIZE=0x30000000 | ||
216 | CONFIG_KERNEL_START=0x90000000 | 217 | CONFIG_KERNEL_START=0x90000000 |
218 | CONFIG_TASK_SIZE=0x80000000 | ||
217 | CONFIG_SELECT_MEMORY_MODEL=y | 219 | CONFIG_SELECT_MEMORY_MODEL=y |
218 | CONFIG_FLATMEM_MANUAL=y | 220 | CONFIG_FLATMEM_MANUAL=y |
219 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 221 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
@@ -235,13 +237,20 @@ CONFIG_BINFMT_FLAT=y | |||
235 | # CONFIG_BINFMT_SHARED_FLAT is not set | 237 | # CONFIG_BINFMT_SHARED_FLAT is not set |
236 | # CONFIG_HAVE_AOUT is not set | 238 | # CONFIG_HAVE_AOUT is not set |
237 | # CONFIG_BINFMT_MISC is not set | 239 | # CONFIG_BINFMT_MISC is not set |
240 | |||
241 | # | ||
242 | # Bus Options | ||
243 | # | ||
244 | # CONFIG_PCI is not set | ||
245 | # CONFIG_PCI_DOMAINS is not set | ||
246 | # CONFIG_PCI_SYSCALL is not set | ||
247 | # CONFIG_ARCH_SUPPORTS_MSI is not set | ||
238 | CONFIG_NET=y | 248 | CONFIG_NET=y |
239 | 249 | ||
240 | # | 250 | # |
241 | # Networking options | 251 | # Networking options |
242 | # | 252 | # |
243 | CONFIG_PACKET=y | 253 | CONFIG_PACKET=y |
244 | # CONFIG_PACKET_MMAP is not set | ||
245 | CONFIG_UNIX=y | 254 | CONFIG_UNIX=y |
246 | CONFIG_XFRM=y | 255 | CONFIG_XFRM=y |
247 | # CONFIG_XFRM_USER is not set | 256 | # CONFIG_XFRM_USER is not set |
@@ -413,6 +422,7 @@ CONFIG_MTD_UCLINUX=y | |||
413 | # UBI - Unsorted block images | 422 | # UBI - Unsorted block images |
414 | # | 423 | # |
415 | # CONFIG_MTD_UBI is not set | 424 | # CONFIG_MTD_UBI is not set |
425 | CONFIG_OF_FLATTREE=y | ||
416 | CONFIG_OF_DEVICE=y | 426 | CONFIG_OF_DEVICE=y |
417 | # CONFIG_PARPORT is not set | 427 | # CONFIG_PARPORT is not set |
418 | CONFIG_BLK_DEV=y | 428 | CONFIG_BLK_DEV=y |
@@ -442,6 +452,7 @@ CONFIG_MISC_DEVICES=y | |||
442 | # | 452 | # |
443 | # SCSI device support | 453 | # SCSI device support |
444 | # | 454 | # |
455 | CONFIG_SCSI_MOD=y | ||
445 | # CONFIG_RAID_ATTRS is not set | 456 | # CONFIG_RAID_ATTRS is not set |
446 | # CONFIG_SCSI is not set | 457 | # CONFIG_SCSI is not set |
447 | # CONFIG_SCSI_DMA is not set | 458 | # CONFIG_SCSI_DMA is not set |
@@ -458,6 +469,7 @@ CONFIG_NETDEVICES=y | |||
458 | # CONFIG_PHYLIB is not set | 469 | # CONFIG_PHYLIB is not set |
459 | CONFIG_NET_ETHERNET=y | 470 | CONFIG_NET_ETHERNET=y |
460 | # CONFIG_MII is not set | 471 | # CONFIG_MII is not set |
472 | # CONFIG_ETHOC is not set | ||
461 | # CONFIG_DNET is not set | 473 | # CONFIG_DNET is not set |
462 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | 474 | # CONFIG_IBM_NEW_EMAC_ZMII is not set |
463 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | 475 | # CONFIG_IBM_NEW_EMAC_RGMII is not set |
@@ -466,6 +478,7 @@ CONFIG_NET_ETHERNET=y | |||
466 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | 478 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set |
467 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 479 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
468 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 480 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
481 | # CONFIG_B44 is not set | ||
469 | # CONFIG_KS8842 is not set | 482 | # CONFIG_KS8842 is not set |
470 | # CONFIG_KS8851_MLL is not set | 483 | # CONFIG_KS8851_MLL is not set |
471 | # CONFIG_XILINX_EMACLITE is not set | 484 | # CONFIG_XILINX_EMACLITE is not set |
@@ -516,6 +529,7 @@ CONFIG_SERIAL_UARTLITE=y | |||
516 | CONFIG_SERIAL_UARTLITE_CONSOLE=y | 529 | CONFIG_SERIAL_UARTLITE_CONSOLE=y |
517 | CONFIG_SERIAL_CORE=y | 530 | CONFIG_SERIAL_CORE=y |
518 | CONFIG_SERIAL_CORE_CONSOLE=y | 531 | CONFIG_SERIAL_CORE_CONSOLE=y |
532 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
519 | # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set | 533 | # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set |
520 | CONFIG_UNIX98_PTYS=y | 534 | CONFIG_UNIX98_PTYS=y |
521 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 535 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
@@ -544,6 +558,12 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | |||
544 | # CONFIG_HWMON is not set | 558 | # CONFIG_HWMON is not set |
545 | # CONFIG_THERMAL is not set | 559 | # CONFIG_THERMAL is not set |
546 | # CONFIG_WATCHDOG is not set | 560 | # CONFIG_WATCHDOG is not set |
561 | CONFIG_SSB_POSSIBLE=y | ||
562 | |||
563 | # | ||
564 | # Sonics Silicon Backplane | ||
565 | # | ||
566 | # CONFIG_SSB is not set | ||
547 | 567 | ||
548 | # | 568 | # |
549 | # Multifunction device drivers | 569 | # Multifunction device drivers |
@@ -593,6 +613,7 @@ CONFIG_USB_ARCH_HAS_EHCI=y | |||
593 | # CONFIG_NEW_LEDS is not set | 613 | # CONFIG_NEW_LEDS is not set |
594 | # CONFIG_ACCESSIBILITY is not set | 614 | # CONFIG_ACCESSIBILITY is not set |
595 | # CONFIG_RTC_CLASS is not set | 615 | # CONFIG_RTC_CLASS is not set |
616 | # CONFIG_DMADEVICES is not set | ||
596 | # CONFIG_AUXDISPLAY is not set | 617 | # CONFIG_AUXDISPLAY is not set |
597 | # CONFIG_UIO is not set | 618 | # CONFIG_UIO is not set |
598 | 619 | ||
@@ -661,6 +682,7 @@ CONFIG_MISC_FILESYSTEMS=y | |||
661 | # CONFIG_BFS_FS is not set | 682 | # CONFIG_BFS_FS is not set |
662 | # CONFIG_EFS_FS is not set | 683 | # CONFIG_EFS_FS is not set |
663 | # CONFIG_JFFS2_FS is not set | 684 | # CONFIG_JFFS2_FS is not set |
685 | # CONFIG_LOGFS is not set | ||
664 | CONFIG_CRAMFS=y | 686 | CONFIG_CRAMFS=y |
665 | # CONFIG_SQUASHFS is not set | 687 | # CONFIG_SQUASHFS is not set |
666 | # CONFIG_VXFS_FS is not set | 688 | # CONFIG_VXFS_FS is not set |
@@ -689,6 +711,7 @@ CONFIG_SUNRPC=y | |||
689 | # CONFIG_RPCSEC_GSS_KRB5 is not set | 711 | # CONFIG_RPCSEC_GSS_KRB5 is not set |
690 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 712 | # CONFIG_RPCSEC_GSS_SPKM3 is not set |
691 | # CONFIG_SMB_FS is not set | 713 | # CONFIG_SMB_FS is not set |
714 | # CONFIG_CEPH_FS is not set | ||
692 | # CONFIG_CIFS is not set | 715 | # CONFIG_CIFS is not set |
693 | # CONFIG_NCP_FS is not set | 716 | # CONFIG_NCP_FS is not set |
694 | # CONFIG_CODA_FS is not set | 717 | # CONFIG_CODA_FS is not set |
@@ -733,6 +756,7 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y | |||
733 | # CONFIG_DEBUG_OBJECTS_WORK is not set | 756 | # CONFIG_DEBUG_OBJECTS_WORK is not set |
734 | CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 | 757 | CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 |
735 | # CONFIG_DEBUG_SLAB is not set | 758 | # CONFIG_DEBUG_SLAB is not set |
759 | # CONFIG_DEBUG_KMEMLEAK is not set | ||
736 | # CONFIG_DEBUG_RT_MUTEXES is not set | 760 | # CONFIG_DEBUG_RT_MUTEXES is not set |
737 | # CONFIG_RT_MUTEX_TESTER is not set | 761 | # CONFIG_RT_MUTEX_TESTER is not set |
738 | # CONFIG_DEBUG_SPINLOCK is not set | 762 | # CONFIG_DEBUG_SPINLOCK is not set |
@@ -758,6 +782,7 @@ CONFIG_DEBUG_SG=y | |||
758 | # CONFIG_BACKTRACE_SELF_TEST is not set | 782 | # CONFIG_BACKTRACE_SELF_TEST is not set |
759 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | 783 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set |
760 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | 784 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set |
785 | # CONFIG_LKDTM is not set | ||
761 | # CONFIG_FAULT_INJECTION is not set | 786 | # CONFIG_FAULT_INJECTION is not set |
762 | # CONFIG_LATENCYTOP is not set | 787 | # CONFIG_LATENCYTOP is not set |
763 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 788 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
@@ -782,6 +807,7 @@ CONFIG_BRANCH_PROFILE_NONE=y | |||
782 | # CONFIG_WORKQUEUE_TRACER is not set | 807 | # CONFIG_WORKQUEUE_TRACER is not set |
783 | # CONFIG_BLK_DEV_IO_TRACE is not set | 808 | # CONFIG_BLK_DEV_IO_TRACE is not set |
784 | # CONFIG_DYNAMIC_DEBUG is not set | 809 | # CONFIG_DYNAMIC_DEBUG is not set |
810 | # CONFIG_DMA_API_DEBUG is not set | ||
785 | # CONFIG_SAMPLES is not set | 811 | # CONFIG_SAMPLES is not set |
786 | CONFIG_EARLY_PRINTK=y | 812 | CONFIG_EARLY_PRINTK=y |
787 | # CONFIG_HEART_BEAT is not set | 813 | # CONFIG_HEART_BEAT is not set |
@@ -901,5 +927,6 @@ CONFIG_GENERIC_FIND_LAST_BIT=y | |||
901 | CONFIG_ZLIB_INFLATE=y | 927 | CONFIG_ZLIB_INFLATE=y |
902 | CONFIG_HAS_IOMEM=y | 928 | CONFIG_HAS_IOMEM=y |
903 | CONFIG_HAS_IOPORT=y | 929 | CONFIG_HAS_IOPORT=y |
930 | CONFIG_HAS_DMA=y | ||
904 | CONFIG_HAVE_LMB=y | 931 | CONFIG_HAVE_LMB=y |
905 | CONFIG_NLATTR=y | 932 | CONFIG_NLATTR=y |
diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h index e52210891d78..4efe96a036f7 100644 --- a/arch/microblaze/include/asm/cache.h +++ b/arch/microblaze/include/asm/cache.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/registers.h> | 16 | #include <asm/registers.h> |
17 | 17 | ||
18 | #define L1_CACHE_SHIFT 2 | 18 | #define L1_CACHE_SHIFT 5 |
19 | /* word-granular cache in microblaze */ | 19 | /* word-granular cache in microblaze */ |
20 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | 20 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) |
21 | 21 | ||
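The L1_CACHE_SHIFT bump above (2 to 5) widens L1_CACHE_BYTES from 4 bytes (one word) to 32 bytes (a full cache line). A quick standalone check of that arithmetic, for illustration only and not part of the patch:

    /* Illustrative only: mirrors the two macros in asm/cache.h above. */
    #include <stdio.h>

    #define L1_CACHE_SHIFT  5
    #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)

    int main(void)
    {
            /* Prints 32, i.e. one cache line instead of the old 4-byte word. */
            printf("L1_CACHE_BYTES = %d\n", L1_CACHE_BYTES);
            return 0;
    }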
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h index 08c073badf19..0d73d0c6de37 100644 --- a/arch/microblaze/include/asm/dma.h +++ b/arch/microblaze/include/asm/dma.h | |||
@@ -18,4 +18,10 @@ | |||
18 | #define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) | 18 | #define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #ifdef CONFIG_PCI | ||
22 | extern int isa_dma_bridge_buggy; | ||
23 | #else | ||
24 | #define isa_dma_bridge_buggy (0) | ||
25 | #endif | ||
26 | |||
21 | #endif /* _ASM_MICROBLAZE_DMA_H */ | 27 | #endif /* _ASM_MICROBLAZE_DMA_H */ |
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h index 90731df9e574..4c7b5d037c88 100644 --- a/arch/microblaze/include/asm/exceptions.h +++ b/arch/microblaze/include/asm/exceptions.h | |||
@@ -64,12 +64,6 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
64 | void die(const char *str, struct pt_regs *fp, long err); | 64 | void die(const char *str, struct pt_regs *fp, long err); |
65 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); | 65 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); |
66 | 66 | ||
67 | #ifdef CONFIG_MMU | ||
68 | void __bug(const char *file, int line, void *data); | ||
69 | int bad_trap(int trap_num, struct pt_regs *regs); | ||
70 | int debug_trap(struct pt_regs *regs); | ||
71 | #endif /* CONFIG_MMU */ | ||
72 | |||
73 | #if defined(CONFIG_KGDB) | 67 | #if defined(CONFIG_KGDB) |
74 | void (*debugger)(struct pt_regs *regs); | 68 | void (*debugger)(struct pt_regs *regs); |
75 | int (*debugger_bpt)(struct pt_regs *regs); | 69 | int (*debugger_bpt)(struct pt_regs *regs); |
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index e45a6eea92e0..00b5398d08c7 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h | |||
@@ -139,8 +139,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) | |||
139 | 139 | ||
140 | #ifdef CONFIG_MMU | 140 | #ifdef CONFIG_MMU |
141 | 141 | ||
142 | #define mm_ptov(addr) ((void *)__phys_to_virt(addr)) | ||
143 | #define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr)) | ||
144 | #define phys_to_virt(addr) ((void *)__phys_to_virt(addr)) | 142 | #define phys_to_virt(addr) ((void *)__phys_to_virt(addr)) |
145 | #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) | 143 | #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) |
146 | #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) | 144 | #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) |
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 2dd1d04129e0..de493f86d28f 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h | |||
@@ -31,6 +31,9 @@ | |||
31 | 31 | ||
32 | #ifndef __ASSEMBLY__ | 32 | #ifndef __ASSEMBLY__ |
33 | 33 | ||
34 | /* MS: be sure that SLAB allocates aligned objects */ | ||
35 | #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES | ||
36 | |||
34 | #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) | 37 | #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) |
35 | #define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) | 38 | #define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) |
36 | 39 | ||
@@ -70,14 +73,7 @@ typedef unsigned long pte_basic_t; | |||
70 | 73 | ||
71 | #endif /* CONFIG_MMU */ | 74 | #endif /* CONFIG_MMU */ |
72 | 75 | ||
73 | # ifndef CONFIG_MMU | 76 | # define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) |
74 | # define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) | ||
75 | # define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | ||
76 | # define free_user_page(page, addr) free_page(addr) | ||
77 | # else /* CONFIG_MMU */ | ||
78 | extern void copy_page(void *to, void *from); | ||
79 | # endif /* CONFIG_MMU */ | ||
80 | |||
81 | # define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) | 77 | # define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) |
82 | 78 | ||
83 | # define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) | 79 | # define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) |
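The new ARCH_KMALLOC_MINALIGN definition in the first hunk of this file puts kmalloc() objects on L1_CACHE_BYTES boundaries, so a DMA transfer into one buffer cannot share a cache line with a neighbouring allocation whose dirty data a cache invalidate would otherwise discard. A hedged sketch of the property being relied on (assumes the 32-byte line from asm/cache.h; not kernel code):

    /* Illustrative only: two buffers clash when they share a cache line. */
    #include <stdint.h>
    #include <stdio.h>

    #define L1_CACHE_BYTES 32       /* assumed line size, per asm/cache.h */

    static int share_cache_line(uintptr_t a, uintptr_t b)
    {
            return (a / L1_CACHE_BYTES) == (b / L1_CACHE_BYTES);
    }

    int main(void)
    {
            /* With a 32-byte minimum alignment, back-to-back allocations
             * start on different lines, so invalidating one buffer's lines
             * cannot touch data belonging to the other. */
            uintptr_t buf_a = 0x1000;                  /* hypothetical allocation */
            uintptr_t buf_b = buf_a + L1_CACHE_BYTES;  /* next aligned object     */

            printf("share a line: %d\n", share_cache_line(buf_a, buf_b)); /* 0 */
            return 0;
    }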
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index bdd65aaee30d..5a388eeeb28f 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h | |||
@@ -94,14 +94,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, | |||
94 | 94 | ||
95 | #define HAVE_PCI_LEGACY 1 | 95 | #define HAVE_PCI_LEGACY 1 |
96 | 96 | ||
97 | /* pci_unmap_{page,single} is a nop so... */ | ||
98 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) | ||
99 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) | ||
100 | #define pci_unmap_addr(PTR, ADDR_NAME) (0) | ||
101 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) | ||
102 | #define pci_unmap_len(PTR, LEN_NAME) (0) | ||
103 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) | ||
104 | |||
105 | /* The PCI address space does equal the physical memory | 97 | /* The PCI address space does equal the physical memory |
106 | * address space (no IOMMU). The IDE and SCSI device layers use | 98 | * address space (no IOMMU). The IDE and SCSI device layers use |
107 | * this boolean for bounce buffer decisions. | 99 | * this boolean for bounce buffer decisions. |
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index f44b0d696fe2..c614a893f8a3 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h | |||
@@ -108,21 +108,7 @@ extern inline void free_pgd_slow(pgd_t *pgd) | |||
108 | #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) | 108 | #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) |
109 | #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) | 109 | #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) |
110 | 110 | ||
111 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 111 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); |
112 | unsigned long address) | ||
113 | { | ||
114 | pte_t *pte; | ||
115 | extern void *early_get_page(void); | ||
116 | if (mem_init_done) { | ||
117 | pte = (pte_t *)__get_free_page(GFP_KERNEL | | ||
118 | __GFP_REPEAT | __GFP_ZERO); | ||
119 | } else { | ||
120 | pte = (pte_t *)early_get_page(); | ||
121 | if (pte) | ||
122 | clear_page(pte); | ||
123 | } | ||
124 | return pte; | ||
125 | } | ||
126 | 112 | ||
127 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | 113 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
128 | unsigned long address) | 114 | unsigned long address) |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index dd2bb60651c7..ca2d92871545 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h | |||
@@ -512,15 +512,6 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address) | |||
512 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 512 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
513 | 513 | ||
514 | /* | 514 | /* |
515 | * When flushing the tlb entry for a page, we also need to flush the hash | ||
516 | * table entry. flush_hash_page is assembler (for speed) in hashtable.S. | ||
517 | */ | ||
518 | extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep); | ||
519 | |||
520 | /* Add an HPTE to the hash table */ | ||
521 | extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep); | ||
522 | |||
523 | /* | ||
524 | * Encode and decode a swap entry. | 515 | * Encode and decode a swap entry. |
525 | * Note that the bits we use in a PTE for representing a swap entry | 516 | * Note that the bits we use in a PTE for representing a swap entry |
526 | * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit | 517 | * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit |
@@ -533,15 +524,7 @@ extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep); | |||
533 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 }) | 524 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 }) |
534 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 }) | 525 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 }) |
535 | 526 | ||
536 | |||
537 | /* CONFIG_APUS */ | ||
538 | /* For virtual address to physical address conversion */ | ||
539 | extern void cache_clear(__u32 addr, int length); | ||
540 | extern void cache_push(__u32 addr, int length); | ||
541 | extern int mm_end_of_chunk(unsigned long addr, int len); | ||
542 | extern unsigned long iopa(unsigned long addr); | 527 | extern unsigned long iopa(unsigned long addr); |
543 | /* extern unsigned long mm_ptov(unsigned long addr) \ | ||
544 | __attribute__ ((const)); TBD */ | ||
545 | 528 | ||
546 | /* Values for nocacheflag and cmode */ | 529 | /* Values for nocacheflag and cmode */ |
547 | /* These are not used by the APUS kernel_map, but prevents | 530 | /* These are not used by the APUS kernel_map, but prevents |
@@ -552,18 +535,6 @@ extern unsigned long iopa(unsigned long addr); | |||
552 | #define IOMAP_NOCACHE_NONSER 2 | 535 | #define IOMAP_NOCACHE_NONSER 2 |
553 | #define IOMAP_NO_COPYBACK 3 | 536 | #define IOMAP_NO_COPYBACK 3 |
554 | 537 | ||
555 | /* | ||
556 | * Map some physical address range into the kernel address space. | ||
557 | */ | ||
558 | extern unsigned long kernel_map(unsigned long paddr, unsigned long size, | ||
559 | int nocacheflag, unsigned long *memavailp); | ||
560 | |||
561 | /* | ||
562 | * Set cache mode of (kernel space) address range. | ||
563 | */ | ||
564 | extern void kernel_set_cachemode(unsigned long address, unsigned long size, | ||
565 | unsigned int cmode); | ||
566 | |||
567 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ | 538 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ |
568 | #define kern_addr_valid(addr) (1) | 539 | #define kern_addr_valid(addr) (1) |
569 | 540 | ||
@@ -577,10 +548,6 @@ extern void kernel_set_cachemode(unsigned long address, unsigned long size, | |||
577 | void do_page_fault(struct pt_regs *regs, unsigned long address, | 548 | void do_page_fault(struct pt_regs *regs, unsigned long address, |
578 | unsigned long error_code); | 549 | unsigned long error_code); |
579 | 550 | ||
580 | void __init io_block_mapping(unsigned long virt, phys_addr_t phys, | ||
581 | unsigned int size, int flags); | ||
582 | |||
583 | void __init adjust_total_lowmem(void); | ||
584 | void mapin_ram(void); | 551 | void mapin_ram(void); |
585 | int map_page(unsigned long va, phys_addr_t pa, int flags); | 552 | int map_page(unsigned long va, phys_addr_t pa, int flags); |
586 | 553 | ||
@@ -601,7 +568,7 @@ void __init *early_get_page(void); | |||
601 | extern unsigned long ioremap_bot, ioremap_base; | 568 | extern unsigned long ioremap_bot, ioremap_base; |
602 | 569 | ||
603 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); | 570 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); |
604 | void consistent_free(void *vaddr); | 571 | void consistent_free(size_t size, void *vaddr); |
605 | void consistent_sync(void *vaddr, size_t size, int direction); | 572 | void consistent_sync(void *vaddr, size_t size, int direction); |
606 | void consistent_sync_page(struct page *page, unsigned long offset, | 573 | void consistent_sync_page(struct page *page, unsigned long offset, |
607 | size_t size, int direction); | 574 | size_t size, int direction); |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 446bec29b142..26460d15b338 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h | |||
@@ -182,6 +182,39 @@ extern long __user_bad(void); | |||
182 | * Returns zero on success, or -EFAULT on error. | 182 | * Returns zero on success, or -EFAULT on error. |
183 | * On error, the variable @x is set to zero. | 183 | * On error, the variable @x is set to zero. |
184 | */ | 184 | */ |
185 | #define get_user(x, ptr) \ | ||
186 | __get_user_check((x), (ptr), sizeof(*(ptr))) | ||
187 | |||
188 | #define __get_user_check(x, ptr, size) \ | ||
189 | ({ \ | ||
190 | unsigned long __gu_val = 0; \ | ||
191 | const typeof(*(ptr)) __user *__gu_addr = (ptr); \ | ||
192 | int __gu_err = 0; \ | ||
193 | \ | ||
194 | if (access_ok(VERIFY_READ, __gu_addr, size)) { \ | ||
195 | switch (size) { \ | ||
196 | case 1: \ | ||
197 | __get_user_asm("lbu", __gu_addr, __gu_val, \ | ||
198 | __gu_err); \ | ||
199 | break; \ | ||
200 | case 2: \ | ||
201 | __get_user_asm("lhu", __gu_addr, __gu_val, \ | ||
202 | __gu_err); \ | ||
203 | break; \ | ||
204 | case 4: \ | ||
205 | __get_user_asm("lw", __gu_addr, __gu_val, \ | ||
206 | __gu_err); \ | ||
207 | break; \ | ||
208 | default: \ | ||
209 | __gu_err = __user_bad(); \ | ||
210 | break; \ | ||
211 | } \ | ||
212 | } else { \ | ||
213 | __gu_err = -EFAULT; \ | ||
214 | } \ | ||
215 | x = (typeof(*(ptr)))__gu_val; \ | ||
216 | __gu_err; \ | ||
217 | }) | ||
185 | 218 | ||
186 | #define __get_user(x, ptr) \ | 219 | #define __get_user(x, ptr) \ |
187 | ({ \ | 220 | ({ \ |
@@ -206,12 +239,6 @@ extern long __user_bad(void); | |||
206 | }) | 239 | }) |
207 | 240 | ||
208 | 241 | ||
209 | #define get_user(x, ptr) \ | ||
210 | ({ \ | ||
211 | access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ | ||
212 | ? __get_user((x), (ptr)) : -EFAULT; \ | ||
213 | }) | ||
214 | |||
215 | #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ | 242 | #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ |
216 | ({ \ | 243 | ({ \ |
217 | __asm__ __volatile__ ( \ | 244 | __asm__ __volatile__ ( \ |
@@ -266,6 +293,42 @@ extern long __user_bad(void); | |||
266 | * | 293 | * |
267 | * Returns zero on success, or -EFAULT on error. | 294 | * Returns zero on success, or -EFAULT on error. |
268 | */ | 295 | */ |
296 | #define put_user(x, ptr) \ | ||
297 | __put_user_check((x), (ptr), sizeof(*(ptr))) | ||
298 | |||
299 | #define __put_user_check(x, ptr, size) \ | ||
300 | ({ \ | ||
301 | typeof(*(ptr)) __pu_val; \ | ||
302 | typeof(*(ptr)) __user *__pu_addr = (ptr); \ | ||
303 | int __pu_err = 0; \ | ||
304 | \ | ||
305 | __pu_val = (x); \ | ||
306 | if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \ | ||
307 | switch (size) { \ | ||
308 | case 1: \ | ||
309 | __put_user_asm("sb", __pu_addr, __pu_val, \ | ||
310 | __pu_err); \ | ||
311 | break; \ | ||
312 | case 2: \ | ||
313 | __put_user_asm("sh", __pu_addr, __pu_val, \ | ||
314 | __pu_err); \ | ||
315 | break; \ | ||
316 | case 4: \ | ||
317 | __put_user_asm("sw", __pu_addr, __pu_val, \ | ||
318 | __pu_err); \ | ||
319 | break; \ | ||
320 | case 8: \ | ||
321 | __put_user_asm_8(__pu_addr, __pu_val, __pu_err);\ | ||
322 | break; \ | ||
323 | default: \ | ||
324 | __pu_err = __user_bad(); \ | ||
325 | break; \ | ||
326 | } \ | ||
327 | } else { \ | ||
328 | __pu_err = -EFAULT; \ | ||
329 | } \ | ||
330 | __pu_err; \ | ||
331 | }) | ||
269 | 332 | ||
270 | #define __put_user(x, ptr) \ | 333 | #define __put_user(x, ptr) \ |
271 | ({ \ | 334 | ({ \ |
@@ -290,18 +353,6 @@ extern long __user_bad(void); | |||
290 | __gu_err; \ | 353 | __gu_err; \ |
291 | }) | 354 | }) |
292 | 355 | ||
293 | #ifndef CONFIG_MMU | ||
294 | |||
295 | #define put_user(x, ptr) __put_user((x), (ptr)) | ||
296 | |||
297 | #else /* CONFIG_MMU */ | ||
298 | |||
299 | #define put_user(x, ptr) \ | ||
300 | ({ \ | ||
301 | access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ | ||
302 | ? __put_user((x), (ptr)) : -EFAULT; \ | ||
303 | }) | ||
304 | #endif /* CONFIG_MMU */ | ||
305 | 356 | ||
306 | /* copy_to_from_user */ | 357 | /* copy_to_from_user */ |
307 | #define __copy_from_user(to, from, n) \ | 358 | #define __copy_from_user(to, from, n) \ |
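The hunks above replace the previous get_user()/put_user() definitions (including the old MMU/noMMU split for put_user()) with __get_user_check()/__put_user_check(), which fold the access_ok() test and the size dispatch into a single macro for both configurations. A hypothetical caller sketch, with invented function and variable names; the macros themselves are the ones defined above and return 0 on success or -EFAULT on a faulting access:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Reads the old value from a user pointer, then stores a new one. */
    static int example_swap_user_flag(int __user *uptr, int new_value, int *old_value)
    {
            if (get_user(*old_value, uptr))         /* checked 4-byte load  */
                    return -EFAULT;
            if (put_user(new_value, uptr))          /* checked 4-byte store */
                    return -EFAULT;
            return 0;
    }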
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c index 0071260a672c..c1b459c97571 100644 --- a/arch/microblaze/kernel/asm-offsets.c +++ b/arch/microblaze/kernel/asm-offsets.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/hardirq.h> | 16 | #include <linux/hardirq.h> |
17 | #include <linux/thread_info.h> | 17 | #include <linux/thread_info.h> |
18 | #include <linux/kbuild.h> | 18 | #include <linux/kbuild.h> |
19 | #include <asm/cpuinfo.h> | ||
19 | 20 | ||
20 | int main(int argc, char *argv[]) | 21 | int main(int argc, char *argv[]) |
21 | { | 22 | { |
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index f04d8a86dead..109876e8d643 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c | |||
@@ -96,13 +96,16 @@ static inline void __disable_dcache_nomsr(void) | |||
96 | } | 96 | } |
97 | 97 | ||
98 | 98 | ||
99 | /* Helper macro for computing the limits of cache range loops */ | 99 | /* Helper macro for computing the limits of cache range loops |
100 | * | ||
101 | * The end address can be unaligned, which is OK for the C implementation. | ||
102 | * The ASM implementation aligns it in its ASM macros. | ||
103 | */ | ||
100 | #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ | 104 | #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ |
101 | do { \ | 105 | do { \ |
102 | int align = ~(cache_line_length - 1); \ | 106 | int align = ~(cache_line_length - 1); \ |
103 | end = min(start + cache_size, end); \ | 107 | end = min(start + cache_size, end); \ |
104 | start &= align; \ | 108 | start &= align; \ |
105 | end = ((end & align) + cache_line_length); \ | ||
106 | } while (0); | 109 | } while (0); |
107 | 110 | ||
108 | /* | 111 | /* |
@@ -111,9 +114,9 @@ do { \ | |||
111 | */ | 114 | */ |
112 | #define CACHE_ALL_LOOP(cache_size, line_length, op) \ | 115 | #define CACHE_ALL_LOOP(cache_size, line_length, op) \ |
113 | do { \ | 116 | do { \ |
114 | unsigned int len = cache_size; \ | 117 | unsigned int len = cache_size - line_length; \ |
115 | int step = -line_length; \ | 118 | int step = -line_length; \ |
116 | BUG_ON(step >= 0); \ | 119 | WARN_ON(step >= 0); \ |
117 | \ | 120 | \ |
118 | __asm__ __volatile__ (" 1: " #op " %0, r0; \ | 121 | __asm__ __volatile__ (" 1: " #op " %0, r0; \ |
119 | bgtid %0, 1b; \ | 122 | bgtid %0, 1b; \ |
@@ -122,26 +125,22 @@ do { \ | |||
122 | : "memory"); \ | 125 | : "memory"); \ |
123 | } while (0); | 126 | } while (0); |
124 | 127 | ||
125 | 128 | /* Used for wdc.flush/clear which can use rB for offset which is not possible | |
126 | #define CACHE_ALL_LOOP2(cache_size, line_length, op) \ | 129 | * to use for simple wdc or wic. |
127 | do { \ | 130 | * |
128 | unsigned int len = cache_size; \ | 131 | * start address is cache aligned |
129 | int step = -line_length; \ | 132 | * end address is not aligned; if the end is aligned then I have to subtract |
130 | BUG_ON(step >= 0); \ | 133 | * the cacheline length because I can't flush/invalidate the next cacheline. |
131 | \ | 134 | * If it is not, I align it because I will flush/invalidate the whole line. |
132 | __asm__ __volatile__ (" 1: " #op " r0, %0; \ | 135 | */ |
133 | bgtid %0, 1b; \ | ||
134 | addk %0, %0, %1; \ | ||
135 | " : : "r" (len), "r" (step) \ | ||
136 | : "memory"); \ | ||
137 | } while (0); | ||
138 | |||
139 | /* for wdc.flush/clear */ | ||
140 | #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ | 136 | #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ |
141 | do { \ | 137 | do { \ |
142 | int step = -line_length; \ | 138 | int step = -line_length; \ |
143 | int count = end - start; \ | 139 | int align = ~(line_length - 1); \ |
144 | BUG_ON(count <= 0); \ | 140 | int count; \ |
141 | end = ((end & align) == end) ? end - line_length : end & align; \ | ||
142 | count = end - start; \ | ||
143 | WARN_ON(count < 0); \ | ||
145 | \ | 144 | \ |
146 | __asm__ __volatile__ (" 1: " #op " %0, %1; \ | 145 | __asm__ __volatile__ (" 1: " #op " %0, %1; \ |
147 | bgtid %1, 1b; \ | 146 | bgtid %1, 1b; \ |
@@ -154,7 +153,9 @@ do { \ | |||
154 | #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ | 153 | #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ |
155 | do { \ | 154 | do { \ |
156 | int volatile temp; \ | 155 | int volatile temp; \ |
157 | BUG_ON(end - start <= 0); \ | 156 | int align = ~(line_length - 1); \ |
157 | end = ((end & align) == end) ? end - line_length : end & align; \ | ||
158 | WARN_ON(end - start < 0); \ | ||
158 | \ | 159 | \ |
159 | __asm__ __volatile__ (" 1: " #op " %1, r0; \ | 160 | __asm__ __volatile__ (" 1: " #op " %1, r0; \ |
160 | cmpu %0, %1, %2; \ | 161 | cmpu %0, %1, %2; \ |
@@ -360,8 +361,12 @@ static void __invalidate_dcache_all_noirq_wt(void) | |||
360 | #endif | 361 | #endif |
361 | } | 362 | } |
362 | 363 | ||
363 | /* FIXME this is weird - should be only wdc but not work | 364 | /* FIXME It is a blind invalidation, as expected, |
364 | * MS: I am getting bus errors and other weird things */ | 365 | * but can't be called on noMMU in microblaze_cache_init below |
366 | * | ||
367 | * MS: noMMU kernel won't boot if simple wdc is used | ||
368 | * The reason should be that there is discarded data which the kernel needs | ||
369 | */ | ||
365 | static void __invalidate_dcache_all_wb(void) | 370 | static void __invalidate_dcache_all_wb(void) |
366 | { | 371 | { |
367 | #ifndef ASM_LOOP | 372 | #ifndef ASM_LOOP |
@@ -369,12 +374,12 @@ static void __invalidate_dcache_all_wb(void) | |||
369 | #endif | 374 | #endif |
370 | pr_debug("%s\n", __func__); | 375 | pr_debug("%s\n", __func__); |
371 | #ifdef ASM_LOOP | 376 | #ifdef ASM_LOOP |
372 | CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, | 377 | CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, |
373 | wdc.clear) | 378 | wdc) |
374 | #else | 379 | #else |
375 | for (i = 0; i < cpuinfo.dcache_size; | 380 | for (i = 0; i < cpuinfo.dcache_size; |
376 | i += cpuinfo.dcache_line_length) | 381 | i += cpuinfo.dcache_line_length) |
377 | __asm__ __volatile__ ("wdc.clear %0, r0;" \ | 382 | __asm__ __volatile__ ("wdc %0, r0;" \ |
378 | : : "r" (i)); | 383 | : : "r" (i)); |
379 | #endif | 384 | #endif |
380 | } | 385 | } |
@@ -393,7 +398,7 @@ static void __invalidate_dcache_range_wb(unsigned long start, | |||
393 | #ifdef ASM_LOOP | 398 | #ifdef ASM_LOOP |
394 | CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); | 399 | CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); |
395 | #else | 400 | #else |
396 | for (i = start; i < end; i += cpuinfo.icache_line_length) | 401 | for (i = start; i < end; i += cpuinfo.dcache_line_length) |
397 | __asm__ __volatile__ ("wdc.clear %0, r0;" \ | 402 | __asm__ __volatile__ ("wdc.clear %0, r0;" \ |
398 | : : "r" (i)); | 403 | : : "r" (i)); |
399 | #endif | 404 | #endif |
@@ -413,7 +418,7 @@ static void __invalidate_dcache_range_nomsr_wt(unsigned long start, | |||
413 | #ifdef ASM_LOOP | 418 | #ifdef ASM_LOOP |
414 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); | 419 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); |
415 | #else | 420 | #else |
416 | for (i = start; i < end; i += cpuinfo.icache_line_length) | 421 | for (i = start; i < end; i += cpuinfo.dcache_line_length) |
417 | __asm__ __volatile__ ("wdc %0, r0;" \ | 422 | __asm__ __volatile__ ("wdc %0, r0;" \ |
418 | : : "r" (i)); | 423 | : : "r" (i)); |
419 | #endif | 424 | #endif |
@@ -437,7 +442,7 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, | |||
437 | #ifdef ASM_LOOP | 442 | #ifdef ASM_LOOP |
438 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); | 443 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); |
439 | #else | 444 | #else |
440 | for (i = start; i < end; i += cpuinfo.icache_line_length) | 445 | for (i = start; i < end; i += cpuinfo.dcache_line_length) |
441 | __asm__ __volatile__ ("wdc %0, r0;" \ | 446 | __asm__ __volatile__ ("wdc %0, r0;" \ |
442 | : : "r" (i)); | 447 | : : "r" (i)); |
443 | #endif | 448 | #endif |
@@ -465,7 +470,7 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, | |||
465 | #ifdef ASM_LOOP | 470 | #ifdef ASM_LOOP |
466 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); | 471 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); |
467 | #else | 472 | #else |
468 | for (i = start; i < end; i += cpuinfo.icache_line_length) | 473 | for (i = start; i < end; i += cpuinfo.dcache_line_length) |
469 | __asm__ __volatile__ ("wdc %0, r0;" \ | 474 | __asm__ __volatile__ ("wdc %0, r0;" \ |
470 | : : "r" (i)); | 475 | : : "r" (i)); |
471 | #endif | 476 | #endif |
@@ -504,7 +509,7 @@ static void __flush_dcache_range_wb(unsigned long start, unsigned long end) | |||
504 | #ifdef ASM_LOOP | 509 | #ifdef ASM_LOOP |
505 | CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); | 510 | CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); |
506 | #else | 511 | #else |
507 | for (i = start; i < end; i += cpuinfo.icache_line_length) | 512 | for (i = start; i < end; i += cpuinfo.dcache_line_length) |
508 | __asm__ __volatile__ ("wdc.flush %0, r0;" \ | 513 | __asm__ __volatile__ ("wdc.flush %0, r0;" \ |
509 | : : "r" (i)); | 514 | : : "r" (i)); |
510 | #endif | 515 | #endif |
@@ -650,7 +655,11 @@ void microblaze_cache_init(void) | |||
650 | } | 655 | } |
651 | } | 656 | } |
652 | } | 657 | } |
653 | invalidate_dcache(); | 658 | /* FIXME Invalidation is done in U-BOOT |
659 | * WT cache: Data is already written to main memory | ||
660 | * WB cache: discarding the data on noMMU caused the kernel not to boot | ||
661 | */ | ||
662 | /* invalidate_dcache(); */ | ||
654 | enable_dcache(); | 663 | enable_dcache(); |
655 | 664 | ||
656 | invalidate_icache(); | 665 | invalidate_icache(); |
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c index 0c912b2a8e03..4216eb1eaa32 100644 --- a/arch/microblaze/kernel/cpu/mb.c +++ b/arch/microblaze/kernel/cpu/mb.c | |||
@@ -98,15 +98,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
98 | 98 | ||
99 | if (cpuinfo.use_icache) | 99 | if (cpuinfo.use_icache) |
100 | count += seq_printf(m, | 100 | count += seq_printf(m, |
101 | "Icache:\t\t%ukB\n", | 101 | "Icache:\t\t%ukB\tline length:\t%dB\n", |
102 | cpuinfo.icache_size >> 10); | 102 | cpuinfo.icache_size >> 10, |
103 | cpuinfo.icache_line_length); | ||
103 | else | 104 | else |
104 | count += seq_printf(m, "Icache:\t\tno\n"); | 105 | count += seq_printf(m, "Icache:\t\tno\n"); |
105 | 106 | ||
106 | if (cpuinfo.use_dcache) { | 107 | if (cpuinfo.use_dcache) { |
107 | count += seq_printf(m, | 108 | count += seq_printf(m, |
108 | "Dcache:\t\t%ukB\n", | 109 | "Dcache:\t\t%ukB\tline length:\t%dB\n", |
109 | cpuinfo.dcache_size >> 10); | 110 | cpuinfo.dcache_size >> 10, |
111 | cpuinfo.dcache_line_length); | ||
110 | if (cpuinfo.dcache_wb) | 112 | if (cpuinfo.dcache_wb) |
111 | count += seq_printf(m, "\t\twrite-back\n"); | 113 | count += seq_printf(m, "\t\twrite-back\n"); |
112 | else | 114 | else |
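With the widened format strings above, each cache entry in /proc/cpuinfo now reports its line length alongside its size. A standalone illustration of what the new Icache line produces, using made-up values (a 32 KB cache with 32-byte lines; the real numbers come from the cpuinfo structure):

    /* Illustrative only: reproduces the new Icache format string. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int icache_size = 32768;       /* hypothetical: 32 KB    */
            int icache_line_length = 32;            /* hypothetical: 32 bytes */

            printf("Icache:\t\t%ukB\tline length:\t%dB\n",
                   icache_size >> 10, icache_line_length);
            /* Prints roughly: Icache:   32kB    line length:    32B */
            return 0;
    }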
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index ce72dd4967cf..9dcd90b5df55 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -74,7 +74,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, | |||
74 | void *vaddr, dma_addr_t dma_handle) | 74 | void *vaddr, dma_addr_t dma_handle) |
75 | { | 75 | { |
76 | #ifdef NOT_COHERENT_CACHE | 76 | #ifdef NOT_COHERENT_CACHE |
77 | consistent_free(vaddr); | 77 | consistent_free(size, vaddr); |
78 | #else | 78 | #else |
79 | free_pages((unsigned long)vaddr, get_order(size)); | 79 | free_pages((unsigned long)vaddr, get_order(size)); |
80 | #endif | 80 | #endif |
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S index 391d6197fc3b..8cc18cd2cce6 100644 --- a/arch/microblaze/kernel/entry-nommu.S +++ b/arch/microblaze/kernel/entry-nommu.S | |||
@@ -476,6 +476,8 @@ ENTRY(ret_from_fork) | |||
476 | nop | 476 | nop |
477 | 477 | ||
478 | work_pending: | 478 | work_pending: |
479 | enable_irq | ||
480 | |||
479 | andi r11, r19, _TIF_NEED_RESCHED | 481 | andi r11, r19, _TIF_NEED_RESCHED |
480 | beqi r11, 1f | 482 | beqi r11, 1f |
481 | bralid r15, schedule | 483 | bralid r15, schedule |
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index d9f70f83097f..02cbdfe5aa8d 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c | |||
@@ -121,7 +121,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
121 | } | 121 | } |
122 | printk(KERN_WARNING "Divide by zero exception " \ | 122 | printk(KERN_WARNING "Divide by zero exception " \ |
123 | "in kernel mode.\n"); | 123 | "in kernel mode.\n"); |
124 | die("Divide by exception", regs, SIGBUS); | 124 | die("Divide by zero exception", regs, SIGBUS); |
125 | break; | 125 | break; |
126 | case MICROBLAZE_FPU_EXCEPTION: | 126 | case MICROBLAZE_FPU_EXCEPTION: |
127 | pr_debug(KERN_WARNING "FPU exception\n"); | 127 | pr_debug(KERN_WARNING "FPU exception\n"); |
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index da6a5f5dc766..1bf739888260 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
@@ -28,6 +28,7 @@ | |||
28 | * for more details. | 28 | * for more details. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/init.h> | ||
31 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
32 | #include <asm/thread_info.h> | 33 | #include <asm/thread_info.h> |
33 | #include <asm/page.h> | 34 | #include <asm/page.h> |
@@ -49,7 +50,7 @@ swapper_pg_dir: | |||
49 | 50 | ||
50 | #endif /* CONFIG_MMU */ | 51 | #endif /* CONFIG_MMU */ |
51 | 52 | ||
52 | .text | 53 | __HEAD |
53 | ENTRY(_start) | 54 | ENTRY(_start) |
54 | #if CONFIG_KERNEL_BASE_ADDR == 0 | 55 | #if CONFIG_KERNEL_BASE_ADDR == 0 |
55 | brai TOPHYS(real_start) | 56 | brai TOPHYS(real_start) |
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 6f39e2c001f3..8f120aca123d 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/ftrace.h> | ||
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/hardirq.h> | 14 | #include <linux/hardirq.h> |
14 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
@@ -32,7 +33,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); | |||
32 | 33 | ||
33 | static u32 concurrent_irq; | 34 | static u32 concurrent_irq; |
34 | 35 | ||
35 | void do_IRQ(struct pt_regs *regs) | 36 | void __irq_entry do_IRQ(struct pt_regs *regs) |
36 | { | 37 | { |
37 | unsigned int irq; | 38 | unsigned int irq; |
38 | struct pt_regs *old_regs = set_irq_regs(regs); | 39 | struct pt_regs *old_regs = set_irq_regs(regs); |
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index bc4dcb7d3861..ff85f7718035 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c | |||
@@ -52,3 +52,14 @@ EXPORT_SYMBOL_GPL(_ebss); | |||
52 | extern void _mcount(void); | 52 | extern void _mcount(void); |
53 | EXPORT_SYMBOL(_mcount); | 53 | EXPORT_SYMBOL(_mcount); |
54 | #endif | 54 | #endif |
55 | |||
56 | /* | ||
57 | * Assembly functions that may be used (directly or indirectly) by modules | ||
58 | */ | ||
59 | EXPORT_SYMBOL(__copy_tofrom_user); | ||
60 | EXPORT_SYMBOL(__strncpy_user); | ||
61 | |||
62 | #ifdef CONFIG_OPT_LIB_ASM | ||
63 | EXPORT_SYMBOL(memcpy); | ||
64 | EXPORT_SYMBOL(memmove); | ||
65 | #endif | ||
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index 7cf86498326c..0fb5fc6c1fc2 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S | |||
@@ -93,39 +93,3 @@ early_console_reg_tlb_alloc: | |||
93 | nop | 93 | nop |
94 | 94 | ||
95 | .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc | 95 | .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc |
96 | |||
97 | /* | ||
98 | * Copy a whole page (4096 bytes). | ||
99 | */ | ||
100 | #define COPY_16_BYTES \ | ||
101 | lwi r7, r6, 0; \ | ||
102 | lwi r8, r6, 4; \ | ||
103 | lwi r9, r6, 8; \ | ||
104 | lwi r10, r6, 12; \ | ||
105 | swi r7, r5, 0; \ | ||
106 | swi r8, r5, 4; \ | ||
107 | swi r9, r5, 8; \ | ||
108 | swi r10, r5, 12 | ||
109 | |||
110 | |||
111 | /* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/ | ||
112 | #define DCACHE_LINE_BYTES (4 * 4) | ||
113 | |||
114 | .globl copy_page; | ||
115 | .type copy_page, @function | ||
116 | .align 4; | ||
117 | copy_page: | ||
118 | ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 | ||
119 | _copy_page_loop: | ||
120 | COPY_16_BYTES | ||
121 | #if DCACHE_LINE_BYTES >= 32 | ||
122 | COPY_16_BYTES | ||
123 | #endif | ||
124 | addik r6, r6, DCACHE_LINE_BYTES | ||
125 | addik r5, r5, DCACHE_LINE_BYTES | ||
126 | bneid r11, _copy_page_loop | ||
127 | addik r11, r11, -1 | ||
128 | rtsd r15, 8 | ||
129 | nop | ||
130 | |||
131 | .size copy_page, . - copy_page | ||
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c index cbecf110dc30..0e73f6606547 100644 --- a/arch/microblaze/kernel/module.c +++ b/arch/microblaze/kernel/module.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | 17 | ||
18 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
19 | #include <asm/cacheflush.h> | ||
19 | 20 | ||
20 | void *module_alloc(unsigned long size) | 21 | void *module_alloc(unsigned long size) |
21 | { | 22 | { |
@@ -151,6 +152,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, | |||
151 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | 152 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, |
152 | struct module *module) | 153 | struct module *module) |
153 | { | 154 | { |
155 | flush_dcache(); | ||
154 | return 0; | 156 | return 0; |
155 | } | 157 | } |
156 | 158 | ||
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index 5e4570ef515c..75e49202a5ed 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c | |||
@@ -95,37 +95,3 @@ void dump_stack(void) | |||
95 | show_stack(NULL, NULL); | 95 | show_stack(NULL, NULL); |
96 | } | 96 | } |
97 | EXPORT_SYMBOL(dump_stack); | 97 | EXPORT_SYMBOL(dump_stack); |
98 | |||
99 | #ifdef CONFIG_MMU | ||
100 | void __bug(const char *file, int line, void *data) | ||
101 | { | ||
102 | if (data) | ||
103 | printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n", | ||
104 | file, line, data); | ||
105 | else | ||
106 | printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line); | ||
107 | |||
108 | machine_halt(); | ||
109 | } | ||
110 | |||
111 | int bad_trap(int trap_num, struct pt_regs *regs) | ||
112 | { | ||
113 | printk(KERN_CRIT | ||
114 | "unimplemented trap %d called at 0x%08lx, pid %d!\n", | ||
115 | trap_num, regs->pc, current->pid); | ||
116 | return -ENOSYS; | ||
117 | } | ||
118 | |||
119 | int debug_trap(struct pt_regs *regs) | ||
120 | { | ||
121 | int i; | ||
122 | printk(KERN_CRIT "debug trap\n"); | ||
123 | for (i = 0; i < 32; i++) { | ||
124 | /* printk("r%i:%08X\t",i,regs->gpr[i]); */ | ||
125 | if ((i % 4) == 3) | ||
126 | printk(KERN_CRIT "\n"); | ||
127 | } | ||
128 | printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr); | ||
129 | return -ENOSYS; | ||
130 | } | ||
131 | #endif | ||
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 5ef619aad634..db72d7124602 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S | |||
@@ -24,7 +24,8 @@ SECTIONS { | |||
24 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 24 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
25 | _text = . ; | 25 | _text = . ; |
26 | _stext = . ; | 26 | _stext = . ; |
27 | *(.text .text.*) | 27 | HEAD_TEXT |
28 | TEXT_TEXT | ||
28 | *(.fixup) | 29 | *(.fixup) |
29 | EXIT_TEXT | 30 | EXIT_TEXT |
30 | EXIT_CALL | 31 | EXIT_CALL |
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index f956e24fe49c..5a59dad62bd2 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c | |||
@@ -42,11 +42,12 @@ | |||
42 | #include <linux/uaccess.h> | 42 | #include <linux/uaccess.h> |
43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
44 | #include <asm/cpuinfo.h> | 44 | #include <asm/cpuinfo.h> |
45 | #include <asm/tlbflush.h> | ||
45 | 46 | ||
46 | #ifndef CONFIG_MMU | 47 | #ifndef CONFIG_MMU |
47 | |||
48 | /* I have to use dcache values because I can't rely on the RAM size */ | 48 | /* I have to use dcache values because I can't rely on the RAM size */ |
49 | #define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) | 49 | # define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) |
50 | #endif | ||
50 | 51 | ||
51 | /* | 52 | /* |
52 | * Consistent memory allocators. Used for DMA devices that want to | 53 | * Consistent memory allocators. Used for DMA devices that want to |
@@ -60,71 +61,16 @@ | |||
60 | */ | 61 | */ |
61 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) | 62 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) |
62 | { | 63 | { |
63 | struct page *page, *end, *free; | 64 | unsigned long order, vaddr; |
64 | unsigned long order; | 65 | void *ret; |
65 | void *ret, *virt; | 66 | unsigned int i, err = 0; |
66 | 67 | struct page *page, *end; | |
67 | if (in_interrupt()) | ||
68 | BUG(); | ||
69 | |||
70 | size = PAGE_ALIGN(size); | ||
71 | order = get_order(size); | ||
72 | |||
73 | page = alloc_pages(gfp, order); | ||
74 | if (!page) | ||
75 | goto no_page; | ||
76 | |||
77 | /* We could do with a page_to_phys and page_to_bus here. */ | ||
78 | virt = page_address(page); | ||
79 | ret = ioremap(virt_to_phys(virt), size); | ||
80 | if (!ret) | ||
81 | goto no_remap; | ||
82 | |||
83 | /* | ||
84 | * Here's the magic! Note if the uncached shadow is not implemented, | ||
85 | * it's up to the calling code to also test that condition and make | ||
86 | * other arranegments, such as manually flushing the cache and so on. | ||
87 | */ | ||
88 | #ifdef CONFIG_XILINX_UNCACHED_SHADOW | ||
89 | ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); | ||
90 | #endif | ||
91 | /* dma_handle is same as physical (shadowed) address */ | ||
92 | *dma_handle = (dma_addr_t)ret; | ||
93 | |||
94 | /* | ||
95 | * free wasted pages. We skip the first page since we know | ||
96 | * that it will have count = 1 and won't require freeing. | ||
97 | * We also mark the pages in use as reserved so that | ||
98 | * remap_page_range works. | ||
99 | */ | ||
100 | page = virt_to_page(virt); | ||
101 | free = page + (size >> PAGE_SHIFT); | ||
102 | end = page + (1 << order); | ||
103 | |||
104 | for (; page < end; page++) { | ||
105 | init_page_count(page); | ||
106 | if (page >= free) | ||
107 | __free_page(page); | ||
108 | else | ||
109 | SetPageReserved(page); | ||
110 | } | ||
111 | |||
112 | return ret; | ||
113 | no_remap: | ||
114 | __free_pages(page, order); | ||
115 | no_page: | ||
116 | return NULL; | ||
117 | } | ||
118 | |||
119 | #else | ||
120 | 68 | ||
121 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) | 69 | #ifdef CONFIG_MMU |
122 | { | ||
123 | int order, err, i; | ||
124 | unsigned long page, va, flags; | ||
125 | phys_addr_t pa; | 70 | phys_addr_t pa; |
126 | struct vm_struct *area; | 71 | struct vm_struct *area; |
127 | void *ret; | 72 | unsigned long va; |
73 | #endif | ||
128 | 74 | ||
129 | if (in_interrupt()) | 75 | if (in_interrupt()) |
130 | BUG(); | 76 | BUG(); |
@@ -133,71 +79,133 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) | |||
133 | size = PAGE_ALIGN(size); | 79 | size = PAGE_ALIGN(size); |
134 | order = get_order(size); | 80 | order = get_order(size); |
135 | 81 | ||
136 | page = __get_free_pages(gfp, order); | 82 | vaddr = __get_free_pages(gfp, order); |
137 | if (!page) { | 83 | if (!vaddr) |
138 | BUG(); | ||
139 | return NULL; | 84 | return NULL; |
140 | } | ||
141 | 85 | ||
142 | /* | 86 | /* |
143 | * we need to ensure that there are no cachelines in use, | 87 | * we need to ensure that there are no cachelines in use, |
144 | * or worse dirty in this area. | 88 | * or worse dirty in this area. |
145 | */ | 89 | */ |
146 | flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size); | 90 | flush_dcache_range(virt_to_phys((void *)vaddr), |
91 | virt_to_phys((void *)vaddr) + size); | ||
147 | 92 | ||
93 | #ifndef CONFIG_MMU | ||
94 | ret = (void *)vaddr; | ||
95 | /* | ||
96 | * Here's the magic! Note if the uncached shadow is not implemented, | ||
97 | * it's up to the calling code to also test that condition and make | ||
98 | * other arrangements, such as manually flushing the cache and so on. | ||
99 | */ | ||
100 | # ifdef CONFIG_XILINX_UNCACHED_SHADOW | ||
101 | ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); | ||
102 | # endif | ||
103 | if ((unsigned int)ret > cpuinfo.dcache_base && | ||
104 | (unsigned int)ret < cpuinfo.dcache_high) | ||
105 | printk(KERN_WARNING | ||
106 | "ERROR: Your cache coherent area is CACHED!!!\n"); | ||
107 | |||
108 | /* dma_handle is same as physical (shadowed) address */ | ||
109 | *dma_handle = (dma_addr_t)ret; | ||
110 | #else | ||
148 | /* Allocate some common virtual space to map the new pages. */ | 111 | /* Allocate some common virtual space to map the new pages. */ |
149 | area = get_vm_area(size, VM_ALLOC); | 112 | area = get_vm_area(size, VM_ALLOC); |
150 | if (area == NULL) { | 113 | if (!area) { |
151 | free_pages(page, order); | 114 | free_pages(vaddr, order); |
152 | return NULL; | 115 | return NULL; |
153 | } | 116 | } |
154 | va = (unsigned long) area->addr; | 117 | va = (unsigned long) area->addr; |
155 | ret = (void *)va; | 118 | ret = (void *)va; |
156 | 119 | ||
157 | /* This gives us the real physical address of the first page. */ | 120 | /* This gives us the real physical address of the first page. */ |
158 | *dma_handle = pa = virt_to_bus((void *)page); | 121 | *dma_handle = pa = virt_to_bus((void *)vaddr); |
159 | 122 | #endif | |
160 | /* MS: This is the whole magic - use cache inhibit pages */ | ||
161 | flags = _PAGE_KERNEL | _PAGE_NO_CACHE; | ||
162 | 123 | ||
163 | /* | 124 | /* |
164 | * Set refcount=1 on all pages in an order>0 | 125 | * free wasted pages. We skip the first page since we know |
165 | * allocation so that vfree() will actually | 126 | * that it will have count = 1 and won't require freeing. |
166 | * free all pages that were allocated. | 127 | * We also mark the pages in use as reserved so that |
128 | * remap_page_range works. | ||
167 | */ | 129 | */ |
168 | if (order > 0) { | 130 | page = virt_to_page(vaddr); |
169 | struct page *rpage = virt_to_page(page); | 131 | end = page + (1 << order); |
170 | for (i = 1; i < (1 << order); i++) | 132 | |
171 | init_page_count(rpage+i); | 133 | split_page(page, order); |
134 | |||
135 | for (i = 0; i < size && err == 0; i += PAGE_SIZE) { | ||
136 | #ifdef CONFIG_MMU | ||
137 | /* MS: This is the whole magic - use cache inhibit pages */ | ||
138 | err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); | ||
139 | #endif | ||
140 | |||
141 | SetPageReserved(page); | ||
142 | page++; | ||
172 | } | 143 | } |
173 | 144 | ||
174 | err = 0; | 145 | /* Free the otherwise unused pages. */ |
175 | for (i = 0; i < size && err == 0; i += PAGE_SIZE) | 146 | while (page < end) { |
176 | err = map_page(va+i, pa+i, flags); | 147 | __free_page(page); |
148 | page++; | ||
149 | } | ||
177 | 150 | ||
178 | if (err) { | 151 | if (err) { |
179 | vfree((void *)va); | 152 | free_pages(vaddr, order); |
180 | return NULL; | 153 | return NULL; |
181 | } | 154 | } |
182 | 155 | ||
183 | return ret; | 156 | return ret; |
184 | } | 157 | } |
185 | #endif /* CONFIG_MMU */ | ||
186 | EXPORT_SYMBOL(consistent_alloc); | 158 | EXPORT_SYMBOL(consistent_alloc); |
187 | 159 | ||
188 | /* | 160 | /* |
189 | * free page(s) as defined by the above mapping. | 161 | * free page(s) as defined by the above mapping. |
190 | */ | 162 | */ |
191 | void consistent_free(void *vaddr) | 163 | void consistent_free(size_t size, void *vaddr) |
192 | { | 164 | { |
165 | struct page *page; | ||
166 | |||
193 | if (in_interrupt()) | 167 | if (in_interrupt()) |
194 | BUG(); | 168 | BUG(); |
195 | 169 | ||
170 | size = PAGE_ALIGN(size); | ||
171 | |||
172 | #ifndef CONFIG_MMU | ||
196 | /* Clear SHADOW_MASK bit in address, and free as per usual */ | 173 | /* Clear SHADOW_MASK bit in address, and free as per usual */ |
197 | #ifdef CONFIG_XILINX_UNCACHED_SHADOW | 174 | # ifdef CONFIG_XILINX_UNCACHED_SHADOW |
198 | vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); | 175 | vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); |
176 | # endif | ||
177 | page = virt_to_page(vaddr); | ||
178 | |||
179 | do { | ||
180 | ClearPageReserved(page); | ||
181 | __free_page(page); | ||
182 | page++; | ||
183 | } while (size -= PAGE_SIZE); | ||
184 | #else | ||
185 | do { | ||
186 | pte_t *ptep; | ||
187 | unsigned long pfn; | ||
188 | |||
189 | ptep = pte_offset_kernel(pmd_offset(pgd_offset_k( | ||
190 | (unsigned int)vaddr), | ||
191 | (unsigned int)vaddr), | ||
192 | (unsigned int)vaddr); | ||
193 | if (!pte_none(*ptep) && pte_present(*ptep)) { | ||
194 | pfn = pte_pfn(*ptep); | ||
195 | pte_clear(&init_mm, (unsigned int)vaddr, ptep); | ||
196 | if (pfn_valid(pfn)) { | ||
197 | page = pfn_to_page(pfn); | ||
198 | |||
199 | ClearPageReserved(page); | ||
200 | __free_page(page); | ||
201 | } | ||
202 | } | ||
203 | vaddr += PAGE_SIZE; | ||
204 | } while (size -= PAGE_SIZE); | ||
205 | |||
206 | /* flush tlb */ | ||
207 | flush_tlb_all(); | ||
199 | #endif | 208 | #endif |
200 | vfree(vaddr); | ||
201 | } | 209 | } |
202 | EXPORT_SYMBOL(consistent_free); | 210 | EXPORT_SYMBOL(consistent_free); |
203 | 211 | ||
@@ -221,7 +229,7 @@ void consistent_sync(void *vaddr, size_t size, int direction) | |||
221 | case PCI_DMA_NONE: | 229 | case PCI_DMA_NONE: |
222 | BUG(); | 230 | BUG(); |
223 | case PCI_DMA_FROMDEVICE: /* invalidate only */ | 231 | case PCI_DMA_FROMDEVICE: /* invalidate only */ |
224 | flush_dcache_range(start, end); | 232 | invalidate_dcache_range(start, end); |
225 | break; | 233 | break; |
226 | case PCI_DMA_TODEVICE: /* writeback only */ | 234 | case PCI_DMA_TODEVICE: /* writeback only */ |
227 | flush_dcache_range(start, end); | 235 | flush_dcache_range(start, end); |
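
For reference, a minimal driver-side sketch of how the reworked microblaze coherent-allocation API is used; example_setup() and the single-page size are illustrative, not part of this patch — only consistent_alloc() and the new size-first consistent_free() signature come from the code above.

/* Sketch only: a hypothetical caller allocating one coherent page. */
static int example_setup(void)
{
        dma_addr_t handle;
        void *buf;

        buf = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &handle);
        if (!buf)
                return -ENOMEM;

        /* program the device with 'handle', access the buffer through 'buf' */

        consistent_free(PAGE_SIZE, buf);        /* note: size now comes first */
        return 0;
}
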
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 7af87f4b2c2c..bab922993185 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c | |||
@@ -273,16 +273,11 @@ bad_area_nosemaphore: | |||
273 | * us unable to handle the page fault gracefully. | 273 | * us unable to handle the page fault gracefully. |
274 | */ | 274 | */ |
275 | out_of_memory: | 275 | out_of_memory: |
276 | if (current->pid == 1) { | ||
277 | yield(); | ||
278 | down_read(&mm->mmap_sem); | ||
279 | goto survive; | ||
280 | } | ||
281 | up_read(&mm->mmap_sem); | 276 | up_read(&mm->mmap_sem); |
282 | printk(KERN_WARNING "VM: killing process %s\n", current->comm); | 277 | if (!user_mode(regs)) |
283 | if (user_mode(regs)) | 278 | bad_page_fault(regs, address, SIGKILL); |
284 | do_exit(SIGKILL); | 279 | else |
285 | bad_page_fault(regs, address, SIGKILL); | 280 | pagefault_out_of_memory(); |
286 | return; | 281 | return; |
287 | 282 | ||
288 | do_sigbus: | 283 | do_sigbus: |
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index f42c2dde8b1c..cca3579d4268 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
@@ -47,6 +47,7 @@ unsigned long memory_start; | |||
47 | EXPORT_SYMBOL(memory_start); | 47 | EXPORT_SYMBOL(memory_start); |
48 | unsigned long memory_end; /* due to mm/nommu.c */ | 48 | unsigned long memory_end; /* due to mm/nommu.c */ |
49 | unsigned long memory_size; | 49 | unsigned long memory_size; |
50 | EXPORT_SYMBOL(memory_size); | ||
50 | 51 | ||
51 | /* | 52 | /* |
52 | * paging_init() sets up the page tables - in fact we've already done this. | 53 | * paging_init() sets up the page tables - in fact we've already done this. |
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index d31312cde6ea..59bf2335a4ce 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c | |||
@@ -42,6 +42,7 @@ | |||
42 | 42 | ||
43 | unsigned long ioremap_base; | 43 | unsigned long ioremap_base; |
44 | unsigned long ioremap_bot; | 44 | unsigned long ioremap_bot; |
45 | EXPORT_SYMBOL(ioremap_bot); | ||
45 | 46 | ||
46 | /* The maximum lowmem defaults to 768Mb, but this can be configured to | 47 | /* The maximum lowmem defaults to 768Mb, but this can be configured to |
47 | * another value. | 48 | * another value. |
@@ -161,24 +162,6 @@ int map_page(unsigned long va, phys_addr_t pa, int flags) | |||
161 | return err; | 162 | return err; |
162 | } | 163 | } |
163 | 164 | ||
164 | void __init adjust_total_lowmem(void) | ||
165 | { | ||
166 | /* TBD */ | ||
167 | #if 0 | ||
168 | unsigned long max_low_mem = MAX_LOW_MEM; | ||
169 | |||
170 | if (total_lowmem > max_low_mem) { | ||
171 | total_lowmem = max_low_mem; | ||
172 | #ifndef CONFIG_HIGHMEM | ||
173 | printk(KERN_INFO "Warning, memory limited to %ld Mb, use " | ||
174 | "CONFIG_HIGHMEM to reach %ld Mb\n", | ||
175 | max_low_mem >> 20, total_memory >> 20); | ||
176 | total_memory = total_lowmem; | ||
177 | #endif /* CONFIG_HIGHMEM */ | ||
178 | } | ||
179 | #endif | ||
180 | } | ||
181 | |||
182 | /* | 165 | /* |
183 | * Map in all of physical memory starting at CONFIG_KERNEL_START. | 166 | * Map in all of physical memory starting at CONFIG_KERNEL_START. |
184 | */ | 167 | */ |
@@ -206,24 +189,6 @@ void __init mapin_ram(void) | |||
206 | /* is x a power of 2? */ | 189 | /* is x a power of 2? */ |
207 | #define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) | 190 | #define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) |
208 | 191 | ||
209 | /* | ||
210 | * Set up a mapping for a block of I/O. | ||
211 | * virt, phys, size must all be page-aligned. | ||
212 | * This should only be called before ioremap is called. | ||
213 | */ | ||
214 | void __init io_block_mapping(unsigned long virt, phys_addr_t phys, | ||
215 | unsigned int size, int flags) | ||
216 | { | ||
217 | int i; | ||
218 | |||
219 | if (virt > CONFIG_KERNEL_START && virt < ioremap_bot) | ||
220 | ioremap_bot = ioremap_base = virt; | ||
221 | |||
222 | /* Put it in the page tables. */ | ||
223 | for (i = 0; i < size; i += PAGE_SIZE) | ||
224 | map_page(virt + i, phys + i, flags); | ||
225 | } | ||
226 | |||
227 | /* Scan the real Linux page tables and return a PTE pointer for | 192 | /* Scan the real Linux page tables and return a PTE pointer for |
228 | * a virtual address in a context. | 193 | * a virtual address in a context. |
229 | * Returns true (1) if PTE was found, zero otherwise. The pointer to | 194 | * Returns true (1) if PTE was found, zero otherwise. The pointer to |
@@ -274,3 +239,18 @@ unsigned long iopa(unsigned long addr) | |||
274 | 239 | ||
275 | return pa; | 240 | return pa; |
276 | } | 241 | } |
242 | |||
243 | __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | ||
244 | unsigned long address) | ||
245 | { | ||
246 | pte_t *pte; | ||
247 | if (mem_init_done) { | ||
248 | pte = (pte_t *)__get_free_page(GFP_KERNEL | | ||
249 | __GFP_REPEAT | __GFP_ZERO); | ||
250 | } else { | ||
251 | pte = (pte_t *)early_get_page(); | ||
252 | if (pte) | ||
253 | clear_page(pte); | ||
254 | } | ||
255 | return pte; | ||
256 | } | ||
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 740bb32ec57e..9cb782b8e036 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c | |||
@@ -1025,7 +1025,7 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) | |||
1025 | 1025 | ||
1026 | struct pci_dev *dev = bus->self; | 1026 | struct pci_dev *dev = bus->self; |
1027 | 1027 | ||
1028 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { | 1028 | pci_bus_for_each_resource(bus, res, i) { |
1029 | res = bus->resource[i]; | 1029 | res = bus->resource[i]; |
1030 | if (!res) | 1030 | if (!res) |
1031 | continue; | 1031 | continue; |
@@ -1131,21 +1131,20 @@ static int skip_isa_ioresource_align(struct pci_dev *dev) | |||
1131 | * but we want to try to avoid allocating at 0x2900-0x2bff | 1131 | * but we want to try to avoid allocating at 0x2900-0x2bff |
1132 | * which might have been mirrored at 0x0100-0x03ff.. | 1132 | * which might have been mirrored at 0x0100-0x03ff.. |
1133 | */ | 1133 | */ |
1134 | void pcibios_align_resource(void *data, struct resource *res, | 1134 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, |
1135 | resource_size_t size, resource_size_t align) | 1135 | resource_size_t size, resource_size_t align) |
1136 | { | 1136 | { |
1137 | struct pci_dev *dev = data; | 1137 | struct pci_dev *dev = data; |
1138 | resource_size_t start = res->start; | ||
1138 | 1139 | ||
1139 | if (res->flags & IORESOURCE_IO) { | 1140 | if (res->flags & IORESOURCE_IO) { |
1140 | resource_size_t start = res->start; | ||
1141 | |||
1142 | if (skip_isa_ioresource_align(dev)) | 1141 | if (skip_isa_ioresource_align(dev)) |
1143 | return; | 1142 | return start; |
1144 | if (start & 0x300) { | 1143 | if (start & 0x300) |
1145 | start = (start + 0x3ff) & ~0x3ff; | 1144 | start = (start + 0x3ff) & ~0x3ff; |
1146 | res->start = start; | ||
1147 | } | ||
1148 | } | 1145 | } |
1146 | |||
1147 | return start; | ||
1149 | } | 1148 | } |
1150 | EXPORT_SYMBOL(pcibios_align_resource); | 1149 | EXPORT_SYMBOL(pcibios_align_resource); |
1151 | 1150 | ||
@@ -1228,7 +1227,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) | |||
1228 | pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", | 1227 | pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", |
1229 | pci_domain_nr(bus), bus->number); | 1228 | pci_domain_nr(bus), bus->number); |
1230 | 1229 | ||
1231 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { | 1230 | pci_bus_for_each_resource(bus, res, i) { |
1232 | res = bus->resource[i]; | 1231 | res = bus->resource[i]; |
1233 | if (!res || !res->flags | 1232 | if (!res || !res->flags |
1234 | || res->start > res->end || res->parent) | 1233 | || res->start > res->end || res->parent) |
@@ -1508,7 +1507,7 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus) | |||
1508 | pci_bus_add_devices(bus); | 1507 | pci_bus_add_devices(bus); |
1509 | 1508 | ||
1510 | /* Fixup EEH */ | 1509 | /* Fixup EEH */ |
1511 | eeh_add_device_tree_late(bus); | 1510 | /* eeh_add_device_tree_late(bus); */ |
1512 | } | 1511 | } |
1513 | EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); | 1512 | EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); |
1514 | 1513 | ||
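
The pcibios_align_resource() change above turns the hook from one that edited res->start in place into one that simply reports the start it wants; a worked trace with a made-up candidate start shows the arithmetic (example_align() is a hypothetical name):

/* Hypothetical I/O BAR whose candidate start is 0x2980. */
static resource_size_t example_align(resource_size_t start)
{
        if (start & 0x300)                        /* 0x2980 & 0x300 == 0x100 */
                start = (start + 0x3ff) & ~0x3ff; /* 0x2980 -> 0x2c00, clear of 0x2900-0x2bff */
        return start;   /* reported to the caller; res->start is left untouched */
}
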
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 519197ede089..59dc0c7ef733 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -29,7 +29,7 @@ | |||
29 | * | 29 | * |
30 | * Atomically reads the value of @v. | 30 | * Atomically reads the value of @v. |
31 | */ | 31 | */ |
32 | #define atomic_read(v) ((v)->counter) | 32 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * atomic_set - set atomic variable | 35 | * atomic_set - set atomic variable |
@@ -410,7 +410,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
410 | * @v: pointer of type atomic64_t | 410 | * @v: pointer of type atomic64_t |
411 | * | 411 | * |
412 | */ | 412 | */ |
413 | #define atomic64_read(v) ((v)->counter) | 413 | #define atomic64_read(v) (*(volatile long *)&(v)->counter) |
414 | 414 | ||
415 | /* | 415 | /* |
416 | * atomic64_set - set atomic variable | 416 | * atomic64_set - set atomic variable |
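
A minimal sketch of what the volatile cast in atomic_read() buys; the macro bodies mirror the patch, while atomic_t, the _old/_new names and the busy-wait loop are illustrative only. The same rationale applies to the identical atomic_read()/atomic64_read() conversions for mn10300, parisc, sh, sparc and x86 further down.

typedef struct { int counter; } atomic_t;

#define atomic_read_old(v)      ((v)->counter)                    /* load may be hoisted */
#define atomic_read_new(v)      (*(volatile int *)&(v)->counter)  /* always reloads */

static void wait_for_zero(atomic_t *v)
{
        /*
         * With the old macro the compiler may legally cache v->counter in a
         * register and spin forever, since this thread never writes it; the
         * volatile cast forces a fresh load on every iteration.
         */
        while (atomic_read_new(v) != 0)
                ;       /* another CPU is expected to store 0 here */
}
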
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h index 032ca73f181b..48bb82372994 100644 --- a/arch/mips/include/asm/i8253.h +++ b/arch/mips/include/asm/i8253.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #define PIT_CH0 0x40 | 12 | #define PIT_CH0 0x40 |
13 | #define PIT_CH2 0x42 | 13 | #define PIT_CH2 0x42 |
14 | 14 | ||
15 | extern spinlock_t i8253_lock; | 15 | extern raw_spinlock_t i8253_lock; |
16 | 16 | ||
17 | extern void setup_pit_timer(void); | 17 | extern void setup_pit_timer(void); |
18 | 18 | ||
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 49382d5e891a..c6e3c93ce7c7 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -135,6 +135,12 @@ | |||
135 | #define FPU_CSR_COND7 0x80000000 /* $fcc7 */ | 135 | #define FPU_CSR_COND7 0x80000000 /* $fcc7 */ |
136 | 136 | ||
137 | /* | 137 | /* |
138 | * Bits 18 - 20 of the FPU Status Register will be read as 0, | ||
139 | * and should be written as zero. | ||
140 | */ | ||
141 | #define FPU_CSR_RSVD 0x001c0000 | ||
142 | |||
143 | /* | ||
138 | * X the exception cause indicator | 144 | * X the exception cause indicator |
139 | * E the exception enable | 145 | * E the exception enable |
140 | * S the sticky/flag bit | 146 | * S the sticky/flag bit |
@@ -161,7 +167,8 @@ | |||
161 | #define FPU_CSR_UDF_S 0x00000008 | 167 | #define FPU_CSR_UDF_S 0x00000008 |
162 | #define FPU_CSR_INE_S 0x00000004 | 168 | #define FPU_CSR_INE_S 0x00000004 |
163 | 169 | ||
164 | /* rounding mode */ | 170 | /* Bits 0 and 1 of FPU Status Register specify the rounding mode */ |
171 | #define FPU_CSR_RM 0x00000003 | ||
165 | #define FPU_CSR_RN 0x0 /* nearest */ | 172 | #define FPU_CSR_RN 0x0 /* nearest */ |
166 | #define FPU_CSR_RZ 0x1 /* towards zero */ | 173 | #define FPU_CSR_RZ 0x1 /* towards zero */ |
167 | #define FPU_CSR_RU 0x2 /* towards +Infinity */ | 174 | #define FPU_CSR_RU 0x2 /* towards +Infinity */ |
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index ed5c441615e4..94794062a177 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | #include <asm/time.h> | 16 | #include <asm/time.h> |
17 | 17 | ||
18 | DEFINE_SPINLOCK(i8253_lock); | 18 | DEFINE_RAW_SPINLOCK(i8253_lock); |
19 | EXPORT_SYMBOL(i8253_lock); | 19 | EXPORT_SYMBOL(i8253_lock); |
20 | 20 | ||
21 | /* | 21 | /* |
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(i8253_lock); | |||
26 | static void init_pit_timer(enum clock_event_mode mode, | 26 | static void init_pit_timer(enum clock_event_mode mode, |
27 | struct clock_event_device *evt) | 27 | struct clock_event_device *evt) |
28 | { | 28 | { |
29 | spin_lock(&i8253_lock); | 29 | raw_spin_lock(&i8253_lock); |
30 | 30 | ||
31 | switch(mode) { | 31 | switch(mode) { |
32 | case CLOCK_EVT_MODE_PERIODIC: | 32 | case CLOCK_EVT_MODE_PERIODIC: |
@@ -55,7 +55,7 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
55 | /* Nothing to do here */ | 55 | /* Nothing to do here */ |
56 | break; | 56 | break; |
57 | } | 57 | } |
58 | spin_unlock(&i8253_lock); | 58 | raw_spin_unlock(&i8253_lock); |
59 | } | 59 | } |
60 | 60 | ||
61 | /* | 61 | /* |
@@ -65,10 +65,10 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
65 | */ | 65 | */ |
66 | static int pit_next_event(unsigned long delta, struct clock_event_device *evt) | 66 | static int pit_next_event(unsigned long delta, struct clock_event_device *evt) |
67 | { | 67 | { |
68 | spin_lock(&i8253_lock); | 68 | raw_spin_lock(&i8253_lock); |
69 | outb_p(delta & 0xff , PIT_CH0); /* LSB */ | 69 | outb_p(delta & 0xff , PIT_CH0); /* LSB */ |
70 | outb(delta >> 8 , PIT_CH0); /* MSB */ | 70 | outb(delta >> 8 , PIT_CH0); /* MSB */ |
71 | spin_unlock(&i8253_lock); | 71 | raw_spin_unlock(&i8253_lock); |
72 | 72 | ||
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
@@ -137,7 +137,7 @@ static cycle_t pit_read(struct clocksource *cs) | |||
137 | static int old_count; | 137 | static int old_count; |
138 | static u32 old_jifs; | 138 | static u32 old_jifs; |
139 | 139 | ||
140 | spin_lock_irqsave(&i8253_lock, flags); | 140 | raw_spin_lock_irqsave(&i8253_lock, flags); |
141 | /* | 141 | /* |
142 | * Although our caller may have the read side of xtime_lock, | 142 | * Although our caller may have the read side of xtime_lock, |
143 | * this is now a seqlock, and we are cheating in this routine | 143 | * this is now a seqlock, and we are cheating in this routine |
@@ -183,7 +183,7 @@ static cycle_t pit_read(struct clocksource *cs) | |||
183 | old_count = count; | 183 | old_count = count; |
184 | old_jifs = jifs; | 184 | old_jifs = jifs; |
185 | 185 | ||
186 | spin_unlock_irqrestore(&i8253_lock, flags); | 186 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
187 | 187 | ||
188 | count = (LATCH - 1) - count; | 188 | count = (LATCH - 1) - count; |
189 | 189 | ||
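
The PIT lock becomes a raw_spinlock_t because, as with other conversions of this era, plain spinlock_t is the type that may turn into a sleeping lock under the -rt patch set, while the i8253 is programmed from hard-interrupt and clockevent paths that must never sleep. The usage pattern is unchanged apart from the raw_ prefix; a minimal sketch with hypothetical names my_pit_lock and program_pit():

static DEFINE_RAW_SPINLOCK(my_pit_lock);

static void program_pit(unsigned long delta)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&my_pit_lock, flags);
        outb_p(delta & 0xff, PIT_CH0);          /* LSB */
        outb(delta >> 8, PIT_CH0);              /* MSB */
        raw_spin_unlock_irqrestore(&my_pit_lock, flags);
}
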
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 44337ba03717..a5297e2a353a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -385,7 +385,7 @@ EXPORT(sysn32_call_table) | |||
385 | PTR sys_fchmodat | 385 | PTR sys_fchmodat |
386 | PTR sys_faccessat | 386 | PTR sys_faccessat |
387 | PTR compat_sys_pselect6 | 387 | PTR compat_sys_pselect6 |
388 | PTR sys_ppoll /* 6265 */ | 388 | PTR compat_sys_ppoll /* 6265 */ |
389 | PTR sys_unshare | 389 | PTR sys_unshare |
390 | PTR sys_splice | 390 | PTR sys_splice |
391 | PTR sys_sync_file_range | 391 | PTR sys_sync_file_range |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 8f2f8e9d8b21..f2338d1c0b48 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -78,6 +78,9 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); | |||
78 | #define FPCREG_RID 0 /* $0 = revision id */ | 78 | #define FPCREG_RID 0 /* $0 = revision id */ |
79 | #define FPCREG_CSR 31 /* $31 = csr */ | 79 | #define FPCREG_CSR 31 /* $31 = csr */ |
80 | 80 | ||
81 | /* Determine rounding mode from the RM bits of the FCSR */ | ||
82 | #define modeindex(v) ((v) & FPU_CSR_RM) | ||
83 | |||
81 | /* Convert Mips rounding mode (0..3) to IEEE library modes. */ | 84 | /* Convert Mips rounding mode (0..3) to IEEE library modes. */ |
82 | static const unsigned char ieee_rm[4] = { | 85 | static const unsigned char ieee_rm[4] = { |
83 | [FPU_CSR_RN] = IEEE754_RN, | 86 | [FPU_CSR_RN] = IEEE754_RN, |
@@ -384,10 +387,14 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx) | |||
384 | (void *) (xcp->cp0_epc), | 387 | (void *) (xcp->cp0_epc), |
385 | MIPSInst_RT(ir), value); | 388 | MIPSInst_RT(ir), value); |
386 | #endif | 389 | #endif |
387 | value &= (FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03); | 390 | |
388 | ctx->fcr31 &= ~(FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03); | 391 | /* |
389 | /* convert to ieee library modes */ | 392 | * Don't write reserved bits, |
390 | ctx->fcr31 |= (value & ~0x3) | ieee_rm[value & 0x3]; | 393 | * and convert to ieee library modes |
394 | */ | ||
395 | ctx->fcr31 = (value & | ||
396 | ~(FPU_CSR_RSVD | FPU_CSR_RM)) | | ||
397 | ieee_rm[modeindex(value)]; | ||
391 | } | 398 | } |
392 | if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { | 399 | if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { |
393 | return SIGFPE; | 400 | return SIGFPE; |
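
A short worked example of the new FCSR write path, using a made-up CTC1 value (round-up mode with reserved bit 18 set); only the masking expression itself comes from the patch:

u32 value = FPU_CSR_RU | 0x00040000;    /* hypothetical guest write; bit 18 is reserved */

u32 fcr31 = (value & ~(FPU_CSR_RSVD | FPU_CSR_RM)) | ieee_rm[modeindex(value)];
/* bit 18 is dropped, and the RM field is replaced by the ieee_rm[] encoding of RU */
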
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c index 29e2326b6257..fa3bf661ae29 100644 --- a/arch/mips/oprofile/op_model_loongson2.c +++ b/arch/mips/oprofile/op_model_loongson2.c | |||
@@ -122,7 +122,7 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id) | |||
122 | */ | 122 | */ |
123 | 123 | ||
124 | /* Check whether the irq belongs to me */ | 124 | /* Check whether the irq belongs to me */ |
125 | enabled = read_c0_perfcnt() & LOONGSON2_PERFCNT_INT_EN; | 125 | enabled = read_c0_perfctrl() & LOONGSON2_PERFCNT_INT_EN; |
126 | if (!enabled) | 126 | if (!enabled) |
127 | return IRQ_NONE; | 127 | return IRQ_NONE; |
128 | enabled = reg.cnt1_enabled | reg.cnt2_enabled; | 128 | enabled = reg.cnt1_enabled | reg.cnt2_enabled; |
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 5bf5be9566de..e41222d6c2fd 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h | |||
@@ -31,7 +31,7 @@ | |||
31 | * Atomically reads the value of @v. Note that the guaranteed | 31 | * Atomically reads the value of @v. Note that the guaranteed |
32 | * useful range of an atomic_t is only 24 bits. | 32 | * useful range of an atomic_t is only 24 bits. |
33 | */ | 33 | */ |
34 | #define atomic_read(v) ((v)->counter) | 34 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
35 | 35 | ||
36 | /** | 36 | /** |
37 | * atomic_set - set atomic variable | 37 | * atomic_set - set atomic variable |
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 716634d1f546..f81955934aeb 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h | |||
@@ -189,7 +189,7 @@ static __inline__ void atomic_set(atomic_t *v, int i) | |||
189 | 189 | ||
190 | static __inline__ int atomic_read(const atomic_t *v) | 190 | static __inline__ int atomic_read(const atomic_t *v) |
191 | { | 191 | { |
192 | return v->counter; | 192 | return (*(volatile int *)&(v)->counter); |
193 | } | 193 | } |
194 | 194 | ||
195 | /* exported interface */ | 195 | /* exported interface */ |
@@ -286,7 +286,7 @@ atomic64_set(atomic64_t *v, s64 i) | |||
286 | static __inline__ s64 | 286 | static __inline__ s64 |
287 | atomic64_read(const atomic64_t *v) | 287 | atomic64_read(const atomic64_t *v) |
288 | { | 288 | { |
289 | return v->counter; | 289 | return (*(volatile long *)&(v)->counter); |
290 | } | 290 | } |
291 | 291 | ||
292 | #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) | 292 | #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 9f4c9d4f5803..bd100fcf40d0 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
@@ -130,43 +130,5 @@ static inline int irqs_disabled_flags(unsigned long flags) | |||
130 | */ | 130 | */ |
131 | struct irq_chip; | 131 | struct irq_chip; |
132 | 132 | ||
133 | #ifdef CONFIG_PERF_EVENTS | ||
134 | |||
135 | #ifdef CONFIG_PPC64 | ||
136 | static inline unsigned long test_perf_event_pending(void) | ||
137 | { | ||
138 | unsigned long x; | ||
139 | |||
140 | asm volatile("lbz %0,%1(13)" | ||
141 | : "=r" (x) | ||
142 | : "i" (offsetof(struct paca_struct, perf_event_pending))); | ||
143 | return x; | ||
144 | } | ||
145 | |||
146 | static inline void set_perf_event_pending(void) | ||
147 | { | ||
148 | asm volatile("stb %0,%1(13)" : : | ||
149 | "r" (1), | ||
150 | "i" (offsetof(struct paca_struct, perf_event_pending))); | ||
151 | } | ||
152 | |||
153 | static inline void clear_perf_event_pending(void) | ||
154 | { | ||
155 | asm volatile("stb %0,%1(13)" : : | ||
156 | "r" (0), | ||
157 | "i" (offsetof(struct paca_struct, perf_event_pending))); | ||
158 | } | ||
159 | #endif /* CONFIG_PPC64 */ | ||
160 | |||
161 | #else /* CONFIG_PERF_EVENTS */ | ||
162 | |||
163 | static inline unsigned long test_perf_event_pending(void) | ||
164 | { | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static inline void clear_perf_event_pending(void) {} | ||
169 | #endif /* CONFIG_PERF_EVENTS */ | ||
170 | |||
171 | #endif /* __KERNEL__ */ | 133 | #endif /* __KERNEL__ */ |
172 | #endif /* _ASM_POWERPC_HW_IRQ_H */ | 134 | #endif /* _ASM_POWERPC_HW_IRQ_H */ |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 957ceb7059c5..c09138d150d4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -133,7 +133,6 @@ int main(void) | |||
133 | DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); | 133 | DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); |
134 | DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); | 134 | DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); |
135 | DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); | 135 | DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); |
136 | DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending)); | ||
137 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); | 136 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); |
138 | #ifdef CONFIG_PPC_MM_SLICES | 137 | #ifdef CONFIG_PPC_MM_SLICES |
139 | DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, | 138 | DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 59c928564a03..4ff4da2c238b 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Contains routines needed to support swiotlb for ppc. | 2 | * Contains routines needed to support swiotlb for ppc. |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor | 4 | * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. |
5 | * Author: Becky Bruce | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 8 | * under the terms of the GNU General Public License as published by the |
@@ -70,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb, | |||
70 | sd->max_direct_dma_addr = 0; | 71 | sd->max_direct_dma_addr = 0; |
71 | 72 | ||
72 | /* May need to bounce if the device can't address all of DRAM */ | 73 | /* May need to bounce if the device can't address all of DRAM */ |
73 | if (dma_get_mask(dev) < lmb_end_of_DRAM()) | 74 | if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM()) |
74 | set_dma_ops(dev, &swiotlb_dma_ops); | 75 | set_dma_ops(dev, &swiotlb_dma_ops); |
75 | 76 | ||
76 | return NOTIFY_DONE; | 77 | return NOTIFY_DONE; |
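
The off-by-one being fixed is easiest to see with concrete numbers (the 4 GiB DRAM figure is illustrative): dma_get_mask() returns the highest address the device can reach, so the exclusive end of its reachable range is mask + 1.

u64 mask = DMA_BIT_MASK(32);    /* 0x00000000ffffffff */
u64 end  = 0x100000000ULL;      /* hypothetical lmb_end_of_DRAM(): 4 GiB of RAM */

/* old test:  mask < end       -> true,  swiotlb bouncing enabled needlessly */
/* new test: (mask + 1) < end  -> false, a 32-bit device reaches all of DRAM */
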
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 07109d843787..42e9d908914a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
556 | 2: | 556 | 2: |
557 | TRACE_AND_RESTORE_IRQ(r5); | 557 | TRACE_AND_RESTORE_IRQ(r5); |
558 | 558 | ||
559 | #ifdef CONFIG_PERF_EVENTS | ||
560 | /* check paca->perf_event_pending if we're enabling ints */ | ||
561 | lbz r3,PACAPERFPEND(r13) | ||
562 | and. r3,r3,r5 | ||
563 | beq 27f | ||
564 | bl .perf_event_do_pending | ||
565 | 27: | ||
566 | #endif /* CONFIG_PERF_EVENTS */ | ||
567 | |||
568 | /* extract EE bit and use it to restore paca->hard_enabled */ | 559 | /* extract EE bit and use it to restore paca->hard_enabled */ |
569 | ld r3,_MSR(r1) | 560 | ld r3,_MSR(r1) |
570 | rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ | 561 | rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 64f6f2031c22..066bd31551d5 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -53,7 +53,6 @@ | |||
53 | #include <linux/bootmem.h> | 53 | #include <linux/bootmem.h> |
54 | #include <linux/pci.h> | 54 | #include <linux/pci.h> |
55 | #include <linux/debugfs.h> | 55 | #include <linux/debugfs.h> |
56 | #include <linux/perf_event.h> | ||
57 | 56 | ||
58 | #include <asm/uaccess.h> | 57 | #include <asm/uaccess.h> |
59 | #include <asm/system.h> | 58 | #include <asm/system.h> |
@@ -145,11 +144,6 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
145 | } | 144 | } |
146 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 145 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
147 | 146 | ||
148 | if (test_perf_event_pending()) { | ||
149 | clear_perf_event_pending(); | ||
150 | perf_event_do_pending(); | ||
151 | } | ||
152 | |||
153 | /* | 147 | /* |
154 | * if (get_paca()->hard_enabled) return; | 148 | * if (get_paca()->hard_enabled) return; |
155 | * But again we need to take care that gcc gets hard_enabled directly | 149 | * But again we need to take care that gcc gets hard_enabled directly |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1b16b9a3e49a..0441bbdadbd1 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void) | |||
532 | } | 532 | } |
533 | #endif /* CONFIG_PPC_ISERIES */ | 533 | #endif /* CONFIG_PPC_ISERIES */ |
534 | 534 | ||
535 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) | 535 | #ifdef CONFIG_PERF_EVENTS |
536 | DEFINE_PER_CPU(u8, perf_event_pending); | ||
537 | 536 | ||
538 | void set_perf_event_pending(void) | 537 | /* |
538 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... | ||
539 | */ | ||
540 | #ifdef CONFIG_PPC64 | ||
541 | static inline unsigned long test_perf_event_pending(void) | ||
539 | { | 542 | { |
540 | get_cpu_var(perf_event_pending) = 1; | 543 | unsigned long x; |
541 | set_dec(1); | 544 | |
542 | put_cpu_var(perf_event_pending); | 545 | asm volatile("lbz %0,%1(13)" |
546 | : "=r" (x) | ||
547 | : "i" (offsetof(struct paca_struct, perf_event_pending))); | ||
548 | return x; | ||
543 | } | 549 | } |
544 | 550 | ||
551 | static inline void set_perf_event_pending_flag(void) | ||
552 | { | ||
553 | asm volatile("stb %0,%1(13)" : : | ||
554 | "r" (1), | ||
555 | "i" (offsetof(struct paca_struct, perf_event_pending))); | ||
556 | } | ||
557 | |||
558 | static inline void clear_perf_event_pending(void) | ||
559 | { | ||
560 | asm volatile("stb %0,%1(13)" : : | ||
561 | "r" (0), | ||
562 | "i" (offsetof(struct paca_struct, perf_event_pending))); | ||
563 | } | ||
564 | |||
565 | #else /* 32-bit */ | ||
566 | |||
567 | DEFINE_PER_CPU(u8, perf_event_pending); | ||
568 | |||
569 | #define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 | ||
545 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) | 570 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) |
546 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 | 571 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 |
547 | 572 | ||
548 | #else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ | 573 | #endif /* 32 vs 64 bit */ |
574 | |||
575 | void set_perf_event_pending(void) | ||
576 | { | ||
577 | preempt_disable(); | ||
578 | set_perf_event_pending_flag(); | ||
579 | set_dec(1); | ||
580 | preempt_enable(); | ||
581 | } | ||
582 | |||
583 | #else /* CONFIG_PERF_EVENTS */ | ||
549 | 584 | ||
550 | #define test_perf_event_pending() 0 | 585 | #define test_perf_event_pending() 0 |
551 | #define clear_perf_event_pending() | 586 | #define clear_perf_event_pending() |
552 | 587 | ||
553 | #endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ | 588 | #endif /* CONFIG_PERF_EVENTS */ |
554 | 589 | ||
555 | /* | 590 | /* |
556 | * For iSeries shared processors, we have to let the hypervisor | 591 | * For iSeries shared processors, we have to let the hypervisor |
@@ -582,10 +617,6 @@ void timer_interrupt(struct pt_regs * regs) | |||
582 | set_dec(DECREMENTER_MAX); | 617 | set_dec(DECREMENTER_MAX); |
583 | 618 | ||
584 | #ifdef CONFIG_PPC32 | 619 | #ifdef CONFIG_PPC32 |
585 | if (test_perf_event_pending()) { | ||
586 | clear_perf_event_pending(); | ||
587 | perf_event_do_pending(); | ||
588 | } | ||
589 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | 620 | if (atomic_read(&ppc_n_lost_interrupts) != 0) |
590 | do_IRQ(regs); | 621 | do_IRQ(regs); |
591 | #endif | 622 | #endif |
@@ -604,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs) | |||
604 | 635 | ||
605 | calculate_steal_time(); | 636 | calculate_steal_time(); |
606 | 637 | ||
638 | if (test_perf_event_pending()) { | ||
639 | clear_perf_event_pending(); | ||
640 | perf_event_do_pending(); | ||
641 | } | ||
642 | |||
607 | #ifdef CONFIG_PPC_ISERIES | 643 | #ifdef CONFIG_PPC_ISERIES |
608 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 644 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
609 | get_lppaca()->int_dword.fields.decr_int = 0; | 645 | get_lppaca()->int_dword.fields.decr_int = 0; |
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 2570fcc7665d..812312542e50 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -440,7 +440,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | |||
440 | unsigned int gtlb_index; | 440 | unsigned int gtlb_index; |
441 | 441 | ||
442 | gtlb_index = kvmppc_get_gpr(vcpu, ra); | 442 | gtlb_index = kvmppc_get_gpr(vcpu, ra); |
443 | if (gtlb_index > KVM44x_GUEST_TLB_SIZE) { | 443 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { |
444 | printk("%s: index %d\n", __func__, gtlb_index); | 444 | printk("%s: index %d\n", __func__, gtlb_index); |
445 | kvmppc_dump_vcpu(vcpu); | 445 | kvmppc_dump_vcpu(vcpu); |
446 | return EMULATE_FAIL; | 446 | return EMULATE_FAIL; |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 1bbcc499d455..b8f8dc126102 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -82,7 +82,7 @@ startup_continue: | |||
82 | _ehead: | 82 | _ehead: |
83 | 83 | ||
84 | #ifdef CONFIG_SHARED_KERNEL | 84 | #ifdef CONFIG_SHARED_KERNEL |
85 | .org 0x100000 | 85 | .org 0x100000 - 0x11000 # head.o ends at 0x11000 |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | # | 88 | # |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 1f70970de0aa..cdef68717416 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -80,7 +80,7 @@ startup_continue: | |||
80 | _ehead: | 80 | _ehead: |
81 | 81 | ||
82 | #ifdef CONFIG_SHARED_KERNEL | 82 | #ifdef CONFIG_SHARED_KERNEL |
83 | .org 0x100000 | 83 | .org 0x100000 - 0x11000 # head.o ends at 0x11000 |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | # | 86 | # |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 33fdc5a79764..9f654da4cecc 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -640,7 +640,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
640 | 640 | ||
641 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | 641 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) |
642 | { | 642 | { |
643 | long ret; | 643 | long ret = 0; |
644 | 644 | ||
645 | /* Do the secure computing check first. */ | 645 | /* Do the secure computing check first. */ |
646 | secure_computing(regs->gprs[2]); | 646 | secure_computing(regs->gprs[2]); |
@@ -649,7 +649,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
649 | * The sysc_tracesys code in entry.S stored the system | 649 | * The sysc_tracesys code in entry.S stored the system |
650 | * call number to gprs[2]. | 650 | * call number to gprs[2]. |
651 | */ | 651 | */ |
652 | ret = regs->gprs[2]; | ||
653 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 652 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
654 | (tracehook_report_syscall_entry(regs) || | 653 | (tracehook_report_syscall_entry(regs) || |
655 | regs->gprs[2] >= NR_syscalls)) { | 654 | regs->gprs[2] >= NR_syscalls)) { |
@@ -671,7 +670,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
671 | regs->gprs[2], regs->orig_gpr2, | 670 | regs->gprs[2], regs->orig_gpr2, |
672 | regs->gprs[3], regs->gprs[4], | 671 | regs->gprs[3], regs->gprs[4], |
673 | regs->gprs[5]); | 672 | regs->gprs[5]); |
674 | return ret; | 673 | return ret ?: regs->gprs[2]; |
675 | } | 674 | } |
676 | 675 | ||
677 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) | 676 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) |
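
The return statement above relies on the GNU "x ?: y" shorthand, which yields x when x is non-zero and y otherwise; a tiny sketch with made-up values:

long ret = 0;
long nr  = 42;                  /* hypothetical syscall number in gprs[2] */

long a = ret ?: nr;             /* ret == 0 (normal path)              -> a == 42 */
ret = -1;                       /* tracer asked to abort the syscall             */
long b = ret ?: nr;             /* ret != 0                            -> b == -1 */
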
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index fba1f62d56e7..dba024d72a89 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig | |||
@@ -877,7 +877,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | |||
877 | # | 877 | # |
878 | # CONFIG_SERIAL_MAX3100 is not set | 878 | # CONFIG_SERIAL_MAX3100 is not set |
879 | CONFIG_SERIAL_SH_SCI=y | 879 | CONFIG_SERIAL_SH_SCI=y |
880 | CONFIG_SERIAL_SH_SCI_NR_UARTS=1 | 880 | CONFIG_SERIAL_SH_SCI_NR_UARTS=2 |
881 | CONFIG_SERIAL_SH_SCI_CONSOLE=y | 881 | CONFIG_SERIAL_SH_SCI_CONSOLE=y |
882 | CONFIG_SERIAL_CORE=y | 882 | CONFIG_SERIAL_CORE=y |
883 | CONFIG_SERIAL_CORE_CONSOLE=y | 883 | CONFIG_SERIAL_CORE_CONSOLE=y |
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index a8d538f06e67..6d511d06cbf6 100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig | |||
@@ -963,7 +963,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | |||
963 | # | 963 | # |
964 | # CONFIG_SERIAL_MAX3100 is not set | 964 | # CONFIG_SERIAL_MAX3100 is not set |
965 | CONFIG_SERIAL_SH_SCI=y | 965 | CONFIG_SERIAL_SH_SCI=y |
966 | CONFIG_SERIAL_SH_SCI_NR_UARTS=1 | 966 | CONFIG_SERIAL_SH_SCI_NR_UARTS=2 |
967 | CONFIG_SERIAL_SH_SCI_CONSOLE=y | 967 | CONFIG_SERIAL_SH_SCI_CONSOLE=y |
968 | CONFIG_SERIAL_CORE=y | 968 | CONFIG_SERIAL_CORE=y |
969 | CONFIG_SERIAL_CORE_CONSOLE=y | 969 | CONFIG_SERIAL_CORE_CONSOLE=y |
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c index 17811e5d287b..f98141b3b7d7 100644 --- a/arch/sh/drivers/pci/pci-sh7751.c +++ b/arch/sh/drivers/pci/pci-sh7751.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include "pci-sh4.h" | 18 | #include "pci-sh4.h" |
19 | #include <asm/addrspace.h> | 19 | #include <asm/addrspace.h> |
20 | #include <asm/sizes.h> | ||
20 | 21 | ||
21 | static int __init __area_sdram_check(struct pci_channel *chan, | 22 | static int __init __area_sdram_check(struct pci_channel *chan, |
22 | unsigned int area) | 23 | unsigned int area) |
@@ -47,8 +48,8 @@ static int __init __area_sdram_check(struct pci_channel *chan, | |||
47 | static struct resource sh7751_pci_resources[] = { | 48 | static struct resource sh7751_pci_resources[] = { |
48 | { | 49 | { |
49 | .name = "SH7751_IO", | 50 | .name = "SH7751_IO", |
50 | .start = SH7751_PCI_IO_BASE, | 51 | .start = 0x1000, |
51 | .end = SH7751_PCI_IO_BASE + SH7751_PCI_IO_SIZE - 1, | 52 | .end = SZ_4M - 1, |
52 | .flags = IORESOURCE_IO | 53 | .flags = IORESOURCE_IO |
53 | }, { | 54 | }, { |
54 | .name = "SH7751_mem", | 55 | .name = "SH7751_mem", |
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 275a448ae8c2..c7983124d99d 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) | 14 | #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) |
15 | 15 | ||
16 | #define atomic_read(v) ((v)->counter) | 16 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
17 | #define atomic_set(v,i) ((v)->counter = (i)) | 17 | #define atomic_set(v,i) ((v)->counter = (i)) |
18 | 18 | ||
19 | #if defined(CONFIG_GUSA_RB) | 19 | #if defined(CONFIG_GUSA_RB) |
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h index 55f9fec082d4..de2359533994 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma-register.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h | |||
@@ -76,7 +76,7 @@ enum { | |||
76 | } | 76 | } |
77 | 77 | ||
78 | #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ | 78 | #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ |
79 | ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) | 79 | (((i) & 0xc) << CHCR_TS_HIGH_SHIFT)) |
80 | 80 | ||
81 | #else /* CONFIG_CPU_SH4A */ | 81 | #else /* CONFIG_CPU_SH4A */ |
82 | 82 | ||
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index f0d343c3b956..7ae128b19d3f 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h | |||
@@ -25,7 +25,7 @@ extern int atomic_cmpxchg(atomic_t *, int, int); | |||
25 | extern int atomic_add_unless(atomic_t *, int, int); | 25 | extern int atomic_add_unless(atomic_t *, int, int); |
26 | extern void atomic_set(atomic_t *, int); | 26 | extern void atomic_set(atomic_t *, int); |
27 | 27 | ||
28 | #define atomic_read(v) ((v)->counter) | 28 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
29 | 29 | ||
30 | #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) | 30 | #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) |
31 | #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) | 31 | #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) |
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index f2e48009989e..2050ca02c423 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h | |||
@@ -13,8 +13,8 @@ | |||
13 | #define ATOMIC_INIT(i) { (i) } | 13 | #define ATOMIC_INIT(i) { (i) } |
14 | #define ATOMIC64_INIT(i) { (i) } | 14 | #define ATOMIC64_INIT(i) { (i) } |
15 | 15 | ||
16 | #define atomic_read(v) ((v)->counter) | 16 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
17 | #define atomic64_read(v) ((v)->counter) | 17 | #define atomic64_read(v) (*(volatile long *)&(v)->counter) |
18 | 18 | ||
19 | #define atomic_set(v, i) (((v)->counter) = i) | 19 | #define atomic_set(v, i) (((v)->counter) = i) |
20 | #define atomic64_set(v, i) (((v)->counter) = i) | 20 | #define atomic64_set(v, i) (((v)->counter) = i) |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 86a0ff0aeac7..7014e88bc779 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -174,6 +174,40 @@ | |||
174 | (~((1ULL << (12 + ((lvl) * 9))) - 1))) | 174 | (~((1ULL << (12 + ((lvl) * 9))) - 1))) |
175 | #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) | 175 | #define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) |
176 | 176 | ||
177 | /* | ||
178 | * Returns the page table level to use for a given page size | ||
179 | * Pagesize is expected to be a power-of-two | ||
180 | */ | ||
181 | #define PAGE_SIZE_LEVEL(pagesize) \ | ||
182 | ((__ffs(pagesize) - 12) / 9) | ||
183 | /* | ||
184 | * Returns the number of ptes to use for a given page size | ||
185 | * Pagesize is expected to be a power-of-two | ||
186 | */ | ||
187 | #define PAGE_SIZE_PTE_COUNT(pagesize) \ | ||
188 | (1ULL << ((__ffs(pagesize) - 12) % 9)) | ||
189 | |||
190 | /* | ||
191 | * Aligns a given io-virtual address to a given page size | ||
192 | * Pagesize is expected to be a power-of-two | ||
193 | */ | ||
194 | #define PAGE_SIZE_ALIGN(address, pagesize) \ | ||
195 | ((address) & ~((pagesize) - 1)) | ||
196 | /* | ||
197 | * Creates an IOMMU PTE for an address an a given pagesize | ||
198 | * The PTE has no permission bits set | ||
199 | * Pagesize is expected to be a power-of-two larger than 4096 | ||
200 | */ | ||
201 | #define PAGE_SIZE_PTE(address, pagesize) \ | ||
202 | (((address) | ((pagesize) - 1)) & \ | ||
203 | (~(pagesize >> 1)) & PM_ADDR_MASK) | ||
204 | |||
205 | /* | ||
206 | * Takes a PTE value with mode=0x07 and returns the page size it maps | ||
207 | */ | ||
208 | #define PTE_PAGE_SIZE(pte) \ | ||
209 | (1ULL << (1 + ffz(((pte) | 0xfffULL)))) | ||
210 | |||
177 | #define IOMMU_PTE_P (1ULL << 0) | 211 | #define IOMMU_PTE_P (1ULL << 0) |
178 | #define IOMMU_PTE_TV (1ULL << 1) | 212 | #define IOMMU_PTE_TV (1ULL << 1) |
179 | #define IOMMU_PTE_U (1ULL << 59) | 213 | #define IOMMU_PTE_U (1ULL << 59) |
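
A worked example of the new page-size macros for a hypothetical 32 KiB (0x8000) mapping; every number below follows directly from the definitions above:

/*
 *   __ffs(0x8000)               == 15
 *   PAGE_SIZE_LEVEL(0x8000)     == (15 - 12) / 9        == 0  (level-0 PTEs)
 *   PAGE_SIZE_PTE_COUNT(0x8000) == 1 << ((15 - 12) % 9) == 8  (contiguous PTEs)
 *   PAGE_SIZE_ALIGN(a, 0x8000)  == a & ~0x7fffULL             (32 KiB-aligned IOVA)
 *
 * PAGE_SIZE_PTE() then sets address bits 12-13 and clears bit 14 in each of
 * the eight PTEs, so that on unmap PTE_PAGE_SIZE(pte)
 * == 1ULL << (1 + ffz(pte | 0xfff)) == 1 << 15 == 0x8000.
 */
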
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 8f8217b9bdac..37b39d27abe0 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | static inline int atomic_read(const atomic_t *v) | 23 | static inline int atomic_read(const atomic_t *v) |
24 | { | 24 | { |
25 | return v->counter; | 25 | return (*(volatile int *)&(v)->counter); |
26 | } | 26 | } |
27 | 27 | ||
28 | /** | 28 | /** |
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 51c5b4056929..b014e235ea8d 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | static inline long atomic64_read(const atomic64_t *v) | 19 | static inline long atomic64_read(const atomic64_t *v) |
20 | { | 20 | { |
21 | return v->counter; | 21 | return (*(volatile long *)&(v)->counter); |
22 | } | 22 | } |
23 | 23 | ||
24 | /** | 24 | /** |
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h index 1edbf89680fd..fc1f579fb965 100644 --- a/arch/x86/include/asm/i8253.h +++ b/arch/x86/include/asm/i8253.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #define PIT_CH0 0x40 | 6 | #define PIT_CH0 0x40 |
7 | #define PIT_CH2 0x42 | 7 | #define PIT_CH2 0x42 |
8 | 8 | ||
9 | extern spinlock_t i8253_lock; | 9 | extern raw_spinlock_t i8253_lock; |
10 | 10 | ||
11 | extern struct clock_event_device *global_clock_event; | 11 | extern struct clock_event_device *global_clock_event; |
12 | 12 | ||
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h index f70e60071fe8..af00bd1d2089 100644 --- a/arch/x86/include/asm/k8.h +++ b/arch/x86/include/asm/k8.h | |||
@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); | |||
16 | extern int k8_scan_nodes(void); | 16 | extern int k8_scan_nodes(void); |
17 | 17 | ||
18 | #ifdef CONFIG_K8_NB | 18 | #ifdef CONFIG_K8_NB |
19 | extern int num_k8_northbridges; | ||
20 | |||
19 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 21 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
20 | { | 22 | { |
21 | return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; | 23 | return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; |
22 | } | 24 | } |
25 | |||
23 | #else | 26 | #else |
27 | #define num_k8_northbridges 0 | ||
28 | |||
24 | static inline struct pci_dev *node_to_k8_nb_misc(int node) | 29 | static inline struct pci_dev *node_to_k8_nb_misc(int node) |
25 | { | 30 | { |
26 | return NULL; | 31 | return NULL; |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f854d89b7edf..fa5a1474cd18 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -731,18 +731,22 @@ static bool increase_address_space(struct protection_domain *domain, | |||
731 | 731 | ||
732 | static u64 *alloc_pte(struct protection_domain *domain, | 732 | static u64 *alloc_pte(struct protection_domain *domain, |
733 | unsigned long address, | 733 | unsigned long address, |
734 | int end_lvl, | 734 | unsigned long page_size, |
735 | u64 **pte_page, | 735 | u64 **pte_page, |
736 | gfp_t gfp) | 736 | gfp_t gfp) |
737 | { | 737 | { |
738 | int level, end_lvl; | ||
738 | u64 *pte, *page; | 739 | u64 *pte, *page; |
739 | int level; | 740 | |
741 | BUG_ON(!is_power_of_2(page_size)); | ||
740 | 742 | ||
741 | while (address > PM_LEVEL_SIZE(domain->mode)) | 743 | while (address > PM_LEVEL_SIZE(domain->mode)) |
742 | increase_address_space(domain, gfp); | 744 | increase_address_space(domain, gfp); |
743 | 745 | ||
744 | level = domain->mode - 1; | 746 | level = domain->mode - 1; |
745 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | 747 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
748 | address = PAGE_SIZE_ALIGN(address, page_size); | ||
749 | end_lvl = PAGE_SIZE_LEVEL(page_size); | ||
746 | 750 | ||
747 | while (level > end_lvl) { | 751 | while (level > end_lvl) { |
748 | if (!IOMMU_PTE_PRESENT(*pte)) { | 752 | if (!IOMMU_PTE_PRESENT(*pte)) { |
@@ -752,6 +756,10 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
752 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | 756 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); |
753 | } | 757 | } |
754 | 758 | ||
759 | /* No level skipping support yet */ | ||
760 | if (PM_PTE_LEVEL(*pte) != level) | ||
761 | return NULL; | ||
762 | |||
755 | level -= 1; | 763 | level -= 1; |
756 | 764 | ||
757 | pte = IOMMU_PTE_PAGE(*pte); | 765 | pte = IOMMU_PTE_PAGE(*pte); |
@@ -769,28 +777,47 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
769 | * This function checks if there is a PTE for a given dma address. If | 777 | * This function checks if there is a PTE for a given dma address. If |
770 | * there is one, it returns the pointer to it. | 778 | * there is one, it returns the pointer to it. |
771 | */ | 779 | */ |
772 | static u64 *fetch_pte(struct protection_domain *domain, | 780 | static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) |
773 | unsigned long address, int map_size) | ||
774 | { | 781 | { |
775 | int level; | 782 | int level; |
776 | u64 *pte; | 783 | u64 *pte; |
777 | 784 | ||
778 | level = domain->mode - 1; | 785 | if (address > PM_LEVEL_SIZE(domain->mode)) |
779 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | 786 | return NULL; |
787 | |||
788 | level = domain->mode - 1; | ||
789 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
780 | 790 | ||
781 | while (level > map_size) { | 791 | while (level > 0) { |
792 | |||
793 | /* Not Present */ | ||
782 | if (!IOMMU_PTE_PRESENT(*pte)) | 794 | if (!IOMMU_PTE_PRESENT(*pte)) |
783 | return NULL; | 795 | return NULL; |
784 | 796 | ||
797 | /* Large PTE */ | ||
798 | if (PM_PTE_LEVEL(*pte) == 0x07) { | ||
799 | unsigned long pte_mask, __pte; | ||
800 | |||
801 | /* | ||
802 | * If we have a series of large PTEs, make | ||
803 | * sure to return a pointer to the first one. | ||
804 | */ | ||
805 | pte_mask = PTE_PAGE_SIZE(*pte); | ||
806 | pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); | ||
807 | __pte = ((unsigned long)pte) & pte_mask; | ||
808 | |||
809 | return (u64 *)__pte; | ||
810 | } | ||
811 | |||
812 | /* No level skipping support yet */ | ||
813 | if (PM_PTE_LEVEL(*pte) != level) | ||
814 | return NULL; | ||
815 | |||
785 | level -= 1; | 816 | level -= 1; |
786 | 817 | ||
818 | /* Walk to the next level */ | ||
787 | pte = IOMMU_PTE_PAGE(*pte); | 819 | pte = IOMMU_PTE_PAGE(*pte); |
788 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | 820 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
789 | |||
790 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { | ||
791 | pte = NULL; | ||
792 | break; | ||
793 | } | ||
794 | } | 821 | } |
795 | 822 | ||
796 | return pte; | 823 | return pte; |
@@ -807,44 +834,84 @@ static int iommu_map_page(struct protection_domain *dom, | |||
807 | unsigned long bus_addr, | 834 | unsigned long bus_addr, |
808 | unsigned long phys_addr, | 835 | unsigned long phys_addr, |
809 | int prot, | 836 | int prot, |
810 | int map_size) | 837 | unsigned long page_size) |
811 | { | 838 | { |
812 | u64 __pte, *pte; | 839 | u64 __pte, *pte; |
813 | 840 | int i, count; | |
814 | bus_addr = PAGE_ALIGN(bus_addr); | ||
815 | phys_addr = PAGE_ALIGN(phys_addr); | ||
816 | |||
817 | BUG_ON(!PM_ALIGNED(map_size, bus_addr)); | ||
818 | BUG_ON(!PM_ALIGNED(map_size, phys_addr)); | ||
819 | 841 | ||
820 | if (!(prot & IOMMU_PROT_MASK)) | 842 | if (!(prot & IOMMU_PROT_MASK)) |
821 | return -EINVAL; | 843 | return -EINVAL; |
822 | 844 | ||
823 | pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); | 845 | bus_addr = PAGE_ALIGN(bus_addr); |
846 | phys_addr = PAGE_ALIGN(phys_addr); | ||
847 | count = PAGE_SIZE_PTE_COUNT(page_size); | ||
848 | pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); | ||
849 | |||
850 | for (i = 0; i < count; ++i) | ||
851 | if (IOMMU_PTE_PRESENT(pte[i])) | ||
852 | return -EBUSY; | ||
824 | 853 | ||
825 | if (IOMMU_PTE_PRESENT(*pte)) | 854 | if (page_size > PAGE_SIZE) { |
826 | return -EBUSY; | 855 | __pte = PAGE_SIZE_PTE(phys_addr, page_size); |
856 | __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; | ||
857 | } else | ||
858 | __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC; | ||
827 | 859 | ||
828 | __pte = phys_addr | IOMMU_PTE_P; | ||
829 | if (prot & IOMMU_PROT_IR) | 860 | if (prot & IOMMU_PROT_IR) |
830 | __pte |= IOMMU_PTE_IR; | 861 | __pte |= IOMMU_PTE_IR; |
831 | if (prot & IOMMU_PROT_IW) | 862 | if (prot & IOMMU_PROT_IW) |
832 | __pte |= IOMMU_PTE_IW; | 863 | __pte |= IOMMU_PTE_IW; |
833 | 864 | ||
834 | *pte = __pte; | 865 | for (i = 0; i < count; ++i) |
866 | pte[i] = __pte; | ||
835 | 867 | ||
836 | update_domain(dom); | 868 | update_domain(dom); |
837 | 869 | ||
838 | return 0; | 870 | return 0; |
839 | } | 871 | } |
840 | 872 | ||
841 | static void iommu_unmap_page(struct protection_domain *dom, | 873 | static unsigned long iommu_unmap_page(struct protection_domain *dom, |
842 | unsigned long bus_addr, int map_size) | 874 | unsigned long bus_addr, |
875 | unsigned long page_size) | ||
843 | { | 876 | { |
844 | u64 *pte = fetch_pte(dom, bus_addr, map_size); | 877 | unsigned long long unmap_size, unmapped; |
878 | u64 *pte; | ||
879 | |||
880 | BUG_ON(!is_power_of_2(page_size)); | ||
881 | |||
882 | unmapped = 0; | ||
845 | 883 | ||
846 | if (pte) | 884 | while (unmapped < page_size) { |
847 | *pte = 0; | 885 | |
886 | pte = fetch_pte(dom, bus_addr); | ||
887 | |||
888 | if (!pte) { | ||
889 | /* | ||
890 | * No PTE for this address | ||
891 | * move forward in 4kb steps | ||
892 | */ | ||
893 | unmap_size = PAGE_SIZE; | ||
894 | } else if (PM_PTE_LEVEL(*pte) == 0) { | ||
895 | /* 4kb PTE found for this address */ | ||
896 | unmap_size = PAGE_SIZE; | ||
897 | *pte = 0ULL; | ||
898 | } else { | ||
899 | int count, i; | ||
900 | |||
901 | /* Large PTE found which maps this address */ | ||
902 | unmap_size = PTE_PAGE_SIZE(*pte); | ||
903 | count = PAGE_SIZE_PTE_COUNT(unmap_size); | ||
904 | for (i = 0; i < count; i++) | ||
905 | pte[i] = 0ULL; | ||
906 | } | ||
907 | |||
908 | bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; | ||
909 | unmapped += unmap_size; | ||
910 | } | ||
911 | |||
912 | BUG_ON(!is_power_of_2(unmapped)); | ||
913 | |||
914 | return unmapped; | ||
848 | } | 915 | } |
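The unmap loop above does not require the caller's size to match what was mapped: it looks up whatever entry covers the current address, clears it, and advances by that entry's size after aligning down to it, so a request that starts in the middle of a large entry still lands on the next entry boundary. A sketch of just the cursor arithmetic, with the page-table lookup faked by a fixed list of ranges.

#include <stdio.h>

struct fake_entry { unsigned long start, size; };   /* pretend PTE info */

static const struct fake_entry entries[] = {
	{ 0x100000, 0x10000 },   /* a 64 KiB entry */
	{ 0x110000, 0x1000  },   /* a 4 KiB entry  */
};

/* size of the entry covering addr, or 4 KiB when nothing is mapped there */
static unsigned long entry_size(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		if (addr >= entries[i].start &&
		    addr < entries[i].start + entries[i].size)
			return entries[i].size;
	return 0x1000;
}

int main(void)
{
	unsigned long addr = 0x108000;   /* starts inside the 64 KiB entry */
	unsigned long unmapped = 0, want = 0x14000;

	while (unmapped < want) {
		unsigned long sz = entry_size(addr);

		/* align down to this entry, then step past it */
		addr = (addr & ~(sz - 1)) + sz;
		unmapped += sz;
		printf("consumed %#lx bytes, cursor now %#lx\n", sz, addr);
	}
	return 0;
}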
849 | 916 | ||
850 | /* | 917 | /* |
@@ -878,7 +945,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
878 | for (addr = e->address_start; addr < e->address_end; | 945 | for (addr = e->address_start; addr < e->address_end; |
879 | addr += PAGE_SIZE) { | 946 | addr += PAGE_SIZE) { |
880 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, | 947 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, |
881 | PM_MAP_4k); | 948 | PAGE_SIZE); |
882 | if (ret) | 949 | if (ret) |
883 | return ret; | 950 | return ret; |
884 | /* | 951 | /* |
@@ -1006,7 +1073,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
1006 | u64 *pte, *pte_page; | 1073 | u64 *pte, *pte_page; |
1007 | 1074 | ||
1008 | for (i = 0; i < num_ptes; ++i) { | 1075 | for (i = 0; i < num_ptes; ++i) { |
1009 | pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, | 1076 | pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, |
1010 | &pte_page, gfp); | 1077 | &pte_page, gfp); |
1011 | if (!pte) | 1078 | if (!pte) |
1012 | goto out_free; | 1079 | goto out_free; |
@@ -1042,7 +1109,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
1042 | for (i = dma_dom->aperture[index]->offset; | 1109 | for (i = dma_dom->aperture[index]->offset; |
1043 | i < dma_dom->aperture_size; | 1110 | i < dma_dom->aperture_size; |
1044 | i += PAGE_SIZE) { | 1111 | i += PAGE_SIZE) { |
1045 | u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); | 1112 | u64 *pte = fetch_pte(&dma_dom->domain, i); |
1046 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 1113 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
1047 | continue; | 1114 | continue; |
1048 | 1115 | ||
@@ -1712,7 +1779,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1712 | 1779 | ||
1713 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; | 1780 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; |
1714 | if (!pte) { | 1781 | if (!pte) { |
1715 | pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, | 1782 | pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, |
1716 | GFP_ATOMIC); | 1783 | GFP_ATOMIC); |
1717 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; | 1784 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; |
1718 | } else | 1785 | } else |
@@ -2439,12 +2506,11 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, | |||
2439 | return ret; | 2506 | return ret; |
2440 | } | 2507 | } |
2441 | 2508 | ||
2442 | static int amd_iommu_map_range(struct iommu_domain *dom, | 2509 | static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, |
2443 | unsigned long iova, phys_addr_t paddr, | 2510 | phys_addr_t paddr, int gfp_order, int iommu_prot) |
2444 | size_t size, int iommu_prot) | ||
2445 | { | 2511 | { |
2512 | unsigned long page_size = 0x1000UL << gfp_order; | ||
2446 | struct protection_domain *domain = dom->priv; | 2513 | struct protection_domain *domain = dom->priv; |
2447 | unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE); | ||
2448 | int prot = 0; | 2514 | int prot = 0; |
2449 | int ret; | 2515 | int ret; |
2450 | 2516 | ||
@@ -2453,61 +2519,50 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2453 | if (iommu_prot & IOMMU_WRITE) | 2519 | if (iommu_prot & IOMMU_WRITE) |
2454 | prot |= IOMMU_PROT_IW; | 2520 | prot |= IOMMU_PROT_IW; |
2455 | 2521 | ||
2456 | iova &= PAGE_MASK; | ||
2457 | paddr &= PAGE_MASK; | ||
2458 | |||
2459 | mutex_lock(&domain->api_lock); | 2522 | mutex_lock(&domain->api_lock); |
2460 | 2523 | ret = iommu_map_page(domain, iova, paddr, prot, page_size); | |
2461 | for (i = 0; i < npages; ++i) { | ||
2462 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); | ||
2463 | if (ret) | ||
2464 | return ret; | ||
2465 | |||
2466 | iova += PAGE_SIZE; | ||
2467 | paddr += PAGE_SIZE; | ||
2468 | } | ||
2469 | |||
2470 | mutex_unlock(&domain->api_lock); | 2524 | mutex_unlock(&domain->api_lock); |
2471 | 2525 | ||
2472 | return 0; | 2526 | return ret; |
2473 | } | 2527 | } |
2474 | 2528 | ||
2475 | static void amd_iommu_unmap_range(struct iommu_domain *dom, | 2529 | static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, |
2476 | unsigned long iova, size_t size) | 2530 | int gfp_order) |
2477 | { | 2531 | { |
2478 | |||
2479 | struct protection_domain *domain = dom->priv; | 2532 | struct protection_domain *domain = dom->priv; |
2480 | unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE); | 2533 | unsigned long page_size, unmap_size; |
2481 | 2534 | ||
2482 | iova &= PAGE_MASK; | 2535 | page_size = 0x1000UL << gfp_order; |
2483 | 2536 | ||
2484 | mutex_lock(&domain->api_lock); | 2537 | mutex_lock(&domain->api_lock); |
2485 | 2538 | unmap_size = iommu_unmap_page(domain, iova, page_size); | |
2486 | for (i = 0; i < npages; ++i) { | 2539 | mutex_unlock(&domain->api_lock); |
2487 | iommu_unmap_page(domain, iova, PM_MAP_4k); | ||
2488 | iova += PAGE_SIZE; | ||
2489 | } | ||
2490 | 2540 | ||
2491 | iommu_flush_tlb_pde(domain); | 2541 | iommu_flush_tlb_pde(domain); |
2492 | 2542 | ||
2493 | mutex_unlock(&domain->api_lock); | 2543 | return get_order(unmap_size); |
2494 | } | 2544 | } |
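The new amd_iommu_map()/amd_iommu_unmap() callbacks above trade sizes for page orders: the core hands in gfp_order, the driver computes 0x1000 << order, and the unmap path reports back get_order() of what it actually tore down. A small sketch of the two conversions, assuming 4 KiB pages; the get_order-style rounding is reimplemented locally for the sketch rather than taken from the kernel.

#include <stdio.h>

static unsigned long order_to_size(int order)
{
	return 0x1000UL << order;              /* 4 KiB * 2^order */
}

static int size_to_order(unsigned long size)
{
	int order = 0;

	/* smallest order whose page size covers 'size' */
	while (order_to_size(order) < size)
		order++;
	return order;
}

int main(void)
{
	int order;

	for (order = 0; order <= 9; order += 3)
		printf("order %d -> %#lx bytes -> order %d\n",
		       order, order_to_size(order),
		       size_to_order(order_to_size(order)));
	return 0;
}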
2495 | 2545 | ||
2496 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 2546 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
2497 | unsigned long iova) | 2547 | unsigned long iova) |
2498 | { | 2548 | { |
2499 | struct protection_domain *domain = dom->priv; | 2549 | struct protection_domain *domain = dom->priv; |
2500 | unsigned long offset = iova & ~PAGE_MASK; | 2550 | unsigned long offset_mask; |
2501 | phys_addr_t paddr; | 2551 | phys_addr_t paddr; |
2502 | u64 *pte; | 2552 | u64 *pte, __pte; |
2503 | 2553 | ||
2504 | pte = fetch_pte(domain, iova, PM_MAP_4k); | 2554 | pte = fetch_pte(domain, iova); |
2505 | 2555 | ||
2506 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 2556 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
2507 | return 0; | 2557 | return 0; |
2508 | 2558 | ||
2509 | paddr = *pte & IOMMU_PAGE_MASK; | 2559 | if (PM_PTE_LEVEL(*pte) == 0) |
2510 | paddr |= offset; | 2560 | offset_mask = PAGE_SIZE - 1; |
2561 | else | ||
2562 | offset_mask = PTE_PAGE_SIZE(*pte) - 1; | ||
2563 | |||
2564 | __pte = *pte & PM_ADDR_MASK; | ||
2565 | paddr = (__pte & ~offset_mask) | (iova & offset_mask); | ||
2511 | 2566 | ||
2512 | return paddr; | 2567 | return paddr; |
2513 | } | 2568 | } |
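amd_iommu_iova_to_phys() above derives the physical address by splitting the IOVA at the size of the entry it found: bits above the entry size come from the PTE, bits below pass straight through. The sketch below uses made-up values and a plain power-of-two entry size.

#include <stdio.h>

static unsigned long translate(unsigned long pte_addr_bits,
			       unsigned long iova,
			       unsigned long entry_size)
{
	/* entry_size is a power of two, so this is the in-entry offset mask */
	unsigned long offset_mask = entry_size - 1;

	return (pte_addr_bits & ~offset_mask) | (iova & offset_mask);
}

int main(void)
{
	/* 2 MiB entry: the low 21 bits of the IOVA pass through unchanged */
	printf("paddr = %#lx\n",
	       translate(0x80000000UL, 0x123456UL, 2UL << 20));
	return 0;
}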
@@ -2523,8 +2578,8 @@ static struct iommu_ops amd_iommu_ops = { | |||
2523 | .domain_destroy = amd_iommu_domain_destroy, | 2578 | .domain_destroy = amd_iommu_domain_destroy, |
2524 | .attach_dev = amd_iommu_attach_device, | 2579 | .attach_dev = amd_iommu_attach_device, |
2525 | .detach_dev = amd_iommu_detach_device, | 2580 | .detach_dev = amd_iommu_detach_device, |
2526 | .map = amd_iommu_map_range, | 2581 | .map = amd_iommu_map, |
2527 | .unmap = amd_iommu_unmap_range, | 2582 | .unmap = amd_iommu_unmap, |
2528 | .iova_to_phys = amd_iommu_iova_to_phys, | 2583 | .iova_to_phys = amd_iommu_iova_to_phys, |
2529 | .domain_has_cap = amd_iommu_domain_has_cap, | 2584 | .domain_has_cap = amd_iommu_domain_has_cap, |
2530 | }; | 2585 | }; |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 6360abf993d4..3bacb4d0844c 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -120,6 +120,7 @@ struct ivmd_header { | |||
120 | bool amd_iommu_dump; | 120 | bool amd_iommu_dump; |
121 | 121 | ||
122 | static int __initdata amd_iommu_detected; | 122 | static int __initdata amd_iommu_detected; |
123 | static bool __initdata amd_iommu_disabled; | ||
123 | 124 | ||
124 | u16 amd_iommu_last_bdf; /* largest PCI device id we have | 125 | u16 amd_iommu_last_bdf; /* largest PCI device id we have |
125 | to handle */ | 126 | to handle */ |
@@ -1372,6 +1373,9 @@ void __init amd_iommu_detect(void) | |||
1372 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) | 1373 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
1373 | return; | 1374 | return; |
1374 | 1375 | ||
1376 | if (amd_iommu_disabled) | ||
1377 | return; | ||
1378 | |||
1375 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { | 1379 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { |
1376 | iommu_detected = 1; | 1380 | iommu_detected = 1; |
1377 | amd_iommu_detected = 1; | 1381 | amd_iommu_detected = 1; |
@@ -1401,6 +1405,8 @@ static int __init parse_amd_iommu_options(char *str) | |||
1401 | for (; *str; ++str) { | 1405 | for (; *str; ++str) { |
1402 | if (strncmp(str, "fullflush", 9) == 0) | 1406 | if (strncmp(str, "fullflush", 9) == 0) |
1403 | amd_iommu_unmap_flush = true; | 1407 | amd_iommu_unmap_flush = true; |
1408 | if (strncmp(str, "off", 3) == 0) | ||
1409 | amd_iommu_disabled = true; | ||
1404 | } | 1410 | } |
1405 | 1411 | ||
1406 | return 1; | 1412 | return 1; |
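The "off" keyword added above follows the same scan as "fullflush": the option string is walked byte by byte and a flag is set whenever a known keyword starts at the current position. A minimal sketch of that scan, with local flags standing in for amd_iommu_unmap_flush and amd_iommu_disabled.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool opt_fullflush;
static bool opt_disabled;

static void parse_options(const char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			opt_fullflush = true;
		if (strncmp(str, "off", 3) == 0)
			opt_disabled = true;
	}
}

int main(void)
{
	parse_options("fullflush,off");
	printf("fullflush=%d off=%d\n", opt_fullflush, opt_disabled);
	return 0;
}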
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 031aa887b0eb..c4f9182ca3ac 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -1224,7 +1224,7 @@ static void reinit_timer(void) | |||
1224 | #ifdef INIT_TIMER_AFTER_SUSPEND | 1224 | #ifdef INIT_TIMER_AFTER_SUSPEND |
1225 | unsigned long flags; | 1225 | unsigned long flags; |
1226 | 1226 | ||
1227 | spin_lock_irqsave(&i8253_lock, flags); | 1227 | raw_spin_lock_irqsave(&i8253_lock, flags); |
1228 | /* set the clock to HZ */ | 1228 | /* set the clock to HZ */ |
1229 | outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ | 1229 | outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ |
1230 | udelay(10); | 1230 | udelay(10); |
@@ -1232,7 +1232,7 @@ static void reinit_timer(void) | |||
1232 | udelay(10); | 1232 | udelay(10); |
1233 | outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ | 1233 | outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ |
1234 | udelay(10); | 1234 | udelay(10); |
1235 | spin_unlock_irqrestore(&i8253_lock, flags); | 1235 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
1236 | #endif | 1236 | #endif |
1237 | } | 1237 | } |
1238 | 1238 | ||
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index b3eeb66c0a51..95962a93f99a 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -340,6 +340,10 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) | |||
340 | (boot_cpu_data.x86_mask < 0x1))) | 340 | (boot_cpu_data.x86_mask < 0x1))) |
341 | return; | 341 | return; |
342 | 342 | ||
343 | /* not in virtualized environments */ | ||
344 | if (num_k8_northbridges == 0) | ||
345 | return; | ||
346 | |||
343 | this_leaf->can_disable = true; | 347 | this_leaf->can_disable = true; |
344 | this_leaf->l3_indices = amd_calc_l3_indices(); | 348 | this_leaf->l3_indices = amd_calc_l3_indices(); |
345 | } | 349 | } |
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 23c167925a5c..2dfd31597443 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/hpet.h> | 16 | #include <asm/hpet.h> |
17 | #include <asm/smp.h> | 17 | #include <asm/smp.h> |
18 | 18 | ||
19 | DEFINE_SPINLOCK(i8253_lock); | 19 | DEFINE_RAW_SPINLOCK(i8253_lock); |
20 | EXPORT_SYMBOL(i8253_lock); | 20 | EXPORT_SYMBOL(i8253_lock); |
21 | 21 | ||
22 | /* | 22 | /* |
@@ -33,7 +33,7 @@ struct clock_event_device *global_clock_event; | |||
33 | static void init_pit_timer(enum clock_event_mode mode, | 33 | static void init_pit_timer(enum clock_event_mode mode, |
34 | struct clock_event_device *evt) | 34 | struct clock_event_device *evt) |
35 | { | 35 | { |
36 | spin_lock(&i8253_lock); | 36 | raw_spin_lock(&i8253_lock); |
37 | 37 | ||
38 | switch (mode) { | 38 | switch (mode) { |
39 | case CLOCK_EVT_MODE_PERIODIC: | 39 | case CLOCK_EVT_MODE_PERIODIC: |
@@ -62,7 +62,7 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
62 | /* Nothing to do here */ | 62 | /* Nothing to do here */ |
63 | break; | 63 | break; |
64 | } | 64 | } |
65 | spin_unlock(&i8253_lock); | 65 | raw_spin_unlock(&i8253_lock); |
66 | } | 66 | } |
67 | 67 | ||
68 | /* | 68 | /* |
@@ -72,10 +72,10 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
72 | */ | 72 | */ |
73 | static int pit_next_event(unsigned long delta, struct clock_event_device *evt) | 73 | static int pit_next_event(unsigned long delta, struct clock_event_device *evt) |
74 | { | 74 | { |
75 | spin_lock(&i8253_lock); | 75 | raw_spin_lock(&i8253_lock); |
76 | outb_pit(delta & 0xff , PIT_CH0); /* LSB */ | 76 | outb_pit(delta & 0xff , PIT_CH0); /* LSB */ |
77 | outb_pit(delta >> 8 , PIT_CH0); /* MSB */ | 77 | outb_pit(delta >> 8 , PIT_CH0); /* MSB */ |
78 | spin_unlock(&i8253_lock); | 78 | raw_spin_unlock(&i8253_lock); |
79 | 79 | ||
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
@@ -130,7 +130,7 @@ static cycle_t pit_read(struct clocksource *cs) | |||
130 | int count; | 130 | int count; |
131 | u32 jifs; | 131 | u32 jifs; |
132 | 132 | ||
133 | spin_lock_irqsave(&i8253_lock, flags); | 133 | raw_spin_lock_irqsave(&i8253_lock, flags); |
134 | /* | 134 | /* |
135 | * Although our caller may have the read side of xtime_lock, | 135 | * Although our caller may have the read side of xtime_lock, |
136 | * this is now a seqlock, and we are cheating in this routine | 136 | * this is now a seqlock, and we are cheating in this routine |
@@ -176,7 +176,7 @@ static cycle_t pit_read(struct clocksource *cs) | |||
176 | old_count = count; | 176 | old_count = count; |
177 | old_jifs = jifs; | 177 | old_jifs = jifs; |
178 | 178 | ||
179 | spin_unlock_irqrestore(&i8253_lock, flags); | 179 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
180 | 180 | ||
181 | count = (LATCH - 1) - count; | 181 | count = (LATCH - 1) - count; |
182 | 182 | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index f2f56c0967b6..345a4b1fe144 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -542,20 +542,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
542 | struct kprobe_ctlblk *kcb; | 542 | struct kprobe_ctlblk *kcb; |
543 | 543 | ||
544 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); | 544 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); |
545 | if (*addr != BREAKPOINT_INSTRUCTION) { | ||
546 | /* | ||
547 | * The breakpoint instruction was removed right | ||
548 | * after we hit it. Another cpu has removed | ||
549 | * either a probepoint or a debugger breakpoint | ||
550 | * at this address. In either case, no further | ||
551 | * handling of this interrupt is appropriate. | ||
552 | * Back up over the (now missing) int3 and run | ||
553 | * the original instruction. | ||
554 | */ | ||
555 | regs->ip = (unsigned long)addr; | ||
556 | return 1; | ||
557 | } | ||
558 | |||
559 | /* | 545 | /* |
560 | * We don't want to be preempted for the entire | 546 | * We don't want to be preempted for the entire |
561 | * duration of kprobe processing. We conditionally | 547 | * duration of kprobe processing. We conditionally |
@@ -587,6 +573,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
587 | setup_singlestep(p, regs, kcb, 0); | 573 | setup_singlestep(p, regs, kcb, 0); |
588 | return 1; | 574 | return 1; |
589 | } | 575 | } |
576 | } else if (*addr != BREAKPOINT_INSTRUCTION) { | ||
577 | /* | ||
578 | * The breakpoint instruction was removed right | ||
579 | * after we hit it. Another cpu has removed | ||
580 | * either a probepoint or a debugger breakpoint | ||
581 | * at this address. In either case, no further | ||
582 | * handling of this interrupt is appropriate. | ||
583 | * Back up over the (now missing) int3 and run | ||
584 | * the original instruction. | ||
585 | */ | ||
586 | regs->ip = (unsigned long)addr; | ||
587 | preempt_enable_no_resched(); | ||
588 | return 1; | ||
590 | } else if (kprobe_running()) { | 589 | } else if (kprobe_running()) { |
591 | p = __get_cpu_var(current_kprobe); | 590 | p = __get_cpu_var(current_kprobe); |
592 | if (p->break_handler && p->break_handler(p, regs)) { | 591 | if (p->break_handler && p->break_handler(p, regs)) { |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index eccdb57094e3..cc6877535ef4 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -548,11 +548,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
548 | * check OSVW bit for CPUs that are not affected | 548 | * check OSVW bit for CPUs that are not affected |
549 | * by erratum #400 | 549 | * by erratum #400 |
550 | */ | 550 | */ |
551 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); | 551 | if (cpu_has(c, X86_FEATURE_OSVW)) { |
552 | if (val >= 2) { | 552 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); |
553 | rdmsrl(MSR_AMD64_OSVW_STATUS, val); | 553 | if (val >= 2) { |
554 | if (!(val & BIT(1))) | 554 | rdmsrl(MSR_AMD64_OSVW_STATUS, val); |
555 | goto no_c1e_idle; | 555 | if (!(val & BIT(1))) |
556 | goto no_c1e_idle; | ||
557 | } | ||
556 | } | 558 | } |
557 | return 1; | 559 | return 1; |
558 | } | 560 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 2ba58206812a..737361fcd503 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -2067,7 +2067,7 @@ static int cpuid_interception(struct vcpu_svm *svm) | |||
2067 | static int iret_interception(struct vcpu_svm *svm) | 2067 | static int iret_interception(struct vcpu_svm *svm) |
2068 | { | 2068 | { |
2069 | ++svm->vcpu.stat.nmi_window_exits; | 2069 | ++svm->vcpu.stat.nmi_window_exits; |
2070 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); | 2070 | svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET); |
2071 | svm->vcpu.arch.hflags |= HF_IRET_MASK; | 2071 | svm->vcpu.arch.hflags |= HF_IRET_MASK; |
2072 | return 1; | 2072 | return 1; |
2073 | } | 2073 | } |
@@ -2479,7 +2479,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) | |||
2479 | 2479 | ||
2480 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; | 2480 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; |
2481 | vcpu->arch.hflags |= HF_NMI_MASK; | 2481 | vcpu->arch.hflags |= HF_NMI_MASK; |
2482 | svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET); | 2482 | svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET); |
2483 | ++vcpu->stat.nmi_injections; | 2483 | ++vcpu->stat.nmi_injections; |
2484 | } | 2484 | } |
2485 | 2485 | ||
@@ -2539,10 +2539,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) | |||
2539 | 2539 | ||
2540 | if (masked) { | 2540 | if (masked) { |
2541 | svm->vcpu.arch.hflags |= HF_NMI_MASK; | 2541 | svm->vcpu.arch.hflags |= HF_NMI_MASK; |
2542 | svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET); | 2542 | svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET); |
2543 | } else { | 2543 | } else { |
2544 | svm->vcpu.arch.hflags &= ~HF_NMI_MASK; | 2544 | svm->vcpu.arch.hflags &= ~HF_NMI_MASK; |
2545 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); | 2545 | svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET); |
2546 | } | 2546 | } |
2547 | } | 2547 | } |
2548 | 2548 | ||
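The 1UL to 1ULL change in the svm.c hunks above matters on 32-bit hosts, where unsigned long is only 32 bits wide: the intercept word is a 64-bit field, so a constant that has to reach its upper half must itself be 64 bits. The sketch models the 32-bit case with uint32_t; the bit index 36 is illustrative, not the real INTERCEPT_IRET value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bit = 36;

	/*
	 * A 32-bit constant cannot reach bit 36.  Shifting a 32-bit type by
	 * 36 is undefined in C, so emulate the low-5-bit wraparound that a
	 * hardware shifter would typically produce (the bit lands at 4).
	 */
	uint64_t narrow = (uint64_t)((uint32_t)1 << (bit & 31));

	/* a 64-bit constant sets the intended bit in the upper half */
	uint64_t wide = 1ULL << bit;

	printf("narrow constant: %#llx\nwide constant:   %#llx\n",
	       (unsigned long long)narrow, (unsigned long long)wide);
	return 0;
}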
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 32022a8a5c3b..edca080407a5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2703,8 +2703,7 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) | |||
2703 | return 0; | 2703 | return 0; |
2704 | 2704 | ||
2705 | return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | 2705 | return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & |
2706 | (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS | | 2706 | (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI)); |
2707 | GUEST_INTR_STATE_NMI)); | ||
2708 | } | 2707 | } |
2709 | 2708 | ||
2710 | static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) | 2709 | static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 73d854c36e39..dd9bc8fb81ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1713,6 +1713,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | |||
1713 | if (copy_from_user(cpuid_entries, entries, | 1713 | if (copy_from_user(cpuid_entries, entries, |
1714 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) | 1714 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) |
1715 | goto out_free; | 1715 | goto out_free; |
1716 | vcpu_load(vcpu); | ||
1716 | for (i = 0; i < cpuid->nent; i++) { | 1717 | for (i = 0; i < cpuid->nent; i++) { |
1717 | vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; | 1718 | vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; |
1718 | vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; | 1719 | vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; |
@@ -1730,6 +1731,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | |||
1730 | r = 0; | 1731 | r = 0; |
1731 | kvm_apic_set_version(vcpu); | 1732 | kvm_apic_set_version(vcpu); |
1732 | kvm_x86_ops->cpuid_update(vcpu); | 1733 | kvm_x86_ops->cpuid_update(vcpu); |
1734 | vcpu_put(vcpu); | ||
1733 | 1735 | ||
1734 | out_free: | 1736 | out_free: |
1735 | vfree(cpuid_entries); | 1737 | vfree(cpuid_entries); |
@@ -1750,9 +1752,11 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, | |||
1750 | if (copy_from_user(&vcpu->arch.cpuid_entries, entries, | 1752 | if (copy_from_user(&vcpu->arch.cpuid_entries, entries, |
1751 | cpuid->nent * sizeof(struct kvm_cpuid_entry2))) | 1753 | cpuid->nent * sizeof(struct kvm_cpuid_entry2))) |
1752 | goto out; | 1754 | goto out; |
1755 | vcpu_load(vcpu); | ||
1753 | vcpu->arch.cpuid_nent = cpuid->nent; | 1756 | vcpu->arch.cpuid_nent = cpuid->nent; |
1754 | kvm_apic_set_version(vcpu); | 1757 | kvm_apic_set_version(vcpu); |
1755 | kvm_x86_ops->cpuid_update(vcpu); | 1758 | kvm_x86_ops->cpuid_update(vcpu); |
1759 | vcpu_put(vcpu); | ||
1756 | return 0; | 1760 | return 0; |
1757 | 1761 | ||
1758 | out: | 1762 | out: |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 28c68762648f..38512d0c4742 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -461,7 +461,8 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) | |||
461 | * node, it must now point to the fake node ID. | 461 | * node, it must now point to the fake node ID. |
462 | */ | 462 | */ |
463 | for (j = 0; j < MAX_LOCAL_APIC; j++) | 463 | for (j = 0; j < MAX_LOCAL_APIC; j++) |
464 | if (apicid_to_node[j] == nid) | 464 | if (apicid_to_node[j] == nid && |
465 | fake_apicid_to_node[j] == NUMA_NO_NODE) | ||
465 | fake_apicid_to_node[j] = i; | 466 | fake_apicid_to_node[j] = i; |
466 | } | 467 | } |
467 | for (i = 0; i < num_nodes; i++) | 468 | for (i = 0; i < num_nodes; i++) |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2c505ee71014..b28d2f1253bb 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -31,8 +31,9 @@ static struct op_x86_model_spec *model; | |||
31 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); | 31 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); |
32 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | 32 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); |
33 | 33 | ||
34 | /* 0 == registered but off, 1 == registered and on */ | 34 | /* must be protected with get_online_cpus()/put_online_cpus(): */ |
35 | static int nmi_enabled = 0; | 35 | static int nmi_enabled; |
36 | static int ctr_running; | ||
36 | 37 | ||
37 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | 38 | struct op_counter_config counter_config[OP_MAX_COUNTER]; |
38 | 39 | ||
@@ -61,12 +62,16 @@ static int profile_exceptions_notify(struct notifier_block *self, | |||
61 | { | 62 | { |
62 | struct die_args *args = (struct die_args *)data; | 63 | struct die_args *args = (struct die_args *)data; |
63 | int ret = NOTIFY_DONE; | 64 | int ret = NOTIFY_DONE; |
64 | int cpu = smp_processor_id(); | ||
65 | 65 | ||
66 | switch (val) { | 66 | switch (val) { |
67 | case DIE_NMI: | 67 | case DIE_NMI: |
68 | case DIE_NMI_IPI: | 68 | case DIE_NMI_IPI: |
69 | model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)); | 69 | if (ctr_running) |
70 | model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); | ||
71 | else if (!nmi_enabled) | ||
72 | break; | ||
73 | else | ||
74 | model->stop(&__get_cpu_var(cpu_msrs)); | ||
70 | ret = NOTIFY_STOP; | 75 | ret = NOTIFY_STOP; |
71 | break; | 76 | break; |
72 | default: | 77 | default: |
@@ -95,24 +100,36 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) | |||
95 | static void nmi_cpu_start(void *dummy) | 100 | static void nmi_cpu_start(void *dummy) |
96 | { | 101 | { |
97 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 102 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); |
98 | model->start(msrs); | 103 | if (!msrs->controls) |
104 | WARN_ON_ONCE(1); | ||
105 | else | ||
106 | model->start(msrs); | ||
99 | } | 107 | } |
100 | 108 | ||
101 | static int nmi_start(void) | 109 | static int nmi_start(void) |
102 | { | 110 | { |
111 | get_online_cpus(); | ||
103 | on_each_cpu(nmi_cpu_start, NULL, 1); | 112 | on_each_cpu(nmi_cpu_start, NULL, 1); |
113 | ctr_running = 1; | ||
114 | put_online_cpus(); | ||
104 | return 0; | 115 | return 0; |
105 | } | 116 | } |
106 | 117 | ||
107 | static void nmi_cpu_stop(void *dummy) | 118 | static void nmi_cpu_stop(void *dummy) |
108 | { | 119 | { |
109 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 120 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); |
110 | model->stop(msrs); | 121 | if (!msrs->controls) |
122 | WARN_ON_ONCE(1); | ||
123 | else | ||
124 | model->stop(msrs); | ||
111 | } | 125 | } |
112 | 126 | ||
113 | static void nmi_stop(void) | 127 | static void nmi_stop(void) |
114 | { | 128 | { |
129 | get_online_cpus(); | ||
115 | on_each_cpu(nmi_cpu_stop, NULL, 1); | 130 | on_each_cpu(nmi_cpu_stop, NULL, 1); |
131 | ctr_running = 0; | ||
132 | put_online_cpus(); | ||
116 | } | 133 | } |
117 | 134 | ||
118 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 135 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
@@ -252,7 +269,10 @@ static int nmi_switch_event(void) | |||
252 | if (nmi_multiplex_on() < 0) | 269 | if (nmi_multiplex_on() < 0) |
253 | return -EINVAL; /* not necessary */ | 270 | return -EINVAL; /* not necessary */ |
254 | 271 | ||
255 | on_each_cpu(nmi_cpu_switch, NULL, 1); | 272 | get_online_cpus(); |
273 | if (ctr_running) | ||
274 | on_each_cpu(nmi_cpu_switch, NULL, 1); | ||
275 | put_online_cpus(); | ||
256 | 276 | ||
257 | return 0; | 277 | return 0; |
258 | } | 278 | } |
@@ -295,6 +315,7 @@ static void free_msrs(void) | |||
295 | kfree(per_cpu(cpu_msrs, i).controls); | 315 | kfree(per_cpu(cpu_msrs, i).controls); |
296 | per_cpu(cpu_msrs, i).controls = NULL; | 316 | per_cpu(cpu_msrs, i).controls = NULL; |
297 | } | 317 | } |
318 | nmi_shutdown_mux(); | ||
298 | } | 319 | } |
299 | 320 | ||
300 | static int allocate_msrs(void) | 321 | static int allocate_msrs(void) |
@@ -307,14 +328,21 @@ static int allocate_msrs(void) | |||
307 | per_cpu(cpu_msrs, i).counters = kzalloc(counters_size, | 328 | per_cpu(cpu_msrs, i).counters = kzalloc(counters_size, |
308 | GFP_KERNEL); | 329 | GFP_KERNEL); |
309 | if (!per_cpu(cpu_msrs, i).counters) | 330 | if (!per_cpu(cpu_msrs, i).counters) |
310 | return 0; | 331 | goto fail; |
311 | per_cpu(cpu_msrs, i).controls = kzalloc(controls_size, | 332 | per_cpu(cpu_msrs, i).controls = kzalloc(controls_size, |
312 | GFP_KERNEL); | 333 | GFP_KERNEL); |
313 | if (!per_cpu(cpu_msrs, i).controls) | 334 | if (!per_cpu(cpu_msrs, i).controls) |
314 | return 0; | 335 | goto fail; |
315 | } | 336 | } |
316 | 337 | ||
338 | if (!nmi_setup_mux()) | ||
339 | goto fail; | ||
340 | |||
317 | return 1; | 341 | return 1; |
342 | |||
343 | fail: | ||
344 | free_msrs(); | ||
345 | return 0; | ||
318 | } | 346 | } |
319 | 347 | ||
320 | static void nmi_cpu_setup(void *dummy) | 348 | static void nmi_cpu_setup(void *dummy) |
@@ -336,49 +364,6 @@ static struct notifier_block profile_exceptions_nb = { | |||
336 | .priority = 2 | 364 | .priority = 2 |
337 | }; | 365 | }; |
338 | 366 | ||
339 | static int nmi_setup(void) | ||
340 | { | ||
341 | int err = 0; | ||
342 | int cpu; | ||
343 | |||
344 | if (!allocate_msrs()) | ||
345 | err = -ENOMEM; | ||
346 | else if (!nmi_setup_mux()) | ||
347 | err = -ENOMEM; | ||
348 | else | ||
349 | err = register_die_notifier(&profile_exceptions_nb); | ||
350 | |||
351 | if (err) { | ||
352 | free_msrs(); | ||
353 | nmi_shutdown_mux(); | ||
354 | return err; | ||
355 | } | ||
356 | |||
357 | /* We need to serialize save and setup for HT because the subset | ||
358 | * of msrs are distinct for save and setup operations | ||
359 | */ | ||
360 | |||
361 | /* Assume saved/restored counters are the same on all CPUs */ | ||
362 | model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); | ||
363 | for_each_possible_cpu(cpu) { | ||
364 | if (!cpu) | ||
365 | continue; | ||
366 | |||
367 | memcpy(per_cpu(cpu_msrs, cpu).counters, | ||
368 | per_cpu(cpu_msrs, 0).counters, | ||
369 | sizeof(struct op_msr) * model->num_counters); | ||
370 | |||
371 | memcpy(per_cpu(cpu_msrs, cpu).controls, | ||
372 | per_cpu(cpu_msrs, 0).controls, | ||
373 | sizeof(struct op_msr) * model->num_controls); | ||
374 | |||
375 | mux_clone(cpu); | ||
376 | } | ||
377 | on_each_cpu(nmi_cpu_setup, NULL, 1); | ||
378 | nmi_enabled = 1; | ||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) | 367 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
383 | { | 368 | { |
384 | struct op_msr *counters = msrs->counters; | 369 | struct op_msr *counters = msrs->counters; |
@@ -412,20 +397,24 @@ static void nmi_cpu_shutdown(void *dummy) | |||
412 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 397 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
413 | apic_write(APIC_LVTERR, v); | 398 | apic_write(APIC_LVTERR, v); |
414 | nmi_cpu_restore_registers(msrs); | 399 | nmi_cpu_restore_registers(msrs); |
400 | if (model->cpu_down) | ||
401 | model->cpu_down(); | ||
415 | } | 402 | } |
416 | 403 | ||
417 | static void nmi_shutdown(void) | 404 | static void nmi_cpu_up(void *dummy) |
418 | { | 405 | { |
419 | struct op_msrs *msrs; | 406 | if (nmi_enabled) |
407 | nmi_cpu_setup(dummy); | ||
408 | if (ctr_running) | ||
409 | nmi_cpu_start(dummy); | ||
410 | } | ||
420 | 411 | ||
421 | nmi_enabled = 0; | 412 | static void nmi_cpu_down(void *dummy) |
422 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | 413 | { |
423 | unregister_die_notifier(&profile_exceptions_nb); | 414 | if (ctr_running) |
424 | nmi_shutdown_mux(); | 415 | nmi_cpu_stop(dummy); |
425 | msrs = &get_cpu_var(cpu_msrs); | 416 | if (nmi_enabled) |
426 | model->shutdown(msrs); | 417 | nmi_cpu_shutdown(dummy); |
427 | free_msrs(); | ||
428 | put_cpu_var(cpu_msrs); | ||
429 | } | 418 | } |
430 | 419 | ||
431 | static int nmi_create_files(struct super_block *sb, struct dentry *root) | 420 | static int nmi_create_files(struct super_block *sb, struct dentry *root) |
@@ -457,7 +446,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) | |||
457 | return 0; | 446 | return 0; |
458 | } | 447 | } |
459 | 448 | ||
460 | #ifdef CONFIG_SMP | ||
461 | static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, | 449 | static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, |
462 | void *data) | 450 | void *data) |
463 | { | 451 | { |
@@ -465,10 +453,10 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, | |||
465 | switch (action) { | 453 | switch (action) { |
466 | case CPU_DOWN_FAILED: | 454 | case CPU_DOWN_FAILED: |
467 | case CPU_ONLINE: | 455 | case CPU_ONLINE: |
468 | smp_call_function_single(cpu, nmi_cpu_start, NULL, 0); | 456 | smp_call_function_single(cpu, nmi_cpu_up, NULL, 0); |
469 | break; | 457 | break; |
470 | case CPU_DOWN_PREPARE: | 458 | case CPU_DOWN_PREPARE: |
471 | smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1); | 459 | smp_call_function_single(cpu, nmi_cpu_down, NULL, 1); |
472 | break; | 460 | break; |
473 | } | 461 | } |
474 | return NOTIFY_DONE; | 462 | return NOTIFY_DONE; |
@@ -477,7 +465,75 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, | |||
477 | static struct notifier_block oprofile_cpu_nb = { | 465 | static struct notifier_block oprofile_cpu_nb = { |
478 | .notifier_call = oprofile_cpu_notifier | 466 | .notifier_call = oprofile_cpu_notifier |
479 | }; | 467 | }; |
480 | #endif | 468 | |
469 | static int nmi_setup(void) | ||
470 | { | ||
471 | int err = 0; | ||
472 | int cpu; | ||
473 | |||
474 | if (!allocate_msrs()) | ||
475 | return -ENOMEM; | ||
476 | |||
477 | /* We need to serialize save and setup for HT because the subset | ||
478 | * of msrs are distinct for save and setup operations | ||
479 | */ | ||
480 | |||
481 | /* Assume saved/restored counters are the same on all CPUs */ | ||
482 | err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); | ||
483 | if (err) | ||
484 | goto fail; | ||
485 | |||
486 | for_each_possible_cpu(cpu) { | ||
487 | if (!cpu) | ||
488 | continue; | ||
489 | |||
490 | memcpy(per_cpu(cpu_msrs, cpu).counters, | ||
491 | per_cpu(cpu_msrs, 0).counters, | ||
492 | sizeof(struct op_msr) * model->num_counters); | ||
493 | |||
494 | memcpy(per_cpu(cpu_msrs, cpu).controls, | ||
495 | per_cpu(cpu_msrs, 0).controls, | ||
496 | sizeof(struct op_msr) * model->num_controls); | ||
497 | |||
498 | mux_clone(cpu); | ||
499 | } | ||
500 | |||
501 | nmi_enabled = 0; | ||
502 | ctr_running = 0; | ||
503 | barrier(); | ||
504 | err = register_die_notifier(&profile_exceptions_nb); | ||
505 | if (err) | ||
506 | goto fail; | ||
507 | |||
508 | get_online_cpus(); | ||
509 | register_cpu_notifier(&oprofile_cpu_nb); | ||
510 | on_each_cpu(nmi_cpu_setup, NULL, 1); | ||
511 | nmi_enabled = 1; | ||
512 | put_online_cpus(); | ||
513 | |||
514 | return 0; | ||
515 | fail: | ||
516 | free_msrs(); | ||
517 | return err; | ||
518 | } | ||
519 | |||
520 | static void nmi_shutdown(void) | ||
521 | { | ||
522 | struct op_msrs *msrs; | ||
523 | |||
524 | get_online_cpus(); | ||
525 | unregister_cpu_notifier(&oprofile_cpu_nb); | ||
526 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | ||
527 | nmi_enabled = 0; | ||
528 | ctr_running = 0; | ||
529 | put_online_cpus(); | ||
530 | barrier(); | ||
531 | unregister_die_notifier(&profile_exceptions_nb); | ||
532 | msrs = &get_cpu_var(cpu_msrs); | ||
533 | model->shutdown(msrs); | ||
534 | free_msrs(); | ||
535 | put_cpu_var(cpu_msrs); | ||
536 | } | ||
481 | 537 | ||
482 | #ifdef CONFIG_PM | 538 | #ifdef CONFIG_PM |
483 | 539 | ||
@@ -687,9 +743,6 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
687 | return -ENODEV; | 743 | return -ENODEV; |
688 | } | 744 | } |
689 | 745 | ||
690 | #ifdef CONFIG_SMP | ||
691 | register_cpu_notifier(&oprofile_cpu_nb); | ||
692 | #endif | ||
693 | /* default values, can be overwritten by model */ | 746 | /* default values, can be overwritten by model */ |
694 | ops->create_files = nmi_create_files; | 747 | ops->create_files = nmi_create_files; |
695 | ops->setup = nmi_setup; | 748 | ops->setup = nmi_setup; |
@@ -716,12 +769,6 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
716 | 769 | ||
717 | void op_nmi_exit(void) | 770 | void op_nmi_exit(void) |
718 | { | 771 | { |
719 | if (using_nmi) { | 772 | if (using_nmi) |
720 | exit_sysfs(); | 773 | exit_sysfs(); |
721 | #ifdef CONFIG_SMP | ||
722 | unregister_cpu_notifier(&oprofile_cpu_nb); | ||
723 | #endif | ||
724 | } | ||
725 | if (model->exit) | ||
726 | model->exit(); | ||
727 | } | 774 | } |
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 090cbbec7dbd..b67a6b5aa8d4 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -30,13 +30,10 @@ | |||
30 | #include "op_counter.h" | 30 | #include "op_counter.h" |
31 | 31 | ||
32 | #define NUM_COUNTERS 4 | 32 | #define NUM_COUNTERS 4 |
33 | #define NUM_CONTROLS 4 | ||
34 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 33 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
35 | #define NUM_VIRT_COUNTERS 32 | 34 | #define NUM_VIRT_COUNTERS 32 |
36 | #define NUM_VIRT_CONTROLS 32 | ||
37 | #else | 35 | #else |
38 | #define NUM_VIRT_COUNTERS NUM_COUNTERS | 36 | #define NUM_VIRT_COUNTERS NUM_COUNTERS |
39 | #define NUM_VIRT_CONTROLS NUM_CONTROLS | ||
40 | #endif | 37 | #endif |
41 | 38 | ||
42 | #define OP_EVENT_MASK 0x0FFF | 39 | #define OP_EVENT_MASK 0x0FFF |
@@ -105,102 +102,6 @@ static u32 get_ibs_caps(void) | |||
105 | return ibs_caps; | 102 | return ibs_caps; |
106 | } | 103 | } |
107 | 104 | ||
108 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
109 | |||
110 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | ||
111 | struct op_msrs const * const msrs) | ||
112 | { | ||
113 | u64 val; | ||
114 | int i; | ||
115 | |||
116 | /* enable active counters */ | ||
117 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
118 | int virt = op_x86_phys_to_virt(i); | ||
119 | if (!reset_value[virt]) | ||
120 | continue; | ||
121 | rdmsrl(msrs->controls[i].addr, val); | ||
122 | val &= model->reserved; | ||
123 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
124 | wrmsrl(msrs->controls[i].addr, val); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | #endif | ||
129 | |||
130 | /* functions for op_amd_spec */ | ||
131 | |||
132 | static void op_amd_fill_in_addresses(struct op_msrs * const msrs) | ||
133 | { | ||
134 | int i; | ||
135 | |||
136 | for (i = 0; i < NUM_COUNTERS; i++) { | ||
137 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) | ||
138 | msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; | ||
139 | } | ||
140 | |||
141 | for (i = 0; i < NUM_CONTROLS; i++) { | ||
142 | if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) | ||
143 | msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | ||
148 | struct op_msrs const * const msrs) | ||
149 | { | ||
150 | u64 val; | ||
151 | int i; | ||
152 | |||
153 | /* setup reset_value */ | ||
154 | for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { | ||
155 | if (counter_config[i].enabled | ||
156 | && msrs->counters[op_x86_virt_to_phys(i)].addr) | ||
157 | reset_value[i] = counter_config[i].count; | ||
158 | else | ||
159 | reset_value[i] = 0; | ||
160 | } | ||
161 | |||
162 | /* clear all counters */ | ||
163 | for (i = 0; i < NUM_CONTROLS; ++i) { | ||
164 | if (unlikely(!msrs->controls[i].addr)) { | ||
165 | if (counter_config[i].enabled && !smp_processor_id()) | ||
166 | /* | ||
167 | * counter is reserved, this is on all | ||
168 | * cpus, so report only for cpu #0 | ||
169 | */ | ||
170 | op_x86_warn_reserved(i); | ||
171 | continue; | ||
172 | } | ||
173 | rdmsrl(msrs->controls[i].addr, val); | ||
174 | if (val & ARCH_PERFMON_EVENTSEL_ENABLE) | ||
175 | op_x86_warn_in_use(i); | ||
176 | val &= model->reserved; | ||
177 | wrmsrl(msrs->controls[i].addr, val); | ||
178 | } | ||
179 | |||
180 | /* avoid a false detection of ctr overflows in NMI handler */ | ||
181 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
182 | if (unlikely(!msrs->counters[i].addr)) | ||
183 | continue; | ||
184 | wrmsrl(msrs->counters[i].addr, -1LL); | ||
185 | } | ||
186 | |||
187 | /* enable active counters */ | ||
188 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
189 | int virt = op_x86_phys_to_virt(i); | ||
190 | if (!reset_value[virt]) | ||
191 | continue; | ||
192 | |||
193 | /* setup counter registers */ | ||
194 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | ||
195 | |||
196 | /* setup control registers */ | ||
197 | rdmsrl(msrs->controls[i].addr, val); | ||
198 | val &= model->reserved; | ||
199 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
200 | wrmsrl(msrs->controls[i].addr, val); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | /* | 105 | /* |
205 | * 16-bit Linear Feedback Shift Register (LFSR) | 106 | * 16-bit Linear Feedback Shift Register (LFSR) |
206 | * | 107 | * |
@@ -365,6 +266,125 @@ static void op_amd_stop_ibs(void) | |||
365 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | 266 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); |
366 | } | 267 | } |
367 | 268 | ||
269 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | ||
270 | |||
271 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | ||
272 | struct op_msrs const * const msrs) | ||
273 | { | ||
274 | u64 val; | ||
275 | int i; | ||
276 | |||
277 | /* enable active counters */ | ||
278 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
279 | int virt = op_x86_phys_to_virt(i); | ||
280 | if (!reset_value[virt]) | ||
281 | continue; | ||
282 | rdmsrl(msrs->controls[i].addr, val); | ||
283 | val &= model->reserved; | ||
284 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
285 | wrmsrl(msrs->controls[i].addr, val); | ||
286 | } | ||
287 | } | ||
288 | |||
289 | #endif | ||
290 | |||
291 | /* functions for op_amd_spec */ | ||
292 | |||
293 | static void op_amd_shutdown(struct op_msrs const * const msrs) | ||
294 | { | ||
295 | int i; | ||
296 | |||
297 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
298 | if (!msrs->counters[i].addr) | ||
299 | continue; | ||
300 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
301 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
302 | } | ||
303 | } | ||
304 | |||
305 | static int op_amd_fill_in_addresses(struct op_msrs * const msrs) | ||
306 | { | ||
307 | int i; | ||
308 | |||
309 | for (i = 0; i < NUM_COUNTERS; i++) { | ||
310 | if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) | ||
311 | goto fail; | ||
312 | if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) { | ||
313 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
314 | goto fail; | ||
315 | } | ||
316 | /* both registers must be reserved */ | ||
317 | msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; | ||
318 | msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; | ||
319 | continue; | ||
320 | fail: | ||
321 | if (!counter_config[i].enabled) | ||
322 | continue; | ||
323 | op_x86_warn_reserved(i); | ||
324 | op_amd_shutdown(msrs); | ||
325 | return -EBUSY; | ||
326 | } | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | ||
332 | struct op_msrs const * const msrs) | ||
333 | { | ||
334 | u64 val; | ||
335 | int i; | ||
336 | |||
337 | /* setup reset_value */ | ||
338 | for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { | ||
339 | if (counter_config[i].enabled | ||
340 | && msrs->counters[op_x86_virt_to_phys(i)].addr) | ||
341 | reset_value[i] = counter_config[i].count; | ||
342 | else | ||
343 | reset_value[i] = 0; | ||
344 | } | ||
345 | |||
346 | /* clear all counters */ | ||
347 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
348 | if (!msrs->controls[i].addr) | ||
349 | continue; | ||
350 | rdmsrl(msrs->controls[i].addr, val); | ||
351 | if (val & ARCH_PERFMON_EVENTSEL_ENABLE) | ||
352 | op_x86_warn_in_use(i); | ||
353 | val &= model->reserved; | ||
354 | wrmsrl(msrs->controls[i].addr, val); | ||
355 | /* | ||
356 | * avoid a false detection of ctr overflows in NMI | ||
357 | * handler | ||
358 | */ | ||
359 | wrmsrl(msrs->counters[i].addr, -1LL); | ||
360 | } | ||
361 | |||
362 | /* enable active counters */ | ||
363 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
364 | int virt = op_x86_phys_to_virt(i); | ||
365 | if (!reset_value[virt]) | ||
366 | continue; | ||
367 | |||
368 | /* setup counter registers */ | ||
369 | wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]); | ||
370 | |||
371 | /* setup control registers */ | ||
372 | rdmsrl(msrs->controls[i].addr, val); | ||
373 | val &= model->reserved; | ||
374 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | ||
375 | wrmsrl(msrs->controls[i].addr, val); | ||
376 | } | ||
377 | |||
378 | if (ibs_caps) | ||
379 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); | ||
380 | } | ||
381 | |||
382 | static void op_amd_cpu_shutdown(void) | ||
383 | { | ||
384 | if (ibs_caps) | ||
385 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | ||
386 | } | ||
387 | |||
368 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 388 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
369 | struct op_msrs const * const msrs) | 389 | struct op_msrs const * const msrs) |
370 | { | 390 | { |
@@ -425,42 +445,16 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
425 | op_amd_stop_ibs(); | 445 | op_amd_stop_ibs(); |
426 | } | 446 | } |
427 | 447 | ||
428 | static void op_amd_shutdown(struct op_msrs const * const msrs) | 448 | static int __init_ibs_nmi(void) |
429 | { | ||
430 | int i; | ||
431 | |||
432 | for (i = 0; i < NUM_COUNTERS; ++i) { | ||
433 | if (msrs->counters[i].addr) | ||
434 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | ||
435 | } | ||
436 | for (i = 0; i < NUM_CONTROLS; ++i) { | ||
437 | if (msrs->controls[i].addr) | ||
438 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | ||
439 | } | ||
440 | } | ||
441 | |||
442 | static u8 ibs_eilvt_off; | ||
443 | |||
444 | static inline void apic_init_ibs_nmi_per_cpu(void *arg) | ||
445 | { | ||
446 | ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); | ||
447 | } | ||
448 | |||
449 | static inline void apic_clear_ibs_nmi_per_cpu(void *arg) | ||
450 | { | ||
451 | setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); | ||
452 | } | ||
453 | |||
454 | static int init_ibs_nmi(void) | ||
455 | { | 449 | { |
456 | #define IBSCTL_LVTOFFSETVAL (1 << 8) | 450 | #define IBSCTL_LVTOFFSETVAL (1 << 8) |
457 | #define IBSCTL 0x1cc | 451 | #define IBSCTL 0x1cc |
458 | struct pci_dev *cpu_cfg; | 452 | struct pci_dev *cpu_cfg; |
459 | int nodes; | 453 | int nodes; |
460 | u32 value = 0; | 454 | u32 value = 0; |
455 | u8 ibs_eilvt_off; | ||
461 | 456 | ||
462 | /* per CPU setup */ | 457 | ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); |
463 | on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1); | ||
464 | 458 | ||
465 | nodes = 0; | 459 | nodes = 0; |
466 | cpu_cfg = NULL; | 460 | cpu_cfg = NULL; |
@@ -490,22 +484,15 @@ static int init_ibs_nmi(void) | |||
490 | return 0; | 484 | return 0; |
491 | } | 485 | } |
492 | 486 | ||
493 | /* uninitialize the APIC for the IBS interrupts if needed */ | ||
494 | static void clear_ibs_nmi(void) | ||
495 | { | ||
496 | if (ibs_caps) | ||
497 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); | ||
498 | } | ||
499 | |||
500 | /* initialize the APIC for the IBS interrupts if available */ | 487 | /* initialize the APIC for the IBS interrupts if available */ |
501 | static void ibs_init(void) | 488 | static void init_ibs(void) |
502 | { | 489 | { |
503 | ibs_caps = get_ibs_caps(); | 490 | ibs_caps = get_ibs_caps(); |
504 | 491 | ||
505 | if (!ibs_caps) | 492 | if (!ibs_caps) |
506 | return; | 493 | return; |
507 | 494 | ||
508 | if (init_ibs_nmi()) { | 495 | if (__init_ibs_nmi()) { |
509 | ibs_caps = 0; | 496 | ibs_caps = 0; |
510 | return; | 497 | return; |
511 | } | 498 | } |
@@ -514,14 +501,6 @@ static void ibs_init(void) | |||
514 | (unsigned)ibs_caps); | 501 | (unsigned)ibs_caps); |
515 | } | 502 | } |
516 | 503 | ||
517 | static void ibs_exit(void) | ||
518 | { | ||
519 | if (!ibs_caps) | ||
520 | return; | ||
521 | |||
522 | clear_ibs_nmi(); | ||
523 | } | ||
524 | |||
525 | static int (*create_arch_files)(struct super_block *sb, struct dentry *root); | 504 | static int (*create_arch_files)(struct super_block *sb, struct dentry *root); |
526 | 505 | ||
527 | static int setup_ibs_files(struct super_block *sb, struct dentry *root) | 506 | static int setup_ibs_files(struct super_block *sb, struct dentry *root) |
@@ -570,27 +549,22 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) | |||
570 | 549 | ||
571 | static int op_amd_init(struct oprofile_operations *ops) | 550 | static int op_amd_init(struct oprofile_operations *ops) |
572 | { | 551 | { |
573 | ibs_init(); | 552 | init_ibs(); |
574 | create_arch_files = ops->create_files; | 553 | create_arch_files = ops->create_files; |
575 | ops->create_files = setup_ibs_files; | 554 | ops->create_files = setup_ibs_files; |
576 | return 0; | 555 | return 0; |
577 | } | 556 | } |
578 | 557 | ||
579 | static void op_amd_exit(void) | ||
580 | { | ||
581 | ibs_exit(); | ||
582 | } | ||
583 | |||
584 | struct op_x86_model_spec op_amd_spec = { | 558 | struct op_x86_model_spec op_amd_spec = { |
585 | .num_counters = NUM_COUNTERS, | 559 | .num_counters = NUM_COUNTERS, |
586 | .num_controls = NUM_CONTROLS, | 560 | .num_controls = NUM_COUNTERS, |
587 | .num_virt_counters = NUM_VIRT_COUNTERS, | 561 | .num_virt_counters = NUM_VIRT_COUNTERS, |
588 | .reserved = MSR_AMD_EVENTSEL_RESERVED, | 562 | .reserved = MSR_AMD_EVENTSEL_RESERVED, |
589 | .event_mask = OP_EVENT_MASK, | 563 | .event_mask = OP_EVENT_MASK, |
590 | .init = op_amd_init, | 564 | .init = op_amd_init, |
591 | .exit = op_amd_exit, | ||
592 | .fill_in_addresses = &op_amd_fill_in_addresses, | 565 | .fill_in_addresses = &op_amd_fill_in_addresses, |
593 | .setup_ctrs = &op_amd_setup_ctrs, | 566 | .setup_ctrs = &op_amd_setup_ctrs, |
567 | .cpu_down = &op_amd_cpu_shutdown, | ||
594 | .check_ctrs = &op_amd_check_ctrs, | 568 | .check_ctrs = &op_amd_check_ctrs, |
595 | .start = &op_amd_start, | 569 | .start = &op_amd_start, |
596 | .stop = &op_amd_stop, | 570 | .stop = &op_amd_stop, |
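The new op_amd_fill_in_addresses() above only records a counter once both of its MSRs could be reserved, releases the half-reservation otherwise, and unwinds everything when a counter the user actually enabled turns out to be unavailable. A generic sketch of that reserve-both-or-roll-back pattern; try_reserve()/release(), the resource numbering and the enabled[] array are stand-ins, not the kernel's reserve_*_nmi() interfaces.

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4

static bool reserved[2 * NUM_COUNTERS];   /* fake global reservation map */
static bool enabled[NUM_COUNTERS] = { true, false, false, false };
static bool has_pair[NUM_COUNTERS];       /* counter i has both MSRs reserved */

static bool try_reserve(int res)
{
	if (reserved[res])
		return false;
	reserved[res] = true;
	return true;
}

static void release(int res)
{
	reserved[res] = false;
}

static void shutdown_all(void)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!has_pair[i])
			continue;
		release(2 * i);
		release(2 * i + 1);
		has_pair[i] = false;
	}
}

static int fill_in_addresses(void)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!try_reserve(2 * i))
			goto fail;
		if (!try_reserve(2 * i + 1)) {
			release(2 * i);
			goto fail;
		}
		has_pair[i] = true;     /* both halves reserved: counter usable */
		continue;
fail:
		if (!enabled[i])
			continue;       /* nobody asked for this counter; skip it */
		shutdown_all();
		return -1;              /* a requested counter is unavailable */
	}

	return 0;
}

int main(void)
{
	reserved[1] = true;   /* pretend counter 0's event-select MSR is taken */
	printf("fill_in_addresses() = %d\n", fill_in_addresses());
	return 0;
}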
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index e6a160a4684a..182558dd5515 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -385,8 +385,26 @@ static unsigned int get_stagger(void) | |||
385 | 385 | ||
386 | static unsigned long reset_value[NUM_COUNTERS_NON_HT]; | 386 | static unsigned long reset_value[NUM_COUNTERS_NON_HT]; |
387 | 387 | ||
388 | static void p4_shutdown(struct op_msrs const * const msrs) | ||
389 | { | ||
390 | int i; | ||
388 | 391 | ||
389 | static void p4_fill_in_addresses(struct op_msrs * const msrs) | 392 | for (i = 0; i < num_counters; ++i) { |
393 | if (msrs->counters[i].addr) | ||
394 | release_perfctr_nmi(msrs->counters[i].addr); | ||
395 | } | ||
396 | /* | ||
397 | * some of the control registers are specially reserved in | ||
398 | * conjunction with the counter registers (hence the starting offset). | ||
399 | * This saves a few bits. | ||
400 | */ | ||
401 | for (i = num_counters; i < num_controls; ++i) { | ||
402 | if (msrs->controls[i].addr) | ||
403 | release_evntsel_nmi(msrs->controls[i].addr); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static int p4_fill_in_addresses(struct op_msrs * const msrs) | ||
390 | { | 408 | { |
391 | unsigned int i; | 409 | unsigned int i; |
392 | unsigned int addr, cccraddr, stag; | 410 | unsigned int addr, cccraddr, stag; |
@@ -468,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs) | |||
468 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; | 486 | msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; |
469 | } | 487 | } |
470 | } | 488 | } |
489 | |||
490 | for (i = 0; i < num_counters; ++i) { | ||
491 | if (!counter_config[i].enabled) | ||
492 | continue; | ||
493 | if (msrs->controls[i].addr) | ||
494 | continue; | ||
495 | op_x86_warn_reserved(i); | ||
496 | p4_shutdown(msrs); | ||
497 | return -EBUSY; | ||
498 | } | ||
499 | |||
500 | return 0; | ||
471 | } | 501 | } |
472 | 502 | ||
473 | 503 | ||
@@ -668,26 +698,6 @@ static void p4_stop(struct op_msrs const * const msrs) | |||
668 | } | 698 | } |
669 | } | 699 | } |
670 | 700 | ||
671 | static void p4_shutdown(struct op_msrs const * const msrs) | ||
672 | { | ||
673 | int i; | ||
674 | |||
675 | for (i = 0; i < num_counters; ++i) { | ||
676 | if (msrs->counters[i].addr) | ||
677 | release_perfctr_nmi(msrs->counters[i].addr); | ||
678 | } | ||
679 | /* | ||
680 | * some of the control registers are specially reserved in | ||
681 | * conjunction with the counter registers (hence the starting offset). | ||
682 | * This saves a few bits. | ||
683 | */ | ||
684 | for (i = num_counters; i < num_controls; ++i) { | ||
685 | if (msrs->controls[i].addr) | ||
686 | release_evntsel_nmi(msrs->controls[i].addr); | ||
687 | } | ||
688 | } | ||
689 | |||
690 | |||
691 | #ifdef CONFIG_SMP | 701 | #ifdef CONFIG_SMP |
692 | struct op_x86_model_spec op_p4_ht2_spec = { | 702 | struct op_x86_model_spec op_p4_ht2_spec = { |
693 | .num_counters = NUM_COUNTERS_HT2, | 703 | .num_counters = NUM_COUNTERS_HT2, |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index c8abc4d1bf35..d769cda54082 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -30,19 +30,46 @@ static int counter_width = 32; | |||
30 | 30 | ||
31 | static u64 *reset_value; | 31 | static u64 *reset_value; |
32 | 32 | ||
33 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) | 33 | static void ppro_shutdown(struct op_msrs const * const msrs) |
34 | { | 34 | { |
35 | int i; | 35 | int i; |
36 | 36 | ||
37 | for (i = 0; i < num_counters; i++) { | 37 | for (i = 0; i < num_counters; ++i) { |
38 | if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) | 38 | if (!msrs->counters[i].addr) |
39 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; | 39 | continue; |
40 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | ||
41 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | ||
42 | } | ||
43 | if (reset_value) { | ||
44 | kfree(reset_value); | ||
45 | reset_value = NULL; | ||
40 | } | 46 | } |
47 | } | ||
48 | |||
49 | static int ppro_fill_in_addresses(struct op_msrs * const msrs) | ||
50 | { | ||
51 | int i; | ||
41 | 52 | ||
42 | for (i = 0; i < num_counters; i++) { | 53 | for (i = 0; i < num_counters; i++) { |
43 | if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) | 54 | if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) |
44 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; | 55 | goto fail; |
56 | if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) { | ||
57 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | ||
58 | goto fail; | ||
59 | } | ||
60 | /* both registers must be reserved */ | ||
61 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; | ||
62 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; | ||
63 | continue; | ||
64 | fail: | ||
65 | if (!counter_config[i].enabled) | ||
66 | continue; | ||
67 | op_x86_warn_reserved(i); | ||
68 | ppro_shutdown(msrs); | ||
69 | return -EBUSY; | ||
45 | } | 70 | } |
71 | |||
72 | return 0; | ||
46 | } | 73 | } |
47 | 74 | ||
48 | 75 | ||
@@ -78,26 +105,17 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, | |||
78 | 105 | ||
79 | /* clear all counters */ | 106 | /* clear all counters */ |
80 | for (i = 0; i < num_counters; ++i) { | 107 | for (i = 0; i < num_counters; ++i) { |
81 | if (unlikely(!msrs->controls[i].addr)) { | 108 | if (!msrs->controls[i].addr) |
82 | if (counter_config[i].enabled && !smp_processor_id()) | ||
83 | /* | ||
84 | * counter is reserved, this is on all | ||
85 | * cpus, so report only for cpu #0 | ||
86 | */ | ||
87 | op_x86_warn_reserved(i); | ||
88 | continue; | 109 | continue; |
89 | } | ||
90 | rdmsrl(msrs->controls[i].addr, val); | 110 | rdmsrl(msrs->controls[i].addr, val); |
91 | if (val & ARCH_PERFMON_EVENTSEL_ENABLE) | 111 | if (val & ARCH_PERFMON_EVENTSEL_ENABLE) |
92 | op_x86_warn_in_use(i); | 112 | op_x86_warn_in_use(i); |
93 | val &= model->reserved; | 113 | val &= model->reserved; |
94 | wrmsrl(msrs->controls[i].addr, val); | 114 | wrmsrl(msrs->controls[i].addr, val); |
95 | } | 115 | /* |
96 | 116 | * avoid a false detection of ctr overflows in NMI | |
97 | /* avoid a false detection of ctr overflows in NMI handler */ | 117 | * handler |
98 | for (i = 0; i < num_counters; ++i) { | 118 | */ |
99 | if (unlikely(!msrs->counters[i].addr)) | ||
100 | continue; | ||
101 | wrmsrl(msrs->counters[i].addr, -1LL); | 119 | wrmsrl(msrs->counters[i].addr, -1LL); |
102 | } | 120 | } |
103 | 121 | ||
@@ -189,25 +207,6 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
189 | } | 207 | } |
190 | } | 208 | } |
191 | 209 | ||
192 | static void ppro_shutdown(struct op_msrs const * const msrs) | ||
193 | { | ||
194 | int i; | ||
195 | |||
196 | for (i = 0; i < num_counters; ++i) { | ||
197 | if (msrs->counters[i].addr) | ||
198 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | ||
199 | } | ||
200 | for (i = 0; i < num_counters; ++i) { | ||
201 | if (msrs->controls[i].addr) | ||
202 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | ||
203 | } | ||
204 | if (reset_value) { | ||
205 | kfree(reset_value); | ||
206 | reset_value = NULL; | ||
207 | } | ||
208 | } | ||
209 | |||
210 | |||
211 | struct op_x86_model_spec op_ppro_spec = { | 210 | struct op_x86_model_spec op_ppro_spec = { |
212 | .num_counters = 2, | 211 | .num_counters = 2, |
213 | .num_controls = 2, | 212 | .num_controls = 2, |
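
The op_model_ppro.c hunk above makes ppro_fill_in_addresses() advertise a counter only when both its PERFCTR and EVNTSEL MSRs could be reserved, and unwinds everything already taken when a reservation fails. Below is a rough user-space analogue of that "reserve both or roll back" pattern; the reserve_*/release_* helpers and the in_use[] array are invented stand-ins, not the kernel's NMI-reservation API.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_COUNTERS 2

    /* Invented stand-ins for reserve_perfctr_nmi()/reserve_evntsel_nmi(). */
    static bool reserve_ctr(int i) { return true; }
    static bool reserve_sel(int i) { return i != 1; }  /* pretend pair 1 is taken */
    static void release_ctr(int i) { printf("released ctr %d\n", i); }
    static void release_sel(int i) { printf("released sel %d\n", i); }

    static int in_use[NUM_COUNTERS];  /* like msrs->counters[i].addr: 0 == unused */

    static void shutdown_all(void)
    {
        for (int i = 0; i < NUM_COUNTERS; ++i) {
            if (!in_use[i])
                continue;
            release_ctr(i);
            release_sel(i);
            in_use[i] = 0;
        }
    }

    static int fill_in_addresses(void)
    {
        for (int i = 0; i < NUM_COUNTERS; ++i) {
            if (!reserve_ctr(i))
                goto fail;
            if (!reserve_sel(i)) {
                release_ctr(i);  /* do not leak the half-reserved pair */
                goto fail;
            }
            in_use[i] = 1;       /* both registers reserved */
            continue;
    fail:
            /* the real driver ignores the failure when this counter was never
             * requested; here we always unwind and report an -EBUSY-style error */
            shutdown_all();
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("fill_in_addresses() = %d\n", fill_in_addresses());
        return 0;
    }
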
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index ff82a755edd4..89017fa1fd63 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h | |||
@@ -40,10 +40,10 @@ struct op_x86_model_spec { | |||
40 | u64 reserved; | 40 | u64 reserved; |
41 | u16 event_mask; | 41 | u16 event_mask; |
42 | int (*init)(struct oprofile_operations *ops); | 42 | int (*init)(struct oprofile_operations *ops); |
43 | void (*exit)(void); | 43 | int (*fill_in_addresses)(struct op_msrs * const msrs); |
44 | void (*fill_in_addresses)(struct op_msrs * const msrs); | ||
45 | void (*setup_ctrs)(struct op_x86_model_spec const *model, | 44 | void (*setup_ctrs)(struct op_x86_model_spec const *model, |
46 | struct op_msrs const * const msrs); | 45 | struct op_msrs const * const msrs); |
46 | void (*cpu_down)(void); | ||
47 | int (*check_ctrs)(struct pt_regs * const regs, | 47 | int (*check_ctrs)(struct pt_regs * const regs, |
48 | struct op_msrs const * const msrs); | 48 | struct op_msrs const * const msrs); |
49 | void (*start)(struct op_msrs const * const msrs); | 49 | void (*start)(struct op_msrs const * const msrs); |
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c index 8bf2fcb88d04..1cdc02cf8fa4 100644 --- a/arch/x86/pci/mrst.c +++ b/arch/x86/pci/mrst.c | |||
@@ -247,6 +247,10 @@ static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev) | |||
247 | u32 size; | 247 | u32 size; |
248 | int i; | 248 | int i; |
249 | 249 | ||
250 | /* Must have extended configuration space */ | ||
251 | if (dev->cfg_size < PCIE_CAP_OFFSET + 4) | ||
252 | return; | ||
253 | |||
250 | /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */ | 254 | /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */ |
251 | offset = fixed_bar_cap(dev->bus, dev->devfn); | 255 | offset = fixed_bar_cap(dev->bus, dev->devfn); |
252 | if (!offset || PCI_DEVFN(2, 0) == dev->devfn || | 256 | if (!offset || PCI_DEVFN(2, 0) == dev->devfn || |
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 22d6dde42619..a96a0619d0b7 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h | |||
@@ -46,7 +46,7 @@ | |||
46 | * | 46 | * |
47 | * Atomically reads the value of @v. | 47 | * Atomically reads the value of @v. |
48 | */ | 48 | */ |
49 | #define atomic_read(v) ((v)->counter) | 49 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
50 | 50 | ||
51 | /** | 51 | /** |
52 | * atomic_set - set atomic variable | 52 | * atomic_set - set atomic variable |
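
The xtensa change above casts through a volatile pointer in atomic_read(), which forces the compiler to reload the counter from memory on every call instead of caching it in a register across a spin loop. A minimal user-space illustration of why that matters; the struct and macro names here are made up for the example.

    #include <stdio.h>

    typedef struct { int counter; } my_atomic_t;

    /* Without the volatile cast, the compiler may hoist the load out of the
     * loop below and spin forever on a stale register copy. */
    #define my_atomic_read(v)   (*(volatile int *)&(v)->counter)

    static my_atomic_t flag = { 0 };

    /* Imagine another thread or an interrupt handler eventually stores
     * flag.counter = 1; this loop must observe that store. */
    static void wait_for_flag(void)
    {
        while (my_atomic_read(&flag) == 0)
            ;   /* each iteration issues a fresh load */
    }

    int main(void)
    {
        flag.counter = 1;   /* set beforehand so the example terminates */
        wait_for_flag();
        puts("flag observed");
        return 0;
    }
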
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5fe03def34b2..2cc682b860ea 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -286,16 +286,16 @@ done: | |||
286 | static struct cgroup_subsys_state * | 286 | static struct cgroup_subsys_state * |
287 | blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) | 287 | blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) |
288 | { | 288 | { |
289 | struct blkio_cgroup *blkcg, *parent_blkcg; | 289 | struct blkio_cgroup *blkcg; |
290 | struct cgroup *parent = cgroup->parent; | ||
290 | 291 | ||
291 | if (!cgroup->parent) { | 292 | if (!parent) { |
292 | blkcg = &blkio_root_cgroup; | 293 | blkcg = &blkio_root_cgroup; |
293 | goto done; | 294 | goto done; |
294 | } | 295 | } |
295 | 296 | ||
296 | /* Currently we do not support hierarchy deeper than two level (0,1) */ | 297 | /* Currently we do not support hierarchy deeper than two level (0,1) */ |
297 | parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent); | 298 | if (parent != cgroup->top_cgroup) |
298 | if (css_depth(&parent_blkcg->css) > 0) | ||
299 | return ERR_PTR(-EINVAL); | 299 | return ERR_PTR(-EINVAL); |
300 | 300 | ||
301 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); | 301 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 838834be115b..5f127cfb2e92 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -3694,8 +3694,10 @@ static void *cfq_init_queue(struct request_queue *q) | |||
3694 | * to make sure that cfq_put_cfqg() does not try to kfree root group | 3694 | * to make sure that cfq_put_cfqg() does not try to kfree root group |
3695 | */ | 3695 | */ |
3696 | atomic_set(&cfqg->ref, 1); | 3696 | atomic_set(&cfqg->ref, 1); |
3697 | rcu_read_lock(); | ||
3697 | blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, | 3698 | blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, |
3698 | 0); | 3699 | 0); |
3700 | rcu_read_unlock(); | ||
3699 | #endif | 3701 | #endif |
3700 | /* | 3702 | /* |
3701 | * Not strictly needed (since RB_ROOT just clears the node and we | 3703 | * Not strictly needed (since RB_ROOT just clears the node and we |
diff --git a/drivers/Makefile b/drivers/Makefile index 34f1e1064dbc..f42a03029b7c 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/ | |||
17 | obj-$(CONFIG_PNP) += pnp/ | 17 | obj-$(CONFIG_PNP) += pnp/ |
18 | obj-$(CONFIG_ARM_AMBA) += amba/ | 18 | obj-$(CONFIG_ARM_AMBA) += amba/ |
19 | 19 | ||
20 | obj-$(CONFIG_VIRTIO) += virtio/ | ||
20 | obj-$(CONFIG_XEN) += xen/ | 21 | obj-$(CONFIG_XEN) += xen/ |
21 | 22 | ||
22 | # regulators early, since some subsystems rely on them to initialize | 23 | # regulators early, since some subsystems rely on them to initialize |
@@ -108,7 +109,6 @@ obj-$(CONFIG_PPC_PS3) += ps3/ | |||
108 | obj-$(CONFIG_OF) += of/ | 109 | obj-$(CONFIG_OF) += of/ |
109 | obj-$(CONFIG_SSB) += ssb/ | 110 | obj-$(CONFIG_SSB) += ssb/ |
110 | obj-$(CONFIG_VHOST_NET) += vhost/ | 111 | obj-$(CONFIG_VHOST_NET) += vhost/ |
111 | obj-$(CONFIG_VIRTIO) += virtio/ | ||
112 | obj-$(CONFIG_VLYNQ) += vlynq/ | 112 | obj-$(CONFIG_VLYNQ) += vlynq/ |
113 | obj-$(CONFIG_STAGING) += staging/ | 113 | obj-$(CONFIG_STAGING) += staging/ |
114 | obj-y += platform/ | 114 | obj-y += platform/ |
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 19dacfd43163..62122134693b 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <acpi/acpi_bus.h> | 31 | #include <acpi/acpi_bus.h> |
32 | #include <acpi/acpi_drivers.h> | 32 | #include <acpi/acpi_drivers.h> |
33 | 33 | ||
34 | #define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator" | 34 | #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" |
35 | #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" | 35 | #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" |
36 | #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 | 36 | #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 |
37 | static DEFINE_MUTEX(isolated_cpus_lock); | 37 | static DEFINE_MUTEX(isolated_cpus_lock); |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 37132dc2da03..743576bf1bd7 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -527,7 +527,7 @@ int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, | |||
527 | if (!event_is_open) | 527 | if (!event_is_open) |
528 | return 0; | 528 | return 0; |
529 | 529 | ||
530 | event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC); | 530 | event = kzalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC); |
531 | if (!event) | 531 | if (!event) |
532 | return -ENOMEM; | 532 | return -ENOMEM; |
533 | 533 | ||
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c index 4bb18c980ac6..1c527a192872 100644 --- a/drivers/acpi/hest.c +++ b/drivers/acpi/hest.c | |||
@@ -123,6 +123,10 @@ int acpi_hest_firmware_first_pci(struct pci_dev *pci) | |||
123 | { | 123 | { |
124 | acpi_status status = AE_NOT_FOUND; | 124 | acpi_status status = AE_NOT_FOUND; |
125 | struct acpi_table_header *hest = NULL; | 125 | struct acpi_table_header *hest = NULL; |
126 | |||
127 | if (acpi_disabled) | ||
128 | return 0; | ||
129 | |||
126 | status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); | 130 | status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); |
127 | 131 | ||
128 | if (ACPI_SUCCESS(status)) { | 132 | if (ACPI_SUCCESS(status)) { |
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c index e8c32a49f14e..66f67293341e 100644 --- a/drivers/acpi/power_meter.c +++ b/drivers/acpi/power_meter.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #define ACPI_POWER_METER_NAME "power_meter" | 35 | #define ACPI_POWER_METER_NAME "power_meter" |
36 | ACPI_MODULE_NAME(ACPI_POWER_METER_NAME); | 36 | ACPI_MODULE_NAME(ACPI_POWER_METER_NAME); |
37 | #define ACPI_POWER_METER_DEVICE_NAME "Power Meter" | 37 | #define ACPI_POWER_METER_DEVICE_NAME "Power Meter" |
38 | #define ACPI_POWER_METER_CLASS "power_meter_resource" | 38 | #define ACPI_POWER_METER_CLASS "pwr_meter_resource" |
39 | 39 | ||
40 | #define NUM_SENSORS 17 | 40 | #define NUM_SENSORS 17 |
41 | 41 | ||
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 36704b887ccf..f8be23b6c129 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | #define PREFIX "ACPI: " | 19 | #define PREFIX "ACPI: " |
20 | 20 | ||
21 | #define ACPI_SMB_HC_CLASS "smbus_host_controller" | 21 | #define ACPI_SMB_HC_CLASS "smbus_host_ctl" |
22 | #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC" | 22 | #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC" |
23 | 23 | ||
24 | struct acpi_smb_hc { | 24 | struct acpi_smb_hc { |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index f74834a544fd..baa76bbf244a 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -450,6 +450,38 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
450 | }, | 450 | }, |
451 | }, | 451 | }, |
452 | { | 452 | { |
453 | .callback = init_set_sci_en_on_resume, | ||
454 | .ident = "Lenovo ThinkPad T410", | ||
455 | .matches = { | ||
456 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
457 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), | ||
458 | }, | ||
459 | }, | ||
460 | { | ||
461 | .callback = init_set_sci_en_on_resume, | ||
462 | .ident = "Lenovo ThinkPad T510", | ||
463 | .matches = { | ||
464 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
465 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), | ||
466 | }, | ||
467 | }, | ||
468 | { | ||
469 | .callback = init_set_sci_en_on_resume, | ||
470 | .ident = "Lenovo ThinkPad W510", | ||
471 | .matches = { | ||
472 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
473 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), | ||
474 | }, | ||
475 | }, | ||
476 | { | ||
477 | .callback = init_set_sci_en_on_resume, | ||
478 | .ident = "Lenovo ThinkPad X201[s]", | ||
479 | .matches = { | ||
480 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
481 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), | ||
482 | }, | ||
483 | }, | ||
484 | { | ||
453 | .callback = init_old_suspend_ordering, | 485 | .callback = init_old_suspend_ordering, |
454 | .ident = "Panasonic CF51-2L", | 486 | .ident = "Panasonic CF51-2L", |
455 | .matches = { | 487 | .matches = { |
@@ -458,6 +490,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
458 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), | 490 | DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), |
459 | }, | 491 | }, |
460 | }, | 492 | }, |
493 | { | ||
494 | .callback = init_set_sci_en_on_resume, | ||
495 | .ident = "Dell Studio 1558", | ||
496 | .matches = { | ||
497 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
498 | DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"), | ||
499 | }, | ||
500 | }, | ||
501 | { | ||
502 | .callback = init_set_sci_en_on_resume, | ||
503 | .ident = "Dell Studio 1557", | ||
504 | .matches = { | ||
505 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
506 | DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"), | ||
507 | }, | ||
508 | }, | ||
509 | { | ||
510 | .callback = init_set_sci_en_on_resume, | ||
511 | .ident = "Dell Studio 1555", | ||
512 | .matches = { | ||
513 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
514 | DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"), | ||
515 | }, | ||
516 | }, | ||
461 | {}, | 517 | {}, |
462 | }; | 518 | }; |
463 | #endif /* CONFIG_SUSPEND */ | 519 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index 8ad4ffea6920..6e6b6a11b3ce 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c | |||
@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) | |||
80 | } | 80 | } |
81 | EXPORT_SYMBOL_GPL(iommu_detach_device); | 81 | EXPORT_SYMBOL_GPL(iommu_detach_device); |
82 | 82 | ||
83 | int iommu_map_range(struct iommu_domain *domain, unsigned long iova, | ||
84 | phys_addr_t paddr, size_t size, int prot) | ||
85 | { | ||
86 | return iommu_ops->map(domain, iova, paddr, size, prot); | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(iommu_map_range); | ||
89 | |||
90 | void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, | ||
91 | size_t size) | ||
92 | { | ||
93 | iommu_ops->unmap(domain, iova, size); | ||
94 | } | ||
95 | EXPORT_SYMBOL_GPL(iommu_unmap_range); | ||
96 | |||
97 | phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 83 | phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
98 | unsigned long iova) | 84 | unsigned long iova) |
99 | { | 85 | { |
@@ -107,3 +93,32 @@ int iommu_domain_has_cap(struct iommu_domain *domain, | |||
107 | return iommu_ops->domain_has_cap(domain, cap); | 93 | return iommu_ops->domain_has_cap(domain, cap); |
108 | } | 94 | } |
109 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); | 95 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); |
96 | |||
97 | int iommu_map(struct iommu_domain *domain, unsigned long iova, | ||
98 | phys_addr_t paddr, int gfp_order, int prot) | ||
99 | { | ||
100 | unsigned long invalid_mask; | ||
101 | size_t size; | ||
102 | |||
103 | size = 0x1000UL << gfp_order; | ||
104 | invalid_mask = size - 1; | ||
105 | |||
106 | BUG_ON((iova | paddr) & invalid_mask); | ||
107 | |||
108 | return iommu_ops->map(domain, iova, paddr, gfp_order, prot); | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(iommu_map); | ||
111 | |||
112 | int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) | ||
113 | { | ||
114 | unsigned long invalid_mask; | ||
115 | size_t size; | ||
116 | |||
117 | size = 0x1000UL << gfp_order; | ||
118 | invalid_mask = size - 1; | ||
119 | |||
120 | BUG_ON(iova & invalid_mask); | ||
121 | |||
122 | return iommu_ops->unmap(domain, iova, gfp_order); | ||
123 | } | ||
124 | EXPORT_SYMBOL_GPL(iommu_unmap); | ||
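
In the new iommu_map()/iommu_unmap() entry points above, the page-order argument is turned into a byte size and an alignment mask before the driver callback runs: size = 0x1000UL << gfp_order, and size - 1 is the mask that iova (and paddr) must not intersect. A small self-contained check of that arithmetic, assuming the 4 KiB base page size used in the hunk:

    #include <stdio.h>

    /* Mirrors the size/mask computation in iommu_map(): a 4 KiB base page
     * shifted left by the order gives the mapping size, and size - 1 is the
     * alignment mask the iova/paddr pair must satisfy. */
    static int check_alignment(unsigned long iova, unsigned long paddr, int gfp_order)
    {
        unsigned long size = 0x1000UL << gfp_order;
        unsigned long invalid_mask = size - 1;

        return ((iova | paddr) & invalid_mask) == 0;
    }

    int main(void)
    {
        /* order 0 -> 4 KiB mapping, order 9 -> 2 MiB mapping */
        printf("order 0, iova 0x1000: %s\n",
               check_alignment(0x1000, 0x2000, 0) ? "ok" : "misaligned");
        printf("order 9, iova 0x1000: %s\n",
               check_alignment(0x1000, 0x2000, 9) ? "ok" : "misaligned");
        return 0;
    }
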
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4b4b565c835f..c5fbe198fbdb 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(platform_device_alloc); | |||
187 | * released. | 187 | * released. |
188 | */ | 188 | */ |
189 | int platform_device_add_resources(struct platform_device *pdev, | 189 | int platform_device_add_resources(struct platform_device *pdev, |
190 | struct resource *res, unsigned int num) | 190 | const struct resource *res, unsigned int num) |
191 | { | 191 | { |
192 | struct resource *r; | 192 | struct resource *r; |
193 | 193 | ||
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); | |||
367 | */ | 367 | */ |
368 | struct platform_device *platform_device_register_simple(const char *name, | 368 | struct platform_device *platform_device_register_simple(const char *name, |
369 | int id, | 369 | int id, |
370 | struct resource *res, | 370 | const struct resource *res, |
371 | unsigned int num) | 371 | unsigned int num) |
372 | { | 372 | { |
373 | struct platform_device *pdev; | 373 | struct platform_device *pdev; |
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 0182a22c423a..832798aa14f6 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -66,6 +66,7 @@ | |||
66 | #include <linux/blkdev.h> | 66 | #include <linux/blkdev.h> |
67 | #include <linux/elevator.h> | 67 | #include <linux/elevator.h> |
68 | #include <linux/interrupt.h> | 68 | #include <linux/interrupt.h> |
69 | #include <linux/platform_device.h> | ||
69 | 70 | ||
70 | #include <asm/setup.h> | 71 | #include <asm/setup.h> |
71 | #include <asm/uaccess.h> | 72 | #include <asm/uaccess.h> |
@@ -1696,34 +1697,18 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1696 | return get_disk(unit[drive].gendisk); | 1697 | return get_disk(unit[drive].gendisk); |
1697 | } | 1698 | } |
1698 | 1699 | ||
1699 | static int __init amiga_floppy_init(void) | 1700 | static int __init amiga_floppy_probe(struct platform_device *pdev) |
1700 | { | 1701 | { |
1701 | int i, ret; | 1702 | int i, ret; |
1702 | 1703 | ||
1703 | if (!MACH_IS_AMIGA) | ||
1704 | return -ENODEV; | ||
1705 | |||
1706 | if (!AMIGAHW_PRESENT(AMI_FLOPPY)) | ||
1707 | return -ENODEV; | ||
1708 | |||
1709 | if (register_blkdev(FLOPPY_MAJOR,"fd")) | 1704 | if (register_blkdev(FLOPPY_MAJOR,"fd")) |
1710 | return -EBUSY; | 1705 | return -EBUSY; |
1711 | 1706 | ||
1712 | /* | ||
1713 | * We request DSKPTR, DSKLEN and DSKDATA only, because the other | ||
1714 | * floppy registers are too spreaded over the custom register space | ||
1715 | */ | ||
1716 | ret = -EBUSY; | ||
1717 | if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) { | ||
1718 | printk("fd: cannot get floppy registers\n"); | ||
1719 | goto out_blkdev; | ||
1720 | } | ||
1721 | |||
1722 | ret = -ENOMEM; | 1707 | ret = -ENOMEM; |
1723 | if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) == | 1708 | if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) == |
1724 | NULL) { | 1709 | NULL) { |
1725 | printk("fd: cannot get chip mem buffer\n"); | 1710 | printk("fd: cannot get chip mem buffer\n"); |
1726 | goto out_memregion; | 1711 | goto out_blkdev; |
1727 | } | 1712 | } |
1728 | 1713 | ||
1729 | ret = -EBUSY; | 1714 | ret = -EBUSY; |
@@ -1792,18 +1777,13 @@ out_irq2: | |||
1792 | free_irq(IRQ_AMIGA_DSKBLK, NULL); | 1777 | free_irq(IRQ_AMIGA_DSKBLK, NULL); |
1793 | out_irq: | 1778 | out_irq: |
1794 | amiga_chip_free(raw_buf); | 1779 | amiga_chip_free(raw_buf); |
1795 | out_memregion: | ||
1796 | release_mem_region(CUSTOM_PHYSADDR+0x20, 8); | ||
1797 | out_blkdev: | 1780 | out_blkdev: |
1798 | unregister_blkdev(FLOPPY_MAJOR,"fd"); | 1781 | unregister_blkdev(FLOPPY_MAJOR,"fd"); |
1799 | return ret; | 1782 | return ret; |
1800 | } | 1783 | } |
1801 | 1784 | ||
1802 | module_init(amiga_floppy_init); | ||
1803 | #ifdef MODULE | ||
1804 | |||
1805 | #if 0 /* not safe to unload */ | 1785 | #if 0 /* not safe to unload */ |
1806 | void cleanup_module(void) | 1786 | static int __exit amiga_floppy_remove(struct platform_device *pdev) |
1807 | { | 1787 | { |
1808 | int i; | 1788 | int i; |
1809 | 1789 | ||
@@ -1820,12 +1800,25 @@ void cleanup_module(void) | |||
1820 | custom.dmacon = DMAF_DISK; /* disable DMA */ | 1800 | custom.dmacon = DMAF_DISK; /* disable DMA */ |
1821 | amiga_chip_free(raw_buf); | 1801 | amiga_chip_free(raw_buf); |
1822 | blk_cleanup_queue(floppy_queue); | 1802 | blk_cleanup_queue(floppy_queue); |
1823 | release_mem_region(CUSTOM_PHYSADDR+0x20, 8); | ||
1824 | unregister_blkdev(FLOPPY_MAJOR, "fd"); | 1803 | unregister_blkdev(FLOPPY_MAJOR, "fd"); |
1825 | } | 1804 | } |
1826 | #endif | 1805 | #endif |
1827 | 1806 | ||
1828 | #else | 1807 | static struct platform_driver amiga_floppy_driver = { |
1808 | .driver = { | ||
1809 | .name = "amiga-floppy", | ||
1810 | .owner = THIS_MODULE, | ||
1811 | }, | ||
1812 | }; | ||
1813 | |||
1814 | static int __init amiga_floppy_init(void) | ||
1815 | { | ||
1816 | return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe); | ||
1817 | } | ||
1818 | |||
1819 | module_init(amiga_floppy_init); | ||
1820 | |||
1821 | #ifndef MODULE | ||
1829 | static int __init amiga_floppy_setup (char *str) | 1822 | static int __init amiga_floppy_setup (char *str) |
1830 | { | 1823 | { |
1831 | int n; | 1824 | int n; |
@@ -1840,3 +1833,5 @@ static int __init amiga_floppy_setup (char *str) | |||
1840 | 1833 | ||
1841 | __setup("floppy=", amiga_floppy_setup); | 1834 | __setup("floppy=", amiga_floppy_setup); |
1842 | #endif | 1835 | #endif |
1836 | |||
1837 | MODULE_ALIAS("platform:amiga-floppy"); | ||
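
The amiflop conversion above replaces a bare module_init() probe with a platform driver whose probe routine is handed to platform_driver_probe(). As a rough, untested sketch (kernel code, so it only builds in-tree), the minimal shape of that pattern looks like this; "example-floppy" is a placeholder name, not a real device.

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init example_probe(struct platform_device *pdev)
    {
        /* hardware discovery and resource setup happen here; returning
         * -ENODEV keeps the driver unbound when the device is absent */
        return 0;
    }

    static struct platform_driver example_driver = {
        .driver = {
            .name  = "example-floppy",
            .owner = THIS_MODULE,
        },
    };

    static int __init example_init(void)
    {
        /* platform_driver_probe() registers the driver and invokes the probe
         * routine for an already-registered matching device, which lets the
         * probe code stay in __init memory. */
        return platform_driver_probe(&example_driver, example_probe);
    }
    module_init(example_init);

    MODULE_ALIAS("platform:example-floppy");
    MODULE_LICENSE("GPL");
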
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 44bf6d11197e..d48a1dfd7b24 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -235,7 +235,7 @@ void drbd_endio_pri(struct bio *bio, int error) | |||
235 | if (unlikely(error)) { | 235 | if (unlikely(error)) { |
236 | what = (bio_data_dir(bio) == WRITE) | 236 | what = (bio_data_dir(bio) == WRITE) |
237 | ? write_completed_with_error | 237 | ? write_completed_with_error |
238 | : (bio_rw(bio) == READA) | 238 | : (bio_rw(bio) == READ) |
239 | ? read_completed_with_error | 239 | ? read_completed_with_error |
240 | : read_ahead_completed_with_error; | 240 | : read_ahead_completed_with_error; |
241 | } else | 241 | } else |
diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 034e6dfc878c..81c78b3ce2df 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c | |||
@@ -164,12 +164,12 @@ unsigned long read_timer(void) | |||
164 | unsigned long t, flags; | 164 | unsigned long t, flags; |
165 | int i; | 165 | int i; |
166 | 166 | ||
167 | spin_lock_irqsave(&i8253_lock, flags); | 167 | raw_spin_lock_irqsave(&i8253_lock, flags); |
168 | t = jiffies * 11932; | 168 | t = jiffies * 11932; |
169 | outb_p(0, 0x43); | 169 | outb_p(0, 0x43); |
170 | i = inb_p(0x40); | 170 | i = inb_p(0x40); |
171 | i |= inb(0x40) << 8; | 171 | i |= inb(0x40) << 8; |
172 | spin_unlock_irqrestore(&i8253_lock, flags); | 172 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
173 | return(t - i); | 173 | return(t - i); |
174 | } | 174 | } |
175 | #endif | 175 | #endif |
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index 8dfd24721a82..78a62ebe75c7 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -627,7 +627,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id) | |||
627 | char data; | 627 | char data; |
628 | int char_count; | 628 | int char_count; |
629 | int save_cnt; | 629 | int save_cnt; |
630 | int len; | ||
631 | 630 | ||
632 | /* determine the channel and change to that context */ | 631 | /* determine the channel and change to that context */ |
633 | channel = (u_short) (base_addr[CyLICR] >> 2); | 632 | channel = (u_short) (base_addr[CyLICR] >> 2); |
@@ -1528,7 +1527,6 @@ static int | |||
1528 | cy_ioctl(struct tty_struct *tty, struct file *file, | 1527 | cy_ioctl(struct tty_struct *tty, struct file *file, |
1529 | unsigned int cmd, unsigned long arg) | 1528 | unsigned int cmd, unsigned long arg) |
1530 | { | 1529 | { |
1531 | unsigned long val; | ||
1532 | struct cyclades_port *info = tty->driver_data; | 1530 | struct cyclades_port *info = tty->driver_data; |
1533 | int ret_val = 0; | 1531 | int ret_val = 0; |
1534 | void __user *argp = (void __user *)arg; | 1532 | void __user *argp = (void __user *)arg; |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 6da962c9b21c..d71f0fc34b46 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1875,6 +1875,7 @@ got_driver: | |||
1875 | */ | 1875 | */ |
1876 | if (filp->f_op == &hung_up_tty_fops) | 1876 | if (filp->f_op == &hung_up_tty_fops) |
1877 | filp->f_op = &tty_fops; | 1877 | filp->f_op = &tty_fops; |
1878 | unlock_kernel(); | ||
1878 | goto retry_open; | 1879 | goto retry_open; |
1879 | } | 1880 | } |
1880 | unlock_kernel(); | 1881 | unlock_kernel(); |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 1aea7157d8ff..f8e57c6303f2 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -100,7 +100,6 @@ struct menu_device { | |||
100 | int needs_update; | 100 | int needs_update; |
101 | 101 | ||
102 | unsigned int expected_us; | 102 | unsigned int expected_us; |
103 | unsigned int measured_us; | ||
104 | u64 predicted_us; | 103 | u64 predicted_us; |
105 | unsigned int exit_us; | 104 | unsigned int exit_us; |
106 | unsigned int bucket; | 105 | unsigned int bucket; |
@@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_device *dev) | |||
187 | int i; | 186 | int i; |
188 | int multiplier; | 187 | int multiplier; |
189 | 188 | ||
190 | data->last_state_idx = 0; | ||
191 | data->exit_us = 0; | ||
192 | |||
193 | if (data->needs_update) { | 189 | if (data->needs_update) { |
194 | menu_update(dev); | 190 | menu_update(dev); |
195 | data->needs_update = 0; | 191 | data->needs_update = 0; |
196 | } | 192 | } |
197 | 193 | ||
194 | data->last_state_idx = 0; | ||
195 | data->exit_us = 0; | ||
196 | |||
198 | /* Special case when user has set very strict latency requirement */ | 197 | /* Special case when user has set very strict latency requirement */ |
199 | if (unlikely(latency_req == 0)) | 198 | if (unlikely(latency_req == 0)) |
200 | return 0; | 199 | return 0; |
@@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev) | |||
294 | new_factor = data->correction_factor[data->bucket] | 293 | new_factor = data->correction_factor[data->bucket] |
295 | * (DECAY - 1) / DECAY; | 294 | * (DECAY - 1) / DECAY; |
296 | 295 | ||
297 | if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING) | 296 | if (data->expected_us > 0 && measured_us < MAX_INTERESTING) |
298 | new_factor += RESOLUTION * measured_us / data->expected_us; | 297 | new_factor += RESOLUTION * measured_us / data->expected_us; |
299 | else | 298 | else |
300 | /* | 299 | /* |
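
menu_update() above maintains a per-bucket correction factor that is an exponentially decayed ratio of measured to expected idle time; the fix reads the freshly computed local measured_us rather than a stale field. The toy calculation below shows the update rule from the hunk; the DECAY and RESOLUTION constants are assumed values for illustration, not necessarily the ones defined in menu.c.

    #include <stdio.h>

    /* Illustrative constants; the real governor defines its own DECAY and
     * RESOLUTION in drivers/cpuidle/governors/menu.c. */
    #define DECAY      8
    #define RESOLUTION 1024

    /* One step of the running correction factor: keep (DECAY-1)/DECAY of the
     * old value and fold in the latest measured/expected ratio. */
    static unsigned long long update_factor(unsigned long long old_factor,
                                            unsigned int expected_us,
                                            unsigned int measured_us)
    {
        unsigned long long new_factor = old_factor * (DECAY - 1) / DECAY;

        if (expected_us > 0)
            new_factor += (unsigned long long)RESOLUTION * measured_us / expected_us;
        else
            new_factor += RESOLUTION;   /* no prediction to compare against */

        return new_factor;
    }

    int main(void)
    {
        /* Start from a neutral factor of RESOLUTION (ratio 1.0) and feed in a
         * sleep that lasted half as long as predicted. */
        unsigned long long f = RESOLUTION;

        f = update_factor(f, 1000 /* expected us */, 500 /* measured us */);
        printf("factor after one update: %llu (RESOLUTION=%d)\n", f, RESOLUTION);
        return 0;
    }
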
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 7cc31b3f40d8..6f25a20de99f 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -290,6 +290,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
290 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 290 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
291 | struct sh_desc *desc; | 291 | struct sh_desc *desc; |
292 | struct sh_dmae_slave *param = chan->private; | 292 | struct sh_dmae_slave *param = chan->private; |
293 | int ret; | ||
293 | 294 | ||
294 | pm_runtime_get_sync(sh_chan->dev); | 295 | pm_runtime_get_sync(sh_chan->dev); |
295 | 296 | ||
@@ -301,11 +302,15 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
301 | struct sh_dmae_slave_config *cfg; | 302 | struct sh_dmae_slave_config *cfg; |
302 | 303 | ||
303 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); | 304 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); |
304 | if (!cfg) | 305 | if (!cfg) { |
305 | return -EINVAL; | 306 | ret = -EINVAL; |
307 | goto efindslave; | ||
308 | } | ||
306 | 309 | ||
307 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) | 310 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) { |
308 | return -EBUSY; | 311 | ret = -EBUSY; |
312 | goto etestused; | ||
313 | } | ||
309 | 314 | ||
310 | param->config = cfg; | 315 | param->config = cfg; |
311 | 316 | ||
@@ -334,10 +339,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
334 | } | 339 | } |
335 | spin_unlock_bh(&sh_chan->desc_lock); | 340 | spin_unlock_bh(&sh_chan->desc_lock); |
336 | 341 | ||
337 | if (!sh_chan->descs_allocated) | 342 | if (!sh_chan->descs_allocated) { |
338 | pm_runtime_put(sh_chan->dev); | 343 | ret = -ENOMEM; |
344 | goto edescalloc; | ||
345 | } | ||
339 | 346 | ||
340 | return sh_chan->descs_allocated; | 347 | return sh_chan->descs_allocated; |
348 | |||
349 | edescalloc: | ||
350 | if (param) | ||
351 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
352 | etestused: | ||
353 | efindslave: | ||
354 | pm_runtime_put(sh_chan->dev); | ||
355 | return ret; | ||
341 | } | 356 | } |
342 | 357 | ||
343 | /* | 358 | /* |
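
The shdma hunk above converts two early "return -E..." statements into gotos so that pm_runtime_get_sync() is always balanced by pm_runtime_put() on failure, and the slave bit is cleared only if it was actually taken. A generic user-space sketch of that staged-unwind idiom; the acquire/release names here are invented for the example.

    #include <stdio.h>
    #include <stdbool.h>

    /* Invented stand-ins for pm_runtime_get_sync(), sh_dmae_find_slave(),
     * test_and_set_bit() and friends. */
    static void get_runtime(void)       { puts("runtime reference taken"); }
    static void put_runtime(void)       { puts("runtime reference dropped"); }
    static bool find_slave_config(void) { return true; }
    static bool claim_slave(void)       { return false; }  /* pretend it is busy */
    static void release_slave(void)     { puts("slave released"); }
    static int  alloc_descriptors(void) { return 0; }

    static int alloc_chan_resources(void)
    {
        int ret;

        get_runtime();

        if (!find_slave_config()) {
            ret = -1;               /* -EINVAL in the driver */
            goto efindslave;
        }
        if (!claim_slave()) {
            ret = -2;               /* -EBUSY in the driver */
            goto etestused;
        }

        ret = alloc_descriptors();
        if (!ret) {
            ret = -3;               /* -ENOMEM in the driver */
            goto edescalloc;
        }
        return ret;                 /* success: number of descriptors */

    edescalloc:
        release_slave();            /* undo only what was actually taken */
    etestused:
    efindslave:
        put_runtime();              /* always rebalance the first step */
        return ret;
    }

    int main(void)
    {
        printf("alloc_chan_resources() = %d\n", alloc_chan_resources());
        return 0;
    }
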
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c index 753219cf993a..41a9388f2fde 100644 --- a/drivers/gpio/it8761e_gpio.c +++ b/drivers/gpio/it8761e_gpio.c | |||
@@ -80,8 +80,8 @@ static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num) | |||
80 | u16 reg; | 80 | u16 reg; |
81 | u8 bit; | 81 | u8 bit; |
82 | 82 | ||
83 | bit = gpio_num % 7; | 83 | bit = gpio_num % 8; |
84 | reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba; | 84 | reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba; |
85 | 85 | ||
86 | return !!(inb(reg) & (1 << bit)); | 86 | return !!(inb(reg) & (1 << bit)); |
87 | } | 87 | } |
@@ -91,8 +91,8 @@ static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num) | |||
91 | u8 curr_dirs; | 91 | u8 curr_dirs; |
92 | u8 io_reg, bit; | 92 | u8 io_reg, bit; |
93 | 93 | ||
94 | bit = gpio_num % 7; | 94 | bit = gpio_num % 8; |
95 | io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO; | 95 | io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO; |
96 | 96 | ||
97 | spin_lock(&sio_lock); | 97 | spin_lock(&sio_lock); |
98 | 98 | ||
@@ -116,8 +116,8 @@ static void it8761e_gpio_set(struct gpio_chip *gc, | |||
116 | u8 curr_vals, bit; | 116 | u8 curr_vals, bit; |
117 | u16 reg; | 117 | u16 reg; |
118 | 118 | ||
119 | bit = gpio_num % 7; | 119 | bit = gpio_num % 8; |
120 | reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba; | 120 | reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba; |
121 | 121 | ||
122 | spin_lock(&sio_lock); | 122 | spin_lock(&sio_lock); |
123 | 123 | ||
@@ -135,8 +135,8 @@ static int it8761e_gpio_direction_out(struct gpio_chip *gc, | |||
135 | { | 135 | { |
136 | u8 curr_dirs, io_reg, bit; | 136 | u8 curr_dirs, io_reg, bit; |
137 | 137 | ||
138 | bit = gpio_num % 7; | 138 | bit = gpio_num % 8; |
139 | io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO; | 139 | io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO; |
140 | 140 | ||
141 | it8761e_gpio_set(gc, gpio_num, val); | 141 | it8761e_gpio_set(gc, gpio_num, val); |
142 | 142 | ||
@@ -200,7 +200,7 @@ static int __init it8761e_gpio_init(void) | |||
200 | return -EBUSY; | 200 | return -EBUSY; |
201 | 201 | ||
202 | it8761e_gpio_chip.base = -1; | 202 | it8761e_gpio_chip.base = -1; |
203 | it8761e_gpio_chip.ngpio = 14; | 203 | it8761e_gpio_chip.ngpio = 16; |
204 | 204 | ||
205 | err = gpiochip_add(&it8761e_gpio_chip); | 205 | err = gpiochip_add(&it8761e_gpio_chip); |
206 | if (err < 0) | 206 | if (err < 0) |
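
The it8761e change above fixes how a GPIO number is split into (register, bit): the chip exposes two 8-bit ports, so the divisor must be 8 rather than 7, and the driver now advertises 16 lines. The arithmetic, checked in plain C with a hypothetical base address:

    #include <stdio.h>

    int main(void)
    {
        unsigned gpio_ba = 0x100;   /* hypothetical I/O base address */

        for (unsigned gpio_num = 0; gpio_num < 16; gpio_num++) {
            /* Fixed math from the patch: 8 bits per port register. */
            unsigned bit = gpio_num % 8;
            unsigned reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;

            /* With the old "% 7" / ">= 7" code, GPIO 7 would have landed in
             * the second register at bit 0 instead of the first register at
             * bit 7, and lines 14-15 were unreachable. */
            printf("gpio %2u -> reg 0x%x bit %u\n", gpio_num, reg, bit);
        }
        return 0;
    }
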
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2b8b969d0c15..df6a9cd82c4d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -456,11 +456,15 @@ i915_error_object_create(struct drm_device *dev, | |||
456 | 456 | ||
457 | for (page = 0; page < page_count; page++) { | 457 | for (page = 0; page < page_count; page++) { |
458 | void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | 458 | void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); |
459 | unsigned long flags; | ||
460 | |||
459 | if (d == NULL) | 461 | if (d == NULL) |
460 | goto unwind; | 462 | goto unwind; |
461 | s = kmap_atomic(src_priv->pages[page], KM_USER0); | 463 | local_irq_save(flags); |
464 | s = kmap_atomic(src_priv->pages[page], KM_IRQ0); | ||
462 | memcpy(d, s, PAGE_SIZE); | 465 | memcpy(d, s, PAGE_SIZE); |
463 | kunmap_atomic(s, KM_USER0); | 466 | kunmap_atomic(s, KM_IRQ0); |
467 | local_irq_restore(flags); | ||
464 | dst->pages[page] = d; | 468 | dst->pages[page] = d; |
465 | } | 469 | } |
466 | dst->page_count = page_count; | 470 | dst->page_count = page_count; |
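
The i915 error-capture hunk above switches the atomic kmap to the KM_IRQ0 slot and brackets it with local_irq_save()/local_irq_restore(), so the copy cannot collide with a KM_USER0 mapping that was live when the capture ran. A condensed, untested sketch of the pattern as it appears in the patch, using the kmap_atomic() API of that era, which still took an explicit km_type:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Sketch only: copies one page with interrupts disabled, using the
     * KM_IRQ0 atomic-kmap slot so it cannot clash with a KM_USER0 mapping
     * in use by interrupted code. */
    static void copy_page_atomic(void *dst, struct page *src_page)
    {
        unsigned long flags;
        void *s;

        local_irq_save(flags);
        s = kmap_atomic(src_page, KM_IRQ0);
        memcpy(dst, s, PAGE_SIZE);
        kunmap_atomic(s, KM_IRQ0);
        local_irq_restore(flags);
    }
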
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4b05563d99e1..b3749d47be7b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -216,6 +216,7 @@ static struct drm_driver driver_old = { | |||
216 | .mmap = drm_mmap, | 216 | .mmap = drm_mmap, |
217 | .poll = drm_poll, | 217 | .poll = drm_poll, |
218 | .fasync = drm_fasync, | 218 | .fasync = drm_fasync, |
219 | .read = drm_read, | ||
219 | #ifdef CONFIG_COMPAT | 220 | #ifdef CONFIG_COMPAT |
220 | .compat_ioctl = radeon_compat_ioctl, | 221 | .compat_ioctl = radeon_compat_ioctl, |
221 | #endif | 222 | #endif |
@@ -304,6 +305,7 @@ static struct drm_driver kms_driver = { | |||
304 | .mmap = radeon_mmap, | 305 | .mmap = radeon_mmap, |
305 | .poll = drm_poll, | 306 | .poll = drm_poll, |
306 | .fasync = drm_fasync, | 307 | .fasync = drm_fasync, |
308 | .read = drm_read, | ||
307 | #ifdef CONFIG_COMPAT | 309 | #ifdef CONFIG_COMPAT |
308 | .compat_ioctl = radeon_kms_compat_ioctl, | 310 | .compat_ioctl = radeon_kms_compat_ioctl, |
309 | #endif | 311 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 40ab6d9c3736..cc5316dcf580 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
424 | if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && | 424 | if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && |
425 | (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { | 425 | (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { |
426 | u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); | 426 | u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); |
427 | offset = *cmd << 10; | 427 | offset = *cmd3 << 10; |
428 | if (radeon_check_and_fixup_offset | 428 | if (radeon_check_and_fixup_offset |
429 | (dev_priv, file_priv, &offset)) { | 429 | (dev_priv, file_priv, &offset)) { |
430 | DRM_ERROR("Invalid second packet offset\n"); | 430 | DRM_ERROR("Invalid second packet offset\n"); |
@@ -2895,9 +2895,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, | |||
2895 | return rv; | 2895 | return rv; |
2896 | rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, | 2896 | rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, |
2897 | cmdbuf->bufsz); | 2897 | cmdbuf->bufsz); |
2898 | if (rv) | 2898 | if (rv) { |
2899 | drm_buffer_free(cmdbuf->buffer); | ||
2899 | return rv; | 2900 | return rv; |
2900 | } | 2901 | } |
2902 | } else | ||
2903 | goto done; | ||
2901 | 2904 | ||
2902 | orig_nbox = cmdbuf->nbox; | 2905 | orig_nbox = cmdbuf->nbox; |
2903 | 2906 | ||
@@ -2905,8 +2908,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, | |||
2905 | int temp; | 2908 | int temp; |
2906 | temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); | 2909 | temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); |
2907 | 2910 | ||
2908 | if (cmdbuf->bufsz != 0) | 2911 | drm_buffer_free(cmdbuf->buffer); |
2909 | drm_buffer_free(cmdbuf->buffer); | ||
2910 | 2912 | ||
2911 | return temp; | 2913 | return temp; |
2912 | } | 2914 | } |
@@ -3012,16 +3014,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, | |||
3012 | } | 3014 | } |
3013 | } | 3015 | } |
3014 | 3016 | ||
3015 | if (cmdbuf->bufsz != 0) | 3017 | drm_buffer_free(cmdbuf->buffer); |
3016 | drm_buffer_free(cmdbuf->buffer); | ||
3017 | 3018 | ||
3019 | done: | ||
3018 | DRM_DEBUG("DONE\n"); | 3020 | DRM_DEBUG("DONE\n"); |
3019 | COMMIT_RING(); | 3021 | COMMIT_RING(); |
3020 | return 0; | 3022 | return 0; |
3021 | 3023 | ||
3022 | err: | 3024 | err: |
3023 | if (cmdbuf->bufsz != 0) | 3025 | drm_buffer_free(cmdbuf->buffer); |
3024 | drm_buffer_free(cmdbuf->buffer); | ||
3025 | return -EINVAL; | 3026 | return -EINVAL; |
3026 | } | 3027 | } |
3027 | 3028 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index dd47b2a9a791..0e3754a3a303 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1716,40 +1716,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1716 | } | 1716 | } |
1717 | EXPORT_SYMBOL(ttm_bo_wait); | 1717 | EXPORT_SYMBOL(ttm_bo_wait); |
1718 | 1718 | ||
1719 | void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo) | ||
1720 | { | ||
1721 | atomic_set(&bo->reserved, 0); | ||
1722 | wake_up_all(&bo->event_queue); | ||
1723 | } | ||
1724 | |||
1725 | int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible, | ||
1726 | bool no_wait) | ||
1727 | { | ||
1728 | int ret; | ||
1729 | |||
1730 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { | ||
1731 | if (no_wait) | ||
1732 | return -EBUSY; | ||
1733 | else if (interruptible) { | ||
1734 | ret = wait_event_interruptible | ||
1735 | (bo->event_queue, atomic_read(&bo->reserved) == 0); | ||
1736 | if (unlikely(ret != 0)) | ||
1737 | return ret; | ||
1738 | } else { | ||
1739 | wait_event(bo->event_queue, | ||
1740 | atomic_read(&bo->reserved) == 0); | ||
1741 | } | ||
1742 | } | ||
1743 | return 0; | ||
1744 | } | ||
1745 | |||
1746 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) | 1719 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) |
1747 | { | 1720 | { |
1748 | int ret = 0; | 1721 | int ret = 0; |
1749 | 1722 | ||
1750 | /* | 1723 | /* |
1751 | * Using ttm_bo_reserve instead of ttm_bo_block_reservation | 1724 | * Using ttm_bo_reserve makes sure the lru lists are updated. |
1752 | * makes sure the lru lists are updated. | ||
1753 | */ | 1725 | */ |
1754 | 1726 | ||
1755 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); | 1727 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); |
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index 3d172ef04ee1..de41e55a944a 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -204,7 +204,6 @@ static int __ttm_vt_unlock(struct ttm_lock *lock) | |||
204 | lock->flags &= ~TTM_VT_LOCK; | 204 | lock->flags &= ~TTM_VT_LOCK; |
205 | wake_up_all(&lock->queue); | 205 | wake_up_all(&lock->queue); |
206 | spin_unlock(&lock->lock); | 206 | spin_unlock(&lock->lock); |
207 | printk(KERN_INFO TTM_PFX "vt unlock.\n"); | ||
208 | 207 | ||
209 | return ret; | 208 | return ret; |
210 | } | 209 | } |
@@ -265,10 +264,8 @@ int ttm_vt_lock(struct ttm_lock *lock, | |||
265 | ttm_lock_type, &ttm_vt_lock_remove, NULL); | 264 | ttm_lock_type, &ttm_vt_lock_remove, NULL); |
266 | if (ret) | 265 | if (ret) |
267 | (void)__ttm_vt_unlock(lock); | 266 | (void)__ttm_vt_unlock(lock); |
268 | else { | 267 | else |
269 | lock->vt_holder = tfile; | 268 | lock->vt_holder = tfile; |
270 | printk(KERN_INFO TTM_PFX "vt lock.\n"); | ||
271 | } | ||
272 | 269 | ||
273 | return ret; | 270 | return ret; |
274 | } | 271 | } |
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c index 7e597d7f770f..24663a8717b1 100644 --- a/drivers/hid/hid-cherry.c +++ b/drivers/hid/hid-cherry.c | |||
@@ -59,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
59 | 59 | ||
60 | static const struct hid_device_id ch_devices[] = { | 60 | static const struct hid_device_id ch_devices[] = { |
61 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, | 61 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, |
62 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, | ||
62 | { } | 63 | { } |
63 | }; | 64 | }; |
64 | MODULE_DEVICE_TABLE(hid, ch_devices); | 65 | MODULE_DEVICE_TABLE(hid, ch_devices); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 2e2aa759d230..143e788b729b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1043,13 +1043,8 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, | |||
1043 | 1043 | ||
1044 | if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) | 1044 | if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) |
1045 | hid->hiddev_report_event(hid, report); | 1045 | hid->hiddev_report_event(hid, report); |
1046 | if (hid->claimed & HID_CLAIMED_HIDRAW) { | 1046 | if (hid->claimed & HID_CLAIMED_HIDRAW) |
1047 | /* numbered reports need to be passed with the report num */ | 1047 | hidraw_report_event(hid, data, size); |
1048 | if (report_enum->numbered) | ||
1049 | hidraw_report_event(hid, data - 1, size + 1); | ||
1050 | else | ||
1051 | hidraw_report_event(hid, data, size); | ||
1052 | } | ||
1053 | 1048 | ||
1054 | for (a = 0; a < report->maxfield; a++) | 1049 | for (a = 0; a < report->maxfield; a++) |
1055 | hid_input_field(hid, report->field[a], cdata, interrupt); | 1050 | hid_input_field(hid, report->field[a], cdata, interrupt); |
@@ -1296,6 +1291,7 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1296 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, | 1291 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, |
1297 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, | 1292 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, |
1298 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, | 1293 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, |
1294 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, | ||
1299 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, | 1295 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, |
1300 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, | 1296 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, |
1301 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, | 1297 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 797e06470356..09d27649a0f7 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -131,6 +131,7 @@ | |||
131 | 131 | ||
132 | #define USB_VENDOR_ID_CHERRY 0x046a | 132 | #define USB_VENDOR_ID_CHERRY 0x046a |
133 | #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 | 133 | #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 |
134 | #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027 | ||
134 | 135 | ||
135 | #define USB_VENDOR_ID_CHIC 0x05fe | 136 | #define USB_VENDOR_ID_CHIC 0x05fe |
136 | #define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014 | 137 | #define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014 |
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 9b24fc510712..4777bbfa1cc2 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * HID driver for N-Trig touchscreens | 2 | * HID driver for N-Trig touchscreens |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Rafi Rubin | 4 | * Copyright (c) 2008-2010 Rafi Rubin |
5 | * Copyright (c) 2009 Stephane Chatty | 5 | * Copyright (c) 2009-2010 Stephane Chatty |
6 | * | 6 | * |
7 | */ | 7 | */ |
8 | 8 | ||
@@ -15,6 +15,8 @@ | |||
15 | 15 | ||
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/hid.h> | 17 | #include <linux/hid.h> |
18 | #include <linux/usb.h> | ||
19 | #include "usbhid/usbhid.h" | ||
18 | #include <linux/module.h> | 20 | #include <linux/module.h> |
19 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
20 | 22 | ||
@@ -22,17 +24,16 @@ | |||
22 | 24 | ||
23 | #define NTRIG_DUPLICATE_USAGES 0x001 | 25 | #define NTRIG_DUPLICATE_USAGES 0x001 |
24 | 26 | ||
25 | #define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ | ||
26 | EV_KEY, (c)) | ||
27 | |||
28 | struct ntrig_data { | 27 | struct ntrig_data { |
29 | /* Incoming raw values for a single contact */ | 28 | /* Incoming raw values for a single contact */ |
30 | __u16 x, y, w, h; | 29 | __u16 x, y, w, h; |
31 | __u16 id; | 30 | __u16 id; |
32 | __u8 confidence; | 31 | |
32 | bool tipswitch; | ||
33 | bool confidence; | ||
34 | bool first_contact_touch; | ||
33 | 35 | ||
34 | bool reading_mt; | 36 | bool reading_mt; |
35 | __u8 first_contact_confidence; | ||
36 | 37 | ||
37 | __u8 mt_footer[4]; | 38 | __u8 mt_footer[4]; |
38 | __u8 mt_foot_count; | 39 | __u8 mt_foot_count; |
@@ -139,9 +140,10 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
139 | case 0xff000001: | 140 | case 0xff000001: |
140 | /* Tag indicating the start of a multitouch group */ | 141 | /* Tag indicating the start of a multitouch group */ |
141 | nd->reading_mt = 1; | 142 | nd->reading_mt = 1; |
142 | nd->first_contact_confidence = 0; | 143 | nd->first_contact_touch = 0; |
143 | break; | 144 | break; |
144 | case HID_DG_TIPSWITCH: | 145 | case HID_DG_TIPSWITCH: |
146 | nd->tipswitch = value; | ||
145 | /* Prevent emission of touch until validated */ | 147 | /* Prevent emission of touch until validated */ |
146 | return 1; | 148 | return 1; |
147 | case HID_DG_CONFIDENCE: | 149 | case HID_DG_CONFIDENCE: |
@@ -169,8 +171,14 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
169 | * to emit a normal (X, Y) position | 171 | * to emit a normal (X, Y) position |
170 | */ | 172 | */ |
171 | if (!nd->reading_mt) { | 173 | if (!nd->reading_mt) { |
174 | /* | ||
175 | * TipSwitch indicates the presence of a | ||
176 | * finger in single touch mode. | ||
177 | */ | ||
178 | input_report_key(input, BTN_TOUCH, | ||
179 | nd->tipswitch); | ||
172 | input_report_key(input, BTN_TOOL_DOUBLETAP, | 180 | input_report_key(input, BTN_TOOL_DOUBLETAP, |
173 | (nd->confidence != 0)); | 181 | nd->tipswitch); |
174 | input_event(input, EV_ABS, ABS_X, nd->x); | 182 | input_event(input, EV_ABS, ABS_X, nd->x); |
175 | input_event(input, EV_ABS, ABS_Y, nd->y); | 183 | input_event(input, EV_ABS, ABS_Y, nd->y); |
176 | } | 184 | } |
@@ -209,7 +217,13 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
209 | 217 | ||
210 | /* emit a normal (X, Y) for the first point only */ | 218 | /* emit a normal (X, Y) for the first point only */ |
211 | if (nd->id == 0) { | 219 | if (nd->id == 0) { |
212 | nd->first_contact_confidence = nd->confidence; | 220 | /* |
221 | * TipSwitch is superfluous in multitouch | ||
222 | * mode. The footer events tell us | ||
223 | * if there is a finger on the screen or | ||
224 | * not. | ||
225 | */ | ||
226 | nd->first_contact_touch = nd->confidence; | ||
213 | input_event(input, EV_ABS, ABS_X, nd->x); | 227 | input_event(input, EV_ABS, ABS_X, nd->x); |
214 | input_event(input, EV_ABS, ABS_Y, nd->y); | 228 | input_event(input, EV_ABS, ABS_Y, nd->y); |
215 | } | 229 | } |
@@ -239,30 +253,11 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, | |||
239 | 253 | ||
240 | nd->reading_mt = 0; | 254 | nd->reading_mt = 0; |
241 | 255 | ||
242 | if (nd->first_contact_confidence) { | 256 | if (nd->first_contact_touch) { |
243 | switch (value) { | 257 | input_report_key(input, BTN_TOOL_DOUBLETAP, 1); |
244 | case 0: /* for single touch devices */ | ||
245 | case 1: | ||
246 | input_report_key(input, | ||
247 | BTN_TOOL_DOUBLETAP, 1); | ||
248 | break; | ||
249 | case 2: | ||
250 | input_report_key(input, | ||
251 | BTN_TOOL_TRIPLETAP, 1); | ||
252 | break; | ||
253 | case 3: | ||
254 | default: | ||
255 | input_report_key(input, | ||
256 | BTN_TOOL_QUADTAP, 1); | ||
257 | } | ||
258 | input_report_key(input, BTN_TOUCH, 1); | 258 | input_report_key(input, BTN_TOUCH, 1); |
259 | } else { | 259 | } else { |
260 | input_report_key(input, | 260 | input_report_key(input, BTN_TOOL_DOUBLETAP, 0); |
261 | BTN_TOOL_DOUBLETAP, 0); | ||
262 | input_report_key(input, | ||
263 | BTN_TOOL_TRIPLETAP, 0); | ||
264 | input_report_key(input, | ||
265 | BTN_TOOL_QUADTAP, 0); | ||
266 | input_report_key(input, BTN_TOUCH, 0); | 261 | input_report_key(input, BTN_TOUCH, 0); |
267 | } | 262 | } |
268 | break; | 263 | break; |
@@ -286,6 +281,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
286 | struct ntrig_data *nd; | 281 | struct ntrig_data *nd; |
287 | struct hid_input *hidinput; | 282 | struct hid_input *hidinput; |
288 | struct input_dev *input; | 283 | struct input_dev *input; |
284 | struct hid_report *report; | ||
289 | 285 | ||
290 | if (id->driver_data) | 286 | if (id->driver_data) |
291 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; | 287 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; |
@@ -327,13 +323,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
327 | __clear_bit(BTN_TOOL_PEN, input->keybit); | 323 | __clear_bit(BTN_TOOL_PEN, input->keybit); |
328 | __clear_bit(BTN_TOOL_FINGER, input->keybit); | 324 | __clear_bit(BTN_TOOL_FINGER, input->keybit); |
329 | __clear_bit(BTN_0, input->keybit); | 325 | __clear_bit(BTN_0, input->keybit); |
330 | /* | ||
331 | * A little something special to enable | ||
332 | * two and three finger taps. | ||
333 | */ | ||
334 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | 326 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); |
335 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
336 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
337 | /* | 327 | /* |
338 | * The physical touchscreen (single touch) | 328 | * The physical touchscreen (single touch) |
339 | * input has a value for physical, whereas | 329 | * input has a value for physical, whereas |
@@ -349,6 +339,12 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
349 | } | 339 | } |
350 | } | 340 | } |
351 | 341 | ||
342 | /* This is needed for devices with more recent firmware versions */ | ||
343 | report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a]; | ||
344 | if (report) | ||
345 | usbhid_submit_report(hdev, report, USB_DIR_OUT); | ||
346 | |||
347 | |||
352 | return 0; | 348 | return 0; |
353 | err_free: | 349 | err_free: |
354 | kfree(nd); | 350 | kfree(nd); |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 7502a4b2fa86..402d5574b574 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
@@ -76,7 +76,7 @@ static int sony_set_operational_usb(struct hid_device *hdev) | |||
76 | 76 | ||
77 | static int sony_set_operational_bt(struct hid_device *hdev) | 77 | static int sony_set_operational_bt(struct hid_device *hdev) |
78 | { | 78 | { |
79 | unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 }; | 79 | unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; |
80 | return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); | 80 | return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); |
81 | } | 81 | } |
82 | 82 | ||
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index f7700cf49721..f947d8337e21 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c | |||
@@ -277,7 +277,6 @@ static int __init wacom_init(void) | |||
277 | ret = hid_register_driver(&wacom_driver); | 277 | ret = hid_register_driver(&wacom_driver); |
278 | if (ret) | 278 | if (ret) |
279 | printk(KERN_ERR "can't register wacom driver\n"); | 279 | printk(KERN_ERR "can't register wacom driver\n"); |
280 | printk(KERN_ERR "wacom driver registered\n"); | ||
281 | return ret; | 280 | return ret; |
282 | } | 281 | } |
283 | 282 | ||
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 56d06cd8075b..7b85b696fdab 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
@@ -999,13 +999,6 @@ static int usbhid_start(struct hid_device *hid) | |||
999 | } | 999 | } |
1000 | } | 1000 | } |
1001 | 1001 | ||
1002 | init_waitqueue_head(&usbhid->wait); | ||
1003 | INIT_WORK(&usbhid->reset_work, hid_reset); | ||
1004 | INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues); | ||
1005 | setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); | ||
1006 | |||
1007 | spin_lock_init(&usbhid->lock); | ||
1008 | |||
1009 | usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); | 1002 | usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); |
1010 | if (!usbhid->urbctrl) { | 1003 | if (!usbhid->urbctrl) { |
1011 | ret = -ENOMEM; | 1004 | ret = -ENOMEM; |
@@ -1179,6 +1172,12 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * | |||
1179 | usbhid->intf = intf; | 1172 | usbhid->intf = intf; |
1180 | usbhid->ifnum = interface->desc.bInterfaceNumber; | 1173 | usbhid->ifnum = interface->desc.bInterfaceNumber; |
1181 | 1174 | ||
1175 | init_waitqueue_head(&usbhid->wait); | ||
1176 | INIT_WORK(&usbhid->reset_work, hid_reset); | ||
1177 | INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues); | ||
1178 | setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); | ||
1179 | spin_lock_init(&usbhid->lock); | ||
1180 | |||
1182 | ret = hid_add_device(hid); | 1181 | ret = hid_add_device(hid); |
1183 | if (ret) { | 1182 | if (ret) { |
1184 | if (ret != -ENODEV) | 1183 | if (ret != -ENODEV) |
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 0f28d91f29d8..f085c18d2905 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
@@ -195,6 +195,9 @@ static unsigned int applesmc_accelerometer; | |||
195 | /* Indicates whether this computer has light sensors and keyboard backlight. */ | 195 | /* Indicates whether this computer has light sensors and keyboard backlight. */ |
196 | static unsigned int applesmc_light; | 196 | static unsigned int applesmc_light; |
197 | 197 | ||
198 | /* The number of fans handled by the driver */ | ||
199 | static unsigned int fans_handled; | ||
200 | |||
198 | /* Indicates which temperature sensors set to use. */ | 201 | /* Indicates which temperature sensors set to use. */ |
199 | static unsigned int applesmc_temperature_set; | 202 | static unsigned int applesmc_temperature_set; |
200 | 203 | ||
@@ -1492,39 +1495,24 @@ static int __init applesmc_init(void) | |||
1492 | 1495 | ||
1493 | /* create fan files */ | 1496 | /* create fan files */ |
1494 | count = applesmc_get_fan_count(); | 1497 | count = applesmc_get_fan_count(); |
1495 | if (count < 0) { | 1498 | if (count < 0) |
1496 | printk(KERN_ERR "applesmc: Cannot get the number of fans.\n"); | 1499 | printk(KERN_ERR "applesmc: Cannot get the number of fans.\n"); |
1497 | } else { | 1500 | else |
1498 | printk(KERN_INFO "applesmc: %d fans found.\n", count); | 1501 | printk(KERN_INFO "applesmc: %d fans found.\n", count); |
1499 | 1502 | ||
1500 | switch (count) { | 1503 | if (count > 4) { |
1501 | default: | 1504 | count = 4; |
1502 | printk(KERN_WARNING "applesmc: More than 4 fans found," | 1505 | printk(KERN_WARNING "applesmc: More than 4 fans found," |
1503 | " but at most 4 fans are supported" | 1506 | " but at most 4 fans are supported" |
1504 | " by the driver.\n"); | 1507 | " by the driver.\n"); |
1505 | case 4: | 1508 | } |
1506 | ret = sysfs_create_group(&pdev->dev.kobj, | 1509 | |
1507 | &fan_attribute_groups[3]); | 1510 | while (fans_handled < count) { |
1508 | if (ret) | 1511 | ret = sysfs_create_group(&pdev->dev.kobj, |
1509 | goto out_key_enumeration; | 1512 | &fan_attribute_groups[fans_handled]); |
1510 | case 3: | 1513 | if (ret) |
1511 | ret = sysfs_create_group(&pdev->dev.kobj, | 1514 | goto out_fans; |
1512 | &fan_attribute_groups[2]); | 1515 | fans_handled++; |
1513 | if (ret) | ||
1514 | goto out_key_enumeration; | ||
1515 | case 2: | ||
1516 | ret = sysfs_create_group(&pdev->dev.kobj, | ||
1517 | &fan_attribute_groups[1]); | ||
1518 | if (ret) | ||
1519 | goto out_key_enumeration; | ||
1520 | case 1: | ||
1521 | ret = sysfs_create_group(&pdev->dev.kobj, | ||
1522 | &fan_attribute_groups[0]); | ||
1523 | if (ret) | ||
1524 | goto out_fan_1; | ||
1525 | case 0: | ||
1526 | ; | ||
1527 | } | ||
1528 | } | 1516 | } |
1529 | 1517 | ||
1530 | for (i = 0; | 1518 | for (i = 0; |
@@ -1593,10 +1581,10 @@ out_accelerometer: | |||
1593 | applesmc_release_accelerometer(); | 1581 | applesmc_release_accelerometer(); |
1594 | out_temperature: | 1582 | out_temperature: |
1595 | sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); | 1583 | sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); |
1596 | sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]); | 1584 | out_fans: |
1597 | out_fan_1: | 1585 | while (fans_handled) |
1598 | sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]); | 1586 | sysfs_remove_group(&pdev->dev.kobj, |
1599 | out_key_enumeration: | 1587 | &fan_attribute_groups[--fans_handled]); |
1600 | sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); | 1588 | sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); |
1601 | out_name: | 1589 | out_name: |
1602 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); | 1590 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); |
@@ -1622,8 +1610,9 @@ static void __exit applesmc_exit(void) | |||
1622 | if (applesmc_accelerometer) | 1610 | if (applesmc_accelerometer) |
1623 | applesmc_release_accelerometer(); | 1611 | applesmc_release_accelerometer(); |
1624 | sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); | 1612 | sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); |
1625 | sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]); | 1613 | while (fans_handled) |
1626 | sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]); | 1614 | sysfs_remove_group(&pdev->dev.kobj, |
1615 | &fan_attribute_groups[--fans_handled]); | ||
1627 | sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); | 1616 | sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group); |
1628 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); | 1617 | sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr); |
1629 | platform_device_unregister(pdev); | 1618 | platform_device_unregister(pdev); |
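The applesmc hunks above replace a fall-through switch (one sysfs group per case, each with its own error label) with a single loop driven by a module-scope fans_handled counter, so both the init error path and module exit unwind exactly the groups that were actually created. A minimal sketch of that register-then-rollback pattern; the names my_register_groups, my_groups and groups_handled are illustrative, not part of the driver:

#include <linux/sysfs.h>
#include <linux/device.h>

static unsigned int groups_handled;     /* mirrors the driver's fans_handled */

static int my_register_groups(struct kobject *kobj,
                              const struct attribute_group *my_groups[],
                              unsigned int count)
{
        int ret;

        while (groups_handled < count) {
                ret = sysfs_create_group(kobj, my_groups[groups_handled]);
                if (ret)
                        goto out_unwind;
                groups_handled++;
        }
        return 0;

out_unwind:
        /* Remove only what was actually created, in reverse order. */
        while (groups_handled)
                sysfs_remove_group(kobj, my_groups[--groups_handled]);
        return ret;
}

Because the counter persists, the exit path can run the same reverse loop without knowing how many groups init managed to create.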
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c index 7f948105d8ad..0f388adc6187 100644 --- a/drivers/hwmon/asc7621.c +++ b/drivers/hwmon/asc7621.c | |||
@@ -268,8 +268,11 @@ static ssize_t store_fan16(struct device *dev, | |||
268 | if (strict_strtol(buf, 10, &reqval)) | 268 | if (strict_strtol(buf, 10, &reqval)) |
269 | return -EINVAL; | 269 | return -EINVAL; |
270 | 270 | ||
271 | /* If a minimum RPM of zero is requested, then we set the register to | ||
272 | 0xffff. This value allows the fan to be stopped completely without | ||
273 | generating an alarm. */ | ||
271 | reqval = | 274 | reqval = |
272 | (SENSORS_LIMIT((reqval) <= 0 ? 0 : 5400000 / (reqval), 0, 65534)); | 275 | (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe)); |
273 | 276 | ||
274 | mutex_lock(&data->update_lock); | 277 | mutex_lock(&data->update_lock); |
275 | data->reg[param->msb[0]] = (reqval >> 8) & 0xff; | 278 | data->reg[param->msb[0]] = (reqval >> 8) & 0xff; |
@@ -285,8 +288,9 @@ static ssize_t store_fan16(struct device *dev, | |||
285 | * Voltages are scaled in the device so that the nominal voltage | 288 | * Voltages are scaled in the device so that the nominal voltage |
286 | * is 3/4ths of the 0-255 range (i.e. 192). | 289 | * is 3/4ths of the 0-255 range (i.e. 192). |
287 | * If all voltages are 'normal' then all voltage registers will | 290 | * If all voltages are 'normal' then all voltage registers will |
288 | * read 0xC0. This doesn't help us if we don't have a point of refernce. | 291 | * read 0xC0. |
289 | * The data sheet however provides us with the full scale value for each | 292 | * |
293 | * The data sheet provides us with the 3/4 scale value for each voltage | ||
290 | * which is stored in in_scaling. The sda->index parameter value provides | 294 | * which is stored in in_scaling. The sda->index parameter value provides |
291 | * the index into in_scaling. | 295 | * the index into in_scaling. |
292 | * | 296 | * |
@@ -295,7 +299,7 @@ static ssize_t store_fan16(struct device *dev, | |||
295 | */ | 299 | */ |
296 | 300 | ||
297 | static int asc7621_in_scaling[] = { | 301 | static int asc7621_in_scaling[] = { |
298 | 3320, 3000, 4380, 6640, 16000 | 302 | 2500, 2250, 3300, 5000, 12000 |
299 | }; | 303 | }; |
300 | 304 | ||
301 | static ssize_t show_in10(struct device *dev, struct device_attribute *attr, | 305 | static ssize_t show_in10(struct device *dev, struct device_attribute *attr, |
@@ -306,19 +310,12 @@ static ssize_t show_in10(struct device *dev, struct device_attribute *attr, | |||
306 | u8 nr = sda->index; | 310 | u8 nr = sda->index; |
307 | 311 | ||
308 | mutex_lock(&data->update_lock); | 312 | mutex_lock(&data->update_lock); |
309 | regval = (data->reg[param->msb[0]] * asc7621_in_scaling[nr]) / 256; | 313 | regval = (data->reg[param->msb[0]] << 8) | (data->reg[param->lsb[0]]); |
310 | |||
311 | /* The LSB value is a 2-bit scaling of the MSB's LSbit value. | ||
312 | * I.E. If the maximim voltage for this input is 6640 millivolts then | ||
313 | * a MSB register value of 0 = 0mv and 255 = 6640mv. | ||
314 | * A 1 step change therefore represents 25.9mv (6640 / 256). | ||
315 | * The extra 2-bits therefore represent increments of 6.48mv. | ||
316 | */ | ||
317 | regval += ((asc7621_in_scaling[nr] / 256) / 4) * | ||
318 | (data->reg[param->lsb[0]] >> 6); | ||
319 | |||
320 | mutex_unlock(&data->update_lock); | 314 | mutex_unlock(&data->update_lock); |
321 | 315 | ||
316 | /* The LSB value is a 2-bit scaling of the MSB's LSbit value. */ | ||
317 | regval = (regval >> 6) * asc7621_in_scaling[nr] / (0xc0 << 2); | ||
318 | |||
322 | return sprintf(buf, "%u\n", regval); | 319 | return sprintf(buf, "%u\n", regval); |
323 | } | 320 | } |
324 | 321 | ||
@@ -331,7 +328,7 @@ static ssize_t show_in8(struct device *dev, struct device_attribute *attr, | |||
331 | 328 | ||
332 | return sprintf(buf, "%u\n", | 329 | return sprintf(buf, "%u\n", |
333 | ((data->reg[param->msb[0]] * | 330 | ((data->reg[param->msb[0]] * |
334 | asc7621_in_scaling[nr]) / 256)); | 331 | asc7621_in_scaling[nr]) / 0xc0)); |
335 | } | 332 | } |
336 | 333 | ||
337 | static ssize_t store_in8(struct device *dev, struct device_attribute *attr, | 334 | static ssize_t store_in8(struct device *dev, struct device_attribute *attr, |
@@ -344,9 +341,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr, | |||
344 | if (strict_strtol(buf, 10, &reqval)) | 341 | if (strict_strtol(buf, 10, &reqval)) |
345 | return -EINVAL; | 342 | return -EINVAL; |
346 | 343 | ||
347 | reqval = SENSORS_LIMIT(reqval, 0, asc7621_in_scaling[nr]); | 344 | reqval = SENSORS_LIMIT(reqval, 0, 0xffff); |
345 | |||
346 | reqval = reqval * 0xc0 / asc7621_in_scaling[nr]; | ||
348 | 347 | ||
349 | reqval = (reqval * 255 + 128) / asc7621_in_scaling[nr]; | 348 | reqval = SENSORS_LIMIT(reqval, 0, 0xff); |
350 | 349 | ||
351 | mutex_lock(&data->update_lock); | 350 | mutex_lock(&data->update_lock); |
352 | data->reg[param->msb[0]] = reqval; | 351 | data->reg[param->msb[0]] = reqval; |
@@ -846,11 +845,11 @@ static struct asc7621_param asc7621_params[] = { | |||
846 | PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8), | 845 | PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8), |
847 | PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8), | 846 | PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8), |
848 | 847 | ||
849 | PREAD(in0_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 0, bitmask), | 848 | PREAD(in0_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 0, bitmask), |
850 | PREAD(in1_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 1, bitmask), | 849 | PREAD(in1_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 1, bitmask), |
851 | PREAD(in2_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 2, bitmask), | 850 | PREAD(in2_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 2, bitmask), |
852 | PREAD(in3_alarm, 3, PRI_LOW, 0x41, 0, 0x01, 3, bitmask), | 851 | PREAD(in3_alarm, 3, PRI_HIGH, 0x41, 0, 0x01, 3, bitmask), |
853 | PREAD(in4_alarm, 4, PRI_LOW, 0x42, 0, 0x01, 0, bitmask), | 852 | PREAD(in4_alarm, 4, PRI_HIGH, 0x42, 0, 0x01, 0, bitmask), |
854 | 853 | ||
855 | PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16), | 854 | PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16), |
856 | PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16), | 855 | PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16), |
@@ -862,10 +861,10 @@ static struct asc7621_param asc7621_params[] = { | |||
862 | PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16), | 861 | PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16), |
863 | PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16), | 862 | PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16), |
864 | 863 | ||
865 | PREAD(fan1_alarm, 0, PRI_LOW, 0x42, 0, 0x01, 0, bitmask), | 864 | PREAD(fan1_alarm, 0, PRI_HIGH, 0x42, 0, 0x01, 2, bitmask), |
866 | PREAD(fan2_alarm, 1, PRI_LOW, 0x42, 0, 0x01, 1, bitmask), | 865 | PREAD(fan2_alarm, 1, PRI_HIGH, 0x42, 0, 0x01, 3, bitmask), |
867 | PREAD(fan3_alarm, 2, PRI_LOW, 0x42, 0, 0x01, 2, bitmask), | 866 | PREAD(fan3_alarm, 2, PRI_HIGH, 0x42, 0, 0x01, 4, bitmask), |
868 | PREAD(fan4_alarm, 3, PRI_LOW, 0x42, 0, 0x01, 3, bitmask), | 867 | PREAD(fan4_alarm, 3, PRI_HIGH, 0x42, 0, 0x01, 5, bitmask), |
869 | 868 | ||
870 | PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10), | 869 | PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10), |
871 | PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10), | 870 | PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10), |
@@ -886,10 +885,10 @@ static struct asc7621_param asc7621_params[] = { | |||
886 | PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8), | 885 | PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8), |
887 | PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8), | 886 | PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8), |
888 | 887 | ||
889 | PREAD(temp1_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 4, bitmask), | 888 | PREAD(temp1_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 4, bitmask), |
890 | PREAD(temp2_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 5, bitmask), | 889 | PREAD(temp2_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 5, bitmask), |
891 | PREAD(temp3_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 6, bitmask), | 890 | PREAD(temp3_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 6, bitmask), |
892 | PREAD(temp4_alarm, 3, PRI_LOW, 0x43, 0, 0x01, 0, bitmask), | 891 | PREAD(temp4_alarm, 3, PRI_HIGH, 0x43, 0, 0x01, 0, bitmask), |
893 | 892 | ||
894 | PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask), | 893 | PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask), |
895 | PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask), | 894 | PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask), |
@@ -898,7 +897,7 @@ static struct asc7621_param asc7621_params[] = { | |||
898 | 897 | ||
899 | PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask), | 898 | PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask), |
900 | PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask), | 899 | PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask), |
901 | PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x64, 0, 0x01, 3, bitmask), | 900 | PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x63, 0, 0x01, 3, bitmask), |
902 | PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask), | 901 | PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask), |
903 | 902 | ||
904 | PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st), | 903 | PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st), |
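The asc7621 hunks change two conversions: a requested fan minimum of 0 RPM now writes 0xffff (letting the fan stop without raising an alarm), and voltage scaling is done against the 3/4-scale register value 0xC0 instead of full scale. A small user-space sketch of the new fan-minimum conversion, assuming only the constants visible in the diff; clamp_long stands in for the kernel's SENSORS_LIMIT macro:

#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* Requested minimum RPM -> 16-bit tach-limit register value. */
static unsigned int fan16_from_rpm(long rpm)
{
        /* 0 (or negative) RPM means "allow the fan to stop": 0xffff
         * disables the alarm instead of tripping it immediately. */
        if (rpm <= 0)
                return 0xffff;
        return (unsigned int)clamp_long(5400000 / rpm, 0, 0xfffe);
}

int main(void)
{
        printf("%#x\n", fan16_from_rpm(0));    /* 0xffff */
        printf("%#x\n", fan16_from_rpm(1200)); /* 5400000 / 1200 = 4500 */
        return 0;
}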
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index c8ab50516672..7580f55e67e3 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
@@ -328,8 +328,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type) | |||
328 | lis3lv02d_joystick_disable(); | 328 | lis3lv02d_joystick_disable(); |
329 | lis3lv02d_poweroff(&lis3_dev); | 329 | lis3lv02d_poweroff(&lis3_dev); |
330 | 330 | ||
331 | flush_work(&hpled_led.work); | ||
332 | led_classdev_unregister(&hpled_led.led_classdev); | 331 | led_classdev_unregister(&hpled_led.led_classdev); |
332 | flush_work(&hpled_led.work); | ||
333 | 333 | ||
334 | return lis3lv02d_remove_fs(&lis3_dev); | 334 | return lis3lv02d_remove_fs(&lis3_dev); |
335 | } | 335 | } |
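The hp_accel hunk swaps led_classdev_unregister() and flush_work(): unregistering first removes the interface that schedules the work, so the flush that follows cannot race with a later re-queue. A sketch of that teardown order under the same assumption; my_dev and my_dev_teardown are hypothetical names:

#include <linux/workqueue.h>

struct my_dev {
        struct work_struct work;        /* queued from a sysfs/LED callback */
};

static void my_dev_teardown(struct my_dev *d)
{
        /* 1. ...unregister whatever calls schedule_work(&d->work)... */

        /* 2. With no producers left, this flush is the final run. */
        flush_work(&d->work);
}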
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 7e18bcf05a66..46239e47a260 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c | |||
@@ -59,11 +59,11 @@ static unsigned int get_time_pit(void) | |||
59 | unsigned long flags; | 59 | unsigned long flags; |
60 | unsigned int count; | 60 | unsigned int count; |
61 | 61 | ||
62 | spin_lock_irqsave(&i8253_lock, flags); | 62 | raw_spin_lock_irqsave(&i8253_lock, flags); |
63 | outb_p(0x00, 0x43); | 63 | outb_p(0x00, 0x43); |
64 | count = inb_p(0x40); | 64 | count = inb_p(0x40); |
65 | count |= inb_p(0x40) << 8; | 65 | count |= inb_p(0x40) << 8; |
66 | spin_unlock_irqrestore(&i8253_lock, flags); | 66 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
67 | 67 | ||
68 | return count; | 68 | return count; |
69 | } | 69 | } |
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 1c0b529c06aa..4afe0a3b4884 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void) | |||
146 | unsigned long flags; | 146 | unsigned long flags; |
147 | unsigned int count; | 147 | unsigned int count; |
148 | 148 | ||
149 | spin_lock_irqsave(&i8253_lock, flags); | 149 | raw_spin_lock_irqsave(&i8253_lock, flags); |
150 | outb_p(0x00, 0x43); | 150 | outb_p(0x00, 0x43); |
151 | count = inb_p(0x40); | 151 | count = inb_p(0x40); |
152 | count |= inb_p(0x40) << 8; | 152 | count |= inb_p(0x40) << 8; |
153 | spin_unlock_irqrestore(&i8253_lock, flags); | 153 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
154 | 154 | ||
155 | return count; | 155 | return count; |
156 | } | 156 | } |
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index b1edd778639c..405febd94f24 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c | |||
@@ -54,6 +54,9 @@ static signed short btn_avb_wheel[] = | |||
54 | static signed short abs_joystick[] = | 54 | static signed short abs_joystick[] = |
55 | { ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 }; | 55 | { ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 }; |
56 | 56 | ||
57 | static signed short abs_joystick_rudder[] = | ||
58 | { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, -1 }; | ||
59 | |||
57 | static signed short abs_avb_pegasus[] = | 60 | static signed short abs_avb_pegasus[] = |
58 | { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, | 61 | { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, |
59 | ABS_HAT1X, ABS_HAT1Y, -1 }; | 62 | ABS_HAT1X, ABS_HAT1Y, -1 }; |
@@ -76,8 +79,9 @@ static struct iforce_device iforce_device[] = { | |||
76 | { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? | 79 | { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? |
77 | { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, | 80 | { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, |
78 | { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? | 81 | { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? |
82 | { 0x06f8, 0x0001, "Guillemot Jet Leader Force Feedback", btn_joystick, abs_joystick_rudder, ff_iforce }, | ||
79 | { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? | 83 | { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? |
80 | { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? | 84 | { 0x06f8, 0xa302, "Guillemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? |
81 | { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce }, | 85 | { 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce }, |
82 | { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce } | 86 | { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce } |
83 | }; | 87 | }; |
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index b41303d3ec54..6c96631ae5d9 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c | |||
@@ -212,6 +212,7 @@ static struct usb_device_id iforce_usb_ids [] = { | |||
212 | { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ | 212 | { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ |
213 | { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ | 213 | { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ |
214 | { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ | 214 | { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ |
215 | { USB_DEVICE(0x06f8, 0x0003) }, /* Guillemot Jet Leader Force Feedback */ | ||
215 | { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ | 216 | { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ |
216 | { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ | 217 | { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ |
217 | { } /* Terminating entry */ | 218 | { } /* Terminating entry */ |
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index ea4e1fd12651..f080dd31499b 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c | |||
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr"); | |||
30 | #include <asm/i8253.h> | 30 | #include <asm/i8253.h> |
31 | #else | 31 | #else |
32 | #include <asm/8253pit.h> | 32 | #include <asm/8253pit.h> |
33 | static DEFINE_SPINLOCK(i8253_lock); | 33 | static DEFINE_RAW_SPINLOCK(i8253_lock); |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) | 36 | static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) |
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c | |||
50 | if (value > 20 && value < 32767) | 50 | if (value > 20 && value < 32767) |
51 | count = PIT_TICK_RATE / value; | 51 | count = PIT_TICK_RATE / value; |
52 | 52 | ||
53 | spin_lock_irqsave(&i8253_lock, flags); | 53 | raw_spin_lock_irqsave(&i8253_lock, flags); |
54 | 54 | ||
55 | if (count) { | 55 | if (count) { |
56 | /* set command for counter 2, 2 byte write */ | 56 | /* set command for counter 2, 2 byte write */ |
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c | |||
65 | outb(inb_p(0x61) & 0xFC, 0x61); | 65 | outb(inb_p(0x61) & 0xFC, 0x61); |
66 | } | 66 | } |
67 | 67 | ||
68 | spin_unlock_irqrestore(&i8253_lock, flags); | 68 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
69 | 69 | ||
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
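The gameport, analog joystick and pcspkr hunks above all convert i8253_lock to a raw spinlock (DEFINE_RAW_SPINLOCK plus raw_spin_lock_irqsave/raw_spin_unlock_irqrestore), which stays a real busy-wait lock even where ordinary spinlocks can become sleeping locks, as required around the paired PIT port accesses. A minimal sketch of the pattern with a hypothetical lock name:

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_RAW_SPINLOCK(my_pit_lock);        /* illustrative lock */

static unsigned int my_read_pit_counter(void)
{
        unsigned long flags;
        unsigned int count;

        /* Raw spinlocks never turn into mutexes (e.g. on RT kernels),
         * so the two port reads stay atomic with respect to other CPUs. */
        raw_spin_lock_irqsave(&my_pit_lock, flags);
        outb_p(0x00, 0x43);             /* latch the counter */
        count = inb_p(0x40);
        count |= inb_p(0x40) << 8;
        raw_spin_unlock_irqrestore(&my_pit_lock, flags);

        return count;
}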
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 0520c2e19927..112b4ee52ff2 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -185,7 +185,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse) | |||
185 | int fingers; | 185 | int fingers; |
186 | static int old_fingers; | 186 | static int old_fingers; |
187 | 187 | ||
188 | if (etd->fw_version_maj == 0x01) { | 188 | if (etd->fw_version < 0x020000) { |
189 | /* | 189 | /* |
190 | * byte 0: D U p1 p2 1 p3 R L | 190 | * byte 0: D U p1 p2 1 p3 R L |
191 | * byte 1: f 0 th tw x9 x8 y9 y8 | 191 | * byte 1: f 0 th tw x9 x8 y9 y8 |
@@ -227,7 +227,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse) | |||
227 | input_report_key(dev, BTN_LEFT, packet[0] & 0x01); | 227 | input_report_key(dev, BTN_LEFT, packet[0] & 0x01); |
228 | input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); | 228 | input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); |
229 | 229 | ||
230 | if ((etd->fw_version_maj == 0x01) && | 230 | if (etd->fw_version < 0x020000 && |
231 | (etd->capabilities & ETP_CAP_HAS_ROCKER)) { | 231 | (etd->capabilities & ETP_CAP_HAS_ROCKER)) { |
232 | /* rocker up */ | 232 | /* rocker up */ |
233 | input_report_key(dev, BTN_FORWARD, packet[0] & 0x40); | 233 | input_report_key(dev, BTN_FORWARD, packet[0] & 0x40); |
@@ -321,7 +321,7 @@ static int elantech_check_parity_v1(struct psmouse *psmouse) | |||
321 | unsigned char p1, p2, p3; | 321 | unsigned char p1, p2, p3; |
322 | 322 | ||
323 | /* Parity bits are placed differently */ | 323 | /* Parity bits are placed differently */ |
324 | if (etd->fw_version_maj == 0x01) { | 324 | if (etd->fw_version < 0x020000) { |
325 | /* byte 0: D U p1 p2 1 p3 R L */ | 325 | /* byte 0: D U p1 p2 1 p3 R L */ |
326 | p1 = (packet[0] & 0x20) >> 5; | 326 | p1 = (packet[0] & 0x20) >> 5; |
327 | p2 = (packet[0] & 0x10) >> 4; | 327 | p2 = (packet[0] & 0x10) >> 4; |
@@ -457,7 +457,7 @@ static void elantech_set_input_params(struct psmouse *psmouse) | |||
457 | switch (etd->hw_version) { | 457 | switch (etd->hw_version) { |
458 | case 1: | 458 | case 1: |
459 | /* Rocker button */ | 459 | /* Rocker button */ |
460 | if ((etd->fw_version_maj == 0x01) && | 460 | if (etd->fw_version < 0x020000 && |
461 | (etd->capabilities & ETP_CAP_HAS_ROCKER)) { | 461 | (etd->capabilities & ETP_CAP_HAS_ROCKER)) { |
462 | __set_bit(BTN_FORWARD, dev->keybit); | 462 | __set_bit(BTN_FORWARD, dev->keybit); |
463 | __set_bit(BTN_BACK, dev->keybit); | 463 | __set_bit(BTN_BACK, dev->keybit); |
@@ -686,15 +686,14 @@ int elantech_init(struct psmouse *psmouse) | |||
686 | pr_err("elantech.c: failed to query firmware version.\n"); | 686 | pr_err("elantech.c: failed to query firmware version.\n"); |
687 | goto init_fail; | 687 | goto init_fail; |
688 | } | 688 | } |
689 | etd->fw_version_maj = param[0]; | 689 | |
690 | etd->fw_version_min = param[2]; | 690 | etd->fw_version = (param[0] << 16) | (param[1] << 8) | param[2]; |
691 | 691 | ||
692 | /* | 692 | /* |
693 | * Assume every version greater than this is new EeePC style | 693 | * Assume every version greater than this is new EeePC style |
694 | * hardware with 6 byte packets | 694 | * hardware with 6 byte packets |
695 | */ | 695 | */ |
696 | if ((etd->fw_version_maj == 0x02 && etd->fw_version_min >= 0x30) || | 696 | if (etd->fw_version >= 0x020030) { |
697 | etd->fw_version_maj > 0x02) { | ||
698 | etd->hw_version = 2; | 697 | etd->hw_version = 2; |
699 | /* For now show extra debug information */ | 698 | /* For now show extra debug information */ |
700 | etd->debug = 1; | 699 | etd->debug = 1; |
@@ -704,8 +703,9 @@ int elantech_init(struct psmouse *psmouse) | |||
704 | etd->hw_version = 1; | 703 | etd->hw_version = 1; |
705 | etd->paritycheck = 1; | 704 | etd->paritycheck = 1; |
706 | } | 705 | } |
707 | pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d\n", | 706 | |
708 | etd->hw_version, etd->fw_version_maj, etd->fw_version_min); | 707 | pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d.%d\n", |
708 | etd->hw_version, param[0], param[1], param[2]); | ||
709 | 709 | ||
710 | if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) { | 710 | if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) { |
711 | pr_err("elantech.c: failed to query capabilities.\n"); | 711 | pr_err("elantech.c: failed to query capabilities.\n"); |
@@ -720,8 +720,8 @@ int elantech_init(struct psmouse *psmouse) | |||
720 | * a touch action starts causing the mouse cursor or scrolled page | 720 | * a touch action starts causing the mouse cursor or scrolled page |
721 | * to jump. Enable a workaround. | 721 | * to jump. Enable a workaround. |
722 | */ | 722 | */ |
723 | if (etd->fw_version_maj == 0x02 && etd->fw_version_min == 0x22) { | 723 | if (etd->fw_version == 0x020022) { |
724 | pr_info("elantech.c: firmware version 2.34 detected, " | 724 | pr_info("elantech.c: firmware version 2.0.34 detected, " |
725 | "enabling jumpy cursor workaround\n"); | 725 | "enabling jumpy cursor workaround\n"); |
726 | etd->jumpy_cursor = 1; | 726 | etd->jumpy_cursor = 1; |
727 | } | 727 | } |
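The elantech hunks replace the separate fw_version_maj/fw_version_min bytes with one fw_version integer packed as (param[0] << 16) | (param[1] << 8) | param[2], so version checks collapse into single numeric comparisons such as fw_version < 0x020000 or == 0x020022. A user-space sketch of the packing and the resulting comparisons:

#include <stdio.h>

/* Pack the three query-response bytes the way the driver now does. */
static unsigned int fw_version_pack(unsigned char p0, unsigned char p1,
                                    unsigned char p2)
{
        return (p0 << 16) | (p1 << 8) | p2;
}

int main(void)
{
        unsigned int fw = fw_version_pack(0x02, 0x00, 0x22);

        printf("fw_version = %#08x\n", fw);           /* 0x020022 */
        printf("pre-2.x packet format: %s\n",
               fw < 0x020000 ? "yes" : "no");          /* no  */
        printf("jumpy-cursor firmware: %s\n",
               fw == 0x020022 ? "yes" : "no");         /* yes */
        return 0;
}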
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index feac5f7af966..ac57bde1bb9f 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h | |||
@@ -100,11 +100,10 @@ struct elantech_data { | |||
100 | unsigned char reg_26; | 100 | unsigned char reg_26; |
101 | unsigned char debug; | 101 | unsigned char debug; |
102 | unsigned char capabilities; | 102 | unsigned char capabilities; |
103 | unsigned char fw_version_maj; | ||
104 | unsigned char fw_version_min; | ||
105 | unsigned char hw_version; | ||
106 | unsigned char paritycheck; | 103 | unsigned char paritycheck; |
107 | unsigned char jumpy_cursor; | 104 | unsigned char jumpy_cursor; |
105 | unsigned char hw_version; | ||
106 | unsigned int fw_version; | ||
108 | unsigned char parity[256]; | 107 | unsigned char parity[256]; |
109 | }; | 108 | }; |
110 | 109 | ||
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index cbc807264940..a3c97315a473 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -1394,6 +1394,7 @@ static int psmouse_reconnect(struct serio *serio) | |||
1394 | struct psmouse *psmouse = serio_get_drvdata(serio); | 1394 | struct psmouse *psmouse = serio_get_drvdata(serio); |
1395 | struct psmouse *parent = NULL; | 1395 | struct psmouse *parent = NULL; |
1396 | struct serio_driver *drv = serio->drv; | 1396 | struct serio_driver *drv = serio->drv; |
1397 | unsigned char type; | ||
1397 | int rc = -1; | 1398 | int rc = -1; |
1398 | 1399 | ||
1399 | if (!drv || !psmouse) { | 1400 | if (!drv || !psmouse) { |
@@ -1413,10 +1414,15 @@ static int psmouse_reconnect(struct serio *serio) | |||
1413 | if (psmouse->reconnect) { | 1414 | if (psmouse->reconnect) { |
1414 | if (psmouse->reconnect(psmouse)) | 1415 | if (psmouse->reconnect(psmouse)) |
1415 | goto out; | 1416 | goto out; |
1416 | } else if (psmouse_probe(psmouse) < 0 || | 1417 | } else { |
1417 | psmouse->type != psmouse_extensions(psmouse, | 1418 | psmouse_reset(psmouse); |
1418 | psmouse_max_proto, false)) { | 1419 | |
1419 | goto out; | 1420 | if (psmouse_probe(psmouse) < 0) |
1421 | goto out; | ||
1422 | |||
1423 | type = psmouse_extensions(psmouse, psmouse_max_proto, false); | ||
1424 | if (psmouse->type != type) | ||
1425 | goto out; | ||
1420 | } | 1426 | } |
1421 | 1427 | ||
1422 | /* ok, the device type (and capabilities) match the old one, | 1428 | /* ok, the device type (and capabilities) match the old one, |
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c index e019d53d1ab4..0d2d7e54b465 100644 --- a/drivers/input/touchscreen/ad7877.c +++ b/drivers/input/touchscreen/ad7877.c | |||
@@ -156,9 +156,14 @@ struct ser_req { | |||
156 | u16 reset; | 156 | u16 reset; |
157 | u16 ref_on; | 157 | u16 ref_on; |
158 | u16 command; | 158 | u16 command; |
159 | u16 sample; | ||
160 | struct spi_message msg; | 159 | struct spi_message msg; |
161 | struct spi_transfer xfer[6]; | 160 | struct spi_transfer xfer[6]; |
161 | |||
162 | /* | ||
163 | * DMA (thus cache coherency maintenance) requires the | ||
164 | * transfer buffers to live in their own cache lines. | ||
165 | */ | ||
166 | u16 sample ____cacheline_aligned; | ||
162 | }; | 167 | }; |
163 | 168 | ||
164 | struct ad7877 { | 169 | struct ad7877 { |
@@ -182,8 +187,6 @@ struct ad7877 { | |||
182 | u8 averaging; | 187 | u8 averaging; |
183 | u8 pen_down_acc_interval; | 188 | u8 pen_down_acc_interval; |
184 | 189 | ||
185 | u16 conversion_data[AD7877_NR_SENSE]; | ||
186 | |||
187 | struct spi_transfer xfer[AD7877_NR_SENSE + 2]; | 190 | struct spi_transfer xfer[AD7877_NR_SENSE + 2]; |
188 | struct spi_message msg; | 191 | struct spi_message msg; |
189 | 192 | ||
@@ -195,6 +198,12 @@ struct ad7877 { | |||
195 | spinlock_t lock; | 198 | spinlock_t lock; |
196 | struct timer_list timer; /* P: lock */ | 199 | struct timer_list timer; /* P: lock */ |
197 | unsigned pending:1; /* P: lock */ | 200 | unsigned pending:1; /* P: lock */ |
201 | |||
202 | /* | ||
203 | * DMA (thus cache coherency maintenance) requires the | ||
204 | * transfer buffers to live in their own cache lines. | ||
205 | */ | ||
206 | u16 conversion_data[AD7877_NR_SENSE] ____cacheline_aligned; | ||
198 | }; | 207 | }; |
199 | 208 | ||
200 | static int gpio3; | 209 | static int gpio3; |
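The ad7877 hunks move the SPI transfer buffers (sample, conversion_data) to the end of their structures and mark them ____cacheline_aligned; as the added comments say, DMA cache maintenance on those buffers must not share a cache line with fields the CPU touches. A minimal sketch of the layout rule with hypothetical field names:

#include <linux/cache.h>
#include <linux/types.h>

struct my_spi_req {                     /* illustrative structure */
        u16 command;
        u16 control;
        /*
         * Fields used as DMA buffers go last and start on their own
         * cache line; nothing the CPU writes concurrently may share
         * that line, or a cache invalidate can destroy CPU data.
         */
        u16 rx_buf[4] ____cacheline_aligned;
};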
diff --git a/drivers/md/md.c b/drivers/md/md.c index 9712b2e97be4..cefd63daff31 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2109,12 +2109,18 @@ repeat: | |||
2109 | if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ | 2109 | if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ |
2110 | /* .. if the array isn't clean, an 'even' event must also go | 2110 | /* .. if the array isn't clean, an 'even' event must also go |
2111 | * to spares. */ | 2111 | * to spares. */ |
2112 | if ((mddev->events&1)==0) | 2112 | if ((mddev->events&1)==0) { |
2113 | nospares = 0; | 2113 | nospares = 0; |
2114 | sync_req = 2; /* force a second update to get the | ||
2115 | * even/odd in sync */ | ||
2116 | } | ||
2114 | } else { | 2117 | } else { |
2115 | /* otherwise an 'odd' event must go to spares */ | 2118 | /* otherwise an 'odd' event must go to spares */ |
2116 | if ((mddev->events&1)) | 2119 | if ((mddev->events&1)) { |
2117 | nospares = 0; | 2120 | nospares = 0; |
2121 | sync_req = 2; /* force a second update to get the | ||
2122 | * even/odd in sync */ | ||
2123 | } | ||
2118 | } | 2124 | } |
2119 | } | 2125 | } |
2120 | 2126 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 58ea0ecae7c3..15348c393b5d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -1527,7 +1527,7 @@ static void raid5_end_read_request(struct bio * bi, int error) | |||
1527 | 1527 | ||
1528 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); | 1528 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); |
1529 | atomic_inc(&rdev->read_errors); | 1529 | atomic_inc(&rdev->read_errors); |
1530 | if (conf->mddev->degraded) | 1530 | if (conf->mddev->degraded >= conf->max_degraded) |
1531 | printk_rl(KERN_WARNING | 1531 | printk_rl(KERN_WARNING |
1532 | "raid5:%s: read error not correctable " | 1532 | "raid5:%s: read error not correctable " |
1533 | "(sector %llu on %s).\n", | 1533 | "(sector %llu on %s).\n", |
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c index fd8e1f45be36..7364b9642d00 100644 --- a/drivers/media/common/saa7146_fops.c +++ b/drivers/media/common/saa7146_fops.c | |||
@@ -423,15 +423,14 @@ static void vv_callback(struct saa7146_dev *dev, unsigned long status) | |||
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | int saa7146_vv_devinit(struct saa7146_dev *dev) | ||
427 | { | ||
428 | return v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev); | ||
429 | } | ||
430 | EXPORT_SYMBOL_GPL(saa7146_vv_devinit); | ||
431 | |||
432 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) | 426 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) |
433 | { | 427 | { |
434 | struct saa7146_vv *vv; | 428 | struct saa7146_vv *vv; |
429 | int err; | ||
430 | |||
431 | err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev); | ||
432 | if (err) | ||
433 | return err; | ||
435 | 434 | ||
436 | vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL); | 435 | vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL); |
437 | if (vv == NULL) { | 436 | if (vv == NULL) { |
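The saa7146 hunk folds v4l2_device_register() into saa7146_vv_init() and drops the exported saa7146_vv_devinit() helper, which is why the hexium_gemini, hexium_orion and mxb diffs further down delete their saa7146_vv_devinit() calls. The shape of the consolidated init, sketched with a hypothetical my_dev container rather than the real saa7146_dev:

#include <linux/pci.h>
#include <media/v4l2-device.h>

struct my_dev {                         /* illustrative container */
        struct pci_dev *pci;
        struct v4l2_device v4l2_dev;
};

static int my_vv_init(struct my_dev *dev)
{
        int err;

        /* Register the v4l2_device up front and propagate failure,
         * so callers no longer need a separate devinit step. */
        err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
        if (err)
                return err;

        /* ...allocate the rest; on a later failure undo this with
         * v4l2_device_unregister(&dev->v4l2_dev) before returning... */
        return 0;
}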
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c index 5ed75263340a..b8b2c551a1e2 100644 --- a/drivers/media/common/saa7146_video.c +++ b/drivers/media/common/saa7146_video.c | |||
@@ -558,9 +558,11 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f | |||
558 | /* ok, accept it */ | 558 | /* ok, accept it */ |
559 | vv->ov_fb = *fb; | 559 | vv->ov_fb = *fb; |
560 | vv->ov_fmt = fmt; | 560 | vv->ov_fmt = fmt; |
561 | if (0 == vv->ov_fb.fmt.bytesperline) | 561 | |
562 | vv->ov_fb.fmt.bytesperline = | 562 | if (vv->ov_fb.fmt.bytesperline < vv->ov_fb.fmt.width) { |
563 | vv->ov_fb.fmt.width * fmt->depth / 8; | 563 | vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8; |
564 | DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline)); | ||
565 | } | ||
564 | 566 | ||
565 | mutex_unlock(&dev->lock); | 567 | mutex_unlock(&dev->lock); |
566 | return 0; | 568 | return 0; |
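The vidioc_s_fbuf hunk tightens the bytesperline fixup: instead of only filling in a zero value, any stride smaller than the width is treated as bogus and recomputed as width * depth / 8. A tiny user-space sketch of the same check:

#include <stdio.h>

static unsigned int fix_bytesperline(unsigned int bytesperline,
                                     unsigned int width, unsigned int depth)
{
        if (bytesperline < width)       /* zero, or otherwise too small */
                bytesperline = width * depth / 8;
        return bytesperline;
}

int main(void)
{
        printf("%u\n", fix_bytesperline(0, 720, 16));    /* recomputed: 1440 */
        printf("%u\n", fix_bytesperline(2048, 720, 16)); /* kept: 2048 */
        return 0;
}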
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c index a3c07fe0e6c4..96972804f4ad 100644 --- a/drivers/media/dvb/frontends/stv090x.c +++ b/drivers/media/dvb/frontends/stv090x.c | |||
@@ -4470,6 +4470,10 @@ static int stv090x_setup(struct dvb_frontend *fe) | |||
4470 | if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0) | 4470 | if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0) |
4471 | goto err; | 4471 | goto err; |
4472 | 4472 | ||
4473 | /* workaround for stuck DiSEqC output */ | ||
4474 | if (config->diseqc_envelope_mode) | ||
4475 | stv090x_send_diseqc_burst(fe, SEC_MINI_A); | ||
4476 | |||
4473 | return 0; | 4477 | return 0; |
4474 | err: | 4478 | err: |
4475 | dprintk(FE_ERROR, 1, "I/O error"); | 4479 | dprintk(FE_ERROR, 1, "I/O error"); |
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c index 9fdf26cc6998..1500210c06cf 100644 --- a/drivers/media/dvb/ttpci/budget.c +++ b/drivers/media/dvb/ttpci/budget.c | |||
@@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget) | |||
643 | &budget->i2c_adap, | 643 | &budget->i2c_adap, |
644 | &tt1600_isl6423_config); | 644 | &tt1600_isl6423_config); |
645 | 645 | ||
646 | } else { | ||
647 | dvb_frontend_detach(budget->dvb_frontend); | ||
648 | budget->dvb_frontend = NULL; | ||
649 | } | 646 | } |
650 | } | 647 | } |
651 | break; | 648 | break; |
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index f8fc8654693d..9644cf760aaa 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -361,7 +361,7 @@ config VIDEO_SAA717X | |||
361 | 361 | ||
362 | config VIDEO_SAA7191 | 362 | config VIDEO_SAA7191 |
363 | tristate "Philips SAA7191 video decoder" | 363 | tristate "Philips SAA7191 video decoder" |
364 | depends on VIDEO_V4L1 && I2C | 364 | depends on VIDEO_V4L2 && I2C |
365 | ---help--- | 365 | ---help--- |
366 | Support for the Philips SAA7191 video decoder. | 366 | Support for the Philips SAA7191 video decoder. |
367 | 367 | ||
@@ -756,7 +756,7 @@ source "drivers/media/video/saa7134/Kconfig" | |||
756 | 756 | ||
757 | config VIDEO_MXB | 757 | config VIDEO_MXB |
758 | tristate "Siemens-Nixdorf 'Multimedia eXtension Board'" | 758 | tristate "Siemens-Nixdorf 'Multimedia eXtension Board'" |
759 | depends on PCI && VIDEO_V4L1 && I2C | 759 | depends on PCI && VIDEO_V4L2 && I2C |
760 | select VIDEO_SAA7146_VV | 760 | select VIDEO_SAA7146_VV |
761 | select VIDEO_TUNER | 761 | select VIDEO_TUNER |
762 | select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO | 762 | select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO |
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index b88b6174a331..c51c386559f2 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile | |||
@@ -160,8 +160,6 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o | |||
160 | obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o | 160 | obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o |
161 | obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o | 161 | obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o |
162 | 162 | ||
163 | obj-$(CONFIG_ARCH_DAVINCI) += davinci/ | ||
164 | |||
165 | obj-$(CONFIG_VIDEO_AU0828) += au0828/ | 163 | obj-$(CONFIG_VIDEO_AU0828) += au0828/ |
166 | 164 | ||
167 | obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/ | 165 | obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/ |
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c index 7cf042f9b377..398dbe71cb82 100644 --- a/drivers/media/video/davinci/vpfe_capture.c +++ b/drivers/media/video/davinci/vpfe_capture.c | |||
@@ -223,7 +223,6 @@ int vpfe_register_ccdc_device(struct ccdc_hw_device *dev) | |||
223 | BUG_ON(!dev->hw_ops.get_frame_format); | 223 | BUG_ON(!dev->hw_ops.get_frame_format); |
224 | BUG_ON(!dev->hw_ops.get_pixel_format); | 224 | BUG_ON(!dev->hw_ops.get_pixel_format); |
225 | BUG_ON(!dev->hw_ops.set_pixel_format); | 225 | BUG_ON(!dev->hw_ops.set_pixel_format); |
226 | BUG_ON(!dev->hw_ops.set_params); | ||
227 | BUG_ON(!dev->hw_ops.set_image_window); | 226 | BUG_ON(!dev->hw_ops.set_image_window); |
228 | BUG_ON(!dev->hw_ops.get_image_window); | 227 | BUG_ON(!dev->hw_ops.get_image_window); |
229 | BUG_ON(!dev->hw_ops.get_line_length); | 228 | BUG_ON(!dev->hw_ops.get_line_length); |
@@ -1689,11 +1688,12 @@ static long vpfe_param_handler(struct file *file, void *priv, | |||
1689 | struct vpfe_device *vpfe_dev = video_drvdata(file); | 1688 | struct vpfe_device *vpfe_dev = video_drvdata(file); |
1690 | int ret = 0; | 1689 | int ret = 0; |
1691 | 1690 | ||
1692 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); | 1691 | v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); |
1693 | 1692 | ||
1694 | if (vpfe_dev->started) { | 1693 | if (vpfe_dev->started) { |
1695 | /* only allowed if streaming is not started */ | 1694 | /* only allowed if streaming is not started */ |
1696 | v4l2_err(&vpfe_dev->v4l2_dev, "device already started\n"); | 1695 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, |
1696 | "device already started\n"); | ||
1697 | return -EBUSY; | 1697 | return -EBUSY; |
1698 | } | 1698 | } |
1699 | 1699 | ||
@@ -1705,16 +1705,23 @@ static long vpfe_param_handler(struct file *file, void *priv, | |||
1705 | case VPFE_CMD_S_CCDC_RAW_PARAMS: | 1705 | case VPFE_CMD_S_CCDC_RAW_PARAMS: |
1706 | v4l2_warn(&vpfe_dev->v4l2_dev, | 1706 | v4l2_warn(&vpfe_dev->v4l2_dev, |
1707 | "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); | 1707 | "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); |
1708 | ret = ccdc_dev->hw_ops.set_params(param); | 1708 | if (ccdc_dev->hw_ops.set_params) { |
1709 | if (ret) { | 1709 | ret = ccdc_dev->hw_ops.set_params(param); |
1710 | v4l2_err(&vpfe_dev->v4l2_dev, | 1710 | if (ret) { |
1711 | "Error in setting parameters in CCDC\n"); | 1711 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, |
1712 | goto unlock_out; | 1712 | "Error setting parameters in CCDC\n"); |
1713 | } | 1713 | goto unlock_out; |
1714 | if (vpfe_get_ccdc_image_format(vpfe_dev, &vpfe_dev->fmt) < 0) { | 1714 | } |
1715 | v4l2_err(&vpfe_dev->v4l2_dev, | 1715 | if (vpfe_get_ccdc_image_format(vpfe_dev, |
1716 | "Invalid image format at CCDC\n"); | 1716 | &vpfe_dev->fmt) < 0) { |
1717 | goto unlock_out; | 1717 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, |
1718 | "Invalid image format at CCDC\n"); | ||
1719 | goto unlock_out; | ||
1720 | } | ||
1721 | } else { | ||
1722 | ret = -EINVAL; | ||
1723 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, | ||
1724 | "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); | ||
1718 | } | 1725 | } |
1719 | break; | 1726 | break; |
1720 | default: | 1727 | default: |
@@ -1830,7 +1837,7 @@ static __init int vpfe_probe(struct platform_device *pdev) | |||
1830 | if (NULL == ccdc_cfg) { | 1837 | if (NULL == ccdc_cfg) { |
1831 | v4l2_err(pdev->dev.driver, | 1838 | v4l2_err(pdev->dev.driver, |
1832 | "Memory allocation failed for ccdc_cfg\n"); | 1839 | "Memory allocation failed for ccdc_cfg\n"); |
1833 | goto probe_free_dev_mem; | 1840 | goto probe_free_lock; |
1834 | } | 1841 | } |
1835 | 1842 | ||
1836 | strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32); | 1843 | strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32); |
@@ -1982,8 +1989,9 @@ probe_out_video_release: | |||
1982 | probe_out_release_irq: | 1989 | probe_out_release_irq: |
1983 | free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); | 1990 | free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); |
1984 | probe_free_ccdc_cfg_mem: | 1991 | probe_free_ccdc_cfg_mem: |
1985 | mutex_unlock(&ccdc_lock); | ||
1986 | kfree(ccdc_cfg); | 1992 | kfree(ccdc_cfg); |
1993 | probe_free_lock: | ||
1994 | mutex_unlock(&ccdc_lock); | ||
1987 | probe_free_dev_mem: | 1995 | probe_free_dev_mem: |
1988 | kfree(vpfe_dev); | 1996 | kfree(vpfe_dev); |
1989 | return ret; | 1997 | return ret; |
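The vpfe_capture hunks drop the BUG_ON for hw_ops.set_params and instead treat it as an optional callback: the experimental VPFE_CMD_S_CCDC_RAW_PARAMS ioctl now checks the pointer and returns -EINVAL when the CCDC backend does not provide it. A minimal sketch of the optional-callback guard with hypothetical names:

#include <linux/errno.h>

struct my_hw_ops {                              /* illustrative ops table */
        int (*set_params)(void *param);         /* optional callback */
};

static int my_set_raw_params(struct my_hw_ops *ops, void *param)
{
        /* Probe the pointer instead of BUG_ON()ing at registration
         * time, and report "not supported" cleanly to the caller. */
        if (!ops->set_params)
                return -EINVAL;
        return ops->set_params(param);
}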
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c index 38a6e15e096b..3dee3e5844b6 100644 --- a/drivers/media/video/gspca/sn9c20x.c +++ b/drivers/media/video/gspca/sn9c20x.c | |||
@@ -1427,7 +1427,7 @@ static int input_kthread(void *data) | |||
1427 | struct gspca_dev *gspca_dev = (struct gspca_dev *)data; | 1427 | struct gspca_dev *gspca_dev = (struct gspca_dev *)data; |
1428 | struct sd *sd = (struct sd *) gspca_dev; | 1428 | struct sd *sd = (struct sd *) gspca_dev; |
1429 | 1429 | ||
1430 | DECLARE_WAIT_QUEUE_HEAD(wait); | 1430 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait); |
1431 | set_freezable(); | 1431 | set_freezable(); |
1432 | for (;;) { | 1432 | for (;;) { |
1433 | if (kthread_should_stop()) | 1433 | if (kthread_should_stop()) |
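The sn9c20x hunk switches the kthread's on-stack wait queue head to DECLARE_WAIT_QUEUE_HEAD_ONSTACK, the variant intended for stack-allocated wait queues (it initializes the head so lockdep gets a valid per-instance key). A sketch of the usage, with a hypothetical thread function:

#include <linux/wait.h>
#include <linux/kthread.h>

static int my_poll_thread(void *data)
{
        /* A wait queue head living on the stack must use the
         * _ONSTACK initializer rather than DECLARE_WAIT_QUEUE_HEAD. */
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);

        while (!kthread_should_stop()) {
                /* ...poll hardware, then sleep on "wait" with a
                 * timeout, e.g. via wait_event_interruptible_timeout... */
        }
        return 0;
}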
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c index 15b2eef8a3f6..edf0fe157501 100644 --- a/drivers/media/video/gspca/spca508.c +++ b/drivers/media/video/gspca/spca508.c | |||
@@ -1513,7 +1513,6 @@ static const struct sd_desc sd_desc = { | |||
1513 | static const __devinitdata struct usb_device_id device_table[] = { | 1513 | static const __devinitdata struct usb_device_id device_table[] = { |
1514 | {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam}, | 1514 | {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam}, |
1515 | {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista}, | 1515 | {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista}, |
1516 | {USB_DEVICE(0x0461, 0x0815), .driver_info = MicroInnovationIC200}, | ||
1517 | {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110}, | 1516 | {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110}, |
1518 | {USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam}, | 1517 | {USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam}, |
1519 | {USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2}, | 1518 | {USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2}, |
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c index dc7f2b0fbc79..b9c80e2103b9 100644 --- a/drivers/media/video/gspca/spca561.c +++ b/drivers/media/video/gspca/spca561.c | |||
@@ -1053,6 +1053,7 @@ static const __devinitdata struct usb_device_id device_table[] = { | |||
1053 | {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A}, | 1053 | {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A}, |
1054 | {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A}, | 1054 | {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A}, |
1055 | {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A}, | 1055 | {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A}, |
1056 | {USB_DEVICE(0x0461, 0x0815), .driver_info = Rev072A}, | ||
1056 | {USB_DEVICE(0x046d, 0x0928), .driver_info = Rev012A}, | 1057 | {USB_DEVICE(0x046d, 0x0928), .driver_info = Rev012A}, |
1057 | {USB_DEVICE(0x046d, 0x0929), .driver_info = Rev012A}, | 1058 | {USB_DEVICE(0x046d, 0x0929), .driver_info = Rev012A}, |
1058 | {USB_DEVICE(0x046d, 0x092a), .driver_info = Rev012A}, | 1059 | {USB_DEVICE(0x046d, 0x092a), .driver_info = Rev012A}, |
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c index af73da34c83f..14f179a19485 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx.c | |||
@@ -524,8 +524,6 @@ static const __devinitdata struct usb_device_id device_table[] = { | |||
524 | {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, | 524 | {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, |
525 | /* QuickCam Messenger (new) */ | 525 | /* QuickCam Messenger (new) */ |
526 | {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, | 526 | {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, |
527 | /* QuickCam Messenger (new) */ | ||
528 | {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 }, | ||
529 | {} | 527 | {} |
530 | }; | 528 | }; |
531 | MODULE_DEVICE_TABLE(usb, device_table); | 529 | MODULE_DEVICE_TABLE(usb, device_table); |
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c index e620a3a92f25..ad2c232baa6d 100644 --- a/drivers/media/video/hexium_gemini.c +++ b/drivers/media/video/hexium_gemini.c | |||
@@ -356,9 +356,6 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d | |||
356 | 356 | ||
357 | DEB_EE((".\n")); | 357 | DEB_EE((".\n")); |
358 | 358 | ||
359 | ret = saa7146_vv_devinit(dev); | ||
360 | if (ret) | ||
361 | return ret; | ||
362 | hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); | 359 | hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); |
363 | if (NULL == hexium) { | 360 | if (NULL == hexium) { |
364 | printk("hexium_gemini: not enough kernel memory in hexium_attach().\n"); | 361 | printk("hexium_gemini: not enough kernel memory in hexium_attach().\n"); |
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c index fe596a1c12a8..938a1f8f880a 100644 --- a/drivers/media/video/hexium_orion.c +++ b/drivers/media/video/hexium_orion.c | |||
@@ -216,10 +216,6 @@ static int hexium_probe(struct saa7146_dev *dev) | |||
216 | return -EFAULT; | 216 | return -EFAULT; |
217 | } | 217 | } |
218 | 218 | ||
219 | err = saa7146_vv_devinit(dev); | ||
220 | if (err) | ||
221 | return err; | ||
222 | |||
223 | hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); | 219 | hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); |
224 | if (NULL == hexium) { | 220 | if (NULL == hexium) { |
225 | printk("hexium_orion: hexium_probe: not enough kernel memory.\n"); | 221 | printk("hexium_orion: hexium_probe: not enough kernel memory.\n"); |
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 3c8ebfcb742e..34a66019190e 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c | |||
@@ -49,8 +49,6 @@ | |||
49 | /* | 49 | /* |
50 | * CSI registers | 50 | * CSI registers |
51 | */ | 51 | */ |
52 | #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ | ||
53 | #define DMA_DIMR 0x08 /* Interrupt mask Register */ | ||
54 | #define CSICR1 0x00 /* CSI Control Register 1 */ | 52 | #define CSICR1 0x00 /* CSI Control Register 1 */ |
55 | #define CSISR 0x08 /* CSI Status Register */ | 53 | #define CSISR 0x08 /* CSI Status Register */ |
56 | #define CSIRXR 0x10 /* CSI RxFIFO Register */ | 54 | #define CSIRXR 0x10 /* CSI RxFIFO Register */ |
@@ -784,7 +782,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev) | |||
784 | pcdev); | 782 | pcdev); |
785 | 783 | ||
786 | imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO, | 784 | imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO, |
787 | IMX_DMA_MEMSIZE_32, DMA_REQ_CSI_R, 0); | 785 | IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0); |
788 | /* burst length : 16 words = 64 bytes */ | 786 | /* burst length : 16 words = 64 bytes */ |
789 | imx_dma_config_burstlen(pcdev->dma_chan, 0); | 787 | imx_dma_config_burstlen(pcdev->dma_chan, 0); |
790 | 788 | ||
@@ -798,8 +796,8 @@ static int __init mx1_camera_probe(struct platform_device *pdev) | |||
798 | set_fiq_handler(&mx1_camera_sof_fiq_start, &mx1_camera_sof_fiq_end - | 796 | set_fiq_handler(&mx1_camera_sof_fiq_start, &mx1_camera_sof_fiq_end - |
799 | &mx1_camera_sof_fiq_start); | 797 | &mx1_camera_sof_fiq_start); |
800 | 798 | ||
801 | regs.ARM_r8 = DMA_BASE + DMA_DIMR; | 799 | regs.ARM_r8 = (long)MX1_DMA_DIMR; |
802 | regs.ARM_r9 = DMA_BASE + DMA_CCR(pcdev->dma_chan); | 800 | regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan); |
803 | regs.ARM_r10 = (long)pcdev->base + CSICR1; | 801 | regs.ARM_r10 = (long)pcdev->base + CSICR1; |
804 | regs.ARM_fp = (long)pcdev->base + CSISR; | 802 | regs.ARM_fp = (long)pcdev->base + CSISR; |
805 | regs.ARM_sp = 1 << pcdev->dma_chan; | 803 | regs.ARM_sp = 1 << pcdev->dma_chan; |
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c index 9f01f14e4aa2..ef0c8178f255 100644 --- a/drivers/media/video/mxb.c +++ b/drivers/media/video/mxb.c | |||
@@ -169,11 +169,7 @@ static struct saa7146_extension extension; | |||
169 | static int mxb_probe(struct saa7146_dev *dev) | 169 | static int mxb_probe(struct saa7146_dev *dev) |
170 | { | 170 | { |
171 | struct mxb *mxb = NULL; | 171 | struct mxb *mxb = NULL; |
172 | int err; | ||
173 | 172 | ||
174 | err = saa7146_vv_devinit(dev); | ||
175 | if (err) | ||
176 | return err; | ||
177 | mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL); | 173 | mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL); |
178 | if (mxb == NULL) { | 174 | if (mxb == NULL) { |
179 | DEB_D(("not enough kernel memory.\n")); | 175 | DEB_D(("not enough kernel memory.\n")); |
@@ -699,14 +695,17 @@ static struct saa7146_ext_vv vv_data; | |||
699 | /* this function only gets called when the probing was successful */ | 695 | /* this function only gets called when the probing was successful */ |
700 | static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) | 696 | static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) |
701 | { | 697 | { |
702 | struct mxb *mxb = (struct mxb *)dev->ext_priv; | 698 | struct mxb *mxb; |
703 | 699 | ||
704 | DEB_EE(("dev:%p\n", dev)); | 700 | DEB_EE(("dev:%p\n", dev)); |
705 | 701 | ||
706 | /* checking for i2c-devices can be omitted here, because we | ||
707 | already did this in "mxb_vl42_probe" */ | ||
708 | |||
709 | saa7146_vv_init(dev, &vv_data); | 702 | saa7146_vv_init(dev, &vv_data); |
703 | if (mxb_probe(dev)) { | ||
704 | saa7146_vv_release(dev); | ||
705 | return -1; | ||
706 | } | ||
707 | mxb = (struct mxb *)dev->ext_priv; | ||
708 | |||
710 | vv_data.ops.vidioc_queryctrl = vidioc_queryctrl; | 709 | vv_data.ops.vidioc_queryctrl = vidioc_queryctrl; |
711 | vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl; | 710 | vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl; |
712 | vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl; | 711 | vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl; |
@@ -726,6 +725,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data | |||
726 | vv_data.ops.vidioc_default = vidioc_default; | 725 | vv_data.ops.vidioc_default = vidioc_default; |
727 | if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) { | 726 | if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) { |
728 | ERR(("cannot register capture v4l2 device. skipping.\n")); | 727 | ERR(("cannot register capture v4l2 device. skipping.\n")); |
728 | saa7146_vv_release(dev); | ||
729 | return -1; | 729 | return -1; |
730 | } | 730 | } |
731 | 731 | ||
@@ -846,7 +846,6 @@ static struct saa7146_extension extension = { | |||
846 | .pci_tbl = &pci_tbl[0], | 846 | .pci_tbl = &pci_tbl[0], |
847 | .module = THIS_MODULE, | 847 | .module = THIS_MODULE, |
848 | 848 | ||
849 | .probe = mxb_probe, | ||
850 | .attach = mxb_attach, | 849 | .attach = mxb_attach, |
851 | .detach = mxb_detach, | 850 | .detach = mxb_detach, |
852 | 851 | ||
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c index b189fe63394b..ce76d952e161 100644 --- a/drivers/media/video/omap24xxcam.c +++ b/drivers/media/video/omap24xxcam.c | |||
@@ -1405,7 +1405,7 @@ static int omap24xxcam_mmap_buffers(struct file *file, | |||
1405 | } | 1405 | } |
1406 | 1406 | ||
1407 | size = 0; | 1407 | size = 0; |
1408 | for (i = first; i <= last; i++) { | 1408 | for (i = first; i <= last && i < VIDEO_MAX_FRAME; i++) { |
1409 | struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]); | 1409 | struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]); |
1410 | 1410 | ||
1411 | for (j = 0; j < dma->sglen; j++) { | 1411 | for (j = 0; j < dma->sglen; j++) { |
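The omap24xxcam hunk bounds the mmap loop at VIDEO_MAX_FRAME, so an oversized last index derived from the requested mapping can no longer walk past the end of vbq->bufs[]. A user-space sketch of the same clamp, with hypothetical sizes:

#include <stdio.h>

#define MY_MAX_FRAME 32                 /* stands in for VIDEO_MAX_FRAME */

static size_t sum_buffer_sizes(const size_t *sizes, unsigned int first,
                               unsigned int last)
{
        size_t total = 0;
        unsigned int i;

        /* The extra bound keeps an oversized 'last' from indexing
         * beyond the MY_MAX_FRAME entries that actually exist. */
        for (i = first; i <= last && i < MY_MAX_FRAME; i++)
                total += sizes[i];
        return total;
}

int main(void)
{
        size_t sizes[MY_MAX_FRAME] = { [0] = 4096, [1] = 4096, [2] = 4096 };

        printf("%zu\n", sum_buffer_sizes(sizes, 0, 1000)); /* 12288, loop clamped */
        return 0;
}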
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 5ecc30daef2d..04bf5c11308d 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c | |||
@@ -609,12 +609,9 @@ static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev, | |||
609 | */ | 609 | */ |
610 | static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev) | 610 | static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev) |
611 | { | 611 | { |
612 | unsigned long cicr0, cifr; | 612 | unsigned long cicr0; |
613 | 613 | ||
614 | dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); | 614 | dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); |
615 | /* Reset the FIFOs */ | ||
616 | cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F; | ||
617 | __raw_writel(cifr, pcdev->base + CIFR); | ||
618 | /* Enable End-Of-Frame Interrupt */ | 615 | /* Enable End-Of-Frame Interrupt */ |
619 | cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB; | 616 | cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB; |
620 | cicr0 &= ~CICR0_EOFM; | 617 | cicr0 &= ~CICR0_EOFM; |
@@ -935,7 +932,7 @@ static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev) | |||
935 | static irqreturn_t pxa_camera_irq(int irq, void *data) | 932 | static irqreturn_t pxa_camera_irq(int irq, void *data) |
936 | { | 933 | { |
937 | struct pxa_camera_dev *pcdev = data; | 934 | struct pxa_camera_dev *pcdev = data; |
938 | unsigned long status, cicr0; | 935 | unsigned long status, cifr, cicr0; |
939 | struct pxa_buffer *buf; | 936 | struct pxa_buffer *buf; |
940 | struct videobuf_buffer *vb; | 937 | struct videobuf_buffer *vb; |
941 | 938 | ||
@@ -949,6 +946,10 @@ static irqreturn_t pxa_camera_irq(int irq, void *data) | |||
949 | __raw_writel(status, pcdev->base + CISR); | 946 | __raw_writel(status, pcdev->base + CISR); |
950 | 947 | ||
951 | if (status & CISR_EOF) { | 948 | if (status & CISR_EOF) { |
949 | /* Reset the FIFOs */ | ||
950 | cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F; | ||
951 | __raw_writel(cifr, pcdev->base + CIFR); | ||
952 | |||
952 | pcdev->active = list_first_entry(&pcdev->capture, | 953 | pcdev->active = list_first_entry(&pcdev->capture, |
953 | struct pxa_buffer, vb.queue); | 954 | struct pxa_buffer, vb.queue); |
954 | vb = &pcdev->active->vb; | 955 | vb = &pcdev->active->vb; |
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 6e16b3979326..1ad980f8e770 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c | |||
@@ -1633,7 +1633,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, | |||
1633 | height = pix->height; | 1633 | height = pix->height; |
1634 | 1634 | ||
1635 | pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt); | 1635 | pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt); |
1636 | if (pix->bytesperline < 0) | 1636 | if ((int)pix->bytesperline < 0) |
1637 | return pix->bytesperline; | 1637 | return pix->bytesperline; |
1638 | pix->sizeimage = height * pix->bytesperline; | 1638 | pix->sizeimage = height * pix->bytesperline; |
1639 | 1639 | ||
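The sh_mobile_ceu hunk casts pix->bytesperline to int before the "< 0" test: bytesperline is an unsigned field, so a negative error code stored into it would otherwise compare as a huge positive number and the check could never fire. A small user-space illustration of the pitfall:

#include <stdio.h>

int main(void)
{
        unsigned int bytesperline = (unsigned int)-22;  /* e.g. -EINVAL stored unsigned */

        /* For an unsigned value the first test is always false. */
        printf("unsigned test fires: %s\n",
               bytesperline < 0 ? "yes" : "no");         /* no  */
        printf("cast test fires:     %s\n",
               (int)bytesperline < 0 ? "yes" : "no");    /* yes */
        return 0;
}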
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index a3d5728b6449..f2ab025ad97a 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c | |||
@@ -349,6 +349,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) | |||
349 | goto disable; | 349 | goto disable; |
350 | } | 350 | } |
351 | 351 | ||
352 | /* If an interrupt arrived late clean up after it */ | ||
353 | try_wait_for_completion(&wm831x->auxadc_done); | ||
354 | |||
352 | /* Ignore the result to allow us to soldier on without IRQ hookup */ | 355 | /* Ignore the result to allow us to soldier on without IRQ hookup */ |
353 | wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); | 356 | wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); |
354 | 357 | ||
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index e400a3bed063..b5807484b4c9 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c | |||
@@ -363,6 +363,10 @@ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref) | |||
363 | reg |= 1 << channel | WM8350_AUXADC_POLL; | 363 | reg |= 1 << channel | WM8350_AUXADC_POLL; |
364 | wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg); | 364 | wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg); |
365 | 365 | ||
366 | /* If a late IRQ left the completion signalled then consume | ||
367 | * the completion. */ | ||
368 | try_wait_for_completion(&wm8350->auxadc_done); | ||
369 | |||
366 | /* We ignore the result of the completion and just check for a | 370 | /* We ignore the result of the completion and just check for a |
367 | * conversion result, allowing us to soldier on if the IRQ | 371 | * conversion result, allowing us to soldier on if the IRQ |
368 | * infrastructure is not set up for the chip. */ | 372 | * infrastructure is not set up for the chip. */ |
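The wm831x and wm8350 hunks call try_wait_for_completion() before starting a new AUXADC conversion: if a late interrupt from the previous conversion already signalled the completion, that stale count is consumed, so the following wait_for_completion_timeout() really waits for the new result. A minimal sketch of the pattern with a hypothetical completion:

#include <linux/completion.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(my_adc_done);         /* illustrative completion */

static void my_start_and_wait(void)
{
        /* Consume a leftover complete() from a late IRQ, if any,
         * so the timeout below measures *this* conversion. */
        try_wait_for_completion(&my_adc_done);

        /* ...kick off the hardware conversion here... */

        wait_for_completion_timeout(&my_adc_done, msecs_to_jiffies(5));
}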
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index a6dd7da37357..336d9f553f3e 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c | |||
@@ -314,8 +314,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host) | |||
314 | dmabuf = (unsigned *)tmpv; | 314 | dmabuf = (unsigned *)tmpv; |
315 | } | 315 | } |
316 | 316 | ||
317 | flush_kernel_dcache_page(sg_page(sg)); | ||
317 | kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); | 318 | kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); |
318 | dmac_flush_range((void *)sgbuffer, ((void *)sgbuffer) + amount); | ||
319 | data->bytes_xfered += amount; | 319 | data->bytes_xfered += amount; |
320 | if (size == 0) | 320 | if (size == 0) |
321 | break; | 321 | break; |
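The at91_mci hunk above replaces a dmac_flush_range() call that ran on the kmapped address after it had already been unmapped with flush_kernel_dcache_page() on the page while it is still mapped, which is the supported way to push CPU-written data out of aliasing data caches before other users see the page. A hedged sketch of the kmap/modify/flush sequence (illustrative helper, not the driver's actual copy loop):

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Copy CPU-produced data into a scatterlist page and make it visible. */
	static void fill_sg_page(struct scatterlist *sg, const void *src,
				 size_t len)
	{
		void *dst = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ);

		memcpy(dst + sg->offset, src, len);
		/* Flush while the page is still mapped, via the page-based API. */
		flush_kernel_dcache_page(sg_page(sg));
		kunmap_atomic(dst, KM_BIO_SRC_IRQ);
	}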
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 88be37d9e9a5..fb279f4ed8b3 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -266,7 +266,7 @@ static int atmci_req_show(struct seq_file *s, void *v) | |||
266 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", | 266 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", |
267 | cmd->opcode, cmd->arg, cmd->flags, | 267 | cmd->opcode, cmd->arg, cmd->flags, |
268 | cmd->resp[0], cmd->resp[1], cmd->resp[2], | 268 | cmd->resp[0], cmd->resp[1], cmd->resp[2], |
269 | cmd->resp[2], cmd->error); | 269 | cmd->resp[3], cmd->error); |
270 | if (data) | 270 | if (data) |
271 | seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", | 271 | seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", |
272 | data->bytes_xfered, data->blocks, | 272 | data->bytes_xfered, data->blocks, |
@@ -276,7 +276,7 @@ static int atmci_req_show(struct seq_file *s, void *v) | |||
276 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", | 276 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", |
277 | stop->opcode, stop->arg, stop->flags, | 277 | stop->opcode, stop->arg, stop->flags, |
278 | stop->resp[0], stop->resp[1], stop->resp[2], | 278 | stop->resp[0], stop->resp[1], stop->resp[2], |
279 | stop->resp[2], stop->error); | 279 | stop->resp[3], stop->error); |
280 | } | 280 | } |
281 | 281 | ||
282 | spin_unlock_bh(&slot->host->lock); | 282 | spin_unlock_bh(&slot->host->lock); |
@@ -569,9 +569,10 @@ static void atmci_dma_cleanup(struct atmel_mci *host) | |||
569 | { | 569 | { |
570 | struct mmc_data *data = host->data; | 570 | struct mmc_data *data = host->data; |
571 | 571 | ||
572 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, | 572 | if (data) |
573 | ((data->flags & MMC_DATA_WRITE) | 573 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, |
574 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | 574 | ((data->flags & MMC_DATA_WRITE) |
575 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | ||
575 | } | 576 | } |
576 | 577 | ||
577 | static void atmci_stop_dma(struct atmel_mci *host) | 578 | static void atmci_stop_dma(struct atmel_mci *host) |
@@ -1099,8 +1100,8 @@ static void atmci_command_complete(struct atmel_mci *host, | |||
1099 | "command error: status=0x%08x\n", status); | 1100 | "command error: status=0x%08x\n", status); |
1100 | 1101 | ||
1101 | if (cmd->data) { | 1102 | if (cmd->data) { |
1102 | host->data = NULL; | ||
1103 | atmci_stop_dma(host); | 1103 | atmci_stop_dma(host); |
1104 | host->data = NULL; | ||
1104 | mci_writel(host, IDR, MCI_NOTBUSY | 1105 | mci_writel(host, IDR, MCI_NOTBUSY |
1105 | | MCI_TXRDY | MCI_RXRDY | 1106 | | MCI_TXRDY | MCI_RXRDY |
1106 | | ATMCI_DATA_ERROR_FLAGS); | 1107 | | ATMCI_DATA_ERROR_FLAGS); |
@@ -1293,6 +1294,7 @@ static void atmci_tasklet_func(unsigned long priv) | |||
1293 | } else { | 1294 | } else { |
1294 | data->bytes_xfered = data->blocks * data->blksz; | 1295 | data->bytes_xfered = data->blocks * data->blksz; |
1295 | data->error = 0; | 1296 | data->error = 0; |
1297 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS); | ||
1296 | } | 1298 | } |
1297 | 1299 | ||
1298 | if (!data->stop) { | 1300 | if (!data->stop) { |
@@ -1751,13 +1753,13 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1751 | ret = -ENODEV; | 1753 | ret = -ENODEV; |
1752 | if (pdata->slot[0].bus_width) { | 1754 | if (pdata->slot[0].bus_width) { |
1753 | ret = atmci_init_slot(host, &pdata->slot[0], | 1755 | ret = atmci_init_slot(host, &pdata->slot[0], |
1754 | MCI_SDCSEL_SLOT_A, 0); | 1756 | 0, MCI_SDCSEL_SLOT_A); |
1755 | if (!ret) | 1757 | if (!ret) |
1756 | nr_slots++; | 1758 | nr_slots++; |
1757 | } | 1759 | } |
1758 | if (pdata->slot[1].bus_width) { | 1760 | if (pdata->slot[1].bus_width) { |
1759 | ret = atmci_init_slot(host, &pdata->slot[1], | 1761 | ret = atmci_init_slot(host, &pdata->slot[1], |
1760 | MCI_SDCSEL_SLOT_B, 1); | 1762 | 1, MCI_SDCSEL_SLOT_B); |
1761 | if (!ret) | 1763 | if (!ret) |
1762 | nr_slots++; | 1764 | nr_slots++; |
1763 | } | 1765 | } |
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index ed5e9742be2c..a8f0512bad38 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c | |||
@@ -674,6 +674,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = { | |||
674 | { ZORRO_PROD_AMERISTAR_A2065 }, | 674 | { ZORRO_PROD_AMERISTAR_A2065 }, |
675 | { 0 } | 675 | { 0 } |
676 | }; | 676 | }; |
677 | MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl); | ||
677 | 678 | ||
678 | static struct zorro_driver a2065_driver = { | 679 | static struct zorro_driver a2065_driver = { |
679 | .name = "a2065", | 680 | .name = "a2065", |
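This hunk, and the matching ariadne, hydra and zorro8390 hunks further down, add the same one-liner: without MODULE_DEVICE_TABLE() the Zorro ID table stays private to the driver, so no modalias information is emitted and udev cannot autoload the module when a matching board is detected. A minimal sketch of the idiom (hypothetical driver name, real macros and IDs):

	#include <linux/module.h>
	#include <linux/zorro.h>

	static struct zorro_device_id example_zorro_tbl[] __devinitdata = {
		{ ZORRO_PROD_AMERISTAR_A2065 },	/* any supported product ID */
		{ 0 }
	};
	/* Exposes the table as __mod_zorro_device_table so modpost can
	 * generate the modalias entries used for autoloading. */
	MODULE_DEVICE_TABLE(zorro, example_zorro_tbl);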
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index fa1a2354f5f9..4b30a46486e2 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = { | |||
145 | { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, | 145 | { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, |
146 | { 0 } | 146 | { 0 } |
147 | }; | 147 | }; |
148 | MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl); | ||
148 | 149 | ||
149 | static struct zorro_driver ariadne_driver = { | 150 | static struct zorro_driver ariadne_driver = { |
150 | .name = "ariadne", | 151 | .name = "ariadne", |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 4e97ca182997..5d3763fb3472 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -1649,6 +1649,7 @@ static void free_skb_resources(struct gfar_private *priv) | |||
1649 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | 1649 | sizeof(struct rxbd8) * priv->total_rx_ring_size, |
1650 | priv->tx_queue[0]->tx_bd_base, | 1650 | priv->tx_queue[0]->tx_bd_base, |
1651 | priv->tx_queue[0]->tx_bd_dma_base); | 1651 | priv->tx_queue[0]->tx_bd_dma_base); |
1652 | skb_queue_purge(&priv->rx_recycle); | ||
1652 | } | 1653 | } |
1653 | 1654 | ||
1654 | void gfar_start(struct net_device *dev) | 1655 | void gfar_start(struct net_device *dev) |
@@ -2088,7 +2089,6 @@ static int gfar_close(struct net_device *dev) | |||
2088 | 2089 | ||
2089 | disable_napi(priv); | 2090 | disable_napi(priv); |
2090 | 2091 | ||
2091 | skb_queue_purge(&priv->rx_recycle); | ||
2092 | cancel_work_sync(&priv->reset_task); | 2092 | cancel_work_sync(&priv->reset_task); |
2093 | stop_gfar(dev); | 2093 | stop_gfar(dev); |
2094 | 2094 | ||
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index 24724b4ad709..07d8e5b634f3 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c | |||
@@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = { | |||
71 | { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, | 71 | { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, |
72 | { 0 } | 72 | { 0 } |
73 | }; | 73 | }; |
74 | MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl); | ||
74 | 75 | ||
75 | static struct zorro_driver hydra_driver = { | 76 | static struct zorro_driver hydra_driver = { |
76 | .name = "hydra", | 77 | .name = "hydra", |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 0cd80e4d71d9..e67691dca4ab 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -32,6 +32,7 @@ static int kszphy_config_init(struct phy_device *phydev) | |||
32 | 32 | ||
33 | static struct phy_driver ks8001_driver = { | 33 | static struct phy_driver ks8001_driver = { |
34 | .phy_id = PHY_ID_KS8001, | 34 | .phy_id = PHY_ID_KS8001, |
35 | .name = "Micrel KS8001", | ||
35 | .phy_id_mask = 0x00fffff0, | 36 | .phy_id_mask = 0x00fffff0, |
36 | .features = PHY_BASIC_FEATURES, | 37 | .features = PHY_BASIC_FEATURES, |
37 | .flags = PHY_POLL, | 38 | .flags = PHY_POLL, |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f9f0730b53d5..5ec542dd5b50 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -187,7 +187,6 @@ tx_drop: | |||
187 | return NETDEV_TX_OK; | 187 | return NETDEV_TX_OK; |
188 | 188 | ||
189 | rx_drop: | 189 | rx_drop: |
190 | kfree_skb(skb); | ||
191 | rcv_stats->rx_dropped++; | 190 | rcv_stats->rx_dropped++; |
192 | return NETDEV_TX_OK; | 191 | return NETDEV_TX_OK; |
193 | } | 192 | } |
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 99a6da464bd3..e1c2fcaa8bed 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c | |||
@@ -727,12 +727,16 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru) | |||
727 | { | 727 | { |
728 | struct device *parent = aru->udev->dev.parent; | 728 | struct device *parent = aru->udev->dev.parent; |
729 | 729 | ||
730 | complete(&aru->firmware_loading_complete); | ||
731 | |||
730 | /* unbind anything failed */ | 732 | /* unbind anything failed */ |
731 | if (parent) | 733 | if (parent) |
732 | down(&parent->sem); | 734 | down(&parent->sem); |
733 | device_release_driver(&aru->udev->dev); | 735 | device_release_driver(&aru->udev->dev); |
734 | if (parent) | 736 | if (parent) |
735 | up(&parent->sem); | 737 | up(&parent->sem); |
738 | |||
739 | usb_put_dev(aru->udev); | ||
736 | } | 740 | } |
737 | 741 | ||
738 | static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) | 742 | static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) |
@@ -761,6 +765,8 @@ static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) | |||
761 | if (err) | 765 | if (err) |
762 | goto err_unrx; | 766 | goto err_unrx; |
763 | 767 | ||
768 | complete(&aru->firmware_loading_complete); | ||
769 | usb_put_dev(aru->udev); | ||
764 | return; | 770 | return; |
765 | 771 | ||
766 | err_unrx: | 772 | err_unrx: |
@@ -858,6 +864,7 @@ static int ar9170_usb_probe(struct usb_interface *intf, | |||
858 | init_usb_anchor(&aru->tx_pending); | 864 | init_usb_anchor(&aru->tx_pending); |
859 | init_usb_anchor(&aru->tx_submitted); | 865 | init_usb_anchor(&aru->tx_submitted); |
860 | init_completion(&aru->cmd_wait); | 866 | init_completion(&aru->cmd_wait); |
867 | init_completion(&aru->firmware_loading_complete); | ||
861 | spin_lock_init(&aru->tx_urb_lock); | 868 | spin_lock_init(&aru->tx_urb_lock); |
862 | 869 | ||
863 | aru->tx_pending_urbs = 0; | 870 | aru->tx_pending_urbs = 0; |
@@ -877,6 +884,7 @@ static int ar9170_usb_probe(struct usb_interface *intf, | |||
877 | if (err) | 884 | if (err) |
878 | goto err_freehw; | 885 | goto err_freehw; |
879 | 886 | ||
887 | usb_get_dev(aru->udev); | ||
880 | return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw", | 888 | return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw", |
881 | &aru->udev->dev, GFP_KERNEL, aru, | 889 | &aru->udev->dev, GFP_KERNEL, aru, |
882 | ar9170_usb_firmware_step2); | 890 | ar9170_usb_firmware_step2); |
@@ -896,6 +904,9 @@ static void ar9170_usb_disconnect(struct usb_interface *intf) | |||
896 | return; | 904 | return; |
897 | 905 | ||
898 | aru->common.state = AR9170_IDLE; | 906 | aru->common.state = AR9170_IDLE; |
907 | |||
908 | wait_for_completion(&aru->firmware_loading_complete); | ||
909 | |||
899 | ar9170_unregister(&aru->common); | 910 | ar9170_unregister(&aru->common); |
900 | ar9170_usb_cancel_urbs(aru); | 911 | ar9170_usb_cancel_urbs(aru); |
901 | 912 | ||
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h index a2ce3b169ceb..919b06046eb3 100644 --- a/drivers/net/wireless/ath/ar9170/usb.h +++ b/drivers/net/wireless/ath/ar9170/usb.h | |||
@@ -71,6 +71,7 @@ struct ar9170_usb { | |||
71 | unsigned int tx_pending_urbs; | 71 | unsigned int tx_pending_urbs; |
72 | 72 | ||
73 | struct completion cmd_wait; | 73 | struct completion cmd_wait; |
74 | struct completion firmware_loading_complete; | ||
74 | int readlen; | 75 | int readlen; |
75 | u8 *readbuf; | 76 | u8 *readbuf; |
76 | 77 | ||
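Taken together, the ar9170 hunks above close a race between asynchronous firmware loading and disconnect: probe() returns as soon as request_firmware_nowait() is queued, so the device could previously be torn down while the firmware callback was still running. The fix pins the USB device across the async window and lets disconnect() wait on a completion that is signalled on both the success and failure paths. A condensed sketch of the lifetime rules (fw_ctx is an illustrative stand-in for the driver's ar9170_usb state):

	#include <linux/completion.h>
	#include <linux/firmware.h>
	#include <linux/module.h>
	#include <linux/usb.h>

	struct fw_ctx {
		struct usb_device *udev;
		struct completion firmware_loading_complete;
	};

	static void fw_callback(const struct firmware *fw, void *context);

	static int example_probe(struct fw_ctx *ctx)
	{
		init_completion(&ctx->firmware_loading_complete);
		usb_get_dev(ctx->udev);		/* pin across the async load */
		return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
					       &ctx->udev->dev, GFP_KERNEL,
					       ctx, fw_callback);
	}

	static void fw_done(struct fw_ctx *ctx)
	{
		/* Called from fw_callback() on success *and* on failure. */
		complete(&ctx->firmware_loading_complete);
		usb_put_dev(ctx->udev);		/* drop the probe-time pin */
	}

	static void example_disconnect(struct fw_ctx *ctx)
	{
		/* Never free state the firmware callback may still touch. */
		wait_for_completion(&ctx->firmware_loading_complete);
	}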
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 6383d9f8c9b3..f4e59ae07f8e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h | |||
@@ -2621,7 +2621,9 @@ struct iwl_ssid_ie { | |||
2621 | #define PROBE_OPTION_MAX_3945 4 | 2621 | #define PROBE_OPTION_MAX_3945 4 |
2622 | #define PROBE_OPTION_MAX 20 | 2622 | #define PROBE_OPTION_MAX 20 |
2623 | #define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) | 2623 | #define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) |
2624 | #define IWL_GOOD_CRC_TH cpu_to_le16(1) | 2624 | #define IWL_GOOD_CRC_TH_DISABLED 0 |
2625 | #define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) | ||
2626 | #define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) | ||
2625 | #define IWL_MAX_SCAN_SIZE 1024 | 2627 | #define IWL_MAX_SCAN_SIZE 1024 |
2626 | #define IWL_MAX_CMD_SIZE 4096 | 2628 | #define IWL_MAX_CMD_SIZE 4096 |
2627 | #define IWL_MAX_PROBE_REQUEST 200 | 2629 | #define IWL_MAX_PROBE_REQUEST 200 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 12e455a4b90e..741e65ec8301 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -813,16 +813,29 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
813 | rate = IWL_RATE_1M_PLCP; | 813 | rate = IWL_RATE_1M_PLCP; |
814 | rate_flags = RATE_MCS_CCK_MSK; | 814 | rate_flags = RATE_MCS_CCK_MSK; |
815 | } | 815 | } |
816 | scan->good_CRC_th = 0; | 816 | scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED; |
817 | } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { | 817 | } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { |
818 | band = IEEE80211_BAND_5GHZ; | 818 | band = IEEE80211_BAND_5GHZ; |
819 | rate = IWL_RATE_6M_PLCP; | 819 | rate = IWL_RATE_6M_PLCP; |
820 | /* | 820 | /* |
821 | * If active scaning is requested but a certain channel | 821 | * If active scanning is requested but a certain channel is |
822 | * is marked passive, we can do active scanning if we | 822 | * marked passive, we can do active scanning if we detect |
823 | * detect transmissions. | 823 | * transmissions. |
824 | * | ||
825 | * There is an issue with some firmware versions that triggers | ||
826 | * a sysassert on a "good CRC threshold" of zero (== disabled), | ||
827 | * on a radar channel even though this means that we should NOT | ||
828 | * send probes. | ||
829 | * | ||
830 | * The "good CRC threshold" is the number of frames that we | ||
831 | * need to receive during our dwell time on a channel before | ||
832 | * sending out probes -- setting this to a huge value will | ||
833 | * mean we never reach it, but at the same time work around | ||
834 | * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER | ||
835 | * here instead of IWL_GOOD_CRC_TH_DISABLED. | ||
824 | */ | 836 | */ |
825 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; | 837 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : |
838 | IWL_GOOD_CRC_TH_NEVER; | ||
826 | 839 | ||
827 | /* Force use of chains B and C (0x6) for scan Rx for 4965 | 840 | /* Force use of chains B and C (0x6) for scan Rx for 4965 |
828 | * Avoid A (0x1) because of its off-channel reception on A-band. | 841 | * Avoid A (0x1) because of its off-channel reception on A-band. |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index b55e4f39a9e1..b74a56c48d26 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -2967,7 +2967,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data) | |||
2967 | * is marked passive, we can do active scanning if we | 2967 | * is marked passive, we can do active scanning if we |
2968 | * detect transmissions. | 2968 | * detect transmissions. |
2969 | */ | 2969 | */ |
2970 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; | 2970 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : |
2971 | IWL_GOOD_CRC_TH_DISABLED; | ||
2971 | band = IEEE80211_BAND_5GHZ; | 2972 | band = IEEE80211_BAND_5GHZ; |
2972 | } else { | 2973 | } else { |
2973 | IWL_WARN(priv, "Invalid scan band count\n"); | 2974 | IWL_WARN(priv, "Invalid scan band count\n"); |
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index 81c753a617ab..9548cbb5012a 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -102,6 +102,7 @@ static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = { | |||
102 | { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, | 102 | { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, |
103 | { 0 } | 103 | { 0 } |
104 | }; | 104 | }; |
105 | MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl); | ||
105 | 106 | ||
106 | static struct zorro_driver zorro8390_driver = { | 107 | static struct zorro_driver zorro8390_driver = { |
107 | .name = "zorro8390", | 108 | .name = "zorro8390", |
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 166b67ea622f..219f79e2210a 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -30,23 +30,7 @@ | |||
30 | 30 | ||
31 | #define OP_BUFFER_FLAGS 0 | 31 | #define OP_BUFFER_FLAGS 0 |
32 | 32 | ||
33 | /* | 33 | static struct ring_buffer *op_ring_buffer; |
34 | * Read and write access is using spin locking. Thus, writing to the | ||
35 | * buffer by NMI handler (x86) could occur also during critical | ||
36 | * sections when reading the buffer. To avoid this, there are 2 | ||
37 | * buffers for independent read and write access. Read access is in | ||
38 | * process context only, write access only in the NMI handler. If the | ||
39 | * read buffer runs empty, both buffers are swapped atomically. There | ||
40 | * is potentially a small window during swapping where the buffers are | ||
41 | * disabled and samples could be lost. | ||
42 | * | ||
43 | * Using 2 buffers is a little bit overhead, but the solution is clear | ||
44 | * and does not require changes in the ring buffer implementation. It | ||
45 | * can be changed to a single buffer solution when the ring buffer | ||
46 | * access is implemented as non-locking atomic code. | ||
47 | */ | ||
48 | static struct ring_buffer *op_ring_buffer_read; | ||
49 | static struct ring_buffer *op_ring_buffer_write; | ||
50 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); | 34 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); |
51 | 35 | ||
52 | static void wq_sync_buffer(struct work_struct *work); | 36 | static void wq_sync_buffer(struct work_struct *work); |
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void) | |||
68 | 52 | ||
69 | void free_cpu_buffers(void) | 53 | void free_cpu_buffers(void) |
70 | { | 54 | { |
71 | if (op_ring_buffer_read) | 55 | if (op_ring_buffer) |
72 | ring_buffer_free(op_ring_buffer_read); | 56 | ring_buffer_free(op_ring_buffer); |
73 | op_ring_buffer_read = NULL; | 57 | op_ring_buffer = NULL; |
74 | if (op_ring_buffer_write) | ||
75 | ring_buffer_free(op_ring_buffer_write); | ||
76 | op_ring_buffer_write = NULL; | ||
77 | } | 58 | } |
78 | 59 | ||
79 | #define RB_EVENT_HDR_SIZE 4 | 60 | #define RB_EVENT_HDR_SIZE 4 |
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void) | |||
86 | unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + | 67 | unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + |
87 | RB_EVENT_HDR_SIZE); | 68 | RB_EVENT_HDR_SIZE); |
88 | 69 | ||
89 | op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); | 70 | op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); |
90 | if (!op_ring_buffer_read) | 71 | if (!op_ring_buffer) |
91 | goto fail; | ||
92 | op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); | ||
93 | if (!op_ring_buffer_write) | ||
94 | goto fail; | 72 | goto fail; |
95 | 73 | ||
96 | for_each_possible_cpu(i) { | 74 | for_each_possible_cpu(i) { |
@@ -162,16 +140,11 @@ struct op_sample | |||
162 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) | 140 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) |
163 | { | 141 | { |
164 | entry->event = ring_buffer_lock_reserve | 142 | entry->event = ring_buffer_lock_reserve |
165 | (op_ring_buffer_write, sizeof(struct op_sample) + | 143 | (op_ring_buffer, sizeof(struct op_sample) + |
166 | size * sizeof(entry->sample->data[0])); | 144 | size * sizeof(entry->sample->data[0])); |
167 | if (entry->event) | 145 | if (!entry->event) |
168 | entry->sample = ring_buffer_event_data(entry->event); | ||
169 | else | ||
170 | entry->sample = NULL; | ||
171 | |||
172 | if (!entry->sample) | ||
173 | return NULL; | 146 | return NULL; |
174 | 147 | entry->sample = ring_buffer_event_data(entry->event); | |
175 | entry->size = size; | 148 | entry->size = size; |
176 | entry->data = entry->sample->data; | 149 | entry->data = entry->sample->data; |
177 | 150 | ||
@@ -180,25 +153,16 @@ struct op_sample | |||
180 | 153 | ||
181 | int op_cpu_buffer_write_commit(struct op_entry *entry) | 154 | int op_cpu_buffer_write_commit(struct op_entry *entry) |
182 | { | 155 | { |
183 | return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event); | 156 | return ring_buffer_unlock_commit(op_ring_buffer, entry->event); |
184 | } | 157 | } |
185 | 158 | ||
186 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) | 159 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) |
187 | { | 160 | { |
188 | struct ring_buffer_event *e; | 161 | struct ring_buffer_event *e; |
189 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | 162 | e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL); |
190 | if (e) | 163 | if (!e) |
191 | goto event; | ||
192 | if (ring_buffer_swap_cpu(op_ring_buffer_read, | ||
193 | op_ring_buffer_write, | ||
194 | cpu)) | ||
195 | return NULL; | 164 | return NULL; |
196 | e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); | ||
197 | if (e) | ||
198 | goto event; | ||
199 | return NULL; | ||
200 | 165 | ||
201 | event: | ||
202 | entry->event = e; | 166 | entry->event = e; |
203 | entry->sample = ring_buffer_event_data(e); | 167 | entry->sample = ring_buffer_event_data(e); |
204 | entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) | 168 | entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) |
@@ -209,8 +173,7 @@ event: | |||
209 | 173 | ||
210 | unsigned long op_cpu_buffer_entries(int cpu) | 174 | unsigned long op_cpu_buffer_entries(int cpu) |
211 | { | 175 | { |
212 | return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) | 176 | return ring_buffer_entries_cpu(op_ring_buffer, cpu); |
213 | + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); | ||
214 | } | 177 | } |
215 | 178 | ||
216 | static int | 179 | static int |
@@ -356,8 +319,16 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | |||
356 | 319 | ||
357 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) | 320 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) |
358 | { | 321 | { |
359 | int is_kernel = !user_mode(regs); | 322 | int is_kernel; |
360 | unsigned long pc = profile_pc(regs); | 323 | unsigned long pc; |
324 | |||
325 | if (likely(regs)) { | ||
326 | is_kernel = !user_mode(regs); | ||
327 | pc = profile_pc(regs); | ||
328 | } else { | ||
329 | is_kernel = 0; /* This value will not be used */ | ||
330 | pc = ESCAPE_CODE; /* as this causes an early return. */ | ||
331 | } | ||
361 | 332 | ||
362 | __oprofile_add_ext_sample(pc, regs, event, is_kernel); | 333 | __oprofile_add_ext_sample(pc, regs, event, is_kernel); |
363 | } | 334 | } |
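The cpu_buffer.c rewrite above collapses oprofile's two per-purpose ring buffers into one: the generic ring buffer now supports a lockless NMI-context writer alongside a process-context reader, so the read/write buffer swap (and its window for lost samples) is no longer needed. The resulting access pattern, reduced to a sketch with an illustrative sample struct:

	#include <linux/ring_buffer.h>

	struct sample {			/* illustrative payload */
		unsigned long pc;
		unsigned long event;
	};

	static struct ring_buffer *buf;	/* e.g. ring_buffer_alloc(size, 0) */

	/* Writer side (may run in NMI context): reserve, fill, commit. */
	static void write_sample(unsigned long pc, unsigned long event)
	{
		struct ring_buffer_event *e;
		struct sample *s;

		e = ring_buffer_lock_reserve(buf, sizeof(*s));
		if (!e)
			return;
		s = ring_buffer_event_data(e);
		s->pc = pc;
		s->event = event;
		ring_buffer_unlock_commit(buf, e);
	}

	/* Reader side (process context): consume directly, no buffer swap. */
	static int read_sample(int cpu, struct sample *out)
	{
		struct ring_buffer_event *e = ring_buffer_consume(buf, cpu,
								  NULL, NULL);
		if (!e)
			return 0;
		*out = *(struct sample *)ring_buffer_event_data(e);
		return 1;
	}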
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index dc8a0428260d..b336cd9ee7a1 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c | |||
@@ -253,22 +253,26 @@ static int __init oprofile_init(void) | |||
253 | int err; | 253 | int err; |
254 | 254 | ||
255 | err = oprofile_arch_init(&oprofile_ops); | 255 | err = oprofile_arch_init(&oprofile_ops); |
256 | |||
257 | if (err < 0 || timer) { | 256 | if (err < 0 || timer) { |
258 | printk(KERN_INFO "oprofile: using timer interrupt.\n"); | 257 | printk(KERN_INFO "oprofile: using timer interrupt.\n"); |
259 | oprofile_timer_init(&oprofile_ops); | 258 | err = oprofile_timer_init(&oprofile_ops); |
259 | if (err) | ||
260 | goto out_arch; | ||
260 | } | 261 | } |
261 | |||
262 | err = oprofilefs_register(); | 262 | err = oprofilefs_register(); |
263 | if (err) | 263 | if (err) |
264 | oprofile_arch_exit(); | 264 | goto out_arch; |
265 | return 0; | ||
265 | 266 | ||
267 | out_arch: | ||
268 | oprofile_arch_exit(); | ||
266 | return err; | 269 | return err; |
267 | } | 270 | } |
268 | 271 | ||
269 | 272 | ||
270 | static void __exit oprofile_exit(void) | 273 | static void __exit oprofile_exit(void) |
271 | { | 274 | { |
275 | oprofile_timer_exit(); | ||
272 | oprofilefs_unregister(); | 276 | oprofilefs_unregister(); |
273 | oprofile_arch_exit(); | 277 | oprofile_arch_exit(); |
274 | } | 278 | } |
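Now that the timer path can fail, oprofile_init() switches to the usual goto-unwind shape: every failure after oprofile_arch_init() has to undo it exactly once, which a single out_arch label guarantees. A generic sketch of that structure (stub step functions, not the file's real code):

	static int step_a(void) { return 0; }	/* e.g. oprofile_arch_init() */
	static int step_b(void) { return 0; }	/* e.g. oprofile_timer_init() */
	static int step_c(void) { return 0; }	/* e.g. oprofilefs_register() */
	static void undo_a(void) { }		/* e.g. oprofile_arch_exit() */

	static int example_init(void)
	{
		int err;

		err = step_a();
		if (err < 0)
			return err;
		err = step_b();
		if (err)
			goto out_a;
		err = step_c();
		if (err)
			goto out_a;
		return 0;

	out_a:
		undo_a();
		return err;
	}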
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index cb92f5c98c1a..47e12cb4ee8b 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h | |||
@@ -34,7 +34,8 @@ struct super_block; | |||
34 | struct dentry; | 34 | struct dentry; |
35 | 35 | ||
36 | void oprofile_create_files(struct super_block *sb, struct dentry *root); | 36 | void oprofile_create_files(struct super_block *sb, struct dentry *root); |
37 | void oprofile_timer_init(struct oprofile_operations *ops); | 37 | int oprofile_timer_init(struct oprofile_operations *ops); |
38 | void oprofile_timer_exit(void); | ||
38 | 39 | ||
39 | int oprofile_set_backtrace(unsigned long depth); | 40 | int oprofile_set_backtrace(unsigned long depth); |
40 | int oprofile_set_timeout(unsigned long time); | 41 | int oprofile_set_timeout(unsigned long time); |
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index 333f915568c7..dc0ae4d14dff 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c | |||
@@ -13,34 +13,94 @@ | |||
13 | #include <linux/oprofile.h> | 13 | #include <linux/oprofile.h> |
14 | #include <linux/profile.h> | 14 | #include <linux/profile.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/cpu.h> | ||
17 | #include <linux/hrtimer.h> | ||
18 | #include <asm/irq_regs.h> | ||
16 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
17 | 20 | ||
18 | #include "oprof.h" | 21 | #include "oprof.h" |
19 | 22 | ||
20 | static int timer_notify(struct pt_regs *regs) | 23 | static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer); |
24 | |||
25 | static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer) | ||
26 | { | ||
27 | oprofile_add_sample(get_irq_regs(), 0); | ||
28 | hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC)); | ||
29 | return HRTIMER_RESTART; | ||
30 | } | ||
31 | |||
32 | static void __oprofile_hrtimer_start(void *unused) | ||
33 | { | ||
34 | struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer); | ||
35 | |||
36 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
37 | hrtimer->function = oprofile_hrtimer_notify; | ||
38 | |||
39 | hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC), | ||
40 | HRTIMER_MODE_REL_PINNED); | ||
41 | } | ||
42 | |||
43 | static int oprofile_hrtimer_start(void) | ||
21 | { | 44 | { |
22 | oprofile_add_sample(regs, 0); | 45 | on_each_cpu(__oprofile_hrtimer_start, NULL, 1); |
23 | return 0; | 46 | return 0; |
24 | } | 47 | } |
25 | 48 | ||
26 | static int timer_start(void) | 49 | static void __oprofile_hrtimer_stop(int cpu) |
27 | { | 50 | { |
28 | return register_timer_hook(timer_notify); | 51 | struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu); |
52 | |||
53 | hrtimer_cancel(hrtimer); | ||
29 | } | 54 | } |
30 | 55 | ||
56 | static void oprofile_hrtimer_stop(void) | ||
57 | { | ||
58 | int cpu; | ||
59 | |||
60 | for_each_online_cpu(cpu) | ||
61 | __oprofile_hrtimer_stop(cpu); | ||
62 | } | ||
31 | 63 | ||
32 | static void timer_stop(void) | 64 | static int __cpuinit oprofile_cpu_notify(struct notifier_block *self, |
65 | unsigned long action, void *hcpu) | ||
33 | { | 66 | { |
34 | unregister_timer_hook(timer_notify); | 67 | long cpu = (long) hcpu; |
68 | |||
69 | switch (action) { | ||
70 | case CPU_ONLINE: | ||
71 | case CPU_ONLINE_FROZEN: | ||
72 | smp_call_function_single(cpu, __oprofile_hrtimer_start, | ||
73 | NULL, 1); | ||
74 | break; | ||
75 | case CPU_DEAD: | ||
76 | case CPU_DEAD_FROZEN: | ||
77 | __oprofile_hrtimer_stop(cpu); | ||
78 | break; | ||
79 | } | ||
80 | return NOTIFY_OK; | ||
35 | } | 81 | } |
36 | 82 | ||
83 | static struct notifier_block __refdata oprofile_cpu_notifier = { | ||
84 | .notifier_call = oprofile_cpu_notify, | ||
85 | }; | ||
37 | 86 | ||
38 | void __init oprofile_timer_init(struct oprofile_operations *ops) | 87 | int __init oprofile_timer_init(struct oprofile_operations *ops) |
39 | { | 88 | { |
89 | int rc; | ||
90 | |||
91 | rc = register_hotcpu_notifier(&oprofile_cpu_notifier); | ||
92 | if (rc) | ||
93 | return rc; | ||
40 | ops->create_files = NULL; | 94 | ops->create_files = NULL; |
41 | ops->setup = NULL; | 95 | ops->setup = NULL; |
42 | ops->shutdown = NULL; | 96 | ops->shutdown = NULL; |
43 | ops->start = timer_start; | 97 | ops->start = oprofile_hrtimer_start; |
44 | ops->stop = timer_stop; | 98 | ops->stop = oprofile_hrtimer_stop; |
45 | ops->cpu_type = "timer"; | 99 | ops->cpu_type = "timer"; |
100 | return 0; | ||
101 | } | ||
102 | |||
103 | void __exit oprofile_timer_exit(void) | ||
104 | { | ||
105 | unregister_hotcpu_notifier(&oprofile_cpu_notifier); | ||
46 | } | 106 | } |
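The timer_int.c rewrite drops register_timer_hook() in favour of a per-CPU hrtimer that re-arms itself every TICK_NSEC and is started or cancelled as CPUs come and go. Stripped of the hotplug-notifier plumbing, the sampling core looks roughly like this (record_sample() is a stand-in for oprofile_add_sample()):

	#include <linux/hrtimer.h>
	#include <linux/jiffies.h>
	#include <linux/percpu.h>
	#include <asm/irq_regs.h>
	#include <asm/ptrace.h>

	static DEFINE_PER_CPU(struct hrtimer, sample_hrtimer);

	static void record_sample(struct pt_regs *regs) { /* stand-in */ }

	static enum hrtimer_restart sample_fire(struct hrtimer *t)
	{
		record_sample(get_irq_regs());
		hrtimer_forward_now(t, ns_to_ktime(TICK_NSEC));
		return HRTIMER_RESTART;		/* keep firing once per tick */
	}

	/* Run on each CPU, e.g. via on_each_cpu(..., NULL, 1). */
	static void sample_start_this_cpu(void *unused)
	{
		struct hrtimer *t = &__get_cpu_var(sample_hrtimer);

		hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		t->function = sample_fire;
		/* Pinned, so each timer keeps sampling its own CPU. */
		hrtimer_start(t, ns_to_ktime(TICK_NSEC),
			      HRTIMER_MODE_REL_PINNED);
	}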
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 417312528ddf..371dc564e2e4 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -3626,14 +3626,15 @@ static void intel_iommu_detach_device(struct iommu_domain *domain, | |||
3626 | domain_remove_one_dev_info(dmar_domain, pdev); | 3626 | domain_remove_one_dev_info(dmar_domain, pdev); |
3627 | } | 3627 | } |
3628 | 3628 | ||
3629 | static int intel_iommu_map_range(struct iommu_domain *domain, | 3629 | static int intel_iommu_map(struct iommu_domain *domain, |
3630 | unsigned long iova, phys_addr_t hpa, | 3630 | unsigned long iova, phys_addr_t hpa, |
3631 | size_t size, int iommu_prot) | 3631 | int gfp_order, int iommu_prot) |
3632 | { | 3632 | { |
3633 | struct dmar_domain *dmar_domain = domain->priv; | 3633 | struct dmar_domain *dmar_domain = domain->priv; |
3634 | u64 max_addr; | 3634 | u64 max_addr; |
3635 | int addr_width; | 3635 | int addr_width; |
3636 | int prot = 0; | 3636 | int prot = 0; |
3637 | size_t size; | ||
3637 | int ret; | 3638 | int ret; |
3638 | 3639 | ||
3639 | if (iommu_prot & IOMMU_READ) | 3640 | if (iommu_prot & IOMMU_READ) |
@@ -3643,6 +3644,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain, | |||
3643 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) | 3644 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) |
3644 | prot |= DMA_PTE_SNP; | 3645 | prot |= DMA_PTE_SNP; |
3645 | 3646 | ||
3647 | size = PAGE_SIZE << gfp_order; | ||
3646 | max_addr = iova + size; | 3648 | max_addr = iova + size; |
3647 | if (dmar_domain->max_addr < max_addr) { | 3649 | if (dmar_domain->max_addr < max_addr) { |
3648 | int min_agaw; | 3650 | int min_agaw; |
@@ -3669,19 +3671,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain, | |||
3669 | return ret; | 3671 | return ret; |
3670 | } | 3672 | } |
3671 | 3673 | ||
3672 | static void intel_iommu_unmap_range(struct iommu_domain *domain, | 3674 | static int intel_iommu_unmap(struct iommu_domain *domain, |
3673 | unsigned long iova, size_t size) | 3675 | unsigned long iova, int gfp_order) |
3674 | { | 3676 | { |
3675 | struct dmar_domain *dmar_domain = domain->priv; | 3677 | struct dmar_domain *dmar_domain = domain->priv; |
3676 | 3678 | size_t size = PAGE_SIZE << gfp_order; | |
3677 | if (!size) | ||
3678 | return; | ||
3679 | 3679 | ||
3680 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 3680 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
3681 | (iova + size - 1) >> VTD_PAGE_SHIFT); | 3681 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
3682 | 3682 | ||
3683 | if (dmar_domain->max_addr == iova + size) | 3683 | if (dmar_domain->max_addr == iova + size) |
3684 | dmar_domain->max_addr = iova; | 3684 | dmar_domain->max_addr = iova; |
3685 | |||
3686 | return gfp_order; | ||
3685 | } | 3687 | } |
3686 | 3688 | ||
3687 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 3689 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
@@ -3714,8 +3716,8 @@ static struct iommu_ops intel_iommu_ops = { | |||
3714 | .domain_destroy = intel_iommu_domain_destroy, | 3716 | .domain_destroy = intel_iommu_domain_destroy, |
3715 | .attach_dev = intel_iommu_attach_device, | 3717 | .attach_dev = intel_iommu_attach_device, |
3716 | .detach_dev = intel_iommu_detach_device, | 3718 | .detach_dev = intel_iommu_detach_device, |
3717 | .map = intel_iommu_map_range, | 3719 | .map = intel_iommu_map, |
3718 | .unmap = intel_iommu_unmap_range, | 3720 | .unmap = intel_iommu_unmap, |
3719 | .iova_to_phys = intel_iommu_iova_to_phys, | 3721 | .iova_to_phys = intel_iommu_iova_to_phys, |
3720 | .domain_has_cap = intel_iommu_domain_has_cap, | 3722 | .domain_has_cap = intel_iommu_domain_has_cap, |
3721 | }; | 3723 | }; |
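The intel-iommu conversion moves the ->map/->unmap callbacks from an arbitrary byte size to a page order: the mapped length is always PAGE_SIZE << gfp_order, and unmap reports the order it released. Callers therefore express a byte count as an order first; a hedged sketch of that arithmetic, assuming the order-based iommu_map() prototype of this period (example_map() and its parameters are illustrative):

	#include <linux/iommu.h>
	#include <asm/page.h>		/* get_order(), PAGE_SIZE */

	static int example_map(struct iommu_domain *dom, unsigned long iova,
			       phys_addr_t paddr, size_t bytes, int prot)
	{
		int order = get_order(bytes);	/* rounds up to 2^order pages */

		/* The driver maps PAGE_SIZE << order bytes, which may be more
		 * than requested; iommu_map() is assumed to take
		 * (domain, iova, paddr, order, prot) at this point. */
		return iommu_map(dom, iova, paddr, order, prot);
	}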
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4fe36d2e1049..19b111383f62 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -838,65 +838,11 @@ static void pci_bus_dump_resources(struct pci_bus *bus) | |||
838 | } | 838 | } |
839 | } | 839 | } |
840 | 840 | ||
841 | static int __init pci_bus_get_depth(struct pci_bus *bus) | ||
842 | { | ||
843 | int depth = 0; | ||
844 | struct pci_dev *dev; | ||
845 | |||
846 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
847 | int ret; | ||
848 | struct pci_bus *b = dev->subordinate; | ||
849 | if (!b) | ||
850 | continue; | ||
851 | |||
852 | ret = pci_bus_get_depth(b); | ||
853 | if (ret + 1 > depth) | ||
854 | depth = ret + 1; | ||
855 | } | ||
856 | |||
857 | return depth; | ||
858 | } | ||
859 | static int __init pci_get_max_depth(void) | ||
860 | { | ||
861 | int depth = 0; | ||
862 | struct pci_bus *bus; | ||
863 | |||
864 | list_for_each_entry(bus, &pci_root_buses, node) { | ||
865 | int ret; | ||
866 | |||
867 | ret = pci_bus_get_depth(bus); | ||
868 | if (ret > depth) | ||
869 | depth = ret; | ||
870 | } | ||
871 | |||
872 | return depth; | ||
873 | } | ||
874 | |||
875 | /* | ||
876 | * first try will not touch pci bridge res | ||
877 | * second and later try will clear small leaf bridge res | ||
878 | * will stop till to the max deepth if can not find good one | ||
879 | */ | ||
880 | void __init | 841 | void __init |
881 | pci_assign_unassigned_resources(void) | 842 | pci_assign_unassigned_resources(void) |
882 | { | 843 | { |
883 | struct pci_bus *bus; | 844 | struct pci_bus *bus; |
884 | int tried_times = 0; | ||
885 | enum release_type rel_type = leaf_only; | ||
886 | struct resource_list_x head, *list; | ||
887 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | ||
888 | IORESOURCE_PREFETCH; | ||
889 | unsigned long failed_type; | ||
890 | int max_depth = pci_get_max_depth(); | ||
891 | int pci_try_num; | ||
892 | 845 | ||
893 | head.next = NULL; | ||
894 | |||
895 | pci_try_num = max_depth + 1; | ||
896 | printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", | ||
897 | max_depth, pci_try_num); | ||
898 | |||
899 | again: | ||
900 | /* Depth first, calculate sizes and alignments of all | 846 | /* Depth first, calculate sizes and alignments of all |
901 | subordinate buses. */ | 847 | subordinate buses. */ |
902 | list_for_each_entry(bus, &pci_root_buses, node) { | 848 | list_for_each_entry(bus, &pci_root_buses, node) { |
@@ -904,65 +850,9 @@ again: | |||
904 | } | 850 | } |
905 | /* Depth last, allocate resources and update the hardware. */ | 851 | /* Depth last, allocate resources and update the hardware. */ |
906 | list_for_each_entry(bus, &pci_root_buses, node) { | 852 | list_for_each_entry(bus, &pci_root_buses, node) { |
907 | __pci_bus_assign_resources(bus, &head); | 853 | pci_bus_assign_resources(bus); |
908 | } | ||
909 | tried_times++; | ||
910 | |||
911 | /* any device complain? */ | ||
912 | if (!head.next) | ||
913 | goto enable_and_dump; | ||
914 | failed_type = 0; | ||
915 | for (list = head.next; list;) { | ||
916 | failed_type |= list->flags; | ||
917 | list = list->next; | ||
918 | } | ||
919 | /* | ||
920 | * io port are tight, don't try extra | ||
921 | * or if reach the limit, don't want to try more | ||
922 | */ | ||
923 | failed_type &= type_mask; | ||
924 | if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) { | ||
925 | free_failed_list(&head); | ||
926 | goto enable_and_dump; | ||
927 | } | ||
928 | |||
929 | printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", | ||
930 | tried_times + 1); | ||
931 | |||
932 | /* third times and later will not check if it is leaf */ | ||
933 | if ((tried_times + 1) > 2) | ||
934 | rel_type = whole_subtree; | ||
935 | |||
936 | /* | ||
937 | * Try to release leaf bridge's resources that doesn't fit resource of | ||
938 | * child device under that bridge | ||
939 | */ | ||
940 | for (list = head.next; list;) { | ||
941 | bus = list->dev->bus; | ||
942 | pci_bus_release_bridge_resources(bus, list->flags & type_mask, | ||
943 | rel_type); | ||
944 | list = list->next; | ||
945 | } | ||
946 | /* restore size and flags */ | ||
947 | for (list = head.next; list;) { | ||
948 | struct resource *res = list->res; | ||
949 | |||
950 | res->start = list->start; | ||
951 | res->end = list->end; | ||
952 | res->flags = list->flags; | ||
953 | if (list->dev->subordinate) | ||
954 | res->flags = 0; | ||
955 | |||
956 | list = list->next; | ||
957 | } | ||
958 | free_failed_list(&head); | ||
959 | |||
960 | goto again; | ||
961 | |||
962 | enable_and_dump: | ||
963 | /* Depth last, update the hardware. */ | ||
964 | list_for_each_entry(bus, &pci_root_buses, node) | ||
965 | pci_enable_bridges(bus); | 854 | pci_enable_bridges(bus); |
855 | } | ||
966 | 856 | ||
967 | /* dump the resource on buses */ | 857 | /* dump the resource on buses */ |
968 | list_for_each_entry(bus, &pci_root_buses, node) { | 858 | list_for_each_entry(bus, &pci_root_buses, node) { |
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 75ed866e6953..c3383750e333 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c | |||
@@ -671,20 +671,22 @@ static int pccardd(void *__skt) | |||
671 | socket_remove(skt); | 671 | socket_remove(skt); |
672 | if (sysfs_events & PCMCIA_UEVENT_INSERT) | 672 | if (sysfs_events & PCMCIA_UEVENT_INSERT) |
673 | socket_insert(skt); | 673 | socket_insert(skt); |
674 | if ((sysfs_events & PCMCIA_UEVENT_RESUME) && | ||
675 | !(skt->state & SOCKET_CARDBUS)) { | ||
676 | ret = socket_resume(skt); | ||
677 | if (!ret && skt->callback) | ||
678 | skt->callback->resume(skt); | ||
679 | } | ||
680 | if ((sysfs_events & PCMCIA_UEVENT_SUSPEND) && | 674 | if ((sysfs_events & PCMCIA_UEVENT_SUSPEND) && |
681 | !(skt->state & SOCKET_CARDBUS)) { | 675 | !(skt->state & SOCKET_CARDBUS)) { |
682 | if (skt->callback) | 676 | if (skt->callback) |
683 | ret = skt->callback->suspend(skt); | 677 | ret = skt->callback->suspend(skt); |
684 | else | 678 | else |
685 | ret = 0; | 679 | ret = 0; |
686 | if (!ret) | 680 | if (!ret) { |
687 | socket_suspend(skt); | 681 | socket_suspend(skt); |
682 | msleep(100); | ||
683 | } | ||
684 | } | ||
685 | if ((sysfs_events & PCMCIA_UEVENT_RESUME) && | ||
686 | !(skt->state & SOCKET_CARDBUS)) { | ||
687 | ret = socket_resume(skt); | ||
688 | if (!ret && skt->callback) | ||
689 | skt->callback->resume(skt); | ||
688 | } | 690 | } |
689 | if ((sysfs_events & PCMCIA_UEVENT_REQUERY) && | 691 | if ((sysfs_events & PCMCIA_UEVENT_REQUERY) && |
690 | !(skt->state & SOCKET_CARDBUS)) { | 692 | !(skt->state & SOCKET_CARDBUS)) { |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 508f94a2a78d..041eee43fd8d 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -1283,6 +1283,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1283 | destroy_cis_cache(skt); | 1283 | destroy_cis_cache(skt); |
1284 | kfree(skt->fake_cis); | 1284 | kfree(skt->fake_cis); |
1285 | skt->fake_cis = NULL; | 1285 | skt->fake_cis = NULL; |
1286 | s->functions = 0; | ||
1286 | mutex_unlock(&s->ops_mutex); | 1287 | mutex_unlock(&s->ops_mutex); |
1287 | /* now, add the new card */ | 1288 | /* now, add the new card */ |
1288 | ds_event(skt, CS_EVENT_CARD_INSERTION, | 1289 | ds_event(skt, CS_EVENT_CARD_INSERTION, |
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index 104e73d5d86c..7631faa0cadd 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c | |||
@@ -711,7 +711,7 @@ static int ds_open(struct inode *inode, struct file *file) | |||
711 | warning_printed = 1; | 711 | warning_printed = 1; |
712 | } | 712 | } |
713 | 713 | ||
714 | if (s->pcmcia_state.present) | 714 | if (atomic_read(&s->present)) |
715 | queue_event(user, CS_EVENT_CARD_INSERTION); | 715 | queue_event(user, CS_EVENT_CARD_INSERTION); |
716 | out: | 716 | out: |
717 | unlock_kernel(); | 717 | unlock_kernel(); |
@@ -770,9 +770,6 @@ static ssize_t ds_read(struct file *file, char __user *buf, | |||
770 | return -EIO; | 770 | return -EIO; |
771 | 771 | ||
772 | s = user->socket; | 772 | s = user->socket; |
773 | if (s->pcmcia_state.dead) | ||
774 | return -EIO; | ||
775 | |||
776 | ret = wait_event_interruptible(s->queue, !queue_empty(user)); | 773 | ret = wait_event_interruptible(s->queue, !queue_empty(user)); |
777 | if (ret == 0) | 774 | if (ret == 0) |
778 | ret = put_user(get_queued_event(user), (int __user *)buf) ? -EFAULT : 4; | 775 | ret = put_user(get_queued_event(user), (int __user *)buf) ? -EFAULT : 4; |
@@ -838,8 +835,6 @@ static int ds_ioctl(struct inode *inode, struct file *file, | |||
838 | return -EIO; | 835 | return -EIO; |
839 | 836 | ||
840 | s = user->socket; | 837 | s = user->socket; |
841 | if (s->pcmcia_state.dead) | ||
842 | return -EIO; | ||
843 | 838 | ||
844 | size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; | 839 | size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; |
845 | if (size > sizeof(ds_ioctl_arg_t)) | 840 | if (size > sizeof(ds_ioctl_arg_t)) |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 35bb44af49b3..100e4d9372f1 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -274,26 +274,6 @@ static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev, | |||
274 | pnp_add_bus_resource(dev, start, end); | 274 | pnp_add_bus_resource(dev, start, end); |
275 | } | 275 | } |
276 | 276 | ||
277 | static u64 addr_space_length(struct pnp_dev *dev, u64 min, u64 max, u64 len) | ||
278 | { | ||
279 | u64 max_len; | ||
280 | |||
281 | max_len = max - min + 1; | ||
282 | if (len <= max_len) | ||
283 | return len; | ||
284 | |||
285 | /* | ||
286 | * Per 6.4.3.5, _LEN cannot exceed _MAX - _MIN + 1, but some BIOSes | ||
287 | * don't do this correctly, e.g., | ||
288 | * https://bugzilla.kernel.org/show_bug.cgi?id=15480 | ||
289 | */ | ||
290 | dev_info(&dev->dev, | ||
291 | "resource length %#llx doesn't fit in %#llx-%#llx, trimming\n", | ||
292 | (unsigned long long) len, (unsigned long long) min, | ||
293 | (unsigned long long) max); | ||
294 | return max_len; | ||
295 | } | ||
296 | |||
297 | static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | 277 | static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, |
298 | struct acpi_resource *res) | 278 | struct acpi_resource *res) |
299 | { | 279 | { |
@@ -309,7 +289,8 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | |||
309 | return; | 289 | return; |
310 | } | 290 | } |
311 | 291 | ||
312 | len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); | 292 | /* Windows apparently computes length rather than using _LEN */ |
293 | len = p->maximum - p->minimum + 1; | ||
313 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | 294 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; |
314 | 295 | ||
315 | if (p->resource_type == ACPI_MEMORY_RANGE) | 296 | if (p->resource_type == ACPI_MEMORY_RANGE) |
@@ -330,7 +311,8 @@ static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | |||
330 | int window; | 311 | int window; |
331 | u64 len; | 312 | u64 len; |
332 | 313 | ||
333 | len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); | 314 | /* Windows apparently computes length rather than using _LEN */ |
315 | len = p->maximum - p->minimum + 1; | ||
334 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | 316 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; |
335 | 317 | ||
336 | if (p->resource_type == ACPI_MEMORY_RANGE) | 318 | if (p->resource_type == ACPI_MEMORY_RANGE) |
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index 2e54e6a23c72..e3446ab8b563 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c | |||
@@ -211,6 +211,8 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res) | |||
211 | if (tres->flags & IORESOURCE_IO) { | 211 | if (tres->flags & IORESOURCE_IO) { |
212 | if (cannot_compare(tres->flags)) | 212 | if (cannot_compare(tres->flags)) |
213 | continue; | 213 | continue; |
214 | if (tres->flags & IORESOURCE_WINDOW) | ||
215 | continue; | ||
214 | tport = &tres->start; | 216 | tport = &tres->start; |
215 | tend = &tres->end; | 217 | tend = &tres->end; |
216 | if (ranged_conflict(port, end, tport, tend)) | 218 | if (ranged_conflict(port, end, tport, tend)) |
@@ -271,6 +273,8 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res) | |||
271 | if (tres->flags & IORESOURCE_MEM) { | 273 | if (tres->flags & IORESOURCE_MEM) { |
272 | if (cannot_compare(tres->flags)) | 274 | if (cannot_compare(tres->flags)) |
273 | continue; | 275 | continue; |
276 | if (tres->flags & IORESOURCE_WINDOW) | ||
277 | continue; | ||
274 | taddr = &tres->start; | 278 | taddr = &tres->start; |
275 | tend = &tres->end; | 279 | tend = &tres->end; |
276 | if (ranged_conflict(addr, end, taddr, tend)) | 280 | if (ranged_conflict(addr, end, taddr, tend)) |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index acf222f91f5a..fa2339cb1681 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -37,6 +37,9 @@ | |||
37 | */ | 37 | */ |
38 | #define DASD_CHANQ_MAX_SIZE 4 | 38 | #define DASD_CHANQ_MAX_SIZE 4 |
39 | 39 | ||
40 | #define DASD_SLEEPON_START_TAG (void *) 1 | ||
41 | #define DASD_SLEEPON_END_TAG (void *) 2 | ||
42 | |||
40 | /* | 43 | /* |
41 | * SECTION: exported variables of dasd.c | 44 | * SECTION: exported variables of dasd.c |
42 | */ | 45 | */ |
@@ -1472,7 +1475,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr) | |||
1472 | */ | 1475 | */ |
1473 | static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) | 1476 | static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) |
1474 | { | 1477 | { |
1475 | wake_up((wait_queue_head_t *) data); | 1478 | spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); |
1479 | cqr->callback_data = DASD_SLEEPON_END_TAG; | ||
1480 | spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); | ||
1481 | wake_up(&generic_waitq); | ||
1476 | } | 1482 | } |
1477 | 1483 | ||
1478 | static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) | 1484 | static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) |
@@ -1482,10 +1488,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) | |||
1482 | 1488 | ||
1483 | device = cqr->startdev; | 1489 | device = cqr->startdev; |
1484 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1490 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1485 | rc = ((cqr->status == DASD_CQR_DONE || | 1491 | rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); |
1486 | cqr->status == DASD_CQR_NEED_ERP || | ||
1487 | cqr->status == DASD_CQR_TERMINATED) && | ||
1488 | list_empty(&cqr->devlist)); | ||
1489 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1492 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1490 | return rc; | 1493 | return rc; |
1491 | } | 1494 | } |
@@ -1573,7 +1576,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) | |||
1573 | wait_event(generic_waitq, !(device->stopped)); | 1576 | wait_event(generic_waitq, !(device->stopped)); |
1574 | 1577 | ||
1575 | cqr->callback = dasd_wakeup_cb; | 1578 | cqr->callback = dasd_wakeup_cb; |
1576 | cqr->callback_data = (void *) &generic_waitq; | 1579 | cqr->callback_data = DASD_SLEEPON_START_TAG; |
1577 | dasd_add_request_tail(cqr); | 1580 | dasd_add_request_tail(cqr); |
1578 | if (interruptible) { | 1581 | if (interruptible) { |
1579 | rc = wait_event_interruptible( | 1582 | rc = wait_event_interruptible( |
@@ -1652,7 +1655,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | |||
1652 | } | 1655 | } |
1653 | 1656 | ||
1654 | cqr->callback = dasd_wakeup_cb; | 1657 | cqr->callback = dasd_wakeup_cb; |
1655 | cqr->callback_data = (void *) &generic_waitq; | 1658 | cqr->callback_data = DASD_SLEEPON_START_TAG; |
1656 | cqr->status = DASD_CQR_QUEUED; | 1659 | cqr->status = DASD_CQR_QUEUED; |
1657 | list_add(&cqr->devlist, &device->ccw_queue); | 1660 | list_add(&cqr->devlist, &device->ccw_queue); |
1658 | 1661 | ||
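The dasd change stops deriving completion from the CQR status and instead tags the request explicitly: callback_data holds a start tag while the request is queued and is flipped to an end tag, under the ccw device lock, by the wakeup callback; the waiter simply tests for the end tag. A reduced sketch of that handshake with generic names and a plain spinlock standing in for the ccwdev lock:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	#define SLEEPON_START_TAG	((void *) 1)
	#define SLEEPON_END_TAG		((void *) 2)

	static DECLARE_WAIT_QUEUE_HEAD(waitq);
	static DEFINE_SPINLOCK(lock);

	struct request_ctx {
		void *callback_data;
	};

	static void request_done(struct request_ctx *req)	/* completion path */
	{
		spin_lock_irq(&lock);
		req->callback_data = SLEEPON_END_TAG;
		spin_unlock_irq(&lock);
		wake_up(&waitq);
	}

	static int request_finished(struct request_ctx *req)
	{
		int done;

		spin_lock_irq(&lock);
		done = (req->callback_data == SLEEPON_END_TAG);
		spin_unlock_irq(&lock);
		return done;
	}

	static void submit_and_wait(struct request_ctx *req)
	{
		req->callback_data = SLEEPON_START_TAG;
		/* ...queue the request; the driver later calls request_done()... */
		wait_event(waitq, request_finished(req));
	}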
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 9201afe65609..7f87979da22d 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
@@ -4724,6 +4724,10 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) | |||
4724 | BUG_ON((unsigned long)asc_dvc->overrun_buf & 7); | 4724 | BUG_ON((unsigned long)asc_dvc->overrun_buf & 7); |
4725 | asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf, | 4725 | asc_dvc->overrun_dma = dma_map_single(board->dev, asc_dvc->overrun_buf, |
4726 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); | 4726 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); |
4727 | if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) { | ||
4728 | warn_code = -ENOMEM; | ||
4729 | goto err_dma_map; | ||
4730 | } | ||
4727 | phy_addr = cpu_to_le32(asc_dvc->overrun_dma); | 4731 | phy_addr = cpu_to_le32(asc_dvc->overrun_dma); |
4728 | AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D, | 4732 | AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D, |
4729 | (uchar *)&phy_addr, 1); | 4733 | (uchar *)&phy_addr, 1); |
@@ -4739,14 +4743,23 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) | |||
4739 | AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); | 4743 | AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); |
4740 | if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { | 4744 | if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { |
4741 | asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; | 4745 | asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; |
4742 | return warn_code; | 4746 | warn_code = UW_ERR; |
4747 | goto err_mcode_start; | ||
4743 | } | 4748 | } |
4744 | if (AscStartChip(iop_base) != 1) { | 4749 | if (AscStartChip(iop_base) != 1) { |
4745 | asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; | 4750 | asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; |
4746 | return warn_code; | 4751 | warn_code = UW_ERR; |
4752 | goto err_mcode_start; | ||
4747 | } | 4753 | } |
4748 | 4754 | ||
4749 | return warn_code; | 4755 | return warn_code; |
4756 | |||
4757 | err_mcode_start: | ||
4758 | dma_unmap_single(board->dev, asc_dvc->overrun_dma, | ||
4759 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); | ||
4760 | err_dma_map: | ||
4761 | asc_dvc->overrun_dma = 0; | ||
4762 | return warn_code; | ||
4750 | } | 4763 | } |
4751 | 4764 | ||
4752 | static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) | 4765 | static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) |
@@ -4802,6 +4815,8 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) | |||
4802 | } | 4815 | } |
4803 | release_firmware(fw); | 4816 | release_firmware(fw); |
4804 | warn_code |= AscInitMicroCodeVar(asc_dvc); | 4817 | warn_code |= AscInitMicroCodeVar(asc_dvc); |
4818 | if (!asc_dvc->overrun_dma) | ||
4819 | return warn_code; | ||
4805 | asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC; | 4820 | asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC; |
4806 | AscEnableInterrupt(iop_base); | 4821 | AscEnableInterrupt(iop_base); |
4807 | return warn_code; | 4822 | return warn_code; |
@@ -7978,9 +7993,10 @@ static int advansys_reset(struct scsi_cmnd *scp) | |||
7978 | status = AscInitAsc1000Driver(asc_dvc); | 7993 | status = AscInitAsc1000Driver(asc_dvc); |
7979 | 7994 | ||
7980 | /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ | 7995 | /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ |
7981 | if (asc_dvc->err_code) { | 7996 | if (asc_dvc->err_code || !asc_dvc->overrun_dma) { |
7982 | scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " | 7997 | scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " |
7983 | "0x%x\n", asc_dvc->err_code); | 7998 | "0x%x, status: 0x%x\n", asc_dvc->err_code, |
7999 | status); | ||
7984 | ret = FAILED; | 8000 | ret = FAILED; |
7985 | } else if (status) { | 8001 | } else if (status) { |
7986 | scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: " | 8002 | scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: " |
@@ -12311,7 +12327,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, | |||
12311 | asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); | 12327 | asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); |
12312 | if (!asc_dvc_varp->overrun_buf) { | 12328 | if (!asc_dvc_varp->overrun_buf) { |
12313 | ret = -ENOMEM; | 12329 | ret = -ENOMEM; |
12314 | goto err_free_wide_mem; | 12330 | goto err_free_irq; |
12315 | } | 12331 | } |
12316 | warn_code = AscInitAsc1000Driver(asc_dvc_varp); | 12332 | warn_code = AscInitAsc1000Driver(asc_dvc_varp); |
12317 | 12333 | ||
@@ -12320,30 +12336,36 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, | |||
12320 | "warn 0x%x, error 0x%x\n", | 12336 | "warn 0x%x, error 0x%x\n", |
12321 | asc_dvc_varp->init_state, warn_code, | 12337 | asc_dvc_varp->init_state, warn_code, |
12322 | asc_dvc_varp->err_code); | 12338 | asc_dvc_varp->err_code); |
12323 | if (asc_dvc_varp->err_code) { | 12339 | if (!asc_dvc_varp->overrun_dma) { |
12324 | ret = -ENODEV; | 12340 | ret = -ENODEV; |
12325 | kfree(asc_dvc_varp->overrun_buf); | 12341 | goto err_free_mem; |
12326 | } | 12342 | } |
12327 | } | 12343 | } |
12328 | } else { | 12344 | } else { |
12329 | if (advansys_wide_init_chip(shost)) | 12345 | if (advansys_wide_init_chip(shost)) { |
12330 | ret = -ENODEV; | 12346 | ret = -ENODEV; |
12347 | goto err_free_mem; | ||
12348 | } | ||
12331 | } | 12349 | } |
12332 | 12350 | ||
12333 | if (ret) | ||
12334 | goto err_free_wide_mem; | ||
12335 | |||
12336 | ASC_DBG_PRT_SCSI_HOST(2, shost); | 12351 | ASC_DBG_PRT_SCSI_HOST(2, shost); |
12337 | 12352 | ||
12338 | ret = scsi_add_host(shost, boardp->dev); | 12353 | ret = scsi_add_host(shost, boardp->dev); |
12339 | if (ret) | 12354 | if (ret) |
12340 | goto err_free_wide_mem; | 12355 | goto err_free_mem; |
12341 | 12356 | ||
12342 | scsi_scan_host(shost); | 12357 | scsi_scan_host(shost); |
12343 | return 0; | 12358 | return 0; |
12344 | 12359 | ||
12345 | err_free_wide_mem: | 12360 | err_free_mem: |
12346 | advansys_wide_free_mem(boardp); | 12361 | if (ASC_NARROW_BOARD(boardp)) { |
12362 | if (asc_dvc_varp->overrun_dma) | ||
12363 | dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma, | ||
12364 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); | ||
12365 | kfree(asc_dvc_varp->overrun_buf); | ||
12366 | } else | ||
12367 | advansys_wide_free_mem(boardp); | ||
12368 | err_free_irq: | ||
12347 | free_irq(boardp->irq, shost); | 12369 | free_irq(boardp->irq, shost); |
12348 | err_free_dma: | 12370 | err_free_dma: |
12349 | #ifdef CONFIG_ISA | 12371 | #ifdef CONFIG_ISA |
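The advansys hunks add the long-missing check on dma_map_single(): the mapping can fail, and continuing with an unusable DMA address is what the new err_dma_map/err_mcode_start unwinding prevents; overrun_dma == 0 is then used by the callers as the "mapping never happened" marker. The general shape of the check, as a hedged sketch with generic names:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int map_overrun_buf(struct device *dev, void *buf, size_t size,
				   dma_addr_t *dma)
	{
		*dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			*dma = 0;	/* callers test this to skip the unmap */
			return -ENOMEM;
		}
		return 0;
	}

	/* Teardown: only unmap what was actually mapped. */
	static void unmap_overrun_buf(struct device *dev, dma_addr_t dma,
				      size_t size)
	{
		if (dma)
			dma_unmap_single(dev, dma, size, DMA_FROM_DEVICE);
	}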
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 6d5ae4474bb3..633e09036357 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -471,12 +471,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) | |||
471 | 471 | ||
472 | WARN_ON(hdrlength >= 256); | 472 | WARN_ON(hdrlength >= 256); |
473 | hdr->hlength = hdrlength & 0xFF; | 473 | hdr->hlength = hdrlength & 0xFF; |
474 | hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); | ||
474 | 475 | ||
475 | if (session->tt->init_task && session->tt->init_task(task)) | 476 | if (session->tt->init_task && session->tt->init_task(task)) |
476 | return -EIO; | 477 | return -EIO; |
477 | 478 | ||
478 | task->state = ISCSI_TASK_RUNNING; | 479 | task->state = ISCSI_TASK_RUNNING; |
479 | hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); | ||
480 | session->cmdsn++; | 480 | session->cmdsn++; |
481 | 481 | ||
482 | conn->scsicmd_pdus_cnt++; | 482 | conn->scsicmd_pdus_cnt++; |
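
The libiscsi hunk moves the CmdSN assignment ahead of the transport's init_task() hook, presumably because a transport may copy or transmit the header from inside that hook; assigning the field afterwards would let a stale value escape. A tiny sketch of the ordering rule follows; pdu_hdr, queue_pdu and init_hook are made-up names, not libiscsi symbols.

    /* Fill in every header field before calling a hook that may
     * serialize or send the PDU. */
    struct pdu_hdr {
        unsigned int cmdsn;
    };

    static int queue_pdu(struct pdu_hdr *hdr, unsigned int cmdsn,
                         int (*init_hook)(struct pdu_hdr *))
    {
        hdr->cmdsn = cmdsn;             /* must happen before the hook runs */

        if (init_hook && init_hook(hdr))
            return -1;                  /* the hook may already have used hdr */

        return 0;
    }
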
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index b00efd19aadb..88f744672576 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -395,11 +395,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev, | |||
395 | void sas_ata_task_abort(struct sas_task *task) | 395 | void sas_ata_task_abort(struct sas_task *task) |
396 | { | 396 | { |
397 | struct ata_queued_cmd *qc = task->uldd_task; | 397 | struct ata_queued_cmd *qc = task->uldd_task; |
398 | struct request_queue *q = qc->scsicmd->device->request_queue; | ||
398 | struct completion *waiting; | 399 | struct completion *waiting; |
400 | unsigned long flags; | ||
399 | 401 | ||
400 | /* Bounce SCSI-initiated commands to the SCSI EH */ | 402 | /* Bounce SCSI-initiated commands to the SCSI EH */ |
401 | if (qc->scsicmd) { | 403 | if (qc->scsicmd) { |
404 | spin_lock_irqsave(q->queue_lock, flags); | ||
402 | blk_abort_request(qc->scsicmd->request); | 405 | blk_abort_request(qc->scsicmd->request); |
406 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
403 | scsi_schedule_eh(qc->scsicmd->device->host); | 407 | scsi_schedule_eh(qc->scsicmd->device->host); |
404 | return; | 408 | return; |
405 | } | 409 | } |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 2660e1b4569a..822835055cef 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -1030,6 +1030,8 @@ int __sas_task_abort(struct sas_task *task) | |||
1030 | void sas_task_abort(struct sas_task *task) | 1030 | void sas_task_abort(struct sas_task *task) |
1031 | { | 1031 | { |
1032 | struct scsi_cmnd *sc = task->uldd_task; | 1032 | struct scsi_cmnd *sc = task->uldd_task; |
1033 | struct request_queue *q = sc->device->request_queue; | ||
1034 | unsigned long flags; | ||
1033 | 1035 | ||
1034 | /* Escape for libsas internal commands */ | 1036 | /* Escape for libsas internal commands */ |
1035 | if (!sc) { | 1037 | if (!sc) { |
@@ -1044,7 +1046,9 @@ void sas_task_abort(struct sas_task *task) | |||
1044 | return; | 1046 | return; |
1045 | } | 1047 | } |
1046 | 1048 | ||
1049 | spin_lock_irqsave(q->queue_lock, flags); | ||
1047 | blk_abort_request(sc->request); | 1050 | blk_abort_request(sc->request); |
1051 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1048 | scsi_schedule_eh(sc->device->host); | 1052 | scsi_schedule_eh(sc->device->host); |
1049 | } | 1053 | } |
1050 | 1054 | ||
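
Both libsas hunks (sas_ata_task_abort and sas_task_abort) treat blk_abort_request() as requiring q->queue_lock to be held by the caller and take the request queue lock around it. The same pattern in isolation, as a kernel-style fragment that assumes the 2.6.3x block API where queue_lock is a spinlock pointer:

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    /* Sketch: take the queue lock around the abort, as both hunks do. */
    static void abort_request_locked(struct request_queue *q, struct request *rq)
    {
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(rq);
        spin_unlock_irqrestore(q->queue_lock, flags);
    }
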
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 3e10c306de94..3a5bfd10b2cb 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -957,7 +957,8 @@ static int resp_start_stop(struct scsi_cmnd * scp, | |||
957 | static sector_t get_sdebug_capacity(void) | 957 | static sector_t get_sdebug_capacity(void) |
958 | { | 958 | { |
959 | if (scsi_debug_virtual_gb > 0) | 959 | if (scsi_debug_virtual_gb > 0) |
960 | return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; | 960 | return (sector_t)scsi_debug_virtual_gb * |
961 | (1073741824 / scsi_debug_sector_size); | ||
961 | else | 962 | else |
962 | return sdebug_store_sectors; | 963 | return sdebug_store_sectors; |
963 | } | 964 | } |
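
The scsi_debug change stops hard-coding 512-byte sectors: the emulated capacity is now virtual_gb * (2^30 / sector_size) sectors, so a 4 KiB sector size yields one eighth as many sectors per gibibyte. A quick user-space check of the arithmetic (the numbers below are chosen purely for illustration):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t capacity_sectors(unsigned int virtual_gb,
                                     unsigned int sector_size)
    {
        return (uint64_t)virtual_gb * (1073741824U / sector_size);
    }

    int main(void)
    {
        /* 4 GiB: 8388608 sectors at 512 B, 1048576 sectors at 4096 B */
        printf("%llu\n", (unsigned long long)capacity_sectors(4, 512));
        printf("%llu\n", (unsigned long long)capacity_sectors(4, 4096));
        return 0;
    }
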
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d45c69ca5737..7ad53fa42766 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -302,7 +302,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) | |||
302 | if (scmd->device->allow_restart && | 302 | if (scmd->device->allow_restart && |
303 | (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) | 303 | (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) |
304 | return FAILED; | 304 | return FAILED; |
305 | return SUCCESS; | 305 | |
306 | if (blk_barrier_rq(scmd->request)) | ||
307 | /* | ||
308 | * barrier requests should always retry on UA | ||
309 | * otherwise block will get a spurious error | ||
310 | */ | ||
311 | return NEEDS_RETRY; | ||
312 | else | ||
313 | /* | ||
314 | * for normal (non barrier) commands, pass the | ||
315 | * UA upwards for a determination in the | ||
316 | * completion functions | ||
317 | */ | ||
318 | return SUCCESS; | ||
306 | 319 | ||
307 | /* these three are not supported */ | 320 | /* these three are not supported */ |
308 | case COPY_ABORTED: | 321 | case COPY_ABORTED: |
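
The scsi_error hunk only changes the UNIT ATTENTION disposition: a barrier request (such as the block layer's cache flush) is retried inside SCSI EH instead of being completed, so the upper layer never sees a spurious failure; everything else still returns SUCCESS and is sorted out in the completion path. A trivial sketch of that branch, using invented names rather than the kernel's types:

    enum disposition { DISP_SUCCESS, DISP_NEEDS_RETRY };

    /* Barrier requests retry on a unit attention, others complete. */
    static enum disposition unit_attention_disposition(int is_barrier)
    {
        return is_barrier ? DISP_NEEDS_RETRY : DISP_SUCCESS;
    }
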
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8b827f37b03e..de6c60320f6f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1040,6 +1040,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq) | |||
1040 | { | 1040 | { |
1041 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 1041 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
1042 | rq->timeout = SD_TIMEOUT; | 1042 | rq->timeout = SD_TIMEOUT; |
1043 | rq->retries = SD_MAX_RETRIES; | ||
1043 | rq->cmd[0] = SYNCHRONIZE_CACHE; | 1044 | rq->cmd[0] = SYNCHRONIZE_CACHE; |
1044 | rq->cmd_len = 10; | 1045 | rq->cmd_len = 10; |
1045 | } | 1046 | } |
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index 105449c15fa9..e17764d71476 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c | |||
@@ -69,6 +69,7 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = { | |||
69 | }, | 69 | }, |
70 | { 0 } | 70 | { 0 } |
71 | }; | 71 | }; |
72 | MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl); | ||
72 | 73 | ||
73 | static int __devinit zorro7xx_init_one(struct zorro_dev *z, | 74 | static int __devinit zorro7xx_init_one(struct zorro_dev *z, |
74 | const struct zorro_device_id *ent) | 75 | const struct zorro_device_id *ent) |
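
Adding MODULE_DEVICE_TABLE(zorro, ...) here - and for cirrusfb and fm2fb further down - exports the ID table into the module image so depmod can generate modalias entries and udev can autoload the driver when a matching Zorro board is announced. A hedged sketch of the idiom for a hypothetical driver; the ID is borrowed from the fm2fb table below, everything else is made up:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/zorro.h>

    static struct zorro_device_id example_zorro_tbl[] __devinitdata = {
        { ZORRO_PROD_HELFRICH_RAINBOW_II },
        { 0 }
    };
    MODULE_DEVICE_TABLE(zorro, example_zorro_tbl);
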
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 4315b23590bd..eacb588a9345 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c | |||
@@ -120,7 +120,8 @@ | |||
120 | #define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */ | 120 | #define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */ |
121 | #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ | 121 | #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ |
122 | #define UCR3_BPEN (1<<0) /* Preset registers enable */ | 122 | #define UCR3_BPEN (1<<0) /* Preset registers enable */ |
123 | #define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */ | 123 | #define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */ |
124 | #define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */ | ||
124 | #define UCR4_INVR (1<<9) /* Inverted infrared reception */ | 125 | #define UCR4_INVR (1<<9) /* Inverted infrared reception */ |
125 | #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */ | 126 | #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */ |
126 | #define UCR4_WKEN (1<<7) /* Wake interrupt enable */ | 127 | #define UCR4_WKEN (1<<7) /* Wake interrupt enable */ |
@@ -591,6 +592,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode) | |||
591 | return 0; | 592 | return 0; |
592 | } | 593 | } |
593 | 594 | ||
595 | /* half the RX buffer size */ | ||
596 | #define CTSTL 16 | ||
597 | |||
594 | static int imx_startup(struct uart_port *port) | 598 | static int imx_startup(struct uart_port *port) |
595 | { | 599 | { |
596 | struct imx_port *sport = (struct imx_port *)port; | 600 | struct imx_port *sport = (struct imx_port *)port; |
@@ -607,6 +611,10 @@ static int imx_startup(struct uart_port *port) | |||
607 | if (USE_IRDA(sport)) | 611 | if (USE_IRDA(sport)) |
608 | temp |= UCR4_IRSC; | 612 | temp |= UCR4_IRSC; |
609 | 613 | ||
614 | /* set the trigger level for CTS */ | ||
615 | temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF); | ||
616 | temp |= CTSTL<< UCR4_CTSTL_SHF; | ||
617 | |||
610 | writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); | 618 | writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); |
611 | 619 | ||
612 | if (USE_IRDA(sport)) { | 620 | if (USE_IRDA(sport)) { |
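
The imx change replaces the fixed UCR4_CTSTL_32 constant with a shift/mask pair so the CTS trigger level can be programmed; startup now clears the 6-bit field and sets it to 16 characters, half the RX FIFO. The same read-modify-write step in isolation, reusing the register field names from the hunk but otherwise standalone and illustrative:

    #include <stdio.h>

    #define UCR4_CTSTL_SHF  10      /* CTS trigger level shift          */
    #define UCR4_CTSTL_MASK 0x3F    /* CTS trigger level is 6 bits wide */
    #define CTSTL           16      /* half the RX FIFO                 */

    static unsigned int set_ctstl(unsigned int ucr4, unsigned int level)
    {
        ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);   /* clear old field   */
        ucr4 |= level << UCR4_CTSTL_SHF;                /* program new level */
        return ucr4;
    }

    int main(void)
    {
        /* 32 << 10 was the old hard-coded value, 16 << 10 the new one */
        printf("0x%x -> 0x%x\n", 32u << 10, set_ctstl(32u << 10, CTSTL));
        return 0;
    }
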
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index a176ab4bd65b..02469c31bf0b 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c | |||
@@ -1467,7 +1467,7 @@ mpc52xx_uart_init(void) | |||
1467 | /* | 1467 | /* |
1468 | * Map the PSC FIFO Controller and init if on MPC512x. | 1468 | * Map the PSC FIFO Controller and init if on MPC512x. |
1469 | */ | 1469 | */ |
1470 | if (psc_ops->fifoc_init) { | 1470 | if (psc_ops && psc_ops->fifoc_init) { |
1471 | ret = psc_ops->fifoc_init(); | 1471 | ret = psc_ops->fifoc_init(); |
1472 | if (ret) | 1472 | if (ret) |
1473 | return ret; | 1473 | return ret; |
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 4a6366a42129..111a01a747fc 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c | |||
@@ -380,6 +380,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
380 | mutex_lock(&inode->i_mutex); | 380 | mutex_lock(&inode->i_mutex); |
381 | dentry_unhash(dentry); | 381 | dentry_unhash(dentry); |
382 | if (usbfs_empty(dentry)) { | 382 | if (usbfs_empty(dentry)) { |
383 | dont_mount(dentry); | ||
383 | drop_nlink(dentry->d_inode); | 384 | drop_nlink(dentry->d_inode); |
384 | drop_nlink(dentry->d_inode); | 385 | drop_nlink(dentry->d_inode); |
385 | dput(dentry); | 386 | dput(dentry); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index e69d238c5af0..49fa953aaf6e 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -1035,7 +1035,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) | |||
1035 | /* This actually signals the guest, using eventfd. */ | 1035 | /* This actually signals the guest, using eventfd. */ |
1036 | void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) | 1036 | void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) |
1037 | { | 1037 | { |
1038 | __u16 flags = 0; | 1038 | __u16 flags; |
1039 | /* Flush out used index updates. This is paired | ||
1040 | * with the barrier that the Guest executes when enabling | ||
1041 | * interrupts. */ | ||
1042 | smp_mb(); | ||
1043 | |||
1039 | if (get_user(flags, &vq->avail->flags)) { | 1044 | if (get_user(flags, &vq->avail->flags)) { |
1040 | vq_err(vq, "Failed to get flags"); | 1045 | vq_err(vq, "Failed to get flags"); |
1041 | return; | 1046 | return; |
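
The vhost hunk adds a full barrier between publishing the used index and reading the guest's interrupt-enable flag, paired with the barrier the guest executes when it re-enables interrupts; without it, both sides can read stale values and a wakeup can be lost. A user-space store-buffering sketch of the same pairing - the names used_idx and irq_enabled are stand-ins, not the real vring layout:

    #include <stdatomic.h>

    static atomic_int used_idx;     /* host publishes completed work here  */
    static atomic_int irq_enabled;  /* guest publishes its interrupt state */

    static int host_should_signal(void)
    {
        atomic_store_explicit(&used_idx, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the added smp_mb() */
        return atomic_load_explicit(&irq_enabled, memory_order_relaxed);
    }

    static int guest_sees_work(void)
    {
        atomic_store_explicit(&irq_enabled, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* guest's paired barrier */
        return atomic_load_explicit(&used_idx, memory_order_relaxed);
    }

    /* With both fences in place it is impossible for the two functions,
     * run concurrently from a zero-initialized state, to both return 0:
     * either the host signals or the guest notices the new used index. */
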
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c index dca48df98444..e5d6b56d4447 100644 --- a/drivers/video/amifb.c +++ b/drivers/video/amifb.c | |||
@@ -50,8 +50,9 @@ | |||
50 | #include <linux/fb.h> | 50 | #include <linux/fb.h> |
51 | #include <linux/init.h> | 51 | #include <linux/init.h> |
52 | #include <linux/ioport.h> | 52 | #include <linux/ioport.h> |
53 | 53 | #include <linux/platform_device.h> | |
54 | #include <linux/uaccess.h> | 54 | #include <linux/uaccess.h> |
55 | |||
55 | #include <asm/system.h> | 56 | #include <asm/system.h> |
56 | #include <asm/irq.h> | 57 | #include <asm/irq.h> |
57 | #include <asm/amigahw.h> | 58 | #include <asm/amigahw.h> |
@@ -1135,7 +1136,7 @@ static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg | |||
1135 | * Interface to the low level console driver | 1136 | * Interface to the low level console driver |
1136 | */ | 1137 | */ |
1137 | 1138 | ||
1138 | static void amifb_deinit(void); | 1139 | static void amifb_deinit(struct platform_device *pdev); |
1139 | 1140 | ||
1140 | /* | 1141 | /* |
1141 | * Internal routines | 1142 | * Internal routines |
@@ -2246,7 +2247,7 @@ static inline void chipfree(void) | |||
2246 | * Initialisation | 2247 | * Initialisation |
2247 | */ | 2248 | */ |
2248 | 2249 | ||
2249 | static int __init amifb_init(void) | 2250 | static int __init amifb_probe(struct platform_device *pdev) |
2250 | { | 2251 | { |
2251 | int tag, i, err = 0; | 2252 | int tag, i, err = 0; |
2252 | u_long chipptr; | 2253 | u_long chipptr; |
@@ -2261,16 +2262,6 @@ static int __init amifb_init(void) | |||
2261 | } | 2262 | } |
2262 | amifb_setup(option); | 2263 | amifb_setup(option); |
2263 | #endif | 2264 | #endif |
2264 | if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_VIDEO)) | ||
2265 | return -ENODEV; | ||
2266 | |||
2267 | /* | ||
2268 | * We request all registers starting from bplpt[0] | ||
2269 | */ | ||
2270 | if (!request_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120, | ||
2271 | "amifb [Denise/Lisa]")) | ||
2272 | return -EBUSY; | ||
2273 | |||
2274 | custom.dmacon = DMAF_ALL | DMAF_MASTER; | 2265 | custom.dmacon = DMAF_ALL | DMAF_MASTER; |
2275 | 2266 | ||
2276 | switch (amiga_chipset) { | 2267 | switch (amiga_chipset) { |
@@ -2377,6 +2368,7 @@ default_chipset: | |||
2377 | fb_info.fbops = &amifb_ops; | 2368 | fb_info.fbops = &amifb_ops; |
2378 | fb_info.par = ¤tpar; | 2369 | fb_info.par = ¤tpar; |
2379 | fb_info.flags = FBINFO_DEFAULT; | 2370 | fb_info.flags = FBINFO_DEFAULT; |
2371 | fb_info.device = &pdev->dev; | ||
2380 | 2372 | ||
2381 | if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb, | 2373 | if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb, |
2382 | NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) { | 2374 | NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) { |
@@ -2451,18 +2443,18 @@ default_chipset: | |||
2451 | return 0; | 2443 | return 0; |
2452 | 2444 | ||
2453 | amifb_error: | 2445 | amifb_error: |
2454 | amifb_deinit(); | 2446 | amifb_deinit(pdev); |
2455 | return err; | 2447 | return err; |
2456 | } | 2448 | } |
2457 | 2449 | ||
2458 | static void amifb_deinit(void) | 2450 | static void amifb_deinit(struct platform_device *pdev) |
2459 | { | 2451 | { |
2460 | if (fb_info.cmap.len) | 2452 | if (fb_info.cmap.len) |
2461 | fb_dealloc_cmap(&fb_info.cmap); | 2453 | fb_dealloc_cmap(&fb_info.cmap); |
2454 | fb_dealloc_cmap(&fb_info.cmap); | ||
2462 | chipfree(); | 2455 | chipfree(); |
2463 | if (videomemory) | 2456 | if (videomemory) |
2464 | iounmap((void*)videomemory); | 2457 | iounmap((void*)videomemory); |
2465 | release_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120); | ||
2466 | custom.dmacon = DMAF_ALL | DMAF_MASTER; | 2458 | custom.dmacon = DMAF_ALL | DMAF_MASTER; |
2467 | } | 2459 | } |
2468 | 2460 | ||
@@ -3794,14 +3786,35 @@ static void ami_rebuild_copper(void) | |||
3794 | } | 3786 | } |
3795 | } | 3787 | } |
3796 | 3788 | ||
3797 | static void __exit amifb_exit(void) | 3789 | static int __exit amifb_remove(struct platform_device *pdev) |
3798 | { | 3790 | { |
3799 | unregister_framebuffer(&fb_info); | 3791 | unregister_framebuffer(&fb_info); |
3800 | amifb_deinit(); | 3792 | amifb_deinit(pdev); |
3801 | amifb_video_off(); | 3793 | amifb_video_off(); |
3794 | return 0; | ||
3795 | } | ||
3796 | |||
3797 | static struct platform_driver amifb_driver = { | ||
3798 | .remove = __exit_p(amifb_remove), | ||
3799 | .driver = { | ||
3800 | .name = "amiga-video", | ||
3801 | .owner = THIS_MODULE, | ||
3802 | }, | ||
3803 | }; | ||
3804 | |||
3805 | static int __init amifb_init(void) | ||
3806 | { | ||
3807 | return platform_driver_probe(&amifb_driver, amifb_probe); | ||
3802 | } | 3808 | } |
3803 | 3809 | ||
3804 | module_init(amifb_init); | 3810 | module_init(amifb_init); |
3811 | |||
3812 | static void __exit amifb_exit(void) | ||
3813 | { | ||
3814 | platform_driver_unregister(&amifb_driver); | ||
3815 | } | ||
3816 | |||
3805 | module_exit(amifb_exit); | 3817 | module_exit(amifb_exit); |
3806 | 3818 | ||
3807 | MODULE_LICENSE("GPL"); | 3819 | MODULE_LICENSE("GPL"); |
3820 | MODULE_ALIAS("platform:amiga-video"); | ||
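
The amifb diff converts a bare module_init() driver into a platform driver bound to the "amiga-video" device; platform_driver_probe() is used because the probe routine stays in __init memory and is therefore not kept in the driver structure. A skeletal version of that registration pattern for a hypothetical "example-video" device (not a complete driver):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init example_probe(struct platform_device *pdev)
    {
        /* map resources, register the device's framebuffer, etc. */
        return 0;
    }

    static int __exit example_remove(struct platform_device *pdev)
    {
        /* tear down whatever probe set up */
        return 0;
    }

    static struct platform_driver example_driver = {
        .remove = __exit_p(example_remove),
        .driver = {
            .name  = "example-video",
            .owner = THIS_MODULE,
        },
    };

    static int __init example_init(void)
    {
        /* probe is __init, so it is passed here rather than stored in
         * the platform_driver structure */
        return platform_driver_probe(&example_driver, example_probe);
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
        platform_driver_unregister(&example_driver);
    }
    module_exit(example_exit);

    MODULE_LICENSE("GPL");
    MODULE_ALIAS("platform:example-video");
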
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c index 44e49c28b2a7..c2ec3dcd4e91 100644 --- a/drivers/video/bfin-t350mcqb-fb.c +++ b/drivers/video/bfin-t350mcqb-fb.c | |||
@@ -488,9 +488,9 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) | |||
488 | fbinfo->fbops = &bfin_t350mcqb_fb_ops; | 488 | fbinfo->fbops = &bfin_t350mcqb_fb_ops; |
489 | fbinfo->flags = FBINFO_FLAG_DEFAULT; | 489 | fbinfo->flags = FBINFO_FLAG_DEFAULT; |
490 | 490 | ||
491 | info->fb_buffer = | 491 | info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len + |
492 | dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle, | 492 | ACTIVE_VIDEO_MEM_OFFSET, |
493 | GFP_KERNEL); | 493 | &info->dma_handle, GFP_KERNEL); |
494 | 494 | ||
495 | if (NULL == info->fb_buffer) { | 495 | if (NULL == info->fb_buffer) { |
496 | printk(KERN_ERR DRIVER_NAME | 496 | printk(KERN_ERR DRIVER_NAME |
@@ -568,8 +568,8 @@ out7: | |||
568 | out6: | 568 | out6: |
569 | fb_dealloc_cmap(&fbinfo->cmap); | 569 | fb_dealloc_cmap(&fbinfo->cmap); |
570 | out4: | 570 | out4: |
571 | dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, | 571 | dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET, |
572 | info->dma_handle); | 572 | info->fb_buffer, info->dma_handle); |
573 | out3: | 573 | out3: |
574 | framebuffer_release(fbinfo); | 574 | framebuffer_release(fbinfo); |
575 | out2: | 575 | out2: |
@@ -592,8 +592,9 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev) | |||
592 | free_irq(info->irq, info); | 592 | free_irq(info->irq, info); |
593 | 593 | ||
594 | if (info->fb_buffer != NULL) | 594 | if (info->fb_buffer != NULL) |
595 | dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, | 595 | dma_free_coherent(NULL, fbinfo->fix.smem_len + |
596 | info->dma_handle); | 596 | ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer, |
597 | info->dma_handle); | ||
597 | 598 | ||
598 | fb_dealloc_cmap(&fbinfo->cmap); | 599 | fb_dealloc_cmap(&fbinfo->cmap); |
599 | 600 | ||
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c index 8d8dfda2f868..6df7c54db0a3 100644 --- a/drivers/video/cirrusfb.c +++ b/drivers/video/cirrusfb.c | |||
@@ -299,6 +299,7 @@ static const struct zorro_device_id cirrusfb_zorro_table[] = { | |||
299 | }, | 299 | }, |
300 | { 0 } | 300 | { 0 } |
301 | }; | 301 | }; |
302 | MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table); | ||
302 | 303 | ||
303 | static const struct { | 304 | static const struct { |
304 | zorro_id id2; | 305 | zorro_id id2; |
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c index 6c91c61cdb63..1b0feb8e7244 100644 --- a/drivers/video/fm2fb.c +++ b/drivers/video/fm2fb.c | |||
@@ -219,6 +219,7 @@ static struct zorro_device_id fm2fb_devices[] __devinitdata = { | |||
219 | { ZORRO_PROD_HELFRICH_RAINBOW_II }, | 219 | { ZORRO_PROD_HELFRICH_RAINBOW_II }, |
220 | { 0 } | 220 | { 0 } |
221 | }; | 221 | }; |
222 | MODULE_DEVICE_TABLE(zorro, fm2fb_devices); | ||
222 | 223 | ||
223 | static struct zorro_driver fm2fb_driver = { | 224 | static struct zorro_driver fm2fb_driver = { |
224 | .name = "fm2fb", | 225 | .name = "fm2fb", |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index e14bd0749129..e8c769944812 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -695,6 +695,7 @@ static int sh_mobile_lcdc_setup_clocks(struct platform_device *pdev, | |||
695 | * 1) Enable Runtime PM | 695 | * 1) Enable Runtime PM |
696 | * 2) Force Runtime PM Resume since hardware is accessed from probe() | 696 | * 2) Force Runtime PM Resume since hardware is accessed from probe() |
697 | */ | 697 | */ |
698 | priv->dev = &pdev->dev; | ||
698 | pm_runtime_enable(priv->dev); | 699 | pm_runtime_enable(priv->dev); |
699 | pm_runtime_resume(priv->dev); | 700 | pm_runtime_resume(priv->dev); |
700 | return 0; | 701 | return 0; |
@@ -957,25 +958,24 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
957 | 958 | ||
958 | if (!pdev->dev.platform_data) { | 959 | if (!pdev->dev.platform_data) { |
959 | dev_err(&pdev->dev, "no platform data defined\n"); | 960 | dev_err(&pdev->dev, "no platform data defined\n"); |
960 | error = -EINVAL; | 961 | return -EINVAL; |
961 | goto err0; | ||
962 | } | 962 | } |
963 | 963 | ||
964 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 964 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
965 | i = platform_get_irq(pdev, 0); | 965 | i = platform_get_irq(pdev, 0); |
966 | if (!res || i < 0) { | 966 | if (!res || i < 0) { |
967 | dev_err(&pdev->dev, "cannot get platform resources\n"); | 967 | dev_err(&pdev->dev, "cannot get platform resources\n"); |
968 | error = -ENOENT; | 968 | return -ENOENT; |
969 | goto err0; | ||
970 | } | 969 | } |
971 | 970 | ||
972 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 971 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
973 | if (!priv) { | 972 | if (!priv) { |
974 | dev_err(&pdev->dev, "cannot allocate device data\n"); | 973 | dev_err(&pdev->dev, "cannot allocate device data\n"); |
975 | error = -ENOMEM; | 974 | return -ENOMEM; |
976 | goto err0; | ||
977 | } | 975 | } |
978 | 976 | ||
977 | platform_set_drvdata(pdev, priv); | ||
978 | |||
979 | error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED, | 979 | error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED, |
980 | dev_name(&pdev->dev), priv); | 980 | dev_name(&pdev->dev), priv); |
981 | if (error) { | 981 | if (error) { |
@@ -984,8 +984,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
984 | } | 984 | } |
985 | 985 | ||
986 | priv->irq = i; | 986 | priv->irq = i; |
987 | priv->dev = &pdev->dev; | ||
988 | platform_set_drvdata(pdev, priv); | ||
989 | pdata = pdev->dev.platform_data; | 987 | pdata = pdev->dev.platform_data; |
990 | 988 | ||
991 | j = 0; | 989 | j = 0; |
@@ -1099,9 +1097,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1099 | info = ch->info; | 1097 | info = ch->info; |
1100 | 1098 | ||
1101 | if (info->fbdefio) { | 1099 | if (info->fbdefio) { |
1102 | priv->ch->sglist = vmalloc(sizeof(struct scatterlist) * | 1100 | ch->sglist = vmalloc(sizeof(struct scatterlist) * |
1103 | info->fix.smem_len >> PAGE_SHIFT); | 1101 | info->fix.smem_len >> PAGE_SHIFT); |
1104 | if (!priv->ch->sglist) { | 1102 | if (!ch->sglist) { |
1105 | dev_err(&pdev->dev, "cannot allocate sglist\n"); | 1103 | dev_err(&pdev->dev, "cannot allocate sglist\n"); |
1106 | goto err1; | 1104 | goto err1; |
1107 | } | 1105 | } |
@@ -1126,9 +1124,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
1126 | } | 1124 | } |
1127 | 1125 | ||
1128 | return 0; | 1126 | return 0; |
1129 | err1: | 1127 | err1: |
1130 | sh_mobile_lcdc_remove(pdev); | 1128 | sh_mobile_lcdc_remove(pdev); |
1131 | err0: | 1129 | |
1132 | return error; | 1130 | return error; |
1133 | } | 1131 | } |
1134 | 1132 | ||
@@ -1139,7 +1137,7 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) | |||
1139 | int i; | 1137 | int i; |
1140 | 1138 | ||
1141 | for (i = 0; i < ARRAY_SIZE(priv->ch); i++) | 1139 | for (i = 0; i < ARRAY_SIZE(priv->ch); i++) |
1142 | if (priv->ch[i].info->dev) | 1140 | if (priv->ch[i].info && priv->ch[i].info->dev) |
1143 | unregister_framebuffer(priv->ch[i].info); | 1141 | unregister_framebuffer(priv->ch[i].info); |
1144 | 1142 | ||
1145 | sh_mobile_lcdc_stop(priv); | 1143 | sh_mobile_lcdc_stop(priv); |
@@ -1162,7 +1160,8 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) | |||
1162 | if (priv->dot_clk) | 1160 | if (priv->dot_clk) |
1163 | clk_put(priv->dot_clk); | 1161 | clk_put(priv->dot_clk); |
1164 | 1162 | ||
1165 | pm_runtime_disable(priv->dev); | 1163 | if (priv->dev) |
1164 | pm_runtime_disable(priv->dev); | ||
1166 | 1165 | ||
1167 | if (priv->base) | 1166 | if (priv->base) |
1168 | iounmap(priv->base); | 1167 | iounmap(priv->base); |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0bf5020d0d32..b87ba23442d2 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -175,7 +175,7 @@ config SA1100_WATCHDOG | |||
175 | 175 | ||
176 | config MPCORE_WATCHDOG | 176 | config MPCORE_WATCHDOG |
177 | tristate "MPcore watchdog" | 177 | tristate "MPcore watchdog" |
178 | depends on ARM_MPCORE_PLATFORM && LOCAL_TIMERS | 178 | depends on HAVE_ARM_TWD |
179 | help | 179 | help |
180 | Watchdog timer embedded into the MPcore system. | 180 | Watchdog timer embedded into the MPcore system. |
181 | 181 | ||
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c index 016c6a791cab..b8ec7aca3c8e 100644 --- a/drivers/watchdog/mpcore_wdt.c +++ b/drivers/watchdog/mpcore_wdt.c | |||
@@ -31,8 +31,9 @@ | |||
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <linux/uaccess.h> | 32 | #include <linux/uaccess.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/io.h> | ||
34 | 35 | ||
35 | #include <asm/hardware/arm_twd.h> | 36 | #include <asm/smp_twd.h> |
36 | 37 | ||
37 | struct mpcore_wdt { | 38 | struct mpcore_wdt { |
38 | unsigned long timer_alive; | 39 | unsigned long timer_alive; |
@@ -44,7 +45,7 @@ struct mpcore_wdt { | |||
44 | }; | 45 | }; |
45 | 46 | ||
46 | static struct platform_device *mpcore_wdt_dev; | 47 | static struct platform_device *mpcore_wdt_dev; |
47 | extern unsigned int mpcore_timer_rate; | 48 | static DEFINE_SPINLOCK(wdt_lock); |
48 | 49 | ||
49 | #define TIMER_MARGIN 60 | 50 | #define TIMER_MARGIN 60 |
50 | static int mpcore_margin = TIMER_MARGIN; | 51 | static int mpcore_margin = TIMER_MARGIN; |
@@ -94,13 +95,15 @@ static irqreturn_t mpcore_wdt_fire(int irq, void *arg) | |||
94 | */ | 95 | */ |
95 | static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt) | 96 | static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt) |
96 | { | 97 | { |
97 | unsigned int count; | 98 | unsigned long count; |
98 | 99 | ||
100 | spin_lock(&wdt_lock); | ||
99 | /* Assume prescale is set to 256 */ | 101 | /* Assume prescale is set to 256 */ |
100 | count = (mpcore_timer_rate / 256) * mpcore_margin; | 102 | count = __raw_readl(wdt->base + TWD_WDOG_COUNTER); |
103 | count = (0xFFFFFFFFU - count) * (HZ / 5); | ||
104 | count = (count / 256) * mpcore_margin; | ||
101 | 105 | ||
102 | /* Reload the counter */ | 106 | /* Reload the counter */ |
103 | spin_lock(&wdt_lock); | ||
104 | writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); | 107 | writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); |
105 | wdt->perturb = wdt->perturb ? 0 : 1; | 108 | wdt->perturb = wdt->perturb ? 0 : 1; |
106 | spin_unlock(&wdt_lock); | 109 | spin_unlock(&wdt_lock); |
@@ -119,7 +122,6 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt) | |||
119 | { | 122 | { |
120 | dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n"); | 123 | dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n"); |
121 | 124 | ||
122 | spin_lock(&wdt_lock); | ||
123 | /* This loads the count register but does NOT start the count yet */ | 125 | /* This loads the count register but does NOT start the count yet */ |
124 | mpcore_wdt_keepalive(wdt); | 126 | mpcore_wdt_keepalive(wdt); |
125 | 127 | ||
@@ -130,7 +132,6 @@ static void mpcore_wdt_start(struct mpcore_wdt *wdt) | |||
130 | /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */ | 132 | /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */ |
131 | writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL); | 133 | writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL); |
132 | } | 134 | } |
133 | spin_unlock(&wdt_lock); | ||
134 | } | 135 | } |
135 | 136 | ||
136 | static int mpcore_wdt_set_heartbeat(int t) | 137 | static int mpcore_wdt_set_heartbeat(int t) |
@@ -360,7 +361,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev) | |||
360 | mpcore_wdt_miscdev.parent = &dev->dev; | 361 | mpcore_wdt_miscdev.parent = &dev->dev; |
361 | ret = misc_register(&mpcore_wdt_miscdev); | 362 | ret = misc_register(&mpcore_wdt_miscdev); |
362 | if (ret) { | 363 | if (ret) { |
363 | dev_printk(KERN_ERR, _dev, | 364 | dev_printk(KERN_ERR, wdt->dev, |
364 | "cannot register miscdev on minor=%d (err=%d)\n", | 365 | "cannot register miscdev on minor=%d (err=%d)\n", |
365 | WATCHDOG_MINOR, ret); | 366 | WATCHDOG_MINOR, ret); |
366 | goto err_misc; | 367 | goto err_misc; |
@@ -369,13 +370,13 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev) | |||
369 | ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED, | 370 | ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED, |
370 | "mpcore_wdt", wdt); | 371 | "mpcore_wdt", wdt); |
371 | if (ret) { | 372 | if (ret) { |
372 | dev_printk(KERN_ERR, _dev, | 373 | dev_printk(KERN_ERR, wdt->dev, |
373 | "cannot register IRQ%d for watchdog\n", wdt->irq); | 374 | "cannot register IRQ%d for watchdog\n", wdt->irq); |
374 | goto err_irq; | 375 | goto err_irq; |
375 | } | 376 | } |
376 | 377 | ||
377 | mpcore_wdt_stop(wdt); | 378 | mpcore_wdt_stop(wdt); |
378 | platform_set_drvdata(&dev->dev, wdt); | 379 | platform_set_drvdata(dev, wdt); |
379 | mpcore_wdt_dev = dev; | 380 | mpcore_wdt_dev = dev; |
380 | 381 | ||
381 | return 0; | 382 | return 0; |
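
In the mpcore_wdt keepalive the spinlock now covers both the TWD_WDOG_COUNTER read and the dependent TWD_WDOG_LOAD write, and the reload value is derived from the live counter instead of the removed mpcore_timer_rate export. A generic kernel-style sketch of guarding a read-modify-write register sequence with a static lock; the register offsets and scaling are placeholders, not the watchdog's real arithmetic:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* The sample and the write derived from it must not interleave with
     * another CPU doing the same, so both sit under one lock. */
    static void reload_counter(void __iomem *base, unsigned long margin)
    {
        unsigned long count;

        spin_lock(&example_lock);
        count = readl(base);                /* sample the running counter */
        count = (count / 256) * margin;     /* placeholder scaling        */
        writel(count, base + 0x4);          /* write back the reload      */
        spin_unlock(&example_lock);
    }
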
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c index d47c47fc048f..3c7046d79654 100644 --- a/drivers/zorro/proc.c +++ b/drivers/zorro/proc.c | |||
@@ -97,7 +97,7 @@ static void zorro_seq_stop(struct seq_file *m, void *v) | |||
97 | 97 | ||
98 | static int zorro_seq_show(struct seq_file *m, void *v) | 98 | static int zorro_seq_show(struct seq_file *m, void *v) |
99 | { | 99 | { |
100 | u_int slot = *(loff_t *)v; | 100 | unsigned int slot = *(loff_t *)v; |
101 | struct zorro_dev *z = &zorro_autocon[slot]; | 101 | struct zorro_dev *z = &zorro_autocon[slot]; |
102 | 102 | ||
103 | seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, | 103 | seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, |
@@ -129,7 +129,7 @@ static const struct file_operations zorro_devices_proc_fops = { | |||
129 | 129 | ||
130 | static struct proc_dir_entry *proc_bus_zorro_dir; | 130 | static struct proc_dir_entry *proc_bus_zorro_dir; |
131 | 131 | ||
132 | static int __init zorro_proc_attach_device(u_int slot) | 132 | static int __init zorro_proc_attach_device(unsigned int slot) |
133 | { | 133 | { |
134 | struct proc_dir_entry *entry; | 134 | struct proc_dir_entry *entry; |
135 | char name[4]; | 135 | char name[4]; |
@@ -146,7 +146,7 @@ static int __init zorro_proc_attach_device(u_int slot) | |||
146 | 146 | ||
147 | static int __init zorro_proc_init(void) | 147 | static int __init zorro_proc_init(void) |
148 | { | 148 | { |
149 | u_int slot; | 149 | unsigned int slot; |
150 | 150 | ||
151 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { | 151 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { |
152 | proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); | 152 | proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); |
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c index 53180a37cc9a..7ee2b6e71786 100644 --- a/drivers/zorro/zorro-driver.c +++ b/drivers/zorro/zorro-driver.c | |||
@@ -137,10 +137,34 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv) | |||
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
141 | { | ||
142 | #ifdef CONFIG_HOTPLUG | ||
143 | struct zorro_dev *z; | ||
144 | |||
145 | if (!dev) | ||
146 | return -ENODEV; | ||
147 | |||
148 | z = to_zorro_dev(dev); | ||
149 | if (!z) | ||
150 | return -ENODEV; | ||
151 | |||
152 | if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) || | ||
153 | add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) || | ||
154 | add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) || | ||
155 | add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id)) | ||
156 | return -ENOMEM; | ||
157 | |||
158 | return 0; | ||
159 | #else /* !CONFIG_HOTPLUG */ | ||
160 | return -ENODEV; | ||
161 | #endif /* !CONFIG_HOTPLUG */ | ||
162 | } | ||
140 | 163 | ||
141 | struct bus_type zorro_bus_type = { | 164 | struct bus_type zorro_bus_type = { |
142 | .name = "zorro", | 165 | .name = "zorro", |
143 | .match = zorro_bus_match, | 166 | .match = zorro_bus_match, |
167 | .uevent = zorro_uevent, | ||
144 | .probe = zorro_device_probe, | 168 | .probe = zorro_device_probe, |
145 | .remove = zorro_device_remove, | 169 | .remove = zorro_device_remove, |
146 | }; | 170 | }; |
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c index 1d2a772ea14c..eb924e0a64ce 100644 --- a/drivers/zorro/zorro-sysfs.c +++ b/drivers/zorro/zorro-sysfs.c | |||
@@ -77,6 +77,16 @@ static struct bin_attribute zorro_config_attr = { | |||
77 | .read = zorro_read_config, | 77 | .read = zorro_read_config, |
78 | }; | 78 | }; |
79 | 79 | ||
80 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | ||
81 | char *buf) | ||
82 | { | ||
83 | struct zorro_dev *z = to_zorro_dev(dev); | ||
84 | |||
85 | return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id); | ||
86 | } | ||
87 | |||
88 | static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL); | ||
89 | |||
80 | int zorro_create_sysfs_dev_files(struct zorro_dev *z) | 90 | int zorro_create_sysfs_dev_files(struct zorro_dev *z) |
81 | { | 91 | { |
82 | struct device *dev = &z->dev; | 92 | struct device *dev = &z->dev; |
@@ -89,6 +99,7 @@ int zorro_create_sysfs_dev_files(struct zorro_dev *z) | |||
89 | (error = device_create_file(dev, &dev_attr_slotaddr)) || | 99 | (error = device_create_file(dev, &dev_attr_slotaddr)) || |
90 | (error = device_create_file(dev, &dev_attr_slotsize)) || | 100 | (error = device_create_file(dev, &dev_attr_slotsize)) || |
91 | (error = device_create_file(dev, &dev_attr_resource)) || | 101 | (error = device_create_file(dev, &dev_attr_resource)) || |
102 | (error = device_create_file(dev, &dev_attr_modalias)) || | ||
92 | (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr))) | 103 | (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr))) |
93 | return error; | 104 | return error; |
94 | 105 | ||
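
The zorro-driver and zorro-sysfs hunks above teach the bus about hotplug: the uevent callback exports ZORRO_* variables plus MODALIAS, and the new sysfs attribute exposes the same modalias string so udev can coldplug boards that were present at boot. A minimal read-only device attribute in the same style; the attribute name and format string below are placeholders, unlike the real ZORRO_DEVICE_MODALIAS_FMT:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/stat.h>

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        /* a real implementation derives the string from the device */
        return sprintf(buf, "example:v0000d0000\n");
    }

    static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

    /* registered later with device_create_file(dev, &dev_attr_example) */
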
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index d45fb34e2d23..6455f3a244c5 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/zorro.h> | 15 | #include <linux/zorro.h> |
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/slab.h> | ||
18 | 20 | ||
19 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
20 | #include <asm/amigahw.h> | 22 | #include <asm/amigahw.h> |
@@ -26,24 +28,17 @@ | |||
26 | * Zorro Expansion Devices | 28 | * Zorro Expansion Devices |
27 | */ | 29 | */ |
28 | 30 | ||
29 | u_int zorro_num_autocon = 0; | 31 | unsigned int zorro_num_autocon; |
30 | struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; | 32 | struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; |
31 | 33 | ||
32 | 34 | ||
33 | /* | 35 | /* |
34 | * Single Zorro bus | 36 | * Zorro bus |
35 | */ | 37 | */ |
36 | 38 | ||
37 | struct zorro_bus zorro_bus = {\ | 39 | struct zorro_bus { |
38 | .resources = { | 40 | struct list_head devices; /* list of devices on this bus */ |
39 | /* Zorro II regions (on Zorro II/III) */ | 41 | struct device dev; |
40 | { .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff }, | ||
41 | { .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff }, | ||
42 | /* Zorro III regions (on Zorro III only) */ | ||
43 | { .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff }, | ||
44 | { .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff } | ||
45 | }, | ||
46 | .name = "Zorro bus" | ||
47 | }; | 42 | }; |
48 | 43 | ||
49 | 44 | ||
@@ -53,18 +48,19 @@ struct zorro_bus zorro_bus = {\ | |||
53 | 48 | ||
54 | struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) | 49 | struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) |
55 | { | 50 | { |
56 | struct zorro_dev *z; | 51 | struct zorro_dev *z; |
57 | 52 | ||
58 | if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) | 53 | if (!zorro_num_autocon) |
59 | return NULL; | 54 | return NULL; |
60 | 55 | ||
61 | for (z = from ? from+1 : &zorro_autocon[0]; | 56 | for (z = from ? from+1 : &zorro_autocon[0]; |
62 | z < zorro_autocon+zorro_num_autocon; | 57 | z < zorro_autocon+zorro_num_autocon; |
63 | z++) | 58 | z++) |
64 | if (id == ZORRO_WILDCARD || id == z->id) | 59 | if (id == ZORRO_WILDCARD || id == z->id) |
65 | return z; | 60 | return z; |
66 | return NULL; | 61 | return NULL; |
67 | } | 62 | } |
63 | EXPORT_SYMBOL(zorro_find_device); | ||
68 | 64 | ||
69 | 65 | ||
70 | /* | 66 | /* |
@@ -83,121 +79,138 @@ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) | |||
83 | */ | 79 | */ |
84 | 80 | ||
85 | DECLARE_BITMAP(zorro_unused_z2ram, 128); | 81 | DECLARE_BITMAP(zorro_unused_z2ram, 128); |
82 | EXPORT_SYMBOL(zorro_unused_z2ram); | ||
86 | 83 | ||
87 | 84 | ||
88 | static void __init mark_region(unsigned long start, unsigned long end, | 85 | static void __init mark_region(unsigned long start, unsigned long end, |
89 | int flag) | 86 | int flag) |
90 | { | 87 | { |
91 | if (flag) | ||
92 | start += Z2RAM_CHUNKMASK; | ||
93 | else | ||
94 | end += Z2RAM_CHUNKMASK; | ||
95 | start &= ~Z2RAM_CHUNKMASK; | ||
96 | end &= ~Z2RAM_CHUNKMASK; | ||
97 | |||
98 | if (end <= Z2RAM_START || start >= Z2RAM_END) | ||
99 | return; | ||
100 | start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START; | ||
101 | end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START; | ||
102 | while (start < end) { | ||
103 | u32 chunk = start>>Z2RAM_CHUNKSHIFT; | ||
104 | if (flag) | 88 | if (flag) |
105 | set_bit(chunk, zorro_unused_z2ram); | 89 | start += Z2RAM_CHUNKMASK; |
106 | else | 90 | else |
107 | clear_bit(chunk, zorro_unused_z2ram); | 91 | end += Z2RAM_CHUNKMASK; |
108 | start += Z2RAM_CHUNKSIZE; | 92 | start &= ~Z2RAM_CHUNKMASK; |
109 | } | 93 | end &= ~Z2RAM_CHUNKMASK; |
94 | |||
95 | if (end <= Z2RAM_START || start >= Z2RAM_END) | ||
96 | return; | ||
97 | start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START; | ||
98 | end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START; | ||
99 | while (start < end) { | ||
100 | u32 chunk = start>>Z2RAM_CHUNKSHIFT; | ||
101 | if (flag) | ||
102 | set_bit(chunk, zorro_unused_z2ram); | ||
103 | else | ||
104 | clear_bit(chunk, zorro_unused_z2ram); | ||
105 | start += Z2RAM_CHUNKSIZE; | ||
106 | } | ||
110 | } | 107 | } |
111 | 108 | ||
112 | 109 | ||
113 | static struct resource __init *zorro_find_parent_resource(struct zorro_dev *z) | 110 | static struct resource __init *zorro_find_parent_resource( |
111 | struct platform_device *bridge, struct zorro_dev *z) | ||
114 | { | 112 | { |
115 | int i; | 113 | int i; |
116 | 114 | ||
117 | for (i = 0; i < zorro_bus.num_resources; i++) | 115 | for (i = 0; i < bridge->num_resources; i++) { |
118 | if (zorro_resource_start(z) >= zorro_bus.resources[i].start && | 116 | struct resource *r = &bridge->resource[i]; |
119 | zorro_resource_end(z) <= zorro_bus.resources[i].end) | 117 | if (zorro_resource_start(z) >= r->start && |
120 | return &zorro_bus.resources[i]; | 118 | zorro_resource_end(z) <= r->end) |
121 | return &iomem_resource; | 119 | return r; |
120 | } | ||
121 | return &iomem_resource; | ||
122 | } | 122 | } |
123 | 123 | ||
124 | 124 | ||
125 | /* | ||
126 | * Initialization | ||
127 | */ | ||
128 | 125 | ||
129 | static int __init zorro_init(void) | 126 | static int __init amiga_zorro_probe(struct platform_device *pdev) |
130 | { | 127 | { |
131 | struct zorro_dev *z; | 128 | struct zorro_bus *bus; |
132 | unsigned int i; | 129 | struct zorro_dev *z; |
133 | int error; | 130 | struct resource *r; |
134 | 131 | unsigned int i; | |
135 | if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO)) | 132 | int error; |
136 | return 0; | 133 | |
137 | 134 | /* Initialize the Zorro bus */ | |
138 | pr_info("Zorro: Probing AutoConfig expansion devices: %d device%s\n", | 135 | bus = kzalloc(sizeof(*bus), GFP_KERNEL); |
139 | zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); | 136 | if (!bus) |
140 | 137 | return -ENOMEM; | |
141 | /* Initialize the Zorro bus */ | 138 | |
142 | INIT_LIST_HEAD(&zorro_bus.devices); | 139 | INIT_LIST_HEAD(&bus->devices); |
143 | dev_set_name(&zorro_bus.dev, "zorro"); | 140 | bus->dev.parent = &pdev->dev; |
144 | error = device_register(&zorro_bus.dev); | 141 | dev_set_name(&bus->dev, "zorro"); |
145 | if (error) { | 142 | error = device_register(&bus->dev); |
146 | pr_err("Zorro: Error registering zorro_bus\n"); | ||
147 | return error; | ||
148 | } | ||
149 | |||
150 | /* Request the resources */ | ||
151 | zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2; | ||
152 | for (i = 0; i < zorro_bus.num_resources; i++) | ||
153 | request_resource(&iomem_resource, &zorro_bus.resources[i]); | ||
154 | |||
155 | /* Register all devices */ | ||
156 | for (i = 0; i < zorro_num_autocon; i++) { | ||
157 | z = &zorro_autocon[i]; | ||
158 | z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); | ||
159 | if (z->id == ZORRO_PROD_GVP_EPC_BASE) { | ||
160 | /* GVP quirk */ | ||
161 | unsigned long magic = zorro_resource_start(z)+0x8000; | ||
162 | z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK; | ||
163 | } | ||
164 | sprintf(z->name, "Zorro device %08x", z->id); | ||
165 | zorro_name_device(z); | ||
166 | z->resource.name = z->name; | ||
167 | if (request_resource(zorro_find_parent_resource(z), &z->resource)) | ||
168 | pr_err("Zorro: Address space collision on device %s %pR\n", | ||
169 | z->name, &z->resource); | ||
170 | dev_set_name(&z->dev, "%02x", i); | ||
171 | z->dev.parent = &zorro_bus.dev; | ||
172 | z->dev.bus = &zorro_bus_type; | ||
173 | error = device_register(&z->dev); | ||
174 | if (error) { | 143 | if (error) { |
175 | pr_err("Zorro: Error registering device %s\n", z->name); | 144 | pr_err("Zorro: Error registering zorro_bus\n"); |
176 | continue; | 145 | kfree(bus); |
146 | return error; | ||
177 | } | 147 | } |
178 | error = zorro_create_sysfs_dev_files(z); | 148 | platform_set_drvdata(pdev, bus); |
179 | if (error) | 149 | |
180 | dev_err(&z->dev, "Error creating sysfs files\n"); | 150 | /* Register all devices */ |
181 | } | 151 | pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", |
182 | 152 | zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); | |
183 | /* Mark all available Zorro II memory */ | 153 | |
184 | zorro_for_each_dev(z) { | 154 | for (i = 0; i < zorro_num_autocon; i++) { |
185 | if (z->rom.er_Type & ERTF_MEMLIST) | 155 | z = &zorro_autocon[i]; |
186 | mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1); | 156 | z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); |
187 | } | 157 | if (z->id == ZORRO_PROD_GVP_EPC_BASE) { |
188 | 158 | /* GVP quirk */ | |
189 | /* Unmark all used Zorro II memory */ | 159 | unsigned long magic = zorro_resource_start(z)+0x8000; |
190 | for (i = 0; i < m68k_num_memory; i++) | 160 | z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK; |
191 | if (m68k_memory[i].addr < 16*1024*1024) | 161 | } |
192 | mark_region(m68k_memory[i].addr, | 162 | sprintf(z->name, "Zorro device %08x", z->id); |
193 | m68k_memory[i].addr+m68k_memory[i].size, 0); | 163 | zorro_name_device(z); |
194 | 164 | z->resource.name = z->name; | |
195 | return 0; | 165 | r = zorro_find_parent_resource(pdev, z); |
166 | error = request_resource(r, &z->resource); | ||
167 | if (error) | ||
168 | dev_err(&bus->dev, | ||
169 | "Address space collision on device %s %pR\n", | ||
170 | z->name, &z->resource); | ||
171 | dev_set_name(&z->dev, "%02x", i); | ||
172 | z->dev.parent = &bus->dev; | ||
173 | z->dev.bus = &zorro_bus_type; | ||
174 | error = device_register(&z->dev); | ||
175 | if (error) { | ||
176 | dev_err(&bus->dev, "Error registering device %s\n", | ||
177 | z->name); | ||
178 | continue; | ||
179 | } | ||
180 | error = zorro_create_sysfs_dev_files(z); | ||
181 | if (error) | ||
182 | dev_err(&z->dev, "Error creating sysfs files\n"); | ||
183 | } | ||
184 | |||
185 | /* Mark all available Zorro II memory */ | ||
186 | zorro_for_each_dev(z) { | ||
187 | if (z->rom.er_Type & ERTF_MEMLIST) | ||
188 | mark_region(zorro_resource_start(z), | ||
189 | zorro_resource_end(z)+1, 1); | ||
190 | } | ||
191 | |||
192 | /* Unmark all used Zorro II memory */ | ||
193 | for (i = 0; i < m68k_num_memory; i++) | ||
194 | if (m68k_memory[i].addr < 16*1024*1024) | ||
195 | mark_region(m68k_memory[i].addr, | ||
196 | m68k_memory[i].addr+m68k_memory[i].size, | ||
197 | 0); | ||
198 | |||
199 | return 0; | ||
196 | } | 200 | } |
197 | 201 | ||
198 | subsys_initcall(zorro_init); | 202 | static struct platform_driver amiga_zorro_driver = { |
203 | .driver = { | ||
204 | .name = "amiga-zorro", | ||
205 | .owner = THIS_MODULE, | ||
206 | }, | ||
207 | }; | ||
199 | 208 | ||
200 | EXPORT_SYMBOL(zorro_find_device); | 209 | static int __init amiga_zorro_init(void) |
201 | EXPORT_SYMBOL(zorro_unused_z2ram); | 210 | { |
211 | return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe); | ||
212 | } | ||
213 | |||
214 | module_init(amiga_zorro_init); | ||
202 | 215 | ||
203 | MODULE_LICENSE("GPL"); | 216 | MODULE_LICENSE("GPL"); |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 109a6c606d92..e8e5e63ac950 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -177,8 +177,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags) | |||
177 | } | 177 | } |
178 | /* Trigger mount for path component or follow link */ | 178 | /* Trigger mount for path component or follow link */ |
179 | } else if (ino->flags & AUTOFS_INF_PENDING || | 179 | } else if (ino->flags & AUTOFS_INF_PENDING || |
180 | autofs4_need_mount(flags) || | 180 | autofs4_need_mount(flags)) { |
181 | current->link_count) { | ||
182 | DPRINTK("waiting for mount name=%.*s", | 181 | DPRINTK("waiting for mount name=%.*s", |
183 | dentry->d_name.len, dentry->d_name.name); | 182 | dentry->d_name.len, dentry->d_name.name); |
184 | 183 | ||
@@ -262,7 +261,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
262 | spin_unlock(&dcache_lock); | 261 | spin_unlock(&dcache_lock); |
263 | spin_unlock(&sbi->fs_lock); | 262 | spin_unlock(&sbi->fs_lock); |
264 | 263 | ||
265 | status = try_to_fill_dentry(dentry, 0); | 264 | status = try_to_fill_dentry(dentry, nd->flags); |
266 | if (status) | 265 | if (status) |
267 | goto out_error; | 266 | goto out_error; |
268 | 267 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e84ef60ffe35..97a97839a867 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1481,12 +1481,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1481 | ret = -EBADF; | 1481 | ret = -EBADF; |
1482 | goto out_drop_write; | 1482 | goto out_drop_write; |
1483 | } | 1483 | } |
1484 | |||
1484 | src = src_file->f_dentry->d_inode; | 1485 | src = src_file->f_dentry->d_inode; |
1485 | 1486 | ||
1486 | ret = -EINVAL; | 1487 | ret = -EINVAL; |
1487 | if (src == inode) | 1488 | if (src == inode) |
1488 | goto out_fput; | 1489 | goto out_fput; |
1489 | 1490 | ||
1491 | /* the src must be open for reading */ | ||
1492 | if (!(src_file->f_mode & FMODE_READ)) | ||
1493 | goto out_fput; | ||
1494 | |||
1490 | ret = -EISDIR; | 1495 | ret = -EISDIR; |
1491 | if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) | 1496 | if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) |
1492 | goto out_fput; | 1497 | goto out_fput; |
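
The btrfs clone ioctl now rejects a source descriptor that was not opened for reading; without the check a write-only descriptor could be used as a data source, effectively letting the caller read a file it only has write access to. A user-space analogue of the same validation - the path and output exist purely for demonstration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Return nonzero only if fd was opened O_RDONLY or O_RDWR. */
    static int fd_is_readable(int fd)
    {
        int fl = fcntl(fd, F_GETFL);

        if (fl < 0)
            return 0;
        return (fl & O_ACCMODE) == O_RDONLY || (fl & O_ACCMODE) == O_RDWR;
    }

    int main(void)
    {
        int fd = open("/dev/null", O_WRONLY);

        printf("write-only fd readable: %d\n", fd_is_readable(fd));
        close(fd);
        return 0;
    }
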
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index f7c255f9c624..a8cd821226da 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h | |||
@@ -34,6 +34,7 @@ struct cachefiles_object { | |||
34 | loff_t i_size; /* object size */ | 34 | loff_t i_size; /* object size */ |
35 | unsigned long flags; | 35 | unsigned long flags; |
36 | #define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */ | 36 | #define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */ |
37 | #define CACHEFILES_OBJECT_BURIED 1 /* T if preemptively buried */ | ||
37 | atomic_t usage; /* object usage count */ | 38 | atomic_t usage; /* object usage count */ |
38 | uint8_t type; /* object type */ | 39 | uint8_t type; /* object type */ |
39 | uint8_t new; /* T if object new */ | 40 | uint8_t new; /* T if object new */ |
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index d5db84a1ee0d..f4a7840bf42c 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c | |||
@@ -93,6 +93,59 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object, | |||
93 | } | 93 | } |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * mark the owner of a dentry, if there is one, to indicate that that dentry | ||
97 | * has been preemptively deleted | ||
98 | * - the caller must hold the i_mutex on the dentry's parent as required to | ||
99 | * call vfs_unlink(), vfs_rmdir() or vfs_rename() | ||
100 | */ | ||
101 | static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, | ||
102 | struct dentry *dentry) | ||
103 | { | ||
104 | struct cachefiles_object *object; | ||
105 | struct rb_node *p; | ||
106 | |||
107 | _enter(",'%*.*s'", | ||
108 | dentry->d_name.len, dentry->d_name.len, dentry->d_name.name); | ||
109 | |||
110 | write_lock(&cache->active_lock); | ||
111 | |||
112 | p = cache->active_nodes.rb_node; | ||
113 | while (p) { | ||
114 | object = rb_entry(p, struct cachefiles_object, active_node); | ||
115 | if (object->dentry > dentry) | ||
116 | p = p->rb_left; | ||
117 | else if (object->dentry < dentry) | ||
118 | p = p->rb_right; | ||
119 | else | ||
120 | goto found_dentry; | ||
121 | } | ||
122 | |||
123 | write_unlock(&cache->active_lock); | ||
124 | _leave(" [no owner]"); | ||
125 | return; | ||
126 | |||
127 | /* found the dentry for */ | ||
128 | found_dentry: | ||
129 | kdebug("preemptive burial: OBJ%x [%s] %p", | ||
130 | object->fscache.debug_id, | ||
131 | fscache_object_states[object->fscache.state], | ||
132 | dentry); | ||
133 | |||
134 | if (object->fscache.state < FSCACHE_OBJECT_DYING) { | ||
135 | printk(KERN_ERR "\n"); | ||
136 | printk(KERN_ERR "CacheFiles: Error:" | ||
137 | " Can't preemptively bury live object\n"); | ||
138 | cachefiles_printk_object(object, NULL); | ||
139 | } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { | ||
140 | printk(KERN_ERR "CacheFiles: Error:" | ||
141 | " Object already preemptively buried\n"); | ||
142 | } | ||
143 | |||
144 | write_unlock(&cache->active_lock); | ||
145 | _leave(" [owner marked]"); | ||
146 | } | ||
147 | |||
148 | /* | ||
96 | * record the fact that an object is now active | 149 | * record the fact that an object is now active |
97 | */ | 150 | */ |
98 | static int cachefiles_mark_object_active(struct cachefiles_cache *cache, | 151 | static int cachefiles_mark_object_active(struct cachefiles_cache *cache, |
@@ -219,7 +272,8 @@ requeue: | |||
219 | */ | 272 | */ |
220 | static int cachefiles_bury_object(struct cachefiles_cache *cache, | 273 | static int cachefiles_bury_object(struct cachefiles_cache *cache, |
221 | struct dentry *dir, | 274 | struct dentry *dir, |
222 | struct dentry *rep) | 275 | struct dentry *rep, |
276 | bool preemptive) | ||
223 | { | 277 | { |
224 | struct dentry *grave, *trap; | 278 | struct dentry *grave, *trap; |
225 | char nbuffer[8 + 8 + 1]; | 279 | char nbuffer[8 + 8 + 1]; |
@@ -229,11 +283,16 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, | |||
229 | dir->d_name.len, dir->d_name.len, dir->d_name.name, | 283 | dir->d_name.len, dir->d_name.len, dir->d_name.name, |
230 | rep->d_name.len, rep->d_name.len, rep->d_name.name); | 284 | rep->d_name.len, rep->d_name.len, rep->d_name.name); |
231 | 285 | ||
286 | _debug("remove %p from %p", rep, dir); | ||
287 | |||
232 | /* non-directories can just be unlinked */ | 288 | /* non-directories can just be unlinked */ |
233 | if (!S_ISDIR(rep->d_inode->i_mode)) { | 289 | if (!S_ISDIR(rep->d_inode->i_mode)) { |
234 | _debug("unlink stale object"); | 290 | _debug("unlink stale object"); |
235 | ret = vfs_unlink(dir->d_inode, rep); | 291 | ret = vfs_unlink(dir->d_inode, rep); |
236 | 292 | ||
293 | if (preemptive) | ||
294 | cachefiles_mark_object_buried(cache, rep); | ||
295 | |||
237 | mutex_unlock(&dir->d_inode->i_mutex); | 296 | mutex_unlock(&dir->d_inode->i_mutex); |
238 | 297 | ||
239 | if (ret == -EIO) | 298 | if (ret == -EIO) |
@@ -325,6 +384,9 @@ try_again: | |||
325 | if (ret != 0 && ret != -ENOMEM) | 384 | if (ret != 0 && ret != -ENOMEM) |
326 | cachefiles_io_error(cache, "Rename failed with error %d", ret); | 385 | cachefiles_io_error(cache, "Rename failed with error %d", ret); |
327 | 386 | ||
387 | if (preemptive) | ||
388 | cachefiles_mark_object_buried(cache, rep); | ||
389 | |||
328 | unlock_rename(cache->graveyard, dir); | 390 | unlock_rename(cache->graveyard, dir); |
329 | dput(grave); | 391 | dput(grave); |
330 | _leave(" = 0"); | 392 | _leave(" = 0"); |
@@ -340,7 +402,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, | |||
340 | struct dentry *dir; | 402 | struct dentry *dir; |
341 | int ret; | 403 | int ret; |
342 | 404 | ||
343 | _enter(",{%p}", object->dentry); | 405 | _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry); |
344 | 406 | ||
345 | ASSERT(object->dentry); | 407 | ASSERT(object->dentry); |
346 | ASSERT(object->dentry->d_inode); | 408 | ASSERT(object->dentry->d_inode); |
@@ -350,15 +412,25 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, | |||
350 | 412 | ||
351 | mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); | 413 | mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); |
352 | 414 | ||
353 | /* we need to check that our parent is _still_ our parent - it may have | 415 | if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { |
354 | * been renamed */ | 416 | /* object allocation for the same key preemptively deleted this |
355 | if (dir == object->dentry->d_parent) { | 417 | * object's file so that it could create its own file */ |
356 | ret = cachefiles_bury_object(cache, dir, object->dentry); | 418 | _debug("object preemptively buried"); |
357 | } else { | ||
358 | /* it got moved, presumably by cachefilesd culling it, so it's | ||
359 | * no longer in the key path and we can ignore it */ | ||
360 | mutex_unlock(&dir->d_inode->i_mutex); | 419 | mutex_unlock(&dir->d_inode->i_mutex); |
361 | ret = 0; | 420 | ret = 0; |
421 | } else { | ||
422 | /* we need to check that our parent is _still_ our parent - it | ||
423 | * may have been renamed */ | ||
424 | if (dir == object->dentry->d_parent) { | ||
425 | ret = cachefiles_bury_object(cache, dir, | ||
426 | object->dentry, false); | ||
427 | } else { | ||
428 | /* it got moved, presumably by cachefilesd culling it, | ||
429 | * so it's no longer in the key path and we can ignore | ||
430 | * it */ | ||
431 | mutex_unlock(&dir->d_inode->i_mutex); | ||
432 | ret = 0; | ||
433 | } | ||
362 | } | 434 | } |
363 | 435 | ||
364 | dput(dir); | 436 | dput(dir); |
@@ -381,7 +453,9 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent, | |||
381 | const char *name; | 453 | const char *name; |
382 | int ret, nlen; | 454 | int ret, nlen; |
383 | 455 | ||
384 | _enter("{%p},,%s,", parent->dentry, key); | 456 | _enter("OBJ%x{%p},OBJ%x,%s,", |
457 | parent->fscache.debug_id, parent->dentry, | ||
458 | object->fscache.debug_id, key); | ||
385 | 459 | ||
386 | cache = container_of(parent->fscache.cache, | 460 | cache = container_of(parent->fscache.cache, |
387 | struct cachefiles_cache, cache); | 461 | struct cachefiles_cache, cache); |
@@ -509,7 +583,7 @@ lookup_again: | |||
509 | * mutex) */ | 583 | * mutex) */ |
510 | object->dentry = NULL; | 584 | object->dentry = NULL; |
511 | 585 | ||
512 | ret = cachefiles_bury_object(cache, dir, next); | 586 | ret = cachefiles_bury_object(cache, dir, next, true); |
513 | dput(next); | 587 | dput(next); |
514 | next = NULL; | 588 | next = NULL; |
515 | 589 | ||
@@ -828,7 +902,7 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, | |||
828 | /* actually remove the victim (drops the dir mutex) */ | 902 | /* actually remove the victim (drops the dir mutex) */ |
829 | _debug("bury"); | 903 | _debug("bury"); |
830 | 904 | ||
831 | ret = cachefiles_bury_object(cache, dir, victim); | 905 | ret = cachefiles_bury_object(cache, dir, victim, false); |
832 | if (ret < 0) | 906 | if (ret < 0) |
833 | goto error; | 907 | goto error; |
834 | 908 | ||
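
cachefiles_mark_object_buried() walks the cache's active-object rbtree, keyed by dentry pointer, to find the owner of the dentry that was just unlinked and sets CACHEFILES_OBJECT_BURIED so the later delete knows its file is already gone. The pointer-keyed rbtree search on its own, with made-up structure and field names:

    #include <linux/rbtree.h>

    struct item {
        struct rb_node node;
        void *key;          /* compared by pointer value, like the dentry */
    };

    static struct item *lookup(struct rb_root *root, void *key)
    {
        struct rb_node *p = root->rb_node;

        while (p) {
            struct item *it = rb_entry(p, struct item, node);

            if (it->key > key)
                p = p->rb_left;
            else if (it->key < key)
                p = p->rb_right;
            else
                return it;
        }
        return NULL;
    }
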
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c index b5808cdb2232..039b5011d83b 100644 --- a/fs/cachefiles/security.c +++ b/fs/cachefiles/security.c | |||
@@ -77,6 +77,8 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache, | |||
77 | /* | 77 | /* |
78 | * check the security details of the on-disk cache | 78 | * check the security details of the on-disk cache |
79 | * - must be called with security override in force | 79 | * - must be called with security override in force |
80 | * - must return with a security override in force - even in the case of an | ||
81 | * error | ||
80 | */ | 82 | */ |
81 | int cachefiles_determine_cache_security(struct cachefiles_cache *cache, | 83 | int cachefiles_determine_cache_security(struct cachefiles_cache *cache, |
82 | struct dentry *root, | 84 | struct dentry *root, |
@@ -99,6 +101,8 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache, | |||
99 | * which create files */ | 101 | * which create files */ |
100 | ret = set_create_files_as(new, root->d_inode); | 102 | ret = set_create_files_as(new, root->d_inode); |
101 | if (ret < 0) { | 103 | if (ret < 0) { |
104 | abort_creds(new); | ||
105 | cachefiles_begin_secure(cache, _saved_cred); | ||
102 | _leave(" = %d [cfa]", ret); | 106 | _leave(" = %d [cfa]", ret); |
103 | return ret; | 107 | return ret; |
104 | } | 108 | } |
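[Editor's sketch] The security.c hunk restores an invariant spelled out in the new comment: the helper must return with the security override engaged even when it fails. A generic user-space sketch of that "re-engage before every error return" pattern; begin_override()/end_override() and prepare_new_creds() are stand-ins, not the cachefiles API.

	#include <stdio.h>

	static int override_active;             /* stands in for the cred override */

	static void begin_override(void) { override_active = 1; }
	static void end_override(void)   { override_active = 0; }

	static int prepare_new_creds(void) { return -1; }   /* simulate failure */

	/* called with the override in force; must return with it in force on
	 * every path, including errors */
	static int determine_security(void)
	{
		int err;

		end_override();                 /* temporarily act as the new creds */

		err = prepare_new_creds();
		if (err < 0) {
			begin_override();       /* the fix: re-engage before returning */
			return err;
		}

		begin_override();
		return 0;
	}

	int main(void)
	{
		begin_override();
		determine_security();
		printf("override in force after error: %d\n", override_active);
		return 0;
	}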
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 4b42c2bb603f..a9005d862ed4 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -504,7 +504,6 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
504 | int i; | 504 | int i; |
505 | struct ceph_snap_context *snapc = req->r_snapc; | 505 | struct ceph_snap_context *snapc = req->r_snapc; |
506 | struct address_space *mapping = inode->i_mapping; | 506 | struct address_space *mapping = inode->i_mapping; |
507 | struct writeback_control *wbc = req->r_wbc; | ||
508 | __s32 rc = -EIO; | 507 | __s32 rc = -EIO; |
509 | u64 bytes = 0; | 508 | u64 bytes = 0; |
510 | struct ceph_client *client = ceph_inode_to_client(inode); | 509 | struct ceph_client *client = ceph_inode_to_client(inode); |
@@ -546,10 +545,6 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
546 | clear_bdi_congested(&client->backing_dev_info, | 545 | clear_bdi_congested(&client->backing_dev_info, |
547 | BLK_RW_ASYNC); | 546 | BLK_RW_ASYNC); |
548 | 547 | ||
549 | if (i >= wrote) { | ||
550 | dout("inode %p skipping page %p\n", inode, page); | ||
551 | wbc->pages_skipped++; | ||
552 | } | ||
553 | ceph_put_snap_context((void *)page->private); | 548 | ceph_put_snap_context((void *)page->private); |
554 | page->private = 0; | 549 | page->private = 0; |
555 | ClearPagePrivate(page); | 550 | ClearPagePrivate(page); |
@@ -799,7 +794,6 @@ get_more_pages: | |||
799 | alloc_page_vec(client, req); | 794 | alloc_page_vec(client, req); |
800 | req->r_callback = writepages_finish; | 795 | req->r_callback = writepages_finish; |
801 | req->r_inode = inode; | 796 | req->r_inode = inode; |
802 | req->r_wbc = wbc; | ||
803 | } | 797 | } |
804 | 798 | ||
805 | /* note position of first page in pvec */ | 799 | /* note position of first page in pvec */ |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 0c1681806867..d9400534b279 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -858,6 +858,8 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci) | |||
858 | } | 858 | } |
859 | 859 | ||
860 | /* | 860 | /* |
861 | * Remove a cap. Take steps to deal with a racing iterate_session_caps. | ||
862 | * | ||
861 | * caller should hold i_lock. | 863 | * caller should hold i_lock. |
862 | * caller will not hold session s_mutex if called from destroy_inode. | 864 | * caller will not hold session s_mutex if called from destroy_inode. |
863 | */ | 865 | */ |
@@ -866,15 +868,10 @@ void __ceph_remove_cap(struct ceph_cap *cap) | |||
866 | struct ceph_mds_session *session = cap->session; | 868 | struct ceph_mds_session *session = cap->session; |
867 | struct ceph_inode_info *ci = cap->ci; | 869 | struct ceph_inode_info *ci = cap->ci; |
868 | struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; | 870 | struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; |
871 | int removed = 0; | ||
869 | 872 | ||
870 | dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); | 873 | dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); |
871 | 874 | ||
872 | /* remove from inode list */ | ||
873 | rb_erase(&cap->ci_node, &ci->i_caps); | ||
874 | cap->ci = NULL; | ||
875 | if (ci->i_auth_cap == cap) | ||
876 | ci->i_auth_cap = NULL; | ||
877 | |||
878 | /* remove from session list */ | 875 | /* remove from session list */ |
879 | spin_lock(&session->s_cap_lock); | 876 | spin_lock(&session->s_cap_lock); |
880 | if (session->s_cap_iterator == cap) { | 877 | if (session->s_cap_iterator == cap) { |
@@ -885,10 +882,18 @@ void __ceph_remove_cap(struct ceph_cap *cap) | |||
885 | list_del_init(&cap->session_caps); | 882 | list_del_init(&cap->session_caps); |
886 | session->s_nr_caps--; | 883 | session->s_nr_caps--; |
887 | cap->session = NULL; | 884 | cap->session = NULL; |
885 | removed = 1; | ||
888 | } | 886 | } |
887 | /* protect backpointer with s_cap_lock: see iterate_session_caps */ | ||
888 | cap->ci = NULL; | ||
889 | spin_unlock(&session->s_cap_lock); | 889 | spin_unlock(&session->s_cap_lock); |
890 | 890 | ||
891 | if (cap->session == NULL) | 891 | /* remove from inode list */ |
892 | rb_erase(&cap->ci_node, &ci->i_caps); | ||
893 | if (ci->i_auth_cap == cap) | ||
894 | ci->i_auth_cap = NULL; | ||
895 | |||
896 | if (removed) | ||
892 | ceph_put_cap(cap); | 897 | ceph_put_cap(cap); |
893 | 898 | ||
894 | if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { | 899 | if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { |
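[Editor's sketch] The __ceph_remove_cap() reordering does two things: the back-pointer (cap->ci) is now cleared under s_cap_lock so a racing iterator never sees it dangle, and a local "removed" flag records whether this caller actually unlinked the cap, so the reference is dropped exactly once. A rough pthreads sketch of that pattern with illustrative types, not ceph code:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		int refcount;
		int on_list;            /* protected by lock */
		void *backptr;          /* protected by lock, like cap->ci */
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void put_item(struct item *it)
	{
		if (--it->refcount == 0)
			free(it);
	}

	static void remove_item(struct item *it)
	{
		int removed = 0;

		pthread_mutex_lock(&lock);
		if (it->on_list) {              /* only one caller wins */
			it->on_list = 0;
			removed = 1;
		}
		it->backptr = NULL;             /* clear back-pointer under the lock */
		pthread_mutex_unlock(&lock);

		if (removed)                    /* drop the ref only if we unlinked */
			put_item(it);
	}

	int main(void)
	{
		struct item *it = calloc(1, sizeof(*it));

		it->refcount = 1;
		it->on_list = 1;
		remove_item(it);
		printf("done\n");
		return 0;
	}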
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 261f3e6c0bcf..85b4d2ffdeba 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -733,6 +733,10 @@ no_change: | |||
733 | __ceph_get_fmode(ci, cap_fmode); | 733 | __ceph_get_fmode(ci, cap_fmode); |
734 | spin_unlock(&inode->i_lock); | 734 | spin_unlock(&inode->i_lock); |
735 | } | 735 | } |
736 | } else if (cap_fmode >= 0) { | ||
737 | pr_warning("mds issued no caps on %llx.%llx\n", | ||
738 | ceph_vinop(inode)); | ||
739 | __ceph_get_fmode(ci, cap_fmode); | ||
736 | } | 740 | } |
737 | 741 | ||
738 | /* update delegation info? */ | 742 | /* update delegation info? */ |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 60a9a4ae47be..24561a557e01 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -736,9 +736,10 @@ static void cleanup_cap_releases(struct ceph_mds_session *session) | |||
736 | } | 736 | } |
737 | 737 | ||
738 | /* | 738 | /* |
739 | * Helper to safely iterate over all caps associated with a session. | 739 | * Helper to safely iterate over all caps associated with a session, with |
740 | * special care taken to handle a racing __ceph_remove_cap(). | ||
740 | * | 741 | * |
741 | * caller must hold session s_mutex | 742 | * Caller must hold session s_mutex. |
742 | */ | 743 | */ |
743 | static int iterate_session_caps(struct ceph_mds_session *session, | 744 | static int iterate_session_caps(struct ceph_mds_session *session, |
744 | int (*cb)(struct inode *, struct ceph_cap *, | 745 | int (*cb)(struct inode *, struct ceph_cap *, |
@@ -2136,7 +2137,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) | |||
2136 | struct ceph_mds_session *session = NULL; | 2137 | struct ceph_mds_session *session = NULL; |
2137 | struct ceph_msg *reply; | 2138 | struct ceph_msg *reply; |
2138 | struct rb_node *p; | 2139 | struct rb_node *p; |
2139 | int err; | 2140 | int err = -ENOMEM; |
2140 | struct ceph_pagelist *pagelist; | 2141 | struct ceph_pagelist *pagelist; |
2141 | 2142 | ||
2142 | pr_info("reconnect to recovering mds%d\n", mds); | 2143 | pr_info("reconnect to recovering mds%d\n", mds); |
@@ -2185,7 +2186,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) | |||
2185 | goto fail; | 2186 | goto fail; |
2186 | err = iterate_session_caps(session, encode_caps_cb, pagelist); | 2187 | err = iterate_session_caps(session, encode_caps_cb, pagelist); |
2187 | if (err < 0) | 2188 | if (err < 0) |
2188 | goto out; | 2189 | goto fail; |
2189 | 2190 | ||
2190 | /* | 2191 | /* |
2191 | * snaprealms. we provide mds with the ino, seq (version), and | 2192 | * snaprealms. we provide mds with the ino, seq (version), and |
@@ -2213,28 +2214,31 @@ send: | |||
2213 | reply->nr_pages = calc_pages_for(0, pagelist->length); | 2214 | reply->nr_pages = calc_pages_for(0, pagelist->length); |
2214 | ceph_con_send(&session->s_con, reply); | 2215 | ceph_con_send(&session->s_con, reply); |
2215 | 2216 | ||
2216 | if (session) { | 2217 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2217 | session->s_state = CEPH_MDS_SESSION_OPEN; | 2218 | mutex_unlock(&session->s_mutex); |
2218 | __wake_requests(mdsc, &session->s_waiting); | 2219 | |
2219 | } | 2220 | mutex_lock(&mdsc->mutex); |
2221 | __wake_requests(mdsc, &session->s_waiting); | ||
2222 | mutex_unlock(&mdsc->mutex); | ||
2223 | |||
2224 | ceph_put_mds_session(session); | ||
2220 | 2225 | ||
2221 | out: | ||
2222 | up_read(&mdsc->snap_rwsem); | 2226 | up_read(&mdsc->snap_rwsem); |
2223 | if (session) { | ||
2224 | mutex_unlock(&session->s_mutex); | ||
2225 | ceph_put_mds_session(session); | ||
2226 | } | ||
2227 | mutex_lock(&mdsc->mutex); | 2227 | mutex_lock(&mdsc->mutex); |
2228 | return; | 2228 | return; |
2229 | 2229 | ||
2230 | fail: | 2230 | fail: |
2231 | ceph_msg_put(reply); | 2231 | ceph_msg_put(reply); |
2232 | up_read(&mdsc->snap_rwsem); | ||
2233 | mutex_unlock(&session->s_mutex); | ||
2234 | ceph_put_mds_session(session); | ||
2232 | fail_nomsg: | 2235 | fail_nomsg: |
2233 | ceph_pagelist_release(pagelist); | 2236 | ceph_pagelist_release(pagelist); |
2234 | kfree(pagelist); | 2237 | kfree(pagelist); |
2235 | fail_nopagelist: | 2238 | fail_nopagelist: |
2236 | pr_err("ENOMEM preparing reconnect for mds%d\n", mds); | 2239 | pr_err("error %d preparing reconnect for mds%d\n", err, mds); |
2237 | goto out; | 2240 | mutex_lock(&mdsc->mutex); |
2241 | return; | ||
2238 | } | 2242 | } |
2239 | 2243 | ||
2240 | 2244 | ||
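[Editor's sketch] The send_mds_reconnect() rework pre-initialises err to -ENOMEM and routes every failure through labels that unwind only what was already set up, so the final pr_err() can report a real error code. A self-contained sketch of that goto-based unwinding style (encode_caps() and the resources are made up for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	static int encode_caps(void)
	{
		return -5;                      /* pretend a later step failed (-EIO) */
	}

	/* err starts at -ENOMEM so early allocation failures report a real code;
	 * each label frees only what was allocated before it */
	static int do_reconnect(void)
	{
		int err = -12;                  /* -ENOMEM */
		char *pagelist, *reply;

		pagelist = malloc(16);
		if (!pagelist)
			goto fail_nopagelist;

		reply = malloc(16);
		if (!reply)
			goto fail_nomsg;

		err = encode_caps();
		if (err < 0)
			goto fail;

		free(reply);
		free(pagelist);
		return 0;

	fail:
		free(reply);
	fail_nomsg:
		free(pagelist);
	fail_nopagelist:
		fprintf(stderr, "error %d preparing reconnect\n", err);
		return err;
	}

	int main(void)
	{
		return do_reconnect() < 0;
	}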
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 509f57d9ccb3..cd4fadb6491a 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c | |||
@@ -492,7 +492,14 @@ static void prepare_write_message(struct ceph_connection *con) | |||
492 | list_move_tail(&m->list_head, &con->out_sent); | 492 | list_move_tail(&m->list_head, &con->out_sent); |
493 | } | 493 | } |
494 | 494 | ||
495 | m->hdr.seq = cpu_to_le64(++con->out_seq); | 495 | /* |
496 | * only assign outgoing seq # if we haven't sent this message | ||
497 | * yet. if it is requeued, resend with its original seq. | ||
498 | */ | ||
499 | if (m->needs_out_seq) { | ||
500 | m->hdr.seq = cpu_to_le64(++con->out_seq); | ||
501 | m->needs_out_seq = false; | ||
502 | } | ||
496 | 503 | ||
497 | dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", | 504 | dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", |
498 | m, con->out_seq, le16_to_cpu(m->hdr.type), | 505 | m, con->out_seq, le16_to_cpu(m->hdr.type), |
@@ -1986,6 +1993,8 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) | |||
1986 | 1993 | ||
1987 | BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); | 1994 | BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); |
1988 | 1995 | ||
1996 | msg->needs_out_seq = true; | ||
1997 | |||
1989 | /* queue */ | 1998 | /* queue */ |
1990 | mutex_lock(&con->mutex); | 1999 | mutex_lock(&con->mutex); |
1991 | BUG_ON(!list_empty(&msg->list_head)); | 2000 | BUG_ON(!list_empty(&msg->list_head)); |
@@ -2085,15 +2094,19 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, | |||
2085 | kref_init(&m->kref); | 2094 | kref_init(&m->kref); |
2086 | INIT_LIST_HEAD(&m->list_head); | 2095 | INIT_LIST_HEAD(&m->list_head); |
2087 | 2096 | ||
2097 | m->hdr.tid = 0; | ||
2088 | m->hdr.type = cpu_to_le16(type); | 2098 | m->hdr.type = cpu_to_le16(type); |
2099 | m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); | ||
2100 | m->hdr.version = 0; | ||
2089 | m->hdr.front_len = cpu_to_le32(front_len); | 2101 | m->hdr.front_len = cpu_to_le32(front_len); |
2090 | m->hdr.middle_len = 0; | 2102 | m->hdr.middle_len = 0; |
2091 | m->hdr.data_len = cpu_to_le32(page_len); | 2103 | m->hdr.data_len = cpu_to_le32(page_len); |
2092 | m->hdr.data_off = cpu_to_le16(page_off); | 2104 | m->hdr.data_off = cpu_to_le16(page_off); |
2093 | m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); | 2105 | m->hdr.reserved = 0; |
2094 | m->footer.front_crc = 0; | 2106 | m->footer.front_crc = 0; |
2095 | m->footer.middle_crc = 0; | 2107 | m->footer.middle_crc = 0; |
2096 | m->footer.data_crc = 0; | 2108 | m->footer.data_crc = 0; |
2109 | m->footer.flags = 0; | ||
2097 | m->front_max = front_len; | 2110 | m->front_max = front_len; |
2098 | m->front_is_vmalloc = false; | 2111 | m->front_is_vmalloc = false; |
2099 | m->more_to_follow = false; | 2112 | m->more_to_follow = false; |
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index a343dae73cdc..a5caf91cc971 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h | |||
@@ -86,6 +86,7 @@ struct ceph_msg { | |||
86 | struct kref kref; | 86 | struct kref kref; |
87 | bool front_is_vmalloc; | 87 | bool front_is_vmalloc; |
88 | bool more_to_follow; | 88 | bool more_to_follow; |
89 | bool needs_out_seq; | ||
89 | int front_max; | 90 | int front_max; |
90 | 91 | ||
91 | struct ceph_msgpool *pool; | 92 | struct ceph_msgpool *pool; |
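[Editor's sketch] A compact sketch of the needs_out_seq idea: the outgoing sequence number is assigned the first time a message is actually written out and is kept if the message is later requeued (for example after a reconnect), so the peer can still detect duplicates. Types and names below are illustrative, not the ceph messenger API.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct msg {
		uint64_t seq;
		bool needs_out_seq;     /* set when queued, cleared on first send */
	};

	static uint64_t out_seq;        /* per-connection outgoing counter */

	static void con_send(struct msg *m)
	{
		m->needs_out_seq = true;        /* queued for first transmission */
	}

	static void prepare_write(struct msg *m)
	{
		/* only assign a sequence number on the first transmission;
		 * a requeued message keeps its original seq */
		if (m->needs_out_seq) {
			m->seq = ++out_seq;
			m->needs_out_seq = false;
		}
		printf("sending seq %llu\n", (unsigned long long)m->seq);
	}

	int main(void)
	{
		struct msg m = { 0 };

		con_send(&m);
		prepare_write(&m);      /* assigns seq 1 */
		prepare_write(&m);      /* simulated resend keeps seq 1 */
		return 0;
	}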
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index c7b4dedaace6..3514f71ff85f 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c | |||
@@ -565,7 +565,8 @@ static int __map_osds(struct ceph_osd_client *osdc, | |||
565 | { | 565 | { |
566 | struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; | 566 | struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; |
567 | struct ceph_pg pgid; | 567 | struct ceph_pg pgid; |
568 | int o = -1; | 568 | int acting[CEPH_PG_MAX_SIZE]; |
569 | int o = -1, num = 0; | ||
569 | int err; | 570 | int err; |
570 | 571 | ||
571 | dout("map_osds %p tid %lld\n", req, req->r_tid); | 572 | dout("map_osds %p tid %lld\n", req, req->r_tid); |
@@ -576,10 +577,16 @@ static int __map_osds(struct ceph_osd_client *osdc, | |||
576 | pgid = reqhead->layout.ol_pgid; | 577 | pgid = reqhead->layout.ol_pgid; |
577 | req->r_pgid = pgid; | 578 | req->r_pgid = pgid; |
578 | 579 | ||
579 | o = ceph_calc_pg_primary(osdc->osdmap, pgid); | 580 | err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting); |
581 | if (err > 0) { | ||
582 | o = acting[0]; | ||
583 | num = err; | ||
584 | } | ||
580 | 585 | ||
581 | if ((req->r_osd && req->r_osd->o_osd == o && | 586 | if ((req->r_osd && req->r_osd->o_osd == o && |
582 | req->r_sent >= req->r_osd->o_incarnation) || | 587 | req->r_sent >= req->r_osd->o_incarnation && |
588 | req->r_num_pg_osds == num && | ||
589 | memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || | ||
583 | (req->r_osd == NULL && o == -1)) | 590 | (req->r_osd == NULL && o == -1)) |
584 | return 0; /* no change */ | 591 | return 0; /* no change */ |
585 | 592 | ||
@@ -587,6 +594,10 @@ static int __map_osds(struct ceph_osd_client *osdc, | |||
587 | req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o, | 594 | req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o, |
588 | req->r_osd ? req->r_osd->o_osd : -1); | 595 | req->r_osd ? req->r_osd->o_osd : -1); |
589 | 596 | ||
597 | /* record full pg acting set */ | ||
598 | memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num); | ||
599 | req->r_num_pg_osds = num; | ||
600 | |||
590 | if (req->r_osd) { | 601 | if (req->r_osd) { |
591 | __cancel_request(req); | 602 | __cancel_request(req); |
592 | list_del_init(&req->r_osd_item); | 603 | list_del_init(&req->r_osd_item); |
@@ -612,7 +623,7 @@ static int __map_osds(struct ceph_osd_client *osdc, | |||
612 | __remove_osd_from_lru(req->r_osd); | 623 | __remove_osd_from_lru(req->r_osd); |
613 | list_add(&req->r_osd_item, &req->r_osd->o_requests); | 624 | list_add(&req->r_osd_item, &req->r_osd->o_requests); |
614 | } | 625 | } |
615 | err = 1; /* osd changed */ | 626 | err = 1; /* osd or pg changed */ |
616 | 627 | ||
617 | out: | 628 | out: |
618 | return err; | 629 | return err; |
@@ -779,16 +790,18 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
779 | struct ceph_osd_request *req; | 790 | struct ceph_osd_request *req; |
780 | u64 tid; | 791 | u64 tid; |
781 | int numops, object_len, flags; | 792 | int numops, object_len, flags; |
793 | s32 result; | ||
782 | 794 | ||
783 | tid = le64_to_cpu(msg->hdr.tid); | 795 | tid = le64_to_cpu(msg->hdr.tid); |
784 | if (msg->front.iov_len < sizeof(*rhead)) | 796 | if (msg->front.iov_len < sizeof(*rhead)) |
785 | goto bad; | 797 | goto bad; |
786 | numops = le32_to_cpu(rhead->num_ops); | 798 | numops = le32_to_cpu(rhead->num_ops); |
787 | object_len = le32_to_cpu(rhead->object_len); | 799 | object_len = le32_to_cpu(rhead->object_len); |
800 | result = le32_to_cpu(rhead->result); | ||
788 | if (msg->front.iov_len != sizeof(*rhead) + object_len + | 801 | if (msg->front.iov_len != sizeof(*rhead) + object_len + |
789 | numops * sizeof(struct ceph_osd_op)) | 802 | numops * sizeof(struct ceph_osd_op)) |
790 | goto bad; | 803 | goto bad; |
791 | dout("handle_reply %p tid %llu\n", msg, tid); | 804 | dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result); |
792 | 805 | ||
793 | /* lookup */ | 806 | /* lookup */ |
794 | mutex_lock(&osdc->request_mutex); | 807 | mutex_lock(&osdc->request_mutex); |
@@ -834,7 +847,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
834 | dout("handle_reply tid %llu flags %d\n", tid, flags); | 847 | dout("handle_reply tid %llu flags %d\n", tid, flags); |
835 | 848 | ||
836 | /* either this is a read, or we got the safe response */ | 849 | /* either this is a read, or we got the safe response */ |
837 | if ((flags & CEPH_OSD_FLAG_ONDISK) || | 850 | if (result < 0 || |
851 | (flags & CEPH_OSD_FLAG_ONDISK) || | ||
838 | ((flags & CEPH_OSD_FLAG_WRITE) == 0)) | 852 | ((flags & CEPH_OSD_FLAG_WRITE) == 0)) |
839 | __unregister_request(osdc, req); | 853 | __unregister_request(osdc, req); |
840 | 854 | ||
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index b0759911e7c3..ce776989ef6a 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h | |||
@@ -48,6 +48,8 @@ struct ceph_osd_request { | |||
48 | struct list_head r_osd_item; | 48 | struct list_head r_osd_item; |
49 | struct ceph_osd *r_osd; | 49 | struct ceph_osd *r_osd; |
50 | struct ceph_pg r_pgid; | 50 | struct ceph_pg r_pgid; |
51 | int r_pg_osds[CEPH_PG_MAX_SIZE]; | ||
52 | int r_num_pg_osds; | ||
51 | 53 | ||
52 | struct ceph_connection *r_con_filling_msg; | 54 | struct ceph_connection *r_con_filling_msg; |
53 | 55 | ||
@@ -66,7 +68,6 @@ struct ceph_osd_request { | |||
66 | struct list_head r_unsafe_item; | 68 | struct list_head r_unsafe_item; |
67 | 69 | ||
68 | struct inode *r_inode; /* for use by callbacks */ | 70 | struct inode *r_inode; /* for use by callbacks */ |
69 | struct writeback_control *r_wbc; /* ditto */ | ||
70 | 71 | ||
71 | char r_oid[40]; /* object name */ | 72 | char r_oid[40]; /* object name */ |
72 | int r_oid_len; | 73 | int r_oid_len; |
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 2e2c15eed82a..cfdd8f4388b7 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c | |||
@@ -1041,12 +1041,33 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | /* | 1043 | /* |
1044 | * Return acting set for given pgid. | ||
1045 | */ | ||
1046 | int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | ||
1047 | int *acting) | ||
1048 | { | ||
1049 | int rawosds[CEPH_PG_MAX_SIZE], *osds; | ||
1050 | int i, o, num = CEPH_PG_MAX_SIZE; | ||
1051 | |||
1052 | osds = calc_pg_raw(osdmap, pgid, rawosds, &num); | ||
1053 | if (!osds) | ||
1054 | return -1; | ||
1055 | |||
1056 | /* primary is first up osd */ | ||
1057 | o = 0; | ||
1058 | for (i = 0; i < num; i++) | ||
1059 | if (ceph_osd_is_up(osdmap, osds[i])) | ||
1060 | acting[o++] = osds[i]; | ||
1061 | return o; | ||
1062 | } | ||
1063 | |||
1064 | /* | ||
1044 | * Return primary osd for given pgid, or -1 if none. | 1065 | * Return primary osd for given pgid, or -1 if none. |
1045 | */ | 1066 | */ |
1046 | int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) | 1067 | int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) |
1047 | { | 1068 | { |
1048 | int rawosds[10], *osds; | 1069 | int rawosds[CEPH_PG_MAX_SIZE], *osds; |
1049 | int i, num = ARRAY_SIZE(rawosds); | 1070 | int i, num = CEPH_PG_MAX_SIZE; |
1050 | 1071 | ||
1051 | osds = calc_pg_raw(osdmap, pgid, rawosds, &num); | 1072 | osds = calc_pg_raw(osdmap, pgid, rawosds, &num); |
1052 | if (!osds) | 1073 | if (!osds) |
@@ -1054,9 +1075,7 @@ int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) | |||
1054 | 1075 | ||
1055 | /* primary is first up osd */ | 1076 | /* primary is first up osd */ |
1056 | for (i = 0; i < num; i++) | 1077 | for (i = 0; i < num; i++) |
1057 | if (ceph_osd_is_up(osdmap, osds[i])) { | 1078 | if (ceph_osd_is_up(osdmap, osds[i])) |
1058 | return osds[i]; | 1079 | return osds[i]; |
1059 | break; | ||
1060 | } | ||
1061 | return -1; | 1080 | return -1; |
1062 | } | 1081 | } |
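[Editor's sketch] What ceph_calc_pg_acting() computes, reduced to its essentials: filter the raw placement-group OSD list down to the OSDs that are up, return how many survive, and treat the first survivor as the primary. The is_up() predicate and fixed-size arrays below are illustrative stand-ins.

	#include <stdio.h>

	#define PG_MAX_SIZE 16

	static int is_up(int osd)
	{
		return osd % 2 == 0;            /* pretend even-numbered OSDs are up */
	}

	/* copy the up members of raw[] into acting[], return the count;
	 * acting[0], if any, is the primary */
	static int calc_pg_acting(const int *raw, int num, int *acting)
	{
		int i, o = 0;

		for (i = 0; i < num; i++)
			if (is_up(raw[i]))
				acting[o++] = raw[i];
		return o;
	}

	int main(void)
	{
		int raw[] = { 3, 4, 7, 8 };
		int acting[PG_MAX_SIZE];
		int n = calc_pg_acting(raw, 4, acting);

		printf("acting set size %d, primary %d\n", n, n ? acting[0] : -1);
		return 0;
	}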
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h index 8bc9f1e4f562..970b547e510d 100644 --- a/fs/ceph/osdmap.h +++ b/fs/ceph/osdmap.h | |||
@@ -120,6 +120,8 @@ extern int ceph_calc_object_layout(struct ceph_object_layout *ol, | |||
120 | const char *oid, | 120 | const char *oid, |
121 | struct ceph_file_layout *fl, | 121 | struct ceph_file_layout *fl, |
122 | struct ceph_osdmap *osdmap); | 122 | struct ceph_osdmap *osdmap); |
123 | extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | ||
124 | int *acting); | ||
123 | extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, | 125 | extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, |
124 | struct ceph_pg pgid); | 126 | struct ceph_pg pgid); |
125 | 127 | ||
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index a1fc1d017b58..fd56451a871f 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h | |||
@@ -58,6 +58,7 @@ struct ceph_timespec { | |||
58 | #define CEPH_PG_LAYOUT_LINEAR 2 | 58 | #define CEPH_PG_LAYOUT_LINEAR 2 |
59 | #define CEPH_PG_LAYOUT_HYBRID 3 | 59 | #define CEPH_PG_LAYOUT_HYBRID 3 |
60 | 60 | ||
61 | #define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ | ||
61 | 62 | ||
62 | /* | 63 | /* |
63 | * placement group. | 64 | * placement group. |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index f888cf487b7c..110857ba9269 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -47,10 +47,20 @@ const char *ceph_file_part(const char *s, int len) | |||
47 | */ | 47 | */ |
48 | static void ceph_put_super(struct super_block *s) | 48 | static void ceph_put_super(struct super_block *s) |
49 | { | 49 | { |
50 | struct ceph_client *cl = ceph_client(s); | 50 | struct ceph_client *client = ceph_sb_to_client(s); |
51 | 51 | ||
52 | dout("put_super\n"); | 52 | dout("put_super\n"); |
53 | ceph_mdsc_close_sessions(&cl->mdsc); | 53 | ceph_mdsc_close_sessions(&client->mdsc); |
54 | |||
55 | /* | ||
56 | * ensure we release the bdi before put_anon_super releases | ||
57 | * the device name. | ||
58 | */ | ||
59 | if (s->s_bdi == &client->backing_dev_info) { | ||
60 | bdi_unregister(&client->backing_dev_info); | ||
61 | s->s_bdi = NULL; | ||
62 | } | ||
63 | |||
54 | return; | 64 | return; |
55 | } | 65 | } |
56 | 66 | ||
@@ -636,6 +646,8 @@ static void ceph_destroy_client(struct ceph_client *client) | |||
636 | destroy_workqueue(client->pg_inv_wq); | 646 | destroy_workqueue(client->pg_inv_wq); |
637 | destroy_workqueue(client->trunc_wq); | 647 | destroy_workqueue(client->trunc_wq); |
638 | 648 | ||
649 | bdi_destroy(&client->backing_dev_info); | ||
650 | |||
639 | if (client->msgr) | 651 | if (client->msgr) |
640 | ceph_messenger_destroy(client->msgr); | 652 | ceph_messenger_destroy(client->msgr); |
641 | mempool_destroy(client->wb_pagevec_pool); | 653 | mempool_destroy(client->wb_pagevec_pool); |
@@ -876,14 +888,14 @@ static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) | |||
876 | { | 888 | { |
877 | int err; | 889 | int err; |
878 | 890 | ||
879 | sb->s_bdi = &client->backing_dev_info; | ||
880 | |||
881 | /* set ra_pages based on rsize mount option? */ | 891 | /* set ra_pages based on rsize mount option? */ |
882 | if (client->mount_args->rsize >= PAGE_CACHE_SIZE) | 892 | if (client->mount_args->rsize >= PAGE_CACHE_SIZE) |
883 | client->backing_dev_info.ra_pages = | 893 | client->backing_dev_info.ra_pages = |
884 | (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) | 894 | (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) |
885 | >> PAGE_SHIFT; | 895 | >> PAGE_SHIFT; |
886 | err = bdi_register_dev(&client->backing_dev_info, sb->s_dev); | 896 | err = bdi_register_dev(&client->backing_dev_info, sb->s_dev); |
897 | if (!err) | ||
898 | sb->s_bdi = &client->backing_dev_info; | ||
887 | return err; | 899 | return err; |
888 | } | 900 | } |
889 | 901 | ||
@@ -957,9 +969,6 @@ static void ceph_kill_sb(struct super_block *s) | |||
957 | dout("kill_sb %p\n", s); | 969 | dout("kill_sb %p\n", s); |
958 | ceph_mdsc_pre_umount(&client->mdsc); | 970 | ceph_mdsc_pre_umount(&client->mdsc); |
959 | kill_anon_super(s); /* will call put_super after sb is r/o */ | 971 | kill_anon_super(s); /* will call put_super after sb is r/o */ |
960 | if (s->s_bdi == &client->backing_dev_info) | ||
961 | bdi_unregister(&client->backing_dev_info); | ||
962 | bdi_destroy(&client->backing_dev_info); | ||
963 | ceph_destroy_client(client); | 972 | ceph_destroy_client(client); |
964 | } | 973 | } |
965 | 974 | ||
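[Editor's sketch] The super.c changes follow a "publish only after success" discipline: sb->s_bdi is assigned only when bdi_register_dev() succeeded, and the bdi is torn down in put_super before the device name goes away. A generic sketch of the first half; the types and register_dev() are stand-ins, not the kernel bdi API.

	#include <stdio.h>

	struct backing_dev { int registered; };

	static int register_dev(struct backing_dev *b)
	{
		b->registered = 1;
		return 0;                       /* 0 on success, negative on error */
	}

	static struct backing_dev *published;   /* stands in for sb->s_bdi */

	static int setup_bdi(struct backing_dev *b)
	{
		int err = register_dev(b);

		if (!err)
			published = b;          /* publish the pointer only on success */
		return err;
	}

	int main(void)
	{
		struct backing_dev b = { 0 };

		setup_bdi(&b);
		printf("published: %s\n", published ? "yes" : "no");
		return 0;
	}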
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index a20bea598933..cfd1ce34e0bc 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -492,17 +492,13 @@ compare_oid(unsigned long *oid1, unsigned int oid1len, | |||
492 | 492 | ||
493 | int | 493 | int |
494 | decode_negTokenInit(unsigned char *security_blob, int length, | 494 | decode_negTokenInit(unsigned char *security_blob, int length, |
495 | enum securityEnum *secType) | 495 | struct TCP_Server_Info *server) |
496 | { | 496 | { |
497 | struct asn1_ctx ctx; | 497 | struct asn1_ctx ctx; |
498 | unsigned char *end; | 498 | unsigned char *end; |
499 | unsigned char *sequence_end; | 499 | unsigned char *sequence_end; |
500 | unsigned long *oid = NULL; | 500 | unsigned long *oid = NULL; |
501 | unsigned int cls, con, tag, oidlen, rc; | 501 | unsigned int cls, con, tag, oidlen, rc; |
502 | bool use_ntlmssp = false; | ||
503 | bool use_kerberos = false; | ||
504 | bool use_kerberosu2u = false; | ||
505 | bool use_mskerberos = false; | ||
506 | 502 | ||
507 | /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */ | 503 | /* cifs_dump_mem(" Received SecBlob ", security_blob, length); */ |
508 | 504 | ||
@@ -510,11 +506,11 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
510 | 506 | ||
511 | /* GSSAPI header */ | 507 | /* GSSAPI header */ |
512 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 508 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
513 | cFYI(1, ("Error decoding negTokenInit header")); | 509 | cFYI(1, "Error decoding negTokenInit header"); |
514 | return 0; | 510 | return 0; |
515 | } else if ((cls != ASN1_APL) || (con != ASN1_CON) | 511 | } else if ((cls != ASN1_APL) || (con != ASN1_CON) |
516 | || (tag != ASN1_EOC)) { | 512 | || (tag != ASN1_EOC)) { |
517 | cFYI(1, ("cls = %d con = %d tag = %d", cls, con, tag)); | 513 | cFYI(1, "cls = %d con = %d tag = %d", cls, con, tag); |
518 | return 0; | 514 | return 0; |
519 | } | 515 | } |
520 | 516 | ||
@@ -535,56 +531,52 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
535 | 531 | ||
536 | /* SPNEGO OID not present or garbled -- bail out */ | 532 | /* SPNEGO OID not present or garbled -- bail out */ |
537 | if (!rc) { | 533 | if (!rc) { |
538 | cFYI(1, ("Error decoding negTokenInit header")); | 534 | cFYI(1, "Error decoding negTokenInit header"); |
539 | return 0; | 535 | return 0; |
540 | } | 536 | } |
541 | 537 | ||
542 | /* SPNEGO */ | 538 | /* SPNEGO */ |
543 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 539 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
544 | cFYI(1, ("Error decoding negTokenInit")); | 540 | cFYI(1, "Error decoding negTokenInit"); |
545 | return 0; | 541 | return 0; |
546 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) | 542 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) |
547 | || (tag != ASN1_EOC)) { | 543 | || (tag != ASN1_EOC)) { |
548 | cFYI(1, | 544 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 0", |
549 | ("cls = %d con = %d tag = %d end = %p (%d) exit 0", | 545 | cls, con, tag, end, *end); |
550 | cls, con, tag, end, *end)); | ||
551 | return 0; | 546 | return 0; |
552 | } | 547 | } |
553 | 548 | ||
554 | /* negTokenInit */ | 549 | /* negTokenInit */ |
555 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 550 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
556 | cFYI(1, ("Error decoding negTokenInit")); | 551 | cFYI(1, "Error decoding negTokenInit"); |
557 | return 0; | 552 | return 0; |
558 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | 553 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) |
559 | || (tag != ASN1_SEQ)) { | 554 | || (tag != ASN1_SEQ)) { |
560 | cFYI(1, | 555 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 1", |
561 | ("cls = %d con = %d tag = %d end = %p (%d) exit 1", | 556 | cls, con, tag, end, *end); |
562 | cls, con, tag, end, *end)); | ||
563 | return 0; | 557 | return 0; |
564 | } | 558 | } |
565 | 559 | ||
566 | /* sequence */ | 560 | /* sequence */ |
567 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 561 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
568 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); | 562 | cFYI(1, "Error decoding 2nd part of negTokenInit"); |
569 | return 0; | 563 | return 0; |
570 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) | 564 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) |
571 | || (tag != ASN1_EOC)) { | 565 | || (tag != ASN1_EOC)) { |
572 | cFYI(1, | 566 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 0", |
573 | ("cls = %d con = %d tag = %d end = %p (%d) exit 0", | 567 | cls, con, tag, end, *end); |
574 | cls, con, tag, end, *end)); | ||
575 | return 0; | 568 | return 0; |
576 | } | 569 | } |
577 | 570 | ||
578 | /* sequence of */ | 571 | /* sequence of */ |
579 | if (asn1_header_decode | 572 | if (asn1_header_decode |
580 | (&ctx, &sequence_end, &cls, &con, &tag) == 0) { | 573 | (&ctx, &sequence_end, &cls, &con, &tag) == 0) { |
581 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); | 574 | cFYI(1, "Error decoding 2nd part of negTokenInit"); |
582 | return 0; | 575 | return 0; |
583 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | 576 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) |
584 | || (tag != ASN1_SEQ)) { | 577 | || (tag != ASN1_SEQ)) { |
585 | cFYI(1, | 578 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d) exit 1", |
586 | ("cls = %d con = %d tag = %d end = %p (%d) exit 1", | 579 | cls, con, tag, end, *end); |
587 | cls, con, tag, end, *end)); | ||
588 | return 0; | 580 | return 0; |
589 | } | 581 | } |
590 | 582 | ||
@@ -592,37 +584,33 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
592 | while (!asn1_eoc_decode(&ctx, sequence_end)) { | 584 | while (!asn1_eoc_decode(&ctx, sequence_end)) { |
593 | rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); | 585 | rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); |
594 | if (!rc) { | 586 | if (!rc) { |
595 | cFYI(1, | 587 | cFYI(1, "Error decoding negTokenInit hdr exit2"); |
596 | ("Error decoding negTokenInit hdr exit2")); | ||
597 | return 0; | 588 | return 0; |
598 | } | 589 | } |
599 | if ((tag == ASN1_OJI) && (con == ASN1_PRI)) { | 590 | if ((tag == ASN1_OJI) && (con == ASN1_PRI)) { |
600 | if (asn1_oid_decode(&ctx, end, &oid, &oidlen)) { | 591 | if (asn1_oid_decode(&ctx, end, &oid, &oidlen)) { |
601 | 592 | ||
602 | cFYI(1, ("OID len = %d oid = 0x%lx 0x%lx " | 593 | cFYI(1, "OID len = %d oid = 0x%lx 0x%lx " |
603 | "0x%lx 0x%lx", oidlen, *oid, | 594 | "0x%lx 0x%lx", oidlen, *oid, |
604 | *(oid + 1), *(oid + 2), *(oid + 3))); | 595 | *(oid + 1), *(oid + 2), *(oid + 3)); |
605 | 596 | ||
606 | if (compare_oid(oid, oidlen, MSKRB5_OID, | 597 | if (compare_oid(oid, oidlen, MSKRB5_OID, |
607 | MSKRB5_OID_LEN) && | 598 | MSKRB5_OID_LEN)) |
608 | !use_mskerberos) | 599 | server->sec_mskerberos = true; |
609 | use_mskerberos = true; | ||
610 | else if (compare_oid(oid, oidlen, KRB5U2U_OID, | 600 | else if (compare_oid(oid, oidlen, KRB5U2U_OID, |
611 | KRB5U2U_OID_LEN) && | 601 | KRB5U2U_OID_LEN)) |
612 | !use_kerberosu2u) | 602 | server->sec_kerberosu2u = true; |
613 | use_kerberosu2u = true; | ||
614 | else if (compare_oid(oid, oidlen, KRB5_OID, | 603 | else if (compare_oid(oid, oidlen, KRB5_OID, |
615 | KRB5_OID_LEN) && | 604 | KRB5_OID_LEN)) |
616 | !use_kerberos) | 605 | server->sec_kerberos = true; |
617 | use_kerberos = true; | ||
618 | else if (compare_oid(oid, oidlen, NTLMSSP_OID, | 606 | else if (compare_oid(oid, oidlen, NTLMSSP_OID, |
619 | NTLMSSP_OID_LEN)) | 607 | NTLMSSP_OID_LEN)) |
620 | use_ntlmssp = true; | 608 | server->sec_ntlmssp = true; |
621 | 609 | ||
622 | kfree(oid); | 610 | kfree(oid); |
623 | } | 611 | } |
624 | } else { | 612 | } else { |
625 | cFYI(1, ("Should be an oid what is going on?")); | 613 | cFYI(1, "Should be an oid what is going on?"); |
626 | } | 614 | } |
627 | } | 615 | } |
628 | 616 | ||
@@ -632,54 +620,47 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
632 | no mechListMic (e.g. NTLMSSP instead of KRB5) */ | 620 | no mechListMic (e.g. NTLMSSP instead of KRB5) */ |
633 | if (ctx.error == ASN1_ERR_DEC_EMPTY) | 621 | if (ctx.error == ASN1_ERR_DEC_EMPTY) |
634 | goto decode_negtoken_exit; | 622 | goto decode_negtoken_exit; |
635 | cFYI(1, ("Error decoding last part negTokenInit exit3")); | 623 | cFYI(1, "Error decoding last part negTokenInit exit3"); |
636 | return 0; | 624 | return 0; |
637 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | 625 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { |
638 | /* tag = 3 indicating mechListMIC */ | 626 | /* tag = 3 indicating mechListMIC */ |
639 | cFYI(1, ("Exit 4 cls = %d con = %d tag = %d end = %p (%d)", | 627 | cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)", |
640 | cls, con, tag, end, *end)); | 628 | cls, con, tag, end, *end); |
641 | return 0; | 629 | return 0; |
642 | } | 630 | } |
643 | 631 | ||
644 | /* sequence */ | 632 | /* sequence */ |
645 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 633 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
646 | cFYI(1, ("Error decoding last part negTokenInit exit5")); | 634 | cFYI(1, "Error decoding last part negTokenInit exit5"); |
647 | return 0; | 635 | return 0; |
648 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | 636 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) |
649 | || (tag != ASN1_SEQ)) { | 637 | || (tag != ASN1_SEQ)) { |
650 | cFYI(1, ("cls = %d con = %d tag = %d end = %p (%d)", | 638 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)", |
651 | cls, con, tag, end, *end)); | 639 | cls, con, tag, end, *end); |
652 | } | 640 | } |
653 | 641 | ||
654 | /* sequence of */ | 642 | /* sequence of */ |
655 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 643 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
656 | cFYI(1, ("Error decoding last part negTokenInit exit 7")); | 644 | cFYI(1, "Error decoding last part negTokenInit exit 7"); |
657 | return 0; | 645 | return 0; |
658 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | 646 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { |
659 | cFYI(1, ("Exit 8 cls = %d con = %d tag = %d end = %p (%d)", | 647 | cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)", |
660 | cls, con, tag, end, *end)); | 648 | cls, con, tag, end, *end); |
661 | return 0; | 649 | return 0; |
662 | } | 650 | } |
663 | 651 | ||
664 | /* general string */ | 652 | /* general string */ |
665 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 653 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
666 | cFYI(1, ("Error decoding last part negTokenInit exit9")); | 654 | cFYI(1, "Error decoding last part negTokenInit exit9"); |
667 | return 0; | 655 | return 0; |
668 | } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) | 656 | } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) |
669 | || (tag != ASN1_GENSTR)) { | 657 | || (tag != ASN1_GENSTR)) { |
670 | cFYI(1, ("Exit10 cls = %d con = %d tag = %d end = %p (%d)", | 658 | cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)", |
671 | cls, con, tag, end, *end)); | 659 | cls, con, tag, end, *end); |
672 | return 0; | 660 | return 0; |
673 | } | 661 | } |
674 | cFYI(1, ("Need to call asn1_octets_decode() function for %s", | 662 | cFYI(1, "Need to call asn1_octets_decode() function for %s", |
675 | ctx.pointer)); /* is this UTF-8 or ASCII? */ | 663 | ctx.pointer); /* is this UTF-8 or ASCII? */ |
676 | decode_negtoken_exit: | 664 | decode_negtoken_exit: |
677 | if (use_kerberos) | ||
678 | *secType = Kerberos; | ||
679 | else if (use_mskerberos) | ||
680 | *secType = MSKerberos; | ||
681 | else if (use_ntlmssp) | ||
682 | *secType = RawNTLMSSP; | ||
683 | |||
684 | return 1; | 665 | return 1; |
685 | } | 666 | } |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 42cec2a7c0cf..4fce6e61b34e 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -60,10 +60,10 @@ cifs_dump_mem(char *label, void *data, int length) | |||
60 | #ifdef CONFIG_CIFS_DEBUG2 | 60 | #ifdef CONFIG_CIFS_DEBUG2 |
61 | void cifs_dump_detail(struct smb_hdr *smb) | 61 | void cifs_dump_detail(struct smb_hdr *smb) |
62 | { | 62 | { |
63 | cERROR(1, ("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d", | 63 | cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d", |
64 | smb->Command, smb->Status.CifsError, | 64 | smb->Command, smb->Status.CifsError, |
65 | smb->Flags, smb->Flags2, smb->Mid, smb->Pid)); | 65 | smb->Flags, smb->Flags2, smb->Mid, smb->Pid); |
66 | cERROR(1, ("smb buf %p len %d", smb, smbCalcSize_LE(smb))); | 66 | cERROR(1, "smb buf %p len %d", smb, smbCalcSize_LE(smb)); |
67 | } | 67 | } |
68 | 68 | ||
69 | 69 | ||
@@ -75,25 +75,25 @@ void cifs_dump_mids(struct TCP_Server_Info *server) | |||
75 | if (server == NULL) | 75 | if (server == NULL) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | cERROR(1, ("Dump pending requests:")); | 78 | cERROR(1, "Dump pending requests:"); |
79 | spin_lock(&GlobalMid_Lock); | 79 | spin_lock(&GlobalMid_Lock); |
80 | list_for_each(tmp, &server->pending_mid_q) { | 80 | list_for_each(tmp, &server->pending_mid_q) { |
81 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 81 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
82 | cERROR(1, ("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d", | 82 | cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d", |
83 | mid_entry->midState, | 83 | mid_entry->midState, |
84 | (int)mid_entry->command, | 84 | (int)mid_entry->command, |
85 | mid_entry->pid, | 85 | mid_entry->pid, |
86 | mid_entry->tsk, | 86 | mid_entry->tsk, |
87 | mid_entry->mid)); | 87 | mid_entry->mid); |
88 | #ifdef CONFIG_CIFS_STATS2 | 88 | #ifdef CONFIG_CIFS_STATS2 |
89 | cERROR(1, ("IsLarge: %d buf: %p time rcv: %ld now: %ld", | 89 | cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld", |
90 | mid_entry->largeBuf, | 90 | mid_entry->largeBuf, |
91 | mid_entry->resp_buf, | 91 | mid_entry->resp_buf, |
92 | mid_entry->when_received, | 92 | mid_entry->when_received, |
93 | jiffies)); | 93 | jiffies); |
94 | #endif /* STATS2 */ | 94 | #endif /* STATS2 */ |
95 | cERROR(1, ("IsMult: %d IsEnd: %d", mid_entry->multiRsp, | 95 | cERROR(1, "IsMult: %d IsEnd: %d", mid_entry->multiRsp, |
96 | mid_entry->multiEnd)); | 96 | mid_entry->multiEnd); |
97 | if (mid_entry->resp_buf) { | 97 | if (mid_entry->resp_buf) { |
98 | cifs_dump_detail(mid_entry->resp_buf); | 98 | cifs_dump_detail(mid_entry->resp_buf); |
99 | cifs_dump_mem("existing buf: ", | 99 | cifs_dump_mem("existing buf: ", |
@@ -716,7 +716,7 @@ static const struct file_operations cifs_multiuser_mount_proc_fops = { | |||
716 | 716 | ||
717 | static int cifs_security_flags_proc_show(struct seq_file *m, void *v) | 717 | static int cifs_security_flags_proc_show(struct seq_file *m, void *v) |
718 | { | 718 | { |
719 | seq_printf(m, "0x%x\n", extended_security); | 719 | seq_printf(m, "0x%x\n", global_secflags); |
720 | return 0; | 720 | return 0; |
721 | } | 721 | } |
722 | 722 | ||
@@ -744,13 +744,13 @@ static ssize_t cifs_security_flags_proc_write(struct file *file, | |||
744 | /* single char or single char followed by null */ | 744 | /* single char or single char followed by null */ |
745 | c = flags_string[0]; | 745 | c = flags_string[0]; |
746 | if (c == '0' || c == 'n' || c == 'N') { | 746 | if (c == '0' || c == 'n' || c == 'N') { |
747 | extended_security = CIFSSEC_DEF; /* default */ | 747 | global_secflags = CIFSSEC_DEF; /* default */ |
748 | return count; | 748 | return count; |
749 | } else if (c == '1' || c == 'y' || c == 'Y') { | 749 | } else if (c == '1' || c == 'y' || c == 'Y') { |
750 | extended_security = CIFSSEC_MAX; | 750 | global_secflags = CIFSSEC_MAX; |
751 | return count; | 751 | return count; |
752 | } else if (!isdigit(c)) { | 752 | } else if (!isdigit(c)) { |
753 | cERROR(1, ("invalid flag %c", c)); | 753 | cERROR(1, "invalid flag %c", c); |
754 | return -EINVAL; | 754 | return -EINVAL; |
755 | } | 755 | } |
756 | } | 756 | } |
@@ -758,26 +758,26 @@ static ssize_t cifs_security_flags_proc_write(struct file *file, | |||
758 | 758 | ||
759 | flags = simple_strtoul(flags_string, NULL, 0); | 759 | flags = simple_strtoul(flags_string, NULL, 0); |
760 | 760 | ||
761 | cFYI(1, ("sec flags 0x%x", flags)); | 761 | cFYI(1, "sec flags 0x%x", flags); |
762 | 762 | ||
763 | if (flags <= 0) { | 763 | if (flags <= 0) { |
764 | cERROR(1, ("invalid security flags %s", flags_string)); | 764 | cERROR(1, "invalid security flags %s", flags_string); |
765 | return -EINVAL; | 765 | return -EINVAL; |
766 | } | 766 | } |
767 | 767 | ||
768 | if (flags & ~CIFSSEC_MASK) { | 768 | if (flags & ~CIFSSEC_MASK) { |
769 | cERROR(1, ("attempt to set unsupported security flags 0x%x", | 769 | cERROR(1, "attempt to set unsupported security flags 0x%x", |
770 | flags & ~CIFSSEC_MASK)); | 770 | flags & ~CIFSSEC_MASK); |
771 | return -EINVAL; | 771 | return -EINVAL; |
772 | } | 772 | } |
773 | /* flags look ok - update the global security flags for cifs module */ | 773 | /* flags look ok - update the global security flags for cifs module */ |
774 | extended_security = flags; | 774 | global_secflags = flags; |
775 | if (extended_security & CIFSSEC_MUST_SIGN) { | 775 | if (global_secflags & CIFSSEC_MUST_SIGN) { |
776 | /* requiring signing implies signing is allowed */ | 776 | /* requiring signing implies signing is allowed */ |
777 | extended_security |= CIFSSEC_MAY_SIGN; | 777 | global_secflags |= CIFSSEC_MAY_SIGN; |
778 | cFYI(1, ("packet signing now required")); | 778 | cFYI(1, "packet signing now required"); |
779 | } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) { | 779 | } else if ((global_secflags & CIFSSEC_MAY_SIGN) == 0) { |
780 | cFYI(1, ("packet signing disabled")); | 780 | cFYI(1, "packet signing disabled"); |
781 | } | 781 | } |
782 | /* BB should we turn on MAY flags for other MUST options? */ | 782 | /* BB should we turn on MAY flags for other MUST options? */ |
783 | return count; | 783 | return count; |
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index 5eb3b83bbfa7..aa316891ac0c 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h | |||
@@ -43,34 +43,54 @@ void dump_smb(struct smb_hdr *, int); | |||
43 | */ | 43 | */ |
44 | #ifdef CIFS_DEBUG | 44 | #ifdef CIFS_DEBUG |
45 | 45 | ||
46 | |||
47 | /* information message: e.g., configuration, major event */ | 46 | /* information message: e.g., configuration, major event */ |
48 | extern int cifsFYI; | 47 | extern int cifsFYI; |
49 | #define cifsfyi(format,arg...) if (cifsFYI & CIFS_INFO) printk(KERN_DEBUG " " __FILE__ ": " format "\n" "" , ## arg) | 48 | #define cifsfyi(fmt, arg...) \ |
49 | do { \ | ||
50 | if (cifsFYI & CIFS_INFO) \ | ||
51 | printk(KERN_DEBUG "%s: " fmt "\n", __FILE__, ##arg); \ | ||
52 | } while (0) | ||
50 | 53 | ||
51 | #define cFYI(button,prspec) if (button) cifsfyi prspec | 54 | #define cFYI(set, fmt, arg...) \ |
55 | do { \ | ||
56 | if (set) \ | ||
57 | cifsfyi(fmt, ##arg); \ | ||
58 | } while (0) | ||
52 | 59 | ||
53 | #define cifswarn(format, arg...) printk(KERN_WARNING ": " format "\n" , ## arg) | 60 | #define cifswarn(fmt, arg...) \ |
61 | printk(KERN_WARNING fmt "\n", ##arg) | ||
54 | 62 | ||
55 | /* debug event message: */ | 63 | /* debug event message: */ |
56 | extern int cifsERROR; | 64 | extern int cifsERROR; |
57 | 65 | ||
58 | #define cEVENT(format,arg...) if (cifsERROR) printk(KERN_EVENT __FILE__ ": " format "\n" , ## arg) | 66 | #define cEVENT(fmt, arg...) \ |
67 | do { \ | ||
68 | if (cifsERROR) \ | ||
69 | printk(KERN_EVENT "%s: " fmt "\n", __FILE__, ##arg); \ | ||
70 | } while (0) | ||
59 | 71 | ||
60 | /* error event message: e.g., i/o error */ | 72 | /* error event message: e.g., i/o error */ |
61 | #define cifserror(format,arg...) if (cifsERROR) printk(KERN_ERR " CIFS VFS: " format "\n" "" , ## arg) | 73 | #define cifserror(fmt, arg...) \ |
74 | do { \ | ||
75 | if (cifsERROR) \ | ||
76 | printk(KERN_ERR "CIFS VFS: " fmt "\n", ##arg); \ | ||
77 | } while (0) | ||
62 | 78 | ||
63 | #define cERROR(button, prspec) if (button) cifserror prspec | 79 | #define cERROR(set, fmt, arg...) \ |
80 | do { \ | ||
81 | if (set) \ | ||
82 | cifserror(fmt, ##arg); \ | ||
83 | } while (0) | ||
64 | 84 | ||
65 | /* | 85 | /* |
66 | * debug OFF | 86 | * debug OFF |
67 | * --------- | 87 | * --------- |
68 | */ | 88 | */ |
69 | #else /* _CIFS_DEBUG */ | 89 | #else /* _CIFS_DEBUG */ |
70 | #define cERROR(button, prspec) | 90 | #define cERROR(set, fmt, arg...) |
71 | #define cEVENT(format, arg...) | 91 | #define cEVENT(fmt, arg...) |
72 | #define cFYI(button, prspec) | 92 | #define cFYI(set, fmt, arg...) |
73 | #define cifserror(format, arg...) | 93 | #define cifserror(fmt, arg...) |
74 | #endif /* _CIFS_DEBUG */ | 94 | #endif /* _CIFS_DEBUG */ |
75 | 95 | ||
76 | #endif /* _H_CIFS_DEBUG */ | 96 | #endif /* _H_CIFS_DEBUG */ |
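[Editor's sketch] The cifs_debug.h rewrite replaces the old double-parenthesis style (cFYI(1, ("fmt", args))) with ordinary variadic macros. Two details carry the change: the do { } while (0) wrapper makes each macro expand to a single statement that is safe inside an unbraced if/else, and ##arg drops the trailing comma when no arguments are passed. A standalone sketch using the same GNU named-varargs form the kernel uses (my_fyi is a made-up name):

	#include <stdio.h>

	static int cifsFYI = 1;

	/* do { } while (0) makes the expansion one statement; ## before the
	 * variadic name swallows the comma when the argument list is empty */
	#define my_fyi(fmt, arg...) \
		do { \
			if (cifsFYI) \
				printf("%s: " fmt "\n", __FILE__, ##arg); \
		} while (0)

	int main(void)
	{
		if (cifsFYI)
			my_fyi("no arguments here");    /* safe in unbraced if/else */
		else
			my_fyi("value %d", 42);
		return 0;
	}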
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 78e4d2a3a68b..ac19a6f3dae0 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -85,8 +85,8 @@ static char *cifs_get_share_name(const char *node_name) | |||
85 | /* find server name end */ | 85 | /* find server name end */ |
86 | pSep = memchr(UNC+2, '\\', len-2); | 86 | pSep = memchr(UNC+2, '\\', len-2); |
87 | if (!pSep) { | 87 | if (!pSep) { |
88 | cERROR(1, ("%s: no server name end in node name: %s", | 88 | cERROR(1, "%s: no server name end in node name: %s", |
89 | __func__, node_name)); | 89 | __func__, node_name); |
90 | kfree(UNC); | 90 | kfree(UNC); |
91 | return ERR_PTR(-EINVAL); | 91 | return ERR_PTR(-EINVAL); |
92 | } | 92 | } |
@@ -142,8 +142,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, | |||
142 | 142 | ||
143 | rc = dns_resolve_server_name_to_ip(*devname, &srvIP); | 143 | rc = dns_resolve_server_name_to_ip(*devname, &srvIP); |
144 | if (rc != 0) { | 144 | if (rc != 0) { |
145 | cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", | 145 | cERROR(1, "%s: Failed to resolve server part of %s to IP: %d", |
146 | __func__, *devname, rc)); | 146 | __func__, *devname, rc); |
147 | goto compose_mount_options_err; | 147 | goto compose_mount_options_err; |
148 | } | 148 | } |
149 | /* md_len = strlen(...) + 12 for 'sep+prefixpath=' | 149 | /* md_len = strlen(...) + 12 for 'sep+prefixpath=' |
@@ -217,8 +217,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, | |||
217 | strcat(mountdata, fullpath + ref->path_consumed); | 217 | strcat(mountdata, fullpath + ref->path_consumed); |
218 | } | 218 | } |
219 | 219 | ||
220 | /*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/ | 220 | /*cFYI(1, "%s: parent mountdata: %s", __func__,sb_mountdata);*/ |
221 | /*cFYI(1, ("%s: submount mountdata: %s", __func__, mountdata ));*/ | 221 | /*cFYI(1, "%s: submount mountdata: %s", __func__, mountdata );*/ |
222 | 222 | ||
223 | compose_mount_options_out: | 223 | compose_mount_options_out: |
224 | kfree(srvIP); | 224 | kfree(srvIP); |
@@ -294,11 +294,11 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd, | |||
294 | 294 | ||
295 | static void dump_referral(const struct dfs_info3_param *ref) | 295 | static void dump_referral(const struct dfs_info3_param *ref) |
296 | { | 296 | { |
297 | cFYI(1, ("DFS: ref path: %s", ref->path_name)); | 297 | cFYI(1, "DFS: ref path: %s", ref->path_name); |
298 | cFYI(1, ("DFS: node path: %s", ref->node_name)); | 298 | cFYI(1, "DFS: node path: %s", ref->node_name); |
299 | cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type)); | 299 | cFYI(1, "DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type); |
300 | cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag, | 300 | cFYI(1, "DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag, |
301 | ref->path_consumed)); | 301 | ref->path_consumed); |
302 | } | 302 | } |
303 | 303 | ||
304 | 304 | ||
@@ -314,7 +314,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
314 | int rc = 0; | 314 | int rc = 0; |
315 | struct vfsmount *mnt = ERR_PTR(-ENOENT); | 315 | struct vfsmount *mnt = ERR_PTR(-ENOENT); |
316 | 316 | ||
317 | cFYI(1, ("in %s", __func__)); | 317 | cFYI(1, "in %s", __func__); |
318 | BUG_ON(IS_ROOT(dentry)); | 318 | BUG_ON(IS_ROOT(dentry)); |
319 | 319 | ||
320 | xid = GetXid(); | 320 | xid = GetXid(); |
@@ -352,15 +352,15 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
352 | /* connect to a node */ | 352 | /* connect to a node */ |
353 | len = strlen(referrals[i].node_name); | 353 | len = strlen(referrals[i].node_name); |
354 | if (len < 2) { | 354 | if (len < 2) { |
355 | cERROR(1, ("%s: Net Address path too short: %s", | 355 | cERROR(1, "%s: Net Address path too short: %s", |
356 | __func__, referrals[i].node_name)); | 356 | __func__, referrals[i].node_name); |
357 | rc = -EINVAL; | 357 | rc = -EINVAL; |
358 | goto out_err; | 358 | goto out_err; |
359 | } | 359 | } |
360 | mnt = cifs_dfs_do_refmount(nd->path.mnt, | 360 | mnt = cifs_dfs_do_refmount(nd->path.mnt, |
361 | nd->path.dentry, referrals + i); | 361 | nd->path.dentry, referrals + i); |
362 | cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__, | 362 | cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__, |
363 | referrals[i].node_name, mnt)); | 363 | referrals[i].node_name, mnt); |
364 | 364 | ||
365 | /* complete mount procedure if we accured submount */ | 365 | /* complete mount procedure if we accured submount */ |
366 | if (!IS_ERR(mnt)) | 366 | if (!IS_ERR(mnt)) |
@@ -378,7 +378,7 @@ out: | |||
378 | FreeXid(xid); | 378 | FreeXid(xid); |
379 | free_dfs_info_array(referrals, num_referrals); | 379 | free_dfs_info_array(referrals, num_referrals); |
380 | kfree(full_path); | 380 | kfree(full_path); |
381 | cFYI(1, ("leaving %s" , __func__)); | 381 | cFYI(1, "leaving %s" , __func__); |
382 | return ERR_PTR(rc); | 382 | return ERR_PTR(rc); |
383 | out_err: | 383 | out_err: |
384 | path_put(&nd->path); | 384 | path_put(&nd->path); |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 310d12f69a92..379bd7d9c05f 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
@@ -133,9 +133,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
133 | dp = description + strlen(description); | 133 | dp = description + strlen(description); |
134 | 134 | ||
135 | /* for now, only sec=krb5 and sec=mskrb5 are valid */ | 135 | /* for now, only sec=krb5 and sec=mskrb5 are valid */ |
136 | if (server->secType == Kerberos) | 136 | if (server->sec_kerberos) |
137 | sprintf(dp, ";sec=krb5"); | 137 | sprintf(dp, ";sec=krb5"); |
138 | else if (server->secType == MSKerberos) | 138 | else if (server->sec_mskerberos) |
139 | sprintf(dp, ";sec=mskrb5"); | 139 | sprintf(dp, ";sec=mskrb5"); |
140 | else | 140 | else |
141 | goto out; | 141 | goto out; |
@@ -149,7 +149,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
149 | dp = description + strlen(description); | 149 | dp = description + strlen(description); |
150 | sprintf(dp, ";pid=0x%x", current->pid); | 150 | sprintf(dp, ";pid=0x%x", current->pid); |
151 | 151 | ||
152 | cFYI(1, ("key description = %s", description)); | 152 | cFYI(1, "key description = %s", description); |
153 | spnego_key = request_key(&cifs_spnego_key_type, description, ""); | 153 | spnego_key = request_key(&cifs_spnego_key_type, description, ""); |
154 | 154 | ||
155 | #ifdef CONFIG_CIFS_DEBUG2 | 155 | #ifdef CONFIG_CIFS_DEBUG2 |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index d07676bd76d2..430f510a1720 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -200,9 +200,8 @@ cifs_strtoUCS(__le16 *to, const char *from, int len, | |||
200 | /* works for 2.4.0 kernel or later */ | 200 | /* works for 2.4.0 kernel or later */ |
201 | charlen = codepage->char2uni(from, len, &wchar_to[i]); | 201 | charlen = codepage->char2uni(from, len, &wchar_to[i]); |
202 | if (charlen < 1) { | 202 | if (charlen < 1) { |
203 | cERROR(1, | 203 | cERROR(1, "strtoUCS: char2uni of %d returned %d", |
204 | ("strtoUCS: char2uni of %d returned %d", | 204 | (int)*from, charlen); |
205 | (int)*from, charlen)); | ||
206 | /* A question mark */ | 205 | /* A question mark */ |
207 | to[i] = cpu_to_le16(0x003f); | 206 | to[i] = cpu_to_le16(0x003f); |
208 | charlen = 1; | 207 | charlen = 1; |
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 9b716d044bbd..85d7cf7ff2c8 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -87,11 +87,11 @@ int match_sid(struct cifs_sid *ctsid) | |||
87 | continue; /* all sub_auth values do not match */ | 87 | continue; /* all sub_auth values do not match */ |
88 | } | 88 | } |
89 | 89 | ||
90 | cFYI(1, ("matching sid: %s\n", wksidarr[i].sidname)); | 90 | cFYI(1, "matching sid: %s\n", wksidarr[i].sidname); |
91 | return 0; /* sids compare/match */ | 91 | return 0; /* sids compare/match */ |
92 | } | 92 | } |
93 | 93 | ||
94 | cFYI(1, ("No matching sid")); | 94 | cFYI(1, "No matching sid"); |
95 | return -1; | 95 | return -1; |
96 | } | 96 | } |
97 | 97 | ||
@@ -208,14 +208,14 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, | |||
208 | *pbits_to_set &= ~S_IXUGO; | 208 | *pbits_to_set &= ~S_IXUGO; |
209 | return; | 209 | return; |
210 | } else if (type != ACCESS_ALLOWED) { | 210 | } else if (type != ACCESS_ALLOWED) { |
211 | cERROR(1, ("unknown access control type %d", type)); | 211 | cERROR(1, "unknown access control type %d", type); |
212 | return; | 212 | return; |
213 | } | 213 | } |
214 | /* else ACCESS_ALLOWED type */ | 214 | /* else ACCESS_ALLOWED type */ |
215 | 215 | ||
216 | if (flags & GENERIC_ALL) { | 216 | if (flags & GENERIC_ALL) { |
217 | *pmode |= (S_IRWXUGO & (*pbits_to_set)); | 217 | *pmode |= (S_IRWXUGO & (*pbits_to_set)); |
218 | cFYI(DBG2, ("all perms")); | 218 | cFYI(DBG2, "all perms"); |
219 | return; | 219 | return; |
220 | } | 220 | } |
221 | if ((flags & GENERIC_WRITE) || | 221 | if ((flags & GENERIC_WRITE) || |
@@ -228,7 +228,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, | |||
228 | ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) | 228 | ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) |
229 | *pmode |= (S_IXUGO & (*pbits_to_set)); | 229 | *pmode |= (S_IXUGO & (*pbits_to_set)); |
230 | 230 | ||
231 | cFYI(DBG2, ("access flags 0x%x mode now 0x%x", flags, *pmode)); | 231 | cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode); |
232 | return; | 232 | return; |
233 | } | 233 | } |
234 | 234 | ||
@@ -257,7 +257,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, | |||
257 | if (mode & S_IXUGO) | 257 | if (mode & S_IXUGO) |
258 | *pace_flags |= SET_FILE_EXEC_RIGHTS; | 258 | *pace_flags |= SET_FILE_EXEC_RIGHTS; |
259 | 259 | ||
260 | cFYI(DBG2, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags)); | 260 | cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags); |
261 | return; | 261 | return; |
262 | } | 262 | } |
263 | 263 | ||
@@ -297,24 +297,24 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl) | |||
297 | /* validate that we do not go past end of acl */ | 297 | /* validate that we do not go past end of acl */ |
298 | 298 | ||
299 | if (le16_to_cpu(pace->size) < 16) { | 299 | if (le16_to_cpu(pace->size) < 16) { |
300 | cERROR(1, ("ACE too small, %d", le16_to_cpu(pace->size))); | 300 | cERROR(1, "ACE too small %d", le16_to_cpu(pace->size)); |
301 | return; | 301 | return; |
302 | } | 302 | } |
303 | 303 | ||
304 | if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) { | 304 | if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) { |
305 | cERROR(1, ("ACL too small to parse ACE")); | 305 | cERROR(1, "ACL too small to parse ACE"); |
306 | return; | 306 | return; |
307 | } | 307 | } |
308 | 308 | ||
309 | num_subauth = pace->sid.num_subauth; | 309 | num_subauth = pace->sid.num_subauth; |
310 | if (num_subauth) { | 310 | if (num_subauth) { |
311 | int i; | 311 | int i; |
312 | cFYI(1, ("ACE revision %d num_auth %d type %d flags %d size %d", | 312 | cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d", |
313 | pace->sid.revision, pace->sid.num_subauth, pace->type, | 313 | pace->sid.revision, pace->sid.num_subauth, pace->type, |
314 | pace->flags, le16_to_cpu(pace->size))); | 314 | pace->flags, le16_to_cpu(pace->size)); |
315 | for (i = 0; i < num_subauth; ++i) { | 315 | for (i = 0; i < num_subauth; ++i) { |
316 | cFYI(1, ("ACE sub_auth[%d]: 0x%x", i, | 316 | cFYI(1, "ACE sub_auth[%d]: 0x%x", i, |
317 | le32_to_cpu(pace->sid.sub_auth[i]))); | 317 | le32_to_cpu(pace->sid.sub_auth[i])); |
318 | } | 318 | } |
319 | 319 | ||
320 | /* BB add length check to make sure that we do not have huge | 320 | /* BB add length check to make sure that we do not have huge |
@@ -347,13 +347,13 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, | |||
347 | 347 | ||
348 | /* validate that we do not go past end of acl */ | 348 | /* validate that we do not go past end of acl */ |
349 | if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { | 349 | if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { |
350 | cERROR(1, ("ACL too small to parse DACL")); | 350 | cERROR(1, "ACL too small to parse DACL"); |
351 | return; | 351 | return; |
352 | } | 352 | } |
353 | 353 | ||
354 | cFYI(DBG2, ("DACL revision %d size %d num aces %d", | 354 | cFYI(DBG2, "DACL revision %d size %d num aces %d", |
355 | le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), | 355 | le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), |
356 | le32_to_cpu(pdacl->num_aces))); | 356 | le32_to_cpu(pdacl->num_aces)); |
357 | 357 | ||
358 | /* reset rwx permissions for user/group/other. | 358 | /* reset rwx permissions for user/group/other. |
359 | Also, if num_aces is 0 i.e. DACL has no ACEs, | 359 | Also, if num_aces is 0 i.e. DACL has no ACEs, |
@@ -437,25 +437,25 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl) | |||
437 | /* validate that we do not go past end of ACL - sid must be at least 8 | 437 | /* validate that we do not go past end of ACL - sid must be at least 8 |
438 | bytes long (assuming no sub-auths - e.g. the null SID) */ | 438 | bytes long (assuming no sub-auths - e.g. the null SID) */ |
439 | if (end_of_acl < (char *)psid + 8) { | 439 | if (end_of_acl < (char *)psid + 8) { |
440 | cERROR(1, ("ACL too small to parse SID %p", psid)); | 440 | cERROR(1, "ACL too small to parse SID %p", psid); |
441 | return -EINVAL; | 441 | return -EINVAL; |
442 | } | 442 | } |
443 | 443 | ||
444 | if (psid->num_subauth) { | 444 | if (psid->num_subauth) { |
445 | #ifdef CONFIG_CIFS_DEBUG2 | 445 | #ifdef CONFIG_CIFS_DEBUG2 |
446 | int i; | 446 | int i; |
447 | cFYI(1, ("SID revision %d num_auth %d", | 447 | cFYI(1, "SID revision %d num_auth %d", |
448 | psid->revision, psid->num_subauth)); | 448 | psid->revision, psid->num_subauth); |
449 | 449 | ||
450 | for (i = 0; i < psid->num_subauth; i++) { | 450 | for (i = 0; i < psid->num_subauth; i++) { |
451 | cFYI(1, ("SID sub_auth[%d]: 0x%x ", i, | 451 | cFYI(1, "SID sub_auth[%d]: 0x%x ", i, |
452 | le32_to_cpu(psid->sub_auth[i]))); | 452 | le32_to_cpu(psid->sub_auth[i])); |
453 | } | 453 | } |
454 | 454 | ||
455 | /* BB add length check to make sure that we do not have huge | 455 | /* BB add length check to make sure that we do not have huge |
456 | num auths and therefore go off the end */ | 456 | num auths and therefore go off the end */ |
457 | cFYI(1, ("RID 0x%x", | 457 | cFYI(1, "RID 0x%x", |
458 | le32_to_cpu(psid->sub_auth[psid->num_subauth-1]))); | 458 | le32_to_cpu(psid->sub_auth[psid->num_subauth-1])); |
459 | #endif | 459 | #endif |
460 | } | 460 | } |
461 | 461 | ||
@@ -482,11 +482,11 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, | |||
482 | le32_to_cpu(pntsd->gsidoffset)); | 482 | le32_to_cpu(pntsd->gsidoffset)); |
483 | dacloffset = le32_to_cpu(pntsd->dacloffset); | 483 | dacloffset = le32_to_cpu(pntsd->dacloffset); |
484 | dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); | 484 | dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); |
485 | cFYI(DBG2, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x " | 485 | cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x " |
486 | "sacloffset 0x%x dacloffset 0x%x", | 486 | "sacloffset 0x%x dacloffset 0x%x", |
487 | pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), | 487 | pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), |
488 | le32_to_cpu(pntsd->gsidoffset), | 488 | le32_to_cpu(pntsd->gsidoffset), |
489 | le32_to_cpu(pntsd->sacloffset), dacloffset)); | 489 | le32_to_cpu(pntsd->sacloffset), dacloffset); |
490 | /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */ | 490 | /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */ |
491 | rc = parse_sid(owner_sid_ptr, end_of_acl); | 491 | rc = parse_sid(owner_sid_ptr, end_of_acl); |
492 | if (rc) | 492 | if (rc) |
@@ -500,7 +500,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, | |||
500 | parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr, | 500 | parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr, |
501 | group_sid_ptr, fattr); | 501 | group_sid_ptr, fattr); |
502 | else | 502 | else |
503 | cFYI(1, ("no ACL")); /* BB grant all or default perms? */ | 503 | cFYI(1, "no ACL"); /* BB grant all or default perms? */ |
504 | 504 | ||
505 | /* cifscred->uid = owner_sid_ptr->rid; | 505 | /* cifscred->uid = owner_sid_ptr->rid; |
506 | cifscred->gid = group_sid_ptr->rid; | 506 | cifscred->gid = group_sid_ptr->rid; |
@@ -563,7 +563,7 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, | |||
563 | FreeXid(xid); | 563 | FreeXid(xid); |
564 | 564 | ||
565 | 565 | ||
566 | cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen)); | 566 | cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen); |
567 | return pntsd; | 567 | return pntsd; |
568 | } | 568 | } |
569 | 569 | ||
@@ -581,12 +581,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, | |||
581 | &fid, &oplock, NULL, cifs_sb->local_nls, | 581 | &fid, &oplock, NULL, cifs_sb->local_nls, |
582 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 582 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
583 | if (rc) { | 583 | if (rc) { |
584 | cERROR(1, ("Unable to open file to get ACL")); | 584 | cERROR(1, "Unable to open file to get ACL"); |
585 | goto out; | 585 | goto out; |
586 | } | 586 | } |
587 | 587 | ||
588 | rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen); | 588 | rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen); |
589 | cFYI(1, ("GetCIFSACL rc = %d ACL len %d", rc, *pacllen)); | 589 | cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen); |
590 | 590 | ||
591 | CIFSSMBClose(xid, cifs_sb->tcon, fid); | 591 | CIFSSMBClose(xid, cifs_sb->tcon, fid); |
592 | out: | 592 | out: |
@@ -621,7 +621,7 @@ static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid, | |||
621 | rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); | 621 | rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); |
622 | FreeXid(xid); | 622 | FreeXid(xid); |
623 | 623 | ||
624 | cFYI(DBG2, ("SetCIFSACL rc = %d", rc)); | 624 | cFYI(DBG2, "SetCIFSACL rc = %d", rc); |
625 | return rc; | 625 | return rc; |
626 | } | 626 | } |
627 | 627 | ||
@@ -638,12 +638,12 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, | |||
638 | &fid, &oplock, NULL, cifs_sb->local_nls, | 638 | &fid, &oplock, NULL, cifs_sb->local_nls, |
639 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 639 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
640 | if (rc) { | 640 | if (rc) { |
641 | cERROR(1, ("Unable to open file to set ACL")); | 641 | cERROR(1, "Unable to open file to set ACL"); |
642 | goto out; | 642 | goto out; |
643 | } | 643 | } |
644 | 644 | ||
645 | rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); | 645 | rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); |
646 | cFYI(DBG2, ("SetCIFSACL rc = %d", rc)); | 646 | cFYI(DBG2, "SetCIFSACL rc = %d", rc); |
647 | 647 | ||
648 | CIFSSMBClose(xid, cifs_sb->tcon, fid); | 648 | CIFSSMBClose(xid, cifs_sb->tcon, fid); |
649 | out: | 649 | out: |
@@ -659,7 +659,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, | |||
659 | struct cifsFileInfo *open_file; | 659 | struct cifsFileInfo *open_file; |
660 | int rc; | 660 | int rc; |
661 | 661 | ||
662 | cFYI(DBG2, ("set ACL for %s from mode 0x%x", path, inode->i_mode)); | 662 | cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode); |
663 | 663 | ||
664 | open_file = find_readable_file(CIFS_I(inode)); | 664 | open_file = find_readable_file(CIFS_I(inode)); |
665 | if (!open_file) | 665 | if (!open_file) |
@@ -679,7 +679,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, | |||
679 | u32 acllen = 0; | 679 | u32 acllen = 0; |
680 | int rc = 0; | 680 | int rc = 0; |
681 | 681 | ||
682 | cFYI(DBG2, ("converting ACL to mode for %s", path)); | 682 | cFYI(DBG2, "converting ACL to mode for %s", path); |
683 | 683 | ||
684 | if (pfid) | 684 | if (pfid) |
685 | pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); | 685 | pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); |
@@ -690,7 +690,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, | |||
690 | if (pntsd) | 690 | if (pntsd) |
691 | rc = parse_sec_desc(pntsd, acllen, fattr); | 691 | rc = parse_sec_desc(pntsd, acllen, fattr); |
692 | if (rc) | 692 | if (rc) |
693 | cFYI(1, ("parse sec desc failed rc = %d", rc)); | 693 | cFYI(1, "parse sec desc failed rc = %d", rc); |
694 | 694 | ||
695 | kfree(pntsd); | 695 | kfree(pntsd); |
696 | return; | 696 | return; |
@@ -704,7 +704,7 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode) | |||
704 | struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ | 704 | struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ |
705 | struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ | 705 | struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ |
706 | 706 | ||
707 | cFYI(DBG2, ("set ACL from mode for %s", path)); | 707 | cFYI(DBG2, "set ACL from mode for %s", path); |
708 | 708 | ||
709 | /* Get the security descriptor */ | 709 | /* Get the security descriptor */ |
710 | pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen); | 710 | pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen); |
@@ -721,19 +721,19 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode) | |||
721 | DEFSECDESCLEN : secdesclen; | 721 | DEFSECDESCLEN : secdesclen; |
722 | pnntsd = kmalloc(secdesclen, GFP_KERNEL); | 722 | pnntsd = kmalloc(secdesclen, GFP_KERNEL); |
723 | if (!pnntsd) { | 723 | if (!pnntsd) { |
724 | cERROR(1, ("Unable to allocate security descriptor")); | 724 | cERROR(1, "Unable to allocate security descriptor"); |
725 | kfree(pntsd); | 725 | kfree(pntsd); |
726 | return -ENOMEM; | 726 | return -ENOMEM; |
727 | } | 727 | } |
728 | 728 | ||
729 | rc = build_sec_desc(pntsd, pnntsd, inode, nmode); | 729 | rc = build_sec_desc(pntsd, pnntsd, inode, nmode); |
730 | 730 | ||
731 | cFYI(DBG2, ("build_sec_desc rc: %d", rc)); | 731 | cFYI(DBG2, "build_sec_desc rc: %d", rc); |
732 | 732 | ||
733 | if (!rc) { | 733 | if (!rc) { |
734 | /* Set the security descriptor */ | 734 | /* Set the security descriptor */ |
735 | rc = set_cifs_acl(pnntsd, secdesclen, inode, path); | 735 | rc = set_cifs_acl(pnntsd, secdesclen, inode, path); |
736 | cFYI(DBG2, ("set_cifs_acl rc: %d", rc)); | 736 | cFYI(DBG2, "set_cifs_acl rc: %d", rc); |
737 | } | 737 | } |
738 | 738 | ||
739 | kfree(pnntsd); | 739 | kfree(pnntsd); |
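
The bulk of the hunks above (and below) convert cFYI()/cERROR() calls from the old double-parenthesized form to a plain printf-style argument list. A minimal stand-alone sketch of the kind of variadic macro that makes that call style possible, where the name cifsFYI, the prefix string, and the printf backend are all assumptions rather than the real cifs_debug.h definitions, could look like:

/* Hypothetical sketch; not the actual fs/cifs/cifs_debug.h macro. */
#include <stdio.h>

static int cifsFYI = 1;	/* assumed stand-in for the kernel's debug toggle */

/* Variadic form: callers pass the format string directly, no inner parens. */
#define cFYI(set, fmt, ...)						\
do {									\
	if ((set) && cifsFYI)						\
		printf("CIFS FYI: " fmt "\n", ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	int rc = -22;
	cFYI(1, "no matching sid");			/* zero-argument case */
	cFYI(1, "parse sec desc failed rc = %d", rc);	/* formatted case */
	return 0;
}

Because the format string is pasted directly into the macro body, callers no longer need the extra parentheses that the old non-variadic wrapper required.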
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index fbe986430d0c..847628dfdc44 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -103,7 +103,7 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec, | |||
103 | if (iov[i].iov_len == 0) | 103 | if (iov[i].iov_len == 0) |
104 | continue; | 104 | continue; |
105 | if (iov[i].iov_base == NULL) { | 105 | if (iov[i].iov_base == NULL) { |
106 | cERROR(1, ("null iovec entry")); | 106 | cERROR(1, "null iovec entry"); |
107 | return -EIO; | 107 | return -EIO; |
108 | } | 108 | } |
109 | /* The first entry includes a length field (which does not get | 109 | /* The first entry includes a length field (which does not get |
@@ -181,8 +181,8 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, | |||
181 | 181 | ||
182 | /* Do not need to verify session setups with signature "BSRSPYL " */ | 182 | /* Do not need to verify session setups with signature "BSRSPYL " */ |
183 | if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0) | 183 | if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0) |
184 | cFYI(1, ("dummy signature received for smb command 0x%x", | 184 | cFYI(1, "dummy signature received for smb command 0x%x", |
185 | cifs_pdu->Command)); | 185 | cifs_pdu->Command); |
186 | 186 | ||
187 | /* save off the original signature so we can modify the smb and check | 187 | /* save off the original signature so we can modify the smb and check |
188 | its signature against what the server sent */ | 188 | its signature against what the server sent */ |
@@ -291,7 +291,7 @@ void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, | |||
291 | if (password) | 291 | if (password) |
292 | strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); | 292 | strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); |
293 | 293 | ||
294 | if (!encrypt && extended_security & CIFSSEC_MAY_PLNTXT) { | 294 | if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) { |
295 | memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE); | 295 | memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE); |
296 | memcpy(lnm_session_key, password_with_pad, | 296 | memcpy(lnm_session_key, password_with_pad, |
297 | CIFS_ENCPWD_SIZE); | 297 | CIFS_ENCPWD_SIZE); |
@@ -398,7 +398,7 @@ void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf, | |||
398 | /* calculate buf->ntlmv2_hash */ | 398 | /* calculate buf->ntlmv2_hash */ |
399 | rc = calc_ntlmv2_hash(ses, nls_cp); | 399 | rc = calc_ntlmv2_hash(ses, nls_cp); |
400 | if (rc) | 400 | if (rc) |
401 | cERROR(1, ("could not get v2 hash rc %d", rc)); | 401 | cERROR(1, "could not get v2 hash rc %d", rc); |
402 | CalcNTLMv2_response(ses, resp_buf); | 402 | CalcNTLMv2_response(ses, resp_buf); |
403 | 403 | ||
404 | /* now calculate the MAC key for NTLMv2 */ | 404 | /* now calculate the MAC key for NTLMv2 */ |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index ad235d604a0b..78c02eb4cb1f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -49,10 +49,6 @@ | |||
49 | #include "cifs_spnego.h" | 49 | #include "cifs_spnego.h" |
50 | #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ | 50 | #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ |
51 | 51 | ||
52 | #ifdef CONFIG_CIFS_QUOTA | ||
53 | static const struct quotactl_ops cifs_quotactl_ops; | ||
54 | #endif /* QUOTA */ | ||
55 | |||
56 | int cifsFYI = 0; | 52 | int cifsFYI = 0; |
57 | int cifsERROR = 1; | 53 | int cifsERROR = 1; |
58 | int traceSMB = 0; | 54 | int traceSMB = 0; |
@@ -61,7 +57,7 @@ unsigned int experimEnabled = 0; | |||
61 | unsigned int linuxExtEnabled = 1; | 57 | unsigned int linuxExtEnabled = 1; |
62 | unsigned int lookupCacheEnabled = 1; | 58 | unsigned int lookupCacheEnabled = 1; |
63 | unsigned int multiuser_mount = 0; | 59 | unsigned int multiuser_mount = 0; |
64 | unsigned int extended_security = CIFSSEC_DEF; | 60 | unsigned int global_secflags = CIFSSEC_DEF; |
65 | /* unsigned int ntlmv2_support = 0; */ | 61 | /* unsigned int ntlmv2_support = 0; */ |
66 | unsigned int sign_CIFS_PDUs = 1; | 62 | unsigned int sign_CIFS_PDUs = 1; |
67 | static const struct super_operations cifs_super_ops; | 63 | static const struct super_operations cifs_super_ops; |
@@ -86,8 +82,6 @@ extern mempool_t *cifs_sm_req_poolp; | |||
86 | extern mempool_t *cifs_req_poolp; | 82 | extern mempool_t *cifs_req_poolp; |
87 | extern mempool_t *cifs_mid_poolp; | 83 | extern mempool_t *cifs_mid_poolp; |
88 | 84 | ||
89 | extern struct kmem_cache *cifs_oplock_cachep; | ||
90 | |||
91 | static int | 85 | static int |
92 | cifs_read_super(struct super_block *sb, void *data, | 86 | cifs_read_super(struct super_block *sb, void *data, |
93 | const char *devname, int silent) | 87 | const char *devname, int silent) |
@@ -135,8 +129,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
135 | 129 | ||
136 | if (rc) { | 130 | if (rc) { |
137 | if (!silent) | 131 | if (!silent) |
138 | cERROR(1, | 132 | cERROR(1, "cifs_mount failed w/return code = %d", rc); |
139 | ("cifs_mount failed w/return code = %d", rc)); | ||
140 | goto out_mount_failed; | 133 | goto out_mount_failed; |
141 | } | 134 | } |
142 | 135 | ||
@@ -146,9 +139,6 @@ cifs_read_super(struct super_block *sb, void *data, | |||
146 | /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) | 139 | /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) |
147 | sb->s_blocksize = | 140 | sb->s_blocksize = |
148 | cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ | 141 | cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ |
149 | #ifdef CONFIG_CIFS_QUOTA | ||
150 | sb->s_qcop = &cifs_quotactl_ops; | ||
151 | #endif | ||
152 | sb->s_blocksize = CIFS_MAX_MSGSIZE; | 142 | sb->s_blocksize = CIFS_MAX_MSGSIZE; |
153 | sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ | 143 | sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ |
154 | inode = cifs_root_iget(sb, ROOT_I); | 144 | inode = cifs_root_iget(sb, ROOT_I); |
@@ -168,7 +158,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
168 | 158 | ||
169 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 159 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
170 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { | 160 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { |
171 | cFYI(1, ("export ops supported")); | 161 | cFYI(1, "export ops supported"); |
172 | sb->s_export_op = &cifs_export_ops; | 162 | sb->s_export_op = &cifs_export_ops; |
173 | } | 163 | } |
174 | #endif /* EXPERIMENTAL */ | 164 | #endif /* EXPERIMENTAL */ |
@@ -176,7 +166,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
176 | return 0; | 166 | return 0; |
177 | 167 | ||
178 | out_no_root: | 168 | out_no_root: |
179 | cERROR(1, ("cifs_read_super: get root inode failed")); | 169 | cERROR(1, "cifs_read_super: get root inode failed"); |
180 | if (inode) | 170 | if (inode) |
181 | iput(inode); | 171 | iput(inode); |
182 | 172 | ||
@@ -203,10 +193,10 @@ cifs_put_super(struct super_block *sb) | |||
203 | int rc = 0; | 193 | int rc = 0; |
204 | struct cifs_sb_info *cifs_sb; | 194 | struct cifs_sb_info *cifs_sb; |
205 | 195 | ||
206 | cFYI(1, ("In cifs_put_super")); | 196 | cFYI(1, "In cifs_put_super"); |
207 | cifs_sb = CIFS_SB(sb); | 197 | cifs_sb = CIFS_SB(sb); |
208 | if (cifs_sb == NULL) { | 198 | if (cifs_sb == NULL) { |
209 | cFYI(1, ("Empty cifs superblock info passed to unmount")); | 199 | cFYI(1, "Empty cifs superblock info passed to unmount"); |
210 | return; | 200 | return; |
211 | } | 201 | } |
212 | 202 | ||
@@ -214,7 +204,7 @@ cifs_put_super(struct super_block *sb) | |||
214 | 204 | ||
215 | rc = cifs_umount(sb, cifs_sb); | 205 | rc = cifs_umount(sb, cifs_sb); |
216 | if (rc) | 206 | if (rc) |
217 | cERROR(1, ("cifs_umount failed with return code %d", rc)); | 207 | cERROR(1, "cifs_umount failed with return code %d", rc); |
218 | #ifdef CONFIG_CIFS_DFS_UPCALL | 208 | #ifdef CONFIG_CIFS_DFS_UPCALL |
219 | if (cifs_sb->mountdata) { | 209 | if (cifs_sb->mountdata) { |
220 | kfree(cifs_sb->mountdata); | 210 | kfree(cifs_sb->mountdata); |
@@ -300,7 +290,6 @@ static int cifs_permission(struct inode *inode, int mask) | |||
300 | static struct kmem_cache *cifs_inode_cachep; | 290 | static struct kmem_cache *cifs_inode_cachep; |
301 | static struct kmem_cache *cifs_req_cachep; | 291 | static struct kmem_cache *cifs_req_cachep; |
302 | static struct kmem_cache *cifs_mid_cachep; | 292 | static struct kmem_cache *cifs_mid_cachep; |
303 | struct kmem_cache *cifs_oplock_cachep; | ||
304 | static struct kmem_cache *cifs_sm_req_cachep; | 293 | static struct kmem_cache *cifs_sm_req_cachep; |
305 | mempool_t *cifs_sm_req_poolp; | 294 | mempool_t *cifs_sm_req_poolp; |
306 | mempool_t *cifs_req_poolp; | 295 | mempool_t *cifs_req_poolp; |
@@ -432,106 +421,6 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
432 | return 0; | 421 | return 0; |
433 | } | 422 | } |
434 | 423 | ||
435 | #ifdef CONFIG_CIFS_QUOTA | ||
436 | int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid, | ||
437 | struct fs_disk_quota *pdquota) | ||
438 | { | ||
439 | int xid; | ||
440 | int rc = 0; | ||
441 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
442 | struct cifsTconInfo *pTcon; | ||
443 | |||
444 | if (cifs_sb) | ||
445 | pTcon = cifs_sb->tcon; | ||
446 | else | ||
447 | return -EIO; | ||
448 | |||
449 | |||
450 | xid = GetXid(); | ||
451 | if (pTcon) { | ||
452 | cFYI(1, ("set type: 0x%x id: %d", quota_type, qid)); | ||
453 | } else | ||
454 | rc = -EIO; | ||
455 | |||
456 | FreeXid(xid); | ||
457 | return rc; | ||
458 | } | ||
459 | |||
460 | int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid, | ||
461 | struct fs_disk_quota *pdquota) | ||
462 | { | ||
463 | int xid; | ||
464 | int rc = 0; | ||
465 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
466 | struct cifsTconInfo *pTcon; | ||
467 | |||
468 | if (cifs_sb) | ||
469 | pTcon = cifs_sb->tcon; | ||
470 | else | ||
471 | return -EIO; | ||
472 | |||
473 | xid = GetXid(); | ||
474 | if (pTcon) { | ||
475 | cFYI(1, ("set type: 0x%x id: %d", quota_type, qid)); | ||
476 | } else | ||
477 | rc = -EIO; | ||
478 | |||
479 | FreeXid(xid); | ||
480 | return rc; | ||
481 | } | ||
482 | |||
483 | int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation) | ||
484 | { | ||
485 | int xid; | ||
486 | int rc = 0; | ||
487 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
488 | struct cifsTconInfo *pTcon; | ||
489 | |||
490 | if (cifs_sb) | ||
491 | pTcon = cifs_sb->tcon; | ||
492 | else | ||
493 | return -EIO; | ||
494 | |||
495 | xid = GetXid(); | ||
496 | if (pTcon) { | ||
497 | cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation)); | ||
498 | } else | ||
499 | rc = -EIO; | ||
500 | |||
501 | FreeXid(xid); | ||
502 | return rc; | ||
503 | } | ||
504 | |||
505 | int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats) | ||
506 | { | ||
507 | int xid; | ||
508 | int rc = 0; | ||
509 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
510 | struct cifsTconInfo *pTcon; | ||
511 | |||
512 | if (cifs_sb) | ||
513 | pTcon = cifs_sb->tcon; | ||
514 | else | ||
515 | return -EIO; | ||
516 | |||
517 | xid = GetXid(); | ||
518 | if (pTcon) { | ||
519 | cFYI(1, ("pqstats %p", qstats)); | ||
520 | } else | ||
521 | rc = -EIO; | ||
522 | |||
523 | FreeXid(xid); | ||
524 | return rc; | ||
525 | } | ||
526 | |||
527 | static const struct quotactl_ops cifs_quotactl_ops = { | ||
528 | .set_xquota = cifs_xquota_set, | ||
529 | .get_xquota = cifs_xquota_get, | ||
530 | .set_xstate = cifs_xstate_set, | ||
531 | .get_xstate = cifs_xstate_get, | ||
532 | }; | ||
533 | #endif | ||
534 | |||
535 | static void cifs_umount_begin(struct super_block *sb) | 424 | static void cifs_umount_begin(struct super_block *sb) |
536 | { | 425 | { |
537 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 426 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
@@ -558,7 +447,7 @@ static void cifs_umount_begin(struct super_block *sb) | |||
558 | /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ | 447 | /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ |
559 | /* cancel_notify_requests(tcon); */ | 448 | /* cancel_notify_requests(tcon); */ |
560 | if (tcon->ses && tcon->ses->server) { | 449 | if (tcon->ses && tcon->ses->server) { |
561 | cFYI(1, ("wake up tasks now - umount begin not complete")); | 450 | cFYI(1, "wake up tasks now - umount begin not complete"); |
562 | wake_up_all(&tcon->ses->server->request_q); | 451 | wake_up_all(&tcon->ses->server->request_q); |
563 | wake_up_all(&tcon->ses->server->response_q); | 452 | wake_up_all(&tcon->ses->server->response_q); |
564 | msleep(1); /* yield */ | 453 | msleep(1); /* yield */ |
@@ -609,7 +498,7 @@ cifs_get_sb(struct file_system_type *fs_type, | |||
609 | int rc; | 498 | int rc; |
610 | struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL); | 499 | struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL); |
611 | 500 | ||
612 | cFYI(1, ("Devname: %s flags: %d ", dev_name, flags)); | 501 | cFYI(1, "Devname: %s flags: %d ", dev_name, flags); |
613 | 502 | ||
614 | if (IS_ERR(sb)) | 503 | if (IS_ERR(sb)) |
615 | return PTR_ERR(sb); | 504 | return PTR_ERR(sb); |
@@ -656,7 +545,6 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) | |||
656 | return generic_file_llseek_unlocked(file, offset, origin); | 545 | return generic_file_llseek_unlocked(file, offset, origin); |
657 | } | 546 | } |
658 | 547 | ||
659 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
660 | static int cifs_setlease(struct file *file, long arg, struct file_lock **lease) | 548 | static int cifs_setlease(struct file *file, long arg, struct file_lock **lease) |
661 | { | 549 | { |
662 | /* note that this is called by vfs setlease with the BKL held | 550 | /* note that this is called by vfs setlease with the BKL held |
@@ -685,7 +573,6 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease) | |||
685 | else | 573 | else |
686 | return -EAGAIN; | 574 | return -EAGAIN; |
687 | } | 575 | } |
688 | #endif | ||
689 | 576 | ||
690 | struct file_system_type cifs_fs_type = { | 577 | struct file_system_type cifs_fs_type = { |
691 | .owner = THIS_MODULE, | 578 | .owner = THIS_MODULE, |
@@ -762,10 +649,7 @@ const struct file_operations cifs_file_ops = { | |||
762 | #ifdef CONFIG_CIFS_POSIX | 649 | #ifdef CONFIG_CIFS_POSIX |
763 | .unlocked_ioctl = cifs_ioctl, | 650 | .unlocked_ioctl = cifs_ioctl, |
764 | #endif /* CONFIG_CIFS_POSIX */ | 651 | #endif /* CONFIG_CIFS_POSIX */ |
765 | |||
766 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
767 | .setlease = cifs_setlease, | 652 | .setlease = cifs_setlease, |
768 | #endif /* CONFIG_CIFS_EXPERIMENTAL */ | ||
769 | }; | 653 | }; |
770 | 654 | ||
771 | const struct file_operations cifs_file_direct_ops = { | 655 | const struct file_operations cifs_file_direct_ops = { |
@@ -784,9 +668,7 @@ const struct file_operations cifs_file_direct_ops = { | |||
784 | .unlocked_ioctl = cifs_ioctl, | 668 | .unlocked_ioctl = cifs_ioctl, |
785 | #endif /* CONFIG_CIFS_POSIX */ | 669 | #endif /* CONFIG_CIFS_POSIX */ |
786 | .llseek = cifs_llseek, | 670 | .llseek = cifs_llseek, |
787 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
788 | .setlease = cifs_setlease, | 671 | .setlease = cifs_setlease, |
789 | #endif /* CONFIG_CIFS_EXPERIMENTAL */ | ||
790 | }; | 672 | }; |
791 | const struct file_operations cifs_file_nobrl_ops = { | 673 | const struct file_operations cifs_file_nobrl_ops = { |
792 | .read = do_sync_read, | 674 | .read = do_sync_read, |
@@ -803,10 +685,7 @@ const struct file_operations cifs_file_nobrl_ops = { | |||
803 | #ifdef CONFIG_CIFS_POSIX | 685 | #ifdef CONFIG_CIFS_POSIX |
804 | .unlocked_ioctl = cifs_ioctl, | 686 | .unlocked_ioctl = cifs_ioctl, |
805 | #endif /* CONFIG_CIFS_POSIX */ | 687 | #endif /* CONFIG_CIFS_POSIX */ |
806 | |||
807 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
808 | .setlease = cifs_setlease, | 688 | .setlease = cifs_setlease, |
809 | #endif /* CONFIG_CIFS_EXPERIMENTAL */ | ||
810 | }; | 689 | }; |
811 | 690 | ||
812 | const struct file_operations cifs_file_direct_nobrl_ops = { | 691 | const struct file_operations cifs_file_direct_nobrl_ops = { |
@@ -824,9 +703,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { | |||
824 | .unlocked_ioctl = cifs_ioctl, | 703 | .unlocked_ioctl = cifs_ioctl, |
825 | #endif /* CONFIG_CIFS_POSIX */ | 704 | #endif /* CONFIG_CIFS_POSIX */ |
826 | .llseek = cifs_llseek, | 705 | .llseek = cifs_llseek, |
827 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
828 | .setlease = cifs_setlease, | 706 | .setlease = cifs_setlease, |
829 | #endif /* CONFIG_CIFS_EXPERIMENTAL */ | ||
830 | }; | 707 | }; |
831 | 708 | ||
832 | const struct file_operations cifs_dir_ops = { | 709 | const struct file_operations cifs_dir_ops = { |
@@ -878,7 +755,7 @@ cifs_init_request_bufs(void) | |||
878 | } else { | 755 | } else { |
879 | CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/ | 756 | CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/ |
880 | } | 757 | } |
881 | /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */ | 758 | /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */ |
882 | cifs_req_cachep = kmem_cache_create("cifs_request", | 759 | cifs_req_cachep = kmem_cache_create("cifs_request", |
883 | CIFSMaxBufSize + | 760 | CIFSMaxBufSize + |
884 | MAX_CIFS_HDR_SIZE, 0, | 761 | MAX_CIFS_HDR_SIZE, 0, |
@@ -890,7 +767,7 @@ cifs_init_request_bufs(void) | |||
890 | cifs_min_rcv = 1; | 767 | cifs_min_rcv = 1; |
891 | else if (cifs_min_rcv > 64) { | 768 | else if (cifs_min_rcv > 64) { |
892 | cifs_min_rcv = 64; | 769 | cifs_min_rcv = 64; |
893 | cERROR(1, ("cifs_min_rcv set to maximum (64)")); | 770 | cERROR(1, "cifs_min_rcv set to maximum (64)"); |
894 | } | 771 | } |
895 | 772 | ||
896 | cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, | 773 | cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, |
@@ -921,7 +798,7 @@ cifs_init_request_bufs(void) | |||
921 | cifs_min_small = 2; | 798 | cifs_min_small = 2; |
922 | else if (cifs_min_small > 256) { | 799 | else if (cifs_min_small > 256) { |
923 | cifs_min_small = 256; | 800 | cifs_min_small = 256; |
924 | cFYI(1, ("cifs_min_small set to maximum (256)")); | 801 | cFYI(1, "cifs_min_small set to maximum (256)"); |
925 | } | 802 | } |
926 | 803 | ||
927 | cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small, | 804 | cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small, |
@@ -962,15 +839,6 @@ cifs_init_mids(void) | |||
962 | return -ENOMEM; | 839 | return -ENOMEM; |
963 | } | 840 | } |
964 | 841 | ||
965 | cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs", | ||
966 | sizeof(struct oplock_q_entry), 0, | ||
967 | SLAB_HWCACHE_ALIGN, NULL); | ||
968 | if (cifs_oplock_cachep == NULL) { | ||
969 | mempool_destroy(cifs_mid_poolp); | ||
970 | kmem_cache_destroy(cifs_mid_cachep); | ||
971 | return -ENOMEM; | ||
972 | } | ||
973 | |||
974 | return 0; | 842 | return 0; |
975 | } | 843 | } |
976 | 844 | ||
@@ -979,7 +847,6 @@ cifs_destroy_mids(void) | |||
979 | { | 847 | { |
980 | mempool_destroy(cifs_mid_poolp); | 848 | mempool_destroy(cifs_mid_poolp); |
981 | kmem_cache_destroy(cifs_mid_cachep); | 849 | kmem_cache_destroy(cifs_mid_cachep); |
982 | kmem_cache_destroy(cifs_oplock_cachep); | ||
983 | } | 850 | } |
984 | 851 | ||
985 | static int __init | 852 | static int __init |
@@ -1019,10 +886,10 @@ init_cifs(void) | |||
1019 | 886 | ||
1020 | if (cifs_max_pending < 2) { | 887 | if (cifs_max_pending < 2) { |
1021 | cifs_max_pending = 2; | 888 | cifs_max_pending = 2; |
1022 | cFYI(1, ("cifs_max_pending set to min of 2")); | 889 | cFYI(1, "cifs_max_pending set to min of 2"); |
1023 | } else if (cifs_max_pending > 256) { | 890 | } else if (cifs_max_pending > 256) { |
1024 | cifs_max_pending = 256; | 891 | cifs_max_pending = 256; |
1025 | cFYI(1, ("cifs_max_pending set to max of 256")); | 892 | cFYI(1, "cifs_max_pending set to max of 256"); |
1026 | } | 893 | } |
1027 | 894 | ||
1028 | rc = cifs_init_inodecache(); | 895 | rc = cifs_init_inodecache(); |
@@ -1080,7 +947,7 @@ init_cifs(void) | |||
1080 | static void __exit | 947 | static void __exit |
1081 | exit_cifs(void) | 948 | exit_cifs(void) |
1082 | { | 949 | { |
1083 | cFYI(DBG2, ("exit_cifs")); | 950 | cFYI(DBG2, "exit_cifs"); |
1084 | cifs_proc_clean(); | 951 | cifs_proc_clean(); |
1085 | #ifdef CONFIG_CIFS_DFS_UPCALL | 952 | #ifdef CONFIG_CIFS_DFS_UPCALL |
1086 | cifs_dfs_release_automount_timer(); | 953 | cifs_dfs_release_automount_timer(); |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 7aa57ecdc437..0242ff9cbf41 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -114,5 +114,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
114 | extern const struct export_operations cifs_export_ops; | 114 | extern const struct export_operations cifs_export_ops; |
115 | #endif /* EXPERIMENTAL */ | 115 | #endif /* EXPERIMENTAL */ |
116 | 116 | ||
117 | #define CIFS_VERSION "1.62" | 117 | #define CIFS_VERSION "1.64" |
118 | #endif /* _CIFSFS_H */ | 118 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index ecf0ffbe2b64..a88479ceaad5 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -87,7 +87,6 @@ enum securityEnum { | |||
87 | RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ | 87 | RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ |
88 | /* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */ | 88 | /* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */ |
89 | Kerberos, /* Kerberos via SPNEGO */ | 89 | Kerberos, /* Kerberos via SPNEGO */ |
90 | MSKerberos, /* MS Kerberos via SPNEGO */ | ||
91 | }; | 90 | }; |
92 | 91 | ||
93 | enum protocolEnum { | 92 | enum protocolEnum { |
@@ -185,6 +184,12 @@ struct TCP_Server_Info { | |||
185 | struct mac_key mac_signing_key; | 184 | struct mac_key mac_signing_key; |
186 | char ntlmv2_hash[16]; | 185 | char ntlmv2_hash[16]; |
187 | unsigned long lstrp; /* when we got last response from this server */ | 186 | unsigned long lstrp; /* when we got last response from this server */ |
187 | u16 dialect; /* dialect index that server chose */ | ||
188 | /* extended security flavors that server supports */ | ||
189 | bool sec_kerberos; /* supports plain Kerberos */ | ||
190 | bool sec_mskerberos; /* supports legacy MS Kerberos */ | ||
191 | bool sec_kerberosu2u; /* supports U2U Kerberos */ | ||
192 | bool sec_ntlmssp; /* supports NTLMSSP */ | ||
188 | }; | 193 | }; |
189 | 194 | ||
190 | /* | 195 | /* |
@@ -502,6 +507,7 @@ struct dfs_info3_param { | |||
502 | #define CIFS_FATTR_DFS_REFERRAL 0x1 | 507 | #define CIFS_FATTR_DFS_REFERRAL 0x1 |
503 | #define CIFS_FATTR_DELETE_PENDING 0x2 | 508 | #define CIFS_FATTR_DELETE_PENDING 0x2 |
504 | #define CIFS_FATTR_NEED_REVAL 0x4 | 509 | #define CIFS_FATTR_NEED_REVAL 0x4 |
510 | #define CIFS_FATTR_INO_COLLISION 0x8 | ||
505 | 511 | ||
506 | struct cifs_fattr { | 512 | struct cifs_fattr { |
507 | u32 cf_flags; | 513 | u32 cf_flags; |
@@ -717,7 +723,7 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions | |||
717 | GLOBAL_EXTERN unsigned int oplockEnabled; | 723 | GLOBAL_EXTERN unsigned int oplockEnabled; |
718 | GLOBAL_EXTERN unsigned int experimEnabled; | 724 | GLOBAL_EXTERN unsigned int experimEnabled; |
719 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; | 725 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; |
720 | GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent | 726 | GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent |
721 | with more secure ntlmssp2 challenge/resp */ | 727 | with more secure ntlmssp2 challenge/resp */ |
722 | GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ | 728 | GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ |
723 | GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ | 729 | GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ |
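
The four booleans added to struct TCP_Server_Info above record which extended-security flavors the server's negotiate response advertised; a later cifssmb.c hunk in this series folds them into a single secType. A trimmed-down sketch of that mapping, using reduced stand-in definitions rather than the real cifsglob.h layout, might read:

/* Illustrative only: cut-down versions of the structures shown in this diff. */
#include <errno.h>
#include <stdbool.h>

enum securityEnum { LANMAN, NTLM, NTLMv2, RawNTLMSSP, Kerberos };

struct tcp_server_info {
	enum securityEnum secType;
	bool sec_kerberos;	/* supports plain Kerberos */
	bool sec_mskerberos;	/* supports legacy MS Kerberos */
	bool sec_kerberosu2u;	/* supports U2U Kerberos */
	bool sec_ntlmssp;	/* supports NTLMSSP */
};

/* Pick one security type from whatever the negotiate parsing recorded. */
int choose_sectype(struct tcp_server_info *server)
{
	if (server->sec_kerberos || server->sec_mskerberos)
		server->secType = Kerberos;
	else if (server->sec_ntlmssp)
		server->secType = RawNTLMSSP;
	else
		return -EOPNOTSUPP;	/* no flavor we can handle */
	return 0;
}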
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 39e47f46dea5..fb1657e0fdb8 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -39,8 +39,20 @@ extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, | |||
39 | unsigned int /* length */); | 39 | unsigned int /* length */); |
40 | extern unsigned int _GetXid(void); | 40 | extern unsigned int _GetXid(void); |
41 | extern void _FreeXid(unsigned int); | 41 | extern void _FreeXid(unsigned int); |
42 | #define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid())); | 42 | #define GetXid() \ |
43 | #define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__func__,curr_xid,(int)rc));} | 43 | ({ \ |
44 | int __xid = (int)_GetXid(); \ | ||
45 | cFYI(1, "CIFS VFS: in %s as Xid: %d with uid: %d", \ | ||
46 | __func__, __xid, current_fsuid()); \ | ||
47 | __xid; \ | ||
48 | }) | ||
49 | |||
50 | #define FreeXid(curr_xid) \ | ||
51 | do { \ | ||
52 | _FreeXid(curr_xid); \ | ||
53 | cFYI(1, "CIFS VFS: leaving %s (xid = %d) rc = %d", \ | ||
54 | __func__, curr_xid, (int)rc); \ | ||
55 | } while (0) | ||
44 | extern char *build_path_from_dentry(struct dentry *); | 56 | extern char *build_path_from_dentry(struct dentry *); |
45 | extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb); | 57 | extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb); |
46 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); | 58 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); |
@@ -73,7 +85,7 @@ extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *); | |||
73 | extern unsigned int smbCalcSize(struct smb_hdr *ptr); | 85 | extern unsigned int smbCalcSize(struct smb_hdr *ptr); |
74 | extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); | 86 | extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); |
75 | extern int decode_negTokenInit(unsigned char *security_blob, int length, | 87 | extern int decode_negTokenInit(unsigned char *security_blob, int length, |
76 | enum securityEnum *secType); | 88 | struct TCP_Server_Info *server); |
77 | extern int cifs_convert_address(char *src, void *dst); | 89 | extern int cifs_convert_address(char *src, void *dst); |
78 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); | 90 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); |
79 | extern void header_assemble(struct smb_hdr *, char /* command */ , | 91 | extern void header_assemble(struct smb_hdr *, char /* command */ , |
@@ -83,7 +95,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct, | |||
83 | struct cifsSesInfo *ses, | 95 | struct cifsSesInfo *ses, |
84 | void **request_buf); | 96 | void **request_buf); |
85 | extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, | 97 | extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, |
86 | const int stage, | ||
87 | const struct nls_table *nls_cp); | 98 | const struct nls_table *nls_cp); |
88 | extern __u16 GetNextMid(struct TCP_Server_Info *server); | 99 | extern __u16 GetNextMid(struct TCP_Server_Info *server); |
89 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); | 100 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); |
@@ -95,8 +106,11 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode, | |||
95 | __u16 fileHandle, struct file *file, | 106 | __u16 fileHandle, struct file *file, |
96 | struct vfsmount *mnt, unsigned int oflags); | 107 | struct vfsmount *mnt, unsigned int oflags); |
97 | extern int cifs_posix_open(char *full_path, struct inode **pinode, | 108 | extern int cifs_posix_open(char *full_path, struct inode **pinode, |
98 | struct vfsmount *mnt, int mode, int oflags, | 109 | struct vfsmount *mnt, |
99 | __u32 *poplock, __u16 *pnetfid, int xid); | 110 | struct super_block *sb, |
111 | int mode, int oflags, | ||
112 | __u32 *poplock, __u16 *pnetfid, int xid); | ||
113 | void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr); | ||
100 | extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, | 114 | extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, |
101 | FILE_UNIX_BASIC_INFO *info, | 115 | FILE_UNIX_BASIC_INFO *info, |
102 | struct cifs_sb_info *cifs_sb); | 116 | struct cifs_sb_info *cifs_sb); |
@@ -125,7 +139,9 @@ extern void cifs_dfs_release_automount_timer(void); | |||
125 | void cifs_proc_init(void); | 139 | void cifs_proc_init(void); |
126 | void cifs_proc_clean(void); | 140 | void cifs_proc_clean(void); |
127 | 141 | ||
128 | extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, | 142 | extern int cifs_negotiate_protocol(unsigned int xid, |
143 | struct cifsSesInfo *ses); | ||
144 | extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, | ||
129 | struct nls_table *nls_info); | 145 | struct nls_table *nls_info); |
130 | extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses); | 146 | extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses); |
131 | 147 | ||
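
The reworked GetXid()/FreeXid() definitions above lean on two common macro idioms: a GNU statement expression so the macro can both log and yield a value, and a do { } while (0) wrapper so a multi-statement macro still behaves as a single statement after an unbraced if. A self-contained sketch of those two patterns, with made-up grab_xid()/drop_xid() helpers standing in for the real _GetXid()/_FreeXid(), is:

/* Hypothetical sketch of the macro patterns, not the cifsproto.h code. */
#include <stdio.h>

static unsigned int next_xid;
static unsigned int grab_xid(void) { return ++next_xid; }
static void drop_xid(unsigned int xid) { (void)xid; }

/* Statement expression: logs on entry and evaluates to the new xid. */
#define GET_XID()						\
({								\
	int __xid = (int)grab_xid();				\
	printf("entering %s as xid %d\n", __func__, __xid);	\
	__xid;							\
})

/* do/while(0) keeps both statements bound together after if/else. */
#define FREE_XID(curr_xid)					\
do {								\
	drop_xid(curr_xid);					\
	printf("leaving %s (xid = %d)\n", __func__, curr_xid);	\
} while (0)

int main(void)
{
	int xid = GET_XID();
	if (xid > 0)
		FREE_XID(xid);	/* safe even without braces */
	else
		printf("no xid\n");
	return 0;
}

The old single-line forms could not be used safely as expressions or inside unbraced conditionals, which is what the statement-expression and do/while(0) rewrites address.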
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 5d3f29fef532..c65c3419dd37 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/cifssmb.c | 2 | * fs/cifs/cifssmb.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2009 | 4 | * Copyright (C) International Business Machines Corp., 2002,2010 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * Contains the routines for constructing the SMB PDUs themselves | 7 | * Contains the routines for constructing the SMB PDUs themselves |
@@ -130,8 +130,8 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
130 | if (smb_command != SMB_COM_WRITE_ANDX && | 130 | if (smb_command != SMB_COM_WRITE_ANDX && |
131 | smb_command != SMB_COM_OPEN_ANDX && | 131 | smb_command != SMB_COM_OPEN_ANDX && |
132 | smb_command != SMB_COM_TREE_DISCONNECT) { | 132 | smb_command != SMB_COM_TREE_DISCONNECT) { |
133 | cFYI(1, ("can not send cmd %d while umounting", | 133 | cFYI(1, "can not send cmd %d while umounting", |
134 | smb_command)); | 134 | smb_command); |
135 | return -ENODEV; | 135 | return -ENODEV; |
136 | } | 136 | } |
137 | } | 137 | } |
@@ -157,7 +157,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
157 | * back on-line | 157 | * back on-line |
158 | */ | 158 | */ |
159 | if (!tcon->retry || ses->status == CifsExiting) { | 159 | if (!tcon->retry || ses->status == CifsExiting) { |
160 | cFYI(1, ("gave up waiting on reconnect in smb_init")); | 160 | cFYI(1, "gave up waiting on reconnect in smb_init"); |
161 | return -EHOSTDOWN; | 161 | return -EHOSTDOWN; |
162 | } | 162 | } |
163 | } | 163 | } |
@@ -172,7 +172,8 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
172 | * reconnect the same SMB session | 172 | * reconnect the same SMB session |
173 | */ | 173 | */ |
174 | mutex_lock(&ses->session_mutex); | 174 | mutex_lock(&ses->session_mutex); |
175 | if (ses->need_reconnect) | 175 | rc = cifs_negotiate_protocol(0, ses); |
176 | if (rc == 0 && ses->need_reconnect) | ||
176 | rc = cifs_setup_session(0, ses, nls_codepage); | 177 | rc = cifs_setup_session(0, ses, nls_codepage); |
177 | 178 | ||
178 | /* do we need to reconnect tcon? */ | 179 | /* do we need to reconnect tcon? */ |
@@ -184,7 +185,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
184 | mark_open_files_invalid(tcon); | 185 | mark_open_files_invalid(tcon); |
185 | rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage); | 186 | rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage); |
186 | mutex_unlock(&ses->session_mutex); | 187 | mutex_unlock(&ses->session_mutex); |
187 | cFYI(1, ("reconnect tcon rc = %d", rc)); | 188 | cFYI(1, "reconnect tcon rc = %d", rc); |
188 | 189 | ||
189 | if (rc) | 190 | if (rc) |
190 | goto out; | 191 | goto out; |
@@ -355,7 +356,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
355 | struct TCP_Server_Info *server; | 356 | struct TCP_Server_Info *server; |
356 | u16 count; | 357 | u16 count; |
357 | unsigned int secFlags; | 358 | unsigned int secFlags; |
358 | u16 dialect; | ||
359 | 359 | ||
360 | if (ses->server) | 360 | if (ses->server) |
361 | server = ses->server; | 361 | server = ses->server; |
@@ -372,9 +372,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
372 | if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) | 372 | if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) |
373 | secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */ | 373 | secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */ |
374 | else /* if override flags set only sign/seal OR them with global auth */ | 374 | else /* if override flags set only sign/seal OR them with global auth */ |
375 | secFlags = extended_security | ses->overrideSecFlg; | 375 | secFlags = global_secflags | ses->overrideSecFlg; |
376 | 376 | ||
377 | cFYI(1, ("secFlags 0x%x", secFlags)); | 377 | cFYI(1, "secFlags 0x%x", secFlags); |
378 | 378 | ||
379 | pSMB->hdr.Mid = GetNextMid(server); | 379 | pSMB->hdr.Mid = GetNextMid(server); |
380 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); | 380 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); |
@@ -382,14 +382,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
382 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) | 382 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) |
383 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; | 383 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; |
384 | else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) { | 384 | else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) { |
385 | cFYI(1, ("Kerberos only mechanism, enable extended security")); | 385 | cFYI(1, "Kerberos only mechanism, enable extended security"); |
386 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; | 386 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; |
387 | } | 387 | } |
388 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 388 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
389 | else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) | 389 | else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) |
390 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; | 390 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; |
391 | else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) { | 391 | else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) { |
392 | cFYI(1, ("NTLMSSP only mechanism, enable extended security")); | 392 | cFYI(1, "NTLMSSP only mechanism, enable extended security"); |
393 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; | 393 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; |
394 | } | 394 | } |
395 | #endif | 395 | #endif |
@@ -408,10 +408,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
408 | if (rc != 0) | 408 | if (rc != 0) |
409 | goto neg_err_exit; | 409 | goto neg_err_exit; |
410 | 410 | ||
411 | dialect = le16_to_cpu(pSMBr->DialectIndex); | 411 | server->dialect = le16_to_cpu(pSMBr->DialectIndex); |
412 | cFYI(1, ("Dialect: %d", dialect)); | 412 | cFYI(1, "Dialect: %d", server->dialect); |
413 | /* Check wct = 1 error case */ | 413 | /* Check wct = 1 error case */ |
414 | if ((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) { | 414 | if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) { |
415 | /* core returns wct = 1, but we do not ask for core - otherwise | 415 | /* core returns wct = 1, but we do not ask for core - otherwise |
416 | small wct just comes when dialect index is -1 indicating we | 416 | small wct just comes when dialect index is -1 indicating we |
417 | could not negotiate a common dialect */ | 417 | could not negotiate a common dialect */ |
@@ -419,8 +419,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
419 | goto neg_err_exit; | 419 | goto neg_err_exit; |
420 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 420 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
421 | } else if ((pSMBr->hdr.WordCount == 13) | 421 | } else if ((pSMBr->hdr.WordCount == 13) |
422 | && ((dialect == LANMAN_PROT) | 422 | && ((server->dialect == LANMAN_PROT) |
423 | || (dialect == LANMAN2_PROT))) { | 423 | || (server->dialect == LANMAN2_PROT))) { |
424 | __s16 tmp; | 424 | __s16 tmp; |
425 | struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr; | 425 | struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr; |
426 | 426 | ||
@@ -428,8 +428,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
428 | (secFlags & CIFSSEC_MAY_PLNTXT)) | 428 | (secFlags & CIFSSEC_MAY_PLNTXT)) |
429 | server->secType = LANMAN; | 429 | server->secType = LANMAN; |
430 | else { | 430 | else { |
431 | cERROR(1, ("mount failed weak security disabled" | 431 | cERROR(1, "mount failed weak security disabled" |
432 | " in /proc/fs/cifs/SecurityFlags")); | 432 | " in /proc/fs/cifs/SecurityFlags"); |
433 | rc = -EOPNOTSUPP; | 433 | rc = -EOPNOTSUPP; |
434 | goto neg_err_exit; | 434 | goto neg_err_exit; |
435 | } | 435 | } |
@@ -462,9 +462,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
462 | utc = CURRENT_TIME; | 462 | utc = CURRENT_TIME; |
463 | ts = cnvrtDosUnixTm(rsp->SrvTime.Date, | 463 | ts = cnvrtDosUnixTm(rsp->SrvTime.Date, |
464 | rsp->SrvTime.Time, 0); | 464 | rsp->SrvTime.Time, 0); |
465 | cFYI(1, ("SrvTime %d sec since 1970 (utc: %d) diff: %d", | 465 | cFYI(1, "SrvTime %d sec since 1970 (utc: %d) diff: %d", |
466 | (int)ts.tv_sec, (int)utc.tv_sec, | 466 | (int)ts.tv_sec, (int)utc.tv_sec, |
467 | (int)(utc.tv_sec - ts.tv_sec))); | 467 | (int)(utc.tv_sec - ts.tv_sec)); |
468 | val = (int)(utc.tv_sec - ts.tv_sec); | 468 | val = (int)(utc.tv_sec - ts.tv_sec); |
469 | seconds = abs(val); | 469 | seconds = abs(val); |
470 | result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ; | 470 | result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ; |
@@ -478,7 +478,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
478 | server->timeAdj = (int)tmp; | 478 | server->timeAdj = (int)tmp; |
479 | server->timeAdj *= 60; /* also in seconds */ | 479 | server->timeAdj *= 60; /* also in seconds */ |
480 | } | 480 | } |
481 | cFYI(1, ("server->timeAdj: %d seconds", server->timeAdj)); | 481 | cFYI(1, "server->timeAdj: %d seconds", server->timeAdj); |
482 | 482 | ||
483 | 483 | ||
484 | /* BB get server time for time conversions and add | 484 | /* BB get server time for time conversions and add |
@@ -493,14 +493,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
493 | goto neg_err_exit; | 493 | goto neg_err_exit; |
494 | } | 494 | } |
495 | 495 | ||
496 | cFYI(1, ("LANMAN negotiated")); | 496 | cFYI(1, "LANMAN negotiated"); |
497 | /* we will not end up setting signing flags - as no signing | 497 | /* we will not end up setting signing flags - as no signing |
498 | was in LANMAN and server did not return the flags on */ | 498 | was in LANMAN and server did not return the flags on */ |
499 | goto signing_check; | 499 | goto signing_check; |
500 | #else /* weak security disabled */ | 500 | #else /* weak security disabled */ |
501 | } else if (pSMBr->hdr.WordCount == 13) { | 501 | } else if (pSMBr->hdr.WordCount == 13) { |
502 | cERROR(1, ("mount failed, cifs module not built " | 502 | cERROR(1, "mount failed, cifs module not built " |
503 | "with CIFS_WEAK_PW_HASH support")); | 503 | "with CIFS_WEAK_PW_HASH support"); |
504 | rc = -EOPNOTSUPP; | 504 | rc = -EOPNOTSUPP; |
505 | #endif /* WEAK_PW_HASH */ | 505 | #endif /* WEAK_PW_HASH */ |
506 | goto neg_err_exit; | 506 | goto neg_err_exit; |
@@ -512,14 +512,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
512 | /* else wct == 17 NTLM */ | 512 | /* else wct == 17 NTLM */ |
513 | server->secMode = pSMBr->SecurityMode; | 513 | server->secMode = pSMBr->SecurityMode; |
514 | if ((server->secMode & SECMODE_USER) == 0) | 514 | if ((server->secMode & SECMODE_USER) == 0) |
515 | cFYI(1, ("share mode security")); | 515 | cFYI(1, "share mode security"); |
516 | 516 | ||
517 | if ((server->secMode & SECMODE_PW_ENCRYPT) == 0) | 517 | if ((server->secMode & SECMODE_PW_ENCRYPT) == 0) |
518 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 518 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
519 | if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) | 519 | if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) |
520 | #endif /* CIFS_WEAK_PW_HASH */ | 520 | #endif /* CIFS_WEAK_PW_HASH */ |
521 | cERROR(1, ("Server requests plain text password" | 521 | cERROR(1, "Server requests plain text password" |
522 | " but client support disabled")); | 522 | " but client support disabled"); |
523 | 523 | ||
524 | if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) | 524 | if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) |
525 | server->secType = NTLMv2; | 525 | server->secType = NTLMv2; |
@@ -539,7 +539,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
539 | #endif */ | 539 | #endif */ |
540 | else { | 540 | else { |
541 | rc = -EOPNOTSUPP; | 541 | rc = -EOPNOTSUPP; |
542 | cERROR(1, ("Invalid security type")); | 542 | cERROR(1, "Invalid security type"); |
543 | goto neg_err_exit; | 543 | goto neg_err_exit; |
544 | } | 544 | } |
545 | /* else ... any others ...? */ | 545 | /* else ... any others ...? */ |
@@ -551,7 +551,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
551 | server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), | 551 | server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), |
552 | (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); | 552 | (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); |
553 | server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); | 553 | server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); |
554 | cFYI(DBG2, ("Max buf = %d", ses->server->maxBuf)); | 554 | cFYI(DBG2, "Max buf = %d", ses->server->maxBuf); |
555 | GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); | 555 | GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); |
556 | server->capabilities = le32_to_cpu(pSMBr->Capabilities); | 556 | server->capabilities = le32_to_cpu(pSMBr->Capabilities); |
557 | server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); | 557 | server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); |
@@ -582,7 +582,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
582 | if (memcmp(server->server_GUID, | 582 | if (memcmp(server->server_GUID, |
583 | pSMBr->u.extended_response. | 583 | pSMBr->u.extended_response. |
584 | GUID, 16) != 0) { | 584 | GUID, 16) != 0) { |
585 | cFYI(1, ("server UID changed")); | 585 | cFYI(1, "server UID changed"); |
586 | memcpy(server->server_GUID, | 586 | memcpy(server->server_GUID, |
587 | pSMBr->u.extended_response.GUID, | 587 | pSMBr->u.extended_response.GUID, |
588 | 16); | 588 | 16); |
@@ -597,13 +597,19 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
597 | server->secType = RawNTLMSSP; | 597 | server->secType = RawNTLMSSP; |
598 | } else { | 598 | } else { |
599 | rc = decode_negTokenInit(pSMBr->u.extended_response. | 599 | rc = decode_negTokenInit(pSMBr->u.extended_response. |
600 | SecurityBlob, | 600 | SecurityBlob, count - 16, |
601 | count - 16, | 601 | server); |
602 | &server->secType); | ||
603 | if (rc == 1) | 602 | if (rc == 1) |
604 | rc = 0; | 603 | rc = 0; |
605 | else | 604 | else |
606 | rc = -EINVAL; | 605 | rc = -EINVAL; |
606 | |||
607 | if (server->sec_kerberos || server->sec_mskerberos) | ||
608 | server->secType = Kerberos; | ||
609 | else if (server->sec_ntlmssp) | ||
610 | server->secType = RawNTLMSSP; | ||
611 | else | ||
612 | rc = -EOPNOTSUPP; | ||
607 | } | 613 | } |
608 | } else | 614 | } else |
609 | server->capabilities &= ~CAP_EXTENDED_SECURITY; | 615 | server->capabilities &= ~CAP_EXTENDED_SECURITY; |
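The extended-security hunk above changes more than message formatting: decode_negTokenInit() now receives the server structure and count - 16 bytes of blob instead of a pointer to secType, and records which mechanisms the server offered; the caller then chooses the security type itself. A minimal sketch of that selection, restating only the new-side lines of the hunk:

	rc = decode_negTokenInit(pSMBr->u.extended_response.SecurityBlob,
				 count - 16, server);
	if (rc == 1)
		rc = 0;			/* the decoder reports success as 1 */
	else
		rc = -EINVAL;

	/* Kerberos in either advertised form wins, then raw NTLMSSP */
	if (server->sec_kerberos || server->sec_mskerberos)
		server->secType = Kerberos;
	else if (server->sec_ntlmssp)
		server->secType = RawNTLMSSP;
	else
		rc = -EOPNOTSUPP;

Note that a successful decode can still end in -EOPNOTSUPP when none of the three mechanism flags were set by the parser.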
@@ -614,22 +620,21 @@ signing_check: | |||
614 | if ((secFlags & CIFSSEC_MAY_SIGN) == 0) { | 620 | if ((secFlags & CIFSSEC_MAY_SIGN) == 0) { |
615 | /* MUST_SIGN already includes the MAY_SIGN FLAG | 621 | /* MUST_SIGN already includes the MAY_SIGN FLAG |
616 | so if this is zero it means that signing is disabled */ | 622 | so if this is zero it means that signing is disabled */ |
617 | cFYI(1, ("Signing disabled")); | 623 | cFYI(1, "Signing disabled"); |
618 | if (server->secMode & SECMODE_SIGN_REQUIRED) { | 624 | if (server->secMode & SECMODE_SIGN_REQUIRED) { |
619 | cERROR(1, ("Server requires " | 625 | cERROR(1, "Server requires " |
620 | "packet signing to be enabled in " | 626 | "packet signing to be enabled in " |
621 | "/proc/fs/cifs/SecurityFlags.")); | 627 | "/proc/fs/cifs/SecurityFlags."); |
622 | rc = -EOPNOTSUPP; | 628 | rc = -EOPNOTSUPP; |
623 | } | 629 | } |
624 | server->secMode &= | 630 | server->secMode &= |
625 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | 631 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); |
626 | } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { | 632 | } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { |
627 | /* signing required */ | 633 | /* signing required */ |
628 | cFYI(1, ("Must sign - secFlags 0x%x", secFlags)); | 634 | cFYI(1, "Must sign - secFlags 0x%x", secFlags); |
629 | if ((server->secMode & | 635 | if ((server->secMode & |
630 | (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { | 636 | (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { |
631 | cERROR(1, | 637 | cERROR(1, "signing required but server lacks support"); |
632 | ("signing required but server lacks support")); | ||
633 | rc = -EOPNOTSUPP; | 638 | rc = -EOPNOTSUPP; |
634 | } else | 639 | } else |
635 | server->secMode |= SECMODE_SIGN_REQUIRED; | 640 | server->secMode |= SECMODE_SIGN_REQUIRED; |
@@ -643,7 +648,7 @@ signing_check: | |||
643 | neg_err_exit: | 648 | neg_err_exit: |
644 | cifs_buf_release(pSMB); | 649 | cifs_buf_release(pSMB); |
645 | 650 | ||
646 | cFYI(1, ("negprot rc %d", rc)); | 651 | cFYI(1, "negprot rc %d", rc); |
647 | return rc; | 652 | return rc; |
648 | } | 653 | } |
649 | 654 | ||
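Nearly every other hunk in this file is the same mechanical conversion: cFYI() and cERROR() lose the extra pair of parentheses around their format string and arguments, so call sites pass printf-style arguments directly. The pattern, taken from the tree-disconnect path above, together with one plausible variadic macro shape (a sketch only; the real definitions live in fs/cifs/cifs_debug.h, gate on the cifsFYI debug flag, and differ in detail):

	/* old call site: format and arguments wrapped in an extra paren pair */
	cFYI(1, ("Tree disconnect failed %d", rc));

	/* new call site: printf-style arguments passed straight through */
	cFYI(1, "Tree disconnect failed %d", rc);

	/* hypothetical variadic definition that the new form assumes */
	#define cFYI(set, fmt, ...)						\
	do {									\
		if (set)							\
			printk(KERN_DEBUG "CIFS FYI: " fmt "\n", ##__VA_ARGS__);\
	} while (0)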
@@ -653,7 +658,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) | |||
653 | struct smb_hdr *smb_buffer; | 658 | struct smb_hdr *smb_buffer; |
654 | int rc = 0; | 659 | int rc = 0; |
655 | 660 | ||
656 | cFYI(1, ("In tree disconnect")); | 661 | cFYI(1, "In tree disconnect"); |
657 | 662 | ||
658 | /* BB: do we need to check this? These should never be NULL. */ | 663 | /* BB: do we need to check this? These should never be NULL. */ |
659 | if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) | 664 | if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) |
@@ -675,7 +680,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) | |||
675 | 680 | ||
676 | rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0); | 681 | rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0); |
677 | if (rc) | 682 | if (rc) |
678 | cFYI(1, ("Tree disconnect failed %d", rc)); | 683 | cFYI(1, "Tree disconnect failed %d", rc); |
679 | 684 | ||
680 | /* No need to return error on this operation if tid invalidated and | 685 | /* No need to return error on this operation if tid invalidated and |
681 | closed on server already e.g. due to tcp session crashing */ | 686 | closed on server already e.g. due to tcp session crashing */ |
@@ -691,7 +696,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) | |||
691 | LOGOFF_ANDX_REQ *pSMB; | 696 | LOGOFF_ANDX_REQ *pSMB; |
692 | int rc = 0; | 697 | int rc = 0; |
693 | 698 | ||
694 | cFYI(1, ("In SMBLogoff for session disconnect")); | 699 | cFYI(1, "In SMBLogoff for session disconnect"); |
695 | 700 | ||
696 | /* | 701 | /* |
697 | * BB: do we need to check validity of ses and server? They should | 702 | * BB: do we need to check validity of ses and server? They should |
@@ -744,7 +749,7 @@ CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName, | |||
744 | int bytes_returned = 0; | 749 | int bytes_returned = 0; |
745 | __u16 params, param_offset, offset, byte_count; | 750 | __u16 params, param_offset, offset, byte_count; |
746 | 751 | ||
747 | cFYI(1, ("In POSIX delete")); | 752 | cFYI(1, "In POSIX delete"); |
748 | PsxDelete: | 753 | PsxDelete: |
749 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 754 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
750 | (void **) &pSMBr); | 755 | (void **) &pSMBr); |
@@ -796,7 +801,7 @@ PsxDelete: | |||
796 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 801 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
797 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 802 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
798 | if (rc) | 803 | if (rc) |
799 | cFYI(1, ("Posix delete returned %d", rc)); | 804 | cFYI(1, "Posix delete returned %d", rc); |
800 | cifs_buf_release(pSMB); | 805 | cifs_buf_release(pSMB); |
801 | 806 | ||
802 | cifs_stats_inc(&tcon->num_deletes); | 807 | cifs_stats_inc(&tcon->num_deletes); |
@@ -843,7 +848,7 @@ DelFileRetry: | |||
843 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 848 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
844 | cifs_stats_inc(&tcon->num_deletes); | 849 | cifs_stats_inc(&tcon->num_deletes); |
845 | if (rc) | 850 | if (rc) |
846 | cFYI(1, ("Error in RMFile = %d", rc)); | 851 | cFYI(1, "Error in RMFile = %d", rc); |
847 | 852 | ||
848 | cifs_buf_release(pSMB); | 853 | cifs_buf_release(pSMB); |
849 | if (rc == -EAGAIN) | 854 | if (rc == -EAGAIN) |
@@ -862,7 +867,7 @@ CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName, | |||
862 | int bytes_returned; | 867 | int bytes_returned; |
863 | int name_len; | 868 | int name_len; |
864 | 869 | ||
865 | cFYI(1, ("In CIFSSMBRmDir")); | 870 | cFYI(1, "In CIFSSMBRmDir"); |
866 | RmDirRetry: | 871 | RmDirRetry: |
867 | rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB, | 872 | rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB, |
868 | (void **) &pSMBr); | 873 | (void **) &pSMBr); |
@@ -887,7 +892,7 @@ RmDirRetry: | |||
887 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 892 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
888 | cifs_stats_inc(&tcon->num_rmdirs); | 893 | cifs_stats_inc(&tcon->num_rmdirs); |
889 | if (rc) | 894 | if (rc) |
890 | cFYI(1, ("Error in RMDir = %d", rc)); | 895 | cFYI(1, "Error in RMDir = %d", rc); |
891 | 896 | ||
892 | cifs_buf_release(pSMB); | 897 | cifs_buf_release(pSMB); |
893 | if (rc == -EAGAIN) | 898 | if (rc == -EAGAIN) |
@@ -905,7 +910,7 @@ CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon, | |||
905 | int bytes_returned; | 910 | int bytes_returned; |
906 | int name_len; | 911 | int name_len; |
907 | 912 | ||
908 | cFYI(1, ("In CIFSSMBMkDir")); | 913 | cFYI(1, "In CIFSSMBMkDir"); |
909 | MkDirRetry: | 914 | MkDirRetry: |
910 | rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB, | 915 | rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB, |
911 | (void **) &pSMBr); | 916 | (void **) &pSMBr); |
@@ -930,7 +935,7 @@ MkDirRetry: | |||
930 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 935 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
931 | cifs_stats_inc(&tcon->num_mkdirs); | 936 | cifs_stats_inc(&tcon->num_mkdirs); |
932 | if (rc) | 937 | if (rc) |
933 | cFYI(1, ("Error in Mkdir = %d", rc)); | 938 | cFYI(1, "Error in Mkdir = %d", rc); |
934 | 939 | ||
935 | cifs_buf_release(pSMB); | 940 | cifs_buf_release(pSMB); |
936 | if (rc == -EAGAIN) | 941 | if (rc == -EAGAIN) |
@@ -953,7 +958,7 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags, | |||
953 | OPEN_PSX_REQ *pdata; | 958 | OPEN_PSX_REQ *pdata; |
954 | OPEN_PSX_RSP *psx_rsp; | 959 | OPEN_PSX_RSP *psx_rsp; |
955 | 960 | ||
956 | cFYI(1, ("In POSIX Create")); | 961 | cFYI(1, "In POSIX Create"); |
957 | PsxCreat: | 962 | PsxCreat: |
958 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 963 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
959 | (void **) &pSMBr); | 964 | (void **) &pSMBr); |
@@ -1007,11 +1012,11 @@ PsxCreat: | |||
1007 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 1012 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
1008 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 1013 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
1009 | if (rc) { | 1014 | if (rc) { |
1010 | cFYI(1, ("Posix create returned %d", rc)); | 1015 | cFYI(1, "Posix create returned %d", rc); |
1011 | goto psx_create_err; | 1016 | goto psx_create_err; |
1012 | } | 1017 | } |
1013 | 1018 | ||
1014 | cFYI(1, ("copying inode info")); | 1019 | cFYI(1, "copying inode info"); |
1015 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 1020 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
1016 | 1021 | ||
1017 | if (rc || (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP))) { | 1022 | if (rc || (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP))) { |
@@ -1033,11 +1038,11 @@ PsxCreat: | |||
1033 | /* check to make sure response data is there */ | 1038 | /* check to make sure response data is there */ |
1034 | if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) { | 1039 | if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) { |
1035 | pRetData->Type = cpu_to_le32(-1); /* unknown */ | 1040 | pRetData->Type = cpu_to_le32(-1); /* unknown */ |
1036 | cFYI(DBG2, ("unknown type")); | 1041 | cFYI(DBG2, "unknown type"); |
1037 | } else { | 1042 | } else { |
1038 | if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP) | 1043 | if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP) |
1039 | + sizeof(FILE_UNIX_BASIC_INFO)) { | 1044 | + sizeof(FILE_UNIX_BASIC_INFO)) { |
1040 | cERROR(1, ("Open response data too small")); | 1045 | cERROR(1, "Open response data too small"); |
1041 | pRetData->Type = cpu_to_le32(-1); | 1046 | pRetData->Type = cpu_to_le32(-1); |
1042 | goto psx_create_err; | 1047 | goto psx_create_err; |
1043 | } | 1048 | } |
@@ -1084,7 +1089,7 @@ static __u16 convert_disposition(int disposition) | |||
1084 | ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC; | 1089 | ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC; |
1085 | break; | 1090 | break; |
1086 | default: | 1091 | default: |
1087 | cFYI(1, ("unknown disposition %d", disposition)); | 1092 | cFYI(1, "unknown disposition %d", disposition); |
1088 | ofun = SMBOPEN_OAPPEND; /* regular open */ | 1093 | ofun = SMBOPEN_OAPPEND; /* regular open */ |
1089 | } | 1094 | } |
1090 | return ofun; | 1095 | return ofun; |
@@ -1175,7 +1180,7 @@ OldOpenRetry: | |||
1175 | (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); | 1180 | (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); |
1176 | cifs_stats_inc(&tcon->num_opens); | 1181 | cifs_stats_inc(&tcon->num_opens); |
1177 | if (rc) { | 1182 | if (rc) { |
1178 | cFYI(1, ("Error in Open = %d", rc)); | 1183 | cFYI(1, "Error in Open = %d", rc); |
1179 | } else { | 1184 | } else { |
1180 | /* BB verify if wct == 15 */ | 1185 | /* BB verify if wct == 15 */ |
1181 | 1186 | ||
@@ -1288,7 +1293,7 @@ openRetry: | |||
1288 | (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); | 1293 | (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); |
1289 | cifs_stats_inc(&tcon->num_opens); | 1294 | cifs_stats_inc(&tcon->num_opens); |
1290 | if (rc) { | 1295 | if (rc) { |
1291 | cFYI(1, ("Error in Open = %d", rc)); | 1296 | cFYI(1, "Error in Open = %d", rc); |
1292 | } else { | 1297 | } else { |
1293 | *pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */ | 1298 | *pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */ |
1294 | *netfid = pSMBr->Fid; /* cifs fid stays in le */ | 1299 | *netfid = pSMBr->Fid; /* cifs fid stays in le */ |
@@ -1326,7 +1331,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | |||
1326 | int resp_buf_type = 0; | 1331 | int resp_buf_type = 0; |
1327 | struct kvec iov[1]; | 1332 | struct kvec iov[1]; |
1328 | 1333 | ||
1329 | cFYI(1, ("Reading %d bytes on fid %d", count, netfid)); | 1334 | cFYI(1, "Reading %d bytes on fid %d", count, netfid); |
1330 | if (tcon->ses->capabilities & CAP_LARGE_FILES) | 1335 | if (tcon->ses->capabilities & CAP_LARGE_FILES) |
1331 | wct = 12; | 1336 | wct = 12; |
1332 | else { | 1337 | else { |
@@ -1371,7 +1376,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | |||
1371 | cifs_stats_inc(&tcon->num_reads); | 1376 | cifs_stats_inc(&tcon->num_reads); |
1372 | pSMBr = (READ_RSP *)iov[0].iov_base; | 1377 | pSMBr = (READ_RSP *)iov[0].iov_base; |
1373 | if (rc) { | 1378 | if (rc) { |
1374 | cERROR(1, ("Send error in read = %d", rc)); | 1379 | cERROR(1, "Send error in read = %d", rc); |
1375 | } else { | 1380 | } else { |
1376 | int data_length = le16_to_cpu(pSMBr->DataLengthHigh); | 1381 | int data_length = le16_to_cpu(pSMBr->DataLengthHigh); |
1377 | data_length = data_length << 16; | 1382 | data_length = data_length << 16; |
@@ -1381,15 +1386,15 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | |||
1381 | /*check that DataLength would not go beyond end of SMB */ | 1386 | /*check that DataLength would not go beyond end of SMB */ |
1382 | if ((data_length > CIFSMaxBufSize) | 1387 | if ((data_length > CIFSMaxBufSize) |
1383 | || (data_length > count)) { | 1388 | || (data_length > count)) { |
1384 | cFYI(1, ("bad length %d for count %d", | 1389 | cFYI(1, "bad length %d for count %d", |
1385 | data_length, count)); | 1390 | data_length, count); |
1386 | rc = -EIO; | 1391 | rc = -EIO; |
1387 | *nbytes = 0; | 1392 | *nbytes = 0; |
1388 | } else { | 1393 | } else { |
1389 | pReadData = (char *) (&pSMBr->hdr.Protocol) + | 1394 | pReadData = (char *) (&pSMBr->hdr.Protocol) + |
1390 | le16_to_cpu(pSMBr->DataOffset); | 1395 | le16_to_cpu(pSMBr->DataOffset); |
1391 | /* if (rc = copy_to_user(buf, pReadData, data_length)) { | 1396 | /* if (rc = copy_to_user(buf, pReadData, data_length)) { |
1392 | cERROR(1,("Faulting on read rc = %d",rc)); | 1397 | cERROR(1, "Faulting on read rc = %d",rc); |
1393 | rc = -EFAULT; | 1398 | rc = -EFAULT; |
1394 | }*/ /* can not use copy_to_user when using page cache*/ | 1399 | }*/ /* can not use copy_to_user when using page cache*/ |
1395 | if (*buf) | 1400 | if (*buf) |
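The CIFSSMBRead hunks only touch the debug strings, but the context around them shows how the read length is rebuilt and bounded before any data pointer is handed out: the high 16 bits come from DataLengthHigh, the low 16 bits are added from the response's DataLength field outside the shown context, and the result must fit both the negotiated buffer and the caller's request. A condensed sketch of that check:

	int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
	data_length = data_length << 16;
	data_length += le16_to_cpu(pSMBr->DataLength);	/* low half, not shown above */
	*nbytes = data_length;

	/* check that DataLength would not go beyond end of SMB */
	if ((data_length > CIFSMaxBufSize) || (data_length > count)) {
		rc = -EIO;
		*nbytes = 0;
	} else {
		pReadData = (char *)(&pSMBr->hdr.Protocol) +
					le16_to_cpu(pSMBr->DataOffset);
	}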
@@ -1433,7 +1438,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1433 | 1438 | ||
1434 | *nbytes = 0; | 1439 | *nbytes = 0; |
1435 | 1440 | ||
1436 | /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ | 1441 | /* cFYI(1, "write at %lld %d bytes", offset, count);*/ |
1437 | if (tcon->ses == NULL) | 1442 | if (tcon->ses == NULL) |
1438 | return -ECONNABORTED; | 1443 | return -ECONNABORTED; |
1439 | 1444 | ||
@@ -1514,7 +1519,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1514 | (struct smb_hdr *) pSMBr, &bytes_returned, long_op); | 1519 | (struct smb_hdr *) pSMBr, &bytes_returned, long_op); |
1515 | cifs_stats_inc(&tcon->num_writes); | 1520 | cifs_stats_inc(&tcon->num_writes); |
1516 | if (rc) { | 1521 | if (rc) { |
1517 | cFYI(1, ("Send error in write = %d", rc)); | 1522 | cFYI(1, "Send error in write = %d", rc); |
1518 | } else { | 1523 | } else { |
1519 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 1524 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
1520 | *nbytes = (*nbytes) << 16; | 1525 | *nbytes = (*nbytes) << 16; |
@@ -1551,7 +1556,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
1551 | 1556 | ||
1552 | *nbytes = 0; | 1557 | *nbytes = 0; |
1553 | 1558 | ||
1554 | cFYI(1, ("write2 at %lld %d bytes", (long long)offset, count)); | 1559 | cFYI(1, "write2 at %lld %d bytes", (long long)offset, count); |
1555 | 1560 | ||
1556 | if (tcon->ses->capabilities & CAP_LARGE_FILES) { | 1561 | if (tcon->ses->capabilities & CAP_LARGE_FILES) { |
1557 | wct = 14; | 1562 | wct = 14; |
@@ -1606,7 +1611,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
1606 | long_op); | 1611 | long_op); |
1607 | cifs_stats_inc(&tcon->num_writes); | 1612 | cifs_stats_inc(&tcon->num_writes); |
1608 | if (rc) { | 1613 | if (rc) { |
1609 | cFYI(1, ("Send error Write2 = %d", rc)); | 1614 | cFYI(1, "Send error Write2 = %d", rc); |
1610 | } else if (resp_buf_type == 0) { | 1615 | } else if (resp_buf_type == 0) { |
1611 | /* presumably this can not happen, but best to be safe */ | 1616 | /* presumably this can not happen, but best to be safe */ |
1612 | rc = -EIO; | 1617 | rc = -EIO; |
@@ -1651,7 +1656,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1651 | int timeout = 0; | 1656 | int timeout = 0; |
1652 | __u16 count; | 1657 | __u16 count; |
1653 | 1658 | ||
1654 | cFYI(1, ("CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock)); | 1659 | cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock); |
1655 | rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); | 1660 | rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); |
1656 | 1661 | ||
1657 | if (rc) | 1662 | if (rc) |
@@ -1699,7 +1704,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1699 | } | 1704 | } |
1700 | cifs_stats_inc(&tcon->num_locks); | 1705 | cifs_stats_inc(&tcon->num_locks); |
1701 | if (rc) | 1706 | if (rc) |
1702 | cFYI(1, ("Send error in Lock = %d", rc)); | 1707 | cFYI(1, "Send error in Lock = %d", rc); |
1703 | 1708 | ||
1704 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 1709 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
1705 | since file handle passed in no longer valid */ | 1710 | since file handle passed in no longer valid */ |
@@ -1722,7 +1727,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1722 | __u16 params, param_offset, offset, byte_count, count; | 1727 | __u16 params, param_offset, offset, byte_count, count; |
1723 | struct kvec iov[1]; | 1728 | struct kvec iov[1]; |
1724 | 1729 | ||
1725 | cFYI(1, ("Posix Lock")); | 1730 | cFYI(1, "Posix Lock"); |
1726 | 1731 | ||
1727 | if (pLockData == NULL) | 1732 | if (pLockData == NULL) |
1728 | return -EINVAL; | 1733 | return -EINVAL; |
@@ -1792,7 +1797,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1792 | } | 1797 | } |
1793 | 1798 | ||
1794 | if (rc) { | 1799 | if (rc) { |
1795 | cFYI(1, ("Send error in Posix Lock = %d", rc)); | 1800 | cFYI(1, "Send error in Posix Lock = %d", rc); |
1796 | } else if (get_flag) { | 1801 | } else if (get_flag) { |
1797 | /* lock structure can be returned on get */ | 1802 | /* lock structure can be returned on get */ |
1798 | __u16 data_offset; | 1803 | __u16 data_offset; |
@@ -1849,7 +1854,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | |||
1849 | { | 1854 | { |
1850 | int rc = 0; | 1855 | int rc = 0; |
1851 | CLOSE_REQ *pSMB = NULL; | 1856 | CLOSE_REQ *pSMB = NULL; |
1852 | cFYI(1, ("In CIFSSMBClose")); | 1857 | cFYI(1, "In CIFSSMBClose"); |
1853 | 1858 | ||
1854 | /* do not retry on dead session on close */ | 1859 | /* do not retry on dead session on close */ |
1855 | rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB); | 1860 | rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB); |
@@ -1866,7 +1871,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | |||
1866 | if (rc) { | 1871 | if (rc) { |
1867 | if (rc != -EINTR) { | 1872 | if (rc != -EINTR) { |
1868 | /* EINTR is expected when user ctl-c to kill app */ | 1873 | /* EINTR is expected when user ctl-c to kill app */ |
1869 | cERROR(1, ("Send error in Close = %d", rc)); | 1874 | cERROR(1, "Send error in Close = %d", rc); |
1870 | } | 1875 | } |
1871 | } | 1876 | } |
1872 | 1877 | ||
@@ -1882,7 +1887,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | |||
1882 | { | 1887 | { |
1883 | int rc = 0; | 1888 | int rc = 0; |
1884 | FLUSH_REQ *pSMB = NULL; | 1889 | FLUSH_REQ *pSMB = NULL; |
1885 | cFYI(1, ("In CIFSSMBFlush")); | 1890 | cFYI(1, "In CIFSSMBFlush"); |
1886 | 1891 | ||
1887 | rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB); | 1892 | rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB); |
1888 | if (rc) | 1893 | if (rc) |
@@ -1893,7 +1898,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | |||
1893 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 1898 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
1894 | cifs_stats_inc(&tcon->num_flushes); | 1899 | cifs_stats_inc(&tcon->num_flushes); |
1895 | if (rc) | 1900 | if (rc) |
1896 | cERROR(1, ("Send error in Flush = %d", rc)); | 1901 | cERROR(1, "Send error in Flush = %d", rc); |
1897 | 1902 | ||
1898 | return rc; | 1903 | return rc; |
1899 | } | 1904 | } |
@@ -1910,7 +1915,7 @@ CIFSSMBRename(const int xid, struct cifsTconInfo *tcon, | |||
1910 | int name_len, name_len2; | 1915 | int name_len, name_len2; |
1911 | __u16 count; | 1916 | __u16 count; |
1912 | 1917 | ||
1913 | cFYI(1, ("In CIFSSMBRename")); | 1918 | cFYI(1, "In CIFSSMBRename"); |
1914 | renameRetry: | 1919 | renameRetry: |
1915 | rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB, | 1920 | rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB, |
1916 | (void **) &pSMBr); | 1921 | (void **) &pSMBr); |
@@ -1956,7 +1961,7 @@ renameRetry: | |||
1956 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 1961 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
1957 | cifs_stats_inc(&tcon->num_renames); | 1962 | cifs_stats_inc(&tcon->num_renames); |
1958 | if (rc) | 1963 | if (rc) |
1959 | cFYI(1, ("Send error in rename = %d", rc)); | 1964 | cFYI(1, "Send error in rename = %d", rc); |
1960 | 1965 | ||
1961 | cifs_buf_release(pSMB); | 1966 | cifs_buf_release(pSMB); |
1962 | 1967 | ||
@@ -1980,7 +1985,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | |||
1980 | int len_of_str; | 1985 | int len_of_str; |
1981 | __u16 params, param_offset, offset, count, byte_count; | 1986 | __u16 params, param_offset, offset, count, byte_count; |
1982 | 1987 | ||
1983 | cFYI(1, ("Rename to File by handle")); | 1988 | cFYI(1, "Rename to File by handle"); |
1984 | rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB, | 1989 | rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB, |
1985 | (void **) &pSMBr); | 1990 | (void **) &pSMBr); |
1986 | if (rc) | 1991 | if (rc) |
@@ -2035,7 +2040,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | |||
2035 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2040 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2036 | cifs_stats_inc(&pTcon->num_t2renames); | 2041 | cifs_stats_inc(&pTcon->num_t2renames); |
2037 | if (rc) | 2042 | if (rc) |
2038 | cFYI(1, ("Send error in Rename (by file handle) = %d", rc)); | 2043 | cFYI(1, "Send error in Rename (by file handle) = %d", rc); |
2039 | 2044 | ||
2040 | cifs_buf_release(pSMB); | 2045 | cifs_buf_release(pSMB); |
2041 | 2046 | ||
@@ -2057,7 +2062,7 @@ CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName, | |||
2057 | int name_len, name_len2; | 2062 | int name_len, name_len2; |
2058 | __u16 count; | 2063 | __u16 count; |
2059 | 2064 | ||
2060 | cFYI(1, ("In CIFSSMBCopy")); | 2065 | cFYI(1, "In CIFSSMBCopy"); |
2061 | copyRetry: | 2066 | copyRetry: |
2062 | rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB, | 2067 | rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB, |
2063 | (void **) &pSMBr); | 2068 | (void **) &pSMBr); |
@@ -2102,8 +2107,8 @@ copyRetry: | |||
2102 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2107 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2103 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2108 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2104 | if (rc) { | 2109 | if (rc) { |
2105 | cFYI(1, ("Send error in copy = %d with %d files copied", | 2110 | cFYI(1, "Send error in copy = %d with %d files copied", |
2106 | rc, le16_to_cpu(pSMBr->CopyCount))); | 2111 | rc, le16_to_cpu(pSMBr->CopyCount)); |
2107 | } | 2112 | } |
2108 | cifs_buf_release(pSMB); | 2113 | cifs_buf_release(pSMB); |
2109 | 2114 | ||
@@ -2127,7 +2132,7 @@ CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon, | |||
2127 | int bytes_returned = 0; | 2132 | int bytes_returned = 0; |
2128 | __u16 params, param_offset, offset, byte_count; | 2133 | __u16 params, param_offset, offset, byte_count; |
2129 | 2134 | ||
2130 | cFYI(1, ("In Symlink Unix style")); | 2135 | cFYI(1, "In Symlink Unix style"); |
2131 | createSymLinkRetry: | 2136 | createSymLinkRetry: |
2132 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 2137 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
2133 | (void **) &pSMBr); | 2138 | (void **) &pSMBr); |
@@ -2192,7 +2197,7 @@ createSymLinkRetry: | |||
2192 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2197 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2193 | cifs_stats_inc(&tcon->num_symlinks); | 2198 | cifs_stats_inc(&tcon->num_symlinks); |
2194 | if (rc) | 2199 | if (rc) |
2195 | cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc)); | 2200 | cFYI(1, "Send error in SetPathInfo create symlink = %d", rc); |
2196 | 2201 | ||
2197 | cifs_buf_release(pSMB); | 2202 | cifs_buf_release(pSMB); |
2198 | 2203 | ||
@@ -2216,7 +2221,7 @@ CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon, | |||
2216 | int bytes_returned = 0; | 2221 | int bytes_returned = 0; |
2217 | __u16 params, param_offset, offset, byte_count; | 2222 | __u16 params, param_offset, offset, byte_count; |
2218 | 2223 | ||
2219 | cFYI(1, ("In Create Hard link Unix style")); | 2224 | cFYI(1, "In Create Hard link Unix style"); |
2220 | createHardLinkRetry: | 2225 | createHardLinkRetry: |
2221 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 2226 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
2222 | (void **) &pSMBr); | 2227 | (void **) &pSMBr); |
@@ -2278,7 +2283,7 @@ createHardLinkRetry: | |||
2278 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2283 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2279 | cifs_stats_inc(&tcon->num_hardlinks); | 2284 | cifs_stats_inc(&tcon->num_hardlinks); |
2280 | if (rc) | 2285 | if (rc) |
2281 | cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc)); | 2286 | cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc); |
2282 | 2287 | ||
2283 | cifs_buf_release(pSMB); | 2288 | cifs_buf_release(pSMB); |
2284 | if (rc == -EAGAIN) | 2289 | if (rc == -EAGAIN) |
@@ -2299,7 +2304,7 @@ CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon, | |||
2299 | int name_len, name_len2; | 2304 | int name_len, name_len2; |
2300 | __u16 count; | 2305 | __u16 count; |
2301 | 2306 | ||
2302 | cFYI(1, ("In CIFSCreateHardLink")); | 2307 | cFYI(1, "In CIFSCreateHardLink"); |
2303 | winCreateHardLinkRetry: | 2308 | winCreateHardLinkRetry: |
2304 | 2309 | ||
2305 | rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB, | 2310 | rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB, |
@@ -2350,7 +2355,7 @@ winCreateHardLinkRetry: | |||
2350 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2355 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2351 | cifs_stats_inc(&tcon->num_hardlinks); | 2356 | cifs_stats_inc(&tcon->num_hardlinks); |
2352 | if (rc) | 2357 | if (rc) |
2353 | cFYI(1, ("Send error in hard link (NT rename) = %d", rc)); | 2358 | cFYI(1, "Send error in hard link (NT rename) = %d", rc); |
2354 | 2359 | ||
2355 | cifs_buf_release(pSMB); | 2360 | cifs_buf_release(pSMB); |
2356 | if (rc == -EAGAIN) | 2361 | if (rc == -EAGAIN) |
@@ -2373,7 +2378,7 @@ CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon, | |||
2373 | __u16 params, byte_count; | 2378 | __u16 params, byte_count; |
2374 | char *data_start; | 2379 | char *data_start; |
2375 | 2380 | ||
2376 | cFYI(1, ("In QPathSymLinkInfo (Unix) for path %s", searchName)); | 2381 | cFYI(1, "In QPathSymLinkInfo (Unix) for path %s", searchName); |
2377 | 2382 | ||
2378 | querySymLinkRetry: | 2383 | querySymLinkRetry: |
2379 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 2384 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
@@ -2420,7 +2425,7 @@ querySymLinkRetry: | |||
2420 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2425 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2421 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2426 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2422 | if (rc) { | 2427 | if (rc) { |
2423 | cFYI(1, ("Send error in QuerySymLinkInfo = %d", rc)); | 2428 | cFYI(1, "Send error in QuerySymLinkInfo = %d", rc); |
2424 | } else { | 2429 | } else { |
2425 | /* decode response */ | 2430 | /* decode response */ |
2426 | 2431 | ||
@@ -2521,21 +2526,21 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata, | |||
2521 | 2526 | ||
2522 | /* should we also check that parm and data areas do not overlap? */ | 2527 | /* should we also check that parm and data areas do not overlap? */ |
2523 | if (*ppparm > end_of_smb) { | 2528 | if (*ppparm > end_of_smb) { |
2524 | cFYI(1, ("parms start after end of smb")); | 2529 | cFYI(1, "parms start after end of smb"); |
2525 | return -EINVAL; | 2530 | return -EINVAL; |
2526 | } else if (parm_count + *ppparm > end_of_smb) { | 2531 | } else if (parm_count + *ppparm > end_of_smb) { |
2527 | cFYI(1, ("parm end after end of smb")); | 2532 | cFYI(1, "parm end after end of smb"); |
2528 | return -EINVAL; | 2533 | return -EINVAL; |
2529 | } else if (*ppdata > end_of_smb) { | 2534 | } else if (*ppdata > end_of_smb) { |
2530 | cFYI(1, ("data starts after end of smb")); | 2535 | cFYI(1, "data starts after end of smb"); |
2531 | return -EINVAL; | 2536 | return -EINVAL; |
2532 | } else if (data_count + *ppdata > end_of_smb) { | 2537 | } else if (data_count + *ppdata > end_of_smb) { |
2533 | cFYI(1, ("data %p + count %d (%p) ends after end of smb %p start %p", | 2538 | cFYI(1, "data %p + count %d (%p) past smb end %p start %p", |
2534 | *ppdata, data_count, (data_count + *ppdata), | 2539 | *ppdata, data_count, (data_count + *ppdata), |
2535 | end_of_smb, pSMBr)); | 2540 | end_of_smb, pSMBr); |
2536 | return -EINVAL; | 2541 | return -EINVAL; |
2537 | } else if (parm_count + data_count > pSMBr->ByteCount) { | 2542 | } else if (parm_count + data_count > pSMBr->ByteCount) { |
2538 | cFYI(1, ("parm count and data count larger than SMB")); | 2543 | cFYI(1, "parm count and data count larger than SMB"); |
2539 | return -EINVAL; | 2544 | return -EINVAL; |
2540 | } | 2545 | } |
2541 | *pdatalen = data_count; | 2546 | *pdatalen = data_count; |
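The validate_ntransact() hunk changes only the wording of its messages, but the checks it touches are easier to read in one piece: each derived area (parameters and data) is rejected if it starts, or would end, past the end of the SMB, and the combined counts must not exceed ByteCount. Condensed to just the comparisons visible above:

	if (*ppparm > end_of_smb)			/* parms start after end of smb */
		return -EINVAL;
	else if (parm_count + *ppparm > end_of_smb)	/* parm end after end of smb */
		return -EINVAL;
	else if (*ppdata > end_of_smb)			/* data starts after end of smb */
		return -EINVAL;
	else if (data_count + *ppdata > end_of_smb)	/* data end past smb end */
		return -EINVAL;
	else if (parm_count + data_count > pSMBr->ByteCount)
		return -EINVAL;

	*pdatalen = data_count;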
@@ -2554,7 +2559,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, | |||
2554 | struct smb_com_transaction_ioctl_req *pSMB; | 2559 | struct smb_com_transaction_ioctl_req *pSMB; |
2555 | struct smb_com_transaction_ioctl_rsp *pSMBr; | 2560 | struct smb_com_transaction_ioctl_rsp *pSMBr; |
2556 | 2561 | ||
2557 | cFYI(1, ("In Windows reparse style QueryLink for path %s", searchName)); | 2562 | cFYI(1, "In Windows reparse style QueryLink for path %s", searchName); |
2558 | rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, | 2563 | rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, |
2559 | (void **) &pSMBr); | 2564 | (void **) &pSMBr); |
2560 | if (rc) | 2565 | if (rc) |
@@ -2583,7 +2588,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, | |||
2583 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2588 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2584 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2589 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2585 | if (rc) { | 2590 | if (rc) { |
2586 | cFYI(1, ("Send error in QueryReparseLinkInfo = %d", rc)); | 2591 | cFYI(1, "Send error in QueryReparseLinkInfo = %d", rc); |
2587 | } else { /* decode response */ | 2592 | } else { /* decode response */ |
2588 | __u32 data_offset = le32_to_cpu(pSMBr->DataOffset); | 2593 | __u32 data_offset = le32_to_cpu(pSMBr->DataOffset); |
2589 | __u32 data_count = le32_to_cpu(pSMBr->DataCount); | 2594 | __u32 data_count = le32_to_cpu(pSMBr->DataCount); |
@@ -2607,7 +2612,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, | |||
2607 | if ((reparse_buf->LinkNamesBuf + | 2612 | if ((reparse_buf->LinkNamesBuf + |
2608 | reparse_buf->TargetNameOffset + | 2613 | reparse_buf->TargetNameOffset + |
2609 | reparse_buf->TargetNameLen) > end_of_smb) { | 2614 | reparse_buf->TargetNameLen) > end_of_smb) { |
2610 | cFYI(1, ("reparse buf beyond SMB")); | 2615 | cFYI(1, "reparse buf beyond SMB"); |
2611 | rc = -EIO; | 2616 | rc = -EIO; |
2612 | goto qreparse_out; | 2617 | goto qreparse_out; |
2613 | } | 2618 | } |
@@ -2628,12 +2633,12 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, | |||
2628 | } | 2633 | } |
2629 | } else { | 2634 | } else { |
2630 | rc = -EIO; | 2635 | rc = -EIO; |
2631 | cFYI(1, ("Invalid return data count on " | 2636 | cFYI(1, "Invalid return data count on " |
2632 | "get reparse info ioctl")); | 2637 | "get reparse info ioctl"); |
2633 | } | 2638 | } |
2634 | symlinkinfo[buflen] = 0; /* just in case so the caller | 2639 | symlinkinfo[buflen] = 0; /* just in case so the caller |
2635 | does not go off the end of the buffer */ | 2640 | does not go off the end of the buffer */ |
2636 | cFYI(1, ("readlink result - %s", symlinkinfo)); | 2641 | cFYI(1, "readlink result - %s", symlinkinfo); |
2637 | } | 2642 | } |
2638 | 2643 | ||
2639 | qreparse_out: | 2644 | qreparse_out: |
@@ -2656,7 +2661,7 @@ static void cifs_convert_ace(posix_acl_xattr_entry *ace, | |||
2656 | ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm); | 2661 | ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm); |
2657 | ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag); | 2662 | ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag); |
2658 | ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid)); | 2663 | ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid)); |
2659 | /* cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id)); */ | 2664 | /* cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id); */ |
2660 | 2665 | ||
2661 | return; | 2666 | return; |
2662 | } | 2667 | } |
@@ -2682,8 +2687,8 @@ static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen, | |||
2682 | size += sizeof(struct cifs_posix_ace) * count; | 2687 | size += sizeof(struct cifs_posix_ace) * count; |
2683 | /* check if we would go beyond end of SMB */ | 2688 | /* check if we would go beyond end of SMB */ |
2684 | if (size_of_data_area < size) { | 2689 | if (size_of_data_area < size) { |
2685 | cFYI(1, ("bad CIFS POSIX ACL size %d vs. %d", | 2690 | cFYI(1, "bad CIFS POSIX ACL size %d vs. %d", |
2686 | size_of_data_area, size)); | 2691 | size_of_data_area, size); |
2687 | return -EINVAL; | 2692 | return -EINVAL; |
2688 | } | 2693 | } |
2689 | } else if (acl_type & ACL_TYPE_DEFAULT) { | 2694 | } else if (acl_type & ACL_TYPE_DEFAULT) { |
@@ -2730,7 +2735,7 @@ static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace, | |||
2730 | cifs_ace->cifs_uid = cpu_to_le64(-1); | 2735 | cifs_ace->cifs_uid = cpu_to_le64(-1); |
2731 | } else | 2736 | } else |
2732 | cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id)); | 2737 | cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id)); |
2733 | /*cFYI(1,("perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id));*/ | 2738 | /*cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id);*/ |
2734 | return rc; | 2739 | return rc; |
2735 | } | 2740 | } |
2736 | 2741 | ||
@@ -2748,12 +2753,12 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL, | |||
2748 | return 0; | 2753 | return 0; |
2749 | 2754 | ||
2750 | count = posix_acl_xattr_count((size_t)buflen); | 2755 | count = posix_acl_xattr_count((size_t)buflen); |
2751 | cFYI(1, ("setting acl with %d entries from buf of length %d and " | 2756 | cFYI(1, "setting acl with %d entries from buf of length %d and " |
2752 | "version of %d", | 2757 | "version of %d", |
2753 | count, buflen, le32_to_cpu(local_acl->a_version))); | 2758 | count, buflen, le32_to_cpu(local_acl->a_version)); |
2754 | if (le32_to_cpu(local_acl->a_version) != 2) { | 2759 | if (le32_to_cpu(local_acl->a_version) != 2) { |
2755 | cFYI(1, ("unknown POSIX ACL version %d", | 2760 | cFYI(1, "unknown POSIX ACL version %d", |
2756 | le32_to_cpu(local_acl->a_version))); | 2761 | le32_to_cpu(local_acl->a_version)); |
2757 | return 0; | 2762 | return 0; |
2758 | } | 2763 | } |
2759 | cifs_acl->version = cpu_to_le16(1); | 2764 | cifs_acl->version = cpu_to_le16(1); |
@@ -2762,7 +2767,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL, | |||
2762 | else if (acl_type == ACL_TYPE_DEFAULT) | 2767 | else if (acl_type == ACL_TYPE_DEFAULT) |
2763 | cifs_acl->default_entry_count = cpu_to_le16(count); | 2768 | cifs_acl->default_entry_count = cpu_to_le16(count); |
2764 | else { | 2769 | else { |
2765 | cFYI(1, ("unknown ACL type %d", acl_type)); | 2770 | cFYI(1, "unknown ACL type %d", acl_type); |
2766 | return 0; | 2771 | return 0; |
2767 | } | 2772 | } |
2768 | for (i = 0; i < count; i++) { | 2773 | for (i = 0; i < count; i++) { |
@@ -2795,7 +2800,7 @@ CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon, | |||
2795 | int name_len; | 2800 | int name_len; |
2796 | __u16 params, byte_count; | 2801 | __u16 params, byte_count; |
2797 | 2802 | ||
2798 | cFYI(1, ("In GetPosixACL (Unix) for path %s", searchName)); | 2803 | cFYI(1, "In GetPosixACL (Unix) for path %s", searchName); |
2799 | 2804 | ||
2800 | queryAclRetry: | 2805 | queryAclRetry: |
2801 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 2806 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
@@ -2847,7 +2852,7 @@ queryAclRetry: | |||
2847 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2852 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2848 | cifs_stats_inc(&tcon->num_acl_get); | 2853 | cifs_stats_inc(&tcon->num_acl_get); |
2849 | if (rc) { | 2854 | if (rc) { |
2850 | cFYI(1, ("Send error in Query POSIX ACL = %d", rc)); | 2855 | cFYI(1, "Send error in Query POSIX ACL = %d", rc); |
2851 | } else { | 2856 | } else { |
2852 | /* decode response */ | 2857 | /* decode response */ |
2853 | 2858 | ||
@@ -2884,7 +2889,7 @@ CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon, | |||
2884 | int bytes_returned = 0; | 2889 | int bytes_returned = 0; |
2885 | __u16 params, byte_count, data_count, param_offset, offset; | 2890 | __u16 params, byte_count, data_count, param_offset, offset; |
2886 | 2891 | ||
2887 | cFYI(1, ("In SetPosixACL (Unix) for path %s", fileName)); | 2892 | cFYI(1, "In SetPosixACL (Unix) for path %s", fileName); |
2888 | setAclRetry: | 2893 | setAclRetry: |
2889 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 2894 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
2890 | (void **) &pSMBr); | 2895 | (void **) &pSMBr); |
@@ -2939,7 +2944,7 @@ setAclRetry: | |||
2939 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2944 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2940 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2945 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2941 | if (rc) | 2946 | if (rc) |
2942 | cFYI(1, ("Set POSIX ACL returned %d", rc)); | 2947 | cFYI(1, "Set POSIX ACL returned %d", rc); |
2943 | 2948 | ||
2944 | setACLerrorExit: | 2949 | setACLerrorExit: |
2945 | cifs_buf_release(pSMB); | 2950 | cifs_buf_release(pSMB); |
@@ -2959,7 +2964,7 @@ CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, | |||
2959 | int bytes_returned; | 2964 | int bytes_returned; |
2960 | __u16 params, byte_count; | 2965 | __u16 params, byte_count; |
2961 | 2966 | ||
2962 | cFYI(1, ("In GetExtAttr")); | 2967 | cFYI(1, "In GetExtAttr"); |
2963 | if (tcon == NULL) | 2968 | if (tcon == NULL) |
2964 | return -ENODEV; | 2969 | return -ENODEV; |
2965 | 2970 | ||
@@ -2998,7 +3003,7 @@ GetExtAttrRetry: | |||
2998 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3003 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2999 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3004 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3000 | if (rc) { | 3005 | if (rc) { |
3001 | cFYI(1, ("error %d in GetExtAttr", rc)); | 3006 | cFYI(1, "error %d in GetExtAttr", rc); |
3002 | } else { | 3007 | } else { |
3003 | /* decode response */ | 3008 | /* decode response */ |
3004 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3009 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
@@ -3013,7 +3018,7 @@ GetExtAttrRetry: | |||
3013 | struct file_chattr_info *pfinfo; | 3018 | struct file_chattr_info *pfinfo; |
3014 | /* BB Do we need a cast or hash here ? */ | 3019 | /* BB Do we need a cast or hash here ? */ |
3015 | if (count != 16) { | 3020 | if (count != 16) { |
3016 | cFYI(1, ("Illegal size ret in GetExtAttr")); | 3021 | cFYI(1, "Illegal size ret in GetExtAttr"); |
3017 | rc = -EIO; | 3022 | rc = -EIO; |
3018 | goto GetExtAttrOut; | 3023 | goto GetExtAttrOut; |
3019 | } | 3024 | } |
@@ -3043,7 +3048,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3043 | QUERY_SEC_DESC_REQ *pSMB; | 3048 | QUERY_SEC_DESC_REQ *pSMB; |
3044 | struct kvec iov[1]; | 3049 | struct kvec iov[1]; |
3045 | 3050 | ||
3046 | cFYI(1, ("GetCifsACL")); | 3051 | cFYI(1, "GetCifsACL"); |
3047 | 3052 | ||
3048 | *pbuflen = 0; | 3053 | *pbuflen = 0; |
3049 | *acl_inf = NULL; | 3054 | *acl_inf = NULL; |
@@ -3068,7 +3073,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3068 | CIFS_STD_OP); | 3073 | CIFS_STD_OP); |
3069 | cifs_stats_inc(&tcon->num_acl_get); | 3074 | cifs_stats_inc(&tcon->num_acl_get); |
3070 | if (rc) { | 3075 | if (rc) { |
3071 | cFYI(1, ("Send error in QuerySecDesc = %d", rc)); | 3076 | cFYI(1, "Send error in QuerySecDesc = %d", rc); |
3072 | } else { /* decode response */ | 3077 | } else { /* decode response */ |
3073 | __le32 *parm; | 3078 | __le32 *parm; |
3074 | __u32 parm_len; | 3079 | __u32 parm_len; |
@@ -3083,7 +3088,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3083 | goto qsec_out; | 3088 | goto qsec_out; |
3084 | pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base; | 3089 | pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base; |
3085 | 3090 | ||
3086 | cFYI(1, ("smb %p parm %p data %p", pSMBr, parm, *acl_inf)); | 3091 | cFYI(1, "smb %p parm %p data %p", pSMBr, parm, *acl_inf); |
3087 | 3092 | ||
3088 | if (le32_to_cpu(pSMBr->ParameterCount) != 4) { | 3093 | if (le32_to_cpu(pSMBr->ParameterCount) != 4) { |
3089 | rc = -EIO; /* bad smb */ | 3094 | rc = -EIO; /* bad smb */ |
@@ -3095,8 +3100,8 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3095 | 3100 | ||
3096 | acl_len = le32_to_cpu(*parm); | 3101 | acl_len = le32_to_cpu(*parm); |
3097 | if (acl_len != *pbuflen) { | 3102 | if (acl_len != *pbuflen) { |
3098 | cERROR(1, ("acl length %d does not match %d", | 3103 | cERROR(1, "acl length %d does not match %d", |
3099 | acl_len, *pbuflen)); | 3104 | acl_len, *pbuflen); |
3100 | if (*pbuflen > acl_len) | 3105 | if (*pbuflen > acl_len) |
3101 | *pbuflen = acl_len; | 3106 | *pbuflen = acl_len; |
3102 | } | 3107 | } |
@@ -3105,7 +3110,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3105 | header followed by the smallest SID */ | 3110 | header followed by the smallest SID */ |
3106 | if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) || | 3111 | if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) || |
3107 | (*pbuflen >= 64 * 1024)) { | 3112 | (*pbuflen >= 64 * 1024)) { |
3108 | cERROR(1, ("bad acl length %d", *pbuflen)); | 3113 | cERROR(1, "bad acl length %d", *pbuflen); |
3109 | rc = -EINVAL; | 3114 | rc = -EINVAL; |
3110 | *pbuflen = 0; | 3115 | *pbuflen = 0; |
3111 | } else { | 3116 | } else { |
@@ -3179,9 +3184,9 @@ setCifsAclRetry: | |||
3179 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3184 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3180 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3185 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3181 | 3186 | ||
3182 | cFYI(1, ("SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc)); | 3187 | cFYI(1, "SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc); |
3183 | if (rc) | 3188 | if (rc) |
3184 | cFYI(1, ("Set CIFS ACL returned %d", rc)); | 3189 | cFYI(1, "Set CIFS ACL returned %d", rc); |
3185 | cifs_buf_release(pSMB); | 3190 | cifs_buf_release(pSMB); |
3186 | 3191 | ||
3187 | if (rc == -EAGAIN) | 3192 | if (rc == -EAGAIN) |
@@ -3205,7 +3210,7 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, | |||
3205 | int bytes_returned; | 3210 | int bytes_returned; |
3206 | int name_len; | 3211 | int name_len; |
3207 | 3212 | ||
3208 | cFYI(1, ("In SMBQPath path %s", searchName)); | 3213 | cFYI(1, "In SMBQPath path %s", searchName); |
3209 | QInfRetry: | 3214 | QInfRetry: |
3210 | rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB, | 3215 | rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB, |
3211 | (void **) &pSMBr); | 3216 | (void **) &pSMBr); |
@@ -3231,7 +3236,7 @@ QInfRetry: | |||
3231 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3236 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3232 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3237 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3233 | if (rc) { | 3238 | if (rc) { |
3234 | cFYI(1, ("Send error in QueryInfo = %d", rc)); | 3239 | cFYI(1, "Send error in QueryInfo = %d", rc); |
3235 | } else if (pFinfo) { | 3240 | } else if (pFinfo) { |
3236 | struct timespec ts; | 3241 | struct timespec ts; |
3237 | __u32 time = le32_to_cpu(pSMBr->last_write_time); | 3242 | __u32 time = le32_to_cpu(pSMBr->last_write_time); |
@@ -3305,7 +3310,7 @@ QFileInfoRetry: | |||
3305 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3310 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3306 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3311 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3307 | if (rc) { | 3312 | if (rc) { |
3308 | cFYI(1, ("Send error in QPathInfo = %d", rc)); | 3313 | cFYI(1, "Send error in QPathInfo = %d", rc); |
3309 | } else { /* decode response */ | 3314 | } else { /* decode response */ |
3310 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3315 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
3311 | 3316 | ||
@@ -3343,7 +3348,7 @@ CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, | |||
3343 | int name_len; | 3348 | int name_len; |
3344 | __u16 params, byte_count; | 3349 | __u16 params, byte_count; |
3345 | 3350 | ||
3346 | /* cFYI(1, ("In QPathInfo path %s", searchName)); */ | 3351 | /* cFYI(1, "In QPathInfo path %s", searchName); */ |
3347 | QPathInfoRetry: | 3352 | QPathInfoRetry: |
3348 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 3353 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
3349 | (void **) &pSMBr); | 3354 | (void **) &pSMBr); |
@@ -3393,7 +3398,7 @@ QPathInfoRetry: | |||
3393 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3398 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3394 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3399 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3395 | if (rc) { | 3400 | if (rc) { |
3396 | cFYI(1, ("Send error in QPathInfo = %d", rc)); | 3401 | cFYI(1, "Send error in QPathInfo = %d", rc); |
3397 | } else { /* decode response */ | 3402 | } else { /* decode response */ |
3398 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3403 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
3399 | 3404 | ||
@@ -3473,14 +3478,14 @@ UnixQFileInfoRetry: | |||
3473 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3478 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3474 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3479 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3475 | if (rc) { | 3480 | if (rc) { |
3476 | cFYI(1, ("Send error in QPathInfo = %d", rc)); | 3481 | cFYI(1, "Send error in QPathInfo = %d", rc); |
3477 | } else { /* decode response */ | 3482 | } else { /* decode response */ |
3478 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3483 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
3479 | 3484 | ||
3480 | if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) { | 3485 | if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) { |
3481 | cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n" | 3486 | cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n" |
3482 | "Unix Extensions can be disabled on mount " | 3487 | "Unix Extensions can be disabled on mount " |
3483 | "by specifying the nosfu mount option.")); | 3488 | "by specifying the nosfu mount option."); |
3484 | rc = -EIO; /* bad smb */ | 3489 | rc = -EIO; /* bad smb */ |
3485 | } else { | 3490 | } else { |
3486 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); | 3491 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); |
@@ -3512,7 +3517,7 @@ CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon, | |||
3512 | int name_len; | 3517 | int name_len; |
3513 | __u16 params, byte_count; | 3518 | __u16 params, byte_count; |
3514 | 3519 | ||
3515 | cFYI(1, ("In QPathInfo (Unix) the path %s", searchName)); | 3520 | cFYI(1, "In QPathInfo (Unix) the path %s", searchName); |
3516 | UnixQPathInfoRetry: | 3521 | UnixQPathInfoRetry: |
3517 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 3522 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
3518 | (void **) &pSMBr); | 3523 | (void **) &pSMBr); |
@@ -3559,14 +3564,14 @@ UnixQPathInfoRetry: | |||
3559 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3564 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3560 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3565 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3561 | if (rc) { | 3566 | if (rc) { |
3562 | cFYI(1, ("Send error in QPathInfo = %d", rc)); | 3567 | cFYI(1, "Send error in QPathInfo = %d", rc); |
3563 | } else { /* decode response */ | 3568 | } else { /* decode response */ |
3564 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3569 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
3565 | 3570 | ||
3566 | if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) { | 3571 | if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) { |
3567 | cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n" | 3572 | cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n" |
3568 | "Unix Extensions can be disabled on mount " | 3573 | "Unix Extensions can be disabled on mount " |
3569 | "by specifying the nosfu mount option.")); | 3574 | "by specifying the nosfu mount option."); |
3570 | rc = -EIO; /* bad smb */ | 3575 | rc = -EIO; /* bad smb */ |
3571 | } else { | 3576 | } else { |
3572 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); | 3577 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); |
@@ -3600,7 +3605,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, | |||
3600 | int name_len; | 3605 | int name_len; |
3601 | __u16 params, byte_count; | 3606 | __u16 params, byte_count; |
3602 | 3607 | ||
3603 | cFYI(1, ("In FindFirst for %s", searchName)); | 3608 | cFYI(1, "In FindFirst for %s", searchName); |
3604 | 3609 | ||
3605 | findFirstRetry: | 3610 | findFirstRetry: |
3606 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 3611 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
@@ -3677,7 +3682,7 @@ findFirstRetry: | |||
3677 | if (rc) {/* BB add logic to retry regular search if Unix search | 3682 | if (rc) {/* BB add logic to retry regular search if Unix search |
3678 | rejected unexpectedly by server */ | 3683 | rejected unexpectedly by server */ |
3679 | /* BB Add code to handle unsupported level rc */ | 3684 | /* BB Add code to handle unsupported level rc */ |
3680 | cFYI(1, ("Error in FindFirst = %d", rc)); | 3685 | cFYI(1, "Error in FindFirst = %d", rc); |
3681 | 3686 | ||
3682 | cifs_buf_release(pSMB); | 3687 | cifs_buf_release(pSMB); |
3683 | 3688 | ||
@@ -3716,7 +3721,7 @@ findFirstRetry: | |||
3716 | lnoff = le16_to_cpu(parms->LastNameOffset); | 3721 | lnoff = le16_to_cpu(parms->LastNameOffset); |
3717 | if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < | 3722 | if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < |
3718 | lnoff) { | 3723 | lnoff) { |
3719 | cERROR(1, ("ignoring corrupt resume name")); | 3724 | cERROR(1, "ignoring corrupt resume name"); |
3720 | psrch_inf->last_entry = NULL; | 3725 | psrch_inf->last_entry = NULL; |
3721 | return rc; | 3726 | return rc; |
3722 | } | 3727 | } |
@@ -3744,7 +3749,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, | |||
3744 | int bytes_returned, name_len; | 3749 | int bytes_returned, name_len; |
3745 | __u16 params, byte_count; | 3750 | __u16 params, byte_count; |
3746 | 3751 | ||
3747 | cFYI(1, ("In FindNext")); | 3752 | cFYI(1, "In FindNext"); |
3748 | 3753 | ||
3749 | if (psrch_inf->endOfSearch) | 3754 | if (psrch_inf->endOfSearch) |
3750 | return -ENOENT; | 3755 | return -ENOENT; |
@@ -3808,7 +3813,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, | |||
3808 | cifs_buf_release(pSMB); | 3813 | cifs_buf_release(pSMB); |
3809 | rc = 0; /* search probably was closed at end of search*/ | 3814 | rc = 0; /* search probably was closed at end of search*/ |
3810 | } else | 3815 | } else |
3811 | cFYI(1, ("FindNext returned = %d", rc)); | 3816 | cFYI(1, "FindNext returned = %d", rc); |
3812 | } else { /* decode response */ | 3817 | } else { /* decode response */ |
3813 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3818 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
3814 | 3819 | ||
@@ -3844,15 +3849,15 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, | |||
3844 | lnoff = le16_to_cpu(parms->LastNameOffset); | 3849 | lnoff = le16_to_cpu(parms->LastNameOffset); |
3845 | if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < | 3850 | if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < |
3846 | lnoff) { | 3851 | lnoff) { |
3847 | cERROR(1, ("ignoring corrupt resume name")); | 3852 | cERROR(1, "ignoring corrupt resume name"); |
3848 | psrch_inf->last_entry = NULL; | 3853 | psrch_inf->last_entry = NULL; |
3849 | return rc; | 3854 | return rc; |
3850 | } else | 3855 | } else |
3851 | psrch_inf->last_entry = | 3856 | psrch_inf->last_entry = |
3852 | psrch_inf->srch_entries_start + lnoff; | 3857 | psrch_inf->srch_entries_start + lnoff; |
3853 | 3858 | ||
3854 | /* cFYI(1,("fnxt2 entries in buf %d index_of_last %d", | 3859 | /* cFYI(1, "fnxt2 entries in buf %d index_of_last %d", |
3855 | psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry)); */ | 3860 | psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */ |
3856 | 3861 | ||
3857 | /* BB fixme add unlock here */ | 3862 | /* BB fixme add unlock here */ |
3858 | } | 3863 | } |
@@ -3877,7 +3882,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, | |||
3877 | int rc = 0; | 3882 | int rc = 0; |
3878 | FINDCLOSE_REQ *pSMB = NULL; | 3883 | FINDCLOSE_REQ *pSMB = NULL; |
3879 | 3884 | ||
3880 | cFYI(1, ("In CIFSSMBFindClose")); | 3885 | cFYI(1, "In CIFSSMBFindClose"); |
3881 | rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB); | 3886 | rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB); |
3882 | 3887 | ||
3883 | /* no sense returning error if session restarted | 3888 | /* no sense returning error if session restarted |
@@ -3891,7 +3896,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, | |||
3891 | pSMB->ByteCount = 0; | 3896 | pSMB->ByteCount = 0; |
3892 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 3897 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
3893 | if (rc) | 3898 | if (rc) |
3894 | cERROR(1, ("Send error in FindClose = %d", rc)); | 3899 | cERROR(1, "Send error in FindClose = %d", rc); |
3895 | 3900 | ||
3896 | cifs_stats_inc(&tcon->num_fclose); | 3901 | cifs_stats_inc(&tcon->num_fclose); |
3897 | 3902 | ||
@@ -3914,7 +3919,7 @@ CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, | |||
3914 | int name_len, bytes_returned; | 3919 | int name_len, bytes_returned; |
3915 | __u16 params, byte_count; | 3920 | __u16 params, byte_count; |
3916 | 3921 | ||
3917 | cFYI(1, ("In GetSrvInodeNum for %s", searchName)); | 3922 | cFYI(1, "In GetSrvInodeNum for %s", searchName); |
3918 | if (tcon == NULL) | 3923 | if (tcon == NULL) |
3919 | return -ENODEV; | 3924 | return -ENODEV; |
3920 | 3925 | ||
@@ -3964,7 +3969,7 @@ GetInodeNumberRetry: | |||
3964 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 3969 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
3965 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3970 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3966 | if (rc) { | 3971 | if (rc) { |
3967 | cFYI(1, ("error %d in QueryInternalInfo", rc)); | 3972 | cFYI(1, "error %d in QueryInternalInfo", rc); |
3968 | } else { | 3973 | } else { |
3969 | /* decode response */ | 3974 | /* decode response */ |
3970 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 3975 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
@@ -3979,7 +3984,7 @@ GetInodeNumberRetry: | |||
3979 | struct file_internal_info *pfinfo; | 3984 | struct file_internal_info *pfinfo; |
3980 | /* BB Do we need a cast or hash here ? */ | 3985 | /* BB Do we need a cast or hash here ? */ |
3981 | if (count < 8) { | 3986 | if (count < 8) { |
3982 | cFYI(1, ("Illegal size ret in QryIntrnlInf")); | 3987 | cFYI(1, "Illegal size ret in QryIntrnlInf"); |
3983 | rc = -EIO; | 3988 | rc = -EIO; |
3984 | goto GetInodeNumOut; | 3989 | goto GetInodeNumOut; |
3985 | } | 3990 | } |
@@ -4020,16 +4025,16 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, | |||
4020 | *num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals); | 4025 | *num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals); |
4021 | 4026 | ||
4022 | if (*num_of_nodes < 1) { | 4027 | if (*num_of_nodes < 1) { |
4023 | cERROR(1, ("num_referrals: must be at least > 0," | 4028 | cERROR(1, "num_referrals: must be at least > 0," |
4024 | "but we get num_referrals = %d\n", *num_of_nodes)); | 4029 | "but we get num_referrals = %d\n", *num_of_nodes); |
4025 | rc = -EINVAL; | 4030 | rc = -EINVAL; |
4026 | goto parse_DFS_referrals_exit; | 4031 | goto parse_DFS_referrals_exit; |
4027 | } | 4032 | } |
4028 | 4033 | ||
4029 | ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals); | 4034 | ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals); |
4030 | if (ref->VersionNumber != cpu_to_le16(3)) { | 4035 | if (ref->VersionNumber != cpu_to_le16(3)) { |
4031 | cERROR(1, ("Referrals of V%d version are not supported," | 4036 | cERROR(1, "Referrals of V%d version are not supported," |
4032 | "should be V3", le16_to_cpu(ref->VersionNumber))); | 4037 | "should be V3", le16_to_cpu(ref->VersionNumber)); |
4033 | rc = -EINVAL; | 4038 | rc = -EINVAL; |
4034 | goto parse_DFS_referrals_exit; | 4039 | goto parse_DFS_referrals_exit; |
4035 | } | 4040 | } |
@@ -4038,14 +4043,14 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, | |||
4038 | data_end = (char *)(&(pSMBr->PathConsumed)) + | 4043 | data_end = (char *)(&(pSMBr->PathConsumed)) + |
4039 | le16_to_cpu(pSMBr->t2.DataCount); | 4044 | le16_to_cpu(pSMBr->t2.DataCount); |
4040 | 4045 | ||
4041 | cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n", | 4046 | cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n", |
4042 | *num_of_nodes, | 4047 | *num_of_nodes, |
4043 | le32_to_cpu(pSMBr->DFSFlags))); | 4048 | le32_to_cpu(pSMBr->DFSFlags)); |
4044 | 4049 | ||
4045 | *target_nodes = kzalloc(sizeof(struct dfs_info3_param) * | 4050 | *target_nodes = kzalloc(sizeof(struct dfs_info3_param) * |
4046 | *num_of_nodes, GFP_KERNEL); | 4051 | *num_of_nodes, GFP_KERNEL); |
4047 | if (*target_nodes == NULL) { | 4052 | if (*target_nodes == NULL) { |
4048 | cERROR(1, ("Failed to allocate buffer for target_nodes\n")); | 4053 | cERROR(1, "Failed to allocate buffer for target_nodes\n"); |
4049 | rc = -ENOMEM; | 4054 | rc = -ENOMEM; |
4050 | goto parse_DFS_referrals_exit; | 4055 | goto parse_DFS_referrals_exit; |
4051 | } | 4056 | } |
@@ -4121,7 +4126,7 @@ CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses, | |||
4121 | *num_of_nodes = 0; | 4126 | *num_of_nodes = 0; |
4122 | *target_nodes = NULL; | 4127 | *target_nodes = NULL; |
4123 | 4128 | ||
4124 | cFYI(1, ("In GetDFSRefer the path %s", searchName)); | 4129 | cFYI(1, "In GetDFSRefer the path %s", searchName); |
4125 | if (ses == NULL) | 4130 | if (ses == NULL) |
4126 | return -ENODEV; | 4131 | return -ENODEV; |
4127 | getDFSRetry: | 4132 | getDFSRetry: |
@@ -4188,7 +4193,7 @@ getDFSRetry: | |||
4188 | rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, | 4193 | rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, |
4189 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4194 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4190 | if (rc) { | 4195 | if (rc) { |
4191 | cFYI(1, ("Send error in GetDFSRefer = %d", rc)); | 4196 | cFYI(1, "Send error in GetDFSRefer = %d", rc); |
4192 | goto GetDFSRefExit; | 4197 | goto GetDFSRefExit; |
4193 | } | 4198 | } |
4194 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4199 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
@@ -4199,9 +4204,9 @@ getDFSRetry: | |||
4199 | goto GetDFSRefExit; | 4204 | goto GetDFSRefExit; |
4200 | } | 4205 | } |
4201 | 4206 | ||
4202 | cFYI(1, ("Decoding GetDFSRefer response BCC: %d Offset %d", | 4207 | cFYI(1, "Decoding GetDFSRefer response BCC: %d Offset %d", |
4203 | pSMBr->ByteCount, | 4208 | pSMBr->ByteCount, |
4204 | le16_to_cpu(pSMBr->t2.DataOffset))); | 4209 | le16_to_cpu(pSMBr->t2.DataOffset)); |
4205 | 4210 | ||
4206 | /* parse returned result into more usable form */ | 4211 | /* parse returned result into more usable form */ |
4207 | rc = parse_DFS_referrals(pSMBr, num_of_nodes, | 4212 | rc = parse_DFS_referrals(pSMBr, num_of_nodes, |
@@ -4229,7 +4234,7 @@ SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData) | |||
4229 | int bytes_returned = 0; | 4234 | int bytes_returned = 0; |
4230 | __u16 params, byte_count; | 4235 | __u16 params, byte_count; |
4231 | 4236 | ||
4232 | cFYI(1, ("OldQFSInfo")); | 4237 | cFYI(1, "OldQFSInfo"); |
4233 | oldQFSInfoRetry: | 4238 | oldQFSInfoRetry: |
4234 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4239 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4235 | (void **) &pSMBr); | 4240 | (void **) &pSMBr); |
@@ -4262,7 +4267,7 @@ oldQFSInfoRetry: | |||
4262 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4267 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4263 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4268 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4264 | if (rc) { | 4269 | if (rc) { |
4265 | cFYI(1, ("Send error in QFSInfo = %d", rc)); | 4270 | cFYI(1, "Send error in QFSInfo = %d", rc); |
4266 | } else { /* decode response */ | 4271 | } else { /* decode response */ |
4267 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4272 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4268 | 4273 | ||
@@ -4270,8 +4275,8 @@ oldQFSInfoRetry: | |||
4270 | rc = -EIO; /* bad smb */ | 4275 | rc = -EIO; /* bad smb */ |
4271 | else { | 4276 | else { |
4272 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); | 4277 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); |
4273 | cFYI(1, ("qfsinf resp BCC: %d Offset %d", | 4278 | cFYI(1, "qfsinf resp BCC: %d Offset %d", |
4274 | pSMBr->ByteCount, data_offset)); | 4279 | pSMBr->ByteCount, data_offset); |
4275 | 4280 | ||
4276 | response_data = (FILE_SYSTEM_ALLOC_INFO *) | 4281 | response_data = (FILE_SYSTEM_ALLOC_INFO *) |
4277 | (((char *) &pSMBr->hdr.Protocol) + data_offset); | 4282 | (((char *) &pSMBr->hdr.Protocol) + data_offset); |
@@ -4283,11 +4288,10 @@ oldQFSInfoRetry: | |||
4283 | le32_to_cpu(response_data->TotalAllocationUnits); | 4288 | le32_to_cpu(response_data->TotalAllocationUnits); |
4284 | FSData->f_bfree = FSData->f_bavail = | 4289 | FSData->f_bfree = FSData->f_bavail = |
4285 | le32_to_cpu(response_data->FreeAllocationUnits); | 4290 | le32_to_cpu(response_data->FreeAllocationUnits); |
4286 | cFYI(1, | 4291 | cFYI(1, "Blocks: %lld Free: %lld Block size %ld", |
4287 | ("Blocks: %lld Free: %lld Block size %ld", | 4292 | (unsigned long long)FSData->f_blocks, |
4288 | (unsigned long long)FSData->f_blocks, | 4293 | (unsigned long long)FSData->f_bfree, |
4289 | (unsigned long long)FSData->f_bfree, | 4294 | FSData->f_bsize); |
4290 | FSData->f_bsize)); | ||
4291 | } | 4295 | } |
4292 | } | 4296 | } |
4293 | cifs_buf_release(pSMB); | 4297 | cifs_buf_release(pSMB); |
@@ -4309,7 +4313,7 @@ CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData) | |||
4309 | int bytes_returned = 0; | 4313 | int bytes_returned = 0; |
4310 | __u16 params, byte_count; | 4314 | __u16 params, byte_count; |
4311 | 4315 | ||
4312 | cFYI(1, ("In QFSInfo")); | 4316 | cFYI(1, "In QFSInfo"); |
4313 | QFSInfoRetry: | 4317 | QFSInfoRetry: |
4314 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4318 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4315 | (void **) &pSMBr); | 4319 | (void **) &pSMBr); |
@@ -4342,7 +4346,7 @@ QFSInfoRetry: | |||
4342 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4346 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4343 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4347 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4344 | if (rc) { | 4348 | if (rc) { |
4345 | cFYI(1, ("Send error in QFSInfo = %d", rc)); | 4349 | cFYI(1, "Send error in QFSInfo = %d", rc); |
4346 | } else { /* decode response */ | 4350 | } else { /* decode response */ |
4347 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4351 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4348 | 4352 | ||
@@ -4363,11 +4367,10 @@ QFSInfoRetry: | |||
4363 | le64_to_cpu(response_data->TotalAllocationUnits); | 4367 | le64_to_cpu(response_data->TotalAllocationUnits); |
4364 | FSData->f_bfree = FSData->f_bavail = | 4368 | FSData->f_bfree = FSData->f_bavail = |
4365 | le64_to_cpu(response_data->FreeAllocationUnits); | 4369 | le64_to_cpu(response_data->FreeAllocationUnits); |
4366 | cFYI(1, | 4370 | cFYI(1, "Blocks: %lld Free: %lld Block size %ld", |
4367 | ("Blocks: %lld Free: %lld Block size %ld", | 4371 | (unsigned long long)FSData->f_blocks, |
4368 | (unsigned long long)FSData->f_blocks, | 4372 | (unsigned long long)FSData->f_bfree, |
4369 | (unsigned long long)FSData->f_bfree, | 4373 | FSData->f_bsize); |
4370 | FSData->f_bsize)); | ||
4371 | } | 4374 | } |
4372 | } | 4375 | } |
4373 | cifs_buf_release(pSMB); | 4376 | cifs_buf_release(pSMB); |
@@ -4389,7 +4392,7 @@ CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon) | |||
4389 | int bytes_returned = 0; | 4392 | int bytes_returned = 0; |
4390 | __u16 params, byte_count; | 4393 | __u16 params, byte_count; |
4391 | 4394 | ||
4392 | cFYI(1, ("In QFSAttributeInfo")); | 4395 | cFYI(1, "In QFSAttributeInfo"); |
4393 | QFSAttributeRetry: | 4396 | QFSAttributeRetry: |
4394 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4397 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4395 | (void **) &pSMBr); | 4398 | (void **) &pSMBr); |
@@ -4423,7 +4426,7 @@ QFSAttributeRetry: | |||
4423 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4426 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4424 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4427 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4425 | if (rc) { | 4428 | if (rc) { |
4426 | cERROR(1, ("Send error in QFSAttributeInfo = %d", rc)); | 4429 | cERROR(1, "Send error in QFSAttributeInfo = %d", rc); |
4427 | } else { /* decode response */ | 4430 | } else { /* decode response */ |
4428 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4431 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4429 | 4432 | ||
@@ -4459,7 +4462,7 @@ CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon) | |||
4459 | int bytes_returned = 0; | 4462 | int bytes_returned = 0; |
4460 | __u16 params, byte_count; | 4463 | __u16 params, byte_count; |
4461 | 4464 | ||
4462 | cFYI(1, ("In QFSDeviceInfo")); | 4465 | cFYI(1, "In QFSDeviceInfo"); |
4463 | QFSDeviceRetry: | 4466 | QFSDeviceRetry: |
4464 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4467 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4465 | (void **) &pSMBr); | 4468 | (void **) &pSMBr); |
@@ -4494,7 +4497,7 @@ QFSDeviceRetry: | |||
4494 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4497 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4495 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4498 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4496 | if (rc) { | 4499 | if (rc) { |
4497 | cFYI(1, ("Send error in QFSDeviceInfo = %d", rc)); | 4500 | cFYI(1, "Send error in QFSDeviceInfo = %d", rc); |
4498 | } else { /* decode response */ | 4501 | } else { /* decode response */ |
4499 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4502 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4500 | 4503 | ||
@@ -4529,7 +4532,7 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon) | |||
4529 | int bytes_returned = 0; | 4532 | int bytes_returned = 0; |
4530 | __u16 params, byte_count; | 4533 | __u16 params, byte_count; |
4531 | 4534 | ||
4532 | cFYI(1, ("In QFSUnixInfo")); | 4535 | cFYI(1, "In QFSUnixInfo"); |
4533 | QFSUnixRetry: | 4536 | QFSUnixRetry: |
4534 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4537 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4535 | (void **) &pSMBr); | 4538 | (void **) &pSMBr); |
@@ -4563,7 +4566,7 @@ QFSUnixRetry: | |||
4563 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4566 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4564 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4567 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4565 | if (rc) { | 4568 | if (rc) { |
4566 | cERROR(1, ("Send error in QFSUnixInfo = %d", rc)); | 4569 | cERROR(1, "Send error in QFSUnixInfo = %d", rc); |
4567 | } else { /* decode response */ | 4570 | } else { /* decode response */ |
4568 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4571 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4569 | 4572 | ||
@@ -4598,7 +4601,7 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap) | |||
4598 | int bytes_returned = 0; | 4601 | int bytes_returned = 0; |
4599 | __u16 params, param_offset, offset, byte_count; | 4602 | __u16 params, param_offset, offset, byte_count; |
4600 | 4603 | ||
4601 | cFYI(1, ("In SETFSUnixInfo")); | 4604 | cFYI(1, "In SETFSUnixInfo"); |
4602 | SETFSUnixRetry: | 4605 | SETFSUnixRetry: |
4603 | /* BB switch to small buf init to save memory */ | 4606 | /* BB switch to small buf init to save memory */ |
4604 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4607 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
@@ -4646,7 +4649,7 @@ SETFSUnixRetry: | |||
4646 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4649 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4647 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4650 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4648 | if (rc) { | 4651 | if (rc) { |
4649 | cERROR(1, ("Send error in SETFSUnixInfo = %d", rc)); | 4652 | cERROR(1, "Send error in SETFSUnixInfo = %d", rc); |
4650 | } else { /* decode response */ | 4653 | } else { /* decode response */ |
4651 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4654 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4652 | if (rc) | 4655 | if (rc) |
@@ -4674,7 +4677,7 @@ CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, | |||
4674 | int bytes_returned = 0; | 4677 | int bytes_returned = 0; |
4675 | __u16 params, byte_count; | 4678 | __u16 params, byte_count; |
4676 | 4679 | ||
4677 | cFYI(1, ("In QFSPosixInfo")); | 4680 | cFYI(1, "In QFSPosixInfo"); |
4678 | QFSPosixRetry: | 4681 | QFSPosixRetry: |
4679 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4682 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4680 | (void **) &pSMBr); | 4683 | (void **) &pSMBr); |
@@ -4708,7 +4711,7 @@ QFSPosixRetry: | |||
4708 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4711 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4709 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4712 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4710 | if (rc) { | 4713 | if (rc) { |
4711 | cFYI(1, ("Send error in QFSUnixInfo = %d", rc)); | 4714 | cFYI(1, "Send error in QFSUnixInfo = %d", rc); |
4712 | } else { /* decode response */ | 4715 | } else { /* decode response */ |
4713 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4716 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4714 | 4717 | ||
@@ -4768,7 +4771,7 @@ CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName, | |||
4768 | int bytes_returned = 0; | 4771 | int bytes_returned = 0; |
4769 | __u16 params, byte_count, data_count, param_offset, offset; | 4772 | __u16 params, byte_count, data_count, param_offset, offset; |
4770 | 4773 | ||
4771 | cFYI(1, ("In SetEOF")); | 4774 | cFYI(1, "In SetEOF"); |
4772 | SetEOFRetry: | 4775 | SetEOFRetry: |
4773 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 4776 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
4774 | (void **) &pSMBr); | 4777 | (void **) &pSMBr); |
@@ -4834,7 +4837,7 @@ SetEOFRetry: | |||
4834 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4837 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4835 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4838 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4836 | if (rc) | 4839 | if (rc) |
4837 | cFYI(1, ("SetPathInfo (file size) returned %d", rc)); | 4840 | cFYI(1, "SetPathInfo (file size) returned %d", rc); |
4838 | 4841 | ||
4839 | cifs_buf_release(pSMB); | 4842 | cifs_buf_release(pSMB); |
4840 | 4843 | ||
@@ -4854,8 +4857,8 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, | |||
4854 | int rc = 0; | 4857 | int rc = 0; |
4855 | __u16 params, param_offset, offset, byte_count, count; | 4858 | __u16 params, param_offset, offset, byte_count, count; |
4856 | 4859 | ||
4857 | cFYI(1, ("SetFileSize (via SetFileInfo) %lld", | 4860 | cFYI(1, "SetFileSize (via SetFileInfo) %lld", |
4858 | (long long)size)); | 4861 | (long long)size); |
4859 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); | 4862 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); |
4860 | 4863 | ||
4861 | if (rc) | 4864 | if (rc) |
@@ -4914,9 +4917,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, | |||
4914 | pSMB->ByteCount = cpu_to_le16(byte_count); | 4917 | pSMB->ByteCount = cpu_to_le16(byte_count); |
4915 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 4918 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
4916 | if (rc) { | 4919 | if (rc) { |
4917 | cFYI(1, | 4920 | cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc); |
4918 | ("Send error in SetFileInfo (SetFileSize) = %d", | ||
4919 | rc)); | ||
4920 | } | 4921 | } |
4921 | 4922 | ||
4922 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 4923 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
@@ -4940,7 +4941,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, | |||
4940 | int rc = 0; | 4941 | int rc = 0; |
4941 | __u16 params, param_offset, offset, byte_count, count; | 4942 | __u16 params, param_offset, offset, byte_count, count; |
4942 | 4943 | ||
4943 | cFYI(1, ("Set Times (via SetFileInfo)")); | 4944 | cFYI(1, "Set Times (via SetFileInfo)"); |
4944 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); | 4945 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); |
4945 | 4946 | ||
4946 | if (rc) | 4947 | if (rc) |
@@ -4985,7 +4986,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, | |||
4985 | memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); | 4986 | memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); |
4986 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 4987 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
4987 | if (rc) | 4988 | if (rc) |
4988 | cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc)); | 4989 | cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); |
4989 | 4990 | ||
4990 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 4991 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
4991 | since file handle passed in no longer valid */ | 4992 | since file handle passed in no longer valid */ |
@@ -5002,7 +5003,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, | |||
5002 | int rc = 0; | 5003 | int rc = 0; |
5003 | __u16 params, param_offset, offset, byte_count, count; | 5004 | __u16 params, param_offset, offset, byte_count, count; |
5004 | 5005 | ||
5005 | cFYI(1, ("Set File Disposition (via SetFileInfo)")); | 5006 | cFYI(1, "Set File Disposition (via SetFileInfo)"); |
5006 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); | 5007 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); |
5007 | 5008 | ||
5008 | if (rc) | 5009 | if (rc) |
@@ -5044,7 +5045,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, | |||
5044 | *data_offset = delete_file ? 1 : 0; | 5045 | *data_offset = delete_file ? 1 : 0; |
5045 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 5046 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
5046 | if (rc) | 5047 | if (rc) |
5047 | cFYI(1, ("Send error in SetFileDisposition = %d", rc)); | 5048 | cFYI(1, "Send error in SetFileDisposition = %d", rc); |
5048 | 5049 | ||
5049 | return rc; | 5050 | return rc; |
5050 | } | 5051 | } |
@@ -5062,7 +5063,7 @@ CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon, | |||
5062 | char *data_offset; | 5063 | char *data_offset; |
5063 | __u16 params, param_offset, offset, byte_count, count; | 5064 | __u16 params, param_offset, offset, byte_count, count; |
5064 | 5065 | ||
5065 | cFYI(1, ("In SetTimes")); | 5066 | cFYI(1, "In SetTimes"); |
5066 | 5067 | ||
5067 | SetTimesRetry: | 5068 | SetTimesRetry: |
5068 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 5069 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
@@ -5118,7 +5119,7 @@ SetTimesRetry: | |||
5118 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5119 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5119 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5120 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5120 | if (rc) | 5121 | if (rc) |
5121 | cFYI(1, ("SetPathInfo (times) returned %d", rc)); | 5122 | cFYI(1, "SetPathInfo (times) returned %d", rc); |
5122 | 5123 | ||
5123 | cifs_buf_release(pSMB); | 5124 | cifs_buf_release(pSMB); |
5124 | 5125 | ||
@@ -5143,7 +5144,7 @@ CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName, | |||
5143 | int bytes_returned; | 5144 | int bytes_returned; |
5144 | int name_len; | 5145 | int name_len; |
5145 | 5146 | ||
5146 | cFYI(1, ("In SetAttrLegacy")); | 5147 | cFYI(1, "In SetAttrLegacy"); |
5147 | 5148 | ||
5148 | SetAttrLgcyRetry: | 5149 | SetAttrLgcyRetry: |
5149 | rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, | 5150 | rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, |
@@ -5169,7 +5170,7 @@ SetAttrLgcyRetry: | |||
5169 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5170 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5170 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5171 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5171 | if (rc) | 5172 | if (rc) |
5172 | cFYI(1, ("Error in LegacySetAttr = %d", rc)); | 5173 | cFYI(1, "Error in LegacySetAttr = %d", rc); |
5173 | 5174 | ||
5174 | cifs_buf_release(pSMB); | 5175 | cifs_buf_release(pSMB); |
5175 | 5176 | ||
@@ -5231,7 +5232,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, | |||
5231 | int rc = 0; | 5232 | int rc = 0; |
5232 | u16 params, param_offset, offset, byte_count, count; | 5233 | u16 params, param_offset, offset, byte_count, count; |
5233 | 5234 | ||
5234 | cFYI(1, ("Set Unix Info (via SetFileInfo)")); | 5235 | cFYI(1, "Set Unix Info (via SetFileInfo)"); |
5235 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); | 5236 | rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); |
5236 | 5237 | ||
5237 | if (rc) | 5238 | if (rc) |
@@ -5276,7 +5277,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, | |||
5276 | 5277 | ||
5277 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 5278 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
5278 | if (rc) | 5279 | if (rc) |
5279 | cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc)); | 5280 | cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); |
5280 | 5281 | ||
5281 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 5282 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
5282 | since file handle passed in no longer valid */ | 5283 | since file handle passed in no longer valid */ |
@@ -5297,7 +5298,7 @@ CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, | |||
5297 | FILE_UNIX_BASIC_INFO *data_offset; | 5298 | FILE_UNIX_BASIC_INFO *data_offset; |
5298 | __u16 params, param_offset, offset, count, byte_count; | 5299 | __u16 params, param_offset, offset, count, byte_count; |
5299 | 5300 | ||
5300 | cFYI(1, ("In SetUID/GID/Mode")); | 5301 | cFYI(1, "In SetUID/GID/Mode"); |
5301 | setPermsRetry: | 5302 | setPermsRetry: |
5302 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 5303 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
5303 | (void **) &pSMBr); | 5304 | (void **) &pSMBr); |
@@ -5353,7 +5354,7 @@ setPermsRetry: | |||
5353 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5354 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5354 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5355 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5355 | if (rc) | 5356 | if (rc) |
5356 | cFYI(1, ("SetPathInfo (perms) returned %d", rc)); | 5357 | cFYI(1, "SetPathInfo (perms) returned %d", rc); |
5357 | 5358 | ||
5358 | cifs_buf_release(pSMB); | 5359 | cifs_buf_release(pSMB); |
5359 | if (rc == -EAGAIN) | 5360 | if (rc == -EAGAIN) |
@@ -5372,7 +5373,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, | |||
5372 | struct dir_notify_req *dnotify_req; | 5373 | struct dir_notify_req *dnotify_req; |
5373 | int bytes_returned; | 5374 | int bytes_returned; |
5374 | 5375 | ||
5375 | cFYI(1, ("In CIFSSMBNotify for file handle %d", (int)netfid)); | 5376 | cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid); |
5376 | rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, | 5377 | rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, |
5377 | (void **) &pSMBr); | 5378 | (void **) &pSMBr); |
5378 | if (rc) | 5379 | if (rc) |
@@ -5406,7 +5407,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, | |||
5406 | (struct smb_hdr *)pSMBr, &bytes_returned, | 5407 | (struct smb_hdr *)pSMBr, &bytes_returned, |
5407 | CIFS_ASYNC_OP); | 5408 | CIFS_ASYNC_OP); |
5408 | if (rc) { | 5409 | if (rc) { |
5409 | cFYI(1, ("Error in Notify = %d", rc)); | 5410 | cFYI(1, "Error in Notify = %d", rc); |
5410 | } else { | 5411 | } else { |
5411 | /* Add file to outstanding requests */ | 5412 | /* Add file to outstanding requests */ |
5412 | /* BB change to kmem cache alloc */ | 5413 | /* BB change to kmem cache alloc */ |
@@ -5462,7 +5463,7 @@ CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, | |||
5462 | char *end_of_smb; | 5463 | char *end_of_smb; |
5463 | __u16 params, byte_count, data_offset; | 5464 | __u16 params, byte_count, data_offset; |
5464 | 5465 | ||
5465 | cFYI(1, ("In Query All EAs path %s", searchName)); | 5466 | cFYI(1, "In Query All EAs path %s", searchName); |
5466 | QAllEAsRetry: | 5467 | QAllEAsRetry: |
5467 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 5468 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
5468 | (void **) &pSMBr); | 5469 | (void **) &pSMBr); |
@@ -5509,7 +5510,7 @@ QAllEAsRetry: | |||
5509 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5510 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5510 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5511 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5511 | if (rc) { | 5512 | if (rc) { |
5512 | cFYI(1, ("Send error in QueryAllEAs = %d", rc)); | 5513 | cFYI(1, "Send error in QueryAllEAs = %d", rc); |
5513 | goto QAllEAsOut; | 5514 | goto QAllEAsOut; |
5514 | } | 5515 | } |
5515 | 5516 | ||
@@ -5537,16 +5538,16 @@ QAllEAsRetry: | |||
5537 | (((char *) &pSMBr->hdr.Protocol) + data_offset); | 5538 | (((char *) &pSMBr->hdr.Protocol) + data_offset); |
5538 | 5539 | ||
5539 | list_len = le32_to_cpu(ea_response_data->list_len); | 5540 | list_len = le32_to_cpu(ea_response_data->list_len); |
5540 | cFYI(1, ("ea length %d", list_len)); | 5541 | cFYI(1, "ea length %d", list_len); |
5541 | if (list_len <= 8) { | 5542 | if (list_len <= 8) { |
5542 | cFYI(1, ("empty EA list returned from server")); | 5543 | cFYI(1, "empty EA list returned from server"); |
5543 | goto QAllEAsOut; | 5544 | goto QAllEAsOut; |
5544 | } | 5545 | } |
5545 | 5546 | ||
5546 | /* make sure list_len doesn't go past end of SMB */ | 5547 | /* make sure list_len doesn't go past end of SMB */ |
5547 | end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr); | 5548 | end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr); |
5548 | if ((char *)ea_response_data + list_len > end_of_smb) { | 5549 | if ((char *)ea_response_data + list_len > end_of_smb) { |
5549 | cFYI(1, ("EA list appears to go beyond SMB")); | 5550 | cFYI(1, "EA list appears to go beyond SMB"); |
5550 | rc = -EIO; | 5551 | rc = -EIO; |
5551 | goto QAllEAsOut; | 5552 | goto QAllEAsOut; |
5552 | } | 5553 | } |
@@ -5563,7 +5564,7 @@ QAllEAsRetry: | |||
5563 | temp_ptr += 4; | 5564 | temp_ptr += 4; |
5564 | /* make sure we can read name_len and value_len */ | 5565 | /* make sure we can read name_len and value_len */ |
5565 | if (list_len < 0) { | 5566 | if (list_len < 0) { |
5566 | cFYI(1, ("EA entry goes beyond length of list")); | 5567 | cFYI(1, "EA entry goes beyond length of list"); |
5567 | rc = -EIO; | 5568 | rc = -EIO; |
5568 | goto QAllEAsOut; | 5569 | goto QAllEAsOut; |
5569 | } | 5570 | } |
@@ -5572,7 +5573,7 @@ QAllEAsRetry: | |||
5572 | value_len = le16_to_cpu(temp_fea->value_len); | 5573 | value_len = le16_to_cpu(temp_fea->value_len); |
5573 | list_len -= name_len + 1 + value_len; | 5574 | list_len -= name_len + 1 + value_len; |
5574 | if (list_len < 0) { | 5575 | if (list_len < 0) { |
5575 | cFYI(1, ("EA entry goes beyond length of list")); | 5576 | cFYI(1, "EA entry goes beyond length of list"); |
5576 | rc = -EIO; | 5577 | rc = -EIO; |
5577 | goto QAllEAsOut; | 5578 | goto QAllEAsOut; |
5578 | } | 5579 | } |
@@ -5639,7 +5640,7 @@ CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName, | |||
5639 | int bytes_returned = 0; | 5640 | int bytes_returned = 0; |
5640 | __u16 params, param_offset, byte_count, offset, count; | 5641 | __u16 params, param_offset, byte_count, offset, count; |
5641 | 5642 | ||
5642 | cFYI(1, ("In SetEA")); | 5643 | cFYI(1, "In SetEA"); |
5643 | SetEARetry: | 5644 | SetEARetry: |
5644 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | 5645 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, |
5645 | (void **) &pSMBr); | 5646 | (void **) &pSMBr); |
@@ -5721,7 +5722,7 @@ SetEARetry: | |||
5721 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5722 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5722 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5723 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5723 | if (rc) | 5724 | if (rc) |
5724 | cFYI(1, ("SetPathInfo (EA) returned %d", rc)); | 5725 | cFYI(1, "SetPathInfo (EA) returned %d", rc); |
5725 | 5726 | ||
5726 | cifs_buf_release(pSMB); | 5727 | cifs_buf_release(pSMB); |
5727 | 5728 | ||
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d9566bf8f917..2208f06e4c45 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -102,6 +102,7 @@ struct smb_vol { | |||
102 | bool sockopt_tcp_nodelay:1; | 102 | bool sockopt_tcp_nodelay:1; |
103 | unsigned short int port; | 103 | unsigned short int port; |
104 | char *prepath; | 104 | char *prepath; |
105 | struct nls_table *local_nls; | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | static int ipv4_connect(struct TCP_Server_Info *server); | 108 | static int ipv4_connect(struct TCP_Server_Info *server); |
@@ -135,7 +136,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
135 | spin_unlock(&GlobalMid_Lock); | 136 | spin_unlock(&GlobalMid_Lock); |
136 | server->maxBuf = 0; | 137 | server->maxBuf = 0; |
137 | 138 | ||
138 | cFYI(1, ("Reconnecting tcp session")); | 139 | cFYI(1, "Reconnecting tcp session"); |
139 | 140 | ||
140 | /* before reconnecting the tcp session, mark the smb session (uid) | 141 | /* before reconnecting the tcp session, mark the smb session (uid) |
141 | and the tid bad so they are not used until reconnected */ | 142 | and the tid bad so they are not used until reconnected */ |
@@ -153,12 +154,12 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
153 | /* do not want to be sending data on a socket we are freeing */ | 154 | /* do not want to be sending data on a socket we are freeing */ |
154 | mutex_lock(&server->srv_mutex); | 155 | mutex_lock(&server->srv_mutex); |
155 | if (server->ssocket) { | 156 | if (server->ssocket) { |
156 | cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state, | 157 | cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state, |
157 | server->ssocket->flags)); | 158 | server->ssocket->flags); |
158 | kernel_sock_shutdown(server->ssocket, SHUT_WR); | 159 | kernel_sock_shutdown(server->ssocket, SHUT_WR); |
159 | cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx", | 160 | cFYI(1, "Post shutdown state: 0x%x Flags: 0x%lx", |
160 | server->ssocket->state, | 161 | server->ssocket->state, |
161 | server->ssocket->flags)); | 162 | server->ssocket->flags); |
162 | sock_release(server->ssocket); | 163 | sock_release(server->ssocket); |
163 | server->ssocket = NULL; | 164 | server->ssocket = NULL; |
164 | } | 165 | } |
@@ -187,7 +188,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
187 | else | 188 | else |
188 | rc = ipv4_connect(server); | 189 | rc = ipv4_connect(server); |
189 | if (rc) { | 190 | if (rc) { |
190 | cFYI(1, ("reconnect error %d", rc)); | 191 | cFYI(1, "reconnect error %d", rc); |
191 | msleep(3000); | 192 | msleep(3000); |
192 | } else { | 193 | } else { |
193 | atomic_inc(&tcpSesReconnectCount); | 194 | atomic_inc(&tcpSesReconnectCount); |
@@ -223,7 +224,7 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) | |||
223 | /* check for plausible wct, bcc and t2 data and parm sizes */ | 224 | /* check for plausible wct, bcc and t2 data and parm sizes */ |
224 | /* check for parm and data offset going beyond end of smb */ | 225 | /* check for parm and data offset going beyond end of smb */ |
225 | if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */ | 226 | if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */ |
226 | cFYI(1, ("invalid transact2 word count")); | 227 | cFYI(1, "invalid transact2 word count"); |
227 | return -EINVAL; | 228 | return -EINVAL; |
228 | } | 229 | } |
229 | 230 | ||
@@ -237,15 +238,15 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) | |||
237 | if (remaining == 0) | 238 | if (remaining == 0) |
238 | return 0; | 239 | return 0; |
239 | else if (remaining < 0) { | 240 | else if (remaining < 0) { |
240 | cFYI(1, ("total data %d smaller than data in frame %d", | 241 | cFYI(1, "total data %d smaller than data in frame %d", |
241 | total_data_size, data_in_this_rsp)); | 242 | total_data_size, data_in_this_rsp); |
242 | return -EINVAL; | 243 | return -EINVAL; |
243 | } else { | 244 | } else { |
244 | cFYI(1, ("missing %d bytes from transact2, check next response", | 245 | cFYI(1, "missing %d bytes from transact2, check next response", |
245 | remaining)); | 246 | remaining); |
246 | if (total_data_size > maxBufSize) { | 247 | if (total_data_size > maxBufSize) { |
247 | cERROR(1, ("TotalDataSize %d is over maximum buffer %d", | 248 | cERROR(1, "TotalDataSize %d is over maximum buffer %d", |
248 | total_data_size, maxBufSize)); | 249 | total_data_size, maxBufSize); |
249 | return -EINVAL; | 250 | return -EINVAL; |
250 | } | 251 | } |
251 | return remaining; | 252 | return remaining; |
@@ -267,7 +268,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) | |||
267 | total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount); | 268 | total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount); |
268 | 269 | ||
269 | if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) { | 270 | if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) { |
270 | cFYI(1, ("total data size of primary and secondary t2 differ")); | 271 | cFYI(1, "total data size of primary and secondary t2 differ"); |
271 | } | 272 | } |
272 | 273 | ||
273 | total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount); | 274 | total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount); |
@@ -282,7 +283,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) | |||
282 | 283 | ||
283 | total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount); | 284 | total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount); |
284 | if (remaining < total_in_buf2) { | 285 | if (remaining < total_in_buf2) { |
285 | cFYI(1, ("transact2 2nd response contains too much data")); | 286 | cFYI(1, "transact2 2nd response contains too much data"); |
286 | } | 287 | } |
287 | 288 | ||
288 | /* find end of first SMB data area */ | 289 | /* find end of first SMB data area */ |
@@ -311,7 +312,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) | |||
311 | pTargetSMB->smb_buf_length = byte_count; | 312 | pTargetSMB->smb_buf_length = byte_count; |
312 | 313 | ||
313 | if (remaining == total_in_buf2) { | 314 | if (remaining == total_in_buf2) { |
314 | cFYI(1, ("found the last secondary response")); | 315 | cFYI(1, "found the last secondary response"); |
315 | return 0; /* we are done */ | 316 | return 0; /* we are done */ |
316 | } else /* more responses to go */ | 317 | } else /* more responses to go */ |
317 | return 1; | 318 | return 1; |
@@ -339,7 +340,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
339 | int reconnect; | 340 | int reconnect; |
340 | 341 | ||
341 | current->flags |= PF_MEMALLOC; | 342 | current->flags |= PF_MEMALLOC; |
342 | cFYI(1, ("Demultiplex PID: %d", task_pid_nr(current))); | 343 | cFYI(1, "Demultiplex PID: %d", task_pid_nr(current)); |
343 | 344 | ||
344 | length = atomic_inc_return(&tcpSesAllocCount); | 345 | length = atomic_inc_return(&tcpSesAllocCount); |
345 | if (length > 1) | 346 | if (length > 1) |
@@ -353,7 +354,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
353 | if (bigbuf == NULL) { | 354 | if (bigbuf == NULL) { |
354 | bigbuf = cifs_buf_get(); | 355 | bigbuf = cifs_buf_get(); |
355 | if (!bigbuf) { | 356 | if (!bigbuf) { |
356 | cERROR(1, ("No memory for large SMB response")); | 357 | cERROR(1, "No memory for large SMB response"); |
357 | msleep(3000); | 358 | msleep(3000); |
358 | /* retry will check if exiting */ | 359 | /* retry will check if exiting */ |
359 | continue; | 360 | continue; |
@@ -366,7 +367,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
366 | if (smallbuf == NULL) { | 367 | if (smallbuf == NULL) { |
367 | smallbuf = cifs_small_buf_get(); | 368 | smallbuf = cifs_small_buf_get(); |
368 | if (!smallbuf) { | 369 | if (!smallbuf) { |
369 | cERROR(1, ("No memory for SMB response")); | 370 | cERROR(1, "No memory for SMB response"); |
370 | msleep(1000); | 371 | msleep(1000); |
371 | /* retry will check if exiting */ | 372 | /* retry will check if exiting */ |
372 | continue; | 373 | continue; |
@@ -391,9 +392,9 @@ incomplete_rcv: | |||
391 | if (server->tcpStatus == CifsExiting) { | 392 | if (server->tcpStatus == CifsExiting) { |
392 | break; | 393 | break; |
393 | } else if (server->tcpStatus == CifsNeedReconnect) { | 394 | } else if (server->tcpStatus == CifsNeedReconnect) { |
394 | cFYI(1, ("Reconnect after server stopped responding")); | 395 | cFYI(1, "Reconnect after server stopped responding"); |
395 | cifs_reconnect(server); | 396 | cifs_reconnect(server); |
396 | cFYI(1, ("call to reconnect done")); | 397 | cFYI(1, "call to reconnect done"); |
397 | csocket = server->ssocket; | 398 | csocket = server->ssocket; |
398 | continue; | 399 | continue; |
399 | } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { | 400 | } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { |
@@ -411,7 +412,7 @@ incomplete_rcv: | |||
411 | continue; | 412 | continue; |
412 | } else if (length <= 0) { | 413 | } else if (length <= 0) { |
413 | if (server->tcpStatus == CifsNew) { | 414 | if (server->tcpStatus == CifsNew) { |
414 | cFYI(1, ("tcp session abend after SMBnegprot")); | 415 | cFYI(1, "tcp session abend after SMBnegprot"); |
415 | /* some servers kill the TCP session rather than | 416 | /* some servers kill the TCP session rather than |
416 | returning an SMB negprot error, in which | 417 | returning an SMB negprot error, in which |
417 | case reconnecting here is not going to help, | 418 | case reconnecting here is not going to help, |
@@ -419,18 +420,18 @@ incomplete_rcv: | |||
419 | break; | 420 | break; |
420 | } | 421 | } |
421 | if (!try_to_freeze() && (length == -EINTR)) { | 422 | if (!try_to_freeze() && (length == -EINTR)) { |
422 | cFYI(1, ("cifsd thread killed")); | 423 | cFYI(1, "cifsd thread killed"); |
423 | break; | 424 | break; |
424 | } | 425 | } |
425 | cFYI(1, ("Reconnect after unexpected peek error %d", | 426 | cFYI(1, "Reconnect after unexpected peek error %d", |
426 | length)); | 427 | length); |
427 | cifs_reconnect(server); | 428 | cifs_reconnect(server); |
428 | csocket = server->ssocket; | 429 | csocket = server->ssocket; |
429 | wake_up(&server->response_q); | 430 | wake_up(&server->response_q); |
430 | continue; | 431 | continue; |
431 | } else if (length < pdu_length) { | 432 | } else if (length < pdu_length) { |
432 | cFYI(1, ("requested %d bytes but only got %d bytes", | 433 | cFYI(1, "requested %d bytes but only got %d bytes", |
433 | pdu_length, length)); | 434 | pdu_length, length); |
434 | pdu_length -= length; | 435 | pdu_length -= length; |
435 | msleep(1); | 436 | msleep(1); |
436 | goto incomplete_rcv; | 437 | goto incomplete_rcv; |
@@ -450,18 +451,18 @@ incomplete_rcv: | |||
450 | pdu_length = be32_to_cpu((__force __be32)smb_buffer->smb_buf_length); | 451 | pdu_length = be32_to_cpu((__force __be32)smb_buffer->smb_buf_length); |
451 | smb_buffer->smb_buf_length = pdu_length; | 452 | smb_buffer->smb_buf_length = pdu_length; |
452 | 453 | ||
453 | cFYI(1, ("rfc1002 length 0x%x", pdu_length+4)); | 454 | cFYI(1, "rfc1002 length 0x%x", pdu_length+4); |
454 | 455 | ||
455 | if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) { | 456 | if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) { |
456 | continue; | 457 | continue; |
457 | } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) { | 458 | } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) { |
458 | cFYI(1, ("Good RFC 1002 session rsp")); | 459 | cFYI(1, "Good RFC 1002 session rsp"); |
459 | continue; | 460 | continue; |
460 | } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) { | 461 | } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) { |
461 | /* we get this from Windows 98 instead of | 462 | /* we get this from Windows 98 instead of |
462 | an error on SMB negprot response */ | 463 | an error on SMB negprot response */ |
463 | cFYI(1, ("Negative RFC1002 Session Response Error 0x%x)", | 464 | cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", |
464 | pdu_length)); | 465 | pdu_length); |
465 | if (server->tcpStatus == CifsNew) { | 466 | if (server->tcpStatus == CifsNew) { |
466 | /* if nack on negprot (rather than | 467 | /* if nack on negprot (rather than |
467 | ret of smb negprot error) reconnecting | 468 | ret of smb negprot error) reconnecting |
@@ -484,7 +485,7 @@ incomplete_rcv: | |||
484 | continue; | 485 | continue; |
485 | } | 486 | } |
486 | } else if (temp != (char) 0) { | 487 | } else if (temp != (char) 0) { |
487 | cERROR(1, ("Unknown RFC 1002 frame")); | 488 | cERROR(1, "Unknown RFC 1002 frame"); |
488 | cifs_dump_mem(" Received Data: ", (char *)smb_buffer, | 489 | cifs_dump_mem(" Received Data: ", (char *)smb_buffer, |
489 | length); | 490 | length); |
490 | cifs_reconnect(server); | 491 | cifs_reconnect(server); |
@@ -495,8 +496,8 @@ incomplete_rcv: | |||
495 | /* else we have an SMB response */ | 496 | /* else we have an SMB response */ |
496 | if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) || | 497 | if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) || |
497 | (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) { | 498 | (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) { |
498 | cERROR(1, ("Invalid size SMB length %d pdu_length %d", | 499 | cERROR(1, "Invalid size SMB length %d pdu_length %d", |
499 | length, pdu_length+4)); | 500 | length, pdu_length+4); |
500 | cifs_reconnect(server); | 501 | cifs_reconnect(server); |
501 | csocket = server->ssocket; | 502 | csocket = server->ssocket; |
502 | wake_up(&server->response_q); | 503 | wake_up(&server->response_q); |
@@ -539,8 +540,8 @@ incomplete_rcv: | |||
539 | length = 0; | 540 | length = 0; |
540 | continue; | 541 | continue; |
541 | } else if (length <= 0) { | 542 | } else if (length <= 0) { |
542 | cERROR(1, ("Received no data, expecting %d", | 543 | cERROR(1, "Received no data, expecting %d", |
543 | pdu_length - total_read)); | 544 | pdu_length - total_read); |
544 | cifs_reconnect(server); | 545 | cifs_reconnect(server); |
545 | csocket = server->ssocket; | 546 | csocket = server->ssocket; |
546 | reconnect = 1; | 547 | reconnect = 1; |
@@ -588,7 +589,7 @@ incomplete_rcv: | |||
588 | } | 589 | } |
589 | } else { | 590 | } else { |
590 | if (!isLargeBuf) { | 591 | if (!isLargeBuf) { |
591 | cERROR(1,("1st trans2 resp needs bigbuf")); | 592 | cERROR(1, "1st trans2 resp needs bigbuf"); |
592 | /* BB maybe we can fix this up, switch | 593 | /* BB maybe we can fix this up, switch |
593 | to already allocated large buffer? */ | 594 | to already allocated large buffer? */ |
594 | } else { | 595 | } else { |
@@ -630,8 +631,8 @@ multi_t2_fnd: | |||
630 | wake_up_process(task_to_wake); | 631 | wake_up_process(task_to_wake); |
631 | } else if (!is_valid_oplock_break(smb_buffer, server) && | 632 | } else if (!is_valid_oplock_break(smb_buffer, server) && |
632 | !isMultiRsp) { | 633 | !isMultiRsp) { |
633 | cERROR(1, ("No task to wake, unknown frame received! " | 634 | cERROR(1, "No task to wake, unknown frame received! " |
634 | "NumMids %d", midCount.counter)); | 635 | "NumMids %d", midCount.counter); |
635 | cifs_dump_mem("Received Data is: ", (char *)smb_buffer, | 636 | cifs_dump_mem("Received Data is: ", (char *)smb_buffer, |
636 | sizeof(struct smb_hdr)); | 637 | sizeof(struct smb_hdr)); |
637 | #ifdef CONFIG_CIFS_DEBUG2 | 638 | #ifdef CONFIG_CIFS_DEBUG2 |
@@ -708,8 +709,8 @@ multi_t2_fnd: | |||
708 | list_for_each(tmp, &server->pending_mid_q) { | 709 | list_for_each(tmp, &server->pending_mid_q) { |
709 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 710 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
710 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) { | 711 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) { |
711 | cFYI(1, ("Clearing Mid 0x%x - waking up ", | 712 | cFYI(1, "Clearing Mid 0x%x - waking up ", |
712 | mid_entry->mid)); | 713 | mid_entry->mid); |
713 | task_to_wake = mid_entry->tsk; | 714 | task_to_wake = mid_entry->tsk; |
714 | if (task_to_wake) | 715 | if (task_to_wake) |
715 | wake_up_process(task_to_wake); | 716 | wake_up_process(task_to_wake); |
@@ -728,7 +729,7 @@ multi_t2_fnd: | |||
728 | to wait at least 45 seconds before giving up | 729 | to wait at least 45 seconds before giving up |
729 | on a request getting a response and going ahead | 730 | on a request getting a response and going ahead |
730 | and killing cifsd */ | 731 | and killing cifsd */ |
731 | cFYI(1, ("Wait for exit from demultiplex thread")); | 732 | cFYI(1, "Wait for exit from demultiplex thread"); |
732 | msleep(46000); | 733 | msleep(46000); |
733 | /* if threads still have not exited they are probably never | 734 | /* if threads still have not exited they are probably never |
734 | coming home not much else we can do but free the memory */ | 735 | coming home not much else we can do but free the memory */ |
@@ -849,7 +850,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
849 | separator[0] = options[4]; | 850 | separator[0] = options[4]; |
850 | options += 5; | 851 | options += 5; |
851 | } else { | 852 | } else { |
852 | cFYI(1, ("Null separator not allowed")); | 853 | cFYI(1, "Null separator not allowed"); |
853 | } | 854 | } |
854 | } | 855 | } |
855 | 856 | ||
@@ -974,7 +975,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
974 | } | 975 | } |
975 | } else if (strnicmp(data, "sec", 3) == 0) { | 976 | } else if (strnicmp(data, "sec", 3) == 0) { |
976 | if (!value || !*value) { | 977 | if (!value || !*value) { |
977 | cERROR(1, ("no security value specified")); | 978 | cERROR(1, "no security value specified"); |
978 | continue; | 979 | continue; |
979 | } else if (strnicmp(value, "krb5i", 5) == 0) { | 980 | } else if (strnicmp(value, "krb5i", 5) == 0) { |
980 | vol->secFlg |= CIFSSEC_MAY_KRB5 | | 981 | vol->secFlg |= CIFSSEC_MAY_KRB5 | |
@@ -982,7 +983,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
982 | } else if (strnicmp(value, "krb5p", 5) == 0) { | 983 | } else if (strnicmp(value, "krb5p", 5) == 0) { |
983 | /* vol->secFlg |= CIFSSEC_MUST_SEAL | | 984 | /* vol->secFlg |= CIFSSEC_MUST_SEAL | |
984 | CIFSSEC_MAY_KRB5; */ | 985 | CIFSSEC_MAY_KRB5; */ |
985 | cERROR(1, ("Krb5 cifs privacy not supported")); | 986 | cERROR(1, "Krb5 cifs privacy not supported"); |
986 | return 1; | 987 | return 1; |
987 | } else if (strnicmp(value, "krb5", 4) == 0) { | 988 | } else if (strnicmp(value, "krb5", 4) == 0) { |
988 | vol->secFlg |= CIFSSEC_MAY_KRB5; | 989 | vol->secFlg |= CIFSSEC_MAY_KRB5; |
@@ -1014,7 +1015,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1014 | } else if (strnicmp(value, "none", 4) == 0) { | 1015 | } else if (strnicmp(value, "none", 4) == 0) { |
1015 | vol->nullauth = 1; | 1016 | vol->nullauth = 1; |
1016 | } else { | 1017 | } else { |
1017 | cERROR(1, ("bad security option: %s", value)); | 1018 | cERROR(1, "bad security option: %s", value); |
1018 | return 1; | 1019 | return 1; |
1019 | } | 1020 | } |
1020 | } else if ((strnicmp(data, "unc", 3) == 0) | 1021 | } else if ((strnicmp(data, "unc", 3) == 0) |
@@ -1053,7 +1054,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1053 | a domain name and need special handling? */ | 1054 | a domain name and need special handling? */ |
1054 | if (strnlen(value, 256) < 256) { | 1055 | if (strnlen(value, 256) < 256) { |
1055 | vol->domainname = value; | 1056 | vol->domainname = value; |
1056 | cFYI(1, ("Domain name set")); | 1057 | cFYI(1, "Domain name set"); |
1057 | } else { | 1058 | } else { |
1058 | printk(KERN_WARNING "CIFS: domain name too " | 1059 | printk(KERN_WARNING "CIFS: domain name too " |
1059 | "long\n"); | 1060 | "long\n"); |
@@ -1076,7 +1077,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1076 | strcpy(vol->prepath+1, value); | 1077 | strcpy(vol->prepath+1, value); |
1077 | } else | 1078 | } else |
1078 | strcpy(vol->prepath, value); | 1079 | strcpy(vol->prepath, value); |
1079 | cFYI(1, ("prefix path %s", vol->prepath)); | 1080 | cFYI(1, "prefix path %s", vol->prepath); |
1080 | } else { | 1081 | } else { |
1081 | printk(KERN_WARNING "CIFS: prefix too long\n"); | 1082 | printk(KERN_WARNING "CIFS: prefix too long\n"); |
1082 | return 1; | 1083 | return 1; |
@@ -1092,7 +1093,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1092 | vol->iocharset = value; | 1093 | vol->iocharset = value; |
1093 | /* if iocharset not set then load_nls_default | 1094 | /* if iocharset not set then load_nls_default |
1094 | is used by caller */ | 1095 | is used by caller */ |
1095 | cFYI(1, ("iocharset set to %s", value)); | 1096 | cFYI(1, "iocharset set to %s", value); |
1096 | } else { | 1097 | } else { |
1097 | printk(KERN_WARNING "CIFS: iocharset name " | 1098 | printk(KERN_WARNING "CIFS: iocharset name " |
1098 | "too long.\n"); | 1099 | "too long.\n"); |
@@ -1144,14 +1145,14 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1144 | } | 1145 | } |
1145 | } else if (strnicmp(data, "sockopt", 5) == 0) { | 1146 | } else if (strnicmp(data, "sockopt", 5) == 0) { |
1146 | if (!value || !*value) { | 1147 | if (!value || !*value) { |
1147 | cERROR(1, ("no socket option specified")); | 1148 | cERROR(1, "no socket option specified"); |
1148 | continue; | 1149 | continue; |
1149 | } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { | 1150 | } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { |
1150 | vol->sockopt_tcp_nodelay = 1; | 1151 | vol->sockopt_tcp_nodelay = 1; |
1151 | } | 1152 | } |
1152 | } else if (strnicmp(data, "netbiosname", 4) == 0) { | 1153 | } else if (strnicmp(data, "netbiosname", 4) == 0) { |
1153 | if (!value || !*value || (*value == ' ')) { | 1154 | if (!value || !*value || (*value == ' ')) { |
1154 | cFYI(1, ("invalid (empty) netbiosname")); | 1155 | cFYI(1, "invalid (empty) netbiosname"); |
1155 | } else { | 1156 | } else { |
1156 | memset(vol->source_rfc1001_name, 0x20, 15); | 1157 | memset(vol->source_rfc1001_name, 0x20, 15); |
1157 | for (i = 0; i < 15; i++) { | 1158 | for (i = 0; i < 15; i++) { |
@@ -1175,7 +1176,7 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1175 | } else if (strnicmp(data, "servern", 7) == 0) { | 1176 | } else if (strnicmp(data, "servern", 7) == 0) { |
1176 | /* servernetbiosname specified override *SMBSERVER */ | 1177 | /* servernetbiosname specified override *SMBSERVER */ |
1177 | if (!value || !*value || (*value == ' ')) { | 1178 | if (!value || !*value || (*value == ' ')) { |
1178 | cFYI(1, ("empty server netbiosname specified")); | 1179 | cFYI(1, "empty server netbiosname specified"); |
1179 | } else { | 1180 | } else { |
1180 | /* last byte, type, is 0x20 for servr type */ | 1181 | /* last byte, type, is 0x20 for servr type */ |
1181 | memset(vol->target_rfc1001_name, 0x20, 16); | 1182 | memset(vol->target_rfc1001_name, 0x20, 16); |
@@ -1434,7 +1435,7 @@ cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port) | |||
1434 | 1435 | ||
1435 | ++server->srv_count; | 1436 | ++server->srv_count; |
1436 | write_unlock(&cifs_tcp_ses_lock); | 1437 | write_unlock(&cifs_tcp_ses_lock); |
1437 | cFYI(1, ("Existing tcp session with server found")); | 1438 | cFYI(1, "Existing tcp session with server found"); |
1438 | return server; | 1439 | return server; |
1439 | } | 1440 | } |
1440 | write_unlock(&cifs_tcp_ses_lock); | 1441 | write_unlock(&cifs_tcp_ses_lock); |
@@ -1475,7 +1476,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1475 | 1476 | ||
1476 | memset(&addr, 0, sizeof(struct sockaddr_storage)); | 1477 | memset(&addr, 0, sizeof(struct sockaddr_storage)); |
1477 | 1478 | ||
1478 | cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip)); | 1479 | cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip); |
1479 | 1480 | ||
1480 | if (volume_info->UNCip && volume_info->UNC) { | 1481 | if (volume_info->UNCip && volume_info->UNC) { |
1481 | rc = cifs_convert_address(volume_info->UNCip, &addr); | 1482 | rc = cifs_convert_address(volume_info->UNCip, &addr); |
@@ -1487,13 +1488,12 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1487 | } else if (volume_info->UNCip) { | 1488 | } else if (volume_info->UNCip) { |
1488 | /* BB using ip addr as tcp_ses name to connect to the | 1489 | /* BB using ip addr as tcp_ses name to connect to the |
1489 | DFS root below */ | 1490 | DFS root below */ |
1490 | cERROR(1, ("Connecting to DFS root not implemented yet")); | 1491 | cERROR(1, "Connecting to DFS root not implemented yet"); |
1491 | rc = -EINVAL; | 1492 | rc = -EINVAL; |
1492 | goto out_err; | 1493 | goto out_err; |
1493 | } else /* which tcp_sess DFS root would we connect to */ { | 1494 | } else /* which tcp_sess DFS root would we connect to */ { |
1494 | cERROR(1, | 1495 | cERROR(1, "CIFS mount error: No UNC path (e.g. -o " |
1495 | ("CIFS mount error: No UNC path (e.g. -o " | 1496 | "unc=//192.168.1.100/public) specified"); |
1496 | "unc=//192.168.1.100/public) specified")); | ||
1497 | rc = -EINVAL; | 1497 | rc = -EINVAL; |
1498 | goto out_err; | 1498 | goto out_err; |
1499 | } | 1499 | } |
@@ -1540,7 +1540,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1540 | ++tcp_ses->srv_count; | 1540 | ++tcp_ses->srv_count; |
1541 | 1541 | ||
1542 | if (addr.ss_family == AF_INET6) { | 1542 | if (addr.ss_family == AF_INET6) { |
1543 | cFYI(1, ("attempting ipv6 connect")); | 1543 | cFYI(1, "attempting ipv6 connect"); |
1544 | /* BB should we allow ipv6 on port 139? */ | 1544 | /* BB should we allow ipv6 on port 139? */ |
1545 | /* other OS never observed in Wild doing 139 with v6 */ | 1545 | /* other OS never observed in Wild doing 139 with v6 */ |
1546 | sin_server6->sin6_port = htons(volume_info->port); | 1546 | sin_server6->sin6_port = htons(volume_info->port); |
@@ -1554,7 +1554,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1554 | rc = ipv4_connect(tcp_ses); | 1554 | rc = ipv4_connect(tcp_ses); |
1555 | } | 1555 | } |
1556 | if (rc < 0) { | 1556 | if (rc < 0) { |
1557 | cERROR(1, ("Error connecting to socket. Aborting operation")); | 1557 | cERROR(1, "Error connecting to socket. Aborting operation"); |
1558 | goto out_err; | 1558 | goto out_err; |
1559 | } | 1559 | } |
1560 | 1560 | ||
@@ -1567,7 +1567,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1567 | tcp_ses, "cifsd"); | 1567 | tcp_ses, "cifsd"); |
1568 | if (IS_ERR(tcp_ses->tsk)) { | 1568 | if (IS_ERR(tcp_ses->tsk)) { |
1569 | rc = PTR_ERR(tcp_ses->tsk); | 1569 | rc = PTR_ERR(tcp_ses->tsk); |
1570 | cERROR(1, ("error %d create cifsd thread", rc)); | 1570 | cERROR(1, "error %d create cifsd thread", rc); |
1571 | module_put(THIS_MODULE); | 1571 | module_put(THIS_MODULE); |
1572 | goto out_err; | 1572 | goto out_err; |
1573 | } | 1573 | } |
@@ -1616,6 +1616,7 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) | |||
1616 | int xid; | 1616 | int xid; |
1617 | struct TCP_Server_Info *server = ses->server; | 1617 | struct TCP_Server_Info *server = ses->server; |
1618 | 1618 | ||
1619 | cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count); | ||
1619 | write_lock(&cifs_tcp_ses_lock); | 1620 | write_lock(&cifs_tcp_ses_lock); |
1620 | if (--ses->ses_count > 0) { | 1621 | if (--ses->ses_count > 0) { |
1621 | write_unlock(&cifs_tcp_ses_lock); | 1622 | write_unlock(&cifs_tcp_ses_lock); |
@@ -1634,6 +1635,102 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) | |||
1634 | cifs_put_tcp_session(server); | 1635 | cifs_put_tcp_session(server); |
1635 | } | 1636 | } |
1636 | 1637 | ||
1638 | static struct cifsSesInfo * | ||
1639 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | ||
1640 | { | ||
1641 | int rc = -ENOMEM, xid; | ||
1642 | struct cifsSesInfo *ses; | ||
1643 | |||
1644 | xid = GetXid(); | ||
1645 | |||
1646 | ses = cifs_find_smb_ses(server, volume_info->username); | ||
1647 | if (ses) { | ||
1648 | cFYI(1, "Existing smb sess found (status=%d)", ses->status); | ||
1649 | |||
1650 | /* existing SMB ses has a server reference already */ | ||
1651 | cifs_put_tcp_session(server); | ||
1652 | |||
1653 | mutex_lock(&ses->session_mutex); | ||
1654 | rc = cifs_negotiate_protocol(xid, ses); | ||
1655 | if (rc) { | ||
1656 | mutex_unlock(&ses->session_mutex); | ||
1657 | /* problem -- put our ses reference */ | ||
1658 | cifs_put_smb_ses(ses); | ||
1659 | FreeXid(xid); | ||
1660 | return ERR_PTR(rc); | ||
1661 | } | ||
1662 | if (ses->need_reconnect) { | ||
1663 | cFYI(1, "Session needs reconnect"); | ||
1664 | rc = cifs_setup_session(xid, ses, | ||
1665 | volume_info->local_nls); | ||
1666 | if (rc) { | ||
1667 | mutex_unlock(&ses->session_mutex); | ||
1668 | /* problem -- put our reference */ | ||
1669 | cifs_put_smb_ses(ses); | ||
1670 | FreeXid(xid); | ||
1671 | return ERR_PTR(rc); | ||
1672 | } | ||
1673 | } | ||
1674 | mutex_unlock(&ses->session_mutex); | ||
1675 | FreeXid(xid); | ||
1676 | return ses; | ||
1677 | } | ||
1678 | |||
1679 | cFYI(1, "Existing smb sess not found"); | ||
1680 | ses = sesInfoAlloc(); | ||
1681 | if (ses == NULL) | ||
1682 | goto get_ses_fail; | ||
1683 | |||
1684 | /* new SMB session uses our server ref */ | ||
1685 | ses->server = server; | ||
1686 | if (server->addr.sockAddr6.sin6_family == AF_INET6) | ||
1687 | sprintf(ses->serverName, "%pI6", | ||
1688 | &server->addr.sockAddr6.sin6_addr); | ||
1689 | else | ||
1690 | sprintf(ses->serverName, "%pI4", | ||
1691 | &server->addr.sockAddr.sin_addr.s_addr); | ||
1692 | |||
1693 | if (volume_info->username) | ||
1694 | strncpy(ses->userName, volume_info->username, | ||
1695 | MAX_USERNAME_SIZE); | ||
1696 | |||
1697 | /* volume_info->password freed at unmount */ | ||
1698 | if (volume_info->password) { | ||
1699 | ses->password = kstrdup(volume_info->password, GFP_KERNEL); | ||
1700 | if (!ses->password) | ||
1701 | goto get_ses_fail; | ||
1702 | } | ||
1703 | if (volume_info->domainname) { | ||
1704 | int len = strlen(volume_info->domainname); | ||
1705 | ses->domainName = kmalloc(len + 1, GFP_KERNEL); | ||
1706 | if (ses->domainName) | ||
1707 | strcpy(ses->domainName, volume_info->domainname); | ||
1708 | } | ||
1709 | ses->linux_uid = volume_info->linux_uid; | ||
1710 | ses->overrideSecFlg = volume_info->secFlg; | ||
1711 | |||
1712 | mutex_lock(&ses->session_mutex); | ||
1713 | rc = cifs_negotiate_protocol(xid, ses); | ||
1714 | if (!rc) | ||
1715 | rc = cifs_setup_session(xid, ses, volume_info->local_nls); | ||
1716 | mutex_unlock(&ses->session_mutex); | ||
1717 | if (rc) | ||
1718 | goto get_ses_fail; | ||
1719 | |||
1720 | /* success, put it on the list */ | ||
1721 | write_lock(&cifs_tcp_ses_lock); | ||
1722 | list_add(&ses->smb_ses_list, &server->smb_ses_list); | ||
1723 | write_unlock(&cifs_tcp_ses_lock); | ||
1724 | |||
1725 | FreeXid(xid); | ||
1726 | return ses; | ||
1727 | |||
1728 | get_ses_fail: | ||
1729 | sesInfoFree(ses); | ||
1730 | FreeXid(xid); | ||
1731 | return ERR_PTR(rc); | ||
1732 | } | ||
1733 | |||
1637 | static struct cifsTconInfo * | 1734 | static struct cifsTconInfo * |
1638 | cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) | 1735 | cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) |
1639 | { | 1736 | { |
@@ -1662,6 +1759,7 @@ cifs_put_tcon(struct cifsTconInfo *tcon) | |||
1662 | int xid; | 1759 | int xid; |
1663 | struct cifsSesInfo *ses = tcon->ses; | 1760 | struct cifsSesInfo *ses = tcon->ses; |
1664 | 1761 | ||
1762 | cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); | ||
1665 | write_lock(&cifs_tcp_ses_lock); | 1763 | write_lock(&cifs_tcp_ses_lock); |
1666 | if (--tcon->tc_count > 0) { | 1764 | if (--tcon->tc_count > 0) { |
1667 | write_unlock(&cifs_tcp_ses_lock); | 1765 | write_unlock(&cifs_tcp_ses_lock); |
@@ -1679,6 +1777,80 @@ cifs_put_tcon(struct cifsTconInfo *tcon) | |||
1679 | cifs_put_smb_ses(ses); | 1777 | cifs_put_smb_ses(ses); |
1680 | } | 1778 | } |
1681 | 1779 | ||
1780 | static struct cifsTconInfo * | ||
1781 | cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info) | ||
1782 | { | ||
1783 | int rc, xid; | ||
1784 | struct cifsTconInfo *tcon; | ||
1785 | |||
1786 | tcon = cifs_find_tcon(ses, volume_info->UNC); | ||
1787 | if (tcon) { | ||
1788 | cFYI(1, "Found match on UNC path"); | ||
1789 | /* existing tcon already has a reference */ | ||
1790 | cifs_put_smb_ses(ses); | ||
1791 | if (tcon->seal != volume_info->seal) | ||
1792 | cERROR(1, "transport encryption setting " | ||
1793 | "conflicts with existing tid"); | ||
1794 | return tcon; | ||
1795 | } | ||
1796 | |||
1797 | tcon = tconInfoAlloc(); | ||
1798 | if (tcon == NULL) { | ||
1799 | rc = -ENOMEM; | ||
1800 | goto out_fail; | ||
1801 | } | ||
1802 | |||
1803 | tcon->ses = ses; | ||
1804 | if (volume_info->password) { | ||
1805 | tcon->password = kstrdup(volume_info->password, GFP_KERNEL); | ||
1806 | if (!tcon->password) { | ||
1807 | rc = -ENOMEM; | ||
1808 | goto out_fail; | ||
1809 | } | ||
1810 | } | ||
1811 | |||
1812 | if (strchr(volume_info->UNC + 3, '\\') == NULL | ||
1813 | && strchr(volume_info->UNC + 3, '/') == NULL) { | ||
1814 | cERROR(1, "Missing share name"); | ||
1815 | rc = -ENODEV; | ||
1816 | goto out_fail; | ||
1817 | } | ||
1818 | |||
1819 | /* BB Do we need to wrap session_mutex around | ||
1820 | * this TCon call and Unix SetFS as | ||
1821 | * we do on SessSetup and reconnect? */ | ||
1822 | xid = GetXid(); | ||
1823 | rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls); | ||
1824 | FreeXid(xid); | ||
1825 | cFYI(1, "CIFS Tcon rc = %d", rc); | ||
1826 | if (rc) | ||
1827 | goto out_fail; | ||
1828 | |||
1829 | if (volume_info->nodfs) { | ||
1830 | tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; | ||
1831 | cFYI(1, "DFS disabled (%d)", tcon->Flags); | ||
1832 | } | ||
1833 | tcon->seal = volume_info->seal; | ||
1834 | /* we can have only one retry value for a connection | ||
1835 | to a share so for resources mounted more than once | ||
1836 | to the same server share the last value passed in | ||
1837 | for the retry flag is used */ | ||
1838 | tcon->retry = volume_info->retry; | ||
1839 | tcon->nocase = volume_info->nocase; | ||
1840 | tcon->local_lease = volume_info->local_lease; | ||
1841 | |||
1842 | write_lock(&cifs_tcp_ses_lock); | ||
1843 | list_add(&tcon->tcon_list, &ses->tcon_list); | ||
1844 | write_unlock(&cifs_tcp_ses_lock); | ||
1845 | |||
1846 | return tcon; | ||
1847 | |||
1848 | out_fail: | ||
1849 | tconInfoFree(tcon); | ||
1850 | return ERR_PTR(rc); | ||
1851 | } | ||
1852 | |||
1853 | |||
1682 | int | 1854 | int |
1683 | get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, | 1855 | get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, |
1684 | const struct nls_table *nls_codepage, unsigned int *pnum_referrals, | 1856 | const struct nls_table *nls_codepage, unsigned int *pnum_referrals, |
@@ -1703,8 +1875,7 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, | |||
1703 | strcpy(temp_unc + 2, pSesInfo->serverName); | 1875 | strcpy(temp_unc + 2, pSesInfo->serverName); |
1704 | strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$"); | 1876 | strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$"); |
1705 | rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage); | 1877 | rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage); |
1706 | cFYI(1, | 1878 | cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid); |
1707 | ("CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid)); | ||
1708 | kfree(temp_unc); | 1879 | kfree(temp_unc); |
1709 | } | 1880 | } |
1710 | if (rc == 0) | 1881 | if (rc == 0) |
@@ -1777,12 +1948,12 @@ ipv4_connect(struct TCP_Server_Info *server) | |||
1777 | rc = sock_create_kern(PF_INET, SOCK_STREAM, | 1948 | rc = sock_create_kern(PF_INET, SOCK_STREAM, |
1778 | IPPROTO_TCP, &socket); | 1949 | IPPROTO_TCP, &socket); |
1779 | if (rc < 0) { | 1950 | if (rc < 0) { |
1780 | cERROR(1, ("Error %d creating socket", rc)); | 1951 | cERROR(1, "Error %d creating socket", rc); |
1781 | return rc; | 1952 | return rc; |
1782 | } | 1953 | } |
1783 | 1954 | ||
1784 | /* BB other socket options to set KEEPALIVE, NODELAY? */ | 1955 | /* BB other socket options to set KEEPALIVE, NODELAY? */ |
1785 | cFYI(1, ("Socket created")); | 1956 | cFYI(1, "Socket created"); |
1786 | server->ssocket = socket; | 1957 | server->ssocket = socket; |
1787 | socket->sk->sk_allocation = GFP_NOFS; | 1958 | socket->sk->sk_allocation = GFP_NOFS; |
1788 | cifs_reclassify_socket4(socket); | 1959 | cifs_reclassify_socket4(socket); |
@@ -1827,7 +1998,7 @@ ipv4_connect(struct TCP_Server_Info *server) | |||
1827 | if (!connected) { | 1998 | if (!connected) { |
1828 | if (orig_port) | 1999 | if (orig_port) |
1829 | server->addr.sockAddr.sin_port = orig_port; | 2000 | server->addr.sockAddr.sin_port = orig_port; |
1830 | cFYI(1, ("Error %d connecting to server via ipv4", rc)); | 2001 | cFYI(1, "Error %d connecting to server via ipv4", rc); |
1831 | sock_release(socket); | 2002 | sock_release(socket); |
1832 | server->ssocket = NULL; | 2003 | server->ssocket = NULL; |
1833 | return rc; | 2004 | return rc; |
@@ -1855,12 +2026,12 @@ ipv4_connect(struct TCP_Server_Info *server) | |||
1855 | rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, | 2026 | rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, |
1856 | (char *)&val, sizeof(val)); | 2027 | (char *)&val, sizeof(val)); |
1857 | if (rc) | 2028 | if (rc) |
1858 | cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); | 2029 | cFYI(1, "set TCP_NODELAY socket option error %d", rc); |
1859 | } | 2030 | } |
1860 | 2031 | ||
1861 | cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", | 2032 | cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx", |
1862 | socket->sk->sk_sndbuf, | 2033 | socket->sk->sk_sndbuf, |
1863 | socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); | 2034 | socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); |
1864 | 2035 | ||
1865 | /* send RFC1001 sessinit */ | 2036 | /* send RFC1001 sessinit */ |
1866 | if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) { | 2037 | if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) { |
@@ -1938,13 +2109,13 @@ ipv6_connect(struct TCP_Server_Info *server) | |||
1938 | rc = sock_create_kern(PF_INET6, SOCK_STREAM, | 2109 | rc = sock_create_kern(PF_INET6, SOCK_STREAM, |
1939 | IPPROTO_TCP, &socket); | 2110 | IPPROTO_TCP, &socket); |
1940 | if (rc < 0) { | 2111 | if (rc < 0) { |
1941 | cERROR(1, ("Error %d creating ipv6 socket", rc)); | 2112 | cERROR(1, "Error %d creating ipv6 socket", rc); |
1942 | socket = NULL; | 2113 | socket = NULL; |
1943 | return rc; | 2114 | return rc; |
1944 | } | 2115 | } |
1945 | 2116 | ||
1946 | /* BB other socket options to set KEEPALIVE, NODELAY? */ | 2117 | /* BB other socket options to set KEEPALIVE, NODELAY? */ |
1947 | cFYI(1, ("ipv6 Socket created")); | 2118 | cFYI(1, "ipv6 Socket created"); |
1948 | server->ssocket = socket; | 2119 | server->ssocket = socket; |
1949 | socket->sk->sk_allocation = GFP_NOFS; | 2120 | socket->sk->sk_allocation = GFP_NOFS; |
1950 | cifs_reclassify_socket6(socket); | 2121 | cifs_reclassify_socket6(socket); |
@@ -1988,7 +2159,7 @@ ipv6_connect(struct TCP_Server_Info *server) | |||
1988 | if (!connected) { | 2159 | if (!connected) { |
1989 | if (orig_port) | 2160 | if (orig_port) |
1990 | server->addr.sockAddr6.sin6_port = orig_port; | 2161 | server->addr.sockAddr6.sin6_port = orig_port; |
1991 | cFYI(1, ("Error %d connecting to server via ipv6", rc)); | 2162 | cFYI(1, "Error %d connecting to server via ipv6", rc); |
1992 | sock_release(socket); | 2163 | sock_release(socket); |
1993 | server->ssocket = NULL; | 2164 | server->ssocket = NULL; |
1994 | return rc; | 2165 | return rc; |
@@ -2007,7 +2178,7 @@ ipv6_connect(struct TCP_Server_Info *server) | |||
2007 | rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, | 2178 | rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, |
2008 | (char *)&val, sizeof(val)); | 2179 | (char *)&val, sizeof(val)); |
2009 | if (rc) | 2180 | if (rc) |
2010 | cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); | 2181 | cFYI(1, "set TCP_NODELAY socket option error %d", rc); |
2011 | } | 2182 | } |
2012 | 2183 | ||
2013 | server->ssocket = socket; | 2184 | server->ssocket = socket; |
@@ -2032,13 +2203,13 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2032 | if (vol_info && vol_info->no_linux_ext) { | 2203 | if (vol_info && vol_info->no_linux_ext) { |
2033 | tcon->fsUnixInfo.Capability = 0; | 2204 | tcon->fsUnixInfo.Capability = 0; |
2034 | tcon->unix_ext = 0; /* Unix Extensions disabled */ | 2205 | tcon->unix_ext = 0; /* Unix Extensions disabled */ |
2035 | cFYI(1, ("Linux protocol extensions disabled")); | 2206 | cFYI(1, "Linux protocol extensions disabled"); |
2036 | return; | 2207 | return; |
2037 | } else if (vol_info) | 2208 | } else if (vol_info) |
2038 | tcon->unix_ext = 1; /* Unix Extensions supported */ | 2209 | tcon->unix_ext = 1; /* Unix Extensions supported */ |
2039 | 2210 | ||
2040 | if (tcon->unix_ext == 0) { | 2211 | if (tcon->unix_ext == 0) { |
2041 | cFYI(1, ("Unix extensions disabled so not set on reconnect")); | 2212 | cFYI(1, "Unix extensions disabled so not set on reconnect"); |
2042 | return; | 2213 | return; |
2043 | } | 2214 | } |
2044 | 2215 | ||
@@ -2054,12 +2225,11 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2054 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; | 2225 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; |
2055 | if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { | 2226 | if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { |
2056 | if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) | 2227 | if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) |
2057 | cERROR(1, ("POSIXPATH support change")); | 2228 | cERROR(1, "POSIXPATH support change"); |
2058 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; | 2229 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; |
2059 | } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { | 2230 | } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { |
2060 | cERROR(1, ("possible reconnect error")); | 2231 | cERROR(1, "possible reconnect error"); |
2061 | cERROR(1, | 2232 | cERROR(1, "server disabled POSIX path support"); |
2062 | ("server disabled POSIX path support")); | ||
2063 | } | 2233 | } |
2064 | } | 2234 | } |
2065 | 2235 | ||
@@ -2067,7 +2237,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2067 | if (vol_info && vol_info->no_psx_acl) | 2237 | if (vol_info && vol_info->no_psx_acl) |
2068 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; | 2238 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; |
2069 | else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { | 2239 | else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { |
2070 | cFYI(1, ("negotiated posix acl support")); | 2240 | cFYI(1, "negotiated posix acl support"); |
2071 | if (sb) | 2241 | if (sb) |
2072 | sb->s_flags |= MS_POSIXACL; | 2242 | sb->s_flags |= MS_POSIXACL; |
2073 | } | 2243 | } |
@@ -2075,7 +2245,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2075 | if (vol_info && vol_info->posix_paths == 0) | 2245 | if (vol_info && vol_info->posix_paths == 0) |
2076 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; | 2246 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; |
2077 | else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { | 2247 | else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { |
2078 | cFYI(1, ("negotiate posix pathnames")); | 2248 | cFYI(1, "negotiate posix pathnames"); |
2079 | if (sb) | 2249 | if (sb) |
2080 | CIFS_SB(sb)->mnt_cifs_flags |= | 2250 | CIFS_SB(sb)->mnt_cifs_flags |= |
2081 | CIFS_MOUNT_POSIX_PATHS; | 2251 | CIFS_MOUNT_POSIX_PATHS; |
@@ -2090,39 +2260,38 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2090 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { | 2260 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { |
2091 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { | 2261 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { |
2092 | CIFS_SB(sb)->rsize = 127 * 1024; | 2262 | CIFS_SB(sb)->rsize = 127 * 1024; |
2093 | cFYI(DBG2, | 2263 | cFYI(DBG2, "larger reads not supported by srv"); |
2094 | ("larger reads not supported by srv")); | ||
2095 | } | 2264 | } |
2096 | } | 2265 | } |
2097 | 2266 | ||
2098 | 2267 | ||
2099 | cFYI(1, ("Negotiate caps 0x%x", (int)cap)); | 2268 | cFYI(1, "Negotiate caps 0x%x", (int)cap); |
2100 | #ifdef CONFIG_CIFS_DEBUG2 | 2269 | #ifdef CONFIG_CIFS_DEBUG2 |
2101 | if (cap & CIFS_UNIX_FCNTL_CAP) | 2270 | if (cap & CIFS_UNIX_FCNTL_CAP) |
2102 | cFYI(1, ("FCNTL cap")); | 2271 | cFYI(1, "FCNTL cap"); |
2103 | if (cap & CIFS_UNIX_EXTATTR_CAP) | 2272 | if (cap & CIFS_UNIX_EXTATTR_CAP) |
2104 | cFYI(1, ("EXTATTR cap")); | 2273 | cFYI(1, "EXTATTR cap"); |
2105 | if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) | 2274 | if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) |
2106 | cFYI(1, ("POSIX path cap")); | 2275 | cFYI(1, "POSIX path cap"); |
2107 | if (cap & CIFS_UNIX_XATTR_CAP) | 2276 | if (cap & CIFS_UNIX_XATTR_CAP) |
2108 | cFYI(1, ("XATTR cap")); | 2277 | cFYI(1, "XATTR cap"); |
2109 | if (cap & CIFS_UNIX_POSIX_ACL_CAP) | 2278 | if (cap & CIFS_UNIX_POSIX_ACL_CAP) |
2110 | cFYI(1, ("POSIX ACL cap")); | 2279 | cFYI(1, "POSIX ACL cap"); |
2111 | if (cap & CIFS_UNIX_LARGE_READ_CAP) | 2280 | if (cap & CIFS_UNIX_LARGE_READ_CAP) |
2112 | cFYI(1, ("very large read cap")); | 2281 | cFYI(1, "very large read cap"); |
2113 | if (cap & CIFS_UNIX_LARGE_WRITE_CAP) | 2282 | if (cap & CIFS_UNIX_LARGE_WRITE_CAP) |
2114 | cFYI(1, ("very large write cap")); | 2283 | cFYI(1, "very large write cap"); |
2115 | #endif /* CIFS_DEBUG2 */ | 2284 | #endif /* CIFS_DEBUG2 */ |
2116 | if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { | 2285 | if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { |
2117 | if (vol_info == NULL) { | 2286 | if (vol_info == NULL) { |
2118 | cFYI(1, ("resetting capabilities failed")); | 2287 | cFYI(1, "resetting capabilities failed"); |
2119 | } else | 2288 | } else |
2120 | cERROR(1, ("Negotiating Unix capabilities " | 2289 | cERROR(1, "Negotiating Unix capabilities " |
2121 | "with the server failed. Consider " | 2290 | "with the server failed. Consider " |
2122 | "mounting with the Unix Extensions\n" | 2291 | "mounting with the Unix Extensions\n" |
2123 | "disabled, if problems are found, " | 2292 | "disabled, if problems are found, " |
2124 | "by specifying the nounix mount " | 2293 | "by specifying the nounix mount " |
2125 | "option.")); | 2294 | "option."); |
2126 | 2295 | ||
2127 | } | 2296 | } |
2128 | } | 2297 | } |
@@ -2152,8 +2321,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2152 | struct cifs_sb_info *cifs_sb) | 2321 | struct cifs_sb_info *cifs_sb) |
2153 | { | 2322 | { |
2154 | if (pvolume_info->rsize > CIFSMaxBufSize) { | 2323 | if (pvolume_info->rsize > CIFSMaxBufSize) { |
2155 | cERROR(1, ("rsize %d too large, using MaxBufSize", | 2324 | cERROR(1, "rsize %d too large, using MaxBufSize", |
2156 | pvolume_info->rsize)); | 2325 | pvolume_info->rsize); |
2157 | cifs_sb->rsize = CIFSMaxBufSize; | 2326 | cifs_sb->rsize = CIFSMaxBufSize; |
2158 | } else if ((pvolume_info->rsize) && | 2327 | } else if ((pvolume_info->rsize) && |
2159 | (pvolume_info->rsize <= CIFSMaxBufSize)) | 2328 | (pvolume_info->rsize <= CIFSMaxBufSize)) |
@@ -2162,8 +2331,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2162 | cifs_sb->rsize = CIFSMaxBufSize; | 2331 | cifs_sb->rsize = CIFSMaxBufSize; |
2163 | 2332 | ||
2164 | if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { | 2333 | if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { |
2165 | cERROR(1, ("wsize %d too large, using 4096 instead", | 2334 | cERROR(1, "wsize %d too large, using 4096 instead", |
2166 | pvolume_info->wsize)); | 2335 | pvolume_info->wsize); |
2167 | cifs_sb->wsize = 4096; | 2336 | cifs_sb->wsize = 4096; |
2168 | } else if (pvolume_info->wsize) | 2337 | } else if (pvolume_info->wsize) |
2169 | cifs_sb->wsize = pvolume_info->wsize; | 2338 | cifs_sb->wsize = pvolume_info->wsize; |
@@ -2181,7 +2350,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2181 | if (cifs_sb->rsize < 2048) { | 2350 | if (cifs_sb->rsize < 2048) { |
2182 | cifs_sb->rsize = 2048; | 2351 | cifs_sb->rsize = 2048; |
2183 | /* Windows ME may prefer this */ | 2352 | /* Windows ME may prefer this */ |
2184 | cFYI(1, ("readsize set to minimum: 2048")); | 2353 | cFYI(1, "readsize set to minimum: 2048"); |
2185 | } | 2354 | } |
2186 | /* calculate prepath */ | 2355 | /* calculate prepath */ |
2187 | cifs_sb->prepath = pvolume_info->prepath; | 2356 | cifs_sb->prepath = pvolume_info->prepath; |
@@ -2199,8 +2368,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2199 | cifs_sb->mnt_gid = pvolume_info->linux_gid; | 2368 | cifs_sb->mnt_gid = pvolume_info->linux_gid; |
2200 | cifs_sb->mnt_file_mode = pvolume_info->file_mode; | 2369 | cifs_sb->mnt_file_mode = pvolume_info->file_mode; |
2201 | cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; | 2370 | cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; |
2202 | cFYI(1, ("file mode: 0x%x dir mode: 0x%x", | 2371 | cFYI(1, "file mode: 0x%x dir mode: 0x%x", |
2203 | cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode)); | 2372 | cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); |
2204 | 2373 | ||
2205 | if (pvolume_info->noperm) | 2374 | if (pvolume_info->noperm) |
2206 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; | 2375 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; |
@@ -2229,13 +2398,13 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2229 | if (pvolume_info->dynperm) | 2398 | if (pvolume_info->dynperm) |
2230 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; | 2399 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; |
2231 | if (pvolume_info->direct_io) { | 2400 | if (pvolume_info->direct_io) { |
2232 | cFYI(1, ("mounting share using direct i/o")); | 2401 | cFYI(1, "mounting share using direct i/o"); |
2233 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; | 2402 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; |
2234 | } | 2403 | } |
2235 | 2404 | ||
2236 | if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) | 2405 | if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) |
2237 | cERROR(1, ("mount option dynperm ignored if cifsacl " | 2406 | cERROR(1, "mount option dynperm ignored if cifsacl " |
2238 | "mount option supported")); | 2407 | "mount option supported"); |
2239 | } | 2408 | } |
2240 | 2409 | ||
2241 | static int | 2410 | static int |
@@ -2262,7 +2431,7 @@ cleanup_volume_info(struct smb_vol **pvolume_info) | |||
2262 | { | 2431 | { |
2263 | struct smb_vol *volume_info; | 2432 | struct smb_vol *volume_info; |
2264 | 2433 | ||
2265 | if (!pvolume_info && !*pvolume_info) | 2434 | if (!pvolume_info || !*pvolume_info) |
2266 | return; | 2435 | return; |
2267 | 2436 | ||
2268 | volume_info = *pvolume_info; | 2437 | volume_info = *pvolume_info; |
@@ -2344,11 +2513,11 @@ try_mount_again: | |||
2344 | } | 2513 | } |
2345 | 2514 | ||
2346 | if (volume_info->nullauth) { | 2515 | if (volume_info->nullauth) { |
2347 | cFYI(1, ("null user")); | 2516 | cFYI(1, "null user"); |
2348 | volume_info->username = ""; | 2517 | volume_info->username = ""; |
2349 | } else if (volume_info->username) { | 2518 | } else if (volume_info->username) { |
2350 | /* BB fixme parse for domain name here */ | 2519 | /* BB fixme parse for domain name here */ |
2351 | cFYI(1, ("Username: %s", volume_info->username)); | 2520 | cFYI(1, "Username: %s", volume_info->username); |
2352 | } else { | 2521 | } else { |
2353 | cifserror("No username specified"); | 2522 | cifserror("No username specified"); |
2354 | /* In userspace mount helper we can get user name from alternate | 2523 | /* In userspace mount helper we can get user name from alternate |
@@ -2357,20 +2526,20 @@ try_mount_again: | |||
2357 | goto out; | 2526 | goto out; |
2358 | } | 2527 | } |
2359 | 2528 | ||
2360 | |||
2361 | /* this is needed for ASCII cp to Unicode converts */ | 2529 | /* this is needed for ASCII cp to Unicode converts */ |
2362 | if (volume_info->iocharset == NULL) { | 2530 | if (volume_info->iocharset == NULL) { |
2363 | cifs_sb->local_nls = load_nls_default(); | 2531 | /* load_nls_default cannot return null */ |
2364 | /* load_nls_default can not return null */ | 2532 | volume_info->local_nls = load_nls_default(); |
2365 | } else { | 2533 | } else { |
2366 | cifs_sb->local_nls = load_nls(volume_info->iocharset); | 2534 | volume_info->local_nls = load_nls(volume_info->iocharset); |
2367 | if (cifs_sb->local_nls == NULL) { | 2535 | if (volume_info->local_nls == NULL) { |
2368 | cERROR(1, ("CIFS mount error: iocharset %s not found", | 2536 | cERROR(1, "CIFS mount error: iocharset %s not found", |
2369 | volume_info->iocharset)); | 2537 | volume_info->iocharset); |
2370 | rc = -ELIBACC; | 2538 | rc = -ELIBACC; |
2371 | goto out; | 2539 | goto out; |
2372 | } | 2540 | } |
2373 | } | 2541 | } |
2542 | cifs_sb->local_nls = volume_info->local_nls; | ||
2374 | 2543 | ||
2375 | /* get a reference to a tcp session */ | 2544 | /* get a reference to a tcp session */ |
2376 | srvTcp = cifs_get_tcp_session(volume_info); | 2545 | srvTcp = cifs_get_tcp_session(volume_info); |
@@ -2379,148 +2548,30 @@ try_mount_again: | |||
2379 | goto out; | 2548 | goto out; |
2380 | } | 2549 | } |
2381 | 2550 | ||
2382 | pSesInfo = cifs_find_smb_ses(srvTcp, volume_info->username); | 2551 | /* get a reference to a SMB session */ |
2383 | if (pSesInfo) { | 2552 | pSesInfo = cifs_get_smb_ses(srvTcp, volume_info); |
2384 | cFYI(1, ("Existing smb sess found (status=%d)", | 2553 | if (IS_ERR(pSesInfo)) { |
2385 | pSesInfo->status)); | 2554 | rc = PTR_ERR(pSesInfo); |
2386 | /* | 2555 | pSesInfo = NULL; |
2387 | * The existing SMB session already has a reference to srvTcp, | 2556 | goto mount_fail_check; |
2388 | * so we can put back the extra one we got before | ||
2389 | */ | ||
2390 | cifs_put_tcp_session(srvTcp); | ||
2391 | |||
2392 | mutex_lock(&pSesInfo->session_mutex); | ||
2393 | if (pSesInfo->need_reconnect) { | ||
2394 | cFYI(1, ("Session needs reconnect")); | ||
2395 | rc = cifs_setup_session(xid, pSesInfo, | ||
2396 | cifs_sb->local_nls); | ||
2397 | } | ||
2398 | mutex_unlock(&pSesInfo->session_mutex); | ||
2399 | } else if (!rc) { | ||
2400 | cFYI(1, ("Existing smb sess not found")); | ||
2401 | pSesInfo = sesInfoAlloc(); | ||
2402 | if (pSesInfo == NULL) { | ||
2403 | rc = -ENOMEM; | ||
2404 | goto mount_fail_check; | ||
2405 | } | ||
2406 | |||
2407 | /* new SMB session uses our srvTcp ref */ | ||
2408 | pSesInfo->server = srvTcp; | ||
2409 | if (srvTcp->addr.sockAddr6.sin6_family == AF_INET6) | ||
2410 | sprintf(pSesInfo->serverName, "%pI6", | ||
2411 | &srvTcp->addr.sockAddr6.sin6_addr); | ||
2412 | else | ||
2413 | sprintf(pSesInfo->serverName, "%pI4", | ||
2414 | &srvTcp->addr.sockAddr.sin_addr.s_addr); | ||
2415 | |||
2416 | write_lock(&cifs_tcp_ses_lock); | ||
2417 | list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list); | ||
2418 | write_unlock(&cifs_tcp_ses_lock); | ||
2419 | |||
2420 | /* volume_info->password freed at unmount */ | ||
2421 | if (volume_info->password) { | ||
2422 | pSesInfo->password = kstrdup(volume_info->password, | ||
2423 | GFP_KERNEL); | ||
2424 | if (!pSesInfo->password) { | ||
2425 | rc = -ENOMEM; | ||
2426 | goto mount_fail_check; | ||
2427 | } | ||
2428 | } | ||
2429 | if (volume_info->username) | ||
2430 | strncpy(pSesInfo->userName, volume_info->username, | ||
2431 | MAX_USERNAME_SIZE); | ||
2432 | if (volume_info->domainname) { | ||
2433 | int len = strlen(volume_info->domainname); | ||
2434 | pSesInfo->domainName = kmalloc(len + 1, GFP_KERNEL); | ||
2435 | if (pSesInfo->domainName) | ||
2436 | strcpy(pSesInfo->domainName, | ||
2437 | volume_info->domainname); | ||
2438 | } | ||
2439 | pSesInfo->linux_uid = volume_info->linux_uid; | ||
2440 | pSesInfo->overrideSecFlg = volume_info->secFlg; | ||
2441 | mutex_lock(&pSesInfo->session_mutex); | ||
2442 | |||
2443 | /* BB FIXME need to pass vol->secFlgs BB */ | ||
2444 | rc = cifs_setup_session(xid, pSesInfo, | ||
2445 | cifs_sb->local_nls); | ||
2446 | mutex_unlock(&pSesInfo->session_mutex); | ||
2447 | } | 2557 | } |
2448 | 2558 | ||
2449 | /* search for existing tcon to this server share */ | 2559 | setup_cifs_sb(volume_info, cifs_sb); |
2450 | if (!rc) { | 2560 | if (pSesInfo->capabilities & CAP_LARGE_FILES) |
2451 | setup_cifs_sb(volume_info, cifs_sb); | 2561 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
2452 | 2562 | else | |
2453 | tcon = cifs_find_tcon(pSesInfo, volume_info->UNC); | 2563 | sb->s_maxbytes = MAX_NON_LFS; |
2454 | if (tcon) { | ||
2455 | cFYI(1, ("Found match on UNC path")); | ||
2456 | /* existing tcon already has a reference */ | ||
2457 | cifs_put_smb_ses(pSesInfo); | ||
2458 | if (tcon->seal != volume_info->seal) | ||
2459 | cERROR(1, ("transport encryption setting " | ||
2460 | "conflicts with existing tid")); | ||
2461 | } else { | ||
2462 | tcon = tconInfoAlloc(); | ||
2463 | if (tcon == NULL) { | ||
2464 | rc = -ENOMEM; | ||
2465 | goto mount_fail_check; | ||
2466 | } | ||
2467 | |||
2468 | tcon->ses = pSesInfo; | ||
2469 | if (volume_info->password) { | ||
2470 | tcon->password = kstrdup(volume_info->password, | ||
2471 | GFP_KERNEL); | ||
2472 | if (!tcon->password) { | ||
2473 | rc = -ENOMEM; | ||
2474 | goto mount_fail_check; | ||
2475 | } | ||
2476 | } | ||
2477 | |||
2478 | if ((strchr(volume_info->UNC + 3, '\\') == NULL) | ||
2479 | && (strchr(volume_info->UNC + 3, '/') == NULL)) { | ||
2480 | cERROR(1, ("Missing share name")); | ||
2481 | rc = -ENODEV; | ||
2482 | goto mount_fail_check; | ||
2483 | } else { | ||
2484 | /* BB Do we need to wrap sesSem around | ||
2485 | * this TCon call and Unix SetFS as | ||
2486 | * we do on SessSetup and reconnect? */ | ||
2487 | rc = CIFSTCon(xid, pSesInfo, volume_info->UNC, | ||
2488 | tcon, cifs_sb->local_nls); | ||
2489 | cFYI(1, ("CIFS Tcon rc = %d", rc)); | ||
2490 | if (volume_info->nodfs) { | ||
2491 | tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; | ||
2492 | cFYI(1, ("DFS disabled (%d)", | ||
2493 | tcon->Flags)); | ||
2494 | } | ||
2495 | } | ||
2496 | if (rc) | ||
2497 | goto remote_path_check; | ||
2498 | tcon->seal = volume_info->seal; | ||
2499 | write_lock(&cifs_tcp_ses_lock); | ||
2500 | list_add(&tcon->tcon_list, &pSesInfo->tcon_list); | ||
2501 | write_unlock(&cifs_tcp_ses_lock); | ||
2502 | } | ||
2503 | |||
2504 | /* we can have only one retry value for a connection | ||
2505 | to a share so for resources mounted more than once | ||
2506 | to the same server share the last value passed in | ||
2507 | for the retry flag is used */ | ||
2508 | tcon->retry = volume_info->retry; | ||
2509 | tcon->nocase = volume_info->nocase; | ||
2510 | tcon->local_lease = volume_info->local_lease; | ||
2511 | } | ||
2512 | if (pSesInfo) { | ||
2513 | if (pSesInfo->capabilities & CAP_LARGE_FILES) | ||
2514 | sb->s_maxbytes = MAX_LFS_FILESIZE; | ||
2515 | else | ||
2516 | sb->s_maxbytes = MAX_NON_LFS; | ||
2517 | } | ||
2518 | 2564 | ||
2519 | /* BB FIXME fix time_gran to be larger for LANMAN sessions */ | 2565 | /* BB FIXME fix time_gran to be larger for LANMAN sessions */ |
2520 | sb->s_time_gran = 100; | 2566 | sb->s_time_gran = 100; |
2521 | 2567 | ||
2522 | if (rc) | 2568 | /* search for existing tcon to this server share */ |
2569 | tcon = cifs_get_tcon(pSesInfo, volume_info); | ||
2570 | if (IS_ERR(tcon)) { | ||
2571 | rc = PTR_ERR(tcon); | ||
2572 | tcon = NULL; | ||
2523 | goto remote_path_check; | 2573 | goto remote_path_check; |
2574 | } | ||
2524 | 2575 | ||
2525 | cifs_sb->tcon = tcon; | 2576 | cifs_sb->tcon = tcon; |
2526 | 2577 | ||
@@ -2544,7 +2595,7 @@ try_mount_again: | |||
2544 | 2595 | ||
2545 | if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { | 2596 | if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { |
2546 | cifs_sb->rsize = 1024 * 127; | 2597 | cifs_sb->rsize = 1024 * 127; |
2547 | cFYI(DBG2, ("no very large read support, rsize now 127K")); | 2598 | cFYI(DBG2, "no very large read support, rsize now 127K"); |
2548 | } | 2599 | } |
2549 | if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) | 2600 | if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) |
2550 | cifs_sb->wsize = min(cifs_sb->wsize, | 2601 | cifs_sb->wsize = min(cifs_sb->wsize, |
@@ -2593,7 +2644,7 @@ remote_path_check: | |||
2593 | goto mount_fail_check; | 2644 | goto mount_fail_check; |
2594 | } | 2645 | } |
2595 | 2646 | ||
2596 | cFYI(1, ("Getting referral for: %s", full_path)); | 2647 | cFYI(1, "Getting referral for: %s", full_path); |
2597 | rc = get_dfs_path(xid, pSesInfo , full_path + 1, | 2648 | rc = get_dfs_path(xid, pSesInfo , full_path + 1, |
2598 | cifs_sb->local_nls, &num_referrals, &referrals, | 2649 | cifs_sb->local_nls, &num_referrals, &referrals, |
2599 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 2650 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
@@ -2707,7 +2758,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
2707 | by Samba (not sure whether other servers allow | 2758 | by Samba (not sure whether other servers allow |
2708 | NTLMv2 password here) */ | 2759 | NTLMv2 password here) */ |
2709 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 2760 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
2710 | if ((extended_security & CIFSSEC_MAY_LANMAN) && | 2761 | if ((global_secflags & CIFSSEC_MAY_LANMAN) && |
2711 | (ses->server->secType == LANMAN)) | 2762 | (ses->server->secType == LANMAN)) |
2712 | calc_lanman_hash(tcon->password, ses->server->cryptKey, | 2763 | calc_lanman_hash(tcon->password, ses->server->cryptKey, |
2713 | ses->server->secMode & | 2764 | ses->server->secMode & |
@@ -2778,13 +2829,13 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
2778 | if (length == 3) { | 2829 | if (length == 3) { |
2779 | if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && | 2830 | if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && |
2780 | (bcc_ptr[2] == 'C')) { | 2831 | (bcc_ptr[2] == 'C')) { |
2781 | cFYI(1, ("IPC connection")); | 2832 | cFYI(1, "IPC connection"); |
2782 | tcon->ipc = 1; | 2833 | tcon->ipc = 1; |
2783 | } | 2834 | } |
2784 | } else if (length == 2) { | 2835 | } else if (length == 2) { |
2785 | if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { | 2836 | if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { |
2786 | /* the most common case */ | 2837 | /* the most common case */ |
2787 | cFYI(1, ("disk share connection")); | 2838 | cFYI(1, "disk share connection"); |
2788 | } | 2839 | } |
2789 | } | 2840 | } |
2790 | bcc_ptr += length + 1; | 2841 | bcc_ptr += length + 1; |
@@ -2797,7 +2848,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
2797 | bytes_left, is_unicode, | 2848 | bytes_left, is_unicode, |
2798 | nls_codepage); | 2849 | nls_codepage); |
2799 | 2850 | ||
2800 | cFYI(1, ("nativeFileSystem=%s", tcon->nativeFileSystem)); | 2851 | cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem); |
2801 | 2852 | ||
2802 | if ((smb_buffer_response->WordCount == 3) || | 2853 | if ((smb_buffer_response->WordCount == 3) || |
2803 | (smb_buffer_response->WordCount == 7)) | 2854 | (smb_buffer_response->WordCount == 7)) |
@@ -2805,7 +2856,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
2805 | tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); | 2856 | tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); |
2806 | else | 2857 | else |
2807 | tcon->Flags = 0; | 2858 | tcon->Flags = 0; |
2808 | cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags)); | 2859 | cFYI(1, "Tcon flags: 0x%x ", tcon->Flags); |
2809 | } else if ((rc == 0) && tcon == NULL) { | 2860 | } else if ((rc == 0) && tcon == NULL) { |
2810 | /* all we need to save for IPC$ connection */ | 2861 | /* all we need to save for IPC$ connection */ |
2811 | ses->ipc_tid = smb_buffer_response->Tid; | 2862 | ses->ipc_tid = smb_buffer_response->Tid; |
@@ -2833,57 +2884,61 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) | |||
2833 | return rc; | 2884 | return rc; |
2834 | } | 2885 | } |
2835 | 2886 | ||
2836 | int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, | 2887 | int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) |
2837 | struct nls_table *nls_info) | ||
2838 | { | 2888 | { |
2839 | int rc = 0; | 2889 | int rc = 0; |
2840 | int first_time = 0; | 2890 | struct TCP_Server_Info *server = ses->server; |
2841 | struct TCP_Server_Info *server = pSesInfo->server; | 2891 | |
2842 | 2892 | /* only send once per connect */ | |
2843 | /* what if server changes its buffer size after dropping the session? */ | 2893 | if (server->maxBuf != 0) |
2844 | if (server->maxBuf == 0) /* no need to send on reconnect */ { | 2894 | return 0; |
2845 | rc = CIFSSMBNegotiate(xid, pSesInfo); | 2895 | |
2846 | if (rc == -EAGAIN) { | 2896 | rc = CIFSSMBNegotiate(xid, ses); |
2847 | /* retry only once on 1st time connection */ | 2897 | if (rc == -EAGAIN) { |
2848 | rc = CIFSSMBNegotiate(xid, pSesInfo); | 2898 | /* retry only once on 1st time connection */ |
2849 | if (rc == -EAGAIN) | 2899 | rc = CIFSSMBNegotiate(xid, ses); |
2850 | rc = -EHOSTDOWN; | 2900 | if (rc == -EAGAIN) |
2851 | } | 2901 | rc = -EHOSTDOWN; |
2852 | if (rc == 0) { | 2902 | } |
2853 | spin_lock(&GlobalMid_Lock); | 2903 | if (rc == 0) { |
2854 | if (server->tcpStatus != CifsExiting) | 2904 | spin_lock(&GlobalMid_Lock); |
2855 | server->tcpStatus = CifsGood; | 2905 | if (server->tcpStatus != CifsExiting) |
2856 | else | 2906 | server->tcpStatus = CifsGood; |
2857 | rc = -EHOSTDOWN; | 2907 | else |
2858 | spin_unlock(&GlobalMid_Lock); | 2908 | rc = -EHOSTDOWN; |
2909 | spin_unlock(&GlobalMid_Lock); | ||
2859 | 2910 | ||
2860 | } | ||
2861 | first_time = 1; | ||
2862 | } | 2911 | } |
2863 | 2912 | ||
2864 | if (rc) | 2913 | return rc; |
2865 | goto ss_err_exit; | 2914 | } |
2915 | |||
2916 | |||
2917 | int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, | ||
2918 | struct nls_table *nls_info) | ||
2919 | { | ||
2920 | int rc = 0; | ||
2921 | struct TCP_Server_Info *server = ses->server; | ||
2866 | 2922 | ||
2867 | pSesInfo->flags = 0; | 2923 | ses->flags = 0; |
2868 | pSesInfo->capabilities = server->capabilities; | 2924 | ses->capabilities = server->capabilities; |
2869 | if (linuxExtEnabled == 0) | 2925 | if (linuxExtEnabled == 0) |
2870 | pSesInfo->capabilities &= (~CAP_UNIX); | 2926 | ses->capabilities &= (~CAP_UNIX); |
2871 | 2927 | ||
2872 | cFYI(1, ("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", | 2928 | cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", |
2873 | server->secMode, server->capabilities, server->timeAdj)); | 2929 | server->secMode, server->capabilities, server->timeAdj); |
2874 | 2930 | ||
2875 | rc = CIFS_SessSetup(xid, pSesInfo, first_time, nls_info); | 2931 | rc = CIFS_SessSetup(xid, ses, nls_info); |
2876 | if (rc) { | 2932 | if (rc) { |
2877 | cERROR(1, ("Send error in SessSetup = %d", rc)); | 2933 | cERROR(1, "Send error in SessSetup = %d", rc); |
2878 | } else { | 2934 | } else { |
2879 | cFYI(1, ("CIFS Session Established successfully")); | 2935 | cFYI(1, "CIFS Session Established successfully"); |
2880 | spin_lock(&GlobalMid_Lock); | 2936 | spin_lock(&GlobalMid_Lock); |
2881 | pSesInfo->status = CifsGood; | 2937 | ses->status = CifsGood; |
2882 | pSesInfo->need_reconnect = false; | 2938 | ses->need_reconnect = false; |
2883 | spin_unlock(&GlobalMid_Lock); | 2939 | spin_unlock(&GlobalMid_Lock); |
2884 | } | 2940 | } |
2885 | 2941 | ||
2886 | ss_err_exit: | ||
2887 | return rc; | 2942 | return rc; |
2888 | } | 2943 | } |
2889 | 2944 | ||
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index e9f7ecc2714b..391816b461ca 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -73,7 +73,7 @@ cifs_bp_rename_retry: | |||
73 | namelen += (1 + temp->d_name.len); | 73 | namelen += (1 + temp->d_name.len); |
74 | temp = temp->d_parent; | 74 | temp = temp->d_parent; |
75 | if (temp == NULL) { | 75 | if (temp == NULL) { |
76 | cERROR(1, ("corrupt dentry")); | 76 | cERROR(1, "corrupt dentry"); |
77 | return NULL; | 77 | return NULL; |
78 | } | 78 | } |
79 | } | 79 | } |
@@ -90,19 +90,18 @@ cifs_bp_rename_retry: | |||
90 | full_path[namelen] = dirsep; | 90 | full_path[namelen] = dirsep; |
91 | strncpy(full_path + namelen + 1, temp->d_name.name, | 91 | strncpy(full_path + namelen + 1, temp->d_name.name, |
92 | temp->d_name.len); | 92 | temp->d_name.len); |
93 | cFYI(0, ("name: %s", full_path + namelen)); | 93 | cFYI(0, "name: %s", full_path + namelen); |
94 | } | 94 | } |
95 | temp = temp->d_parent; | 95 | temp = temp->d_parent; |
96 | if (temp == NULL) { | 96 | if (temp == NULL) { |
97 | cERROR(1, ("corrupt dentry")); | 97 | cERROR(1, "corrupt dentry"); |
98 | kfree(full_path); | 98 | kfree(full_path); |
99 | return NULL; | 99 | return NULL; |
100 | } | 100 | } |
101 | } | 101 | } |
102 | if (namelen != pplen + dfsplen) { | 102 | if (namelen != pplen + dfsplen) { |
103 | cERROR(1, | 103 | cERROR(1, "did not end path lookup where expected namelen is %d", |
104 | ("did not end path lookup where expected namelen is %d", | 104 | namelen); |
105 | namelen)); | ||
106 | /* presumably this is only possible if racing with a rename | 105 | /* presumably this is only possible if racing with a rename |
107 | of one of the parent directories (we can not lock the dentries | 106 | of one of the parent directories (we can not lock the dentries |
108 | above us to prevent this, but retrying should be harmless) */ | 107 | above us to prevent this, but retrying should be harmless) */ |
@@ -130,6 +129,12 @@ cifs_bp_rename_retry: | |||
130 | return full_path; | 129 | return full_path; |
131 | } | 130 | } |
132 | 131 | ||
132 | /* | ||
133 | * When called with struct file pointer set to NULL, there is no way we could | ||
134 | * update file->private_data, but getting it stuck on openFileList provides a | ||
135 | * way to access it from cifs_fill_filedata and thereby set file->private_data | ||
136 | * from cifs_open. | ||
137 | */ | ||
133 | struct cifsFileInfo * | 138 | struct cifsFileInfo * |
134 | cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, | 139 | cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, |
135 | struct file *file, struct vfsmount *mnt, unsigned int oflags) | 140 | struct file *file, struct vfsmount *mnt, unsigned int oflags) |
@@ -173,7 +178,7 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, | |||
173 | if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { | 178 | if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { |
174 | pCifsInode->clientCanCacheAll = true; | 179 | pCifsInode->clientCanCacheAll = true; |
175 | pCifsInode->clientCanCacheRead = true; | 180 | pCifsInode->clientCanCacheRead = true; |
176 | cFYI(1, ("Exclusive Oplock inode %p", newinode)); | 181 | cFYI(1, "Exclusive Oplock inode %p", newinode); |
177 | } else if ((oplock & 0xF) == OPLOCK_READ) | 182 | } else if ((oplock & 0xF) == OPLOCK_READ) |
178 | pCifsInode->clientCanCacheRead = true; | 183 | pCifsInode->clientCanCacheRead = true; |
179 | } | 184 | } |
@@ -183,16 +188,17 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, | |||
183 | } | 188 | } |
184 | 189 | ||
185 | int cifs_posix_open(char *full_path, struct inode **pinode, | 190 | int cifs_posix_open(char *full_path, struct inode **pinode, |
186 | struct vfsmount *mnt, int mode, int oflags, | 191 | struct vfsmount *mnt, struct super_block *sb, |
187 | __u32 *poplock, __u16 *pnetfid, int xid) | 192 | int mode, int oflags, |
193 | __u32 *poplock, __u16 *pnetfid, int xid) | ||
188 | { | 194 | { |
189 | int rc; | 195 | int rc; |
190 | FILE_UNIX_BASIC_INFO *presp_data; | 196 | FILE_UNIX_BASIC_INFO *presp_data; |
191 | __u32 posix_flags = 0; | 197 | __u32 posix_flags = 0; |
192 | struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb); | 198 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
193 | struct cifs_fattr fattr; | 199 | struct cifs_fattr fattr; |
194 | 200 | ||
195 | cFYI(1, ("posix open %s", full_path)); | 201 | cFYI(1, "posix open %s", full_path); |
196 | 202 | ||
197 | presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); | 203 | presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); |
198 | if (presp_data == NULL) | 204 | if (presp_data == NULL) |
@@ -242,7 +248,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode, | |||
242 | 248 | ||
243 | /* get new inode and set it up */ | 249 | /* get new inode and set it up */ |
244 | if (*pinode == NULL) { | 250 | if (*pinode == NULL) { |
245 | *pinode = cifs_iget(mnt->mnt_sb, &fattr); | 251 | cifs_fill_uniqueid(sb, &fattr); |
252 | *pinode = cifs_iget(sb, &fattr); | ||
246 | if (!*pinode) { | 253 | if (!*pinode) { |
247 | rc = -ENOMEM; | 254 | rc = -ENOMEM; |
248 | goto posix_open_ret; | 255 | goto posix_open_ret; |
@@ -251,7 +258,18 @@ int cifs_posix_open(char *full_path, struct inode **pinode, | |||
251 | cifs_fattr_to_inode(*pinode, &fattr); | 258 | cifs_fattr_to_inode(*pinode, &fattr); |
252 | } | 259 | } |
253 | 260 | ||
254 | cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); | 261 | /* |
262 | * cifs_fill_filedata() takes care of setting cifsFileInfo pointer to | ||
263 | * file->private_data. | ||
264 | */ | ||
265 | if (mnt) { | ||
266 | struct cifsFileInfo *pfile_info; | ||
267 | |||
268 | pfile_info = cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, | ||
269 | oflags); | ||
270 | if (pfile_info == NULL) | ||
271 | rc = -ENOMEM; | ||
272 | } | ||
255 | 273 | ||
256 | posix_open_ret: | 274 | posix_open_ret: |
257 | kfree(presp_data); | 275 | kfree(presp_data); |
@@ -315,13 +333,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
315 | if (nd && (nd->flags & LOOKUP_OPEN)) | 333 | if (nd && (nd->flags & LOOKUP_OPEN)) |
316 | oflags = nd->intent.open.flags; | 334 | oflags = nd->intent.open.flags; |
317 | else | 335 | else |
318 | oflags = FMODE_READ; | 336 | oflags = FMODE_READ | SMB_O_CREAT; |
319 | 337 | ||
320 | if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && | 338 | if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && |
321 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & | 339 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & |
322 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { | 340 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { |
323 | rc = cifs_posix_open(full_path, &newinode, nd->path.mnt, | 341 | rc = cifs_posix_open(full_path, &newinode, |
324 | mode, oflags, &oplock, &fileHandle, xid); | 342 | nd ? nd->path.mnt : NULL, |
343 | inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); | ||
325 | /* EIO could indicate that (posix open) operation is not | 344 | /* EIO could indicate that (posix open) operation is not |
326 | supported, despite what server claimed in capability | 345 | supported, despite what server claimed in capability |
327 | negotiation. EREMOTE indicates DFS junction, which is not | 346 | negotiation. EREMOTE indicates DFS junction, which is not |
@@ -358,7 +377,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
358 | else if ((oflags & O_CREAT) == O_CREAT) | 377 | else if ((oflags & O_CREAT) == O_CREAT) |
359 | disposition = FILE_OPEN_IF; | 378 | disposition = FILE_OPEN_IF; |
360 | else | 379 | else |
361 | cFYI(1, ("Create flag not set in create function")); | 380 | cFYI(1, "Create flag not set in create function"); |
362 | } | 381 | } |
363 | 382 | ||
364 | /* BB add processing to set equivalent of mode - e.g. via CreateX with | 383 | /* BB add processing to set equivalent of mode - e.g. via CreateX with |
@@ -394,7 +413,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
394 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 413 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
395 | } | 414 | } |
396 | if (rc) { | 415 | if (rc) { |
397 | cFYI(1, ("cifs_create returned 0x%x", rc)); | 416 | cFYI(1, "cifs_create returned 0x%x", rc); |
398 | goto cifs_create_out; | 417 | goto cifs_create_out; |
399 | } | 418 | } |
400 | 419 | ||
@@ -457,15 +476,22 @@ cifs_create_set_dentry: | |||
457 | if (rc == 0) | 476 | if (rc == 0) |
458 | setup_cifs_dentry(tcon, direntry, newinode); | 477 | setup_cifs_dentry(tcon, direntry, newinode); |
459 | else | 478 | else |
460 | cFYI(1, ("Create worked, get_inode_info failed rc = %d", rc)); | 479 | cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); |
461 | 480 | ||
462 | /* nfsd case - nfs srv does not set nd */ | 481 | /* nfsd case - nfs srv does not set nd */ |
463 | if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) { | 482 | if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) { |
464 | /* mknod case - do not leave file open */ | 483 | /* mknod case - do not leave file open */ |
465 | CIFSSMBClose(xid, tcon, fileHandle); | 484 | CIFSSMBClose(xid, tcon, fileHandle); |
466 | } else if (!(posix_create) && (newinode)) { | 485 | } else if (!(posix_create) && (newinode)) { |
467 | cifs_new_fileinfo(newinode, fileHandle, NULL, | 486 | struct cifsFileInfo *pfile_info; |
468 | nd->path.mnt, oflags); | 487 | /* |
488 | * cifs_fill_filedata() takes care of setting cifsFileInfo | ||
489 | * pointer to file->private_data. | ||
490 | */ | ||
491 | pfile_info = cifs_new_fileinfo(newinode, fileHandle, NULL, | ||
492 | nd->path.mnt, oflags); | ||
493 | if (pfile_info == NULL) | ||
494 | rc = -ENOMEM; | ||
469 | } | 495 | } |
470 | cifs_create_out: | 496 | cifs_create_out: |
471 | kfree(buf); | 497 | kfree(buf); |
@@ -531,7 +557,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
531 | u16 fileHandle; | 557 | u16 fileHandle; |
532 | FILE_ALL_INFO *buf; | 558 | FILE_ALL_INFO *buf; |
533 | 559 | ||
534 | cFYI(1, ("sfu compat create special file")); | 560 | cFYI(1, "sfu compat create special file"); |
535 | 561 | ||
536 | buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); | 562 | buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); |
537 | if (buf == NULL) { | 563 | if (buf == NULL) { |
@@ -616,8 +642,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
616 | 642 | ||
617 | xid = GetXid(); | 643 | xid = GetXid(); |
618 | 644 | ||
619 | cFYI(1, ("parent inode = 0x%p name is: %s and dentry = 0x%p", | 645 | cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p", |
620 | parent_dir_inode, direntry->d_name.name, direntry)); | 646 | parent_dir_inode, direntry->d_name.name, direntry); |
621 | 647 | ||
622 | /* check whether path exists */ | 648 | /* check whether path exists */ |
623 | 649 | ||
@@ -632,7 +658,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
632 | int i; | 658 | int i; |
633 | for (i = 0; i < direntry->d_name.len; i++) | 659 | for (i = 0; i < direntry->d_name.len; i++) |
634 | if (direntry->d_name.name[i] == '\\') { | 660 | if (direntry->d_name.name[i] == '\\') { |
635 | cFYI(1, ("Invalid file name")); | 661 | cFYI(1, "Invalid file name"); |
636 | FreeXid(xid); | 662 | FreeXid(xid); |
637 | return ERR_PTR(-EINVAL); | 663 | return ERR_PTR(-EINVAL); |
638 | } | 664 | } |
@@ -657,11 +683,11 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
657 | } | 683 | } |
658 | 684 | ||
659 | if (direntry->d_inode != NULL) { | 685 | if (direntry->d_inode != NULL) { |
660 | cFYI(1, ("non-NULL inode in lookup")); | 686 | cFYI(1, "non-NULL inode in lookup"); |
661 | } else { | 687 | } else { |
662 | cFYI(1, ("NULL inode in lookup")); | 688 | cFYI(1, "NULL inode in lookup"); |
663 | } | 689 | } |
664 | cFYI(1, ("Full path: %s inode = 0x%p", full_path, direntry->d_inode)); | 690 | cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode); |
665 | 691 | ||
666 | /* Posix open is only called (at lookup time) for file create now. | 692 | /* Posix open is only called (at lookup time) for file create now. |
667 | * For opens (rather than creates), because we do not know if it | 693 | * For opens (rather than creates), because we do not know if it |
@@ -678,6 +704,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
678 | (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && | 704 | (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && |
679 | (nd->intent.open.flags & O_CREAT)) { | 705 | (nd->intent.open.flags & O_CREAT)) { |
680 | rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, | 706 | rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, |
707 | parent_dir_inode->i_sb, | ||
681 | nd->intent.open.create_mode, | 708 | nd->intent.open.create_mode, |
682 | nd->intent.open.flags, &oplock, | 709 | nd->intent.open.flags, &oplock, |
683 | &fileHandle, xid); | 710 | &fileHandle, xid); |
@@ -723,7 +750,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
723 | /* if it was once a directory (but how can we tell?) we could do | 750 | /* if it was once a directory (but how can we tell?) we could do |
724 | shrink_dcache_parent(direntry); */ | 751 | shrink_dcache_parent(direntry); */ |
725 | } else if (rc != -EACCES) { | 752 | } else if (rc != -EACCES) { |
726 | cERROR(1, ("Unexpected lookup error %d", rc)); | 753 | cERROR(1, "Unexpected lookup error %d", rc); |
727 | /* We special case check for Access Denied - since that | 754 | /* We special case check for Access Denied - since that |
728 | is a common return code */ | 755 | is a common return code */ |
729 | } | 756 | } |
@@ -742,8 +769,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) | |||
742 | if (cifs_revalidate_dentry(direntry)) | 769 | if (cifs_revalidate_dentry(direntry)) |
743 | return 0; | 770 | return 0; |
744 | } else { | 771 | } else { |
745 | cFYI(1, ("neg dentry 0x%p name = %s", | 772 | cFYI(1, "neg dentry 0x%p name = %s", |
746 | direntry, direntry->d_name.name)); | 773 | direntry, direntry->d_name.name); |
747 | if (time_after(jiffies, direntry->d_time + HZ) || | 774 | if (time_after(jiffies, direntry->d_time + HZ) || |
748 | !lookupCacheEnabled) { | 775 | !lookupCacheEnabled) { |
749 | d_drop(direntry); | 776 | d_drop(direntry); |
@@ -758,7 +785,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) | |||
758 | { | 785 | { |
759 | int rc = 0; | 786 | int rc = 0; |
760 | 787 | ||
761 | cFYI(1, ("In cifs d_delete, name = %s", direntry->d_name.name)); | 788 | cFYI(1, "In cifs d_delete, name = %s", direntry->d_name.name); |
762 | 789 | ||
763 | return rc; | 790 | return rc; |
764 | } */ | 791 | } */ |
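
The bulk of this patch is a mechanical conversion of the cifs debug calls: cFYI(1, ("fmt", args)) becomes cFYI(1, "fmt", args), and likewise for cERROR. Dropping the inner parentheses only works if the macros themselves take a variable argument list. A minimal userspace sketch of that macro style, assuming simple stderr-based definitions rather than the kernel's cifs_debug.h:

/* Variadic debug macros in the style the converted call sites expect.
 * The names match cFYI/cERROR, but these definitions are illustrative,
 * not the ones in fs/cifs/cifs_debug.h. */
#include <stdio.h>

#define CIFS_DEBUG 1

#define cFYI(set, fmt, ...) \
	do { \
		if ((set) && CIFS_DEBUG) \
			fprintf(stderr, "fyi: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

#define cERROR(set, fmt, ...) \
	do { \
		if (set) \
			fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	int rc = -22;

	cFYI(1, "posix open succeeded");             /* no extra arguments */
	cERROR(1, "Unexpected lookup error %d", rc); /* printf-style arguments */
	return 0;
}

With variadic macros the call sites read like ordinary printk()-style calls, which is the shape most of the hunks above and below convert to.
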
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c index 6f8a0e3fb25b..4db2c5e7283f 100644 --- a/fs/cifs/dns_resolve.c +++ b/fs/cifs/dns_resolve.c | |||
@@ -106,14 +106,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | |||
106 | /* search for server name delimiter */ | 106 | /* search for server name delimiter */ |
107 | len = strlen(unc); | 107 | len = strlen(unc); |
108 | if (len < 3) { | 108 | if (len < 3) { |
109 | cFYI(1, ("%s: unc is too short: %s", __func__, unc)); | 109 | cFYI(1, "%s: unc is too short: %s", __func__, unc); |
110 | return -EINVAL; | 110 | return -EINVAL; |
111 | } | 111 | } |
112 | len -= 2; | 112 | len -= 2; |
113 | name = memchr(unc+2, '\\', len); | 113 | name = memchr(unc+2, '\\', len); |
114 | if (!name) { | 114 | if (!name) { |
115 | cFYI(1, ("%s: probably server name is whole unc: %s", | 115 | cFYI(1, "%s: probably server name is whole unc: %s", |
116 | __func__, unc)); | 116 | __func__, unc); |
117 | } else { | 117 | } else { |
118 | len = (name - unc) - 2/* leading // */; | 118 | len = (name - unc) - 2/* leading // */; |
119 | } | 119 | } |
@@ -127,8 +127,8 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | |||
127 | name[len] = 0; | 127 | name[len] = 0; |
128 | 128 | ||
129 | if (is_ip(name)) { | 129 | if (is_ip(name)) { |
130 | cFYI(1, ("%s: it is IP, skipping dns upcall: %s", | 130 | cFYI(1, "%s: it is IP, skipping dns upcall: %s", |
131 | __func__, name)); | 131 | __func__, name); |
132 | data = name; | 132 | data = name; |
133 | goto skip_upcall; | 133 | goto skip_upcall; |
134 | } | 134 | } |
@@ -138,7 +138,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | |||
138 | len = rkey->type_data.x[0]; | 138 | len = rkey->type_data.x[0]; |
139 | data = rkey->payload.data; | 139 | data = rkey->payload.data; |
140 | } else { | 140 | } else { |
141 | cERROR(1, ("%s: unable to resolve: %s", __func__, name)); | 141 | cERROR(1, "%s: unable to resolve: %s", __func__, name); |
142 | goto out; | 142 | goto out; |
143 | } | 143 | } |
144 | 144 | ||
@@ -148,10 +148,10 @@ skip_upcall: | |||
148 | if (*ip_addr) { | 148 | if (*ip_addr) { |
149 | memcpy(*ip_addr, data, len + 1); | 149 | memcpy(*ip_addr, data, len + 1); |
150 | if (!IS_ERR(rkey)) | 150 | if (!IS_ERR(rkey)) |
151 | cFYI(1, ("%s: resolved: %s to %s", __func__, | 151 | cFYI(1, "%s: resolved: %s to %s", __func__, |
152 | name, | 152 | name, |
153 | *ip_addr | 153 | *ip_addr |
154 | )); | 154 | ); |
155 | rc = 0; | 155 | rc = 0; |
156 | } else { | 156 | } else { |
157 | rc = -ENOMEM; | 157 | rc = -ENOMEM; |
diff --git a/fs/cifs/export.c b/fs/cifs/export.c index 6177f7cca16a..993f82045bf6 100644 --- a/fs/cifs/export.c +++ b/fs/cifs/export.c | |||
@@ -49,7 +49,7 @@ | |||
49 | static struct dentry *cifs_get_parent(struct dentry *dentry) | 49 | static struct dentry *cifs_get_parent(struct dentry *dentry) |
50 | { | 50 | { |
51 | /* BB need to add code here eventually to enable export via NFSD */ | 51 | /* BB need to add code here eventually to enable export via NFSD */ |
52 | cFYI(1, ("get parent for %p", dentry)); | 52 | cFYI(1, "get parent for %p", dentry); |
53 | return ERR_PTR(-EACCES); | 53 | return ERR_PTR(-EACCES); |
54 | } | 54 | } |
55 | 55 | ||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 9b11a8f56f3a..a83541ec9713 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * vfs operations that deal with files | 4 | * vfs operations that deal with files |
5 | * | 5 | * |
6 | * Copyright (C) International Business Machines Corp., 2002,2007 | 6 | * Copyright (C) International Business Machines Corp., 2002,2010 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) | 7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * Jeremy Allison (jra@samba.org) | 8 | * Jeremy Allison (jra@samba.org) |
9 | * | 9 | * |
@@ -108,8 +108,7 @@ static inline int cifs_get_disposition(unsigned int flags) | |||
108 | /* all arguments to this function must be checked for validity in caller */ | 108 | /* all arguments to this function must be checked for validity in caller */ |
109 | static inline int | 109 | static inline int |
110 | cifs_posix_open_inode_helper(struct inode *inode, struct file *file, | 110 | cifs_posix_open_inode_helper(struct inode *inode, struct file *file, |
111 | struct cifsInodeInfo *pCifsInode, | 111 | struct cifsInodeInfo *pCifsInode, __u32 oplock, |
112 | struct cifsFileInfo *pCifsFile, __u32 oplock, | ||
113 | u16 netfid) | 112 | u16 netfid) |
114 | { | 113 | { |
115 | 114 | ||
@@ -136,15 +135,15 @@ cifs_posix_open_inode_helper(struct inode *inode, struct file *file, | |||
136 | if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) && | 135 | if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) && |
137 | (file->f_path.dentry->d_inode->i_size == | 136 | (file->f_path.dentry->d_inode->i_size == |
138 | (loff_t)le64_to_cpu(buf->EndOfFile))) { | 137 | (loff_t)le64_to_cpu(buf->EndOfFile))) { |
139 | cFYI(1, ("inode unchanged on server")); | 138 | cFYI(1, "inode unchanged on server"); |
140 | } else { | 139 | } else { |
141 | if (file->f_path.dentry->d_inode->i_mapping) { | 140 | if (file->f_path.dentry->d_inode->i_mapping) { |
142 | rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping); | 141 | rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping); |
143 | if (rc != 0) | 142 | if (rc != 0) |
144 | CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; | 143 | CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; |
145 | } | 144 | } |
146 | cFYI(1, ("invalidating remote inode since open detected it " | 145 | cFYI(1, "invalidating remote inode since open detected it " |
147 | "changed")); | 146 | "changed"); |
148 | invalidate_remote_inode(file->f_path.dentry->d_inode); | 147 | invalidate_remote_inode(file->f_path.dentry->d_inode); |
149 | } */ | 148 | } */ |
150 | 149 | ||
@@ -152,8 +151,8 @@ psx_client_can_cache: | |||
152 | if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { | 151 | if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { |
153 | pCifsInode->clientCanCacheAll = true; | 152 | pCifsInode->clientCanCacheAll = true; |
154 | pCifsInode->clientCanCacheRead = true; | 153 | pCifsInode->clientCanCacheRead = true; |
155 | cFYI(1, ("Exclusive Oplock granted on inode %p", | 154 | cFYI(1, "Exclusive Oplock granted on inode %p", |
156 | file->f_path.dentry->d_inode)); | 155 | file->f_path.dentry->d_inode); |
157 | } else if ((oplock & 0xF) == OPLOCK_READ) | 156 | } else if ((oplock & 0xF) == OPLOCK_READ) |
158 | pCifsInode->clientCanCacheRead = true; | 157 | pCifsInode->clientCanCacheRead = true; |
159 | 158 | ||
@@ -190,8 +189,8 @@ cifs_fill_filedata(struct file *file) | |||
190 | if (file->private_data != NULL) { | 189 | if (file->private_data != NULL) { |
191 | return pCifsFile; | 190 | return pCifsFile; |
192 | } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL)) | 191 | } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL)) |
193 | cERROR(1, ("could not find file instance for " | 192 | cERROR(1, "could not find file instance for " |
194 | "new file %p", file)); | 193 | "new file %p", file); |
195 | return NULL; | 194 | return NULL; |
196 | } | 195 | } |
197 | 196 | ||
@@ -217,7 +216,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, | |||
217 | if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) && | 216 | if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) && |
218 | (file->f_path.dentry->d_inode->i_size == | 217 | (file->f_path.dentry->d_inode->i_size == |
219 | (loff_t)le64_to_cpu(buf->EndOfFile))) { | 218 | (loff_t)le64_to_cpu(buf->EndOfFile))) { |
220 | cFYI(1, ("inode unchanged on server")); | 219 | cFYI(1, "inode unchanged on server"); |
221 | } else { | 220 | } else { |
222 | if (file->f_path.dentry->d_inode->i_mapping) { | 221 | if (file->f_path.dentry->d_inode->i_mapping) { |
223 | /* BB no need to lock inode until after invalidate | 222 | /* BB no need to lock inode until after invalidate |
@@ -226,8 +225,8 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, | |||
226 | if (rc != 0) | 225 | if (rc != 0) |
227 | CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; | 226 | CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; |
228 | } | 227 | } |
229 | cFYI(1, ("invalidating remote inode since open detected it " | 228 | cFYI(1, "invalidating remote inode since open detected it " |
230 | "changed")); | 229 | "changed"); |
231 | invalidate_remote_inode(file->f_path.dentry->d_inode); | 230 | invalidate_remote_inode(file->f_path.dentry->d_inode); |
232 | } | 231 | } |
233 | 232 | ||
@@ -242,8 +241,8 @@ client_can_cache: | |||
242 | if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { | 241 | if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { |
243 | pCifsInode->clientCanCacheAll = true; | 242 | pCifsInode->clientCanCacheAll = true; |
244 | pCifsInode->clientCanCacheRead = true; | 243 | pCifsInode->clientCanCacheRead = true; |
245 | cFYI(1, ("Exclusive Oplock granted on inode %p", | 244 | cFYI(1, "Exclusive Oplock granted on inode %p", |
246 | file->f_path.dentry->d_inode)); | 245 | file->f_path.dentry->d_inode); |
247 | } else if ((*oplock & 0xF) == OPLOCK_READ) | 246 | } else if ((*oplock & 0xF) == OPLOCK_READ) |
248 | pCifsInode->clientCanCacheRead = true; | 247 | pCifsInode->clientCanCacheRead = true; |
249 | 248 | ||
@@ -285,8 +284,8 @@ int cifs_open(struct inode *inode, struct file *file) | |||
285 | return rc; | 284 | return rc; |
286 | } | 285 | } |
287 | 286 | ||
288 | cFYI(1, ("inode = 0x%p file flags are 0x%x for %s", | 287 | cFYI(1, "inode = 0x%p file flags are 0x%x for %s", |
289 | inode, file->f_flags, full_path)); | 288 | inode, file->f_flags, full_path); |
290 | 289 | ||
291 | if (oplockEnabled) | 290 | if (oplockEnabled) |
292 | oplock = REQ_OPLOCK; | 291 | oplock = REQ_OPLOCK; |
@@ -298,27 +297,29 @@ int cifs_open(struct inode *inode, struct file *file) | |||
298 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & | 297 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & |
299 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { | 298 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { |
300 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); | 299 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); |
300 | oflags |= SMB_O_CREAT; | ||
301 | /* can not refresh inode info since size could be stale */ | 301 | /* can not refresh inode info since size could be stale */ |
302 | rc = cifs_posix_open(full_path, &inode, file->f_path.mnt, | 302 | rc = cifs_posix_open(full_path, &inode, file->f_path.mnt, |
303 | cifs_sb->mnt_file_mode /* ignored */, | 303 | inode->i_sb, |
304 | oflags, &oplock, &netfid, xid); | 304 | cifs_sb->mnt_file_mode /* ignored */, |
305 | oflags, &oplock, &netfid, xid); | ||
305 | if (rc == 0) { | 306 | if (rc == 0) { |
306 | cFYI(1, ("posix open succeeded")); | 307 | cFYI(1, "posix open succeeded"); |
307 | /* no need for special case handling of setting mode | 308 | /* no need for special case handling of setting mode |
308 | on read only files needed here */ | 309 | on read only files needed here */ |
309 | 310 | ||
310 | pCifsFile = cifs_fill_filedata(file); | 311 | pCifsFile = cifs_fill_filedata(file); |
311 | cifs_posix_open_inode_helper(inode, file, pCifsInode, | 312 | cifs_posix_open_inode_helper(inode, file, pCifsInode, |
312 | pCifsFile, oplock, netfid); | 313 | oplock, netfid); |
313 | goto out; | 314 | goto out; |
314 | } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | 315 | } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { |
315 | if (tcon->ses->serverNOS) | 316 | if (tcon->ses->serverNOS) |
316 | cERROR(1, ("server %s of type %s returned" | 317 | cERROR(1, "server %s of type %s returned" |
317 | " unexpected error on SMB posix open" | 318 | " unexpected error on SMB posix open" |
318 | ", disabling posix open support." | 319 | ", disabling posix open support." |
319 | " Check if server update available.", | 320 | " Check if server update available.", |
320 | tcon->ses->serverName, | 321 | tcon->ses->serverName, |
321 | tcon->ses->serverNOS)); | 322 | tcon->ses->serverNOS); |
322 | tcon->broken_posix_open = true; | 323 | tcon->broken_posix_open = true; |
323 | } else if ((rc != -EIO) && (rc != -EREMOTE) && | 324 | } else if ((rc != -EIO) && (rc != -EREMOTE) && |
324 | (rc != -EOPNOTSUPP)) /* path not found or net err */ | 325 | (rc != -EOPNOTSUPP)) /* path not found or net err */ |
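
The cifs_open() hunk above also forces SMB_O_CREAT into the converted open flags and passes the superblock down to cifs_posix_open(); if the server rejects the posix open with EINVAL or EOPNOTSUPP, the tree connection is marked broken_posix_open so later opens skip the attempt. A userspace sketch of that try-once-then-fall-back pattern, with posix_open()/regular_open() as hypothetical stand-ins for the cifs calls:

/* Sketch of the "posix open with fallback" pattern visible in cifs_open().
 * Nothing here is the real cifs code; it only models the control flow. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct tree_conn {
	bool broken_posix_open;
};

static int posix_open(const char *path, int flags)
{
	(void)path; (void)flags;
	return -EOPNOTSUPP;	/* pretend the server rejects posix opens */
}

static int regular_open(const char *path, int flags)
{
	(void)path; (void)flags;
	return 0;
}

static int do_open(struct tree_conn *tcon, const char *path, int flags)
{
	if (!tcon->broken_posix_open) {
		int rc = posix_open(path, flags);

		if (rc == 0)
			return 0;			/* posix open succeeded */
		if (rc == -EINVAL || rc == -EOPNOTSUPP)
			tcon->broken_posix_open = true;	/* don't try again later */
		/* other errors fall through to the ordinary open as well */
	}
	return regular_open(path, flags);
}

int main(void)
{
	struct tree_conn tcon = { false };

	do_open(&tcon, "/share/file", 0);
	printf("broken_posix_open = %d\n", tcon.broken_posix_open);
	return 0;
}
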
@@ -386,7 +387,7 @@ int cifs_open(struct inode *inode, struct file *file) | |||
386 | & CIFS_MOUNT_MAP_SPECIAL_CHR); | 387 | & CIFS_MOUNT_MAP_SPECIAL_CHR); |
387 | } | 388 | } |
388 | if (rc) { | 389 | if (rc) { |
389 | cFYI(1, ("cifs_open returned 0x%x", rc)); | 390 | cFYI(1, "cifs_open returned 0x%x", rc); |
390 | goto out; | 391 | goto out; |
391 | } | 392 | } |
392 | 393 | ||
@@ -469,7 +470,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
469 | } | 470 | } |
470 | 471 | ||
471 | if (file->f_path.dentry == NULL) { | 472 | if (file->f_path.dentry == NULL) { |
472 | cERROR(1, ("no valid name if dentry freed")); | 473 | cERROR(1, "no valid name if dentry freed"); |
473 | dump_stack(); | 474 | dump_stack(); |
474 | rc = -EBADF; | 475 | rc = -EBADF; |
475 | goto reopen_error_exit; | 476 | goto reopen_error_exit; |
@@ -477,7 +478,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
477 | 478 | ||
478 | inode = file->f_path.dentry->d_inode; | 479 | inode = file->f_path.dentry->d_inode; |
479 | if (inode == NULL) { | 480 | if (inode == NULL) { |
480 | cERROR(1, ("inode not valid")); | 481 | cERROR(1, "inode not valid"); |
481 | dump_stack(); | 482 | dump_stack(); |
482 | rc = -EBADF; | 483 | rc = -EBADF; |
483 | goto reopen_error_exit; | 484 | goto reopen_error_exit; |
@@ -499,8 +500,8 @@ reopen_error_exit: | |||
499 | return rc; | 500 | return rc; |
500 | } | 501 | } |
501 | 502 | ||
502 | cFYI(1, ("inode = 0x%p file flags 0x%x for %s", | 503 | cFYI(1, "inode = 0x%p file flags 0x%x for %s", |
503 | inode, file->f_flags, full_path)); | 504 | inode, file->f_flags, full_path); |
504 | 505 | ||
505 | if (oplockEnabled) | 506 | if (oplockEnabled) |
506 | oplock = REQ_OPLOCK; | 507 | oplock = REQ_OPLOCK; |
@@ -513,10 +514,11 @@ reopen_error_exit: | |||
513 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); | 514 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); |
514 | /* can not refresh inode info since size could be stale */ | 515 | /* can not refresh inode info since size could be stale */ |
515 | rc = cifs_posix_open(full_path, NULL, file->f_path.mnt, | 516 | rc = cifs_posix_open(full_path, NULL, file->f_path.mnt, |
516 | cifs_sb->mnt_file_mode /* ignored */, | 517 | inode->i_sb, |
517 | oflags, &oplock, &netfid, xid); | 518 | cifs_sb->mnt_file_mode /* ignored */, |
519 | oflags, &oplock, &netfid, xid); | ||
518 | if (rc == 0) { | 520 | if (rc == 0) { |
519 | cFYI(1, ("posix reopen succeeded")); | 521 | cFYI(1, "posix reopen succeeded"); |
520 | goto reopen_success; | 522 | goto reopen_success; |
521 | } | 523 | } |
522 | /* fallthrough to retry open the old way on errors, especially | 524 | /* fallthrough to retry open the old way on errors, especially |
@@ -537,8 +539,8 @@ reopen_error_exit: | |||
537 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 539 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
538 | if (rc) { | 540 | if (rc) { |
539 | mutex_unlock(&pCifsFile->fh_mutex); | 541 | mutex_unlock(&pCifsFile->fh_mutex); |
540 | cFYI(1, ("cifs_open returned 0x%x", rc)); | 542 | cFYI(1, "cifs_open returned 0x%x", rc); |
541 | cFYI(1, ("oplock: %d", oplock)); | 543 | cFYI(1, "oplock: %d", oplock); |
542 | } else { | 544 | } else { |
543 | reopen_success: | 545 | reopen_success: |
544 | pCifsFile->netfid = netfid; | 546 | pCifsFile->netfid = netfid; |
@@ -570,8 +572,8 @@ reopen_success: | |||
570 | if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { | 572 | if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { |
571 | pCifsInode->clientCanCacheAll = true; | 573 | pCifsInode->clientCanCacheAll = true; |
572 | pCifsInode->clientCanCacheRead = true; | 574 | pCifsInode->clientCanCacheRead = true; |
573 | cFYI(1, ("Exclusive Oplock granted on inode %p", | 575 | cFYI(1, "Exclusive Oplock granted on inode %p", |
574 | file->f_path.dentry->d_inode)); | 576 | file->f_path.dentry->d_inode); |
575 | } else if ((oplock & 0xF) == OPLOCK_READ) { | 577 | } else if ((oplock & 0xF) == OPLOCK_READ) { |
576 | pCifsInode->clientCanCacheRead = true; | 578 | pCifsInode->clientCanCacheRead = true; |
577 | pCifsInode->clientCanCacheAll = false; | 579 | pCifsInode->clientCanCacheAll = false; |
@@ -619,8 +621,7 @@ int cifs_close(struct inode *inode, struct file *file) | |||
619 | the struct would be in each open file, | 621 | the struct would be in each open file, |
620 | but this should give enough time to | 622 | but this should give enough time to |
621 | clear the socket */ | 623 | clear the socket */ |
622 | cFYI(DBG2, | 624 | cFYI(DBG2, "close delay, write pending"); |
623 | ("close delay, write pending")); | ||
624 | msleep(timeout); | 625 | msleep(timeout); |
625 | timeout *= 4; | 626 | timeout *= 4; |
626 | } | 627 | } |
@@ -653,7 +654,7 @@ int cifs_close(struct inode *inode, struct file *file) | |||
653 | 654 | ||
654 | read_lock(&GlobalSMBSeslock); | 655 | read_lock(&GlobalSMBSeslock); |
655 | if (list_empty(&(CIFS_I(inode)->openFileList))) { | 656 | if (list_empty(&(CIFS_I(inode)->openFileList))) { |
656 | cFYI(1, ("closing last open instance for inode %p", inode)); | 657 | cFYI(1, "closing last open instance for inode %p", inode); |
657 | /* if the file is not open we do not know if we can cache info | 658 | /* if the file is not open we do not know if we can cache info |
658 | on this inode, much less write behind and read ahead */ | 659 | on this inode, much less write behind and read ahead */ |
659 | CIFS_I(inode)->clientCanCacheRead = false; | 660 | CIFS_I(inode)->clientCanCacheRead = false; |
@@ -674,7 +675,7 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
674 | (struct cifsFileInfo *)file->private_data; | 675 | (struct cifsFileInfo *)file->private_data; |
675 | char *ptmp; | 676 | char *ptmp; |
676 | 677 | ||
677 | cFYI(1, ("Closedir inode = 0x%p", inode)); | 678 | cFYI(1, "Closedir inode = 0x%p", inode); |
678 | 679 | ||
679 | xid = GetXid(); | 680 | xid = GetXid(); |
680 | 681 | ||
@@ -685,22 +686,22 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
685 | 686 | ||
686 | pTcon = cifs_sb->tcon; | 687 | pTcon = cifs_sb->tcon; |
687 | 688 | ||
688 | cFYI(1, ("Freeing private data in close dir")); | 689 | cFYI(1, "Freeing private data in close dir"); |
689 | write_lock(&GlobalSMBSeslock); | 690 | write_lock(&GlobalSMBSeslock); |
690 | if (!pCFileStruct->srch_inf.endOfSearch && | 691 | if (!pCFileStruct->srch_inf.endOfSearch && |
691 | !pCFileStruct->invalidHandle) { | 692 | !pCFileStruct->invalidHandle) { |
692 | pCFileStruct->invalidHandle = true; | 693 | pCFileStruct->invalidHandle = true; |
693 | write_unlock(&GlobalSMBSeslock); | 694 | write_unlock(&GlobalSMBSeslock); |
694 | rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid); | 695 | rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid); |
695 | cFYI(1, ("Closing uncompleted readdir with rc %d", | 696 | cFYI(1, "Closing uncompleted readdir with rc %d", |
696 | rc)); | 697 | rc); |
697 | /* not much we can do if it fails anyway, ignore rc */ | 698 | /* not much we can do if it fails anyway, ignore rc */ |
698 | rc = 0; | 699 | rc = 0; |
699 | } else | 700 | } else |
700 | write_unlock(&GlobalSMBSeslock); | 701 | write_unlock(&GlobalSMBSeslock); |
701 | ptmp = pCFileStruct->srch_inf.ntwrk_buf_start; | 702 | ptmp = pCFileStruct->srch_inf.ntwrk_buf_start; |
702 | if (ptmp) { | 703 | if (ptmp) { |
703 | cFYI(1, ("closedir free smb buf in srch struct")); | 704 | cFYI(1, "closedir free smb buf in srch struct"); |
704 | pCFileStruct->srch_inf.ntwrk_buf_start = NULL; | 705 | pCFileStruct->srch_inf.ntwrk_buf_start = NULL; |
705 | if (pCFileStruct->srch_inf.smallBuf) | 706 | if (pCFileStruct->srch_inf.smallBuf) |
706 | cifs_small_buf_release(ptmp); | 707 | cifs_small_buf_release(ptmp); |
@@ -748,49 +749,49 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
748 | rc = -EACCES; | 749 | rc = -EACCES; |
749 | xid = GetXid(); | 750 | xid = GetXid(); |
750 | 751 | ||
751 | cFYI(1, ("Lock parm: 0x%x flockflags: " | 752 | cFYI(1, "Lock parm: 0x%x flockflags: " |
752 | "0x%x flocktype: 0x%x start: %lld end: %lld", | 753 | "0x%x flocktype: 0x%x start: %lld end: %lld", |
753 | cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start, | 754 | cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start, |
754 | pfLock->fl_end)); | 755 | pfLock->fl_end); |
755 | 756 | ||
756 | if (pfLock->fl_flags & FL_POSIX) | 757 | if (pfLock->fl_flags & FL_POSIX) |
757 | cFYI(1, ("Posix")); | 758 | cFYI(1, "Posix"); |
758 | if (pfLock->fl_flags & FL_FLOCK) | 759 | if (pfLock->fl_flags & FL_FLOCK) |
759 | cFYI(1, ("Flock")); | 760 | cFYI(1, "Flock"); |
760 | if (pfLock->fl_flags & FL_SLEEP) { | 761 | if (pfLock->fl_flags & FL_SLEEP) { |
761 | cFYI(1, ("Blocking lock")); | 762 | cFYI(1, "Blocking lock"); |
762 | wait_flag = true; | 763 | wait_flag = true; |
763 | } | 764 | } |
764 | if (pfLock->fl_flags & FL_ACCESS) | 765 | if (pfLock->fl_flags & FL_ACCESS) |
765 | cFYI(1, ("Process suspended by mandatory locking - " | 766 | cFYI(1, "Process suspended by mandatory locking - " |
766 | "not implemented yet")); | 767 | "not implemented yet"); |
767 | if (pfLock->fl_flags & FL_LEASE) | 768 | if (pfLock->fl_flags & FL_LEASE) |
768 | cFYI(1, ("Lease on file - not implemented yet")); | 769 | cFYI(1, "Lease on file - not implemented yet"); |
769 | if (pfLock->fl_flags & | 770 | if (pfLock->fl_flags & |
770 | (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE))) | 771 | (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE))) |
771 | cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags)); | 772 | cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags); |
772 | 773 | ||
773 | if (pfLock->fl_type == F_WRLCK) { | 774 | if (pfLock->fl_type == F_WRLCK) { |
774 | cFYI(1, ("F_WRLCK ")); | 775 | cFYI(1, "F_WRLCK "); |
775 | numLock = 1; | 776 | numLock = 1; |
776 | } else if (pfLock->fl_type == F_UNLCK) { | 777 | } else if (pfLock->fl_type == F_UNLCK) { |
777 | cFYI(1, ("F_UNLCK")); | 778 | cFYI(1, "F_UNLCK"); |
778 | numUnlock = 1; | 779 | numUnlock = 1; |
779 | /* Check if unlock includes more than | 780 | /* Check if unlock includes more than |
780 | one lock range */ | 781 | one lock range */ |
781 | } else if (pfLock->fl_type == F_RDLCK) { | 782 | } else if (pfLock->fl_type == F_RDLCK) { |
782 | cFYI(1, ("F_RDLCK")); | 783 | cFYI(1, "F_RDLCK"); |
783 | lockType |= LOCKING_ANDX_SHARED_LOCK; | 784 | lockType |= LOCKING_ANDX_SHARED_LOCK; |
784 | numLock = 1; | 785 | numLock = 1; |
785 | } else if (pfLock->fl_type == F_EXLCK) { | 786 | } else if (pfLock->fl_type == F_EXLCK) { |
786 | cFYI(1, ("F_EXLCK")); | 787 | cFYI(1, "F_EXLCK"); |
787 | numLock = 1; | 788 | numLock = 1; |
788 | } else if (pfLock->fl_type == F_SHLCK) { | 789 | } else if (pfLock->fl_type == F_SHLCK) { |
789 | cFYI(1, ("F_SHLCK")); | 790 | cFYI(1, "F_SHLCK"); |
790 | lockType |= LOCKING_ANDX_SHARED_LOCK; | 791 | lockType |= LOCKING_ANDX_SHARED_LOCK; |
791 | numLock = 1; | 792 | numLock = 1; |
792 | } else | 793 | } else |
793 | cFYI(1, ("Unknown type of lock")); | 794 | cFYI(1, "Unknown type of lock"); |
794 | 795 | ||
795 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 796 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
796 | tcon = cifs_sb->tcon; | 797 | tcon = cifs_sb->tcon; |
@@ -833,8 +834,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
833 | 0 /* wait flag */ ); | 834 | 0 /* wait flag */ ); |
834 | pfLock->fl_type = F_UNLCK; | 835 | pfLock->fl_type = F_UNLCK; |
835 | if (rc != 0) | 836 | if (rc != 0) |
836 | cERROR(1, ("Error unlocking previously locked " | 837 | cERROR(1, "Error unlocking previously locked " |
837 | "range %d during test of lock", rc)); | 838 | "range %d during test of lock", rc); |
838 | rc = 0; | 839 | rc = 0; |
839 | 840 | ||
840 | } else { | 841 | } else { |
@@ -856,9 +857,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
856 | 0 /* wait flag */); | 857 | 0 /* wait flag */); |
857 | pfLock->fl_type = F_RDLCK; | 858 | pfLock->fl_type = F_RDLCK; |
858 | if (rc != 0) | 859 | if (rc != 0) |
859 | cERROR(1, ("Error unlocking " | 860 | cERROR(1, "Error unlocking " |
860 | "previously locked range %d " | 861 | "previously locked range %d " |
861 | "during test of lock", rc)); | 862 | "during test of lock", rc); |
862 | rc = 0; | 863 | rc = 0; |
863 | } else { | 864 | } else { |
864 | pfLock->fl_type = F_WRLCK; | 865 | pfLock->fl_type = F_WRLCK; |
@@ -923,9 +924,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
923 | 1, 0, li->type, false); | 924 | 1, 0, li->type, false); |
924 | if (stored_rc) | 925 | if (stored_rc) |
925 | rc = stored_rc; | 926 | rc = stored_rc; |
926 | 927 | else { | |
927 | list_del(&li->llist); | 928 | list_del(&li->llist); |
928 | kfree(li); | 929 | kfree(li); |
930 | } | ||
929 | } | 931 | } |
930 | } | 932 | } |
931 | mutex_unlock(&fid->lock_mutex); | 933 | mutex_unlock(&fid->lock_mutex); |
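
The change just above in the unlock path of cifs_lock() is a small correctness fix: a tracked byte-range lock is now dropped from the per-fid list only when the server unlock actually returned success, instead of unconditionally. A userspace sketch of that keep-on-failure bookkeeping, using a hypothetical singly linked lock list rather than the kernel's list_head and CIFSSMBLock():

/* Unlock everything we track, but keep entries whose unlock failed so a
 * later pass can retry them.  Illustrative code only. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_entry {
	long long offset;
	long long length;
	struct lock_entry *next;
};

/* pretend server unlock: succeeds for even offsets, fails for odd ones */
static int server_unlock(const struct lock_entry *li)
{
	return (li->offset & 1) ? -EIO : 0;
}

static int unlock_all(struct lock_entry **head)
{
	struct lock_entry **pp = head;
	int rc = 0;

	while (*pp) {
		struct lock_entry *li = *pp;
		int stored_rc = server_unlock(li);

		if (stored_rc) {
			rc = stored_rc;	/* remember the failure ...        */
			pp = &li->next;	/* ... but keep the entry around   */
		} else {
			*pp = li->next;	/* unlink and free only on success */
			free(li);
		}
	}
	return rc;
}

int main(void)
{
	struct lock_entry *head = NULL;

	for (long long off = 0; off < 4; off++) {
		struct lock_entry *li = calloc(1, sizeof(*li));

		li->offset = off;
		li->length = 1;
		li->next = head;
		head = li;
	}

	printf("unlock_all rc = %d\n", unlock_all(&head));
	for (struct lock_entry *li = head; li; li = li->next)
		printf("still locked: offset %lld\n", li->offset);
	return 0;
}
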
@@ -988,9 +990,8 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, | |||
988 | 990 | ||
989 | pTcon = cifs_sb->tcon; | 991 | pTcon = cifs_sb->tcon; |
990 | 992 | ||
991 | /* cFYI(1, | 993 | /* cFYI(1, " write %d bytes to offset %lld of %s", write_size, |
992 | (" write %d bytes to offset %lld of %s", write_size, | 994 | *poffset, file->f_path.dentry->d_name.name); */ |
993 | *poffset, file->f_path.dentry->d_name.name)); */ | ||
994 | 995 | ||
995 | if (file->private_data == NULL) | 996 | if (file->private_data == NULL) |
996 | return -EBADF; | 997 | return -EBADF; |
@@ -1091,8 +1092,8 @@ static ssize_t cifs_write(struct file *file, const char *write_data, | |||
1091 | 1092 | ||
1092 | pTcon = cifs_sb->tcon; | 1093 | pTcon = cifs_sb->tcon; |
1093 | 1094 | ||
1094 | cFYI(1, ("write %zd bytes to offset %lld of %s", write_size, | 1095 | cFYI(1, "write %zd bytes to offset %lld of %s", write_size, |
1095 | *poffset, file->f_path.dentry->d_name.name)); | 1096 | *poffset, file->f_path.dentry->d_name.name); |
1096 | 1097 | ||
1097 | if (file->private_data == NULL) | 1098 | if (file->private_data == NULL) |
1098 | return -EBADF; | 1099 | return -EBADF; |
@@ -1233,7 +1234,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode) | |||
1233 | it being zero) during stress testcases so we need to check for it */ | 1234 | it being zero) during stress testcases so we need to check for it */ |
1234 | 1235 | ||
1235 | if (cifs_inode == NULL) { | 1236 | if (cifs_inode == NULL) { |
1236 | cERROR(1, ("Null inode passed to cifs_writeable_file")); | 1237 | cERROR(1, "Null inode passed to cifs_writeable_file"); |
1237 | dump_stack(); | 1238 | dump_stack(); |
1238 | return NULL; | 1239 | return NULL; |
1239 | } | 1240 | } |
@@ -1277,7 +1278,7 @@ refind_writable: | |||
1277 | again. Note that it would be bad | 1278 | again. Note that it would be bad |
1278 | to hold up writepages here (rather than | 1279 | to hold up writepages here (rather than |
1279 | in caller) with continuous retries */ | 1280 | in caller) with continuous retries */ |
1280 | cFYI(1, ("wp failed on reopen file")); | 1281 | cFYI(1, "wp failed on reopen file"); |
1281 | read_lock(&GlobalSMBSeslock); | 1282 | read_lock(&GlobalSMBSeslock); |
1282 | /* can not use this handle, no write | 1283 | /* can not use this handle, no write |
1283 | pending on this one after all */ | 1284 | pending on this one after all */ |
@@ -1353,7 +1354,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1353 | else if (bytes_written < 0) | 1354 | else if (bytes_written < 0) |
1354 | rc = bytes_written; | 1355 | rc = bytes_written; |
1355 | } else { | 1356 | } else { |
1356 | cFYI(1, ("No writeable filehandles for inode")); | 1357 | cFYI(1, "No writeable filehandles for inode"); |
1357 | rc = -EIO; | 1358 | rc = -EIO; |
1358 | } | 1359 | } |
1359 | 1360 | ||
@@ -1525,7 +1526,7 @@ retry: | |||
1525 | */ | 1526 | */ |
1526 | open_file = find_writable_file(CIFS_I(mapping->host)); | 1527 | open_file = find_writable_file(CIFS_I(mapping->host)); |
1527 | if (!open_file) { | 1528 | if (!open_file) { |
1528 | cERROR(1, ("No writable handles for inode")); | 1529 | cERROR(1, "No writable handles for inode"); |
1529 | rc = -EBADF; | 1530 | rc = -EBADF; |
1530 | } else { | 1531 | } else { |
1531 | long_op = cifs_write_timeout(cifsi, offset); | 1532 | long_op = cifs_write_timeout(cifsi, offset); |
@@ -1538,8 +1539,8 @@ retry: | |||
1538 | cifs_update_eof(cifsi, offset, bytes_written); | 1539 | cifs_update_eof(cifsi, offset, bytes_written); |
1539 | 1540 | ||
1540 | if (rc || bytes_written < bytes_to_write) { | 1541 | if (rc || bytes_written < bytes_to_write) { |
1541 | cERROR(1, ("Write2 ret %d, wrote %d", | 1542 | cERROR(1, "Write2 ret %d, wrote %d", |
1542 | rc, bytes_written)); | 1543 | rc, bytes_written); |
1543 | /* BB what if continued retry is | 1544 | /* BB what if continued retry is |
1544 | requested via mount flags? */ | 1545 | requested via mount flags? */ |
1545 | if (rc == -ENOSPC) | 1546 | if (rc == -ENOSPC) |
@@ -1600,7 +1601,7 @@ static int cifs_writepage(struct page *page, struct writeback_control *wbc) | |||
1600 | /* BB add check for wbc flags */ | 1601 | /* BB add check for wbc flags */ |
1601 | page_cache_get(page); | 1602 | page_cache_get(page); |
1602 | if (!PageUptodate(page)) | 1603 | if (!PageUptodate(page)) |
1603 | cFYI(1, ("ppw - page not up to date")); | 1604 | cFYI(1, "ppw - page not up to date"); |
1604 | 1605 | ||
1605 | /* | 1606 | /* |
1606 | * Set the "writeback" flag, and clear "dirty" in the radix tree. | 1607 | * Set the "writeback" flag, and clear "dirty" in the radix tree. |
@@ -1629,8 +1630,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, | |||
1629 | int rc; | 1630 | int rc; |
1630 | struct inode *inode = mapping->host; | 1631 | struct inode *inode = mapping->host; |
1631 | 1632 | ||
1632 | cFYI(1, ("write_end for page %p from pos %lld with %d bytes", | 1633 | cFYI(1, "write_end for page %p from pos %lld with %d bytes", |
1633 | page, pos, copied)); | 1634 | page, pos, copied); |
1634 | 1635 | ||
1635 | if (PageChecked(page)) { | 1636 | if (PageChecked(page)) { |
1636 | if (copied == len) | 1637 | if (copied == len) |
@@ -1686,8 +1687,8 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1686 | 1687 | ||
1687 | xid = GetXid(); | 1688 | xid = GetXid(); |
1688 | 1689 | ||
1689 | cFYI(1, ("Sync file - name: %s datasync: 0x%x", | 1690 | cFYI(1, "Sync file - name: %s datasync: 0x%x", |
1690 | dentry->d_name.name, datasync)); | 1691 | dentry->d_name.name, datasync); |
1691 | 1692 | ||
1692 | rc = filemap_write_and_wait(inode->i_mapping); | 1693 | rc = filemap_write_and_wait(inode->i_mapping); |
1693 | if (rc == 0) { | 1694 | if (rc == 0) { |
@@ -1711,7 +1712,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1711 | unsigned int rpages = 0; | 1712 | unsigned int rpages = 0; |
1712 | int rc = 0; | 1713 | int rc = 0; |
1713 | 1714 | ||
1714 | cFYI(1, ("sync page %p",page)); | 1715 | cFYI(1, "sync page %p", page); |
1715 | mapping = page->mapping; | 1716 | mapping = page->mapping; |
1716 | if (!mapping) | 1717 | if (!mapping) |
1717 | return 0; | 1718 | return 0; |
@@ -1722,7 +1723,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1722 | /* fill in rpages then | 1723 | /* fill in rpages then |
1723 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ | 1724 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ |
1724 | 1725 | ||
1725 | /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index)); | 1726 | /* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index); |
1726 | 1727 | ||
1727 | #if 0 | 1728 | #if 0 |
1728 | if (rc < 0) | 1729 | if (rc < 0) |
@@ -1756,7 +1757,7 @@ int cifs_flush(struct file *file, fl_owner_t id) | |||
1756 | CIFS_I(inode)->write_behind_rc = 0; | 1757 | CIFS_I(inode)->write_behind_rc = 0; |
1757 | } | 1758 | } |
1758 | 1759 | ||
1759 | cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc)); | 1760 | cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc); |
1760 | 1761 | ||
1761 | return rc; | 1762 | return rc; |
1762 | } | 1763 | } |
@@ -1788,7 +1789,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, | |||
1788 | open_file = (struct cifsFileInfo *)file->private_data; | 1789 | open_file = (struct cifsFileInfo *)file->private_data; |
1789 | 1790 | ||
1790 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 1791 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1791 | cFYI(1, ("attempting read on write only file instance")); | 1792 | cFYI(1, "attempting read on write only file instance"); |
1792 | 1793 | ||
1793 | for (total_read = 0, current_offset = read_data; | 1794 | for (total_read = 0, current_offset = read_data; |
1794 | read_size > total_read; | 1795 | read_size > total_read; |
@@ -1869,7 +1870,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
1869 | open_file = (struct cifsFileInfo *)file->private_data; | 1870 | open_file = (struct cifsFileInfo *)file->private_data; |
1870 | 1871 | ||
1871 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 1872 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1872 | cFYI(1, ("attempting read on write only file instance")); | 1873 | cFYI(1, "attempting read on write only file instance"); |
1873 | 1874 | ||
1874 | for (total_read = 0, current_offset = read_data; | 1875 | for (total_read = 0, current_offset = read_data; |
1875 | read_size > total_read; | 1876 | read_size > total_read; |
@@ -1920,7 +1921,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
1920 | xid = GetXid(); | 1921 | xid = GetXid(); |
1921 | rc = cifs_revalidate_file(file); | 1922 | rc = cifs_revalidate_file(file); |
1922 | if (rc) { | 1923 | if (rc) { |
1923 | cFYI(1, ("Validation prior to mmap failed, error=%d", rc)); | 1924 | cFYI(1, "Validation prior to mmap failed, error=%d", rc); |
1924 | FreeXid(xid); | 1925 | FreeXid(xid); |
1925 | return rc; | 1926 | return rc; |
1926 | } | 1927 | } |
@@ -1931,8 +1932,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
1931 | 1932 | ||
1932 | 1933 | ||
1933 | static void cifs_copy_cache_pages(struct address_space *mapping, | 1934 | static void cifs_copy_cache_pages(struct address_space *mapping, |
1934 | struct list_head *pages, int bytes_read, char *data, | 1935 | struct list_head *pages, int bytes_read, char *data) |
1935 | struct pagevec *plru_pvec) | ||
1936 | { | 1936 | { |
1937 | struct page *page; | 1937 | struct page *page; |
1938 | char *target; | 1938 | char *target; |
@@ -1944,10 +1944,10 @@ static void cifs_copy_cache_pages(struct address_space *mapping, | |||
1944 | page = list_entry(pages->prev, struct page, lru); | 1944 | page = list_entry(pages->prev, struct page, lru); |
1945 | list_del(&page->lru); | 1945 | list_del(&page->lru); |
1946 | 1946 | ||
1947 | if (add_to_page_cache(page, mapping, page->index, | 1947 | if (add_to_page_cache_lru(page, mapping, page->index, |
1948 | GFP_KERNEL)) { | 1948 | GFP_KERNEL)) { |
1949 | page_cache_release(page); | 1949 | page_cache_release(page); |
1950 | cFYI(1, ("Add page cache failed")); | 1950 | cFYI(1, "Add page cache failed"); |
1951 | data += PAGE_CACHE_SIZE; | 1951 | data += PAGE_CACHE_SIZE; |
1952 | bytes_read -= PAGE_CACHE_SIZE; | 1952 | bytes_read -= PAGE_CACHE_SIZE; |
1953 | continue; | 1953 | continue; |
@@ -1970,8 +1970,6 @@ static void cifs_copy_cache_pages(struct address_space *mapping, | |||
1970 | flush_dcache_page(page); | 1970 | flush_dcache_page(page); |
1971 | SetPageUptodate(page); | 1971 | SetPageUptodate(page); |
1972 | unlock_page(page); | 1972 | unlock_page(page); |
1973 | if (!pagevec_add(plru_pvec, page)) | ||
1974 | __pagevec_lru_add_file(plru_pvec); | ||
1975 | data += PAGE_CACHE_SIZE; | 1973 | data += PAGE_CACHE_SIZE; |
1976 | } | 1974 | } |
1977 | return; | 1975 | return; |
@@ -1990,7 +1988,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
1990 | unsigned int read_size, i; | 1988 | unsigned int read_size, i; |
1991 | char *smb_read_data = NULL; | 1989 | char *smb_read_data = NULL; |
1992 | struct smb_com_read_rsp *pSMBr; | 1990 | struct smb_com_read_rsp *pSMBr; |
1993 | struct pagevec lru_pvec; | ||
1994 | struct cifsFileInfo *open_file; | 1991 | struct cifsFileInfo *open_file; |
1995 | int buf_type = CIFS_NO_BUFFER; | 1992 | int buf_type = CIFS_NO_BUFFER; |
1996 | 1993 | ||
@@ -2004,8 +2001,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2004 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 2001 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
2005 | pTcon = cifs_sb->tcon; | 2002 | pTcon = cifs_sb->tcon; |
2006 | 2003 | ||
2007 | pagevec_init(&lru_pvec, 0); | 2004 | cFYI(DBG2, "rpages: num pages %d", num_pages); |
2008 | cFYI(DBG2, ("rpages: num pages %d", num_pages)); | ||
2009 | for (i = 0; i < num_pages; ) { | 2005 | for (i = 0; i < num_pages; ) { |
2010 | unsigned contig_pages; | 2006 | unsigned contig_pages; |
2011 | struct page *tmp_page; | 2007 | struct page *tmp_page; |
@@ -2038,8 +2034,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2038 | /* Read size needs to be in multiples of one page */ | 2034 | /* Read size needs to be in multiples of one page */ |
2039 | read_size = min_t(const unsigned int, read_size, | 2035 | read_size = min_t(const unsigned int, read_size, |
2040 | cifs_sb->rsize & PAGE_CACHE_MASK); | 2036 | cifs_sb->rsize & PAGE_CACHE_MASK); |
2041 | cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d", | 2037 | cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d", |
2042 | read_size, contig_pages)); | 2038 | read_size, contig_pages); |
2043 | rc = -EAGAIN; | 2039 | rc = -EAGAIN; |
2044 | while (rc == -EAGAIN) { | 2040 | while (rc == -EAGAIN) { |
2045 | if ((open_file->invalidHandle) && | 2041 | if ((open_file->invalidHandle) && |
@@ -2066,14 +2062,14 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2066 | } | 2062 | } |
2067 | } | 2063 | } |
2068 | if ((rc < 0) || (smb_read_data == NULL)) { | 2064 | if ((rc < 0) || (smb_read_data == NULL)) { |
2069 | cFYI(1, ("Read error in readpages: %d", rc)); | 2065 | cFYI(1, "Read error in readpages: %d", rc); |
2070 | break; | 2066 | break; |
2071 | } else if (bytes_read > 0) { | 2067 | } else if (bytes_read > 0) { |
2072 | task_io_account_read(bytes_read); | 2068 | task_io_account_read(bytes_read); |
2073 | pSMBr = (struct smb_com_read_rsp *)smb_read_data; | 2069 | pSMBr = (struct smb_com_read_rsp *)smb_read_data; |
2074 | cifs_copy_cache_pages(mapping, page_list, bytes_read, | 2070 | cifs_copy_cache_pages(mapping, page_list, bytes_read, |
2075 | smb_read_data + 4 /* RFC1001 hdr */ + | 2071 | smb_read_data + 4 /* RFC1001 hdr */ + |
2076 | le16_to_cpu(pSMBr->DataOffset), &lru_pvec); | 2072 | le16_to_cpu(pSMBr->DataOffset)); |
2077 | 2073 | ||
2078 | i += bytes_read >> PAGE_CACHE_SHIFT; | 2074 | i += bytes_read >> PAGE_CACHE_SHIFT; |
2079 | cifs_stats_bytes_read(pTcon, bytes_read); | 2075 | cifs_stats_bytes_read(pTcon, bytes_read); |
@@ -2089,9 +2085,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2089 | /* break; */ | 2085 | /* break; */ |
2090 | } | 2086 | } |
2091 | } else { | 2087 | } else { |
2092 | cFYI(1, ("No bytes read (%d) at offset %lld . " | 2088 | cFYI(1, "No bytes read (%d) at offset %lld . " |
2093 | "Cleaning remaining pages from readahead list", | 2089 | "Cleaning remaining pages from readahead list", |
2094 | bytes_read, offset)); | 2090 | bytes_read, offset); |
2095 | /* BB turn off caching and do new lookup on | 2091 | /* BB turn off caching and do new lookup on |
2096 | file size at server? */ | 2092 | file size at server? */ |
2097 | break; | 2093 | break; |
@@ -2106,8 +2102,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2106 | bytes_read = 0; | 2102 | bytes_read = 0; |
2107 | } | 2103 | } |
2108 | 2104 | ||
2109 | pagevec_lru_add_file(&lru_pvec); | ||
2110 | |||
2111 | /* need to free smb_read_data buf before exit */ | 2105 | /* need to free smb_read_data buf before exit */ |
2112 | if (smb_read_data) { | 2106 | if (smb_read_data) { |
2113 | if (buf_type == CIFS_SMALL_BUFFER) | 2107 | if (buf_type == CIFS_SMALL_BUFFER) |
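
The readpages hunks above remove the function-local struct pagevec: add_to_page_cache_lru() inserts the page into the mapping and puts it on the LRU in one call, so cifs_copy_cache_pages() loses its plru_pvec parameter and cifs_readpages() no longer has to flush a partially filled pagevec at the end. A userspace analogy of why folding the two steps into one helper removes the caller-side batching (the counters below are illustrative, nothing here is the kernel page cache):

#include <stdio.h>

#define BATCH 4

struct cache {
	int pages_cached;
	int pages_on_lru;
};

/* old style: caller batches the LRU insertion separately */
static void cache_add(struct cache *c)            { c->pages_cached++; }
static void lru_add_batch(struct cache *c, int n) { c->pages_on_lru += n; }

/* new style: one helper does both steps */
static void cache_add_lru(struct cache *c)
{
	c->pages_cached++;
	c->pages_on_lru++;
}

int main(void)
{
	struct cache oldc = { 0, 0 }, newc = { 0, 0 };
	int pending = 0;

	for (int i = 0; i < 10; i++) {
		cache_add(&oldc);
		if (++pending == BATCH) {	/* caller-managed pagevec */
			lru_add_batch(&oldc, pending);
			pending = 0;
		}
		cache_add_lru(&newc);		/* no caller bookkeeping */
	}
	if (pending)
		lru_add_batch(&oldc, pending);	/* don't forget the tail flush */

	printf("old: %d/%d  new: %d/%d\n",
	       oldc.pages_cached, oldc.pages_on_lru,
	       newc.pages_cached, newc.pages_on_lru);
	return 0;
}

The tail flush in the old style corresponds to the pagevec_lru_add_file() call that the hunk above deletes.
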
@@ -2136,7 +2130,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page, | |||
2136 | if (rc < 0) | 2130 | if (rc < 0) |
2137 | goto io_error; | 2131 | goto io_error; |
2138 | else | 2132 | else |
2139 | cFYI(1, ("Bytes read %d", rc)); | 2133 | cFYI(1, "Bytes read %d", rc); |
2140 | 2134 | ||
2141 | file->f_path.dentry->d_inode->i_atime = | 2135 | file->f_path.dentry->d_inode->i_atime = |
2142 | current_fs_time(file->f_path.dentry->d_inode->i_sb); | 2136 | current_fs_time(file->f_path.dentry->d_inode->i_sb); |
@@ -2168,8 +2162,8 @@ static int cifs_readpage(struct file *file, struct page *page) | |||
2168 | return rc; | 2162 | return rc; |
2169 | } | 2163 | } |
2170 | 2164 | ||
2171 | cFYI(1, ("readpage %p at offset %d 0x%x\n", | 2165 | cFYI(1, "readpage %p at offset %d 0x%x\n", |
2172 | page, (int)offset, (int)offset)); | 2166 | page, (int)offset, (int)offset); |
2173 | 2167 | ||
2174 | rc = cifs_readpage_worker(file, page, &offset); | 2168 | rc = cifs_readpage_worker(file, page, &offset); |
2175 | 2169 | ||
@@ -2239,7 +2233,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, | |||
2239 | struct page *page; | 2233 | struct page *page; |
2240 | int rc = 0; | 2234 | int rc = 0; |
2241 | 2235 | ||
2242 | cFYI(1, ("write_begin from %lld len %d", (long long)pos, len)); | 2236 | cFYI(1, "write_begin from %lld len %d", (long long)pos, len); |
2243 | 2237 | ||
2244 | page = grab_cache_page_write_begin(mapping, index, flags); | 2238 | page = grab_cache_page_write_begin(mapping, index, flags); |
2245 | if (!page) { | 2239 | if (!page) { |
@@ -2311,12 +2305,10 @@ cifs_oplock_break(struct slow_work *work) | |||
2311 | int rc, waitrc = 0; | 2305 | int rc, waitrc = 0; |
2312 | 2306 | ||
2313 | if (inode && S_ISREG(inode->i_mode)) { | 2307 | if (inode && S_ISREG(inode->i_mode)) { |
2314 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 2308 | if (cinode->clientCanCacheRead) |
2315 | if (cinode->clientCanCacheAll == 0) | ||
2316 | break_lease(inode, O_RDONLY); | 2309 | break_lease(inode, O_RDONLY); |
2317 | else if (cinode->clientCanCacheRead == 0) | 2310 | else |
2318 | break_lease(inode, O_WRONLY); | 2311 | break_lease(inode, O_WRONLY); |
2319 | #endif | ||
2320 | rc = filemap_fdatawrite(inode->i_mapping); | 2312 | rc = filemap_fdatawrite(inode->i_mapping); |
2321 | if (cinode->clientCanCacheRead == 0) { | 2313 | if (cinode->clientCanCacheRead == 0) { |
2322 | waitrc = filemap_fdatawait(inode->i_mapping); | 2314 | waitrc = filemap_fdatawait(inode->i_mapping); |
@@ -2326,7 +2318,7 @@ cifs_oplock_break(struct slow_work *work) | |||
2326 | rc = waitrc; | 2318 | rc = waitrc; |
2327 | if (rc) | 2319 | if (rc) |
2328 | cinode->write_behind_rc = rc; | 2320 | cinode->write_behind_rc = rc; |
2329 | cFYI(1, ("Oplock flush inode %p rc %d", inode, rc)); | 2321 | cFYI(1, "Oplock flush inode %p rc %d", inode, rc); |
2330 | } | 2322 | } |
2331 | 2323 | ||
2332 | /* | 2324 | /* |
@@ -2338,7 +2330,7 @@ cifs_oplock_break(struct slow_work *work) | |||
2338 | if (!cfile->closePend && !cfile->oplock_break_cancelled) { | 2330 | if (!cfile->closePend && !cfile->oplock_break_cancelled) { |
2339 | rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0, | 2331 | rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0, |
2340 | LOCKING_ANDX_OPLOCK_RELEASE, false); | 2332 | LOCKING_ANDX_OPLOCK_RELEASE, false); |
2341 | cFYI(1, ("Oplock release rc = %d", rc)); | 2333 | cFYI(1, "Oplock release rc = %d", rc); |
2342 | } | 2334 | } |
2343 | } | 2335 | } |
2344 | 2336 | ||
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 35ec11716213..62b324f26a56 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/inode.c | 2 | * fs/cifs/inode.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2008 | 4 | * Copyright (C) International Business Machines Corp., 2002,2010 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * This library is free software; you can redistribute it and/or modify | 7 | * This library is free software; you can redistribute it and/or modify |
@@ -86,30 +86,30 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr) | |||
86 | { | 86 | { |
87 | struct cifsInodeInfo *cifs_i = CIFS_I(inode); | 87 | struct cifsInodeInfo *cifs_i = CIFS_I(inode); |
88 | 88 | ||
89 | cFYI(1, ("%s: revalidating inode %llu", __func__, cifs_i->uniqueid)); | 89 | cFYI(1, "%s: revalidating inode %llu", __func__, cifs_i->uniqueid); |
90 | 90 | ||
91 | if (inode->i_state & I_NEW) { | 91 | if (inode->i_state & I_NEW) { |
92 | cFYI(1, ("%s: inode %llu is new", __func__, cifs_i->uniqueid)); | 92 | cFYI(1, "%s: inode %llu is new", __func__, cifs_i->uniqueid); |
93 | return; | 93 | return; |
94 | } | 94 | } |
95 | 95 | ||
96 | /* don't bother with revalidation if we have an oplock */ | 96 | /* don't bother with revalidation if we have an oplock */ |
97 | if (cifs_i->clientCanCacheRead) { | 97 | if (cifs_i->clientCanCacheRead) { |
98 | cFYI(1, ("%s: inode %llu is oplocked", __func__, | 98 | cFYI(1, "%s: inode %llu is oplocked", __func__, |
99 | cifs_i->uniqueid)); | 99 | cifs_i->uniqueid); |
100 | return; | 100 | return; |
101 | } | 101 | } |
102 | 102 | ||
103 | /* revalidate if mtime or size have changed */ | 103 | /* revalidate if mtime or size have changed */ |
104 | if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) && | 104 | if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) && |
105 | cifs_i->server_eof == fattr->cf_eof) { | 105 | cifs_i->server_eof == fattr->cf_eof) { |
106 | cFYI(1, ("%s: inode %llu is unchanged", __func__, | 106 | cFYI(1, "%s: inode %llu is unchanged", __func__, |
107 | cifs_i->uniqueid)); | 107 | cifs_i->uniqueid); |
108 | return; | 108 | return; |
109 | } | 109 | } |
110 | 110 | ||
111 | cFYI(1, ("%s: invalidating inode %llu mapping", __func__, | 111 | cFYI(1, "%s: invalidating inode %llu mapping", __func__, |
112 | cifs_i->uniqueid)); | 112 | cifs_i->uniqueid); |
113 | cifs_i->invalid_mapping = true; | 113 | cifs_i->invalid_mapping = true; |
114 | } | 114 | } |
115 | 115 | ||
@@ -137,15 +137,14 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) | |||
137 | inode->i_mode = fattr->cf_mode; | 137 | inode->i_mode = fattr->cf_mode; |
138 | 138 | ||
139 | cifs_i->cifsAttrs = fattr->cf_cifsattrs; | 139 | cifs_i->cifsAttrs = fattr->cf_cifsattrs; |
140 | cifs_i->uniqueid = fattr->cf_uniqueid; | ||
141 | 140 | ||
142 | if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) | 141 | if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) |
143 | cifs_i->time = 0; | 142 | cifs_i->time = 0; |
144 | else | 143 | else |
145 | cifs_i->time = jiffies; | 144 | cifs_i->time = jiffies; |
146 | 145 | ||
147 | cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode, | 146 | cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode, |
148 | oldtime, cifs_i->time)); | 147 | oldtime, cifs_i->time); |
149 | 148 | ||
150 | cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; | 149 | cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; |
151 | 150 | ||
@@ -170,6 +169,17 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) | |||
170 | cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL); | 169 | cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL); |
171 | } | 170 | } |
172 | 171 | ||
172 | void | ||
173 | cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr) | ||
174 | { | ||
175 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
176 | |||
177 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) | ||
178 | return; | ||
179 | |||
180 | fattr->cf_uniqueid = iunique(sb, ROOT_I); | ||
181 | } | ||
182 | |||
173 | /* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */ | 183 | /* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */ |
174 | void | 184 | void |
175 | cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, | 185 | cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, |
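
The new cifs_fill_uniqueid() helper above covers the case where the mount is not using server-supplied inode numbers (CIFS_MOUNT_SERVER_INUM clear): the fattr then gets a locally unique id from iunique(sb, ROOT_I), which is presumably why cifs_posix_open() now receives a super_block pointer in the dir.c and file.c hunks earlier in this patch. A toy model of that fallback id assignment; local_iunique() and the ROOT_I value here are stand-ins, not the VFS iunique():

#include <stdbool.h>
#include <stdio.h>

#define ROOT_I 2	/* reserved root inode number (value is illustrative) */

struct fattr {
	unsigned long long cf_uniqueid;
};

/* stand-in for iunique(): monotonically increasing, skipping reserved ids */
static unsigned long long local_iunique(unsigned long long reserved)
{
	static unsigned long long next;

	if (next <= reserved)
		next = reserved;
	return ++next;
}

static void fill_uniqueid(struct fattr *fattr, bool server_inode_numbers)
{
	if (server_inode_numbers)
		return;		/* the server already provided cf_uniqueid */
	fattr->cf_uniqueid = local_iunique(ROOT_I);
}

int main(void)
{
	struct fattr f = { 0 };

	fill_uniqueid(&f, false);
	printf("assigned uniqueid %llu\n", f.cf_uniqueid);
	return 0;
}
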
@@ -227,7 +237,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, | |||
227 | /* safest to call it a file if we do not know */ | 237 | /* safest to call it a file if we do not know */ |
228 | fattr->cf_mode |= S_IFREG; | 238 | fattr->cf_mode |= S_IFREG; |
229 | fattr->cf_dtype = DT_REG; | 239 | fattr->cf_dtype = DT_REG; |
230 | cFYI(1, ("unknown type %d", le32_to_cpu(info->Type))); | 240 | cFYI(1, "unknown type %d", le32_to_cpu(info->Type)); |
231 | break; | 241 | break; |
232 | } | 242 | } |
233 | 243 | ||
@@ -256,7 +266,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) | |||
256 | { | 266 | { |
257 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 267 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
258 | 268 | ||
259 | cFYI(1, ("creating fake fattr for DFS referral")); | 269 | cFYI(1, "creating fake fattr for DFS referral"); |
260 | 270 | ||
261 | memset(fattr, 0, sizeof(*fattr)); | 271 | memset(fattr, 0, sizeof(*fattr)); |
262 | fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU; | 272 | fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU; |
@@ -305,7 +315,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
305 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 315 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
306 | 316 | ||
307 | tcon = cifs_sb->tcon; | 317 | tcon = cifs_sb->tcon; |
308 | cFYI(1, ("Getting info on %s", full_path)); | 318 | cFYI(1, "Getting info on %s", full_path); |
309 | 319 | ||
310 | /* could have done a find first instead but this returns more info */ | 320 | /* could have done a find first instead but this returns more info */ |
311 | rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, | 321 | rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, |
@@ -323,6 +333,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
323 | 333 | ||
324 | if (*pinode == NULL) { | 334 | if (*pinode == NULL) { |
325 | /* get new inode */ | 335 | /* get new inode */ |
336 | cifs_fill_uniqueid(sb, &fattr); | ||
326 | *pinode = cifs_iget(sb, &fattr); | 337 | *pinode = cifs_iget(sb, &fattr); |
327 | if (!*pinode) | 338 | if (!*pinode) |
328 | rc = -ENOMEM; | 339 | rc = -ENOMEM; |
@@ -373,7 +384,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path, | |||
373 | &bytes_read, &pbuf, &buf_type); | 384 | &bytes_read, &pbuf, &buf_type); |
374 | if ((rc == 0) && (bytes_read >= 8)) { | 385 | if ((rc == 0) && (bytes_read >= 8)) { |
375 | if (memcmp("IntxBLK", pbuf, 8) == 0) { | 386 | if (memcmp("IntxBLK", pbuf, 8) == 0) { |
376 | cFYI(1, ("Block device")); | 387 | cFYI(1, "Block device"); |
377 | fattr->cf_mode |= S_IFBLK; | 388 | fattr->cf_mode |= S_IFBLK; |
378 | fattr->cf_dtype = DT_BLK; | 389 | fattr->cf_dtype = DT_BLK; |
379 | if (bytes_read == 24) { | 390 | if (bytes_read == 24) { |
@@ -385,7 +396,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path, | |||
385 | fattr->cf_rdev = MKDEV(mjr, mnr); | 396 | fattr->cf_rdev = MKDEV(mjr, mnr); |
386 | } | 397 | } |
387 | } else if (memcmp("IntxCHR", pbuf, 8) == 0) { | 398 | } else if (memcmp("IntxCHR", pbuf, 8) == 0) { |
388 | cFYI(1, ("Char device")); | 399 | cFYI(1, "Char device"); |
389 | fattr->cf_mode |= S_IFCHR; | 400 | fattr->cf_mode |= S_IFCHR; |
390 | fattr->cf_dtype = DT_CHR; | 401 | fattr->cf_dtype = DT_CHR; |
391 | if (bytes_read == 24) { | 402 | if (bytes_read == 24) { |
@@ -397,7 +408,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path, | |||
397 | fattr->cf_rdev = MKDEV(mjr, mnr); | 408 | fattr->cf_rdev = MKDEV(mjr, mnr); |
398 | } | 409 | } |
399 | } else if (memcmp("IntxLNK", pbuf, 7) == 0) { | 410 | } else if (memcmp("IntxLNK", pbuf, 7) == 0) { |
400 | cFYI(1, ("Symlink")); | 411 | cFYI(1, "Symlink"); |
401 | fattr->cf_mode |= S_IFLNK; | 412 | fattr->cf_mode |= S_IFLNK; |
402 | fattr->cf_dtype = DT_LNK; | 413 | fattr->cf_dtype = DT_LNK; |
403 | } else { | 414 | } else { |
@@ -439,10 +450,10 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, | |||
439 | else if (rc > 3) { | 450 | else if (rc > 3) { |
440 | mode = le32_to_cpu(*((__le32 *)ea_value)); | 451 | mode = le32_to_cpu(*((__le32 *)ea_value)); |
441 | fattr->cf_mode &= ~SFBITS_MASK; | 452 | fattr->cf_mode &= ~SFBITS_MASK; |
442 | cFYI(1, ("special bits 0%o org mode 0%o", mode, | 453 | cFYI(1, "special bits 0%o org mode 0%o", mode, |
443 | fattr->cf_mode)); | 454 | fattr->cf_mode); |
444 | fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode; | 455 | fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode; |
445 | cFYI(1, ("special mode bits 0%o", mode)); | 456 | cFYI(1, "special mode bits 0%o", mode); |
446 | } | 457 | } |
447 | 458 | ||
448 | return 0; | 459 | return 0; |
@@ -548,11 +559,11 @@ int cifs_get_inode_info(struct inode **pinode, | |||
548 | struct cifs_fattr fattr; | 559 | struct cifs_fattr fattr; |
549 | 560 | ||
550 | pTcon = cifs_sb->tcon; | 561 | pTcon = cifs_sb->tcon; |
551 | cFYI(1, ("Getting info on %s", full_path)); | 562 | cFYI(1, "Getting info on %s", full_path); |
552 | 563 | ||
553 | if ((pfindData == NULL) && (*pinode != NULL)) { | 564 | if ((pfindData == NULL) && (*pinode != NULL)) { |
554 | if (CIFS_I(*pinode)->clientCanCacheRead) { | 565 | if (CIFS_I(*pinode)->clientCanCacheRead) { |
555 | cFYI(1, ("No need to revalidate cached inode sizes")); | 566 | cFYI(1, "No need to revalidate cached inode sizes"); |
556 | return rc; | 567 | return rc; |
557 | } | 568 | } |
558 | } | 569 | } |
@@ -618,7 +629,7 @@ int cifs_get_inode_info(struct inode **pinode, | |||
618 | cifs_sb->mnt_cifs_flags & | 629 | cifs_sb->mnt_cifs_flags & |
619 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 630 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
620 | if (rc1 || !fattr.cf_uniqueid) { | 631 | if (rc1 || !fattr.cf_uniqueid) { |
621 | cFYI(1, ("GetSrvInodeNum rc %d", rc1)); | 632 | cFYI(1, "GetSrvInodeNum rc %d", rc1); |
622 | fattr.cf_uniqueid = iunique(sb, ROOT_I); | 633 | fattr.cf_uniqueid = iunique(sb, ROOT_I); |
623 | cifs_autodisable_serverino(cifs_sb); | 634 | cifs_autodisable_serverino(cifs_sb); |
624 | } | 635 | } |
@@ -634,13 +645,13 @@ int cifs_get_inode_info(struct inode **pinode, | |||
634 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { | 645 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { |
635 | tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid); | 646 | tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid); |
636 | if (tmprc) | 647 | if (tmprc) |
637 | cFYI(1, ("cifs_sfu_type failed: %d", tmprc)); | 648 | cFYI(1, "cifs_sfu_type failed: %d", tmprc); |
638 | } | 649 | } |
639 | 650 | ||
640 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 651 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
641 | /* fill in 0777 bits from ACL */ | 652 | /* fill in 0777 bits from ACL */ |
642 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { | 653 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { |
643 | cFYI(1, ("Getting mode bits from ACL")); | 654 | cFYI(1, "Getting mode bits from ACL"); |
644 | cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid); | 655 | cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid); |
645 | } | 656 | } |
646 | #endif | 657 | #endif |
@@ -715,6 +726,16 @@ cifs_find_inode(struct inode *inode, void *opaque) | |||
715 | if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) | 726 | if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) |
716 | return 0; | 727 | return 0; |
717 | 728 | ||
729 | /* | ||
730 | * uh oh -- it's a directory. We can't use it since hardlinked dirs are | ||
731 | * verboten. Disable serverino and return it as if it were found, the | ||
732 | * caller can discard it, generate a uniqueid and retry the find | ||
733 | */ | ||
734 | if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) { | ||
735 | fattr->cf_flags |= CIFS_FATTR_INO_COLLISION; | ||
736 | cifs_autodisable_serverino(CIFS_SB(inode->i_sb)); | ||
737 | } | ||
738 | |||
718 | return 1; | 739 | return 1; |
719 | } | 740 | } |
720 | 741 | ||
@@ -734,15 +755,22 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr) | |||
734 | unsigned long hash; | 755 | unsigned long hash; |
735 | struct inode *inode; | 756 | struct inode *inode; |
736 | 757 | ||
737 | cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid)); | 758 | retry_iget5_locked: |
759 | cFYI(1, "looking for uniqueid=%llu", fattr->cf_uniqueid); | ||
738 | 760 | ||
739 | /* hash down to 32-bits on 32-bit arch */ | 761 | /* hash down to 32-bits on 32-bit arch */ |
740 | hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid); | 762 | hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid); |
741 | 763 | ||
742 | inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr); | 764 | inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr); |
743 | |||
744 | /* we have fattrs in hand, update the inode */ | ||
745 | if (inode) { | 765 | if (inode) { |
766 | /* was there a problematic inode number collision? */ | ||
767 | if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) { | ||
768 | iput(inode); | ||
769 | fattr->cf_uniqueid = iunique(sb, ROOT_I); | ||
770 | fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION; | ||
771 | goto retry_iget5_locked; | ||
772 | } | ||
773 | |||
746 | cifs_fattr_to_inode(inode, fattr); | 774 | cifs_fattr_to_inode(inode, fattr); |
747 | if (sb->s_flags & MS_NOATIME) | 775 | if (sb->s_flags & MS_NOATIME) |
748 | inode->i_flags |= S_NOATIME | S_NOCMTIME; | 776 | inode->i_flags |= S_NOATIME | S_NOCMTIME; |
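
The two hunks above cooperate to handle server inode-number collisions: cifs_find_inode() flags a lookup that would alias an already-instantiated directory (hardlinked directories being verboten) and autodisables server inode numbers, while cifs_iget() reacts to the flag by dropping the aliased inode, generating a local unique id with iunique(), and retrying iget5_locked(). The flag is set inside the comparison callback because that callback cannot easily discard the inode itself; the caller does the cleanup and retry. The goto-based retry is equivalent to the loop sketched here, a simplified restatement that assumes the cifs-private helpers visible above and is not meant as replacement code:

/*
 * Sketch of the retry logic added to cifs_iget(), written as a loop
 * for clarity. Assumes cifs_find_inode, cifs_init_inode,
 * cifs_uniqueid_to_ino_t and CIFS_FATTR_INO_COLLISION from the hunks
 * above; the fattr-to-inode fill-in after a successful get is elided.
 */
static struct inode *cifs_iget_sketch(struct super_block *sb,
				      struct cifs_fattr *fattr)
{
	struct inode *inode;
	unsigned long hash;

	for (;;) {
		/* hash down to 32-bits on 32-bit arch */
		hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
		inode = iget5_locked(sb, hash, cifs_find_inode,
				     cifs_init_inode, fattr);
		if (!inode || !(fattr->cf_flags & CIFS_FATTR_INO_COLLISION))
			return inode;

		/* collision with a live directory: fall back to a
		 * locally generated inode number and look up again */
		iput(inode);
		fattr->cf_uniqueid = iunique(sb, ROOT_I);
		fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
	}
}
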
@@ -780,7 +808,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) | |||
780 | return ERR_PTR(-ENOMEM); | 808 | return ERR_PTR(-ENOMEM); |
781 | 809 | ||
782 | if (rc && cifs_sb->tcon->ipc) { | 810 | if (rc && cifs_sb->tcon->ipc) { |
783 | cFYI(1, ("ipc connection - fake read inode")); | 811 | cFYI(1, "ipc connection - fake read inode"); |
784 | inode->i_mode |= S_IFDIR; | 812 | inode->i_mode |= S_IFDIR; |
785 | inode->i_nlink = 2; | 813 | inode->i_nlink = 2; |
786 | inode->i_op = &cifs_ipc_inode_ops; | 814 | inode->i_op = &cifs_ipc_inode_ops; |
@@ -842,7 +870,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid, | |||
842 | * server times. | 870 | * server times. |
843 | */ | 871 | */ |
844 | if (set_time && (attrs->ia_valid & ATTR_CTIME)) { | 872 | if (set_time && (attrs->ia_valid & ATTR_CTIME)) { |
845 | cFYI(1, ("CIFS - CTIME changed")); | 873 | cFYI(1, "CIFS - CTIME changed"); |
846 | info_buf.ChangeTime = | 874 | info_buf.ChangeTime = |
847 | cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime)); | 875 | cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime)); |
848 | } else | 876 | } else |
@@ -877,8 +905,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid, | |||
877 | goto out; | 905 | goto out; |
878 | } | 906 | } |
879 | 907 | ||
880 | cFYI(1, ("calling SetFileInfo since SetPathInfo for " | 908 | cFYI(1, "calling SetFileInfo since SetPathInfo for " |
881 | "times not supported by this server")); | 909 | "times not supported by this server"); |
882 | rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, | 910 | rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, |
883 | SYNCHRONIZE | FILE_WRITE_ATTRIBUTES, | 911 | SYNCHRONIZE | FILE_WRITE_ATTRIBUTES, |
884 | CREATE_NOT_DIR, &netfid, &oplock, | 912 | CREATE_NOT_DIR, &netfid, &oplock, |
@@ -1036,7 +1064,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) | |||
1036 | struct iattr *attrs = NULL; | 1064 | struct iattr *attrs = NULL; |
1037 | __u32 dosattr = 0, origattr = 0; | 1065 | __u32 dosattr = 0, origattr = 0; |
1038 | 1066 | ||
1039 | cFYI(1, ("cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry)); | 1067 | cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry); |
1040 | 1068 | ||
1041 | xid = GetXid(); | 1069 | xid = GetXid(); |
1042 | 1070 | ||
@@ -1055,7 +1083,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) | |||
1055 | rc = CIFSPOSIXDelFile(xid, tcon, full_path, | 1083 | rc = CIFSPOSIXDelFile(xid, tcon, full_path, |
1056 | SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls, | 1084 | SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls, |
1057 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 1085 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
1058 | cFYI(1, ("posix del rc %d", rc)); | 1086 | cFYI(1, "posix del rc %d", rc); |
1059 | if ((rc == 0) || (rc == -ENOENT)) | 1087 | if ((rc == 0) || (rc == -ENOENT)) |
1060 | goto psx_del_no_retry; | 1088 | goto psx_del_no_retry; |
1061 | } | 1089 | } |
@@ -1129,7 +1157,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1129 | struct inode *newinode = NULL; | 1157 | struct inode *newinode = NULL; |
1130 | struct cifs_fattr fattr; | 1158 | struct cifs_fattr fattr; |
1131 | 1159 | ||
1132 | cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode)); | 1160 | cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode); |
1133 | 1161 | ||
1134 | xid = GetXid(); | 1162 | xid = GetXid(); |
1135 | 1163 | ||
@@ -1164,7 +1192,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1164 | kfree(pInfo); | 1192 | kfree(pInfo); |
1165 | goto mkdir_retry_old; | 1193 | goto mkdir_retry_old; |
1166 | } else if (rc) { | 1194 | } else if (rc) { |
1167 | cFYI(1, ("posix mkdir returned 0x%x", rc)); | 1195 | cFYI(1, "posix mkdir returned 0x%x", rc); |
1168 | d_drop(direntry); | 1196 | d_drop(direntry); |
1169 | } else { | 1197 | } else { |
1170 | if (pInfo->Type == cpu_to_le32(-1)) { | 1198 | if (pInfo->Type == cpu_to_le32(-1)) { |
@@ -1181,6 +1209,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1181 | direntry->d_op = &cifs_dentry_ops; | 1209 | direntry->d_op = &cifs_dentry_ops; |
1182 | 1210 | ||
1183 | cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); | 1211 | cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); |
1212 | cifs_fill_uniqueid(inode->i_sb, &fattr); | ||
1184 | newinode = cifs_iget(inode->i_sb, &fattr); | 1213 | newinode = cifs_iget(inode->i_sb, &fattr); |
1185 | if (!newinode) { | 1214 | if (!newinode) { |
1186 | kfree(pInfo); | 1215 | kfree(pInfo); |
@@ -1190,12 +1219,12 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1190 | d_instantiate(direntry, newinode); | 1219 | d_instantiate(direntry, newinode); |
1191 | 1220 | ||
1192 | #ifdef CONFIG_CIFS_DEBUG2 | 1221 | #ifdef CONFIG_CIFS_DEBUG2 |
1193 | cFYI(1, ("instantiated dentry %p %s to inode %p", | 1222 | cFYI(1, "instantiated dentry %p %s to inode %p", |
1194 | direntry, direntry->d_name.name, newinode)); | 1223 | direntry, direntry->d_name.name, newinode); |
1195 | 1224 | ||
1196 | if (newinode->i_nlink != 2) | 1225 | if (newinode->i_nlink != 2) |
1197 | cFYI(1, ("unexpected number of links %d", | 1226 | cFYI(1, "unexpected number of links %d", |
1198 | newinode->i_nlink)); | 1227 | newinode->i_nlink); |
1199 | #endif | 1228 | #endif |
1200 | } | 1229 | } |
1201 | kfree(pInfo); | 1230 | kfree(pInfo); |
@@ -1206,7 +1235,7 @@ mkdir_retry_old: | |||
1206 | rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls, | 1235 | rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls, |
1207 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 1236 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
1208 | if (rc) { | 1237 | if (rc) { |
1209 | cFYI(1, ("cifs_mkdir returned 0x%x", rc)); | 1238 | cFYI(1, "cifs_mkdir returned 0x%x", rc); |
1210 | d_drop(direntry); | 1239 | d_drop(direntry); |
1211 | } else { | 1240 | } else { |
1212 | mkdir_get_info: | 1241 | mkdir_get_info: |
@@ -1309,7 +1338,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
1309 | char *full_path = NULL; | 1338 | char *full_path = NULL; |
1310 | struct cifsInodeInfo *cifsInode; | 1339 | struct cifsInodeInfo *cifsInode; |
1311 | 1340 | ||
1312 | cFYI(1, ("cifs_rmdir, inode = 0x%p", inode)); | 1341 | cFYI(1, "cifs_rmdir, inode = 0x%p", inode); |
1313 | 1342 | ||
1314 | xid = GetXid(); | 1343 | xid = GetXid(); |
1315 | 1344 | ||
@@ -1511,6 +1540,11 @@ cifs_inode_needs_reval(struct inode *inode) | |||
1511 | if (time_after_eq(jiffies, cifs_i->time + HZ)) | 1540 | if (time_after_eq(jiffies, cifs_i->time + HZ)) |
1512 | return true; | 1541 | return true; |
1513 | 1542 | ||
1543 | /* hardlinked files w/ noserverino get "special" treatment */ | ||
1544 | if (!(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && | ||
1545 | S_ISREG(inode->i_mode) && inode->i_nlink != 1) | ||
1546 | return true; | ||
1547 | |||
1514 | return false; | 1548 | return false; |
1515 | } | 1549 | } |
1516 | 1550 | ||
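
The new check in cifs_inode_needs_reval() follows from the collision handling above: once server inode numbers are off (noserverino, or autodisabled), the client has no stable identifier to tie several hardlinked names to one cached inode, so a regular file whose link count is not 1 is always treated as stale. A condensed, hedged restatement of the post-patch decision, with the function's other checks elided:

/*
 * Illustrative condensation of cifs_inode_needs_reval() after this
 * patch; only the checks visible in the hunk above are shown, the
 * remaining tests in the real function are elided.
 */
static bool needs_reval_sketch(struct inode *inode)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* attribute cache older than roughly one second */
	if (time_after_eq(jiffies, cifs_i->time + HZ))
		return true;

	/* no server inode numbers: hardlinked files can alias, so a
	 * link count other than 1 forces revalidation every time */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
	    S_ISREG(inode->i_mode) && inode->i_nlink != 1)
		return true;

	return false;
}
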
@@ -1577,9 +1611,9 @@ int cifs_revalidate_dentry(struct dentry *dentry) | |||
1577 | goto check_inval; | 1611 | goto check_inval; |
1578 | } | 1612 | } |
1579 | 1613 | ||
1580 | cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " | 1614 | cFYI(1, "Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " |
1581 | "jiffies %ld", full_path, inode, inode->i_count.counter, | 1615 | "jiffies %ld", full_path, inode, inode->i_count.counter, |
1582 | dentry, dentry->d_time, jiffies)); | 1616 | dentry, dentry->d_time, jiffies); |
1583 | 1617 | ||
1584 | if (CIFS_SB(sb)->tcon->unix_ext) | 1618 | if (CIFS_SB(sb)->tcon->unix_ext) |
1585 | rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); | 1619 | rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); |
@@ -1673,12 +1707,12 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
1673 | rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid, | 1707 | rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid, |
1674 | npid, false); | 1708 | npid, false); |
1675 | cifsFileInfo_put(open_file); | 1709 | cifsFileInfo_put(open_file); |
1676 | cFYI(1, ("SetFSize for attrs rc = %d", rc)); | 1710 | cFYI(1, "SetFSize for attrs rc = %d", rc); |
1677 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | 1711 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { |
1678 | unsigned int bytes_written; | 1712 | unsigned int bytes_written; |
1679 | rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size, | 1713 | rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size, |
1680 | &bytes_written, NULL, NULL, 1); | 1714 | &bytes_written, NULL, NULL, 1); |
1681 | cFYI(1, ("Wrt seteof rc %d", rc)); | 1715 | cFYI(1, "Wrt seteof rc %d", rc); |
1682 | } | 1716 | } |
1683 | } else | 1717 | } else |
1684 | rc = -EINVAL; | 1718 | rc = -EINVAL; |
@@ -1692,7 +1726,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
1692 | false, cifs_sb->local_nls, | 1726 | false, cifs_sb->local_nls, |
1693 | cifs_sb->mnt_cifs_flags & | 1727 | cifs_sb->mnt_cifs_flags & |
1694 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1728 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1695 | cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc)); | 1729 | cFYI(1, "SetEOF by path (setattrs) rc = %d", rc); |
1696 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | 1730 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { |
1697 | __u16 netfid; | 1731 | __u16 netfid; |
1698 | int oplock = 0; | 1732 | int oplock = 0; |
@@ -1709,7 +1743,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
1709 | attrs->ia_size, | 1743 | attrs->ia_size, |
1710 | &bytes_written, NULL, | 1744 | &bytes_written, NULL, |
1711 | NULL, 1); | 1745 | NULL, 1); |
1712 | cFYI(1, ("wrt seteof rc %d", rc)); | 1746 | cFYI(1, "wrt seteof rc %d", rc); |
1713 | CIFSSMBClose(xid, pTcon, netfid); | 1747 | CIFSSMBClose(xid, pTcon, netfid); |
1714 | } | 1748 | } |
1715 | } | 1749 | } |
@@ -1737,8 +1771,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) | |||
1737 | struct cifs_unix_set_info_args *args = NULL; | 1771 | struct cifs_unix_set_info_args *args = NULL; |
1738 | struct cifsFileInfo *open_file; | 1772 | struct cifsFileInfo *open_file; |
1739 | 1773 | ||
1740 | cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x", | 1774 | cFYI(1, "setattr_unix on file %s attrs->ia_valid=0x%x", |
1741 | direntry->d_name.name, attrs->ia_valid)); | 1775 | direntry->d_name.name, attrs->ia_valid); |
1742 | 1776 | ||
1743 | xid = GetXid(); | 1777 | xid = GetXid(); |
1744 | 1778 | ||
@@ -1868,8 +1902,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) | |||
1868 | 1902 | ||
1869 | xid = GetXid(); | 1903 | xid = GetXid(); |
1870 | 1904 | ||
1871 | cFYI(1, ("setattr on file %s attrs->iavalid 0x%x", | 1905 | cFYI(1, "setattr on file %s attrs->iavalid 0x%x", |
1872 | direntry->d_name.name, attrs->ia_valid)); | 1906 | direntry->d_name.name, attrs->ia_valid); |
1873 | 1907 | ||
1874 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) { | 1908 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) { |
1875 | /* check if we have permission to change attrs */ | 1909 | /* check if we have permission to change attrs */ |
@@ -1926,7 +1960,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) | |||
1926 | attrs->ia_valid &= ~ATTR_MODE; | 1960 | attrs->ia_valid &= ~ATTR_MODE; |
1927 | 1961 | ||
1928 | if (attrs->ia_valid & ATTR_MODE) { | 1962 | if (attrs->ia_valid & ATTR_MODE) { |
1929 | cFYI(1, ("Mode changed to 0%o", attrs->ia_mode)); | 1963 | cFYI(1, "Mode changed to 0%o", attrs->ia_mode); |
1930 | mode = attrs->ia_mode; | 1964 | mode = attrs->ia_mode; |
1931 | } | 1965 | } |
1932 | 1966 | ||
@@ -2012,7 +2046,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
2012 | #if 0 | 2046 | #if 0 |
2013 | void cifs_delete_inode(struct inode *inode) | 2047 | void cifs_delete_inode(struct inode *inode) |
2014 | { | 2048 | { |
2015 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode)); | 2049 | cFYI(1, "In cifs_delete_inode, inode = 0x%p", inode); |
2016 | /* may have to add back in if and when safe distributed caching of | 2050 | /* may have to add back in if and when safe distributed caching of |
2017 | directories added e.g. via FindNotify */ | 2051 | directories added e.g. via FindNotify */ |
2018 | } | 2052 | } |
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index f94650683a00..505926f1ee6b 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -47,7 +47,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) | |||
47 | 47 | ||
48 | xid = GetXid(); | 48 | xid = GetXid(); |
49 | 49 | ||
50 | cFYI(1, ("ioctl file %p cmd %u arg %lu", filep, command, arg)); | 50 | cFYI(1, "ioctl file %p cmd %u arg %lu", filep, command, arg); |
51 | 51 | ||
52 | cifs_sb = CIFS_SB(inode->i_sb); | 52 | cifs_sb = CIFS_SB(inode->i_sb); |
53 | 53 | ||
@@ -64,12 +64,12 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) | |||
64 | 64 | ||
65 | switch (command) { | 65 | switch (command) { |
66 | case CIFS_IOC_CHECKUMOUNT: | 66 | case CIFS_IOC_CHECKUMOUNT: |
67 | cFYI(1, ("User unmount attempted")); | 67 | cFYI(1, "User unmount attempted"); |
68 | if (cifs_sb->mnt_uid == current_uid()) | 68 | if (cifs_sb->mnt_uid == current_uid()) |
69 | rc = 0; | 69 | rc = 0; |
70 | else { | 70 | else { |
71 | rc = -EACCES; | 71 | rc = -EACCES; |
72 | cFYI(1, ("uids do not match")); | 72 | cFYI(1, "uids do not match"); |
73 | } | 73 | } |
74 | break; | 74 | break; |
75 | #ifdef CONFIG_CIFS_POSIX | 75 | #ifdef CONFIG_CIFS_POSIX |
@@ -97,11 +97,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) | |||
97 | /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid, | 97 | /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid, |
98 | extAttrBits, &ExtAttrMask);*/ | 98 | extAttrBits, &ExtAttrMask);*/ |
99 | } | 99 | } |
100 | cFYI(1, ("set flags not implemented yet")); | 100 | cFYI(1, "set flags not implemented yet"); |
101 | break; | 101 | break; |
102 | #endif /* CONFIG_CIFS_POSIX */ | 102 | #endif /* CONFIG_CIFS_POSIX */ |
103 | default: | 103 | default: |
104 | cFYI(1, ("unsupported ioctl")); | 104 | cFYI(1, "unsupported ioctl"); |
105 | break; | 105 | break; |
106 | } | 106 | } |
107 | 107 | ||
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index c1a9d4236a8c..473ca8033656 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -139,7 +139,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd) | |||
139 | if (!full_path) | 139 | if (!full_path) |
140 | goto out; | 140 | goto out; |
141 | 141 | ||
142 | cFYI(1, ("Full path: %s inode = 0x%p", full_path, inode)); | 142 | cFYI(1, "Full path: %s inode = 0x%p", full_path, inode); |
143 | 143 | ||
144 | rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path, | 144 | rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path, |
145 | cifs_sb->local_nls); | 145 | cifs_sb->local_nls); |
@@ -178,8 +178,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
178 | return rc; | 178 | return rc; |
179 | } | 179 | } |
180 | 180 | ||
181 | cFYI(1, ("Full path: %s", full_path)); | 181 | cFYI(1, "Full path: %s", full_path); |
182 | cFYI(1, ("symname is %s", symname)); | 182 | cFYI(1, "symname is %s", symname); |
183 | 183 | ||
184 | /* BB what if DFS and this volume is on different share? BB */ | 184 | /* BB what if DFS and this volume is on different share? BB */ |
185 | if (pTcon->unix_ext) | 185 | if (pTcon->unix_ext) |
@@ -198,8 +198,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
198 | inode->i_sb, xid, NULL); | 198 | inode->i_sb, xid, NULL); |
199 | 199 | ||
200 | if (rc != 0) { | 200 | if (rc != 0) { |
201 | cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d", | 201 | cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d", |
202 | rc)); | 202 | rc); |
203 | } else { | 203 | } else { |
204 | if (pTcon->nocase) | 204 | if (pTcon->nocase) |
205 | direntry->d_op = &cifs_ci_dentry_ops; | 205 | direntry->d_op = &cifs_ci_dentry_ops; |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d1474996a812..1394aa37f26c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -51,7 +51,7 @@ _GetXid(void) | |||
51 | if (GlobalTotalActiveXid > GlobalMaxActiveXid) | 51 | if (GlobalTotalActiveXid > GlobalMaxActiveXid) |
52 | GlobalMaxActiveXid = GlobalTotalActiveXid; | 52 | GlobalMaxActiveXid = GlobalTotalActiveXid; |
53 | if (GlobalTotalActiveXid > 65000) | 53 | if (GlobalTotalActiveXid > 65000) |
54 | cFYI(1, ("warning: more than 65000 requests active")); | 54 | cFYI(1, "warning: more than 65000 requests active"); |
55 | xid = GlobalCurrentXid++; | 55 | xid = GlobalCurrentXid++; |
56 | spin_unlock(&GlobalMid_Lock); | 56 | spin_unlock(&GlobalMid_Lock); |
57 | return xid; | 57 | return xid; |
@@ -88,7 +88,7 @@ void | |||
88 | sesInfoFree(struct cifsSesInfo *buf_to_free) | 88 | sesInfoFree(struct cifsSesInfo *buf_to_free) |
89 | { | 89 | { |
90 | if (buf_to_free == NULL) { | 90 | if (buf_to_free == NULL) { |
91 | cFYI(1, ("Null buffer passed to sesInfoFree")); | 91 | cFYI(1, "Null buffer passed to sesInfoFree"); |
92 | return; | 92 | return; |
93 | } | 93 | } |
94 | 94 | ||
@@ -126,7 +126,7 @@ void | |||
126 | tconInfoFree(struct cifsTconInfo *buf_to_free) | 126 | tconInfoFree(struct cifsTconInfo *buf_to_free) |
127 | { | 127 | { |
128 | if (buf_to_free == NULL) { | 128 | if (buf_to_free == NULL) { |
129 | cFYI(1, ("Null buffer passed to tconInfoFree")); | 129 | cFYI(1, "Null buffer passed to tconInfoFree"); |
130 | return; | 130 | return; |
131 | } | 131 | } |
132 | atomic_dec(&tconInfoAllocCount); | 132 | atomic_dec(&tconInfoAllocCount); |
@@ -166,7 +166,7 @@ void | |||
166 | cifs_buf_release(void *buf_to_free) | 166 | cifs_buf_release(void *buf_to_free) |
167 | { | 167 | { |
168 | if (buf_to_free == NULL) { | 168 | if (buf_to_free == NULL) { |
169 | /* cFYI(1, ("Null buffer passed to cifs_buf_release"));*/ | 169 | /* cFYI(1, "Null buffer passed to cifs_buf_release");*/ |
170 | return; | 170 | return; |
171 | } | 171 | } |
172 | mempool_free(buf_to_free, cifs_req_poolp); | 172 | mempool_free(buf_to_free, cifs_req_poolp); |
@@ -202,7 +202,7 @@ cifs_small_buf_release(void *buf_to_free) | |||
202 | { | 202 | { |
203 | 203 | ||
204 | if (buf_to_free == NULL) { | 204 | if (buf_to_free == NULL) { |
205 | cFYI(1, ("Null buffer passed to cifs_small_buf_release")); | 205 | cFYI(1, "Null buffer passed to cifs_small_buf_release"); |
206 | return; | 206 | return; |
207 | } | 207 | } |
208 | mempool_free(buf_to_free, cifs_sm_req_poolp); | 208 | mempool_free(buf_to_free, cifs_sm_req_poolp); |
@@ -345,19 +345,19 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
345 | /* with userid/password pairs found on the smb session */ | 345 | /* with userid/password pairs found on the smb session */ |
346 | /* for other target tcp/ip addresses BB */ | 346 | /* for other target tcp/ip addresses BB */ |
347 | if (current_fsuid() != treeCon->ses->linux_uid) { | 347 | if (current_fsuid() != treeCon->ses->linux_uid) { |
348 | cFYI(1, ("Multiuser mode and UID " | 348 | cFYI(1, "Multiuser mode and UID " |
349 | "did not match tcon uid")); | 349 | "did not match tcon uid"); |
350 | read_lock(&cifs_tcp_ses_lock); | 350 | read_lock(&cifs_tcp_ses_lock); |
351 | list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { | 351 | list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { |
352 | ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list); | 352 | ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list); |
353 | if (ses->linux_uid == current_fsuid()) { | 353 | if (ses->linux_uid == current_fsuid()) { |
354 | if (ses->server == treeCon->ses->server) { | 354 | if (ses->server == treeCon->ses->server) { |
355 | cFYI(1, ("found matching uid substitute right smb_uid")); | 355 | cFYI(1, "found matching uid substitute right smb_uid"); |
356 | buffer->Uid = ses->Suid; | 356 | buffer->Uid = ses->Suid; |
357 | break; | 357 | break; |
358 | } else { | 358 | } else { |
359 | /* BB eventually call cifs_setup_session here */ | 359 | /* BB eventually call cifs_setup_session here */ |
360 | cFYI(1, ("local UID found but no smb sess with this server exists")); | 360 | cFYI(1, "local UID found but no smb sess with this server exists"); |
361 | } | 361 | } |
362 | } | 362 | } |
363 | } | 363 | } |
@@ -394,17 +394,16 @@ checkSMBhdr(struct smb_hdr *smb, __u16 mid) | |||
394 | if (smb->Command == SMB_COM_LOCKING_ANDX) | 394 | if (smb->Command == SMB_COM_LOCKING_ANDX) |
395 | return 0; | 395 | return 0; |
396 | else | 396 | else |
397 | cERROR(1, ("Received Request not response")); | 397 | cERROR(1, "Received Request not response"); |
398 | } | 398 | } |
399 | } else { /* bad signature or mid */ | 399 | } else { /* bad signature or mid */ |
400 | if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) | 400 | if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) |
401 | cERROR(1, | 401 | cERROR(1, "Bad protocol string signature header %x", |
402 | ("Bad protocol string signature header %x", | 402 | *(unsigned int *) smb->Protocol); |
403 | *(unsigned int *) smb->Protocol)); | ||
404 | if (mid != smb->Mid) | 403 | if (mid != smb->Mid) |
405 | cERROR(1, ("Mids do not match")); | 404 | cERROR(1, "Mids do not match"); |
406 | } | 405 | } |
407 | cERROR(1, ("bad smb detected. The Mid=%d", smb->Mid)); | 406 | cERROR(1, "bad smb detected. The Mid=%d", smb->Mid); |
408 | return 1; | 407 | return 1; |
409 | } | 408 | } |
410 | 409 | ||
@@ -413,7 +412,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
413 | { | 412 | { |
414 | __u32 len = smb->smb_buf_length; | 413 | __u32 len = smb->smb_buf_length; |
415 | __u32 clc_len; /* calculated length */ | 414 | __u32 clc_len; /* calculated length */ |
416 | cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len)); | 415 | cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len); |
417 | 416 | ||
418 | if (length < 2 + sizeof(struct smb_hdr)) { | 417 | if (length < 2 + sizeof(struct smb_hdr)) { |
419 | if ((length >= sizeof(struct smb_hdr) - 1) | 418 | if ((length >= sizeof(struct smb_hdr) - 1) |
@@ -437,15 +436,15 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
437 | tmp[sizeof(struct smb_hdr)+1] = 0; | 436 | tmp[sizeof(struct smb_hdr)+1] = 0; |
438 | return 0; | 437 | return 0; |
439 | } | 438 | } |
440 | cERROR(1, ("rcvd invalid byte count (bcc)")); | 439 | cERROR(1, "rcvd invalid byte count (bcc)"); |
441 | } else { | 440 | } else { |
442 | cERROR(1, ("Length less than smb header size")); | 441 | cERROR(1, "Length less than smb header size"); |
443 | } | 442 | } |
444 | return 1; | 443 | return 1; |
445 | } | 444 | } |
446 | if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { | 445 | if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
447 | cERROR(1, ("smb length greater than MaxBufSize, mid=%d", | 446 | cERROR(1, "smb length greater than MaxBufSize, mid=%d", |
448 | smb->Mid)); | 447 | smb->Mid); |
449 | return 1; | 448 | return 1; |
450 | } | 449 | } |
451 | 450 | ||
@@ -454,8 +453,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
454 | clc_len = smbCalcSize_LE(smb); | 453 | clc_len = smbCalcSize_LE(smb); |
455 | 454 | ||
456 | if (4 + len != length) { | 455 | if (4 + len != length) { |
457 | cERROR(1, ("Length read does not match RFC1001 length %d", | 456 | cERROR(1, "Length read does not match RFC1001 length %d", |
458 | len)); | 457 | len); |
459 | return 1; | 458 | return 1; |
460 | } | 459 | } |
461 | 460 | ||
@@ -466,8 +465,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
466 | if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) | 465 | if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) |
467 | return 0; /* bcc wrapped */ | 466 | return 0; /* bcc wrapped */ |
468 | } | 467 | } |
469 | cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d", | 468 | cFYI(1, "Calculated size %d vs length %d mismatch for mid %d", |
470 | clc_len, 4 + len, smb->Mid)); | 469 | clc_len, 4 + len, smb->Mid); |
471 | /* Windows XP can return a few bytes too much, presumably | 470 | /* Windows XP can return a few bytes too much, presumably |
472 | an illegal pad, at the end of byte range lock responses | 471 | an illegal pad, at the end of byte range lock responses |
473 | so we allow for that three byte pad, as long as actual | 472 | so we allow for that three byte pad, as long as actual |
@@ -482,8 +481,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) | |||
482 | if ((4+len > clc_len) && (len <= clc_len + 512)) | 481 | if ((4+len > clc_len) && (len <= clc_len + 512)) |
483 | return 0; | 482 | return 0; |
484 | else { | 483 | else { |
485 | cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d", | 484 | cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d", |
486 | len, smb->Mid)); | 485 | len, smb->Mid); |
487 | return 1; | 486 | return 1; |
488 | } | 487 | } |
489 | } | 488 | } |
@@ -501,7 +500,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
501 | struct cifsFileInfo *netfile; | 500 | struct cifsFileInfo *netfile; |
502 | int rc; | 501 | int rc; |
503 | 502 | ||
504 | cFYI(1, ("Checking for oplock break or dnotify response")); | 503 | cFYI(1, "Checking for oplock break or dnotify response"); |
505 | if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && | 504 | if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && |
506 | (pSMB->hdr.Flags & SMBFLG_RESPONSE)) { | 505 | (pSMB->hdr.Flags & SMBFLG_RESPONSE)) { |
507 | struct smb_com_transaction_change_notify_rsp *pSMBr = | 506 | struct smb_com_transaction_change_notify_rsp *pSMBr = |
@@ -513,15 +512,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
513 | 512 | ||
514 | pnotify = (struct file_notify_information *) | 513 | pnotify = (struct file_notify_information *) |
515 | ((char *)&pSMBr->hdr.Protocol + data_offset); | 514 | ((char *)&pSMBr->hdr.Protocol + data_offset); |
516 | cFYI(1, ("dnotify on %s Action: 0x%x", | 515 | cFYI(1, "dnotify on %s Action: 0x%x", |
517 | pnotify->FileName, pnotify->Action)); | 516 | pnotify->FileName, pnotify->Action); |
518 | /* cifs_dump_mem("Rcvd notify Data: ",buf, | 517 | /* cifs_dump_mem("Rcvd notify Data: ",buf, |
519 | sizeof(struct smb_hdr)+60); */ | 518 | sizeof(struct smb_hdr)+60); */ |
520 | return true; | 519 | return true; |
521 | } | 520 | } |
522 | if (pSMBr->hdr.Status.CifsError) { | 521 | if (pSMBr->hdr.Status.CifsError) { |
523 | cFYI(1, ("notify err 0x%d", | 522 | cFYI(1, "notify err 0x%d", |
524 | pSMBr->hdr.Status.CifsError)); | 523 | pSMBr->hdr.Status.CifsError); |
525 | return true; | 524 | return true; |
526 | } | 525 | } |
527 | return false; | 526 | return false; |
@@ -535,7 +534,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
535 | large dirty files cached on the client */ | 534 | large dirty files cached on the client */ |
536 | if ((NT_STATUS_INVALID_HANDLE) == | 535 | if ((NT_STATUS_INVALID_HANDLE) == |
537 | le32_to_cpu(pSMB->hdr.Status.CifsError)) { | 536 | le32_to_cpu(pSMB->hdr.Status.CifsError)) { |
538 | cFYI(1, ("invalid handle on oplock break")); | 537 | cFYI(1, "invalid handle on oplock break"); |
539 | return true; | 538 | return true; |
540 | } else if (ERRbadfid == | 539 | } else if (ERRbadfid == |
541 | le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { | 540 | le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { |
@@ -547,8 +546,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
547 | if (pSMB->hdr.WordCount != 8) | 546 | if (pSMB->hdr.WordCount != 8) |
548 | return false; | 547 | return false; |
549 | 548 | ||
550 | cFYI(1, ("oplock type 0x%d level 0x%d", | 549 | cFYI(1, "oplock type 0x%d level 0x%d", |
551 | pSMB->LockType, pSMB->OplockLevel)); | 550 | pSMB->LockType, pSMB->OplockLevel); |
552 | if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) | 551 | if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) |
553 | return false; | 552 | return false; |
554 | 553 | ||
@@ -579,15 +578,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
579 | return true; | 578 | return true; |
580 | } | 579 | } |
581 | 580 | ||
582 | cFYI(1, ("file id match, oplock break")); | 581 | cFYI(1, "file id match, oplock break"); |
583 | pCifsInode = CIFS_I(netfile->pInode); | 582 | pCifsInode = CIFS_I(netfile->pInode); |
584 | pCifsInode->clientCanCacheAll = false; | 583 | pCifsInode->clientCanCacheAll = false; |
585 | if (pSMB->OplockLevel == 0) | 584 | if (pSMB->OplockLevel == 0) |
586 | pCifsInode->clientCanCacheRead = false; | 585 | pCifsInode->clientCanCacheRead = false; |
587 | rc = slow_work_enqueue(&netfile->oplock_break); | 586 | rc = slow_work_enqueue(&netfile->oplock_break); |
588 | if (rc) { | 587 | if (rc) { |
589 | cERROR(1, ("failed to enqueue oplock " | 588 | cERROR(1, "failed to enqueue oplock " |
590 | "break: %d\n", rc)); | 589 | "break: %d\n", rc); |
591 | } else { | 590 | } else { |
592 | netfile->oplock_break_cancelled = false; | 591 | netfile->oplock_break_cancelled = false; |
593 | } | 592 | } |
@@ -597,12 +596,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
597 | } | 596 | } |
598 | read_unlock(&GlobalSMBSeslock); | 597 | read_unlock(&GlobalSMBSeslock); |
599 | read_unlock(&cifs_tcp_ses_lock); | 598 | read_unlock(&cifs_tcp_ses_lock); |
600 | cFYI(1, ("No matching file for oplock break")); | 599 | cFYI(1, "No matching file for oplock break"); |
601 | return true; | 600 | return true; |
602 | } | 601 | } |
603 | } | 602 | } |
604 | read_unlock(&cifs_tcp_ses_lock); | 603 | read_unlock(&cifs_tcp_ses_lock); |
605 | cFYI(1, ("Can not process oplock break for non-existent connection")); | 604 | cFYI(1, "Can not process oplock break for non-existent connection"); |
606 | return true; | 605 | return true; |
607 | } | 606 | } |
608 | 607 | ||
@@ -721,11 +720,11 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) | |||
721 | { | 720 | { |
722 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { | 721 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { |
723 | cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; | 722 | cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; |
724 | cERROR(1, ("Autodisabling the use of server inode numbers on " | 723 | cERROR(1, "Autodisabling the use of server inode numbers on " |
725 | "%s. This server doesn't seem to support them " | 724 | "%s. This server doesn't seem to support them " |
726 | "properly. Hardlinks will not be recognized on this " | 725 | "properly. Hardlinks will not be recognized on this " |
727 | "mount. Consider mounting with the \"noserverino\" " | 726 | "mount. Consider mounting with the \"noserverino\" " |
728 | "option to silence this message.", | 727 | "option to silence this message.", |
729 | cifs_sb->tcon->treeName)); | 728 | cifs_sb->tcon->treeName); |
730 | } | 729 | } |
731 | } | 730 | } |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index bd6d6895730d..d35d52889cb5 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -149,7 +149,7 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst) | |||
149 | else if (address_family == AF_INET6) | 149 | else if (address_family == AF_INET6) |
150 | ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL); | 150 | ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL); |
151 | 151 | ||
152 | cFYI(DBG2, ("address conversion returned %d for %s", ret, cp)); | 152 | cFYI(DBG2, "address conversion returned %d for %s", ret, cp); |
153 | if (ret > 0) | 153 | if (ret > 0) |
154 | ret = 1; | 154 | ret = 1; |
155 | return ret; | 155 | return ret; |
@@ -870,8 +870,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr) | |||
870 | } | 870 | } |
871 | /* else ERRHRD class errors or junk - return EIO */ | 871 | /* else ERRHRD class errors or junk - return EIO */ |
872 | 872 | ||
873 | cFYI(1, ("Mapping smb error code %d to POSIX err %d", | 873 | cFYI(1, "Mapping smb error code %d to POSIX err %d", |
874 | smberrcode, rc)); | 874 | smberrcode, rc); |
875 | 875 | ||
876 | /* generic corrective action e.g. reconnect SMB session on | 876 | /* generic corrective action e.g. reconnect SMB session on |
877 | * ERRbaduid could be added */ | 877 | * ERRbaduid could be added */ |
@@ -940,20 +940,20 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset) | |||
940 | SMB_TIME *st = (SMB_TIME *)&time; | 940 | SMB_TIME *st = (SMB_TIME *)&time; |
941 | SMB_DATE *sd = (SMB_DATE *)&date; | 941 | SMB_DATE *sd = (SMB_DATE *)&date; |
942 | 942 | ||
943 | cFYI(1, ("date %d time %d", date, time)); | 943 | cFYI(1, "date %d time %d", date, time); |
944 | 944 | ||
945 | sec = 2 * st->TwoSeconds; | 945 | sec = 2 * st->TwoSeconds; |
946 | min = st->Minutes; | 946 | min = st->Minutes; |
947 | if ((sec > 59) || (min > 59)) | 947 | if ((sec > 59) || (min > 59)) |
948 | cERROR(1, ("illegal time min %d sec %d", min, sec)); | 948 | cERROR(1, "illegal time min %d sec %d", min, sec); |
949 | sec += (min * 60); | 949 | sec += (min * 60); |
950 | sec += 60 * 60 * st->Hours; | 950 | sec += 60 * 60 * st->Hours; |
951 | if (st->Hours > 24) | 951 | if (st->Hours > 24) |
952 | cERROR(1, ("illegal hours %d", st->Hours)); | 952 | cERROR(1, "illegal hours %d", st->Hours); |
953 | days = sd->Day; | 953 | days = sd->Day; |
954 | month = sd->Month; | 954 | month = sd->Month; |
955 | if ((days > 31) || (month > 12)) { | 955 | if ((days > 31) || (month > 12)) { |
956 | cERROR(1, ("illegal date, month %d day: %d", month, days)); | 956 | cERROR(1, "illegal date, month %d day: %d", month, days); |
957 | if (month > 12) | 957 | if (month > 12) |
958 | month = 12; | 958 | month = 12; |
959 | } | 959 | } |
@@ -979,7 +979,7 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset) | |||
979 | 979 | ||
980 | ts.tv_sec = sec + offset; | 980 | ts.tv_sec = sec + offset; |
981 | 981 | ||
982 | /* cFYI(1,("sec after cnvrt dos to unix time %d",sec)); */ | 982 | /* cFYI(1, "sec after cnvrt dos to unix time %d",sec); */ |
983 | 983 | ||
984 | ts.tv_nsec = 0; | 984 | ts.tv_nsec = 0; |
985 | return ts; | 985 | return ts; |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 18e0bc1fb593..daf1753af674 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -47,15 +47,15 @@ static void dump_cifs_file_struct(struct file *file, char *label) | |||
47 | if (file) { | 47 | if (file) { |
48 | cf = file->private_data; | 48 | cf = file->private_data; |
49 | if (cf == NULL) { | 49 | if (cf == NULL) { |
50 | cFYI(1, ("empty cifs private file data")); | 50 | cFYI(1, "empty cifs private file data"); |
51 | return; | 51 | return; |
52 | } | 52 | } |
53 | if (cf->invalidHandle) | 53 | if (cf->invalidHandle) |
54 | cFYI(1, ("invalid handle")); | 54 | cFYI(1, "invalid handle"); |
55 | if (cf->srch_inf.endOfSearch) | 55 | if (cf->srch_inf.endOfSearch) |
56 | cFYI(1, ("end of search")); | 56 | cFYI(1, "end of search"); |
57 | if (cf->srch_inf.emptyDir) | 57 | if (cf->srch_inf.emptyDir) |
58 | cFYI(1, ("empty dir")); | 58 | cFYI(1, "empty dir"); |
59 | } | 59 | } |
60 | } | 60 | } |
61 | #else | 61 | #else |
@@ -76,7 +76,7 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, | |||
76 | struct inode *inode; | 76 | struct inode *inode; |
77 | struct super_block *sb = parent->d_inode->i_sb; | 77 | struct super_block *sb = parent->d_inode->i_sb; |
78 | 78 | ||
79 | cFYI(1, ("For %s", name->name)); | 79 | cFYI(1, "For %s", name->name); |
80 | 80 | ||
81 | if (parent->d_op && parent->d_op->d_hash) | 81 | if (parent->d_op && parent->d_op->d_hash) |
82 | parent->d_op->d_hash(parent, name); | 82 | parent->d_op->d_hash(parent, name); |
@@ -214,7 +214,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb, | |||
214 | fid, | 214 | fid, |
215 | cifs_sb->local_nls); | 215 | cifs_sb->local_nls); |
216 | if (CIFSSMBClose(xid, ptcon, fid)) { | 216 | if (CIFSSMBClose(xid, ptcon, fid)) { |
217 | cFYI(1, ("Error closing temporary reparsepoint open)")); | 217 | cFYI(1, "Error closing temporary reparsepoint open"); |
218 | } | 218 | } |
219 | } | 219 | } |
220 | } | 220 | } |
@@ -252,7 +252,7 @@ static int initiate_cifs_search(const int xid, struct file *file) | |||
252 | if (full_path == NULL) | 252 | if (full_path == NULL) |
253 | return -ENOMEM; | 253 | return -ENOMEM; |
254 | 254 | ||
255 | cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos)); | 255 | cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos); |
256 | 256 | ||
257 | ffirst_retry: | 257 | ffirst_retry: |
258 | /* test for Unix extensions */ | 258 | /* test for Unix extensions */ |
@@ -297,7 +297,7 @@ static int cifs_unicode_bytelen(char *str) | |||
297 | if (ustr[len] == 0) | 297 | if (ustr[len] == 0) |
298 | return len << 1; | 298 | return len << 1; |
299 | } | 299 | } |
300 | cFYI(1, ("Unicode string longer than PATH_MAX found")); | 300 | cFYI(1, "Unicode string longer than PATH_MAX found"); |
301 | return len << 1; | 301 | return len << 1; |
302 | } | 302 | } |
303 | 303 | ||
@@ -314,19 +314,18 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) | |||
314 | pfData->FileNameLength; | 314 | pfData->FileNameLength; |
315 | } else | 315 | } else |
316 | new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); | 316 | new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); |
317 | cFYI(1, ("new entry %p old entry %p", new_entry, old_entry)); | 317 | cFYI(1, "new entry %p old entry %p", new_entry, old_entry); |
318 | /* validate that new_entry is not past end of SMB */ | 318 | /* validate that new_entry is not past end of SMB */ |
319 | if (new_entry >= end_of_smb) { | 319 | if (new_entry >= end_of_smb) { |
320 | cERROR(1, | 320 | cERROR(1, "search entry %p began after end of SMB %p old entry %p", |
321 | ("search entry %p began after end of SMB %p old entry %p", | 321 | new_entry, end_of_smb, old_entry); |
322 | new_entry, end_of_smb, old_entry)); | ||
323 | return NULL; | 322 | return NULL; |
324 | } else if (((level == SMB_FIND_FILE_INFO_STANDARD) && | 323 | } else if (((level == SMB_FIND_FILE_INFO_STANDARD) && |
325 | (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) | 324 | (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) |
326 | || ((level != SMB_FIND_FILE_INFO_STANDARD) && | 325 | || ((level != SMB_FIND_FILE_INFO_STANDARD) && |
327 | (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) { | 326 | (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) { |
328 | cERROR(1, ("search entry %p extends after end of SMB %p", | 327 | cERROR(1, "search entry %p extends after end of SMB %p", |
329 | new_entry, end_of_smb)); | 328 | new_entry, end_of_smb); |
330 | return NULL; | 329 | return NULL; |
331 | } else | 330 | } else |
332 | return new_entry; | 331 | return new_entry; |
@@ -380,8 +379,8 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) | |||
380 | filename = &pFindData->FileName[0]; | 379 | filename = &pFindData->FileName[0]; |
381 | len = pFindData->FileNameLength; | 380 | len = pFindData->FileNameLength; |
382 | } else { | 381 | } else { |
383 | cFYI(1, ("Unknown findfirst level %d", | 382 | cFYI(1, "Unknown findfirst level %d", |
384 | cfile->srch_inf.info_level)); | 383 | cfile->srch_inf.info_level); |
385 | } | 384 | } |
386 | 385 | ||
387 | if (filename) { | 386 | if (filename) { |
@@ -481,7 +480,7 @@ static int cifs_save_resume_key(const char *current_entry, | |||
481 | len = (unsigned int)pFindData->FileNameLength; | 480 | len = (unsigned int)pFindData->FileNameLength; |
482 | cifsFile->srch_inf.resume_key = pFindData->ResumeKey; | 481 | cifsFile->srch_inf.resume_key = pFindData->ResumeKey; |
483 | } else { | 482 | } else { |
484 | cFYI(1, ("Unknown findfirst level %d", level)); | 483 | cFYI(1, "Unknown findfirst level %d", level); |
485 | return -EINVAL; | 484 | return -EINVAL; |
486 | } | 485 | } |
487 | cifsFile->srch_inf.resume_name_len = len; | 486 | cifsFile->srch_inf.resume_name_len = len; |
@@ -525,7 +524,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
525 | is_dir_changed(file)) || | 524 | is_dir_changed(file)) || |
526 | (index_to_find < first_entry_in_buffer)) { | 525 | (index_to_find < first_entry_in_buffer)) { |
527 | /* close and restart search */ | 526 | /* close and restart search */ |
528 | cFYI(1, ("search backing up - close and restart search")); | 527 | cFYI(1, "search backing up - close and restart search"); |
529 | write_lock(&GlobalSMBSeslock); | 528 | write_lock(&GlobalSMBSeslock); |
530 | if (!cifsFile->srch_inf.endOfSearch && | 529 | if (!cifsFile->srch_inf.endOfSearch && |
531 | !cifsFile->invalidHandle) { | 530 | !cifsFile->invalidHandle) { |
@@ -535,7 +534,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
535 | } else | 534 | } else |
536 | write_unlock(&GlobalSMBSeslock); | 535 | write_unlock(&GlobalSMBSeslock); |
537 | if (cifsFile->srch_inf.ntwrk_buf_start) { | 536 | if (cifsFile->srch_inf.ntwrk_buf_start) { |
538 | cFYI(1, ("freeing SMB ff cache buf on search rewind")); | 537 | cFYI(1, "freeing SMB ff cache buf on search rewind"); |
539 | if (cifsFile->srch_inf.smallBuf) | 538 | if (cifsFile->srch_inf.smallBuf) |
540 | cifs_small_buf_release(cifsFile->srch_inf. | 539 | cifs_small_buf_release(cifsFile->srch_inf. |
541 | ntwrk_buf_start); | 540 | ntwrk_buf_start); |
@@ -546,8 +545,8 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
546 | } | 545 | } |
547 | rc = initiate_cifs_search(xid, file); | 546 | rc = initiate_cifs_search(xid, file); |
548 | if (rc) { | 547 | if (rc) { |
549 | cFYI(1, ("error %d reinitiating a search on rewind", | 548 | cFYI(1, "error %d reinitiating a search on rewind", |
550 | rc)); | 549 | rc); |
551 | return rc; | 550 | return rc; |
552 | } | 551 | } |
553 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); | 552 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); |
@@ -555,7 +554,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
555 | 554 | ||
556 | while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && | 555 | while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && |
557 | (rc == 0) && !cifsFile->srch_inf.endOfSearch) { | 556 | (rc == 0) && !cifsFile->srch_inf.endOfSearch) { |
558 | cFYI(1, ("calling findnext2")); | 557 | cFYI(1, "calling findnext2"); |
559 | rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, | 558 | rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, |
560 | &cifsFile->srch_inf); | 559 | &cifsFile->srch_inf); |
561 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); | 560 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); |
@@ -575,7 +574,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
575 | first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry | 574 | first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry |
576 | - cifsFile->srch_inf.entries_in_buffer; | 575 | - cifsFile->srch_inf.entries_in_buffer; |
577 | pos_in_buf = index_to_find - first_entry_in_buffer; | 576 | pos_in_buf = index_to_find - first_entry_in_buffer; |
578 | cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf)); | 577 | cFYI(1, "found entry - pos_in_buf %d", pos_in_buf); |
579 | 578 | ||
580 | for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) { | 579 | for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) { |
581 | /* go entry by entry figuring out which is first */ | 580 | /* go entry by entry figuring out which is first */ |
@@ -584,19 +583,19 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
584 | } | 583 | } |
585 | if ((current_entry == NULL) && (i < pos_in_buf)) { | 584 | if ((current_entry == NULL) && (i < pos_in_buf)) { |
586 | /* BB fixme - check if we should flag this error */ | 585 | /* BB fixme - check if we should flag this error */ |
587 | cERROR(1, ("reached end of buf searching for pos in buf" | 586 | cERROR(1, "reached end of buf searching for pos in buf" |
588 | " %d index to find %lld rc %d", | 587 | " %d index to find %lld rc %d", |
589 | pos_in_buf, index_to_find, rc)); | 588 | pos_in_buf, index_to_find, rc); |
590 | } | 589 | } |
591 | rc = 0; | 590 | rc = 0; |
592 | *ppCurrentEntry = current_entry; | 591 | *ppCurrentEntry = current_entry; |
593 | } else { | 592 | } else { |
594 | cFYI(1, ("index not in buffer - could not findnext into it")); | 593 | cFYI(1, "index not in buffer - could not findnext into it"); |
595 | return 0; | 594 | return 0; |
596 | } | 595 | } |
597 | 596 | ||
598 | if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) { | 597 | if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) { |
599 | cFYI(1, ("can not return entries pos_in_buf beyond last")); | 598 | cFYI(1, "can not return entries pos_in_buf beyond last"); |
600 | *num_to_ret = 0; | 599 | *num_to_ret = 0; |
601 | } else | 600 | } else |
602 | *num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf; | 601 | *num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf; |
@@ -656,12 +655,12 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, | |||
656 | /* one byte length, no name conversion */ | 655 | /* one byte length, no name conversion */ |
657 | len = (unsigned int)pFindData->FileNameLength; | 656 | len = (unsigned int)pFindData->FileNameLength; |
658 | } else { | 657 | } else { |
659 | cFYI(1, ("Unknown findfirst level %d", level)); | 658 | cFYI(1, "Unknown findfirst level %d", level); |
660 | return -EINVAL; | 659 | return -EINVAL; |
661 | } | 660 | } |
662 | 661 | ||
663 | if (len > max_len) { | 662 | if (len > max_len) { |
664 | cERROR(1, ("bad search response length %d past smb end", len)); | 663 | cERROR(1, "bad search response length %d past smb end", len); |
665 | return -EINVAL; | 664 | return -EINVAL; |
666 | } | 665 | } |
667 | 666 | ||
@@ -754,7 +753,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir, | |||
754 | * case already. Why should we be clobbering other errors from it? | 753 | * case already. Why should we be clobbering other errors from it? |
755 | */ | 754 | */ |
756 | if (rc) { | 755 | if (rc) { |
757 | cFYI(1, ("filldir rc = %d", rc)); | 756 | cFYI(1, "filldir rc = %d", rc); |
758 | rc = -EOVERFLOW; | 757 | rc = -EOVERFLOW; |
759 | } | 758 | } |
760 | dput(tmp_dentry); | 759 | dput(tmp_dentry); |
@@ -786,7 +785,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
786 | case 0: | 785 | case 0: |
787 | if (filldir(direntry, ".", 1, file->f_pos, | 786 | if (filldir(direntry, ".", 1, file->f_pos, |
788 | file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) { | 787 | file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) { |
789 | cERROR(1, ("Filldir for current dir failed")); | 788 | cERROR(1, "Filldir for current dir failed"); |
790 | rc = -ENOMEM; | 789 | rc = -ENOMEM; |
791 | break; | 790 | break; |
792 | } | 791 | } |
@@ -794,7 +793,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
794 | case 1: | 793 | case 1: |
795 | if (filldir(direntry, "..", 2, file->f_pos, | 794 | if (filldir(direntry, "..", 2, file->f_pos, |
796 | file->f_path.dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { | 795 | file->f_path.dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { |
797 | cERROR(1, ("Filldir for parent dir failed")); | 796 | cERROR(1, "Filldir for parent dir failed"); |
798 | rc = -ENOMEM; | 797 | rc = -ENOMEM; |
799 | break; | 798 | break; |
800 | } | 799 | } |
@@ -807,7 +806,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
807 | 806 | ||
808 | if (file->private_data == NULL) { | 807 | if (file->private_data == NULL) { |
809 | rc = initiate_cifs_search(xid, file); | 808 | rc = initiate_cifs_search(xid, file); |
810 | cFYI(1, ("initiate cifs search rc %d", rc)); | 809 | cFYI(1, "initiate cifs search rc %d", rc); |
811 | if (rc) { | 810 | if (rc) { |
812 | FreeXid(xid); | 811 | FreeXid(xid); |
813 | return rc; | 812 | return rc; |
@@ -821,7 +820,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
821 | cifsFile = file->private_data; | 820 | cifsFile = file->private_data; |
822 | if (cifsFile->srch_inf.endOfSearch) { | 821 | if (cifsFile->srch_inf.endOfSearch) { |
823 | if (cifsFile->srch_inf.emptyDir) { | 822 | if (cifsFile->srch_inf.emptyDir) { |
824 | cFYI(1, ("End of search, empty dir")); | 823 | cFYI(1, "End of search, empty dir"); |
825 | rc = 0; | 824 | rc = 0; |
826 | break; | 825 | break; |
827 | } | 826 | } |
@@ -833,16 +832,16 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
833 | rc = find_cifs_entry(xid, pTcon, file, | 832 | rc = find_cifs_entry(xid, pTcon, file, |
834 | ¤t_entry, &num_to_fill); | 833 | ¤t_entry, &num_to_fill); |
835 | if (rc) { | 834 | if (rc) { |
836 | cFYI(1, ("fce error %d", rc)); | 835 | cFYI(1, "fce error %d", rc); |
837 | goto rddir2_exit; | 836 | goto rddir2_exit; |
838 | } else if (current_entry != NULL) { | 837 | } else if (current_entry != NULL) { |
839 | cFYI(1, ("entry %lld found", file->f_pos)); | 838 | cFYI(1, "entry %lld found", file->f_pos); |
840 | } else { | 839 | } else { |
841 | cFYI(1, ("could not find entry")); | 840 | cFYI(1, "could not find entry"); |
842 | goto rddir2_exit; | 841 | goto rddir2_exit; |
843 | } | 842 | } |
844 | cFYI(1, ("loop through %d times filling dir for net buf %p", | 843 | cFYI(1, "loop through %d times filling dir for net buf %p", |
845 | num_to_fill, cifsFile->srch_inf.ntwrk_buf_start)); | 844 | num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); |
846 | max_len = smbCalcSize((struct smb_hdr *) | 845 | max_len = smbCalcSize((struct smb_hdr *) |
847 | cifsFile->srch_inf.ntwrk_buf_start); | 846 | cifsFile->srch_inf.ntwrk_buf_start); |
848 | end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; | 847 | end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; |
@@ -851,8 +850,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
851 | for (i = 0; (i < num_to_fill) && (rc == 0); i++) { | 850 | for (i = 0; (i < num_to_fill) && (rc == 0); i++) { |
852 | if (current_entry == NULL) { | 851 | if (current_entry == NULL) { |
853 | /* evaluate whether this case is an error */ | 852 | /* evaluate whether this case is an error */ |
854 | cERROR(1, ("past SMB end, num to fill %d i %d", | 853 | cERROR(1, "past SMB end, num to fill %d i %d", |
855 | num_to_fill, i)); | 854 | num_to_fill, i); |
856 | break; | 855 | break; |
857 | } | 856 | } |
858 | /* if buggy server returns . and .. late do | 857 | /* if buggy server returns . and .. late do |
@@ -867,8 +866,8 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
867 | file->f_pos++; | 866 | file->f_pos++; |
868 | if (file->f_pos == | 867 | if (file->f_pos == |
869 | cifsFile->srch_inf.index_of_last_entry) { | 868 | cifsFile->srch_inf.index_of_last_entry) { |
870 | cFYI(1, ("last entry in buf at pos %lld %s", | 869 | cFYI(1, "last entry in buf at pos %lld %s", |
871 | file->f_pos, tmp_buf)); | 870 | file->f_pos, tmp_buf); |
872 | cifs_save_resume_key(current_entry, cifsFile); | 871 | cifs_save_resume_key(current_entry, cifsFile); |
873 | break; | 872 | break; |
874 | } else | 873 | } else |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7c3fd7463f44..7707389bdf2c 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -35,9 +35,11 @@ | |||
35 | extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, | 35 | extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, |
36 | unsigned char *p24); | 36 | unsigned char *p24); |
37 | 37 | ||
38 | /* Checks if this is the first smb session to be reconnected after | 38 | /* |
39 | the socket has been reestablished (so we know whether to use vc 0). | 39 | * Checks if this is the first smb session to be reconnected after |
40 | Called while holding the cifs_tcp_ses_lock, so do not block */ | 40 | * the socket has been reestablished (so we know whether to use vc 0). |
41 | * Called while holding the cifs_tcp_ses_lock, so do not block | ||
42 | */ | ||
41 | static bool is_first_ses_reconnect(struct cifsSesInfo *ses) | 43 | static bool is_first_ses_reconnect(struct cifsSesInfo *ses) |
42 | { | 44 | { |
43 | struct list_head *tmp; | 45 | struct list_head *tmp; |
@@ -284,7 +286,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, | |||
284 | int len; | 286 | int len; |
285 | char *data = *pbcc_area; | 287 | char *data = *pbcc_area; |
286 | 288 | ||
287 | cFYI(1, ("bleft %d", bleft)); | 289 | cFYI(1, "bleft %d", bleft); |
288 | 290 | ||
289 | /* | 291 | /* |
290 | * Windows servers do not always double null terminate their final | 292 | * Windows servers do not always double null terminate their final |
@@ -301,7 +303,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, | |||
301 | 303 | ||
302 | kfree(ses->serverOS); | 304 | kfree(ses->serverOS); |
303 | ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); | 305 | ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); |
304 | cFYI(1, ("serverOS=%s", ses->serverOS)); | 306 | cFYI(1, "serverOS=%s", ses->serverOS); |
305 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; | 307 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; |
306 | data += len; | 308 | data += len; |
307 | bleft -= len; | 309 | bleft -= len; |
@@ -310,7 +312,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, | |||
310 | 312 | ||
311 | kfree(ses->serverNOS); | 313 | kfree(ses->serverNOS); |
312 | ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); | 314 | ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); |
313 | cFYI(1, ("serverNOS=%s", ses->serverNOS)); | 315 | cFYI(1, "serverNOS=%s", ses->serverNOS); |
314 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; | 316 | len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; |
315 | data += len; | 317 | data += len; |
316 | bleft -= len; | 318 | bleft -= len; |
@@ -319,7 +321,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, | |||
319 | 321 | ||
320 | kfree(ses->serverDomain); | 322 | kfree(ses->serverDomain); |
321 | ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp); | 323 | ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp); |
322 | cFYI(1, ("serverDomain=%s", ses->serverDomain)); | 324 | cFYI(1, "serverDomain=%s", ses->serverDomain); |
323 | 325 | ||
324 | return; | 326 | return; |
325 | } | 327 | } |
@@ -332,7 +334,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft, | |||
332 | int len; | 334 | int len; |
333 | char *bcc_ptr = *pbcc_area; | 335 | char *bcc_ptr = *pbcc_area; |
334 | 336 | ||
335 | cFYI(1, ("decode sessetup ascii. bleft %d", bleft)); | 337 | cFYI(1, "decode sessetup ascii. bleft %d", bleft); |
336 | 338 | ||
337 | len = strnlen(bcc_ptr, bleft); | 339 | len = strnlen(bcc_ptr, bleft); |
338 | if (len >= bleft) | 340 | if (len >= bleft) |
@@ -344,7 +346,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft, | |||
344 | if (ses->serverOS) | 346 | if (ses->serverOS) |
345 | strncpy(ses->serverOS, bcc_ptr, len); | 347 | strncpy(ses->serverOS, bcc_ptr, len); |
346 | if (strncmp(ses->serverOS, "OS/2", 4) == 0) { | 348 | if (strncmp(ses->serverOS, "OS/2", 4) == 0) { |
347 | cFYI(1, ("OS/2 server")); | 349 | cFYI(1, "OS/2 server"); |
348 | ses->flags |= CIFS_SES_OS2; | 350 | ses->flags |= CIFS_SES_OS2; |
349 | } | 351 | } |
350 | 352 | ||
@@ -373,7 +375,7 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft, | |||
373 | /* BB For newer servers which do not support Unicode, | 375 | /* BB For newer servers which do not support Unicode, |
374 | but thus do return domain here we could add parsing | 376 | but thus do return domain here we could add parsing |
375 | for it later, but it is not very important */ | 377 | for it later, but it is not very important */ |
376 | cFYI(1, ("ascii: bytes left %d", bleft)); | 378 | cFYI(1, "ascii: bytes left %d", bleft); |
377 | 379 | ||
378 | return rc; | 380 | return rc; |
379 | } | 381 | } |
@@ -384,16 +386,16 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | |||
384 | CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; | 386 | CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; |
385 | 387 | ||
386 | if (blob_len < sizeof(CHALLENGE_MESSAGE)) { | 388 | if (blob_len < sizeof(CHALLENGE_MESSAGE)) { |
387 | cERROR(1, ("challenge blob len %d too small", blob_len)); | 389 | cERROR(1, "challenge blob len %d too small", blob_len); |
388 | return -EINVAL; | 390 | return -EINVAL; |
389 | } | 391 | } |
390 | 392 | ||
391 | if (memcmp(pblob->Signature, "NTLMSSP", 8)) { | 393 | if (memcmp(pblob->Signature, "NTLMSSP", 8)) { |
392 | cERROR(1, ("blob signature incorrect %s", pblob->Signature)); | 394 | cERROR(1, "blob signature incorrect %s", pblob->Signature); |
393 | return -EINVAL; | 395 | return -EINVAL; |
394 | } | 396 | } |
395 | if (pblob->MessageType != NtLmChallenge) { | 397 | if (pblob->MessageType != NtLmChallenge) { |
396 | cERROR(1, ("Incorrect message type %d", pblob->MessageType)); | 398 | cERROR(1, "Incorrect message type %d", pblob->MessageType); |
397 | return -EINVAL; | 399 | return -EINVAL; |
398 | } | 400 | } |
399 | 401 | ||
@@ -447,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, | |||
447 | This function returns the length of the data in the blob */ | 449 | This function returns the length of the data in the blob */ |
448 | static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | 450 | static int build_ntlmssp_auth_blob(unsigned char *pbuffer, |
449 | struct cifsSesInfo *ses, | 451 | struct cifsSesInfo *ses, |
450 | const struct nls_table *nls_cp, int first) | 452 | const struct nls_table *nls_cp, bool first) |
451 | { | 453 | { |
452 | AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; | 454 | AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; |
453 | __u32 flags; | 455 | __u32 flags; |
@@ -546,7 +548,7 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB, | |||
546 | 548 | ||
547 | static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB, | 549 | static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB, |
548 | struct cifsSesInfo *ses, | 550 | struct cifsSesInfo *ses, |
549 | const struct nls_table *nls, int first_time) | 551 | const struct nls_table *nls, bool first_time) |
550 | { | 552 | { |
551 | int bloblen; | 553 | int bloblen; |
552 | 554 | ||
@@ -559,8 +561,8 @@ static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB, | |||
559 | #endif | 561 | #endif |
560 | 562 | ||
561 | int | 563 | int |
562 | CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, | 564 | CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, |
563 | const struct nls_table *nls_cp) | 565 | const struct nls_table *nls_cp) |
564 | { | 566 | { |
565 | int rc = 0; | 567 | int rc = 0; |
566 | int wct; | 568 | int wct; |
@@ -577,13 +579,18 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, | |||
577 | int bytes_remaining; | 579 | int bytes_remaining; |
578 | struct key *spnego_key = NULL; | 580 | struct key *spnego_key = NULL; |
579 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ | 581 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ |
582 | bool first_time; | ||
580 | 583 | ||
581 | if (ses == NULL) | 584 | if (ses == NULL) |
582 | return -EINVAL; | 585 | return -EINVAL; |
583 | 586 | ||
587 | read_lock(&cifs_tcp_ses_lock); | ||
588 | first_time = is_first_ses_reconnect(ses); | ||
589 | read_unlock(&cifs_tcp_ses_lock); | ||
590 | |||
584 | type = ses->server->secType; | 591 | type = ses->server->secType; |
585 | 592 | ||
586 | cFYI(1, ("sess setup type %d", type)); | 593 | cFYI(1, "sess setup type %d", type); |
587 | ssetup_ntlmssp_authenticate: | 594 | ssetup_ntlmssp_authenticate: |
588 | if (phase == NtLmChallenge) | 595 | if (phase == NtLmChallenge) |
589 | phase = NtLmAuthenticate; /* if ntlmssp, now final phase */ | 596 | phase = NtLmAuthenticate; /* if ntlmssp, now final phase */ |
@@ -664,7 +671,7 @@ ssetup_ntlmssp_authenticate: | |||
664 | changed to do higher than lanman dialect and | 671 | changed to do higher than lanman dialect and |
665 | we reconnected would we ever calc signing_key? */ | 672 | we reconnected would we ever calc signing_key? */ |
666 | 673 | ||
667 | cFYI(1, ("Negotiating LANMAN setting up strings")); | 674 | cFYI(1, "Negotiating LANMAN setting up strings"); |
668 | /* Unicode not allowed for LANMAN dialects */ | 675 | /* Unicode not allowed for LANMAN dialects */ |
669 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | 676 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); |
670 | #endif | 677 | #endif |
@@ -744,7 +751,7 @@ ssetup_ntlmssp_authenticate: | |||
744 | unicode_ssetup_strings(&bcc_ptr, ses, nls_cp); | 751 | unicode_ssetup_strings(&bcc_ptr, ses, nls_cp); |
745 | } else | 752 | } else |
746 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | 753 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); |
747 | } else if (type == Kerberos || type == MSKerberos) { | 754 | } else if (type == Kerberos) { |
748 | #ifdef CONFIG_CIFS_UPCALL | 755 | #ifdef CONFIG_CIFS_UPCALL |
749 | struct cifs_spnego_msg *msg; | 756 | struct cifs_spnego_msg *msg; |
750 | spnego_key = cifs_get_spnego_key(ses); | 757 | spnego_key = cifs_get_spnego_key(ses); |
@@ -758,17 +765,17 @@ ssetup_ntlmssp_authenticate: | |||
758 | /* check version field to make sure that cifs.upcall is | 765 | /* check version field to make sure that cifs.upcall is |
759 | sending us a response in an expected form */ | 766 | sending us a response in an expected form */ |
760 | if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { | 767 | if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { |
761 | cERROR(1, ("incorrect version of cifs.upcall (expected" | 768 | cERROR(1, "incorrect version of cifs.upcall (expected" |
762 | " %d but got %d)", | 769 | " %d but got %d)", |
763 | CIFS_SPNEGO_UPCALL_VERSION, msg->version)); | 770 | CIFS_SPNEGO_UPCALL_VERSION, msg->version); |
764 | rc = -EKEYREJECTED; | 771 | rc = -EKEYREJECTED; |
765 | goto ssetup_exit; | 772 | goto ssetup_exit; |
766 | } | 773 | } |
767 | /* bail out if key is too long */ | 774 | /* bail out if key is too long */ |
768 | if (msg->sesskey_len > | 775 | if (msg->sesskey_len > |
769 | sizeof(ses->server->mac_signing_key.data.krb5)) { | 776 | sizeof(ses->server->mac_signing_key.data.krb5)) { |
770 | cERROR(1, ("Kerberos signing key too long (%u bytes)", | 777 | cERROR(1, "Kerberos signing key too long (%u bytes)", |
771 | msg->sesskey_len)); | 778 | msg->sesskey_len); |
772 | rc = -EOVERFLOW; | 779 | rc = -EOVERFLOW; |
773 | goto ssetup_exit; | 780 | goto ssetup_exit; |
774 | } | 781 | } |
@@ -796,7 +803,7 @@ ssetup_ntlmssp_authenticate: | |||
796 | /* BB: is this right? */ | 803 | /* BB: is this right? */ |
797 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | 804 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); |
798 | #else /* ! CONFIG_CIFS_UPCALL */ | 805 | #else /* ! CONFIG_CIFS_UPCALL */ |
799 | cERROR(1, ("Kerberos negotiated but upcall support disabled!")); | 806 | cERROR(1, "Kerberos negotiated but upcall support disabled!"); |
800 | rc = -ENOSYS; | 807 | rc = -ENOSYS; |
801 | goto ssetup_exit; | 808 | goto ssetup_exit; |
802 | #endif /* CONFIG_CIFS_UPCALL */ | 809 | #endif /* CONFIG_CIFS_UPCALL */ |
@@ -804,12 +811,12 @@ ssetup_ntlmssp_authenticate: | |||
804 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 811 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
805 | if (type == RawNTLMSSP) { | 812 | if (type == RawNTLMSSP) { |
806 | if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { | 813 | if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { |
807 | cERROR(1, ("NTLMSSP requires Unicode support")); | 814 | cERROR(1, "NTLMSSP requires Unicode support"); |
808 | rc = -ENOSYS; | 815 | rc = -ENOSYS; |
809 | goto ssetup_exit; | 816 | goto ssetup_exit; |
810 | } | 817 | } |
811 | 818 | ||
812 | cFYI(1, ("ntlmssp session setup phase %d", phase)); | 819 | cFYI(1, "ntlmssp session setup phase %d", phase); |
813 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | 820 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; |
814 | capabilities |= CAP_EXTENDED_SECURITY; | 821 | capabilities |= CAP_EXTENDED_SECURITY; |
815 | pSMB->req.Capabilities |= cpu_to_le32(capabilities); | 822 | pSMB->req.Capabilities |= cpu_to_le32(capabilities); |
@@ -827,7 +834,7 @@ ssetup_ntlmssp_authenticate: | |||
827 | on the response (challenge) */ | 834 | on the response (challenge) */ |
828 | smb_buf->Uid = ses->Suid; | 835 | smb_buf->Uid = ses->Suid; |
829 | } else { | 836 | } else { |
830 | cERROR(1, ("invalid phase %d", phase)); | 837 | cERROR(1, "invalid phase %d", phase); |
831 | rc = -ENOSYS; | 838 | rc = -ENOSYS; |
832 | goto ssetup_exit; | 839 | goto ssetup_exit; |
833 | } | 840 | } |
@@ -839,12 +846,12 @@ ssetup_ntlmssp_authenticate: | |||
839 | } | 846 | } |
840 | unicode_oslm_strings(&bcc_ptr, nls_cp); | 847 | unicode_oslm_strings(&bcc_ptr, nls_cp); |
841 | } else { | 848 | } else { |
842 | cERROR(1, ("secType %d not supported!", type)); | 849 | cERROR(1, "secType %d not supported!", type); |
843 | rc = -ENOSYS; | 850 | rc = -ENOSYS; |
844 | goto ssetup_exit; | 851 | goto ssetup_exit; |
845 | } | 852 | } |
846 | #else | 853 | #else |
847 | cERROR(1, ("secType %d not supported!", type)); | 854 | cERROR(1, "secType %d not supported!", type); |
848 | rc = -ENOSYS; | 855 | rc = -ENOSYS; |
849 | goto ssetup_exit; | 856 | goto ssetup_exit; |
850 | #endif | 857 | #endif |
@@ -862,7 +869,7 @@ ssetup_ntlmssp_authenticate: | |||
862 | CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR); | 869 | CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR); |
863 | /* SMB request buf freed in SendReceive2 */ | 870 | /* SMB request buf freed in SendReceive2 */ |
864 | 871 | ||
865 | cFYI(1, ("ssetup rc from sendrecv2 is %d", rc)); | 872 | cFYI(1, "ssetup rc from sendrecv2 is %d", rc); |
866 | 873 | ||
867 | pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base; | 874 | pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base; |
868 | smb_buf = (struct smb_hdr *)iov[0].iov_base; | 875 | smb_buf = (struct smb_hdr *)iov[0].iov_base; |
@@ -870,7 +877,7 @@ ssetup_ntlmssp_authenticate: | |||
870 | if ((type == RawNTLMSSP) && (smb_buf->Status.CifsError == | 877 | if ((type == RawNTLMSSP) && (smb_buf->Status.CifsError == |
871 | cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))) { | 878 | cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))) { |
872 | if (phase != NtLmNegotiate) { | 879 | if (phase != NtLmNegotiate) { |
873 | cERROR(1, ("Unexpected more processing error")); | 880 | cERROR(1, "Unexpected more processing error"); |
874 | goto ssetup_exit; | 881 | goto ssetup_exit; |
875 | } | 882 | } |
876 | /* NTLMSSP Negotiate sent now processing challenge (response) */ | 883 | /* NTLMSSP Negotiate sent now processing challenge (response) */ |
@@ -882,14 +889,14 @@ ssetup_ntlmssp_authenticate: | |||
882 | 889 | ||
883 | if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) { | 890 | if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) { |
884 | rc = -EIO; | 891 | rc = -EIO; |
885 | cERROR(1, ("bad word count %d", smb_buf->WordCount)); | 892 | cERROR(1, "bad word count %d", smb_buf->WordCount); |
886 | goto ssetup_exit; | 893 | goto ssetup_exit; |
887 | } | 894 | } |
888 | action = le16_to_cpu(pSMB->resp.Action); | 895 | action = le16_to_cpu(pSMB->resp.Action); |
889 | if (action & GUEST_LOGIN) | 896 | if (action & GUEST_LOGIN) |
890 | cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */ | 897 | cFYI(1, "Guest login"); /* BB mark SesInfo struct? */ |
891 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ | 898 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ |
892 | cFYI(1, ("UID = %d ", ses->Suid)); | 899 | cFYI(1, "UID = %d ", ses->Suid); |
893 | /* response can have either 3 or 4 word count - Samba sends 3 */ | 900 | /* response can have either 3 or 4 word count - Samba sends 3 */ |
894 | /* and lanman response is 3 */ | 901 | /* and lanman response is 3 */ |
895 | bytes_remaining = BCC(smb_buf); | 902 | bytes_remaining = BCC(smb_buf); |
@@ -899,7 +906,7 @@ ssetup_ntlmssp_authenticate: | |||
899 | __u16 blob_len; | 906 | __u16 blob_len; |
900 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); | 907 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); |
901 | if (blob_len > bytes_remaining) { | 908 | if (blob_len > bytes_remaining) { |
902 | cERROR(1, ("bad security blob length %d", blob_len)); | 909 | cERROR(1, "bad security blob length %d", blob_len); |
903 | rc = -EINVAL; | 910 | rc = -EINVAL; |
904 | goto ssetup_exit; | 911 | goto ssetup_exit; |
905 | } | 912 | } |
@@ -933,7 +940,7 @@ ssetup_exit: | |||
933 | } | 940 | } |
934 | kfree(str_area); | 941 | kfree(str_area); |
935 | if (resp_buf_type == CIFS_SMALL_BUFFER) { | 942 | if (resp_buf_type == CIFS_SMALL_BUFFER) { |
936 | cFYI(1, ("ssetup freeing small buf %p", iov[0].iov_base)); | 943 | cFYI(1, "ssetup freeing small buf %p", iov[0].iov_base); |
937 | cifs_small_buf_release(iov[0].iov_base); | 944 | cifs_small_buf_release(iov[0].iov_base); |
938 | } else if (resp_buf_type == CIFS_LARGE_BUFFER) | 945 | } else if (resp_buf_type == CIFS_LARGE_BUFFER) |
939 | cifs_buf_release(iov[0].iov_base); | 946 | cifs_buf_release(iov[0].iov_base); |
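Besides the macro cleanup, the sess.c hunk drops the first_time parameter from CIFS_SessSetup() and has the function sample is_first_ses_reconnect() itself while holding cifs_tcp_ses_lock for read. A userspace pthread analogue of that pattern, assuming nothing about the real cifs data structures; the names below are illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t ses_lock = PTHREAD_RWLOCK_INITIALIZER;
static int sessions_needing_reconnect;	/* stands in for the tcp ses list */

/* must be called with ses_lock held for read; must not block */
static bool is_first_reconnect_locked(void)
{
	return sessions_needing_reconnect <= 1;
}

static int sess_setup(void)
{
	bool first_time;

	/* sample the shared state once, under the lock, then drop it */
	pthread_rwlock_rdlock(&ses_lock);
	first_time = is_first_reconnect_locked();
	pthread_rwlock_unlock(&ses_lock);

	printf("first_time=%d\n", first_time);
	return 0;
}

int main(void)
{
	sessions_needing_reconnect = 1;
	return sess_setup();
}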
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index ad081fe7eb18..82f78c4d6978 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include "cifs_debug.h" | 35 | #include "cifs_debug.h" |
36 | 36 | ||
37 | extern mempool_t *cifs_mid_poolp; | 37 | extern mempool_t *cifs_mid_poolp; |
38 | extern struct kmem_cache *cifs_oplock_cachep; | ||
39 | 38 | ||
40 | static struct mid_q_entry * | 39 | static struct mid_q_entry * |
41 | AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | 40 | AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) |
@@ -43,7 +42,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | |||
43 | struct mid_q_entry *temp; | 42 | struct mid_q_entry *temp; |
44 | 43 | ||
45 | if (server == NULL) { | 44 | if (server == NULL) { |
46 | cERROR(1, ("Null TCP session in AllocMidQEntry")); | 45 | cERROR(1, "Null TCP session in AllocMidQEntry"); |
47 | return NULL; | 46 | return NULL; |
48 | } | 47 | } |
49 | 48 | ||
@@ -55,7 +54,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | |||
55 | temp->mid = smb_buffer->Mid; /* always LE */ | 54 | temp->mid = smb_buffer->Mid; /* always LE */ |
56 | temp->pid = current->pid; | 55 | temp->pid = current->pid; |
57 | temp->command = smb_buffer->Command; | 56 | temp->command = smb_buffer->Command; |
58 | cFYI(1, ("For smb_command %d", temp->command)); | 57 | cFYI(1, "For smb_command %d", temp->command); |
59 | /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */ | 58 | /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */ |
60 | /* when mid allocated can be before when sent */ | 59 | /* when mid allocated can be before when sent */ |
61 | temp->when_alloc = jiffies; | 60 | temp->when_alloc = jiffies; |
@@ -140,7 +139,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | |||
140 | total_len += iov[i].iov_len; | 139 | total_len += iov[i].iov_len; |
141 | 140 | ||
142 | smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length); | 141 | smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length); |
143 | cFYI(1, ("Sending smb: total_len %d", total_len)); | 142 | cFYI(1, "Sending smb: total_len %d", total_len); |
144 | dump_smb(smb_buffer, len); | 143 | dump_smb(smb_buffer, len); |
145 | 144 | ||
146 | i = 0; | 145 | i = 0; |
@@ -168,9 +167,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | |||
168 | reconnect which may clear the network problem. | 167 | reconnect which may clear the network problem. |
169 | */ | 168 | */ |
170 | if ((i >= 14) || (!server->noblocksnd && (i > 2))) { | 169 | if ((i >= 14) || (!server->noblocksnd && (i > 2))) { |
171 | cERROR(1, | 170 | cERROR(1, "sends on sock %p stuck for 15 seconds", |
172 | ("sends on sock %p stuck for 15 seconds", | 171 | ssocket); |
173 | ssocket)); | ||
174 | rc = -EAGAIN; | 172 | rc = -EAGAIN; |
175 | break; | 173 | break; |
176 | } | 174 | } |
@@ -184,13 +182,13 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | |||
184 | total_len = 0; | 182 | total_len = 0; |
185 | break; | 183 | break; |
186 | } else if (rc > total_len) { | 184 | } else if (rc > total_len) { |
187 | cERROR(1, ("sent %d requested %d", rc, total_len)); | 185 | cERROR(1, "sent %d requested %d", rc, total_len); |
188 | break; | 186 | break; |
189 | } | 187 | } |
190 | if (rc == 0) { | 188 | if (rc == 0) { |
191 | /* should never happen, letting socket clear before | 189 | /* should never happen, letting socket clear before |
192 | retrying is our only obvious option here */ | 190 | retrying is our only obvious option here */ |
193 | cERROR(1, ("tcp sent no data")); | 191 | cERROR(1, "tcp sent no data"); |
194 | msleep(500); | 192 | msleep(500); |
195 | continue; | 193 | continue; |
196 | } | 194 | } |
@@ -213,8 +211,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | |||
213 | } | 211 | } |
214 | 212 | ||
215 | if ((total_len > 0) && (total_len != smb_buf_length + 4)) { | 213 | if ((total_len > 0) && (total_len != smb_buf_length + 4)) { |
216 | cFYI(1, ("partial send (%d remaining), terminating session", | 214 | cFYI(1, "partial send (%d remaining), terminating session", |
217 | total_len)); | 215 | total_len); |
218 | /* If we have only sent part of an SMB then the next SMB | 216 | /* If we have only sent part of an SMB then the next SMB |
219 | could be taken as the remainder of this one. We need | 217 | could be taken as the remainder of this one. We need |
220 | to kill the socket so the server throws away the partial | 218 | to kill the socket so the server throws away the partial |
@@ -223,7 +221,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | |||
223 | } | 221 | } |
224 | 222 | ||
225 | if (rc < 0) { | 223 | if (rc < 0) { |
226 | cERROR(1, ("Error %d sending data on socket to server", rc)); | 224 | cERROR(1, "Error %d sending data on socket to server", rc); |
227 | } else | 225 | } else |
228 | rc = 0; | 226 | rc = 0; |
229 | 227 | ||
@@ -296,7 +294,7 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, | |||
296 | } | 294 | } |
297 | 295 | ||
298 | if (ses->server->tcpStatus == CifsNeedReconnect) { | 296 | if (ses->server->tcpStatus == CifsNeedReconnect) { |
299 | cFYI(1, ("tcp session dead - return to caller to retry")); | 297 | cFYI(1, "tcp session dead - return to caller to retry"); |
300 | return -EAGAIN; | 298 | return -EAGAIN; |
301 | } | 299 | } |
302 | 300 | ||
@@ -348,7 +346,7 @@ static int wait_for_response(struct cifsSesInfo *ses, | |||
348 | lrt += time_to_wait; | 346 | lrt += time_to_wait; |
349 | if (time_after(jiffies, lrt)) { | 347 | if (time_after(jiffies, lrt)) { |
350 | /* No replies for time_to_wait. */ | 348 | /* No replies for time_to_wait. */ |
351 | cERROR(1, ("server not responding")); | 349 | cERROR(1, "server not responding"); |
352 | return -1; | 350 | return -1; |
353 | } | 351 | } |
354 | } else { | 352 | } else { |
@@ -379,7 +377,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, | |||
379 | iov[0].iov_len = in_buf->smb_buf_length + 4; | 377 | iov[0].iov_len = in_buf->smb_buf_length + 4; |
380 | flags |= CIFS_NO_RESP; | 378 | flags |= CIFS_NO_RESP; |
381 | rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); | 379 | rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); |
382 | cFYI(DBG2, ("SendRcvNoRsp flags %d rc %d", flags, rc)); | 380 | cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc); |
383 | 381 | ||
384 | return rc; | 382 | return rc; |
385 | } | 383 | } |
@@ -402,7 +400,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
402 | 400 | ||
403 | if ((ses == NULL) || (ses->server == NULL)) { | 401 | if ((ses == NULL) || (ses->server == NULL)) { |
404 | cifs_small_buf_release(in_buf); | 402 | cifs_small_buf_release(in_buf); |
405 | cERROR(1, ("Null session")); | 403 | cERROR(1, "Null session"); |
406 | return -EIO; | 404 | return -EIO; |
407 | } | 405 | } |
408 | 406 | ||
@@ -471,7 +469,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
471 | else if (long_op == CIFS_BLOCKING_OP) | 469 | else if (long_op == CIFS_BLOCKING_OP) |
472 | timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */ | 470 | timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */ |
473 | else { | 471 | else { |
474 | cERROR(1, ("unknown timeout flag %d", long_op)); | 472 | cERROR(1, "unknown timeout flag %d", long_op); |
475 | rc = -EIO; | 473 | rc = -EIO; |
476 | goto out; | 474 | goto out; |
477 | } | 475 | } |
@@ -490,8 +488,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
490 | spin_lock(&GlobalMid_Lock); | 488 | spin_lock(&GlobalMid_Lock); |
491 | 489 | ||
492 | if (midQ->resp_buf == NULL) { | 490 | if (midQ->resp_buf == NULL) { |
493 | cERROR(1, ("No response to cmd %d mid %d", | 491 | cERROR(1, "No response to cmd %d mid %d", |
494 | midQ->command, midQ->mid)); | 492 | midQ->command, midQ->mid); |
495 | if (midQ->midState == MID_REQUEST_SUBMITTED) { | 493 | if (midQ->midState == MID_REQUEST_SUBMITTED) { |
496 | if (ses->server->tcpStatus == CifsExiting) | 494 | if (ses->server->tcpStatus == CifsExiting) |
497 | rc = -EHOSTDOWN; | 495 | rc = -EHOSTDOWN; |
@@ -504,7 +502,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
504 | if (rc != -EHOSTDOWN) { | 502 | if (rc != -EHOSTDOWN) { |
505 | if (midQ->midState == MID_RETRY_NEEDED) { | 503 | if (midQ->midState == MID_RETRY_NEEDED) { |
506 | rc = -EAGAIN; | 504 | rc = -EAGAIN; |
507 | cFYI(1, ("marking request for retry")); | 505 | cFYI(1, "marking request for retry"); |
508 | } else { | 506 | } else { |
509 | rc = -EIO; | 507 | rc = -EIO; |
510 | } | 508 | } |
@@ -521,8 +519,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
521 | receive_len = midQ->resp_buf->smb_buf_length; | 519 | receive_len = midQ->resp_buf->smb_buf_length; |
522 | 520 | ||
523 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | 521 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { |
524 | cERROR(1, ("Frame too large received. Length: %d Xid: %d", | 522 | cERROR(1, "Frame too large received. Length: %d Xid: %d", |
525 | receive_len, xid)); | 523 | receive_len, xid); |
526 | rc = -EIO; | 524 | rc = -EIO; |
527 | goto out; | 525 | goto out; |
528 | } | 526 | } |
@@ -548,7 +546,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
548 | &ses->server->mac_signing_key, | 546 | &ses->server->mac_signing_key, |
549 | midQ->sequence_number+1); | 547 | midQ->sequence_number+1); |
550 | if (rc) { | 548 | if (rc) { |
551 | cERROR(1, ("Unexpected SMB signature")); | 549 | cERROR(1, "Unexpected SMB signature"); |
552 | /* BB FIXME add code to kill session */ | 550 | /* BB FIXME add code to kill session */ |
553 | } | 551 | } |
554 | } | 552 | } |
@@ -569,7 +567,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
569 | DeleteMidQEntry */ | 567 | DeleteMidQEntry */ |
570 | } else { | 568 | } else { |
571 | rc = -EIO; | 569 | rc = -EIO; |
572 | cFYI(1, ("Bad MID state?")); | 570 | cFYI(1, "Bad MID state?"); |
573 | } | 571 | } |
574 | 572 | ||
575 | out: | 573 | out: |
@@ -591,11 +589,11 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
591 | struct mid_q_entry *midQ; | 589 | struct mid_q_entry *midQ; |
592 | 590 | ||
593 | if (ses == NULL) { | 591 | if (ses == NULL) { |
594 | cERROR(1, ("Null smb session")); | 592 | cERROR(1, "Null smb session"); |
595 | return -EIO; | 593 | return -EIO; |
596 | } | 594 | } |
597 | if (ses->server == NULL) { | 595 | if (ses->server == NULL) { |
598 | cERROR(1, ("Null tcp session")); | 596 | cERROR(1, "Null tcp session"); |
599 | return -EIO; | 597 | return -EIO; |
600 | } | 598 | } |
601 | 599 | ||
@@ -607,8 +605,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
607 | use ses->maxReq */ | 605 | use ses->maxReq */ |
608 | 606 | ||
609 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { | 607 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
610 | cERROR(1, ("Illegal length, greater than maximum frame, %d", | 608 | cERROR(1, "Illegal length, greater than maximum frame, %d", |
611 | in_buf->smb_buf_length)); | 609 | in_buf->smb_buf_length); |
612 | return -EIO; | 610 | return -EIO; |
613 | } | 611 | } |
614 | 612 | ||
@@ -665,7 +663,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
665 | else if (long_op == CIFS_BLOCKING_OP) | 663 | else if (long_op == CIFS_BLOCKING_OP) |
666 | timeout = 0x7FFFFFFF; /* large but no so large as to wrap */ | 664 | timeout = 0x7FFFFFFF; /* large but no so large as to wrap */ |
667 | else { | 665 | else { |
668 | cERROR(1, ("unknown timeout flag %d", long_op)); | 666 | cERROR(1, "unknown timeout flag %d", long_op); |
669 | rc = -EIO; | 667 | rc = -EIO; |
670 | goto out; | 668 | goto out; |
671 | } | 669 | } |
@@ -681,8 +679,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
681 | 679 | ||
682 | spin_lock(&GlobalMid_Lock); | 680 | spin_lock(&GlobalMid_Lock); |
683 | if (midQ->resp_buf == NULL) { | 681 | if (midQ->resp_buf == NULL) { |
684 | cERROR(1, ("No response for cmd %d mid %d", | 682 | cERROR(1, "No response for cmd %d mid %d", |
685 | midQ->command, midQ->mid)); | 683 | midQ->command, midQ->mid); |
686 | if (midQ->midState == MID_REQUEST_SUBMITTED) { | 684 | if (midQ->midState == MID_REQUEST_SUBMITTED) { |
687 | if (ses->server->tcpStatus == CifsExiting) | 685 | if (ses->server->tcpStatus == CifsExiting) |
688 | rc = -EHOSTDOWN; | 686 | rc = -EHOSTDOWN; |
@@ -695,7 +693,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
695 | if (rc != -EHOSTDOWN) { | 693 | if (rc != -EHOSTDOWN) { |
696 | if (midQ->midState == MID_RETRY_NEEDED) { | 694 | if (midQ->midState == MID_RETRY_NEEDED) { |
697 | rc = -EAGAIN; | 695 | rc = -EAGAIN; |
698 | cFYI(1, ("marking request for retry")); | 696 | cFYI(1, "marking request for retry"); |
699 | } else { | 697 | } else { |
700 | rc = -EIO; | 698 | rc = -EIO; |
701 | } | 699 | } |
@@ -712,8 +710,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
712 | receive_len = midQ->resp_buf->smb_buf_length; | 710 | receive_len = midQ->resp_buf->smb_buf_length; |
713 | 711 | ||
714 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | 712 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { |
715 | cERROR(1, ("Frame too large received. Length: %d Xid: %d", | 713 | cERROR(1, "Frame too large received. Length: %d Xid: %d", |
716 | receive_len, xid)); | 714 | receive_len, xid); |
717 | rc = -EIO; | 715 | rc = -EIO; |
718 | goto out; | 716 | goto out; |
719 | } | 717 | } |
@@ -736,7 +734,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
736 | &ses->server->mac_signing_key, | 734 | &ses->server->mac_signing_key, |
737 | midQ->sequence_number+1); | 735 | midQ->sequence_number+1); |
738 | if (rc) { | 736 | if (rc) { |
739 | cERROR(1, ("Unexpected SMB signature")); | 737 | cERROR(1, "Unexpected SMB signature"); |
740 | /* BB FIXME add code to kill session */ | 738 | /* BB FIXME add code to kill session */ |
741 | } | 739 | } |
742 | } | 740 | } |
@@ -753,7 +751,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
753 | BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); | 751 | BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); |
754 | } else { | 752 | } else { |
755 | rc = -EIO; | 753 | rc = -EIO; |
756 | cERROR(1, ("Bad MID state?")); | 754 | cERROR(1, "Bad MID state?"); |
757 | } | 755 | } |
758 | 756 | ||
759 | out: | 757 | out: |
@@ -824,13 +822,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
824 | struct cifsSesInfo *ses; | 822 | struct cifsSesInfo *ses; |
825 | 823 | ||
826 | if (tcon == NULL || tcon->ses == NULL) { | 824 | if (tcon == NULL || tcon->ses == NULL) { |
827 | cERROR(1, ("Null smb session")); | 825 | cERROR(1, "Null smb session"); |
828 | return -EIO; | 826 | return -EIO; |
829 | } | 827 | } |
830 | ses = tcon->ses; | 828 | ses = tcon->ses; |
831 | 829 | ||
832 | if (ses->server == NULL) { | 830 | if (ses->server == NULL) { |
833 | cERROR(1, ("Null tcp session")); | 831 | cERROR(1, "Null tcp session"); |
834 | return -EIO; | 832 | return -EIO; |
835 | } | 833 | } |
836 | 834 | ||
@@ -842,8 +840,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
842 | use ses->maxReq */ | 840 | use ses->maxReq */ |
843 | 841 | ||
844 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { | 842 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
845 | cERROR(1, ("Illegal length, greater than maximum frame, %d", | 843 | cERROR(1, "Illegal length, greater than maximum frame, %d", |
846 | in_buf->smb_buf_length)); | 844 | in_buf->smb_buf_length); |
847 | return -EIO; | 845 | return -EIO; |
848 | } | 846 | } |
849 | 847 | ||
@@ -933,8 +931,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
933 | spin_unlock(&GlobalMid_Lock); | 931 | spin_unlock(&GlobalMid_Lock); |
934 | receive_len = midQ->resp_buf->smb_buf_length; | 932 | receive_len = midQ->resp_buf->smb_buf_length; |
935 | } else { | 933 | } else { |
936 | cERROR(1, ("No response for cmd %d mid %d", | 934 | cERROR(1, "No response for cmd %d mid %d", |
937 | midQ->command, midQ->mid)); | 935 | midQ->command, midQ->mid); |
938 | if (midQ->midState == MID_REQUEST_SUBMITTED) { | 936 | if (midQ->midState == MID_REQUEST_SUBMITTED) { |
939 | if (ses->server->tcpStatus == CifsExiting) | 937 | if (ses->server->tcpStatus == CifsExiting) |
940 | rc = -EHOSTDOWN; | 938 | rc = -EHOSTDOWN; |
@@ -947,7 +945,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
947 | if (rc != -EHOSTDOWN) { | 945 | if (rc != -EHOSTDOWN) { |
948 | if (midQ->midState == MID_RETRY_NEEDED) { | 946 | if (midQ->midState == MID_RETRY_NEEDED) { |
949 | rc = -EAGAIN; | 947 | rc = -EAGAIN; |
950 | cFYI(1, ("marking request for retry")); | 948 | cFYI(1, "marking request for retry"); |
951 | } else { | 949 | } else { |
952 | rc = -EIO; | 950 | rc = -EIO; |
953 | } | 951 | } |
@@ -958,8 +956,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
958 | } | 956 | } |
959 | 957 | ||
960 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | 958 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { |
961 | cERROR(1, ("Frame too large received. Length: %d Xid: %d", | 959 | cERROR(1, "Frame too large received. Length: %d Xid: %d", |
962 | receive_len, xid)); | 960 | receive_len, xid); |
963 | rc = -EIO; | 961 | rc = -EIO; |
964 | goto out; | 962 | goto out; |
965 | } | 963 | } |
@@ -968,7 +966,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
968 | 966 | ||
969 | if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) { | 967 | if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) { |
970 | rc = -EIO; | 968 | rc = -EIO; |
971 | cERROR(1, ("Bad MID state?")); | 969 | cERROR(1, "Bad MID state?"); |
972 | goto out; | 970 | goto out; |
973 | } | 971 | } |
974 | 972 | ||
@@ -986,7 +984,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
986 | &ses->server->mac_signing_key, | 984 | &ses->server->mac_signing_key, |
987 | midQ->sequence_number+1); | 985 | midQ->sequence_number+1); |
988 | if (rc) { | 986 | if (rc) { |
989 | cERROR(1, ("Unexpected SMB signature")); | 987 | cERROR(1, "Unexpected SMB signature"); |
990 | /* BB FIXME add code to kill session */ | 988 | /* BB FIXME add code to kill session */ |
991 | } | 989 | } |
992 | } | 990 | } |
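The transport.c context above repeatedly bounds-checks the length of a received SMB frame before it is parsed, returning -EIO when the frame exceeds the negotiated maximum. A minimal sketch of that defensive check; the limits below are made-up stand-ins for CIFSMaxBufSize and MAX_CIFS_HDR_SIZE.

#include <errno.h>
#include <stdio.h>

#define MAX_BUF_SIZE	16384	/* assumption: stands in for CIFSMaxBufSize */
#define MAX_HDR_SIZE	500	/* assumption: stands in for MAX_CIFS_HDR_SIZE */

static int check_frame_len(unsigned int receive_len)
{
	if (receive_len > MAX_BUF_SIZE + MAX_HDR_SIZE) {
		fprintf(stderr, "Frame too large received. Length: %u\n",
			receive_len);
		return -EIO;	/* reject before touching the payload */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_frame_len(1024));		/* within limits -> 0 */
	printf("%d\n", check_frame_len(1 << 20));	/* too large -> -EIO */
	return 0;
}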
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index f555ce077d4f..a1509207bfa6 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -70,12 +70,12 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name) | |||
70 | return rc; | 70 | return rc; |
71 | } | 71 | } |
72 | if (ea_name == NULL) { | 72 | if (ea_name == NULL) { |
73 | cFYI(1, ("Null xattr names not supported")); | 73 | cFYI(1, "Null xattr names not supported"); |
74 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) | 74 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) |
75 | && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) { | 75 | && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) { |
76 | cFYI(1, | 76 | cFYI(1, |
77 | ("illegal xattr request %s (only user namespace supported)", | 77 | "illegal xattr request %s (only user namespace supported)", |
78 | ea_name)); | 78 | ea_name); |
79 | /* BB what if no namespace prefix? */ | 79 | /* BB what if no namespace prefix? */ |
80 | /* Should we just pass them to server, except for | 80 | /* Should we just pass them to server, except for |
81 | system and perhaps security prefixes? */ | 81 | system and perhaps security prefixes? */ |
@@ -131,19 +131,19 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
131 | search server for EAs or streams to | 131 | search server for EAs or streams to |
132 | returns as xattrs */ | 132 | returns as xattrs */ |
133 | if (value_size > MAX_EA_VALUE_SIZE) { | 133 | if (value_size > MAX_EA_VALUE_SIZE) { |
134 | cFYI(1, ("size of EA value too large")); | 134 | cFYI(1, "size of EA value too large"); |
135 | kfree(full_path); | 135 | kfree(full_path); |
136 | FreeXid(xid); | 136 | FreeXid(xid); |
137 | return -EOPNOTSUPP; | 137 | return -EOPNOTSUPP; |
138 | } | 138 | } |
139 | 139 | ||
140 | if (ea_name == NULL) { | 140 | if (ea_name == NULL) { |
141 | cFYI(1, ("Null xattr names not supported")); | 141 | cFYI(1, "Null xattr names not supported"); |
142 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { | 142 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { |
143 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) | 143 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) |
144 | goto set_ea_exit; | 144 | goto set_ea_exit; |
145 | if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) | 145 | if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) |
146 | cFYI(1, ("attempt to set cifs inode metadata")); | 146 | cFYI(1, "attempt to set cifs inode metadata"); |
147 | 147 | ||
148 | ea_name += 5; /* skip past user. prefix */ | 148 | ea_name += 5; /* skip past user. prefix */ |
149 | rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, | 149 | rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, |
@@ -169,9 +169,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
169 | ACL_TYPE_ACCESS, cifs_sb->local_nls, | 169 | ACL_TYPE_ACCESS, cifs_sb->local_nls, |
170 | cifs_sb->mnt_cifs_flags & | 170 | cifs_sb->mnt_cifs_flags & |
171 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 171 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
172 | cFYI(1, ("set POSIX ACL rc %d", rc)); | 172 | cFYI(1, "set POSIX ACL rc %d", rc); |
173 | #else | 173 | #else |
174 | cFYI(1, ("set POSIX ACL not supported")); | 174 | cFYI(1, "set POSIX ACL not supported"); |
175 | #endif | 175 | #endif |
176 | } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT, | 176 | } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT, |
177 | strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { | 177 | strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { |
@@ -182,13 +182,13 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
182 | ACL_TYPE_DEFAULT, cifs_sb->local_nls, | 182 | ACL_TYPE_DEFAULT, cifs_sb->local_nls, |
183 | cifs_sb->mnt_cifs_flags & | 183 | cifs_sb->mnt_cifs_flags & |
184 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 184 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
185 | cFYI(1, ("set POSIX default ACL rc %d", rc)); | 185 | cFYI(1, "set POSIX default ACL rc %d", rc); |
186 | #else | 186 | #else |
187 | cFYI(1, ("set default POSIX ACL not supported")); | 187 | cFYI(1, "set default POSIX ACL not supported"); |
188 | #endif | 188 | #endif |
189 | } else { | 189 | } else { |
190 | cFYI(1, ("illegal xattr request %s (only user namespace" | 190 | cFYI(1, "illegal xattr request %s (only user namespace" |
191 | " supported)", ea_name)); | 191 | " supported)", ea_name); |
192 | /* BB what if no namespace prefix? */ | 192 | /* BB what if no namespace prefix? */ |
193 | /* Should we just pass them to server, except for | 193 | /* Should we just pass them to server, except for |
194 | system and perhaps security prefixes? */ | 194 | system and perhaps security prefixes? */ |
@@ -235,13 +235,13 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
235 | /* return dos attributes as pseudo xattr */ | 235 | /* return dos attributes as pseudo xattr */ |
236 | /* return alt name if available as pseudo attr */ | 236 | /* return alt name if available as pseudo attr */ |
237 | if (ea_name == NULL) { | 237 | if (ea_name == NULL) { |
238 | cFYI(1, ("Null xattr names not supported")); | 238 | cFYI(1, "Null xattr names not supported"); |
239 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { | 239 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { |
240 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) | 240 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) |
241 | goto get_ea_exit; | 241 | goto get_ea_exit; |
242 | 242 | ||
243 | if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) { | 243 | if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) { |
244 | cFYI(1, ("attempt to query cifs inode metadata")); | 244 | cFYI(1, "attempt to query cifs inode metadata"); |
245 | /* revalidate/getattr then populate from inode */ | 245 | /* revalidate/getattr then populate from inode */ |
246 | } /* BB add else when above is implemented */ | 246 | } /* BB add else when above is implemented */ |
247 | ea_name += 5; /* skip past user. prefix */ | 247 | ea_name += 5; /* skip past user. prefix */ |
@@ -287,7 +287,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
287 | } | 287 | } |
288 | #endif /* EXPERIMENTAL */ | 288 | #endif /* EXPERIMENTAL */ |
289 | #else | 289 | #else |
290 | cFYI(1, ("query POSIX ACL not supported yet")); | 290 | cFYI(1, "query POSIX ACL not supported yet"); |
291 | #endif /* CONFIG_CIFS_POSIX */ | 291 | #endif /* CONFIG_CIFS_POSIX */ |
292 | } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT, | 292 | } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT, |
293 | strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { | 293 | strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { |
@@ -299,18 +299,18 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
299 | cifs_sb->mnt_cifs_flags & | 299 | cifs_sb->mnt_cifs_flags & |
300 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 300 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
301 | #else | 301 | #else |
302 | cFYI(1, ("query POSIX default ACL not supported yet")); | 302 | cFYI(1, "query POSIX default ACL not supported yet"); |
303 | #endif | 303 | #endif |
304 | } else if (strncmp(ea_name, | 304 | } else if (strncmp(ea_name, |
305 | CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { | 305 | CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { |
306 | cFYI(1, ("Trusted xattr namespace not supported yet")); | 306 | cFYI(1, "Trusted xattr namespace not supported yet"); |
307 | } else if (strncmp(ea_name, | 307 | } else if (strncmp(ea_name, |
308 | CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { | 308 | CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { |
309 | cFYI(1, ("Security xattr namespace not supported yet")); | 309 | cFYI(1, "Security xattr namespace not supported yet"); |
310 | } else | 310 | } else |
311 | cFYI(1, | 311 | cFYI(1, |
312 | ("illegal xattr request %s (only user namespace supported)", | 312 | "illegal xattr request %s (only user namespace supported)", |
313 | ea_name)); | 313 | ea_name); |
314 | 314 | ||
315 | /* We could add an additional check for streams ie | 315 | /* We could add an additional check for streams ie |
316 | if proc/fs/cifs/streamstoxattr is set then | 316 | if proc/fs/cifs/streamstoxattr is set then |
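The xattr.c context above dispatches on the attribute-name namespace: only "user." names are handled, and the prefix is stripped before the name goes to the server. A small userspace illustration of that prefix check, not the cifs code itself.

#include <stdio.h>
#include <string.h>

#define USER_PREFIX	"user."
#define USER_PREFIX_LEN	(sizeof(USER_PREFIX) - 1)

static const char *strip_user_prefix(const char *ea_name)
{
	if (ea_name == NULL)
		return NULL;	/* "Null xattr names not supported" */
	if (strncmp(ea_name, USER_PREFIX, USER_PREFIX_LEN) != 0)
		return NULL;	/* only the user namespace is supported */
	return ea_name + USER_PREFIX_LEN;	/* skip past "user." */
}

int main(void)
{
	printf("%s\n", strip_user_prefix("user.comment"));	/* -> "comment" */
	printf("%p\n", (void *)strip_user_prefix("trusted.x"));	/* -> NULL */
	return 0;
}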
diff --git a/fs/compat.c b/fs/compat.c index 4b6ed03cc478..05448730f840 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -1531,8 +1531,6 @@ int compat_do_execve(char * filename, | |||
1531 | if (retval < 0) | 1531 | if (retval < 0) |
1532 | goto out; | 1532 | goto out; |
1533 | 1533 | ||
1534 | current->stack_start = current->mm->start_stack; | ||
1535 | |||
1536 | /* execve succeeded */ | 1534 | /* execve succeeded */ |
1537 | current->fs->in_exec = 0; | 1535 | current->fs->in_exec = 0; |
1538 | current->in_execve = 0; | 1536 | current->in_execve = 0; |
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 8e48b52205aa..0b502f80c691 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
@@ -645,6 +645,7 @@ static void detach_groups(struct config_group *group) | |||
645 | 645 | ||
646 | configfs_detach_group(sd->s_element); | 646 | configfs_detach_group(sd->s_element); |
647 | child->d_inode->i_flags |= S_DEAD; | 647 | child->d_inode->i_flags |= S_DEAD; |
648 | dont_mount(child); | ||
648 | 649 | ||
649 | mutex_unlock(&child->d_inode->i_mutex); | 650 | mutex_unlock(&child->d_inode->i_mutex); |
650 | 651 | ||
@@ -840,6 +841,7 @@ static int configfs_attach_item(struct config_item *parent_item, | |||
840 | mutex_lock(&dentry->d_inode->i_mutex); | 841 | mutex_lock(&dentry->d_inode->i_mutex); |
841 | configfs_remove_dir(item); | 842 | configfs_remove_dir(item); |
842 | dentry->d_inode->i_flags |= S_DEAD; | 843 | dentry->d_inode->i_flags |= S_DEAD; |
844 | dont_mount(dentry); | ||
843 | mutex_unlock(&dentry->d_inode->i_mutex); | 845 | mutex_unlock(&dentry->d_inode->i_mutex); |
844 | d_delete(dentry); | 846 | d_delete(dentry); |
845 | } | 847 | } |
@@ -882,6 +884,7 @@ static int configfs_attach_group(struct config_item *parent_item, | |||
882 | if (ret) { | 884 | if (ret) { |
883 | configfs_detach_item(item); | 885 | configfs_detach_item(item); |
884 | dentry->d_inode->i_flags |= S_DEAD; | 886 | dentry->d_inode->i_flags |= S_DEAD; |
887 | dont_mount(dentry); | ||
885 | } | 888 | } |
886 | configfs_adjust_dir_dirent_depth_after_populate(sd); | 889 | configfs_adjust_dir_dirent_depth_after_populate(sd); |
887 | mutex_unlock(&dentry->d_inode->i_mutex); | 890 | mutex_unlock(&dentry->d_inode->i_mutex); |
@@ -1725,6 +1728,7 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) | |||
1725 | mutex_unlock(&configfs_symlink_mutex); | 1728 | mutex_unlock(&configfs_symlink_mutex); |
1726 | configfs_detach_group(&group->cg_item); | 1729 | configfs_detach_group(&group->cg_item); |
1727 | dentry->d_inode->i_flags |= S_DEAD; | 1730 | dentry->d_inode->i_flags |= S_DEAD; |
1731 | dont_mount(dentry); | ||
1728 | mutex_unlock(&dentry->d_inode->i_mutex); | 1732 | mutex_unlock(&dentry->d_inode->i_mutex); |
1729 | 1733 | ||
1730 | d_delete(dentry); | 1734 | d_delete(dentry); |
@@ -1387,8 +1387,6 @@ int do_execve(char * filename, | |||
1387 | if (retval < 0) | 1387 | if (retval < 0) |
1388 | goto out; | 1388 | goto out; |
1389 | 1389 | ||
1390 | current->stack_start = current->mm->start_stack; | ||
1391 | |||
1392 | /* execve succeeded */ | 1390 | /* execve succeeded */ |
1393 | current->fs->in_exec = 0; | 1391 | current->fs->in_exec = 0; |
1394 | current->in_execve = 0; | 1392 | current->in_execve = 0; |
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 157382fa6256..b66832ac33ac 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
@@ -446,10 +446,8 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
446 | /* initialize the mount flag and determine the default error handler */ | 446 | /* initialize the mount flag and determine the default error handler */ |
447 | flag = JFS_ERR_REMOUNT_RO; | 447 | flag = JFS_ERR_REMOUNT_RO; |
448 | 448 | ||
449 | if (!parse_options((char *) data, sb, &newLVSize, &flag)) { | 449 | if (!parse_options((char *) data, sb, &newLVSize, &flag)) |
450 | kfree(sbi); | 450 | goto out_kfree; |
451 | return -EINVAL; | ||
452 | } | ||
453 | sbi->flag = flag; | 451 | sbi->flag = flag; |
454 | 452 | ||
455 | #ifdef CONFIG_JFS_POSIX_ACL | 453 | #ifdef CONFIG_JFS_POSIX_ACL |
@@ -458,7 +456,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
458 | 456 | ||
459 | if (newLVSize) { | 457 | if (newLVSize) { |
460 | printk(KERN_ERR "resize option for remount only\n"); | 458 | printk(KERN_ERR "resize option for remount only\n"); |
461 | return -EINVAL; | 459 | goto out_kfree; |
462 | } | 460 | } |
463 | 461 | ||
464 | /* | 462 | /* |
@@ -478,7 +476,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
478 | inode = new_inode(sb); | 476 | inode = new_inode(sb); |
479 | if (inode == NULL) { | 477 | if (inode == NULL) { |
480 | ret = -ENOMEM; | 478 | ret = -ENOMEM; |
481 | goto out_kfree; | 479 | goto out_unload; |
482 | } | 480 | } |
483 | inode->i_ino = 0; | 481 | inode->i_ino = 0; |
484 | inode->i_nlink = 1; | 482 | inode->i_nlink = 1; |
@@ -550,9 +548,10 @@ out_mount_failed: | |||
550 | make_bad_inode(sbi->direct_inode); | 548 | make_bad_inode(sbi->direct_inode); |
551 | iput(sbi->direct_inode); | 549 | iput(sbi->direct_inode); |
552 | sbi->direct_inode = NULL; | 550 | sbi->direct_inode = NULL; |
553 | out_kfree: | 551 | out_unload: |
554 | if (sbi->nls_tab) | 552 | if (sbi->nls_tab) |
555 | unload_nls(sbi->nls_tab); | 553 | unload_nls(sbi->nls_tab); |
554 | out_kfree: | ||
556 | kfree(sbi); | 555 | kfree(sbi); |
557 | return ret; | 556 | return ret; |
558 | } | 557 | } |
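The jfs_fill_super() hunk above converts early failure returns into a layered goto unwind: out_kfree frees only the superblock info, while out_unload additionally unloads the NLS table, so each exit path undoes exactly what was set up before it. A stand-alone sketch of that idiom, assuming nothing about jfs; the resource names are invented.

#include <stdlib.h>

struct ctx {
	void *table;	/* later resource, like sbi->nls_tab */
};

static int fill(int bad_options, int bad_table)
{
	int ret = -22;			/* -EINVAL analogue */
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return -12;		/* -ENOMEM analogue */
	if (bad_options)
		goto out_kfree;		/* only c exists at this point */

	c->table = malloc(16);		/* later setup step */
	if (!c->table || bad_table)
		goto out_unload;

	/* ... remaining setup would go here ... */
	free(c->table);
	free(c);
	return 0;

out_unload:
	free(c->table);			/* undo the later step first */
out_kfree:
	free(c);			/* then the earlier one */
	return ret;
}

int main(void)
{
	return fill(1, 0) == -22 ? 0 : 1;
}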
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index 243c00071f76..9bd2ce2a3040 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c | |||
@@ -303,6 +303,11 @@ static void bdev_put_device(struct super_block *sb) | |||
303 | close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE); | 303 | close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE); |
304 | } | 304 | } |
305 | 305 | ||
306 | static int bdev_can_write_buf(struct super_block *sb, u64 ofs) | ||
307 | { | ||
308 | return 0; | ||
309 | } | ||
310 | |||
306 | static const struct logfs_device_ops bd_devops = { | 311 | static const struct logfs_device_ops bd_devops = { |
307 | .find_first_sb = bdev_find_first_sb, | 312 | .find_first_sb = bdev_find_first_sb, |
308 | .find_last_sb = bdev_find_last_sb, | 313 | .find_last_sb = bdev_find_last_sb, |
@@ -310,6 +315,7 @@ static const struct logfs_device_ops bd_devops = { | |||
310 | .readpage = bdev_readpage, | 315 | .readpage = bdev_readpage, |
311 | .writeseg = bdev_writeseg, | 316 | .writeseg = bdev_writeseg, |
312 | .erase = bdev_erase, | 317 | .erase = bdev_erase, |
318 | .can_write_buf = bdev_can_write_buf, | ||
313 | .sync = bdev_sync, | 319 | .sync = bdev_sync, |
314 | .put_device = bdev_put_device, | 320 | .put_device = bdev_put_device, |
315 | }; | 321 | }; |
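The dev_bdev.c hunk extends the logfs device-operations table with a can_write_buf hook; block devices can always rewrite a buffer in place, so their implementation simply returns 0, while the MTD backend in the next hunk has to check the flash. A userspace analogue of that ops-table dispatch; only the names taken from the diff are real, the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

struct device_ops {
	int (*can_write_buf)(uint64_t ofs);	/* 0 = rewritable, nonzero = not */
};

static int bdev_can_write_buf(uint64_t ofs)
{
	(void)ofs;
	return 0;	/* block devices can always overwrite the buffer */
}

static int check_area(const struct device_ops *ops, uint64_t ofs)
{
	if (ops->can_write_buf(ofs) == 0)
		return 0;	/* buffer can be rewritten, nothing to recover */
	/* otherwise the open segment has to be garbage collected (see gc.c hunk) */
	printf("possibly incomplete write at %llx\n",
	       (unsigned long long)ofs);
	return 1;
}

int main(void)
{
	static const struct device_ops bd_devops = {
		.can_write_buf = bdev_can_write_buf,
	};

	return check_area(&bd_devops, 0x1000);
}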
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c index cafb6ef2e05b..a85d47d13e4b 100644 --- a/fs/logfs/dev_mtd.c +++ b/fs/logfs/dev_mtd.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/completion.h> | 9 | #include <linux/completion.h> |
10 | #include <linux/mount.h> | 10 | #include <linux/mount.h> |
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/slab.h> | ||
12 | 13 | ||
13 | #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) | 14 | #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) |
14 | 15 | ||
@@ -126,7 +127,8 @@ static int mtd_readpage(void *_sb, struct page *page) | |||
126 | 127 | ||
127 | err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, | 128 | err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, |
128 | page_address(page)); | 129 | page_address(page)); |
129 | if (err == -EUCLEAN) { | 130 | if (err == -EUCLEAN || err == -EBADMSG) { |
131 | /* -EBADMSG happens regularly on power failures */ | ||
130 | err = 0; | 132 | err = 0; |
131 | /* FIXME: force GC this segment */ | 133 | /* FIXME: force GC this segment */ |
132 | } | 134 | } |
@@ -233,12 +235,32 @@ static void mtd_put_device(struct super_block *sb) | |||
233 | put_mtd_device(logfs_super(sb)->s_mtd); | 235 | put_mtd_device(logfs_super(sb)->s_mtd); |
234 | } | 236 | } |
235 | 237 | ||
238 | static int mtd_can_write_buf(struct super_block *sb, u64 ofs) | ||
239 | { | ||
240 | struct logfs_super *super = logfs_super(sb); | ||
241 | void *buf; | ||
242 | int err; | ||
243 | |||
244 | buf = kmalloc(super->s_writesize, GFP_KERNEL); | ||
245 | if (!buf) | ||
246 | return -ENOMEM; | ||
247 | err = mtd_read(sb, ofs, super->s_writesize, buf); | ||
248 | if (err) | ||
249 | goto out; | ||
250 | if (memchr_inv(buf, 0xff, super->s_writesize)) | ||
251 | err = -EIO; | ||
252 | kfree(buf); | ||
253 | out: | ||
254 | return err; | ||
255 | } | ||
256 | |||
236 | static const struct logfs_device_ops mtd_devops = { | 257 | static const struct logfs_device_ops mtd_devops = { |
237 | .find_first_sb = mtd_find_first_sb, | 258 | .find_first_sb = mtd_find_first_sb, |
238 | .find_last_sb = mtd_find_last_sb, | 259 | .find_last_sb = mtd_find_last_sb, |
239 | .readpage = mtd_readpage, | 260 | .readpage = mtd_readpage, |
240 | .writeseg = mtd_writeseg, | 261 | .writeseg = mtd_writeseg, |
241 | .erase = mtd_erase, | 262 | .erase = mtd_erase, |
263 | .can_write_buf = mtd_can_write_buf, | ||
242 | .sync = mtd_sync, | 264 | .sync = mtd_sync, |
243 | .put_device = mtd_put_device, | 265 | .put_device = mtd_put_device, |
244 | }; | 266 | }; |
@@ -250,5 +272,7 @@ int logfs_get_sb_mtd(struct file_system_type *type, int flags, | |||
250 | const struct logfs_device_ops *devops = &mtd_devops; | 272 | const struct logfs_device_ops *devops = &mtd_devops; |
251 | 273 | ||
252 | mtd = get_mtd_device(NULL, mtdnr); | 274 | mtd = get_mtd_device(NULL, mtdnr); |
275 | if (IS_ERR(mtd)) | ||
276 | return PTR_ERR(mtd); | ||
253 | return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt); | 277 | return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt); |
254 | } | 278 | } |
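The new mtd_can_write_buf() above reads back one write-buffer-sized region and treats it as rewritable only if every byte is still 0xff, i.e. the flash was never programmed there. memchr_inv() is a kernel helper, so the sketch below writes out a small userspace equivalent; it is an illustration of the check, not the logfs code.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* return a pointer to the first byte that differs from c, or NULL if none */
static const void *memchr_inv_like(const void *start, int c, size_t bytes)
{
	const unsigned char *p = start;
	size_t i;

	for (i = 0; i < bytes; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

static int can_write_buf(const unsigned char *region, size_t writesize)
{
	if (memchr_inv_like(region, 0xff, writesize))
		return -5;	/* -EIO analogue: already programmed */
	return 0;		/* still erased, safe to write */
}

int main(void)
{
	unsigned char erased[8], dirty[8];

	memset(erased, 0xff, sizeof(erased));
	memset(dirty, 0xff, sizeof(dirty));
	dirty[3] = 0x00;

	printf("%d %d\n", can_write_buf(erased, 8), can_write_buf(dirty, 8));
	return 0;
}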
diff --git a/fs/logfs/file.c b/fs/logfs/file.c index 370f367a933e..0de524071870 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c | |||
@@ -161,7 +161,17 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) | |||
161 | 161 | ||
162 | static void logfs_invalidatepage(struct page *page, unsigned long offset) | 162 | static void logfs_invalidatepage(struct page *page, unsigned long offset) |
163 | { | 163 | { |
164 | move_page_to_btree(page); | 164 | struct logfs_block *block = logfs_block(page); |
165 | |||
166 | if (block->reserved_bytes) { | ||
167 | struct super_block *sb = page->mapping->host->i_sb; | ||
168 | struct logfs_super *super = logfs_super(sb); | ||
169 | |||
170 | super->s_dirty_pages -= block->reserved_bytes; | ||
171 | block->ops->free_block(sb, block); | ||
172 | BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR)); | ||
173 | } else | ||
174 | move_page_to_btree(page); | ||
165 | BUG_ON(PagePrivate(page) || page->private); | 175 | BUG_ON(PagePrivate(page) || page->private); |
166 | } | 176 | } |
167 | 177 | ||
@@ -212,10 +222,8 @@ int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | |||
212 | int logfs_fsync(struct file *file, struct dentry *dentry, int datasync) | 222 | int logfs_fsync(struct file *file, struct dentry *dentry, int datasync) |
213 | { | 223 | { |
214 | struct super_block *sb = dentry->d_inode->i_sb; | 224 | struct super_block *sb = dentry->d_inode->i_sb; |
215 | struct logfs_super *super = logfs_super(sb); | ||
216 | 225 | ||
217 | /* FIXME: write anchor */ | 226 | logfs_write_anchor(sb); |
218 | super->s_devops->sync(sb); | ||
219 | return 0; | 227 | return 0; |
220 | } | 228 | } |
221 | 229 | ||
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c index 76c242fbe1b0..caa4419285dc 100644 --- a/fs/logfs/gc.c +++ b/fs/logfs/gc.c | |||
@@ -122,7 +122,7 @@ static void logfs_cleanse_block(struct super_block *sb, u64 ofs, u64 ino, | |||
122 | logfs_safe_iput(inode, cookie); | 122 | logfs_safe_iput(inode, cookie); |
123 | } | 123 | } |
124 | 124 | ||
125 | static u32 logfs_gc_segment(struct super_block *sb, u32 segno, u8 dist) | 125 | static u32 logfs_gc_segment(struct super_block *sb, u32 segno) |
126 | { | 126 | { |
127 | struct logfs_super *super = logfs_super(sb); | 127 | struct logfs_super *super = logfs_super(sb); |
128 | struct logfs_segment_header sh; | 128 | struct logfs_segment_header sh; |
@@ -401,7 +401,7 @@ static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand) | |||
401 | segno, (u64)segno << super->s_segshift, | 401 | segno, (u64)segno << super->s_segshift, |
402 | dist, no_free_segments(sb), valid, | 402 | dist, no_free_segments(sb), valid, |
403 | super->s_free_bytes); | 403 | super->s_free_bytes); |
404 | cleaned = logfs_gc_segment(sb, segno, dist); | 404 | cleaned = logfs_gc_segment(sb, segno); |
405 | log_gc("GC segment #%02x complete - now %x valid\n", segno, | 405 | log_gc("GC segment #%02x complete - now %x valid\n", segno, |
406 | valid - cleaned); | 406 | valid - cleaned); |
407 | BUG_ON(cleaned != valid); | 407 | BUG_ON(cleaned != valid); |
@@ -632,38 +632,31 @@ static int check_area(struct super_block *sb, int i) | |||
632 | { | 632 | { |
633 | struct logfs_super *super = logfs_super(sb); | 633 | struct logfs_super *super = logfs_super(sb); |
634 | struct logfs_area *area = super->s_area[i]; | 634 | struct logfs_area *area = super->s_area[i]; |
635 | struct logfs_object_header oh; | 635 | gc_level_t gc_level; |
636 | u32 cleaned, valid, ec; | ||
636 | u32 segno = area->a_segno; | 637 | u32 segno = area->a_segno; |
637 | u32 ofs = area->a_used_bytes; | 638 | u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes); |
638 | __be32 crc; | ||
639 | int err; | ||
640 | 639 | ||
641 | if (!area->a_is_open) | 640 | if (!area->a_is_open) |
642 | return 0; | 641 | return 0; |
643 | 642 | ||
644 | for (ofs = area->a_used_bytes; | 643 | if (super->s_devops->can_write_buf(sb, ofs) == 0) |
645 | ofs <= super->s_segsize - sizeof(oh); | 644 | return 0; |
646 | ofs += (u32)be16_to_cpu(oh.len) + sizeof(oh)) { | ||
647 | err = wbuf_read(sb, dev_ofs(sb, segno, ofs), sizeof(oh), &oh); | ||
648 | if (err) | ||
649 | return err; | ||
650 | |||
651 | if (!memchr_inv(&oh, 0xff, sizeof(oh))) | ||
652 | break; | ||
653 | 645 | ||
654 | crc = logfs_crc32(&oh, sizeof(oh) - 4, 4); | 646 | printk(KERN_INFO"LogFS: Possibly incomplete write at %llx\n", ofs); |
655 | if (crc != oh.crc) { | 647 | /* |
656 | printk(KERN_INFO "interrupted header at %llx\n", | 648 | * The device cannot write back the write buffer. Most likely the |
657 | dev_ofs(sb, segno, ofs)); | 649 | * wbuf was already written out and the system crashed at some point |
658 | return 0; | 650 | * before the journal commit happened. In that case we wouldn't have |
659 | } | 651 | * to do anything. But if the crash happened before the wbuf was |
660 | } | 652 | * written out correctly, we must GC this segment. So assume the |
661 | if (ofs != area->a_used_bytes) { | 653 | * worst and always do the GC run. |
662 | printk(KERN_INFO "%x bytes unaccounted data found at %llx\n", | 654 | */ |
663 | ofs - area->a_used_bytes, | 655 | area->a_is_open = 0; |
664 | dev_ofs(sb, segno, area->a_used_bytes)); | 656 | valid = logfs_valid_bytes(sb, segno, &ec, &gc_level); |
665 | area->a_used_bytes = ofs; | 657 | cleaned = logfs_gc_segment(sb, segno); |
666 | } | 658 | if (cleaned != valid) |
659 | return -EIO; | ||
667 | return 0; | 660 | return 0; |
668 | } | 661 | } |
669 | 662 | ||
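
The rewritten check_area() above no longer scans the open area for interrupted object headers; it asks the device whether the buffered bytes can still be written back and, if not, closes the area and garbage-collects the whole segment on the assumption that the crash happened before the write buffer reached the medium. A minimal userspace sketch of that decision, using invented stand-in types (struct area, can_write_buf) rather than the real logfs structures:

#include <stdio.h>
#include <stdbool.h>

struct area {
        bool is_open;
        unsigned segno;
        unsigned long long write_ofs;   /* device offset of the pending wbuf */
};

/* Returns true if the device could still append the buffered bytes at ofs. */
static bool can_write_buf(unsigned long long ofs)
{
        /* a real driver decides from the flash page state; model it as "no" */
        (void)ofs;
        return false;
}

/* Models the patched check_area(): when the pending write buffer can no
 * longer be written, close the area and GC its segment so any half-written
 * data gets rewritten elsewhere. */
static int check_area(struct area *area)
{
        if (!area->is_open)
                return 0;
        if (can_write_buf(area->write_ofs))
                return 0;

        printf("possibly incomplete write at %llx, GC segment %u\n",
               area->write_ofs, area->segno);
        area->is_open = false;
        /* the real code now calls logfs_gc_segment() and compares the
         * cleaned byte count against logfs_valid_bytes() */
        return 0;
}

int main(void)
{
        struct area a = { .is_open = true, .segno = 3, .write_ofs = 0x42000 };
        return check_area(&a);
}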
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c index 14ed27274da2..755a92e8daa7 100644 --- a/fs/logfs/inode.c +++ b/fs/logfs/inode.c | |||
@@ -193,6 +193,7 @@ static void logfs_init_inode(struct super_block *sb, struct inode *inode) | |||
193 | inode->i_ctime = CURRENT_TIME; | 193 | inode->i_ctime = CURRENT_TIME; |
194 | inode->i_mtime = CURRENT_TIME; | 194 | inode->i_mtime = CURRENT_TIME; |
195 | inode->i_nlink = 1; | 195 | inode->i_nlink = 1; |
196 | li->li_refcount = 1; | ||
196 | INIT_LIST_HEAD(&li->li_freeing_list); | 197 | INIT_LIST_HEAD(&li->li_freeing_list); |
197 | 198 | ||
198 | for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) | 199 | for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++) |
@@ -326,7 +327,7 @@ static void logfs_set_ino_generation(struct super_block *sb, | |||
326 | u64 ino; | 327 | u64 ino; |
327 | 328 | ||
328 | mutex_lock(&super->s_journal_mutex); | 329 | mutex_lock(&super->s_journal_mutex); |
329 | ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino); | 330 | ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1); |
330 | super->s_last_ino = ino; | 331 | super->s_last_ino = ino; |
331 | super->s_inos_till_wrap--; | 332 | super->s_inos_till_wrap--; |
332 | if (super->s_inos_till_wrap < 0) { | 333 | if (super->s_inos_till_wrap < 0) { |
@@ -386,8 +387,7 @@ static void logfs_init_once(void *_li) | |||
386 | 387 | ||
387 | static int logfs_sync_fs(struct super_block *sb, int wait) | 388 | static int logfs_sync_fs(struct super_block *sb, int wait) |
388 | { | 389 | { |
389 | /* FIXME: write anchor */ | 390 | logfs_write_anchor(sb); |
390 | logfs_super(sb)->s_devops->sync(sb); | ||
391 | return 0; | 391 | return 0; |
392 | } | 392 | } |
393 | 393 | ||
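
The one-character change in logfs_set_ino_generation() matters because logfs_seek_hole() returns the first unused slot at or after its starting index; starting the search at s_last_ino can hand the same inode number out twice when that slot has not been written back yet. A toy model of the off-by-one (array and names are illustrative only):

#include <stdio.h>

/* 1 = inode slot written out, 0 = hole; index 3 was just allocated but the
 * master inode has not been updated yet. */
static const int written[8] = { 1, 1, 1, 0, 0, 1, 0, 0 };

/* Returns the first hole at or after 'from', like logfs_seek_hole(). */
static int seek_hole(int from)
{
        for (int i = from; i < 8; i++)
                if (!written[i])
                        return i;
        return -1;
}

int main(void)
{
        int last_ino = 3;

        printf("from last_ino:     %d\n", seek_hole(last_ino));      /* 3 again */
        printf("from last_ino + 1: %d\n", seek_hole(last_ino + 1));  /* 4 */
        return 0;
}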
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index fb0a613f885b..4b0e0616b357 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
@@ -132,10 +132,9 @@ static int read_area(struct super_block *sb, struct logfs_je_area *a) | |||
132 | 132 | ||
133 | ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes); | 133 | ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes); |
134 | if (super->s_writesize > 1) | 134 | if (super->s_writesize > 1) |
135 | logfs_buf_recover(area, ofs, a + 1, super->s_writesize); | 135 | return logfs_buf_recover(area, ofs, a + 1, super->s_writesize); |
136 | else | 136 | else |
137 | logfs_buf_recover(area, ofs, NULL, 0); | 137 | return logfs_buf_recover(area, ofs, NULL, 0); |
138 | return 0; | ||
139 | } | 138 | } |
140 | 139 | ||
141 | static void *unpack(void *from, void *to) | 140 | static void *unpack(void *from, void *to) |
@@ -245,7 +244,7 @@ static int read_je(struct super_block *sb, u64 ofs) | |||
245 | read_erasecount(sb, unpack(jh, scratch)); | 244 | read_erasecount(sb, unpack(jh, scratch)); |
246 | break; | 245 | break; |
247 | case JE_AREA: | 246 | case JE_AREA: |
248 | read_area(sb, unpack(jh, scratch)); | 247 | err = read_area(sb, unpack(jh, scratch)); |
249 | break; | 248 | break; |
250 | case JE_OBJ_ALIAS: | 249 | case JE_OBJ_ALIAS: |
251 | err = logfs_load_object_aliases(sb, unpack(jh, scratch), | 250 | err = logfs_load_object_aliases(sb, unpack(jh, scratch), |
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index 0a3df1a0c936..93b55f337245 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h | |||
@@ -144,6 +144,7 @@ struct logfs_area_ops { | |||
144 | * @erase: erase one segment | 144 | * @erase: erase one segment |
145 | * @read: read from the device | 145 | * @read: read from the device |
146 | * @erase: erase part of the device | 146 | * @erase: erase part of the device |
147 | * @can_write_buf: decide whether wbuf can be written to ofs | ||
147 | */ | 148 | */ |
148 | struct logfs_device_ops { | 149 | struct logfs_device_ops { |
149 | struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs); | 150 | struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs); |
@@ -153,6 +154,7 @@ struct logfs_device_ops { | |||
153 | void (*writeseg)(struct super_block *sb, u64 ofs, size_t len); | 154 | void (*writeseg)(struct super_block *sb, u64 ofs, size_t len); |
154 | int (*erase)(struct super_block *sb, loff_t ofs, size_t len, | 155 | int (*erase)(struct super_block *sb, loff_t ofs, size_t len, |
155 | int ensure_write); | 156 | int ensure_write); |
157 | int (*can_write_buf)(struct super_block *sb, u64 ofs); | ||
156 | void (*sync)(struct super_block *sb); | 158 | void (*sync)(struct super_block *sb); |
157 | void (*put_device)(struct super_block *sb); | 159 | void (*put_device)(struct super_block *sb); |
158 | }; | 160 | }; |
@@ -394,6 +396,7 @@ struct logfs_super { | |||
394 | int s_lock_count; | 396 | int s_lock_count; |
395 | mempool_t *s_block_pool; /* struct logfs_block pool */ | 397 | mempool_t *s_block_pool; /* struct logfs_block pool */ |
396 | mempool_t *s_shadow_pool; /* struct logfs_shadow pool */ | 398 | mempool_t *s_shadow_pool; /* struct logfs_shadow pool */ |
399 | struct list_head s_writeback_list; /* writeback pages */ | ||
397 | /* | 400 | /* |
398 | * Space accounting: | 401 | * Space accounting: |
399 | * - s_used_bytes specifies space used to store valid data objects. | 402 | * - s_used_bytes specifies space used to store valid data objects. |
@@ -598,19 +601,19 @@ void freeseg(struct super_block *sb, u32 segno); | |||
598 | int logfs_init_areas(struct super_block *sb); | 601 | int logfs_init_areas(struct super_block *sb); |
599 | void logfs_cleanup_areas(struct super_block *sb); | 602 | void logfs_cleanup_areas(struct super_block *sb); |
600 | int logfs_open_area(struct logfs_area *area, size_t bytes); | 603 | int logfs_open_area(struct logfs_area *area, size_t bytes); |
601 | void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, | 604 | int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, |
602 | int use_filler); | 605 | int use_filler); |
603 | 606 | ||
604 | static inline void logfs_buf_write(struct logfs_area *area, u64 ofs, | 607 | static inline int logfs_buf_write(struct logfs_area *area, u64 ofs, |
605 | void *buf, size_t len) | 608 | void *buf, size_t len) |
606 | { | 609 | { |
607 | __logfs_buf_write(area, ofs, buf, len, 0); | 610 | return __logfs_buf_write(area, ofs, buf, len, 0); |
608 | } | 611 | } |
609 | 612 | ||
610 | static inline void logfs_buf_recover(struct logfs_area *area, u64 ofs, | 613 | static inline int logfs_buf_recover(struct logfs_area *area, u64 ofs, |
611 | void *buf, size_t len) | 614 | void *buf, size_t len) |
612 | { | 615 | { |
613 | __logfs_buf_write(area, ofs, buf, len, 1); | 616 | return __logfs_buf_write(area, ofs, buf, len, 1); |
614 | } | 617 | } |
615 | 618 | ||
616 | /* super.c */ | 619 | /* super.c */ |
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 3159db6958e5..0718d112a1a5 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
@@ -892,6 +892,8 @@ u64 logfs_seek_hole(struct inode *inode, u64 bix) | |||
892 | return bix; | 892 | return bix; |
893 | else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED) | 893 | else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED) |
894 | bix = maxbix(li->li_height); | 894 | bix = maxbix(li->li_height); |
895 | else if (bix >= maxbix(li->li_height)) | ||
896 | return bix; | ||
895 | else { | 897 | else { |
896 | bix = seek_holedata_loop(inode, bix, 0); | 898 | bix = seek_holedata_loop(inode, bix, 0); |
897 | if (bix < maxbix(li->li_height)) | 899 | if (bix < maxbix(li->li_height)) |
@@ -1093,17 +1095,25 @@ static int logfs_reserve_bytes(struct inode *inode, int bytes) | |||
1093 | int get_page_reserve(struct inode *inode, struct page *page) | 1095 | int get_page_reserve(struct inode *inode, struct page *page) |
1094 | { | 1096 | { |
1095 | struct logfs_super *super = logfs_super(inode->i_sb); | 1097 | struct logfs_super *super = logfs_super(inode->i_sb); |
1098 | struct logfs_block *block = logfs_block(page); | ||
1096 | int ret; | 1099 | int ret; |
1097 | 1100 | ||
1098 | if (logfs_block(page) && logfs_block(page)->reserved_bytes) | 1101 | if (block && block->reserved_bytes) |
1099 | return 0; | 1102 | return 0; |
1100 | 1103 | ||
1101 | logfs_get_wblocks(inode->i_sb, page, WF_LOCK); | 1104 | logfs_get_wblocks(inode->i_sb, page, WF_LOCK); |
1102 | ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE); | 1105 | while ((ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE)) && |
1106 | !list_empty(&super->s_writeback_list)) { | ||
1107 | block = list_entry(super->s_writeback_list.next, | ||
1108 | struct logfs_block, alias_list); | ||
1109 | block->ops->write_block(block); | ||
1110 | } | ||
1103 | if (!ret) { | 1111 | if (!ret) { |
1104 | alloc_data_block(inode, page); | 1112 | alloc_data_block(inode, page); |
1105 | logfs_block(page)->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE; | 1113 | block = logfs_block(page); |
1114 | block->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE; | ||
1106 | super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE; | 1115 | super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE; |
1116 | list_move_tail(&block->alias_list, &super->s_writeback_list); | ||
1107 | } | 1117 | } |
1108 | logfs_put_wblocks(inode->i_sb, page, WF_LOCK); | 1118 | logfs_put_wblocks(inode->i_sb, page, WF_LOCK); |
1109 | return ret; | 1119 | return ret; |
@@ -1861,7 +1871,7 @@ int logfs_truncate(struct inode *inode, u64 target) | |||
1861 | size = target; | 1871 | size = target; |
1862 | 1872 | ||
1863 | logfs_get_wblocks(sb, NULL, 1); | 1873 | logfs_get_wblocks(sb, NULL, 1); |
1864 | err = __logfs_truncate(inode, target); | 1874 | err = __logfs_truncate(inode, size); |
1865 | if (!err) | 1875 | if (!err) |
1866 | err = __logfs_write_inode(inode, 0); | 1876 | err = __logfs_write_inode(inode, 0); |
1867 | logfs_put_wblocks(sb, NULL, 1); | 1877 | logfs_put_wblocks(sb, NULL, 1); |
@@ -2249,6 +2259,7 @@ int logfs_init_rw(struct super_block *sb) | |||
2249 | int min_fill = 3 * super->s_no_blocks; | 2259 | int min_fill = 3 * super->s_no_blocks; |
2250 | 2260 | ||
2251 | INIT_LIST_HEAD(&super->s_object_alias); | 2261 | INIT_LIST_HEAD(&super->s_object_alias); |
2262 | INIT_LIST_HEAD(&super->s_writeback_list); | ||
2252 | mutex_init(&super->s_write_mutex); | 2263 | mutex_init(&super->s_write_mutex); |
2253 | super->s_block_pool = mempool_create_kmalloc_pool(min_fill, | 2264 | super->s_block_pool = mempool_create_kmalloc_pool(min_fill, |
2254 | sizeof(struct logfs_block)); | 2265 | sizeof(struct logfs_block)); |
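
The new loop in get_page_reserve() retries the byte reservation after flushing entries from the freshly added s_writeback_list, so a tight pool is drained by writing back already-reserved blocks instead of failing immediately. A compact userspace model of that retry pattern, with made-up pool sizes and helpers:

#include <stdio.h>

#define NEED 6                  /* stand-in for 6 * LOGFS_MAX_OBJECTSIZE */

static int free_space = 4;      /* not enough for NEED initially */
static int queued_blocks = 3;   /* stand-in for s_writeback_list */

/* Nonzero on failure, like logfs_reserve_bytes(). */
static int reserve(int bytes)
{
        if (free_space < bytes)
                return -1;
        free_space -= bytes;
        return 0;
}

static void write_back_one(void)
{
        queued_blocks--;
        free_space += 2;        /* flushing a block frees some space */
}

int main(void)
{
        int ret;

        /* mirrors: while ((ret = logfs_reserve_bytes(...)) &&
         *                 !list_empty(&super->s_writeback_list)) */
        while ((ret = reserve(NEED)) && queued_blocks > 0)
                write_back_one();

        printf("ret=%d free=%d queued=%d\n", ret, free_space, queued_blocks);
        return ret ? 1 : 0;
}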
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index f77ce2b470ba..a9657afb70ad 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
@@ -67,7 +67,7 @@ static struct page *get_mapping_page(struct super_block *sb, pgoff_t index, | |||
67 | return page; | 67 | return page; |
68 | } | 68 | } |
69 | 69 | ||
70 | void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, | 70 | int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, |
71 | int use_filler) | 71 | int use_filler) |
72 | { | 72 | { |
73 | pgoff_t index = ofs >> PAGE_SHIFT; | 73 | pgoff_t index = ofs >> PAGE_SHIFT; |
@@ -81,8 +81,10 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, | |||
81 | copylen = min((ulong)len, PAGE_SIZE - offset); | 81 | copylen = min((ulong)len, PAGE_SIZE - offset); |
82 | 82 | ||
83 | page = get_mapping_page(area->a_sb, index, use_filler); | 83 | page = get_mapping_page(area->a_sb, index, use_filler); |
84 | SetPageUptodate(page); | 84 | if (IS_ERR(page)) |
85 | return PTR_ERR(page); | ||
85 | BUG_ON(!page); /* FIXME: reserve a pool */ | 86 | BUG_ON(!page); /* FIXME: reserve a pool */ |
87 | SetPageUptodate(page); | ||
86 | memcpy(page_address(page) + offset, buf, copylen); | 88 | memcpy(page_address(page) + offset, buf, copylen); |
87 | SetPagePrivate(page); | 89 | SetPagePrivate(page); |
88 | page_cache_release(page); | 90 | page_cache_release(page); |
@@ -92,6 +94,7 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, | |||
92 | offset = 0; | 94 | offset = 0; |
93 | index++; | 95 | index++; |
94 | } while (len); | 96 | } while (len); |
97 | return 0; | ||
95 | } | 98 | } |
96 | 99 | ||
97 | static void pad_partial_page(struct logfs_area *area) | 100 | static void pad_partial_page(struct logfs_area *area) |
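
__logfs_buf_write() now checks the page returned by get_mapping_page() with IS_ERR() before touching it and propagates the failure upward (which is why the logfs.h wrappers above gained an int return). The snippet below is a userspace imitation of the pointer-encoded-error convention involved, not the kernel macros themselves:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }
static inline int   is_err(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for get_mapping_page(): may return an error encoded in the
 * pointer instead of a usable page. */
static void *get_mapping_page(int fail)
{
        static char page[4096];
        return fail ? err_ptr(-ENOMEM) : page;
}

int main(void)
{
        void *page = get_mapping_page(1);

        if (is_err(page)) {             /* the check the patch adds */
                printf("buf_write fails with %ld\n", ptr_err(page));
                return 1;
        }
        /* only now is it safe to mark the page uptodate and memcpy into it */
        return 0;
}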
diff --git a/fs/logfs/super.c b/fs/logfs/super.c index 5866ee6e1327..d651e10a1e9c 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c | |||
@@ -138,10 +138,14 @@ static int logfs_sb_set(struct super_block *sb, void *_super) | |||
138 | sb->s_fs_info = super; | 138 | sb->s_fs_info = super; |
139 | sb->s_mtd = super->s_mtd; | 139 | sb->s_mtd = super->s_mtd; |
140 | sb->s_bdev = super->s_bdev; | 140 | sb->s_bdev = super->s_bdev; |
141 | #ifdef CONFIG_BLOCK | ||
141 | if (sb->s_bdev) | 142 | if (sb->s_bdev) |
142 | sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; | 143 | sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; |
144 | #endif | ||
145 | #ifdef CONFIG_MTD | ||
143 | if (sb->s_mtd) | 146 | if (sb->s_mtd) |
144 | sb->s_bdi = sb->s_mtd->backing_dev_info; | 147 | sb->s_bdi = sb->s_mtd->backing_dev_info; |
148 | #endif | ||
145 | return 0; | 149 | return 0; |
146 | } | 150 | } |
147 | 151 | ||
@@ -333,27 +337,27 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt) | |||
333 | goto fail; | 337 | goto fail; |
334 | 338 | ||
335 | sb->s_root = d_alloc_root(rootdir); | 339 | sb->s_root = d_alloc_root(rootdir); |
336 | if (!sb->s_root) | 340 | if (!sb->s_root) { |
337 | goto fail2; | 341 | iput(rootdir); |
342 | goto fail; | ||
343 | } | ||
338 | 344 | ||
339 | super->s_erase_page = alloc_pages(GFP_KERNEL, 0); | 345 | super->s_erase_page = alloc_pages(GFP_KERNEL, 0); |
340 | if (!super->s_erase_page) | 346 | if (!super->s_erase_page) |
341 | goto fail2; | 347 | goto fail; |
342 | memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE); | 348 | memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE); |
343 | 349 | ||
344 | /* FIXME: check for read-only mounts */ | 350 | /* FIXME: check for read-only mounts */ |
345 | err = logfs_make_writeable(sb); | 351 | err = logfs_make_writeable(sb); |
346 | if (err) | 352 | if (err) |
347 | goto fail3; | 353 | goto fail1; |
348 | 354 | ||
349 | log_super("LogFS: Finished mounting\n"); | 355 | log_super("LogFS: Finished mounting\n"); |
350 | simple_set_mnt(mnt, sb); | 356 | simple_set_mnt(mnt, sb); |
351 | return 0; | 357 | return 0; |
352 | 358 | ||
353 | fail3: | 359 | fail1: |
354 | __free_page(super->s_erase_page); | 360 | __free_page(super->s_erase_page); |
355 | fail2: | ||
356 | iput(rootdir); | ||
357 | fail: | 361 | fail: |
358 | iput(logfs_super(sb)->s_master_inode); | 362 | iput(logfs_super(sb)->s_master_inode); |
359 | return -EIO; | 363 | return -EIO; |
@@ -382,7 +386,7 @@ static struct page *find_super_block(struct super_block *sb) | |||
382 | if (!first || IS_ERR(first)) | 386 | if (!first || IS_ERR(first)) |
383 | return NULL; | 387 | return NULL; |
384 | last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]); | 388 | last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]); |
385 | if (!last || IS_ERR(first)) { | 389 | if (!last || IS_ERR(last)) { |
386 | page_cache_release(first); | 390 | page_cache_release(first); |
387 | return NULL; | 391 | return NULL; |
388 | } | 392 | } |
@@ -413,7 +417,7 @@ static int __logfs_read_sb(struct super_block *sb) | |||
413 | 417 | ||
414 | page = find_super_block(sb); | 418 | page = find_super_block(sb); |
415 | if (!page) | 419 | if (!page) |
416 | return -EIO; | 420 | return -EINVAL; |
417 | 421 | ||
418 | ds = page_address(page); | 422 | ds = page_address(page); |
419 | super->s_size = be64_to_cpu(ds->ds_filesystem_size); | 423 | super->s_size = be64_to_cpu(ds->ds_filesystem_size); |
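
The reshuffled error labels in logfs_get_sb_final() encode an ownership rule: d_alloc_root() consumes the inode reference only when it succeeds, so iput(rootdir) belongs on its failure branch and nowhere else; the old fail2 label also ran after d_alloc_root() had succeeded and taken over the reference, dropping it a second time. A userspace model of that transfer-on-success rule, with invented types:

#include <stdio.h>
#include <stdlib.h>

struct inode { int refs; };
struct root  { struct inode *inode; };

static void iput(struct inode *i)
{
        if (--i->refs == 0)
                printf("inode freed\n");
}

/* On success the returned root owns the inode reference (like
 * d_alloc_root()); on failure ownership stays with the caller. */
static struct root *alloc_root(struct inode *i, int fail)
{
        struct root *r;

        if (fail)
                return NULL;
        r = malloc(sizeof(*r));
        if (!r)
                return NULL;
        r->inode = i;
        return r;
}

int main(void)
{
        struct inode ino = { .refs = 1 };
        struct root *root = alloc_root(&ino, 1);

        if (!root) {
                iput(&ino);     /* caller still owns the ref, drop it here */
                return 1;
        }
        /* success: dropping the ref again on a later error would be a
         * double put - later failures must not touch it */
        return 0;
}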
diff --git a/fs/namei.c b/fs/namei.c index a7dce91a7e42..b86b96fe1dc3 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1641,7 +1641,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
1641 | if (nd->last.name[nd->last.len]) { | 1641 | if (nd->last.name[nd->last.len]) { |
1642 | if (open_flag & O_CREAT) | 1642 | if (open_flag & O_CREAT) |
1643 | goto exit; | 1643 | goto exit; |
1644 | nd->flags |= LOOKUP_DIRECTORY; | 1644 | nd->flags |= LOOKUP_DIRECTORY | LOOKUP_FOLLOW; |
1645 | } | 1645 | } |
1646 | 1646 | ||
1647 | /* just plain open? */ | 1647 | /* just plain open? */ |
@@ -1830,6 +1830,8 @@ reval: | |||
1830 | } | 1830 | } |
1831 | if (open_flag & O_DIRECTORY) | 1831 | if (open_flag & O_DIRECTORY) |
1832 | nd.flags |= LOOKUP_DIRECTORY; | 1832 | nd.flags |= LOOKUP_DIRECTORY; |
1833 | if (!(open_flag & O_NOFOLLOW)) | ||
1834 | nd.flags |= LOOKUP_FOLLOW; | ||
1833 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); | 1835 | filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); |
1834 | while (unlikely(!filp)) { /* trailing symlink */ | 1836 | while (unlikely(!filp)) { /* trailing symlink */ |
1835 | struct path holder; | 1837 | struct path holder; |
@@ -1837,7 +1839,7 @@ reval: | |||
1837 | void *cookie; | 1839 | void *cookie; |
1838 | error = -ELOOP; | 1840 | error = -ELOOP; |
1839 | /* S_ISDIR part is a temporary automount kludge */ | 1841 | /* S_ISDIR part is a temporary automount kludge */ |
1840 | if ((open_flag & O_NOFOLLOW) && !S_ISDIR(inode->i_mode)) | 1842 | if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(inode->i_mode)) |
1841 | goto exit_dput; | 1843 | goto exit_dput; |
1842 | if (count++ == 32) | 1844 | if (count++ == 32) |
1843 | goto exit_dput; | 1845 | goto exit_dput; |
@@ -2174,8 +2176,10 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
2174 | error = security_inode_rmdir(dir, dentry); | 2176 | error = security_inode_rmdir(dir, dentry); |
2175 | if (!error) { | 2177 | if (!error) { |
2176 | error = dir->i_op->rmdir(dir, dentry); | 2178 | error = dir->i_op->rmdir(dir, dentry); |
2177 | if (!error) | 2179 | if (!error) { |
2178 | dentry->d_inode->i_flags |= S_DEAD; | 2180 | dentry->d_inode->i_flags |= S_DEAD; |
2181 | dont_mount(dentry); | ||
2182 | } | ||
2179 | } | 2183 | } |
2180 | } | 2184 | } |
2181 | mutex_unlock(&dentry->d_inode->i_mutex); | 2185 | mutex_unlock(&dentry->d_inode->i_mutex); |
@@ -2259,7 +2263,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) | |||
2259 | if (!error) { | 2263 | if (!error) { |
2260 | error = dir->i_op->unlink(dir, dentry); | 2264 | error = dir->i_op->unlink(dir, dentry); |
2261 | if (!error) | 2265 | if (!error) |
2262 | dentry->d_inode->i_flags |= S_DEAD; | 2266 | dont_mount(dentry); |
2263 | } | 2267 | } |
2264 | } | 2268 | } |
2265 | mutex_unlock(&dentry->d_inode->i_mutex); | 2269 | mutex_unlock(&dentry->d_inode->i_mutex); |
@@ -2570,17 +2574,20 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, | |||
2570 | return error; | 2574 | return error; |
2571 | 2575 | ||
2572 | target = new_dentry->d_inode; | 2576 | target = new_dentry->d_inode; |
2573 | if (target) { | 2577 | if (target) |
2574 | mutex_lock(&target->i_mutex); | 2578 | mutex_lock(&target->i_mutex); |
2575 | dentry_unhash(new_dentry); | ||
2576 | } | ||
2577 | if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) | 2579 | if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) |
2578 | error = -EBUSY; | 2580 | error = -EBUSY; |
2579 | else | 2581 | else { |
2582 | if (target) | ||
2583 | dentry_unhash(new_dentry); | ||
2580 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); | 2584 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); |
2585 | } | ||
2581 | if (target) { | 2586 | if (target) { |
2582 | if (!error) | 2587 | if (!error) { |
2583 | target->i_flags |= S_DEAD; | 2588 | target->i_flags |= S_DEAD; |
2589 | dont_mount(new_dentry); | ||
2590 | } | ||
2584 | mutex_unlock(&target->i_mutex); | 2591 | mutex_unlock(&target->i_mutex); |
2585 | if (d_unhashed(new_dentry)) | 2592 | if (d_unhashed(new_dentry)) |
2586 | d_rehash(new_dentry); | 2593 | d_rehash(new_dentry); |
@@ -2612,7 +2619,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, | |||
2612 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); | 2619 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); |
2613 | if (!error) { | 2620 | if (!error) { |
2614 | if (target) | 2621 | if (target) |
2615 | target->i_flags |= S_DEAD; | 2622 | dont_mount(new_dentry); |
2616 | if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) | 2623 | if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) |
2617 | d_move(old_dentry, new_dentry); | 2624 | d_move(old_dentry, new_dentry); |
2618 | } | 2625 | } |
diff --git a/fs/namespace.c b/fs/namespace.c index 8174c8ab5c70..f20cb57d1067 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1432,7 +1432,7 @@ static int graft_tree(struct vfsmount *mnt, struct path *path) | |||
1432 | 1432 | ||
1433 | err = -ENOENT; | 1433 | err = -ENOENT; |
1434 | mutex_lock(&path->dentry->d_inode->i_mutex); | 1434 | mutex_lock(&path->dentry->d_inode->i_mutex); |
1435 | if (IS_DEADDIR(path->dentry->d_inode)) | 1435 | if (cant_mount(path->dentry)) |
1436 | goto out_unlock; | 1436 | goto out_unlock; |
1437 | 1437 | ||
1438 | err = security_sb_check_sb(mnt, path); | 1438 | err = security_sb_check_sb(mnt, path); |
@@ -1623,7 +1623,7 @@ static int do_move_mount(struct path *path, char *old_name) | |||
1623 | 1623 | ||
1624 | err = -ENOENT; | 1624 | err = -ENOENT; |
1625 | mutex_lock(&path->dentry->d_inode->i_mutex); | 1625 | mutex_lock(&path->dentry->d_inode->i_mutex); |
1626 | if (IS_DEADDIR(path->dentry->d_inode)) | 1626 | if (cant_mount(path->dentry)) |
1627 | goto out1; | 1627 | goto out1; |
1628 | 1628 | ||
1629 | if (d_unlinked(path->dentry)) | 1629 | if (d_unlinked(path->dentry)) |
@@ -2234,7 +2234,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, | |||
2234 | if (!check_mnt(root.mnt)) | 2234 | if (!check_mnt(root.mnt)) |
2235 | goto out2; | 2235 | goto out2; |
2236 | error = -ENOENT; | 2236 | error = -ENOENT; |
2237 | if (IS_DEADDIR(new.dentry->d_inode)) | 2237 | if (cant_mount(old.dentry)) |
2238 | goto out2; | 2238 | goto out2; |
2239 | if (d_unlinked(new.dentry)) | 2239 | if (d_unlinked(new.dentry)) |
2240 | goto out2; | 2240 | goto out2; |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 15671245c6ee..ea61d26e7871 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -24,6 +24,8 @@ | |||
24 | 24 | ||
25 | static void nfs_do_free_delegation(struct nfs_delegation *delegation) | 25 | static void nfs_do_free_delegation(struct nfs_delegation *delegation) |
26 | { | 26 | { |
27 | if (delegation->cred) | ||
28 | put_rpccred(delegation->cred); | ||
27 | kfree(delegation); | 29 | kfree(delegation); |
28 | } | 30 | } |
29 | 31 | ||
@@ -36,13 +38,7 @@ static void nfs_free_delegation_callback(struct rcu_head *head) | |||
36 | 38 | ||
37 | static void nfs_free_delegation(struct nfs_delegation *delegation) | 39 | static void nfs_free_delegation(struct nfs_delegation *delegation) |
38 | { | 40 | { |
39 | struct rpc_cred *cred; | ||
40 | |||
41 | cred = rcu_dereference(delegation->cred); | ||
42 | rcu_assign_pointer(delegation->cred, NULL); | ||
43 | call_rcu(&delegation->rcu, nfs_free_delegation_callback); | 41 | call_rcu(&delegation->rcu, nfs_free_delegation_callback); |
44 | if (cred) | ||
45 | put_rpccred(cred); | ||
46 | } | 42 | } |
47 | 43 | ||
48 | void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) | 44 | void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) |
@@ -129,21 +125,35 @@ again: | |||
129 | */ | 125 | */ |
130 | void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) | 126 | void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) |
131 | { | 127 | { |
132 | struct nfs_delegation *delegation = NFS_I(inode)->delegation; | 128 | struct nfs_delegation *delegation; |
133 | struct rpc_cred *oldcred; | 129 | struct rpc_cred *oldcred = NULL; |
134 | 130 | ||
135 | if (delegation == NULL) | 131 | rcu_read_lock(); |
136 | return; | 132 | delegation = rcu_dereference(NFS_I(inode)->delegation); |
137 | memcpy(delegation->stateid.data, res->delegation.data, | 133 | if (delegation != NULL) { |
138 | sizeof(delegation->stateid.data)); | 134 | spin_lock(&delegation->lock); |
139 | delegation->type = res->delegation_type; | 135 | if (delegation->inode != NULL) { |
140 | delegation->maxsize = res->maxsize; | 136 | memcpy(delegation->stateid.data, res->delegation.data, |
141 | oldcred = delegation->cred; | 137 | sizeof(delegation->stateid.data)); |
142 | delegation->cred = get_rpccred(cred); | 138 | delegation->type = res->delegation_type; |
143 | clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); | 139 | delegation->maxsize = res->maxsize; |
144 | NFS_I(inode)->delegation_state = delegation->type; | 140 | oldcred = delegation->cred; |
145 | smp_wmb(); | 141 | delegation->cred = get_rpccred(cred); |
146 | put_rpccred(oldcred); | 142 | clear_bit(NFS_DELEGATION_NEED_RECLAIM, |
143 | &delegation->flags); | ||
144 | NFS_I(inode)->delegation_state = delegation->type; | ||
145 | spin_unlock(&delegation->lock); | ||
146 | put_rpccred(oldcred); | ||
147 | rcu_read_unlock(); | ||
148 | } else { | ||
149 | /* We appear to have raced with a delegation return. */ | ||
150 | spin_unlock(&delegation->lock); | ||
151 | rcu_read_unlock(); | ||
152 | nfs_inode_set_delegation(inode, cred, res); | ||
153 | } | ||
154 | } else { | ||
155 | rcu_read_unlock(); | ||
156 | } | ||
147 | } | 157 | } |
148 | 158 | ||
149 | static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync) | 159 | static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync) |
@@ -166,9 +176,13 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation | |||
166 | return inode; | 176 | return inode; |
167 | } | 177 | } |
168 | 178 | ||
169 | static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid) | 179 | static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, |
180 | const nfs4_stateid *stateid, | ||
181 | struct nfs_client *clp) | ||
170 | { | 182 | { |
171 | struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation); | 183 | struct nfs_delegation *delegation = |
184 | rcu_dereference_protected(nfsi->delegation, | ||
185 | lockdep_is_held(&clp->cl_lock)); | ||
172 | 186 | ||
173 | if (delegation == NULL) | 187 | if (delegation == NULL) |
174 | goto nomatch; | 188 | goto nomatch; |
@@ -195,7 +209,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct | |||
195 | { | 209 | { |
196 | struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; | 210 | struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; |
197 | struct nfs_inode *nfsi = NFS_I(inode); | 211 | struct nfs_inode *nfsi = NFS_I(inode); |
198 | struct nfs_delegation *delegation; | 212 | struct nfs_delegation *delegation, *old_delegation; |
199 | struct nfs_delegation *freeme = NULL; | 213 | struct nfs_delegation *freeme = NULL; |
200 | int status = 0; | 214 | int status = 0; |
201 | 215 | ||
@@ -213,10 +227,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct | |||
213 | spin_lock_init(&delegation->lock); | 227 | spin_lock_init(&delegation->lock); |
214 | 228 | ||
215 | spin_lock(&clp->cl_lock); | 229 | spin_lock(&clp->cl_lock); |
216 | if (rcu_dereference(nfsi->delegation) != NULL) { | 230 | old_delegation = rcu_dereference_protected(nfsi->delegation, |
217 | if (memcmp(&delegation->stateid, &nfsi->delegation->stateid, | 231 | lockdep_is_held(&clp->cl_lock)); |
218 | sizeof(delegation->stateid)) == 0 && | 232 | if (old_delegation != NULL) { |
219 | delegation->type == nfsi->delegation->type) { | 233 | if (memcmp(&delegation->stateid, &old_delegation->stateid, |
234 | sizeof(old_delegation->stateid)) == 0 && | ||
235 | delegation->type == old_delegation->type) { | ||
220 | goto out; | 236 | goto out; |
221 | } | 237 | } |
222 | /* | 238 | /* |
@@ -226,12 +242,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct | |||
226 | dfprintk(FILE, "%s: server %s handed out " | 242 | dfprintk(FILE, "%s: server %s handed out " |
227 | "a duplicate delegation!\n", | 243 | "a duplicate delegation!\n", |
228 | __func__, clp->cl_hostname); | 244 | __func__, clp->cl_hostname); |
229 | if (delegation->type <= nfsi->delegation->type) { | 245 | if (delegation->type <= old_delegation->type) { |
230 | freeme = delegation; | 246 | freeme = delegation; |
231 | delegation = NULL; | 247 | delegation = NULL; |
232 | goto out; | 248 | goto out; |
233 | } | 249 | } |
234 | freeme = nfs_detach_delegation_locked(nfsi, NULL); | 250 | freeme = nfs_detach_delegation_locked(nfsi, NULL, clp); |
235 | } | 251 | } |
236 | list_add_rcu(&delegation->super_list, &clp->cl_delegations); | 252 | list_add_rcu(&delegation->super_list, &clp->cl_delegations); |
237 | nfsi->delegation_state = delegation->type; | 253 | nfsi->delegation_state = delegation->type; |
@@ -301,7 +317,7 @@ restart: | |||
301 | if (inode == NULL) | 317 | if (inode == NULL) |
302 | continue; | 318 | continue; |
303 | spin_lock(&clp->cl_lock); | 319 | spin_lock(&clp->cl_lock); |
304 | delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); | 320 | delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp); |
305 | spin_unlock(&clp->cl_lock); | 321 | spin_unlock(&clp->cl_lock); |
306 | rcu_read_unlock(); | 322 | rcu_read_unlock(); |
307 | if (delegation != NULL) { | 323 | if (delegation != NULL) { |
@@ -330,9 +346,9 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode) | |||
330 | struct nfs_inode *nfsi = NFS_I(inode); | 346 | struct nfs_inode *nfsi = NFS_I(inode); |
331 | struct nfs_delegation *delegation; | 347 | struct nfs_delegation *delegation; |
332 | 348 | ||
333 | if (rcu_dereference(nfsi->delegation) != NULL) { | 349 | if (rcu_access_pointer(nfsi->delegation) != NULL) { |
334 | spin_lock(&clp->cl_lock); | 350 | spin_lock(&clp->cl_lock); |
335 | delegation = nfs_detach_delegation_locked(nfsi, NULL); | 351 | delegation = nfs_detach_delegation_locked(nfsi, NULL, clp); |
336 | spin_unlock(&clp->cl_lock); | 352 | spin_unlock(&clp->cl_lock); |
337 | if (delegation != NULL) | 353 | if (delegation != NULL) |
338 | nfs_do_return_delegation(inode, delegation, 0); | 354 | nfs_do_return_delegation(inode, delegation, 0); |
@@ -346,9 +362,9 @@ int nfs_inode_return_delegation(struct inode *inode) | |||
346 | struct nfs_delegation *delegation; | 362 | struct nfs_delegation *delegation; |
347 | int err = 0; | 363 | int err = 0; |
348 | 364 | ||
349 | if (rcu_dereference(nfsi->delegation) != NULL) { | 365 | if (rcu_access_pointer(nfsi->delegation) != NULL) { |
350 | spin_lock(&clp->cl_lock); | 366 | spin_lock(&clp->cl_lock); |
351 | delegation = nfs_detach_delegation_locked(nfsi, NULL); | 367 | delegation = nfs_detach_delegation_locked(nfsi, NULL, clp); |
352 | spin_unlock(&clp->cl_lock); | 368 | spin_unlock(&clp->cl_lock); |
353 | if (delegation != NULL) { | 369 | if (delegation != NULL) { |
354 | nfs_msync_inode(inode); | 370 | nfs_msync_inode(inode); |
@@ -526,7 +542,7 @@ restart: | |||
526 | if (inode == NULL) | 542 | if (inode == NULL) |
527 | continue; | 543 | continue; |
528 | spin_lock(&clp->cl_lock); | 544 | spin_lock(&clp->cl_lock); |
529 | delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); | 545 | delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp); |
530 | spin_unlock(&clp->cl_lock); | 546 | spin_unlock(&clp->cl_lock); |
531 | rcu_read_unlock(); | 547 | rcu_read_unlock(); |
532 | if (delegation != NULL) | 548 | if (delegation != NULL) |
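
Several update-side paths in the delegation code now use rcu_dereference_protected() with lockdep_is_held(&clp->cl_lock) instead of a plain rcu_dereference(): the pointer is being read by a writer that holds the protecting lock, not by an RCU reader, and the annotation lets lockdep verify that claim. A rough pthread analogue of the assertion, with invented names:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cl_lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_held;                   /* poor man's lockdep_is_held() */
static int *shared_ptr;

static void cl_lock_acquire(void) { pthread_mutex_lock(&cl_lock); lock_held = 1; }
static void cl_lock_release(void) { lock_held = 0; pthread_mutex_unlock(&cl_lock); }

/* Update-side dereference: legal only while the writer lock is held. */
static int *deref_protected(int **pp)
{
        assert(lock_held);              /* what lockdep checks in the kernel */
        return *pp;
}

int main(void)
{
        static int value = 42;

        cl_lock_acquire();
        shared_ptr = &value;
        int *p = deref_protected(&shared_ptr);  /* fine: lock is held */
        printf("%d\n", *p);
        cl_lock_release();
        return 0;
}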
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 1afb0a10229f..e27960cd76ab 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/path.h> /* struct path */ | 28 | #include <linux/path.h> /* struct path */ |
29 | #include <linux/slab.h> /* kmem_* */ | 29 | #include <linux/slab.h> /* kmem_* */ |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/sched.h> | ||
31 | 32 | ||
32 | #include "inotify.h" | 33 | #include "inotify.h" |
33 | 34 | ||
@@ -146,6 +147,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group) | |||
146 | idr_for_each(&group->inotify_data.idr, idr_callback, group); | 147 | idr_for_each(&group->inotify_data.idr, idr_callback, group); |
147 | idr_remove_all(&group->inotify_data.idr); | 148 | idr_remove_all(&group->inotify_data.idr); |
148 | idr_destroy(&group->inotify_data.idr); | 149 | idr_destroy(&group->inotify_data.idr); |
150 | free_uid(group->inotify_data.user); | ||
149 | } | 151 | } |
150 | 152 | ||
151 | void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv) | 153 | void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv) |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 472cdf29ef82..e46ca685b9be 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
@@ -546,21 +546,24 @@ retry: | |||
546 | if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) | 546 | if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) |
547 | goto out_err; | 547 | goto out_err; |
548 | 548 | ||
549 | /* we are putting the mark on the idr, take a reference */ | ||
550 | fsnotify_get_mark(&tmp_ientry->fsn_entry); | ||
551 | |||
549 | spin_lock(&group->inotify_data.idr_lock); | 552 | spin_lock(&group->inotify_data.idr_lock); |
550 | ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, | 553 | ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, |
551 | group->inotify_data.last_wd+1, | 554 | group->inotify_data.last_wd+1, |
552 | &tmp_ientry->wd); | 555 | &tmp_ientry->wd); |
553 | spin_unlock(&group->inotify_data.idr_lock); | 556 | spin_unlock(&group->inotify_data.idr_lock); |
554 | if (ret) { | 557 | if (ret) { |
558 | /* we didn't get on the idr, drop the idr reference */ | ||
559 | fsnotify_put_mark(&tmp_ientry->fsn_entry); | ||
560 | |||
555 | /* idr was out of memory allocate and try again */ | 561 | /* idr was out of memory allocate and try again */ |
556 | if (ret == -EAGAIN) | 562 | if (ret == -EAGAIN) |
557 | goto retry; | 563 | goto retry; |
558 | goto out_err; | 564 | goto out_err; |
559 | } | 565 | } |
560 | 566 | ||
561 | /* we put the mark on the idr, take a reference */ | ||
562 | fsnotify_get_mark(&tmp_ientry->fsn_entry); | ||
563 | |||
564 | /* we are on the idr, now get on the inode */ | 567 | /* we are on the idr, now get on the inode */ |
565 | ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); | 568 | ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); |
566 | if (ret) { | 569 | if (ret) { |
@@ -578,16 +581,13 @@ retry: | |||
578 | /* return the watch descriptor for this new entry */ | 581 | /* return the watch descriptor for this new entry */ |
579 | ret = tmp_ientry->wd; | 582 | ret = tmp_ientry->wd; |
580 | 583 | ||
581 | /* match the ref from fsnotify_init_markentry() */ | ||
582 | fsnotify_put_mark(&tmp_ientry->fsn_entry); | ||
583 | |||
584 | /* if this mark added a new event update the group mask */ | 584 | /* if this mark added a new event update the group mask */ |
585 | if (mask & ~group->mask) | 585 | if (mask & ~group->mask) |
586 | fsnotify_recalc_group_mask(group); | 586 | fsnotify_recalc_group_mask(group); |
587 | 587 | ||
588 | out_err: | 588 | out_err: |
589 | if (ret < 0) | 589 | /* match the ref from fsnotify_init_markentry() */ |
590 | kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); | 590 | fsnotify_put_mark(&tmp_ientry->fsn_entry); |
591 | 591 | ||
592 | return ret; | 592 | return ret; |
593 | } | 593 | } |
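
The inotify change takes the mark's reference before publishing it in the idr and drops that reference again if the insert fails, so the object can never be reachable through the idr without a reference backing it; the creation reference is then released on the common exit path. A small userspace model of that ordering (the table, mark and helpers are invented):

#include <stdio.h>
#include <stdlib.h>

struct mark { int refs; };

static struct mark *table[4];           /* stand-in for the idr */

static void get_mark(struct mark *m) { m->refs++; }
static void put_mark(struct mark *m)
{
        if (--m->refs == 0) {
                printf("mark freed\n");
                free(m);
        }
}

static int table_insert(struct mark *m, int fail)
{
        if (fail)
                return -1;
        table[0] = m;
        return 0;
}

int main(void)
{
        struct mark *m = malloc(sizeof(*m));
        if (!m)
                return 1;
        m->refs = 1;                    /* creation reference */

        get_mark(m);                    /* ref for the table, taken first */
        if (table_insert(m, 1))
                put_mark(m);            /* didn't get in: drop the table ref */

        put_mark(m);                    /* creation ref, dropped on all paths */
        return 0;
}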
diff --git a/fs/proc/array.c b/fs/proc/array.c index e51f2ec2c5e5..885ab5513ac5 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -81,7 +81,6 @@ | |||
81 | #include <linux/pid_namespace.h> | 81 | #include <linux/pid_namespace.h> |
82 | #include <linux/ptrace.h> | 82 | #include <linux/ptrace.h> |
83 | #include <linux/tracehook.h> | 83 | #include <linux/tracehook.h> |
84 | #include <linux/swapops.h> | ||
85 | 84 | ||
86 | #include <asm/pgtable.h> | 85 | #include <asm/pgtable.h> |
87 | #include <asm/processor.h> | 86 | #include <asm/processor.h> |
@@ -495,7 +494,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
495 | rsslim, | 494 | rsslim, |
496 | mm ? mm->start_code : 0, | 495 | mm ? mm->start_code : 0, |
497 | mm ? mm->end_code : 0, | 496 | mm ? mm->end_code : 0, |
498 | (permitted && mm) ? task->stack_start : 0, | 497 | (permitted && mm) ? mm->start_stack : 0, |
499 | esp, | 498 | esp, |
500 | eip, | 499 | eip, |
501 | /* The signal information here is obsolete. | 500 | /* The signal information here is obsolete. |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 070553427dd5..47f5b145f56e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -247,25 +247,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) | |||
247 | } else if (vma->vm_start <= mm->start_stack && | 247 | } else if (vma->vm_start <= mm->start_stack && |
248 | vma->vm_end >= mm->start_stack) { | 248 | vma->vm_end >= mm->start_stack) { |
249 | name = "[stack]"; | 249 | name = "[stack]"; |
250 | } else { | ||
251 | unsigned long stack_start; | ||
252 | struct proc_maps_private *pmp; | ||
253 | |||
254 | pmp = m->private; | ||
255 | stack_start = pmp->task->stack_start; | ||
256 | |||
257 | if (vma->vm_start <= stack_start && | ||
258 | vma->vm_end >= stack_start) { | ||
259 | pad_len_spaces(m, len); | ||
260 | seq_printf(m, | ||
261 | "[threadstack:%08lx]", | ||
262 | #ifdef CONFIG_STACK_GROWSUP | ||
263 | vma->vm_end - stack_start | ||
264 | #else | ||
265 | stack_start - vma->vm_start | ||
266 | #endif | ||
267 | ); | ||
268 | } | ||
269 | } | 250 | } |
270 | } else { | 251 | } else { |
271 | name = "[vdso]"; | 252 | name = "[vdso]"; |
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index 4e50286a4cc3..1dabed286b4c 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c | |||
@@ -164,8 +164,8 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_ | |||
164 | name, de->name)) | 164 | name, de->name)) |
165 | goto found; | 165 | goto found; |
166 | } | 166 | } |
167 | dir_put_page(page); | ||
167 | } | 168 | } |
168 | dir_put_page(page); | ||
169 | 169 | ||
170 | if (++n >= npages) | 170 | if (++n >= npages) |
171 | n = 0; | 171 | n = 0; |
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index c99c64dc5f3d..c33749f95b32 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h | |||
@@ -33,7 +33,7 @@ | |||
33 | * Atomically reads the value of @v. Note that the guaranteed | 33 | * Atomically reads the value of @v. Note that the guaranteed |
34 | * useful range of an atomic_t is only 24 bits. | 34 | * useful range of an atomic_t is only 24 bits. |
35 | */ | 35 | */ |
36 | #define atomic_read(v) ((v)->counter) | 36 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
37 | 37 | ||
38 | /** | 38 | /** |
39 | * atomic_set - set atomic variable | 39 | * atomic_set - set atomic variable |
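
The asm-generic atomic_read() now reads through a volatile-qualified pointer, which forbids the compiler from caching the value across calls; without it, a polling loop can be optimised into an infinite loop that never re-reads the counter. A userspace illustration of the same idiom (atomic_t modelled as a plain struct):

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Forces a fresh load on every call, exactly like the patched macro. */
#define atomic_read(v)  (*(volatile int *)&(v)->counter)

int main(void)
{
        atomic_t v = { .counter = 0 };

        /* With a non-volatile read the compiler may hoist the load out of a
         * loop such as:  while (!atomic_read(&v)) ;  and spin forever even
         * after another thread stores a nonzero counter. */
        printf("%d\n", atomic_read(&v));
        return 0;
}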
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index e694263445f7..69206957b72c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h | |||
@@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, | |||
131 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); | 131 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); |
132 | 132 | ||
133 | } else | 133 | } else |
134 | dma_sync_single_for_cpu(dev, addr, size, dir); | 134 | dma_sync_single_for_cpu(dev, addr + offset, size, dir); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void dma_sync_single_range_for_device(struct device *dev, | 137 | static inline void dma_sync_single_range_for_device(struct device *dev, |
@@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, | |||
148 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); | 148 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); |
149 | 149 | ||
150 | } else | 150 | } else |
151 | dma_sync_single_for_device(dev, addr, size, dir); | 151 | dma_sync_single_for_device(dev, addr + offset, size, dir); |
152 | } | 152 | } |
153 | 153 | ||
154 | static inline void | 154 | static inline void |
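
The dma-mapping fix makes the ranged sync helpers pass addr + offset to the single-buffer fallback; previously they synced the first size bytes of the mapping rather than the requested window. A tiny model with an invented sync function:

#include <stdio.h>

typedef unsigned long long dma_addr_t;

static void sync_single(dma_addr_t addr, unsigned long size)
{
        printf("syncing [%llx, %llx)\n", addr, addr + size);
}

static void sync_range(dma_addr_t addr, unsigned long offset,
                       unsigned long size)
{
        sync_single(addr + offset, size);       /* the patched behaviour */
}

int main(void)
{
        sync_range(0x1000, 0x200, 0x100);       /* syncs [0x1200, 0x1300) */
        return 0;
}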
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e929c27ede22..6b9db917e717 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -789,34 +789,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); | |||
789 | extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, | 789 | extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, |
790 | bool interruptible); | 790 | bool interruptible); |
791 | 791 | ||
792 | /** | ||
793 | * ttm_bo_block_reservation | ||
794 | * | ||
795 | * @bo: A pointer to a struct ttm_buffer_object. | ||
796 | * @interruptible: Use interruptible sleep when waiting. | ||
797 | * @no_wait: Don't sleep, but rather return -EBUSY. | ||
798 | * | ||
799 | * Block reservation for validation by simply reserving the buffer. | ||
800 | * This is intended for single buffer use only without eviction, | ||
801 | * and thus needs no deadlock protection. | ||
802 | * | ||
803 | * Returns: | ||
804 | * -EBUSY: If no_wait == 1 and the buffer is already reserved. | ||
805 | * -ERESTARTSYS: If interruptible == 1 and the process received a signal | ||
806 | * while sleeping. | ||
807 | */ | ||
808 | extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, | ||
809 | bool interruptible, bool no_wait); | ||
810 | |||
811 | /** | ||
812 | * ttm_bo_unblock_reservation | ||
813 | * | ||
814 | * @bo: A pointer to a struct ttm_buffer_object. | ||
815 | * | ||
816 | * Unblocks reservation leaving lru lists untouched. | ||
817 | */ | ||
818 | extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo); | ||
819 | |||
820 | /* | 792 | /* |
821 | * ttm_bo_util.c | 793 | * ttm_bo_util.c |
822 | */ | 794 | */ |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b8ad1ea99586..8f78073d7caa 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -530,6 +530,7 @@ static inline struct cgroup_subsys_state *task_subsys_state( | |||
530 | { | 530 | { |
531 | return rcu_dereference_check(task->cgroups->subsys[subsys_id], | 531 | return rcu_dereference_check(task->cgroups->subsys[subsys_id], |
532 | rcu_read_lock_held() || | 532 | rcu_read_lock_held() || |
533 | lockdep_is_held(&task->alloc_lock) || | ||
533 | cgroup_lock_is_held()); | 534 | cgroup_lock_is_held()); |
534 | } | 535 | } |
535 | 536 | ||
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 30b93b2a01a4..eebb617c17d8 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -186,6 +186,8 @@ d_iput: no no no yes | |||
186 | 186 | ||
187 | #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ | 187 | #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ |
188 | 188 | ||
189 | #define DCACHE_CANT_MOUNT 0x0100 | ||
190 | |||
189 | extern spinlock_t dcache_lock; | 191 | extern spinlock_t dcache_lock; |
190 | extern seqlock_t rename_lock; | 192 | extern seqlock_t rename_lock; |
191 | 193 | ||
@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry) | |||
358 | return d_unhashed(dentry) && !IS_ROOT(dentry); | 360 | return d_unhashed(dentry) && !IS_ROOT(dentry); |
359 | } | 361 | } |
360 | 362 | ||
363 | static inline int cant_mount(struct dentry *dentry) | ||
364 | { | ||
365 | return (dentry->d_flags & DCACHE_CANT_MOUNT); | ||
366 | } | ||
367 | |||
368 | static inline void dont_mount(struct dentry *dentry) | ||
369 | { | ||
370 | spin_lock(&dentry->d_lock); | ||
371 | dentry->d_flags |= DCACHE_CANT_MOUNT; | ||
372 | spin_unlock(&dentry->d_lock); | ||
373 | } | ||
374 | |||
361 | static inline struct dentry *dget_parent(struct dentry *dentry) | 375 | static inline struct dentry *dget_parent(struct dentry *dentry) |
362 | { | 376 | { |
363 | struct dentry *ret; | 377 | struct dentry *ret; |
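
The new DCACHE_CANT_MOUNT flag and its cant_mount()/dont_mount() helpers let rmdir, unlink and rename mark a dentry under d_lock so that later mount attempts (graft_tree, do_move_mount, pivot_root in the hunks above) refuse it with -ENOENT even before the inode itself is gone. A compressed userspace model of that handshake, with stand-in locks and callers:

#include <pthread.h>
#include <stdio.h>

#define DCACHE_CANT_MOUNT 0x0100

struct dentry {
        unsigned int d_flags;
        pthread_mutex_t d_lock;
};

static void dont_mount(struct dentry *d)
{
        pthread_mutex_lock(&d->d_lock);
        d->d_flags |= DCACHE_CANT_MOUNT;
        pthread_mutex_unlock(&d->d_lock);
}

static int cant_mount(const struct dentry *d)
{
        return d->d_flags & DCACHE_CANT_MOUNT;
}

int main(void)
{
        struct dentry dir = { 0, PTHREAD_MUTEX_INITIALIZER };

        dont_mount(&dir);               /* e.g. after a successful rmdir */
        if (cant_mount(&dir))           /* e.g. in do_move_mount() */
                printf("mount refused: -ENOENT\n");
        return 0;
}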
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 8c243aaa86a7..597692f1fc8d 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h | |||
@@ -20,12 +20,14 @@ struct debug_obj_descr; | |||
20 | * struct debug_obj - representaion of an tracked object | 20 | * struct debug_obj - representaion of an tracked object |
21 | * @node: hlist node to link the object into the tracker list | 21 | * @node: hlist node to link the object into the tracker list |
22 | * @state: tracked object state | 22 | * @state: tracked object state |
23 | * @astate: current active state | ||
23 | * @object: pointer to the real object | 24 | * @object: pointer to the real object |
24 | * @descr: pointer to an object type specific debug description structure | 25 | * @descr: pointer to an object type specific debug description structure |
25 | */ | 26 | */ |
26 | struct debug_obj { | 27 | struct debug_obj { |
27 | struct hlist_node node; | 28 | struct hlist_node node; |
28 | enum debug_obj_state state; | 29 | enum debug_obj_state state; |
30 | unsigned int astate; | ||
29 | void *object; | 31 | void *object; |
30 | struct debug_obj_descr *descr; | 32 | struct debug_obj_descr *descr; |
31 | }; | 33 | }; |
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); | |||
60 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); | 62 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); |
61 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); | 63 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); |
62 | 64 | ||
65 | /* | ||
66 | * Active state: | ||
67 | * - Set at 0 upon initialization. | ||
68 | * - Must return to 0 before deactivation. | ||
69 | */ | ||
70 | extern void | ||
71 | debug_object_active_state(void *addr, struct debug_obj_descr *descr, | ||
72 | unsigned int expect, unsigned int next); | ||
73 | |||
63 | extern void debug_objects_early_init(void); | 74 | extern void debug_objects_early_init(void); |
64 | extern void debug_objects_mem_init(void); | 75 | extern void debug_objects_mem_init(void); |
65 | #else | 76 | #else |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index c0f4b364c711..39e71b0a3bfd 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -58,6 +58,7 @@ struct trace_iterator { | |||
58 | /* The below is zeroed out in pipe_read */ | 58 | /* The below is zeroed out in pipe_read */ |
59 | struct trace_seq seq; | 59 | struct trace_seq seq; |
60 | struct trace_entry *ent; | 60 | struct trace_entry *ent; |
61 | unsigned long lost_events; | ||
61 | int leftover; | 62 | int leftover; |
62 | int cpu; | 63 | int cpu; |
63 | u64 ts; | 64 | u64 ts; |
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index c9bf92cd7653..d94963b379d9 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
@@ -79,10 +79,7 @@ enum { | |||
79 | IFLA_NET_NS_PID, | 79 | IFLA_NET_NS_PID, |
80 | IFLA_IFALIAS, | 80 | IFLA_IFALIAS, |
81 | IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ | 81 | IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ |
82 | IFLA_VF_MAC, /* Hardware queue specific attributes */ | 82 | IFLA_VFINFO_LIST, |
83 | IFLA_VF_VLAN, | ||
84 | IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ | ||
85 | IFLA_VFINFO, | ||
86 | __IFLA_MAX | 83 | __IFLA_MAX |
87 | }; | 84 | }; |
88 | 85 | ||
@@ -203,6 +200,24 @@ enum macvlan_mode { | |||
203 | 200 | ||
204 | /* SR-IOV virtual function managment section */ | 201 | /* SR-IOV virtual function managment section */ |
205 | 202 | ||
203 | enum { | ||
204 | IFLA_VF_INFO_UNSPEC, | ||
205 | IFLA_VF_INFO, | ||
206 | __IFLA_VF_INFO_MAX, | ||
207 | }; | ||
208 | |||
209 | #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) | ||
210 | |||
211 | enum { | ||
212 | IFLA_VF_UNSPEC, | ||
213 | IFLA_VF_MAC, /* Hardware queue specific attributes */ | ||
214 | IFLA_VF_VLAN, | ||
215 | IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ | ||
216 | __IFLA_VF_MAX, | ||
217 | }; | ||
218 | |||
219 | #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) | ||
220 | |||
206 | struct ifla_vf_mac { | 221 | struct ifla_vf_mac { |
207 | __u32 vf; | 222 | __u32 vf; |
208 | __u8 mac[32]; /* MAX_ADDR_LEN */ | 223 | __u8 mac[32]; /* MAX_ADDR_LEN */ |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b1ed1cd8e2a8..7996fc2c9ba9 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -49,7 +49,6 @@ extern struct group_info init_groups; | |||
49 | { .first = &init_task.pids[PIDTYPE_PGID].node }, \ | 49 | { .first = &init_task.pids[PIDTYPE_PGID].node }, \ |
50 | { .first = &init_task.pids[PIDTYPE_SID].node }, \ | 50 | { .first = &init_task.pids[PIDTYPE_SID].node }, \ |
51 | }, \ | 51 | }, \ |
52 | .rcu = RCU_HEAD_INIT, \ | ||
53 | .level = 0, \ | 52 | .level = 0, \ |
54 | .numbers = { { \ | 53 | .numbers = { { \ |
55 | .nr = 0, \ | 54 | .nr = 0, \ |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3af4ffd591b9..be22ad83689c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -37,9 +37,9 @@ struct iommu_ops { | |||
37 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); | 37 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); |
38 | void (*detach_dev)(struct iommu_domain *domain, struct device *dev); | 38 | void (*detach_dev)(struct iommu_domain *domain, struct device *dev); |
39 | int (*map)(struct iommu_domain *domain, unsigned long iova, | 39 | int (*map)(struct iommu_domain *domain, unsigned long iova, |
40 | phys_addr_t paddr, size_t size, int prot); | 40 | phys_addr_t paddr, int gfp_order, int prot); |
41 | void (*unmap)(struct iommu_domain *domain, unsigned long iova, | 41 | int (*unmap)(struct iommu_domain *domain, unsigned long iova, |
42 | size_t size); | 42 | int gfp_order); |
43 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, | 43 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, |
44 | unsigned long iova); | 44 | unsigned long iova); |
45 | int (*domain_has_cap)(struct iommu_domain *domain, | 45 | int (*domain_has_cap)(struct iommu_domain *domain, |
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain, | |||
56 | struct device *dev); | 56 | struct device *dev); |
57 | extern void iommu_detach_device(struct iommu_domain *domain, | 57 | extern void iommu_detach_device(struct iommu_domain *domain, |
58 | struct device *dev); | 58 | struct device *dev); |
59 | extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, | 59 | extern int iommu_map(struct iommu_domain *domain, unsigned long iova, |
60 | phys_addr_t paddr, size_t size, int prot); | 60 | phys_addr_t paddr, int gfp_order, int prot); |
61 | extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, | 61 | extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
62 | size_t size); | 62 | int gfp_order); |
63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
64 | unsigned long iova); | 64 | unsigned long iova); |
65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | 65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, |
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain, | |||
96 | { | 96 | { |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int iommu_map_range(struct iommu_domain *domain, | 99 | static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, |
100 | unsigned long iova, phys_addr_t paddr, | 100 | phys_addr_t paddr, int gfp_order, int prot) |
101 | size_t size, int prot) | ||
102 | { | 101 | { |
103 | return -ENODEV; | 102 | return -ENODEV; |
104 | } | 103 | } |
105 | 104 | ||
106 | static inline void iommu_unmap_range(struct iommu_domain *domain, | 105 | static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
107 | unsigned long iova, size_t size) | 106 | int gfp_order) |
108 | { | 107 | { |
108 | return -ENODEV; | ||
109 | } | 109 | } |
110 | 110 | ||
111 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 111 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
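
The reworked iommu_map()/iommu_unmap() prototypes take a page order instead of a byte count, so the mapped length is PAGE_SIZE << gfp_order, and unmap now returns an int rather than void. A quick illustration of the order-to-bytes conversion (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long order_to_bytes(int gfp_order)
{
        return PAGE_SIZE << gfp_order;
}

int main(void)
{
        for (int order = 0; order <= 4; order++)
                printf("order %d -> %lu KiB\n", order,
                       order_to_bytes(order) >> 10);
        return 0;
}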
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f58e9d836f32..56fde4364e4c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -474,4 +474,13 @@ struct platform_device_id { | |||
474 | __attribute__((aligned(sizeof(kernel_ulong_t)))); | 474 | __attribute__((aligned(sizeof(kernel_ulong_t)))); |
475 | }; | 475 | }; |
476 | 476 | ||
477 | struct zorro_device_id { | ||
478 | __u32 id; /* Device ID or ZORRO_WILDCARD */ | ||
479 | kernel_ulong_t driver_data; /* Data private to the driver */ | ||
480 | }; | ||
481 | |||
482 | #define ZORRO_WILDCARD (0xffffffff) /* not official */ | ||
483 | |||
484 | #define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X" | ||
485 | |||
477 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 486 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 515d53ae6a79..6914fcad4673 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module) | |||
465 | if (module) { | 465 | if (module) { |
466 | preempt_disable(); | 466 | preempt_disable(); |
467 | __this_cpu_inc(module->refptr->incs); | 467 | __this_cpu_inc(module->refptr->incs); |
468 | trace_module_get(module, _THIS_IP_, | 468 | trace_module_get(module, _THIS_IP_); |
469 | __this_cpu_read(module->refptr->incs)); | ||
470 | preempt_enable(); | 469 | preempt_enable(); |
471 | } | 470 | } |
472 | } | 471 | } |
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module) | |||
480 | 479 | ||
481 | if (likely(module_is_live(module))) { | 480 | if (likely(module_is_live(module))) { |
482 | __this_cpu_inc(module->refptr->incs); | 481 | __this_cpu_inc(module->refptr->incs); |
483 | trace_module_get(module, _THIS_IP_, | 482 | trace_module_get(module, _THIS_IP_); |
484 | __this_cpu_read(module->refptr->incs)); | ||
485 | } else | 483 | } else |
486 | ret = 0; | 484 | ret = 0; |
487 | 485 | ||
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 212da17d06af..5417944d3687 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *); | |||
44 | extern int platform_add_devices(struct platform_device **, int); | 44 | extern int platform_add_devices(struct platform_device **, int); |
45 | 45 | ||
46 | extern struct platform_device *platform_device_register_simple(const char *, int id, | 46 | extern struct platform_device *platform_device_register_simple(const char *, int id, |
47 | struct resource *, unsigned int); | 47 | const struct resource *, unsigned int); |
48 | extern struct platform_device *platform_device_register_data(struct device *, | 48 | extern struct platform_device *platform_device_register_data(struct device *, |
49 | const char *, int, const void *, size_t); | 49 | const char *, int, const void *, size_t); |
50 | 50 | ||
51 | extern struct platform_device *platform_device_alloc(const char *name, int id); | 51 | extern struct platform_device *platform_device_alloc(const char *name, int id); |
52 | extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); | 52 | extern int platform_device_add_resources(struct platform_device *pdev, |
53 | const struct resource *res, | ||
54 | unsigned int num); | ||
53 | extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); | 55 | extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); |
54 | extern int platform_device_add(struct platform_device *pdev); | 56 | extern int platform_device_add(struct platform_device *pdev); |
55 | extern void platform_device_del(struct platform_device *pdev); | 57 | extern void platform_device_del(struct platform_device *pdev); |
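
The const qualifiers added to platform_device_register_simple() and platform_device_add_resources() let board code keep its resource tables in read-only data. A small sketch of such a caller; the device name, addresses, and helper function are illustrative:

        #include <linux/platform_device.h>
        #include <linux/ioport.h>
        #include <linux/kernel.h>

        static const struct resource example_resources[] = {
                {
                        .start = 0x10000000,    /* illustrative addresses */
                        .end   = 0x10000fff,
                        .flags = IORESOURCE_MEM,
                },
        };

        static struct platform_device *example_register(void)
        {
                return platform_device_register_simple("example-dev", -1,
                                                       example_resources,
                                                       ARRAY_SIZE(example_resources));
        }
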
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 07db2feb8572..b653b4aaa8a6 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -56,8 +56,6 @@ struct rcu_head { | |||
56 | }; | 56 | }; |
57 | 57 | ||
58 | /* Exported common interfaces */ | 58 | /* Exported common interfaces */ |
59 | extern void synchronize_rcu_bh(void); | ||
60 | extern void synchronize_sched(void); | ||
61 | extern void rcu_barrier(void); | 59 | extern void rcu_barrier(void); |
62 | extern void rcu_barrier_bh(void); | 60 | extern void rcu_barrier_bh(void); |
63 | extern void rcu_barrier_sched(void); | 61 | extern void rcu_barrier_sched(void); |
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page); | |||
66 | 64 | ||
67 | /* Internal to kernel */ | 65 | /* Internal to kernel */ |
68 | extern void rcu_init(void); | 66 | extern void rcu_init(void); |
69 | extern int rcu_scheduler_active; | ||
70 | extern void rcu_scheduler_starting(void); | ||
71 | 67 | ||
72 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 68 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
73 | #include <linux/rcutree.h> | 69 | #include <linux/rcutree.h> |
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void); | |||
83 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 79 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
84 | } while (0) | 80 | } while (0) |
85 | 81 | ||
82 | static inline void init_rcu_head_on_stack(struct rcu_head *head) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | ||
87 | { | ||
88 | } | ||
89 | |||
86 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 90 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
87 | 91 | ||
88 | extern struct lockdep_map rcu_lock_map; | 92 | extern struct lockdep_map rcu_lock_map; |
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void); | |||
106 | /** | 110 | /** |
107 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 111 | * rcu_read_lock_held - might we be in RCU read-side critical section? |
108 | * | 112 | * |
109 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 113 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU |
110 | * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 114 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
111 | * this assumes we are in an RCU read-side critical section unless it can | 115 | * this assumes we are in an RCU read-side critical section unless it can |
112 | * prove otherwise. | 116 | * prove otherwise. |
113 | * | 117 | * |
114 | * Check rcu_scheduler_active to prevent false positives during boot. | 118 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
119 | * and while lockdep is disabled. | ||
115 | */ | 120 | */ |
116 | static inline int rcu_read_lock_held(void) | 121 | static inline int rcu_read_lock_held(void) |
117 | { | 122 | { |
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void); | |||
129 | /** | 134 | /** |
130 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 135 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? |
131 | * | 136 | * |
132 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an | 137 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
133 | * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 138 | * RCU-sched read-side critical section. In absence of |
134 | * this assumes we are in an RCU-sched read-side critical section unless it | 139 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
135 | * can prove otherwise. Note that disabling of preemption (including | 140 | * critical section unless it can prove otherwise. Note that disabling |
136 | * disabling irqs) counts as an RCU-sched read-side critical section. | 141 | * of preemption (including disabling irqs) counts as an RCU-sched |
142 | * read-side critical section. | ||
137 | * | 143 | * |
138 | * Check rcu_scheduler_active to prevent false positives during boot. | 144 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
145 | * and while lockdep is disabled. | ||
139 | */ | 146 | */ |
140 | #ifdef CONFIG_PREEMPT | 147 | #ifdef CONFIG_PREEMPT |
141 | static inline int rcu_read_lock_sched_held(void) | 148 | static inline int rcu_read_lock_sched_held(void) |
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void) | |||
177 | #ifdef CONFIG_PREEMPT | 184 | #ifdef CONFIG_PREEMPT |
178 | static inline int rcu_read_lock_sched_held(void) | 185 | static inline int rcu_read_lock_sched_held(void) |
179 | { | 186 | { |
180 | return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); | 187 | return preempt_count() != 0 || irqs_disabled(); |
181 | } | 188 | } |
182 | #else /* #ifdef CONFIG_PREEMPT */ | 189 | #else /* #ifdef CONFIG_PREEMPT */ |
183 | static inline int rcu_read_lock_sched_held(void) | 190 | static inline int rcu_read_lock_sched_held(void) |
@@ -190,6 +197,17 @@ static inline int rcu_read_lock_sched_held(void) | |||
190 | 197 | ||
191 | #ifdef CONFIG_PROVE_RCU | 198 | #ifdef CONFIG_PROVE_RCU |
192 | 199 | ||
200 | extern int rcu_my_thread_group_empty(void); | ||
201 | |||
202 | #define __do_rcu_dereference_check(c) \ | ||
203 | do { \ | ||
204 | static bool __warned; \ | ||
205 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | ||
206 | __warned = true; \ | ||
207 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
208 | } \ | ||
209 | } while (0) | ||
210 | |||
193 | /** | 211 | /** |
194 | * rcu_dereference_check - rcu_dereference with debug checking | 212 | * rcu_dereference_check - rcu_dereference with debug checking |
195 | * @p: The pointer to read, prior to dereferencing | 213 | * @p: The pointer to read, prior to dereferencing |
@@ -219,8 +237,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
219 | */ | 237 | */ |
220 | #define rcu_dereference_check(p, c) \ | 238 | #define rcu_dereference_check(p, c) \ |
221 | ({ \ | 239 | ({ \ |
222 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 240 | __do_rcu_dereference_check(c); \ |
223 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
224 | rcu_dereference_raw(p); \ | 241 | rcu_dereference_raw(p); \ |
225 | }) | 242 | }) |
226 | 243 | ||
@@ -237,8 +254,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
237 | */ | 254 | */ |
238 | #define rcu_dereference_protected(p, c) \ | 255 | #define rcu_dereference_protected(p, c) \ |
239 | ({ \ | 256 | ({ \ |
240 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 257 | __do_rcu_dereference_check(c); \ |
241 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
242 | (p); \ | 258 | (p); \ |
243 | }) | 259 | }) |
244 | 260 | ||
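
The new __do_rcu_dereference_check() helper changes only how often a given call site can splat (at most once, via the per-site __warned flag); callers of rcu_dereference_check() keep the same shape. A hedged usage sketch in which struct foo, gp, and my_lock are illustrative names:

        #include <linux/rcupdate.h>
        #include <linux/spinlock.h>

        struct foo { int val; };
        static struct foo *gp;                  /* RCU-protected pointer */
        static DEFINE_SPINLOCK(my_lock);        /* update-side lock */

        static int foo_get_val(void)
        {
                struct foo *p;

                /* Legal under rcu_read_lock() or with my_lock held. */
                p = rcu_dereference_check(gp,
                                          rcu_read_lock_held() ||
                                          lockdep_is_held(&my_lock));
                return p ? p->val : -1;
        }
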
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index a5195875480a..14e5a76b2c06 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -29,6 +29,10 @@ | |||
29 | 29 | ||
30 | void rcu_sched_qs(int cpu); | 30 | void rcu_sched_qs(int cpu); |
31 | void rcu_bh_qs(int cpu); | 31 | void rcu_bh_qs(int cpu); |
32 | static inline void rcu_note_context_switch(int cpu) | ||
33 | { | ||
34 | rcu_sched_qs(cpu); | ||
35 | } | ||
32 | 36 | ||
33 | #define __rcu_read_lock() preempt_disable() | 37 | #define __rcu_read_lock() preempt_disable() |
34 | #define __rcu_read_unlock() preempt_enable() | 38 | #define __rcu_read_unlock() preempt_enable() |
@@ -74,7 +78,17 @@ static inline void rcu_sched_force_quiescent_state(void) | |||
74 | { | 78 | { |
75 | } | 79 | } |
76 | 80 | ||
77 | #define synchronize_rcu synchronize_sched | 81 | extern void synchronize_sched(void); |
82 | |||
83 | static inline void synchronize_rcu(void) | ||
84 | { | ||
85 | synchronize_sched(); | ||
86 | } | ||
87 | |||
88 | static inline void synchronize_rcu_bh(void) | ||
89 | { | ||
90 | synchronize_sched(); | ||
91 | } | ||
78 | 92 | ||
79 | static inline void synchronize_rcu_expedited(void) | 93 | static inline void synchronize_rcu_expedited(void) |
80 | { | 94 | { |
@@ -114,4 +128,17 @@ static inline int rcu_preempt_depth(void) | |||
114 | return 0; | 128 | return 0; |
115 | } | 129 | } |
116 | 130 | ||
131 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
132 | |||
133 | extern int rcu_scheduler_active __read_mostly; | ||
134 | extern void rcu_scheduler_starting(void); | ||
135 | |||
136 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
137 | |||
138 | static inline void rcu_scheduler_starting(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
143 | |||
117 | #endif /* __LINUX_RCUTINY_H */ | 144 | #endif /* __LINUX_RCUTINY_H */ |
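
With these inlines, TINY_RCU funnels synchronize_rcu() and synchronize_rcu_bh() into synchronize_sched(), so generic update-side code needs no #ifdefs. A hedged sketch of such an update path; struct foo, foo_list, and foo_lock are illustrative:

        #include <linux/rculist.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct foo {
                struct list_head list;
                int data;
        };
        static LIST_HEAD(foo_list);
        static DEFINE_SPINLOCK(foo_lock);

        static void foo_remove(struct foo *p)
        {
                spin_lock(&foo_lock);
                list_del_rcu(&p->list);         /* unlink from the RCU-protected list */
                spin_unlock(&foo_lock);

                synchronize_rcu();              /* wait for pre-existing readers */
                kfree(p);
        }
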
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 42cc3a04779e..48282055e83d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -34,6 +34,7 @@ struct notifier_block; | |||
34 | 34 | ||
35 | extern void rcu_sched_qs(int cpu); | 35 | extern void rcu_sched_qs(int cpu); |
36 | extern void rcu_bh_qs(int cpu); | 36 | extern void rcu_bh_qs(int cpu); |
37 | extern void rcu_note_context_switch(int cpu); | ||
37 | extern int rcu_needs_cpu(int cpu); | 38 | extern int rcu_needs_cpu(int cpu); |
38 | extern int rcu_expedited_torture_stats(char *page); | 39 | extern int rcu_expedited_torture_stats(char *page); |
39 | 40 | ||
@@ -86,6 +87,8 @@ static inline void __rcu_read_unlock_bh(void) | |||
86 | 87 | ||
87 | extern void call_rcu_sched(struct rcu_head *head, | 88 | extern void call_rcu_sched(struct rcu_head *head, |
88 | void (*func)(struct rcu_head *rcu)); | 89 | void (*func)(struct rcu_head *rcu)); |
90 | extern void synchronize_rcu_bh(void); | ||
91 | extern void synchronize_sched(void); | ||
89 | extern void synchronize_rcu_expedited(void); | 92 | extern void synchronize_rcu_expedited(void); |
90 | 93 | ||
91 | static inline void synchronize_rcu_bh_expedited(void) | 94 | static inline void synchronize_rcu_bh_expedited(void) |
@@ -120,4 +123,7 @@ static inline int rcu_blocking_is_gp(void) | |||
120 | return num_online_cpus() == 1; | 123 | return num_online_cpus() == 1; |
121 | } | 124 | } |
122 | 125 | ||
126 | extern void rcu_scheduler_starting(void); | ||
127 | extern int rcu_scheduler_active __read_mostly; | ||
128 | |||
123 | #endif /* __LINUX_RCUTREE_H */ | 129 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 5fcc31ed5771..c8297761e414 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -120,9 +120,11 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
120 | unsigned long length, void *data); | 120 | unsigned long length, void *data); |
121 | 121 | ||
122 | struct ring_buffer_event * | 122 | struct ring_buffer_event * |
123 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); | 123 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, |
124 | unsigned long *lost_events); | ||
124 | struct ring_buffer_event * | 125 | struct ring_buffer_event * |
125 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); | 126 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, |
127 | unsigned long *lost_events); | ||
126 | 128 | ||
127 | struct ring_buffer_iter * | 129 | struct ring_buffer_iter * |
128 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); | 130 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); |
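
ring_buffer_peek() and ring_buffer_consume() now report dropped events through a lost_events out-parameter. A hedged consumer-side sketch; the buffer, cpu, handler, and warning text are illustrative:

        #include <linux/ring_buffer.h>
        #include <linux/kernel.h>

        static void example_handle(void *entry, u64 ts)
        {
                /* hypothetical per-event processing */
        }

        static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
        {
                struct ring_buffer_event *event;
                unsigned long lost = 0;
                u64 ts;

                while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                        if (lost)
                                printk(KERN_WARNING "cpu %d dropped %lu events\n",
                                       cpu, lost);
                        example_handle(ring_buffer_event_data(event), ts);
                }
        }
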
diff --git a/include/linux/sched.h b/include/linux/sched.h index e0447c64af6a..28b71ee133f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1490,7 +1490,6 @@ struct task_struct { | |||
1490 | /* bitmask of trace recursion */ | 1490 | /* bitmask of trace recursion */ |
1491 | unsigned long trace_recursion; | 1491 | unsigned long trace_recursion; |
1492 | #endif /* CONFIG_TRACING */ | 1492 | #endif /* CONFIG_TRACING */ |
1493 | unsigned long stack_start; | ||
1494 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | 1493 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ |
1495 | struct memcg_batch_info { | 1494 | struct memcg_batch_info { |
1496 | int do_batch; /* incremented when batch uncharge started */ | 1495 | int do_batch; /* incremented when batch uncharge started */ |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5ecb222af9..4d5d2f546dbf 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #ifndef _LINUX_SRCU_H | 27 | #ifndef _LINUX_SRCU_H |
28 | #define _LINUX_SRCU_H | 28 | #define _LINUX_SRCU_H |
29 | 29 | ||
30 | #include <linux/mutex.h> | ||
31 | |||
30 | struct srcu_struct_array { | 32 | struct srcu_struct_array { |
31 | int c[2]; | 33 | int c[2]; |
32 | }; | 34 | }; |
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp); | |||
84 | /** | 86 | /** |
85 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 87 | * srcu_read_lock_held - might we be in SRCU read-side critical section? |
86 | * | 88 | * |
87 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 89 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU |
88 | * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 90 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
89 | * this assumes we are in an SRCU read-side critical section unless it can | 91 | * this assumes we are in an SRCU read-side critical section unless it can |
90 | * prove otherwise. | 92 | * prove otherwise. |
91 | */ | 93 | */ |
diff --git a/include/linux/types.h b/include/linux/types.h index c42724f8c802..23d237a075e2 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -188,12 +188,12 @@ typedef u32 phys_addr_t; | |||
188 | typedef phys_addr_t resource_size_t; | 188 | typedef phys_addr_t resource_size_t; |
189 | 189 | ||
190 | typedef struct { | 190 | typedef struct { |
191 | volatile int counter; | 191 | int counter; |
192 | } atomic_t; | 192 | } atomic_t; |
193 | 193 | ||
194 | #ifdef CONFIG_64BIT | 194 | #ifdef CONFIG_64BIT |
195 | typedef struct { | 195 | typedef struct { |
196 | volatile long counter; | 196 | long counter; |
197 | } atomic64_t; | 197 | } atomic64_t; |
198 | #endif | 198 | #endif |
199 | 199 | ||
diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 913bfc226dda..7bf9db525e9e 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h | |||
@@ -38,8 +38,6 @@ | |||
38 | typedef __u32 zorro_id; | 38 | typedef __u32 zorro_id; |
39 | 39 | ||
40 | 40 | ||
41 | #define ZORRO_WILDCARD (0xffffffff) /* not official */ | ||
42 | |||
43 | /* Include the ID list */ | 41 | /* Include the ID list */ |
44 | #include <linux/zorro_ids.h> | 42 | #include <linux/zorro_ids.h> |
45 | 43 | ||
@@ -116,6 +114,7 @@ struct ConfigDev { | |||
116 | 114 | ||
117 | #include <linux/init.h> | 115 | #include <linux/init.h> |
118 | #include <linux/ioport.h> | 116 | #include <linux/ioport.h> |
117 | #include <linux/mod_devicetable.h> | ||
119 | 118 | ||
120 | #include <asm/zorro.h> | 119 | #include <asm/zorro.h> |
121 | 120 | ||
@@ -142,29 +141,10 @@ struct zorro_dev { | |||
142 | * Zorro bus | 141 | * Zorro bus |
143 | */ | 142 | */ |
144 | 143 | ||
145 | struct zorro_bus { | ||
146 | struct list_head devices; /* list of devices on this bus */ | ||
147 | unsigned int num_resources; /* number of resources */ | ||
148 | struct resource resources[4]; /* address space routed to this bus */ | ||
149 | struct device dev; | ||
150 | char name[10]; | ||
151 | }; | ||
152 | |||
153 | extern struct zorro_bus zorro_bus; /* single Zorro bus */ | ||
154 | extern struct bus_type zorro_bus_type; | 144 | extern struct bus_type zorro_bus_type; |
155 | 145 | ||
156 | 146 | ||
157 | /* | 147 | /* |
158 | * Zorro device IDs | ||
159 | */ | ||
160 | |||
161 | struct zorro_device_id { | ||
162 | zorro_id id; /* Device ID or ZORRO_WILDCARD */ | ||
163 | unsigned long driver_data; /* Data private to the driver */ | ||
164 | }; | ||
165 | |||
166 | |||
167 | /* | ||
168 | * Zorro device drivers | 148 | * Zorro device drivers |
169 | */ | 149 | */ |
170 | 150 | ||
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h index b9da1f5591e7..4aeff96ff7d8 100644 --- a/include/media/saa7146_vv.h +++ b/include/media/saa7146_vv.h | |||
@@ -188,7 +188,6 @@ void saa7146_buffer_timeout(unsigned long data); | |||
188 | void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, | 188 | void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, |
189 | struct saa7146_buf *buf); | 189 | struct saa7146_buf *buf); |
190 | 190 | ||
191 | int saa7146_vv_devinit(struct saa7146_dev *dev); | ||
192 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); | 191 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); |
193 | int saa7146_vv_release(struct saa7146_dev* dev); | 192 | int saa7146_vv_release(struct saa7146_dev* dev); |
194 | 193 | ||
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 851c813adb3a..61d73e37d543 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -279,6 +279,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | |||
279 | /* 2nd level prototypes */ | 279 | /* 2nd level prototypes */ |
280 | void sctp_generate_t3_rtx_event(unsigned long peer); | 280 | void sctp_generate_t3_rtx_event(unsigned long peer); |
281 | void sctp_generate_heartbeat_event(unsigned long peer); | 281 | void sctp_generate_heartbeat_event(unsigned long peer); |
282 | void sctp_generate_proto_unreach_event(unsigned long peer); | ||
282 | 283 | ||
283 | void sctp_ootb_pkt_free(struct sctp_packet *); | 284 | void sctp_ootb_pkt_free(struct sctp_packet *); |
284 | 285 | ||
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 597f8e27aaf6..219043a67bf7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1010,6 +1010,9 @@ struct sctp_transport { | |||
1010 | /* Heartbeat timer is per destination. */ | 1010 | /* Heartbeat timer is per destination. */ |
1011 | struct timer_list hb_timer; | 1011 | struct timer_list hb_timer; |
1012 | 1012 | ||
1013 | /* Timer to handle ICMP proto unreachable events */ | ||
1014 | struct timer_list proto_unreach_timer; | ||
1015 | |||
1013 | /* Since we're using per-destination retransmission timers | 1016 | /* Since we're using per-destination retransmission timers |
1014 | * (see above), we're also using per-destination "transmitted" | 1017 | * (see above), we're also using per-destination "transmitted" |
1015 | * queues. This probably ought to be a private struct | 1018 | * queues. This probably ought to be a private struct |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 75be5a28815d..aa04b9a5093b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1197,30 +1197,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk, | |||
1197 | extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); | 1197 | extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); |
1198 | extern void tcp_free_md5sig_pool(void); | 1198 | extern void tcp_free_md5sig_pool(void); |
1199 | 1199 | ||
1200 | extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu); | 1200 | extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); |
1201 | extern void __tcp_put_md5sig_pool(void); | 1201 | extern void tcp_put_md5sig_pool(void); |
1202 | |||
1202 | extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); | 1203 | extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); |
1203 | extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, | 1204 | extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, |
1204 | unsigned header_len); | 1205 | unsigned header_len); |
1205 | extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, | 1206 | extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, |
1206 | struct tcp_md5sig_key *key); | 1207 | struct tcp_md5sig_key *key); |
1207 | 1208 | ||
1208 | static inline | ||
1209 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | ||
1210 | { | ||
1211 | int cpu = get_cpu(); | ||
1212 | struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu); | ||
1213 | if (!ret) | ||
1214 | put_cpu(); | ||
1215 | return ret; | ||
1216 | } | ||
1217 | |||
1218 | static inline void tcp_put_md5sig_pool(void) | ||
1219 | { | ||
1220 | __tcp_put_md5sig_pool(); | ||
1221 | put_cpu(); | ||
1222 | } | ||
1223 | |||
1224 | /* write queue abstraction */ | 1209 | /* write queue abstraction */ |
1225 | static inline void tcp_write_queue_purge(struct sock *sk) | 1210 | static inline void tcp_write_queue_purge(struct sock *sk) |
1226 | { | 1211 | { |
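
tcp_get_md5sig_pool()/tcp_put_md5sig_pool() are now out of line, so the get_cpu()/put_cpu() pairing becomes an implementation detail rather than something callers open-code. A hedged sketch of a caller, assuming tcp_md5_hash_key() returns 0 on success; the function name and error handling are illustrative:

        #include <net/tcp.h>

        static int example_hash_md5_key(struct tcp_md5sig_key *key)
        {
                struct tcp_md5sig_pool *hp;
                int err = -1;

                hp = tcp_get_md5sig_pool();     /* may fail if no pool is set up */
                if (!hp)
                        return -1;

                if (!tcp_md5_hash_key(hp, key)) /* assuming 0 on success */
                        err = 0;

                tcp_put_md5sig_pool();          /* always pair with the get above */
                return err;
        }
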
diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 4b0f48ba16a6..c7bb2f0482fe 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h | |||
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free, | |||
51 | TP_printk("%s", __get_str(name)) | 51 | TP_printk("%s", __get_str(name)) |
52 | ); | 52 | ); |
53 | 53 | ||
54 | #ifdef CONFIG_MODULE_UNLOAD | ||
55 | /* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */ | ||
56 | |||
54 | DECLARE_EVENT_CLASS(module_refcnt, | 57 | DECLARE_EVENT_CLASS(module_refcnt, |
55 | 58 | ||
56 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 59 | TP_PROTO(struct module *mod, unsigned long ip), |
57 | 60 | ||
58 | TP_ARGS(mod, ip, refcnt), | 61 | TP_ARGS(mod, ip), |
59 | 62 | ||
60 | TP_STRUCT__entry( | 63 | TP_STRUCT__entry( |
61 | __field( unsigned long, ip ) | 64 | __field( unsigned long, ip ) |
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt, | |||
65 | 68 | ||
66 | TP_fast_assign( | 69 | TP_fast_assign( |
67 | __entry->ip = ip; | 70 | __entry->ip = ip; |
68 | __entry->refcnt = refcnt; | 71 | __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); |
69 | __assign_str(name, mod->name); | 72 | __assign_str(name, mod->name); |
70 | ), | 73 | ), |
71 | 74 | ||
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt, | |||
75 | 78 | ||
76 | DEFINE_EVENT(module_refcnt, module_get, | 79 | DEFINE_EVENT(module_refcnt, module_get, |
77 | 80 | ||
78 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 81 | TP_PROTO(struct module *mod, unsigned long ip), |
79 | 82 | ||
80 | TP_ARGS(mod, ip, refcnt) | 83 | TP_ARGS(mod, ip) |
81 | ); | 84 | ); |
82 | 85 | ||
83 | DEFINE_EVENT(module_refcnt, module_put, | 86 | DEFINE_EVENT(module_refcnt, module_put, |
84 | 87 | ||
85 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 88 | TP_PROTO(struct module *mod, unsigned long ip), |
86 | 89 | ||
87 | TP_ARGS(mod, ip, refcnt) | 90 | TP_ARGS(mod, ip) |
88 | ); | 91 | ); |
92 | #endif /* CONFIG_MODULE_UNLOAD */ | ||
89 | 93 | ||
90 | TRACE_EVENT(module_request, | 94 | TRACE_EVENT(module_request, |
91 | 95 | ||
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index a510b75ac304..814566c99d29 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h | |||
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver, | |||
100 | __entry->sa_handler, __entry->sa_flags) | 100 | __entry->sa_handler, __entry->sa_flags) |
101 | ); | 101 | ); |
102 | 102 | ||
103 | /** | 103 | DECLARE_EVENT_CLASS(signal_queue_overflow, |
104 | * signal_overflow_fail - called when signal queue is overflow | ||
105 | * @sig: signal number | ||
106 | * @group: signal to process group or not (bool) | ||
107 | * @info: pointer to struct siginfo | ||
108 | * | ||
109 | * Kernel fails to generate 'sig' signal with 'info' siginfo, because | ||
110 | * siginfo queue is overflow, and the signal is dropped. | ||
111 | * 'group' is not 0 if the signal will be sent to a process group. | ||
112 | * 'sig' is always one of RT signals. | ||
113 | */ | ||
114 | TRACE_EVENT(signal_overflow_fail, | ||
115 | 104 | ||
116 | TP_PROTO(int sig, int group, struct siginfo *info), | 105 | TP_PROTO(int sig, int group, struct siginfo *info), |
117 | 106 | ||
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail, | |||
135 | ); | 124 | ); |
136 | 125 | ||
137 | /** | 126 | /** |
127 | * signal_overflow_fail - called when signal queue is overflow | ||
128 | * @sig: signal number | ||
129 | * @group: signal to process group or not (bool) | ||
130 | * @info: pointer to struct siginfo | ||
131 | * | ||
132 | * Kernel fails to generate 'sig' signal with 'info' siginfo, because | ||
133 | * siginfo queue is overflow, and the signal is dropped. | ||
134 | * 'group' is not 0 if the signal will be sent to a process group. | ||
135 | * 'sig' is always one of RT signals. | ||
136 | */ | ||
137 | DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail, | ||
138 | |||
139 | TP_PROTO(int sig, int group, struct siginfo *info), | ||
140 | |||
141 | TP_ARGS(sig, group, info) | ||
142 | ); | ||
143 | |||
144 | /** | ||
138 | * signal_lose_info - called when siginfo is lost | 145 | * signal_lose_info - called when siginfo is lost |
139 | * @sig: signal number | 146 | * @sig: signal number |
140 | * @group: signal to process group or not (bool) | 147 | * @group: signal to process group or not (bool) |
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail, | |||
145 | * 'group' is not 0 if the signal will be sent to a process group. | 152 | * 'group' is not 0 if the signal will be sent to a process group. |
146 | * 'sig' is always one of non-RT signals. | 153 | * 'sig' is always one of non-RT signals. |
147 | */ | 154 | */ |
148 | TRACE_EVENT(signal_lose_info, | 155 | DEFINE_EVENT(signal_queue_overflow, signal_lose_info, |
149 | 156 | ||
150 | TP_PROTO(int sig, int group, struct siginfo *info), | 157 | TP_PROTO(int sig, int group, struct siginfo *info), |
151 | 158 | ||
152 | TP_ARGS(sig, group, info), | 159 | TP_ARGS(sig, group, info) |
153 | |||
154 | TP_STRUCT__entry( | ||
155 | __field( int, sig ) | ||
156 | __field( int, group ) | ||
157 | __field( int, errno ) | ||
158 | __field( int, code ) | ||
159 | ), | ||
160 | |||
161 | TP_fast_assign( | ||
162 | __entry->sig = sig; | ||
163 | __entry->group = group; | ||
164 | TP_STORE_SIGINFO(__entry, info); | ||
165 | ), | ||
166 | |||
167 | TP_printk("sig=%d group=%d errno=%d code=%d", | ||
168 | __entry->sig, __entry->group, __entry->errno, __entry->code) | ||
169 | ); | 160 | ); |
161 | |||
170 | #endif /* _TRACE_SIGNAL_H */ | 162 | #endif /* _TRACE_SIGNAL_H */ |
171 | 163 | ||
172 | /* This part must be outside protection */ | 164 | /* This part must be outside protection */ |
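
With the fields factored into the signal_queue_overflow class, any further tracepoint with this prototype becomes a one-line DEFINE_EVENT. A hedged sketch with a hypothetical event name that is not part of this patch:

        /* Hypothetical additional event reusing the same class and fields. */
        DEFINE_EVENT(signal_queue_overflow, signal_example_drop,

                TP_PROTO(int sig, int group, struct siginfo *info),

                TP_ARGS(sig, group, info)
        );
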
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 882c64832ffe..16253db38d73 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -154,9 +154,11 @@ | |||
154 | * | 154 | * |
155 | * field = (typeof(field))entry; | 155 | * field = (typeof(field))entry; |
156 | * | 156 | * |
157 | * p = get_cpu_var(ftrace_event_seq); | 157 | * p = &get_cpu_var(ftrace_event_seq); |
158 | * trace_seq_init(p); | 158 | * trace_seq_init(p); |
159 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | 159 | * ret = trace_seq_printf(s, "%s: ", <call>); |
160 | * if (ret) | ||
161 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | ||
160 | * put_cpu(); | 162 | * put_cpu(); |
161 | * if (!ret) | 163 | * if (!ret) |
162 | * return TRACE_TYPE_PARTIAL_LINE; | 164 | * return TRACE_TYPE_PARTIAL_LINE; |
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
450 | * | 452 | * |
451 | * static void ftrace_raw_event_<call>(proto) | 453 | * static void ftrace_raw_event_<call>(proto) |
452 | * { | 454 | * { |
455 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | ||
453 | * struct ring_buffer_event *event; | 456 | * struct ring_buffer_event *event; |
454 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 457 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 |
455 | * struct ring_buffer *buffer; | 458 | * struct ring_buffer *buffer; |
456 | * unsigned long irq_flags; | 459 | * unsigned long irq_flags; |
460 | * int __data_size; | ||
457 | * int pc; | 461 | * int pc; |
458 | * | 462 | * |
459 | * local_save_flags(irq_flags); | 463 | * local_save_flags(irq_flags); |
460 | * pc = preempt_count(); | 464 | * pc = preempt_count(); |
461 | * | 465 | * |
466 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | ||
467 | * | ||
462 | * event = trace_current_buffer_lock_reserve(&buffer, | 468 | * event = trace_current_buffer_lock_reserve(&buffer, |
463 | * event_<call>.id, | 469 | * event_<call>.id, |
464 | * sizeof(struct ftrace_raw_<call>), | 470 | * sizeof(*entry) + __data_size, |
465 | * irq_flags, pc); | 471 | * irq_flags, pc); |
466 | * if (!event) | 472 | * if (!event) |
467 | * return; | 473 | * return; |
468 | * entry = ring_buffer_event_data(event); | 474 | * entry = ring_buffer_event_data(event); |
469 | * | 475 | * |
470 | * <assign>; <-- Here we assign the entries by the __field and | 476 | * { <assign>; } <-- Here we assign the entries by the __field and |
471 | * __array macros. | 477 | * __array macros. |
472 | * | 478 | * |
473 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); | 479 | * if (!filter_current_check_discard(buffer, event_call, entry, event)) |
480 | * trace_current_buffer_unlock_commit(buffer, | ||
481 | * event, irq_flags, pc); | ||
474 | * } | 482 | * } |
475 | * | 483 | * |
476 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) | 484 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) |
477 | * { | 485 | * { |
478 | * int ret; | 486 | * return register_trace_<call>(ftrace_raw_event_<call>); |
479 | * | ||
480 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | ||
481 | * if (!ret) | ||
482 | * pr_info("event trace: Could not activate trace point " | ||
483 | * "probe to <call>"); | ||
484 | * return ret; | ||
485 | * } | 487 | * } |
486 | * | 488 | * |
487 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) | 489 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) |
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
493 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | 495 | * .trace = ftrace_raw_output_<call>, <-- stage 2 |
494 | * }; | 496 | * }; |
495 | * | 497 | * |
498 | * static const char print_fmt_<call>[] = <TP_printk>; | ||
499 | * | ||
496 | * static struct ftrace_event_call __used | 500 | * static struct ftrace_event_call __used |
497 | * __attribute__((__aligned__(4))) | 501 | * __attribute__((__aligned__(4))) |
498 | * __attribute__((section("_ftrace_events"))) event_<call> = { | 502 | * __attribute__((section("_ftrace_events"))) event_<call> = { |
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
501 | * .raw_init = trace_event_raw_init, | 505 | * .raw_init = trace_event_raw_init, |
502 | * .regfunc = ftrace_reg_event_<call>, | 506 | * .regfunc = ftrace_reg_event_<call>, |
503 | * .unregfunc = ftrace_unreg_event_<call>, | 507 | * .unregfunc = ftrace_unreg_event_<call>, |
508 | * .print_fmt = print_fmt_<call>, | ||
509 | * .define_fields = ftrace_define_fields_<call>, | ||
504 | * } | 510 | * } |
505 | * | 511 | * |
506 | */ | 512 | */ |
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ | |||
569 | return; \ | 575 | return; \ |
570 | entry = ring_buffer_event_data(event); \ | 576 | entry = ring_buffer_event_data(event); \ |
571 | \ | 577 | \ |
572 | \ | ||
573 | tstruct \ | 578 | tstruct \ |
574 | \ | 579 | \ |
575 | { assign; } \ | 580 | { assign; } \ |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 722b0130aa94..59a009dc54a8 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -158,7 +158,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, | |||
158 | u->mq_bytes + mq_bytes > | 158 | u->mq_bytes + mq_bytes > |
159 | task_rlimit(p, RLIMIT_MSGQUEUE)) { | 159 | task_rlimit(p, RLIMIT_MSGQUEUE)) { |
160 | spin_unlock(&mq_lock); | 160 | spin_unlock(&mq_lock); |
161 | kfree(info->messages); | 161 | /* mqueue_delete_inode() releases info->messages */ |
162 | goto out_inode; | 162 | goto out_inode; |
163 | } | 163 | } |
164 | u->mq_bytes += mq_bytes; | 164 | u->mq_bytes += mq_bytes; |
diff --git a/kernel/acct.c b/kernel/acct.c index 24f8c81fc48d..e4c0e1fee9b0 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -353,17 +353,18 @@ restart: | |||
353 | 353 | ||
354 | void acct_exit_ns(struct pid_namespace *ns) | 354 | void acct_exit_ns(struct pid_namespace *ns) |
355 | { | 355 | { |
356 | struct bsd_acct_struct *acct; | 356 | struct bsd_acct_struct *acct = ns->bacct; |
357 | 357 | ||
358 | spin_lock(&acct_lock); | 358 | if (acct == NULL) |
359 | acct = ns->bacct; | 359 | return; |
360 | if (acct != NULL) { | ||
361 | if (acct->file != NULL) | ||
362 | acct_file_reopen(acct, NULL, NULL); | ||
363 | 360 | ||
364 | kfree(acct); | 361 | del_timer_sync(&acct->timer); |
365 | } | 362 | spin_lock(&acct_lock); |
363 | if (acct->file != NULL) | ||
364 | acct_file_reopen(acct, NULL, NULL); | ||
366 | spin_unlock(&acct_lock); | 365 | spin_unlock(&acct_lock); |
366 | |||
367 | kfree(acct); | ||
367 | } | 368 | } |
368 | 369 | ||
369 | /* | 370 | /* |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e2769e13980c..6d870f2d1228 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -1646,7 +1646,9 @@ static inline struct cftype *__d_cft(struct dentry *dentry) | |||
1646 | int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | 1646 | int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) |
1647 | { | 1647 | { |
1648 | char *start; | 1648 | char *start; |
1649 | struct dentry *dentry = rcu_dereference(cgrp->dentry); | 1649 | struct dentry *dentry = rcu_dereference_check(cgrp->dentry, |
1650 | rcu_read_lock_held() || | ||
1651 | cgroup_lock_is_held()); | ||
1650 | 1652 | ||
1651 | if (!dentry || cgrp == dummytop) { | 1653 | if (!dentry || cgrp == dummytop) { |
1652 | /* | 1654 | /* |
@@ -1662,13 +1664,17 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | |||
1662 | *--start = '\0'; | 1664 | *--start = '\0'; |
1663 | for (;;) { | 1665 | for (;;) { |
1664 | int len = dentry->d_name.len; | 1666 | int len = dentry->d_name.len; |
1667 | |||
1665 | if ((start -= len) < buf) | 1668 | if ((start -= len) < buf) |
1666 | return -ENAMETOOLONG; | 1669 | return -ENAMETOOLONG; |
1667 | memcpy(start, cgrp->dentry->d_name.name, len); | 1670 | memcpy(start, dentry->d_name.name, len); |
1668 | cgrp = cgrp->parent; | 1671 | cgrp = cgrp->parent; |
1669 | if (!cgrp) | 1672 | if (!cgrp) |
1670 | break; | 1673 | break; |
1671 | dentry = rcu_dereference(cgrp->dentry); | 1674 | |
1675 | dentry = rcu_dereference_check(cgrp->dentry, | ||
1676 | rcu_read_lock_held() || | ||
1677 | cgroup_lock_is_held()); | ||
1672 | if (!cgrp->parent) | 1678 | if (!cgrp->parent) |
1673 | continue; | 1679 | continue; |
1674 | if (--start < buf) | 1680 | if (--start < buf) |
@@ -4429,7 +4435,15 @@ __setup("cgroup_disable=", cgroup_disable); | |||
4429 | */ | 4435 | */ |
4430 | unsigned short css_id(struct cgroup_subsys_state *css) | 4436 | unsigned short css_id(struct cgroup_subsys_state *css) |
4431 | { | 4437 | { |
4432 | struct css_id *cssid = rcu_dereference(css->id); | 4438 | struct css_id *cssid; |
4439 | |||
4440 | /* | ||
4441 | * This css_id() can return a correct value when someone has a refcnt | ||
4442 | * on it or this is called under rcu_read_lock(). Once css->id is allocated, | ||
4443 | * it's unchanged until freed. | ||
4444 | */ | ||
4445 | cssid = rcu_dereference_check(css->id, | ||
4446 | rcu_read_lock_held() || atomic_read(&css->refcnt)); | ||
4433 | 4447 | ||
4434 | if (cssid) | 4448 | if (cssid) |
4435 | return cssid->id; | 4449 | return cssid->id; |
@@ -4439,7 +4453,10 @@ EXPORT_SYMBOL_GPL(css_id); | |||
4439 | 4453 | ||
4440 | unsigned short css_depth(struct cgroup_subsys_state *css) | 4454 | unsigned short css_depth(struct cgroup_subsys_state *css) |
4441 | { | 4455 | { |
4442 | struct css_id *cssid = rcu_dereference(css->id); | 4456 | struct css_id *cssid; |
4457 | |||
4458 | cssid = rcu_dereference_check(css->id, | ||
4459 | rcu_read_lock_held() || atomic_read(&css->refcnt)); | ||
4443 | 4460 | ||
4444 | if (cssid) | 4461 | if (cssid) |
4445 | return cssid->depth; | 4462 | return cssid->depth; |
@@ -4447,15 +4464,36 @@ unsigned short css_depth(struct cgroup_subsys_state *css) | |||
4447 | } | 4464 | } |
4448 | EXPORT_SYMBOL_GPL(css_depth); | 4465 | EXPORT_SYMBOL_GPL(css_depth); |
4449 | 4466 | ||
4467 | /** | ||
4468 | * css_is_ancestor - test "root" css is an ancestor of "child" | ||
4469 | * @child: the css to be tested. | ||
4470 | * @root: the css supposed to be an ancestor of the child. | ||
4471 | * | ||
4472 | * Returns true if "root" is an ancestor of "child" in its hierarchy. Because | ||
4473 | * this function reads css->id, it uses rcu_dereference() and rcu_read_lock(). | ||
4474 | * But, considering usual usage, the csses should be valid objects after the test. | ||
4475 | * Assuming that the caller will do some action to the child if this | ||
4476 | * returns true, the caller must take "child"'s reference count. | ||
4477 | * If "child" is a valid object and this returns true, "root" is valid, too. | ||
4478 | */ | ||
4479 | |||
4450 | bool css_is_ancestor(struct cgroup_subsys_state *child, | 4480 | bool css_is_ancestor(struct cgroup_subsys_state *child, |
4451 | const struct cgroup_subsys_state *root) | 4481 | const struct cgroup_subsys_state *root) |
4452 | { | 4482 | { |
4453 | struct css_id *child_id = rcu_dereference(child->id); | 4483 | struct css_id *child_id; |
4454 | struct css_id *root_id = rcu_dereference(root->id); | 4484 | struct css_id *root_id; |
4485 | bool ret = true; | ||
4455 | 4486 | ||
4456 | if (!child_id || !root_id || (child_id->depth < root_id->depth)) | 4487 | rcu_read_lock(); |
4457 | return false; | 4488 | child_id = rcu_dereference(child->id); |
4458 | return child_id->stack[root_id->depth] == root_id->id; | 4489 | root_id = rcu_dereference(root->id); |
4490 | if (!child_id | ||
4491 | || !root_id | ||
4492 | || (child_id->depth < root_id->depth) | ||
4493 | || (child_id->stack[root_id->depth] != root_id->id)) | ||
4494 | ret = false; | ||
4495 | rcu_read_unlock(); | ||
4496 | return ret; | ||
4459 | } | 4497 | } |
4460 | 4498 | ||
4461 | static void __free_css_id_cb(struct rcu_head *head) | 4499 | static void __free_css_id_cb(struct rcu_head *head) |
@@ -4555,13 +4593,13 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent, | |||
4555 | { | 4593 | { |
4556 | int subsys_id, i, depth = 0; | 4594 | int subsys_id, i, depth = 0; |
4557 | struct cgroup_subsys_state *parent_css, *child_css; | 4595 | struct cgroup_subsys_state *parent_css, *child_css; |
4558 | struct css_id *child_id, *parent_id = NULL; | 4596 | struct css_id *child_id, *parent_id; |
4559 | 4597 | ||
4560 | subsys_id = ss->subsys_id; | 4598 | subsys_id = ss->subsys_id; |
4561 | parent_css = parent->subsys[subsys_id]; | 4599 | parent_css = parent->subsys[subsys_id]; |
4562 | child_css = child->subsys[subsys_id]; | 4600 | child_css = child->subsys[subsys_id]; |
4563 | depth = css_depth(parent_css) + 1; | ||
4564 | parent_id = parent_css->id; | 4601 | parent_id = parent_css->id; |
4602 | depth = parent_id->depth; | ||
4565 | 4603 | ||
4566 | child_id = get_new_cssid(ss, depth); | 4604 | child_id = get_new_cssid(ss, depth); |
4567 | if (IS_ERR(child_id)) | 4605 | if (IS_ERR(child_id)) |
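
css_is_ancestor() now takes rcu_read_lock() itself around the rcu_dereference() calls instead of relying on the caller, the usual pattern when a helper has no caller-held lock it can assert. A hedged sketch of that pattern with illustrative names:

        #include <linux/rcupdate.h>

        struct bar { int field; };
        static struct bar *example_ptr;         /* RCU-protected, illustrative */

        static int example_read_field(void)
        {
                struct bar *p;
                int val = -1;

                rcu_read_lock();                /* helper provides its own read side */
                p = rcu_dereference(example_ptr);
                if (p)
                        val = p->field;
                rcu_read_unlock();

                return val;
        }
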
diff --git a/kernel/fork.c b/kernel/fork.c index 5d3592deaf71..4d57d9e3a6e9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1111,7 +1111,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1111 | p->memcg_batch.do_batch = 0; | 1111 | p->memcg_batch.do_batch = 0; |
1112 | p->memcg_batch.memcg = NULL; | 1112 | p->memcg_batch.memcg = NULL; |
1113 | #endif | 1113 | #endif |
1114 | p->stack_start = stack_start; | ||
1115 | 1114 | ||
1116 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1115 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1117 | sched_fork(p, clone_flags); | 1116 | sched_fork(p, clone_flags); |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 87ebe8adc474..474a84715eac 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1134,11 +1134,9 @@ int crash_shrink_memory(unsigned long new_size) | |||
1134 | 1134 | ||
1135 | free_reserved_phys_range(end, crashk_res.end); | 1135 | free_reserved_phys_range(end, crashk_res.end); |
1136 | 1136 | ||
1137 | if (start == end) { | 1137 | if (start == end) |
1138 | crashk_res.end = end; | ||
1139 | release_resource(&crashk_res); | 1138 | release_resource(&crashk_res); |
1140 | } else | 1139 | crashk_res.end = end - 1; |
1141 | crashk_res.end = end - 1; | ||
1142 | 1140 | ||
1143 | unlock: | 1141 | unlock: |
1144 | mutex_unlock(&kexec_mutex); | 1142 | mutex_unlock(&kexec_mutex); |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index e9c759f06c1d..ec21304856d1 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -431,20 +431,7 @@ static struct stack_trace lockdep_init_trace = { | |||
431 | /* | 431 | /* |
432 | * Various lockdep statistics: | 432 | * Various lockdep statistics: |
433 | */ | 433 | */ |
434 | atomic_t chain_lookup_hits; | 434 | DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); |
435 | atomic_t chain_lookup_misses; | ||
436 | atomic_t hardirqs_on_events; | ||
437 | atomic_t hardirqs_off_events; | ||
438 | atomic_t redundant_hardirqs_on; | ||
439 | atomic_t redundant_hardirqs_off; | ||
440 | atomic_t softirqs_on_events; | ||
441 | atomic_t softirqs_off_events; | ||
442 | atomic_t redundant_softirqs_on; | ||
443 | atomic_t redundant_softirqs_off; | ||
444 | atomic_t nr_unused_locks; | ||
445 | atomic_t nr_cyclic_checks; | ||
446 | atomic_t nr_find_usage_forwards_checks; | ||
447 | atomic_t nr_find_usage_backwards_checks; | ||
448 | #endif | 435 | #endif |
449 | 436 | ||
450 | /* | 437 | /* |
@@ -748,7 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
748 | return NULL; | 735 | return NULL; |
749 | } | 736 | } |
750 | class = lock_classes + nr_lock_classes++; | 737 | class = lock_classes + nr_lock_classes++; |
751 | debug_atomic_inc(&nr_unused_locks); | 738 | debug_atomic_inc(nr_unused_locks); |
752 | class->key = key; | 739 | class->key = key; |
753 | class->name = lock->name; | 740 | class->name = lock->name; |
754 | class->subclass = subclass; | 741 | class->subclass = subclass; |
@@ -818,7 +805,8 @@ static struct lock_list *alloc_list_entry(void) | |||
818 | * Add a new dependency to the head of the list: | 805 | * Add a new dependency to the head of the list: |
819 | */ | 806 | */ |
820 | static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | 807 | static int add_lock_to_list(struct lock_class *class, struct lock_class *this, |
821 | struct list_head *head, unsigned long ip, int distance) | 808 | struct list_head *head, unsigned long ip, |
809 | int distance, struct stack_trace *trace) | ||
822 | { | 810 | { |
823 | struct lock_list *entry; | 811 | struct lock_list *entry; |
824 | /* | 812 | /* |
@@ -829,11 +817,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
829 | if (!entry) | 817 | if (!entry) |
830 | return 0; | 818 | return 0; |
831 | 819 | ||
832 | if (!save_trace(&entry->trace)) | ||
833 | return 0; | ||
834 | |||
835 | entry->class = this; | 820 | entry->class = this; |
836 | entry->distance = distance; | 821 | entry->distance = distance; |
822 | entry->trace = *trace; | ||
837 | /* | 823 | /* |
838 | * Since we never remove from the dependency list, the list can | 824 | * Since we never remove from the dependency list, the list can |
839 | * be walked lockless by other CPUs, it's only allocation | 825 | * be walked lockless by other CPUs, it's only allocation |
@@ -1205,7 +1191,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target, | |||
1205 | { | 1191 | { |
1206 | int result; | 1192 | int result; |
1207 | 1193 | ||
1208 | debug_atomic_inc(&nr_cyclic_checks); | 1194 | debug_atomic_inc(nr_cyclic_checks); |
1209 | 1195 | ||
1210 | result = __bfs_forwards(root, target, class_equal, target_entry); | 1196 | result = __bfs_forwards(root, target, class_equal, target_entry); |
1211 | 1197 | ||
@@ -1242,7 +1228,7 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, | |||
1242 | { | 1228 | { |
1243 | int result; | 1229 | int result; |
1244 | 1230 | ||
1245 | debug_atomic_inc(&nr_find_usage_forwards_checks); | 1231 | debug_atomic_inc(nr_find_usage_forwards_checks); |
1246 | 1232 | ||
1247 | result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); | 1233 | result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); |
1248 | 1234 | ||
@@ -1265,7 +1251,7 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, | |||
1265 | { | 1251 | { |
1266 | int result; | 1252 | int result; |
1267 | 1253 | ||
1268 | debug_atomic_inc(&nr_find_usage_backwards_checks); | 1254 | debug_atomic_inc(nr_find_usage_backwards_checks); |
1269 | 1255 | ||
1270 | result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); | 1256 | result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); |
1271 | 1257 | ||
@@ -1635,12 +1621,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, | |||
1635 | */ | 1621 | */ |
1636 | static int | 1622 | static int |
1637 | check_prev_add(struct task_struct *curr, struct held_lock *prev, | 1623 | check_prev_add(struct task_struct *curr, struct held_lock *prev, |
1638 | struct held_lock *next, int distance) | 1624 | struct held_lock *next, int distance, int trylock_loop) |
1639 | { | 1625 | { |
1640 | struct lock_list *entry; | 1626 | struct lock_list *entry; |
1641 | int ret; | 1627 | int ret; |
1642 | struct lock_list this; | 1628 | struct lock_list this; |
1643 | struct lock_list *uninitialized_var(target_entry); | 1629 | struct lock_list *uninitialized_var(target_entry); |
1630 | /* | ||
1631 | * Static variable, serialized by the graph_lock(). | ||
1632 | * | ||
1633 | * We use this static variable to save the stack trace in case | ||
1634 | * we call into this function multiple times due to encountering | ||
1635 | * trylocks in the held lock stack. | ||
1636 | */ | ||
1637 | static struct stack_trace trace; | ||
1644 | 1638 | ||
1645 | /* | 1639 | /* |
1646 | * Prove that the new <prev> -> <next> dependency would not | 1640 | * Prove that the new <prev> -> <next> dependency would not |
@@ -1688,20 +1682,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1688 | } | 1682 | } |
1689 | } | 1683 | } |
1690 | 1684 | ||
1685 | if (!trylock_loop && !save_trace(&trace)) | ||
1686 | return 0; | ||
1687 | |||
1691 | /* | 1688 | /* |
1692 | * Ok, all validations passed, add the new lock | 1689 | * Ok, all validations passed, add the new lock |
1693 | * to the previous lock's dependency list: | 1690 | * to the previous lock's dependency list: |
1694 | */ | 1691 | */ |
1695 | ret = add_lock_to_list(hlock_class(prev), hlock_class(next), | 1692 | ret = add_lock_to_list(hlock_class(prev), hlock_class(next), |
1696 | &hlock_class(prev)->locks_after, | 1693 | &hlock_class(prev)->locks_after, |
1697 | next->acquire_ip, distance); | 1694 | next->acquire_ip, distance, &trace); |
1698 | 1695 | ||
1699 | if (!ret) | 1696 | if (!ret) |
1700 | return 0; | 1697 | return 0; |
1701 | 1698 | ||
1702 | ret = add_lock_to_list(hlock_class(next), hlock_class(prev), | 1699 | ret = add_lock_to_list(hlock_class(next), hlock_class(prev), |
1703 | &hlock_class(next)->locks_before, | 1700 | &hlock_class(next)->locks_before, |
1704 | next->acquire_ip, distance); | 1701 | next->acquire_ip, distance, &trace); |
1705 | if (!ret) | 1702 | if (!ret) |
1706 | return 0; | 1703 | return 0; |
1707 | 1704 | ||
@@ -1731,6 +1728,7 @@ static int | |||
1731 | check_prevs_add(struct task_struct *curr, struct held_lock *next) | 1728 | check_prevs_add(struct task_struct *curr, struct held_lock *next) |
1732 | { | 1729 | { |
1733 | int depth = curr->lockdep_depth; | 1730 | int depth = curr->lockdep_depth; |
1731 | int trylock_loop = 0; | ||
1734 | struct held_lock *hlock; | 1732 | struct held_lock *hlock; |
1735 | 1733 | ||
1736 | /* | 1734 | /* |
@@ -1756,7 +1754,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
1756 | * added: | 1754 | * added: |
1757 | */ | 1755 | */ |
1758 | if (hlock->read != 2) { | 1756 | if (hlock->read != 2) { |
1759 | if (!check_prev_add(curr, hlock, next, distance)) | 1757 | if (!check_prev_add(curr, hlock, next, |
1758 | distance, trylock_loop)) | ||
1760 | return 0; | 1759 | return 0; |
1761 | /* | 1760 | /* |
1762 | * Stop after the first non-trylock entry, | 1761 | * Stop after the first non-trylock entry, |
@@ -1779,6 +1778,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
1779 | if (curr->held_locks[depth].irq_context != | 1778 | if (curr->held_locks[depth].irq_context != |
1780 | curr->held_locks[depth-1].irq_context) | 1779 | curr->held_locks[depth-1].irq_context) |
1781 | break; | 1780 | break; |
1781 | trylock_loop = 1; | ||
1782 | } | 1782 | } |
1783 | return 1; | 1783 | return 1; |
1784 | out_bug: | 1784 | out_bug: |
@@ -1825,7 +1825,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
1825 | list_for_each_entry(chain, hash_head, entry) { | 1825 | list_for_each_entry(chain, hash_head, entry) { |
1826 | if (chain->chain_key == chain_key) { | 1826 | if (chain->chain_key == chain_key) { |
1827 | cache_hit: | 1827 | cache_hit: |
1828 | debug_atomic_inc(&chain_lookup_hits); | 1828 | debug_atomic_inc(chain_lookup_hits); |
1829 | if (very_verbose(class)) | 1829 | if (very_verbose(class)) |
1830 | printk("\nhash chain already cached, key: " | 1830 | printk("\nhash chain already cached, key: " |
1831 | "%016Lx tail class: [%p] %s\n", | 1831 | "%016Lx tail class: [%p] %s\n", |
@@ -1890,7 +1890,7 @@ cache_hit: | |||
1890 | chain_hlocks[chain->base + j] = class - lock_classes; | 1890 | chain_hlocks[chain->base + j] = class - lock_classes; |
1891 | } | 1891 | } |
1892 | list_add_tail_rcu(&chain->entry, hash_head); | 1892 | list_add_tail_rcu(&chain->entry, hash_head); |
1893 | debug_atomic_inc(&chain_lookup_misses); | 1893 | debug_atomic_inc(chain_lookup_misses); |
1894 | inc_chains(); | 1894 | inc_chains(); |
1895 | 1895 | ||
1896 | return 1; | 1896 | return 1; |
@@ -2311,7 +2311,12 @@ void trace_hardirqs_on_caller(unsigned long ip) | |||
2311 | return; | 2311 | return; |
2312 | 2312 | ||
2313 | if (unlikely(curr->hardirqs_enabled)) { | 2313 | if (unlikely(curr->hardirqs_enabled)) { |
2314 | debug_atomic_inc(&redundant_hardirqs_on); | 2314 | /* |
2315 | * Neither irq nor preemption is disabled here, | ||
2316 | * so this is racy by nature but losing one hit | ||
2317 | * in a stat is not a big deal. | ||
2318 | */ | ||
2319 | __debug_atomic_inc(redundant_hardirqs_on); | ||
2315 | return; | 2320 | return; |
2316 | } | 2321 | } |
2317 | /* we'll do an OFF -> ON transition: */ | 2322 | /* we'll do an OFF -> ON transition: */ |
@@ -2338,7 +2343,7 @@ void trace_hardirqs_on_caller(unsigned long ip) | |||
2338 | 2343 | ||
2339 | curr->hardirq_enable_ip = ip; | 2344 | curr->hardirq_enable_ip = ip; |
2340 | curr->hardirq_enable_event = ++curr->irq_events; | 2345 | curr->hardirq_enable_event = ++curr->irq_events; |
2341 | debug_atomic_inc(&hardirqs_on_events); | 2346 | debug_atomic_inc(hardirqs_on_events); |
2342 | } | 2347 | } |
2343 | EXPORT_SYMBOL(trace_hardirqs_on_caller); | 2348 | EXPORT_SYMBOL(trace_hardirqs_on_caller); |
2344 | 2349 | ||
@@ -2370,9 +2375,9 @@ void trace_hardirqs_off_caller(unsigned long ip) | |||
2370 | curr->hardirqs_enabled = 0; | 2375 | curr->hardirqs_enabled = 0; |
2371 | curr->hardirq_disable_ip = ip; | 2376 | curr->hardirq_disable_ip = ip; |
2372 | curr->hardirq_disable_event = ++curr->irq_events; | 2377 | curr->hardirq_disable_event = ++curr->irq_events; |
2373 | debug_atomic_inc(&hardirqs_off_events); | 2378 | debug_atomic_inc(hardirqs_off_events); |
2374 | } else | 2379 | } else |
2375 | debug_atomic_inc(&redundant_hardirqs_off); | 2380 | debug_atomic_inc(redundant_hardirqs_off); |
2376 | } | 2381 | } |
2377 | EXPORT_SYMBOL(trace_hardirqs_off_caller); | 2382 | EXPORT_SYMBOL(trace_hardirqs_off_caller); |
2378 | 2383 | ||
@@ -2396,7 +2401,7 @@ void trace_softirqs_on(unsigned long ip) | |||
2396 | return; | 2401 | return; |
2397 | 2402 | ||
2398 | if (curr->softirqs_enabled) { | 2403 | if (curr->softirqs_enabled) { |
2399 | debug_atomic_inc(&redundant_softirqs_on); | 2404 | debug_atomic_inc(redundant_softirqs_on); |
2400 | return; | 2405 | return; |
2401 | } | 2406 | } |
2402 | 2407 | ||
@@ -2406,7 +2411,7 @@ void trace_softirqs_on(unsigned long ip) | |||
2406 | curr->softirqs_enabled = 1; | 2411 | curr->softirqs_enabled = 1; |
2407 | curr->softirq_enable_ip = ip; | 2412 | curr->softirq_enable_ip = ip; |
2408 | curr->softirq_enable_event = ++curr->irq_events; | 2413 | curr->softirq_enable_event = ++curr->irq_events; |
2409 | debug_atomic_inc(&softirqs_on_events); | 2414 | debug_atomic_inc(softirqs_on_events); |
2410 | /* | 2415 | /* |
2411 | * We are going to turn softirqs on, so set the | 2416 | * We are going to turn softirqs on, so set the |
2412 | * usage bit for all held locks, if hardirqs are | 2417 | * usage bit for all held locks, if hardirqs are |
@@ -2436,10 +2441,10 @@ void trace_softirqs_off(unsigned long ip) | |||
2436 | curr->softirqs_enabled = 0; | 2441 | curr->softirqs_enabled = 0; |
2437 | curr->softirq_disable_ip = ip; | 2442 | curr->softirq_disable_ip = ip; |
2438 | curr->softirq_disable_event = ++curr->irq_events; | 2443 | curr->softirq_disable_event = ++curr->irq_events; |
2439 | debug_atomic_inc(&softirqs_off_events); | 2444 | debug_atomic_inc(softirqs_off_events); |
2440 | DEBUG_LOCKS_WARN_ON(!softirq_count()); | 2445 | DEBUG_LOCKS_WARN_ON(!softirq_count()); |
2441 | } else | 2446 | } else |
2442 | debug_atomic_inc(&redundant_softirqs_off); | 2447 | debug_atomic_inc(redundant_softirqs_off); |
2443 | } | 2448 | } |
2444 | 2449 | ||
2445 | static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) | 2450 | static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) |
@@ -2644,7 +2649,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
2644 | return 0; | 2649 | return 0; |
2645 | break; | 2650 | break; |
2646 | case LOCK_USED: | 2651 | case LOCK_USED: |
2647 | debug_atomic_dec(&nr_unused_locks); | 2652 | debug_atomic_dec(nr_unused_locks); |
2648 | break; | 2653 | break; |
2649 | default: | 2654 | default: |
2650 | if (!debug_locks_off_graph_unlock()) | 2655 | if (!debug_locks_off_graph_unlock()) |
@@ -2750,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2750 | if (!class) | 2755 | if (!class) |
2751 | return 0; | 2756 | return 0; |
2752 | } | 2757 | } |
2753 | debug_atomic_inc((atomic_t *)&class->ops); | 2758 | atomic_inc((atomic_t *)&class->ops); |
2754 | if (very_verbose(class)) { | 2759 | if (very_verbose(class)) { |
2755 | printk("\nacquire class [%p] %s", class->key, class->name); | 2760 | printk("\nacquire class [%p] %s", class->key, class->name); |
2756 | if (class->name_version > 1) | 2761 | if (class->name_version > 1) |
@@ -3801,8 +3806,11 @@ void lockdep_rcu_dereference(const char *file, const int line) | |||
3801 | { | 3806 | { |
3802 | struct task_struct *curr = current; | 3807 | struct task_struct *curr = current; |
3803 | 3808 | ||
3809 | #ifndef CONFIG_PROVE_RCU_REPEATEDLY | ||
3804 | if (!debug_locks_off()) | 3810 | if (!debug_locks_off()) |
3805 | return; | 3811 | return; |
3812 | #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ | ||
3813 | /* Note: the following can be executed concurrently, so be careful. */ | ||
3806 | printk("\n===================================================\n"); | 3814 | printk("\n===================================================\n"); |
3807 | printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n"); | 3815 | printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n"); |
3808 | printk( "---------------------------------------------------\n"); | 3816 | printk( "---------------------------------------------------\n"); |
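[Editorial note] The lockdep.c call sites above now hand debug_atomic_inc()/debug_atomic_dec() the *name* of a statistic rather than the address of a global atomic_t; the macros in the next hunk (lockdep_internals.h) turn that name into a per-CPU structure member, while class->ops keeps a plain atomic_inc() because it is not part of that structure. A minimal user-space sketch of the pass-a-field-name idea, with hypothetical names (not the kernel implementation):

    /* Minimal analogue of a field-name-based counter macro. */
    #include <stdio.h>

    struct stats { long hits; long misses; };

    static struct stats cpu_stats;               /* stands in for one per-CPU copy */

    #define stat_inc(field) (cpu_stats.field++)  /* takes a field name, not a pointer */

    int main(void)
    {
            stat_inc(hits);
            stat_inc(hits);
            stat_inc(misses);
            printf("hits=%ld misses=%ld\n", cpu_stats.hits, cpu_stats.misses);
            return 0;
    }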
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index a2ee95ad1313..4f560cfedc8f 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h | |||
@@ -110,30 +110,60 @@ lockdep_count_backward_deps(struct lock_class *class) | |||
110 | #endif | 110 | #endif |
111 | 111 | ||
112 | #ifdef CONFIG_DEBUG_LOCKDEP | 112 | #ifdef CONFIG_DEBUG_LOCKDEP |
113 | |||
114 | #include <asm/local.h> | ||
113 | /* | 115 | /* |
114 | * Various lockdep statistics: | 116 | * Various lockdep statistics. |
117 | * We want them per cpu as they are often accessed in fast path | ||
118 | * and we want to avoid too much cache bouncing. | ||
115 | */ | 119 | */ |
116 | extern atomic_t chain_lookup_hits; | 120 | struct lockdep_stats { |
117 | extern atomic_t chain_lookup_misses; | 121 | int chain_lookup_hits; |
118 | extern atomic_t hardirqs_on_events; | 122 | int chain_lookup_misses; |
119 | extern atomic_t hardirqs_off_events; | 123 | int hardirqs_on_events; |
120 | extern atomic_t redundant_hardirqs_on; | 124 | int hardirqs_off_events; |
121 | extern atomic_t redundant_hardirqs_off; | 125 | int redundant_hardirqs_on; |
122 | extern atomic_t softirqs_on_events; | 126 | int redundant_hardirqs_off; |
123 | extern atomic_t softirqs_off_events; | 127 | int softirqs_on_events; |
124 | extern atomic_t redundant_softirqs_on; | 128 | int softirqs_off_events; |
125 | extern atomic_t redundant_softirqs_off; | 129 | int redundant_softirqs_on; |
126 | extern atomic_t nr_unused_locks; | 130 | int redundant_softirqs_off; |
127 | extern atomic_t nr_cyclic_checks; | 131 | int nr_unused_locks; |
128 | extern atomic_t nr_cyclic_check_recursions; | 132 | int nr_cyclic_checks; |
129 | extern atomic_t nr_find_usage_forwards_checks; | 133 | int nr_cyclic_check_recursions; |
130 | extern atomic_t nr_find_usage_forwards_recursions; | 134 | int nr_find_usage_forwards_checks; |
131 | extern atomic_t nr_find_usage_backwards_checks; | 135 | int nr_find_usage_forwards_recursions; |
132 | extern atomic_t nr_find_usage_backwards_recursions; | 136 | int nr_find_usage_backwards_checks; |
133 | # define debug_atomic_inc(ptr) atomic_inc(ptr) | 137 | int nr_find_usage_backwards_recursions; |
134 | # define debug_atomic_dec(ptr) atomic_dec(ptr) | 138 | }; |
135 | # define debug_atomic_read(ptr) atomic_read(ptr) | 139 | |
140 | DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats); | ||
141 | |||
142 | #define __debug_atomic_inc(ptr) \ | ||
143 | this_cpu_inc(lockdep_stats.ptr); | ||
144 | |||
145 | #define debug_atomic_inc(ptr) { \ | ||
146 | WARN_ON_ONCE(!irqs_disabled()); \ | ||
147 | __this_cpu_inc(lockdep_stats.ptr); \ | ||
148 | } | ||
149 | |||
150 | #define debug_atomic_dec(ptr) { \ | ||
151 | WARN_ON_ONCE(!irqs_disabled()); \ | ||
152 | __this_cpu_dec(lockdep_stats.ptr); \ | ||
153 | } | ||
154 | |||
155 | #define debug_atomic_read(ptr) ({ \ | ||
156 | struct lockdep_stats *__cpu_lockdep_stats; \ | ||
157 | unsigned long long __total = 0; \ | ||
158 | int __cpu; \ | ||
159 | for_each_possible_cpu(__cpu) { \ | ||
160 | __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \ | ||
161 | __total += __cpu_lockdep_stats->ptr; \ | ||
162 | } \ | ||
163 | __total; \ | ||
164 | }) | ||
136 | #else | 165 | #else |
166 | # define __debug_atomic_inc(ptr) do { } while (0) | ||
137 | # define debug_atomic_inc(ptr) do { } while (0) | 167 | # define debug_atomic_inc(ptr) do { } while (0) |
138 | # define debug_atomic_dec(ptr) do { } while (0) | 168 | # define debug_atomic_dec(ptr) do { } while (0) |
139 | # define debug_atomic_read(ptr) 0 | 169 | # define debug_atomic_read(ptr) 0 |
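[Editorial note] The new debug_atomic_read() above folds every possible CPU's copy of a statistic into one unsigned long long total. A rough stand-alone sketch of that aggregation, with an array indexed by CPU id standing in for per_cpu() and made-up names (uses a GCC statement expression, as the kernel macro does):

    #include <stdio.h>

    #define NR_CPUS_DEMO 4

    struct lockdep_stats_demo { int chain_lookup_hits; };

    static struct lockdep_stats_demo stats[NR_CPUS_DEMO];  /* one copy per CPU */

    /* Sum one field across all CPUs, mirroring the shape of debug_atomic_read(). */
    #define stats_read(field) ({                            \
            unsigned long long __total = 0;                 \
            int __cpu;                                      \
            for (__cpu = 0; __cpu < NR_CPUS_DEMO; __cpu++)  \
                    __total += stats[__cpu].field;          \
            __total;                                        \
    })

    int main(void)
    {
            stats[0].chain_lookup_hits = 3;
            stats[2].chain_lookup_hits = 5;
            printf("hits=%llu\n", stats_read(chain_lookup_hits));
            return 0;
    }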
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index d4aba4f3584c..59b76c8ce9d7 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
@@ -184,34 +184,34 @@ static const struct file_operations proc_lockdep_chains_operations = { | |||
184 | static void lockdep_stats_debug_show(struct seq_file *m) | 184 | static void lockdep_stats_debug_show(struct seq_file *m) |
185 | { | 185 | { |
186 | #ifdef CONFIG_DEBUG_LOCKDEP | 186 | #ifdef CONFIG_DEBUG_LOCKDEP |
187 | unsigned int hi1 = debug_atomic_read(&hardirqs_on_events), | 187 | unsigned long long hi1 = debug_atomic_read(hardirqs_on_events), |
188 | hi2 = debug_atomic_read(&hardirqs_off_events), | 188 | hi2 = debug_atomic_read(hardirqs_off_events), |
189 | hr1 = debug_atomic_read(&redundant_hardirqs_on), | 189 | hr1 = debug_atomic_read(redundant_hardirqs_on), |
190 | hr2 = debug_atomic_read(&redundant_hardirqs_off), | 190 | hr2 = debug_atomic_read(redundant_hardirqs_off), |
191 | si1 = debug_atomic_read(&softirqs_on_events), | 191 | si1 = debug_atomic_read(softirqs_on_events), |
192 | si2 = debug_atomic_read(&softirqs_off_events), | 192 | si2 = debug_atomic_read(softirqs_off_events), |
193 | sr1 = debug_atomic_read(&redundant_softirqs_on), | 193 | sr1 = debug_atomic_read(redundant_softirqs_on), |
194 | sr2 = debug_atomic_read(&redundant_softirqs_off); | 194 | sr2 = debug_atomic_read(redundant_softirqs_off); |
195 | 195 | ||
196 | seq_printf(m, " chain lookup misses: %11u\n", | 196 | seq_printf(m, " chain lookup misses: %11llu\n", |
197 | debug_atomic_read(&chain_lookup_misses)); | 197 | debug_atomic_read(chain_lookup_misses)); |
198 | seq_printf(m, " chain lookup hits: %11u\n", | 198 | seq_printf(m, " chain lookup hits: %11llu\n", |
199 | debug_atomic_read(&chain_lookup_hits)); | 199 | debug_atomic_read(chain_lookup_hits)); |
200 | seq_printf(m, " cyclic checks: %11u\n", | 200 | seq_printf(m, " cyclic checks: %11llu\n", |
201 | debug_atomic_read(&nr_cyclic_checks)); | 201 | debug_atomic_read(nr_cyclic_checks)); |
202 | seq_printf(m, " find-mask forwards checks: %11u\n", | 202 | seq_printf(m, " find-mask forwards checks: %11llu\n", |
203 | debug_atomic_read(&nr_find_usage_forwards_checks)); | 203 | debug_atomic_read(nr_find_usage_forwards_checks)); |
204 | seq_printf(m, " find-mask backwards checks: %11u\n", | 204 | seq_printf(m, " find-mask backwards checks: %11llu\n", |
205 | debug_atomic_read(&nr_find_usage_backwards_checks)); | 205 | debug_atomic_read(nr_find_usage_backwards_checks)); |
206 | 206 | ||
207 | seq_printf(m, " hardirq on events: %11u\n", hi1); | 207 | seq_printf(m, " hardirq on events: %11llu\n", hi1); |
208 | seq_printf(m, " hardirq off events: %11u\n", hi2); | 208 | seq_printf(m, " hardirq off events: %11llu\n", hi2); |
209 | seq_printf(m, " redundant hardirq ons: %11u\n", hr1); | 209 | seq_printf(m, " redundant hardirq ons: %11llu\n", hr1); |
210 | seq_printf(m, " redundant hardirq offs: %11u\n", hr2); | 210 | seq_printf(m, " redundant hardirq offs: %11llu\n", hr2); |
211 | seq_printf(m, " softirq on events: %11u\n", si1); | 211 | seq_printf(m, " softirq on events: %11llu\n", si1); |
212 | seq_printf(m, " softirq off events: %11u\n", si2); | 212 | seq_printf(m, " softirq off events: %11llu\n", si2); |
213 | seq_printf(m, " redundant softirq ons: %11u\n", sr1); | 213 | seq_printf(m, " redundant softirq ons: %11llu\n", sr1); |
214 | seq_printf(m, " redundant softirq offs: %11u\n", sr2); | 214 | seq_printf(m, " redundant softirq offs: %11llu\n", sr2); |
215 | #endif | 215 | #endif |
216 | } | 216 | } |
217 | 217 | ||
@@ -263,7 +263,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) | |||
263 | #endif | 263 | #endif |
264 | } | 264 | } |
265 | #ifdef CONFIG_DEBUG_LOCKDEP | 265 | #ifdef CONFIG_DEBUG_LOCKDEP |
266 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); | 266 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused); |
267 | #endif | 267 | #endif |
268 | seq_printf(m, " lock-classes: %11lu [max: %lu]\n", | 268 | seq_printf(m, " lock-classes: %11lu [max: %lu]\n", |
269 | nr_lock_classes, MAX_LOCKDEP_KEYS); | 269 | nr_lock_classes, MAX_LOCKDEP_KEYS); |
diff --git a/kernel/module.c b/kernel/module.c index 1016b75b026a..b8a1e313448c 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -59,8 +59,6 @@ | |||
59 | #define CREATE_TRACE_POINTS | 59 | #define CREATE_TRACE_POINTS |
60 | #include <trace/events/module.h> | 60 | #include <trace/events/module.h> |
61 | 61 | ||
62 | EXPORT_TRACEPOINT_SYMBOL(module_get); | ||
63 | |||
64 | #if 0 | 62 | #if 0 |
65 | #define DEBUGP printk | 63 | #define DEBUGP printk |
66 | #else | 64 | #else |
@@ -515,6 +513,9 @@ MODINFO_ATTR(srcversion); | |||
515 | static char last_unloaded_module[MODULE_NAME_LEN+1]; | 513 | static char last_unloaded_module[MODULE_NAME_LEN+1]; |
516 | 514 | ||
517 | #ifdef CONFIG_MODULE_UNLOAD | 515 | #ifdef CONFIG_MODULE_UNLOAD |
516 | |||
517 | EXPORT_TRACEPOINT_SYMBOL(module_get); | ||
518 | |||
518 | /* Init the unload section of the module. */ | 519 | /* Init the unload section of the module. */ |
519 | static void module_unload_init(struct module *mod) | 520 | static void module_unload_init(struct module *mod) |
520 | { | 521 | { |
@@ -867,8 +868,7 @@ void module_put(struct module *module) | |||
867 | smp_wmb(); /* see comment in module_refcount */ | 868 | smp_wmb(); /* see comment in module_refcount */ |
868 | __this_cpu_inc(module->refptr->decs); | 869 | __this_cpu_inc(module->refptr->decs); |
869 | 870 | ||
870 | trace_module_put(module, _RET_IP_, | 871 | trace_module_put(module, _RET_IP_); |
871 | __this_cpu_read(module->refptr->decs)); | ||
872 | /* Maybe they're waiting for us to drop reference? */ | 872 | /* Maybe they're waiting for us to drop reference? */ |
873 | if (unlikely(!module_is_live(module))) | 873 | if (unlikely(!module_is_live(module))) |
874 | wake_up_process(module->waiter); | 874 | wake_up_process(module->waiter); |
diff --git a/kernel/profile.c b/kernel/profile.c index a55d3a367ae8..dfadc5b729f1 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -127,8 +127,10 @@ int __ref profile_init(void) | |||
127 | return 0; | 127 | return 0; |
128 | 128 | ||
129 | prof_buffer = vmalloc(buffer_bytes); | 129 | prof_buffer = vmalloc(buffer_bytes); |
130 | if (prof_buffer) | 130 | if (prof_buffer) { |
131 | memset(prof_buffer, 0, buffer_bytes); | ||
131 | return 0; | 132 | return 0; |
133 | } | ||
132 | 134 | ||
133 | free_cpumask_var(prof_cpu_mask); | 135 | free_cpumask_var(prof_cpu_mask); |
134 | return -ENOMEM; | 136 | return -ENOMEM; |
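[Editorial note] The profile_init() hunk above zeroes the freshly vmalloc()ed buffer before reporting success. The same allocate/check/zero shape in plain C, with malloc() standing in for vmalloc():

    #include <stdlib.h>
    #include <string.h>

    /* Return a zero-filled buffer, or NULL on allocation failure. */
    static void *alloc_zeroed(size_t bytes)
    {
            void *buf = malloc(bytes);      /* stands in for vmalloc() */

            if (buf)
                    memset(buf, 0, bytes);  /* mirrors the added memset() */
            return buf;
    }

    int main(void)
    {
            free(alloc_zeroed(4096));
            return 0;
    }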
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 9fb51237b18c..6af9cdd558b7 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/highmem.h> | 15 | #include <linux/highmem.h> |
16 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/ptrace.h> | 17 | #include <linux/ptrace.h> |
19 | #include <linux/security.h> | 18 | #include <linux/security.h> |
20 | #include <linux/signal.h> | 19 | #include <linux/signal.h> |
@@ -665,10 +664,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) | |||
665 | struct task_struct *child; | 664 | struct task_struct *child; |
666 | long ret; | 665 | long ret; |
667 | 666 | ||
668 | /* | ||
669 | * This lock_kernel fixes a subtle race with suid exec | ||
670 | */ | ||
671 | lock_kernel(); | ||
672 | if (request == PTRACE_TRACEME) { | 667 | if (request == PTRACE_TRACEME) { |
673 | ret = ptrace_traceme(); | 668 | ret = ptrace_traceme(); |
674 | if (!ret) | 669 | if (!ret) |
@@ -702,7 +697,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) | |||
702 | out_put_task_struct: | 697 | out_put_task_struct: |
703 | put_task_struct(child); | 698 | put_task_struct(child); |
704 | out: | 699 | out: |
705 | unlock_kernel(); | ||
706 | return ret; | 700 | return ret; |
707 | } | 701 | } |
708 | 702 | ||
@@ -812,10 +806,6 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | |||
812 | struct task_struct *child; | 806 | struct task_struct *child; |
813 | long ret; | 807 | long ret; |
814 | 808 | ||
815 | /* | ||
816 | * This lock_kernel fixes a subtle race with suid exec | ||
817 | */ | ||
818 | lock_kernel(); | ||
819 | if (request == PTRACE_TRACEME) { | 809 | if (request == PTRACE_TRACEME) { |
820 | ret = ptrace_traceme(); | 810 | ret = ptrace_traceme(); |
821 | goto out; | 811 | goto out; |
@@ -845,7 +835,6 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | |||
845 | out_put_task_struct: | 835 | out_put_task_struct: |
846 | put_task_struct(child); | 836 | put_task_struct(child); |
847 | out: | 837 | out: |
848 | unlock_kernel(); | ||
849 | return ret; | 838 | return ret; |
850 | } | 839 | } |
851 | #endif /* CONFIG_COMPAT */ | 840 | #endif /* CONFIG_COMPAT */ |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 03a7ea1579f6..72a8dc9567f5 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/kernel_stat.h> | ||
48 | #include <linux/hardirq.h> | 47 | #include <linux/hardirq.h> |
49 | 48 | ||
50 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
@@ -64,9 +63,6 @@ struct lockdep_map rcu_sched_lock_map = | |||
64 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map); | 63 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map); |
65 | #endif | 64 | #endif |
66 | 65 | ||
67 | int rcu_scheduler_active __read_mostly; | ||
68 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
69 | |||
70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 66 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
71 | 67 | ||
72 | int debug_lockdep_rcu_enabled(void) | 68 | int debug_lockdep_rcu_enabled(void) |
@@ -97,21 +93,6 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); | |||
97 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 93 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
98 | 94 | ||
99 | /* | 95 | /* |
100 | * This function is invoked towards the end of the scheduler's initialization | ||
101 | * process. Before this is called, the idle task might contain | ||
102 | * RCU read-side critical sections (during which time, this idle | ||
103 | * task is booting the system). After this function is called, the | ||
104 | * idle tasks are prohibited from containing RCU read-side critical | ||
105 | * sections. | ||
106 | */ | ||
107 | void rcu_scheduler_starting(void) | ||
108 | { | ||
109 | WARN_ON(num_online_cpus() != 1); | ||
110 | WARN_ON(nr_context_switches() > 0); | ||
111 | rcu_scheduler_active = 1; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Awaken the corresponding synchronize_rcu() instance now that a | 96 | * Awaken the corresponding synchronize_rcu() instance now that a |
116 | * grace period has elapsed. | 97 | * grace period has elapsed. |
117 | */ | 98 | */ |
@@ -122,3 +103,14 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
122 | rcu = container_of(head, struct rcu_synchronize, head); | 103 | rcu = container_of(head, struct rcu_synchronize, head); |
123 | complete(&rcu->completion); | 104 | complete(&rcu->completion); |
124 | } | 105 | } |
106 | |||
107 | #ifdef CONFIG_PROVE_RCU | ||
108 | /* | ||
109 | * wrapper function to avoid #include problems. | ||
110 | */ | ||
111 | int rcu_my_thread_group_empty(void) | ||
112 | { | ||
113 | return thread_group_empty(current); | ||
114 | } | ||
115 | EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty); | ||
116 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
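[Editorial note] rcu_my_thread_group_empty() above exists only so that code which cannot pull in the scheduler's internals can still ask a scheduler question through one exported function. A stand-alone sketch of that thin-wrapper idea, with hypothetical names:

    #include <stdio.h>

    /* "Heavy" internal representation that callers should not need to see. */
    struct group { int nmembers; };

    static struct group current_group = { .nmembers = 1 };

    /* Exported predicate: the only thing other translation units would call. */
    static int group_is_empty(void)
    {
            return current_group.nmembers <= 1;
    }

    int main(void)
    {
            printf("empty? %d\n", group_is_empty());
            return 0;
    }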
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 9f6d9ff2572c..38729d3cd236 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
@@ -44,9 +44,9 @@ struct rcu_ctrlblk { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | /* Definition for rcupdate control block. */ | 46 | /* Definition for rcupdate control block. */ |
47 | static struct rcu_ctrlblk rcu_ctrlblk = { | 47 | static struct rcu_ctrlblk rcu_sched_ctrlblk = { |
48 | .donetail = &rcu_ctrlblk.rcucblist, | 48 | .donetail = &rcu_sched_ctrlblk.rcucblist, |
49 | .curtail = &rcu_ctrlblk.rcucblist, | 49 | .curtail = &rcu_sched_ctrlblk.rcucblist, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | 52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
@@ -54,6 +54,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = { | |||
54 | .curtail = &rcu_bh_ctrlblk.rcucblist, | 54 | .curtail = &rcu_bh_ctrlblk.rcucblist, |
55 | }; | 55 | }; |
56 | 56 | ||
57 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
58 | int rcu_scheduler_active __read_mostly; | ||
59 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
60 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
61 | |||
57 | #ifdef CONFIG_NO_HZ | 62 | #ifdef CONFIG_NO_HZ |
58 | 63 | ||
59 | static long rcu_dynticks_nesting = 1; | 64 | static long rcu_dynticks_nesting = 1; |
@@ -108,7 +113,8 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) | |||
108 | */ | 113 | */ |
109 | void rcu_sched_qs(int cpu) | 114 | void rcu_sched_qs(int cpu) |
110 | { | 115 | { |
111 | if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) | 116 | if (rcu_qsctr_help(&rcu_sched_ctrlblk) + |
117 | rcu_qsctr_help(&rcu_bh_ctrlblk)) | ||
112 | raise_softirq(RCU_SOFTIRQ); | 118 | raise_softirq(RCU_SOFTIRQ); |
113 | } | 119 | } |
114 | 120 | ||
@@ -173,7 +179,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
173 | */ | 179 | */ |
174 | static void rcu_process_callbacks(struct softirq_action *unused) | 180 | static void rcu_process_callbacks(struct softirq_action *unused) |
175 | { | 181 | { |
176 | __rcu_process_callbacks(&rcu_ctrlblk); | 182 | __rcu_process_callbacks(&rcu_sched_ctrlblk); |
177 | __rcu_process_callbacks(&rcu_bh_ctrlblk); | 183 | __rcu_process_callbacks(&rcu_bh_ctrlblk); |
178 | } | 184 | } |
179 | 185 | ||
@@ -187,7 +193,8 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
187 | * | 193 | * |
188 | * Cool, huh? (Due to Josh Triplett.) | 194 | * Cool, huh? (Due to Josh Triplett.) |
189 | * | 195 | * |
190 | * But we want to make this a static inline later. | 196 | * But we want to make this a static inline later. The cond_resched() |
197 | * currently makes this problematic. | ||
191 | */ | 198 | */ |
192 | void synchronize_sched(void) | 199 | void synchronize_sched(void) |
193 | { | 200 | { |
@@ -195,12 +202,6 @@ void synchronize_sched(void) | |||
195 | } | 202 | } |
196 | EXPORT_SYMBOL_GPL(synchronize_sched); | 203 | EXPORT_SYMBOL_GPL(synchronize_sched); |
197 | 204 | ||
198 | void synchronize_rcu_bh(void) | ||
199 | { | ||
200 | synchronize_sched(); | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
203 | |||
204 | /* | 205 | /* |
205 | * Helper function for call_rcu() and call_rcu_bh(). | 206 | * Helper function for call_rcu() and call_rcu_bh(). |
206 | */ | 207 | */ |
@@ -226,7 +227,7 @@ static void __call_rcu(struct rcu_head *head, | |||
226 | */ | 227 | */ |
227 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | 228 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
228 | { | 229 | { |
229 | __call_rcu(head, func, &rcu_ctrlblk); | 230 | __call_rcu(head, func, &rcu_sched_ctrlblk); |
230 | } | 231 | } |
231 | EXPORT_SYMBOL_GPL(call_rcu); | 232 | EXPORT_SYMBOL_GPL(call_rcu); |
232 | 233 | ||
@@ -244,11 +245,13 @@ void rcu_barrier(void) | |||
244 | { | 245 | { |
245 | struct rcu_synchronize rcu; | 246 | struct rcu_synchronize rcu; |
246 | 247 | ||
248 | init_rcu_head_on_stack(&rcu.head); | ||
247 | init_completion(&rcu.completion); | 249 | init_completion(&rcu.completion); |
248 | /* Will wake me after RCU finished. */ | 250 | /* Will wake me after RCU finished. */ |
249 | call_rcu(&rcu.head, wakeme_after_rcu); | 251 | call_rcu(&rcu.head, wakeme_after_rcu); |
250 | /* Wait for it. */ | 252 | /* Wait for it. */ |
251 | wait_for_completion(&rcu.completion); | 253 | wait_for_completion(&rcu.completion); |
254 | destroy_rcu_head_on_stack(&rcu.head); | ||
252 | } | 255 | } |
253 | EXPORT_SYMBOL_GPL(rcu_barrier); | 256 | EXPORT_SYMBOL_GPL(rcu_barrier); |
254 | 257 | ||
@@ -256,11 +259,13 @@ void rcu_barrier_bh(void) | |||
256 | { | 259 | { |
257 | struct rcu_synchronize rcu; | 260 | struct rcu_synchronize rcu; |
258 | 261 | ||
262 | init_rcu_head_on_stack(&rcu.head); | ||
259 | init_completion(&rcu.completion); | 263 | init_completion(&rcu.completion); |
260 | /* Will wake me after RCU finished. */ | 264 | /* Will wake me after RCU finished. */ |
261 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | 265 | call_rcu_bh(&rcu.head, wakeme_after_rcu); |
262 | /* Wait for it. */ | 266 | /* Wait for it. */ |
263 | wait_for_completion(&rcu.completion); | 267 | wait_for_completion(&rcu.completion); |
268 | destroy_rcu_head_on_stack(&rcu.head); | ||
264 | } | 269 | } |
265 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | 270 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); |
266 | 271 | ||
@@ -268,11 +273,13 @@ void rcu_barrier_sched(void) | |||
268 | { | 273 | { |
269 | struct rcu_synchronize rcu; | 274 | struct rcu_synchronize rcu; |
270 | 275 | ||
276 | init_rcu_head_on_stack(&rcu.head); | ||
271 | init_completion(&rcu.completion); | 277 | init_completion(&rcu.completion); |
272 | /* Will wake me after RCU finished. */ | 278 | /* Will wake me after RCU finished. */ |
273 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | 279 | call_rcu_sched(&rcu.head, wakeme_after_rcu); |
274 | /* Wait for it. */ | 280 | /* Wait for it. */ |
275 | wait_for_completion(&rcu.completion); | 281 | wait_for_completion(&rcu.completion); |
282 | destroy_rcu_head_on_stack(&rcu.head); | ||
276 | } | 283 | } |
277 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | 284 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); |
278 | 285 | ||
@@ -280,3 +287,5 @@ void __init rcu_init(void) | |||
280 | { | 287 | { |
281 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 288 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
282 | } | 289 | } |
290 | |||
291 | #include "rcutiny_plugin.h" | ||
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h new file mode 100644 index 000000000000..d223a92bc742 --- /dev/null +++ b/kernel/rcutiny_plugin.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | ||
3 | * Internal non-public definitions that provide either classic | ||
4 | * or preemptable semantics. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright IBM Corporation, 2009 | ||
21 | * | ||
22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
23 | */ | ||
24 | |||
25 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
26 | |||
27 | #include <linux/kernel_stat.h> | ||
28 | |||
29 | /* | ||
30 | * During boot, we forgive RCU lockdep issues. After this function is | ||
31 | * invoked, we start taking RCU lockdep issues seriously. | ||
32 | */ | ||
33 | void rcu_scheduler_starting(void) | ||
34 | { | ||
35 | WARN_ON(nr_context_switches() > 0); | ||
36 | rcu_scheduler_active = 1; | ||
37 | } | ||
38 | |||
39 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 58df55bf83ed..077defb34571 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -464,9 +464,11 @@ static void rcu_bh_torture_synchronize(void) | |||
464 | { | 464 | { |
465 | struct rcu_bh_torture_synchronize rcu; | 465 | struct rcu_bh_torture_synchronize rcu; |
466 | 466 | ||
467 | init_rcu_head_on_stack(&rcu.head); | ||
467 | init_completion(&rcu.completion); | 468 | init_completion(&rcu.completion); |
468 | call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); | 469 | call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); |
469 | wait_for_completion(&rcu.completion); | 470 | wait_for_completion(&rcu.completion); |
471 | destroy_rcu_head_on_stack(&rcu.head); | ||
470 | } | 472 | } |
471 | 473 | ||
472 | static struct rcu_torture_ops rcu_bh_ops = { | 474 | static struct rcu_torture_ops rcu_bh_ops = { |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 3ec8160fc75f..d4437345706f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | ||
49 | 50 | ||
50 | #include "rcutree.h" | 51 | #include "rcutree.h" |
51 | 52 | ||
@@ -53,8 +54,8 @@ | |||
53 | 54 | ||
54 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | 55 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; |
55 | 56 | ||
56 | #define RCU_STATE_INITIALIZER(name) { \ | 57 | #define RCU_STATE_INITIALIZER(structname) { \ |
57 | .level = { &name.node[0] }, \ | 58 | .level = { &structname.node[0] }, \ |
58 | .levelcnt = { \ | 59 | .levelcnt = { \ |
59 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | 60 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ |
60 | NUM_RCU_LVL_1, \ | 61 | NUM_RCU_LVL_1, \ |
@@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | |||
65 | .signaled = RCU_GP_IDLE, \ | 66 | .signaled = RCU_GP_IDLE, \ |
66 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
67 | .completed = -300, \ | 68 | .completed = -300, \ |
68 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 69 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ |
69 | .orphan_cbs_list = NULL, \ | 70 | .orphan_cbs_list = NULL, \ |
70 | .orphan_cbs_tail = &name.orphan_cbs_list, \ | 71 | .orphan_cbs_tail = &structname.orphan_cbs_list, \ |
71 | .orphan_qlen = 0, \ | 72 | .orphan_qlen = 0, \ |
72 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \ | 73 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ |
73 | .n_force_qs = 0, \ | 74 | .n_force_qs = 0, \ |
74 | .n_force_qs_ngp = 0, \ | 75 | .n_force_qs_ngp = 0, \ |
76 | .name = #structname, \ | ||
75 | } | 77 | } |
76 | 78 | ||
77 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); | 79 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); |
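[Editorial note] RCU_STATE_INITIALIZER() above now records its own argument as a string via the preprocessor's stringize operator (#structname), and that .name is what the reworked stall-warning printk()s later print. The stringization trick in isolation, with made-up names:

    #include <stdio.h>

    struct flavor { const char *name; };

    /* The macro argument becomes both the initialized object and its label. */
    #define FLAVOR_INITIALIZER(structname) { .name = #structname }

    static struct flavor rcu_sched_demo = FLAVOR_INITIALIZER(rcu_sched_demo);

    int main(void)
    {
            printf("flavor is %s\n", rcu_sched_demo.name);  /* "rcu_sched_demo" */
            return 0;
    }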
@@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
80 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 82 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
81 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 83 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
82 | 84 | ||
85 | int rcu_scheduler_active __read_mostly; | ||
86 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
87 | |||
83 | /* | 88 | /* |
84 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 89 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
85 | * permit this function to be invoked without holding the root rcu_node | 90 | * permit this function to be invoked without holding the root rcu_node |
@@ -97,25 +102,32 @@ static int rcu_gp_in_progress(struct rcu_state *rsp) | |||
97 | */ | 102 | */ |
98 | void rcu_sched_qs(int cpu) | 103 | void rcu_sched_qs(int cpu) |
99 | { | 104 | { |
100 | struct rcu_data *rdp; | 105 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); |
101 | 106 | ||
102 | rdp = &per_cpu(rcu_sched_data, cpu); | ||
103 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
104 | barrier(); | 108 | barrier(); |
105 | rdp->passed_quiesc = 1; | 109 | rdp->passed_quiesc = 1; |
106 | rcu_preempt_note_context_switch(cpu); | ||
107 | } | 110 | } |
108 | 111 | ||
109 | void rcu_bh_qs(int cpu) | 112 | void rcu_bh_qs(int cpu) |
110 | { | 113 | { |
111 | struct rcu_data *rdp; | 114 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); |
112 | 115 | ||
113 | rdp = &per_cpu(rcu_bh_data, cpu); | ||
114 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 116 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
115 | barrier(); | 117 | barrier(); |
116 | rdp->passed_quiesc = 1; | 118 | rdp->passed_quiesc = 1; |
117 | } | 119 | } |
118 | 120 | ||
121 | /* | ||
122 | * Note a context switch. This is a quiescent state for RCU-sched, | ||
123 | * and requires special handling for preemptible RCU. | ||
124 | */ | ||
125 | void rcu_note_context_switch(int cpu) | ||
126 | { | ||
127 | rcu_sched_qs(cpu); | ||
128 | rcu_preempt_note_context_switch(cpu); | ||
129 | } | ||
130 | |||
119 | #ifdef CONFIG_NO_HZ | 131 | #ifdef CONFIG_NO_HZ |
120 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 132 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
121 | .dynticks_nesting = 1, | 133 | .dynticks_nesting = 1, |
@@ -438,6 +450,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
438 | 450 | ||
439 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 451 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
440 | 452 | ||
453 | int rcu_cpu_stall_panicking __read_mostly; | ||
454 | |||
441 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 455 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
442 | { | 456 | { |
443 | rsp->gp_start = jiffies; | 457 | rsp->gp_start = jiffies; |
@@ -470,7 +484,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
470 | 484 | ||
471 | /* OK, time to rat on our buddy... */ | 485 | /* OK, time to rat on our buddy... */ |
472 | 486 | ||
473 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 487 | printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", |
488 | rsp->name); | ||
474 | rcu_for_each_leaf_node(rsp, rnp) { | 489 | rcu_for_each_leaf_node(rsp, rnp) { |
475 | raw_spin_lock_irqsave(&rnp->lock, flags); | 490 | raw_spin_lock_irqsave(&rnp->lock, flags); |
476 | rcu_print_task_stall(rnp); | 491 | rcu_print_task_stall(rnp); |
@@ -481,7 +496,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
481 | if (rnp->qsmask & (1UL << cpu)) | 496 | if (rnp->qsmask & (1UL << cpu)) |
482 | printk(" %d", rnp->grplo + cpu); | 497 | printk(" %d", rnp->grplo + cpu); |
483 | } | 498 | } |
484 | printk(" (detected by %d, t=%ld jiffies)\n", | 499 | printk("} (detected by %d, t=%ld jiffies)\n", |
485 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); | 500 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); |
486 | trigger_all_cpu_backtrace(); | 501 | trigger_all_cpu_backtrace(); |
487 | 502 | ||
@@ -497,8 +512,8 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
497 | unsigned long flags; | 512 | unsigned long flags; |
498 | struct rcu_node *rnp = rcu_get_root(rsp); | 513 | struct rcu_node *rnp = rcu_get_root(rsp); |
499 | 514 | ||
500 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", | 515 | printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", |
501 | smp_processor_id(), jiffies - rsp->gp_start); | 516 | rsp->name, smp_processor_id(), jiffies - rsp->gp_start); |
502 | trigger_all_cpu_backtrace(); | 517 | trigger_all_cpu_backtrace(); |
503 | 518 | ||
504 | raw_spin_lock_irqsave(&rnp->lock, flags); | 519 | raw_spin_lock_irqsave(&rnp->lock, flags); |
@@ -515,6 +530,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
515 | long delta; | 530 | long delta; |
516 | struct rcu_node *rnp; | 531 | struct rcu_node *rnp; |
517 | 532 | ||
533 | if (rcu_cpu_stall_panicking) | ||
534 | return; | ||
518 | delta = jiffies - rsp->jiffies_stall; | 535 | delta = jiffies - rsp->jiffies_stall; |
519 | rnp = rdp->mynode; | 536 | rnp = rdp->mynode; |
520 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { | 537 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { |
@@ -529,6 +546,21 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
529 | } | 546 | } |
530 | } | 547 | } |
531 | 548 | ||
549 | static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) | ||
550 | { | ||
551 | rcu_cpu_stall_panicking = 1; | ||
552 | return NOTIFY_DONE; | ||
553 | } | ||
554 | |||
555 | static struct notifier_block rcu_panic_block = { | ||
556 | .notifier_call = rcu_panic, | ||
557 | }; | ||
558 | |||
559 | static void __init check_cpu_stall_init(void) | ||
560 | { | ||
561 | atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); | ||
562 | } | ||
563 | |||
532 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 564 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
533 | 565 | ||
534 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 566 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
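[Editorial note] check_cpu_stall() now bails out as soon as rcu_cpu_stall_panicking is set, and rcu_panic() sets that flag from a panic notifier registered at init time, so stall warnings stop firing once the system is panicking. A toy user-space model of a notifier chain plus the early-return check (hypothetical names, nothing kernel-specific):

    #include <stdio.h>

    static int panicking;                       /* models rcu_cpu_stall_panicking */

    typedef int (*notifier_fn)(void *data);

    static notifier_fn chain[4];
    static int nchain;

    static void notifier_register(notifier_fn fn)
    {
            chain[nchain++] = fn;
    }

    static void notify_all(void *data)
    {
            int i;

            for (i = 0; i < nchain; i++)
                    chain[i](data);
    }

    static int stop_stall_warnings(void *data)
    {
            panicking = 1;                      /* models rcu_panic() */
            return 0;
    }

    static void check_stall(void)
    {
            if (panicking)
                    return;                     /* models the added early return */
            printf("would check for RCU CPU stalls\n");
    }

    int main(void)
    {
            notifier_register(stop_stall_warnings);
            check_stall();                      /* prints the message */
            notify_all(NULL);                   /* the "panic" fires the chain */
            check_stall();                      /* now silent */
            return 0;
    }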
@@ -539,6 +571,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
539 | { | 571 | { |
540 | } | 572 | } |
541 | 573 | ||
574 | static void __init check_cpu_stall_init(void) | ||
575 | { | ||
576 | } | ||
577 | |||
542 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 578 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
543 | 579 | ||
544 | /* | 580 | /* |
@@ -1125,8 +1161,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1125 | */ | 1161 | */ |
1126 | void rcu_check_callbacks(int cpu, int user) | 1162 | void rcu_check_callbacks(int cpu, int user) |
1127 | { | 1163 | { |
1128 | if (!rcu_pending(cpu)) | ||
1129 | return; /* if nothing for RCU to do. */ | ||
1130 | if (user || | 1164 | if (user || |
1131 | (idle_cpu(cpu) && rcu_scheduler_active && | 1165 | (idle_cpu(cpu) && rcu_scheduler_active && |
1132 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 1166 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
@@ -1158,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int user) | |||
1158 | rcu_bh_qs(cpu); | 1192 | rcu_bh_qs(cpu); |
1159 | } | 1193 | } |
1160 | rcu_preempt_check_callbacks(cpu); | 1194 | rcu_preempt_check_callbacks(cpu); |
1161 | raise_softirq(RCU_SOFTIRQ); | 1195 | if (rcu_pending(cpu)) |
1196 | raise_softirq(RCU_SOFTIRQ); | ||
1162 | } | 1197 | } |
1163 | 1198 | ||
1164 | #ifdef CONFIG_SMP | 1199 | #ifdef CONFIG_SMP |
@@ -1236,11 +1271,11 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1236 | break; /* grace period idle or initializing, ignore. */ | 1271 | break; /* grace period idle or initializing, ignore. */ |
1237 | 1272 | ||
1238 | case RCU_SAVE_DYNTICK: | 1273 | case RCU_SAVE_DYNTICK: |
1239 | |||
1240 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
1241 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) | 1274 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) |
1242 | break; /* So gcc recognizes the dead code. */ | 1275 | break; /* So gcc recognizes the dead code. */ |
1243 | 1276 | ||
1277 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
1278 | |||
1244 | /* Record dyntick-idle state. */ | 1279 | /* Record dyntick-idle state. */ |
1245 | force_qs_rnp(rsp, dyntick_save_progress_counter); | 1280 | force_qs_rnp(rsp, dyntick_save_progress_counter); |
1246 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | 1281 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
@@ -1449,11 +1484,13 @@ void synchronize_sched(void) | |||
1449 | if (rcu_blocking_is_gp()) | 1484 | if (rcu_blocking_is_gp()) |
1450 | return; | 1485 | return; |
1451 | 1486 | ||
1487 | init_rcu_head_on_stack(&rcu.head); | ||
1452 | init_completion(&rcu.completion); | 1488 | init_completion(&rcu.completion); |
1453 | /* Will wake me after RCU finished. */ | 1489 | /* Will wake me after RCU finished. */ |
1454 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | 1490 | call_rcu_sched(&rcu.head, wakeme_after_rcu); |
1455 | /* Wait for it. */ | 1491 | /* Wait for it. */ |
1456 | wait_for_completion(&rcu.completion); | 1492 | wait_for_completion(&rcu.completion); |
1493 | destroy_rcu_head_on_stack(&rcu.head); | ||
1457 | } | 1494 | } |
1458 | EXPORT_SYMBOL_GPL(synchronize_sched); | 1495 | EXPORT_SYMBOL_GPL(synchronize_sched); |
1459 | 1496 | ||
@@ -1473,11 +1510,13 @@ void synchronize_rcu_bh(void) | |||
1473 | if (rcu_blocking_is_gp()) | 1510 | if (rcu_blocking_is_gp()) |
1474 | return; | 1511 | return; |
1475 | 1512 | ||
1513 | init_rcu_head_on_stack(&rcu.head); | ||
1476 | init_completion(&rcu.completion); | 1514 | init_completion(&rcu.completion); |
1477 | /* Will wake me after RCU finished. */ | 1515 | /* Will wake me after RCU finished. */ |
1478 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | 1516 | call_rcu_bh(&rcu.head, wakeme_after_rcu); |
1479 | /* Wait for it. */ | 1517 | /* Wait for it. */ |
1480 | wait_for_completion(&rcu.completion); | 1518 | wait_for_completion(&rcu.completion); |
1519 | destroy_rcu_head_on_stack(&rcu.head); | ||
1481 | } | 1520 | } |
1482 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | 1521 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
1483 | 1522 | ||
@@ -1498,8 +1537,20 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1498 | check_cpu_stall(rsp, rdp); | 1537 | check_cpu_stall(rsp, rdp); |
1499 | 1538 | ||
1500 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | 1539 | /* Is the RCU core waiting for a quiescent state from this CPU? */ |
1501 | if (rdp->qs_pending) { | 1540 | if (rdp->qs_pending && !rdp->passed_quiesc) { |
1541 | |||
1542 | /* | ||
1543 | * If force_quiescent_state() coming soon and this CPU | ||
1544 | * needs a quiescent state, and this is either RCU-sched | ||
1545 | * or RCU-bh, force a local reschedule. | ||
1546 | */ | ||
1502 | rdp->n_rp_qs_pending++; | 1547 | rdp->n_rp_qs_pending++; |
1548 | if (!rdp->preemptable && | ||
1549 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, | ||
1550 | jiffies)) | ||
1551 | set_need_resched(); | ||
1552 | } else if (rdp->qs_pending && rdp->passed_quiesc) { | ||
1553 | rdp->n_rp_report_qs++; | ||
1503 | return 1; | 1554 | return 1; |
1504 | } | 1555 | } |
1505 | 1556 | ||
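[Editorial note] The new __rcu_pending() branch above only nudges the CPU with set_need_resched() when a quiescent state is still owed and force_quiescent_state() is imminent; ULONG_CMP_LT() is a wrap-safe ordering test on unsigned long counters such as jiffies. A small stand-alone illustration of why a wrap-safe comparison matters, using a hypothetical macro of similar intent:

    #include <stdio.h>
    #include <limits.h>

    /* Wrap-safe "a is before b" test for a free-running unsigned counter. */
    #define TIME_BEFORE(a, b)  ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long near_wrap  = ULONG_MAX - 5;  /* counter about to wrap */
            unsigned long after_wrap = 10;             /* same counter, 16 ticks later */

            printf("wrap-safe:  %d\n", TIME_BEFORE(near_wrap, after_wrap)); /* 1 */
            printf("naive test: %d\n", near_wrap < after_wrap);             /* 0 */
            return 0;
    }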
@@ -1767,6 +1818,21 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
1767 | } | 1818 | } |
1768 | 1819 | ||
1769 | /* | 1820 | /* |
1821 | * This function is invoked towards the end of the scheduler's initialization | ||
1822 | * process. Before this is called, the idle task might contain | ||
1823 | * RCU read-side critical sections (during which time, this idle | ||
1824 | * task is booting the system). After this function is called, the | ||
1825 | * idle tasks are prohibited from containing RCU read-side critical | ||
1826 | * sections. This function also enables RCU lockdep checking. | ||
1827 | */ | ||
1828 | void rcu_scheduler_starting(void) | ||
1829 | { | ||
1830 | WARN_ON(num_online_cpus() != 1); | ||
1831 | WARN_ON(nr_context_switches() > 0); | ||
1832 | rcu_scheduler_active = 1; | ||
1833 | } | ||
1834 | |||
1835 | /* | ||
1770 | * Compute the per-level fanout, either using the exact fanout specified | 1836 | * Compute the per-level fanout, either using the exact fanout specified |
1771 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. | 1837 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. |
1772 | */ | 1838 | */ |
@@ -1849,6 +1915,14 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | 1915 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); |
1850 | } | 1916 | } |
1851 | } | 1917 | } |
1918 | |||
1919 | rnp = rsp->level[NUM_RCU_LVLS - 1]; | ||
1920 | for_each_possible_cpu(i) { | ||
1921 | while (i > rnp->grphi) | ||
1922 | rnp++; | ||
1923 | rsp->rda[i]->mynode = rnp; | ||
1924 | rcu_boot_init_percpu_data(i, rsp); | ||
1925 | } | ||
1852 | } | 1926 | } |
1853 | 1927 | ||
1854 | /* | 1928 | /* |
@@ -1859,19 +1933,11 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1859 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ | 1933 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
1860 | do { \ | 1934 | do { \ |
1861 | int i; \ | 1935 | int i; \ |
1862 | int j; \ | ||
1863 | struct rcu_node *rnp; \ | ||
1864 | \ | 1936 | \ |
1865 | rcu_init_one(rsp); \ | ||
1866 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | ||
1867 | j = 0; \ | ||
1868 | for_each_possible_cpu(i) { \ | 1937 | for_each_possible_cpu(i) { \ |
1869 | if (i > rnp[j].grphi) \ | ||
1870 | j++; \ | ||
1871 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | ||
1872 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | 1938 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ |
1873 | rcu_boot_init_percpu_data(i, rsp); \ | ||
1874 | } \ | 1939 | } \ |
1940 | rcu_init_one(rsp); \ | ||
1875 | } while (0) | 1941 | } while (0) |
1876 | 1942 | ||
1877 | void __init rcu_init(void) | 1943 | void __init rcu_init(void) |
@@ -1879,12 +1945,6 @@ void __init rcu_init(void) | |||
1879 | int cpu; | 1945 | int cpu; |
1880 | 1946 | ||
1881 | rcu_bootup_announce(); | 1947 | rcu_bootup_announce(); |
1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
1885 | #if NUM_RCU_LVL_4 != 0 | ||
1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); | ||
1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ | ||
1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1948 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1949 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
1890 | __rcu_init_preempt(); | 1950 | __rcu_init_preempt(); |
@@ -1898,6 +1958,7 @@ void __init rcu_init(void) | |||
1898 | cpu_notifier(rcu_cpu_notify, 0); | 1958 | cpu_notifier(rcu_cpu_notify, 0); |
1899 | for_each_online_cpu(cpu) | 1959 | for_each_online_cpu(cpu) |
1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 1960 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
1961 | check_cpu_stall_init(); | ||
1901 | } | 1962 | } |
1902 | 1963 | ||
1903 | #include "rcutree_plugin.h" | 1964 | #include "rcutree_plugin.h" |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 4a525a30e08e..14c040b18ed0 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -223,6 +223,7 @@ struct rcu_data { | |||
223 | /* 5) __rcu_pending() statistics. */ | 223 | /* 5) __rcu_pending() statistics. */ |
224 | unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ | 224 | unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ |
225 | unsigned long n_rp_qs_pending; | 225 | unsigned long n_rp_qs_pending; |
226 | unsigned long n_rp_report_qs; | ||
226 | unsigned long n_rp_cb_ready; | 227 | unsigned long n_rp_cb_ready; |
227 | unsigned long n_rp_cpu_needs_gp; | 228 | unsigned long n_rp_cpu_needs_gp; |
228 | unsigned long n_rp_gp_completed; | 229 | unsigned long n_rp_gp_completed; |
@@ -326,6 +327,7 @@ struct rcu_state { | |||
326 | unsigned long jiffies_stall; /* Time at which to check */ | 327 | unsigned long jiffies_stall; /* Time at which to check */ |
327 | /* for CPU stalls. */ | 328 | /* for CPU stalls. */ |
328 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 329 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
330 | char *name; /* Name of structure. */ | ||
329 | }; | 331 | }; |
330 | 332 | ||
331 | /* Return values for rcu_preempt_offline_tasks(). */ | 333 | /* Return values for rcu_preempt_offline_tasks(). */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 79b53bda8943..0e4f420245d9 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -26,6 +26,45 @@ | |||
26 | 26 | ||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | 28 | ||
29 | /* | ||
30 | * Check the RCU kernel configuration parameters and print informative | ||
31 | * messages about anything out of the ordinary. If you like #ifdef, you | ||
32 | * will love this function. | ||
33 | */ | ||
34 | static void __init rcu_bootup_announce_oddness(void) | ||
35 | { | ||
36 | #ifdef CONFIG_RCU_TRACE | ||
37 | printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n"); | ||
38 | #endif | ||
39 | #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) | ||
40 | printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n", | ||
41 | CONFIG_RCU_FANOUT); | ||
42 | #endif | ||
43 | #ifdef CONFIG_RCU_FANOUT_EXACT | ||
44 | printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n"); | ||
45 | #endif | ||
46 | #ifdef CONFIG_RCU_FAST_NO_HZ | ||
47 | printk(KERN_INFO | ||
48 | "\tRCU dyntick-idle grace-period acceleration is enabled.\n"); | ||
49 | #endif | ||
50 | #ifdef CONFIG_PROVE_RCU | ||
51 | printk(KERN_INFO "\tRCU lockdep checking is enabled.\n"); | ||
52 | #endif | ||
53 | #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE | ||
54 | printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); | ||
55 | #endif | ||
56 | #ifndef CONFIG_RCU_CPU_STALL_DETECTOR | ||
57 | printk(KERN_INFO | ||
58 | "\tRCU-based detection of stalled CPUs is disabled.\n"); | ||
59 | #endif | ||
60 | #ifndef CONFIG_RCU_CPU_STALL_VERBOSE | ||
61 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); | ||
62 | #endif | ||
63 | #if NUM_RCU_LVL_4 != 0 | ||
64 | printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); | ||
65 | #endif | ||
66 | } | ||
67 | |||
29 | #ifdef CONFIG_TREE_PREEMPT_RCU | 68 | #ifdef CONFIG_TREE_PREEMPT_RCU |
30 | 69 | ||
31 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | 70 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); |
@@ -38,8 +77,8 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp); | |||
38 | */ | 77 | */ |
39 | static void __init rcu_bootup_announce(void) | 78 | static void __init rcu_bootup_announce(void) |
40 | { | 79 | { |
41 | printk(KERN_INFO | 80 | printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n"); |
42 | "Experimental preemptable hierarchical RCU implementation.\n"); | 81 | rcu_bootup_announce_oddness(); |
43 | } | 82 | } |
44 | 83 | ||
45 | /* | 84 | /* |
@@ -75,13 +114,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |||
75 | * that this just means that the task currently running on the CPU is | 114 | * that this just means that the task currently running on the CPU is |
76 | * not in a quiescent state. There might be any number of tasks blocked | 115 | * not in a quiescent state. There might be any number of tasks blocked |
77 | * while in an RCU read-side critical section. | 116 | * while in an RCU read-side critical section. |
117 | * | ||
118 | * Unlike the other rcu_*_qs() functions, callers to this function | ||
119 | * must disable irqs in order to protect the assignment to | ||
120 | * ->rcu_read_unlock_special. | ||
78 | */ | 121 | */ |
79 | static void rcu_preempt_qs(int cpu) | 122 | static void rcu_preempt_qs(int cpu) |
80 | { | 123 | { |
81 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 124 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
125 | |||
82 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | 126 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
83 | barrier(); | 127 | barrier(); |
84 | rdp->passed_quiesc = 1; | 128 | rdp->passed_quiesc = 1; |
129 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
85 | } | 130 | } |
86 | 131 | ||
87 | /* | 132 | /* |
@@ -144,9 +189,8 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
144 | * grace period, then the fact that the task has been enqueued | 189 | * grace period, then the fact that the task has been enqueued |
145 | * means that we continue to block the current grace period. | 190 | * means that we continue to block the current grace period. |
146 | */ | 191 | */ |
147 | rcu_preempt_qs(cpu); | ||
148 | local_irq_save(flags); | 192 | local_irq_save(flags); |
149 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | 193 | rcu_preempt_qs(cpu); |
150 | local_irq_restore(flags); | 194 | local_irq_restore(flags); |
151 | } | 195 | } |
152 | 196 | ||
@@ -236,7 +280,6 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
236 | */ | 280 | */ |
237 | special = t->rcu_read_unlock_special; | 281 | special = t->rcu_read_unlock_special; |
238 | if (special & RCU_READ_UNLOCK_NEED_QS) { | 282 | if (special & RCU_READ_UNLOCK_NEED_QS) { |
239 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
240 | rcu_preempt_qs(smp_processor_id()); | 283 | rcu_preempt_qs(smp_processor_id()); |
241 | } | 284 | } |
242 | 285 | ||
@@ -473,7 +516,6 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
473 | struct task_struct *t = current; | 516 | struct task_struct *t = current; |
474 | 517 | ||
475 | if (t->rcu_read_lock_nesting == 0) { | 518 | if (t->rcu_read_lock_nesting == 0) { |
476 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
477 | rcu_preempt_qs(cpu); | 519 | rcu_preempt_qs(cpu); |
478 | return; | 520 | return; |
479 | } | 521 | } |
@@ -515,11 +557,13 @@ void synchronize_rcu(void) | |||
515 | if (!rcu_scheduler_active) | 557 | if (!rcu_scheduler_active) |
516 | return; | 558 | return; |
517 | 559 | ||
560 | init_rcu_head_on_stack(&rcu.head); | ||
518 | init_completion(&rcu.completion); | 561 | init_completion(&rcu.completion); |
519 | /* Will wake me after RCU finished. */ | 562 | /* Will wake me after RCU finished. */ |
520 | call_rcu(&rcu.head, wakeme_after_rcu); | 563 | call_rcu(&rcu.head, wakeme_after_rcu); |
521 | /* Wait for it. */ | 564 | /* Wait for it. */ |
522 | wait_for_completion(&rcu.completion); | 565 | wait_for_completion(&rcu.completion); |
566 | destroy_rcu_head_on_stack(&rcu.head); | ||
523 | } | 567 | } |
524 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 568 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
525 | 569 | ||
@@ -754,6 +798,7 @@ void exit_rcu(void) | |||
754 | static void __init rcu_bootup_announce(void) | 798 | static void __init rcu_bootup_announce(void) |
755 | { | 799 | { |
756 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | 800 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); |
801 | rcu_bootup_announce_oddness(); | ||
757 | } | 802 | } |
758 | 803 | ||
759 | /* | 804 | /* |
@@ -1008,6 +1053,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | |||
1008 | int rcu_needs_cpu(int cpu) | 1053 | int rcu_needs_cpu(int cpu) |
1009 | { | 1054 | { |
1010 | int c = 0; | 1055 | int c = 0; |
1056 | int snap; | ||
1057 | int snap_nmi; | ||
1011 | int thatcpu; | 1058 | int thatcpu; |
1012 | 1059 | ||
1013 | /* Check for being in the holdoff period. */ | 1060 | /* Check for being in the holdoff period. */ |
@@ -1015,12 +1062,18 @@ int rcu_needs_cpu(int cpu) | |||
1015 | return rcu_needs_cpu_quick_check(cpu); | 1062 | return rcu_needs_cpu_quick_check(cpu); |
1016 | 1063 | ||
1017 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ | 1064 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ |
1018 | for_each_cpu_not(thatcpu, nohz_cpu_mask) | 1065 | for_each_online_cpu(thatcpu) { |
1019 | if (thatcpu != cpu) { | 1066 | if (thatcpu == cpu) |
1067 | continue; | ||
1068 | snap = per_cpu(rcu_dynticks, thatcpu).dynticks; | ||
1069 | snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi; | ||
1070 | smp_mb(); /* Order sampling of snap with end of grace period. */ | ||
1071 | if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) { | ||
1020 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 1072 | per_cpu(rcu_dyntick_drain, cpu) = 0; |
1021 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 1073 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; |
1022 | return rcu_needs_cpu_quick_check(cpu); | 1074 | return rcu_needs_cpu_quick_check(cpu); |
1023 | } | 1075 | } |
1076 | } | ||
1024 | 1077 | ||
1025 | /* Check and update the rcu_dyntick_drain sequencing. */ | 1078 | /* Check and update the rcu_dyntick_drain sequencing. */ |
1026 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 1079 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { |
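[Editorial note] rcu_needs_cpu() above now samples every other online CPU's dynticks and dynticks_nmi counters instead of consulting nohz_cpu_mask; an odd counter value indicates that CPU is not in dyntick-idle, so the current CPU is not yet the last one awake. A toy version of that parity scan (a fixed array in place of per_cpu(), invented counter values):

    #include <stdio.h>

    #define NR_CPUS_DEMO 4

    /* Even = in dyntick-idle, odd = currently non-idle (same parity rule). */
    static int dynticks[NR_CPUS_DEMO] = { 12, 7, 4, 9 };

    static int last_nonidle_cpu(int me)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
                    if (cpu == me)
                            continue;
                    if (dynticks[cpu] & 0x1)
                            return 0;  /* someone else is busy: not the last one */
            }
            return 1;                  /* every other CPU is idle */
    }

    int main(void)
    {
            printf("cpu 0 last non-idle? %d\n", last_nonidle_cpu(0));  /* 0 */
            return 0;
    }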
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index d45db2e35d27..36c95b45738e 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -241,11 +241,13 @@ static const struct file_operations rcugp_fops = { | |||
241 | static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) | 241 | static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) |
242 | { | 242 | { |
243 | seq_printf(m, "%3d%cnp=%ld " | 243 | seq_printf(m, "%3d%cnp=%ld " |
244 | "qsp=%ld cbr=%ld cng=%ld gpc=%ld gps=%ld nf=%ld nn=%ld\n", | 244 | "qsp=%ld rpq=%ld cbr=%ld cng=%ld " |
245 | "gpc=%ld gps=%ld nf=%ld nn=%ld\n", | ||
245 | rdp->cpu, | 246 | rdp->cpu, |
246 | cpu_is_offline(rdp->cpu) ? '!' : ' ', | 247 | cpu_is_offline(rdp->cpu) ? '!' : ' ', |
247 | rdp->n_rcu_pending, | 248 | rdp->n_rcu_pending, |
248 | rdp->n_rp_qs_pending, | 249 | rdp->n_rp_qs_pending, |
250 | rdp->n_rp_report_qs, | ||
249 | rdp->n_rp_cb_ready, | 251 | rdp->n_rp_cb_ready, |
250 | rdp->n_rp_cpu_needs_gp, | 252 | rdp->n_rp_cpu_needs_gp, |
251 | rdp->n_rp_gp_completed, | 253 | rdp->n_rp_gp_completed, |
diff --git a/kernel/sched.c b/kernel/sched.c index b11b80a3eed3..5cd607ec8405 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3663,7 +3663,7 @@ need_resched: | |||
3663 | preempt_disable(); | 3663 | preempt_disable(); |
3664 | cpu = smp_processor_id(); | 3664 | cpu = smp_processor_id(); |
3665 | rq = cpu_rq(cpu); | 3665 | rq = cpu_rq(cpu); |
3666 | rcu_sched_qs(cpu); | 3666 | rcu_note_context_switch(cpu); |
3667 | prev = rq->curr; | 3667 | prev = rq->curr; |
3668 | switch_count = &prev->nivcsw; | 3668 | switch_count = &prev->nivcsw; |
3669 | 3669 | ||
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 9b49db144037..19be00ba6123 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | |||
114 | { | 114 | { |
115 | char path[64]; | 115 | char path[64]; |
116 | 116 | ||
117 | rcu_read_lock(); | ||
117 | cgroup_path(task_group(p)->css.cgroup, path, sizeof(path)); | 118 | cgroup_path(task_group(p)->css.cgroup, path, sizeof(path)); |
119 | rcu_read_unlock(); | ||
118 | SEQ_printf(m, " %s", path); | 120 | SEQ_printf(m, " %s", path); |
119 | } | 121 | } |
120 | #endif | 122 | #endif |
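[Editorial note] The print_task() hunk above brackets the cgroup_path() lookup with rcu_read_lock()/rcu_read_unlock() so the task group's cgroup cannot go away while its path is being formatted. The same guard-the-lookup bracketing with an ordinary reader/writer lock in user space (not RCU, purely illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static char shared_path[64] = "/demo/group";

    static void print_path(void)
    {
            char buf[64];

            pthread_rwlock_rdlock(&lock);     /* stands in for rcu_read_lock() */
            strncpy(buf, shared_path, sizeof(buf) - 1);
            buf[sizeof(buf) - 1] = '\0';
            pthread_rwlock_unlock(&lock);     /* stands in for rcu_read_unlock() */

            printf(" %s\n", buf);             /* safe: copied under the lock */
    }

    int main(void)
    {
            print_path();
            return 0;
    }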
diff --git a/kernel/softirq.c b/kernel/softirq.c index 7c1a67ef0274..0db913a5c60f 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -716,7 +716,7 @@ static int run_ksoftirqd(void * __bind_cpu) | |||
716 | preempt_enable_no_resched(); | 716 | preempt_enable_no_resched(); |
717 | cond_resched(); | 717 | cond_resched(); |
718 | preempt_disable(); | 718 | preempt_disable(); |
719 | rcu_sched_qs((long)__bind_cpu); | 719 | rcu_note_context_switch((long)__bind_cpu); |
720 | } | 720 | } |
721 | preempt_enable(); | 721 | preempt_enable(); |
722 | set_current_state(TASK_INTERRUPTIBLE); | 722 | set_current_state(TASK_INTERRUPTIBLE); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 41ca394feb22..5885cdfc41f3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -319,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); | |||
319 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 319 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
320 | #define TS_DELTA_TEST (~TS_MASK) | 320 | #define TS_DELTA_TEST (~TS_MASK) |
321 | 321 | ||
322 | /* Flag when events were overwritten */ | ||
323 | #define RB_MISSED_EVENTS (1 << 31) | ||
324 | /* Missed count stored at end */ | ||
325 | #define RB_MISSED_STORED (1 << 30) | ||
326 | |||
322 | struct buffer_data_page { | 327 | struct buffer_data_page { |
323 | u64 time_stamp; /* page time stamp */ | 328 | u64 time_stamp; /* page time stamp */ |
324 | local_t commit; /* write committed index */ | 329 | local_t commit; /* write committed index */ |
@@ -338,6 +343,7 @@ struct buffer_page { | |||
338 | local_t write; /* index for next write */ | 343 | local_t write; /* index for next write */ |
339 | unsigned read; /* index for next read */ | 344 | unsigned read; /* index for next read */ |
340 | local_t entries; /* entries on this page */ | 345 | local_t entries; /* entries on this page */ |
346 | unsigned long real_end; /* real end of data */ | ||
341 | struct buffer_data_page *page; /* Actual data page */ | 347 | struct buffer_data_page *page; /* Actual data page */ |
342 | }; | 348 | }; |
343 | 349 | ||
@@ -417,6 +423,12 @@ int ring_buffer_print_page_header(struct trace_seq *s) | |||
417 | (unsigned int)sizeof(field.commit), | 423 | (unsigned int)sizeof(field.commit), |
418 | (unsigned int)is_signed_type(long)); | 424 | (unsigned int)is_signed_type(long)); |
419 | 425 | ||
426 | ret = trace_seq_printf(s, "\tfield: int overwrite;\t" | ||
427 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
428 | (unsigned int)offsetof(typeof(field), commit), | ||
429 | 1, | ||
430 | (unsigned int)is_signed_type(long)); | ||
431 | |||
420 | ret = trace_seq_printf(s, "\tfield: char data;\t" | 432 | ret = trace_seq_printf(s, "\tfield: char data;\t" |
421 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | 433 | "offset:%u;\tsize:%u;\tsigned:%u;\n", |
422 | (unsigned int)offsetof(typeof(field), data), | 434 | (unsigned int)offsetof(typeof(field), data), |
@@ -440,6 +452,8 @@ struct ring_buffer_per_cpu { | |||
440 | struct buffer_page *tail_page; /* write to tail */ | 452 | struct buffer_page *tail_page; /* write to tail */ |
441 | struct buffer_page *commit_page; /* committed pages */ | 453 | struct buffer_page *commit_page; /* committed pages */ |
442 | struct buffer_page *reader_page; | 454 | struct buffer_page *reader_page; |
455 | unsigned long lost_events; | ||
456 | unsigned long last_overrun; | ||
443 | local_t commit_overrun; | 457 | local_t commit_overrun; |
444 | local_t overrun; | 458 | local_t overrun; |
445 | local_t entries; | 459 | local_t entries; |
@@ -1762,6 +1776,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
1762 | kmemcheck_annotate_bitfield(event, bitfield); | 1776 | kmemcheck_annotate_bitfield(event, bitfield); |
1763 | 1777 | ||
1764 | /* | 1778 | /* |
1779 | * Save the original length to the meta data. | ||
1780 | * This will be used by the reader to add lost event | ||
1781 | * counter. | ||
1782 | */ | ||
1783 | tail_page->real_end = tail; | ||
1784 | |||
1785 | /* | ||
1765 | * If this event is bigger than the minimum size, then | 1786 | * If this event is bigger than the minimum size, then |
1766 | * we need to be careful that we don't subtract the | 1787 | * we need to be careful that we don't subtract the |
1767 | * write counter enough to allow another writer to slip | 1788 | * write counter enough to allow another writer to slip |
@@ -2838,6 +2859,7 @@ static struct buffer_page * | |||
2838 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 2859 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
2839 | { | 2860 | { |
2840 | struct buffer_page *reader = NULL; | 2861 | struct buffer_page *reader = NULL; |
2862 | unsigned long overwrite; | ||
2841 | unsigned long flags; | 2863 | unsigned long flags; |
2842 | int nr_loops = 0; | 2864 | int nr_loops = 0; |
2843 | int ret; | 2865 | int ret; |
@@ -2879,6 +2901,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
2879 | local_set(&cpu_buffer->reader_page->write, 0); | 2901 | local_set(&cpu_buffer->reader_page->write, 0); |
2880 | local_set(&cpu_buffer->reader_page->entries, 0); | 2902 | local_set(&cpu_buffer->reader_page->entries, 0); |
2881 | local_set(&cpu_buffer->reader_page->page->commit, 0); | 2903 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
2904 | cpu_buffer->reader_page->real_end = 0; | ||
2882 | 2905 | ||
2883 | spin: | 2906 | spin: |
2884 | /* | 2907 | /* |
@@ -2899,6 +2922,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
2899 | rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); | 2922 | rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); |
2900 | 2923 | ||
2901 | /* | 2924 | /* |
2925 | * We want to make sure we read the overruns after we set up our | ||
2926 | * pointers to the next object. The writer side does a | ||
2927 | * cmpxchg to cross pages which acts as the mb on the writer | ||
2928 | * side. Note, the reader will constantly fail the swap | ||
2929 | * while the writer is updating the pointers, so this | ||
2930 | * guarantees that the overwrite recorded here is the one we | ||
2931 | * want to compare with the last_overrun. | ||
2932 | */ | ||
2933 | smp_mb(); | ||
2934 | overwrite = local_read(&(cpu_buffer->overrun)); | ||
2935 | |||
2936 | /* | ||
2902 | * Here's the tricky part. | 2937 | * Here's the tricky part. |
2903 | * | 2938 | * |
2904 | * We need to move the pointer past the header page. | 2939 | * We need to move the pointer past the header page. |
@@ -2929,6 +2964,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
2929 | cpu_buffer->reader_page = reader; | 2964 | cpu_buffer->reader_page = reader; |
2930 | rb_reset_reader_page(cpu_buffer); | 2965 | rb_reset_reader_page(cpu_buffer); |
2931 | 2966 | ||
2967 | if (overwrite != cpu_buffer->last_overrun) { | ||
2968 | cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; | ||
2969 | cpu_buffer->last_overrun = overwrite; | ||
2970 | } | ||
2971 | |||
2932 | goto again; | 2972 | goto again; |
2933 | 2973 | ||
2934 | out: | 2974 | out: |
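The bookkeeping above is a simple running difference: the lost-event count for the page being handed to the reader is the number of overruns that happened since the previous reader page was taken. A worked example with made-up numbers: if last_overrun was 10 at the previous swap and the overrun counter now reads 14, then

	lost_events = 14 - 10 = 4    /* events overwritten since the last read */
	last_overrun = 14            /* baseline for the next swap */

If the counter has not moved, neither field is touched, and lost_events is cleared once an event is actually consumed (see ring_buffer_consume() below).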
@@ -3005,8 +3045,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
3005 | rb_advance_iter(iter); | 3045 | rb_advance_iter(iter); |
3006 | } | 3046 | } |
3007 | 3047 | ||
3048 | static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) | ||
3049 | { | ||
3050 | return cpu_buffer->lost_events; | ||
3051 | } | ||
3052 | |||
3008 | static struct ring_buffer_event * | 3053 | static struct ring_buffer_event * |
3009 | rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) | 3054 | rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, |
3055 | unsigned long *lost_events) | ||
3010 | { | 3056 | { |
3011 | struct ring_buffer_event *event; | 3057 | struct ring_buffer_event *event; |
3012 | struct buffer_page *reader; | 3058 | struct buffer_page *reader; |
@@ -3058,6 +3104,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) | |||
3058 | ring_buffer_normalize_time_stamp(cpu_buffer->buffer, | 3104 | ring_buffer_normalize_time_stamp(cpu_buffer->buffer, |
3059 | cpu_buffer->cpu, ts); | 3105 | cpu_buffer->cpu, ts); |
3060 | } | 3106 | } |
3107 | if (lost_events) | ||
3108 | *lost_events = rb_lost_events(cpu_buffer); | ||
3061 | return event; | 3109 | return event; |
3062 | 3110 | ||
3063 | default: | 3111 | default: |
@@ -3168,12 +3216,14 @@ static inline int rb_ok_to_lock(void) | |||
3168 | * @buffer: The ring buffer to read | 3216 | * @buffer: The ring buffer to read |
3169 | * @cpu: The cpu to peek at | 3217 | * @cpu: The cpu to peek at |
3170 | * @ts: The timestamp counter of this event. | 3218 | * @ts: The timestamp counter of this event. |
3219 | * @lost_events: a variable to store if events were lost (may be NULL) | ||
3171 | * | 3220 | * |
3172 | * This will return the event that will be read next, but does | 3221 | * This will return the event that will be read next, but does |
3173 | * not consume the data. | 3222 | * not consume the data. |
3174 | */ | 3223 | */ |
3175 | struct ring_buffer_event * | 3224 | struct ring_buffer_event * |
3176 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | 3225 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, |
3226 | unsigned long *lost_events) | ||
3177 | { | 3227 | { |
3178 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 3228 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
3179 | struct ring_buffer_event *event; | 3229 | struct ring_buffer_event *event; |
@@ -3188,7 +3238,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3188 | local_irq_save(flags); | 3238 | local_irq_save(flags); |
3189 | if (dolock) | 3239 | if (dolock) |
3190 | spin_lock(&cpu_buffer->reader_lock); | 3240 | spin_lock(&cpu_buffer->reader_lock); |
3191 | event = rb_buffer_peek(cpu_buffer, ts); | 3241 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); |
3192 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3242 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
3193 | rb_advance_reader(cpu_buffer); | 3243 | rb_advance_reader(cpu_buffer); |
3194 | if (dolock) | 3244 | if (dolock) |
@@ -3230,13 +3280,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
3230 | /** | 3280 | /** |
3231 | * ring_buffer_consume - return an event and consume it | 3281 | * ring_buffer_consume - return an event and consume it |
3232 | * @buffer: The ring buffer to get the next event from | 3282 | * @buffer: The ring buffer to get the next event from |
3283 | * @cpu: the cpu to read the buffer from | ||
3284 | * @ts: a variable to store the timestamp (may be NULL) | ||
3285 | * @lost_events: a variable to store if events were lost (may be NULL) | ||
3233 | * | 3286 | * |
3234 | * Returns the next event in the ring buffer, and that event is consumed. | 3287 | * Returns the next event in the ring buffer, and that event is consumed. |
3235 | * Meaning, that sequential reads will keep returning a different event, | 3288 | * Meaning, that sequential reads will keep returning a different event, |
3236 | * and eventually empty the ring buffer if the producer is slower. | 3289 | * and eventually empty the ring buffer if the producer is slower. |
3237 | */ | 3290 | */ |
3238 | struct ring_buffer_event * | 3291 | struct ring_buffer_event * |
3239 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | 3292 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, |
3293 | unsigned long *lost_events) | ||
3240 | { | 3294 | { |
3241 | struct ring_buffer_per_cpu *cpu_buffer; | 3295 | struct ring_buffer_per_cpu *cpu_buffer; |
3242 | struct ring_buffer_event *event = NULL; | 3296 | struct ring_buffer_event *event = NULL; |
@@ -3257,9 +3311,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3257 | if (dolock) | 3311 | if (dolock) |
3258 | spin_lock(&cpu_buffer->reader_lock); | 3312 | spin_lock(&cpu_buffer->reader_lock); |
3259 | 3313 | ||
3260 | event = rb_buffer_peek(cpu_buffer, ts); | 3314 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); |
3261 | if (event) | 3315 | if (event) { |
3316 | cpu_buffer->lost_events = 0; | ||
3262 | rb_advance_reader(cpu_buffer); | 3317 | rb_advance_reader(cpu_buffer); |
3318 | } | ||
3263 | 3319 | ||
3264 | if (dolock) | 3320 | if (dolock) |
3265 | spin_unlock(&cpu_buffer->reader_lock); | 3321 | spin_unlock(&cpu_buffer->reader_lock); |
@@ -3408,6 +3464,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
3408 | cpu_buffer->write_stamp = 0; | 3464 | cpu_buffer->write_stamp = 0; |
3409 | cpu_buffer->read_stamp = 0; | 3465 | cpu_buffer->read_stamp = 0; |
3410 | 3466 | ||
3467 | cpu_buffer->lost_events = 0; | ||
3468 | cpu_buffer->last_overrun = 0; | ||
3469 | |||
3411 | rb_head_page_activate(cpu_buffer); | 3470 | rb_head_page_activate(cpu_buffer); |
3412 | } | 3471 | } |
3413 | 3472 | ||
@@ -3683,6 +3742,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3683 | struct ring_buffer_event *event; | 3742 | struct ring_buffer_event *event; |
3684 | struct buffer_data_page *bpage; | 3743 | struct buffer_data_page *bpage; |
3685 | struct buffer_page *reader; | 3744 | struct buffer_page *reader; |
3745 | unsigned long missed_events; | ||
3686 | unsigned long flags; | 3746 | unsigned long flags; |
3687 | unsigned int commit; | 3747 | unsigned int commit; |
3688 | unsigned int read; | 3748 | unsigned int read; |
@@ -3719,6 +3779,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3719 | read = reader->read; | 3779 | read = reader->read; |
3720 | commit = rb_page_commit(reader); | 3780 | commit = rb_page_commit(reader); |
3721 | 3781 | ||
3782 | /* Check if any events were dropped */ | ||
3783 | missed_events = cpu_buffer->lost_events; | ||
3784 | |||
3722 | /* | 3785 | /* |
3723 | * If this page has been partially read or | 3786 | * If this page has been partially read or |
3724 | * if len is not big enough to read the rest of the page or | 3787 | * if len is not big enough to read the rest of the page or |
@@ -3779,9 +3842,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3779 | local_set(&reader->entries, 0); | 3842 | local_set(&reader->entries, 0); |
3780 | reader->read = 0; | 3843 | reader->read = 0; |
3781 | *data_page = bpage; | 3844 | *data_page = bpage; |
3845 | |||
3846 | /* | ||
3847 | * Use the real_end for the data size, | ||
3848 | * This gives us a chance to store the lost events | ||
3849 | * on the page. | ||
3850 | */ | ||
3851 | if (reader->real_end) | ||
3852 | local_set(&bpage->commit, reader->real_end); | ||
3782 | } | 3853 | } |
3783 | ret = read; | 3854 | ret = read; |
3784 | 3855 | ||
3856 | cpu_buffer->lost_events = 0; | ||
3857 | /* | ||
3858 | * Set a flag in the commit field if we lost events | ||
3859 | */ | ||
3860 | if (missed_events) { | ||
3861 | commit = local_read(&bpage->commit); | ||
3862 | |||
3863 | /* If there is room at the end of the page to save the | ||
3864 | * missed events, then record it there. | ||
3865 | */ | ||
3866 | if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { | ||
3867 | memcpy(&bpage->data[commit], &missed_events, | ||
3868 | sizeof(missed_events)); | ||
3869 | local_add(RB_MISSED_STORED, &bpage->commit); | ||
3870 | } | ||
3871 | local_add(RB_MISSED_EVENTS, &bpage->commit); | ||
3872 | } | ||
3873 | |||
3785 | out_unlock: | 3874 | out_unlock: |
3786 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3875 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3787 | 3876 | ||
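A consumer of ring_buffer_read_page() can recover the dropped-event information directly from the returned data page. A minimal decode sketch, assuming the RB_MISSED_EVENTS/RB_MISSED_STORED layout defined earlier in this file (the helper itself is illustrative and not part of the patch):

	static unsigned long sketch_missed_events(struct buffer_data_page *bpage)
	{
		unsigned long commit = local_read(&bpage->commit);
		unsigned long missed = 0;

		if (commit & RB_MISSED_EVENTS) {
			/* Strip the flag bits to get the real data length. */
			unsigned long len = commit &
				~(RB_MISSED_EVENTS | RB_MISSED_STORED);

			if (commit & RB_MISSED_STORED)
				/* The count was appended right after the data. */
				memcpy(&missed, &bpage->data[len], sizeof(missed));
			else
				/* Events were lost, but there was no room on
				 * the page to record how many. */
				missed = (unsigned long)-1;
		}
		return missed;
	}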
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index df74c7982255..dc56556b55a2 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
@@ -81,7 +81,7 @@ static enum event_status read_event(int cpu) | |||
81 | int *entry; | 81 | int *entry; |
82 | u64 ts; | 82 | u64 ts; |
83 | 83 | ||
84 | event = ring_buffer_consume(buffer, cpu, &ts); | 84 | event = ring_buffer_consume(buffer, cpu, &ts, NULL); |
85 | if (!event) | 85 | if (!event) |
86 | return EVENT_DROPPED; | 86 | return EVENT_DROPPED; |
87 | 87 | ||
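The benchmark passes NULL for the new argument because it does not care about drops; a caller that does can hand in a counter instead. A hedged usage sketch:

	unsigned long lost = 0;
	struct ring_buffer_event *event;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, &lost);
	if (lost)
		pr_info("cpu %d: %lu events were overwritten before this read\n",
			cpu, lost);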
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 44f916a04065..60f3b6289731 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1545,7 +1545,8 @@ static void trace_iterator_increment(struct trace_iterator *iter) | |||
1545 | } | 1545 | } |
1546 | 1546 | ||
1547 | static struct trace_entry * | 1547 | static struct trace_entry * |
1548 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | 1548 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, |
1549 | unsigned long *lost_events) | ||
1549 | { | 1550 | { |
1550 | struct ring_buffer_event *event; | 1551 | struct ring_buffer_event *event; |
1551 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | 1552 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; |
@@ -1556,7 +1557,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | |||
1556 | if (buf_iter) | 1557 | if (buf_iter) |
1557 | event = ring_buffer_iter_peek(buf_iter, ts); | 1558 | event = ring_buffer_iter_peek(buf_iter, ts); |
1558 | else | 1559 | else |
1559 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts); | 1560 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts, |
1561 | lost_events); | ||
1560 | 1562 | ||
1561 | ftrace_enable_cpu(); | 1563 | ftrace_enable_cpu(); |
1562 | 1564 | ||
@@ -1564,10 +1566,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | |||
1564 | } | 1566 | } |
1565 | 1567 | ||
1566 | static struct trace_entry * | 1568 | static struct trace_entry * |
1567 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 1569 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, |
1570 | unsigned long *missing_events, u64 *ent_ts) | ||
1568 | { | 1571 | { |
1569 | struct ring_buffer *buffer = iter->tr->buffer; | 1572 | struct ring_buffer *buffer = iter->tr->buffer; |
1570 | struct trace_entry *ent, *next = NULL; | 1573 | struct trace_entry *ent, *next = NULL; |
1574 | unsigned long lost_events, next_lost = 0; | ||
1571 | int cpu_file = iter->cpu_file; | 1575 | int cpu_file = iter->cpu_file; |
1572 | u64 next_ts = 0, ts; | 1576 | u64 next_ts = 0, ts; |
1573 | int next_cpu = -1; | 1577 | int next_cpu = -1; |
@@ -1580,7 +1584,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1580 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | 1584 | if (cpu_file > TRACE_PIPE_ALL_CPU) { |
1581 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | 1585 | if (ring_buffer_empty_cpu(buffer, cpu_file)) |
1582 | return NULL; | 1586 | return NULL; |
1583 | ent = peek_next_entry(iter, cpu_file, ent_ts); | 1587 | ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); |
1584 | if (ent_cpu) | 1588 | if (ent_cpu) |
1585 | *ent_cpu = cpu_file; | 1589 | *ent_cpu = cpu_file; |
1586 | 1590 | ||
@@ -1592,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1592 | if (ring_buffer_empty_cpu(buffer, cpu)) | 1596 | if (ring_buffer_empty_cpu(buffer, cpu)) |
1593 | continue; | 1597 | continue; |
1594 | 1598 | ||
1595 | ent = peek_next_entry(iter, cpu, &ts); | 1599 | ent = peek_next_entry(iter, cpu, &ts, &lost_events); |
1596 | 1600 | ||
1597 | /* | 1601 | /* |
1598 | * Pick the entry with the smallest timestamp: | 1602 | * Pick the entry with the smallest timestamp: |
@@ -1601,6 +1605,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1601 | next = ent; | 1605 | next = ent; |
1602 | next_cpu = cpu; | 1606 | next_cpu = cpu; |
1603 | next_ts = ts; | 1607 | next_ts = ts; |
1608 | next_lost = lost_events; | ||
1604 | } | 1609 | } |
1605 | } | 1610 | } |
1606 | 1611 | ||
@@ -1610,6 +1615,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1610 | if (ent_ts) | 1615 | if (ent_ts) |
1611 | *ent_ts = next_ts; | 1616 | *ent_ts = next_ts; |
1612 | 1617 | ||
1618 | if (missing_events) | ||
1619 | *missing_events = next_lost; | ||
1620 | |||
1613 | return next; | 1621 | return next; |
1614 | } | 1622 | } |
1615 | 1623 | ||
@@ -1617,13 +1625,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1617 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 1625 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
1618 | int *ent_cpu, u64 *ent_ts) | 1626 | int *ent_cpu, u64 *ent_ts) |
1619 | { | 1627 | { |
1620 | return __find_next_entry(iter, ent_cpu, ent_ts); | 1628 | return __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
1621 | } | 1629 | } |
1622 | 1630 | ||
1623 | /* Find the next real entry, and increment the iterator to the next entry */ | 1631 | /* Find the next real entry, and increment the iterator to the next entry */ |
1624 | static void *find_next_entry_inc(struct trace_iterator *iter) | 1632 | static void *find_next_entry_inc(struct trace_iterator *iter) |
1625 | { | 1633 | { |
1626 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); | 1634 | iter->ent = __find_next_entry(iter, &iter->cpu, |
1635 | &iter->lost_events, &iter->ts); | ||
1627 | 1636 | ||
1628 | if (iter->ent) | 1637 | if (iter->ent) |
1629 | trace_iterator_increment(iter); | 1638 | trace_iterator_increment(iter); |
@@ -1635,7 +1644,8 @@ static void trace_consume(struct trace_iterator *iter) | |||
1635 | { | 1644 | { |
1636 | /* Don't allow ftrace to trace into the ring buffers */ | 1645 | /* Don't allow ftrace to trace into the ring buffers */ |
1637 | ftrace_disable_cpu(); | 1646 | ftrace_disable_cpu(); |
1638 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); | 1647 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, |
1648 | &iter->lost_events); | ||
1639 | ftrace_enable_cpu(); | 1649 | ftrace_enable_cpu(); |
1640 | } | 1650 | } |
1641 | 1651 | ||
@@ -2030,6 +2040,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
2030 | { | 2040 | { |
2031 | enum print_line_t ret; | 2041 | enum print_line_t ret; |
2032 | 2042 | ||
2043 | if (iter->lost_events) | ||
2044 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | ||
2045 | iter->cpu, iter->lost_events); | ||
2046 | |||
2033 | if (iter->trace && iter->trace->print_line) { | 2047 | if (iter->trace && iter->trace->print_line) { |
2034 | ret = iter->trace->print_line(iter); | 2048 | ret = iter->trace->print_line(iter); |
2035 | if (ret != TRACE_TYPE_UNHANDLED) | 2049 | if (ret != TRACE_TYPE_UNHANDLED) |
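With lost_events plumbed through the iterator, a reader of the trace output will see an annotation like the following (CPU number and count are hypothetical) ahead of the next entry whenever the writer overran the reader:

	CPU:2 [LOST 135 EVENTS]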
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 9aed1a5cf553..669b9c31861d 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -490,9 +490,10 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
490 | * We need to consume the current entry to see | 490 | * We need to consume the current entry to see |
491 | * the next one. | 491 | * the next one. |
492 | */ | 492 | */ |
493 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | 493 | ring_buffer_consume(iter->tr->buffer, iter->cpu, |
494 | NULL, NULL); | ||
494 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | 495 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, |
495 | NULL); | 496 | NULL, NULL); |
496 | } | 497 | } |
497 | 498 | ||
498 | if (!event) | 499 | if (!event) |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 1cc9858258b3..71fa771ee4d7 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -29,7 +29,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) | |||
29 | struct trace_entry *entry; | 29 | struct trace_entry *entry; |
30 | unsigned int loops = 0; | 30 | unsigned int loops = 0; |
31 | 31 | ||
32 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { | 32 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) { |
33 | entry = ring_buffer_event_data(event); | 33 | entry = ring_buffer_event_data(event); |
34 | 34 | ||
35 | /* | 35 | /* |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 935248bdbc47..930a9e5eae08 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -512,6 +512,18 @@ config PROVE_RCU | |||
512 | 512 | ||
513 | Say N if you are unsure. | 513 | Say N if you are unsure. |
514 | 514 | ||
515 | config PROVE_RCU_REPEATEDLY | ||
516 | bool "RCU debugging: don't disable PROVE_RCU on first splat" | ||
517 | depends on PROVE_RCU | ||
518 | default n | ||
519 | help | ||
520 | By itself, PROVE_RCU will disable checking upon issuing the | ||
521 | first warning (or "splat"). This feature prevents such | ||
522 | disabling, allowing multiple RCU-lockdep warnings to be printed | ||
523 | on a single reboot. | ||
524 | |||
525 | Say N if you are unsure. | ||
526 | |||
515 | config LOCKDEP | 527 | config LOCKDEP |
516 | bool | 528 | bool |
517 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 529 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
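To keep RCU-lockdep checking alive after the first splat, both options have to be enabled, e.g. in the kernel configuration:

	CONFIG_PROVE_RCU=y
	CONFIG_PROVE_RCU_REPEATEDLY=y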
@@ -793,7 +805,7 @@ config RCU_CPU_STALL_DETECTOR | |||
793 | config RCU_CPU_STALL_VERBOSE | 805 | config RCU_CPU_STALL_VERBOSE |
794 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | 806 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" |
795 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | 807 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU |
796 | default n | 808 | default y |
797 | help | 809 | help |
798 | This option causes RCU to printk detailed per-task information | 810 | This option causes RCU to printk detailed per-task information |
799 | for any tasks that are stalling the current RCU grace period. | 811 | for any tasks that are stalling the current RCU grace period. |
diff --git a/lib/btree.c b/lib/btree.c index 41859a820218..c9c6f0351526 100644 --- a/lib/btree.c +++ b/lib/btree.c | |||
@@ -95,7 +95,8 @@ static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) | |||
95 | unsigned long *node; | 95 | unsigned long *node; |
96 | 96 | ||
97 | node = mempool_alloc(head->mempool, gfp); | 97 | node = mempool_alloc(head->mempool, gfp); |
98 | memset(node, 0, NODESIZE); | 98 | if (likely(node)) |
99 | memset(node, 0, NODESIZE); | ||
99 | return node; | 100 | return node; |
100 | } | 101 | } |
101 | 102 | ||
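mempool_alloc() can fail for non-waiting gfp masks, so a NULL return still has to be handled by the callers of btree_node_alloc(); the change only avoids the memset on that path. A caller-side sketch (illustrative):

	node = btree_node_alloc(head, gfp);
	if (!node)
		return -ENOMEM;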
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index b862b30369ff..deebcc57d4e6 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
@@ -141,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
141 | obj->object = addr; | 141 | obj->object = addr; |
142 | obj->descr = descr; | 142 | obj->descr = descr; |
143 | obj->state = ODEBUG_STATE_NONE; | 143 | obj->state = ODEBUG_STATE_NONE; |
144 | obj->astate = 0; | ||
144 | hlist_del(&obj->node); | 145 | hlist_del(&obj->node); |
145 | 146 | ||
146 | hlist_add_head(&obj->node, &b->list); | 147 | hlist_add_head(&obj->node, &b->list); |
@@ -252,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg) | |||
252 | 253 | ||
253 | if (limit < 5 && obj->descr != descr_test) { | 254 | if (limit < 5 && obj->descr != descr_test) { |
254 | limit++; | 255 | limit++; |
255 | WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | 256 | WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " |
256 | obj_states[obj->state], obj->descr->name); | 257 | "object type: %s\n", |
258 | msg, obj_states[obj->state], obj->astate, | ||
259 | obj->descr->name); | ||
257 | } | 260 | } |
258 | debug_objects_warnings++; | 261 | debug_objects_warnings++; |
259 | } | 262 | } |
@@ -447,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
447 | case ODEBUG_STATE_INIT: | 450 | case ODEBUG_STATE_INIT: |
448 | case ODEBUG_STATE_INACTIVE: | 451 | case ODEBUG_STATE_INACTIVE: |
449 | case ODEBUG_STATE_ACTIVE: | 452 | case ODEBUG_STATE_ACTIVE: |
450 | obj->state = ODEBUG_STATE_INACTIVE; | 453 | if (!obj->astate) |
454 | obj->state = ODEBUG_STATE_INACTIVE; | ||
455 | else | ||
456 | debug_print_object(obj, "deactivate"); | ||
451 | break; | 457 | break; |
452 | 458 | ||
453 | case ODEBUG_STATE_DESTROYED: | 459 | case ODEBUG_STATE_DESTROYED: |
@@ -553,6 +559,53 @@ out_unlock: | |||
553 | raw_spin_unlock_irqrestore(&db->lock, flags); | 559 | raw_spin_unlock_irqrestore(&db->lock, flags); |
554 | } | 560 | } |
555 | 561 | ||
562 | /** | ||
563 | * debug_object_active_state - debug checks object usage state machine | ||
564 | * @addr: address of the object | ||
565 | * @descr: pointer to an object specific debug description structure | ||
566 | * @expect: expected state | ||
567 | * @next: state to move to if expected state is found | ||
568 | */ | ||
569 | void | ||
570 | debug_object_active_state(void *addr, struct debug_obj_descr *descr, | ||
571 | unsigned int expect, unsigned int next) | ||
572 | { | ||
573 | struct debug_bucket *db; | ||
574 | struct debug_obj *obj; | ||
575 | unsigned long flags; | ||
576 | |||
577 | if (!debug_objects_enabled) | ||
578 | return; | ||
579 | |||
580 | db = get_bucket((unsigned long) addr); | ||
581 | |||
582 | raw_spin_lock_irqsave(&db->lock, flags); | ||
583 | |||
584 | obj = lookup_object(addr, db); | ||
585 | if (obj) { | ||
586 | switch (obj->state) { | ||
587 | case ODEBUG_STATE_ACTIVE: | ||
588 | if (obj->astate == expect) | ||
589 | obj->astate = next; | ||
590 | else | ||
591 | debug_print_object(obj, "active_state"); | ||
592 | break; | ||
593 | |||
594 | default: | ||
595 | debug_print_object(obj, "active_state"); | ||
596 | break; | ||
597 | } | ||
598 | } else { | ||
599 | struct debug_obj o = { .object = addr, | ||
600 | .state = ODEBUG_STATE_NOTAVAILABLE, | ||
601 | .descr = descr }; | ||
602 | |||
603 | debug_print_object(&o, "active_state"); | ||
604 | } | ||
605 | |||
606 | raw_spin_unlock_irqrestore(&db->lock, flags); | ||
607 | } | ||
608 | |||
556 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | 609 | #ifdef CONFIG_DEBUG_OBJECTS_FREE |
557 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) | 610 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
558 | { | 611 | { |
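A subsystem that tracks an additional "active state" on its debug objects would drive the new check from its own state transitions. A hedged sketch with made-up states and descriptor (none of the names below come from the patch):

	#define MY_OBJ_IDLE      0
	#define MY_OBJ_IN_FLIGHT 1

	/* Hand-off to another context: expect IDLE, move to IN_FLIGHT. */
	debug_object_active_state(my_obj, &my_debug_descr,
				  MY_OBJ_IDLE, MY_OBJ_IN_FLIGHT);

	/* Completion path: expect IN_FLIGHT, move back to IDLE. */
	debug_object_active_state(my_obj, &my_debug_descr,
				  MY_OBJ_IN_FLIGHT, MY_OBJ_IDLE);

Any mismatch, or a call on an object that is not ODEBUG_STATE_ACTIVE, triggers the "active_state" warning added above.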
@@ -774,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state) | |||
774 | } | 827 | } |
775 | } | 828 | } |
776 | 829 | ||
777 | static int | 830 | static int __init |
778 | check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | 831 | check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) |
779 | { | 832 | { |
780 | struct debug_bucket *db; | 833 | struct debug_bucket *db; |
@@ -917,7 +970,7 @@ void __init debug_objects_early_init(void) | |||
917 | /* | 970 | /* |
918 | * Convert the statically allocated objects to dynamic ones: | 971 | * Convert the statically allocated objects to dynamic ones: |
919 | */ | 972 | */ |
920 | static int debug_objects_replace_static_objects(void) | 973 | static int __init debug_objects_replace_static_objects(void) |
921 | { | 974 | { |
922 | struct debug_bucket *db = obj_hash; | 975 | struct debug_bucket *db = obj_hash; |
923 | struct hlist_node *node, *tmp; | 976 | struct hlist_node *node, *tmp; |
diff --git a/lib/rwsem.c b/lib/rwsem.c index 3e3365e5665e..ceba8e28807a 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
136 | out: | 136 | out: |
137 | return sem; | 137 | return sem; |
138 | 138 | ||
139 | /* undo the change to count, but check for a transition 1->0 */ | 139 | /* undo the change to the active count, but check for a transition |
140 | * 1->0 */ | ||
140 | undo: | 141 | undo: |
141 | if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) | 142 | if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK) |
142 | goto out; | 143 | goto out; |
143 | goto try_again; | 144 | goto try_again; |
144 | } | 145 | } |
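The subtlety is that the semaphore count mixes an active part and a waiter bias, so "count != 0" is not the same as "no active lockers left". A worked example with the conventional 32-bit constants (assumed here, not shown in the patch: RWSEM_ACTIVE_BIAS = 1, RWSEM_ACTIVE_MASK = 0x0000ffff, RWSEM_WAITING_BIAS = -0x00010000): if the update leaves count == RWSEM_WAITING_BIAS, i.e. one waiter queued and no active holders, the old test saw a non-zero count and jumped to out, never retrying the wakeup; the new test masks with RWSEM_ACTIVE_MASK, sees zero active lockers, and falls through to try_again as intended.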
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ffbdfc86aedf..4c9e6bbf3772 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1039,7 +1039,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, | |||
1039 | page = alloc_buddy_huge_page(h, vma, addr); | 1039 | page = alloc_buddy_huge_page(h, vma, addr); |
1040 | if (!page) { | 1040 | if (!page) { |
1041 | hugetlb_put_quota(inode->i_mapping, chg); | 1041 | hugetlb_put_quota(inode->i_mapping, chg); |
1042 | return ERR_PTR(-VM_FAULT_OOM); | 1042 | return ERR_PTR(-VM_FAULT_SIGBUS); |
1043 | } | 1043 | } |
1044 | } | 1044 | } |
1045 | 1045 | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6c755de385f7..8a79a6f0f029 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1601,7 +1601,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, | |||
1601 | * There is a small race that "from" or "to" can be | 1601 | * There is a small race that "from" or "to" can be |
1602 | * freed by rmdir, so we use css_tryget(). | 1602 | * freed by rmdir, so we use css_tryget(). |
1603 | */ | 1603 | */ |
1604 | rcu_read_lock(); | ||
1605 | from = mc.from; | 1604 | from = mc.from; |
1606 | to = mc.to; | 1605 | to = mc.to; |
1607 | if (from && css_tryget(&from->css)) { | 1606 | if (from && css_tryget(&from->css)) { |
@@ -1622,7 +1621,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, | |||
1622 | do_continue = (to == mem_over_limit); | 1621 | do_continue = (to == mem_over_limit); |
1623 | css_put(&to->css); | 1622 | css_put(&to->css); |
1624 | } | 1623 | } |
1625 | rcu_read_unlock(); | ||
1626 | if (do_continue) { | 1624 | if (do_continue) { |
1627 | DEFINE_WAIT(wait); | 1625 | DEFINE_WAIT(wait); |
1628 | prepare_to_wait(&mc.waitq, &wait, | 1626 | prepare_to_wait(&mc.waitq, &wait, |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -336,14 +336,13 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
336 | 336 | ||
337 | /* | 337 | /* |
338 | * At what user virtual address is page expected in vma? | 338 | * At what user virtual address is page expected in vma? |
339 | * checking that the page matches the vma. | 339 | * Caller should check the page is actually part of the vma. |
340 | */ | 340 | */ |
341 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | 341 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
342 | { | 342 | { |
343 | if (PageAnon(page)) { | 343 | if (PageAnon(page)) |
344 | if (vma->anon_vma != page_anon_vma(page)) | 344 | ; |
345 | return -EFAULT; | 345 | else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { |
346 | } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { | ||
347 | if (!vma->vm_file || | 346 | if (!vma->vm_file || |
348 | vma->vm_file->f_mapping != page->mapping) | 347 | vma->vm_file->f_mapping != page->mapping) |
349 | return -EFAULT; | 348 | return -EFAULT; |
diff --git a/net/core/dev.c b/net/core/dev.c index f769098774b7..264137fce3a2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1451,7 +1451,7 @@ static inline void net_timestamp(struct sk_buff *skb) | |||
1451 | * | 1451 | * |
1452 | * return values: | 1452 | * return values: |
1453 | * NET_RX_SUCCESS (no congestion) | 1453 | * NET_RX_SUCCESS (no congestion) |
1454 | * NET_RX_DROP (packet was dropped) | 1454 | * NET_RX_DROP (packet was dropped, but freed) |
1455 | * | 1455 | * |
1456 | * dev_forward_skb can be used for injecting an skb from the | 1456 | * dev_forward_skb can be used for injecting an skb from the |
1457 | * start_xmit function of one device into the receive queue | 1457 | * start_xmit function of one device into the receive queue |
@@ -1465,12 +1465,11 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
1465 | { | 1465 | { |
1466 | skb_orphan(skb); | 1466 | skb_orphan(skb); |
1467 | 1467 | ||
1468 | if (!(dev->flags & IFF_UP)) | 1468 | if (!(dev->flags & IFF_UP) || |
1469 | return NET_RX_DROP; | 1469 | (skb->len > (dev->mtu + dev->hard_header_len))) { |
1470 | 1470 | kfree_skb(skb); | |
1471 | if (skb->len > (dev->mtu + dev->hard_header_len)) | ||
1472 | return NET_RX_DROP; | 1471 | return NET_RX_DROP; |
1473 | 1472 | } | |
1474 | skb_set_dev(skb, dev); | 1473 | skb_set_dev(skb, dev); |
1475 | skb->tstamp.tv64 = 0; | 1474 | skb->tstamp.tv64 = 0; |
1476 | skb->pkt_type = PACKET_HOST; | 1475 | skb->pkt_type = PACKET_HOST; |
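Since the skb is now freed inside dev_forward_skb() on the drop path, callers must treat the call as consuming the buffer in all cases and must not free it again on NET_RX_DROP. A hedged caller sketch (peer_dev and the stats update are illustrative):

	/* Ownership of skb passes to dev_forward_skb() unconditionally. */
	if (dev_forward_skb(peer_dev, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;	/* skb already freed */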
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fe776c9ddeca..31e85d327aa2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -602,12 +602,19 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
602 | a->tx_compressed = b->tx_compressed; | 602 | a->tx_compressed = b->tx_compressed; |
603 | }; | 603 | }; |
604 | 604 | ||
605 | /* All VF info */ | ||
605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | 606 | static inline int rtnl_vfinfo_size(const struct net_device *dev) |
606 | { | 607 | { |
607 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) | 608 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { |
608 | return dev_num_vf(dev->dev.parent) * | 609 | |
609 | sizeof(struct ifla_vf_info); | 610 | int num_vfs = dev_num_vf(dev->dev.parent); |
610 | else | 611 | size_t size = nlmsg_total_size(sizeof(struct nlattr)); |
612 | size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); | ||
613 | size += num_vfs * (sizeof(struct ifla_vf_mac) + | ||
614 | sizeof(struct ifla_vf_vlan) + | ||
615 | sizeof(struct ifla_vf_tx_rate)); | ||
616 | return size; | ||
617 | } else | ||
611 | return 0; | 618 | return 0; |
612 | } | 619 | } |
613 | 620 | ||
@@ -629,7 +636,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
629 | + nla_total_size(1) /* IFLA_OPERSTATE */ | 636 | + nla_total_size(1) /* IFLA_OPERSTATE */ |
630 | + nla_total_size(1) /* IFLA_LINKMODE */ | 637 | + nla_total_size(1) /* IFLA_LINKMODE */ |
631 | + nla_total_size(4) /* IFLA_NUM_VF */ | 638 | + nla_total_size(4) /* IFLA_NUM_VF */ |
632 | + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */ | 639 | + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */ |
633 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ | 640 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ |
634 | } | 641 | } |
635 | 642 | ||
@@ -700,14 +707,37 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
700 | 707 | ||
701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | 708 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { |
702 | int i; | 709 | int i; |
703 | struct ifla_vf_info ivi; | ||
704 | 710 | ||
705 | NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); | 711 | struct nlattr *vfinfo, *vf; |
706 | for (i = 0; i < dev_num_vf(dev->dev.parent); i++) { | 712 | int num_vfs = dev_num_vf(dev->dev.parent); |
713 | |||
714 | NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs); | ||
715 | vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); | ||
716 | if (!vfinfo) | ||
717 | goto nla_put_failure; | ||
718 | for (i = 0; i < num_vfs; i++) { | ||
719 | struct ifla_vf_info ivi; | ||
720 | struct ifla_vf_mac vf_mac; | ||
721 | struct ifla_vf_vlan vf_vlan; | ||
722 | struct ifla_vf_tx_rate vf_tx_rate; | ||
707 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | 723 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) |
708 | break; | 724 | break; |
709 | NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi); | 725 | vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf; |
726 | memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); | ||
727 | vf_vlan.vlan = ivi.vlan; | ||
728 | vf_vlan.qos = ivi.qos; | ||
729 | vf_tx_rate.rate = ivi.tx_rate; | ||
730 | vf = nla_nest_start(skb, IFLA_VF_INFO); | ||
731 | if (!vf) { | ||
732 | nla_nest_cancel(skb, vfinfo); | ||
733 | goto nla_put_failure; | ||
734 | } | ||
735 | NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); | ||
736 | NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); | ||
737 | NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); | ||
738 | nla_nest_end(skb, vf); | ||
710 | } | 739 | } |
740 | nla_nest_end(skb, vfinfo); | ||
711 | } | 741 | } |
712 | if (dev->rtnl_link_ops) { | 742 | if (dev->rtnl_link_ops) { |
713 | if (rtnl_link_fill(skb, dev) < 0) | 743 | if (rtnl_link_fill(skb, dev) < 0) |
@@ -769,12 +799,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
769 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, | 799 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
770 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 800 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
771 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 801 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, |
772 | [IFLA_VF_MAC] = { .type = NLA_BINARY, | 802 | [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, |
773 | .len = sizeof(struct ifla_vf_mac) }, | ||
774 | [IFLA_VF_VLAN] = { .type = NLA_BINARY, | ||
775 | .len = sizeof(struct ifla_vf_vlan) }, | ||
776 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, | ||
777 | .len = sizeof(struct ifla_vf_tx_rate) }, | ||
778 | }; | 803 | }; |
779 | EXPORT_SYMBOL(ifla_policy); | 804 | EXPORT_SYMBOL(ifla_policy); |
780 | 805 | ||
@@ -783,6 +808,19 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | |||
783 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, | 808 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, |
784 | }; | 809 | }; |
785 | 810 | ||
811 | static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { | ||
812 | [IFLA_VF_INFO] = { .type = NLA_NESTED }, | ||
813 | }; | ||
814 | |||
815 | static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { | ||
816 | [IFLA_VF_MAC] = { .type = NLA_BINARY, | ||
817 | .len = sizeof(struct ifla_vf_mac) }, | ||
818 | [IFLA_VF_VLAN] = { .type = NLA_BINARY, | ||
819 | .len = sizeof(struct ifla_vf_vlan) }, | ||
820 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, | ||
821 | .len = sizeof(struct ifla_vf_tx_rate) }, | ||
822 | }; | ||
823 | |||
786 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) | 824 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) |
787 | { | 825 | { |
788 | struct net *net; | 826 | struct net *net; |
@@ -812,6 +850,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) | |||
812 | return 0; | 850 | return 0; |
813 | } | 851 | } |
814 | 852 | ||
853 | static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) | ||
854 | { | ||
855 | int rem, err = -EINVAL; | ||
856 | struct nlattr *vf; | ||
857 | const struct net_device_ops *ops = dev->netdev_ops; | ||
858 | |||
859 | nla_for_each_nested(vf, attr, rem) { | ||
860 | switch (nla_type(vf)) { | ||
861 | case IFLA_VF_MAC: { | ||
862 | struct ifla_vf_mac *ivm; | ||
863 | ivm = nla_data(vf); | ||
864 | err = -EOPNOTSUPP; | ||
865 | if (ops->ndo_set_vf_mac) | ||
866 | err = ops->ndo_set_vf_mac(dev, ivm->vf, | ||
867 | ivm->mac); | ||
868 | break; | ||
869 | } | ||
870 | case IFLA_VF_VLAN: { | ||
871 | struct ifla_vf_vlan *ivv; | ||
872 | ivv = nla_data(vf); | ||
873 | err = -EOPNOTSUPP; | ||
874 | if (ops->ndo_set_vf_vlan) | ||
875 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, | ||
876 | ivv->vlan, | ||
877 | ivv->qos); | ||
878 | break; | ||
879 | } | ||
880 | case IFLA_VF_TX_RATE: { | ||
881 | struct ifla_vf_tx_rate *ivt; | ||
882 | ivt = nla_data(vf); | ||
883 | err = -EOPNOTSUPP; | ||
884 | if (ops->ndo_set_vf_tx_rate) | ||
885 | err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, | ||
886 | ivt->rate); | ||
887 | break; | ||
888 | } | ||
889 | default: | ||
890 | err = -EINVAL; | ||
891 | break; | ||
892 | } | ||
893 | if (err) | ||
894 | break; | ||
895 | } | ||
896 | return err; | ||
897 | } | ||
898 | |||
815 | static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | 899 | static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, |
816 | struct nlattr **tb, char *ifname, int modified) | 900 | struct nlattr **tb, char *ifname, int modified) |
817 | { | 901 | { |
@@ -942,40 +1026,17 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
942 | write_unlock_bh(&dev_base_lock); | 1026 | write_unlock_bh(&dev_base_lock); |
943 | } | 1027 | } |
944 | 1028 | ||
945 | if (tb[IFLA_VF_MAC]) { | 1029 | if (tb[IFLA_VFINFO_LIST]) { |
946 | struct ifla_vf_mac *ivm; | 1030 | struct nlattr *attr; |
947 | ivm = nla_data(tb[IFLA_VF_MAC]); | 1031 | int rem; |
948 | err = -EOPNOTSUPP; | 1032 | nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { |
949 | if (ops->ndo_set_vf_mac) | 1033 | if (nla_type(attr) != IFLA_VF_INFO) |
950 | err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); | 1034 | goto errout; |
951 | if (err < 0) | 1035 | err = do_setvfinfo(dev, attr); |
952 | goto errout; | 1036 | if (err < 0) |
953 | modified = 1; | 1037 | goto errout; |
954 | } | 1038 | modified = 1; |
955 | 1039 | } | |
956 | if (tb[IFLA_VF_VLAN]) { | ||
957 | struct ifla_vf_vlan *ivv; | ||
958 | ivv = nla_data(tb[IFLA_VF_VLAN]); | ||
959 | err = -EOPNOTSUPP; | ||
960 | if (ops->ndo_set_vf_vlan) | ||
961 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, | ||
962 | ivv->vlan, | ||
963 | ivv->qos); | ||
964 | if (err < 0) | ||
965 | goto errout; | ||
966 | modified = 1; | ||
967 | } | ||
968 | err = 0; | ||
969 | |||
970 | if (tb[IFLA_VF_TX_RATE]) { | ||
971 | struct ifla_vf_tx_rate *ivt; | ||
972 | ivt = nla_data(tb[IFLA_VF_TX_RATE]); | ||
973 | err = -EOPNOTSUPP; | ||
974 | if (ops->ndo_set_vf_tx_rate) | ||
975 | err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); | ||
976 | if (err < 0) | ||
977 | goto errout; | ||
978 | modified = 1; | ||
979 | } | 1040 | } |
980 | err = 0; | 1041 | err = 0; |
981 | 1042 | ||
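Taken together, the dump and set paths now agree on a nested attribute layout rather than one flat struct per property. Roughly (attribute names from the patch, indentation illustrative):

	IFLA_VFINFO_LIST
	    IFLA_VF_INFO                      (one nest per VF)
	        IFLA_VF_MAC       struct ifla_vf_mac
	        IFLA_VF_VLAN      struct ifla_vf_vlan
	        IFLA_VF_TX_RATE   struct ifla_vf_tx_rate

rtnl_vfinfo_size() mirrors this layout so that if_nlmsg_size() reserves room for the nest headers as well as the per-VF payloads.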
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 6e747065c202..80769f1f9fab 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -661,13 +661,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
661 | #endif | 661 | #endif |
662 | #endif | 662 | #endif |
663 | 663 | ||
664 | #ifdef CONFIG_FDDI | 664 | #if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) |
665 | case ARPHRD_FDDI: | 665 | case ARPHRD_FDDI: |
666 | arp->ar_hrd = htons(ARPHRD_ETHER); | 666 | arp->ar_hrd = htons(ARPHRD_ETHER); |
667 | arp->ar_pro = htons(ETH_P_IP); | 667 | arp->ar_pro = htons(ETH_P_IP); |
668 | break; | 668 | break; |
669 | #endif | 669 | #endif |
670 | #ifdef CONFIG_TR | 670 | #if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) |
671 | case ARPHRD_IEEE802_TR: | 671 | case ARPHRD_IEEE802_TR: |
672 | arp->ar_hrd = htons(ARPHRD_IEEE802); | 672 | arp->ar_hrd = htons(ARPHRD_IEEE802); |
673 | arp->ar_pro = htons(ETH_P_IP); | 673 | arp->ar_pro = htons(ETH_P_IP); |
@@ -1051,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, | |||
1051 | return -EINVAL; | 1051 | return -EINVAL; |
1052 | } | 1052 | } |
1053 | switch (dev->type) { | 1053 | switch (dev->type) { |
1054 | #ifdef CONFIG_FDDI | 1054 | #if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE) |
1055 | case ARPHRD_FDDI: | 1055 | case ARPHRD_FDDI: |
1056 | /* | 1056 | /* |
1057 | * According to RFC 1390, FDDI devices should accept ARP | 1057 | * According to RFC 1390, FDDI devices should accept ARP |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d4f6d1340a4..ec19a890c9a0 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -754,7 +754,8 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
754 | c->next = mfc_unres_queue; | 754 | c->next = mfc_unres_queue; |
755 | mfc_unres_queue = c; | 755 | mfc_unres_queue = c; |
756 | 756 | ||
757 | mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); | 757 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) == 1) |
758 | mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); | ||
758 | } | 759 | } |
759 | 760 | ||
760 | /* | 761 | /* |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0f8caf64caa3..296150b2a62f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2839,7 +2839,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) | |||
2839 | if (p->md5_desc.tfm) | 2839 | if (p->md5_desc.tfm) |
2840 | crypto_free_hash(p->md5_desc.tfm); | 2840 | crypto_free_hash(p->md5_desc.tfm); |
2841 | kfree(p); | 2841 | kfree(p); |
2842 | p = NULL; | ||
2843 | } | 2842 | } |
2844 | } | 2843 | } |
2845 | free_percpu(pool); | 2844 | free_percpu(pool); |
@@ -2937,25 +2936,40 @@ retry: | |||
2937 | 2936 | ||
2938 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | 2937 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
2939 | 2938 | ||
2940 | struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) | 2939 | |
2940 | /** | ||
2941 | * tcp_get_md5sig_pool - get md5sig_pool for this user | ||
2942 | * | ||
2943 | * We use percpu structure, so if we succeed, we exit with preemption | ||
2944 | * and BH disabled, to make sure another thread or softirq handling | ||
2945 | * won't try to get the same context. | ||
2946 | */ | ||
2947 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | ||
2941 | { | 2948 | { |
2942 | struct tcp_md5sig_pool * __percpu *p; | 2949 | struct tcp_md5sig_pool * __percpu *p; |
2943 | spin_lock_bh(&tcp_md5sig_pool_lock); | 2950 | |
2951 | local_bh_disable(); | ||
2952 | |||
2953 | spin_lock(&tcp_md5sig_pool_lock); | ||
2944 | p = tcp_md5sig_pool; | 2954 | p = tcp_md5sig_pool; |
2945 | if (p) | 2955 | if (p) |
2946 | tcp_md5sig_users++; | 2956 | tcp_md5sig_users++; |
2947 | spin_unlock_bh(&tcp_md5sig_pool_lock); | 2957 | spin_unlock(&tcp_md5sig_pool_lock); |
2948 | return (p ? *per_cpu_ptr(p, cpu) : NULL); | 2958 | |
2949 | } | 2959 | if (p) |
2960 | return *per_cpu_ptr(p, smp_processor_id()); | ||
2950 | 2961 | ||
2951 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); | 2962 | local_bh_enable(); |
2963 | return NULL; | ||
2964 | } | ||
2965 | EXPORT_SYMBOL(tcp_get_md5sig_pool); | ||
2952 | 2966 | ||
2953 | void __tcp_put_md5sig_pool(void) | 2967 | void tcp_put_md5sig_pool(void) |
2954 | { | 2968 | { |
2969 | local_bh_enable(); | ||
2955 | tcp_free_md5sig_pool(); | 2970 | tcp_free_md5sig_pool(); |
2956 | } | 2971 | } |
2957 | 2972 | EXPORT_SYMBOL(tcp_put_md5sig_pool); | |
2958 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); | ||
2959 | 2973 | ||
2960 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, | 2974 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, |
2961 | struct tcphdr *th) | 2975 | struct tcphdr *th) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8fef859db35d..c36522a0f113 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1527,6 +1527,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1527 | 1527 | ||
1528 | uh = udp_hdr(skb); | 1528 | uh = udp_hdr(skb); |
1529 | ulen = ntohs(uh->len); | 1529 | ulen = ntohs(uh->len); |
1530 | saddr = ip_hdr(skb)->saddr; | ||
1531 | daddr = ip_hdr(skb)->daddr; | ||
1532 | |||
1530 | if (ulen > skb->len) | 1533 | if (ulen > skb->len) |
1531 | goto short_packet; | 1534 | goto short_packet; |
1532 | 1535 | ||
@@ -1540,9 +1543,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1540 | if (udp4_csum_init(skb, uh, proto)) | 1543 | if (udp4_csum_init(skb, uh, proto)) |
1541 | goto csum_error; | 1544 | goto csum_error; |
1542 | 1545 | ||
1543 | saddr = ip_hdr(skb)->saddr; | ||
1544 | daddr = ip_hdr(skb)->daddr; | ||
1545 | |||
1546 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | 1546 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
1547 | return __udp4_lib_mcast_deliver(net, skb, uh, | 1547 | return __udp4_lib_mcast_deliver(net, skb, uh, |
1548 | saddr, daddr, udptable); | 1548 | saddr, daddr, udptable); |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 622dc7939a1b..61573885e451 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -222,6 +222,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | |||
222 | if (!skb) | 222 | if (!skb) |
223 | return; | 223 | return; |
224 | 224 | ||
225 | skb->protocol = htons(ETH_P_IPV6); | ||
226 | |||
225 | serr = SKB_EXT_ERR(skb); | 227 | serr = SKB_EXT_ERR(skb); |
226 | serr->ee.ee_errno = err; | 228 | serr->ee.ee_errno = err; |
227 | serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6; | 229 | serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6; |
@@ -255,6 +257,8 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) | |||
255 | if (!skb) | 257 | if (!skb) |
256 | return; | 258 | return; |
257 | 259 | ||
260 | skb->protocol = htons(ETH_P_IPV6); | ||
261 | |||
258 | skb_put(skb, sizeof(struct ipv6hdr)); | 262 | skb_put(skb, sizeof(struct ipv6hdr)); |
259 | skb_reset_network_header(skb); | 263 | skb_reset_network_header(skb); |
260 | iph = ipv6_hdr(skb); | 264 | iph = ipv6_hdr(skb); |
@@ -319,7 +323,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
319 | sin->sin6_flowinfo = 0; | 323 | sin->sin6_flowinfo = 0; |
320 | sin->sin6_port = serr->port; | 324 | sin->sin6_port = serr->port; |
321 | sin->sin6_scope_id = 0; | 325 | sin->sin6_scope_id = 0; |
322 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { | 326 | if (skb->protocol == htons(ETH_P_IPV6)) { |
323 | ipv6_addr_copy(&sin->sin6_addr, | 327 | ipv6_addr_copy(&sin->sin6_addr, |
324 | (struct in6_addr *)(nh + serr->addr_offset)); | 328 | (struct in6_addr *)(nh + serr->addr_offset)); |
325 | if (np->sndflow) | 329 | if (np->sndflow) |
@@ -341,7 +345,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
341 | sin->sin6_family = AF_INET6; | 345 | sin->sin6_family = AF_INET6; |
342 | sin->sin6_flowinfo = 0; | 346 | sin->sin6_flowinfo = 0; |
343 | sin->sin6_scope_id = 0; | 347 | sin->sin6_scope_id = 0; |
344 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { | 348 | if (skb->protocol == htons(ETH_P_IPV6)) { |
345 | ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); | 349 | ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); |
346 | if (np->rxopt.all) | 350 | if (np->rxopt.all) |
347 | datagram_recv_ctl(sk, msg, skb); | 351 | datagram_recv_ctl(sk, msg, skb); |
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index a432f0ec051c..94e7fca75b85 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c | |||
@@ -31,7 +31,7 @@ static int llc_mac_header_len(unsigned short devtype) | |||
31 | case ARPHRD_ETHER: | 31 | case ARPHRD_ETHER: |
32 | case ARPHRD_LOOPBACK: | 32 | case ARPHRD_LOOPBACK: |
33 | return sizeof(struct ethhdr); | 33 | return sizeof(struct ethhdr); |
34 | #ifdef CONFIG_TR | 34 | #if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) |
35 | case ARPHRD_IEEE802_TR: | 35 | case ARPHRD_IEEE802_TR: |
36 | return sizeof(struct trh_hdr); | 36 | return sizeof(struct trh_hdr); |
37 | #endif | 37 | #endif |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 4aefa6dc3091..875c8dec940a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -2030,7 +2030,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2030 | continue; | 2030 | continue; |
2031 | 2031 | ||
2032 | if (wk->type != IEEE80211_WORK_DIRECT_PROBE && | 2032 | if (wk->type != IEEE80211_WORK_DIRECT_PROBE && |
2033 | wk->type != IEEE80211_WORK_AUTH) | 2033 | wk->type != IEEE80211_WORK_AUTH && |
2034 | wk->type != IEEE80211_WORK_ASSOC) | ||
2034 | continue; | 2035 | continue; |
2035 | 2036 | ||
2036 | if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) | 2037 | if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 2a570184e5a9..ea2192444ce6 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk, | |||
440 | { | 440 | { |
441 | SCTP_DEBUG_PRINTK("%s\n", __func__); | 441 | SCTP_DEBUG_PRINTK("%s\n", __func__); |
442 | 442 | ||
443 | sctp_do_sm(SCTP_EVENT_T_OTHER, | 443 | if (sock_owned_by_user(sk)) { |
444 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | 444 | if (timer_pending(&t->proto_unreach_timer)) |
445 | asoc->state, asoc->ep, asoc, t, | 445 | return; |
446 | GFP_ATOMIC); | 446 | else { |
447 | if (!mod_timer(&t->proto_unreach_timer, | ||
448 | jiffies + (HZ/20))) | ||
449 | sctp_association_hold(asoc); | ||
450 | } | ||
451 | |||
452 | } else { | ||
453 | if (timer_pending(&t->proto_unreach_timer) && | ||
454 | del_timer(&t->proto_unreach_timer)) | ||
455 | sctp_association_put(asoc); | ||
447 | 456 | ||
457 | sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
458 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | ||
459 | asoc->state, asoc->ep, asoc, t, | ||
460 | GFP_ATOMIC); | ||
461 | } | ||
448 | } | 462 | } |
449 | 463 | ||
450 | /* Common lookup code for icmp/icmpv6 error handler. */ | 464 | /* Common lookup code for icmp/icmpv6 error handler. */ |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index d5ae450b6f02..eb1f42f45fdd 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -397,6 +397,41 @@ out_unlock: | |||
397 | sctp_transport_put(transport); | 397 | sctp_transport_put(transport); |
398 | } | 398 | } |
399 | 399 | ||
400 | /* Handle the timeout of the ICMP protocol unreachable timer. Trigger | ||
401 | * the correct state machine transition that will close the association. | ||
402 | */ | ||
403 | void sctp_generate_proto_unreach_event(unsigned long data) | ||
404 | { | ||
405 | struct sctp_transport *transport = (struct sctp_transport *) data; | ||
406 | struct sctp_association *asoc = transport->asoc; | ||
407 | |||
408 | sctp_bh_lock_sock(asoc->base.sk); | ||
409 | if (sock_owned_by_user(asoc->base.sk)) { | ||
410 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); | ||
411 | |||
412 | /* Try again later. */ | ||
413 | if (!mod_timer(&transport->proto_unreach_timer, | ||
414 | jiffies + (HZ/20))) | ||
415 | sctp_association_hold(asoc); | ||
416 | goto out_unlock; | ||
417 | } | ||
418 | |||
419 | /* Is this structure just waiting around for us to actually | ||
420 | * get destroyed? | ||
421 | */ | ||
422 | if (asoc->base.dead) | ||
423 | goto out_unlock; | ||
424 | |||
425 | sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
426 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | ||
427 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); | ||
428 | |||
429 | out_unlock: | ||
430 | sctp_bh_unlock_sock(asoc->base.sk); | ||
431 | sctp_association_put(asoc); | ||
432 | } | ||
433 | |||
434 | |||
400 | /* Inject a SACK Timeout event into the state machine. */ | 435 | /* Inject a SACK Timeout event into the state machine. */ |
401 | static void sctp_generate_sack_event(unsigned long data) | 436 | static void sctp_generate_sack_event(unsigned long data) |
402 | { | 437 | { |
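sctp_generate_proto_unreach_event() above follows the usual SCTP timer-handler shape: take the BH socket lock, re-arm and bail out if the socket is still busy (taking a hold only when mod_timer() reports the timer was not already pending), skip dead associations, otherwise run the state machine; the trailing sctp_association_put() balances the hold taken when the timer was armed. The detail this relies on is mod_timer()'s return value: 0 means the timer was idle and has just been armed, 1 means an already-pending timer was merely re-scheduled, so the hold is taken exactly once per armed timer. A tiny userspace model of that pairing (not kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    struct fake_timer { bool pending; };

    /* Model of mod_timer(): returns 1 if the timer was already pending, 0 if idle. */
    static int fake_mod_timer(struct fake_timer *t)
    {
        int was_pending = t->pending;
        t->pending = true;
        return was_pending;
    }

    int main(void)
    {
        struct fake_timer t = { .pending = false };
        int refs = 1;

        /* First arm: timer was idle, so take a hold for the handler to drop later. */
        if (!fake_mod_timer(&t))
            refs++;

        /* Second arm while still pending: only re-schedules, no extra hold. */
        if (!fake_mod_timer(&t))
            refs++;

        printf("refs after two arms: %d (expect 2)\n", refs);

        /* Handler fires: timer no longer pending, the handler drops its hold. */
        t.pending = false;
        refs--;

        printf("refs after handler: %d (expect 1)\n", refs);
        return 0;
    }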
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index be4d63d5a5cc..165d54e07fcd 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -108,6 +108,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | |||
108 | (unsigned long)peer); | 108 | (unsigned long)peer); |
109 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, | 109 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, |
110 | (unsigned long)peer); | 110 | (unsigned long)peer); |
111 | setup_timer(&peer->proto_unreach_timer, | ||
112 | sctp_generate_proto_unreach_event, (unsigned long)peer); | ||
111 | 113 | ||
112 | /* Initialize the 64-bit random nonce sent with heartbeat. */ | 114 | /* Initialize the 64-bit random nonce sent with heartbeat. */ |
113 | get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); | 115 | get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); |
@@ -171,6 +173,10 @@ void sctp_transport_free(struct sctp_transport *transport) | |||
171 | del_timer(&transport->T3_rtx_timer)) | 173 | del_timer(&transport->T3_rtx_timer)) |
172 | sctp_transport_put(transport); | 174 | sctp_transport_put(transport); |
173 | 175 | ||
176 | /* Delete the ICMP proto unreachable timer if it's active. */ | ||
177 | if (timer_pending(&transport->proto_unreach_timer) && | ||
178 | del_timer(&transport->proto_unreach_timer)) | ||
179 | sctp_association_put(transport->asoc); | ||
174 | 180 | ||
175 | sctp_transport_put(transport); | 181 | sctp_transport_put(transport); |
176 | } | 182 | } |
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 220213e603db..df90f31d14bf 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
@@ -796,6 +796,16 @@ static int do_platform_entry(const char *filename, | |||
796 | return 1; | 796 | return 1; |
797 | } | 797 | } |
798 | 798 | ||
799 | /* Looks like: zorro:iN. */ | ||
800 | static int do_zorro_entry(const char *filename, struct zorro_device_id *id, | ||
801 | char *alias) | ||
802 | { | ||
803 | id->id = TO_NATIVE(id->id); | ||
804 | strcpy(alias, "zorro:"); | ||
805 | ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id); | ||
806 | return 1; | ||
807 | } | ||
808 | |||
799 | /* Ignore any prefix, eg. some architectures prepend _ */ | 809 | /* Ignore any prefix, eg. some architectures prepend _ */ |
800 | static inline int sym_is(const char *symbol, const char *name) | 810 | static inline int sym_is(const char *symbol, const char *name) |
801 | { | 811 | { |
@@ -943,6 +953,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, | |||
943 | do_table(symval, sym->st_size, | 953 | do_table(symval, sym->st_size, |
944 | sizeof(struct platform_device_id), "platform", | 954 | sizeof(struct platform_device_id), "platform", |
945 | do_platform_entry, mod); | 955 | do_platform_entry, mod); |
956 | else if (sym_is(symname, "__mod_zorro_device_table")) | ||
957 | do_table(symval, sym->st_size, | ||
958 | sizeof(struct zorro_device_id), "zorro", | ||
959 | do_zorro_entry, mod); | ||
946 | free(zeros); | 960 | free(zeros); |
947 | } | 961 | } |
948 | 962 | ||
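do_zorro_entry() above emits module aliases of the form "zorro:iN", which modprobe matches against the uevent strings generated for Zorro devices; the id is printed only when it is not ZORRO_WILDCARD. The sketch below reproduces the string that file2alias's ADD() macro builds for a specific id and for a wildcard; the wildcard constant, the example id, and the %08X width are assumptions for the demonstration rather than values taken from the kernel headers.

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for ZORRO_WILDCARD; the real value lives in <linux/zorro.h>. */
    #define WILDCARD 0u

    /* Simplified version of ADD(): print the field in hex when it is specific,
     * otherwise emit "*" so modprobe treats it as "match any". */
    static void add_field(char *alias, const char *prefix, int is_specific,
                          unsigned int value)
    {
        strcat(alias, prefix);
        if (is_specific)
            sprintf(alias + strlen(alias), "%08X", value);
        else
            strcat(alias, "*");
    }

    int main(void)
    {
        char alias[64];

        strcpy(alias, "zorro:");
        add_field(alias, "i", 1, 0x051102u);   /* hypothetical board id */
        printf("%s\n", alias);                 /* zorro:i00051102 */

        strcpy(alias, "zorro:");
        add_field(alias, "i", 0, WILDCARD);
        printf("%s\n", alias);                 /* zorro:i* */

        return 0;
    }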
diff --git a/security/min_addr.c b/security/min_addr.c index e86f297522bf..f728728f193b 100644 --- a/security/min_addr.c +++ b/security/min_addr.c | |||
@@ -33,7 +33,7 @@ int mmap_min_addr_handler(struct ctl_table *table, int write, | |||
33 | { | 33 | { |
34 | int ret; | 34 | int ret; |
35 | 35 | ||
36 | if (!capable(CAP_SYS_RAWIO)) | 36 | if (write && !capable(CAP_SYS_RAWIO)) |
37 | return -EPERM; | 37 | return -EPERM; |
38 | 38 | ||
39 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | 39 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
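The min_addr.c change narrows the CAP_SYS_RAWIO requirement in mmap_min_addr_handler() to writes only, so unprivileged readers of /proc/sys/vm/mmap_min_addr no longer get -EPERM while modification stays privileged. The shape — require privilege only on the mutating path — is easy to model in isolation; the capability check below is a stand-in, not the kernel's capable().

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    static unsigned long mmap_min_addr = 65536;

    /* Stand-in for capable(CAP_SYS_RAWIO). */
    static bool has_rawio_cap(void) { return false; }

    static int min_addr_handler(bool write, unsigned long *value)
    {
        /* Only writers need the capability; reads are always allowed. */
        if (write && !has_rawio_cap())
            return -EPERM;

        if (write)
            mmap_min_addr = *value;
        else
            *value = mmap_min_addr;
        return 0;
    }

    int main(void)
    {
        unsigned long v = 0;

        printf("read:  %d (value %lu)\n", min_addr_handler(false, &v), v);
        v = 4096;
        printf("write: %d (expect %d)\n", min_addr_handler(true, &v), -EPERM);
        return 0;
    }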
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 872887624030..20b5982c996b 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -36,6 +36,9 @@ | |||
36 | #include <sound/timer.h> | 36 | #include <sound/timer.h> |
37 | #include <sound/minors.h> | 37 | #include <sound/minors.h> |
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) | ||
40 | #include <dma-coherence.h> | ||
41 | #endif | ||
39 | 42 | ||
40 | /* | 43 | /* |
41 | * Compatibility | 44 | * Compatibility |
@@ -3184,6 +3187,10 @@ static int snd_pcm_default_mmap(struct snd_pcm_substream *substream, | |||
3184 | substream->runtime->dma_area, | 3187 | substream->runtime->dma_area, |
3185 | substream->runtime->dma_addr, | 3188 | substream->runtime->dma_addr, |
3186 | area->vm_end - area->vm_start); | 3189 | area->vm_end - area->vm_start); |
3190 | #elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) | ||
3191 | if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV && | ||
3192 | !plat_device_is_coherent(substream->dma_buffer.dev.dev)) | ||
3193 | area->vm_page_prot = pgprot_noncached(area->vm_page_prot); | ||
3187 | #endif /* ARCH_HAS_DMA_MMAP_COHERENT */ | 3194 | #endif /* ARCH_HAS_DMA_MMAP_COHERENT */ |
3188 | /* mmap with fault handler */ | 3195 | /* mmap with fault handler */ |
3189 | area->vm_ops = &snd_pcm_vm_ops_data_fault; | 3196 | area->vm_ops = &snd_pcm_vm_ops_data_fault; |
diff --git a/sound/drivers/pcsp/pcsp.h b/sound/drivers/pcsp/pcsp.h index 1e123077923d..4ff6c8cc5077 100644 --- a/sound/drivers/pcsp/pcsp.h +++ b/sound/drivers/pcsp/pcsp.h | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/i8253.h> | 16 | #include <asm/i8253.h> |
17 | #else | 17 | #else |
18 | #include <asm/8253pit.h> | 18 | #include <asm/8253pit.h> |
19 | static DEFINE_SPINLOCK(i8253_lock); | 19 | static DEFINE_RAW_SPINLOCK(i8253_lock); |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #define PCSP_SOUND_VERSION 0x400 /* read 4.00 */ | 22 | #define PCSP_SOUND_VERSION 0x400 /* read 4.00 */ |
diff --git a/sound/drivers/pcsp/pcsp_input.c b/sound/drivers/pcsp/pcsp_input.c index 0444cdeb4bec..b5e2b54c2604 100644 --- a/sound/drivers/pcsp/pcsp_input.c +++ b/sound/drivers/pcsp/pcsp_input.c | |||
@@ -21,7 +21,7 @@ static void pcspkr_do_sound(unsigned int count) | |||
21 | { | 21 | { |
22 | unsigned long flags; | 22 | unsigned long flags; |
23 | 23 | ||
24 | spin_lock_irqsave(&i8253_lock, flags); | 24 | raw_spin_lock_irqsave(&i8253_lock, flags); |
25 | 25 | ||
26 | if (count) { | 26 | if (count) { |
27 | /* set command for counter 2, 2 byte write */ | 27 | /* set command for counter 2, 2 byte write */ |
@@ -36,7 +36,7 @@ static void pcspkr_do_sound(unsigned int count) | |||
36 | outb(inb_p(0x61) & 0xFC, 0x61); | 36 | outb(inb_p(0x61) & 0xFC, 0x61); |
37 | } | 37 | } |
38 | 38 | ||
39 | spin_unlock_irqrestore(&i8253_lock, flags); | 39 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
40 | } | 40 | } |
41 | 41 | ||
42 | void pcspkr_stop_sound(void) | 42 | void pcspkr_stop_sound(void) |
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c index d77ffa9a9387..ce9e7d170c0d 100644 --- a/sound/drivers/pcsp/pcsp_lib.c +++ b/sound/drivers/pcsp/pcsp_lib.c | |||
@@ -66,7 +66,7 @@ static u64 pcsp_timer_update(struct snd_pcsp *chip) | |||
66 | timer_cnt = val * CUR_DIV() / 256; | 66 | timer_cnt = val * CUR_DIV() / 256; |
67 | 67 | ||
68 | if (timer_cnt && chip->enable) { | 68 | if (timer_cnt && chip->enable) { |
69 | spin_lock_irqsave(&i8253_lock, flags); | 69 | raw_spin_lock_irqsave(&i8253_lock, flags); |
70 | if (!nforce_wa) { | 70 | if (!nforce_wa) { |
71 | outb_p(chip->val61, 0x61); | 71 | outb_p(chip->val61, 0x61); |
72 | outb_p(timer_cnt, 0x42); | 72 | outb_p(timer_cnt, 0x42); |
@@ -75,7 +75,7 @@ static u64 pcsp_timer_update(struct snd_pcsp *chip) | |||
75 | outb(chip->val61 ^ 2, 0x61); | 75 | outb(chip->val61 ^ 2, 0x61); |
76 | chip->thalf = 1; | 76 | chip->thalf = 1; |
77 | } | 77 | } |
78 | spin_unlock_irqrestore(&i8253_lock, flags); | 78 | raw_spin_unlock_irqrestore(&i8253_lock, flags); |
79 | } | 79 | } |
80 | 80 | ||
81 | chip->ns_rem = PCSP_PERIOD_NS(); | 81 | chip->ns_rem = PCSP_PERIOD_NS(); |
@@ -159,10 +159,10 @@ static int pcsp_start_playing(struct snd_pcsp *chip) | |||
159 | return -EIO; | 159 | return -EIO; |
160 | } | 160 | } |
161 | 161 | ||
162 | spin_lock(&i8253_lock); | 162 | raw_spin_lock(&i8253_lock); |
163 | chip->val61 = inb(0x61) | 0x03; | 163 | chip->val61 = inb(0x61) | 0x03; |
164 | outb_p(0x92, 0x43); /* binary, mode 1, LSB only, ch 2 */ | 164 | outb_p(0x92, 0x43); /* binary, mode 1, LSB only, ch 2 */ |
165 | spin_unlock(&i8253_lock); | 165 | raw_spin_unlock(&i8253_lock); |
166 | atomic_set(&chip->timer_active, 1); | 166 | atomic_set(&chip->timer_active, 1); |
167 | chip->thalf = 0; | 167 | chip->thalf = 0; |
168 | 168 | ||
@@ -179,11 +179,11 @@ static void pcsp_stop_playing(struct snd_pcsp *chip) | |||
179 | return; | 179 | return; |
180 | 180 | ||
181 | atomic_set(&chip->timer_active, 0); | 181 | atomic_set(&chip->timer_active, 0); |
182 | spin_lock(&i8253_lock); | 182 | raw_spin_lock(&i8253_lock); |
183 | /* restore the timer */ | 183 | /* restore the timer */ |
184 | outb_p(0xb6, 0x43); /* binary, mode 3, LSB/MSB, ch 2 */ | 184 | outb_p(0xb6, 0x43); /* binary, mode 3, LSB/MSB, ch 2 */ |
185 | outb(chip->val61 & 0xFC, 0x61); | 185 | outb(chip->val61 & 0xFC, 0x61); |
186 | spin_unlock(&i8253_lock); | 186 | raw_spin_unlock(&i8253_lock); |
187 | } | 187 | } |
188 | 188 | ||
189 | /* | 189 | /* |
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c index bb14e4c67e89..87910e992133 100644 --- a/sound/oss/dmasound/dmasound_paula.c +++ b/sound/oss/dmasound/dmasound_paula.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/soundcard.h> | 22 | #include <linux/soundcard.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/platform_device.h> | ||
24 | 25 | ||
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
@@ -710,31 +711,41 @@ static MACHINE machAmiga = { | |||
710 | /*** Config & Setup **********************************************************/ | 711 | /*** Config & Setup **********************************************************/ |
711 | 712 | ||
712 | 713 | ||
713 | static int __init dmasound_paula_init(void) | 714 | static int __init amiga_audio_probe(struct platform_device *pdev) |
714 | { | 715 | { |
715 | int err; | 716 | dmasound.mach = machAmiga; |
716 | 717 | dmasound.mach.default_hard = def_hard ; | |
717 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(AMI_AUDIO)) { | 718 | dmasound.mach.default_soft = def_soft ; |
718 | if (!request_mem_region(CUSTOM_PHYSADDR+0xa0, 0x40, | 719 | return dmasound_init(); |
719 | "dmasound [Paula]")) | ||
720 | return -EBUSY; | ||
721 | dmasound.mach = machAmiga; | ||
722 | dmasound.mach.default_hard = def_hard ; | ||
723 | dmasound.mach.default_soft = def_soft ; | ||
724 | err = dmasound_init(); | ||
725 | if (err) | ||
726 | release_mem_region(CUSTOM_PHYSADDR+0xa0, 0x40); | ||
727 | return err; | ||
728 | } else | ||
729 | return -ENODEV; | ||
730 | } | 720 | } |
731 | 721 | ||
732 | static void __exit dmasound_paula_cleanup(void) | 722 | static int __exit amiga_audio_remove(struct platform_device *pdev) |
733 | { | 723 | { |
734 | dmasound_deinit(); | 724 | dmasound_deinit(); |
735 | release_mem_region(CUSTOM_PHYSADDR+0xa0, 0x40); | 725 | return 0; |
726 | } | ||
727 | |||
728 | static struct platform_driver amiga_audio_driver = { | ||
729 | .remove = __exit_p(amiga_audio_remove), | ||
730 | .driver = { | ||
731 | .name = "amiga-audio", | ||
732 | .owner = THIS_MODULE, | ||
733 | }, | ||
734 | }; | ||
735 | |||
736 | static int __init amiga_audio_init(void) | ||
737 | { | ||
738 | return platform_driver_probe(&amiga_audio_driver, amiga_audio_probe); | ||
736 | } | 739 | } |
737 | 740 | ||
738 | module_init(dmasound_paula_init); | 741 | module_init(amiga_audio_init); |
739 | module_exit(dmasound_paula_cleanup); | 742 | |
743 | static void __exit amiga_audio_exit(void) | ||
744 | { | ||
745 | platform_driver_unregister(&amiga_audio_driver); | ||
746 | } | ||
747 | |||
748 | module_exit(amiga_audio_exit); | ||
749 | |||
740 | MODULE_LICENSE("GPL"); | 750 | MODULE_LICENSE("GPL"); |
751 | MODULE_ALIAS("platform:amiga-audio"); | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 56e52071c769..feabb44c7ca4 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -1197,9 +1197,10 @@ static int patch_cxt5045(struct hda_codec *codec) | |||
1197 | case 0x103c: | 1197 | case 0x103c: |
1198 | case 0x1631: | 1198 | case 0x1631: |
1199 | case 0x1734: | 1199 | case 0x1734: |
1200 | /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad | 1200 | case 0x17aa: |
1201 | * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB | 1201 | /* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have |
1202 | * (originally it has 0x2b steps with 0dB offset 0x14) | 1202 | * really bad sound over 0dB on NID 0x17. Fix max PCM level to |
1203 | * 0 dB (originally it has 0x2b steps with 0dB offset 0x14) | ||
1203 | */ | 1204 | */ |
1204 | snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT, | 1205 | snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT, |
1205 | (0x14 << AC_AMPCAP_OFFSET_SHIFT) | | 1206 | (0x14 << AC_AMPCAP_OFFSET_SHIFT) | |
@@ -2846,6 +2847,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
2846 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 2847 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
2847 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), | 2848 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
2848 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), | 2849 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), |
2850 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), | ||
2849 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), | 2851 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), |
2850 | {} | 2852 | {} |
2851 | }; | 2853 | }; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7404dba16f83..886d8e46bb37 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -17871,7 +17871,6 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = { | |||
17871 | ALC662_3ST_6ch_DIG), | 17871 | ALC662_3ST_6ch_DIG), |
17872 | SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x", | 17872 | SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x", |
17873 | ALC663_ASUS_H13), | 17873 | ALC663_ASUS_H13), |
17874 | SND_PCI_QUIRK(0x8086, 0xd604, "Intel mobo", ALC662_3ST_2ch_DIG), | ||
17875 | {} | 17874 | {} |
17876 | }; | 17875 | }; |
17877 | 17876 | ||
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 7fb7d017a347..a0e06d82da1f 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -104,6 +104,7 @@ enum { | |||
104 | STAC_DELL_M4_2, | 104 | STAC_DELL_M4_2, |
105 | STAC_DELL_M4_3, | 105 | STAC_DELL_M4_3, |
106 | STAC_HP_M4, | 106 | STAC_HP_M4, |
107 | STAC_HP_DV4, | ||
107 | STAC_HP_DV5, | 108 | STAC_HP_DV5, |
108 | STAC_HP_HDX, | 109 | STAC_HP_HDX, |
109 | STAC_HP_DV4_1222NR, | 110 | STAC_HP_DV4_1222NR, |
@@ -1544,11 +1545,9 @@ static unsigned int alienware_m17x_pin_configs[13] = { | |||
1544 | 0x904601b0, | 1545 | 0x904601b0, |
1545 | }; | 1546 | }; |
1546 | 1547 | ||
1547 | static unsigned int intel_dg45id_pin_configs[14] = { | 1548 | static unsigned int intel_dg45id_pin_configs[13] = { |
1548 | 0x02214230, 0x02A19240, 0x01013214, 0x01014210, | 1549 | 0x02214230, 0x02A19240, 0x01013214, 0x01014210, |
1549 | 0x01A19250, 0x01011212, 0x01016211, 0x40f000f0, | 1550 | 0x01A19250, 0x01011212, 0x01016211 |
1550 | 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x014510A0, | ||
1551 | 0x074510B0, 0x40f000f0 | ||
1552 | }; | 1551 | }; |
1553 | 1552 | ||
1554 | static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = { | 1553 | static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = { |
@@ -1693,6 +1692,7 @@ static unsigned int *stac92hd71bxx_brd_tbl[STAC_92HD71BXX_MODELS] = { | |||
1693 | [STAC_DELL_M4_2] = dell_m4_2_pin_configs, | 1692 | [STAC_DELL_M4_2] = dell_m4_2_pin_configs, |
1694 | [STAC_DELL_M4_3] = dell_m4_3_pin_configs, | 1693 | [STAC_DELL_M4_3] = dell_m4_3_pin_configs, |
1695 | [STAC_HP_M4] = NULL, | 1694 | [STAC_HP_M4] = NULL, |
1695 | [STAC_HP_DV4] = NULL, | ||
1696 | [STAC_HP_DV5] = NULL, | 1696 | [STAC_HP_DV5] = NULL, |
1697 | [STAC_HP_HDX] = NULL, | 1697 | [STAC_HP_HDX] = NULL, |
1698 | [STAC_HP_DV4_1222NR] = NULL, | 1698 | [STAC_HP_DV4_1222NR] = NULL, |
@@ -1705,6 +1705,7 @@ static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = { | |||
1705 | [STAC_DELL_M4_2] = "dell-m4-2", | 1705 | [STAC_DELL_M4_2] = "dell-m4-2", |
1706 | [STAC_DELL_M4_3] = "dell-m4-3", | 1706 | [STAC_DELL_M4_3] = "dell-m4-3", |
1707 | [STAC_HP_M4] = "hp-m4", | 1707 | [STAC_HP_M4] = "hp-m4", |
1708 | [STAC_HP_DV4] = "hp-dv4", | ||
1708 | [STAC_HP_DV5] = "hp-dv5", | 1709 | [STAC_HP_DV5] = "hp-dv5", |
1709 | [STAC_HP_HDX] = "hp-hdx", | 1710 | [STAC_HP_HDX] = "hp-hdx", |
1710 | [STAC_HP_DV4_1222NR] = "hp-dv4-1222nr", | 1711 | [STAC_HP_DV4_1222NR] = "hp-dv4-1222nr", |
@@ -1723,7 +1724,7 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { | |||
1723 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3080, | 1724 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3080, |
1724 | "HP", STAC_HP_DV5), | 1725 | "HP", STAC_HP_DV5), |
1725 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x30f0, | 1726 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x30f0, |
1726 | "HP dv4-7", STAC_HP_DV5), | 1727 | "HP dv4-7", STAC_HP_DV4), |
1727 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3600, | 1728 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3600, |
1728 | "HP dv4-7", STAC_HP_DV5), | 1729 | "HP dv4-7", STAC_HP_DV5), |
1729 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3610, | 1730 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3610, |
@@ -4768,6 +4769,9 @@ static void set_hp_led_gpio(struct hda_codec *codec) | |||
4768 | struct sigmatel_spec *spec = codec->spec; | 4769 | struct sigmatel_spec *spec = codec->spec; |
4769 | unsigned int gpio; | 4770 | unsigned int gpio; |
4770 | 4771 | ||
4772 | if (spec->gpio_led) | ||
4773 | return; | ||
4774 | |||
4771 | gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP); | 4775 | gpio = snd_hda_param_read(codec, codec->afg, AC_PAR_GPIO_CAP); |
4772 | gpio &= AC_GPIO_IO_COUNT; | 4776 | gpio &= AC_GPIO_IO_COUNT; |
4773 | if (gpio > 3) | 4777 | if (gpio > 3) |
@@ -5677,6 +5681,9 @@ again: | |||
5677 | spec->num_smuxes = 1; | 5681 | spec->num_smuxes = 1; |
5678 | spec->num_dmuxes = 1; | 5682 | spec->num_dmuxes = 1; |
5679 | /* fallthrough */ | 5683 | /* fallthrough */ |
5684 | case STAC_HP_DV4: | ||
5685 | spec->gpio_led = 0x01; | ||
5686 | /* fallthrough */ | ||
5680 | case STAC_HP_DV5: | 5687 | case STAC_HP_DV5: |
5681 | snd_hda_codec_set_pincfg(codec, 0x0d, 0x90170010); | 5688 | snd_hda_codec_set_pincfg(codec, 0x0d, 0x90170010); |
5682 | stac92xx_auto_set_pinctl(codec, 0x0d, AC_PINCTL_OUT_EN); | 5689 | stac92xx_auto_set_pinctl(codec, 0x0d, AC_PINCTL_OUT_EN); |
@@ -5690,6 +5697,7 @@ again: | |||
5690 | spec->num_dmics = 1; | 5697 | spec->num_dmics = 1; |
5691 | spec->num_dmuxes = 1; | 5698 | spec->num_dmuxes = 1; |
5692 | spec->num_smuxes = 1; | 5699 | spec->num_smuxes = 1; |
5700 | spec->gpio_led = 0x08; | ||
5693 | break; | 5701 | break; |
5694 | } | 5702 | } |
5695 | 5703 | ||
@@ -5746,7 +5754,8 @@ again: | |||
5746 | } | 5754 | } |
5747 | 5755 | ||
5748 | /* enable bass on HP dv7 */ | 5756 | /* enable bass on HP dv7 */ |
5749 | if (spec->board_config == STAC_HP_DV5) { | 5757 | if (spec->board_config == STAC_HP_DV4 || |
5758 | spec->board_config == STAC_HP_DV5) { | ||
5750 | unsigned int cap; | 5759 | unsigned int cap; |
5751 | cap = snd_hda_param_read(codec, 0x1, AC_PAR_GPIO_CAP); | 5760 | cap = snd_hda_param_read(codec, 0x1, AC_PAR_GPIO_CAP); |
5752 | cap &= AC_GPIO_IO_COUNT; | 5761 | cap &= AC_GPIO_IO_COUNT; |
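The sigmatel hunks add a STAC_HP_DV4 board entry whose switch case sets spec->gpio_led = 0x01 and then deliberately falls through to the STAC_HP_DV5 pin setup, while set_hp_led_gpio() now returns early when gpio_led was already fixed by the board quirk, so auto-detection no longer overwrites it. That "set one field, then share the rest of the setup via fallthrough" pattern looks like this in isolation; the field names mirror the driver but the code is only a sketch.

    #include <stdio.h>

    enum board { BOARD_HP_M4, BOARD_HP_DV4, BOARD_HP_DV5 };

    struct spec {
        unsigned int gpio_led;
        int dv5_pins_applied;
    };

    /* Auto-detection must not override a value fixed up by the board switch. */
    static void autodetect_led_gpio(struct spec *s)
    {
        if (s->gpio_led)
            return;             /* already set by the quirk, keep it */
        s->gpio_led = 0x08;     /* pretend probing found another GPIO */
    }

    static void apply_board(struct spec *s, enum board b)
    {
        switch (b) {
        case BOARD_HP_DV4:
            s->gpio_led = 0x01;
            /* fall through: dv4 shares the rest of the dv5 setup */
        case BOARD_HP_DV5:
            s->dv5_pins_applied = 1;
            break;
        default:
            break;
        }
        autodetect_led_gpio(s);
    }

    int main(void)
    {
        struct spec s = { 0, 0 };

        apply_board(&s, BOARD_HP_DV4);
        printf("gpio_led=0x%02x dv5_pins=%d\n", s.gpio_led, s.dv5_pins_applied);
        return 0;
    }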
diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c index 3e1c20ae2f1c..726fd4b92e19 100644 --- a/sound/pci/ice1712/maya44.c +++ b/sound/pci/ice1712/maya44.c | |||
@@ -347,7 +347,7 @@ static int maya_gpio_sw_put(struct snd_kcontrol *kcontrol, | |||
347 | 347 | ||
348 | /* known working input slots (0-4) */ | 348 | /* known working input slots (0-4) */ |
349 | #define MAYA_LINE_IN 1 /* in-2 */ | 349 | #define MAYA_LINE_IN 1 /* in-2 */ |
350 | #define MAYA_MIC_IN 4 /* in-5 */ | 350 | #define MAYA_MIC_IN 3 /* in-4 */ |
351 | 351 | ||
352 | static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line) | 352 | static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line) |
353 | { | 353 | { |
@@ -393,8 +393,8 @@ static int maya_rec_src_put(struct snd_kcontrol *kcontrol, | |||
393 | int changed; | 393 | int changed; |
394 | 394 | ||
395 | mutex_lock(&chip->mutex); | 395 | mutex_lock(&chip->mutex); |
396 | changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY, | 396 | changed = maya_set_gpio_bits(chip->ice, 1 << GPIO_MIC_RELAY, |
397 | sel ? GPIO_MIC_RELAY : 0); | 397 | sel ? (1 << GPIO_MIC_RELAY) : 0); |
398 | wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN); | 398 | wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN); |
399 | mutex_unlock(&chip->mutex); | 399 | mutex_unlock(&chip->mutex); |
400 | return changed; | 400 | return changed; |
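The maya44 fix is a classic bit-index-versus-bit-mask bug: maya_set_gpio_bits() expects a mask, but the old code passed GPIO_MIC_RELAY (the bit number) directly, so the wrong GPIO line was toggled; the fix shifts it into a mask with 1 << GPIO_MIC_RELAY. The difference is easy to see with a tiny read-modify-write helper; GPIO_MIC_RELAY's value below is assumed purely for the demonstration.

    #include <stdio.h>

    #define GPIO_MIC_RELAY 4    /* assumed bit number, for illustration only */

    /* Clear 'mask' bits, then set those selected by 'bits' — a mask-based interface. */
    static unsigned int set_gpio_bits(unsigned int data, unsigned int mask,
                                      unsigned int bits)
    {
        return (data & ~mask) | (bits & mask);
    }

    int main(void)
    {
        unsigned int data = 0;

        /* Buggy call: passes the bit *number* where a *mask* is expected,
         * so it manipulates bit 2 (0x04) instead of bit 4 (0x10). */
        printf("bug: 0x%02x\n", set_gpio_bits(data, GPIO_MIC_RELAY, GPIO_MIC_RELAY));

        /* Fixed call: shift the bit number into a mask first. */
        printf("fix: 0x%02x\n",
               set_gpio_bits(data, 1u << GPIO_MIC_RELAY, 1u << GPIO_MIC_RELAY));
        return 0;
    }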
diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c index 16c226bfcd2b..7c4986b27f2b 100644 --- a/sound/pci/oxygen/xonar_cs43xx.c +++ b/sound/pci/oxygen/xonar_cs43xx.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <sound/pcm_params.h> | 56 | #include <sound/pcm_params.h> |
57 | #include <sound/tlv.h> | 57 | #include <sound/tlv.h> |
58 | #include "xonar.h" | 58 | #include "xonar.h" |
59 | #include "cm9780.h" | ||
59 | #include "cs4398.h" | 60 | #include "cs4398.h" |
60 | #include "cs4362a.h" | 61 | #include "cs4362a.h" |
61 | 62 | ||
@@ -172,6 +173,8 @@ static void xonar_d1_init(struct oxygen *chip) | |||
172 | oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, | 173 | oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, |
173 | GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE); | 174 | GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE); |
174 | 175 | ||
176 | oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC); | ||
177 | |||
175 | xonar_init_cs53x1(chip); | 178 | xonar_init_cs53x1(chip); |
176 | xonar_enable_output(chip); | 179 | xonar_enable_output(chip); |
177 | 180 | ||
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 069f261b225c..73a02223c629 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -1937,7 +1937,7 @@ void *raw_field_ptr(struct event *event, const char *name, void *data) | |||
1937 | if (!field) | 1937 | if (!field) |
1938 | return NULL; | 1938 | return NULL; |
1939 | 1939 | ||
1940 | if (field->flags & FIELD_IS_STRING) { | 1940 | if (field->flags & FIELD_IS_DYNAMIC) { |
1941 | int offset; | 1941 | int offset; |
1942 | 1942 | ||
1943 | offset = *(int *)(data + field->offset); | 1943 | offset = *(int *)(data + field->offset); |
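The raw_field_ptr() fix keys the extra indirection off FIELD_IS_DYNAMIC rather than FIELD_IS_STRING: for dynamic-array fields the record does not hold the data itself but a 32-bit __data_loc word, and only that case needs the dereference through the stored offset. A small decoder for that packed word, under the assumption that the layout is "length << 16 | offset" as in ftrace's __data_loc convention, looks like this:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Decode a __data_loc word: low 16 bits = offset into the record,
     * high 16 bits = length of the payload. */
    static const void *dynamic_field_ptr(const void *record, uint32_t data_loc,
                                         uint16_t *len)
    {
        uint16_t offset = data_loc & 0xffff;

        if (len)
            *len = data_loc >> 16;
        return (const char *)record + offset;
    }

    int main(void)
    {
        /* A fake 32-byte record with a string payload at offset 16. */
        char record[32] = { 0 };
        uint32_t data_loc = (6u << 16) | 16u;   /* length 6, offset 16 */
        uint16_t len;

        memcpy(record + 16, "hello", 6);
        printf("payload: %s (len %u)\n",
               (const char *)dynamic_field_ptr(record, data_loc, &len), len);
        return 0;
    }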
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 03a5eb22da2b..7c79c1d76d0c 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -197,7 +197,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | |||
197 | union kvm_ioapic_redirect_entry entry; | 197 | union kvm_ioapic_redirect_entry entry; |
198 | int ret = 1; | 198 | int ret = 1; |
199 | 199 | ||
200 | mutex_lock(&ioapic->lock); | 200 | spin_lock(&ioapic->lock); |
201 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { | 201 | if (irq >= 0 && irq < IOAPIC_NUM_PINS) { |
202 | entry = ioapic->redirtbl[irq]; | 202 | entry = ioapic->redirtbl[irq]; |
203 | level ^= entry.fields.polarity; | 203 | level ^= entry.fields.polarity; |
@@ -214,7 +214,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | |||
214 | } | 214 | } |
215 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); | 215 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); |
216 | } | 216 | } |
217 | mutex_unlock(&ioapic->lock); | 217 | spin_unlock(&ioapic->lock); |
218 | 218 | ||
219 | return ret; | 219 | return ret; |
220 | } | 220 | } |
@@ -238,9 +238,9 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, | |||
238 | * is dropped it will be put into irr and will be delivered | 238 | * is dropped it will be put into irr and will be delivered |
239 | * after ack notifier returns. | 239 | * after ack notifier returns. |
240 | */ | 240 | */ |
241 | mutex_unlock(&ioapic->lock); | 241 | spin_unlock(&ioapic->lock); |
242 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | 242 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); |
243 | mutex_lock(&ioapic->lock); | 243 | spin_lock(&ioapic->lock); |
244 | 244 | ||
245 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | 245 | if (trigger_mode != IOAPIC_LEVEL_TRIG) |
246 | continue; | 246 | continue; |
@@ -259,9 +259,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) | |||
259 | smp_rmb(); | 259 | smp_rmb(); |
260 | if (!test_bit(vector, ioapic->handled_vectors)) | 260 | if (!test_bit(vector, ioapic->handled_vectors)) |
261 | return; | 261 | return; |
262 | mutex_lock(&ioapic->lock); | 262 | spin_lock(&ioapic->lock); |
263 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); | 263 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); |
264 | mutex_unlock(&ioapic->lock); | 264 | spin_unlock(&ioapic->lock); |
265 | } | 265 | } |
266 | 266 | ||
267 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) | 267 | static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) |
@@ -287,7 +287,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | |||
287 | ASSERT(!(addr & 0xf)); /* check alignment */ | 287 | ASSERT(!(addr & 0xf)); /* check alignment */ |
288 | 288 | ||
289 | addr &= 0xff; | 289 | addr &= 0xff; |
290 | mutex_lock(&ioapic->lock); | 290 | spin_lock(&ioapic->lock); |
291 | switch (addr) { | 291 | switch (addr) { |
292 | case IOAPIC_REG_SELECT: | 292 | case IOAPIC_REG_SELECT: |
293 | result = ioapic->ioregsel; | 293 | result = ioapic->ioregsel; |
@@ -301,7 +301,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | |||
301 | result = 0; | 301 | result = 0; |
302 | break; | 302 | break; |
303 | } | 303 | } |
304 | mutex_unlock(&ioapic->lock); | 304 | spin_unlock(&ioapic->lock); |
305 | 305 | ||
306 | switch (len) { | 306 | switch (len) { |
307 | case 8: | 307 | case 8: |
@@ -338,7 +338,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | |||
338 | } | 338 | } |
339 | 339 | ||
340 | addr &= 0xff; | 340 | addr &= 0xff; |
341 | mutex_lock(&ioapic->lock); | 341 | spin_lock(&ioapic->lock); |
342 | switch (addr) { | 342 | switch (addr) { |
343 | case IOAPIC_REG_SELECT: | 343 | case IOAPIC_REG_SELECT: |
344 | ioapic->ioregsel = data; | 344 | ioapic->ioregsel = data; |
@@ -356,7 +356,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | |||
356 | default: | 356 | default: |
357 | break; | 357 | break; |
358 | } | 358 | } |
359 | mutex_unlock(&ioapic->lock); | 359 | spin_unlock(&ioapic->lock); |
360 | return 0; | 360 | return 0; |
361 | } | 361 | } |
362 | 362 | ||
@@ -386,7 +386,7 @@ int kvm_ioapic_init(struct kvm *kvm) | |||
386 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); | 386 | ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); |
387 | if (!ioapic) | 387 | if (!ioapic) |
388 | return -ENOMEM; | 388 | return -ENOMEM; |
389 | mutex_init(&ioapic->lock); | 389 | spin_lock_init(&ioapic->lock); |
390 | kvm->arch.vioapic = ioapic; | 390 | kvm->arch.vioapic = ioapic; |
391 | kvm_ioapic_reset(ioapic); | 391 | kvm_ioapic_reset(ioapic); |
392 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); | 392 | kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); |
@@ -419,9 +419,9 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | |||
419 | if (!ioapic) | 419 | if (!ioapic) |
420 | return -EINVAL; | 420 | return -EINVAL; |
421 | 421 | ||
422 | mutex_lock(&ioapic->lock); | 422 | spin_lock(&ioapic->lock); |
423 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); | 423 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); |
424 | mutex_unlock(&ioapic->lock); | 424 | spin_unlock(&ioapic->lock); |
425 | return 0; | 425 | return 0; |
426 | } | 426 | } |
427 | 427 | ||
@@ -431,9 +431,9 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | |||
431 | if (!ioapic) | 431 | if (!ioapic) |
432 | return -EINVAL; | 432 | return -EINVAL; |
433 | 433 | ||
434 | mutex_lock(&ioapic->lock); | 434 | spin_lock(&ioapic->lock); |
435 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); | 435 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); |
436 | update_handled_vectors(ioapic); | 436 | update_handled_vectors(ioapic); |
437 | mutex_unlock(&ioapic->lock); | 437 | spin_unlock(&ioapic->lock); |
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h index 8a751b78a430..0b190c34ccc3 100644 --- a/virt/kvm/ioapic.h +++ b/virt/kvm/ioapic.h | |||
@@ -45,7 +45,7 @@ struct kvm_ioapic { | |||
45 | struct kvm_io_device dev; | 45 | struct kvm_io_device dev; |
46 | struct kvm *kvm; | 46 | struct kvm *kvm; |
47 | void (*ack_notifier)(void *opaque, int irq); | 47 | void (*ack_notifier)(void *opaque, int irq); |
48 | struct mutex lock; | 48 | spinlock_t lock; |
49 | DECLARE_BITMAP(handled_vectors, 256); | 49 | DECLARE_BITMAP(handled_vectors, 256); |
50 | }; | 50 | }; |
51 | 51 | ||
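The ioapic hunks convert the per-ioapic mutex into a spinlock so the injection and EOI paths no longer take a sleeping lock, and they keep the existing trick of dropping the lock around kvm_notify_acked_irq(), which may call back into ioapic code, before re-taking it. The "unlock around a callback that may re-enter us" structure is worth seeing on its own; the pthread sketch below is only an analogy for that structure, not for kernel locking rules (a kernel spinlock additionally disables preemption and must not be held across anything that sleeps).

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int state;

    static void ack_one(int pin);

    /* Callback that may re-enter the locked subsystem, like an irq ack notifier. */
    static void notify_acked(int pin)
    {
        if (pin == 0)
            ack_one(1);    /* re-entry: would self-deadlock if we still held the lock */
    }

    static void ack_one(int pin)
    {
        pthread_mutex_lock(&lock);
        state |= 1 << pin;

        /* Drop the lock around the callback, as the EOI path above does,
         * then re-take it before touching shared state again. */
        pthread_mutex_unlock(&lock);
        notify_acked(pin);
        pthread_mutex_lock(&lock);

        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        ack_one(0);
        printf("acked pins mask: 0x%x\n", state);
        return 0;
    }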
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 80fd3ad3b2de..11692b9e8830 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c | |||
@@ -32,12 +32,30 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm); | |||
32 | static void kvm_iommu_put_pages(struct kvm *kvm, | 32 | static void kvm_iommu_put_pages(struct kvm *kvm, |
33 | gfn_t base_gfn, unsigned long npages); | 33 | gfn_t base_gfn, unsigned long npages); |
34 | 34 | ||
35 | static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot, | ||
36 | gfn_t gfn, unsigned long size) | ||
37 | { | ||
38 | gfn_t end_gfn; | ||
39 | pfn_t pfn; | ||
40 | |||
41 | pfn = gfn_to_pfn_memslot(kvm, slot, gfn); | ||
42 | end_gfn = gfn + (size >> PAGE_SHIFT); | ||
43 | gfn += 1; | ||
44 | |||
45 | if (is_error_pfn(pfn)) | ||
46 | return pfn; | ||
47 | |||
48 | while (gfn < end_gfn) | ||
49 | gfn_to_pfn_memslot(kvm, slot, gfn++); | ||
50 | |||
51 | return pfn; | ||
52 | } | ||
53 | |||
35 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) | 54 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) |
36 | { | 55 | { |
37 | gfn_t gfn = slot->base_gfn; | 56 | gfn_t gfn, end_gfn; |
38 | unsigned long npages = slot->npages; | ||
39 | pfn_t pfn; | 57 | pfn_t pfn; |
40 | int i, r = 0; | 58 | int r = 0; |
41 | struct iommu_domain *domain = kvm->arch.iommu_domain; | 59 | struct iommu_domain *domain = kvm->arch.iommu_domain; |
42 | int flags; | 60 | int flags; |
43 | 61 | ||
@@ -45,31 +63,62 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) | |||
45 | if (!domain) | 63 | if (!domain) |
46 | return 0; | 64 | return 0; |
47 | 65 | ||
66 | gfn = slot->base_gfn; | ||
67 | end_gfn = gfn + slot->npages; | ||
68 | |||
48 | flags = IOMMU_READ | IOMMU_WRITE; | 69 | flags = IOMMU_READ | IOMMU_WRITE; |
49 | if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY) | 70 | if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY) |
50 | flags |= IOMMU_CACHE; | 71 | flags |= IOMMU_CACHE; |
51 | 72 | ||
52 | for (i = 0; i < npages; i++) { | 73 | |
53 | /* check if already mapped */ | 74 | while (gfn < end_gfn) { |
54 | if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) | 75 | unsigned long page_size; |
76 | |||
77 | /* Check if already mapped */ | ||
78 | if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { | ||
79 | gfn += 1; | ||
80 | continue; | ||
81 | } | ||
82 | |||
83 | /* Get the page size we could use to map */ | ||
84 | page_size = kvm_host_page_size(kvm, gfn); | ||
85 | |||
86 | /* Make sure the page_size does not exceed the memslot */ | ||
87 | while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) | ||
88 | page_size >>= 1; | ||
89 | |||
90 | /* Make sure gfn is aligned to the page size we want to map */ | ||
91 | while ((gfn << PAGE_SHIFT) & (page_size - 1)) | ||
92 | page_size >>= 1; | ||
93 | |||
94 | /* | ||
95 | * Pin all pages we are about to map in memory. This is | ||
96 | * important because we unmap and unpin in 4kb steps later. | ||
97 | */ | ||
98 | pfn = kvm_pin_pages(kvm, slot, gfn, page_size); | ||
99 | if (is_error_pfn(pfn)) { | ||
100 | gfn += 1; | ||
55 | continue; | 101 | continue; |
102 | } | ||
56 | 103 | ||
57 | pfn = gfn_to_pfn_memslot(kvm, slot, gfn); | 104 | /* Map into IO address space */ |
58 | r = iommu_map_range(domain, | 105 | r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), |
59 | gfn_to_gpa(gfn), | 106 | get_order(page_size), flags); |
60 | pfn_to_hpa(pfn), | ||
61 | PAGE_SIZE, flags); | ||
62 | if (r) { | 107 | if (r) { |
63 | printk(KERN_ERR "kvm_iommu_map_address:" | 108 | printk(KERN_ERR "kvm_iommu_map_address:" |
64 | "iommu failed to map pfn=%lx\n", pfn); | 109 | "iommu failed to map pfn=%lx\n", pfn); |
65 | goto unmap_pages; | 110 | goto unmap_pages; |
66 | } | 111 | } |
67 | gfn++; | 112 | |
113 | gfn += page_size >> PAGE_SHIFT; | ||
114 | |||
115 | |||
68 | } | 116 | } |
117 | |||
69 | return 0; | 118 | return 0; |
70 | 119 | ||
71 | unmap_pages: | 120 | unmap_pages: |
72 | kvm_iommu_put_pages(kvm, slot->base_gfn, i); | 121 | kvm_iommu_put_pages(kvm, slot->base_gfn, gfn); |
73 | return r; | 122 | return r; |
74 | } | 123 | } |
75 | 124 | ||
@@ -189,27 +238,47 @@ out_unmap: | |||
189 | return r; | 238 | return r; |
190 | } | 239 | } |
191 | 240 | ||
241 | static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) | ||
242 | { | ||
243 | unsigned long i; | ||
244 | |||
245 | for (i = 0; i < npages; ++i) | ||
246 | kvm_release_pfn_clean(pfn + i); | ||
247 | } | ||
248 | |||
192 | static void kvm_iommu_put_pages(struct kvm *kvm, | 249 | static void kvm_iommu_put_pages(struct kvm *kvm, |
193 | gfn_t base_gfn, unsigned long npages) | 250 | gfn_t base_gfn, unsigned long npages) |
194 | { | 251 | { |
195 | gfn_t gfn = base_gfn; | 252 | struct iommu_domain *domain; |
253 | gfn_t end_gfn, gfn; | ||
196 | pfn_t pfn; | 254 | pfn_t pfn; |
197 | struct iommu_domain *domain = kvm->arch.iommu_domain; | ||
198 | unsigned long i; | ||
199 | u64 phys; | 255 | u64 phys; |
200 | 256 | ||
257 | domain = kvm->arch.iommu_domain; | ||
258 | end_gfn = base_gfn + npages; | ||
259 | gfn = base_gfn; | ||
260 | |||
201 | /* check if iommu exists and in use */ | 261 | /* check if iommu exists and in use */ |
202 | if (!domain) | 262 | if (!domain) |
203 | return; | 263 | return; |
204 | 264 | ||
205 | for (i = 0; i < npages; i++) { | 265 | while (gfn < end_gfn) { |
266 | unsigned long unmap_pages; | ||
267 | int order; | ||
268 | |||
269 | /* Get physical address */ | ||
206 | phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); | 270 | phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); |
207 | pfn = phys >> PAGE_SHIFT; | 271 | pfn = phys >> PAGE_SHIFT; |
208 | kvm_release_pfn_clean(pfn); | 272 | |
209 | gfn++; | 273 | /* Unmap address from IO address space */ |
210 | } | 274 | order = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); |
275 | unmap_pages = 1ULL << order; | ||
211 | 276 | ||
212 | iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages); | 277 | /* Unpin all pages we just unmapped to not leak any memory */ |
278 | kvm_unpin_pages(kvm, pfn, unmap_pages); | ||
279 | |||
280 | gfn += unmap_pages; | ||
281 | } | ||
213 | } | 282 | } |
214 | 283 | ||
215 | static int kvm_iommu_unmap_memslots(struct kvm *kvm) | 284 | static int kvm_iommu_unmap_memslots(struct kvm *kvm) |
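The rewritten kvm_iommu_map_pages() maps in the largest IOMMU page size available, but first clamps that size so the mapping neither spills past the end of the memslot nor starts at a gfn that is not aligned to it; kvm_iommu_put_pages() then lets iommu_unmap() report the order it actually tore down and unpins that many 4 KiB pages. The two shrink loops are pure arithmetic and easy to check standalone (PAGE_SHIFT of 12 assumed below).

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Shrink 'page_size' until the mapping fits inside the slot and 'gfn' is
     * aligned to it — the same two while loops as in kvm_iommu_map_pages(). */
    static unsigned long clamp_page_size(unsigned long gfn, unsigned long end_gfn,
                                         unsigned long page_size)
    {
        while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
            page_size >>= 1;

        while ((gfn << PAGE_SHIFT) & (page_size - 1))
            page_size >>= 1;

        return page_size;
    }

    int main(void)
    {
        /* A 2 MiB candidate at an unaligned gfn collapses to 4 KiB ... */
        printf("%lu KiB\n", clamp_page_size(0x101, 0x1000, 2UL << 20) >> 10);

        /* ... while an aligned gfn with room to spare keeps the full 2 MiB. */
        printf("%lu KiB\n", clamp_page_size(0x200, 0x1000, 2UL << 20) >> 10);

        return 0;
    }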