411 files changed, 25775 insertions, 3134 deletions
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index e4c38152f7f7..a4948591607d 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -7,7 +7,7 @@ The CONFIG_RCU_TORTURE_TEST config option is available for all RCU | |||
7 | implementations. It creates an rcutorture kernel module that can | 7 | implementations. It creates an rcutorture kernel module that can |
8 | be loaded to run a torture test. The test periodically outputs | 8 | be loaded to run a torture test. The test periodically outputs |
9 | status messages via printk(), which can be examined via the dmesg | 9 | status messages via printk(), which can be examined via the dmesg |
10 | command (perhaps grepping for "rcutorture"). The test is started | 10 | command (perhaps grepping for "torture"). The test is started |
11 | when the module is loaded, and stops when the module is unloaded. | 11 | when the module is loaded, and stops when the module is unloaded. |
12 | 12 | ||
13 | However, actually setting this config option to "y" results in the system | 13 | However, actually setting this config option to "y" results in the system |
@@ -35,6 +35,19 @@ stat_interval The number of seconds between output of torture | |||
35 | be printed -only- when the module is unloaded, and this | 35 | be printed -only- when the module is unloaded, and this |
36 | is the default. | 36 | is the default. |
37 | 37 | ||
38 | shuffle_interval | ||
39 | The number of seconds to keep the test threads affinitied | ||
40 | to a particular subset of the CPUs. Used in conjunction | ||
41 | with test_no_idle_hz. | ||
42 | |||
43 | test_no_idle_hz Whether or not to test the ability of RCU to operate in | ||
44 | a kernel that disables the scheduling-clock interrupt to | ||
45 | idle CPUs. Boolean parameter, "1" to test, "0" otherwise. | ||
46 | |||
47 | torture_type The type of RCU to test: "rcu" for the rcu_read_lock() | ||
48 | API, "rcu_bh" for the rcu_read_lock_bh() API, and "srcu" | ||
49 | for the "srcu_read_lock()" API. | ||
50 | |||
38 | verbose Enable debug printk()s. Default is disabled. | 51 | verbose Enable debug printk()s. Default is disabled. |
39 | 52 | ||
40 | 53 | ||
@@ -42,14 +55,14 @@ OUTPUT | |||
42 | 55 | ||
43 | The statistics output is as follows: | 56 | The statistics output is as follows: |
44 | 57 | ||
45 | rcutorture: --- Start of test: nreaders=16 stat_interval=0 verbose=0 | 58 | rcu-torture: --- Start of test: nreaders=16 stat_interval=0 verbose=0 |
46 | rcutorture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915 | 59 | rcu-torture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915 |
47 | rcutorture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0 | 60 | rcu-torture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0 |
48 | rcutorture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0 | 61 | rcu-torture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0 |
49 | rcutorture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0 | 62 | rcu-torture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0 |
50 | rcutorture: --- End of test | 63 | rcu-torture: --- End of test |
51 | 64 | ||
52 | The command "dmesg | grep rcutorture:" will extract this information on | 65 | The command "dmesg | grep torture:" will extract this information on |
53 | most systems. On more esoteric configurations, it may be necessary to | 66 | most systems. On more esoteric configurations, it may be necessary to |
54 | use other commands to access the output of the printk()s used by | 67 | use other commands to access the output of the printk()s used by |
55 | the RCU torture test. The printk()s use KERN_ALERT, so they should | 68 | the RCU torture test. The printk()s use KERN_ALERT, so they should |
@@ -115,8 +128,9 @@ The following script may be used to torture RCU: | |||
115 | modprobe rcutorture | 128 | modprobe rcutorture |
116 | sleep 100 | 129 | sleep 100 |
117 | rmmod rcutorture | 130 | rmmod rcutorture |
118 | dmesg | grep rcutorture: | 131 | dmesg | grep torture: |
119 | 132 | ||
120 | The output can be manually inspected for the error flag of "!!!". | 133 | The output can be manually inspected for the error flag of "!!!". |
121 | One could of course create a more elaborate script that automatically | 134 | One could of course create a more elaborate script that automatically |
122 | checked for such errors. | 135 | checked for such errors. The "rmmod" command forces a "SUCCESS" or |
136 | "FAILURE" indication to be printk()ed. | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2e352a605fcf..0d189c93eeaf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1669,6 +1669,10 @@ running once the system is up. | |||
1669 | usbhid.mousepoll= | 1669 | usbhid.mousepoll= |
1670 | [USBHID] The interval which mice are to be polled at. | 1670 | [USBHID] The interval which mice are to be polled at. |
1671 | 1671 | ||
1672 | vdso= [IA-32] | ||
1673 | vdso=1: enable VDSO (default) | ||
1674 | vdso=0: disable VDSO mapping | ||
1675 | |||
1672 | video= [FB] Frame buffer configuration | 1676 | video= [FB] Frame buffer configuration |
1673 | See Documentation/fb/modedb.txt. | 1677 | See Documentation/fb/modedb.txt. |
1674 | 1678 | ||
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
new file mode 100644
index 000000000000..5d61dacd21f6
--- /dev/null
+++ b/Documentation/pi-futex.txt
@@ -0,0 +1,121 @@ | |||
1 | Lightweight PI-futexes | ||
2 | ---------------------- | ||
3 | |||
4 | We are calling them lightweight for 3 reasons: | ||
5 | |||
6 | - in the user-space fastpath a PI-enabled futex involves no kernel work | ||
7 | (or any other PI complexity) at all. No registration, no extra kernel | ||
8 | calls - just pure fast atomic ops in userspace. | ||
9 | |||
10 | - even in the slowpath, the system call and scheduling pattern is very | ||
11 | similar to normal futexes. | ||
12 | |||
13 | - the in-kernel PI implementation is streamlined around the mutex | ||
14 | abstraction, with strict rules that keep the implementation | ||
15 | relatively simple: only a single owner may own a lock (i.e. no | ||
16 | read-write lock support), only the owner may unlock a lock, no | ||
17 | recursive locking, etc. | ||
18 | |||
19 | Priority Inheritance - why? | ||
20 | --------------------------- | ||
21 | |||
22 | The short reply: user-space PI helps achieve/improve determinism for | ||
23 | user-space applications. In the best-case, it can help achieve | ||
24 | determinism and well-bound latencies. Even in the worst-case, PI will | ||
25 | improve the statistical distribution of locking related application | ||
26 | delays. | ||
27 | |||
28 | The longer reply: | ||
29 | ----------------- | ||
30 | |||
31 | Firstly, sharing locks between multiple tasks is a common programming | ||
32 | technique that often cannot be replaced with lockless algorithms. As we | ||
33 | can see in the kernel [which is a quite complex program in itself], | ||
34 | lockless structures are the exception rather than the norm - the current | ||
35 | ratio of lockless vs. lock-based code for shared data structures is somewhere | ||
36 | between 1:10 and 1:100. Lockless is hard, and the complexity of lockless | ||
37 | algorithms often endangers the ability to do robust reviews of said code. | ||
38 | I.e. critical RT apps often choose lock structures to protect critical | ||
39 | data structures, instead of lockless algorithms. Furthermore, there are | ||
40 | cases (like shared hardware, or other resource limits) where lockless | ||
41 | access is mathematically impossible. | ||
42 | |||
43 | Media players (such as Jack) are an example of reasonable application | ||
44 | design with multiple tasks (with multiple priority levels) sharing | ||
45 | short-held locks: for example, a highprio audio playback thread is | ||
46 | combined with medium-prio construct-audio-data threads and low-prio | ||
47 | display-colory-stuff threads. Add video and decoding to the mix and | ||
48 | we've got even more priority levels. | ||
49 | |||
50 | So once we accept that synchronization objects (locks) are an | ||
51 | unavoidable fact of life, and once we accept that multi-task userspace | ||
52 | apps have a very fair expectation of being able to use locks, we've got | ||
53 | to think about how to offer the option of a deterministic locking | ||
54 | implementation to user-space. | ||
55 | |||
56 | Most of the technical counter-arguments against doing priority | ||
57 | inheritance only apply to kernel-space locks. But user-space locks are | ||
58 | different, there we cannot disable interrupts or make the task | ||
59 | non-preemptible in a critical section, so the 'use spinlocks' argument | ||
60 | does not apply (user-space spinlocks have the same priority inversion | ||
61 | problems as other user-space locking constructs). Fact is, pretty much | ||
62 | the only technique that currently enables good determinism for userspace | ||
63 | locks (such as futex-based pthread mutexes) is priority inheritance: | ||
64 | |||
65 | Currently (without PI), if a high-prio and a low-prio task share a lock | ||
66 | [this is a quite common scenario for most non-trivial RT applications], | ||
67 | even if all critical sections are coded carefully to be deterministic | ||
68 | (i.e. all critical sections are short in duration and only execute a | ||
69 | limited number of instructions), the kernel cannot guarantee any | ||
70 | deterministic execution of the high-prio task: any medium-priority task | ||
71 | could preempt the low-prio task while it holds the shared lock and | ||
72 | executes the critical section, and could delay it indefinitely. | ||
73 | |||
74 | Implementation: | ||
75 | --------------- | ||
76 | |||
77 | As mentioned before, the userspace fastpath of PI-enabled pthread | ||
78 | mutexes involves no kernel work at all - they behave quite similarly to | ||
79 | normal futex-based locks: a 0 value means unlocked, and a value==TID | ||
80 | means locked. (This is the same method as used by list-based robust | ||
81 | futexes.) Userspace uses atomic ops to lock/unlock these mutexes without | ||
82 | entering the kernel. | ||
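To make the fastpath concrete, here is a minimal user-space sketch (illustrative only; the helper names are hypothetical and this is not the glibc implementation):

#include <stdint.h>
#include <stdbool.h>

/* Futex word: 0 means unlocked, TID means locked by that thread. */

static bool pi_mutex_trylock(uint32_t *futex, uint32_t tid)
{
        uint32_t expected = 0;

        /* Atomic 0 -> TID transition; no kernel work on success. */
        return __atomic_compare_exchange_n(futex, &expected, tid, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static bool pi_mutex_tryunlock(uint32_t *futex, uint32_t tid)
{
        uint32_t expected = tid;

        /* Atomic TID -> 0 transition; fails once FUTEX_WAITERS is set. */
        return __atomic_compare_exchange_n(futex, &expected, 0, false,
                                           __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}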
83 | |||
84 | To handle the slowpath, we have added two new futex ops: | ||
85 | |||
86 | FUTEX_LOCK_PI | ||
87 | FUTEX_UNLOCK_PI | ||
88 | |||
89 | If the lock-acquire fastpath fails, [i.e. an atomic transition from 0 to | ||
90 | TID fails], then FUTEX_LOCK_PI is called. The kernel does all the | ||
91 | remaining work: if there is no futex-queue attached to the futex address | ||
92 | yet then the code looks up the task that owns the futex [it has put its | ||
93 | own TID into the futex value], and attaches a 'PI state' structure to | ||
94 | the futex-queue. The pi_state includes an rt-mutex, which is a PI-aware, | ||
95 | kernel-based synchronization object. The 'other' task is made the owner | ||
96 | of the rt-mutex, and the FUTEX_WAITERS bit is atomically set in the | ||
97 | futex value. Then this task tries to lock the rt-mutex, on which it | ||
98 | blocks. Once it returns, it has the mutex acquired, and it sets the | ||
99 | futex value to its own TID and returns. Userspace has no other work to | ||
100 | perform - it now owns the lock, and the futex value contains | ||
101 | FUTEX_WAITERS|TID. | ||
102 | |||
103 | If the unlock side fastpath succeeds, [i.e. userspace manages to do a | ||
104 | TID -> 0 atomic transition of the futex value], then no kernel work is | ||
105 | triggered. | ||
106 | |||
107 | If the unlock fastpath fails (because the FUTEX_WAITERS bit is set), | ||
108 | then FUTEX_UNLOCK_PI is called, and the kernel unlocks the futex on | ||
109 | behalf of userspace - and it also unlocks the attached | ||
110 | pi_state->rt_mutex and thus wakes up any potential waiters. | ||
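Putting the two halves together, a hedged sketch of the combined fast/slow path might look like this (it reuses the trylock/tryunlock helpers sketched earlier; the futex() call has no libc wrapper, so syscall(2) is used, and error/EINTR handling is omitted):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static void pi_mutex_lock(uint32_t *futex, uint32_t tid)
{
        if (pi_mutex_trylock(futex, tid))
                return;
        /* Slowpath: the kernel attaches the pi_state/rt_mutex, boosts the
           owner if needed, blocks us, and returns once we own the lock
           (the futex value is then FUTEX_WAITERS|TID). */
        syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_mutex_unlock(uint32_t *futex, uint32_t tid)
{
        if (pi_mutex_tryunlock(futex, tid))
                return;
        /* FUTEX_WAITERS is set: the kernel updates the futex value, unlocks
           the attached rt_mutex and wakes the top waiter for us. */
        syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}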
111 | |||
112 | Note that under this approach, contrary to previous PI-futex approaches, | ||
113 | there is no prior 'registration' of a PI-futex. [which is not quite | ||
114 | possible anyway, due to existing ABI properties of pthread mutexes.] | ||
115 | |||
116 | Also, under this scheme, 'robustness' and 'PI' are two orthogonal | ||
117 | properties of futexes, and all four combinations are possible: futex, | ||
118 | robust-futex, PI-futex, robust+PI-futex. | ||
119 | |||
120 | More details about priority inheritance can be found in | ||
121 | Documentation/rtmutex.txt. | ||
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index df82d75245a0..76e8064b8c3a 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -95,7 +95,7 @@ comparison. If the thread has registered a list, then normally the list | |||
95 | is empty. If the thread/process crashed or terminated in some incorrect | 95 | is empty. If the thread/process crashed or terminated in some incorrect |
96 | way then the list might be non-empty: in this case the kernel carefully | 96 | way then the list might be non-empty: in this case the kernel carefully |
97 | walks the list [not trusting it], and marks all locks that are owned by | 97 | walks the list [not trusting it], and marks all locks that are owned by |
98 | this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if | 98 | this thread with the FUTEX_OWNER_DIED bit, and wakes up one waiter (if |
99 | any). | 99 | any). |
100 | 100 | ||
101 | The list is guaranteed to be private and per-thread at do_exit() time, | 101 | The list is guaranteed to be private and per-thread at do_exit() time, |
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
new file mode 100644
index 000000000000..c472ffacc2f6
--- /dev/null
+++ b/Documentation/rt-mutex-design.txt
@@ -0,0 +1,781 @@ | |||
1 | # | ||
2 | # Copyright (c) 2006 Steven Rostedt | ||
3 | # Licensed under the GNU Free Documentation License, Version 1.2 | ||
4 | # | ||
5 | |||
6 | RT-mutex implementation design | ||
7 | ------------------------------ | ||
8 | |||
9 | This document tries to describe the design of the rtmutex.c implementation. | ||
10 | It doesn't describe the reasons why rtmutex.c exists. For that please see | ||
11 | Documentation/rt-mutex.txt. Although this document does explain problems | ||
12 | that happen without this code, it does so only to the extent needed to | ||
13 | understand what the code actually is doing. | ||
14 | |||
15 | The goal of this document is to help others understand the priority | ||
16 | inheritance (PI) algorithm that is used, as well as reasons for the | ||
17 | decisions that were made to implement PI in the manner that was done. | ||
18 | |||
19 | |||
20 | Unbounded Priority Inversion | ||
21 | ---------------------------- | ||
22 | |||
23 | Priority inversion is when a lower priority process executes while a higher | ||
24 | priority process wants to run. This happens for several reasons, and | ||
25 | most of the time it can't be helped. Anytime a high priority process wants | ||
26 | to use a resource that a lower priority process has (a mutex for example), | ||
27 | the high priority process must wait until the lower priority process is done | ||
28 | with the resource. This is a priority inversion. What we want to prevent | ||
29 | is something called unbounded priority inversion. That is when the high | ||
30 | priority process is prevented from running by a lower priority process for | ||
31 | an undetermined amount of time. | ||
32 | |||
33 | The classic example of unbounded priority inversion is where you have three | ||
34 | processes, let's call them processes A, B, and C, where A is the highest | ||
35 | priority process, C is the lowest, and B is in between. A tries to grab a lock | ||
36 | that C owns and must wait and lets C run to release the lock. But in the | ||
37 | meantime, B executes, and since B is of a higher priority than C, it preempts C, | ||
38 | but by doing so, it is in fact preempting A which is a higher priority process. | ||
39 | Now there's no way of knowing how long A will be sleeping waiting for C | ||
40 | to release the lock, because for all we know, B is a CPU hog and will | ||
41 | never give C a chance to release the lock. This is called unbounded priority | ||
42 | inversion. | ||
43 | |||
44 | Here's a little ASCII art to show the problem. | ||
45 | |||
46 | grab lock L1 (owned by C) | ||
47 | | | ||
48 | A ---+ | ||
49 | C preempted by B | ||
50 | | | ||
51 | C +----+ | ||
52 | |||
53 | B +--------> | ||
54 | B now keeps A from running. | ||
55 | |||
56 | |||
57 | Priority Inheritance (PI) | ||
58 | ------------------------- | ||
59 | |||
60 | There are several ways to solve this issue, but other ways are out of scope | ||
61 | for this document. Here we only discuss PI. | ||
62 | |||
63 | PI is where a process inherits the priority of another process if the other | ||
64 | process blocks on a lock owned by the current process. To make this easier | ||
65 | to understand, let's use the previous example, with processes A, B, and C again. | ||
66 | |||
67 | This time, when A blocks on the lock owned by C, C would inherit the priority | ||
68 | of A. So now if B becomes runnable, it would not preempt C, since C now has | ||
69 | the high priority of A. As soon as C releases the lock, it loses its | ||
70 | inherited priority, and A then can continue with the resource that C had. | ||
71 | |||
72 | Terminology | ||
73 | ----------- | ||
74 | |||
75 | Here I explain some terminology that is used in this document to help describe | ||
76 | the design that is used to implement PI. | ||
77 | |||
78 | PI chain - The PI chain is an ordered series of locks and processes that cause | ||
79 | processes to inherit priorities from a previous process that is | ||
80 | blocked on one of its locks. This is described in more detail | ||
81 | later in this document. | ||
82 | |||
83 | mutex - In this document, to differentiate from locks that implement | ||
84 | PI and spin locks that are used in the PI code, from now on | ||
85 | the PI locks will be called a mutex. | ||
86 | |||
87 | lock - In this document from now on, I will use the term lock when | ||
88 | referring to spin locks that are used to protect parts of the PI | ||
89 | algorithm. These locks disable preemption for UP (when | ||
90 | CONFIG_PREEMPT is enabled) and on SMP prevent multiple CPUs from | ||
91 | entering critical sections simultaneously. | ||
92 | |||
93 | spin lock - Same as lock above. | ||
94 | |||
95 | waiter - A waiter is a struct that is stored on the stack of a blocked | ||
96 | process. Since the scope of the waiter is within the code for | ||
97 | a process being blocked on the mutex, it is fine to allocate | ||
98 | the waiter on the process's stack (local variable). This | ||
99 | structure holds a pointer to the task, as well as the mutex that | ||
100 | the task is blocked on. It also has the plist node structures to | ||
101 | place the task in the waiter_list of a mutex as well as the | ||
102 | pi_list of a mutex owner task (described below). | ||
103 | |||
104 | waiter is sometimes used in reference to the task that is waiting | ||
105 | on a mutex. This is the same as waiter->task. (See the structure sketch at the end of this section.) | ||
106 | |||
107 | waiters - A list of processes that are blocked on a mutex. | ||
108 | |||
109 | top waiter - The highest priority process waiting on a specific mutex. | ||
110 | |||
111 | top pi waiter - The highest priority process waiting on one of the mutexes | ||
112 | that a specific process owns. | ||
113 | |||
114 | Note: task and process are used interchangeably in this document, mostly to | ||
115 | differentiate between two processes that are being described together. | ||
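Tying the 'waiter' terminology to code, the structure looks roughly like this (a sketch after kernel/rtmutex_common.h of that era; debug-only fields are omitted and the layout may differ in other kernel versions):

struct rt_mutex_waiter {
        struct plist_node   list_entry;     /* node on the mutex's wait_list */
        struct plist_node   pi_list_entry;  /* node on the owner's pi_list   */
        struct task_struct  *task;          /* the task blocked on the mutex */
        struct rt_mutex     *lock;          /* the mutex it is blocked on    */
};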
116 | |||
117 | |||
118 | PI chain | ||
119 | -------- | ||
120 | |||
121 | The PI chain is a list of processes and mutexes that may cause priority | ||
122 | inheritance to take place. Multiple chains may converge, but a chain | ||
123 | would never diverge, since a process can't be blocked on more than one | ||
124 | mutex at a time. | ||
125 | |||
126 | Example: | ||
127 | |||
128 | Processes: A, B, C, D, E | ||
129 | Mutexes: L1, L2, L3, L4 | ||
130 | |||
131 | A owns: L1 | ||
132 | B blocked on L1 | ||
133 | B owns L2 | ||
134 | C blocked on L2 | ||
135 | C owns L3 | ||
136 | D blocked on L3 | ||
137 | D owns L4 | ||
138 | E blocked on L4 | ||
139 | |||
140 | The chain would be: | ||
141 | |||
142 | E->L4->D->L3->C->L2->B->L1->A | ||
143 | |||
144 | To show where two chains merge, we could add another process F and | ||
145 | another mutex L5 where B owns L5 and F is blocked on mutex L5. | ||
146 | |||
147 | The chain for F would be: | ||
148 | |||
149 | F->L5->B->L1->A | ||
150 | |||
151 | Since a process may own more than one mutex, but never be blocked on more than | ||
152 | one, the chains merge. | ||
153 | |||
154 | Here we show both chains: | ||
155 | |||
156 | E->L4->D->L3->C->L2-+ | ||
157 | | | ||
158 | +->B->L1->A | ||
159 | | | ||
160 | F->L5-+ | ||
161 | |||
162 | For PI to work, the processes at the right end of these chains (or we may | ||
163 | also call it the Top of the chain) must be equal to or higher in priority | ||
164 | than the processes to the left or below in the chain. | ||
165 | |||
166 | Also since a mutex may have more than one process blocked on it, we can | ||
167 | have multiple chains merge at mutexes. If we add another process G that is | ||
168 | blocked on mutex L2: | ||
169 | |||
170 | G->L2->B->L1->A | ||
171 | |||
172 | And once again, to show how this can grow I will show the merging chains | ||
173 | again. | ||
174 | |||
175 | E->L4->D->L3->C-+ | ||
176 | +->L2-+ | ||
177 | | | | ||
178 | G-+ +->B->L1->A | ||
179 | | | ||
180 | F->L5-+ | ||
181 | |||
182 | |||
183 | Plist | ||
184 | ----- | ||
185 | |||
186 | Before I go further and talk about how the PI chain is stored through lists | ||
187 | on both mutexes and processes, I'll explain the plist. This is similar to | ||
188 | the struct list_head functionality that is already in the kernel. | ||
189 | The implementation of plist is out of scope for this document, but it is | ||
190 | very important to understand what it does. | ||
191 | |||
192 | There are a few differences between plist and list, the most important one | ||
193 | being that plist is a priority sorted linked list. This means that the | ||
194 | priorities of the plist are sorted, such that it takes O(1) to retrieve the | ||
195 | highest priority item in the list. Obviously this is useful to store processes | ||
196 | based on their priorities. | ||
197 | |||
198 | Another difference, which is important for implementation, is that, unlike | ||
199 | list, the head of the list is a different element than the nodes of a list. | ||
200 | So the head of the list is declared as struct plist_head and nodes that will | ||
201 | be added to the list are declared as struct plist_node. | ||
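As a rough illustration of the API shape (a sketch only; in the 2.6.17/2.6.18 era plist_head_init() also took the protecting spinlock, so the init call is omitted here, and exact signatures should be checked against include/linux/plist.h):

#include <linux/plist.h>

/* Sketch: a priority-sorted list; lower prio value = higher priority. */
static void plist_example(struct plist_head *head)
{
        struct plist_node waiter;
        struct plist_node *top;

        plist_node_init(&waiter, 10);   /* the node carries its priority */
        plist_add(&waiter, head);       /* inserted in priority order    */

        top = plist_first(head);        /* O(1) highest-priority lookup  */
        (void)top;

        plist_del(&waiter, head);       /* removal also needs the head   */
}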
202 | |||
203 | |||
204 | Mutex Waiter List | ||
205 | ----------------- | ||
206 | |||
207 | Every mutex keeps track of all the waiters that are blocked on itself. The mutex | ||
208 | has a plist to store these waiters by priority. This list is protected by | ||
209 | a spin lock that is located in the struct of the mutex. This lock is called | ||
210 | wait_lock. Since the modification of the waiter list is never done in | ||
211 | interrupt context, the wait_lock can be taken without disabling interrupts. | ||
212 | |||
213 | |||
214 | Task PI List | ||
215 | ------------ | ||
216 | |||
217 | To keep track of the PI chains, each process has its own PI list. This is | ||
218 | a list of all top waiters of the mutexes that are owned by the process. | ||
219 | Note that this list only holds the top waiters and not all waiters that are | ||
220 | blocked on mutexes owned by the process. | ||
221 | |||
222 | The top of the task's PI list is always the highest priority task that | ||
223 | is waiting on a mutex that is owned by the task. So if the task has | ||
224 | inherited a priority, it will always be the priority of the task that is | ||
225 | at the top of this list. | ||
226 | |||
227 | This list is stored in the task structure of a process as a plist called | ||
228 | pi_list. This list is protected by a spin lock also in the task structure, | ||
229 | called pi_lock. This lock may also be taken in interrupt context, so when | ||
230 | locking the pi_lock, interrupts must be disabled. | ||
231 | |||
232 | |||
233 | Depth of the PI Chain | ||
234 | --------------------- | ||
235 | |||
236 | The maximum depth of the PI chain is not dynamic, and could actually be | ||
237 | defined. But it is very complex to figure out, since it depends on all | ||
238 | the nesting of mutexes. Let's look at the example where we have 3 mutexes, | ||
239 | L1, L2, and L3, and four separate functions func1, func2, func3 and func4. | ||
240 | The following shows a locking order of L1->L2->L3, but may not actually | ||
241 | be directly nested that way. | ||
242 | |||
243 | void func1(void) | ||
244 | { | ||
245 | mutex_lock(L1); | ||
246 | |||
247 | /* do anything */ | ||
248 | |||
249 | mutex_unlock(L1); | ||
250 | } | ||
251 | |||
252 | void func2(void) | ||
253 | { | ||
254 | mutex_lock(L1); | ||
255 | mutex_lock(L2); | ||
256 | |||
257 | /* do something */ | ||
258 | |||
259 | mutex_unlock(L2); | ||
260 | mutex_unlock(L1); | ||
261 | } | ||
262 | |||
263 | void func3(void) | ||
264 | { | ||
265 | mutex_lock(L2); | ||
266 | mutex_lock(L3); | ||
267 | |||
268 | /* do something else */ | ||
269 | |||
270 | mutex_unlock(L3); | ||
271 | mutex_unlock(L2); | ||
272 | } | ||
273 | |||
274 | void func4(void) | ||
275 | { | ||
276 | mutex_lock(L3); | ||
277 | |||
278 | /* do something again */ | ||
279 | |||
280 | mutex_unlock(L3); | ||
281 | } | ||
282 | |||
283 | Now we add 4 processes that run each of these functions separately. | ||
284 | Processes A, B, C, and D which run functions func1, func2, func3 and func4 | ||
285 | respectively, and such that D runs first and A last. With D being preempted | ||
286 | in func4 in the "do something again" area, we have a locking that follows: | ||
287 | |||
288 | D owns L3 | ||
289 | C blocked on L3 | ||
290 | C owns L2 | ||
291 | B blocked on L2 | ||
292 | B owns L1 | ||
293 | A blocked on L1 | ||
294 | |||
295 | And thus we have the chain A->L1->B->L2->C->L3->D. | ||
296 | |||
297 | This gives us a PI depth of 4 (four processes), but looking at any of the | ||
298 | functions individually, it seems as though they only have at most a locking | ||
299 | depth of two. So, although the locking depth is defined at compile time, | ||
300 | it still is very difficult to find the possibilities of that depth. | ||
301 | |||
302 | Now since mutexes can be defined by user-land applications, we don't want a DOS | ||
303 | type of application that nests large amounts of mutexes to create a large | ||
304 | PI chain, and have the code holding spin locks while looking at a large | ||
305 | amount of data. So to prevent this, the implementation not only implements | ||
306 | a maximum lock depth, but also only holds at most two different locks at a | ||
307 | time, as it walks the PI chain. More about this below. | ||
308 | |||
309 | |||
310 | Mutex owner and flags | ||
311 | --------------------- | ||
312 | |||
313 | The mutex structure contains a pointer to the owner of the mutex. If the | ||
314 | mutex is not owned, this owner is set to NULL. Since all architectures | ||
315 | have the task structure on at least a four byte alignment (and if this is | ||
316 | not true, the rtmutex.c code will be broken!), this allows for the two | ||
317 | least significant bits to be used as flags. This part is also described | ||
318 | in Documentation/rt-mutex.txt, but will also be briefly described here. | ||
319 | |||
320 | Bit 0 is used as the "Pending Owner" flag. This is described later. | ||
321 | Bit 1 is used as the "Has Waiters" flag. This is also described later | ||
322 | in more detail, but is set whenever there are waiters on a mutex. | ||
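For illustration, the owner word could be decoded like this (a sketch; the helper name is hypothetical, though the bit values match the description above):

#define RT_MUTEX_OWNER_PENDING  1UL     /* bit 0: "Pending Owner" */
#define RT_MUTEX_HAS_WAITERS    2UL     /* bit 1: "Has Waiters"   */
#define RT_MUTEX_OWNER_MASKALL  3UL

static inline struct task_struct *owner_task_of(unsigned long owner_word)
{
        /* task_struct pointers are at least 4-byte aligned, so the two
           low bits can be masked off to recover the real pointer. */
        return (struct task_struct *)(owner_word & ~RT_MUTEX_OWNER_MASKALL);
}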
323 | |||
324 | |||
325 | cmpxchg Tricks | ||
326 | -------------- | ||
327 | |||
328 | Some architectures implement an atomic cmpxchg (Compare and Exchange). This | ||
329 | is used (when applicable) to keep the fast path of grabbing and releasing | ||
330 | mutexes short. | ||
331 | |||
332 | cmpxchg is basically the following function performed atomically: | ||
333 | |||
334 | unsigned long _cmpxchg(unsigned long *A, unsigned long *B, unsigned long *C) | ||
335 | { | ||
336 | unsigned long T = *A; | ||
337 | if (*A == *B) { | ||
338 | *A = *C; | ||
339 | } | ||
340 | return T; | ||
341 | } | ||
342 | #define cmpxchg(a,b,c) _cmpxchg(&a,&b,&c) | ||
343 | |||
344 | This is really nice to have, since it allows you to only update a variable | ||
345 | if the variable is what you expect it to be. You know if it succeeded if | ||
346 | the return value (the old value of A) is equal to B. | ||
347 | |||
348 | The macro rt_mutex_cmpxchg is used to try to lock and unlock mutexes. If | ||
349 | the architecture does not support CMPXCHG, then this macro is simply set | ||
350 | to fail every time. But if CMPXCHG is supported, then this will | ||
351 | help out extremely to keep the fast path short. | ||
352 | |||
353 | The use of rt_mutex_cmpxchg with the flags in the owner field helps optimize | ||
354 | the system for architectures that support it. This will also be explained | ||
355 | later in this document. | ||
356 | |||
357 | |||
358 | Priority adjustments | ||
359 | -------------------- | ||
360 | |||
361 | The implementation of the PI code in rtmutex.c has several places where a | ||
362 | process must adjust its priority. With the help of the pi_list of a | ||
363 | process it is rather easy to know what needs to be adjusted. | ||
364 | |||
365 | The functions implementing the task adjustments are rt_mutex_adjust_prio, | ||
366 | __rt_mutex_adjust_prio (same as the former, but expects the task pi_lock | ||
367 | to already be taken), rt_mutex_getprio, and rt_mutex_setprio. | ||
368 | |||
369 | rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio. | ||
370 | |||
371 | rt_mutex_getprio returns the priority that the task should have. Either the | ||
372 | task's own normal priority, or if a process of a higher priority is waiting on | ||
373 | a mutex owned by the task, then that higher priority should be returned. | ||
374 | Since the pi_list of a task holds a priority-ordered list of all the top | ||
375 | waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs | ||
376 | to compare the top pi waiter to its own normal priority, and return the higher | ||
377 | priority back. | ||
378 | |||
379 | (Note: if looking at the code, you will notice that the lower number of | ||
380 | prio is returned. This is because the prio field in the task structure | ||
381 | is an inverse order of the actual priority. So a "prio" of 5 is | ||
382 | of higher priority than a "prio" of 10.) | ||
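A sketch of that comparison (closely following kernel/rtmutex.c of that era; helper and field names such as pi_list_entry may differ in later kernels):

int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        /* Lower prio value means higher priority, hence min(). */
        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}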
383 | |||
384 | __rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the | ||
385 | result does not equal the task's current priority, then rt_mutex_setprio | ||
386 | is called to adjust the priority of the task to the new priority. | ||
387 | Note that rt_mutex_setprio is defined in kernel/sched.c to implement the | ||
388 | actual change in priority. | ||
389 | |||
390 | It is interesting to note that __rt_mutex_adjust_prio can either increase | ||
391 | or decrease the priority of the task. In the case that a higher priority | ||
392 | process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio | ||
393 | would increase/boost the task's priority. But if a higher priority task | ||
394 | were for some reason to leave the mutex (timeout or signal), this same function | ||
395 | would decrease/unboost the priority of the task. That is because the pi_list | ||
396 | always contains the highest priority task that is waiting on a mutex owned | ||
397 | by the task, so we only need to compare the priority of that top pi waiter | ||
398 | to the normal priority of the given task. | ||
399 | |||
400 | |||
401 | High level overview of the PI chain walk | ||
402 | ---------------------------------------- | ||
403 | |||
404 | The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain. | ||
405 | |||
406 | The implementation has gone through several iterations, and has ended up | ||
407 | with what we believe is the best. It walks the PI chain by only grabbing | ||
408 | at most two locks at a time, and is very efficient. | ||
409 | |||
410 | The rt_mutex_adjust_prio_chain can be used either to boost or lower process | ||
411 | priorities. | ||
412 | |||
413 | rt_mutex_adjust_prio_chain is called with a task to be checked for PI | ||
414 | (de)boosting (the owner of a mutex that a process is blocking on), a flag to | ||
415 | check for deadlocking, the mutex that the task owns, and a pointer to a waiter | ||
416 | that is the process's waiter struct that is blocked on the mutex (although this | ||
417 | parameter may be NULL for deboosting). | ||
418 | |||
419 | For this explanation, I will not mention deadlock detection. This explanation | ||
420 | will try to stay at a high level. | ||
421 | |||
422 | When this function is called, there are no locks held. That also means | ||
423 | that the state of the owner and lock can change when entered into this function. | ||
424 | |||
425 | Before this function is called, the task has already had rt_mutex_adjust_prio | ||
426 | performed on it. This means that the task is set to the priority that it | ||
427 | should be at, but the plist nodes of the task's waiter have not been updated | ||
428 | with the new priorities, and that this task may not be in the proper locations | ||
429 | in the pi_lists and wait_lists that the task is blocked on. This function | ||
430 | solves all that. | ||
431 | |||
432 | A loop is entered, where task is the owner to be checked for PI changes that | ||
433 | was passed by parameter (for the first iteration). The pi_lock of this task is | ||
434 | taken to prevent any more changes to the pi_list of the task. This also | ||
435 | prevents new tasks from completing the blocking on a mutex that is owned by this | ||
436 | task. | ||
437 | |||
438 | If the task is not blocked on a mutex then the loop is exited. We are at | ||
439 | the top of the PI chain. | ||
440 | |||
441 | A check is now done to see if the original waiter (the process that is blocked | ||
442 | on the current mutex) is the top pi waiter of the task. That is, is this | ||
443 | waiter on the top of the task's pi_list? If it is not, it either means that | ||
444 | there is another process higher in priority that is blocked on one of the | ||
445 | mutexes that the task owns, or that the waiter has just woken up via a signal | ||
446 | or timeout and has left the PI chain. In either case, the loop is exited, since | ||
447 | we don't need to do any more changes to the priority of the current task, or any | ||
448 | task that owns a mutex that this current task is waiting on. A priority chain | ||
449 | walk is only needed when a new top pi waiter is made to a task. | ||
450 | |||
451 | The next check sees if the task's waiter plist node has the priority equal to | ||
452 | the priority the task is set at. If they are equal, then we are done with | ||
453 | the loop. Remember that the function started with the priority of the | ||
454 | task adjusted, but the plist nodes that hold the task in other processes' | ||
455 | pi_lists have not been adjusted. | ||
456 | |||
457 | Next, we look at the mutex that the task is blocked on. The mutex's wait_lock | ||
458 | is taken. This is done by a spin_trylock, because the locking order of the | ||
459 | pi_lock and wait_lock goes in the opposite direction. If we fail to grab the | ||
460 | lock, the pi_lock is released, and we restart the loop. | ||
461 | |||
462 | Now that we have both the pi_lock of the task as well as the wait_lock of | ||
463 | the mutex the task is blocked on, we update the task's waiter's plist node | ||
464 | that is located on the mutex's wait_list. | ||
465 | |||
466 | Now we release the pi_lock of the task. | ||
467 | |||
468 | Next the owner of the mutex has its pi_lock taken, so we can update the | ||
469 | task's entry in the owner's pi_list. If the task is the highest priority | ||
470 | process on the mutex's wait_list, then we remove the previous top waiter | ||
471 | from the owner's pi_list, and replace it with the task. | ||
472 | |||
473 | Note: It is possible that the task was the current top waiter on the mutex, | ||
474 | in which case the task is not yet on the pi_list of the waiter. This | ||
475 | is OK, since plist_del does nothing if the plist node is not on any | ||
476 | list. | ||
477 | |||
478 | If the task was not the top waiter of the mutex, but it was before we | ||
479 | did the priority updates, that means we are deboosting/lowering the | ||
480 | task. In this case, the task is removed from the pi_list of the owner, | ||
481 | and the new top waiter is added. | ||
482 | |||
483 | Lastly, we unlock both the pi_lock of the task, as well as the mutex's | ||
484 | wait_lock, and continue the loop again. On the next iteration of the | ||
485 | loop, the previous owner of the mutex will be the task that will be | ||
486 | processed. | ||
487 | |||
488 | Note: One might think that the owner of this mutex might have changed | ||
489 | since we just grabbed the mutex's wait_lock. And one could be right. | ||
490 | The important thing to remember is that the owner could not have | ||
491 | become the task that is being processed in the PI chain, since | ||
492 | we have taken that task's pi_lock at the beginning of the loop. | ||
493 | So as long as there is an owner of this mutex that is not the same | ||
494 | process as the task being worked on, we are OK. | ||
495 | |||
496 | Looking closely at the code, one might be confused. The check for the | ||
497 | end of the PI chain is when the task isn't blocked on anything or the | ||
498 | task's waiter structure "task" element is NULL. This check is | ||
499 | protected only by the task's pi_lock. But the code to unlock the mutex | ||
500 | sets the task's waiter structure "task" element to NULL with only | ||
501 | the protection of the mutex's wait_lock, which was not taken yet. | ||
502 | Isn't this a race condition if the task becomes the new owner? | ||
503 | |||
504 | The answer is No! The trick is the spin_trylock of the mutex's | ||
505 | wait_lock. If we fail that lock, we release the pi_lock of the | ||
506 | task and continue the loop, doing the end of PI chain check again. | ||
507 | |||
508 | In the code to release the lock, the wait_lock of the mutex is held | ||
509 | the entire time, and it is not let go when we grab the pi_lock of the | ||
510 | new owner of the mutex. So if the switch of a new owner were to happen | ||
511 | after the check for end of the PI chain and the grabbing of the | ||
512 | wait_lock, the unlocking code would spin on the new owner's pi_lock | ||
513 | but never give up the wait_lock. So the PI chain loop is guaranteed to | ||
514 | fail the spin_trylock on the wait_lock, release the pi_lock, and | ||
515 | try again. | ||
516 | |||
517 | If you don't quite understand the above, that's OK. You don't have to, | ||
518 | unless you really want to make a proof out of it ;) | ||
519 | |||
520 | |||
521 | Pending Owners and Lock stealing | ||
522 | -------------------------------- | ||
523 | |||
524 | One of the flags in the owner field of the mutex structure is "Pending Owner". | ||
525 | What this means is that an owner was chosen by the process releasing the | ||
526 | mutex, but that owner has yet to wake up and actually take the mutex. | ||
527 | |||
528 | Why is this important? Why can't we just give the mutex to another process | ||
529 | and be done with it? | ||
530 | |||
531 | The PI code is to help with real-time processes, and to let the highest | ||
532 | priority process run as long as possible with little latencies and delays. | ||
533 | If a high priority process owns a mutex that a lower priority process is | ||
534 | blocked on, when the mutex is released it would be given to the lower priority | ||
535 | process. What if the higher priority process wants to take that mutex again? | ||
536 | The high priority process would fail to take that mutex that it just gave up | ||
537 | and it would need to boost the lower priority process to run with full | ||
538 | latency of that critical section (since the low priority process just entered | ||
539 | it). | ||
540 | |||
541 | There's no reason a high priority process that gives up a mutex should be | ||
542 | penalized if it tries to take that mutex again. If the new owner of the | ||
543 | mutex has not woken up yet, there's no reason that the higher priority process | ||
544 | could not take that mutex away. | ||
545 | |||
546 | To solve this, we introduced Pending Ownership and Lock Stealing. When a | ||
547 | new process is given a mutex that it was blocked on, it is only given | ||
548 | pending ownership. This means that it's the new owner, unless a higher | ||
549 | priority process comes in and tries to grab that mutex. If a higher priority | ||
550 | process does come along and wants that mutex, we let the higher priority | ||
551 | process "steal" the mutex from the pending owner (only if it is still pending) | ||
552 | and continue with the mutex. | ||
553 | |||
554 | |||
555 | Taking of a mutex (The walk through) | ||
556 | ------------------------------------ | ||
557 | |||
558 | OK, now let's take a look at the detailed walk through of what happens when | ||
559 | taking a mutex. | ||
560 | |||
561 | The first thing that is tried is the fast taking of the mutex. This is | ||
562 | done when we have CMPXCHG enabled (otherwise the fast taking automatically | ||
563 | fails). Only when the owner field of the mutex is NULL can the lock be | ||
564 | taken with the CMPXCHG and nothing else needs to be done. | ||
565 | |||
566 | If there is contention on the lock, whether it has a real owner or a pending | ||
567 | owner, we take the slow path (rt_mutex_slowlock). | ||
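Schematically, the lock entry point looks something like this (a simplified sketch of rt_mutex_fastlock(); the real code goes through a slow-path function pointer, and the rt_mutex_slowlock() arguments shown here are approximate):

static void rt_mutex_lock_sketch(struct rt_mutex *lock)
{
        /* Fast path: atomic NULL -> current transition of lock->owner.
           Architectures without CMPXCHG define rt_mutex_cmpxchg() to
           always fail, funnelling everything into the slow path. */
        if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
                return;

        rt_mutex_slowlock(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}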
568 | |||
569 | The slow path function is where the task's waiter structure is created on | ||
570 | the stack. This is because the waiter structure is only needed for the | ||
571 | scope of this function. The waiter structure holds the nodes to store | ||
572 | the task on the wait_list of the mutex, and if need be, the pi_list of | ||
573 | the owner. | ||
574 | |||
575 | The wait_lock of the mutex is taken since the slow path of unlocking the | ||
576 | mutex also takes this lock. | ||
577 | |||
578 | We then call try_to_take_rt_mutex. This is where the architecture that | ||
579 | does not implement CMPXCHG would always grab the lock (if there's no | ||
580 | contention). | ||
581 | |||
582 | try_to_take_rt_mutex is used every time the task tries to grab a mutex in the | ||
583 | slow path. The first thing that is done here is an atomic setting of | ||
584 | the "Has Waiters" flag of the mutex's owner field. Yes, this could really | ||
585 | be false, because if the mutex has no owner, there are no waiters and | ||
586 | the current task also won't have any waiters. But we don't have the lock | ||
587 | yet, so we assume we are going to be a waiter. The reason for this is to | ||
588 | play nice for those architectures that do have CMPXCHG. By setting this flag | ||
589 | now, the owner of the mutex can't release the mutex without going into the | ||
590 | slow unlock path, and it would then need to grab the wait_lock, which this | ||
591 | code currently holds. So setting the "Has Waiters" flag forces the owner | ||
592 | to synchronize with this code. | ||
593 | |||
594 | Now that we know that we can't have any races with the owner releasing the | ||
595 | mutex, we check to see if we can take the ownership. This is done if the | ||
596 | mutex doesn't have an owner, or if we can steal the mutex from a pending | ||
597 | owner. Let's look at the situations we have here. | ||
598 | |||
599 | 1) Has owner that is pending | ||
600 | ---------------------------- | ||
601 | |||
602 | The mutex has an owner, but it hasn't woken up and the mutex flag | ||
603 | "Pending Owner" is set. The first check is to see if the owner isn't the | ||
604 | current task. This is because this function is also used for the pending | ||
605 | owner to grab the mutex. When a pending owner wakes up, it checks to see | ||
606 | if it can take the mutex, and this is done if the owner is already set to | ||
607 | itself. If so, we succeed and leave the function, clearing the "Pending | ||
608 | Owner" bit. | ||
609 | |||
610 | If the pending owner is not current, we check to see if the current priority is | ||
611 | higher than the pending owner's. If not, we fail the function and return. | ||
612 | |||
613 | There's also something special about a pending owner. That is, a pending owner | ||
614 | is never blocked on a mutex. So there is no PI chain to worry about. It also | ||
615 | means that if the mutex doesn't have any waiters, there's no accounting needed | ||
616 | to update the pending owner's pi_list, since we only worry about processes | ||
617 | blocked on the current mutex. | ||
618 | |||
619 | If there are waiters on this mutex, and we just stole the ownership, we need | ||
620 | to take the top waiter, remove it from the pi_list of the pending owner, and | ||
621 | add it to the current pi_list. Note that at this moment, the pending owner | ||
622 | is no longer on the list of waiters. This is fine, since the pending owner | ||
623 | would add itself back when it realizes that it had the ownership stolen | ||
624 | from itself. When the pending owner tries to grab the mutex, it will fail | ||
625 | in try_to_take_rt_mutex if the owner field points to another process. | ||
626 | |||
627 | 2) No owner | ||
628 | ----------- | ||
629 | |||
630 | If there is no owner (or we successfully stole the lock), we set the owner | ||
631 | of the mutex to current, and set the flag of "Has Waiters" if the current | ||
632 | mutex actually has waiters, or we clear the flag if it doesn't. See, it was | ||
633 | OK that we set that flag early, since now it is cleared. | ||
634 | |||
635 | 3) Failed to grab ownership | ||
636 | --------------------------- | ||
637 | |||
638 | The most interesting case is when we fail to take ownership. This means that | ||
639 | there exists an owner, or there's a pending owner with equal or higher | ||
640 | priority than the current task. | ||
641 | |||
642 | We'll continue on the failed case. | ||
643 | |||
644 | If the mutex has a timeout, we set up a timer to go off to break us out | ||
645 | of this mutex if we failed to get it after a specified amount of time. | ||
646 | |||
647 | Now we enter a loop that will continue to try to take ownership of the mutex, or | ||
648 | fail from a timeout or signal. | ||
649 | |||
650 | Once again we try to take the mutex. This will usually fail the first time | ||
651 | in the loop, since it had just failed to get the mutex. But the second time | ||
652 | in the loop, this would likely succeed, since the task would likely be | ||
653 | the pending owner. | ||
654 | |||
655 | If the task's state is TASK_INTERRUPTIBLE, a check for signals and timeout is | ||
656 | done here. | ||
657 | |||
658 | The waiter structure has a "task" field that points to the task that is blocked | ||
659 | on the mutex. This field can be NULL the first time it goes through the loop | ||
660 | or if the task is a pending owner and had its mutex stolen. If the "task" | ||
661 | field is NULL then we need to set up the accounting for it. | ||
662 | |||
663 | Task blocks on mutex | ||
664 | -------------------- | ||
665 | |||
666 | The accounting of a mutex and process is done with the waiter structure of | ||
667 | the process. The "task" field is set to the process, and the "lock" field | ||
668 | to the mutex. The plist nodes are initialized to the process's current | ||
669 | priority. | ||
670 | |||
671 | Since the wait_lock was taken at the entry of the slow lock, we can safely | ||
672 | add the waiter to the wait_list. If the current process is the highest | ||
673 | priority process currently waiting on this mutex, then we remove the | ||
674 | previous top waiter process (if it exists) from the pi_list of the owner, | ||
675 | and add the current process to that list. Since the pi_list of the owner | ||
676 | has changed, we call rt_mutex_adjust_prio on the owner to see if the owner | ||
677 | should adjust its priority accordingly. | ||
678 | |||
679 | If the owner is also blocked on a lock, and had its pi_list changed | ||
680 | (or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead | ||
681 | and run rt_mutex_adjust_prio_chain on the owner, as described earlier. | ||
682 | |||
683 | Now all locks are released, and if the current process is still blocked on a | ||
684 | mutex (waiter "task" field is not NULL), then we go to sleep (call schedule). | ||
685 | |||
686 | Waking up in the loop | ||
687 | --------------------- | ||
688 | |||
689 | The schedule can then wake up for a few reasons. | ||
690 | 1) we were given pending ownership of the mutex. | ||
691 | 2) we received a signal and were TASK_INTERRUPTIBLE | ||
692 | 3) we had a timeout and were TASK_INTERRUPTIBLE | ||
693 | |||
694 | In any of these cases, we continue the loop and once again try to grab the | ||
695 | ownership of the mutex. If we succeed, we exit the loop. On a signal or a | ||
696 | timeout we also exit the loop; otherwise, if the mutex was stolen from us, | ||
697 | we simply add ourselves back onto the lists and go back to sleep. | ||
698 | |||
699 | Note: For various reasons, because of timeout and signals, the steal mutex | ||
700 | algorithm needs to be careful. This is because the current process is | ||
701 | still on the wait_list. And because of dynamic changing of priorities, | ||
702 | especially on SCHED_OTHER tasks, the current process can be the | ||
703 | highest priority task on the wait_list. | ||
704 | |||
705 | Failed to get mutex on Timeout or Signal | ||
706 | ---------------------------------------- | ||
707 | |||
708 | If a timeout or signal occurred, the waiter's "task" field would not be | ||
709 | NULL and the task needs to be taken off the wait_list of the mutex and perhaps | ||
710 | pi_list of the owner. If this process was a high priority process, then | ||
711 | the rt_mutex_adjust_prio_chain needs to be executed again on the owner, | ||
712 | but this time it will be lowering the priorities. | ||
713 | |||
714 | |||
715 | Unlocking the Mutex | ||
716 | ------------------- | ||
717 | |||
718 | The unlocking of a mutex also has a fast path for those architectures with | ||
719 | CMPXCHG. Since the taking of a mutex on contention always sets the | ||
720 | "Has Waiters" flag of the mutex's owner, we use this to know if we need to | ||
721 | take the slow path when unlocking the mutex. If the mutex doesn't have any | ||
722 | waiters, the owner field of the mutex would equal the current process and | ||
723 | the mutex can be unlocked by just replacing the owner field with NULL. | ||
724 | |||
725 | If the owner field has the "Has Waiters" bit set (or CMPXCHG is not available), | ||
726 | the slow unlock path is taken. | ||
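The unlock side mirrors the lock entry sketch shown earlier (again only a sketch, modeled on rt_mutex_fastunlock()):

static void rt_mutex_unlock_sketch(struct rt_mutex *lock)
{
        /* Fast path: current -> NULL succeeds only when the owner word
           is exactly 'current', i.e. the "Has Waiters" bit is clear. */
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                return;

        rt_mutex_slowunlock(lock);
}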
727 | |||
728 | The first thing done in the slow unlock path is to take the wait_lock of the | ||
729 | mutex. This synchronizes the locking and unlocking of the mutex. | ||
730 | |||
731 | A check is made to see if the mutex has waiters or not. On architectures that | ||
732 | do not have CMPXCHG, this is the location that the owner of the mutex will | ||
733 | determine if a waiter needs to be awoken or not. On architectures that | ||
734 | do have CMPXCHG, that check is done in the fast path, but it is still needed | ||
735 | in the slow path too. If a waiter of a mutex woke up because of a signal | ||
736 | or timeout between the time the owner failed the fast path CMPXCHG check and | ||
737 | the grabbing of the wait_lock, the mutex may not have any waiters, thus the | ||
738 | owner still needs to make this check. If there are no waiters then the mutex | ||
739 | owner field is set to NULL, the wait_lock is released and nothing more is | ||
740 | needed. | ||
741 | |||
742 | If there are waiters, then we need to wake one up and give that waiter | ||
743 | pending ownership. | ||
744 | |||
745 | On the wake up code, the pi_lock of the current owner is taken. The top | ||
746 | waiter of the lock is found and removed from the wait_list of the mutex | ||
747 | as well as the pi_list of the current owner. The task field of the new | ||
748 | pending owner's waiter structure is set to NULL, and the owner field of the | ||
749 | mutex is set to the new owner with the "Pending Owner" bit set, as well | ||
750 | as the "Has Waiters" bit if there still are other processes blocked on the | ||
751 | mutex. | ||
752 | |||
753 | The pi_lock of the previous owner is released, and the new pending owner's | ||
754 | pi_lock is taken. Remember that this is the trick to prevent the race | ||
755 | condition in rt_mutex_adjust_prio_chain from adding itself as a waiter | ||
756 | on the mutex. | ||
757 | |||
758 | We now clear the "pi_blocked_on" field of the new pending owner, and if | ||
759 | the mutex still has waiters pending, we add the new top waiter to the pi_list | ||
760 | of the pending owner. | ||
761 | |||
762 | Finally we unlock the pi_lock of the pending owner and wake it up. | ||
763 | |||
764 | |||
765 | Contact | ||
766 | ------- | ||
767 | |||
768 | For updates on this document, please email Steven Rostedt <rostedt@goodmis.org> | ||
769 | |||
770 | |||
771 | Credits | ||
772 | ------- | ||
773 | |||
774 | Author: Steven Rostedt <rostedt@goodmis.org> | ||
775 | |||
776 | Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap | ||
777 | |||
778 | Updates | ||
779 | ------- | ||
780 | |||
781 | This document was originally written for 2.6.17-rc3-mm1 | ||
diff --git a/Documentation/rt-mutex.txt b/Documentation/rt-mutex.txt
new file mode 100644
index 000000000000..243393d882ee
--- /dev/null
+++ b/Documentation/rt-mutex.txt
@@ -0,0 +1,79 @@ | |||
1 | RT-mutex subsystem with PI support | ||
2 | ---------------------------------- | ||
3 | |||
4 | RT-mutexes with priority inheritance are used to support PI-futexes, | ||
5 | which enable pthread_mutex_t priority inheritance attributes | ||
6 | (PTHREAD_PRIO_INHERIT). [See Documentation/pi-futex.txt for more details | ||
7 | about PI-futexes.] | ||
8 | |||
9 | This technology was developed in the -rt tree and streamlined for | ||
10 | pthread_mutex support. | ||
11 | |||
12 | Basic principles: | ||
13 | ----------------- | ||
14 | |||
15 | RT-mutexes extend the semantics of simple mutexes by the priority | ||
16 | inheritance protocol. | ||
17 | |||
18 | A low priority owner of a rt-mutex inherits the priority of a higher | ||
19 | priority waiter until the rt-mutex is released. If the temporarily | ||
20 | boosted owner blocks on a rt-mutex itself it propagates the priority | ||
21 | boosting to the owner of the other rt_mutex it gets blocked on. The | ||
22 | priority boosting is immediately removed once the rt_mutex has been | ||
23 | unlocked. | ||
24 | |||
25 | This approach allows us to shorten the block of high-prio tasks on | ||
26 | mutexes which protect shared resources. Priority inheritance is not a | ||
27 | magic bullet for poorly designed applications, but it allows | ||
28 | well-designed applications to use userspace locks in critical parts of | ||
29 | a high priority thread, without losing determinism. | ||
30 | |||
31 | The enqueueing of the waiters into the rtmutex waiter list is done in | ||
32 | priority order. For same priorities FIFO order is chosen. For each | ||
33 | rtmutex, only the top priority waiter is enqueued into the owner's | ||
34 | priority waiters list. This list too queues in priority order. Whenever | ||
35 | the top priority waiter of a task changes (for example it timed out or | ||
36 | got a signal), the priority of the owner task is readjusted. [The | ||
37 | priority enqueueing is handled by "plists", see include/linux/plist.h | ||
38 | for more details.] | ||
39 | |||
40 | RT-mutexes are optimized for fastpath operations and have no internal | ||
41 | locking overhead when locking an uncontended mutex or unlocking a mutex | ||
42 | without waiters. The optimized fastpath operations require cmpxchg | ||
43 | support. [If that is not available then the rt-mutex internal spinlock | ||
44 | is used] | ||
45 | |||
46 | The state of the rt-mutex is tracked via the owner field of the rt-mutex | ||
47 | structure: | ||
48 | |||
49 | rt_mutex->owner holds the task_struct pointer of the owner. Bits 0 and 1 | ||
50 | are used to keep track of the "owner is pending" and "rtmutex has | ||
51 | waiters" state. | ||
52 | |||
53 | owner bit1 bit0 | ||
54 | NULL 0 0 mutex is free (fast acquire possible) | ||
55 | NULL 0 1 invalid state | ||
56 | NULL 1 0 Transitional state* | ||
57 | NULL 1 1 invalid state | ||
58 | taskpointer 0 0 mutex is held (fast release possible) | ||
59 | taskpointer 0 1 task is pending owner | ||
60 | taskpointer 1 0 mutex is held and has waiters | ||
61 | taskpointer 1 1 task is pending owner and mutex has waiters | ||
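
A minimal sketch of how such an owner word can be decoded and fast-acquired
is given below. The macro and helper names are made up for illustration;
the real accessors live in the rtmutex implementation itself:

    #define OWNER_PENDING   1UL     /* bit 0: task is pending owner */
    #define OWNER_WAITERS   2UL     /* bit 1: rtmutex has waiters   */

    /* Strip the two state bits to get at the task_struct pointer. */
    static inline struct task_struct *owner_task(unsigned long word)
    {
            return (struct task_struct *)(word & ~3UL);
    }

    /* Fastpath acquire: only possible when the whole word is NULL,
     * i.e. no owner, no pending owner and no waiters. */
    static inline int fast_trylock(unsigned long *word, struct task_struct *me)
    {
            return cmpxchg(word, 0UL, (unsigned long)me) == 0UL;
    }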
62 | |||
63 | Pending-ownership handling is a performance optimization: | ||
64 | pending-ownership is assigned to the first (highest priority) waiter of | ||
65 | the mutex, when the mutex is released. The thread is woken up and once | ||
66 | it starts executing it can acquire the mutex. Until the mutex is taken | ||
67 | by it (bit 0 is cleared) a competing higher priority thread can "steal" | ||
68 | the mutex which puts the woken up thread back on the waiters list. | ||
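
The "steal" decision itself boils down to a plain priority comparison; a
hedged sketch (not the kernel's actual code) could look like this:

    /* May a newly arriving task take the mutex away from the pending
     * owner?  A lower numerical value means a higher priority. */
    static int can_steal(struct task_struct *pendowner,
                         struct task_struct *newcomer)
    {
            return newcomer->prio < pendowner->prio;
    }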
69 | |||
70 | The pending-ownership optimization is especially important for the | ||
71 | uninterrupted workflow of high-prio tasks which repeatedly | ||
72 | take/release locks that have lower-prio waiters. Without this | ||
73 | optimization the higher-prio thread would ping-pong to the lower-prio | ||
74 | task [because at unlock time we always assign a new owner]. | ||
75 | |||
76 | (*) The "mutex has waiters" bit gets set to take the lock. If the lock | ||
77 | doesn't already have an owner, this bit is quickly cleared if there are | ||
78 | no waiters. So this is a transitional state to synchronize with looking | ||
79 | at the owner field of the mutex and the mutex owner releasing the lock. | ||
diff --git a/Documentation/video4linux/README.pvrusb2 b/Documentation/video4linux/README.pvrusb2 new file mode 100644 index 000000000000..c73a32c34528 --- /dev/null +++ b/Documentation/video4linux/README.pvrusb2 | |||
@@ -0,0 +1,212 @@ | |||
1 | |||
2 | $Id$ | ||
3 | Mike Isely <isely@pobox.com> | ||
4 | |||
5 | pvrusb2 driver | ||
6 | |||
7 | Background: | ||
8 | |||
9 | This driver is intended for the "Hauppauge WinTV PVR USB 2.0", which | ||
10 | is a USB 2.0 hosted TV Tuner. This driver is a work in progress. | ||
11 | Its history started with the reverse-engineering effort by Björn | ||
12 | Danielsson <pvrusb2@dax.nu> whose web page can be found here: | ||
13 | |||
14 | http://pvrusb2.dax.nu/ | ||
15 | |||
16 | From there Aurelien Alleaume <slts@free.fr> began an effort to | ||
17 | create a video4linux compatible driver. I began with Aurelien's | ||
18 | last known snapshot and evolved the driver to the state it is in | ||
19 | here. | ||
20 | |||
21 | More information on this driver can be found at: | ||
22 | |||
23 | http://www.isely.net/pvrusb2.html | ||
24 | |||
25 | |||
26 | This driver has a strong separation of layers. They are very | ||
27 | roughly: | ||
28 | |||
29 | 1a. Low level wire-protocol implementation with the device. | ||
30 | |||
31 | 1b. I2C adaptor implementation and corresponding I2C client drivers | ||
32 | implemented elsewhere in V4L. | ||
33 | |||
34 | 1c. High level hardware driver implementation which coordinates all | ||
35 | activities that ensure correct operation of the device. | ||
36 | |||
37 | 2. A "context" layer which manages instancing of driver, setup, | ||
38 | tear-down, arbitration, and interaction with high level | ||
39 | interfaces appropriately as devices are hotplugged in the | ||
40 | system. | ||
41 | |||
42 | 3. High level interfaces which glue the driver to various published | ||
43 | Linux APIs (V4L, sysfs, maybe DVB in the future). | ||
44 | |||
45 | The most important shearing layer is between the top 2 layers. A | ||
46 | lot of work went into the driver to ensure that any kind of | ||
47 | conceivable API can be laid on top of the core driver. (Yes, the | ||
48 | driver internally leverages V4L to do its work but that really has | ||
49 | nothing to do with the API published by the driver to the outside | ||
50 | world.) The architecture allows for different APIs to | ||
51 | simultaneously access the driver. I have a strong sense of fairness | ||
52 | about APIs and also feel that it is a good design principle to keep | ||
53 | implementation and interface isolated from each other. Thus while | ||
54 | right now the V4L high level interface is the most complete, the | ||
55 | sysfs high level interface will work equally well for similar | ||
56 | functions, and there's no reason I see right now why it shouldn't be | ||
57 | possible to produce a DVB high level interface that can sit right | ||
58 | alongside V4L. | ||
59 | |||
60 | NOTE: Complete documentation on the pvrusb2 driver is contained in | ||
61 | the html files within the doc directory; these are exactly the same | ||
62 | as what is on the web site at the time. Browse those files | ||
63 | (especially the FAQ) before asking questions. | ||
64 | |||
65 | |||
66 | Building | ||
67 | |||
68 | Building these modules essentially amounts to just running "make", | ||
69 | but you need the kernel source tree nearby and you will likely also | ||
70 | want to set a few controlling environment variables first in order | ||
71 | to link things up with that source tree. Please see the Makefile | ||
72 | here for comments that explain how to do that. | ||
73 | |||
74 | |||
75 | Source file list / functional overview: | ||
76 | |||
77 | (Note: The term "module" used below generally refers to loosely | ||
78 | defined functional units within the pvrusb2 driver and bears no | ||
79 | relation to the Linux kernel's concept of a loadable module.) | ||
80 | |||
81 | pvrusb2-audio.[ch] - This is glue logic that resides between this | ||
82 | driver and the msp3400.ko I2C client driver (which is found | ||
83 | elsewhere in V4L). | ||
84 | |||
85 | pvrusb2-context.[ch] - This module implements the context for an | ||
86 | instance of the driver. Everything else eventually ties back to | ||
87 | or is otherwise instanced within the data structures implemented | ||
88 | here. Hotplugging is ultimately coordinated here. All high level | ||
89 | interfaces tie into the driver through this module. This module | ||
90 | helps arbitrate each interface's access to the actual driver core, | ||
91 | and is designed to allow concurrent access through multiple | ||
92 | instances of multiple interfaces (thus you can for example change | ||
93 | the tuner's frequency through sysfs while simultaneously streaming | ||
94 | video through V4L out to an instance of mplayer). | ||
95 | |||
96 | pvrusb2-debug.h - This header defines a printk() wrapper and a mask | ||
97 | of debugging bit definitions for the various kinds of debug | ||
98 | messages that can be enabled within the driver. | ||
99 | |||
100 | pvrusb2-debugifc.[ch] - This module implements a crude command line | ||
101 | oriented debug interface into the driver. Aside from being part | ||
102 | of the process for implementing manual firmware extraction (see | ||
103 | the pvrusb2 web site mentioned earlier), I'm probably the only one | ||
104 | who has ever used this. It is mainly a debugging aid. | ||
105 | |||
106 | pvrusb2-eeprom.[ch] - This is glue logic that resides between this | ||
107 | driver and the tveeprom.ko module, which is itself implemented | ||
108 | elsewhere in V4L. | ||
109 | |||
110 | pvrusb2-encoder.[ch] - This module implements all protocol needed to | ||
111 | interact with the Conexant mpeg2 encoder chip within the pvrusb2 | ||
112 | device. It is a crude echo of corresponding logic in ivtv, | ||
113 | however the design goals (strict isolation) and physical layer | ||
114 | (proxy through USB instead of PCI) are different enough that this | ||
115 | implementation had to be completely different. | ||
116 | |||
117 | pvrusb2-hdw-internal.h - This header defines the core data structure | ||
118 | in the driver used to track ALL internal state related to control | ||
119 | of the hardware. Nobody outside of the core hardware-handling | ||
120 | modules should have any business using this header. All external | ||
121 | access to the driver should be through one of the high level | ||
122 | interfaces (e.g. V4L, sysfs, etc), and in fact even those high | ||
123 | level interfaces are restricted to the API defined in | ||
124 | pvrusb2-hdw.h and NOT this header. | ||
125 | |||
126 | pvrusb2-hdw.h - This header defines the full internal API for | ||
127 | controlling the hardware. High level interfaces (e.g. V4L, sysfs) | ||
128 | will work through here. | ||
129 | |||
130 | pvrusb2-hdw.c - This module implements all the various bits of logic | ||
131 | that handle overall control of a specific pvrusb2 device. | ||
132 | (Policy, instantiation, and arbitration of pvrusb2 devices fall | ||
133 | within the jurisdiction of pvrusb2-context, not here). | ||
134 | |||
135 | pvrusb2-i2c-chips-*.c - These modules implement the glue logic to | ||
136 | tie together and configure various I2C modules as they attach to | ||
137 | the I2C bus. There are two versions of this file. The "v4l2" | ||
138 | version is intended to be used in-tree alongside V4L, where we | ||
139 | implement just the logic that makes sense for a pure V4L | ||
140 | environment. The "all" version is intended for use outside of | ||
141 | V4L, where we might encounter other possibly "challenging" modules | ||
142 | from ivtv or older kernel snapshots (or even the support modules | ||
143 | in the standalone snapshot). | ||
144 | |||
145 | pvrusb2-i2c-cmd-v4l1.[ch] - This module implements generic V4L1 | ||
146 | compatible commands to the I2C modules. It is here where state | ||
147 | changes inside the pvrusb2 driver are translated into V4L1 | ||
148 | commands that are in turn sent to the various I2C modules. | ||
149 | |||
150 | pvrusb2-i2c-cmd-v4l2.[ch] - This module implements generic V4L2 | ||
151 | compatible commands to the I2C modules. It is here where state | ||
152 | changes inside the pvrusb2 driver are translated into V4L2 | ||
153 | commands that are in turn sent to the various I2C modules. | ||
154 | |||
155 | pvrusb2-i2c-core.[ch] - This module provides an implementation of a | ||
156 | kernel-friendly I2C adaptor driver, through which other external | ||
157 | I2C client drivers (e.g. msp3400, tuner, lirc) may connect and | ||
158 | operate corresponding chips within the pvrusb2 device. It is | ||
159 | through here that other V4L modules can reach into this driver to | ||
160 | operate specific pieces (and those modules are in turn driven by | ||
161 | glue logic which is coordinated by pvrusb2-hdw, doled out by | ||
162 | pvrusb2-context, and then ultimately made available to users | ||
163 | through one of the high level interfaces). | ||
164 | |||
165 | pvrusb2-io.[ch] - This module implements a very low level ring of | ||
166 | transfer buffers, required in order to stream data from the | ||
167 | device. This module is *very* low level. It only operates the | ||
168 | buffers and makes no attempt to define any policy or mechanism for | ||
169 | how such buffers might be used. | ||
170 | |||
171 | pvrusb2-ioread.[ch] - This module layers on top of pvrusb2-io.[ch] | ||
172 | to provide a streaming API usable by a read() system call style of | ||
173 | I/O. Right now this is the only layer on top of pvrusb2-io.[ch], | ||
174 | however the underlying architecture here was intended to allow for | ||
175 | other styles of I/O to be implemented with additional modules, like | ||
176 | mmap()'ed buffers or something even more exotic. | ||
177 | |||
178 | pvrusb2-main.c - This is the top level of the driver. Module level | ||
179 | and USB core entry points are here. This is our "main". | ||
180 | |||
181 | pvrusb2-sysfs.[ch] - This is the high level interface which ties the | ||
182 | pvrusb2 driver into sysfs. Through this interface you can do | ||
183 | everything with the driver except actually stream data. | ||
184 | |||
185 | pvrusb2-tuner.[ch] - This is glue logic that resides between this | ||
186 | driver and the tuner.ko I2C client driver (which is found | ||
187 | elsewhere in V4L). | ||
188 | |||
189 | pvrusb2-util.h - This header defines some common macros used | ||
190 | throughout the driver. These macros are not really specific to | ||
191 | the driver, but they had to go somewhere. | ||
192 | |||
193 | pvrusb2-v4l2.[ch] - This is the high level interface which ties the | ||
194 | pvrusb2 driver into video4linux. It is through here that V4L | ||
195 | applications can open and operate the driver in the usual V4L | ||
196 | ways. Note that **ALL** V4L functionality is published only | ||
197 | through here and nowhere else. | ||
198 | |||
199 | pvrusb2-video-*.[ch] - This is glue logic that resides between this | ||
200 | driver and the saa711x.ko I2C client driver (which is found | ||
201 | elsewhere in V4L). Note that saa711x.ko used to be known as | ||
202 | saa7115.ko in ivtv. There are two versions of this; one is | ||
203 | selected depending on the particular saa711[5x].ko that is found. | ||
204 | |||
205 | pvrusb2.h - This header contains compile time tunable parameters | ||
206 | (and at the moment the driver has very little that needs to be | ||
207 | tuned). | ||
208 | |||
209 | |||
210 | -Mike Isely | ||
211 | isely@pobox.com | ||
212 | |||
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 558b83368559..254c507a608c 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c | |||
@@ -481,7 +481,7 @@ register_cpus(void) | |||
481 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); | 481 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); |
482 | if (!p) | 482 | if (!p) |
483 | return -ENOMEM; | 483 | return -ENOMEM; |
484 | register_cpu(p, i, NULL); | 484 | register_cpu(p, i); |
485 | } | 485 | } |
486 | return 0; | 486 | return 0; |
487 | } | 487 | } |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 9fc9af88c60c..093ccba0503c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -808,7 +808,7 @@ static int __init topology_init(void) | |||
808 | int cpu; | 808 | int cpu; |
809 | 809 | ||
810 | for_each_possible_cpu(cpu) | 810 | for_each_possible_cpu(cpu) |
811 | register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL); | 811 | register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu); |
812 | 812 | ||
813 | return 0; | 813 | return 0; |
814 | } | 814 | } |
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 47c08bcd9b24..3bb221db164a 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig | |||
@@ -233,7 +233,7 @@ config NR_CPUS | |||
233 | 233 | ||
234 | config SCHED_SMT | 234 | config SCHED_SMT |
235 | bool "SMT (Hyperthreading) scheduler support" | 235 | bool "SMT (Hyperthreading) scheduler support" |
236 | depends on SMP | 236 | depends on X86_HT |
237 | help | 237 | help |
238 | SMT scheduler support improves the CPU scheduler's decision making | 238 | SMT scheduler support improves the CPU scheduler's decision making |
239 | when dealing with Intel Pentium 4 chips with HyperThreading at a | 239 | when dealing with Intel Pentium 4 chips with HyperThreading at a |
@@ -242,7 +242,7 @@ config SCHED_SMT | |||
242 | 242 | ||
243 | config SCHED_MC | 243 | config SCHED_MC |
244 | bool "Multi-core scheduler support" | 244 | bool "Multi-core scheduler support" |
245 | depends on SMP | 245 | depends on X86_HT |
246 | default y | 246 | default y |
247 | help | 247 | help |
248 | Multi-core scheduler support improves the CPU scheduler's decision | 248 | Multi-core scheduler support improves the CPU scheduler's decision |
@@ -780,6 +780,17 @@ config HOTPLUG_CPU | |||
780 | enable suspend on SMP systems. CPUs can be controlled through | 780 | enable suspend on SMP systems. CPUs can be controlled through |
781 | /sys/devices/system/cpu. | 781 | /sys/devices/system/cpu. |
782 | 782 | ||
783 | config COMPAT_VDSO | ||
784 | bool "Compat VDSO support" | ||
785 | default y | ||
786 | help | ||
787 | Map the VDSO to the predictable old-style address too. | ||
788 | ---help--- | ||
789 | Say N here if you are running a sufficiently recent glibc | ||
790 | version (2.3.3 or later), to remove the high-mapped | ||
791 | VDSO mapping and to exclusively use the randomized VDSO. | ||
792 | |||
793 | If unsure, say Y. | ||
783 | 794 | ||
784 | endmenu | 795 | endmenu |
785 | 796 | ||
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c index 1c3a809e6421..c80271f8f084 100644 --- a/arch/i386/kernel/asm-offsets.c +++ b/arch/i386/kernel/asm-offsets.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/fixmap.h> | 14 | #include <asm/fixmap.h> |
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/elf.h> | ||
17 | 18 | ||
18 | #define DEFINE(sym, val) \ | 19 | #define DEFINE(sym, val) \ |
19 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | 20 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) |
@@ -54,6 +55,7 @@ void foo(void) | |||
54 | OFFSET(TI_preempt_count, thread_info, preempt_count); | 55 | OFFSET(TI_preempt_count, thread_info, preempt_count); |
55 | OFFSET(TI_addr_limit, thread_info, addr_limit); | 56 | OFFSET(TI_addr_limit, thread_info, addr_limit); |
56 | OFFSET(TI_restart_block, thread_info, restart_block); | 57 | OFFSET(TI_restart_block, thread_info, restart_block); |
58 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); | ||
57 | BLANK(); | 59 | BLANK(); |
58 | 60 | ||
59 | OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); | 61 | OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); |
@@ -69,7 +71,7 @@ void foo(void) | |||
69 | sizeof(struct tss_struct)); | 71 | sizeof(struct tss_struct)); |
70 | 72 | ||
71 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); | 73 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); |
72 | DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL)); | 74 | DEFINE(VDSO_PRELINK, VDSO_PRELINK); |
73 | 75 | ||
74 | OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); | 76 | OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); |
75 | } | 77 | } |
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index fd0457c9c827..e6a2d6b80cda 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c | |||
@@ -235,10 +235,10 @@ static void __init init_amd(struct cpuinfo_x86 *c) | |||
235 | while ((1 << bits) < c->x86_max_cores) | 235 | while ((1 << bits) < c->x86_max_cores) |
236 | bits++; | 236 | bits++; |
237 | } | 237 | } |
238 | cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); | 238 | c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1); |
239 | phys_proc_id[cpu] >>= bits; | 239 | c->phys_proc_id >>= bits; |
240 | printk(KERN_INFO "CPU %d(%d) -> Core %d\n", | 240 | printk(KERN_INFO "CPU %d(%d) -> Core %d\n", |
241 | cpu, c->x86_max_cores, cpu_core_id[cpu]); | 241 | cpu, c->x86_max_cores, c->cpu_core_id); |
242 | } | 242 | } |
243 | #endif | 243 | #endif |
244 | 244 | ||
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 44f2c5f2dda1..70c87de582c7 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
@@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) | |||
294 | if (c->x86 >= 0x6) | 294 | if (c->x86 >= 0x6) |
295 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 295 | c->x86_model += ((tfms >> 16) & 0xF) << 4; |
296 | c->x86_mask = tfms & 15; | 296 | c->x86_mask = tfms & 15; |
297 | #ifdef CONFIG_SMP | 297 | #ifdef CONFIG_X86_HT |
298 | c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); | 298 | c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); |
299 | #else | 299 | #else |
300 | c->apicid = (ebx >> 24) & 0xFF; | 300 | c->apicid = (ebx >> 24) & 0xFF; |
@@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) | |||
319 | early_intel_workaround(c); | 319 | early_intel_workaround(c); |
320 | 320 | ||
321 | #ifdef CONFIG_X86_HT | 321 | #ifdef CONFIG_X86_HT |
322 | phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; | 322 | c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; |
323 | #endif | 323 | #endif |
324 | } | 324 | } |
325 | 325 | ||
@@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
477 | { | 477 | { |
478 | u32 eax, ebx, ecx, edx; | 478 | u32 eax, ebx, ecx, edx; |
479 | int index_msb, core_bits; | 479 | int index_msb, core_bits; |
480 | int cpu = smp_processor_id(); | ||
481 | 480 | ||
482 | cpuid(1, &eax, &ebx, &ecx, &edx); | 481 | cpuid(1, &eax, &ebx, &ecx, &edx); |
483 | 482 | ||
484 | |||
485 | if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) | 483 | if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
486 | return; | 484 | return; |
487 | 485 | ||
@@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
492 | } else if (smp_num_siblings > 1 ) { | 490 | } else if (smp_num_siblings > 1 ) { |
493 | 491 | ||
494 | if (smp_num_siblings > NR_CPUS) { | 492 | if (smp_num_siblings > NR_CPUS) { |
495 | printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); | 493 | printk(KERN_WARNING "CPU: Unsupported number of the " |
494 | "siblings %d", smp_num_siblings); | ||
496 | smp_num_siblings = 1; | 495 | smp_num_siblings = 1; |
497 | return; | 496 | return; |
498 | } | 497 | } |
499 | 498 | ||
500 | index_msb = get_count_order(smp_num_siblings); | 499 | index_msb = get_count_order(smp_num_siblings); |
501 | phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); | 500 | c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); |
502 | 501 | ||
503 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 502 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", |
504 | phys_proc_id[cpu]); | 503 | c->phys_proc_id); |
505 | 504 | ||
506 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | 505 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; |
507 | 506 | ||
@@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
509 | 508 | ||
510 | core_bits = get_count_order(c->x86_max_cores); | 509 | core_bits = get_count_order(c->x86_max_cores); |
511 | 510 | ||
512 | cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & | 511 | c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & |
513 | ((1 << core_bits) - 1); | 512 | ((1 << core_bits) - 1); |
514 | 513 | ||
515 | if (c->x86_max_cores > 1) | 514 | if (c->x86_max_cores > 1) |
516 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 515 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", |
517 | cpu_core_id[cpu]); | 516 | c->cpu_core_id); |
518 | } | 517 | } |
519 | } | 518 | } |
520 | #endif | 519 | #endif |
@@ -613,6 +612,12 @@ void __cpuinit cpu_init(void) | |||
613 | set_in_cr4(X86_CR4_TSD); | 612 | set_in_cr4(X86_CR4_TSD); |
614 | } | 613 | } |
615 | 614 | ||
615 | /* The CPU hotplug case */ | ||
616 | if (cpu_gdt_descr->address) { | ||
617 | gdt = (struct desc_struct *)cpu_gdt_descr->address; | ||
618 | memset(gdt, 0, PAGE_SIZE); | ||
619 | goto old_gdt; | ||
620 | } | ||
616 | /* | 621 | /* |
617 | * This is a horrible hack to allocate the GDT. The problem | 622 | * This is a horrible hack to allocate the GDT. The problem |
618 | * is that cpu_init() is called really early for the boot CPU | 623 | * is that cpu_init() is called really early for the boot CPU |
@@ -631,7 +636,7 @@ void __cpuinit cpu_init(void) | |||
631 | local_irq_enable(); | 636 | local_irq_enable(); |
632 | } | 637 | } |
633 | } | 638 | } |
634 | 639 | old_gdt: | |
635 | /* | 640 | /* |
636 | * Initialize the per-CPU GDT with the boot GDT, | 641 | * Initialize the per-CPU GDT with the boot GDT, |
637 | * and set up the GDT descriptor: | 642 | * and set up the GDT descriptor: |
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index 6c37b4fd8ce2..e9f0b928b0a9 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c | |||
@@ -159,13 +159,13 @@ union l2_cache { | |||
159 | unsigned val; | 159 | unsigned val; |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static unsigned short assocs[] = { | 162 | static const unsigned short assocs[] = { |
163 | [1] = 1, [2] = 2, [4] = 4, [6] = 8, | 163 | [1] = 1, [2] = 2, [4] = 4, [6] = 8, |
164 | [8] = 16, | 164 | [8] = 16, |
165 | [0xf] = 0xffff // ?? | 165 | [0xf] = 0xffff // ?? |
166 | }; | 166 | }; |
167 | static unsigned char levels[] = { 1, 1, 2 }; | 167 | static const unsigned char levels[] = { 1, 1, 2 }; |
168 | static unsigned char types[] = { 1, 2, 3 }; | 168 | static const unsigned char types[] = { 1, 2, 3 }; |
169 | 169 | ||
170 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 170 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
171 | union _cpuid4_leaf_ebx *ebx, | 171 | union _cpuid4_leaf_ebx *ebx, |
@@ -261,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
261 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ | 261 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ |
262 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ | 262 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ |
263 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; | 263 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; |
264 | #ifdef CONFIG_SMP | 264 | #ifdef CONFIG_X86_HT |
265 | unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); | 265 | unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); |
266 | #endif | 266 | #endif |
267 | 267 | ||
@@ -383,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
383 | 383 | ||
384 | if (new_l2) { | 384 | if (new_l2) { |
385 | l2 = new_l2; | 385 | l2 = new_l2; |
386 | #ifdef CONFIG_SMP | 386 | #ifdef CONFIG_X86_HT |
387 | cpu_llc_id[cpu] = l2_id; | 387 | cpu_llc_id[cpu] = l2_id; |
388 | #endif | 388 | #endif |
389 | } | 389 | } |
390 | 390 | ||
391 | if (new_l3) { | 391 | if (new_l3) { |
392 | l3 = new_l3; | 392 | l3 = new_l3; |
393 | #ifdef CONFIG_SMP | 393 | #ifdef CONFIG_X86_HT |
394 | cpu_llc_id[cpu] = l3_id; | 394 | cpu_llc_id[cpu] = l3_id; |
395 | #endif | 395 | #endif |
396 | } | 396 | } |
@@ -729,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) | |||
729 | return; | 729 | return; |
730 | } | 730 | } |
731 | 731 | ||
732 | static int cacheinfo_cpu_callback(struct notifier_block *nfb, | 732 | static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, |
733 | unsigned long action, void *hcpu) | 733 | unsigned long action, void *hcpu) |
734 | { | 734 | { |
735 | unsigned int cpu = (unsigned long)hcpu; | 735 | unsigned int cpu = (unsigned long)hcpu; |
@@ -747,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
747 | return NOTIFY_OK; | 747 | return NOTIFY_OK; |
748 | } | 748 | } |
749 | 749 | ||
750 | static struct notifier_block cacheinfo_cpu_notifier = | 750 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = |
751 | { | 751 | { |
752 | .notifier_call = cacheinfo_cpu_callback, | 752 | .notifier_call = cacheinfo_cpu_callback, |
753 | }; | 753 | }; |
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index a19fcb262dbb..f54a15268ed7 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c | |||
@@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
18 | * applications want to get the raw CPUID data, they should access | 18 | * applications want to get the raw CPUID data, they should access |
19 | * /dev/cpu/<cpu_nr>/cpuid instead. | 19 | * /dev/cpu/<cpu_nr>/cpuid instead. |
20 | */ | 20 | */ |
21 | static char *x86_cap_flags[] = { | 21 | static const char * const x86_cap_flags[] = { |
22 | /* Intel-defined */ | 22 | /* Intel-defined */ |
23 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", | 23 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", |
24 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", | 24 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", |
@@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
62 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 62 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
63 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 63 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
64 | }; | 64 | }; |
65 | static char *x86_power_flags[] = { | 65 | static const char * const x86_power_flags[] = { |
66 | "ts", /* temperature sensor */ | 66 | "ts", /* temperature sensor */ |
67 | "fid", /* frequency id control */ | 67 | "fid", /* frequency id control */ |
68 | "vid", /* voltage id control */ | 68 | "vid", /* voltage id control */ |
@@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
109 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); | 109 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); |
110 | #ifdef CONFIG_X86_HT | 110 | #ifdef CONFIG_X86_HT |
111 | if (c->x86_max_cores * smp_num_siblings > 1) { | 111 | if (c->x86_max_cores * smp_num_siblings > 1) { |
112 | seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); | 112 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
113 | seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); | 113 | seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); |
114 | seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); | 114 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); |
115 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 115 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); |
116 | } | 116 | } |
117 | #endif | 117 | #endif |
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index 1d9a4abcdfc7..f6dfa9fb675c 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c | |||
@@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac | |||
183 | return NOTIFY_OK; | 183 | return NOTIFY_OK; |
184 | } | 184 | } |
185 | 185 | ||
186 | static struct notifier_block cpuid_class_cpu_notifier = | 186 | static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier = |
187 | { | 187 | { |
188 | .notifier_call = cpuid_class_cpu_callback, | 188 | .notifier_call = cpuid_class_cpu_callback, |
189 | }; | 189 | }; |
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index e6e4506e749a..fbdb933251b6 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S | |||
@@ -83,6 +83,12 @@ VM_MASK = 0x00020000 | |||
83 | #define resume_kernel restore_nocheck | 83 | #define resume_kernel restore_nocheck |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #ifdef CONFIG_VM86 | ||
87 | #define resume_userspace_sig check_userspace | ||
88 | #else | ||
89 | #define resume_userspace_sig resume_userspace | ||
90 | #endif | ||
91 | |||
86 | #define SAVE_ALL \ | 92 | #define SAVE_ALL \ |
87 | cld; \ | 93 | cld; \ |
88 | pushl %es; \ | 94 | pushl %es; \ |
@@ -211,6 +217,7 @@ ret_from_exception: | |||
211 | preempt_stop | 217 | preempt_stop |
212 | ret_from_intr: | 218 | ret_from_intr: |
213 | GET_THREAD_INFO(%ebp) | 219 | GET_THREAD_INFO(%ebp) |
220 | check_userspace: | ||
214 | movl EFLAGS(%esp), %eax # mix EFLAGS and CS | 221 | movl EFLAGS(%esp), %eax # mix EFLAGS and CS |
215 | movb CS(%esp), %al | 222 | movb CS(%esp), %al |
216 | testl $(VM_MASK | 3), %eax | 223 | testl $(VM_MASK | 3), %eax |
@@ -263,7 +270,12 @@ sysenter_past_esp: | |||
263 | pushl $(__USER_CS) | 270 | pushl $(__USER_CS) |
264 | CFI_ADJUST_CFA_OFFSET 4 | 271 | CFI_ADJUST_CFA_OFFSET 4 |
265 | /*CFI_REL_OFFSET cs, 0*/ | 272 | /*CFI_REL_OFFSET cs, 0*/ |
266 | pushl $SYSENTER_RETURN | 273 | /* |
274 | * Push current_thread_info()->sysenter_return to the stack. | ||
275 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words | ||
276 | * pushed above; +8 corresponds to copy_thread's esp0 setting. | ||
277 | */ | ||
278 | pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) | ||
267 | CFI_ADJUST_CFA_OFFSET 4 | 279 | CFI_ADJUST_CFA_OFFSET 4 |
268 | CFI_REL_OFFSET eip, 0 | 280 | CFI_REL_OFFSET eip, 0 |
269 | 281 | ||
@@ -415,7 +427,7 @@ work_notifysig: # deal with pending signals and | |||
415 | # vm86-space | 427 | # vm86-space |
416 | xorl %edx, %edx | 428 | xorl %edx, %edx |
417 | call do_notify_resume | 429 | call do_notify_resume |
418 | jmp resume_userspace | 430 | jmp resume_userspace_sig |
419 | 431 | ||
420 | ALIGN | 432 | ALIGN |
421 | work_notifysig_v86: | 433 | work_notifysig_v86: |
@@ -428,7 +440,7 @@ work_notifysig_v86: | |||
428 | movl %eax, %esp | 440 | movl %eax, %esp |
429 | xorl %edx, %edx | 441 | xorl %edx, %edx |
430 | call do_notify_resume | 442 | call do_notify_resume |
431 | jmp resume_userspace | 443 | jmp resume_userspace_sig |
432 | #endif | 444 | #endif |
433 | 445 | ||
434 | # perform syscall exit tracing | 446 | # perform syscall exit tracing |
@@ -515,7 +527,7 @@ ENTRY(irq_entries_start) | |||
515 | .if vector | 527 | .if vector |
516 | CFI_ADJUST_CFA_OFFSET -4 | 528 | CFI_ADJUST_CFA_OFFSET -4 |
517 | .endif | 529 | .endif |
518 | 1: pushl $vector-256 | 530 | 1: pushl $~(vector) |
519 | CFI_ADJUST_CFA_OFFSET 4 | 531 | CFI_ADJUST_CFA_OFFSET 4 |
520 | jmp common_interrupt | 532 | jmp common_interrupt |
521 | .data | 533 | .data |
@@ -535,7 +547,7 @@ common_interrupt: | |||
535 | #define BUILD_INTERRUPT(name, nr) \ | 547 | #define BUILD_INTERRUPT(name, nr) \ |
536 | ENTRY(name) \ | 548 | ENTRY(name) \ |
537 | RING0_INT_FRAME; \ | 549 | RING0_INT_FRAME; \ |
538 | pushl $nr-256; \ | 550 | pushl $~(nr); \ |
539 | CFI_ADJUST_CFA_OFFSET 4; \ | 551 | CFI_ADJUST_CFA_OFFSET 4; \ |
540 | SAVE_ALL; \ | 552 | SAVE_ALL; \ |
541 | movl %esp,%eax; \ | 553 | movl %esp,%eax; \ |
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 061533e0cb5e..c703bc7b0880 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c | |||
@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | |||
53 | */ | 53 | */ |
54 | fastcall unsigned int do_IRQ(struct pt_regs *regs) | 54 | fastcall unsigned int do_IRQ(struct pt_regs *regs) |
55 | { | 55 | { |
56 | /* high bits used in ret_from_ code */ | 56 | /* high bit used in ret_from_ code */ |
57 | int irq = regs->orig_eax & 0xff; | 57 | int irq = ~regs->orig_eax; |
58 | #ifdef CONFIG_4KSTACKS | 58 | #ifdef CONFIG_4KSTACKS |
59 | union irq_ctx *curctx, *irqctx; | 59 | union irq_ctx *curctx, *irqctx; |
60 | u32 *isp; | 60 | u32 *isp; |
@@ -100,8 +100,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs) | |||
100 | * softirq checks work in the hardirq context. | 100 | * softirq checks work in the hardirq context. |
101 | */ | 101 | */ |
102 | irqctx->tinfo.preempt_count = | 102 | irqctx->tinfo.preempt_count = |
103 | irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK | | 103 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | |
104 | curctx->tinfo.preempt_count & SOFTIRQ_MASK; | 104 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); |
105 | 105 | ||
106 | asm volatile( | 106 | asm volatile( |
107 | " xchgl %%ebx,%%esp \n" | 107 | " xchgl %%ebx,%%esp \n" |
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 7a328230e540..d022cb8fd725 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c | |||
@@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti | |||
266 | return NOTIFY_OK; | 266 | return NOTIFY_OK; |
267 | } | 267 | } |
268 | 268 | ||
269 | static struct notifier_block msr_class_cpu_notifier = | 269 | static struct notifier_block __cpuinitdata msr_class_cpu_notifier = |
270 | { | 270 | { |
271 | .notifier_call = msr_class_cpu_callback, | 271 | .notifier_call = msr_class_cpu_callback, |
272 | }; | 272 | }; |
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c index 321f5fd26e75..9bf590cefc7d 100644 --- a/arch/i386/kernel/scx200.c +++ b/arch/i386/kernel/scx200.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/mutex.h> | ||
12 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
13 | 14 | ||
14 | #include <linux/scx200.h> | 15 | #include <linux/scx200.h> |
@@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = { | |||
45 | .probe = scx200_probe, | 46 | .probe = scx200_probe, |
46 | }; | 47 | }; |
47 | 48 | ||
48 | static DEFINE_SPINLOCK(scx200_gpio_config_lock); | 49 | static DEFINE_MUTEX(scx200_gpio_config_lock); |
49 | 50 | ||
50 | static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 51 | static void __devinit scx200_init_shadow(void) |
51 | { | 52 | { |
52 | int bank; | 53 | int bank; |
54 | |||
55 | /* read the current values driven on the GPIO signals */ | ||
56 | for (bank = 0; bank < 2; ++bank) | ||
57 | scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank); | ||
58 | } | ||
59 | |||
60 | static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
61 | { | ||
53 | unsigned base; | 62 | unsigned base; |
54 | 63 | ||
55 | if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || | 64 | if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || |
@@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_ | |||
63 | } | 72 | } |
64 | 73 | ||
65 | scx200_gpio_base = base; | 74 | scx200_gpio_base = base; |
66 | 75 | scx200_init_shadow(); | |
67 | /* read the current values driven on the GPIO signals */ | ||
68 | for (bank = 0; bank < 2; ++bank) | ||
69 | scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank); | ||
70 | 76 | ||
71 | } else { | 77 | } else { |
72 | /* find the base of the Configuration Block */ | 78 | /* find the base of the Configuration Block */ |
@@ -87,12 +93,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_ | |||
87 | return 0; | 93 | return 0; |
88 | } | 94 | } |
89 | 95 | ||
90 | u32 scx200_gpio_configure(int index, u32 mask, u32 bits) | 96 | u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits) |
91 | { | 97 | { |
92 | u32 config, new_config; | 98 | u32 config, new_config; |
93 | unsigned long flags; | ||
94 | 99 | ||
95 | spin_lock_irqsave(&scx200_gpio_config_lock, flags); | 100 | mutex_lock(&scx200_gpio_config_lock); |
96 | 101 | ||
97 | outl(index, scx200_gpio_base + 0x20); | 102 | outl(index, scx200_gpio_base + 0x20); |
98 | config = inl(scx200_gpio_base + 0x24); | 103 | config = inl(scx200_gpio_base + 0x24); |
@@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits) | |||
100 | new_config = (config & mask) | bits; | 105 | new_config = (config & mask) | bits; |
101 | outl(new_config, scx200_gpio_base + 0x24); | 106 | outl(new_config, scx200_gpio_base + 0x24); |
102 | 107 | ||
103 | spin_unlock_irqrestore(&scx200_gpio_config_lock, flags); | 108 | mutex_unlock(&scx200_gpio_config_lock); |
104 | 109 | ||
105 | return config; | 110 | return config; |
106 | } | 111 | } |
107 | 112 | ||
108 | #if 0 | ||
109 | void scx200_gpio_dump(unsigned index) | ||
110 | { | ||
111 | u32 config = scx200_gpio_configure(index, ~0, 0); | ||
112 | printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config); | ||
113 | |||
114 | if (config & 1) | ||
115 | printk(" OE"); /* output enabled */ | ||
116 | else | ||
117 | printk(" TS"); /* tristate */ | ||
118 | if (config & 2) | ||
119 | printk(" PP"); /* push pull */ | ||
120 | else | ||
121 | printk(" OD"); /* open drain */ | ||
122 | if (config & 4) | ||
123 | printk(" PUE"); /* pull up enabled */ | ||
124 | else | ||
125 | printk(" PUD"); /* pull up disabled */ | ||
126 | if (config & 8) | ||
127 | printk(" LOCKED"); /* locked */ | ||
128 | if (config & 16) | ||
129 | printk(" LEVEL"); /* level input */ | ||
130 | else | ||
131 | printk(" EDGE"); /* edge input */ | ||
132 | if (config & 32) | ||
133 | printk(" HI"); /* trigger on rising edge */ | ||
134 | else | ||
135 | printk(" LO"); /* trigger on falling edge */ | ||
136 | if (config & 64) | ||
137 | printk(" DEBOUNCE"); /* debounce */ | ||
138 | printk("\n"); | ||
139 | } | ||
140 | #endif /* 0 */ | ||
141 | |||
142 | static int __init scx200_init(void) | 113 | static int __init scx200_init(void) |
143 | { | 114 | { |
144 | printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); | 115 | printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); |
@@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base); | |||
159 | EXPORT_SYMBOL(scx200_gpio_shadow); | 130 | EXPORT_SYMBOL(scx200_gpio_shadow); |
160 | EXPORT_SYMBOL(scx200_gpio_configure); | 131 | EXPORT_SYMBOL(scx200_gpio_configure); |
161 | EXPORT_SYMBOL(scx200_cb_base); | 132 | EXPORT_SYMBOL(scx200_cb_base); |
162 | |||
163 | /* | ||
164 | Local variables: | ||
165 | compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules" | ||
166 | c-basic-offset: 8 | ||
167 | End: | ||
168 | */ | ||
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 5c352c3a9e7f..43002cfb40c4 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c | |||
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
351 | goto give_sigsegv; | 351 | goto give_sigsegv; |
352 | } | 352 | } |
353 | 353 | ||
354 | restorer = &__kernel_sigreturn; | 354 | restorer = (void *)VDSO_SYM(&__kernel_sigreturn); |
355 | if (ka->sa.sa_flags & SA_RESTORER) | 355 | if (ka->sa.sa_flags & SA_RESTORER) |
356 | restorer = ka->sa.sa_restorer; | 356 | restorer = ka->sa.sa_restorer; |
357 | 357 | ||
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
447 | goto give_sigsegv; | 447 | goto give_sigsegv; |
448 | 448 | ||
449 | /* Set up to return from userspace. */ | 449 | /* Set up to return from userspace. */ |
450 | restorer = &__kernel_rt_sigreturn; | 450 | restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn); |
451 | if (ka->sa.sa_flags & SA_RESTORER) | 451 | if (ka->sa.sa_flags & SA_RESTORER) |
452 | restorer = ka->sa.sa_restorer; | 452 | restorer = ka->sa.sa_restorer; |
453 | err |= __put_user(restorer, &frame->pretcode); | 453 | err |= __put_user(restorer, &frame->pretcode); |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index bce5470ecb42..89e7315e539c 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -67,12 +67,6 @@ int smp_num_siblings = 1; | |||
67 | EXPORT_SYMBOL(smp_num_siblings); | 67 | EXPORT_SYMBOL(smp_num_siblings); |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | /* Package ID of each logical CPU */ | ||
71 | int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; | ||
72 | |||
73 | /* Core ID of each logical CPU */ | ||
74 | int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; | ||
75 | |||
76 | /* Last level cache ID of each logical CPU */ | 70 | /* Last level cache ID of each logical CPU */ |
77 | int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; | 71 | int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; |
78 | 72 | ||
@@ -454,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu) | |||
454 | struct cpuinfo_x86 *c = cpu_data + cpu; | 448 | struct cpuinfo_x86 *c = cpu_data + cpu; |
455 | /* | 449 | /* |
456 | * For perf, we return last level cache shared map. | 450 | * For perf, we return last level cache shared map. |
457 | * TBD: when power saving sched policy is added, we will return | 451 | * And for power savings, we return cpu_core_map |
458 | * cpu_core_map when power saving policy is enabled | ||
459 | */ | 452 | */ |
460 | return c->llc_shared_map; | 453 | if (sched_mc_power_savings || sched_smt_power_savings) |
454 | return cpu_core_map[cpu]; | ||
455 | else | ||
456 | return c->llc_shared_map; | ||
461 | } | 457 | } |
462 | 458 | ||
463 | /* representing cpus for which sibling maps can be computed */ | 459 | /* representing cpus for which sibling maps can be computed */ |
@@ -473,8 +469,8 @@ set_cpu_sibling_map(int cpu) | |||
473 | 469 | ||
474 | if (smp_num_siblings > 1) { | 470 | if (smp_num_siblings > 1) { |
475 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | 471 | for_each_cpu_mask(i, cpu_sibling_setup_map) { |
476 | if (phys_proc_id[cpu] == phys_proc_id[i] && | 472 | if (c[cpu].phys_proc_id == c[i].phys_proc_id && |
477 | cpu_core_id[cpu] == cpu_core_id[i]) { | 473 | c[cpu].cpu_core_id == c[i].cpu_core_id) { |
478 | cpu_set(i, cpu_sibling_map[cpu]); | 474 | cpu_set(i, cpu_sibling_map[cpu]); |
479 | cpu_set(cpu, cpu_sibling_map[i]); | 475 | cpu_set(cpu, cpu_sibling_map[i]); |
480 | cpu_set(i, cpu_core_map[cpu]); | 476 | cpu_set(i, cpu_core_map[cpu]); |
@@ -501,7 +497,7 @@ set_cpu_sibling_map(int cpu) | |||
501 | cpu_set(i, c[cpu].llc_shared_map); | 497 | cpu_set(i, c[cpu].llc_shared_map); |
502 | cpu_set(cpu, c[i].llc_shared_map); | 498 | cpu_set(cpu, c[i].llc_shared_map); |
503 | } | 499 | } |
504 | if (phys_proc_id[cpu] == phys_proc_id[i]) { | 500 | if (c[cpu].phys_proc_id == c[i].phys_proc_id) { |
505 | cpu_set(i, cpu_core_map[cpu]); | 501 | cpu_set(i, cpu_core_map[cpu]); |
506 | cpu_set(cpu, cpu_core_map[i]); | 502 | cpu_set(cpu, cpu_core_map[i]); |
507 | /* | 503 | /* |
@@ -1056,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
1056 | struct warm_boot_cpu_info info; | 1052 | struct warm_boot_cpu_info info; |
1057 | struct work_struct task; | 1053 | struct work_struct task; |
1058 | int apicid, ret; | 1054 | int apicid, ret; |
1055 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); | ||
1059 | 1056 | ||
1060 | apicid = x86_cpu_to_apicid[cpu]; | 1057 | apicid = x86_cpu_to_apicid[cpu]; |
1061 | if (apicid == BAD_APICID) { | 1058 | if (apicid == BAD_APICID) { |
@@ -1063,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
1063 | goto exit; | 1060 | goto exit; |
1064 | } | 1061 | } |
1065 | 1062 | ||
1063 | /* | ||
1064 | * the CPU isn't initialized at boot time, allocate gdt table here. | ||
1065 | * cpu_init will initialize it | ||
1066 | */ | ||
1067 | if (!cpu_gdt_descr->address) { | ||
1068 | cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL); | ||
1069 | if (!cpu_gdt_descr->address) | ||
1070 | printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu); | ||
1071 | ret = -ENOMEM; | ||
1072 | goto exit; | ||
1073 | } | ||
1074 | |||
1066 | info.complete = &done; | 1075 | info.complete = &done; |
1067 | info.apicid = apicid; | 1076 | info.apicid = apicid; |
1068 | info.cpu = cpu; | 1077 | info.cpu = cpu; |
@@ -1340,8 +1349,8 @@ remove_siblinginfo(int cpu) | |||
1340 | cpu_clear(cpu, cpu_sibling_map[sibling]); | 1349 | cpu_clear(cpu, cpu_sibling_map[sibling]); |
1341 | cpus_clear(cpu_sibling_map[cpu]); | 1350 | cpus_clear(cpu_sibling_map[cpu]); |
1342 | cpus_clear(cpu_core_map[cpu]); | 1351 | cpus_clear(cpu_core_map[cpu]); |
1343 | phys_proc_id[cpu] = BAD_APICID; | 1352 | c[cpu].phys_proc_id = 0; |
1344 | cpu_core_id[cpu] = BAD_APICID; | 1353 | c[cpu].cpu_core_id = 0; |
1345 | cpu_clear(cpu, cpu_sibling_setup_map); | 1354 | cpu_clear(cpu, cpu_sibling_setup_map); |
1346 | } | 1355 | } |
1347 | 1356 | ||
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c index 0bada1870bdf..c60419dee018 100644 --- a/arch/i386/kernel/sysenter.c +++ b/arch/i386/kernel/sysenter.c | |||
@@ -2,6 +2,8 @@ | |||
2 | * linux/arch/i386/kernel/sysenter.c | 2 | * linux/arch/i386/kernel/sysenter.c |
3 | * | 3 | * |
4 | * (C) Copyright 2002 Linus Torvalds | 4 | * (C) Copyright 2002 Linus Torvalds |
5 | * Portions based on the vdso-randomization code from exec-shield: | ||
6 | * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar | ||
5 | * | 7 | * |
6 | * This file contains the needed initializations to support sysenter. | 8 | * This file contains the needed initializations to support sysenter. |
7 | */ | 9 | */ |
@@ -13,12 +15,31 @@ | |||
13 | #include <linux/gfp.h> | 15 | #include <linux/gfp.h> |
14 | #include <linux/string.h> | 16 | #include <linux/string.h> |
15 | #include <linux/elf.h> | 17 | #include <linux/elf.h> |
18 | #include <linux/mm.h> | ||
19 | #include <linux/module.h> | ||
16 | 20 | ||
17 | #include <asm/cpufeature.h> | 21 | #include <asm/cpufeature.h> |
18 | #include <asm/msr.h> | 22 | #include <asm/msr.h> |
19 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
20 | #include <asm/unistd.h> | 24 | #include <asm/unistd.h> |
21 | 25 | ||
26 | /* | ||
27 | * Should the kernel map a VDSO page into processes and pass its | ||
28 | * address down to glibc upon exec()? | ||
29 | */ | ||
30 | unsigned int __read_mostly vdso_enabled = 1; | ||
31 | |||
32 | EXPORT_SYMBOL_GPL(vdso_enabled); | ||
33 | |||
34 | static int __init vdso_setup(char *s) | ||
35 | { | ||
36 | vdso_enabled = simple_strtoul(s, NULL, 0); | ||
37 | |||
38 | return 1; | ||
39 | } | ||
40 | |||
41 | __setup("vdso=", vdso_setup); | ||
42 | |||
22 | extern asmlinkage void sysenter_entry(void); | 43 | extern asmlinkage void sysenter_entry(void); |
23 | 44 | ||
24 | void enable_sep_cpu(void) | 45 | void enable_sep_cpu(void) |
@@ -45,23 +66,122 @@ void enable_sep_cpu(void) | |||
45 | */ | 66 | */ |
46 | extern const char vsyscall_int80_start, vsyscall_int80_end; | 67 | extern const char vsyscall_int80_start, vsyscall_int80_end; |
47 | extern const char vsyscall_sysenter_start, vsyscall_sysenter_end; | 68 | extern const char vsyscall_sysenter_start, vsyscall_sysenter_end; |
69 | static void *syscall_page; | ||
48 | 70 | ||
49 | int __init sysenter_setup(void) | 71 | int __init sysenter_setup(void) |
50 | { | 72 | { |
51 | void *page = (void *)get_zeroed_page(GFP_ATOMIC); | 73 | syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); |
52 | 74 | ||
53 | __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC); | 75 | #ifdef CONFIG_COMPAT_VDSO |
76 | __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY); | ||
77 | printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO)); | ||
78 | #else | ||
79 | /* | ||
80 | * In the non-compat case the ELF coredumping code needs the fixmap: | ||
81 | */ | ||
82 | __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO); | ||
83 | #endif | ||
54 | 84 | ||
55 | if (!boot_cpu_has(X86_FEATURE_SEP)) { | 85 | if (!boot_cpu_has(X86_FEATURE_SEP)) { |
56 | memcpy(page, | 86 | memcpy(syscall_page, |
57 | &vsyscall_int80_start, | 87 | &vsyscall_int80_start, |
58 | &vsyscall_int80_end - &vsyscall_int80_start); | 88 | &vsyscall_int80_end - &vsyscall_int80_start); |
59 | return 0; | 89 | return 0; |
60 | } | 90 | } |
61 | 91 | ||
62 | memcpy(page, | 92 | memcpy(syscall_page, |
63 | &vsyscall_sysenter_start, | 93 | &vsyscall_sysenter_start, |
64 | &vsyscall_sysenter_end - &vsyscall_sysenter_start); | 94 | &vsyscall_sysenter_end - &vsyscall_sysenter_start); |
65 | 95 | ||
66 | return 0; | 96 | return 0; |
67 | } | 97 | } |
98 | |||
99 | static struct page *syscall_nopage(struct vm_area_struct *vma, | ||
100 | unsigned long adr, int *type) | ||
101 | { | ||
102 | struct page *p = virt_to_page(adr - vma->vm_start + syscall_page); | ||
103 | get_page(p); | ||
104 | return p; | ||
105 | } | ||
106 | |||
107 | /* Prevent VMA merging */ | ||
108 | static void syscall_vma_close(struct vm_area_struct *vma) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static struct vm_operations_struct syscall_vm_ops = { | ||
113 | .close = syscall_vma_close, | ||
114 | .nopage = syscall_nopage, | ||
115 | }; | ||
116 | |||
117 | /* Defined in vsyscall-sysenter.S */ | ||
118 | extern void SYSENTER_RETURN; | ||
119 | |||
120 | /* Setup a VMA at program startup for the vsyscall page */ | ||
121 | int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) | ||
122 | { | ||
123 | struct vm_area_struct *vma; | ||
124 | struct mm_struct *mm = current->mm; | ||
125 | unsigned long addr; | ||
126 | int ret; | ||
127 | |||
128 | down_write(&mm->mmap_sem); | ||
129 | addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); | ||
130 | if (IS_ERR_VALUE(addr)) { | ||
131 | ret = addr; | ||
132 | goto up_fail; | ||
133 | } | ||
134 | |||
135 | vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); | ||
136 | if (!vma) { | ||
137 | ret = -ENOMEM; | ||
138 | goto up_fail; | ||
139 | } | ||
140 | |||
141 | vma->vm_start = addr; | ||
142 | vma->vm_end = addr + PAGE_SIZE; | ||
143 | /* MAYWRITE to allow gdb to COW and set breakpoints */ | ||
144 | vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE; | ||
145 | vma->vm_flags |= mm->def_flags; | ||
146 | vma->vm_page_prot = protection_map[vma->vm_flags & 7]; | ||
147 | vma->vm_ops = &syscall_vm_ops; | ||
148 | vma->vm_mm = mm; | ||
149 | |||
150 | ret = insert_vm_struct(mm, vma); | ||
151 | if (ret) | ||
152 | goto free_vma; | ||
153 | |||
154 | current->mm->context.vdso = (void *)addr; | ||
155 | current_thread_info()->sysenter_return = | ||
156 | (void *)VDSO_SYM(&SYSENTER_RETURN); | ||
157 | mm->total_vm++; | ||
158 | up_fail: | ||
159 | up_write(&mm->mmap_sem); | ||
160 | return ret; | ||
161 | |||
162 | free_vma: | ||
163 | kmem_cache_free(vm_area_cachep, vma); | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
168 | { | ||
169 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) | ||
170 | return "[vdso]"; | ||
171 | return NULL; | ||
172 | } | ||
173 | |||
174 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | ||
175 | { | ||
176 | return NULL; | ||
177 | } | ||
178 | |||
179 | int in_gate_area(struct task_struct *task, unsigned long addr) | ||
180 | { | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | int in_gate_area_no_task(unsigned long addr) | ||
185 | { | ||
186 | return 0; | ||
187 | } | ||
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c index 296355292c7c..e2e281d4bcc8 100644 --- a/arch/i386/kernel/topology.c +++ b/arch/i386/kernel/topology.c | |||
@@ -32,15 +32,8 @@ | |||
32 | 32 | ||
33 | static struct i386_cpu cpu_devices[NR_CPUS]; | 33 | static struct i386_cpu cpu_devices[NR_CPUS]; |
34 | 34 | ||
35 | int arch_register_cpu(int num){ | 35 | int arch_register_cpu(int num) |
36 | struct node *parent = NULL; | 36 | { |
37 | |||
38 | #ifdef CONFIG_NUMA | ||
39 | int node = cpu_to_node(num); | ||
40 | if (node_online(node)) | ||
41 | parent = &node_devices[node].node; | ||
42 | #endif /* CONFIG_NUMA */ | ||
43 | |||
44 | /* | 37 | /* |
45 | * CPU0 cannot be offlined due to several | 38 | * CPU0 cannot be offlined due to several |
46 | * restrictions and assumptions in kernel. This basically | 39 | * restrictions and assumptions in kernel. This basically |
@@ -50,21 +43,13 @@ int arch_register_cpu(int num){ | |||
50 | if (!num) | 43 | if (!num) |
51 | cpu_devices[num].cpu.no_control = 1; | 44 | cpu_devices[num].cpu.no_control = 1; |
52 | 45 | ||
53 | return register_cpu(&cpu_devices[num].cpu, num, parent); | 46 | return register_cpu(&cpu_devices[num].cpu, num); |
54 | } | 47 | } |
55 | 48 | ||
56 | #ifdef CONFIG_HOTPLUG_CPU | 49 | #ifdef CONFIG_HOTPLUG_CPU |
57 | 50 | ||
58 | void arch_unregister_cpu(int num) { | 51 | void arch_unregister_cpu(int num) { |
59 | struct node *parent = NULL; | 52 | return unregister_cpu(&cpu_devices[num].cpu); |
60 | |||
61 | #ifdef CONFIG_NUMA | ||
62 | int node = cpu_to_node(num); | ||
63 | if (node_online(node)) | ||
64 | parent = &node_devices[node].node; | ||
65 | #endif /* CONFIG_NUMA */ | ||
66 | |||
67 | return unregister_cpu(&cpu_devices[num].cpu, parent); | ||
68 | } | 53 | } |
69 | EXPORT_SYMBOL(arch_register_cpu); | 54 | EXPORT_SYMBOL(arch_register_cpu); |
70 | EXPORT_SYMBOL(arch_unregister_cpu); | 55 | EXPORT_SYMBOL(arch_unregister_cpu); |
@@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu); | |||
74 | 59 | ||
75 | #ifdef CONFIG_NUMA | 60 | #ifdef CONFIG_NUMA |
76 | #include <linux/mmzone.h> | 61 | #include <linux/mmzone.h> |
77 | #include <asm/node.h> | ||
78 | |||
79 | struct i386_node node_devices[MAX_NUMNODES]; | ||
80 | 62 | ||
81 | static int __init topology_init(void) | 63 | static int __init topology_init(void) |
82 | { | 64 | { |
83 | int i; | 65 | int i; |
84 | 66 | ||
85 | for_each_online_node(i) | 67 | for_each_online_node(i) |
86 | arch_register_node(i); | 68 | register_one_node(i); |
87 | 69 | ||
88 | for_each_present_cpu(i) | 70 | for_each_present_cpu(i) |
89 | arch_register_cpu(i); | 71 | arch_register_cpu(i); |
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 3b62baa6a371..1a36d26e15eb 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S | |||
@@ -42,10 +42,10 @@ __kernel_vsyscall: | |||
42 | /* 7: align return point with nop's to make disassembly easier */ | 42 | /* 7: align return point with nop's to make disassembly easier */ |
43 | .space 7,0x90 | 43 | .space 7,0x90 |
44 | 44 | ||
45 | /* 14: System call restart point is here! (SYSENTER_RETURN - 2) */ | 45 | /* 14: System call restart point is here! (SYSENTER_RETURN-2) */ |
46 | jmp .Lenter_kernel | 46 | jmp .Lenter_kernel |
47 | /* 16: System call normal return point is here! */ | 47 | /* 16: System call normal return point is here! */ |
48 | .globl SYSENTER_RETURN /* Symbol used by entry.S. */ | 48 | .globl SYSENTER_RETURN /* Symbol used by sysenter.c */ |
49 | SYSENTER_RETURN: | 49 | SYSENTER_RETURN: |
50 | pop %ebp | 50 | pop %ebp |
51 | .Lpop_ebp: | 51 | .Lpop_ebp: |
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S index 98699ca6e52d..e26975fc68b6 100644 --- a/arch/i386/kernel/vsyscall.lds.S +++ b/arch/i386/kernel/vsyscall.lds.S | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | SECTIONS | 8 | SECTIONS |
9 | { | 9 | { |
10 | . = VSYSCALL_BASE + SIZEOF_HEADERS; | 10 | . = VDSO_PRELINK + SIZEOF_HEADERS; |
11 | 11 | ||
12 | .hash : { *(.hash) } :text | 12 | .hash : { *(.hash) } :text |
13 | .dynsym : { *(.dynsym) } | 13 | .dynsym : { *(.dynsym) } |
@@ -20,7 +20,7 @@ SECTIONS | |||
20 | For the layouts to match, we need to skip more than enough | 20 | For the layouts to match, we need to skip more than enough |
21 | space for the dynamic symbol table et al. If this amount | 21 | space for the dynamic symbol table et al. If this amount |
22 | is insufficient, ld -shared will barf. Just increase it here. */ | 22 | is insufficient, ld -shared will barf. Just increase it here. */ |
23 | . = VSYSCALL_BASE + 0x400; | 23 | . = VDSO_PRELINK + 0x400; |
24 | 24 | ||
25 | .text : { *(.text) } :text =0x90909090 | 25 | .text : { *(.text) } :text =0x90909090 |
26 | .note : { *(.note.*) } :text :note | 26 | .note : { *(.note.*) } :text :note |
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c index 0e225054e222..defc6ebbd565 100644 --- a/arch/i386/mach-voyager/setup.c +++ b/arch/i386/mach-voyager/setup.c | |||
@@ -5,10 +5,10 @@ | |||
5 | #include <linux/config.h> | 5 | #include <linux/config.h> |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/interrupt.h> | 7 | #include <linux/interrupt.h> |
8 | #include <asm/acpi.h> | ||
9 | #include <asm/arch_hooks.h> | 8 | #include <asm/arch_hooks.h> |
10 | #include <asm/voyager.h> | 9 | #include <asm/voyager.h> |
11 | #include <asm/e820.h> | 10 | #include <asm/e820.h> |
11 | #include <asm/io.h> | ||
12 | #include <asm/setup.h> | 12 | #include <asm/setup.h> |
13 | 13 | ||
14 | void __init pre_intr_init_hook(void) | 14 | void __init pre_intr_init_hook(void) |
@@ -27,8 +27,7 @@ void __init intr_init_hook(void) | |||
27 | smp_intr_init(); | 27 | smp_intr_init(); |
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | if (!acpi_ioapic) | 30 | setup_irq(2, &irq2); |
31 | setup_irq(2, &irq2); | ||
32 | } | 31 | } |
33 | 32 | ||
34 | void __init pre_setup_arch_hook(void) | 33 | void __init pre_setup_arch_hook(void) |
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index 70e560a1b79a..8242af9ebc6f 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c | |||
@@ -661,6 +661,7 @@ do_boot_cpu(__u8 cpu) | |||
661 | print_cpu_info(&cpu_data[cpu]); | 661 | print_cpu_info(&cpu_data[cpu]); |
662 | wmb(); | 662 | wmb(); |
663 | cpu_set(cpu, cpu_callout_map); | 663 | cpu_set(cpu, cpu_callout_map); |
664 | cpu_set(cpu, cpu_present_map); | ||
664 | } | 665 | } |
665 | else { | 666 | else { |
666 | printk("CPU%d FAILED TO BOOT: ", cpu); | 667 | printk("CPU%d FAILED TO BOOT: ", cpu); |
@@ -1912,6 +1913,7 @@ void __devinit smp_prepare_boot_cpu(void) | |||
1912 | cpu_set(smp_processor_id(), cpu_online_map); | 1913 | cpu_set(smp_processor_id(), cpu_online_map); |
1913 | cpu_set(smp_processor_id(), cpu_callout_map); | 1914 | cpu_set(smp_processor_id(), cpu_callout_map); |
1914 | cpu_set(smp_processor_id(), cpu_possible_map); | 1915 | cpu_set(smp_processor_id(), cpu_possible_map); |
1916 | cpu_set(smp_processor_id(), cpu_present_map); | ||
1915 | } | 1917 | } |
1916 | 1918 | ||
1917 | int __devinit | 1919 | int __devinit |
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index bf19513f0cea..f84b16e007ff 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
25 | #include <linux/pagemap.h> | 25 | #include <linux/pagemap.h> |
26 | #include <linux/poison.h> | ||
26 | #include <linux/bootmem.h> | 27 | #include <linux/bootmem.h> |
27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
28 | #include <linux/proc_fs.h> | 29 | #include <linux/proc_fs.h> |
@@ -654,7 +655,7 @@ void __init mem_init(void) | |||
654 | */ | 655 | */ |
655 | #ifdef CONFIG_MEMORY_HOTPLUG | 656 | #ifdef CONFIG_MEMORY_HOTPLUG |
656 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 657 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
657 | int add_memory(u64 start, u64 size) | 658 | int arch_add_memory(int nid, u64 start, u64 size) |
658 | { | 659 | { |
659 | struct pglist_data *pgdata = &contig_page_data; | 660 | struct pglist_data *pgdata = &contig_page_data; |
660 | struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; | 661 | struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; |
@@ -753,7 +754,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
753 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 754 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
754 | ClearPageReserved(virt_to_page(addr)); | 755 | ClearPageReserved(virt_to_page(addr)); |
755 | init_page_count(virt_to_page(addr)); | 756 | init_page_count(virt_to_page(addr)); |
756 | memset((void *)addr, 0xcc, PAGE_SIZE); | 757 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
757 | free_page(addr); | 758 | free_page(addr); |
758 | totalram_pages++; | 759 | totalram_pages++; |
759 | } | 760 | } |
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c index 0887b34bc59b..353a836ed63c 100644 --- a/arch/i386/mm/pageattr.c +++ b/arch/i386/mm/pageattr.c | |||
@@ -229,8 +229,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
229 | if (PageHighMem(page)) | 229 | if (PageHighMem(page)) |
230 | return; | 230 | return; |
231 | if (!enable) | 231 | if (!enable) |
232 | mutex_debug_check_no_locks_freed(page_address(page), | 232 | debug_check_no_locks_freed(page_address(page), |
233 | numpages * PAGE_SIZE); | 233 | numpages * PAGE_SIZE); |
234 | 234 | ||
235 | /* the return value is ignored - the calls cannot fail, | 235 | /* the return value is ignored - the calls cannot fail, |
236 | * large pages are disabled at boot time. | 236 | * large pages are disabled at boot time. |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 18318749884b..a56df7bf022d 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -374,6 +374,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID | |||
374 | def_bool y | 374 | def_bool y |
375 | depends on NEED_MULTIPLE_NODES | 375 | depends on NEED_MULTIPLE_NODES |
376 | 376 | ||
377 | config HAVE_ARCH_NODEDATA_EXTENSION | ||
378 | def_bool y | ||
379 | depends on NUMA | ||
380 | |||
377 | config IA32_SUPPORT | 381 | config IA32_SUPPORT |
378 | bool "Support for Linux/x86 binaries" | 382 | bool "Support for Linux/x86 binaries" |
379 | help | 383 | help |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 859fb37ff49b..303a9afcf2a1 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c | |||
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu) | |||
959 | } | 959 | } |
960 | } | 960 | } |
961 | 961 | ||
962 | static int palinfo_cpu_callback(struct notifier_block *nfb, | 962 | static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, |
963 | unsigned long action, | 963 | unsigned long action, |
964 | void *hcpu) | 964 | void *hcpu) |
965 | { | 965 | { |
@@ -978,7 +978,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, | |||
978 | return NOTIFY_OK; | 978 | return NOTIFY_OK; |
979 | } | 979 | } |
980 | 980 | ||
981 | static struct notifier_block palinfo_cpu_notifier = | 981 | static struct notifier_block __cpuinitdata palinfo_cpu_notifier = |
982 | { | 982 | { |
983 | .notifier_call = palinfo_cpu_callback, | 983 | .notifier_call = palinfo_cpu_callback, |
984 | .priority = 0, | 984 | .priority = 0, |
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 663a186ad194..9065f0f01ba3 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = { | |||
572 | }; | 572 | }; |
573 | 573 | ||
574 | #ifdef CONFIG_HOTPLUG_CPU | 574 | #ifdef CONFIG_HOTPLUG_CPU |
575 | static int | 575 | static int __devinit |
576 | salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | 576 | salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) |
577 | { | 577 | { |
578 | unsigned int i, cpu = (unsigned long)hcpu; | 578 | unsigned int i, cpu = (unsigned long)hcpu; |
@@ -673,9 +673,7 @@ salinfo_init(void) | |||
673 | salinfo_timer.function = &salinfo_timeout; | 673 | salinfo_timer.function = &salinfo_timeout; |
674 | add_timer(&salinfo_timer); | 674 | add_timer(&salinfo_timer); |
675 | 675 | ||
676 | #ifdef CONFIG_HOTPLUG_CPU | 676 | register_hotcpu_notifier(&salinfo_cpu_notifier); |
677 | register_cpu_notifier(&salinfo_cpu_notifier); | ||
678 | #endif | ||
679 | 677 | ||
680 | return 0; | 678 | return 0; |
681 | } | 679 | } |
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 879edb51d1e0..5511d9c6c701 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c | |||
@@ -26,19 +26,10 @@ | |||
26 | #include <asm/numa.h> | 26 | #include <asm/numa.h> |
27 | #include <asm/cpu.h> | 27 | #include <asm/cpu.h> |
28 | 28 | ||
29 | #ifdef CONFIG_NUMA | ||
30 | static struct node *sysfs_nodes; | ||
31 | #endif | ||
32 | static struct ia64_cpu *sysfs_cpus; | 29 | static struct ia64_cpu *sysfs_cpus; |
33 | 30 | ||
34 | int arch_register_cpu(int num) | 31 | int arch_register_cpu(int num) |
35 | { | 32 | { |
36 | struct node *parent = NULL; | ||
37 | |||
38 | #ifdef CONFIG_NUMA | ||
39 | parent = &sysfs_nodes[cpu_to_node(num)]; | ||
40 | #endif /* CONFIG_NUMA */ | ||
41 | |||
42 | #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) | 33 | #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) |
43 | /* | 34 | /* |
44 | * If CPEI cannot be re-targetted, and this is | 35 | * If CPEI cannot be re-targetted, and this is |
@@ -48,21 +39,14 @@ int arch_register_cpu(int num) | |||
48 | sysfs_cpus[num].cpu.no_control = 1; | 39 | sysfs_cpus[num].cpu.no_control = 1; |
49 | #endif | 40 | #endif |
50 | 41 | ||
51 | return register_cpu(&sysfs_cpus[num].cpu, num, parent); | 42 | return register_cpu(&sysfs_cpus[num].cpu, num); |
52 | } | 43 | } |
53 | 44 | ||
54 | #ifdef CONFIG_HOTPLUG_CPU | 45 | #ifdef CONFIG_HOTPLUG_CPU |
55 | 46 | ||
56 | void arch_unregister_cpu(int num) | 47 | void arch_unregister_cpu(int num) |
57 | { | 48 | { |
58 | struct node *parent = NULL; | 49 | return unregister_cpu(&sysfs_cpus[num].cpu); |
59 | |||
60 | #ifdef CONFIG_NUMA | ||
61 | int node = cpu_to_node(num); | ||
62 | parent = &sysfs_nodes[node]; | ||
63 | #endif /* CONFIG_NUMA */ | ||
64 | |||
65 | return unregister_cpu(&sysfs_cpus[num].cpu, parent); | ||
66 | } | 50 | } |
67 | EXPORT_SYMBOL(arch_register_cpu); | 51 | EXPORT_SYMBOL(arch_register_cpu); |
68 | EXPORT_SYMBOL(arch_unregister_cpu); | 52 | EXPORT_SYMBOL(arch_unregister_cpu); |
@@ -74,17 +58,11 @@ static int __init topology_init(void) | |||
74 | int i, err = 0; | 58 | int i, err = 0; |
75 | 59 | ||
76 | #ifdef CONFIG_NUMA | 60 | #ifdef CONFIG_NUMA |
77 | sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL); | ||
78 | if (!sysfs_nodes) { | ||
79 | err = -ENOMEM; | ||
80 | goto out; | ||
81 | } | ||
82 | |||
83 | /* | 61 | /* |
84 | * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? | 62 | * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? |
85 | */ | 63 | */ |
86 | for_each_online_node(i) { | 64 | for_each_online_node(i) { |
87 | if ((err = register_node(&sysfs_nodes[i], i, 0))) | 65 | if ((err = register_one_node(i))) |
88 | goto out; | 66 | goto out; |
89 | } | 67 | } |
90 | #endif | 68 | #endif |
@@ -426,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
426 | * When a cpu is hot-plugged, do a check and initiate | 404 | * When a cpu is hot-plugged, do a check and initiate |
427 | * cache kobject if necessary | 405 | * cache kobject if necessary |
428 | */ | 406 | */ |
429 | static int cache_cpu_callback(struct notifier_block *nfb, | 407 | static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, |
430 | unsigned long action, void *hcpu) | 408 | unsigned long action, void *hcpu) |
431 | { | 409 | { |
432 | unsigned int cpu = (unsigned long)hcpu; | 410 | unsigned int cpu = (unsigned long)hcpu; |
@@ -444,7 +422,7 @@ static int cache_cpu_callback(struct notifier_block *nfb, | |||
444 | return NOTIFY_OK; | 422 | return NOTIFY_OK; |
445 | } | 423 | } |
446 | 424 | ||
447 | static struct notifier_block cache_cpu_notifier = | 425 | static struct notifier_block __cpuinitdata cache_cpu_notifier = |
448 | { | 426 | { |
449 | .notifier_call = cache_cpu_callback | 427 | .notifier_call = cache_cpu_callback |
450 | }; | 428 | }; |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index b6bcc9fa3603..525b082eb661 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -33,7 +33,6 @@ | |||
33 | */ | 33 | */ |
34 | struct early_node_data { | 34 | struct early_node_data { |
35 | struct ia64_node_data *node_data; | 35 | struct ia64_node_data *node_data; |
36 | pg_data_t *pgdat; | ||
37 | unsigned long pernode_addr; | 36 | unsigned long pernode_addr; |
38 | unsigned long pernode_size; | 37 | unsigned long pernode_size; |
39 | struct bootmem_data bootmem_data; | 38 | struct bootmem_data bootmem_data; |
@@ -46,6 +45,8 @@ struct early_node_data { | |||
46 | static struct early_node_data mem_data[MAX_NUMNODES] __initdata; | 45 | static struct early_node_data mem_data[MAX_NUMNODES] __initdata; |
47 | static nodemask_t memory_less_mask __initdata; | 46 | static nodemask_t memory_less_mask __initdata; |
48 | 47 | ||
48 | static pg_data_t *pgdat_list[MAX_NUMNODES]; | ||
49 | |||
49 | /* | 50 | /* |
50 | * To prevent cache aliasing effects, align per-node structures so that they | 51 | * To prevent cache aliasing effects, align per-node structures so that they |
51 | * start at addresses that are strided by node number. | 52 | * start at addresses that are strided by node number. |
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, | |||
99 | * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been | 100 | * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been |
100 | * called yet. Note that node 0 will also count all non-existent cpus. | 101 | * called yet. Note that node 0 will also count all non-existent cpus. |
101 | */ | 102 | */ |
102 | static int __init early_nr_cpus_node(int node) | 103 | static int __meminit early_nr_cpus_node(int node) |
103 | { | 104 | { |
104 | int cpu, n = 0; | 105 | int cpu, n = 0; |
105 | 106 | ||
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node) | |||
114 | * compute_pernodesize - compute size of pernode data | 115 | * compute_pernodesize - compute size of pernode data |
115 | * @node: the node id. | 116 | * @node: the node id. |
116 | */ | 117 | */ |
117 | static unsigned long __init compute_pernodesize(int node) | 118 | static unsigned long __meminit compute_pernodesize(int node) |
118 | { | 119 | { |
119 | unsigned long pernodesize = 0, cpus; | 120 | unsigned long pernodesize = 0, cpus; |
120 | 121 | ||
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode, | |||
175 | pernode += PERCPU_PAGE_SIZE * cpus; | 176 | pernode += PERCPU_PAGE_SIZE * cpus; |
176 | pernode += node * L1_CACHE_BYTES; | 177 | pernode += node * L1_CACHE_BYTES; |
177 | 178 | ||
178 | mem_data[node].pgdat = __va(pernode); | 179 | pgdat_list[node] = __va(pernode); |
179 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); | 180 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); |
180 | 181 | ||
181 | mem_data[node].node_data = __va(pernode); | 182 | mem_data[node].node_data = __va(pernode); |
182 | pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); | 183 | pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); |
183 | 184 | ||
184 | mem_data[node].pgdat->bdata = bdp; | 185 | pgdat_list[node]->bdata = bdp; |
185 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); | 186 | pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); |
186 | 187 | ||
187 | cpu_data = per_cpu_node_setup(cpu_data, node); | 188 | cpu_data = per_cpu_node_setup(cpu_data, node); |
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len, | |||
268 | static int __init free_node_bootmem(unsigned long start, unsigned long len, | 269 | static int __init free_node_bootmem(unsigned long start, unsigned long len, |
269 | int node) | 270 | int node) |
270 | { | 271 | { |
271 | free_bootmem_node(mem_data[node].pgdat, start, len); | 272 | free_bootmem_node(pgdat_list[node], start, len); |
272 | 273 | ||
273 | return 0; | 274 | return 0; |
274 | } | 275 | } |
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void) | |||
287 | int node; | 288 | int node; |
288 | 289 | ||
289 | for_each_online_node(node) { | 290 | for_each_online_node(node) { |
290 | pg_data_t *pdp = mem_data[node].pgdat; | 291 | pg_data_t *pdp = pgdat_list[node]; |
291 | 292 | ||
292 | if (node_isset(node, memory_less_mask)) | 293 | if (node_isset(node, memory_less_mask)) |
293 | continue; | 294 | continue; |
@@ -307,6 +308,17 @@ static void __init reserve_pernode_space(void) | |||
307 | } | 308 | } |
308 | } | 309 | } |
309 | 310 | ||
311 | static void __meminit scatter_node_data(void) | ||
312 | { | ||
313 | pg_data_t **dst; | ||
314 | int node; | ||
315 | |||
316 | for_each_online_node(node) { | ||
317 | dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; | ||
318 | memcpy(dst, pgdat_list, sizeof(pgdat_list)); | ||
319 | } | ||
320 | } | ||
321 | |||
310 | /** | 322 | /** |
311 | * initialize_pernode_data - fixup per-cpu & per-node pointers | 323 | * initialize_pernode_data - fixup per-cpu & per-node pointers |
312 | * | 324 | * |
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void) | |||
317 | */ | 329 | */ |
318 | static void __init initialize_pernode_data(void) | 330 | static void __init initialize_pernode_data(void) |
319 | { | 331 | { |
320 | pg_data_t *pgdat_list[MAX_NUMNODES]; | ||
321 | int cpu, node; | 332 | int cpu, node; |
322 | 333 | ||
323 | for_each_online_node(node) | 334 | scatter_node_data(); |
324 | pgdat_list[node] = mem_data[node].pgdat; | ||
325 | 335 | ||
326 | /* Copy the pg_data_t list to each node and init the node field */ | ||
327 | for_each_online_node(node) { | ||
328 | memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list, | ||
329 | sizeof(pgdat_list)); | ||
330 | } | ||
331 | #ifdef CONFIG_SMP | 336 | #ifdef CONFIG_SMP |
332 | /* Set the node_data pointer for each per-cpu struct */ | 337 | /* Set the node_data pointer for each per-cpu struct */ |
333 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 338 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) | |||
372 | if (bestnode == -1) | 377 | if (bestnode == -1) |
373 | bestnode = anynode; | 378 | bestnode = anynode; |
374 | 379 | ||
375 | ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize, | 380 | ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize, |
376 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | 381 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); |
377 | 382 | ||
378 | return ptr; | 383 | return ptr; |
@@ -476,7 +481,7 @@ void __init find_memory(void) | |||
476 | pernodesize = mem_data[node].pernode_size; | 481 | pernodesize = mem_data[node].pernode_size; |
477 | map = pernode + pernodesize; | 482 | map = pernode + pernodesize; |
478 | 483 | ||
479 | init_bootmem_node(mem_data[node].pgdat, | 484 | init_bootmem_node(pgdat_list[node], |
480 | map>>PAGE_SHIFT, | 485 | map>>PAGE_SHIFT, |
481 | bdp->node_boot_start>>PAGE_SHIFT, | 486 | bdp->node_boot_start>>PAGE_SHIFT, |
482 | bdp->node_low_pfn); | 487 | bdp->node_low_pfn); |
@@ -786,3 +791,21 @@ void __init paging_init(void) | |||
786 | 791 | ||
787 | zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); | 792 | zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); |
788 | } | 793 | } |
794 | |||
795 | pg_data_t *arch_alloc_nodedata(int nid) | ||
796 | { | ||
797 | unsigned long size = compute_pernodesize(nid); | ||
798 | |||
799 | return kzalloc(size, GFP_KERNEL); | ||
800 | } | ||
801 | |||
802 | void arch_free_nodedata(pg_data_t *pgdat) | ||
803 | { | ||
804 | kfree(pgdat); | ||
805 | } | ||
806 | |||
807 | void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) | ||
808 | { | ||
809 | pgdat_list[update_node] = update_pgdat; | ||
810 | scatter_node_data(); | ||
811 | } | ||
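The new HAVE_ARCH_NODEDATA_EXTENSION hooks (arch_alloc_nodedata, arch_free_nodedata, arch_refresh_nodedata) let ia64 keep pgdat_list[] and the per-node pg_data_ptrs copies coherent when a pgdat is allocated for a hot-added node. A rough sketch of how a node-hotplug path could drive them; the function name and error handling below are illustrative, not taken from this patch:

/*
 * Rough sketch, not the real mm/ hotplug code: shows the intended
 * call order for the three arch hooks added above when a pgdat has
 * to be created for a newly hot-added node.
 */
static pg_data_t *hotadd_new_pgdat_sketch(int nid)
{
	pg_data_t *pgdat;

	/* ia64 backs this with kzalloc(compute_pernodesize(nid)). */
	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	/*
	 * Publish the new pgdat: on ia64 this updates pgdat_list[] and
	 * re-scatters the per-node copies via scatter_node_data().
	 */
	arch_refresh_nodedata(nid, pgdat);

	/* ... zone/memmap initialisation for the new node would follow ... */
	return pgdat;
}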
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 11f08001f8c2..38306e98f04b 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -652,7 +652,7 @@ void online_page(struct page *page) | |||
652 | num_physpages++; | 652 | num_physpages++; |
653 | } | 653 | } |
654 | 654 | ||
655 | int add_memory(u64 start, u64 size) | 655 | int arch_add_memory(int nid, u64 start, u64 size) |
656 | { | 656 | { |
657 | pg_data_t *pgdat; | 657 | pg_data_t *pgdat; |
658 | struct zone *zone; | 658 | struct zone *zone; |
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size) | |||
660 | unsigned long nr_pages = size >> PAGE_SHIFT; | 660 | unsigned long nr_pages = size >> PAGE_SHIFT; |
661 | int ret; | 661 | int ret; |
662 | 662 | ||
663 | pgdat = NODE_DATA(0); | 663 | pgdat = NODE_DATA(nid); |
664 | 664 | ||
665 | zone = pgdat->node_zones + ZONE_NORMAL; | 665 | zone = pgdat->node_zones + ZONE_NORMAL; |
666 | ret = __add_pages(zone, start_pfn, nr_pages); | 666 | ret = __add_pages(zone, start_pfn, nr_pages); |
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size) | |||
671 | 671 | ||
672 | return ret; | 672 | return ret; |
673 | } | 673 | } |
674 | EXPORT_SYMBOL_GPL(add_memory); | ||
675 | 674 | ||
676 | int remove_memory(u64 start, u64 size) | 675 | int remove_memory(u64 start, u64 size) |
677 | { | 676 | { |
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index dc8e2b696713..677c6c0fd661 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); | |||
27 | int sn_force_interrupt_flag = 1; | 27 | int sn_force_interrupt_flag = 1; |
28 | extern int sn_ioif_inited; | 28 | extern int sn_ioif_inited; |
29 | struct list_head **sn_irq_lh; | 29 | struct list_head **sn_irq_lh; |
30 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ | 30 | static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ |
31 | 31 | ||
32 | u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, | 32 | u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, |
33 | struct sn_irq_info *sn_irq_info, | 33 | struct sn_irq_info *sn_irq_info, |
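The sn/irq.c hunk replaces the deprecated SPIN_LOCK_UNLOCKED static initializer with DEFINE_SPINLOCK(); the smtc.c hunk further down makes the equivalent run-time change with spin_lock_init(). A short illustration of the two idioms, with made-up names:

/*
 * Illustrative only: the two initialisation idioms this patch set
 * switches to, replacing the SPIN_LOCK_UNLOCKED initialiser.
 */
#include <linux/spinlock.h>

/* Compile-time initialisation of a file-scope lock. */
static DEFINE_SPINLOCK(example_static_lock);

struct example_queue {
	spinlock_t lock;
};

/* Run-time initialisation, e.g. for a lock embedded in another object. */
static void example_queue_init(struct example_queue *q)
{
	spin_lock_init(&q->lock);
}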
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c index 3cd3c2988a48..1ff483c8a4c9 100644 --- a/arch/m32r/kernel/setup.c +++ b/arch/m32r/kernel/setup.c | |||
@@ -275,7 +275,7 @@ static int __init topology_init(void) | |||
275 | int i; | 275 | int i; |
276 | 276 | ||
277 | for_each_present_cpu(i) | 277 | for_each_present_cpu(i) |
278 | register_cpu(&cpu_devices[i], i, NULL); | 278 | register_cpu(&cpu_devices[i], i); |
279 | 279 | ||
280 | return 0; | 280 | return 0; |
281 | } | 281 | } |
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile index 6f880cbff1c8..8951793fd8d4 100644 --- a/arch/m68knommu/Makefile +++ b/arch/m68knommu/Makefile | |||
@@ -21,6 +21,7 @@ platform-$(CONFIG_M527x) := 527x | |||
21 | platform-$(CONFIG_M5272) := 5272 | 21 | platform-$(CONFIG_M5272) := 5272 |
22 | platform-$(CONFIG_M528x) := 528x | 22 | platform-$(CONFIG_M528x) := 528x |
23 | platform-$(CONFIG_M5307) := 5307 | 23 | platform-$(CONFIG_M5307) := 5307 |
24 | platform-$(CONFIG_M532x) := 532x | ||
24 | platform-$(CONFIG_M5407) := 5407 | 25 | platform-$(CONFIG_M5407) := 5407 |
25 | PLATFORM := $(platform-y) | 26 | PLATFORM := $(platform-y) |
26 | 27 | ||
@@ -44,6 +45,7 @@ board-$(CONFIG_senTec) := senTec | |||
44 | board-$(CONFIG_SNEHA) := SNEHA | 45 | board-$(CONFIG_SNEHA) := SNEHA |
45 | board-$(CONFIG_M5208EVB) := M5208EVB | 46 | board-$(CONFIG_M5208EVB) := M5208EVB |
46 | board-$(CONFIG_MOD5272) := MOD5272 | 47 | board-$(CONFIG_MOD5272) := MOD5272 |
48 | board-$(CONFIG_AVNET) := AVNET | ||
47 | BOARD := $(board-y) | 49 | BOARD := $(board-y) |
48 | 50 | ||
49 | model-$(CONFIG_RAMKERNEL) := ram | 51 | model-$(CONFIG_RAMKERNEL) := ram |
@@ -65,6 +67,7 @@ cpuclass-$(CONFIG_M527x) := 5307 | |||
65 | cpuclass-$(CONFIG_M5272) := 5307 | 67 | cpuclass-$(CONFIG_M5272) := 5307 |
66 | cpuclass-$(CONFIG_M528x) := 5307 | 68 | cpuclass-$(CONFIG_M528x) := 5307 |
67 | cpuclass-$(CONFIG_M5307) := 5307 | 69 | cpuclass-$(CONFIG_M5307) := 5307 |
70 | cpuclass-$(CONFIG_M532x) := 5307 | ||
68 | cpuclass-$(CONFIG_M5407) := 5307 | 71 | cpuclass-$(CONFIG_M5407) := 5307 |
69 | cpuclass-$(CONFIG_M68328) := 68328 | 72 | cpuclass-$(CONFIG_M68328) := 68328 |
70 | cpuclass-$(CONFIG_M68EZ328) := 68328 | 73 | cpuclass-$(CONFIG_M68EZ328) := 68328 |
@@ -81,16 +84,17 @@ export PLATFORM BOARD MODEL CPUCLASS | |||
81 | # | 84 | # |
82 | # Some CFLAG additions based on specific CPU type. | 85 | # Some CFLAG additions based on specific CPU type. |
83 | # | 86 | # |
84 | cflags-$(CONFIG_M5206) := -m5200 -Wa,-S -Wa,-m5200 | 87 | cflags-$(CONFIG_M5206) := -m5200 |
85 | cflags-$(CONFIG_M5206e) := -m5200 -Wa,-S -Wa,-m5200 | 88 | cflags-$(CONFIG_M5206e) := -m5200 |
86 | cflags-$(CONFIG_M520x) := -m5307 -Wa,-S -Wa,-m5307 | 89 | cflags-$(CONFIG_M520x) := -m5307 |
87 | cflags-$(CONFIG_M523x) := -m5307 -Wa,-S -Wa,-m5307 | 90 | cflags-$(CONFIG_M523x) := -m5307 |
88 | cflags-$(CONFIG_M5249) := -m5200 -Wa,-S -Wa,-m5200 | 91 | cflags-$(CONFIG_M5249) := -m5200 |
89 | cflags-$(CONFIG_M527x) := -m5307 -Wa,-S -Wa,-m5307 | 92 | cflags-$(CONFIG_M527x) := -m5307 |
90 | cflags-$(CONFIG_M5272) := -m5307 -Wa,-S -Wa,-m5307 | 93 | cflags-$(CONFIG_M5272) := -m5307 |
91 | cflags-$(CONFIG_M528x) := -m5307 -Wa,-S -Wa,-m5307 | 94 | cflags-$(CONFIG_M528x) := -m5307 |
92 | cflags-$(CONFIG_M5307) := -m5307 -Wa,-S -Wa,-m5307 | 95 | cflags-$(CONFIG_M5307) := -m5307 |
93 | cflags-$(CONFIG_M5407) := -m5200 -Wa,-S -Wa,-m5200 | 96 | cflags-$(CONFIG_M532x) := -m5307 |
97 | cflags-$(CONFIG_M5407) := -m5200 | ||
94 | cflags-$(CONFIG_M68328) := -m68000 | 98 | cflags-$(CONFIG_M68328) := -m68000 |
95 | cflags-$(CONFIG_M68EZ328) := -m68000 | 99 | cflags-$(CONFIG_M68EZ328) := -m68000 |
96 | cflags-$(CONFIG_M68VZ328) := -m68000 | 100 | cflags-$(CONFIG_M68VZ328) := -m68000 |
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig index 2d59ba1a79ba..3891de09ac23 100644 --- a/arch/m68knommu/defconfig +++ b/arch/m68knommu/defconfig | |||
@@ -1,21 +1,22 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.13-uc0 | 3 | # Linux kernel version: 2.6.17 |
4 | # Wed Aug 31 15:03:26 2005 | 4 | # Tue Jun 27 12:57:06 2006 |
5 | # | 5 | # |
6 | CONFIG_M68KNOMMU=y | 6 | CONFIG_M68K=y |
7 | # CONFIG_MMU is not set | 7 | # CONFIG_MMU is not set |
8 | # CONFIG_FPU is not set | 8 | # CONFIG_FPU is not set |
9 | CONFIG_UID16=y | ||
10 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 9 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
11 | # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set | 10 | # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set |
11 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
12 | CONFIG_GENERIC_HWEIGHT=y | ||
12 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 13 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
14 | CONFIG_TIME_LOW_RES=y | ||
13 | 15 | ||
14 | # | 16 | # |
15 | # Code maturity level options | 17 | # Code maturity level options |
16 | # | 18 | # |
17 | CONFIG_EXPERIMENTAL=y | 19 | CONFIG_EXPERIMENTAL=y |
18 | CONFIG_CLEAN_COMPILE=y | ||
19 | CONFIG_BROKEN_ON_SMP=y | 20 | CONFIG_BROKEN_ON_SMP=y |
20 | CONFIG_INIT_ENV_ARG_LIMIT=32 | 21 | CONFIG_INIT_ENV_ARG_LIMIT=32 |
21 | 22 | ||
@@ -23,26 +24,30 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 | |||
23 | # General setup | 24 | # General setup |
24 | # | 25 | # |
25 | CONFIG_LOCALVERSION="" | 26 | CONFIG_LOCALVERSION="" |
27 | CONFIG_LOCALVERSION_AUTO=y | ||
28 | # CONFIG_SYSVIPC is not set | ||
26 | # CONFIG_POSIX_MQUEUE is not set | 29 | # CONFIG_POSIX_MQUEUE is not set |
27 | # CONFIG_BSD_PROCESS_ACCT is not set | 30 | # CONFIG_BSD_PROCESS_ACCT is not set |
28 | # CONFIG_SYSCTL is not set | 31 | # CONFIG_SYSCTL is not set |
29 | # CONFIG_AUDIT is not set | 32 | # CONFIG_AUDIT is not set |
30 | # CONFIG_HOTPLUG is not set | ||
31 | # CONFIG_KOBJECT_UEVENT is not set | ||
32 | # CONFIG_IKCONFIG is not set | 33 | # CONFIG_IKCONFIG is not set |
34 | # CONFIG_RELAY is not set | ||
35 | CONFIG_INITRAMFS_SOURCE="" | ||
36 | CONFIG_UID16=y | ||
37 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
33 | CONFIG_EMBEDDED=y | 38 | CONFIG_EMBEDDED=y |
34 | # CONFIG_KALLSYMS is not set | 39 | # CONFIG_KALLSYMS is not set |
40 | # CONFIG_HOTPLUG is not set | ||
35 | CONFIG_PRINTK=y | 41 | CONFIG_PRINTK=y |
36 | CONFIG_BUG=y | 42 | CONFIG_BUG=y |
43 | CONFIG_ELF_CORE=y | ||
37 | CONFIG_BASE_FULL=y | 44 | CONFIG_BASE_FULL=y |
38 | # CONFIG_FUTEX is not set | 45 | # CONFIG_FUTEX is not set |
39 | # CONFIG_EPOLL is not set | 46 | # CONFIG_EPOLL is not set |
40 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 47 | CONFIG_SLAB=y |
41 | CONFIG_CC_ALIGN_FUNCTIONS=0 | 48 | CONFIG_TINY_SHMEM=y |
42 | CONFIG_CC_ALIGN_LABELS=0 | ||
43 | CONFIG_CC_ALIGN_LOOPS=0 | ||
44 | CONFIG_CC_ALIGN_JUMPS=0 | ||
45 | CONFIG_BASE_SMALL=0 | 49 | CONFIG_BASE_SMALL=0 |
50 | # CONFIG_SLOB is not set | ||
46 | 51 | ||
47 | # | 52 | # |
48 | # Loadable module support | 53 | # Loadable module support |
@@ -50,6 +55,24 @@ CONFIG_BASE_SMALL=0 | |||
50 | # CONFIG_MODULES is not set | 55 | # CONFIG_MODULES is not set |
51 | 56 | ||
52 | # | 57 | # |
58 | # Block layer | ||
59 | # | ||
60 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
61 | |||
62 | # | ||
63 | # IO Schedulers | ||
64 | # | ||
65 | CONFIG_IOSCHED_NOOP=y | ||
66 | # CONFIG_IOSCHED_AS is not set | ||
67 | # CONFIG_IOSCHED_DEADLINE is not set | ||
68 | # CONFIG_IOSCHED_CFQ is not set | ||
69 | # CONFIG_DEFAULT_AS is not set | ||
70 | # CONFIG_DEFAULT_DEADLINE is not set | ||
71 | # CONFIG_DEFAULT_CFQ is not set | ||
72 | CONFIG_DEFAULT_NOOP=y | ||
73 | CONFIG_DEFAULT_IOSCHED="noop" | ||
74 | |||
75 | # | ||
53 | # Processor type and features | 76 | # Processor type and features |
54 | # | 77 | # |
55 | # CONFIG_M68328 is not set | 78 | # CONFIG_M68328 is not set |
@@ -58,6 +81,7 @@ CONFIG_BASE_SMALL=0 | |||
58 | # CONFIG_M68360 is not set | 81 | # CONFIG_M68360 is not set |
59 | # CONFIG_M5206 is not set | 82 | # CONFIG_M5206 is not set |
60 | # CONFIG_M5206e is not set | 83 | # CONFIG_M5206e is not set |
84 | # CONFIG_M520x is not set | ||
61 | # CONFIG_M523x is not set | 85 | # CONFIG_M523x is not set |
62 | # CONFIG_M5249 is not set | 86 | # CONFIG_M5249 is not set |
63 | # CONFIG_M5271 is not set | 87 | # CONFIG_M5271 is not set |
@@ -65,29 +89,12 @@ CONFIG_M5272=y | |||
65 | # CONFIG_M5275 is not set | 89 | # CONFIG_M5275 is not set |
66 | # CONFIG_M528x is not set | 90 | # CONFIG_M528x is not set |
67 | # CONFIG_M5307 is not set | 91 | # CONFIG_M5307 is not set |
92 | # CONFIG_M532x is not set | ||
68 | # CONFIG_M5407 is not set | 93 | # CONFIG_M5407 is not set |
69 | CONFIG_COLDFIRE=y | 94 | CONFIG_COLDFIRE=y |
70 | # CONFIG_CLOCK_AUTO is not set | 95 | CONFIG_CLOCK_SET=y |
71 | # CONFIG_CLOCK_11MHz is not set | 96 | CONFIG_CLOCK_FREQ=66666666 |
72 | # CONFIG_CLOCK_16MHz is not set | 97 | CONFIG_CLOCK_DIV=1 |
73 | # CONFIG_CLOCK_20MHz is not set | ||
74 | # CONFIG_CLOCK_24MHz is not set | ||
75 | # CONFIG_CLOCK_25MHz is not set | ||
76 | # CONFIG_CLOCK_33MHz is not set | ||
77 | # CONFIG_CLOCK_40MHz is not set | ||
78 | # CONFIG_CLOCK_45MHz is not set | ||
79 | # CONFIG_CLOCK_48MHz is not set | ||
80 | # CONFIG_CLOCK_50MHz is not set | ||
81 | # CONFIG_CLOCK_54MHz is not set | ||
82 | # CONFIG_CLOCK_60MHz is not set | ||
83 | # CONFIG_CLOCK_62_5MHz is not set | ||
84 | # CONFIG_CLOCK_64MHz is not set | ||
85 | CONFIG_CLOCK_66MHz=y | ||
86 | # CONFIG_CLOCK_70MHz is not set | ||
87 | # CONFIG_CLOCK_100MHz is not set | ||
88 | # CONFIG_CLOCK_140MHz is not set | ||
89 | # CONFIG_CLOCK_150MHz is not set | ||
90 | # CONFIG_CLOCK_166MHz is not set | ||
91 | 98 | ||
92 | # | 99 | # |
93 | # Platform | 100 | # Platform |
@@ -102,11 +109,14 @@ CONFIG_M5272C3=y | |||
102 | CONFIG_FREESCALE=y | 109 | CONFIG_FREESCALE=y |
103 | # CONFIG_LARGE_ALLOCS is not set | 110 | # CONFIG_LARGE_ALLOCS is not set |
104 | CONFIG_4KSTACKS=y | 111 | CONFIG_4KSTACKS=y |
105 | CONFIG_RAMAUTO=y | 112 | |
106 | # CONFIG_RAM4MB is not set | 113 | # |
107 | # CONFIG_RAM8MB is not set | 114 | # RAM configuration |
108 | # CONFIG_RAM16MB is not set | 115 | # |
109 | # CONFIG_RAM32MB is not set | 116 | CONFIG_RAMBASE=0x0 |
117 | CONFIG_RAMSIZE=0x800000 | ||
118 | CONFIG_VECTORBASE=0x0 | ||
119 | CONFIG_KERNELBASE=0x20000 | ||
110 | CONFIG_RAMAUTOBIT=y | 120 | CONFIG_RAMAUTOBIT=y |
111 | # CONFIG_RAM8BIT is not set | 121 | # CONFIG_RAM8BIT is not set |
112 | # CONFIG_RAM16BIT is not set | 122 | # CONFIG_RAM16BIT is not set |
@@ -119,6 +129,8 @@ CONFIG_FLATMEM_MANUAL=y | |||
119 | # CONFIG_SPARSEMEM_MANUAL is not set | 129 | # CONFIG_SPARSEMEM_MANUAL is not set |
120 | CONFIG_FLATMEM=y | 130 | CONFIG_FLATMEM=y |
121 | CONFIG_FLAT_NODE_MEM_MAP=y | 131 | CONFIG_FLAT_NODE_MEM_MAP=y |
132 | # CONFIG_SPARSEMEM_STATIC is not set | ||
133 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
122 | 134 | ||
123 | # | 135 | # |
124 | # Bus options (PCI, PCMCIA, EISA, MCA, ISA) | 136 | # Bus options (PCI, PCMCIA, EISA, MCA, ISA) |
@@ -140,6 +152,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y | |||
140 | CONFIG_BINFMT_FLAT=y | 152 | CONFIG_BINFMT_FLAT=y |
141 | # CONFIG_BINFMT_ZFLAT is not set | 153 | # CONFIG_BINFMT_ZFLAT is not set |
142 | # CONFIG_BINFMT_SHARED_FLAT is not set | 154 | # CONFIG_BINFMT_SHARED_FLAT is not set |
155 | # CONFIG_BINFMT_AOUT is not set | ||
143 | # CONFIG_BINFMT_MISC is not set | 156 | # CONFIG_BINFMT_MISC is not set |
144 | 157 | ||
145 | # | 158 | # |
@@ -155,6 +168,7 @@ CONFIG_NET=y | |||
155 | # | 168 | # |
156 | # Networking options | 169 | # Networking options |
157 | # | 170 | # |
171 | # CONFIG_NETDEBUG is not set | ||
158 | CONFIG_PACKET=y | 172 | CONFIG_PACKET=y |
159 | # CONFIG_PACKET_MMAP is not set | 173 | # CONFIG_PACKET_MMAP is not set |
160 | CONFIG_UNIX=y | 174 | CONFIG_UNIX=y |
@@ -171,18 +185,30 @@ CONFIG_IP_FIB_HASH=y | |||
171 | # CONFIG_INET_AH is not set | 185 | # CONFIG_INET_AH is not set |
172 | # CONFIG_INET_ESP is not set | 186 | # CONFIG_INET_ESP is not set |
173 | # CONFIG_INET_IPCOMP is not set | 187 | # CONFIG_INET_IPCOMP is not set |
188 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
174 | # CONFIG_INET_TUNNEL is not set | 189 | # CONFIG_INET_TUNNEL is not set |
175 | # CONFIG_IP_TCPDIAG is not set | 190 | # CONFIG_INET_DIAG is not set |
176 | # CONFIG_IP_TCPDIAG_IPV6 is not set | ||
177 | # CONFIG_TCP_CONG_ADVANCED is not set | 191 | # CONFIG_TCP_CONG_ADVANCED is not set |
178 | CONFIG_TCP_CONG_BIC=y | 192 | CONFIG_TCP_CONG_BIC=y |
179 | # CONFIG_IPV6 is not set | 193 | # CONFIG_IPV6 is not set |
194 | # CONFIG_INET6_XFRM_TUNNEL is not set | ||
195 | # CONFIG_INET6_TUNNEL is not set | ||
180 | # CONFIG_NETFILTER is not set | 196 | # CONFIG_NETFILTER is not set |
181 | 197 | ||
182 | # | 198 | # |
199 | # DCCP Configuration (EXPERIMENTAL) | ||
200 | # | ||
201 | # CONFIG_IP_DCCP is not set | ||
202 | |||
203 | # | ||
183 | # SCTP Configuration (EXPERIMENTAL) | 204 | # SCTP Configuration (EXPERIMENTAL) |
184 | # | 205 | # |
185 | # CONFIG_IP_SCTP is not set | 206 | # CONFIG_IP_SCTP is not set |
207 | |||
208 | # | ||
209 | # TIPC Configuration (EXPERIMENTAL) | ||
210 | # | ||
211 | # CONFIG_TIPC is not set | ||
186 | # CONFIG_ATM is not set | 212 | # CONFIG_ATM is not set |
187 | # CONFIG_BRIDGE is not set | 213 | # CONFIG_BRIDGE is not set |
188 | # CONFIG_VLAN_8021Q is not set | 214 | # CONFIG_VLAN_8021Q is not set |
@@ -195,8 +221,11 @@ CONFIG_TCP_CONG_BIC=y | |||
195 | # CONFIG_NET_DIVERT is not set | 221 | # CONFIG_NET_DIVERT is not set |
196 | # CONFIG_ECONET is not set | 222 | # CONFIG_ECONET is not set |
197 | # CONFIG_WAN_ROUTER is not set | 223 | # CONFIG_WAN_ROUTER is not set |
224 | |||
225 | # | ||
226 | # QoS and/or fair queueing | ||
227 | # | ||
198 | # CONFIG_NET_SCHED is not set | 228 | # CONFIG_NET_SCHED is not set |
199 | # CONFIG_NET_CLS_ROUTE is not set | ||
200 | 229 | ||
201 | # | 230 | # |
202 | # Network testing | 231 | # Network testing |
@@ -205,6 +234,7 @@ CONFIG_TCP_CONG_BIC=y | |||
205 | # CONFIG_HAMRADIO is not set | 234 | # CONFIG_HAMRADIO is not set |
206 | # CONFIG_IRDA is not set | 235 | # CONFIG_IRDA is not set |
207 | # CONFIG_BT is not set | 236 | # CONFIG_BT is not set |
237 | # CONFIG_IEEE80211 is not set | ||
208 | 238 | ||
209 | # | 239 | # |
210 | # Device Drivers | 240 | # Device Drivers |
@@ -218,6 +248,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y | |||
218 | # CONFIG_FW_LOADER is not set | 248 | # CONFIG_FW_LOADER is not set |
219 | 249 | ||
220 | # | 250 | # |
251 | # Connector - unified userspace <-> kernelspace linker | ||
252 | # | ||
253 | # CONFIG_CONNECTOR is not set | ||
254 | |||
255 | # | ||
221 | # Memory Technology Devices (MTD) | 256 | # Memory Technology Devices (MTD) |
222 | # | 257 | # |
223 | CONFIG_MTD=y | 258 | CONFIG_MTD=y |
@@ -235,6 +270,7 @@ CONFIG_MTD_BLOCK=y | |||
235 | # CONFIG_FTL is not set | 270 | # CONFIG_FTL is not set |
236 | # CONFIG_NFTL is not set | 271 | # CONFIG_NFTL is not set |
237 | # CONFIG_INFTL is not set | 272 | # CONFIG_INFTL is not set |
273 | # CONFIG_RFD_FTL is not set | ||
238 | 274 | ||
239 | # | 275 | # |
240 | # RAM/ROM/Flash chip drivers | 276 | # RAM/ROM/Flash chip drivers |
@@ -254,13 +290,13 @@ CONFIG_MTD_CFI_I2=y | |||
254 | CONFIG_MTD_RAM=y | 290 | CONFIG_MTD_RAM=y |
255 | # CONFIG_MTD_ROM is not set | 291 | # CONFIG_MTD_ROM is not set |
256 | # CONFIG_MTD_ABSENT is not set | 292 | # CONFIG_MTD_ABSENT is not set |
293 | # CONFIG_MTD_OBSOLETE_CHIPS is not set | ||
257 | 294 | ||
258 | # | 295 | # |
259 | # Mapping drivers for chip access | 296 | # Mapping drivers for chip access |
260 | # | 297 | # |
261 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | 298 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set |
262 | CONFIG_MTD_UCLINUX=y | 299 | CONFIG_MTD_UCLINUX=y |
263 | # CONFIG_MTD_SNAPGEARuC is not set | ||
264 | # CONFIG_MTD_PLATRAM is not set | 300 | # CONFIG_MTD_PLATRAM is not set |
265 | 301 | ||
266 | # | 302 | # |
@@ -269,7 +305,6 @@ CONFIG_MTD_UCLINUX=y | |||
269 | # CONFIG_MTD_SLRAM is not set | 305 | # CONFIG_MTD_SLRAM is not set |
270 | # CONFIG_MTD_PHRAM is not set | 306 | # CONFIG_MTD_PHRAM is not set |
271 | # CONFIG_MTD_MTDRAM is not set | 307 | # CONFIG_MTD_MTDRAM is not set |
272 | # CONFIG_MTD_BLKMTD is not set | ||
273 | # CONFIG_MTD_BLOCK2MTD is not set | 308 | # CONFIG_MTD_BLOCK2MTD is not set |
274 | 309 | ||
275 | # | 310 | # |
@@ -285,6 +320,11 @@ CONFIG_MTD_UCLINUX=y | |||
285 | # CONFIG_MTD_NAND is not set | 320 | # CONFIG_MTD_NAND is not set |
286 | 321 | ||
287 | # | 322 | # |
323 | # OneNAND Flash Device Drivers | ||
324 | # | ||
325 | # CONFIG_MTD_ONENAND is not set | ||
326 | |||
327 | # | ||
288 | # Parallel port support | 328 | # Parallel port support |
289 | # | 329 | # |
290 | # CONFIG_PARPORT is not set | 330 | # CONFIG_PARPORT is not set |
@@ -296,7 +336,6 @@ CONFIG_MTD_UCLINUX=y | |||
296 | # | 336 | # |
297 | # Block devices | 337 | # Block devices |
298 | # | 338 | # |
299 | # CONFIG_BLK_DEV_FD is not set | ||
300 | # CONFIG_BLK_DEV_COW_COMMON is not set | 339 | # CONFIG_BLK_DEV_COW_COMMON is not set |
301 | # CONFIG_BLK_DEV_LOOP is not set | 340 | # CONFIG_BLK_DEV_LOOP is not set |
302 | # CONFIG_BLK_DEV_NBD is not set | 341 | # CONFIG_BLK_DEV_NBD is not set |
@@ -304,16 +343,7 @@ CONFIG_BLK_DEV_RAM=y | |||
304 | CONFIG_BLK_DEV_RAM_COUNT=16 | 343 | CONFIG_BLK_DEV_RAM_COUNT=16 |
305 | CONFIG_BLK_DEV_RAM_SIZE=4096 | 344 | CONFIG_BLK_DEV_RAM_SIZE=4096 |
306 | # CONFIG_BLK_DEV_INITRD is not set | 345 | # CONFIG_BLK_DEV_INITRD is not set |
307 | CONFIG_INITRAMFS_SOURCE="" | ||
308 | # CONFIG_CDROM_PKTCDVD is not set | 346 | # CONFIG_CDROM_PKTCDVD is not set |
309 | |||
310 | # | ||
311 | # IO Schedulers | ||
312 | # | ||
313 | CONFIG_IOSCHED_NOOP=y | ||
314 | # CONFIG_IOSCHED_AS is not set | ||
315 | # CONFIG_IOSCHED_DEADLINE is not set | ||
316 | # CONFIG_IOSCHED_CFQ is not set | ||
317 | # CONFIG_ATA_OVER_ETH is not set | 347 | # CONFIG_ATA_OVER_ETH is not set |
318 | 348 | ||
319 | # | 349 | # |
@@ -324,6 +354,7 @@ CONFIG_IOSCHED_NOOP=y | |||
324 | # | 354 | # |
325 | # SCSI device support | 355 | # SCSI device support |
326 | # | 356 | # |
357 | # CONFIG_RAID_ATTRS is not set | ||
327 | # CONFIG_SCSI is not set | 358 | # CONFIG_SCSI is not set |
328 | 359 | ||
329 | # | 360 | # |
@@ -354,13 +385,15 @@ CONFIG_NETDEVICES=y | |||
354 | # CONFIG_TUN is not set | 385 | # CONFIG_TUN is not set |
355 | 386 | ||
356 | # | 387 | # |
388 | # PHY device support | ||
389 | # | ||
390 | # CONFIG_PHYLIB is not set | ||
391 | |||
392 | # | ||
357 | # Ethernet (10 or 100Mbit) | 393 | # Ethernet (10 or 100Mbit) |
358 | # | 394 | # |
359 | CONFIG_NET_ETHERNET=y | 395 | CONFIG_NET_ETHERNET=y |
360 | # CONFIG_MII is not set | 396 | # CONFIG_MII is not set |
361 | # CONFIG_NET_VENDOR_SMC is not set | ||
362 | # CONFIG_NE2000 is not set | ||
363 | # CONFIG_NET_PCI is not set | ||
364 | CONFIG_FEC=y | 397 | CONFIG_FEC=y |
365 | # CONFIG_FEC2 is not set | 398 | # CONFIG_FEC2 is not set |
366 | 399 | ||
@@ -392,6 +425,7 @@ CONFIG_PPP=y | |||
392 | # CONFIG_PPP_SYNC_TTY is not set | 425 | # CONFIG_PPP_SYNC_TTY is not set |
393 | # CONFIG_PPP_DEFLATE is not set | 426 | # CONFIG_PPP_DEFLATE is not set |
394 | # CONFIG_PPP_BSDCOMP is not set | 427 | # CONFIG_PPP_BSDCOMP is not set |
428 | # CONFIG_PPP_MPPE is not set | ||
395 | # CONFIG_PPPOE is not set | 429 | # CONFIG_PPPOE is not set |
396 | # CONFIG_SLIP is not set | 430 | # CONFIG_SLIP is not set |
397 | # CONFIG_SHAPER is not set | 431 | # CONFIG_SHAPER is not set |
@@ -425,8 +459,6 @@ CONFIG_PPP=y | |||
425 | # | 459 | # |
426 | # CONFIG_VT is not set | 460 | # CONFIG_VT is not set |
427 | # CONFIG_SERIAL_NONSTANDARD is not set | 461 | # CONFIG_SERIAL_NONSTANDARD is not set |
428 | # CONFIG_LEDMAN is not set | ||
429 | # CONFIG_RESETSWITCH is not set | ||
430 | 462 | ||
431 | # | 463 | # |
432 | # Serial drivers | 464 | # Serial drivers |
@@ -450,8 +482,6 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
450 | # Watchdog Cards | 482 | # Watchdog Cards |
451 | # | 483 | # |
452 | # CONFIG_WATCHDOG is not set | 484 | # CONFIG_WATCHDOG is not set |
453 | # CONFIG_MCFWATCHDOG is not set | ||
454 | # CONFIG_RTC is not set | ||
455 | # CONFIG_GEN_RTC is not set | 485 | # CONFIG_GEN_RTC is not set |
456 | # CONFIG_DTLK is not set | 486 | # CONFIG_DTLK is not set |
457 | # CONFIG_R3964 is not set | 487 | # CONFIG_R3964 is not set |
@@ -464,14 +494,19 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
464 | # | 494 | # |
465 | # TPM devices | 495 | # TPM devices |
466 | # | 496 | # |
467 | # CONFIG_MCF_QSPI is not set | 497 | # CONFIG_TCG_TPM is not set |
468 | # CONFIG_M41T11M6 is not set | 498 | # CONFIG_TELCLOCK is not set |
469 | 499 | ||
470 | # | 500 | # |
471 | # I2C support | 501 | # I2C support |
472 | # | 502 | # |
473 | # CONFIG_I2C is not set | 503 | # CONFIG_I2C is not set |
474 | # CONFIG_I2C_SENSOR is not set | 504 | |
505 | # | ||
506 | # SPI support | ||
507 | # | ||
508 | # CONFIG_SPI is not set | ||
509 | # CONFIG_SPI_MASTER is not set | ||
475 | 510 | ||
476 | # | 511 | # |
477 | # Dallas's 1-wire bus | 512 | # Dallas's 1-wire bus |
@@ -482,6 +517,7 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
482 | # Hardware Monitoring support | 517 | # Hardware Monitoring support |
483 | # | 518 | # |
484 | # CONFIG_HWMON is not set | 519 | # CONFIG_HWMON is not set |
520 | # CONFIG_HWMON_VID is not set | ||
485 | 521 | ||
486 | # | 522 | # |
487 | # Misc devices | 523 | # Misc devices |
@@ -491,6 +527,7 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
491 | # Multimedia devices | 527 | # Multimedia devices |
492 | # | 528 | # |
493 | # CONFIG_VIDEO_DEV is not set | 529 | # CONFIG_VIDEO_DEV is not set |
530 | CONFIG_VIDEO_V4L2=y | ||
494 | 531 | ||
495 | # | 532 | # |
496 | # Digital Video Broadcasting Devices | 533 | # Digital Video Broadcasting Devices |
@@ -503,11 +540,6 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
503 | # CONFIG_FB is not set | 540 | # CONFIG_FB is not set |
504 | 541 | ||
505 | # | 542 | # |
506 | # SPI support | ||
507 | # | ||
508 | # CONFIG_SPI is not set | ||
509 | |||
510 | # | ||
511 | # Sound | 543 | # Sound |
512 | # | 544 | # |
513 | # CONFIG_SOUND is not set | 545 | # CONFIG_SOUND is not set |
@@ -517,6 +549,11 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
517 | # | 549 | # |
518 | # CONFIG_USB_ARCH_HAS_HCD is not set | 550 | # CONFIG_USB_ARCH_HAS_HCD is not set |
519 | # CONFIG_USB_ARCH_HAS_OHCI is not set | 551 | # CONFIG_USB_ARCH_HAS_OHCI is not set |
552 | # CONFIG_USB_ARCH_HAS_EHCI is not set | ||
553 | |||
554 | # | ||
555 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | ||
556 | # | ||
520 | 557 | ||
521 | # | 558 | # |
522 | # USB Gadget Support | 559 | # USB Gadget Support |
@@ -529,29 +566,43 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
529 | # CONFIG_MMC is not set | 566 | # CONFIG_MMC is not set |
530 | 567 | ||
531 | # | 568 | # |
569 | # LED devices | ||
570 | # | ||
571 | # CONFIG_NEW_LEDS is not set | ||
572 | |||
573 | # | ||
574 | # LED drivers | ||
575 | # | ||
576 | |||
577 | # | ||
578 | # LED Triggers | ||
579 | # | ||
580 | |||
581 | # | ||
532 | # InfiniBand support | 582 | # InfiniBand support |
533 | # | 583 | # |
534 | 584 | ||
535 | # | 585 | # |
536 | # SN Devices | 586 | # EDAC - error detection and reporting (RAS) (EXPERIMENTAL) |
537 | # | 587 | # |
538 | 588 | ||
539 | # | 589 | # |
590 | # Real Time Clock | ||
591 | # | ||
592 | # CONFIG_RTC_CLASS is not set | ||
593 | |||
594 | # | ||
540 | # File systems | 595 | # File systems |
541 | # | 596 | # |
542 | CONFIG_EXT2_FS=y | 597 | CONFIG_EXT2_FS=y |
543 | # CONFIG_EXT2_FS_XATTR is not set | 598 | # CONFIG_EXT2_FS_XATTR is not set |
544 | # CONFIG_EXT2_FS_XIP is not set | 599 | # CONFIG_EXT2_FS_XIP is not set |
545 | # CONFIG_EXT3_FS is not set | 600 | # CONFIG_EXT3_FS is not set |
546 | # CONFIG_JBD is not set | ||
547 | # CONFIG_REISERFS_FS is not set | 601 | # CONFIG_REISERFS_FS is not set |
548 | # CONFIG_JFS_FS is not set | 602 | # CONFIG_JFS_FS is not set |
549 | # CONFIG_FS_POSIX_ACL is not set | 603 | # CONFIG_FS_POSIX_ACL is not set |
550 | |||
551 | # | ||
552 | # XFS support | ||
553 | # | ||
554 | # CONFIG_XFS_FS is not set | 604 | # CONFIG_XFS_FS is not set |
605 | # CONFIG_OCFS2_FS is not set | ||
555 | # CONFIG_MINIX_FS is not set | 606 | # CONFIG_MINIX_FS is not set |
556 | CONFIG_ROMFS_FS=y | 607 | CONFIG_ROMFS_FS=y |
557 | # CONFIG_INOTIFY is not set | 608 | # CONFIG_INOTIFY is not set |
@@ -559,6 +610,7 @@ CONFIG_ROMFS_FS=y | |||
559 | # CONFIG_DNOTIFY is not set | 610 | # CONFIG_DNOTIFY is not set |
560 | # CONFIG_AUTOFS_FS is not set | 611 | # CONFIG_AUTOFS_FS is not set |
561 | # CONFIG_AUTOFS4_FS is not set | 612 | # CONFIG_AUTOFS4_FS is not set |
613 | # CONFIG_FUSE_FS is not set | ||
562 | 614 | ||
563 | # | 615 | # |
564 | # CD-ROM/DVD Filesystems | 616 | # CD-ROM/DVD Filesystems |
@@ -581,6 +633,7 @@ CONFIG_SYSFS=y | |||
581 | # CONFIG_TMPFS is not set | 633 | # CONFIG_TMPFS is not set |
582 | # CONFIG_HUGETLB_PAGE is not set | 634 | # CONFIG_HUGETLB_PAGE is not set |
583 | CONFIG_RAMFS=y | 635 | CONFIG_RAMFS=y |
636 | # CONFIG_CONFIGFS_FS is not set | ||
584 | 637 | ||
585 | # | 638 | # |
586 | # Miscellaneous filesystems | 639 | # Miscellaneous filesystems |
@@ -611,6 +664,7 @@ CONFIG_RAMFS=y | |||
611 | # CONFIG_NCP_FS is not set | 664 | # CONFIG_NCP_FS is not set |
612 | # CONFIG_CODA_FS is not set | 665 | # CONFIG_CODA_FS is not set |
613 | # CONFIG_AFS_FS is not set | 666 | # CONFIG_AFS_FS is not set |
667 | # CONFIG_9P_FS is not set | ||
614 | 668 | ||
615 | # | 669 | # |
616 | # Partition Types | 670 | # Partition Types |
@@ -627,8 +681,12 @@ CONFIG_MSDOS_PARTITION=y | |||
627 | # Kernel hacking | 681 | # Kernel hacking |
628 | # | 682 | # |
629 | # CONFIG_PRINTK_TIME is not set | 683 | # CONFIG_PRINTK_TIME is not set |
684 | # CONFIG_MAGIC_SYSRQ is not set | ||
630 | # CONFIG_DEBUG_KERNEL is not set | 685 | # CONFIG_DEBUG_KERNEL is not set |
631 | CONFIG_LOG_BUF_SHIFT=14 | 686 | CONFIG_LOG_BUF_SHIFT=14 |
687 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
688 | # CONFIG_DEBUG_FS is not set | ||
689 | # CONFIG_UNWIND_INFO is not set | ||
632 | # CONFIG_FULLDEBUG is not set | 690 | # CONFIG_FULLDEBUG is not set |
633 | # CONFIG_HIGHPROFILE is not set | 691 | # CONFIG_HIGHPROFILE is not set |
634 | # CONFIG_BOOTPARAM is not set | 692 | # CONFIG_BOOTPARAM is not set |
@@ -655,5 +713,6 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
655 | # Library routines | 713 | # Library routines |
656 | # | 714 | # |
657 | # CONFIG_CRC_CCITT is not set | 715 | # CONFIG_CRC_CCITT is not set |
716 | # CONFIG_CRC16 is not set | ||
658 | # CONFIG_CRC32 is not set | 717 | # CONFIG_CRC32 is not set |
659 | # CONFIG_LIBCRC32C is not set | 718 | # CONFIG_LIBCRC32C is not set |
diff --git a/arch/m68knommu/platform/68328/head-rom.S b/arch/m68knommu/platform/68328/head-rom.S index 2b448a297011..234430b9551c 100644 --- a/arch/m68knommu/platform/68328/head-rom.S +++ b/arch/m68knommu/platform/68328/head-rom.S | |||
@@ -28,6 +28,8 @@ _ramstart: | |||
28 | _ramend: | 28 | _ramend: |
29 | .long 0 | 29 | .long 0 |
30 | 30 | ||
31 | #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE) | ||
32 | |||
31 | #ifdef CONFIG_INIT_LCD | 33 | #ifdef CONFIG_INIT_LCD |
32 | splash_bits: | 34 | splash_bits: |
33 | #include "bootlogo.rh" | 35 | #include "bootlogo.rh" |
@@ -48,7 +50,7 @@ _stext: movew #0x2700,%sr | |||
48 | moveb #0x81, 0xfffffA27 /* LCKCON */ | 50 | moveb #0x81, 0xfffffA27 /* LCKCON */ |
49 | movew #0xff00, 0xfffff412 /* LCD pins */ | 51 | movew #0xff00, 0xfffff412 /* LCD pins */ |
50 | #endif | 52 | #endif |
51 | moveal #__ramend-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp | 53 | moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp |
52 | movew #32767, %d0 /* PLL settle wait loop */ | 54 | movew #32767, %d0 /* PLL settle wait loop */ |
53 | 1: subq #1, %d0 | 55 | 1: subq #1, %d0 |
54 | bne 1b | 56 | bne 1b |
@@ -73,13 +75,13 @@ _stext: movew #0x2700,%sr | |||
73 | bhi 1b | 75 | bhi 1b |
74 | 76 | ||
75 | movel #_sdata, %d0 | 77 | movel #_sdata, %d0 |
76 | movel %d0, _rambase | 78 | movel %d0, _rambase |
77 | movel #_ebss, %d0 | 79 | movel #_ebss, %d0 |
78 | movel %d0, _ramstart | 80 | movel %d0, _ramstart |
79 | movel #__ramend-CONFIG_MEMORY_RESERVE*0x100000, %d0 | 81 | movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0 |
80 | movel %d0, _ramend | 82 | movel %d0, _ramend |
81 | movel #__ramvec, %d0 | 83 | movel #CONFIG_VECTORBASE, %d0 |
82 | movel %d0, _ramvec | 84 | movel %d0, _ramvec |
83 | 85 | ||
84 | /* | 86 | /* |
85 | * load the current task pointer and stack | 87 | * load the current task pointer and stack |
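The head-rom.S change derives the top of RAM from Kconfig symbols instead of the old __ramend linker symbol. As a purely illustrative calculation, borrowing the RAM values from the ColdFire defconfig earlier in this patch (a 68328 board would use its own numbers), the new macro expands as:

/* Illustration only: CONFIG_* values borrowed from the defconfig above. */
#define CONFIG_RAMBASE	0x0
#define CONFIG_RAMSIZE	0x800000
#define RAMEND		(CONFIG_RAMBASE + CONFIG_RAMSIZE)	/* = 0x800000, i.e. 8 MiB */

The initial stack pointer is then set just below RAMEND minus the reserved region, exactly as the old __ramend-based expression did.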
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S index a5c639a51eef..f497713a4ec7 100644 --- a/arch/m68knommu/platform/68360/head-ram.S +++ b/arch/m68knommu/platform/68360/head-ram.S | |||
@@ -18,7 +18,6 @@ | |||
18 | .global _start | 18 | .global _start |
19 | 19 | ||
20 | .global _rambase | 20 | .global _rambase |
21 | .global __ramvec | ||
22 | .global _ramvec | 21 | .global _ramvec |
23 | .global _ramstart | 22 | .global _ramstart |
24 | .global _ramend | 23 | .global _ramend |
@@ -26,6 +25,8 @@ | |||
26 | .global _quicc_base | 25 | .global _quicc_base |
27 | .global _periph_base | 26 | .global _periph_base |
28 | 27 | ||
28 | #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE) | ||
29 | |||
29 | #define REGB 0x1000 | 30 | #define REGB 0x1000 |
30 | #define PEPAR (_dprbase + REGB + 0x0016) | 31 | #define PEPAR (_dprbase + REGB + 0x0016) |
31 | #define GMR (_dprbase + REGB + 0x0040) | 32 | #define GMR (_dprbase + REGB + 0x0040) |
@@ -103,7 +104,7 @@ _stext: | |||
103 | nop | 104 | nop |
104 | ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ | 105 | ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ |
105 | /* We should not need to setup the boot stack the reset should do it. */ | 106 | /* We should not need to setup the boot stack the reset should do it. */ |
106 | movea.l #__ramend, %sp /*set up stack at the end of DRAM:*/ | 107 | movea.l #RAMEND, %sp /*set up stack at the end of DRAM:*/ |
107 | 108 | ||
108 | set_mbar_register: | 109 | set_mbar_register: |
109 | moveq.l #0x07, %d1 /* Setup MBAR */ | 110 | moveq.l #0x07, %d1 /* Setup MBAR */ |
@@ -163,7 +164,7 @@ configure_memory_controller: | |||
163 | move.l %d0, GMR | 164 | move.l %d0, GMR |
164 | 165 | ||
165 | configure_chip_select_0: | 166 | configure_chip_select_0: |
166 | move.l #__ramend, %d0 | 167 | move.l #RAMEND, %d0 |
167 | subi.l #__ramstart, %d0 | 168 | subi.l #__ramstart, %d0 |
168 | subq.l #0x01, %d0 | 169 | subq.l #0x01, %d0 |
169 | eori.l #SIM_OR_MASK, %d0 | 170 | eori.l #SIM_OR_MASK, %d0 |
@@ -234,16 +235,10 @@ store_ram_size: | |||
234 | /* Set ram size information */ | 235 | /* Set ram size information */ |
235 | move.l #_sdata, _rambase | 236 | move.l #_sdata, _rambase |
236 | move.l #_ebss, _ramstart | 237 | move.l #_ebss, _ramstart |
237 | move.l #__ramend, %d0 | 238 | move.l #RAMEND, %d0 |
238 | sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ | 239 | sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ |
239 | move.l %d0, _ramend /* Different from __ramend.*/ | 240 | move.l %d0, _ramend /* Different from RAMEND.*/ |
240 | 241 | ||
241 | store_flash_size: | ||
242 | /* Set rom size information */ | ||
243 | move.l #__rom_end, %d0 | ||
244 | sub.l #__rom_start, %d0 | ||
245 | move.l %d0, rom_length | ||
246 | |||
247 | pea 0 | 242 | pea 0 |
248 | pea env | 243 | pea env |
249 | pea %sp@(4) | 244 | pea %sp@(4) |
@@ -286,7 +281,7 @@ _dprbase: | |||
286 | */ | 281 | */ |
287 | 282 | ||
288 | .section ".data.initvect","awx" | 283 | .section ".data.initvect","awx" |
289 | .long __ramend /* Reset: Initial Stack Pointer - 0. */ | 284 | .long RAMEND /* Reset: Initial Stack Pointer - 0. */ |
290 | .long _start /* Reset: Initial Program Counter - 1. */ | 285 | .long _start /* Reset: Initial Program Counter - 1. */ |
291 | .long buserr /* Bus Error - 2. */ | 286 | .long buserr /* Bus Error - 2. */ |
292 | .long trap /* Address Error - 3. */ | 287 | .long trap /* Address Error - 3. */ |
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S index 0da357a4cfee..2d28c3e19a88 100644 --- a/arch/m68knommu/platform/68360/head-rom.S +++ b/arch/m68knommu/platform/68360/head-rom.S | |||
@@ -18,7 +18,6 @@ | |||
18 | .global _start | 18 | .global _start |
19 | 19 | ||
20 | .global _rambase | 20 | .global _rambase |
21 | .global __ramvec | ||
22 | .global _ramvec | 21 | .global _ramvec |
23 | .global _ramstart | 22 | .global _ramstart |
24 | .global _ramend | 23 | .global _ramend |
@@ -26,6 +25,8 @@ | |||
26 | .global _quicc_base | 25 | .global _quicc_base |
27 | .global _periph_base | 26 | .global _periph_base |
28 | 27 | ||
28 | #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE) | ||
29 | |||
29 | #define REGB 0x1000 | 30 | #define REGB 0x1000 |
30 | #define PEPAR (_dprbase + REGB + 0x0016) | 31 | #define PEPAR (_dprbase + REGB + 0x0016) |
31 | #define GMR (_dprbase + REGB + 0x0040) | 32 | #define GMR (_dprbase + REGB + 0x0040) |
@@ -115,7 +116,7 @@ _stext: | |||
115 | nop | 116 | nop |
116 | ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ | 117 | ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ |
117 | /* We should not need to setup the boot stack the reset should do it. */ | 118 | /* We should not need to setup the boot stack the reset should do it. */ |
118 | movea.l #__ramend, %sp /* set up stack at the end of DRAM:*/ | 119 | movea.l #RAMEND, %sp /* set up stack at the end of DRAM:*/ |
119 | 120 | ||
120 | 121 | ||
121 | set_mbar_register: | 122 | set_mbar_register: |
@@ -245,16 +246,10 @@ store_ram_size: | |||
245 | /* Set ram size information */ | 246 | /* Set ram size information */ |
246 | move.l #_sdata, _rambase | 247 | move.l #_sdata, _rambase |
247 | move.l #_ebss, _ramstart | 248 | move.l #_ebss, _ramstart |
248 | move.l #__ramend, %d0 | 249 | move.l #RAMEND, %d0 |
249 | sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ | 250 | sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ |
250 | move.l %d0, _ramend /* Different from __ramend.*/ | 251 | move.l %d0, _ramend /* Different from RAMEND.*/ |
251 | 252 | ||
252 | store_flash_size: | ||
253 | /* Set rom size information */ | ||
254 | move.l #__rom_end, %d0 | ||
255 | sub.l #__rom_start, %d0 | ||
256 | move.l %d0, rom_length | ||
257 | |||
258 | pea 0 | 253 | pea 0 |
259 | pea env | 254 | pea env |
260 | pea %sp@(4) | 255 | pea %sp@(4) |
@@ -298,7 +293,7 @@ _dprbase: | |||
298 | */ | 293 | */ |
299 | 294 | ||
300 | .section ".data.initvect","awx" | 295 | .section ".data.initvect","awx" |
301 | .long __ramend /* Reset: Initial Stack Pointer - 0. */ | 296 | .long RAMEND /* Reset: Initial Stack Pointer - 0. */ |
302 | .long _start /* Reset: Initial Program Counter - 1. */ | 297 | .long _start /* Reset: Initial Program Counter - 1. */ |
303 | .long buserr /* Bus Error - 2. */ | 298 | .long buserr /* Bus Error - 2. */ |
304 | .long trap /* Address Error - 3. */ | 299 | .long trap /* Address Error - 3. */ |
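Both 68360 head files (head-ram.S and head-rom.S) stop relying on the linker-provided __ramend symbol and instead compute the top of RAM from the Kconfig-selected base and size via a local RAMEND macro; the store_flash_size block that recorded the ROM extent from __rom_start/__rom_end is dropped along with it. A small userspace sketch of the arithmetic involved; the CONFIG_* values below are invented for illustration, not taken from the patch:

    #include <stdio.h>

    /* Illustrative only: the values the 68360 head code now derives from
     * the RAMEND macro.  The real CONFIG_* values come from Kconfig. */
    #define CONFIG_RAMBASE 0x00000000UL
    #define CONFIG_RAMSIZE 0x00800000UL             /* 8 MiB, hypothetical */
    #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)

    int main(void)
    {
            unsigned long initial_sp = RAMEND;            /* movea.l #RAMEND, %sp */
            unsigned long ramend_var = RAMEND - 0x1000;   /* 4K kept for the stack;
                                                             stored in _ramend */
            printf("initial sp %#lx, _ramend %#lx\n", initial_sp, ramend_var);
            return 0;
    }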
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 298f82fe8440..9096a5ea4229 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -446,7 +446,7 @@ static int __init topology_init(void) | |||
446 | int ret; | 446 | int ret; |
447 | 447 | ||
448 | for_each_present_cpu(cpu) { | 448 | for_each_present_cpu(cpu) { |
449 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); | 449 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu); |
450 | if (ret) | 450 | if (ret) |
451 | printk(KERN_WARNING "topology_init: register_cpu %d " | 451 | printk(KERN_WARNING "topology_init: register_cpu %d " |
452 | "failed (%d)\n", cpu, ret); | 452 | "failed (%d)\n", cpu, ret); |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 2e8e52c135e6..70cf09afdf56 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -367,7 +367,7 @@ void mipsmt_prepare_cpus(void) | |||
367 | dvpe(); | 367 | dvpe(); |
368 | dmt(); | 368 | dmt(); |
369 | 369 | ||
370 | freeIPIq.lock = SPIN_LOCK_UNLOCKED; | 370 | spin_lock_init(&freeIPIq.lock); |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * We probably don't have as many VPEs as we do SMP "CPUs", | 373 | * We probably don't have as many VPEs as we do SMP "CPUs", |
@@ -375,7 +375,7 @@ void mipsmt_prepare_cpus(void) | |||
375 | */ | 375 | */ |
376 | for (i=0; i<NR_CPUS; i++) { | 376 | for (i=0; i<NR_CPUS; i++) { |
377 | IPIQ[i].head = IPIQ[i].tail = NULL; | 377 | IPIQ[i].head = IPIQ[i].tail = NULL; |
378 | IPIQ[i].lock = SPIN_LOCK_UNLOCKED; | 378 | spin_lock_init(&IPIQ[i].lock); |
379 | IPIQ[i].depth = 0; | 379 | IPIQ[i].depth = 0; |
380 | ipi_timer_latch[i] = 0; | 380 | ipi_timer_latch[i] = 0; |
381 | } | 381 | } |
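This is the first of several hunks in the series converting SPIN_LOCK_UNLOCKED initializers: locks embedded in runtime structures (the IPI queues here, csa->register_lock in spufs further down) switch to spin_lock_init(), while file-scope locks (pmf_lock, eeh_eventlist_lock, mmio_nvram_lock, rtc_lock, die_lock below) become DEFINE_SPINLOCK(). A minimal sketch of the two idioms, with placeholder names:

    #include <linux/spinlock.h>

    /* Static lock: replaces "spinlock_t l = SPIN_LOCK_UNLOCKED;". */
    static DEFINE_SPINLOCK(example_lock);

    /* Placeholder struct mirroring the IPIQ[] usage above. */
    struct ipi_queue {
            void *head, *tail;
            spinlock_t lock;
            int depth;
    };

    static void ipi_queue_init(struct ipi_queue *q)
    {
            q->head = q->tail = NULL;
            q->depth = 0;
            spin_lock_init(&q->lock);       /* embedded lock: runtime init */
    }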
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c index 3ba040050e4c..068b20d822e7 100644 --- a/arch/parisc/kernel/topology.c +++ b/arch/parisc/kernel/topology.c | |||
@@ -26,11 +26,10 @@ static struct cpu cpu_devices[NR_CPUS] __read_mostly; | |||
26 | 26 | ||
27 | static int __init topology_init(void) | 27 | static int __init topology_init(void) |
28 | { | 28 | { |
29 | struct node *parent = NULL; | ||
30 | int num; | 29 | int num; |
31 | 30 | ||
32 | for_each_present_cpu(num) { | 31 | for_each_present_cpu(num) { |
33 | register_cpu(&cpu_devices[num], num, parent); | 32 | register_cpu(&cpu_devices[num], num); |
34 | } | 33 | } |
35 | return 0; | 34 | return 0; |
36 | } | 35 | } |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index e5a44812441a..0932a62a1c96 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -215,7 +215,7 @@ int __init ppc_init(void) | |||
215 | 215 | ||
216 | /* register CPU devices */ | 216 | /* register CPU devices */ |
217 | for_each_possible_cpu(i) | 217 | for_each_possible_cpu(i) |
218 | register_cpu(&cpu_devices[i], i, NULL); | 218 | register_cpu(&cpu_devices[i], i); |
219 | 219 | ||
220 | /* call platform init */ | 220 | /* call platform init */ |
221 | if (ppc_md.init != NULL) { | 221 | if (ppc_md.init != NULL) { |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 5bc2585c8036..4662b580efa1 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu) | |||
279 | } | 279 | } |
280 | #endif /* CONFIG_HOTPLUG_CPU */ | 280 | #endif /* CONFIG_HOTPLUG_CPU */ |
281 | 281 | ||
282 | static int sysfs_cpu_notify(struct notifier_block *self, | 282 | static int __devinit sysfs_cpu_notify(struct notifier_block *self, |
283 | unsigned long action, void *hcpu) | 283 | unsigned long action, void *hcpu) |
284 | { | 284 | { |
285 | unsigned int cpu = (unsigned int)(long)hcpu; | 285 | unsigned int cpu = (unsigned int)(long)hcpu; |
@@ -297,30 +297,19 @@ static int sysfs_cpu_notify(struct notifier_block *self, | |||
297 | return NOTIFY_OK; | 297 | return NOTIFY_OK; |
298 | } | 298 | } |
299 | 299 | ||
300 | static struct notifier_block sysfs_cpu_nb = { | 300 | static struct notifier_block __devinitdata sysfs_cpu_nb = { |
301 | .notifier_call = sysfs_cpu_notify, | 301 | .notifier_call = sysfs_cpu_notify, |
302 | }; | 302 | }; |
303 | 303 | ||
304 | /* NUMA stuff */ | 304 | /* NUMA stuff */ |
305 | 305 | ||
306 | #ifdef CONFIG_NUMA | 306 | #ifdef CONFIG_NUMA |
307 | static struct node node_devices[MAX_NUMNODES]; | ||
308 | |||
309 | static void register_nodes(void) | 307 | static void register_nodes(void) |
310 | { | 308 | { |
311 | int i; | 309 | int i; |
312 | 310 | ||
313 | for (i = 0; i < MAX_NUMNODES; i++) { | 311 | for (i = 0; i < MAX_NUMNODES; i++) |
314 | if (node_online(i)) { | 312 | register_one_node(i); |
315 | int p_node = parent_node(i); | ||
316 | struct node *parent = NULL; | ||
317 | |||
318 | if (p_node != i) | ||
319 | parent = &node_devices[p_node]; | ||
320 | |||
321 | register_node(&node_devices[i], i, parent); | ||
322 | } | ||
323 | } | ||
324 | } | 313 | } |
325 | 314 | ||
326 | int sysfs_add_device_to_node(struct sys_device *dev, int nid) | 315 | int sysfs_add_device_to_node(struct sys_device *dev, int nid) |
@@ -359,23 +348,13 @@ static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL); | |||
359 | static int __init topology_init(void) | 348 | static int __init topology_init(void) |
360 | { | 349 | { |
361 | int cpu; | 350 | int cpu; |
362 | struct node *parent = NULL; | ||
363 | 351 | ||
364 | register_nodes(); | 352 | register_nodes(); |
365 | |||
366 | register_cpu_notifier(&sysfs_cpu_nb); | 353 | register_cpu_notifier(&sysfs_cpu_nb); |
367 | 354 | ||
368 | for_each_possible_cpu(cpu) { | 355 | for_each_possible_cpu(cpu) { |
369 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 356 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
370 | 357 | ||
371 | #ifdef CONFIG_NUMA | ||
372 | /* The node to which a cpu belongs can't be known | ||
373 | * until the cpu is made present. | ||
374 | */ | ||
375 | parent = NULL; | ||
376 | if (cpu_present(cpu)) | ||
377 | parent = &node_devices[cpu_to_node(cpu)]; | ||
378 | #endif | ||
379 | /* | 358 | /* |
380 | * For now, we just see if the system supports making | 359 | * For now, we just see if the system supports making |
381 | * the RTAS calls for CPU hotplug. But, there may be a | 360 | * the RTAS calls for CPU hotplug. But, there may be a |
@@ -387,7 +366,7 @@ static int __init topology_init(void) | |||
387 | c->no_control = 1; | 366 | c->no_control = 1; |
388 | 367 | ||
389 | if (cpu_online(cpu) || (c->no_control == 0)) { | 368 | if (cpu_online(cpu) || (c->no_control == 0)) { |
390 | register_cpu(c, cpu, parent); | 369 | register_cpu(c, cpu); |
391 | 370 | ||
392 | sysdev_create_file(&c->sysdev, &attr_physical_id); | 371 | sysdev_create_file(&c->sysdev, &attr_physical_id); |
393 | } | 372 | } |
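Two things happen in this sysfs.c hunk: the CPU-hotplug notifier gains __devinit/__devinitdata annotations (other hunks in the series use __cpuinit/__cpuinitdata for the same purpose, e.g. mce.c, numa.c and drivers/base/topology.c), and the open-coded NUMA node registration loop collapses onto register_one_node(), whose implementation appears in the drivers/base/node.c hunk further down. A sketch of the annotation pattern, with placeholder names:

    #include <linux/init.h>
    #include <linux/notifier.h>

    /* Sketch only; example_* names are not symbols from the patch. */
    static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                              unsigned long action, void *hcpu)
    {
            return NOTIFY_OK;
    }

    static struct notifier_block __cpuinitdata example_cpu_nb = {
            .notifier_call = example_cpu_callback,
    };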
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 9e30f968c184..d454caada265 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/idr.h> | 41 | #include <linux/idr.h> |
42 | #include <linux/nodemask.h> | 42 | #include <linux/nodemask.h> |
43 | #include <linux/module.h> | 43 | #include <linux/module.h> |
44 | #include <linux/poison.h> | ||
44 | 45 | ||
45 | #include <asm/pgalloc.h> | 46 | #include <asm/pgalloc.h> |
46 | #include <asm/page.h> | 47 | #include <asm/page.h> |
@@ -90,7 +91,7 @@ void free_initmem(void) | |||
90 | 91 | ||
91 | addr = (unsigned long)__init_begin; | 92 | addr = (unsigned long)__init_begin; |
92 | for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { | 93 | for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { |
93 | memset((void *)addr, 0xcc, PAGE_SIZE); | 94 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
94 | ClearPageReserved(virt_to_page(addr)); | 95 | ClearPageReserved(virt_to_page(addr)); |
95 | init_page_count(virt_to_page(addr)); | 96 | init_page_count(virt_to_page(addr)); |
96 | free_page(addr); | 97 | free_page(addr); |
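The 0xcc memset of freed init memory now uses the named constant from the new <linux/poison.h>; the same conversion shows up below for sparc64, for x86_64 (POISON_FREE_INITMEM and POISON_FREE_INITDATA), for the firestream driver (ATM_POISON_FREE) and for dmapool, whose local POOL_POISON_* defines are removed. A minimal sketch of the pattern at a call site:

    #include <linux/poison.h>
    #include <linux/string.h>

    /* Sketch only: the poison byte is a named constant from <linux/poison.h>
     * instead of a magic number repeated at each call site. */
    static void poison_freed_initmem(void *start, unsigned long len)
    {
            memset(start, POISON_FREE_INITMEM, len);
    }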
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 69f3b9a20beb..089d939a0b3e 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -114,15 +114,20 @@ void online_page(struct page *page) | |||
114 | num_physpages++; | 114 | num_physpages++; |
115 | } | 115 | } |
116 | 116 | ||
117 | int __devinit add_memory(u64 start, u64 size) | 117 | #ifdef CONFIG_NUMA |
118 | int memory_add_physaddr_to_nid(u64 start) | ||
119 | { | ||
120 | return hot_add_scn_to_nid(start); | ||
121 | } | ||
122 | #endif | ||
123 | |||
124 | int __devinit arch_add_memory(int nid, u64 start, u64 size) | ||
118 | { | 125 | { |
119 | struct pglist_data *pgdata; | 126 | struct pglist_data *pgdata; |
120 | struct zone *zone; | 127 | struct zone *zone; |
121 | int nid; | ||
122 | unsigned long start_pfn = start >> PAGE_SHIFT; | 128 | unsigned long start_pfn = start >> PAGE_SHIFT; |
123 | unsigned long nr_pages = size >> PAGE_SHIFT; | 129 | unsigned long nr_pages = size >> PAGE_SHIFT; |
124 | 130 | ||
125 | nid = hot_add_scn_to_nid(start); | ||
126 | pgdata = NODE_DATA(nid); | 131 | pgdata = NODE_DATA(nid); |
127 | 132 | ||
128 | start = (unsigned long)__va(start); | 133 | start = (unsigned long)__va(start); |
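This is the powerpc half of the memory-hotplug interface split: the arch now provides arch_add_memory(nid, start, size) and, under CONFIG_NUMA, memory_add_physaddr_to_nid(), leaving node-id resolution to callers such as the sysfs probe path changed in drivers/base/memory.c below. A sketch of the intended caller-side flow; the generic add_memory(nid, ...) wrapper is assumed to live in common code outside this diff:

    #include <linux/memory_hotplug.h>
    #include <linux/types.h>

    /* Sketch, not from the patch: resolve a node id first, then hand the
     * range to the generic layer, which ends up in arch_add_memory(). */
    static int probe_and_add(u64 phys_addr, u64 size)
    {
            int nid = 0;

    #ifdef CONFIG_NUMA
            nid = memory_add_physaddr_to_nid(phys_addr);
    #endif
            return add_memory(nid, phys_addr, size);
    }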
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index aa98cb3b59d8..fbe23933f731 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -334,7 +334,7 @@ out: | |||
334 | return nid; | 334 | return nid; |
335 | } | 335 | } |
336 | 336 | ||
337 | static int cpu_numa_callback(struct notifier_block *nfb, | 337 | static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, |
338 | unsigned long action, | 338 | unsigned long action, |
339 | void *hcpu) | 339 | void *hcpu) |
340 | { | 340 | { |
@@ -609,14 +609,15 @@ static void __init *careful_allocation(int nid, unsigned long size, | |||
609 | return (void *)ret; | 609 | return (void *)ret; |
610 | } | 610 | } |
611 | 611 | ||
612 | static struct notifier_block __cpuinitdata ppc64_numa_nb = { | ||
613 | .notifier_call = cpu_numa_callback, | ||
614 | .priority = 1 /* Must run before sched domains notifier. */ | ||
615 | }; | ||
616 | |||
612 | void __init do_init_bootmem(void) | 617 | void __init do_init_bootmem(void) |
613 | { | 618 | { |
614 | int nid; | 619 | int nid; |
615 | unsigned int i; | 620 | unsigned int i; |
616 | static struct notifier_block ppc64_numa_nb = { | ||
617 | .notifier_call = cpu_numa_callback, | ||
618 | .priority = 1 /* Must run before sched domains notifier. */ | ||
619 | }; | ||
620 | 621 | ||
621 | min_low_pfn = 0; | 622 | min_low_pfn = 0; |
622 | max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; | 623 | max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; |
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c index 3068b429b031..a656d810a44a 100644 --- a/arch/powerpc/platforms/cell/spufs/switch.c +++ b/arch/powerpc/platforms/cell/spufs/switch.c | |||
@@ -2203,7 +2203,7 @@ void spu_init_csa(struct spu_state *csa) | |||
2203 | 2203 | ||
2204 | memset(lscsa, 0, sizeof(struct spu_lscsa)); | 2204 | memset(lscsa, 0, sizeof(struct spu_lscsa)); |
2205 | csa->lscsa = lscsa; | 2205 | csa->lscsa = lscsa; |
2206 | csa->register_lock = SPIN_LOCK_UNLOCKED; | 2206 | spin_lock_init(&csa->register_lock); |
2207 | 2207 | ||
2208 | /* Set LS pages reserved to allow for user-space mapping. */ | 2208 | /* Set LS pages reserved to allow for user-space mapping. */ |
2209 | for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) | 2209 | for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) |
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index 047f954a89eb..93e7505debc5 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c | |||
@@ -546,7 +546,7 @@ struct pmf_device { | |||
546 | }; | 546 | }; |
547 | 547 | ||
548 | static LIST_HEAD(pmf_devices); | 548 | static LIST_HEAD(pmf_devices); |
549 | static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; | 549 | static DEFINE_SPINLOCK(pmf_lock); |
550 | static DEFINE_MUTEX(pmf_irq_mutex); | 550 | static DEFINE_MUTEX(pmf_irq_mutex); |
551 | 551 | ||
552 | static void pmf_release_device(struct kref *kref) | 552 | static void pmf_release_device(struct kref *kref) |
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c index 8f2d12935b99..45ccc687e57c 100644 --- a/arch/powerpc/platforms/pseries/eeh_event.c +++ b/arch/powerpc/platforms/pseries/eeh_event.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* EEH event workqueue setup. */ | 37 | /* EEH event workqueue setup. */ |
38 | static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED; | 38 | static DEFINE_SPINLOCK(eeh_eventlist_lock); |
39 | LIST_HEAD(eeh_eventlist); | 39 | LIST_HEAD(eeh_eventlist); |
40 | static void eeh_thread_launcher(void *); | 40 | static void eeh_thread_launcher(void *); |
41 | DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); | 41 | DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); |
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c index 74e0d31a3559..615350d46b52 100644 --- a/arch/powerpc/sysdev/mmio_nvram.c +++ b/arch/powerpc/sysdev/mmio_nvram.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | static void __iomem *mmio_nvram_start; | 33 | static void __iomem *mmio_nvram_start; |
34 | static long mmio_nvram_len; | 34 | static long mmio_nvram_len; |
35 | static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED; | 35 | static DEFINE_SPINLOCK(mmio_nvram_lock); |
36 | 36 | ||
37 | static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index) | 37 | static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index) |
38 | { | 38 | { |
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index 1f79e84ab464..4b4607d89bfa 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c | |||
@@ -475,7 +475,7 @@ int __init ppc_init(void) | |||
475 | 475 | ||
476 | /* register CPU devices */ | 476 | /* register CPU devices */ |
477 | for_each_possible_cpu(i) | 477 | for_each_possible_cpu(i) |
478 | register_cpu(&cpu_devices[i], i, NULL); | 478 | register_cpu(&cpu_devices[i], i); |
479 | 479 | ||
480 | /* call platform init */ | 480 | /* call platform init */ |
481 | if (ppc_md.init != NULL) { | 481 | if (ppc_md.init != NULL) { |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 9a22434a580c..54d35c130907 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -652,7 +652,7 @@ appldata_cpu_notify(struct notifier_block *self, | |||
652 | return NOTIFY_OK; | 652 | return NOTIFY_OK; |
653 | } | 653 | } |
654 | 654 | ||
655 | static struct notifier_block appldata_nb = { | 655 | static struct notifier_block __devinitdata appldata_nb = { |
656 | .notifier_call = appldata_cpu_notify, | 656 | .notifier_call = appldata_cpu_notify, |
657 | }; | 657 | }; |
658 | 658 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 343120c9223d..8e03219eea76 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -869,7 +869,7 @@ static int __init topology_init(void) | |||
869 | int ret; | 869 | int ret; |
870 | 870 | ||
871 | for_each_possible_cpu(cpu) { | 871 | for_each_possible_cpu(cpu) { |
872 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); | 872 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu); |
873 | if (ret) | 873 | if (ret) |
874 | printk(KERN_WARNING "topology_init: register_cpu %d " | 874 | printk(KERN_WARNING "topology_init: register_cpu %d " |
875 | "failed (%d)\n", cpu, ret); | 875 | "failed (%d)\n", cpu, ret); |
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index bb229ef030f3..9af22116c9a2 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -402,7 +402,7 @@ static int __init topology_init(void) | |||
402 | int cpu_id; | 402 | int cpu_id; |
403 | 403 | ||
404 | for_each_possible_cpu(cpu_id) | 404 | for_each_possible_cpu(cpu_id) |
405 | register_cpu(&cpu[cpu_id], cpu_id, NULL); | 405 | register_cpu(&cpu[cpu_id], cpu_id); |
406 | 406 | ||
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c index d2711c9c9d13..da98d8dbcf95 100644 --- a/arch/sh64/kernel/setup.c +++ b/arch/sh64/kernel/setup.c | |||
@@ -309,7 +309,7 @@ static struct cpu cpu[1]; | |||
309 | 309 | ||
310 | static int __init topology_init(void) | 310 | static int __init topology_init(void) |
311 | { | 311 | { |
312 | return register_cpu(cpu, 0, NULL); | 312 | return register_cpu(cpu, 0); |
313 | } | 313 | } |
314 | 314 | ||
315 | subsys_initcall(topology_init); | 315 | subsys_initcall(topology_init); |
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index a6a7d8168346..116d9632002d 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c | |||
@@ -537,7 +537,7 @@ static int __init topology_init(void) | |||
537 | for_each_possible_cpu(i) { | 537 | for_each_possible_cpu(i) { |
538 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); | 538 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); |
539 | if (p) { | 539 | if (p) { |
540 | register_cpu(p, i, NULL); | 540 | register_cpu(p, i); |
541 | err = 0; | 541 | err = 0; |
542 | } | 542 | } |
543 | } | 543 | } |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5c2bcf354ce6..cb75a27adb51 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/initrd.h> | 18 | #include <linux/initrd.h> |
19 | #include <linux/swap.h> | 19 | #include <linux/swap.h> |
20 | #include <linux/pagemap.h> | 20 | #include <linux/pagemap.h> |
21 | #include <linux/poison.h> | ||
21 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
22 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
23 | #include <linux/kprobes.h> | 24 | #include <linux/kprobes.h> |
@@ -1520,7 +1521,7 @@ void free_initmem(void) | |||
1520 | page = (addr + | 1521 | page = (addr + |
1521 | ((unsigned long) __va(kern_base)) - | 1522 | ((unsigned long) __va(kern_base)) - |
1522 | ((unsigned long) KERNBASE)); | 1523 | ((unsigned long) KERNBASE)); |
1523 | memset((void *)addr, 0xcc, PAGE_SIZE); | 1524 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
1524 | p = virt_to_page(page); | 1525 | p = virt_to_page(page); |
1525 | 1526 | ||
1526 | ClearPageReserved(p); | 1527 | ClearPageReserved(p); |
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 7290e72b9a34..22cac4487b57 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S | |||
@@ -588,7 +588,7 @@ END(common_interrupt) | |||
588 | */ | 588 | */ |
589 | .macro apicinterrupt num,func | 589 | .macro apicinterrupt num,func |
590 | INTR_FRAME | 590 | INTR_FRAME |
591 | pushq $\num-256 | 591 | pushq $~(\num) |
592 | CFI_ADJUST_CFA_OFFSET 8 | 592 | CFI_ADJUST_CFA_OFFSET 8 |
593 | interrupt \func | 593 | interrupt \func |
594 | jmp ret_from_intr | 594 | jmp ret_from_intr |
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c index 59518d4d4358..3be0a7e4bf08 100644 --- a/arch/x86_64/kernel/irq.c +++ b/arch/x86_64/kernel/irq.c | |||
@@ -115,8 +115,8 @@ skip: | |||
115 | */ | 115 | */ |
116 | asmlinkage unsigned int do_IRQ(struct pt_regs *regs) | 116 | asmlinkage unsigned int do_IRQ(struct pt_regs *regs) |
117 | { | 117 | { |
118 | /* high bits used in ret_from_ code */ | 118 | /* high bit used in ret_from_ code */ |
119 | unsigned irq = regs->orig_rax & 0xff; | 119 | unsigned irq = ~regs->orig_rax; |
120 | 120 | ||
121 | exit_idle(); | 121 | exit_idle(); |
122 | irq_enter(); | 122 | irq_enter(); |
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index acd5816b1a6f..88845674c661 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c | |||
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
629 | #endif | 629 | #endif |
630 | 630 | ||
631 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 631 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
632 | static int | 632 | static __cpuinit int |
633 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 633 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
634 | { | 634 | { |
635 | unsigned int cpu = (unsigned long)hcpu; | 635 | unsigned int cpu = (unsigned long)hcpu; |
@@ -647,7 +647,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
647 | return NOTIFY_OK; | 647 | return NOTIFY_OK; |
648 | } | 648 | } |
649 | 649 | ||
650 | static struct notifier_block mce_cpu_notifier = { | 650 | static struct notifier_block __cpuinitdata mce_cpu_notifier = { |
651 | .notifier_call = mce_cpu_callback, | 651 | .notifier_call = mce_cpu_callback, |
652 | }; | 652 | }; |
653 | 653 | ||
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index acee4bc3f6fa..5a1c0a3bf872 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c | |||
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) | |||
135 | 135 | ||
136 | cpu = smp_processor_id(); | 136 | cpu = smp_processor_id(); |
137 | /* | 137 | /* |
138 | * orig_rax contains the interrupt vector - 256. | 138 | * orig_rax contains the negated interrupt vector. |
139 | * Use that to determine where the sender put the data. | 139 | * Use that to determine where the sender put the data. |
140 | */ | 140 | */ |
141 | sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START; | 141 | sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START; |
142 | f = &per_cpu(flush_state, sender); | 142 | f = &per_cpu(flush_state, sender); |
143 | 143 | ||
144 | if (!cpu_isset(cpu, f->flush_cpumask)) | 144 | if (!cpu_isset(cpu, f->flush_cpumask)) |
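The x86_64 interrupt stubs now push the bitwise complement of the vector instead of vector - 256, and both do_IRQ() and the TLB-flush IPI handler recover it with ~orig_rax rather than masking with 0xff or adding 256 back. A sketch of the encode/decode pair:

    /* Sketch of the new encoding; 'vector' is what the apicinterrupt macro
     * receives as \num in entry.S. */
    static inline long encode_vector(unsigned int vector)
    {
            return ~(long)vector;           /* pushq $~(\num) */
    }

    static inline unsigned int decode_vector(long orig_rax)
    {
            return ~orig_rax;               /* irq = ~regs->orig_rax */
    }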
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 4e9755179ecf..540c0ccbcccc 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c | |||
@@ -455,10 +455,12 @@ cpumask_t cpu_coregroup_map(int cpu) | |||
455 | struct cpuinfo_x86 *c = cpu_data + cpu; | 455 | struct cpuinfo_x86 *c = cpu_data + cpu; |
456 | /* | 456 | /* |
457 | * For perf, we return last level cache shared map. | 457 | * For perf, we return last level cache shared map. |
458 | * TBD: when power saving sched policy is added, we will return | 458 | * And for power savings, we return cpu_core_map |
459 | * cpu_core_map when power saving policy is enabled | ||
460 | */ | 459 | */ |
461 | return c->llc_shared_map; | 460 | if (sched_mc_power_savings || sched_smt_power_savings) |
461 | return cpu_core_map[cpu]; | ||
462 | else | ||
463 | return c->llc_shared_map; | ||
462 | } | 464 | } |
463 | 465 | ||
464 | /* representing cpus for which sibling maps can be computed */ | 466 | /* representing cpus for which sibling maps can be computed */ |
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index 02add1d1dfa8..95bd232ff0cf 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <linux/poison.h> | ||
26 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
28 | #include <linux/memory_hotplug.h> | 29 | #include <linux/memory_hotplug.h> |
@@ -506,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size) | |||
506 | /* | 507 | /* |
507 | * Memory hotplug specific functions | 508 | * Memory hotplug specific functions |
508 | */ | 509 | */ |
509 | #if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE) | ||
510 | |||
511 | void online_page(struct page *page) | 510 | void online_page(struct page *page) |
512 | { | 511 | { |
513 | ClearPageReserved(page); | 512 | ClearPageReserved(page); |
@@ -517,31 +516,17 @@ void online_page(struct page *page) | |||
517 | num_physpages++; | 516 | num_physpages++; |
518 | } | 517 | } |
519 | 518 | ||
520 | #ifndef CONFIG_MEMORY_HOTPLUG | 519 | #ifdef CONFIG_MEMORY_HOTPLUG |
521 | /* | 520 | /* |
522 | * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance, | 521 | * XXX: memory_add_physaddr_to_nid() is to find node id from physical address |
523 | * just online the pages. | 522 | * via probe interface of sysfs. If acpi notifies hot-add event, then it |
523 | * can tell node id by searching dsdt. But, probe interface doesn't have | ||
524 | * node id. So, return 0 as node id at this time. | ||
524 | */ | 525 | */ |
525 | int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages) | 526 | #ifdef CONFIG_NUMA |
527 | int memory_add_physaddr_to_nid(u64 start) | ||
526 | { | 528 | { |
527 | int err = -EIO; | 529 | return 0; |
528 | unsigned long pfn; | ||
529 | unsigned long total = 0, mem = 0; | ||
530 | for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { | ||
531 | if (pfn_valid(pfn)) { | ||
532 | online_page(pfn_to_page(pfn)); | ||
533 | err = 0; | ||
534 | mem++; | ||
535 | } | ||
536 | total++; | ||
537 | } | ||
538 | if (!err) { | ||
539 | z->spanned_pages += total; | ||
540 | z->present_pages += mem; | ||
541 | z->zone_pgdat->node_spanned_pages += total; | ||
542 | z->zone_pgdat->node_present_pages += mem; | ||
543 | } | ||
544 | return err; | ||
545 | } | 530 | } |
546 | #endif | 531 | #endif |
547 | 532 | ||
@@ -549,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages) | |||
549 | * Memory is added always to NORMAL zone. This means you will never get | 534 | * Memory is added always to NORMAL zone. This means you will never get |
550 | * additional DMA/DMA32 memory. | 535 | * additional DMA/DMA32 memory. |
551 | */ | 536 | */ |
552 | int add_memory(u64 start, u64 size) | 537 | int arch_add_memory(int nid, u64 start, u64 size) |
553 | { | 538 | { |
554 | struct pglist_data *pgdat = NODE_DATA(0); | 539 | struct pglist_data *pgdat = NODE_DATA(nid); |
555 | struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2; | 540 | struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2; |
556 | unsigned long start_pfn = start >> PAGE_SHIFT; | 541 | unsigned long start_pfn = start >> PAGE_SHIFT; |
557 | unsigned long nr_pages = size >> PAGE_SHIFT; | 542 | unsigned long nr_pages = size >> PAGE_SHIFT; |
@@ -568,7 +553,7 @@ error: | |||
568 | printk("%s: Problem encountered in __add_pages!\n", __func__); | 553 | printk("%s: Problem encountered in __add_pages!\n", __func__); |
569 | return ret; | 554 | return ret; |
570 | } | 555 | } |
571 | EXPORT_SYMBOL_GPL(add_memory); | 556 | EXPORT_SYMBOL_GPL(arch_add_memory); |
572 | 557 | ||
573 | int remove_memory(u64 start, u64 size) | 558 | int remove_memory(u64 start, u64 size) |
574 | { | 559 | { |
@@ -576,7 +561,33 @@ int remove_memory(u64 start, u64 size) | |||
576 | } | 561 | } |
577 | EXPORT_SYMBOL_GPL(remove_memory); | 562 | EXPORT_SYMBOL_GPL(remove_memory); |
578 | 563 | ||
579 | #endif | 564 | #else /* CONFIG_MEMORY_HOTPLUG */ |
565 | /* | ||
566 | * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance, | ||
567 | * just online the pages. | ||
568 | */ | ||
569 | int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages) | ||
570 | { | ||
571 | int err = -EIO; | ||
572 | unsigned long pfn; | ||
573 | unsigned long total = 0, mem = 0; | ||
574 | for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { | ||
575 | if (pfn_valid(pfn)) { | ||
576 | online_page(pfn_to_page(pfn)); | ||
577 | err = 0; | ||
578 | mem++; | ||
579 | } | ||
580 | total++; | ||
581 | } | ||
582 | if (!err) { | ||
583 | z->spanned_pages += total; | ||
584 | z->present_pages += mem; | ||
585 | z->zone_pgdat->node_spanned_pages += total; | ||
586 | z->zone_pgdat->node_present_pages += mem; | ||
587 | } | ||
588 | return err; | ||
589 | } | ||
590 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
580 | 591 | ||
581 | static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules, | 592 | static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules, |
582 | kcore_vsyscall; | 593 | kcore_vsyscall; |
@@ -650,7 +661,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
650 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 661 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
651 | ClearPageReserved(virt_to_page(addr)); | 662 | ClearPageReserved(virt_to_page(addr)); |
652 | init_page_count(virt_to_page(addr)); | 663 | init_page_count(virt_to_page(addr)); |
653 | memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE); | 664 | memset((void *)(addr & ~(PAGE_SIZE-1)), |
665 | POISON_FREE_INITMEM, PAGE_SIZE); | ||
654 | free_page(addr); | 666 | free_page(addr); |
655 | totalram_pages++; | 667 | totalram_pages++; |
656 | } | 668 | } |
@@ -658,7 +670,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
658 | 670 | ||
659 | void free_initmem(void) | 671 | void free_initmem(void) |
660 | { | 672 | { |
661 | memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin); | 673 | memset(__initdata_begin, POISON_FREE_INITDATA, |
674 | __initdata_end - __initdata_begin); | ||
662 | free_init_pages("unused kernel memory", | 675 | free_init_pages("unused kernel memory", |
663 | (unsigned long)(&__init_begin), | 676 | (unsigned long)(&__init_begin), |
664 | (unsigned long)(&__init_end)); | 677 | (unsigned long)(&__init_end)); |
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 937d81f62f43..fe14909f45e0 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | extern volatile unsigned long wall_jiffies; | 30 | extern volatile unsigned long wall_jiffies; |
31 | 31 | ||
32 | spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; | 32 | DEFINE_SPINLOCK(rtc_lock); |
33 | EXPORT_SYMBOL(rtc_lock); | 33 | EXPORT_SYMBOL(rtc_lock); |
34 | 34 | ||
35 | 35 | ||
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 225d64d73f04..27e409089a7b 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c | |||
@@ -461,7 +461,7 @@ void show_code(unsigned int *pc) | |||
461 | } | 461 | } |
462 | } | 462 | } |
463 | 463 | ||
464 | spinlock_t die_lock = SPIN_LOCK_UNLOCKED; | 464 | DEFINE_SPINLOCK(die_lock); |
465 | 465 | ||
466 | void die(const char * str, struct pt_regs * regs, long err) | 466 | void die(const char * str, struct pt_regs * regs, long err) |
467 | { | 467 | { |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index c04422a502da..eee03a3876a3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -3403,7 +3403,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action, | |||
3403 | } | 3403 | } |
3404 | 3404 | ||
3405 | 3405 | ||
3406 | static struct notifier_block blk_cpu_notifier = { | 3406 | static struct notifier_block __devinitdata blk_cpu_notifier = { |
3407 | .notifier_call = blk_cpu_notify, | 3407 | .notifier_call = blk_cpu_notify, |
3408 | }; | 3408 | }; |
3409 | 3409 | ||
@@ -3541,9 +3541,7 @@ int __init blk_dev_init(void) | |||
3541 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); | 3541 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); |
3542 | 3542 | ||
3543 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); | 3543 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); |
3544 | #ifdef CONFIG_HOTPLUG_CPU | 3544 | register_hotcpu_notifier(&blk_cpu_notifier); |
3545 | register_cpu_notifier(&blk_cpu_notifier); | ||
3546 | #endif | ||
3547 | 3545 | ||
3548 | blk_max_low_pfn = max_low_pfn; | 3546 | blk_max_low_pfn = max_low_pfn; |
3549 | blk_max_pfn = max_pfn; | 3547 | blk_max_pfn = max_pfn; |
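blk_dev_init() no longer wraps the notifier registration in #ifdef CONFIG_HOTPLUG_CPU: register_hotcpu_notifier() is used instead (expected to compile to nothing when CPU hotplug is disabled), and the notifier_block is marked __devinitdata to match. A compact sketch of the converted call site, with placeholder names:

    #include <linux/cpu.h>
    #include <linux/init.h>
    #include <linux/notifier.h>

    /* Sketch only; example_* names are placeholders. */
    static int example_cpu_notify(struct notifier_block *self,
                                  unsigned long action, void *hcpu)
    {
            return NOTIFY_OK;
    }

    static struct notifier_block __devinitdata example_cpu_nb = {
            .notifier_call = example_cpu_notify,
    };

    static int __init example_subsys_init(void)
    {
            /* no #ifdef CONFIG_HOTPLUG_CPU needed around this call */
            register_hotcpu_notifier(&example_cpu_nb);
            return 0;
    }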
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 94b8d820c512..610d2cc02cf8 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -328,7 +328,7 @@ config ACPI_CONTAINER | |||
328 | config ACPI_HOTPLUG_MEMORY | 328 | config ACPI_HOTPLUG_MEMORY |
329 | tristate "Memory Hotplug" | 329 | tristate "Memory Hotplug" |
330 | depends on ACPI | 330 | depends on ACPI |
331 | depends on MEMORY_HOTPLUG || X86_64 | 331 | depends on MEMORY_HOTPLUG |
332 | default n | 332 | default n |
333 | help | 333 | help |
334 | This driver adds supports for ACPI Memory Hotplug. This driver | 334 | This driver adds supports for ACPI Memory Hotplug. This driver |
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index e0a95ba72371..1012284ff4f7 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
@@ -57,6 +57,7 @@ MODULE_LICENSE("GPL"); | |||
57 | 57 | ||
58 | static int acpi_memory_device_add(struct acpi_device *device); | 58 | static int acpi_memory_device_add(struct acpi_device *device); |
59 | static int acpi_memory_device_remove(struct acpi_device *device, int type); | 59 | static int acpi_memory_device_remove(struct acpi_device *device, int type); |
60 | static int acpi_memory_device_start(struct acpi_device *device); | ||
60 | 61 | ||
61 | static struct acpi_driver acpi_memory_device_driver = { | 62 | static struct acpi_driver acpi_memory_device_driver = { |
62 | .name = ACPI_MEMORY_DEVICE_DRIVER_NAME, | 63 | .name = ACPI_MEMORY_DEVICE_DRIVER_NAME, |
@@ -65,48 +66,79 @@ static struct acpi_driver acpi_memory_device_driver = { | |||
65 | .ops = { | 66 | .ops = { |
66 | .add = acpi_memory_device_add, | 67 | .add = acpi_memory_device_add, |
67 | .remove = acpi_memory_device_remove, | 68 | .remove = acpi_memory_device_remove, |
69 | .start = acpi_memory_device_start, | ||
68 | }, | 70 | }, |
69 | }; | 71 | }; |
70 | 72 | ||
73 | struct acpi_memory_info { | ||
74 | struct list_head list; | ||
75 | u64 start_addr; /* Memory Range start physical addr */ | ||
76 | u64 length; /* Memory Range length */ | ||
77 | unsigned short caching; /* memory cache attribute */ | ||
78 | unsigned short write_protect; /* memory read/write attribute */ | ||
79 | unsigned int enabled:1; | ||
80 | }; | ||
81 | |||
71 | struct acpi_memory_device { | 82 | struct acpi_memory_device { |
72 | acpi_handle handle; | 83 | acpi_handle handle; |
73 | unsigned int state; /* State of the memory device */ | 84 | unsigned int state; /* State of the memory device */ |
74 | unsigned short caching; /* memory cache attribute */ | 85 | struct list_head res_list; |
75 | unsigned short write_protect; /* memory read/write attribute */ | ||
76 | u64 start_addr; /* Memory Range start physical addr */ | ||
77 | u64 length; /* Memory Range length */ | ||
78 | }; | 86 | }; |
79 | 87 | ||
88 | static acpi_status | ||
89 | acpi_memory_get_resource(struct acpi_resource *resource, void *context) | ||
90 | { | ||
91 | struct acpi_memory_device *mem_device = context; | ||
92 | struct acpi_resource_address64 address64; | ||
93 | struct acpi_memory_info *info, *new; | ||
94 | acpi_status status; | ||
95 | |||
96 | status = acpi_resource_to_address64(resource, &address64); | ||
97 | if (ACPI_FAILURE(status) || | ||
98 | (address64.resource_type != ACPI_MEMORY_RANGE)) | ||
99 | return AE_OK; | ||
100 | |||
101 | list_for_each_entry(info, &mem_device->res_list, list) { | ||
102 | /* Can we combine the resource range information? */ | ||
103 | if ((info->caching == address64.info.mem.caching) && | ||
104 | (info->write_protect == address64.info.mem.write_protect) && | ||
105 | (info->start_addr + info->length == address64.minimum)) { | ||
106 | info->length += address64.address_length; | ||
107 | return AE_OK; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL); | ||
112 | if (!new) | ||
113 | return AE_ERROR; | ||
114 | |||
115 | INIT_LIST_HEAD(&new->list); | ||
116 | new->caching = address64.info.mem.caching; | ||
117 | new->write_protect = address64.info.mem.write_protect; | ||
118 | new->start_addr = address64.minimum; | ||
119 | new->length = address64.address_length; | ||
120 | list_add_tail(&new->list, &mem_device->res_list); | ||
121 | |||
122 | return AE_OK; | ||
123 | } | ||
124 | |||
80 | static int | 125 | static int |
81 | acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) | 126 | acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) |
82 | { | 127 | { |
83 | acpi_status status; | 128 | acpi_status status; |
84 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 129 | struct acpi_memory_info *info, *n; |
85 | struct acpi_resource *resource = NULL; | ||
86 | struct acpi_resource_address64 address64; | ||
87 | 130 | ||
88 | ACPI_FUNCTION_TRACE("acpi_memory_get_device_resources"); | 131 | ACPI_FUNCTION_TRACE("acpi_memory_get_device_resources"); |
89 | 132 | ||
90 | /* Get the range from the _CRS */ | 133 | status = acpi_walk_resources(mem_device->handle, METHOD_NAME__CRS, |
91 | status = acpi_get_current_resources(mem_device->handle, &buffer); | 134 | acpi_memory_get_resource, mem_device); |
92 | if (ACPI_FAILURE(status)) | 135 | if (ACPI_FAILURE(status)) { |
93 | return_VALUE(-EINVAL); | 136 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) |
94 | 137 | kfree(info); | |
95 | resource = (struct acpi_resource *)buffer.pointer; | 138 | return -EINVAL; |
96 | status = acpi_resource_to_address64(resource, &address64); | ||
97 | if (ACPI_SUCCESS(status)) { | ||
98 | if (address64.resource_type == ACPI_MEMORY_RANGE) { | ||
99 | /* Populate the structure */ | ||
100 | mem_device->caching = address64.info.mem.caching; | ||
101 | mem_device->write_protect = | ||
102 | address64.info.mem.write_protect; | ||
103 | mem_device->start_addr = address64.minimum; | ||
104 | mem_device->length = address64.address_length; | ||
105 | } | ||
106 | } | 139 | } |
107 | 140 | ||
108 | acpi_os_free(buffer.pointer); | 141 | return 0; |
109 | return_VALUE(0); | ||
110 | } | 142 | } |
111 | 143 | ||
112 | static int | 144 | static int |
@@ -181,7 +213,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device) | |||
181 | 213 | ||
182 | static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | 214 | static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) |
183 | { | 215 | { |
184 | int result; | 216 | int result, num_enabled = 0; |
217 | struct acpi_memory_info *info; | ||
218 | int node; | ||
185 | 219 | ||
186 | ACPI_FUNCTION_TRACE("acpi_memory_enable_device"); | 220 | ACPI_FUNCTION_TRACE("acpi_memory_enable_device"); |
187 | 221 | ||
@@ -194,15 +228,35 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
194 | return result; | 228 | return result; |
195 | } | 229 | } |
196 | 230 | ||
231 | node = acpi_get_node(mem_device->handle); | ||
197 | /* | 232 | /* |
198 | * Tell the VM there is more memory here... | 233 | * Tell the VM there is more memory here... |
199 | * Note: Assume that this function returns zero on success | 234 | * Note: Assume that this function returns zero on success |
235 | * We don't have memory-hot-add rollback function,now. | ||
236 | * (i.e. memory-hot-remove function) | ||
200 | */ | 237 | */ |
201 | result = add_memory(mem_device->start_addr, mem_device->length); | 238 | list_for_each_entry(info, &mem_device->res_list, list) { |
202 | if (result) { | 239 | u64 start_pfn, end_pfn; |
240 | |||
241 | start_pfn = info->start_addr >> PAGE_SHIFT; | ||
242 | end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT; | ||
243 | |||
244 | if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) { | ||
245 | /* already enabled. try next area */ | ||
246 | num_enabled++; | ||
247 | continue; | ||
248 | } | ||
249 | |||
250 | result = add_memory(node, info->start_addr, info->length); | ||
251 | if (result) | ||
252 | continue; | ||
253 | info->enabled = 1; | ||
254 | num_enabled++; | ||
255 | } | ||
256 | if (!num_enabled) { | ||
203 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n")); | 257 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n")); |
204 | mem_device->state = MEMORY_INVALID_STATE; | 258 | mem_device->state = MEMORY_INVALID_STATE; |
205 | return result; | 259 | return -EINVAL; |
206 | } | 260 | } |
207 | 261 | ||
208 | return result; | 262 | return result; |
@@ -246,8 +300,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) | |||
246 | static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) | 300 | static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) |
247 | { | 301 | { |
248 | int result; | 302 | int result; |
249 | u64 start = mem_device->start_addr; | 303 | struct acpi_memory_info *info, *n; |
250 | u64 len = mem_device->length; | ||
251 | 304 | ||
252 | ACPI_FUNCTION_TRACE("acpi_memory_disable_device"); | 305 | ACPI_FUNCTION_TRACE("acpi_memory_disable_device"); |
253 | 306 | ||
@@ -255,10 +308,13 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) | |||
255 | * Ask the VM to offline this memory range. | 308 | * Ask the VM to offline this memory range. |
256 | * Note: Assume that this function returns zero on success | 309 | * Note: Assume that this function returns zero on success |
257 | */ | 310 | */ |
258 | result = remove_memory(start, len); | 311 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) { |
259 | if (result) { | 312 | if (info->enabled) { |
260 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n")); | 313 | result = remove_memory(info->start_addr, info->length); |
261 | return_VALUE(result); | 314 | if (result) |
315 | return result; | ||
316 | } | ||
317 | kfree(info); | ||
262 | } | 318 | } |
263 | 319 | ||
264 | /* Power-off and eject the device */ | 320 | /* Power-off and eject the device */ |
@@ -356,6 +412,7 @@ static int acpi_memory_device_add(struct acpi_device *device) | |||
356 | return_VALUE(-ENOMEM); | 412 | return_VALUE(-ENOMEM); |
357 | memset(mem_device, 0, sizeof(struct acpi_memory_device)); | 413 | memset(mem_device, 0, sizeof(struct acpi_memory_device)); |
358 | 414 | ||
415 | INIT_LIST_HEAD(&mem_device->res_list); | ||
359 | mem_device->handle = device->handle; | 416 | mem_device->handle = device->handle; |
360 | sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); | 417 | sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); |
361 | sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); | 418 | sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); |
@@ -391,6 +448,25 @@ static int acpi_memory_device_remove(struct acpi_device *device, int type) | |||
391 | return_VALUE(0); | 448 | return_VALUE(0); |
392 | } | 449 | } |
393 | 450 | ||
451 | static int acpi_memory_device_start (struct acpi_device *device) | ||
452 | { | ||
453 | struct acpi_memory_device *mem_device; | ||
454 | int result = 0; | ||
455 | |||
456 | ACPI_FUNCTION_TRACE("acpi_memory_device_start"); | ||
457 | |||
458 | mem_device = acpi_driver_data(device); | ||
459 | |||
460 | if (!acpi_memory_check_device(mem_device)) { | ||
461 | /* call add_memory func */ | ||
462 | result = acpi_memory_enable_device(mem_device); | ||
463 | if (result) | ||
464 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | ||
465 | "Error in acpi_memory_enable_device\n")); | ||
466 | } | ||
467 | return_VALUE(result); | ||
468 | } | ||
469 | |||
394 | /* | 470 | /* |
395 | * Helper function to check for memory device | 471 | * Helper function to check for memory device |
396 | */ | 472 | */ |
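The ACPI memory-hotplug driver stops assuming a single memory range per device: instead of reading only the first _CRS descriptor, it walks all of them with acpi_walk_resources(), collects them as struct acpi_memory_info entries on a per-device res_list (merging a range into its predecessor when the attributes match and the addresses are contiguous), and enables or disables each entry individually; a new .start handler performs the actual add_memory() calls. A sketch of the merge test the callback applies, using an abbreviated struct:

    #include <linux/list.h>
    #include <linux/types.h>

    /* Sketch of the coalescing rule in acpi_memory_get_resource(): a new
     * _CRS range is folded into an existing entry when the cache and
     * write-protect attributes match and it starts exactly where that
     * entry ends.  The struct below is abbreviated from acpi_memory_info. */
    struct mem_range {
            struct list_head list;
            u64 start_addr;
            u64 length;
            unsigned short caching;
            unsigned short write_protect;
    };

    static int can_merge(const struct mem_range *prev, const struct mem_range *new)
    {
            return prev->caching == new->caching &&
                   prev->write_protect == new->write_protect &&
                   prev->start_addr + prev->length == new->start_addr;
    }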
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index e2c1a16078c9..13d6d5bdea26 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c | |||
@@ -254,5 +254,18 @@ int acpi_get_pxm(acpi_handle h) | |||
254 | } while (ACPI_SUCCESS(status)); | 254 | } while (ACPI_SUCCESS(status)); |
255 | return -1; | 255 | return -1; |
256 | } | 256 | } |
257 | |||
258 | EXPORT_SYMBOL(acpi_get_pxm); | 257 | EXPORT_SYMBOL(acpi_get_pxm); |
258 | |||
259 | int acpi_get_node(acpi_handle *handle) | ||
260 | { | ||
261 | int pxm, node = -1; | ||
262 | |||
263 | ACPI_FUNCTION_TRACE("acpi_get_node"); | ||
264 | |||
265 | pxm = acpi_get_pxm(handle); | ||
266 | if (pxm >= 0) | ||
267 | node = acpi_map_pxm_to_node(pxm); | ||
268 | |||
269 | return_VALUE(node); | ||
270 | } | ||
271 | EXPORT_SYMBOL(acpi_get_node); | ||
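drivers/acpi/numa.c grows acpi_get_node(), a small wrapper that turns a device handle into a Linux node id by reading the proximity domain with acpi_get_pxm() and mapping it with acpi_map_pxm_to_node(); it returns -1 when no proximity information is available. The memory-hotplug driver above uses it to choose the nid passed to add_memory(). A sketch of that consumer side, simplified and without error handling:

    /* Sketch mirroring acpi_memory_enable_device(): resolve the node once
     * per device, then add each discovered range to it. */
    static void enable_ranges(struct acpi_memory_device *mem_device)
    {
            struct acpi_memory_info *info;
            int node = acpi_get_node(mem_device->handle);   /* -1 if unknown */

            list_for_each_entry(info, &mem_device->res_list, list)
                    add_memory(node, info->start_addr, info->length);
    }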
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index f2eeaf9dc56a..1bca86edf570 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
36 | #include <linux/poison.h> | ||
36 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
37 | #include <linux/atm.h> | 38 | #include <linux/atm.h> |
38 | #include <linux/atmdev.h> | 39 | #include <linux/atmdev.h> |
@@ -754,7 +755,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) | |||
754 | fs_kfree_skb (skb); | 755 | fs_kfree_skb (skb); |
755 | 756 | ||
756 | fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td); | 757 | fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td); |
757 | memset (td, 0x12, sizeof (struct FS_BPENTRY)); | 758 | memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY)); |
758 | kfree (td); | 759 | kfree (td); |
759 | break; | 760 | break; |
760 | default: | 761 | default: |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index dd712b24ec91..4bef76a2f3f2 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/cpu.h> | 8 | #include <linux/cpu.h> |
9 | #include <linux/topology.h> | 9 | #include <linux/topology.h> |
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | #include <linux/node.h> | ||
11 | 12 | ||
12 | #include "base.h" | 13 | #include "base.h" |
13 | 14 | ||
@@ -57,13 +58,12 @@ static void __devinit register_cpu_control(struct cpu *cpu) | |||
57 | { | 58 | { |
58 | sysdev_create_file(&cpu->sysdev, &attr_online); | 59 | sysdev_create_file(&cpu->sysdev, &attr_online); |
59 | } | 60 | } |
60 | void unregister_cpu(struct cpu *cpu, struct node *root) | 61 | void unregister_cpu(struct cpu *cpu) |
61 | { | 62 | { |
62 | int logical_cpu = cpu->sysdev.id; | 63 | int logical_cpu = cpu->sysdev.id; |
63 | 64 | ||
64 | if (root) | 65 | unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu)); |
65 | sysfs_remove_link(&root->sysdev.kobj, | 66 | |
66 | kobject_name(&cpu->sysdev.kobj)); | ||
67 | sysdev_remove_file(&cpu->sysdev, &attr_online); | 67 | sysdev_remove_file(&cpu->sysdev, &attr_online); |
68 | 68 | ||
69 | sysdev_unregister(&cpu->sysdev); | 69 | sysdev_unregister(&cpu->sysdev); |
@@ -109,23 +109,21 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); | |||
109 | * | 109 | * |
110 | * Initialize and register the CPU device. | 110 | * Initialize and register the CPU device. |
111 | */ | 111 | */ |
112 | int __devinit register_cpu(struct cpu *cpu, int num, struct node *root) | 112 | int __devinit register_cpu(struct cpu *cpu, int num) |
113 | { | 113 | { |
114 | int error; | 114 | int error; |
115 | |||
116 | cpu->node_id = cpu_to_node(num); | 115 | cpu->node_id = cpu_to_node(num); |
117 | cpu->sysdev.id = num; | 116 | cpu->sysdev.id = num; |
118 | cpu->sysdev.cls = &cpu_sysdev_class; | 117 | cpu->sysdev.cls = &cpu_sysdev_class; |
119 | 118 | ||
120 | error = sysdev_register(&cpu->sysdev); | 119 | error = sysdev_register(&cpu->sysdev); |
121 | if (!error && root) | 120 | |
122 | error = sysfs_create_link(&root->sysdev.kobj, | ||
123 | &cpu->sysdev.kobj, | ||
124 | kobject_name(&cpu->sysdev.kobj)); | ||
125 | if (!error && !cpu->no_control) | 121 | if (!error && !cpu->no_control) |
126 | register_cpu_control(cpu); | 122 | register_cpu_control(cpu); |
127 | if (!error) | 123 | if (!error) |
128 | cpu_sys_devices[num] = &cpu->sysdev; | 124 | cpu_sys_devices[num] = &cpu->sysdev; |
125 | if (!error) | ||
126 | register_cpu_under_node(num, cpu_to_node(num)); | ||
129 | 127 | ||
130 | #ifdef CONFIG_KEXEC | 128 | #ifdef CONFIG_KEXEC |
131 | if (!error) | 129 | if (!error) |
@@ -145,5 +143,13 @@ EXPORT_SYMBOL_GPL(get_cpu_sysdev); | |||
145 | 143 | ||
146 | int __init cpu_dev_init(void) | 144 | int __init cpu_dev_init(void) |
147 | { | 145 | { |
148 | return sysdev_class_register(&cpu_sysdev_class); | 146 | int err; |
147 | |||
148 | err = sysdev_class_register(&cpu_sysdev_class); | ||
149 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
150 | if (!err) | ||
151 | err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); | ||
152 | #endif | ||
153 | |||
154 | return err; | ||
149 | } | 155 | } |
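This is the core of the register_cpu() interface change rippling through the architecture hunks above: the struct node *root argument is gone, the per-node symlink is handled by register_cpu_under_node()/unregister_cpu_under_node() (added in the node.c hunk below), and cpu_dev_init() additionally creates the multi-core/SMT power-saving scheduler attributes when CONFIG_SCHED_MC or CONFIG_SCHED_SMT is set, which is what the smpboot.c cpu_coregroup_map() change keys off. The affected interfaces as this series appears to leave them; exact header placement (linux/cpu.h, linux/node.h) is assumed:

    /* Sketch of the post-change prototypes. */
    int register_cpu(struct cpu *cpu, int num);
    void unregister_cpu(struct cpu *cpu);

    int register_cpu_under_node(unsigned int cpu, unsigned int nid);
    int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);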
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c index e2f64f91ed05..33c5cce1560b 100644 --- a/drivers/base/dmapool.c +++ b/drivers/base/dmapool.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/dmapool.h> | 7 | #include <linux/dmapool.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/poison.h> | ||
10 | 11 | ||
11 | /* | 12 | /* |
12 | * Pool allocator ... wraps the dma_alloc_coherent page allocator, so | 13 | * Pool allocator ... wraps the dma_alloc_coherent page allocator, so |
@@ -35,8 +36,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ | |||
35 | }; | 36 | }; |
36 | 37 | ||
37 | #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) | 38 | #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) |
38 | #define POOL_POISON_FREED 0xa7 /* !inuse */ | ||
39 | #define POOL_POISON_ALLOCATED 0xa9 /* !initted */ | ||
40 | 39 | ||
41 | static DECLARE_MUTEX (pools_lock); | 40 | static DECLARE_MUTEX (pools_lock); |
42 | 41 | ||
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index dd547af4681a..c6b7d9c4b651 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -306,11 +306,13 @@ static ssize_t | |||
306 | memory_probe_store(struct class *class, const char *buf, size_t count) | 306 | memory_probe_store(struct class *class, const char *buf, size_t count) |
307 | { | 307 | { |
308 | u64 phys_addr; | 308 | u64 phys_addr; |
309 | int nid; | ||
309 | int ret; | 310 | int ret; |
310 | 311 | ||
311 | phys_addr = simple_strtoull(buf, NULL, 0); | 312 | phys_addr = simple_strtoull(buf, NULL, 0); |
312 | 313 | ||
313 | ret = add_memory(phys_addr, PAGES_PER_SECTION << PAGE_SHIFT); | 314 | nid = memory_add_physaddr_to_nid(phys_addr); |
315 | ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT); | ||
314 | 316 | ||
315 | if (ret) | 317 | if (ret) |
316 | count = ret; | 318 | count = ret; |
diff --git a/drivers/base/node.c b/drivers/base/node.c index c80c3aeed004..eae2bdc183bb 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/cpumask.h> | 11 | #include <linux/cpumask.h> |
12 | #include <linux/topology.h> | 12 | #include <linux/topology.h> |
13 | #include <linux/nodemask.h> | 13 | #include <linux/nodemask.h> |
14 | #include <linux/cpu.h> | ||
14 | 15 | ||
15 | static struct sysdev_class node_class = { | 16 | static struct sysdev_class node_class = { |
16 | set_kset_name("node"), | 17 | set_kset_name("node"), |
@@ -190,6 +191,66 @@ void unregister_node(struct node *node) | |||
190 | sysdev_unregister(&node->sysdev); | 191 | sysdev_unregister(&node->sysdev); |
191 | } | 192 | } |
192 | 193 | ||
194 | struct node node_devices[MAX_NUMNODES]; | ||
195 | |||
196 | /* | ||
197 | * register cpu under node | ||
198 | */ | ||
199 | int register_cpu_under_node(unsigned int cpu, unsigned int nid) | ||
200 | { | ||
201 | if (node_online(nid)) { | ||
202 | struct sys_device *obj = get_cpu_sysdev(cpu); | ||
203 | if (!obj) | ||
204 | return 0; | ||
205 | return sysfs_create_link(&node_devices[nid].sysdev.kobj, | ||
206 | &obj->kobj, | ||
207 | kobject_name(&obj->kobj)); | ||
208 | } | ||
209 | |||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) | ||
214 | { | ||
215 | if (node_online(nid)) { | ||
216 | struct sys_device *obj = get_cpu_sysdev(cpu); | ||
217 | if (obj) | ||
218 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | ||
219 | kobject_name(&obj->kobj)); | ||
220 | } | ||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | int register_one_node(int nid) | ||
225 | { | ||
226 | int error = 0; | ||
227 | int cpu; | ||
228 | |||
229 | if (node_online(nid)) { | ||
230 | int p_node = parent_node(nid); | ||
231 | struct node *parent = NULL; | ||
232 | |||
233 | if (p_node != nid) | ||
234 | parent = &node_devices[p_node]; | ||
235 | |||
236 | error = register_node(&node_devices[nid], nid, parent); | ||
237 | |||
238 | /* link cpu under this node */ | ||
239 | for_each_present_cpu(cpu) { | ||
240 | if (cpu_to_node(cpu) == nid) | ||
241 | register_cpu_under_node(cpu, nid); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | return error; | ||
246 | |||
247 | } | ||
248 | |||
249 | void unregister_one_node(int nid) | ||
250 | { | ||
251 | unregister_node(&node_devices[nid]); | ||
252 | } | ||
253 | |||
193 | static int __init register_node_type(void) | 254 | static int __init register_node_type(void) |
194 | { | 255 | { |
195 | return sysdev_class_register(&node_class); | 256 | return sysdev_class_register(&node_class); |
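The new drivers/base/node.c helpers centralize what powerpc (and the other arch topology code touched above) used to open-code: node_devices[] becomes a shared array, register_one_node() registers a node under its parent and links every present CPU belonging to it, register_cpu_under_node()/unregister_cpu_under_node() maintain the per-node CPU symlinks, and unregister_one_node() undoes the registration. A sketch of how a node bring-up path can use them; the resulting sysfs layout (node/nodeN/cpuM symlinks) is an assumption for illustration:

    /* Sketch only; example_* is a placeholder, not a symbol from the patch. */
    static int example_node_and_cpu(int nid, unsigned int new_cpu)
    {
            int err;

            err = register_one_node(nid);   /* node dir + links for present CPUs */
            if (err)
                    return err;

            /* a CPU that shows up afterwards gets linked explicitly, as
             * register_cpu() now does via register_cpu_under_node() */
            return register_cpu_under_node(new_cpu, nid);
    }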
diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 8c52421cbc54..c2d621632383 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c | |||
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev) | |||
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | static int topology_cpu_callback(struct notifier_block *nfb, | 110 | static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, |
111 | unsigned long action, void *hcpu) | 111 | unsigned long action, void *hcpu) |
112 | { | 112 | { |
113 | unsigned int cpu = (unsigned long)hcpu; | 113 | unsigned int cpu = (unsigned long)hcpu; |
@@ -125,7 +125,7 @@ static int topology_cpu_callback(struct notifier_block *nfb, | |||
125 | return NOTIFY_OK; | 125 | return NOTIFY_OK; |
126 | } | 126 | } |
127 | 127 | ||
128 | static struct notifier_block topology_cpu_notifier = | 128 | static struct notifier_block __cpuinitdata topology_cpu_notifier = |
129 | { | 129 | { |
130 | .notifier_call = topology_cpu_callback, | 130 | .notifier_call = topology_cpu_callback, |
131 | }; | 131 | }; |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3610c5729553..410d70cb76fb 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -939,12 +939,35 @@ config MWAVE | |||
939 | config SCx200_GPIO | 939 | config SCx200_GPIO |
940 | tristate "NatSemi SCx200 GPIO Support" | 940 | tristate "NatSemi SCx200 GPIO Support" |
941 | depends on SCx200 | 941 | depends on SCx200 |
942 | select NSC_GPIO | ||
942 | help | 943 | help |
943 | Give userspace access to the GPIO pins on the National | 944 | Give userspace access to the GPIO pins on the National |
944 | Semiconductor SCx200 processors. | 945 | Semiconductor SCx200 processors. |
945 | 946 | ||
946 | If compiled as a module, it will be called scx200_gpio. | 947 | If compiled as a module, it will be called scx200_gpio. |
947 | 948 | ||
949 | config PC8736x_GPIO | ||
950 | tristate "NatSemi PC8736x GPIO Support" | ||
951 | depends on X86 | ||
952 | default SCx200_GPIO # mostly N | ||
953 | select NSC_GPIO # needed for support routines | ||
954 | help | ||
955 | Give userspace access to the GPIO pins on the National | ||
956 | Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip | ||
957 | has multiple functional units, including several managed by | ||
958 | the hwmon/pc87360 driver. Tested with the PC-87366. | ||
959 | |||
960 | If compiled as a module, it will be called pc8736x_gpio. | ||
961 | |||
962 | config NSC_GPIO | ||
963 | tristate "NatSemi Base GPIO Support" | ||
964 | # selected by SCx200_GPIO and PC8736x_GPIO | ||
965 | # what about 2 selectors differing: m != y | ||
966 | help | ||
967 | Common support used (and needed) by the scx200_gpio and | ||
968 | pc8736x_gpio drivers. If those drivers are built as | ||
969 | modules, this one will be too, named nsc_gpio. | ||
970 | |||
948 | config CS5535_GPIO | 971 | config CS5535_GPIO |
949 | tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" | 972 | tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" |
950 | depends on X86_32 | 973 | depends on X86_32 |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 524105597ea7..6e0f4469d8bb 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -82,6 +82,8 @@ obj-$(CONFIG_PPDEV) += ppdev.o | |||
82 | obj-$(CONFIG_NWBUTTON) += nwbutton.o | 82 | obj-$(CONFIG_NWBUTTON) += nwbutton.o |
83 | obj-$(CONFIG_NWFLASH) += nwflash.o | 83 | obj-$(CONFIG_NWFLASH) += nwflash.o |
84 | obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o | 84 | obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o |
85 | obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o | ||
86 | obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o | ||
85 | obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o | 87 | obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o |
86 | obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o | 88 | obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o |
87 | obj-$(CONFIG_TANBAC_TB0219) += tb0219.o | 89 | obj-$(CONFIG_TANBAC_TB0219) += tb0219.o |
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c index cfa7922cb431..d73be4c2db8a 100644 --- a/drivers/char/agp/sgi-agp.c +++ b/drivers/char/agp/sgi-agp.c | |||
@@ -329,9 +329,8 @@ static int __devinit agp_sgi_init(void) | |||
329 | 329 | ||
330 | static void __devexit agp_sgi_cleanup(void) | 330 | static void __devexit agp_sgi_cleanup(void) |
331 | { | 331 | { |
332 | if (sgi_tioca_agp_bridges) | 332 | kfree(sgi_tioca_agp_bridges); |
333 | kfree(sgi_tioca_agp_bridges); | 333 | sgi_tioca_agp_bridges = NULL; |
334 | sgi_tioca_agp_bridges=NULL; | ||
335 | } | 334 | } |
336 | 335 | ||
337 | module_init(agp_sgi_init); | 336 | module_init(agp_sgi_init); |
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h index 6543b9a14c42..d117cc997192 100644 --- a/drivers/char/drm/drm_memory_debug.h +++ b/drivers/char/drm/drm_memory_debug.h | |||
@@ -43,7 +43,7 @@ typedef struct drm_mem_stats { | |||
43 | unsigned long bytes_freed; | 43 | unsigned long bytes_freed; |
44 | } drm_mem_stats_t; | 44 | } drm_mem_stats_t; |
45 | 45 | ||
46 | static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; | 46 | static DEFINE_SPINLOCK(drm_mem_lock); |
47 | static unsigned long drm_ram_available = 0; /* In pages */ | 47 | static unsigned long drm_ram_available = 0; /* In pages */ |
48 | static unsigned long drm_ram_used = 0; | 48 | static unsigned long drm_ram_used = 0; |
49 | static drm_mem_stats_t drm_mem_stats[] = | 49 | static drm_mem_stats_t drm_mem_stats[] = |
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c index b7f17457b424..78a81a4a99c5 100644 --- a/drivers/char/drm/via_dmablit.c +++ b/drivers/char/drm/via_dmablit.c | |||
@@ -557,7 +557,7 @@ via_init_dmablit(drm_device_t *dev) | |||
557 | blitq->num_outstanding = 0; | 557 | blitq->num_outstanding = 0; |
558 | blitq->is_active = 0; | 558 | blitq->is_active = 0; |
559 | blitq->aborting = 0; | 559 | blitq->aborting = 0; |
560 | blitq->blit_lock = SPIN_LOCK_UNLOCKED; | 560 | spin_lock_init(&blitq->blit_lock); |
561 | for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { | 561 | for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { |
562 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); | 562 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); |
563 | } | 563 | } |
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index 9cad8501d62c..dc0602ae8503 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
@@ -80,7 +80,7 @@ static int invalid_lilo_config; | |||
80 | /* The ISA boards do window flipping into the same spaces so its only sane | 80 | /* The ISA boards do window flipping into the same spaces so its only sane |
81 | with a single lock. It's still pretty efficient */ | 81 | with a single lock. It's still pretty efficient */ |
82 | 82 | ||
83 | static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED; | 83 | static DEFINE_SPINLOCK(epca_lock); |
84 | 84 | ||
85 | /* ----------------------------------------------------------------------- | 85 | /* ----------------------------------------------------------------------- |
86 | MAXBOARDS is typically 12, but ISA and EISA cards are restricted to | 86 | MAXBOARDS is typically 12, but ISA and EISA cards are restricted to |
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c index 8d97b3911293..afa26b65dac3 100644 --- a/drivers/char/hvcs.c +++ b/drivers/char/hvcs.c | |||
@@ -1320,11 +1320,12 @@ static struct tty_operations hvcs_ops = { | |||
1320 | static int hvcs_alloc_index_list(int n) | 1320 | static int hvcs_alloc_index_list(int n) |
1321 | { | 1321 | { |
1322 | int i; | 1322 | int i; |
1323 | |||
1323 | hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL); | 1324 | hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL); |
1324 | if (!hvcs_index_list) | 1325 | if (!hvcs_index_list) |
1325 | return -ENOMEM; | 1326 | return -ENOMEM; |
1326 | hvcs_index_count = n; | 1327 | hvcs_index_count = n; |
1327 | for(i = 0; i < hvcs_index_count; i++) | 1328 | for (i = 0; i < hvcs_index_count; i++) |
1328 | hvcs_index_list[i] = -1; | 1329 | hvcs_index_list[i] = -1; |
1329 | return 0; | 1330 | return 0; |
1330 | } | 1331 | } |
@@ -1332,11 +1333,9 @@ static int hvcs_alloc_index_list(int n) | |||
1332 | static void hvcs_free_index_list(void) | 1333 | static void hvcs_free_index_list(void) |
1333 | { | 1334 | { |
1334 | /* Paranoia check to be thorough. */ | 1335 | /* Paranoia check to be thorough. */ |
1335 | if (hvcs_index_list) { | 1336 | kfree(hvcs_index_list); |
1336 | kfree(hvcs_index_list); | 1337 | hvcs_index_list = NULL; |
1337 | hvcs_index_list = NULL; | 1338 | hvcs_index_count = 0; |
1338 | hvcs_index_count = 0; | ||
1339 | } | ||
1340 | } | 1339 | } |
1341 | 1340 | ||
1342 | static int __init hvcs_module_init(void) | 1341 | static int __init hvcs_module_init(void) |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index b03ddab1bef5..83ed6ae466a5 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -57,8 +57,7 @@ static int ipmi_init_msghandler(void); | |||
57 | static int initialized = 0; | 57 | static int initialized = 0; |
58 | 58 | ||
59 | #ifdef CONFIG_PROC_FS | 59 | #ifdef CONFIG_PROC_FS |
60 | struct proc_dir_entry *proc_ipmi_root = NULL; | 60 | static struct proc_dir_entry *proc_ipmi_root = NULL; |
61 | EXPORT_SYMBOL(proc_ipmi_root); | ||
62 | #endif /* CONFIG_PROC_FS */ | 61 | #endif /* CONFIG_PROC_FS */ |
63 | 62 | ||
64 | #define MAX_EVENTS_IN_QUEUE 25 | 63 | #define MAX_EVENTS_IN_QUEUE 25 |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 02a7dd7a8a55..101c14b9b26d 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -809,7 +809,7 @@ static int ipmi_thread(void *data) | |||
809 | /* do nothing */ | 809 | /* do nothing */ |
810 | } | 810 | } |
811 | else if (smi_result == SI_SM_CALL_WITH_DELAY) | 811 | else if (smi_result == SI_SM_CALL_WITH_DELAY) |
812 | udelay(1); | 812 | schedule(); |
813 | else | 813 | else |
814 | schedule_timeout_interruptible(1); | 814 | schedule_timeout_interruptible(1); |
815 | } | 815 | } |
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index f43c2e04eadd..01247cccb89f 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
@@ -301,7 +301,7 @@ static struct tty_operations moxa_ops = { | |||
301 | .tiocmset = moxa_tiocmset, | 301 | .tiocmset = moxa_tiocmset, |
302 | }; | 302 | }; |
303 | 303 | ||
304 | static spinlock_t moxa_lock = SPIN_LOCK_UNLOCKED; | 304 | static DEFINE_SPINLOCK(moxa_lock); |
305 | 305 | ||
306 | #ifdef CONFIG_PCI | 306 | #ifdef CONFIG_PCI |
307 | static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board) | 307 | static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board) |
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c new file mode 100644 index 000000000000..5b91e4e25641 --- /dev/null +++ b/drivers/char/nsc_gpio.c | |||
@@ -0,0 +1,142 @@ | |||
1 | /* linux/drivers/char/nsc_gpio.c | ||
2 | |||
3 | National Semiconductor common GPIO device-file/VFS methods. | ||
4 | Allows a user space process to control the GPIO pins. | ||
5 | |||
6 | Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> | ||
7 | Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/nsc_gpio.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/io.h> | ||
20 | |||
21 | #define NAME "nsc_gpio" | ||
22 | |||
23 | void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index) | ||
24 | { | ||
25 | /* retrieve current config w/o changing it */ | ||
26 | u32 config = amp->gpio_config(index, ~0, 0); | ||
27 | |||
28 | /* user requested via 'v' command, so it's INFO */ | ||
29 | dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n", | ||
30 | index, config, | ||
31 | (config & 1) ? "OE" : "TS", /* output-enabled/tristate */ | ||
32 | (config & 2) ? "PP" : "OD", /* push pull / open drain */ | ||
33 | (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */ | ||
34 | (config & 8) ? "LOCKED" : "", /* locked / unlocked */ | ||
35 | (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */ | ||
36 | (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */ | ||
37 | (config & 64) ? "DEBOUNCE" : "", /* debounce */ | ||
38 | |||
39 | amp->gpio_get(index), amp->gpio_current(index)); | ||
40 | } | ||
41 | |||
42 | ssize_t nsc_gpio_write(struct file *file, const char __user *data, | ||
43 | size_t len, loff_t *ppos) | ||
44 | { | ||
45 | unsigned m = iminor(file->f_dentry->d_inode); | ||
46 | struct nsc_gpio_ops *amp = file->private_data; | ||
47 | struct device *dev = amp->dev; | ||
48 | size_t i; | ||
49 | int err = 0; | ||
50 | |||
51 | for (i = 0; i < len; ++i) { | ||
52 | char c; | ||
53 | if (get_user(c, data + i)) | ||
54 | return -EFAULT; | ||
55 | switch (c) { | ||
56 | case '0': | ||
57 | amp->gpio_set(m, 0); | ||
58 | break; | ||
59 | case '1': | ||
60 | amp->gpio_set(m, 1); | ||
61 | break; | ||
62 | case 'O': | ||
63 | dev_dbg(dev, "GPIO%d output enabled\n", m); | ||
64 | amp->gpio_config(m, ~1, 1); | ||
65 | break; | ||
66 | case 'o': | ||
67 | dev_dbg(dev, "GPIO%d output disabled\n", m); | ||
68 | amp->gpio_config(m, ~1, 0); | ||
69 | break; | ||
70 | case 'T': | ||
71 | dev_dbg(dev, "GPIO%d output is push pull\n", | ||
72 | m); | ||
73 | amp->gpio_config(m, ~2, 2); | ||
74 | break; | ||
75 | case 't': | ||
76 | dev_dbg(dev, "GPIO%d output is open drain\n", | ||
77 | m); | ||
78 | amp->gpio_config(m, ~2, 0); | ||
79 | break; | ||
80 | case 'P': | ||
81 | dev_dbg(dev, "GPIO%d pull up enabled\n", m); | ||
82 | amp->gpio_config(m, ~4, 4); | ||
83 | break; | ||
84 | case 'p': | ||
85 | dev_dbg(dev, "GPIO%d pull up disabled\n", m); | ||
86 | amp->gpio_config(m, ~4, 0); | ||
87 | break; | ||
88 | case 'v': | ||
89 | /* View Current pin settings */ | ||
90 | amp->gpio_dump(amp, m); | ||
91 | break; | ||
92 | case '\n': | ||
93 | /* end of settings string, do nothing */ | ||
94 | break; | ||
95 | default: | ||
96 | dev_err(dev, "io%2d bad setting: chr<0x%2x>\n", | ||
97 | m, (int)c); | ||
98 | err++; | ||
99 | } | ||
100 | } | ||
101 | if (err) | ||
102 | return -EINVAL; /* full string handled, report error */ | ||
103 | |||
104 | return len; | ||
105 | } | ||
106 | |||
107 | ssize_t nsc_gpio_read(struct file *file, char __user * buf, | ||
108 | size_t len, loff_t * ppos) | ||
109 | { | ||
110 | unsigned m = iminor(file->f_dentry->d_inode); | ||
111 | int value; | ||
112 | struct nsc_gpio_ops *amp = file->private_data; | ||
113 | |||
114 | value = amp->gpio_get(m); | ||
115 | if (put_user(value ? '1' : '0', buf)) | ||
116 | return -EFAULT; | ||
117 | |||
118 | return 1; | ||
119 | } | ||
120 | |||
121 | /* common file-ops routines for both scx200_gpio and pc8736x_gpio */ | ||
122 | EXPORT_SYMBOL(nsc_gpio_write); | ||
123 | EXPORT_SYMBOL(nsc_gpio_read); | ||
124 | EXPORT_SYMBOL(nsc_gpio_dump); | ||
125 | |||
126 | static int __init nsc_gpio_init(void) | ||
127 | { | ||
128 | printk(KERN_DEBUG NAME " initializing\n"); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static void __exit nsc_gpio_cleanup(void) | ||
133 | { | ||
134 | printk(KERN_DEBUG NAME " cleanup\n"); | ||
135 | } | ||
136 | |||
137 | module_init(nsc_gpio_init); | ||
138 | module_exit(nsc_gpio_cleanup); | ||
139 | |||
140 | MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>"); | ||
141 | MODULE_DESCRIPTION("NatSemi GPIO Common Methods"); | ||
142 | MODULE_LICENSE("GPL"); | ||
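The switch in nsc_gpio_write() above defines a small per-pin protocol on the device file: '0'/'1' drive the pin, 'O'/'o' toggle the output driver, 'T'/'t' select push-pull or open drain, 'P'/'p' control the pull-up, and 'v' dumps the pin configuration, while read() returns a single '0' or '1'. A minimal user-space sketch of that protocol follows; the /dev/gpio15 node and the pin number are assumptions for illustration, since the driver only ties each pin to a character-device minor.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char val;
	/* hypothetical node created for the pin at minor 15 */
	int fd = open("/dev/gpio15", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* 'O' enables the output driver, '1' drives the pin high */
	if (write(fd, "O1", 2) != 2) {
		perror("write");
		close(fd);
		return 1;
	}
	/* the driver's read() hands back one character, '0' or '1' */
	if (read(fd, &val, 1) == 1)
		printf("pin now reads %c\n", val);
	close(fd);
	return 0;
}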
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c new file mode 100644 index 000000000000..1c706ccfdbb3 --- /dev/null +++ b/drivers/char/pc8736x_gpio.c | |||
@@ -0,0 +1,340 @@ | |||
1 | /* linux/drivers/char/pc8736x_gpio.c | ||
2 | |||
3 | National Semiconductor PC8736x GPIO driver. Allows a user space | ||
4 | process to play with the GPIO pins. | ||
5 | |||
6 | Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com> | ||
7 | |||
8 | adapted from linux/drivers/char/scx200_gpio.c | ||
9 | Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>, | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/mutex.h> | ||
21 | #include <linux/nsc_gpio.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | |||
25 | #define DEVNAME "pc8736x_gpio" | ||
26 | |||
27 | MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>"); | ||
28 | MODULE_DESCRIPTION("NatSemi PC-8736x GPIO Pin Driver"); | ||
29 | MODULE_LICENSE("GPL"); | ||
30 | |||
31 | static int major; /* default to dynamic major */ | ||
32 | module_param(major, int, 0); | ||
33 | MODULE_PARM_DESC(major, "Major device number"); | ||
34 | |||
35 | static DEFINE_MUTEX(pc8736x_gpio_config_lock); | ||
36 | static unsigned pc8736x_gpio_base; | ||
37 | static u8 pc8736x_gpio_shadow[4]; | ||
38 | |||
39 | #define SIO_BASE1 0x2E /* 1st command-reg to check */ | ||
40 | #define SIO_BASE2 0x4E /* alt command-reg to check */ | ||
41 | #define SIO_BASE_OFFSET 0x20 | ||
42 | |||
43 | #define SIO_SID 0x20 /* SuperI/O ID Register */ | ||
44 | #define SIO_SID_VALUE 0xe9 /* Expected value in SuperI/O ID Register */ | ||
45 | |||
46 | #define SIO_CF1 0x21 /* chip config, bit0 is chip enable */ | ||
47 | |||
48 | #define PC8736X_GPIO_SIZE 16 | ||
49 | |||
50 | #define SIO_UNIT_SEL 0x7 /* unit select reg */ | ||
51 | #define SIO_UNIT_ACT 0x30 /* unit enable */ | ||
52 | #define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */ | ||
53 | #define SIO_VLM_UNIT 0x0D | ||
54 | #define SIO_TMS_UNIT 0x0E | ||
55 | |||
56 | /* config-space addrs to read/write each unit's runtime addr */ | ||
57 | #define SIO_BASE_HADDR 0x60 | ||
58 | #define SIO_BASE_LADDR 0x61 | ||
59 | |||
60 | /* GPIO config-space pin-control addresses */ | ||
61 | #define SIO_GPIO_PIN_SELECT 0xF0 | ||
62 | #define SIO_GPIO_PIN_CONFIG 0xF1 | ||
63 | #define SIO_GPIO_PIN_EVENT 0xF2 | ||
64 | |||
65 | static unsigned char superio_cmd = 0; | ||
66 | static unsigned char selected_device = 0xFF; /* bogus start val */ | ||
67 | |||
68 | /* GPIO port runtime access, functionality */ | ||
69 | static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */ | ||
70 | /* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */ | ||
71 | |||
72 | #define PORT_OUT 0 | ||
73 | #define PORT_IN 1 | ||
74 | #define PORT_EVT_EN 2 | ||
75 | #define PORT_EVT_STST 3 | ||
76 | |||
77 | static struct platform_device *pdev; /* use in dev_*() */ | ||
78 | |||
79 | static inline void superio_outb(int addr, int val) | ||
80 | { | ||
81 | outb_p(addr, superio_cmd); | ||
82 | outb_p(val, superio_cmd + 1); | ||
83 | } | ||
84 | |||
85 | static inline int superio_inb(int addr) | ||
86 | { | ||
87 | outb_p(addr, superio_cmd); | ||
88 | return inb_p(superio_cmd + 1); | ||
89 | } | ||
90 | |||
91 | static int pc8736x_superio_present(void) | ||
92 | { | ||
93 | /* try the 2 possible values, read a hardware reg to verify */ | ||
94 | superio_cmd = SIO_BASE1; | ||
95 | if (superio_inb(SIO_SID) == SIO_SID_VALUE) | ||
96 | return superio_cmd; | ||
97 | |||
98 | superio_cmd = SIO_BASE2; | ||
99 | if (superio_inb(SIO_SID) == SIO_SID_VALUE) | ||
100 | return superio_cmd; | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static void device_select(unsigned devldn) | ||
106 | { | ||
107 | superio_outb(SIO_UNIT_SEL, devldn); | ||
108 | selected_device = devldn; | ||
109 | } | ||
110 | |||
111 | static void select_pin(unsigned iminor) | ||
112 | { | ||
113 | /* select GPIO port/pin from device minor number */ | ||
114 | device_select(SIO_GPIO_UNIT); | ||
115 | superio_outb(SIO_GPIO_PIN_SELECT, | ||
116 | ((iminor << 1) & 0xF0) | (iminor & 0x7)); | ||
117 | } | ||
118 | |||
119 | static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits, | ||
120 | u32 func_slct) | ||
121 | { | ||
122 | u32 config, new_config; | ||
123 | |||
124 | mutex_lock(&pc8736x_gpio_config_lock); | ||
125 | |||
126 | device_select(SIO_GPIO_UNIT); | ||
127 | select_pin(index); | ||
128 | |||
129 | /* read current config value */ | ||
130 | config = superio_inb(func_slct); | ||
131 | |||
132 | /* set new config */ | ||
133 | new_config = (config & mask) | bits; | ||
134 | superio_outb(func_slct, new_config); | ||
135 | |||
136 | mutex_unlock(&pc8736x_gpio_config_lock); | ||
137 | |||
138 | return config; | ||
139 | } | ||
140 | |||
141 | static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits) | ||
142 | { | ||
143 | return pc8736x_gpio_configure_fn(index, mask, bits, | ||
144 | SIO_GPIO_PIN_CONFIG); | ||
145 | } | ||
146 | |||
147 | static int pc8736x_gpio_get(unsigned minor) | ||
148 | { | ||
149 | int port, bit, val; | ||
150 | |||
151 | port = minor >> 3; | ||
152 | bit = minor & 7; | ||
153 | val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN); | ||
154 | val >>= bit; | ||
155 | val &= 1; | ||
156 | |||
157 | dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n", | ||
158 | minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit, | ||
159 | val); | ||
160 | |||
161 | return val; | ||
162 | } | ||
163 | |||
164 | static void pc8736x_gpio_set(unsigned minor, int val) | ||
165 | { | ||
166 | int port, bit, curval; | ||
167 | |||
168 | minor &= 0x1f; | ||
169 | port = minor >> 3; | ||
170 | bit = minor & 7; | ||
171 | curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); | ||
172 | |||
173 | dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n", | ||
174 | pc8736x_gpio_base + port_offset[port] + PORT_OUT, | ||
175 | curval, bit, (curval & ~(1 << bit)), val, (val << bit)); | ||
176 | |||
177 | val = (curval & ~(1 << bit)) | (val << bit); | ||
178 | |||
179 | dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)" | ||
180 | " %2x -> %2x\n", minor, port, bit, curval, val); | ||
181 | |||
182 | outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT); | ||
183 | |||
184 | curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT); | ||
185 | val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN); | ||
186 | |||
187 | dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val); | ||
188 | pc8736x_gpio_shadow[port] = val; | ||
189 | } | ||
190 | |||
191 | static void pc8736x_gpio_set_high(unsigned index) | ||
192 | { | ||
193 | pc8736x_gpio_set(index, 1); | ||
194 | } | ||
195 | |||
196 | static void pc8736x_gpio_set_low(unsigned index) | ||
197 | { | ||
198 | pc8736x_gpio_set(index, 0); | ||
199 | } | ||
200 | |||
201 | static int pc8736x_gpio_current(unsigned minor) | ||
202 | { | ||
203 | int port, bit; | ||
204 | minor &= 0x1f; | ||
205 | port = minor >> 3; | ||
206 | bit = minor & 7; | ||
207 | return ((pc8736x_gpio_shadow[port] >> bit) & 0x01); | ||
208 | } | ||
209 | |||
210 | static void pc8736x_gpio_change(unsigned index) | ||
211 | { | ||
212 | pc8736x_gpio_set(index, !pc8736x_gpio_current(index)); | ||
213 | } | ||
214 | |||
215 | static struct nsc_gpio_ops pc8736x_access = { | ||
216 | .owner = THIS_MODULE, | ||
217 | .gpio_config = pc8736x_gpio_configure, | ||
218 | .gpio_dump = nsc_gpio_dump, | ||
219 | .gpio_get = pc8736x_gpio_get, | ||
220 | .gpio_set = pc8736x_gpio_set, | ||
221 | .gpio_set_high = pc8736x_gpio_set_high, | ||
222 | .gpio_set_low = pc8736x_gpio_set_low, | ||
223 | .gpio_change = pc8736x_gpio_change, | ||
224 | .gpio_current = pc8736x_gpio_current | ||
225 | }; | ||
226 | |||
227 | static int pc8736x_gpio_open(struct inode *inode, struct file *file) | ||
228 | { | ||
229 | unsigned m = iminor(inode); | ||
230 | file->private_data = &pc8736x_access; | ||
231 | |||
232 | dev_dbg(&pdev->dev, "open %d\n", m); | ||
233 | |||
234 | if (m > 63) | ||
235 | return -EINVAL; | ||
236 | return nonseekable_open(inode, file); | ||
237 | } | ||
238 | |||
239 | static struct file_operations pc8736x_gpio_fops = { | ||
240 | .owner = THIS_MODULE, | ||
241 | .open = pc8736x_gpio_open, | ||
242 | .write = nsc_gpio_write, | ||
243 | .read = nsc_gpio_read, | ||
244 | }; | ||
245 | |||
246 | static void __init pc8736x_init_shadow(void) | ||
247 | { | ||
248 | int port; | ||
249 | |||
250 | /* read the current values driven on the GPIO signals */ | ||
251 | for (port = 0; port < 4; ++port) | ||
252 | pc8736x_gpio_shadow[port] | ||
253 | = inb_p(pc8736x_gpio_base + port_offset[port] | ||
254 | + PORT_OUT); | ||
255 | |||
256 | } | ||
257 | |||
258 | static int __init pc8736x_gpio_init(void) | ||
259 | { | ||
260 | int rc = 0; | ||
261 | |||
262 | pdev = platform_device_alloc(DEVNAME, 0); | ||
263 | if (!pdev) | ||
264 | return -ENOMEM; | ||
265 | |||
266 | rc = platform_device_add(pdev); | ||
267 | if (rc) { | ||
268 | rc = -ENODEV; | ||
269 | goto undo_platform_dev_alloc; | ||
270 | } | ||
271 | dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n"); | ||
272 | |||
273 | if (!pc8736x_superio_present()) { | ||
274 | rc = -ENODEV; | ||
275 | dev_err(&pdev->dev, "no device found\n"); | ||
276 | goto undo_platform_dev_add; | ||
277 | } | ||
278 | pc8736x_access.dev = &pdev->dev; | ||
279 | |||
280 | /* Verify that the chip and its GPIO unit are both enabled. | ||
281 | My BIOS does this, so I take minimal action here. | ||
282 | */ | ||
283 | rc = superio_inb(SIO_CF1); | ||
284 | if (!(rc & 0x01)) { | ||
285 | rc = -ENODEV; | ||
286 | dev_err(&pdev->dev, "device not enabled\n"); | ||
287 | goto undo_platform_dev_add; | ||
288 | } | ||
289 | device_select(SIO_GPIO_UNIT); | ||
290 | if (!superio_inb(SIO_UNIT_ACT)) { | ||
291 | rc = -ENODEV; | ||
292 | dev_err(&pdev->dev, "GPIO unit not enabled\n"); | ||
293 | goto undo_platform_dev_add; | ||
294 | } | ||
295 | |||
296 | /* read the GPIO unit base addr that chip responds to */ | ||
297 | pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8 | ||
298 | | superio_inb(SIO_BASE_LADDR)); | ||
299 | |||
300 | if (!request_region(pc8736x_gpio_base, 16, DEVNAME)) { | ||
301 | rc = -ENODEV; | ||
302 | dev_err(&pdev->dev, "GPIO ioport %x busy\n", | ||
303 | pc8736x_gpio_base); | ||
304 | goto undo_platform_dev_add; | ||
305 | } | ||
306 | dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base); | ||
307 | |||
308 | rc = register_chrdev(major, DEVNAME, &pc8736x_gpio_fops); | ||
309 | if (rc < 0) { | ||
310 | dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc); | ||
311 | goto undo_platform_dev_add; | ||
312 | } | ||
313 | if (!major) { | ||
314 | major = rc; | ||
315 | dev_dbg(&pdev->dev, "got dynamic major %d\n", major); | ||
316 | } | ||
317 | |||
318 | pc8736x_init_shadow(); | ||
319 | return 0; | ||
320 | |||
321 | undo_platform_dev_add: | ||
322 | platform_device_put(pdev); | ||
323 | undo_platform_dev_alloc: | ||
324 | kfree(pdev); | ||
325 | return rc; | ||
326 | } | ||
327 | |||
328 | static void __exit pc8736x_gpio_cleanup(void) | ||
329 | { | ||
330 | dev_dbg(&pdev->dev, " cleanup\n"); | ||
331 | |||
332 | release_region(pc8736x_gpio_base, 16); | ||
333 | |||
334 | unregister_chrdev(major, DEVNAME); | ||
335 | } | ||
336 | |||
337 | EXPORT_SYMBOL(pc8736x_access); | ||
338 | |||
339 | module_init(pc8736x_gpio_init); | ||
340 | module_exit(pc8736x_gpio_cleanup); | ||
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c index 664a6e97eb1a..5a280a330401 100644 --- a/drivers/char/scx200_gpio.c +++ b/drivers/char/scx200_gpio.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* linux/drivers/char/scx200_gpio.c | 1 | /* linux/drivers/char/scx200_gpio.c |
2 | 2 | ||
3 | National Semiconductor SCx200 GPIO driver. Allows a user space | 3 | National Semiconductor SCx200 GPIO driver. Allows a user space |
4 | process to play with the GPIO pins. | 4 | process to play with the GPIO pins. |
@@ -6,17 +6,26 @@ | |||
6 | Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */ | 6 | Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */ |
7 | 7 | ||
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <linux/device.h> | ||
9 | #include <linux/fs.h> | 10 | #include <linux/fs.h> |
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | ||
14 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
15 | #include <asm/io.h> | 17 | #include <asm/io.h> |
16 | 18 | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/cdev.h> | ||
21 | |||
17 | #include <linux/scx200_gpio.h> | 22 | #include <linux/scx200_gpio.h> |
23 | #include <linux/nsc_gpio.h> | ||
18 | 24 | ||
19 | #define NAME "scx200_gpio" | 25 | #define NAME "scx200_gpio" |
26 | #define DEVNAME NAME | ||
27 | |||
28 | static struct platform_device *pdev; | ||
20 | 29 | ||
21 | MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); | 30 | MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); |
22 | MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver"); | 31 | MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver"); |
@@ -26,70 +35,23 @@ static int major = 0; /* default to dynamic major */ | |||
26 | module_param(major, int, 0); | 35 | module_param(major, int, 0); |
27 | MODULE_PARM_DESC(major, "Major device number"); | 36 | MODULE_PARM_DESC(major, "Major device number"); |
28 | 37 | ||
29 | static ssize_t scx200_gpio_write(struct file *file, const char __user *data, | 38 | struct nsc_gpio_ops scx200_access = { |
30 | size_t len, loff_t *ppos) | 39 | .owner = THIS_MODULE, |
31 | { | 40 | .gpio_config = scx200_gpio_configure, |
32 | unsigned m = iminor(file->f_dentry->d_inode); | 41 | .gpio_dump = nsc_gpio_dump, |
33 | size_t i; | 42 | .gpio_get = scx200_gpio_get, |
34 | 43 | .gpio_set = scx200_gpio_set, | |
35 | for (i = 0; i < len; ++i) { | 44 | .gpio_set_high = scx200_gpio_set_high, |
36 | char c; | 45 | .gpio_set_low = scx200_gpio_set_low, |
37 | if (get_user(c, data+i)) | 46 | .gpio_change = scx200_gpio_change, |
38 | return -EFAULT; | 47 | .gpio_current = scx200_gpio_current |
39 | switch (c) | 48 | }; |
40 | { | ||
41 | case '0': | ||
42 | scx200_gpio_set(m, 0); | ||
43 | break; | ||
44 | case '1': | ||
45 | scx200_gpio_set(m, 1); | ||
46 | break; | ||
47 | case 'O': | ||
48 | printk(KERN_INFO NAME ": GPIO%d output enabled\n", m); | ||
49 | scx200_gpio_configure(m, ~1, 1); | ||
50 | break; | ||
51 | case 'o': | ||
52 | printk(KERN_INFO NAME ": GPIO%d output disabled\n", m); | ||
53 | scx200_gpio_configure(m, ~1, 0); | ||
54 | break; | ||
55 | case 'T': | ||
56 | printk(KERN_INFO NAME ": GPIO%d output is push pull\n", m); | ||
57 | scx200_gpio_configure(m, ~2, 2); | ||
58 | break; | ||
59 | case 't': | ||
60 | printk(KERN_INFO NAME ": GPIO%d output is open drain\n", m); | ||
61 | scx200_gpio_configure(m, ~2, 0); | ||
62 | break; | ||
63 | case 'P': | ||
64 | printk(KERN_INFO NAME ": GPIO%d pull up enabled\n", m); | ||
65 | scx200_gpio_configure(m, ~4, 4); | ||
66 | break; | ||
67 | case 'p': | ||
68 | printk(KERN_INFO NAME ": GPIO%d pull up disabled\n", m); | ||
69 | scx200_gpio_configure(m, ~4, 0); | ||
70 | break; | ||
71 | } | ||
72 | } | ||
73 | |||
74 | return len; | ||
75 | } | ||
76 | |||
77 | static ssize_t scx200_gpio_read(struct file *file, char __user *buf, | ||
78 | size_t len, loff_t *ppos) | ||
79 | { | ||
80 | unsigned m = iminor(file->f_dentry->d_inode); | ||
81 | int value; | ||
82 | |||
83 | value = scx200_gpio_get(m); | ||
84 | if (put_user(value ? '1' : '0', buf)) | ||
85 | return -EFAULT; | ||
86 | |||
87 | return 1; | ||
88 | } | ||
89 | 49 | ||
90 | static int scx200_gpio_open(struct inode *inode, struct file *file) | 50 | static int scx200_gpio_open(struct inode *inode, struct file *file) |
91 | { | 51 | { |
92 | unsigned m = iminor(inode); | 52 | unsigned m = iminor(inode); |
53 | file->private_data = &scx200_access; | ||
54 | |||
93 | if (m > 63) | 55 | if (m > 63) |
94 | return -EINVAL; | 56 | return -EINVAL; |
95 | return nonseekable_open(inode, file); | 57 | return nonseekable_open(inode, file); |
@@ -103,47 +65,81 @@ static int scx200_gpio_release(struct inode *inode, struct file *file) | |||
103 | 65 | ||
104 | static struct file_operations scx200_gpio_fops = { | 66 | static struct file_operations scx200_gpio_fops = { |
105 | .owner = THIS_MODULE, | 67 | .owner = THIS_MODULE, |
106 | .write = scx200_gpio_write, | 68 | .write = nsc_gpio_write, |
107 | .read = scx200_gpio_read, | 69 | .read = nsc_gpio_read, |
108 | .open = scx200_gpio_open, | 70 | .open = scx200_gpio_open, |
109 | .release = scx200_gpio_release, | 71 | .release = scx200_gpio_release, |
110 | }; | 72 | }; |
111 | 73 | ||
74 | struct cdev *scx200_devices; | ||
75 | static int num_pins = 32; | ||
76 | |||
112 | static int __init scx200_gpio_init(void) | 77 | static int __init scx200_gpio_init(void) |
113 | { | 78 | { |
114 | int r; | 79 | int rc, i; |
115 | 80 | dev_t dev = MKDEV(major, 0); | |
116 | printk(KERN_DEBUG NAME ": NatSemi SCx200 GPIO Driver\n"); | ||
117 | 81 | ||
118 | if (!scx200_gpio_present()) { | 82 | if (!scx200_gpio_present()) { |
119 | printk(KERN_ERR NAME ": no SCx200 gpio pins available\n"); | 83 | printk(KERN_ERR NAME ": no SCx200 gpio present\n"); |
120 | return -ENODEV; | 84 | return -ENODEV; |
121 | } | 85 | } |
122 | 86 | ||
123 | r = register_chrdev(major, NAME, &scx200_gpio_fops); | 87 | /* support dev_dbg() with pdev->dev */ |
124 | if (r < 0) { | 88 | pdev = platform_device_alloc(DEVNAME, 0); |
125 | printk(KERN_ERR NAME ": unable to register character device\n"); | 89 | if (!pdev) |
126 | return r; | 90 | return -ENOMEM; |
91 | |||
92 | rc = platform_device_add(pdev); | ||
93 | if (rc) | ||
94 | goto undo_malloc; | ||
95 | |||
96 | /* nsc_gpio uses dev_dbg(), so needs this */ | ||
97 | scx200_access.dev = &pdev->dev; | ||
98 | |||
99 | if (major) | ||
100 | rc = register_chrdev_region(dev, num_pins, "scx200_gpio"); | ||
101 | else { | ||
102 | rc = alloc_chrdev_region(&dev, 0, num_pins, "scx200_gpio"); | ||
103 | major = MAJOR(dev); | ||
127 | } | 104 | } |
128 | if (!major) { | 105 | if (rc < 0) { |
129 | major = r; | 106 | dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc); |
130 | printk(KERN_DEBUG NAME ": got dynamic major %d\n", major); | 107 | goto undo_platform_device_add; |
108 | } | ||
109 | scx200_devices = kzalloc(num_pins * sizeof(struct cdev), GFP_KERNEL); | ||
110 | if (!scx200_devices) { | ||
111 | rc = -ENOMEM; | ||
112 | goto undo_chrdev_region; | ||
113 | } | ||
114 | for (i = 0; i < num_pins; i++) { | ||
115 | struct cdev *cdev = &scx200_devices[i]; | ||
116 | cdev_init(cdev, &scx200_gpio_fops); | ||
117 | cdev->owner = THIS_MODULE; | ||
118 | rc = cdev_add(cdev, MKDEV(major, i), 1); | ||
119 | /* tolerate 'minor' errors */ | ||
120 | if (rc) | ||
121 | dev_err(&pdev->dev, "Error %d on minor %d", rc, i); | ||
131 | } | 122 | } |
132 | 123 | ||
133 | return 0; | 124 | return 0; /* succeed */ |
125 | |||
126 | undo_chrdev_region: | ||
127 | unregister_chrdev_region(dev, num_pins); | ||
128 | undo_platform_device_add: | ||
129 | platform_device_put(pdev); | ||
130 | undo_malloc: | ||
131 | kfree(pdev); | ||
132 | return rc; | ||
134 | } | 133 | } |
135 | 134 | ||
136 | static void __exit scx200_gpio_cleanup(void) | 135 | static void __exit scx200_gpio_cleanup(void) |
137 | { | 136 | { |
138 | unregister_chrdev(major, NAME); | 137 | kfree(scx200_devices); |
138 | unregister_chrdev_region(MKDEV(major, 0), num_pins); | ||
139 | platform_device_put(pdev); | ||
140 | platform_device_unregister(pdev); | ||
141 | /* kfree(pdev); */ | ||
139 | } | 142 | } |
140 | 143 | ||
141 | module_init(scx200_gpio_init); | 144 | module_init(scx200_gpio_init); |
142 | module_exit(scx200_gpio_cleanup); | 145 | module_exit(scx200_gpio_cleanup); |
143 | |||
144 | /* | ||
145 | Local variables: | ||
146 | compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules" | ||
147 | c-basic-offset: 8 | ||
148 | End: | ||
149 | */ | ||
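Because the reworked scx200_gpio now registers a cdev per pin on a chrdev region whose major may be allocated dynamically, matching device nodes still have to be created by the administrator. The sketch below is one hedged way to do that: it looks up the "scx200_gpio" region name (the name passed to register_chrdev_region()/alloc_chrdev_region() above) in /proc/devices and creates one node per pin; the /dev/scx200_gpio%d naming is an assumption for illustration only.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	char line[128], name[64], path[64];
	int major = -1, minor, n;
	FILE *f = fopen("/proc/devices", "r");

	if (!f) {
		perror("/proc/devices");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%d %63s", &n, name) == 2 &&
		    strcmp(name, "scx200_gpio") == 0)
			major = n;		/* dynamic major picked by the kernel */
	fclose(f);
	if (major < 0) {
		fprintf(stderr, "scx200_gpio not found in /proc/devices\n");
		return 1;
	}
	for (minor = 0; minor < 32; minor++) {	/* the driver sets num_pins = 32 */
		snprintf(path, sizeof(path), "/dev/scx200_gpio%d", minor);
		if (mknod(path, S_IFCHR | 0600, makedev(major, minor)) && errno != EEXIST)
			perror(path);
	}
	return 0;
}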
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 1b5330299e30..d2d6b01dcd05 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2477,7 +2477,7 @@ static int __init specialix_init(void) | |||
2477 | #endif | 2477 | #endif |
2478 | 2478 | ||
2479 | for (i = 0; i < SX_NBOARD; i++) | 2479 | for (i = 0; i < SX_NBOARD; i++) |
2480 | sx_board[i].lock = SPIN_LOCK_UNLOCKED; | 2480 | spin_lock_init(&sx_board[i].lock); |
2481 | 2481 | ||
2482 | if (sx_init_drivers()) { | 2482 | if (sx_init_drivers()) { |
2483 | func_exit(); | 2483 | func_exit(); |
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index a9c5a7230f89..bf361a5ba70d 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c | |||
@@ -141,15 +141,6 @@ static char *stl_drvversion = "5.6.0"; | |||
141 | static struct tty_driver *stl_serial; | 141 | static struct tty_driver *stl_serial; |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * We will need to allocate a temporary write buffer for chars that | ||
145 | * come direct from user space. The problem is that a copy from user | ||
146 | * space might cause a page fault (typically on a system that is | ||
147 | * swapping!). All ports will share one buffer - since if the system | ||
148 | * is already swapping a shared buffer won't make things any worse. | ||
149 | */ | ||
150 | static char *stl_tmpwritebuf; | ||
151 | |||
152 | /* | ||
153 | * Define a local default termios struct. All ports will be created | 144 | * Define a local default termios struct. All ports will be created |
154 | * with this termios initially. Basically all it defines is a raw port | 145 | * with this termios initially. Basically all it defines is a raw port |
155 | * at 9600, 8 data bits, 1 stop bit. | 146 | * at 9600, 8 data bits, 1 stop bit. |
@@ -363,6 +354,14 @@ static unsigned char stl_vecmap[] = { | |||
363 | }; | 354 | }; |
364 | 355 | ||
365 | /* | 356 | /* |
357 | * Lock ordering: you may not take stallion_lock while | ||
358 | * holding brd_lock. | ||
359 | */ | ||
360 | |||
361 | static spinlock_t brd_lock; /* Guard the board mapping */ | ||
362 | static spinlock_t stallion_lock; /* Guard the tty driver */ | ||
363 | |||
364 | /* | ||
366 | * Set up enable and disable macros for the ECH boards. They require | 365 | * Set up enable and disable macros for the ECH boards. They require |
367 | * the secondary io address space to be activated and deactivated. | 366 | * the secondary io address space to be activated and deactivated. |
368 | * This way all ECH boards can share their secondary io region. | 367 | * This way all ECH boards can share their secondary io region. |
@@ -725,17 +724,7 @@ static struct class *stallion_class; | |||
725 | 724 | ||
726 | static int __init stallion_module_init(void) | 725 | static int __init stallion_module_init(void) |
727 | { | 726 | { |
728 | unsigned long flags; | ||
729 | |||
730 | #ifdef DEBUG | ||
731 | printk("init_module()\n"); | ||
732 | #endif | ||
733 | |||
734 | save_flags(flags); | ||
735 | cli(); | ||
736 | stl_init(); | 727 | stl_init(); |
737 | restore_flags(flags); | ||
738 | |||
739 | return 0; | 728 | return 0; |
740 | } | 729 | } |
741 | 730 | ||
@@ -746,7 +735,6 @@ static void __exit stallion_module_exit(void) | |||
746 | stlbrd_t *brdp; | 735 | stlbrd_t *brdp; |
747 | stlpanel_t *panelp; | 736 | stlpanel_t *panelp; |
748 | stlport_t *portp; | 737 | stlport_t *portp; |
749 | unsigned long flags; | ||
750 | int i, j, k; | 738 | int i, j, k; |
751 | 739 | ||
752 | #ifdef DEBUG | 740 | #ifdef DEBUG |
@@ -756,9 +744,6 @@ static void __exit stallion_module_exit(void) | |||
756 | printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle, | 744 | printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle, |
757 | stl_drvversion); | 745 | stl_drvversion); |
758 | 746 | ||
759 | save_flags(flags); | ||
760 | cli(); | ||
761 | |||
762 | /* | 747 | /* |
763 | * Free up all allocated resources used by the ports. This includes | 748 | * Free up all allocated resources used by the ports. This includes |
764 | * memory and interrupts. As part of this process we will also do | 749 | * memory and interrupts. As part of this process we will also do |
@@ -770,7 +755,6 @@ static void __exit stallion_module_exit(void) | |||
770 | if (i) { | 755 | if (i) { |
771 | printk("STALLION: failed to un-register tty driver, " | 756 | printk("STALLION: failed to un-register tty driver, " |
772 | "errno=%d\n", -i); | 757 | "errno=%d\n", -i); |
773 | restore_flags(flags); | ||
774 | return; | 758 | return; |
775 | } | 759 | } |
776 | for (i = 0; i < 4; i++) { | 760 | for (i = 0; i < 4; i++) { |
@@ -783,8 +767,6 @@ static void __exit stallion_module_exit(void) | |||
783 | "errno=%d\n", -i); | 767 | "errno=%d\n", -i); |
784 | class_destroy(stallion_class); | 768 | class_destroy(stallion_class); |
785 | 769 | ||
786 | kfree(stl_tmpwritebuf); | ||
787 | |||
788 | for (i = 0; (i < stl_nrbrds); i++) { | 770 | for (i = 0; (i < stl_nrbrds); i++) { |
789 | if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL) | 771 | if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL) |
790 | continue; | 772 | continue; |
@@ -814,8 +796,6 @@ static void __exit stallion_module_exit(void) | |||
814 | kfree(brdp); | 796 | kfree(brdp); |
815 | stl_brds[i] = (stlbrd_t *) NULL; | 797 | stl_brds[i] = (stlbrd_t *) NULL; |
816 | } | 798 | } |
817 | |||
818 | restore_flags(flags); | ||
819 | } | 799 | } |
820 | 800 | ||
821 | module_init(stallion_module_init); | 801 | module_init(stallion_module_init); |
@@ -948,7 +928,7 @@ static stlbrd_t *stl_allocbrd(void) | |||
948 | 928 | ||
949 | brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL); | 929 | brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL); |
950 | if (!brdp) { | 930 | if (!brdp) { |
951 | printk("STALLION: failed to allocate memory (size=%d)\n", | 931 | printk("STALLION: failed to allocate memory (size=%Zd)\n", |
952 | sizeof(stlbrd_t)); | 932 | sizeof(stlbrd_t)); |
953 | return NULL; | 933 | return NULL; |
954 | } | 934 | } |
@@ -1066,16 +1046,17 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp) | |||
1066 | rc = 0; | 1046 | rc = 0; |
1067 | doclocal = 0; | 1047 | doclocal = 0; |
1068 | 1048 | ||
1049 | spin_lock_irqsave(&stallion_lock, flags); | ||
1050 | |||
1069 | if (portp->tty->termios->c_cflag & CLOCAL) | 1051 | if (portp->tty->termios->c_cflag & CLOCAL) |
1070 | doclocal++; | 1052 | doclocal++; |
1071 | 1053 | ||
1072 | save_flags(flags); | ||
1073 | cli(); | ||
1074 | portp->openwaitcnt++; | 1054 | portp->openwaitcnt++; |
1075 | if (! tty_hung_up_p(filp)) | 1055 | if (! tty_hung_up_p(filp)) |
1076 | portp->refcount--; | 1056 | portp->refcount--; |
1077 | 1057 | ||
1078 | for (;;) { | 1058 | for (;;) { |
1059 | /* Takes brd_lock internally */ | ||
1079 | stl_setsignals(portp, 1, 1); | 1060 | stl_setsignals(portp, 1, 1); |
1080 | if (tty_hung_up_p(filp) || | 1061 | if (tty_hung_up_p(filp) || |
1081 | ((portp->flags & ASYNC_INITIALIZED) == 0)) { | 1062 | ((portp->flags & ASYNC_INITIALIZED) == 0)) { |
@@ -1093,13 +1074,14 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp) | |||
1093 | rc = -ERESTARTSYS; | 1074 | rc = -ERESTARTSYS; |
1094 | break; | 1075 | break; |
1095 | } | 1076 | } |
1077 | /* FIXME */ | ||
1096 | interruptible_sleep_on(&portp->open_wait); | 1078 | interruptible_sleep_on(&portp->open_wait); |
1097 | } | 1079 | } |
1098 | 1080 | ||
1099 | if (! tty_hung_up_p(filp)) | 1081 | if (! tty_hung_up_p(filp)) |
1100 | portp->refcount++; | 1082 | portp->refcount++; |
1101 | portp->openwaitcnt--; | 1083 | portp->openwaitcnt--; |
1102 | restore_flags(flags); | 1084 | spin_unlock_irqrestore(&stallion_lock, flags); |
1103 | 1085 | ||
1104 | return rc; | 1086 | return rc; |
1105 | } | 1087 | } |
@@ -1119,16 +1101,15 @@ static void stl_close(struct tty_struct *tty, struct file *filp) | |||
1119 | if (portp == (stlport_t *) NULL) | 1101 | if (portp == (stlport_t *) NULL) |
1120 | return; | 1102 | return; |
1121 | 1103 | ||
1122 | save_flags(flags); | 1104 | spin_lock_irqsave(&stallion_lock, flags); |
1123 | cli(); | ||
1124 | if (tty_hung_up_p(filp)) { | 1105 | if (tty_hung_up_p(filp)) { |
1125 | restore_flags(flags); | 1106 | spin_unlock_irqrestore(&stallion_lock, flags); |
1126 | return; | 1107 | return; |
1127 | } | 1108 | } |
1128 | if ((tty->count == 1) && (portp->refcount != 1)) | 1109 | if ((tty->count == 1) && (portp->refcount != 1)) |
1129 | portp->refcount = 1; | 1110 | portp->refcount = 1; |
1130 | if (portp->refcount-- > 1) { | 1111 | if (portp->refcount-- > 1) { |
1131 | restore_flags(flags); | 1112 | spin_unlock_irqrestore(&stallion_lock, flags); |
1132 | return; | 1113 | return; |
1133 | } | 1114 | } |
1134 | 1115 | ||
@@ -1142,11 +1123,18 @@ static void stl_close(struct tty_struct *tty, struct file *filp) | |||
1142 | * (The sc26198 has no "end-of-data" interrupt only empty FIFO) | 1123 | * (The sc26198 has no "end-of-data" interrupt only empty FIFO) |
1143 | */ | 1124 | */ |
1144 | tty->closing = 1; | 1125 | tty->closing = 1; |
1126 | |||
1127 | spin_unlock_irqrestore(&stallion_lock, flags); | ||
1128 | |||
1145 | if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE) | 1129 | if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE) |
1146 | tty_wait_until_sent(tty, portp->closing_wait); | 1130 | tty_wait_until_sent(tty, portp->closing_wait); |
1147 | stl_waituntilsent(tty, (HZ / 2)); | 1131 | stl_waituntilsent(tty, (HZ / 2)); |
1148 | 1132 | ||
1133 | |||
1134 | spin_lock_irqsave(&stallion_lock, flags); | ||
1149 | portp->flags &= ~ASYNC_INITIALIZED; | 1135 | portp->flags &= ~ASYNC_INITIALIZED; |
1136 | spin_unlock_irqrestore(&stallion_lock, flags); | ||
1137 | |||
1150 | stl_disableintrs(portp); | 1138 | stl_disableintrs(portp); |
1151 | if (tty->termios->c_cflag & HUPCL) | 1139 | if (tty->termios->c_cflag & HUPCL) |
1152 | stl_setsignals(portp, 0, 0); | 1140 | stl_setsignals(portp, 0, 0); |
@@ -1173,7 +1161,6 @@ static void stl_close(struct tty_struct *tty, struct file *filp) | |||
1173 | 1161 | ||
1174 | portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | 1162 | portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); |
1175 | wake_up_interruptible(&portp->close_wait); | 1163 | wake_up_interruptible(&portp->close_wait); |
1176 | restore_flags(flags); | ||
1177 | } | 1164 | } |
1178 | 1165 | ||
1179 | /*****************************************************************************/ | 1166 | /*****************************************************************************/ |
@@ -1195,9 +1182,6 @@ static int stl_write(struct tty_struct *tty, const unsigned char *buf, int count | |||
1195 | (int) tty, (int) buf, count); | 1182 | (int) tty, (int) buf, count); |
1196 | #endif | 1183 | #endif |
1197 | 1184 | ||
1198 | if ((tty == (struct tty_struct *) NULL) || | ||
1199 | (stl_tmpwritebuf == (char *) NULL)) | ||
1200 | return 0; | ||
1201 | portp = tty->driver_data; | 1185 | portp = tty->driver_data; |
1202 | if (portp == (stlport_t *) NULL) | 1186 | if (portp == (stlport_t *) NULL) |
1203 | return 0; | 1187 | return 0; |
@@ -1302,11 +1286,6 @@ static void stl_flushchars(struct tty_struct *tty) | |||
1302 | if (portp->tx.buf == (char *) NULL) | 1286 | if (portp->tx.buf == (char *) NULL) |
1303 | return; | 1287 | return; |
1304 | 1288 | ||
1305 | #if 0 | ||
1306 | if (tty->stopped || tty->hw_stopped || | ||
1307 | (portp->tx.head == portp->tx.tail)) | ||
1308 | return; | ||
1309 | #endif | ||
1310 | stl_startrxtx(portp, -1, 1); | 1289 | stl_startrxtx(portp, -1, 1); |
1311 | } | 1290 | } |
1312 | 1291 | ||
@@ -1977,12 +1956,14 @@ static int stl_eiointr(stlbrd_t *brdp) | |||
1977 | unsigned int iobase; | 1956 | unsigned int iobase; |
1978 | int handled = 0; | 1957 | int handled = 0; |
1979 | 1958 | ||
1959 | spin_lock(&brd_lock); | ||
1980 | panelp = brdp->panels[0]; | 1960 | panelp = brdp->panels[0]; |
1981 | iobase = panelp->iobase; | 1961 | iobase = panelp->iobase; |
1982 | while (inb(brdp->iostatus) & EIO_INTRPEND) { | 1962 | while (inb(brdp->iostatus) & EIO_INTRPEND) { |
1983 | handled = 1; | 1963 | handled = 1; |
1984 | (* panelp->isr)(panelp, iobase); | 1964 | (* panelp->isr)(panelp, iobase); |
1985 | } | 1965 | } |
1966 | spin_unlock(&brd_lock); | ||
1986 | return handled; | 1967 | return handled; |
1987 | } | 1968 | } |
1988 | 1969 | ||
@@ -2168,7 +2149,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp) | |||
2168 | portp = kzalloc(sizeof(stlport_t), GFP_KERNEL); | 2149 | portp = kzalloc(sizeof(stlport_t), GFP_KERNEL); |
2169 | if (!portp) { | 2150 | if (!portp) { |
2170 | printk("STALLION: failed to allocate memory " | 2151 | printk("STALLION: failed to allocate memory " |
2171 | "(size=%d)\n", sizeof(stlport_t)); | 2152 | "(size=%Zd)\n", sizeof(stlport_t)); |
2172 | break; | 2153 | break; |
2173 | } | 2154 | } |
2174 | 2155 | ||
@@ -2304,7 +2285,7 @@ static inline int stl_initeio(stlbrd_t *brdp) | |||
2304 | panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL); | 2285 | panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL); |
2305 | if (!panelp) { | 2286 | if (!panelp) { |
2306 | printk(KERN_WARNING "STALLION: failed to allocate memory " | 2287 | printk(KERN_WARNING "STALLION: failed to allocate memory " |
2307 | "(size=%d)\n", sizeof(stlpanel_t)); | 2288 | "(size=%Zd)\n", sizeof(stlpanel_t)); |
2308 | return -ENOMEM; | 2289 | return -ENOMEM; |
2309 | } | 2290 | } |
2310 | 2291 | ||
@@ -2478,7 +2459,7 @@ static inline int stl_initech(stlbrd_t *brdp) | |||
2478 | panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL); | 2459 | panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL); |
2479 | if (!panelp) { | 2460 | if (!panelp) { |
2480 | printk("STALLION: failed to allocate memory " | 2461 | printk("STALLION: failed to allocate memory " |
2481 | "(size=%d)\n", sizeof(stlpanel_t)); | 2462 | "(size=%Zd)\n", sizeof(stlpanel_t)); |
2482 | break; | 2463 | break; |
2483 | } | 2464 | } |
2484 | panelp->magic = STL_PANELMAGIC; | 2465 | panelp->magic = STL_PANELMAGIC; |
@@ -2879,8 +2860,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp) | |||
2879 | portp->stats.lflags = 0; | 2860 | portp->stats.lflags = 0; |
2880 | portp->stats.rxbuffered = 0; | 2861 | portp->stats.rxbuffered = 0; |
2881 | 2862 | ||
2882 | save_flags(flags); | 2863 | spin_lock_irqsave(&stallion_lock, flags); |
2883 | cli(); | ||
2884 | if (portp->tty != (struct tty_struct *) NULL) { | 2864 | if (portp->tty != (struct tty_struct *) NULL) { |
2885 | if (portp->tty->driver_data == portp) { | 2865 | if (portp->tty->driver_data == portp) { |
2886 | portp->stats.ttystate = portp->tty->flags; | 2866 | portp->stats.ttystate = portp->tty->flags; |
@@ -2894,7 +2874,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp) | |||
2894 | } | 2874 | } |
2895 | } | 2875 | } |
2896 | } | 2876 | } |
2897 | restore_flags(flags); | 2877 | spin_unlock_irqrestore(&stallion_lock, flags); |
2898 | 2878 | ||
2899 | head = portp->tx.head; | 2879 | head = portp->tx.head; |
2900 | tail = portp->tx.tail; | 2880 | tail = portp->tx.tail; |
@@ -3056,14 +3036,6 @@ static int __init stl_init(void) | |||
3056 | return -1; | 3036 | return -1; |
3057 | 3037 | ||
3058 | /* | 3038 | /* |
3059 | * Allocate a temporary write buffer. | ||
3060 | */ | ||
3061 | stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL); | ||
3062 | if (!stl_tmpwritebuf) | ||
3063 | printk("STALLION: failed to allocate memory (size=%d)\n", | ||
3064 | STL_TXBUFSIZE); | ||
3065 | |||
3066 | /* | ||
3067 | * Set up a character driver for per board stuff. This is mainly used | 3039 | * Set up a character driver for per board stuff. This is mainly used |
3068 | * to do stats ioctls on the ports. | 3040 | * to do stats ioctls on the ports. |
3069 | */ | 3041 | */ |
@@ -3147,11 +3119,13 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp) | |||
3147 | unsigned int gfrcr; | 3119 | unsigned int gfrcr; |
3148 | int chipmask, i, j; | 3120 | int chipmask, i, j; |
3149 | int nrchips, uartaddr, ioaddr; | 3121 | int nrchips, uartaddr, ioaddr; |
3122 | unsigned long flags; | ||
3150 | 3123 | ||
3151 | #ifdef DEBUG | 3124 | #ifdef DEBUG |
3152 | printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp); | 3125 | printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp); |
3153 | #endif | 3126 | #endif |
3154 | 3127 | ||
3128 | spin_lock_irqsave(&brd_lock, flags); | ||
3155 | BRDENABLE(panelp->brdnr, panelp->pagenr); | 3129 | BRDENABLE(panelp->brdnr, panelp->pagenr); |
3156 | 3130 | ||
3157 | /* | 3131 | /* |
@@ -3189,6 +3163,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp) | |||
3189 | } | 3163 | } |
3190 | 3164 | ||
3191 | BRDDISABLE(panelp->brdnr); | 3165 | BRDDISABLE(panelp->brdnr); |
3166 | spin_unlock_irqrestore(&brd_lock, flags); | ||
3192 | return chipmask; | 3167 | return chipmask; |
3193 | } | 3168 | } |
3194 | 3169 | ||
@@ -3200,6 +3175,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp) | |||
3200 | 3175 | ||
3201 | static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp) | 3176 | static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp) |
3202 | { | 3177 | { |
3178 | unsigned long flags; | ||
3203 | #ifdef DEBUG | 3179 | #ifdef DEBUG |
3204 | printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n", | 3180 | printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n", |
3205 | (int) brdp, (int) panelp, (int) portp); | 3181 | (int) brdp, (int) panelp, (int) portp); |
@@ -3209,6 +3185,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po | |||
3209 | (portp == (stlport_t *) NULL)) | 3185 | (portp == (stlport_t *) NULL)) |
3210 | return; | 3186 | return; |
3211 | 3187 | ||
3188 | spin_lock_irqsave(&brd_lock, flags); | ||
3212 | portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) || | 3189 | portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) || |
3213 | (portp->portnr < 8)) ? 0 : EREG_BANKSIZE); | 3190 | (portp->portnr < 8)) ? 0 : EREG_BANKSIZE); |
3214 | portp->uartaddr = (portp->portnr & 0x04) << 5; | 3191 | portp->uartaddr = (portp->portnr & 0x04) << 5; |
@@ -3219,6 +3196,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po | |||
3219 | stl_cd1400setreg(portp, LIVR, (portp->portnr << 3)); | 3196 | stl_cd1400setreg(portp, LIVR, (portp->portnr << 3)); |
3220 | portp->hwid = stl_cd1400getreg(portp, GFRCR); | 3197 | portp->hwid = stl_cd1400getreg(portp, GFRCR); |
3221 | BRDDISABLE(portp->brdnr); | 3198 | BRDDISABLE(portp->brdnr); |
3199 | spin_unlock_irqrestore(&brd_lock, flags); | ||
3222 | } | 3200 | } |
3223 | 3201 | ||
3224 | /*****************************************************************************/ | 3202 | /*****************************************************************************/ |
@@ -3428,8 +3406,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp) | |||
3428 | tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]); | 3406 | tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]); |
3429 | #endif | 3407 | #endif |
3430 | 3408 | ||
3431 | save_flags(flags); | 3409 | spin_lock_irqsave(&brd_lock, flags); |
3432 | cli(); | ||
3433 | BRDENABLE(portp->brdnr, portp->pagenr); | 3410 | BRDENABLE(portp->brdnr, portp->pagenr); |
3434 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3)); | 3411 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3)); |
3435 | srer = stl_cd1400getreg(portp, SRER); | 3412 | srer = stl_cd1400getreg(portp, SRER); |
@@ -3466,7 +3443,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp) | |||
3466 | portp->sigs &= ~TIOCM_CD; | 3443 | portp->sigs &= ~TIOCM_CD; |
3467 | stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron)); | 3444 | stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron)); |
3468 | BRDDISABLE(portp->brdnr); | 3445 | BRDDISABLE(portp->brdnr); |
3469 | restore_flags(flags); | 3446 | spin_unlock_irqrestore(&brd_lock, flags); |
3470 | } | 3447 | } |
3471 | 3448 | ||
3472 | /*****************************************************************************/ | 3449 | /*****************************************************************************/ |
@@ -3492,8 +3469,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts) | |||
3492 | if (rts > 0) | 3469 | if (rts > 0) |
3493 | msvr2 = MSVR2_RTS; | 3470 | msvr2 = MSVR2_RTS; |
3494 | 3471 | ||
3495 | save_flags(flags); | 3472 | spin_lock_irqsave(&brd_lock, flags); |
3496 | cli(); | ||
3497 | BRDENABLE(portp->brdnr, portp->pagenr); | 3473 | BRDENABLE(portp->brdnr, portp->pagenr); |
3498 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3474 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3499 | if (rts >= 0) | 3475 | if (rts >= 0) |
@@ -3501,7 +3477,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts) | |||
3501 | if (dtr >= 0) | 3477 | if (dtr >= 0) |
3502 | stl_cd1400setreg(portp, MSVR1, msvr1); | 3478 | stl_cd1400setreg(portp, MSVR1, msvr1); |
3503 | BRDDISABLE(portp->brdnr); | 3479 | BRDDISABLE(portp->brdnr); |
3504 | restore_flags(flags); | 3480 | spin_unlock_irqrestore(&brd_lock, flags); |
3505 | } | 3481 | } |
3506 | 3482 | ||
3507 | /*****************************************************************************/ | 3483 | /*****************************************************************************/ |
@@ -3520,14 +3496,13 @@ static int stl_cd1400getsignals(stlport_t *portp) | |||
3520 | printk("stl_cd1400getsignals(portp=%x)\n", (int) portp); | 3496 | printk("stl_cd1400getsignals(portp=%x)\n", (int) portp); |
3521 | #endif | 3497 | #endif |
3522 | 3498 | ||
3523 | save_flags(flags); | 3499 | spin_lock_irqsave(&brd_lock, flags); |
3524 | cli(); | ||
3525 | BRDENABLE(portp->brdnr, portp->pagenr); | 3500 | BRDENABLE(portp->brdnr, portp->pagenr); |
3526 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3501 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3527 | msvr1 = stl_cd1400getreg(portp, MSVR1); | 3502 | msvr1 = stl_cd1400getreg(portp, MSVR1); |
3528 | msvr2 = stl_cd1400getreg(portp, MSVR2); | 3503 | msvr2 = stl_cd1400getreg(portp, MSVR2); |
3529 | BRDDISABLE(portp->brdnr); | 3504 | BRDDISABLE(portp->brdnr); |
3530 | restore_flags(flags); | 3505 | spin_unlock_irqrestore(&brd_lock, flags); |
3531 | 3506 | ||
3532 | sigs = 0; | 3507 | sigs = 0; |
3533 | sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0; | 3508 | sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0; |
@@ -3569,15 +3544,14 @@ static void stl_cd1400enablerxtx(stlport_t *portp, int rx, int tx) | |||
3569 | else if (rx > 0) | 3544 | else if (rx > 0) |
3570 | ccr |= CCR_RXENABLE; | 3545 | ccr |= CCR_RXENABLE; |
3571 | 3546 | ||
3572 | save_flags(flags); | 3547 | spin_lock_irqsave(&brd_lock, flags); |
3573 | cli(); | ||
3574 | BRDENABLE(portp->brdnr, portp->pagenr); | 3548 | BRDENABLE(portp->brdnr, portp->pagenr); |
3575 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3549 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3576 | stl_cd1400ccrwait(portp); | 3550 | stl_cd1400ccrwait(portp); |
3577 | stl_cd1400setreg(portp, CCR, ccr); | 3551 | stl_cd1400setreg(portp, CCR, ccr); |
3578 | stl_cd1400ccrwait(portp); | 3552 | stl_cd1400ccrwait(portp); |
3579 | BRDDISABLE(portp->brdnr); | 3553 | BRDDISABLE(portp->brdnr); |
3580 | restore_flags(flags); | 3554 | spin_unlock_irqrestore(&brd_lock, flags); |
3581 | } | 3555 | } |
3582 | 3556 | ||
3583 | /*****************************************************************************/ | 3557 | /*****************************************************************************/ |
@@ -3609,8 +3583,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx) | |||
3609 | else if (rx > 0) | 3583 | else if (rx > 0) |
3610 | sreron |= SRER_RXDATA; | 3584 | sreron |= SRER_RXDATA; |
3611 | 3585 | ||
3612 | save_flags(flags); | 3586 | spin_lock_irqsave(&brd_lock, flags); |
3613 | cli(); | ||
3614 | BRDENABLE(portp->brdnr, portp->pagenr); | 3587 | BRDENABLE(portp->brdnr, portp->pagenr); |
3615 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3588 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3616 | stl_cd1400setreg(portp, SRER, | 3589 | stl_cd1400setreg(portp, SRER, |
@@ -3618,7 +3591,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx) | |||
3618 | BRDDISABLE(portp->brdnr); | 3591 | BRDDISABLE(portp->brdnr); |
3619 | if (tx > 0) | 3592 | if (tx > 0) |
3620 | set_bit(ASYI_TXBUSY, &portp->istate); | 3593 | set_bit(ASYI_TXBUSY, &portp->istate); |
3621 | restore_flags(flags); | 3594 | spin_unlock_irqrestore(&brd_lock, flags); |
3622 | } | 3595 | } |
3623 | 3596 | ||
3624 | /*****************************************************************************/ | 3597 | /*****************************************************************************/ |
@@ -3634,13 +3607,12 @@ static void stl_cd1400disableintrs(stlport_t *portp) | |||
3634 | #ifdef DEBUG | 3607 | #ifdef DEBUG |
3635 | printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp); | 3608 | printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp); |
3636 | #endif | 3609 | #endif |
3637 | save_flags(flags); | 3610 | spin_lock_irqsave(&brd_lock, flags); |
3638 | cli(); | ||
3639 | BRDENABLE(portp->brdnr, portp->pagenr); | 3611 | BRDENABLE(portp->brdnr, portp->pagenr); |
3640 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3612 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3641 | stl_cd1400setreg(portp, SRER, 0); | 3613 | stl_cd1400setreg(portp, SRER, 0); |
3642 | BRDDISABLE(portp->brdnr); | 3614 | BRDDISABLE(portp->brdnr); |
3643 | restore_flags(flags); | 3615 | spin_unlock_irqrestore(&brd_lock, flags); |
3644 | } | 3616 | } |
3645 | 3617 | ||
3646 | /*****************************************************************************/ | 3618 | /*****************************************************************************/ |
@@ -3653,8 +3625,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len) | |||
3653 | printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len); | 3625 | printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len); |
3654 | #endif | 3626 | #endif |
3655 | 3627 | ||
3656 | save_flags(flags); | 3628 | spin_lock_irqsave(&brd_lock, flags); |
3657 | cli(); | ||
3658 | BRDENABLE(portp->brdnr, portp->pagenr); | 3629 | BRDENABLE(portp->brdnr, portp->pagenr); |
3659 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3630 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3660 | stl_cd1400setreg(portp, SRER, | 3631 | stl_cd1400setreg(portp, SRER, |
@@ -3664,7 +3635,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len) | |||
3664 | portp->brklen = len; | 3635 | portp->brklen = len; |
3665 | if (len == 1) | 3636 | if (len == 1) |
3666 | portp->stats.txbreaks++; | 3637 | portp->stats.txbreaks++; |
3667 | restore_flags(flags); | 3638 | spin_unlock_irqrestore(&brd_lock, flags); |
3668 | } | 3639 | } |
3669 | 3640 | ||
3670 | /*****************************************************************************/ | 3641 | /*****************************************************************************/ |
@@ -3688,8 +3659,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state) | |||
3688 | if (tty == (struct tty_struct *) NULL) | 3659 | if (tty == (struct tty_struct *) NULL) |
3689 | return; | 3660 | return; |
3690 | 3661 | ||
3691 | save_flags(flags); | 3662 | spin_lock_irqsave(&brd_lock, flags); |
3692 | cli(); | ||
3693 | BRDENABLE(portp->brdnr, portp->pagenr); | 3663 | BRDENABLE(portp->brdnr, portp->pagenr); |
3694 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3664 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3695 | 3665 | ||
@@ -3729,7 +3699,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state) | |||
3729 | } | 3699 | } |
3730 | 3700 | ||
3731 | BRDDISABLE(portp->brdnr); | 3701 | BRDDISABLE(portp->brdnr); |
3732 | restore_flags(flags); | 3702 | spin_unlock_irqrestore(&brd_lock, flags); |
3733 | } | 3703 | } |
3734 | 3704 | ||
3735 | /*****************************************************************************/ | 3705 | /*****************************************************************************/ |
@@ -3753,8 +3723,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state) | |||
3753 | if (tty == (struct tty_struct *) NULL) | 3723 | if (tty == (struct tty_struct *) NULL) |
3754 | return; | 3724 | return; |
3755 | 3725 | ||
3756 | save_flags(flags); | 3726 | spin_lock_irqsave(&brd_lock, flags); |
3757 | cli(); | ||
3758 | BRDENABLE(portp->brdnr, portp->pagenr); | 3727 | BRDENABLE(portp->brdnr, portp->pagenr); |
3759 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3728 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3760 | if (state) { | 3729 | if (state) { |
@@ -3769,7 +3738,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state) | |||
3769 | stl_cd1400ccrwait(portp); | 3738 | stl_cd1400ccrwait(portp); |
3770 | } | 3739 | } |
3771 | BRDDISABLE(portp->brdnr); | 3740 | BRDDISABLE(portp->brdnr); |
3772 | restore_flags(flags); | 3741 | spin_unlock_irqrestore(&brd_lock, flags); |
3773 | } | 3742 | } |
3774 | 3743 | ||
3775 | /*****************************************************************************/ | 3744 | /*****************************************************************************/ |
@@ -3785,8 +3754,7 @@ static void stl_cd1400flush(stlport_t *portp) | |||
3785 | if (portp == (stlport_t *) NULL) | 3754 | if (portp == (stlport_t *) NULL) |
3786 | return; | 3755 | return; |
3787 | 3756 | ||
3788 | save_flags(flags); | 3757 | spin_lock_irqsave(&brd_lock, flags); |
3789 | cli(); | ||
3790 | BRDENABLE(portp->brdnr, portp->pagenr); | 3758 | BRDENABLE(portp->brdnr, portp->pagenr); |
3791 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); | 3759 | stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); |
3792 | stl_cd1400ccrwait(portp); | 3760 | stl_cd1400ccrwait(portp); |
@@ -3794,7 +3762,7 @@ static void stl_cd1400flush(stlport_t *portp) | |||
3794 | stl_cd1400ccrwait(portp); | 3762 | stl_cd1400ccrwait(portp); |
3795 | portp->tx.tail = portp->tx.head; | 3763 | portp->tx.tail = portp->tx.head; |
3796 | BRDDISABLE(portp->brdnr); | 3764 | BRDDISABLE(portp->brdnr); |
3797 | restore_flags(flags); | 3765 | spin_unlock_irqrestore(&brd_lock, flags); |
3798 | } | 3766 | } |
3799 | 3767 | ||
3800 | /*****************************************************************************/ | 3768 | /*****************************************************************************/ |
@@ -3833,6 +3801,7 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase) | |||
3833 | (int) panelp, iobase); | 3801 | (int) panelp, iobase); |
3834 | #endif | 3802 | #endif |
3835 | 3803 | ||
3804 | spin_lock(&brd_lock); | ||
3836 | outb(SVRR, iobase); | 3805 | outb(SVRR, iobase); |
3837 | svrtype = inb(iobase + EREG_DATA); | 3806 | svrtype = inb(iobase + EREG_DATA); |
3838 | if (panelp->nrports > 4) { | 3807 | if (panelp->nrports > 4) { |
@@ -3846,6 +3815,8 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase) | |||
3846 | stl_cd1400txisr(panelp, iobase); | 3815 | stl_cd1400txisr(panelp, iobase); |
3847 | else if (svrtype & SVRR_MDM) | 3816 | else if (svrtype & SVRR_MDM) |
3848 | stl_cd1400mdmisr(panelp, iobase); | 3817 | stl_cd1400mdmisr(panelp, iobase); |
3818 | |||
3819 | spin_unlock(&brd_lock); | ||
3849 | } | 3820 | } |
3850 | 3821 | ||
3851 | /*****************************************************************************/ | 3822 | /*****************************************************************************/ |
@@ -4433,8 +4404,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp) | |||
4433 | tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]); | 4404 | tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]); |
4434 | #endif | 4405 | #endif |
4435 | 4406 | ||
4436 | save_flags(flags); | 4407 | spin_lock_irqsave(&brd_lock, flags); |
4437 | cli(); | ||
4438 | BRDENABLE(portp->brdnr, portp->pagenr); | 4408 | BRDENABLE(portp->brdnr, portp->pagenr); |
4439 | stl_sc26198setreg(portp, IMR, 0); | 4409 | stl_sc26198setreg(portp, IMR, 0); |
4440 | stl_sc26198updatereg(portp, MR0, mr0); | 4410 | stl_sc26198updatereg(portp, MR0, mr0); |
@@ -4461,7 +4431,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp) | |||
4461 | portp->imr = (portp->imr & ~imroff) | imron; | 4431 | portp->imr = (portp->imr & ~imroff) | imron; |
4462 | stl_sc26198setreg(portp, IMR, portp->imr); | 4432 | stl_sc26198setreg(portp, IMR, portp->imr); |
4463 | BRDDISABLE(portp->brdnr); | 4433 | BRDDISABLE(portp->brdnr); |
4464 | restore_flags(flags); | 4434 | spin_unlock_irqrestore(&brd_lock, flags); |
4465 | } | 4435 | } |
4466 | 4436 | ||
4467 | /*****************************************************************************/ | 4437 | /*****************************************************************************/ |
@@ -4491,13 +4461,12 @@ static void stl_sc26198setsignals(stlport_t *portp, int dtr, int rts) | |||
4491 | else if (rts > 0) | 4461 | else if (rts > 0) |
4492 | iopioron |= IPR_RTS; | 4462 | iopioron |= IPR_RTS; |
4493 | 4463 | ||
4494 | save_flags(flags); | 4464 | spin_lock_irqsave(&brd_lock, flags); |
4495 | cli(); | ||
4496 | BRDENABLE(portp->brdnr, portp->pagenr); | 4465 | BRDENABLE(portp->brdnr, portp->pagenr); |
4497 | stl_sc26198setreg(portp, IOPIOR, | 4466 | stl_sc26198setreg(portp, IOPIOR, |
4498 | ((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron)); | 4467 | ((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron)); |
4499 | BRDDISABLE(portp->brdnr); | 4468 | BRDDISABLE(portp->brdnr); |
4500 | restore_flags(flags); | 4469 | spin_unlock_irqrestore(&brd_lock, flags); |
4501 | } | 4470 | } |
4502 | 4471 | ||
4503 | /*****************************************************************************/ | 4472 | /*****************************************************************************/ |
@@ -4516,12 +4485,11 @@ static int stl_sc26198getsignals(stlport_t *portp) | |||
4516 | printk("stl_sc26198getsignals(portp=%x)\n", (int) portp); | 4485 | printk("stl_sc26198getsignals(portp=%x)\n", (int) portp); |
4517 | #endif | 4486 | #endif |
4518 | 4487 | ||
4519 | save_flags(flags); | 4488 | spin_lock_irqsave(&brd_lock, flags); |
4520 | cli(); | ||
4521 | BRDENABLE(portp->brdnr, portp->pagenr); | 4489 | BRDENABLE(portp->brdnr, portp->pagenr); |
4522 | ipr = stl_sc26198getreg(portp, IPR); | 4490 | ipr = stl_sc26198getreg(portp, IPR); |
4523 | BRDDISABLE(portp->brdnr); | 4491 | BRDDISABLE(portp->brdnr); |
4524 | restore_flags(flags); | 4492 | spin_unlock_irqrestore(&brd_lock, flags); |
4525 | 4493 | ||
4526 | sigs = 0; | 4494 | sigs = 0; |
4527 | sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD; | 4495 | sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD; |
@@ -4558,13 +4526,12 @@ static void stl_sc26198enablerxtx(stlport_t *portp, int rx, int tx) | |||
4558 | else if (rx > 0) | 4526 | else if (rx > 0) |
4559 | ccr |= CR_RXENABLE; | 4527 | ccr |= CR_RXENABLE; |
4560 | 4528 | ||
4561 | save_flags(flags); | 4529 | spin_lock_irqsave(&brd_lock, flags); |
4562 | cli(); | ||
4563 | BRDENABLE(portp->brdnr, portp->pagenr); | 4530 | BRDENABLE(portp->brdnr, portp->pagenr); |
4564 | stl_sc26198setreg(portp, SCCR, ccr); | 4531 | stl_sc26198setreg(portp, SCCR, ccr); |
4565 | BRDDISABLE(portp->brdnr); | 4532 | BRDDISABLE(portp->brdnr); |
4566 | portp->crenable = ccr; | 4533 | portp->crenable = ccr; |
4567 | restore_flags(flags); | 4534 | spin_unlock_irqrestore(&brd_lock, flags); |
4568 | } | 4535 | } |
4569 | 4536 | ||
4570 | /*****************************************************************************/ | 4537 | /*****************************************************************************/ |
@@ -4593,15 +4560,14 @@ static void stl_sc26198startrxtx(stlport_t *portp, int rx, int tx) | |||
4593 | else if (rx > 0) | 4560 | else if (rx > 0) |
4594 | imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG; | 4561 | imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG; |
4595 | 4562 | ||
4596 | save_flags(flags); | 4563 | spin_lock_irqsave(&brd_lock, flags); |
4597 | cli(); | ||
4598 | BRDENABLE(portp->brdnr, portp->pagenr); | 4564 | BRDENABLE(portp->brdnr, portp->pagenr); |
4599 | stl_sc26198setreg(portp, IMR, imr); | 4565 | stl_sc26198setreg(portp, IMR, imr); |
4600 | BRDDISABLE(portp->brdnr); | 4566 | BRDDISABLE(portp->brdnr); |
4601 | portp->imr = imr; | 4567 | portp->imr = imr; |
4602 | if (tx > 0) | 4568 | if (tx > 0) |
4603 | set_bit(ASYI_TXBUSY, &portp->istate); | 4569 | set_bit(ASYI_TXBUSY, &portp->istate); |
4604 | restore_flags(flags); | 4570 | spin_unlock_irqrestore(&brd_lock, flags); |
4605 | } | 4571 | } |
4606 | 4572 | ||
4607 | /*****************************************************************************/ | 4573 | /*****************************************************************************/ |
@@ -4618,13 +4584,12 @@ static void stl_sc26198disableintrs(stlport_t *portp) | |||
4618 | printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp); | 4584 | printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp); |
4619 | #endif | 4585 | #endif |
4620 | 4586 | ||
4621 | save_flags(flags); | 4587 | spin_lock_irqsave(&brd_lock, flags); |
4622 | cli(); | ||
4623 | BRDENABLE(portp->brdnr, portp->pagenr); | 4588 | BRDENABLE(portp->brdnr, portp->pagenr); |
4624 | portp->imr = 0; | 4589 | portp->imr = 0; |
4625 | stl_sc26198setreg(portp, IMR, 0); | 4590 | stl_sc26198setreg(portp, IMR, 0); |
4626 | BRDDISABLE(portp->brdnr); | 4591 | BRDDISABLE(portp->brdnr); |
4627 | restore_flags(flags); | 4592 | spin_unlock_irqrestore(&brd_lock, flags); |
4628 | } | 4593 | } |
4629 | 4594 | ||
4630 | /*****************************************************************************/ | 4595 | /*****************************************************************************/ |
@@ -4637,8 +4602,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len) | |||
4637 | printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len); | 4602 | printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len); |
4638 | #endif | 4603 | #endif |
4639 | 4604 | ||
4640 | save_flags(flags); | 4605 | spin_lock_irqsave(&brd_lock, flags); |
4641 | cli(); | ||
4642 | BRDENABLE(portp->brdnr, portp->pagenr); | 4606 | BRDENABLE(portp->brdnr, portp->pagenr); |
4643 | if (len == 1) { | 4607 | if (len == 1) { |
4644 | stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK); | 4608 | stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK); |
@@ -4647,7 +4611,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len) | |||
4647 | stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK); | 4611 | stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK); |
4648 | } | 4612 | } |
4649 | BRDDISABLE(portp->brdnr); | 4613 | BRDDISABLE(portp->brdnr); |
4650 | restore_flags(flags); | 4614 | spin_unlock_irqrestore(&brd_lock, flags); |
4651 | } | 4615 | } |
4652 | 4616 | ||
4653 | /*****************************************************************************/ | 4617 | /*****************************************************************************/ |
@@ -4672,8 +4636,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state) | |||
4672 | if (tty == (struct tty_struct *) NULL) | 4636 | if (tty == (struct tty_struct *) NULL) |
4673 | return; | 4637 | return; |
4674 | 4638 | ||
4675 | save_flags(flags); | 4639 | spin_lock_irqsave(&brd_lock, flags); |
4676 | cli(); | ||
4677 | BRDENABLE(portp->brdnr, portp->pagenr); | 4640 | BRDENABLE(portp->brdnr, portp->pagenr); |
4678 | 4641 | ||
4679 | if (state) { | 4642 | if (state) { |
@@ -4719,7 +4682,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state) | |||
4719 | } | 4682 | } |
4720 | 4683 | ||
4721 | BRDDISABLE(portp->brdnr); | 4684 | BRDDISABLE(portp->brdnr); |
4722 | restore_flags(flags); | 4685 | spin_unlock_irqrestore(&brd_lock, flags); |
4723 | } | 4686 | } |
4724 | 4687 | ||
4725 | /*****************************************************************************/ | 4688 | /*****************************************************************************/ |
@@ -4744,8 +4707,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state) | |||
4744 | if (tty == (struct tty_struct *) NULL) | 4707 | if (tty == (struct tty_struct *) NULL) |
4745 | return; | 4708 | return; |
4746 | 4709 | ||
4747 | save_flags(flags); | 4710 | spin_lock_irqsave(&brd_lock, flags); |
4748 | cli(); | ||
4749 | BRDENABLE(portp->brdnr, portp->pagenr); | 4711 | BRDENABLE(portp->brdnr, portp->pagenr); |
4750 | if (state) { | 4712 | if (state) { |
4751 | mr0 = stl_sc26198getreg(portp, MR0); | 4713 | mr0 = stl_sc26198getreg(portp, MR0); |
@@ -4765,7 +4727,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state) | |||
4765 | stl_sc26198setreg(portp, MR0, mr0); | 4727 | stl_sc26198setreg(portp, MR0, mr0); |
4766 | } | 4728 | } |
4767 | BRDDISABLE(portp->brdnr); | 4729 | BRDDISABLE(portp->brdnr); |
4768 | restore_flags(flags); | 4730 | spin_unlock_irqrestore(&brd_lock, flags); |
4769 | } | 4731 | } |
4770 | 4732 | ||
4771 | /*****************************************************************************/ | 4733 | /*****************************************************************************/ |
@@ -4781,14 +4743,13 @@ static void stl_sc26198flush(stlport_t *portp) | |||
4781 | if (portp == (stlport_t *) NULL) | 4743 | if (portp == (stlport_t *) NULL) |
4782 | return; | 4744 | return; |
4783 | 4745 | ||
4784 | save_flags(flags); | 4746 | spin_lock_irqsave(&brd_lock, flags); |
4785 | cli(); | ||
4786 | BRDENABLE(portp->brdnr, portp->pagenr); | 4747 | BRDENABLE(portp->brdnr, portp->pagenr); |
4787 | stl_sc26198setreg(portp, SCCR, CR_TXRESET); | 4748 | stl_sc26198setreg(portp, SCCR, CR_TXRESET); |
4788 | stl_sc26198setreg(portp, SCCR, portp->crenable); | 4749 | stl_sc26198setreg(portp, SCCR, portp->crenable); |
4789 | BRDDISABLE(portp->brdnr); | 4750 | BRDDISABLE(portp->brdnr); |
4790 | portp->tx.tail = portp->tx.head; | 4751 | portp->tx.tail = portp->tx.head; |
4791 | restore_flags(flags); | 4752 | spin_unlock_irqrestore(&brd_lock, flags); |
4792 | } | 4753 | } |
4793 | 4754 | ||
4794 | /*****************************************************************************/ | 4755 | /*****************************************************************************/ |
@@ -4815,12 +4776,11 @@ static int stl_sc26198datastate(stlport_t *portp) | |||
4815 | if (test_bit(ASYI_TXBUSY, &portp->istate)) | 4776 | if (test_bit(ASYI_TXBUSY, &portp->istate)) |
4816 | return 1; | 4777 | return 1; |
4817 | 4778 | ||
4818 | save_flags(flags); | 4779 | spin_lock_irqsave(&brd_lock, flags); |
4819 | cli(); | ||
4820 | BRDENABLE(portp->brdnr, portp->pagenr); | 4780 | BRDENABLE(portp->brdnr, portp->pagenr); |
4821 | sr = stl_sc26198getreg(portp, SR); | 4781 | sr = stl_sc26198getreg(portp, SR); |
4822 | BRDDISABLE(portp->brdnr); | 4782 | BRDDISABLE(portp->brdnr); |
4823 | restore_flags(flags); | 4783 | spin_unlock_irqrestore(&brd_lock, flags); |
4824 | 4784 | ||
4825 | return (sr & SR_TXEMPTY) ? 0 : 1; | 4785 | return (sr & SR_TXEMPTY) ? 0 : 1; |
4826 | } | 4786 | } |
@@ -4878,6 +4838,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase) | |||
4878 | stlport_t *portp; | 4838 | stlport_t *portp; |
4879 | unsigned int iack; | 4839 | unsigned int iack; |
4880 | 4840 | ||
4841 | spin_lock(&brd_lock); | ||
4842 | |||
4881 | /* | 4843 | /* |
4882 | * Work around bug in sc26198 chip... Cannot have A6 address | 4844 | * Work around bug in sc26198 chip... Cannot have A6 address |
4883 | * line of UART high, else iack will be returned as 0. | 4845 | * line of UART high, else iack will be returned as 0. |
@@ -4893,6 +4855,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase) | |||
4893 | stl_sc26198txisr(portp); | 4855 | stl_sc26198txisr(portp); |
4894 | else | 4856 | else |
4895 | stl_sc26198otherisr(portp, iack); | 4857 | stl_sc26198otherisr(portp, iack); |
4858 | |||
4859 | spin_unlock(&brd_lock); | ||
4896 | } | 4860 | } |
4897 | 4861 | ||
4898 | /*****************************************************************************/ | 4862 | /*****************************************************************************/ |
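The stallion hunks above all apply the same conversion: the old save_flags()/cli()/restore_flags() global interrupt disabling around board register accesses is replaced by one driver-wide brd_lock spinlock, taken with spin_lock_irqsave() in process context and with the plain spin_lock()/spin_unlock() pair around the whole body of the two interrupt service routines. A minimal sketch of the pattern, assuming brd_lock is declared once at file scope elsewhere in the driver (function names here are illustrative, not from the patch):

        static DEFINE_SPINLOCK(brd_lock);

        /* Process context: save and disable local interrupts while holding the lock. */
        static void example_port_op(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&brd_lock, flags);
                /* BRDENABLE(); program the UART registers; BRDDISABLE(); */
                spin_unlock_irqrestore(&brd_lock, flags);
        }

        /* Interrupt service path: the plain lock/unlock pair brackets the whole
         * handler body, matching the new stl_cd1400eiointr()/stl_sc26198intr(). */
        static void example_isr_body(void)
        {
                spin_lock(&brd_lock);
                /* read SVRR / iack and dispatch to the rx/tx/modem service routines */
                spin_unlock(&brd_lock);
        }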
diff --git a/drivers/char/sx.c b/drivers/char/sx.c index 3b4747230270..76b9107f7f81 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c | |||
@@ -2320,7 +2320,7 @@ static int sx_init_portstructs (int nboards, int nports) | |||
2320 | #ifdef NEW_WRITE_LOCKING | 2320 | #ifdef NEW_WRITE_LOCKING |
2321 | port->gs.port_write_mutex = MUTEX; | 2321 | port->gs.port_write_mutex = MUTEX; |
2322 | #endif | 2322 | #endif |
2323 | port->gs.driver_lock = SPIN_LOCK_UNLOCKED; | 2323 | spin_lock_init(&port->gs.driver_lock); |
2324 | /* | 2324 | /* |
2325 | * Initializing wait queue | 2325 | * Initializing wait queue |
2326 | */ | 2326 | */ |
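The sx.c change replaces the deprecated SPIN_LOCK_UNLOCKED static initializer with a runtime spin_lock_init() call, the required form for locks embedded in dynamically allocated per-port structures. A small sketch of the idiom (structure and field names are illustrative only):

        struct example_port {
                spinlock_t driver_lock;
                /* ... */
        };

        static void example_init_port(struct example_port *port)
        {
                /* runtime initialization instead of a static initializer */
                spin_lock_init(&port->driver_lock);
        }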
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 8b2a59969868..bd74e82d8a72 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -2621,10 +2621,9 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
2621 | tty->driver->break_ctl(tty, 0); | 2621 | tty->driver->break_ctl(tty, 0); |
2622 | return 0; | 2622 | return 0; |
2623 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | 2623 | case TCSBRK: /* SVID version: non-zero arg --> no break */ |
2624 | /* | 2624 | /* non-zero arg means wait for all output data |
2625 | * XXX is the above comment correct, or the | 2625 | * to be sent (performed above) but don't send break. |
2626 | * code below correct? Is this ioctl used at | 2626 | * This is used by the tcdrain() termios function. |
2627 | * all by anyone? | ||
2628 | */ | 2627 | */ |
2629 | if (!arg) | 2628 | if (!arg) |
2630 | return send_break(tty, 250); | 2629 | return send_break(tty, 250); |
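The replacement comment settles the old XXX question: a non-zero TCSBRK argument is the SVID "wait for output, send no break" case that the C library's tcdrain() relies on (the draining itself is performed earlier in tty_ioctl()), while a zero argument sends a real break of roughly 250 ms. A hedged user-space illustration of the two cases:

        #include <sys/ioctl.h>
        #include <termios.h>

        ioctl(fd, TCSBRK, 1);   /* non-zero arg: drain output, no break (tcdrain-style) */
        ioctl(fd, TCSBRK, 0);   /* zero arg: send a break (about 250 ms) */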
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 44d1eca83a72..35e0b9ceecf7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1497,6 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1497 | } | 1497 | } |
1498 | EXPORT_SYMBOL(cpufreq_update_policy); | 1498 | EXPORT_SYMBOL(cpufreq_update_policy); |
1499 | 1499 | ||
1500 | #ifdef CONFIG_HOTPLUG_CPU | ||
1500 | static int cpufreq_cpu_callback(struct notifier_block *nfb, | 1501 | static int cpufreq_cpu_callback(struct notifier_block *nfb, |
1501 | unsigned long action, void *hcpu) | 1502 | unsigned long action, void *hcpu) |
1502 | { | 1503 | { |
@@ -1532,10 +1533,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1532 | return NOTIFY_OK; | 1533 | return NOTIFY_OK; |
1533 | } | 1534 | } |
1534 | 1535 | ||
1535 | static struct notifier_block cpufreq_cpu_notifier = | 1536 | static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = |
1536 | { | 1537 | { |
1537 | .notifier_call = cpufreq_cpu_callback, | 1538 | .notifier_call = cpufreq_cpu_callback, |
1538 | }; | 1539 | }; |
1540 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
1539 | 1541 | ||
1540 | /********************************************************************* | 1542 | /********************************************************************* |
1541 | * REGISTER / UNREGISTER CPUFREQ DRIVER * | 1543 | * REGISTER / UNREGISTER CPUFREQ DRIVER * |
@@ -1596,7 +1598,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1596 | } | 1598 | } |
1597 | 1599 | ||
1598 | if (!ret) { | 1600 | if (!ret) { |
1599 | register_cpu_notifier(&cpufreq_cpu_notifier); | 1601 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
1600 | dprintk("driver %s up and running\n", driver_data->name); | 1602 | dprintk("driver %s up and running\n", driver_data->name); |
1601 | cpufreq_debug_enable_ratelimit(); | 1603 | cpufreq_debug_enable_ratelimit(); |
1602 | } | 1604 | } |
@@ -1628,7 +1630,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
1628 | dprintk("unregistering driver %s\n", driver->name); | 1630 | dprintk("unregistering driver %s\n", driver->name); |
1629 | 1631 | ||
1630 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | 1632 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); |
1631 | unregister_cpu_notifier(&cpufreq_cpu_notifier); | 1633 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); |
1632 | 1634 | ||
1633 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 1635 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
1634 | cpufreq_driver = NULL; | 1636 | cpufreq_driver = NULL; |
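The cpufreq changes (mirrored in cpufreq_stats.c below) guard the hotplug callback and its notifier_block with CONFIG_HOTPLUG_CPU, mark the notifier __cpuinitdata, and switch registration to the register_hotcpu_notifier()/unregister_hotcpu_notifier() helpers while leaving the call sites themselves unguarded, as in the patch. A sketch of the resulting shape (names are illustrative):

        #ifdef CONFIG_HOTPLUG_CPU
        static int example_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
        {
                /* handle CPU_ONLINE / CPU_DOWN_PREPARE / CPU_DEAD as needed */
                return NOTIFY_OK;
        }

        static struct notifier_block __cpuinitdata example_cpu_notifier = {
                .notifier_call = example_cpu_callback,
        };
        #endif /* CONFIG_HOTPLUG_CPU */

        /* driver init path:     register_hotcpu_notifier(&example_cpu_notifier);   */
        /* driver teardown path: unregister_hotcpu_notifier(&example_cpu_notifier); */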
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index c576c0b3f452..145061b8472a 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -350,7 +350,7 @@ __init cpufreq_stats_init(void) | |||
350 | return ret; | 350 | return ret; |
351 | } | 351 | } |
352 | 352 | ||
353 | register_cpu_notifier(&cpufreq_stat_cpu_notifier); | 353 | register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); |
354 | lock_cpu_hotplug(); | 354 | lock_cpu_hotplug(); |
355 | for_each_online_cpu(cpu) { | 355 | for_each_online_cpu(cpu) { |
356 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, | 356 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, |
@@ -368,7 +368,7 @@ __exit cpufreq_stats_exit(void) | |||
368 | CPUFREQ_POLICY_NOTIFIER); | 368 | CPUFREQ_POLICY_NOTIFIER); |
369 | cpufreq_unregister_notifier(¬ifier_trans_block, | 369 | cpufreq_unregister_notifier(¬ifier_trans_block, |
370 | CPUFREQ_TRANSITION_NOTIFIER); | 370 | CPUFREQ_TRANSITION_NOTIFIER); |
371 | unregister_cpu_notifier(&cpufreq_stat_cpu_notifier); | 371 | unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); |
372 | lock_cpu_hotplug(); | 372 | lock_cpu_hotplug(); |
373 | for_each_online_cpu(cpu) { | 373 | for_each_online_cpu(cpu) { |
374 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD, | 374 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD, |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 3e0d04d5a800..8b46ef7d9ff8 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -488,7 +488,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
488 | dev_err(&dev->dev, "SMBus base address uninitialized, " | 488 | dev_err(&dev->dev, "SMBus base address uninitialized, " |
489 | "upgrade BIOS\n"); | 489 | "upgrade BIOS\n"); |
490 | err = -ENODEV; | 490 | err = -ENODEV; |
491 | goto exit_disable; | 491 | goto exit; |
492 | } | 492 | } |
493 | 493 | ||
494 | err = pci_request_region(dev, SMBBAR, i801_driver.name); | 494 | err = pci_request_region(dev, SMBBAR, i801_driver.name); |
@@ -496,7 +496,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
496 | dev_err(&dev->dev, "Failed to request SMBus region " | 496 | dev_err(&dev->dev, "Failed to request SMBus region " |
497 | "0x%lx-0x%lx\n", i801_smba, | 497 | "0x%lx-0x%lx\n", i801_smba, |
498 | pci_resource_end(dev, SMBBAR)); | 498 | pci_resource_end(dev, SMBBAR)); |
499 | goto exit_disable; | 499 | goto exit; |
500 | } | 500 | } |
501 | 501 | ||
502 | pci_read_config_byte(I801_dev, SMBHSTCFG, &temp); | 502 | pci_read_config_byte(I801_dev, SMBHSTCFG, &temp); |
@@ -520,11 +520,12 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
520 | err = i2c_add_adapter(&i801_adapter); | 520 | err = i2c_add_adapter(&i801_adapter); |
521 | if (err) { | 521 | if (err) { |
522 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); | 522 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); |
523 | goto exit_disable; | 523 | goto exit_release; |
524 | } | 524 | } |
525 | return 0; | ||
525 | 526 | ||
526 | exit_disable: | 527 | exit_release: |
527 | pci_disable_device(dev); | 528 | pci_release_region(dev, SMBBAR); |
528 | exit: | 529 | exit: |
529 | return err; | 530 | return err; |
530 | } | 531 | } |
@@ -533,7 +534,10 @@ static void __devexit i801_remove(struct pci_dev *dev) | |||
533 | { | 534 | { |
534 | i2c_del_adapter(&i801_adapter); | 535 | i2c_del_adapter(&i801_adapter); |
535 | pci_release_region(dev, SMBBAR); | 536 | pci_release_region(dev, SMBBAR); |
536 | pci_disable_device(dev); | 537 | /* |
538 | * do not call pci_disable_device(dev) since it can cause hard hangs on | ||
539 | * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010) | ||
540 | */ | ||
537 | } | 541 | } |
538 | 542 | ||
539 | static struct pci_driver i801_driver = { | 543 | static struct pci_driver i801_driver = { |
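The i801_probe() fix rearranges the error unwinding so each failure jumps to a label that releases exactly what has been acquired so far: failures before the I/O region is requested go to exit, later failures go through exit_release (which calls pci_release_region()), and pci_disable_device() is dropped entirely because of the power-off hangs noted in the new i801_remove() comment. A generic sketch of the probe-time unwind idiom, with a hypothetical helper standing in for the adapter registration step:

        static int example_probe(struct pci_dev *dev)
        {
                int err;

                err = pci_request_region(dev, 0, "example");
                if (err)
                        goto exit;              /* nothing acquired yet */

                err = example_register_adapter(dev);    /* hypothetical helper */
                if (err)
                        goto exit_release;      /* undo only the region request */

                return 0;

        exit_release:
                pci_release_region(dev, 0);
        exit:
                return err;
        }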
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index 6e9dbf4d8077..85007cb12c52 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c | |||
@@ -75,6 +75,7 @@ static struct amd_ide_chip { | |||
75 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 }, | 75 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 }, |
76 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 }, | 76 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 }, |
77 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 }, | 77 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 }, |
78 | { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, AMD_UDMA_133 }, | ||
78 | { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 }, | 79 | { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 }, |
79 | { 0 } | 80 | { 0 } |
80 | }; | 81 | }; |
@@ -490,7 +491,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = { | |||
490 | /* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"), | 491 | /* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"), |
491 | /* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"), | 492 | /* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"), |
492 | /* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"), | 493 | /* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"), |
493 | /* 18 */ DECLARE_AMD_DEV("AMD5536"), | 494 | /* 18 */ DECLARE_NV_DEV("NFORCE-MCP65"), |
495 | /* 19 */ DECLARE_AMD_DEV("AMD5536"), | ||
494 | }; | 496 | }; |
495 | 497 | ||
496 | static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id) | 498 | static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id) |
@@ -528,7 +530,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = { | |||
528 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 }, | 530 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 }, |
529 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 }, | 531 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 }, |
530 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 }, | 532 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 }, |
531 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 }, | 533 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 }, |
534 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 19 }, | ||
532 | { 0, }, | 535 | { 0, }, |
533 | }; | 536 | }; |
534 | MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl); | 537 | MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl); |
diff --git a/drivers/input/input.c b/drivers/input/input.c index de2e7546b491..a90486f5e491 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -998,12 +998,13 @@ void input_unregister_device(struct input_dev *dev) | |||
998 | sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group); | 998 | sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group); |
999 | sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group); | 999 | sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group); |
1000 | sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group); | 1000 | sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group); |
1001 | class_device_unregister(&dev->cdev); | ||
1002 | 1001 | ||
1003 | mutex_lock(&dev->mutex); | 1002 | mutex_lock(&dev->mutex); |
1004 | dev->name = dev->phys = dev->uniq = NULL; | 1003 | dev->name = dev->phys = dev->uniq = NULL; |
1005 | mutex_unlock(&dev->mutex); | 1004 | mutex_unlock(&dev->mutex); |
1006 | 1005 | ||
1006 | class_device_unregister(&dev->cdev); | ||
1007 | |||
1007 | input_wakeup_procfs_readers(); | 1008 | input_wakeup_procfs_readers(); |
1008 | } | 1009 | } |
1009 | EXPORT_SYMBOL(input_unregister_device); | 1010 | EXPORT_SYMBOL(input_unregister_device); |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index acb7e2656780..2a56bf33a673 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
@@ -981,7 +981,7 @@ exit: | |||
981 | EXPORT_SYMBOL_GPL(gigaset_stop); | 981 | EXPORT_SYMBOL_GPL(gigaset_stop); |
982 | 982 | ||
983 | static LIST_HEAD(drivers); | 983 | static LIST_HEAD(drivers); |
984 | static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED; | 984 | static DEFINE_SPINLOCK(driver_lock); |
985 | 985 | ||
986 | struct cardstate *gigaset_get_cs_by_id(int id) | 986 | struct cardstate *gigaset_get_cs_by_id(int id) |
987 | { | 987 | { |
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 2ac90242d263..433389daedb2 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c | |||
@@ -82,7 +82,7 @@ isdn_tty_try_read(modem_info * info, struct sk_buff *skb) | |||
82 | int l = skb->len; | 82 | int l = skb->len; |
83 | unsigned char *dp = skb->data; | 83 | unsigned char *dp = skb->data; |
84 | while (--l) { | 84 | while (--l) { |
85 | if (*skb->data == DLE) | 85 | if (*dp == DLE) |
86 | tty_insert_flip_char(tty, DLE, 0); | 86 | tty_insert_flip_char(tty, DLE, 0); |
87 | tty_insert_flip_char(tty, *dp++, 0); | 87 | tty_insert_flip_char(tty, *dp++, 0); |
88 | } | 88 | } |
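The isdn_tty fix is a one-character bug: the loop tested *skb->data, i.e. always the first byte of the buffer, instead of the byte currently being copied, so the apparent DLE-doubling escape was driven by the wrong byte. The corrected loop, restated with comments:

        while (--l) {
                if (*dp == DLE)                         /* current byte is a DLE... */
                        tty_insert_flip_char(tty, DLE, 0);  /* ...so emit an extra one */
                tty_insert_flip_char(tty, *dp++, 0);    /* then copy the byte itself */
        }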
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index fe6541326c71..9b015f9af351 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/leds.h> | 18 | #include <linux/leds.h> |
19 | #include "leds.h" | 19 | #include "leds.h" |
20 | 20 | ||
21 | rwlock_t leds_list_lock = RW_LOCK_UNLOCKED; | 21 | DEFINE_RWLOCK(leds_list_lock); |
22 | LIST_HEAD(leds_list); | 22 | LIST_HEAD(leds_list); |
23 | 23 | ||
24 | EXPORT_SYMBOL_GPL(leds_list); | 24 | EXPORT_SYMBOL_GPL(leds_list); |
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 5e2cd8be1191..1b1ce6523960 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c | |||
@@ -26,7 +26,7 @@ | |||
26 | /* | 26 | /* |
27 | * Nests outside led_cdev->trigger_lock | 27 | * Nests outside led_cdev->trigger_lock |
28 | */ | 28 | */ |
29 | static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED; | 29 | static DEFINE_RWLOCK(triggers_list_lock); |
30 | static LIST_HEAD(trigger_list); | 30 | static LIST_HEAD(trigger_list); |
31 | 31 | ||
32 | ssize_t led_trigger_store(struct class_device *dev, const char *buf, | 32 | ssize_t led_trigger_store(struct class_device *dev, const char *buf, |
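The gigaset and LED hunks are the same cleanup: statically declared locks move from the deprecated SPIN_LOCK_UNLOCKED / RW_LOCK_UNLOCKED initializers to the DEFINE_SPINLOCK() and DEFINE_RWLOCK() macros, which declare and initialize the lock in a single step. The two forms side by side, as a sketch:

        /* static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;   (old, deprecated) */
        static DEFINE_SPINLOCK(driver_lock);

        /* rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;           (old, deprecated) */
        DEFINE_RWLOCK(leds_list_lock);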
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index e4290491fa9e..6d532f170ce5 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -445,6 +445,8 @@ endmenu # encoder / decoder chips | |||
445 | menu "V4L USB devices" | 445 | menu "V4L USB devices" |
446 | depends on USB && VIDEO_DEV | 446 | depends on USB && VIDEO_DEV |
447 | 447 | ||
448 | source "drivers/media/video/pvrusb2/Kconfig" | ||
449 | |||
448 | source "drivers/media/video/em28xx/Kconfig" | 450 | source "drivers/media/video/em28xx/Kconfig" |
449 | 451 | ||
450 | config USB_DSBR | 452 | config USB_DSBR |
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 6c401b46398a..353d61cfac1b 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile | |||
@@ -47,6 +47,7 @@ obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/ | |||
47 | obj-$(CONFIG_VIDEO_CX88) += cx88/ | 47 | obj-$(CONFIG_VIDEO_CX88) += cx88/ |
48 | obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ | 48 | obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ |
49 | obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o | 49 | obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o |
50 | obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/ | ||
50 | obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o | 51 | obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o |
51 | obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o | 52 | obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o |
52 | obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o | 53 | obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o |
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c index 01b22eab5725..65f00fc08fa9 100644 --- a/drivers/media/video/cx2341x.c +++ b/drivers/media/video/cx2341x.c | |||
@@ -601,7 +601,7 @@ static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params) | |||
601 | } | 601 | } |
602 | 602 | ||
603 | int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, | 603 | int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, |
604 | struct v4l2_ext_controls *ctrls, int cmd) | 604 | struct v4l2_ext_controls *ctrls, unsigned int cmd) |
605 | { | 605 | { |
606 | int err = 0; | 606 | int err = 0; |
607 | int i; | 607 | int i; |
@@ -847,22 +847,22 @@ invalid: | |||
847 | return "<invalid>"; | 847 | return "<invalid>"; |
848 | } | 848 | } |
849 | 849 | ||
850 | void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id) | 850 | void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix) |
851 | { | 851 | { |
852 | int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1; | 852 | int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1; |
853 | 853 | ||
854 | /* Stream */ | 854 | /* Stream */ |
855 | printk(KERN_INFO "cx2341x-%d: Stream: %s\n", | 855 | printk(KERN_INFO "%s: Stream: %s\n", |
856 | card_id, | 856 | prefix, |
857 | cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); | 857 | cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); |
858 | 858 | ||
859 | /* Video */ | 859 | /* Video */ |
860 | printk(KERN_INFO "cx2341x-%d: Video: %dx%d, %d fps\n", | 860 | printk(KERN_INFO "%s: Video: %dx%d, %d fps\n", |
861 | card_id, | 861 | prefix, |
862 | p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), | 862 | p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), |
863 | p->is_50hz ? 25 : 30); | 863 | p->is_50hz ? 25 : 30); |
864 | printk(KERN_INFO "cx2341x-%d: Video: %s, %s, %s, %d", | 864 | printk(KERN_INFO "%s: Video: %s, %s, %s, %d", |
865 | card_id, | 865 | prefix, |
866 | cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), | 866 | cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), |
867 | cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT), | 867 | cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT), |
868 | cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE), | 868 | cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE), |
@@ -871,19 +871,19 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id) | |||
871 | printk(", Peak %d", p->video_bitrate_peak); | 871 | printk(", Peak %d", p->video_bitrate_peak); |
872 | } | 872 | } |
873 | printk("\n"); | 873 | printk("\n"); |
874 | printk(KERN_INFO "cx2341x-%d: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n", | 874 | printk(KERN_INFO "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n", |
875 | card_id, | 875 | prefix, |
876 | p->video_gop_size, p->video_b_frames, | 876 | p->video_gop_size, p->video_b_frames, |
877 | p->video_gop_closure ? "" : "No ", | 877 | p->video_gop_closure ? "" : "No ", |
878 | p->video_pulldown ? "" : "No "); | 878 | p->video_pulldown ? "" : "No "); |
879 | if (p->video_temporal_decimation) { | 879 | if (p->video_temporal_decimation) { |
880 | printk(KERN_INFO "cx2341x-%d: Video: Temporal Decimation %d\n", | 880 | printk(KERN_INFO "%s: Video: Temporal Decimation %d\n", |
881 | card_id, p->video_temporal_decimation); | 881 | prefix, p->video_temporal_decimation); |
882 | } | 882 | } |
883 | 883 | ||
884 | /* Audio */ | 884 | /* Audio */ |
885 | printk(KERN_INFO "cx2341x-%d: Audio: %s, %s, %s, %s", | 885 | printk(KERN_INFO "%s: Audio: %s, %s, %s, %s", |
886 | card_id, | 886 | prefix, |
887 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), | 887 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), |
888 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), | 888 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), |
889 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE), | 889 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE), |
@@ -897,18 +897,18 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id) | |||
897 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC)); | 897 | cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC)); |
898 | 898 | ||
899 | /* Encoding filters */ | 899 | /* Encoding filters */ |
900 | printk(KERN_INFO "cx2341x-%d: Spatial Filter: %s, Luma %s, Chroma %s, %d\n", | 900 | printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n", |
901 | card_id, | 901 | prefix, |
902 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE), | 902 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE), |
903 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE), | 903 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE), |
904 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE), | 904 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE), |
905 | p->video_spatial_filter); | 905 | p->video_spatial_filter); |
906 | printk(KERN_INFO "cx2341x-%d: Temporal Filter: %s, %d\n", | 906 | printk(KERN_INFO "%s: Temporal Filter: %s, %d\n", |
907 | card_id, | 907 | prefix, |
908 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE), | 908 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE), |
909 | p->video_temporal_filter); | 909 | p->video_temporal_filter); |
910 | printk(KERN_INFO "cx2341x-%d: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n", | 910 | printk(KERN_INFO "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n", |
911 | card_id, | 911 | prefix, |
912 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE), | 912 | cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE), |
913 | p->video_luma_median_filter_bottom, | 913 | p->video_luma_median_filter_bottom, |
914 | p->video_luma_median_filter_top, | 914 | p->video_luma_median_filter_top, |
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c index 78df66671ea2..4ff81582ec56 100644 --- a/drivers/media/video/cx88/cx88-blackbird.c +++ b/drivers/media/video/cx88/cx88-blackbird.c | |||
@@ -853,6 +853,19 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file, | |||
853 | fh->mpegq.field); | 853 | fh->mpegq.field); |
854 | return 0; | 854 | return 0; |
855 | } | 855 | } |
856 | case VIDIOC_LOG_STATUS: | ||
857 | { | ||
858 | char name[32 + 2]; | ||
859 | |||
860 | snprintf(name, sizeof(name), "%s/2", core->name); | ||
861 | printk("%s/2: ============ START LOG STATUS ============\n", | ||
862 | core->name); | ||
863 | cx88_call_i2c_clients(core, VIDIOC_LOG_STATUS, 0); | ||
864 | cx2341x_log_status(&dev->params, name); | ||
865 | printk("%s/2: ============= END LOG STATUS =============\n", | ||
866 | core->name); | ||
867 | return 0; | ||
868 | } | ||
856 | 869 | ||
857 | default: | 870 | default: |
858 | return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl); | 871 | return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl); |
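The cx2341x_log_status() prototype now takes a caller-supplied prefix string instead of an integer card id, so each bridge driver can label the log output with its own device name; the new VIDIOC_LOG_STATUS handler in cx88-blackbird above is the first user. The call pattern, condensed from that hunk:

        char name[32 + 2];

        snprintf(name, sizeof(name), "%s/2", core->name);
        cx2341x_log_status(&dev->params, name);  /* each line prints as "<name>: ..." */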
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig new file mode 100644 index 000000000000..7e727fe14b32 --- /dev/null +++ b/drivers/media/video/pvrusb2/Kconfig | |||
@@ -0,0 +1,62 @@ | |||
1 | config VIDEO_PVRUSB2 | ||
2 | tristate "Hauppauge WinTV-PVR USB2 support" | ||
3 | depends on VIDEO_V4L2 && USB && I2C && EXPERIMENTAL | ||
4 | select FW_LOADER | ||
5 | select VIDEO_TUNER | ||
6 | select VIDEO_TVEEPROM | ||
7 | select VIDEO_CX2341X | ||
8 | select VIDEO_SAA711X | ||
9 | select VIDEO_MSP3400 | ||
10 | ---help--- | ||
11 | This is a video4linux driver for Conexant 23416 based | ||
12 | usb2 personal video recorder devices. | ||
13 | |||
14 | To compile this driver as a module, choose M here: the | ||
15 | module will be called pvrusb2 | ||
16 | |||
17 | config VIDEO_PVRUSB2_24XXX | ||
18 | bool "Hauppauge WinTV-PVR USB2 support for 24xxx model series" | ||
19 | depends on VIDEO_PVRUSB2 && EXPERIMENTAL | ||
20 | select VIDEO_CX25840 | ||
21 | select VIDEO_WM8775 | ||
22 | ---help--- | ||
23 | This option enables inclusion of additional logic to operate | ||
24 | newer WinTV-PVR USB2 devices whose model number is of the | ||
25 | form "24xxx" (leading prefix of "24" followed by 3 digits). | ||
26 | To see if you may need this option, examine the white | ||
27 | sticker on the underside of your device. Enabling this | ||
28 | option will not harm support for older devices, however it | ||
29 | is a separate option because of the experimental nature of | ||
30 | this new feature. | ||
31 | |||
32 | If you are in doubt, say N. | ||
33 | |||
34 | Note: This feature is _very_ experimental. You have been | ||
35 | warned. | ||
36 | |||
37 | config VIDEO_PVRUSB2_SYSFS | ||
38 | bool "pvrusb2 sysfs support (EXPERIMENTAL)" | ||
39 | default y | ||
40 | depends on VIDEO_PVRUSB2 && SYSFS && EXPERIMENTAL | ||
41 | ---help--- | ||
42 | This option enables the operation of a sysfs based | ||
43 | interface for query and control of the pvrusb2 driver. | ||
44 | |||
45 | This is not generally needed for v4l applications, | ||
46 | although certain applications are optimized to take | ||
47 | advantage of this feature. | ||
48 | |||
49 | If you are in doubt, say Y. | ||
50 | |||
51 | Note: This feature is experimental and subject to change. | ||
52 | |||
53 | config VIDEO_PVRUSB2_DEBUGIFC | ||
54 | bool "pvrusb2 debug interface" | ||
55 | depends on VIDEO_PVRUSB2_SYSFS | ||
56 | ---help--- | ||
57 | This option enables the inclusion of a debug interface | ||
58 | in the pvrusb2 driver, hosted through sysfs. | ||
59 | |||
60 | You do not need to select this option unless you plan | ||
61 | on debugging the driver or performing a manual firmware | ||
62 | extraction. | ||
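As an illustration only (not part of the patch), a .config fragment that builds the new driver as a module with the sysfs and debug interfaces enabled and the experimental 24xxx support left off might look like:

        CONFIG_VIDEO_PVRUSB2=m
        CONFIG_VIDEO_PVRUSB2_SYSFS=y
        CONFIG_VIDEO_PVRUSB2_DEBUGIFC=y
        # CONFIG_VIDEO_PVRUSB2_24XXX is not set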
diff --git a/drivers/media/video/pvrusb2/Makefile b/drivers/media/video/pvrusb2/Makefile new file mode 100644 index 000000000000..fed603ad0a67 --- /dev/null +++ b/drivers/media/video/pvrusb2/Makefile | |||
@@ -0,0 +1,18 @@ | |||
1 | obj-pvrusb2-sysfs-$(CONFIG_VIDEO_PVRUSB2_SYSFS) := pvrusb2-sysfs.o | ||
2 | obj-pvrusb2-debugifc-$(CONFIG_VIDEO_PVRUSB2_DEBUGIFC) := pvrusb2-debugifc.o | ||
3 | |||
4 | obj-pvrusb2-24xxx-$(CONFIG_VIDEO_PVRUSB2_24XXX) := \ | ||
5 | pvrusb2-cx2584x-v4l.o \ | ||
6 | pvrusb2-wm8775.o | ||
7 | |||
8 | pvrusb2-objs := pvrusb2-i2c-core.o pvrusb2-i2c-cmd-v4l2.o \ | ||
9 | pvrusb2-audio.o pvrusb2-i2c-chips-v4l2.o \ | ||
10 | pvrusb2-encoder.o pvrusb2-video-v4l.o \ | ||
11 | pvrusb2-eeprom.o pvrusb2-tuner.o pvrusb2-demod.o \ | ||
12 | pvrusb2-main.o pvrusb2-hdw.o pvrusb2-v4l2.o \ | ||
13 | pvrusb2-ctrl.o pvrusb2-std.o \ | ||
14 | pvrusb2-context.o pvrusb2-io.o pvrusb2-ioread.o \ | ||
15 | $(obj-pvrusb2-24xxx-y) \ | ||
16 | $(obj-pvrusb2-sysfs-y) $(obj-pvrusb2-debugifc-y) | ||
17 | |||
18 | obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o | ||
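The intermediate obj-pvrusb2-*-$(CONFIG_...) variables are the usual kbuild trick for optional objects inside a single module: when an option is enabled its objects land in a variable ending in -y and are picked up by the $(obj-pvrusb2-...-y) references in pvrusb2-objs; when it is disabled the assignment goes to a differently suffixed variable that is never referenced. Roughly:

        # CONFIG_VIDEO_PVRUSB2_SYSFS=y  ->  obj-pvrusb2-sysfs-y := pvrusb2-sysfs.o
        # option disabled               ->  obj-pvrusb2-sysfs-  := pvrusb2-sysfs.o   (never used)
        # so $(obj-pvrusb2-sysfs-y) expands either to the object list or to nothing.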
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c new file mode 100644 index 000000000000..313d2dcf9e4b --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c | |||
@@ -0,0 +1,204 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include "pvrusb2-audio.h" | ||
24 | #include "pvrusb2-hdw-internal.h" | ||
25 | #include "pvrusb2-debug.h" | ||
26 | #include <linux/videodev2.h> | ||
27 | #include <media/msp3400.h> | ||
28 | #include <media/v4l2-common.h> | ||
29 | |||
30 | struct pvr2_msp3400_handler { | ||
31 | struct pvr2_hdw *hdw; | ||
32 | struct pvr2_i2c_client *client; | ||
33 | struct pvr2_i2c_handler i2c_handler; | ||
34 | struct pvr2_audio_stat astat; | ||
35 | unsigned long stale_mask; | ||
36 | }; | ||
37 | |||
38 | |||
39 | /* This function selects the correct audio input source */ | ||
40 | static void set_stereo(struct pvr2_msp3400_handler *ctxt) | ||
41 | { | ||
42 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
43 | struct v4l2_routing route; | ||
44 | |||
45 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo"); | ||
46 | |||
47 | if (hdw->input_val == PVR2_CVAL_INPUT_TV) { | ||
48 | struct v4l2_tuner vt; | ||
49 | memset(&vt,0,sizeof(vt)); | ||
50 | vt.audmode = hdw->audiomode_val; | ||
51 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_S_TUNER,&vt); | ||
52 | } | ||
53 | |||
54 | route.input = MSP_INPUT_DEFAULT; | ||
55 | route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1); | ||
56 | switch (hdw->input_val) { | ||
57 | case PVR2_CVAL_INPUT_TV: | ||
58 | break; | ||
59 | case PVR2_CVAL_INPUT_RADIO: | ||
60 | /* Assume that msp34xx also handle FM decoding, in which case | ||
61 | we're still using the tuner. */ | ||
62 | /* HV: actually it is more likely to be the SCART2 input if | ||
63 | the ivtv experience is any indication. */ | ||
64 | route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1, | ||
65 | MSP_DSP_IN_SCART, MSP_DSP_IN_SCART); | ||
66 | break; | ||
67 | case PVR2_CVAL_INPUT_SVIDEO: | ||
68 | case PVR2_CVAL_INPUT_COMPOSITE: | ||
69 | /* SCART 1 input */ | ||
70 | route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, | ||
71 | MSP_DSP_IN_SCART, MSP_DSP_IN_SCART); | ||
72 | break; | ||
73 | } | ||
74 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route); | ||
75 | } | ||
76 | |||
77 | |||
78 | static int check_stereo(struct pvr2_msp3400_handler *ctxt) | ||
79 | { | ||
80 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
81 | return (hdw->input_dirty || | ||
82 | hdw->audiomode_dirty); | ||
83 | } | ||
84 | |||
85 | |||
86 | struct pvr2_msp3400_ops { | ||
87 | void (*update)(struct pvr2_msp3400_handler *); | ||
88 | int (*check)(struct pvr2_msp3400_handler *); | ||
89 | }; | ||
90 | |||
91 | |||
92 | static const struct pvr2_msp3400_ops msp3400_ops[] = { | ||
93 | { .update = set_stereo, .check = check_stereo}, | ||
94 | }; | ||
95 | |||
96 | |||
97 | static int msp3400_check(struct pvr2_msp3400_handler *ctxt) | ||
98 | { | ||
99 | unsigned long msk; | ||
100 | unsigned int idx; | ||
101 | |||
102 | for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]); | ||
103 | idx++) { | ||
104 | msk = 1 << idx; | ||
105 | if (ctxt->stale_mask & msk) continue; | ||
106 | if (msp3400_ops[idx].check(ctxt)) { | ||
107 | ctxt->stale_mask |= msk; | ||
108 | } | ||
109 | } | ||
110 | return ctxt->stale_mask != 0; | ||
111 | } | ||
112 | |||
113 | |||
114 | static void msp3400_update(struct pvr2_msp3400_handler *ctxt) | ||
115 | { | ||
116 | unsigned long msk; | ||
117 | unsigned int idx; | ||
118 | |||
119 | for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]); | ||
120 | idx++) { | ||
121 | msk = 1 << idx; | ||
122 | if (!(ctxt->stale_mask & msk)) continue; | ||
123 | ctxt->stale_mask &= ~msk; | ||
124 | msp3400_ops[idx].update(ctxt); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | |||
129 | /* This reads back the current signal type */ | ||
130 | static int get_audio_status(struct pvr2_msp3400_handler *ctxt) | ||
131 | { | ||
132 | struct v4l2_tuner vt; | ||
133 | int stat; | ||
134 | |||
135 | memset(&vt,0,sizeof(vt)); | ||
136 | stat = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt); | ||
137 | if (stat < 0) return stat; | ||
138 | |||
139 | ctxt->hdw->flag_stereo = (vt.audmode & V4L2_TUNER_MODE_STEREO) != 0; | ||
140 | ctxt->hdw->flag_bilingual = | ||
141 | (vt.audmode & V4L2_TUNER_MODE_LANG2) != 0; | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | |||
146 | static void pvr2_msp3400_detach(struct pvr2_msp3400_handler *ctxt) | ||
147 | { | ||
148 | ctxt->client->handler = 0; | ||
149 | ctxt->hdw->audio_stat = 0; | ||
150 | kfree(ctxt); | ||
151 | } | ||
152 | |||
153 | |||
154 | static unsigned int pvr2_msp3400_describe(struct pvr2_msp3400_handler *ctxt, | ||
155 | char *buf,unsigned int cnt) | ||
156 | { | ||
157 | return scnprintf(buf,cnt,"handler: pvrusb2-audio v4l2"); | ||
158 | } | ||
159 | |||
160 | |||
161 | const static struct pvr2_i2c_handler_functions msp3400_funcs = { | ||
162 | .detach = (void (*)(void *))pvr2_msp3400_detach, | ||
163 | .check = (int (*)(void *))msp3400_check, | ||
164 | .update = (void (*)(void *))msp3400_update, | ||
165 | .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_msp3400_describe, | ||
166 | }; | ||
167 | |||
168 | |||
169 | int pvr2_i2c_msp3400_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp) | ||
170 | { | ||
171 | struct pvr2_msp3400_handler *ctxt; | ||
172 | if (hdw->audio_stat) return 0; | ||
173 | if (cp->handler) return 0; | ||
174 | |||
175 | ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL); | ||
176 | if (!ctxt) return 0; | ||
177 | memset(ctxt,0,sizeof(*ctxt)); | ||
178 | |||
179 | ctxt->i2c_handler.func_data = ctxt; | ||
180 | ctxt->i2c_handler.func_table = &msp3400_funcs; | ||
181 | ctxt->client = cp; | ||
182 | ctxt->hdw = hdw; | ||
183 | ctxt->astat.ctxt = ctxt; | ||
184 | ctxt->astat.status = (int (*)(void *))get_audio_status; | ||
185 | ctxt->astat.detach = (void (*)(void *))pvr2_msp3400_detach; | ||
186 | ctxt->stale_mask = (1 << (sizeof(msp3400_ops)/ | ||
187 | sizeof(msp3400_ops[0]))) - 1; | ||
188 | cp->handler = &ctxt->i2c_handler; | ||
189 | hdw->audio_stat = &ctxt->astat; | ||
190 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x msp3400 V4L2 handler set up", | ||
191 | cp->client->addr); | ||
192 | return !0; | ||
193 | } | ||
194 | |||
195 | |||
196 | /* | ||
197 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
198 | *** Local Variables: *** | ||
199 | *** mode: c *** | ||
200 | *** fill-column: 70 *** | ||
201 | *** tab-width: 8 *** | ||
202 | *** c-basic-offset: 8 *** | ||
203 | *** End: *** | ||
204 | */ | ||
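
The msp3400 handler above (and the cx2584x handler later in this patch) relies on a small table-driven idiom: each entry of its ops[] array owns one bit of stale_mask, the check() pass marks settings that have drifted from what the hardware holds, and the update() pass pushes only the marked ones. A minimal user-space sketch of the same idiom follows; toy_ctxt, set_stereo and check_stereo are hypothetical stand-ins, not driver symbols.

#include <stdio.h>
#include <stddef.h>

struct toy_ctxt {
        unsigned long stale_mask;
        int want_stereo;        /* requested setting */
        int have_stereo;        /* what the "chip" currently has */
};

static void set_stereo(struct toy_ctxt *c) { c->have_stereo = c->want_stereo; }
static int check_stereo(struct toy_ctxt *c) { return c->want_stereo != c->have_stereo; }

static const struct {
        void (*update)(struct toy_ctxt *);
        int (*check)(struct toy_ctxt *);
} ops[] = {
        { .update = set_stereo, .check = check_stereo },
};

int main(void)
{
        struct toy_ctxt c = { 0, 1, 0 };
        size_t idx;
        unsigned long msk;

        /* check pass: set the stale bit for any op whose setting has drifted */
        for (idx = 0; idx < sizeof(ops)/sizeof(ops[0]); idx++) {
                msk = 1UL << idx;
                if (c.stale_mask & msk) continue;
                if (ops[idx].check(&c)) c.stale_mask |= msk;
        }
        /* update pass: apply only the stale ops, clearing each bit as we go */
        for (idx = 0; idx < sizeof(ops)/sizeof(ops[0]); idx++) {
                msk = 1UL << idx;
                if (!(c.stale_mask & msk)) continue;
                c.stale_mask &= ~msk;
                ops[idx].update(&c);
        }
        printf("stereo=%d stale_mask=0x%lx\n", c.have_stereo, c.stale_mask);
        return 0;
}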
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.h b/drivers/media/video/pvrusb2/pvrusb2-audio.h new file mode 100644 index 000000000000..536339b68843 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-audio.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_AUDIO_H | ||
24 | #define __PVRUSB2_AUDIO_H | ||
25 | |||
26 | #include "pvrusb2-i2c-core.h" | ||
27 | |||
28 | int pvr2_i2c_msp3400_setup(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
29 | |||
30 | #endif /* __PVRUSB2_AUDIO_H */ | ||
31 | |||
32 | /* | ||
33 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
34 | *** Local Variables: *** | ||
35 | *** mode: c *** | ||
36 | *** fill-column: 70 *** | ||
37 | *** tab-width: 8 *** | ||
38 | *** c-basic-offset: 8 *** | ||
39 | *** End: *** | ||
40 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c new file mode 100644 index 000000000000..40dc59871a45 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-context.c | |||
@@ -0,0 +1,230 @@ | |||
1 | /* | ||
2 | * $Id$ | ||
3 | * | ||
4 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include "pvrusb2-context.h" | ||
22 | #include "pvrusb2-io.h" | ||
23 | #include "pvrusb2-ioread.h" | ||
24 | #include "pvrusb2-hdw.h" | ||
25 | #include "pvrusb2-debug.h" | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <asm/semaphore.h> | ||
30 | |||
31 | |||
32 | static void pvr2_context_destroy(struct pvr2_context *mp) | ||
33 | { | ||
34 | if (mp->hdw) pvr2_hdw_destroy(mp->hdw); | ||
35 | pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr_main id=%p",mp); | ||
36 | flush_workqueue(mp->workqueue); | ||
37 | destroy_workqueue(mp->workqueue); | ||
38 | kfree(mp); | ||
39 | } | ||
40 | |||
41 | |||
42 | static void pvr2_context_trigger_poll(struct pvr2_context *mp) | ||
43 | { | ||
44 | queue_work(mp->workqueue,&mp->workpoll); | ||
45 | } | ||
46 | |||
47 | |||
48 | static void pvr2_context_poll(struct pvr2_context *mp) | ||
49 | { | ||
50 | pvr2_context_enter(mp); do { | ||
51 | pvr2_hdw_poll(mp->hdw); | ||
52 | } while (0); pvr2_context_exit(mp); | ||
53 | } | ||
54 | |||
55 | |||
56 | static void pvr2_context_setup(struct pvr2_context *mp) | ||
57 | { | ||
58 | pvr2_context_enter(mp); do { | ||
59 | if (!pvr2_hdw_dev_ok(mp->hdw)) break; | ||
60 | pvr2_hdw_setup(mp->hdw); | ||
61 | pvr2_hdw_setup_poll_trigger( | ||
62 | mp->hdw, | ||
63 | (void (*)(void *))pvr2_context_trigger_poll, | ||
64 | mp); | ||
65 | if (!pvr2_hdw_dev_ok(mp->hdw)) break; | ||
66 | if (!pvr2_hdw_init_ok(mp->hdw)) break; | ||
67 | mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw); | ||
68 | if (mp->setup_func) { | ||
69 | mp->setup_func(mp); | ||
70 | } | ||
71 | } while (0); pvr2_context_exit(mp); | ||
72 | } | ||
73 | |||
74 | |||
75 | struct pvr2_context *pvr2_context_create( | ||
76 | struct usb_interface *intf, | ||
77 | const struct usb_device_id *devid, | ||
78 | void (*setup_func)(struct pvr2_context *)) | ||
79 | { | ||
80 | struct pvr2_context *mp = 0; | ||
81 | mp = kmalloc(sizeof(*mp),GFP_KERNEL); | ||
82 | if (!mp) goto done; | ||
83 | memset(mp,0,sizeof(*mp)); | ||
84 | pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_main id=%p",mp); | ||
85 | mp->setup_func = setup_func; | ||
86 | mutex_init(&mp->mutex); | ||
87 | mp->hdw = pvr2_hdw_create(intf,devid); | ||
88 | if (!mp->hdw) { | ||
89 | pvr2_context_destroy(mp); | ||
90 | mp = 0; | ||
91 | goto done; | ||
92 | } | ||
93 | |||
94 | mp->workqueue = create_singlethread_workqueue("pvrusb2"); | ||
95 | INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); | ||
96 | INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); | ||
97 | queue_work(mp->workqueue,&mp->workinit); | ||
98 | done: | ||
99 | return mp; | ||
100 | } | ||
101 | |||
102 | |||
103 | void pvr2_context_enter(struct pvr2_context *mp) | ||
104 | { | ||
105 | mutex_lock(&mp->mutex); | ||
106 | pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_enter(id=%p)",mp); | ||
107 | } | ||
108 | |||
109 | |||
110 | void pvr2_context_exit(struct pvr2_context *mp) | ||
111 | { | ||
112 | int destroy_flag = 0; | ||
113 | if (!(mp->mc_first || !mp->disconnect_flag)) { | ||
114 | destroy_flag = !0; | ||
115 | } | ||
116 | pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_exit(id=%p) outside",mp); | ||
117 | mutex_unlock(&mp->mutex); | ||
118 | if (destroy_flag) pvr2_context_destroy(mp); | ||
119 | } | ||
120 | |||
121 | |||
122 | static void pvr2_context_run_checks(struct pvr2_context *mp) | ||
123 | { | ||
124 | struct pvr2_channel *ch1,*ch2; | ||
125 | for (ch1 = mp->mc_first; ch1; ch1 = ch2) { | ||
126 | ch2 = ch1->mc_next; | ||
127 | if (ch1->check_func) { | ||
128 | ch1->check_func(ch1); | ||
129 | } | ||
130 | } | ||
131 | } | ||
132 | |||
133 | |||
134 | void pvr2_context_disconnect(struct pvr2_context *mp) | ||
135 | { | ||
136 | pvr2_context_enter(mp); do { | ||
137 | pvr2_hdw_disconnect(mp->hdw); | ||
138 | mp->disconnect_flag = !0; | ||
139 | pvr2_context_run_checks(mp); | ||
140 | } while (0); pvr2_context_exit(mp); | ||
141 | } | ||
142 | |||
143 | |||
144 | void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp) | ||
145 | { | ||
146 | cp->hdw = mp->hdw; | ||
147 | cp->mc_head = mp; | ||
148 | cp->mc_next = 0; | ||
149 | cp->mc_prev = mp->mc_last; | ||
150 | if (mp->mc_last) { | ||
151 | mp->mc_last->mc_next = cp; | ||
152 | } else { | ||
153 | mp->mc_first = cp; | ||
154 | } | ||
155 | mp->mc_last = cp; | ||
156 | } | ||
157 | |||
158 | |||
159 | static void pvr2_channel_disclaim_stream(struct pvr2_channel *cp) | ||
160 | { | ||
161 | if (!cp->stream) return; | ||
162 | pvr2_stream_kill(cp->stream->stream); | ||
163 | cp->stream->user = 0; | ||
164 | cp->stream = 0; | ||
165 | } | ||
166 | |||
167 | |||
168 | void pvr2_channel_done(struct pvr2_channel *cp) | ||
169 | { | ||
170 | struct pvr2_context *mp = cp->mc_head; | ||
171 | pvr2_channel_disclaim_stream(cp); | ||
172 | if (cp->mc_next) { | ||
173 | cp->mc_next->mc_prev = cp->mc_prev; | ||
174 | } else { | ||
175 | mp->mc_last = cp->mc_prev; | ||
176 | } | ||
177 | if (cp->mc_prev) { | ||
178 | cp->mc_prev->mc_next = cp->mc_next; | ||
179 | } else { | ||
180 | mp->mc_first = cp->mc_next; | ||
181 | } | ||
182 | cp->hdw = 0; | ||
183 | } | ||
184 | |||
185 | |||
186 | int pvr2_channel_claim_stream(struct pvr2_channel *cp, | ||
187 | struct pvr2_context_stream *sp) | ||
188 | { | ||
189 | int code = 0; | ||
190 | pvr2_context_enter(cp->mc_head); do { | ||
191 | if (sp == cp->stream) break; | ||
192 | if (sp->user) { | ||
193 | code = -EBUSY; | ||
194 | break; | ||
195 | } | ||
196 | pvr2_channel_disclaim_stream(cp); | ||
197 | if (!sp) break; | ||
198 | sp->user = cp; | ||
199 | cp->stream = sp; | ||
200 | } while (0); pvr2_context_exit(cp->mc_head); | ||
201 | return code; | ||
202 | } | ||
203 | |||
204 | |||
205 | // This is the marker for the real beginning of a legitimate mpeg2 stream. | ||
206 | static char stream_sync_key[] = { | ||
207 | 0x00, 0x00, 0x01, 0xba, | ||
208 | }; | ||
209 | |||
210 | struct pvr2_ioread *pvr2_channel_create_mpeg_stream( | ||
211 | struct pvr2_context_stream *sp) | ||
212 | { | ||
213 | struct pvr2_ioread *cp; | ||
214 | cp = pvr2_ioread_create(); | ||
215 | if (!cp) return 0; | ||
216 | pvr2_ioread_setup(cp,sp->stream); | ||
217 | pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key)); | ||
218 | return cp; | ||
219 | } | ||
220 | |||
221 | |||
222 | /* | ||
223 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
224 | *** Local Variables: *** | ||
225 | *** mode: c *** | ||
226 | *** fill-column: 75 *** | ||
227 | *** tab-width: 8 *** | ||
228 | *** c-basic-offset: 8 *** | ||
229 | *** End: *** | ||
230 | */ | ||
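
pvr2_context_exit() above decides whether the context should die while still holding the mutex, but performs the destruction only after dropping it, and only once the device has been disconnected and no channels remain on the mc_first list. A rough user-space sketch of that enter/exit/deferred-destroy pattern, using pthreads and made-up toy_* names (channel_count stands in for the channel list):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_context {
        pthread_mutex_t mutex;
        int channel_count;      /* stands in for the mc_first list */
        int disconnect_flag;
};

static void toy_destroy(struct toy_context *mp)
{
        printf("destroying context %p\n", (void *)mp);
        pthread_mutex_destroy(&mp->mutex);
        free(mp);
}

static void toy_enter(struct toy_context *mp) { pthread_mutex_lock(&mp->mutex); }

static void toy_exit(struct toy_context *mp)
{
        /* decide under the lock, destroy outside the lock */
        int destroy_flag = mp->disconnect_flag && !mp->channel_count;
        pthread_mutex_unlock(&mp->mutex);
        if (destroy_flag) toy_destroy(mp);
}

int main(void)
{
        struct toy_context *mp = calloc(1, sizeof(*mp));
        pthread_mutex_init(&mp->mutex, NULL);

        toy_enter(mp); mp->disconnect_flag = 1; toy_exit(mp);  /* last exit frees */
        return 0;
}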
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.h b/drivers/media/video/pvrusb2/pvrusb2-context.h new file mode 100644 index 000000000000..6327fa1f7e4f --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-context.h | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * $Id$ | ||
3 | * | ||
4 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | */ | ||
20 | #ifndef __PVRUSB2_CONTEXT_H | ||
21 | #define __PVRUSB2_CONTEXT_H | ||
22 | |||
23 | #include <linux/mutex.h> | ||
24 | #include <linux/usb.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | |||
27 | struct pvr2_hdw; /* hardware interface - defined elsewhere */ | ||
28 | struct pvr2_stream; /* stream interface - defined elsewhere */ | ||
29 | |||
30 | struct pvr2_context; /* All central state */ | ||
31 | struct pvr2_channel; /* One I/O pathway to a user */ | ||
32 | struct pvr2_context_stream; /* Wrapper for a stream */ | ||
33 | struct pvr2_crit_reg; /* Critical region pointer */ | ||
34 | struct pvr2_ioread; /* Low level stream structure */ | ||
35 | |||
36 | struct pvr2_context_stream { | ||
37 | struct pvr2_channel *user; | ||
38 | struct pvr2_stream *stream; | ||
39 | }; | ||
40 | |||
41 | struct pvr2_context { | ||
42 | struct pvr2_channel *mc_first; | ||
43 | struct pvr2_channel *mc_last; | ||
44 | struct pvr2_hdw *hdw; | ||
45 | struct pvr2_context_stream video_stream; | ||
46 | struct mutex mutex; | ||
47 | int disconnect_flag; | ||
48 | |||
49 | /* Called after pvr2_context initialization is complete */ | ||
50 | void (*setup_func)(struct pvr2_context *); | ||
51 | |||
52 | /* Work queue overhead for out-of-line processing */ | ||
53 | struct workqueue_struct *workqueue; | ||
54 | struct work_struct workinit; | ||
55 | struct work_struct workpoll; | ||
56 | }; | ||
57 | |||
58 | struct pvr2_channel { | ||
59 | struct pvr2_context *mc_head; | ||
60 | struct pvr2_channel *mc_next; | ||
61 | struct pvr2_channel *mc_prev; | ||
62 | struct pvr2_context_stream *stream; | ||
63 | struct pvr2_hdw *hdw; | ||
64 | void (*check_func)(struct pvr2_channel *); | ||
65 | }; | ||
66 | |||
67 | void pvr2_context_enter(struct pvr2_context *); | ||
68 | void pvr2_context_exit(struct pvr2_context *); | ||
69 | |||
70 | struct pvr2_context *pvr2_context_create(struct usb_interface *intf, | ||
71 | const struct usb_device_id *devid, | ||
72 | void (*setup_func)(struct pvr2_context *)); | ||
73 | void pvr2_context_disconnect(struct pvr2_context *); | ||
74 | |||
75 | void pvr2_channel_init(struct pvr2_channel *,struct pvr2_context *); | ||
76 | void pvr2_channel_done(struct pvr2_channel *); | ||
77 | int pvr2_channel_claim_stream(struct pvr2_channel *, | ||
78 | struct pvr2_context_stream *); | ||
79 | struct pvr2_ioread *pvr2_channel_create_mpeg_stream( | ||
80 | struct pvr2_context_stream *); | ||
81 | |||
82 | |||
83 | #endif /* __PVRUSB2_CONTEXT_H */ | ||
84 | /* | ||
85 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
86 | *** Local Variables: *** | ||
87 | *** mode: c *** | ||
88 | *** fill-column: 75 *** | ||
89 | *** tab-width: 8 *** | ||
90 | *** c-basic-offset: 8 *** | ||
91 | *** End: *** | ||
92 | */ | ||
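
pvr2_channel_init() and pvr2_channel_done() declared here maintain a plain doubly linked list of channels anchored by mc_first/mc_last in the context. A compact user-space sketch of the same bookkeeping; the toy_* types and names are illustrative only:

#include <stdio.h>
#include <stddef.h>

struct toy_channel;

struct toy_head {
        struct toy_channel *first, *last;
};

struct toy_channel {
        struct toy_head *head;
        struct toy_channel *next, *prev;
        const char *name;
};

static void toy_channel_init(struct toy_channel *cp, struct toy_head *mp,
                             const char *name)
{
        cp->head = mp;
        cp->name = name;
        cp->next = NULL;
        cp->prev = mp->last;
        if (mp->last) mp->last->next = cp; else mp->first = cp;
        mp->last = cp;
}

static void toy_channel_done(struct toy_channel *cp)
{
        struct toy_head *mp = cp->head;
        if (cp->next) cp->next->prev = cp->prev; else mp->last = cp->prev;
        if (cp->prev) cp->prev->next = cp->next; else mp->first = cp->next;
}

int main(void)
{
        struct toy_head ctx = { NULL, NULL };
        struct toy_channel a, b, *p;

        toy_channel_init(&a, &ctx, "v4l");
        toy_channel_init(&b, &ctx, "sysfs");
        toy_channel_done(&a);                 /* unlink the first channel */
        for (p = ctx.first; p; p = p->next)
                printf("channel: %s\n", p->name);
        return 0;
}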
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c new file mode 100644 index 000000000000..d5df9fbeba2f --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c | |||
@@ -0,0 +1,593 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "pvrusb2-ctrl.h" | ||
23 | #include "pvrusb2-hdw-internal.h" | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/mutex.h> | ||
27 | |||
28 | |||
29 | /* Set the given control. */ | ||
30 | int pvr2_ctrl_set_value(struct pvr2_ctrl *cptr,int val) | ||
31 | { | ||
32 | return pvr2_ctrl_set_mask_value(cptr,~0,val); | ||
33 | } | ||
34 | |||
35 | |||
36 | /* Set/clear specific bits of the given control. */ | ||
37 | int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *cptr,int mask,int val) | ||
38 | { | ||
39 | int ret = 0; | ||
40 | if (!cptr) return -EINVAL; | ||
41 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
42 | if (cptr->info->set_value != 0) { | ||
43 | if (cptr->info->type == pvr2_ctl_bitmask) { | ||
44 | mask &= cptr->info->def.type_bitmask.valid_bits; | ||
45 | } else if (cptr->info->type == pvr2_ctl_int) { | ||
46 | if (val < cptr->info->def.type_int.min_value) { | ||
47 | break; | ||
48 | } | ||
49 | if (val > cptr->info->def.type_int.max_value) { | ||
50 | break; | ||
51 | } | ||
52 | } else if (cptr->info->type == pvr2_ctl_enum) { | ||
53 | if (val >= cptr->info->def.type_enum.count) { | ||
54 | break; | ||
55 | } | ||
56 | } else if (cptr->info->type != pvr2_ctl_bool) { | ||
57 | break; | ||
58 | } | ||
59 | ret = cptr->info->set_value(cptr,mask,val); | ||
60 | } else { | ||
61 | ret = -EPERM; | ||
62 | } | ||
63 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
64 | return ret; | ||
65 | } | ||
66 | |||
67 | |||
68 | /* Get the current value of the given control. */ | ||
69 | int pvr2_ctrl_get_value(struct pvr2_ctrl *cptr,int *valptr) | ||
70 | { | ||
71 | int ret = 0; | ||
72 | if (!cptr) return -EINVAL; | ||
73 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
74 | ret = cptr->info->get_value(cptr,valptr); | ||
75 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
76 | return ret; | ||
77 | } | ||
78 | |||
79 | |||
80 | /* Retrieve control's type */ | ||
81 | enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *cptr) | ||
82 | { | ||
83 | if (!cptr) return pvr2_ctl_int; | ||
84 | return cptr->info->type; | ||
85 | } | ||
86 | |||
87 | |||
88 | /* Retrieve control's maximum value (int type) */ | ||
89 | int pvr2_ctrl_get_max(struct pvr2_ctrl *cptr) | ||
90 | { | ||
91 | int ret = 0; | ||
92 | if (!cptr) return 0; | ||
93 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
94 | if (cptr->info->type == pvr2_ctl_int) { | ||
95 | ret = cptr->info->def.type_int.max_value; | ||
96 | } | ||
97 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
98 | return ret; | ||
99 | } | ||
100 | |||
101 | |||
102 | /* Retrieve control's minimum value (int type) */ | ||
103 | int pvr2_ctrl_get_min(struct pvr2_ctrl *cptr) | ||
104 | { | ||
105 | int ret = 0; | ||
106 | if (!cptr) return 0; | ||
107 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
108 | if (cptr->info->type == pvr2_ctl_int) { | ||
109 | ret = cptr->info->def.type_int.min_value; | ||
110 | } | ||
111 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | |||
116 | /* Retrieve control's default value (any type) */ | ||
117 | int pvr2_ctrl_get_def(struct pvr2_ctrl *cptr) | ||
118 | { | ||
119 | int ret = 0; | ||
120 | if (!cptr) return 0; | ||
121 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
122 | if (cptr->info->type == pvr2_ctl_int) { | ||
123 | ret = cptr->info->default_value; | ||
124 | } | ||
125 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | |||
130 | /* Retrieve control's enumeration count (enum only) */ | ||
131 | int pvr2_ctrl_get_cnt(struct pvr2_ctrl *cptr) | ||
132 | { | ||
133 | int ret = 0; | ||
134 | if (!cptr) return 0; | ||
135 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
136 | if (cptr->info->type == pvr2_ctl_enum) { | ||
137 | ret = cptr->info->def.type_enum.count; | ||
138 | } | ||
139 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | |||
144 | /* Retrieve control's valid mask bits (bit mask only) */ | ||
145 | int pvr2_ctrl_get_mask(struct pvr2_ctrl *cptr) | ||
146 | { | ||
147 | int ret = 0; | ||
148 | if (!cptr) return 0; | ||
149 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
150 | if (cptr->info->type == pvr2_ctl_bitmask) { | ||
151 | ret = cptr->info->def.type_bitmask.valid_bits; | ||
152 | } | ||
153 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | |||
158 | /* Retrieve the control's name */ | ||
159 | const char *pvr2_ctrl_get_name(struct pvr2_ctrl *cptr) | ||
160 | { | ||
161 | if (!cptr) return 0; | ||
162 | return cptr->info->name; | ||
163 | } | ||
164 | |||
165 | |||
166 | /* Retrieve the control's desc */ | ||
167 | const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *cptr) | ||
168 | { | ||
169 | if (!cptr) return 0; | ||
170 | return cptr->info->desc; | ||
171 | } | ||
172 | |||
173 | |||
174 | /* Retrieve a control enumeration or bit mask value */ | ||
175 | int pvr2_ctrl_get_valname(struct pvr2_ctrl *cptr,int val, | ||
176 | char *bptr,unsigned int bmax, | ||
177 | unsigned int *blen) | ||
178 | { | ||
179 | int ret = -EINVAL; | ||
180 | if (!cptr) return -EINVAL; | ||
181 | *blen = 0; | ||
182 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
183 | if (cptr->info->type == pvr2_ctl_enum) { | ||
184 | const char **names; | ||
185 | names = cptr->info->def.type_enum.value_names; | ||
186 | if ((val >= 0) && | ||
187 | (val < cptr->info->def.type_enum.count)) { | ||
188 | if (names[val]) { | ||
189 | *blen = scnprintf( | ||
190 | bptr,bmax,"%s", | ||
191 | names[val]); | ||
192 | } else { | ||
193 | *blen = 0; | ||
194 | } | ||
195 | ret = 0; | ||
196 | } | ||
197 | } else if (cptr->info->type == pvr2_ctl_bitmask) { | ||
198 | const char **names; | ||
199 | unsigned int idx; | ||
200 | int msk; | ||
201 | names = cptr->info->def.type_bitmask.bit_names; | ||
202 | val &= cptr->info->def.type_bitmask.valid_bits; | ||
203 | for (idx = 0, msk = 1; val; idx++, msk <<= 1) { | ||
204 | if (val & msk) { | ||
205 | *blen = scnprintf(bptr,bmax,"%s", | ||
206 | names[idx]); | ||
207 | ret = 0; | ||
208 | break; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | |||
217 | /* Return V4L ID for this control or zero if none */ | ||
218 | int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *cptr) | ||
219 | { | ||
220 | if (!cptr) return 0; | ||
221 | return cptr->info->v4l_id; | ||
222 | } | ||
223 | |||
224 | |||
225 | unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *cptr) | ||
226 | { | ||
227 | unsigned int flags = 0; | ||
228 | |||
229 | if (cptr->info->get_v4lflags) { | ||
230 | flags = cptr->info->get_v4lflags(cptr); | ||
231 | } | ||
232 | |||
233 | if (cptr->info->set_value) { | ||
234 | flags &= ~V4L2_CTRL_FLAG_READ_ONLY; | ||
235 | } else { | ||
236 | flags |= V4L2_CTRL_FLAG_READ_ONLY; | ||
237 | } | ||
238 | |||
239 | return flags; | ||
240 | } | ||
241 | |||
242 | |||
243 | /* Return true if control is writable */ | ||
244 | int pvr2_ctrl_is_writable(struct pvr2_ctrl *cptr) | ||
245 | { | ||
246 | if (!cptr) return 0; | ||
247 | return cptr->info->set_value != 0; | ||
248 | } | ||
249 | |||
250 | |||
251 | /* Return true if control has custom symbolic representation */ | ||
252 | int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *cptr) | ||
253 | { | ||
254 | if (!cptr) return 0; | ||
255 | if (!cptr->info->val_to_sym) return 0; | ||
256 | if (!cptr->info->sym_to_val) return 0; | ||
257 | return !0; | ||
258 | } | ||
259 | |||
260 | |||
261 | /* Convert a given mask/val to a custom symbolic value */ | ||
262 | int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *cptr, | ||
263 | int mask,int val, | ||
264 | char *buf,unsigned int maxlen, | ||
265 | unsigned int *len) | ||
266 | { | ||
267 | if (!cptr) return -EINVAL; | ||
268 | if (!cptr->info->val_to_sym) return -EINVAL; | ||
269 | return cptr->info->val_to_sym(cptr,mask,val,buf,maxlen,len); | ||
270 | } | ||
271 | |||
272 | |||
273 | /* Convert a symbolic value to a mask/value pair */ | ||
274 | int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *cptr, | ||
275 | const char *buf,unsigned int len, | ||
276 | int *maskptr,int *valptr) | ||
277 | { | ||
278 | if (!cptr) return -EINVAL; | ||
279 | if (!cptr->info->sym_to_val) return -EINVAL; | ||
280 | return cptr->info->sym_to_val(cptr,buf,len,maskptr,valptr); | ||
281 | } | ||
282 | |||
283 | |||
284 | static unsigned int gen_bitmask_string(int msk,int val,int msk_only, | ||
285 | const char **names, | ||
286 | char *ptr,unsigned int len) | ||
287 | { | ||
288 | unsigned int idx; | ||
289 | long sm,um; | ||
290 | int spcFl; | ||
291 | unsigned int uc,cnt; | ||
292 | const char *idStr; | ||
293 | |||
294 | spcFl = 0; | ||
295 | uc = 0; | ||
296 | um = 0; | ||
297 | for (idx = 0, sm = 1; msk; idx++, sm <<= 1) { | ||
298 | if (sm & msk) { | ||
299 | msk &= ~sm; | ||
300 | idStr = names[idx]; | ||
301 | if (idStr) { | ||
302 | cnt = scnprintf(ptr,len,"%s%s%s", | ||
303 | (spcFl ? " " : ""), | ||
304 | (msk_only ? "" : | ||
305 | ((val & sm) ? "+" : "-")), | ||
306 | idStr); | ||
307 | ptr += cnt; len -= cnt; uc += cnt; | ||
308 | spcFl = !0; | ||
309 | } else { | ||
310 | um |= sm; | ||
311 | } | ||
312 | } | ||
313 | } | ||
314 | if (um) { | ||
315 | if (msk_only) { | ||
316 | cnt = scnprintf(ptr,len,"%s0x%lx", | ||
317 | (spcFl ? " " : ""), | ||
318 | um); | ||
319 | ptr += cnt; len -= cnt; uc += cnt; | ||
320 | spcFl = !0; | ||
321 | } else if (um & val) { | ||
322 | cnt = scnprintf(ptr,len,"%s+0x%lx", | ||
323 | (spcFl ? " " : ""), | ||
324 | um & val); | ||
325 | ptr += cnt; len -= cnt; uc += cnt; | ||
326 | spcFl = !0; | ||
327 | } else if (um & ~val) { | ||
328 | cnt = scnprintf(ptr,len,"%s+0x%lx", | ||
329 | (spcFl ? " " : ""), | ||
330 | um & ~val); | ||
331 | ptr += cnt; len -= cnt; uc += cnt; | ||
332 | spcFl = !0; | ||
333 | } | ||
334 | } | ||
335 | return uc; | ||
336 | } | ||
337 | |||
338 | |||
339 | static const char *boolNames[] = { | ||
340 | "false", | ||
341 | "true", | ||
342 | "no", | ||
343 | "yes", | ||
344 | }; | ||
345 | |||
346 | |||
347 | static int parse_token(const char *ptr,unsigned int len, | ||
348 | int *valptr, | ||
349 | const char **names,unsigned int namecnt) | ||
350 | { | ||
351 | char buf[33]; | ||
352 | unsigned int slen; | ||
353 | unsigned int idx; | ||
354 | int negfl; | ||
355 | char *p2; | ||
356 | *valptr = 0; | ||
357 | if (!names) namecnt = 0; | ||
358 | for (idx = 0; idx < namecnt; idx++) { | ||
359 | if (!names[idx]) continue; | ||
360 | slen = strlen(names[idx]); | ||
361 | if (slen != len) continue; | ||
362 | if (memcmp(names[idx],ptr,slen)) continue; | ||
363 | *valptr = idx; | ||
364 | return 0; | ||
365 | } | ||
366 | negfl = 0; | ||
367 | if ((*ptr == '-') || (*ptr == '+')) { | ||
368 | negfl = (*ptr == '-'); | ||
369 | ptr++; len--; | ||
370 | } | ||
371 | if (len >= sizeof(buf)) return -EINVAL; | ||
372 | memcpy(buf,ptr,len); | ||
373 | buf[len] = 0; | ||
374 | *valptr = simple_strtol(buf,&p2,0); | ||
375 | if (negfl) *valptr = -(*valptr); | ||
376 | if (*p2) return -EINVAL; | ||
377 | return 1; | ||
378 | } | ||
379 | |||
380 | |||
381 | static int parse_mtoken(const char *ptr,unsigned int len, | ||
382 | int *valptr, | ||
383 | const char **names,int valid_bits) | ||
384 | { | ||
385 | char buf[33]; | ||
386 | unsigned int slen; | ||
387 | unsigned int idx; | ||
388 | char *p2; | ||
389 | int msk; | ||
390 | *valptr = 0; | ||
391 | for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) { | ||
392 | if (!(msk & valid_bits)) continue; | ||
393 | valid_bits &= ~msk; | ||
394 | if (!names[idx]) continue; | ||
395 | slen = strlen(names[idx]); | ||
396 | if (slen != len) continue; | ||
397 | if (memcmp(names[idx],ptr,slen)) continue; | ||
398 | *valptr = msk; | ||
399 | return 0; | ||
400 | } | ||
401 | if (len >= sizeof(buf)) return -EINVAL; | ||
402 | memcpy(buf,ptr,len); | ||
403 | buf[len] = 0; | ||
404 | *valptr = simple_strtol(buf,&p2,0); | ||
405 | if (*p2) return -EINVAL; | ||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | |||
410 | static int parse_tlist(const char *ptr,unsigned int len, | ||
411 | int *maskptr,int *valptr, | ||
412 | const char **names,int valid_bits) | ||
413 | { | ||
414 | unsigned int cnt; | ||
415 | int mask,val,kv,mode,ret; | ||
416 | mask = 0; | ||
417 | val = 0; | ||
418 | ret = 0; | ||
419 | while (len) { | ||
420 | cnt = 0; | ||
421 | while ((cnt < len) && | ||
422 | ((ptr[cnt] <= 32) || | ||
423 | (ptr[cnt] >= 127))) cnt++; | ||
424 | ptr += cnt; | ||
425 | len -= cnt; | ||
426 | mode = 0; | ||
427 | if ((*ptr == '-') || (*ptr == '+')) { | ||
428 | mode = (*ptr == '-') ? -1 : 1; | ||
429 | ptr++; | ||
430 | len--; | ||
431 | } | ||
432 | cnt = 0; | ||
433 | while (cnt < len) { | ||
434 | if (ptr[cnt] <= 32) break; | ||
435 | if (ptr[cnt] >= 127) break; | ||
436 | cnt++; | ||
437 | } | ||
438 | if (!cnt) break; | ||
439 | if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) { | ||
440 | ret = -EINVAL; | ||
441 | break; | ||
442 | } | ||
443 | ptr += cnt; | ||
444 | len -= cnt; | ||
445 | switch (mode) { | ||
446 | case 0: | ||
447 | mask = valid_bits; | ||
448 | val |= kv; | ||
449 | break; | ||
450 | case -1: | ||
451 | mask |= kv; | ||
452 | val &= ~kv; | ||
453 | break; | ||
454 | case 1: | ||
455 | mask |= kv; | ||
456 | val |= kv; | ||
457 | break; | ||
458 | default: | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | *maskptr = mask; | ||
463 | *valptr = val; | ||
464 | return ret; | ||
465 | } | ||
466 | |||
467 | |||
468 | /* Convert a symbolic value to a mask/value pair */ | ||
469 | int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, | ||
470 | const char *ptr,unsigned int len, | ||
471 | int *maskptr,int *valptr) | ||
472 | { | ||
473 | int ret = -EINVAL; | ||
474 | unsigned int cnt; | ||
475 | |||
476 | *maskptr = 0; | ||
477 | *valptr = 0; | ||
478 | |||
479 | cnt = 0; | ||
480 | while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++; | ||
481 | len -= cnt; ptr += cnt; | ||
482 | cnt = 0; | ||
483 | while ((cnt < len) && ((ptr[len-(cnt+1)] <= 32) || | ||
484 | (ptr[len-(cnt+1)] >= 127))) cnt++; | ||
485 | len -= cnt; | ||
486 | |||
487 | if (!len) return -EINVAL; | ||
488 | |||
489 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
490 | if (cptr->info->type == pvr2_ctl_int) { | ||
491 | ret = parse_token(ptr,len,valptr,0,0); | ||
492 | if ((ret >= 0) && | ||
493 | ((*valptr < cptr->info->def.type_int.min_value) || | ||
494 | (*valptr > cptr->info->def.type_int.max_value))) { | ||
495 | ret = -ERANGE; | ||
496 | } | ||
497 | if (maskptr) *maskptr = ~0; | ||
498 | } else if (cptr->info->type == pvr2_ctl_bool) { | ||
499 | ret = parse_token( | ||
500 | ptr,len,valptr,boolNames, | ||
501 | sizeof(boolNames)/sizeof(boolNames[0])); | ||
502 | if (ret == 1) { | ||
503 | *valptr = *valptr ? !0 : 0; | ||
504 | } else if (ret == 0) { | ||
505 | *valptr = (*valptr & 1) ? !0 : 0; | ||
506 | } | ||
507 | if (maskptr) *maskptr = 1; | ||
508 | } else if (cptr->info->type == pvr2_ctl_enum) { | ||
509 | ret = parse_token( | ||
510 | ptr,len,valptr, | ||
511 | cptr->info->def.type_enum.value_names, | ||
512 | cptr->info->def.type_enum.count); | ||
513 | if ((ret >= 0) && | ||
514 | ((*valptr < 0) || | ||
515 | (*valptr >= cptr->info->def.type_enum.count))) { | ||
516 | ret = -ERANGE; | ||
517 | } | ||
518 | if (maskptr) *maskptr = ~0; | ||
519 | } else if (cptr->info->type == pvr2_ctl_bitmask) { | ||
520 | ret = parse_tlist( | ||
521 | ptr,len,maskptr,valptr, | ||
522 | cptr->info->def.type_bitmask.bit_names, | ||
523 | cptr->info->def.type_bitmask.valid_bits); | ||
524 | } | ||
525 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
526 | return ret; | ||
527 | } | ||
528 | |||
529 | |||
530 | /* Convert a given mask/val to a symbolic value */ | ||
531 | int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *cptr, | ||
532 | int mask,int val, | ||
533 | char *buf,unsigned int maxlen, | ||
534 | unsigned int *len) | ||
535 | { | ||
536 | int ret = -EINVAL; | ||
537 | |||
538 | *len = 0; | ||
539 | if (cptr->info->type == pvr2_ctl_int) { | ||
540 | *len = scnprintf(buf,maxlen,"%d",val); | ||
541 | ret = 0; | ||
542 | } else if (cptr->info->type == pvr2_ctl_bool) { | ||
543 | *len = scnprintf(buf,maxlen,"%s",val ? "true" : "false"); | ||
544 | ret = 0; | ||
545 | } else if (cptr->info->type == pvr2_ctl_enum) { | ||
546 | const char **names; | ||
547 | names = cptr->info->def.type_enum.value_names; | ||
548 | if ((val >= 0) && | ||
549 | (val < cptr->info->def.type_enum.count)) { | ||
550 | if (names[val]) { | ||
551 | *len = scnprintf( | ||
552 | buf,maxlen,"%s", | ||
553 | names[val]); | ||
554 | } else { | ||
555 | *len = 0; | ||
556 | } | ||
557 | ret = 0; | ||
558 | } | ||
559 | } else if (cptr->info->type == pvr2_ctl_bitmask) { | ||
560 | *len = gen_bitmask_string( | ||
561 | val & mask & cptr->info->def.type_bitmask.valid_bits, | ||
562 | ~0,!0, | ||
563 | cptr->info->def.type_bitmask.bit_names, | ||
564 | buf,maxlen); ret = 0; | ||
565 | } | ||
566 | return ret; | ||
567 | } | ||
568 | |||
569 | |||
570 | /* Convert a given mask/val to a symbolic value */ | ||
571 | int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr, | ||
572 | int mask,int val, | ||
573 | char *buf,unsigned int maxlen, | ||
574 | unsigned int *len) | ||
575 | { | ||
576 | int ret; | ||
577 | LOCK_TAKE(cptr->hdw->big_lock); do { | ||
578 | ret = pvr2_ctrl_value_to_sym_internal(cptr,mask,val, | ||
579 | buf,maxlen,len); | ||
580 | } while(0); LOCK_GIVE(cptr->hdw->big_lock); | ||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | |||
585 | /* | ||
586 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
587 | *** Local Variables: *** | ||
588 | *** mode: c *** | ||
589 | *** fill-column: 75 *** | ||
590 | *** tab-width: 8 *** | ||
591 | *** c-basic-offset: 8 *** | ||
592 | *** End: *** | ||
593 | */ | ||
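
gen_bitmask_string() above renders a bitmask control as a space-separated list of "+name"/"-name" tokens, folding any bits without names into a trailing hex constant, and parse_tlist() accepts the same notation back. A stand-alone sketch of the rendering half, with a made-up bit_names table:

#include <stdio.h>

static const char *bit_names[] = { "CC", "SAP", NULL, "MUTE" };

static void print_bits(unsigned int msk, unsigned int val)
{
        unsigned int idx, sm, unnamed = 0;
        for (idx = 0, sm = 1; msk; idx++, sm <<= 1) {
                if (!(sm & msk)) continue;
                msk &= ~sm;
                if (idx < sizeof(bit_names)/sizeof(bit_names[0]) && bit_names[idx])
                        printf("%c%s ", (val & sm) ? '+' : '-', bit_names[idx]);
                else
                        unnamed |= sm;   /* no name for this bit: report in hex */
        }
        if (unnamed) printf("0x%x", unnamed);
        printf("\n");
}

int main(void)
{
        print_bits(0x0f, 0x09);   /* prints: +CC -SAP +MUTE 0x4 */
        return 0;
}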
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.h b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h new file mode 100644 index 000000000000..c1680053cd64 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_CTRL_H | ||
22 | #define __PVRUSB2_CTRL_H | ||
23 | |||
24 | struct pvr2_ctrl; | ||
25 | |||
26 | enum pvr2_ctl_type { | ||
27 | pvr2_ctl_int = 0, | ||
28 | pvr2_ctl_enum = 1, | ||
29 | pvr2_ctl_bitmask = 2, | ||
30 | pvr2_ctl_bool = 3, | ||
31 | }; | ||
32 | |||
33 | |||
34 | /* Set the given control. */ | ||
35 | int pvr2_ctrl_set_value(struct pvr2_ctrl *,int val); | ||
36 | |||
37 | /* Set/clear specific bits of the given control. */ | ||
38 | int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *,int mask,int val); | ||
39 | |||
40 | /* Get the current value of the given control. */ | ||
41 | int pvr2_ctrl_get_value(struct pvr2_ctrl *,int *valptr); | ||
42 | |||
43 | /* Retrieve control's type */ | ||
44 | enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *); | ||
45 | |||
46 | /* Retrieve control's maximum value (int type) */ | ||
47 | int pvr2_ctrl_get_max(struct pvr2_ctrl *); | ||
48 | |||
49 | /* Retrieve control's minimum value (int type) */ | ||
50 | int pvr2_ctrl_get_min(struct pvr2_ctrl *); | ||
51 | |||
52 | /* Retrieve control's default value (any type) */ | ||
53 | int pvr2_ctrl_get_def(struct pvr2_ctrl *); | ||
54 | |||
55 | /* Retrieve control's enumeration count (enum only) */ | ||
56 | int pvr2_ctrl_get_cnt(struct pvr2_ctrl *); | ||
57 | |||
58 | /* Retrieve control's valid mask bits (bit mask only) */ | ||
59 | int pvr2_ctrl_get_mask(struct pvr2_ctrl *); | ||
60 | |||
61 | /* Retrieve the control's name */ | ||
62 | const char *pvr2_ctrl_get_name(struct pvr2_ctrl *); | ||
63 | |||
64 | /* Retrieve the control's desc */ | ||
65 | const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *); | ||
66 | |||
67 | /* Retrieve a control enumeration or bit mask value */ | ||
68 | int pvr2_ctrl_get_valname(struct pvr2_ctrl *,int,char *,unsigned int, | ||
69 | unsigned int *); | ||
70 | |||
71 | /* Return true if control is writable */ | ||
72 | int pvr2_ctrl_is_writable(struct pvr2_ctrl *); | ||
73 | |||
74 | /* Return V4L flags value for control (or zero if there is no v4l control | ||
75 | actually under this control) */ | ||
76 | unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *); | ||
77 | |||
78 | /* Return V4L ID for this control or zero if none */ | ||
79 | int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *); | ||
80 | |||
81 | /* Return true if control has custom symbolic representation */ | ||
82 | int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *); | ||
83 | |||
84 | /* Convert a given mask/val to a custom symbolic value */ | ||
85 | int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *, | ||
86 | int mask,int val, | ||
87 | char *buf,unsigned int maxlen, | ||
88 | unsigned int *len); | ||
89 | |||
90 | /* Convert a symbolic value to a mask/value pair */ | ||
91 | int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *, | ||
92 | const char *buf,unsigned int len, | ||
93 | int *maskptr,int *valptr); | ||
94 | |||
95 | /* Convert a given mask/val to a symbolic value */ | ||
96 | int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *, | ||
97 | int mask,int val, | ||
98 | char *buf,unsigned int maxlen, | ||
99 | unsigned int *len); | ||
100 | |||
101 | /* Convert a symbolic value to a mask/value pair */ | ||
102 | int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *, | ||
103 | const char *buf,unsigned int len, | ||
104 | int *maskptr,int *valptr); | ||
105 | |||
106 | /* Convert a given mask/val to a symbolic value - caller must already be | ||
107 | inside the critical region. */ | ||
108 | int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *, | ||
109 | int mask,int val, | ||
110 | char *buf,unsigned int maxlen, | ||
111 | unsigned int *len); | ||
112 | |||
113 | #endif /* __PVRUSB2_CTRL_H */ | ||
114 | |||
115 | /* | ||
116 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
117 | *** Local Variables: *** | ||
118 | *** mode: c *** | ||
119 | *** fill-column: 75 *** | ||
120 | *** tab-width: 8 *** | ||
121 | *** c-basic-offset: 8 *** | ||
122 | *** End: *** | ||
123 | */ | ||
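
pvr2_ctrl_set_mask_value(), declared above, validates a write against the control's type before anything reaches the hardware: integer values must fall inside [min,max], enum values inside [0,count), bitmask writes are clipped to valid_bits, and booleans are normalized. A simplified user-space model of that validation step (struct toy_ctrl is an invented stand-in, not the driver's pvr2_ctrl):

#include <stdio.h>

enum ctl_type { ctl_int, ctl_bool, ctl_enum, ctl_bitmask };

struct toy_ctrl {
        enum ctl_type type;
        int min_value, max_value;   /* for ctl_int */
        int count;                  /* for ctl_enum */
        int valid_bits;             /* for ctl_bitmask */
        int value;
};

/* Returns 1 if the write was accepted, 0 if it was rejected. */
static int toy_set(struct toy_ctrl *c, int mask, int val)
{
        switch (c->type) {
        case ctl_bitmask: mask &= c->valid_bits; break;
        case ctl_int:  if (val < c->min_value || val > c->max_value) return 0; break;
        case ctl_enum: if (val < 0 || val >= c->count) return 0; break;
        case ctl_bool: val = !!val; break;
        }
        c->value = (c->value & ~mask) | (val & mask);
        return 1;
}

int main(void)
{
        struct toy_ctrl vol = { .type = ctl_int, .min_value = 0, .max_value = 65535 };
        printf("set 70000 accepted? %d (value %d)\n", toy_set(&vol, ~0, 70000), vol.value);
        printf("set 30000 accepted? %d (value %d)\n", toy_set(&vol, ~0, 30000), vol.value);
        return 0;
}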
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c new file mode 100644 index 000000000000..27eadaff75a0 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | |||
25 | This source file is specifically designed to interface with the | ||
26 | cx2584x, in kernels 2.6.16 or newer. | ||
27 | |||
28 | */ | ||
29 | |||
30 | #include "pvrusb2-cx2584x-v4l.h" | ||
31 | #include "pvrusb2-video-v4l.h" | ||
32 | #include "pvrusb2-i2c-cmd-v4l2.h" | ||
33 | |||
34 | |||
35 | #include "pvrusb2-hdw-internal.h" | ||
36 | #include "pvrusb2-debug.h" | ||
37 | #include <media/cx25840.h> | ||
38 | #include <linux/videodev2.h> | ||
39 | #include <media/v4l2-common.h> | ||
40 | #include <linux/errno.h> | ||
41 | #include <linux/slab.h> | ||
42 | |||
43 | struct pvr2_v4l_cx2584x { | ||
44 | struct pvr2_i2c_handler handler; | ||
45 | struct pvr2_decoder_ctrl ctrl; | ||
46 | struct pvr2_i2c_client *client; | ||
47 | struct pvr2_hdw *hdw; | ||
48 | unsigned long stale_mask; | ||
49 | }; | ||
50 | |||
51 | |||
52 | static void set_input(struct pvr2_v4l_cx2584x *ctxt) | ||
53 | { | ||
54 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
55 | struct v4l2_routing route; | ||
56 | enum cx25840_video_input vid_input; | ||
57 | enum cx25840_audio_input aud_input; | ||
58 | |||
59 | memset(&route,0,sizeof(route)); | ||
60 | |||
61 | switch(hdw->input_val) { | ||
62 | case PVR2_CVAL_INPUT_TV: | ||
63 | vid_input = CX25840_COMPOSITE7; | ||
64 | aud_input = CX25840_AUDIO8; | ||
65 | break; | ||
66 | case PVR2_CVAL_INPUT_COMPOSITE: | ||
67 | vid_input = CX25840_COMPOSITE3; | ||
68 | aud_input = CX25840_AUDIO_SERIAL; | ||
69 | break; | ||
70 | case PVR2_CVAL_INPUT_SVIDEO: | ||
71 | vid_input = CX25840_SVIDEO1; | ||
72 | aud_input = CX25840_AUDIO_SERIAL; | ||
73 | break; | ||
74 | case PVR2_CVAL_INPUT_RADIO: | ||
75 | default: | ||
76 | // Just set it to be composite input for now... | ||
77 | vid_input = CX25840_COMPOSITE3; | ||
78 | aud_input = CX25840_AUDIO_SERIAL; | ||
79 | break; | ||
80 | } | ||
81 | |||
82 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_input vid=0x%x aud=0x%x", | ||
83 | vid_input,aud_input); | ||
84 | route.input = (u32)vid_input; | ||
85 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route); | ||
86 | route.input = (u32)aud_input; | ||
87 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route); | ||
88 | } | ||
89 | |||
90 | |||
91 | static int check_input(struct pvr2_v4l_cx2584x *ctxt) | ||
92 | { | ||
93 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
94 | return hdw->input_dirty != 0; | ||
95 | } | ||
96 | |||
97 | |||
98 | static void set_audio(struct pvr2_v4l_cx2584x *ctxt) | ||
99 | { | ||
100 | u32 val; | ||
101 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
102 | |||
103 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_audio %d", | ||
104 | hdw->srate_val); | ||
105 | switch (hdw->srate_val) { | ||
106 | default: | ||
107 | case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000: | ||
108 | val = 48000; | ||
109 | break; | ||
110 | case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100: | ||
111 | val = 44100; | ||
112 | break; | ||
113 | case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000: | ||
114 | val = 32000; | ||
115 | break; | ||
116 | } | ||
117 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val); | ||
118 | } | ||
119 | |||
120 | |||
121 | static int check_audio(struct pvr2_v4l_cx2584x *ctxt) | ||
122 | { | ||
123 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
124 | return hdw->srate_dirty != 0; | ||
125 | } | ||
126 | |||
127 | |||
128 | struct pvr2_v4l_cx2584x_ops { | ||
129 | void (*update)(struct pvr2_v4l_cx2584x *); | ||
130 | int (*check)(struct pvr2_v4l_cx2584x *); | ||
131 | }; | ||
132 | |||
133 | |||
134 | static const struct pvr2_v4l_cx2584x_ops decoder_ops[] = { | ||
135 | { .update = set_input, .check = check_input}, | ||
136 | { .update = set_audio, .check = check_audio}, | ||
137 | }; | ||
138 | |||
139 | |||
140 | static void decoder_detach(struct pvr2_v4l_cx2584x *ctxt) | ||
141 | { | ||
142 | ctxt->client->handler = 0; | ||
143 | ctxt->hdw->decoder_ctrl = 0; | ||
144 | kfree(ctxt); | ||
145 | } | ||
146 | |||
147 | |||
148 | static int decoder_check(struct pvr2_v4l_cx2584x *ctxt) | ||
149 | { | ||
150 | unsigned long msk; | ||
151 | unsigned int idx; | ||
152 | |||
153 | for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]); | ||
154 | idx++) { | ||
155 | msk = 1 << idx; | ||
156 | if (ctxt->stale_mask & msk) continue; | ||
157 | if (decoder_ops[idx].check(ctxt)) { | ||
158 | ctxt->stale_mask |= msk; | ||
159 | } | ||
160 | } | ||
161 | return ctxt->stale_mask != 0; | ||
162 | } | ||
163 | |||
164 | |||
165 | static void decoder_update(struct pvr2_v4l_cx2584x *ctxt) | ||
166 | { | ||
167 | unsigned long msk; | ||
168 | unsigned int idx; | ||
169 | |||
170 | for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]); | ||
171 | idx++) { | ||
172 | msk = 1 << idx; | ||
173 | if (!(ctxt->stale_mask & msk)) continue; | ||
174 | ctxt->stale_mask &= ~msk; | ||
175 | decoder_ops[idx].update(ctxt); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | |||
180 | static void decoder_enable(struct pvr2_v4l_cx2584x *ctxt,int fl) | ||
181 | { | ||
182 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_enable(%d)",fl); | ||
183 | pvr2_v4l2_cmd_stream(ctxt->client,fl); | ||
184 | } | ||
185 | |||
186 | |||
187 | static int decoder_detect(struct pvr2_i2c_client *cp) | ||
188 | { | ||
189 | int ret; | ||
190 | /* Attempt to query the decoder - let's see if it will answer */ | ||
191 | struct v4l2_queryctrl qc; | ||
192 | |||
193 | memset(&qc,0,sizeof(qc)); | ||
194 | |||
195 | qc.id = V4L2_CID_BRIGHTNESS; | ||
196 | |||
197 | ret = pvr2_i2c_client_cmd(cp,VIDIOC_QUERYCTRL,&qc); | ||
198 | return ret == 0; /* Return true if it answered */ | ||
199 | } | ||
200 | |||
201 | |||
202 | static int decoder_is_tuned(struct pvr2_v4l_cx2584x *ctxt) | ||
203 | { | ||
204 | struct v4l2_tuner vt; | ||
205 | int ret; | ||
206 | |||
207 | memset(&vt,0,sizeof(vt)); | ||
208 | ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt); | ||
209 | if (ret < 0) return -EINVAL; | ||
210 | return vt.signal ? 1 : 0; | ||
211 | } | ||
212 | |||
213 | |||
214 | static unsigned int decoder_describe(struct pvr2_v4l_cx2584x *ctxt, | ||
215 | char *buf,unsigned int cnt) | ||
216 | { | ||
217 | return scnprintf(buf,cnt,"handler: pvrusb2-cx2584x-v4l"); | ||
218 | } | ||
219 | |||
220 | |||
221 | static void decoder_reset(struct pvr2_v4l_cx2584x *ctxt) | ||
222 | { | ||
223 | int ret; | ||
224 | ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_RESET,0); | ||
225 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_reset (ret=%d)",ret); | ||
226 | } | ||
227 | |||
228 | |||
229 | static const struct pvr2_i2c_handler_functions hfuncs = { | ||
230 | .detach = (void (*)(void *))decoder_detach, | ||
231 | .check = (int (*)(void *))decoder_check, | ||
232 | .update = (void (*)(void *))decoder_update, | ||
233 | .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe, | ||
234 | }; | ||
235 | |||
236 | |||
237 | int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *hdw, | ||
238 | struct pvr2_i2c_client *cp) | ||
239 | { | ||
240 | struct pvr2_v4l_cx2584x *ctxt; | ||
241 | |||
242 | if (hdw->decoder_ctrl) return 0; | ||
243 | if (cp->handler) return 0; | ||
244 | if (!decoder_detect(cp)) return 0; | ||
245 | |||
246 | ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL); | ||
247 | if (!ctxt) return 0; | ||
248 | memset(ctxt,0,sizeof(*ctxt)); | ||
249 | |||
250 | ctxt->handler.func_data = ctxt; | ||
251 | ctxt->handler.func_table = &hfuncs; | ||
252 | ctxt->ctrl.ctxt = ctxt; | ||
253 | ctxt->ctrl.detach = (void (*)(void *))decoder_detach; | ||
254 | ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable; | ||
255 | ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned; | ||
256 | ctxt->ctrl.force_reset = (void (*)(void*))decoder_reset; | ||
257 | ctxt->client = cp; | ||
258 | ctxt->hdw = hdw; | ||
259 | ctxt->stale_mask = (1 << (sizeof(decoder_ops)/ | ||
260 | sizeof(decoder_ops[0]))) - 1; | ||
261 | hdw->decoder_ctrl = &ctxt->ctrl; | ||
262 | cp->handler = &ctxt->handler; | ||
263 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x cx2584x V4L2 handler set up", | ||
264 | cp->client->addr); | ||
265 | return !0; | ||
266 | } | ||
267 | |||
268 | |||
269 | |||
270 | |||
271 | /* | ||
272 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
273 | *** Local Variables: *** | ||
274 | *** mode: c *** | ||
275 | *** fill-column: 70 *** | ||
276 | *** tab-width: 8 *** | ||
277 | *** c-basic-offset: 8 *** | ||
278 | *** End: *** | ||
279 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h new file mode 100644 index 000000000000..54b2844e7a71 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_CX2584X_V4L_H | ||
24 | #define __PVRUSB2_CX2584X_V4L_H | ||
25 | |||
26 | /* | ||
27 | |||
28 | This module connects the pvrusb2 driver to the I2C chip level | ||
29 | driver which handles combined device audio & video processing. | ||
30 | This interface is used internally by the driver; higher level code | ||
31 | should only interact through the interface provided by | ||
32 | pvrusb2-hdw.h. | ||
33 | |||
34 | */ | ||
35 | |||
36 | |||
37 | |||
38 | #include "pvrusb2-i2c-core.h" | ||
39 | |||
40 | int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
41 | |||
42 | |||
43 | #endif /* __PVRUSB2_CX2584X_V4L_H */ | ||
44 | |||
45 | /* | ||
46 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
47 | *** Local Variables: *** | ||
48 | *** mode: c *** | ||
49 | *** fill-column: 70 *** | ||
50 | *** tab-width: 8 *** | ||
51 | *** c-basic-offset: 8 *** | ||
52 | *** End: *** | ||
53 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debug.h b/drivers/media/video/pvrusb2/pvrusb2-debug.h new file mode 100644 index 000000000000..d95a8588e4f8 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-debug.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * $Id$ | ||
3 | * | ||
4 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | */ | ||
20 | #ifndef __PVRUSB2_DEBUG_H | ||
21 | #define __PVRUSB2_DEBUG_H | ||
22 | |||
23 | extern int pvrusb2_debug; | ||
24 | |||
25 | #define pvr2_trace(msk, fmt, arg...) do { if ((msk) & pvrusb2_debug) printk(KERN_INFO "pvrusb2: " fmt "\n", ##arg); } while (0) | ||
26 | |||
27 | /* These are listed in *rough* order of decreasing usefulness and | ||
28 | increasing noise level. */ | ||
29 | #define PVR2_TRACE_INFO (1 << 0) // Normal messages | ||
30 | #define PVR2_TRACE_ERROR_LEGS (1 << 1) // error messages | ||
31 | #define PVR2_TRACE_TOLERANCE (1 << 2) // track tolerance-affected errors | ||
32 | #define PVR2_TRACE_TRAP (1 << 3) // Trap & report misbehavior from app | ||
33 | #define PVR2_TRACE_INIT (1 << 4) // misc initialization steps | ||
34 | #define PVR2_TRACE_START_STOP (1 << 5) // Streaming start / stop | ||
35 | #define PVR2_TRACE_CTL (1 << 6) // commit of control changes | ||
36 | #define PVR2_TRACE_DEBUG (1 << 7) // Temporary debug code | ||
37 | #define PVR2_TRACE_EEPROM (1 << 8) // eeprom parsing / report | ||
38 | #define PVR2_TRACE_STRUCT (1 << 9) // internal struct creation | ||
39 | #define PVR2_TRACE_OPEN_CLOSE (1 << 10) // application open / close | ||
40 | #define PVR2_TRACE_CREG (1 << 11) // Main critical region entry / exit | ||
41 | #define PVR2_TRACE_SYSFS (1 << 12) // Sysfs driven I/O | ||
42 | #define PVR2_TRACE_FIRMWARE (1 << 13) // firmware upload actions | ||
43 | #define PVR2_TRACE_CHIPS (1 << 14) // chip broadcast operation | ||
44 | #define PVR2_TRACE_I2C (1 << 15) // I2C related stuff | ||
45 | #define PVR2_TRACE_I2C_CMD (1 << 16) // Software commands to I2C modules | ||
46 | #define PVR2_TRACE_I2C_CORE (1 << 17) // I2C core debugging | ||
47 | #define PVR2_TRACE_I2C_TRAF (1 << 18) // I2C traffic through the adapter | ||
48 | #define PVR2_TRACE_V4LIOCTL (1 << 19) // v4l ioctl details | ||
49 | #define PVR2_TRACE_ENCODER (1 << 20) // mpeg2 encoder operation | ||
50 | #define PVR2_TRACE_BUF_POOL (1 << 21) // Track buffer pool management | ||
51 | #define PVR2_TRACE_BUF_FLOW (1 << 22) // Track buffer flow in system | ||
52 | #define PVR2_TRACE_DATA_FLOW (1 << 23) // Track data flow | ||
53 | #define PVR2_TRACE_DEBUGIFC (1 << 24) // Debug interface actions | ||
54 | #define PVR2_TRACE_GPIO (1 << 25) // GPIO state bit changes | ||
55 | |||
56 | |||
57 | #endif /* __PVRUSB2_DEBUG_H */ | ||
58 | |||
59 | /* | ||
60 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
61 | *** Local Variables: *** | ||
62 | *** mode: c *** | ||
63 | *** fill-column: 75 *** | ||
64 | *** tab-width: 8 *** | ||
65 | *** c-basic-offset: 8 *** | ||
66 | *** End: *** | ||
67 | */ | ||
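
pvr2_trace() gates every message on a bit in the global pvrusb2_debug mask, so individual categories from the PVR2_TRACE_* list above can be switched on without rebuilding the driver. A user-space analogue of the macro; debug_mask and the TOY_TRACE_* bits are invented for the demo (the ## variadic pasting mirrors the GNU-style "##arg" used by the driver macro):

#include <stdio.h>

static unsigned int debug_mask = (1 << 0) | (1 << 4);   /* INFO | INIT */

#define TOY_TRACE_INFO (1 << 0)
#define TOY_TRACE_INIT (1 << 4)
#define TOY_TRACE_I2C  (1 << 15)

#define toy_trace(msk, fmt, ...) \
        do { if ((msk) & debug_mask) printf("pvrusb2: " fmt "\n", ##__VA_ARGS__); } while (0)

int main(void)
{
        toy_trace(TOY_TRACE_INIT, "probing device %d", 0);   /* printed */
        toy_trace(TOY_TRACE_I2C,  "i2c write 0x%02x", 0x44); /* suppressed */
        return 0;
}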
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.c b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c new file mode 100644 index 000000000000..586900e365ff --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c | |||
@@ -0,0 +1,478 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/string.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include "pvrusb2-debugifc.h" | ||
25 | #include "pvrusb2-hdw.h" | ||
26 | #include "pvrusb2-debug.h" | ||
27 | #include "pvrusb2-i2c-core.h" | ||
28 | |||
29 | struct debugifc_mask_item { | ||
30 | const char *name; | ||
31 | unsigned long msk; | ||
32 | }; | ||
33 | |||
34 | static struct debugifc_mask_item mask_items[] = { | ||
35 | {"ENC_FIRMWARE",(1<<PVR2_SUBSYS_B_ENC_FIRMWARE)}, | ||
36 | {"ENC_CFG",(1<<PVR2_SUBSYS_B_ENC_CFG)}, | ||
37 | {"DIG_RUN",(1<<PVR2_SUBSYS_B_DIGITIZER_RUN)}, | ||
38 | {"USB_RUN",(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)}, | ||
39 | {"ENC_RUN",(1<<PVR2_SUBSYS_B_ENC_RUN)}, | ||
40 | }; | ||
41 | |||
42 | |||
43 | static unsigned int debugifc_count_whitespace(const char *buf, | ||
44 | unsigned int count) | ||
45 | { | ||
46 | unsigned int scnt; | ||
47 | char ch; | ||
48 | |||
49 | for (scnt = 0; scnt < count; scnt++) { | ||
50 | ch = buf[scnt]; | ||
51 | if (ch == ' ') continue; | ||
52 | if (ch == '\t') continue; | ||
53 | if (ch == '\n') continue; | ||
54 | break; | ||
55 | } | ||
56 | return scnt; | ||
57 | } | ||
58 | |||
59 | |||
60 | static unsigned int debugifc_count_nonwhitespace(const char *buf, | ||
61 | unsigned int count) | ||
62 | { | ||
63 | unsigned int scnt; | ||
64 | char ch; | ||
65 | |||
66 | for (scnt = 0; scnt < count; scnt++) { | ||
67 | ch = buf[scnt]; | ||
68 | if (ch == ' ') break; | ||
69 | if (ch == '\t') break; | ||
70 | if (ch == '\n') break; | ||
71 | } | ||
72 | return scnt; | ||
73 | } | ||
74 | |||
75 | |||
76 | static unsigned int debugifc_isolate_word(const char *buf,unsigned int count, | ||
77 | const char **wstrPtr, | ||
78 | unsigned int *wlenPtr) | ||
79 | { | ||
80 | const char *wptr; | ||
81 | unsigned int consume_cnt = 0; | ||
82 | unsigned int wlen; | ||
83 | unsigned int scnt; | ||
84 | |||
85 | wptr = 0; | ||
86 | wlen = 0; | ||
87 | scnt = debugifc_count_whitespace(buf,count); | ||
88 | consume_cnt += scnt; count -= scnt; buf += scnt; | ||
89 | if (!count) goto done; | ||
90 | |||
91 | scnt = debugifc_count_nonwhitespace(buf,count); | ||
92 | if (!scnt) goto done; | ||
93 | wptr = buf; | ||
94 | wlen = scnt; | ||
95 | consume_cnt += scnt; count -= scnt; buf += scnt; | ||
96 | |||
97 | done: | ||
98 | *wstrPtr = wptr; | ||
99 | *wlenPtr = wlen; | ||
100 | return consume_cnt; | ||
101 | } | ||
102 | |||
103 | |||
104 | static int debugifc_parse_unsigned_number(const char *buf,unsigned int count, | ||
105 | u32 *num_ptr) | ||
106 | { | ||
107 | u32 result = 0; | ||
108 | u32 val; | ||
109 | int ch; | ||
110 | int radix = 10; | ||
111 | if ((count >= 2) && (buf[0] == '0') && | ||
112 | ((buf[1] == 'x') || (buf[1] == 'X'))) { | ||
113 | radix = 16; | ||
114 | count -= 2; | ||
115 | buf += 2; | ||
116 | } else if ((count >= 1) && (buf[0] == '0')) { | ||
117 | radix = 8; | ||
118 | } | ||
119 | |||
120 | while (count--) { | ||
121 | ch = *buf++; | ||
122 | if ((ch >= '0') && (ch <= '9')) { | ||
123 | val = ch - '0'; | ||
124 | } else if ((ch >= 'a') && (ch <= 'f')) { | ||
125 | val = ch - 'a' + 10; | ||
126 | } else if ((ch >= 'A') && (ch <= 'F')) { | ||
127 | val = ch - 'A' + 10; | ||
128 | } else { | ||
129 | return -EINVAL; | ||
130 | } | ||
131 | if (val >= radix) return -EINVAL; | ||
132 | result *= radix; | ||
133 | result += val; | ||
134 | } | ||
135 | *num_ptr = result; | ||
136 | return 0; | ||
137 | } | ||
138 | |||
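To make the radix rule above concrete, here is a minimal stand-alone user-space sketch of what debugifc_parse_unsigned_number() accepts: "0x"/"0X" selects hexadecimal, a bare leading "0" selects octal, anything else is decimal, and a digit outside the active radix is rejected. The function name parse_unsigned and the sample strings are illustrative only, not part of the driver.

#include <stdio.h>

/* Stand-alone rework of the radix rule used by
   debugifc_parse_unsigned_number(): "0x.." -> 16, "0.." -> 8, else 10. */
static int parse_unsigned(const char *s, unsigned long *out)
{
	unsigned long result = 0;
	int radix = 10;
	int val;

	if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X')) {
		radix = 16;
		s += 2;
	} else if (s[0] == '0') {
		radix = 8;
	}
	for (; *s; s++) {
		if (*s >= '0' && *s <= '9') val = *s - '0';
		else if (*s >= 'a' && *s <= 'f') val = *s - 'a' + 10;
		else if (*s >= 'A' && *s <= 'F') val = *s - 'A' + 10;
		else return -1;
		if (val >= radix) return -1;	/* e.g. '9' while in octal */
		result = result * radix + val;
	}
	*out = result;
	return 0;
}

int main(void)
{
	unsigned long v;
	if (!parse_unsigned("0x1f", &v)) printf("0x1f -> %lu\n", v);	/* 31 */
	if (!parse_unsigned("017", &v))  printf("017  -> %lu\n", v);	/* 15 */
	if (!parse_unsigned("42", &v))   printf("42   -> %lu\n", v);	/* 42 */
	return 0;
}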
139 | |||
140 | static int debugifc_match_keyword(const char *buf,unsigned int count, | ||
141 | const char *keyword) | ||
142 | { | ||
143 | unsigned int kl; | ||
144 | if (!keyword) return 0; | ||
145 | kl = strlen(keyword); | ||
146 | if (kl != count) return 0; | ||
147 | return !memcmp(buf,keyword,kl); | ||
148 | } | ||
149 | |||
150 | |||
151 | static unsigned long debugifc_find_mask(const char *buf,unsigned int count) | ||
152 | { | ||
153 | struct debugifc_mask_item *mip; | ||
154 | unsigned int idx; | ||
155 | for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) { | ||
156 | mip = mask_items + idx; | ||
157 | if (debugifc_match_keyword(buf,count,mip->name)) { | ||
158 | return mip->msk; | ||
159 | } | ||
160 | } | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | |||
165 | static int debugifc_print_mask(char *buf,unsigned int sz, | ||
166 | unsigned long msk,unsigned long val) | ||
167 | { | ||
168 | struct debugifc_mask_item *mip; | ||
169 | unsigned int idx; | ||
170 | int bcnt = 0; | ||
171 | int ccnt; | ||
172 | for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) { | ||
173 | mip = mask_items + idx; | ||
174 | if (!(mip->msk & msk)) continue; | ||
175 | ccnt = scnprintf(buf,sz,"%s%c%s", | ||
176 | (bcnt ? " " : ""), | ||
177 | ((mip->msk & val) ? '+' : '-'), | ||
178 | mip->name); | ||
179 | sz -= ccnt; | ||
180 | buf += ccnt; | ||
181 | bcnt += ccnt; | ||
182 | } | ||
183 | return bcnt; | ||
184 | } | ||
185 | |||
186 | static unsigned int debugifc_parse_subsys_mask(const char *buf, | ||
187 | unsigned int count, | ||
188 | unsigned long *mskPtr, | ||
189 | unsigned long *valPtr) | ||
190 | { | ||
191 | const char *wptr; | ||
192 | unsigned int consume_cnt = 0; | ||
193 | unsigned int scnt; | ||
194 | unsigned int wlen; | ||
195 | int mode; | ||
196 | unsigned long m1,msk,val; | ||
197 | |||
198 | msk = 0; | ||
199 | val = 0; | ||
200 | |||
201 | while (count) { | ||
202 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
203 | if (!scnt) break; | ||
204 | consume_cnt += scnt; count -= scnt; buf += scnt; | ||
205 | if (!wptr) break; | ||
206 | |||
207 | mode = 0; | ||
208 | if (wlen) switch (wptr[0]) { | ||
209 | case '+': | ||
210 | wptr++; | ||
211 | wlen--; | ||
212 | break; | ||
213 | case '-': | ||
214 | mode = 1; | ||
215 | wptr++; | ||
216 | wlen--; | ||
217 | break; | ||
218 | } | ||
219 | if (!wlen) continue; | ||
220 | m1 = debugifc_find_mask(wptr,wlen); | ||
221 | if (!m1) break; | ||
222 | msk |= m1; | ||
223 | if (!mode) val |= m1; | ||
224 | } | ||
225 | *mskPtr = msk; | ||
226 | *valPtr = val; | ||
227 | return consume_cnt; | ||
228 | } | ||
229 | |||
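The "+NAME"/"-NAME" convention used by debugifc_parse_subsys_mask() works as follows: every recognized name sets its bit in msk, and only names prefixed with '+' (or given no prefix) also set the bit in val. A small stand-alone sketch with a made-up two-entry table (the real table is mask_items[] above); the loop structure here is simplified and illustrative only.

#include <stdio.h>
#include <string.h>

/* Illustrative only: two flag names standing in for mask_items[]. */
struct item { const char *name; unsigned long msk; };
static const struct item items[] = {
	{ "ENC_RUN", 1UL << 0 },
	{ "USB_RUN", 1UL << 1 },
};

int main(void)
{
	/* Same convention as debugifc_parse_subsys_mask(): "+NAME" means
	   "set this bit", "-NAME" means "clear this bit"; every named bit
	   lands in msk, only the '+' ones also land in val. */
	const char *words[] = { "+ENC_RUN", "-USB_RUN" };
	unsigned long msk = 0, val = 0;
	unsigned int i, j;

	for (i = 0; i < 2; i++) {
		const char *w = words[i];
		int set = (*w != '-');
		w++;				/* skip the +/- prefix */
		for (j = 0; j < 2; j++) {
			if (!strcmp(w, items[j].name)) {
				msk |= items[j].msk;
				if (set) val |= items[j].msk;
			}
		}
	}
	printf("msk=0x%lx val=0x%lx\n", msk, val);	/* msk=0x3 val=0x1 */
	return 0;
}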
230 | |||
231 | int pvr2_debugifc_print_info(struct pvr2_hdw *hdw,char *buf,unsigned int acnt) | ||
232 | { | ||
233 | int bcnt = 0; | ||
234 | int ccnt; | ||
235 | struct pvr2_hdw_debug_info dbg; | ||
236 | |||
237 | pvr2_hdw_get_debug_info(hdw,&dbg); | ||
238 | |||
239 | ccnt = scnprintf(buf,acnt,"big lock %s; ctl lock %s", | ||
240 | (dbg.big_lock_held ? "held" : "free"), | ||
241 | (dbg.ctl_lock_held ? "held" : "free")); | ||
242 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
243 | if (dbg.ctl_lock_held) { | ||
244 | ccnt = scnprintf(buf,acnt,"; cmd_state=%d cmd_code=%d" | ||
245 | " cmd_wlen=%d cmd_rlen=%d" | ||
246 | " wpend=%d rpend=%d tmout=%d rstatus=%d" | ||
247 | " wstatus=%d", | ||
248 | dbg.cmd_debug_state,dbg.cmd_code, | ||
249 | dbg.cmd_debug_write_len, | ||
250 | dbg.cmd_debug_read_len, | ||
251 | dbg.cmd_debug_write_pend, | ||
252 | dbg.cmd_debug_read_pend, | ||
253 | dbg.cmd_debug_timeout, | ||
254 | dbg.cmd_debug_rstatus, | ||
255 | dbg.cmd_debug_wstatus); | ||
256 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
257 | } | ||
258 | ccnt = scnprintf(buf,acnt,"\n"); | ||
259 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
260 | ccnt = scnprintf( | ||
261 | buf,acnt,"driver flags: %s %s %s\n", | ||
262 | (dbg.flag_init_ok ? "initialized" : "uninitialized"), | ||
263 | (dbg.flag_ok ? "ok" : "fail"), | ||
264 | (dbg.flag_disconnected ? "disconnected" : "connected")); | ||
265 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
266 | ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: "); | ||
267 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
268 | ccnt = debugifc_print_mask(buf,acnt,dbg.subsys_flags,dbg.subsys_flags); | ||
269 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
270 | ccnt = scnprintf(buf,acnt,"\n"); | ||
271 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
272 | ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: "); | ||
273 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
274 | ccnt = debugifc_print_mask(buf,acnt,~dbg.subsys_flags,dbg.subsys_flags); | ||
275 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
276 | ccnt = scnprintf(buf,acnt,"\n"); | ||
277 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
278 | |||
279 | ccnt = scnprintf(buf,acnt,"Attached I2C modules:\n"); | ||
280 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
281 | ccnt = pvr2_i2c_report(hdw,buf,acnt); | ||
282 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
283 | |||
284 | return bcnt; | ||
285 | } | ||
286 | |||
287 | |||
288 | int pvr2_debugifc_print_status(struct pvr2_hdw *hdw, | ||
289 | char *buf,unsigned int acnt) | ||
290 | { | ||
291 | int bcnt = 0; | ||
292 | int ccnt; | ||
293 | unsigned long msk; | ||
294 | int ret; | ||
295 | u32 gpio_dir,gpio_in,gpio_out; | ||
296 | |||
297 | ret = pvr2_hdw_is_hsm(hdw); | ||
298 | ccnt = scnprintf(buf,acnt,"USB link speed: %s\n", | ||
299 | (ret < 0 ? "FAIL" : (ret ? "high" : "full"))); | ||
300 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
301 | |||
302 | gpio_dir = 0; gpio_in = 0; gpio_out = 0; | ||
303 | pvr2_hdw_gpio_get_dir(hdw,&gpio_dir); | ||
304 | pvr2_hdw_gpio_get_out(hdw,&gpio_out); | ||
305 | pvr2_hdw_gpio_get_in(hdw,&gpio_in); | ||
306 | ccnt = scnprintf(buf,acnt,"GPIO state: dir=0x%x in=0x%x out=0x%x\n", | ||
307 | gpio_dir,gpio_in,gpio_out); | ||
308 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
309 | |||
310 | ccnt = scnprintf(buf,acnt,"Streaming is %s\n", | ||
311 | pvr2_hdw_get_streaming(hdw) ? "on" : "off"); | ||
312 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
313 | |||
314 | msk = pvr2_hdw_subsys_get(hdw); | ||
315 | ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: "); | ||
316 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
317 | ccnt = debugifc_print_mask(buf,acnt,msk,msk); | ||
318 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
319 | ccnt = scnprintf(buf,acnt,"\n"); | ||
320 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
321 | ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: "); | ||
322 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
323 | ccnt = debugifc_print_mask(buf,acnt,~msk,msk); | ||
324 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
325 | ccnt = scnprintf(buf,acnt,"\n"); | ||
326 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
327 | |||
328 | msk = pvr2_hdw_subsys_stream_get(hdw); | ||
329 | ccnt = scnprintf(buf,acnt,"Subsystems stopped on stream shutdown: "); | ||
330 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
331 | ccnt = debugifc_print_mask(buf,acnt,msk,msk); | ||
332 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
333 | ccnt = scnprintf(buf,acnt,"\n"); | ||
334 | bcnt += ccnt; acnt -= ccnt; buf += ccnt; | ||
335 | |||
336 | return bcnt; | ||
337 | } | ||
338 | |||
339 | |||
340 | int pvr2_debugifc_do1cmd(struct pvr2_hdw *hdw,const char *buf, | ||
341 | unsigned int count) | ||
342 | { | ||
343 | const char *wptr; | ||
344 | unsigned int wlen; | ||
345 | unsigned int scnt; | ||
346 | |||
347 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
348 | if (!scnt) return 0; | ||
349 | count -= scnt; buf += scnt; | ||
350 | if (!wptr) return 0; | ||
351 | |||
352 | pvr2_trace(PVR2_TRACE_DEBUGIFC,"debugifc cmd: \"%.*s\"",wlen,wptr); | ||
353 | if (debugifc_match_keyword(wptr,wlen,"reset")) { | ||
354 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
355 | if (!scnt) return -EINVAL; | ||
356 | count -= scnt; buf += scnt; | ||
357 | if (!wptr) return -EINVAL; | ||
358 | if (debugifc_match_keyword(wptr,wlen,"cpu")) { | ||
359 | pvr2_hdw_cpureset_assert(hdw,!0); | ||
360 | pvr2_hdw_cpureset_assert(hdw,0); | ||
361 | return 0; | ||
362 | } else if (debugifc_match_keyword(wptr,wlen,"bus")) { | ||
363 | pvr2_hdw_device_reset(hdw); | ||
364 | } else if (debugifc_match_keyword(wptr,wlen,"soft")) { | ||
365 | return pvr2_hdw_cmd_powerup(hdw); | ||
366 | } else if (debugifc_match_keyword(wptr,wlen,"deep")) { | ||
367 | return pvr2_hdw_cmd_deep_reset(hdw); | ||
368 | } else if (debugifc_match_keyword(wptr,wlen,"firmware")) { | ||
369 | return pvr2_upload_firmware2(hdw); | ||
370 | } else if (debugifc_match_keyword(wptr,wlen,"decoder")) { | ||
371 | return pvr2_hdw_cmd_decoder_reset(hdw); | ||
372 | } | ||
373 | return -EINVAL; | ||
374 | } else if (debugifc_match_keyword(wptr,wlen,"subsys_flags")) { | ||
375 | unsigned long msk = 0; | ||
376 | unsigned long val = 0; | ||
377 | if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) { | ||
378 | pvr2_trace(PVR2_TRACE_DEBUGIFC, | ||
379 | "debugifc parse error on subsys mask"); | ||
380 | return -EINVAL; | ||
381 | } | ||
382 | pvr2_hdw_subsys_bit_chg(hdw,msk,val); | ||
383 | return 0; | ||
384 | } else if (debugifc_match_keyword(wptr,wlen,"stream_flags")) { | ||
385 | unsigned long msk = 0; | ||
386 | unsigned long val = 0; | ||
387 | if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) { | ||
388 | pvr2_trace(PVR2_TRACE_DEBUGIFC, | ||
389 | "debugifc parse error on stream mask"); | ||
390 | return -EINVAL; | ||
391 | } | ||
392 | pvr2_hdw_subsys_stream_bit_chg(hdw,msk,val); | ||
393 | return 0; | ||
394 | } else if (debugifc_match_keyword(wptr,wlen,"cpufw")) { | ||
395 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
396 | if (!scnt) return -EINVAL; | ||
397 | count -= scnt; buf += scnt; | ||
398 | if (!wptr) return -EINVAL; | ||
399 | if (debugifc_match_keyword(wptr,wlen,"fetch")) { | ||
400 | pvr2_hdw_cpufw_set_enabled(hdw,!0); | ||
401 | return 0; | ||
402 | } else if (debugifc_match_keyword(wptr,wlen,"done")) { | ||
403 | pvr2_hdw_cpufw_set_enabled(hdw,0); | ||
404 | return 0; | ||
405 | } else { | ||
406 | return -EINVAL; | ||
407 | } | ||
408 | } else if (debugifc_match_keyword(wptr,wlen,"gpio")) { | ||
409 | int dir_fl = 0; | ||
410 | int ret; | ||
411 | u32 msk,val; | ||
412 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
413 | if (!scnt) return -EINVAL; | ||
414 | count -= scnt; buf += scnt; | ||
415 | if (!wptr) return -EINVAL; | ||
416 | if (debugifc_match_keyword(wptr,wlen,"dir")) { | ||
417 | dir_fl = !0; | ||
418 | } else if (!debugifc_match_keyword(wptr,wlen,"out")) { | ||
419 | return -EINVAL; | ||
420 | } | ||
421 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
422 | if (!scnt) return -EINVAL; | ||
423 | count -= scnt; buf += scnt; | ||
424 | if (!wptr) return -EINVAL; | ||
425 | ret = debugifc_parse_unsigned_number(wptr,wlen,&msk); | ||
426 | if (ret) return ret; | ||
427 | scnt = debugifc_isolate_word(buf,count,&wptr,&wlen); | ||
428 | if (wptr) { | ||
429 | ret = debugifc_parse_unsigned_number(wptr,wlen,&val); | ||
430 | if (ret) return ret; | ||
431 | } else { | ||
432 | val = msk; | ||
433 | msk = 0xffffffff; | ||
434 | } | ||
435 | if (dir_fl) { | ||
436 | ret = pvr2_hdw_gpio_chg_dir(hdw,msk,val); | ||
437 | } else { | ||
438 | ret = pvr2_hdw_gpio_chg_out(hdw,msk,val); | ||
439 | } | ||
440 | return ret; | ||
441 | } | ||
442 | pvr2_trace(PVR2_TRACE_DEBUGIFC, | ||
443 | "debugifc failed to recognize cmd: \"%.*s\"",wlen,wptr); | ||
444 | return -EINVAL; | ||
445 | } | ||
446 | |||
447 | |||
448 | int pvr2_debugifc_docmd(struct pvr2_hdw *hdw,const char *buf, | ||
449 | unsigned int count) | ||
450 | { | ||
451 | unsigned int bcnt = 0; | ||
452 | int ret; | ||
453 | |||
454 | while (count) { | ||
455 | for (bcnt = 0; bcnt < count; bcnt++) { | ||
456 | if (buf[bcnt] == '\n') break; | ||
457 | } | ||
458 | |||
459 | ret = pvr2_debugifc_do1cmd(hdw,buf,bcnt); | ||
460 | if (ret < 0) return ret; | ||
461 | if (bcnt < count) bcnt++; | ||
462 | buf += bcnt; | ||
463 | count -= bcnt; | ||
464 | } | ||
465 | |||
466 | return 0; | ||
467 | } | ||
468 | |||
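pvr2_debugifc_docmd() splits its input on newlines and hands each line to pvr2_debugifc_do1cmd(). The stand-alone sketch below just prints one example of each command form accepted above; the flag names and gpio values are examples only, not required values.

#include <stdio.h>

/* Command grammar accepted by pvr2_debugifc_do1cmd(), one command per
   line as split up by pvr2_debugifc_docmd().  Values are examples. */
int main(void)
{
	static const char demo_cmds[] =
		"reset cpu\n"			/* also: bus, soft, deep, firmware, decoder */
		"subsys_flags +ENC_RUN -USB_RUN\n"
		"stream_flags +ENC_RUN\n"
		"cpufw fetch\n"			/* or "cpufw done" */
		"gpio dir 0x481\n"		/* one value: mask defaults to 0xffffffff */
		"gpio out 0x40 0x40\n";		/* explicit mask then value */
	fputs(demo_cmds, stdout);
	return 0;
}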
469 | |||
470 | /* | ||
471 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
472 | *** Local Variables: *** | ||
473 | *** mode: c *** | ||
474 | *** fill-column: 75 *** | ||
475 | *** tab-width: 8 *** | ||
476 | *** c-basic-offset: 8 *** | ||
477 | *** End: *** | ||
478 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.h b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h new file mode 100644 index 000000000000..990b02d35d36 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_DEBUGIFC_H | ||
22 | #define __PVRUSB2_DEBUGIFC_H | ||
23 | |||
24 | struct pvr2_hdw; | ||
25 | |||
26 | /* Non-intrusively print some useful debugging info from inside the | ||
27 | driver. This should work even if the driver appears to be | ||
28 | wedged. */ | ||
29 | int pvr2_debugifc_print_info(struct pvr2_hdw *, | ||
30 | char *buf_ptr,unsigned int buf_size); | ||
31 | |||
32 | /* Print general status of driver. This will also trigger a probe of | ||
33 | the USB link. Unlike print_info(), this one synchronizes with the | ||
34 | driver so the information should be self-consistent (but it will | ||
35 | hang if the driver is wedged). */ | ||
36 | int pvr2_debugifc_print_status(struct pvr2_hdw *, | ||
37 | char *buf_ptr,unsigned int buf_size); | ||
38 | |||
39 | /* Parse a string command into a driver action. */ | ||
40 | int pvr2_debugifc_docmd(struct pvr2_hdw *, | ||
41 | const char *buf_ptr,unsigned int buf_size); | ||
42 | |||
43 | #endif /* __PVRUSB2_DEBUGIFC_H */ | ||
44 | |||
45 | /* | ||
46 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
47 | *** Local Variables: *** | ||
48 | *** mode: c *** | ||
49 | *** fill-column: 75 *** | ||
50 | *** tab-width: 8 *** | ||
51 | *** c-basic-offset: 8 *** | ||
52 | *** End: *** | ||
53 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.c b/drivers/media/video/pvrusb2/pvrusb2-demod.c new file mode 100644 index 000000000000..9686569a11f6 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-demod.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include "pvrusb2.h" | ||
24 | #include "pvrusb2-util.h" | ||
25 | #include "pvrusb2-demod.h" | ||
26 | #include "pvrusb2-hdw-internal.h" | ||
27 | #include "pvrusb2-debug.h" | ||
28 | #include <linux/videodev2.h> | ||
29 | #include <media/tuner.h> | ||
30 | #include <media/v4l2-common.h> | ||
31 | |||
32 | |||
33 | struct pvr2_demod_handler { | ||
34 | struct pvr2_hdw *hdw; | ||
35 | struct pvr2_i2c_client *client; | ||
36 | struct pvr2_i2c_handler i2c_handler; | ||
37 | int type_update_fl; | ||
38 | }; | ||
39 | |||
40 | |||
41 | static void set_config(struct pvr2_demod_handler *ctxt) | ||
42 | { | ||
43 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
44 | int cfg = 0; | ||
45 | |||
46 | switch (hdw->tuner_type) { | ||
47 | case TUNER_PHILIPS_FM1216ME_MK3: | ||
48 | case TUNER_PHILIPS_FM1236_MK3: | ||
49 | cfg = TDA9887_PORT1_ACTIVE|TDA9887_PORT2_ACTIVE; | ||
50 | break; | ||
51 | default: | ||
52 | break; | ||
53 | } | ||
54 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c demod set_config(0x%x)",cfg); | ||
55 | pvr2_i2c_client_cmd(ctxt->client,TDA9887_SET_CONFIG,&cfg); | ||
56 | ctxt->type_update_fl = 0; | ||
57 | } | ||
58 | |||
59 | |||
60 | static int demod_check(struct pvr2_demod_handler *ctxt) | ||
61 | { | ||
62 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
63 | if (hdw->tuner_updated) ctxt->type_update_fl = !0; | ||
64 | return ctxt->type_update_fl != 0; | ||
65 | } | ||
66 | |||
67 | |||
68 | static void demod_update(struct pvr2_demod_handler *ctxt) | ||
69 | { | ||
70 | if (ctxt->type_update_fl) set_config(ctxt); | ||
71 | } | ||
72 | |||
73 | |||
74 | static void demod_detach(struct pvr2_demod_handler *ctxt) | ||
75 | { | ||
76 | ctxt->client->handler = 0; | ||
77 | kfree(ctxt); | ||
78 | } | ||
79 | |||
80 | |||
81 | static unsigned int demod_describe(struct pvr2_demod_handler *ctxt,char *buf,unsigned int cnt) | ||
82 | { | ||
83 | return scnprintf(buf,cnt,"handler: pvrusb2-demod"); | ||
84 | } | ||
85 | |||
86 | |||
87 | static const struct pvr2_i2c_handler_functions tuner_funcs = { | ||
88 | .detach = (void (*)(void *))demod_detach, | ||
89 | .check = (int (*)(void *))demod_check, | ||
90 | .update = (void (*)(void *))demod_update, | ||
91 | .describe = (unsigned int (*)(void *,char *,unsigned int))demod_describe, | ||
92 | }; | ||
93 | |||
94 | |||
95 | int pvr2_i2c_demod_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp) | ||
96 | { | ||
97 | struct pvr2_demod_handler *ctxt; | ||
98 | if (cp->handler) return 0; | ||
99 | |||
100 | ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL); | ||
101 | if (!ctxt) return 0; | ||
102 | memset(ctxt,0,sizeof(*ctxt)); | ||
103 | |||
104 | ctxt->i2c_handler.func_data = ctxt; | ||
105 | ctxt->i2c_handler.func_table = &tuner_funcs; | ||
106 | ctxt->type_update_fl = !0; | ||
107 | ctxt->client = cp; | ||
108 | ctxt->hdw = hdw; | ||
109 | cp->handler = &ctxt->i2c_handler; | ||
110 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tda9887 V4L2 handler set up", | ||
111 | cp->client->addr); | ||
112 | return !0; | ||
113 | } | ||
114 | |||
115 | |||
116 | |||
117 | |||
118 | /* | ||
119 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
120 | *** Local Variables: *** | ||
121 | *** mode: c *** | ||
122 | *** fill-column: 70 *** | ||
123 | *** tab-width: 8 *** | ||
124 | *** c-basic-offset: 8 *** | ||
125 | *** End: *** | ||
126 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.h b/drivers/media/video/pvrusb2/pvrusb2-demod.h new file mode 100644 index 000000000000..4c4e40ffbf03 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-demod.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_DEMOD_H | ||
22 | #define __PVRUSB2_DEMOD_H | ||
23 | |||
24 | #include "pvrusb2-i2c-core.h" | ||
25 | |||
26 | int pvr2_i2c_demod_setup(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
27 | |||
28 | #endif /* __PVRUSB2_DEMOD_H */ | ||
29 | |||
30 | /* | ||
31 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
32 | *** Local Variables: *** | ||
33 | *** mode: c *** | ||
34 | *** fill-column: 70 *** | ||
35 | *** tab-width: 8 *** | ||
36 | *** c-basic-offset: 8 *** | ||
37 | *** End: *** | ||
38 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c new file mode 100644 index 000000000000..94d383ff9889 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include "pvrusb2-eeprom.h" | ||
24 | #include "pvrusb2-hdw-internal.h" | ||
25 | #include "pvrusb2-debug.h" | ||
26 | |||
27 | #define trace_eeprom(...) pvr2_trace(PVR2_TRACE_EEPROM,__VA_ARGS__) | ||
28 | |||
29 | |||
30 | |||
31 | /* | ||
32 | |||
33 | Read and analyze data in the eeprom. Use tveeprom to figure out | ||
34 | the packet structure, since this is another Hauppauge device and | ||
35 | internally it has a family resemblance to ivtv-type devices. | ||
36 | |||
37 | */ | ||
38 | |||
39 | #include <media/tveeprom.h> | ||
40 | |||
41 | /* We seem to only be interested in the last 128 bytes of the EEPROM */ | ||
42 | #define EEPROM_SIZE 128 | ||
43 | |||
44 | /* Grab EEPROM contents, needed for direct method. */ | ||
45 | static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw) | ||
46 | { | ||
47 | struct i2c_msg msg[2]; | ||
48 | u8 *eeprom; | ||
49 | u8 iadd[2]; | ||
50 | u8 addr; | ||
51 | u16 eepromSize; | ||
52 | unsigned int offs; | ||
53 | int ret; | ||
54 | int mode16 = 0; | ||
55 | unsigned pcnt,tcnt; | ||
56 | eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL); | ||
57 | if (!eeprom) { | ||
58 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
59 | "Failed to allocate memory" | ||
60 | " required to read eeprom"); | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | trace_eeprom("Value for eeprom addr from controller was 0x%x", | ||
65 | hdw->eeprom_addr); | ||
66 | addr = hdw->eeprom_addr; | ||
67 | /* Seems that if the high bit is set, then the *real* eeprom | ||
68 | address is shifted right one bit position (noticed this in | ||
69 | newer PVR USB2 hardware) */ | ||
70 | if (addr & 0x80) addr >>= 1; | ||
71 | |||
72 | /* FX2 documentation states that a 16bit-addressed eeprom is | ||
73 | expected if the I2C address is an odd number (yeah, this is | ||
74 | strange but it's what they do) */ | ||
75 | mode16 = (addr & 1); | ||
76 | eepromSize = (mode16 ? 4096 : 256); | ||
77 | trace_eeprom("Examining %d byte eeprom at location 0x%x" | ||
78 | " using %d bit addressing",eepromSize,addr, | ||
79 | mode16 ? 16 : 8); | ||
80 | |||
81 | msg[0].addr = addr; | ||
82 | msg[0].flags = 0; | ||
83 | msg[0].len = mode16 ? 2 : 1; | ||
84 | msg[0].buf = iadd; | ||
85 | msg[1].addr = addr; | ||
86 | msg[1].flags = I2C_M_RD; | ||
87 | |||
88 | /* We have to do the actual eeprom data fetch ourselves, because | ||
89 | (1) we're only fetching part of the eeprom, and (2) if we were | ||
90 | getting the whole thing, our I2C driver can't grab it in one | ||
91 | pass - which is what tveeprom is otherwise going to attempt */ | ||
92 | memset(eeprom,0,EEPROM_SIZE); | ||
93 | for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) { | ||
94 | pcnt = 16; | ||
95 | if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt; | ||
96 | offs = tcnt + (eepromSize - EEPROM_SIZE); | ||
97 | if (mode16) { | ||
98 | iadd[0] = offs >> 8; | ||
99 | iadd[1] = offs; | ||
100 | } else { | ||
101 | iadd[0] = offs; | ||
102 | } | ||
103 | msg[1].len = pcnt; | ||
104 | msg[1].buf = eeprom+tcnt; | ||
105 | if ((ret = i2c_transfer( | ||
106 | &hdw->i2c_adap, | ||
107 | msg,sizeof(msg)/sizeof(msg[0]))) != 2) { | ||
108 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
109 | "eeprom fetch set offs err=%d",ret); | ||
110 | kfree(eeprom); | ||
111 | return 0; | ||
112 | } | ||
113 | } | ||
114 | return eeprom; | ||
115 | } | ||
116 | |||
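The addressing rules applied by pvr2_eeprom_fetch() can be summarized as: an address with the high bit set is shifted right one bit, an odd address implies a 16-bit-addressed 4096-byte part (otherwise an 8-bit-addressed 256-byte part), and only the last EEPROM_SIZE bytes are read. A stand-alone sketch of that arithmetic, using a hypothetical reported address:

#include <stdio.h>

#define EEPROM_SIZE 128			/* only the tail of the part is read */

int main(void)
{
	unsigned int addr = 0xa0;	/* hypothetical value from the FX2 */
	int mode16;
	unsigned int size, start;

	/* High bit set -> the real I2C address is the value shifted
	   right one bit (seen on newer PVR USB2 hardware). */
	if (addr & 0x80)
		addr >>= 1;

	/* Odd address -> 16-bit-addressed 4 KiB part, else 8-bit 256 B. */
	mode16 = addr & 1;
	size = mode16 ? 4096 : 256;
	start = size - EEPROM_SIZE;

	printf("i2c addr 0x%x, %u-byte eeprom, %d-bit addressing, "
	       "reading offsets %u..%u\n",
	       addr, size, mode16 ? 16 : 8, start, size - 1);
	return 0;
}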
117 | |||
118 | /* Directly call eeprom analysis function within tveeprom. */ | ||
119 | int pvr2_eeprom_analyze(struct pvr2_hdw *hdw) | ||
120 | { | ||
121 | u8 *eeprom; | ||
122 | struct tveeprom tvdata; | ||
123 | |||
124 | memset(&tvdata,0,sizeof(tvdata)); | ||
125 | |||
126 | eeprom = pvr2_eeprom_fetch(hdw); | ||
127 | if (!eeprom) return -EINVAL; | ||
128 | |||
129 | { | ||
130 | struct i2c_client fake_client; | ||
131 | /* Newer version expects a useless client interface */ | ||
132 | fake_client.addr = hdw->eeprom_addr; | ||
133 | fake_client.adapter = &hdw->i2c_adap; | ||
134 | tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom); | ||
135 | } | ||
136 | |||
137 | trace_eeprom("eeprom assumed v4l tveeprom module"); | ||
138 | trace_eeprom("eeprom direct call results:"); | ||
139 | trace_eeprom("has_radio=%d",tvdata.has_radio); | ||
140 | trace_eeprom("tuner_type=%d",tvdata.tuner_type); | ||
141 | trace_eeprom("tuner_formats=0x%x",tvdata.tuner_formats); | ||
142 | trace_eeprom("audio_processor=%d",tvdata.audio_processor); | ||
143 | trace_eeprom("model=%d",tvdata.model); | ||
144 | trace_eeprom("revision=%d",tvdata.revision); | ||
145 | trace_eeprom("serial_number=%d",tvdata.serial_number); | ||
146 | trace_eeprom("rev_str=%s",tvdata.rev_str); | ||
147 | hdw->tuner_type = tvdata.tuner_type; | ||
148 | hdw->serial_number = tvdata.serial_number; | ||
149 | hdw->std_mask_eeprom = tvdata.tuner_formats; | ||
150 | |||
151 | kfree(eeprom); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
158 | *** Local Variables: *** | ||
159 | *** mode: c *** | ||
160 | *** fill-column: 70 *** | ||
161 | *** tab-width: 8 *** | ||
162 | *** c-basic-offset: 8 *** | ||
163 | *** End: *** | ||
164 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.h b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h new file mode 100644 index 000000000000..84242975dea7 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_EEPROM_H | ||
24 | #define __PVRUSB2_EEPROM_H | ||
25 | |||
26 | struct pvr2_hdw; | ||
27 | |||
28 | int pvr2_eeprom_analyze(struct pvr2_hdw *); | ||
29 | |||
30 | #endif /* __PVRUSB2_EEPROM_H */ | ||
31 | |||
32 | /* | ||
33 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
34 | *** Local Variables: *** | ||
35 | *** mode: c *** | ||
36 | *** fill-column: 70 *** | ||
37 | *** tab-width: 8 *** | ||
38 | *** c-basic-offset: 8 *** | ||
39 | *** End: *** | ||
40 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c new file mode 100644 index 000000000000..2cc31695b435 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/device.h> // for linux/firmware.h | ||
24 | #include <linux/firmware.h> | ||
25 | #include "pvrusb2-util.h" | ||
26 | #include "pvrusb2-encoder.h" | ||
27 | #include "pvrusb2-hdw-internal.h" | ||
28 | #include "pvrusb2-debug.h" | ||
29 | |||
30 | |||
31 | |||
32 | /* Firmware mailbox flags - definitions found from ivtv */ | ||
33 | #define IVTV_MBOX_FIRMWARE_DONE 0x00000004 | ||
34 | #define IVTV_MBOX_DRIVER_DONE 0x00000002 | ||
35 | #define IVTV_MBOX_DRIVER_BUSY 0x00000001 | ||
36 | |||
37 | |||
38 | static int pvr2_encoder_write_words(struct pvr2_hdw *hdw, | ||
39 | const u32 *data, unsigned int dlen) | ||
40 | { | ||
41 | unsigned int idx; | ||
42 | int ret; | ||
43 | unsigned int offs = 0; | ||
44 | unsigned int chunkCnt; | ||
45 | |||
46 | /* | ||
47 | |||
48 | Format: First byte must be 0x01. Remaining 32 bit words are | ||
49 | spread out into chunks of 7 bytes each: the word little-endian | ||
50 | ordered at the start of the chunk, 2 blank bytes following, and a | ||
51 | single byte that is 0x44 plus the offset of the word. Repeat | ||
52 | request for additional words, with offset adjusted | ||
53 | accordingly. | ||
54 | |||
55 | */ | ||
56 | while (dlen) { | ||
57 | chunkCnt = 8; | ||
58 | if (chunkCnt > dlen) chunkCnt = dlen; | ||
59 | memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer)); | ||
60 | hdw->cmd_buffer[0] = 0x01; | ||
61 | for (idx = 0; idx < chunkCnt; idx++) { | ||
62 | hdw->cmd_buffer[1+(idx*7)+6] = 0x44 + idx + offs; | ||
63 | PVR2_DECOMPOSE_LE(hdw->cmd_buffer, 1+(idx*7), | ||
64 | data[idx]); | ||
65 | } | ||
66 | ret = pvr2_send_request(hdw, | ||
67 | hdw->cmd_buffer,1+(chunkCnt*7), | ||
68 | 0,0); | ||
69 | if (ret) return ret; | ||
70 | data += chunkCnt; | ||
71 | dlen -= chunkCnt; | ||
72 | offs += chunkCnt; | ||
73 | } | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
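As a concrete illustration of the request layout built by pvr2_encoder_write_words() for a single pass: a 0x01 header byte followed by one 7-byte cell per 32-bit word (4 little-endian data bytes, 2 zero bytes, then 0x44 plus the word offset). The word values in this stand-alone sketch are hypothetical.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t words[] = { 0x00000000, 0x000000c8 };	/* hypothetical data */
	uint8_t buf[64];
	unsigned int n = sizeof(words) / sizeof(words[0]);
	unsigned int idx, i;

	memset(buf, 0, sizeof(buf));
	buf[0] = 0x01;					/* request header */
	for (idx = 0; idx < n; idx++) {
		uint8_t *cell = buf + 1 + idx * 7;
		cell[0] = words[idx] & 0xff;		/* little endian word */
		cell[1] = (words[idx] >> 8) & 0xff;
		cell[2] = (words[idx] >> 16) & 0xff;
		cell[3] = (words[idx] >> 24) & 0xff;
		/* cell[4] and cell[5] stay zero */
		cell[6] = 0x44 + idx;			/* 0x44 + word offset */
	}
	for (i = 0; i < 1 + n * 7; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}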
78 | |||
79 | static int pvr2_encoder_read_words(struct pvr2_hdw *hdw,int statusFl, | ||
80 | u32 *data, unsigned int dlen) | ||
81 | { | ||
82 | unsigned int idx; | ||
83 | int ret; | ||
84 | unsigned int offs = 0; | ||
85 | unsigned int chunkCnt; | ||
86 | |||
87 | /* | ||
88 | |||
89 | Format: First byte must be 0x02 (status check) or 0x28 (read | ||
90 | back block of 32 bit words). Next 6 bytes must be zero, | ||
91 | followed by a single byte of 0x44+offset for portion to be | ||
92 | read. Returned data is a packed set of 32 bit words that were | ||
93 | read. | ||
94 | |||
95 | */ | ||
96 | |||
97 | while (dlen) { | ||
98 | chunkCnt = 16; | ||
99 | if (chunkCnt > dlen) chunkCnt = dlen; | ||
100 | memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer)); | ||
101 | hdw->cmd_buffer[0] = statusFl ? 0x02 : 0x28; | ||
102 | hdw->cmd_buffer[7] = 0x44 + offs; | ||
103 | ret = pvr2_send_request(hdw, | ||
104 | hdw->cmd_buffer,8, | ||
105 | hdw->cmd_buffer,chunkCnt * 4); | ||
106 | if (ret) return ret; | ||
107 | |||
108 | for (idx = 0; idx < chunkCnt; idx++) { | ||
109 | data[idx] = PVR2_COMPOSE_LE(hdw->cmd_buffer,idx*4); | ||
110 | } | ||
111 | data += chunkCnt; | ||
112 | dlen -= chunkCnt; | ||
113 | offs += chunkCnt; | ||
114 | } | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | |||
120 | /* This prototype is set up to be compatible with the | ||
121 | cx2341x_mbox_func prototype in cx2341x.h, which should be in | ||
122 | kernels 2.6.18 or later. We do this so that we can enable | ||
123 | cx2341x.ko to write to our encoder (by handing it a pointer to this | ||
124 | function). For earlier kernels this doesn't really matter. */ | ||
125 | static int pvr2_encoder_cmd(void *ctxt, | ||
126 | int cmd, | ||
127 | int arg_cnt_send, | ||
128 | int arg_cnt_recv, | ||
129 | u32 *argp) | ||
130 | { | ||
131 | unsigned int poll_count; | ||
132 | int ret = 0; | ||
133 | unsigned int idx; | ||
134 | /* These sizes look to be limited by the FX2 firmware implementation */ | ||
135 | u32 wrData[16]; | ||
136 | u32 rdData[16]; | ||
137 | struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt; | ||
138 | |||
139 | |||
140 | /* | ||
141 | |||
142 | The encoder seems to speak entirely using blocks of 32 bit words. | ||
143 | In ivtv driver terms, this is a mailbox which we populate with | ||
144 | data and watch what the hardware does with it. The first word | ||
145 | is a set of flags used to control the transaction, the second | ||
146 | word is the command to execute, the third word is zero (ivtv | ||
147 | driver suggests that this is some kind of return value), and | ||
148 | the fourth word is a specified timeout (windows driver always | ||
149 | uses 0x00060000 except for one case when it is zero). All | ||
150 | successive words are the argument words for the command. | ||
151 | |||
152 | First, write out the entire set of words, with the first word | ||
153 | being zero. | ||
154 | |||
155 | Next, write out just the first word again, but set it to | ||
156 | IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY this time (which | ||
157 | probably means "go"). | ||
158 | |||
159 | Next, read back 16 words as status. Check the first word, | ||
160 | which should have IVTV_MBOX_FIRMWARE_DONE set. If however | ||
161 | that bit is not set, then the command isn't done so repeat the | ||
162 | read. | ||
163 | |||
164 | Next, read back 32 words and compare with the original | ||
165 | arguments. Hopefully they will match. | ||
166 | |||
167 | Finally, write out just the first word again, but set it to | ||
168 | 0x0 this time (which probably means "idle"). | ||
169 | |||
170 | */ | ||
171 | |||
172 | if (arg_cnt_send > (sizeof(wrData)/sizeof(wrData[0]))-4) { | ||
173 | pvr2_trace( | ||
174 | PVR2_TRACE_ERROR_LEGS, | ||
175 | "Failed to write cx23416 command" | ||
176 | " - too many input arguments" | ||
177 | " (was given %u limit %u)", | ||
178 | arg_cnt_send, | ||
179 | (unsigned int)(sizeof(wrData)/sizeof(wrData[0])) - 4); | ||
180 | return -EINVAL; | ||
181 | } | ||
182 | |||
183 | if (arg_cnt_recv > (sizeof(rdData)/sizeof(rdData[0]))-4) { | ||
184 | pvr2_trace( | ||
185 | PVR2_TRACE_ERROR_LEGS, | ||
186 | "Failed to write cx23416 command" | ||
187 | " - too many return arguments" | ||
188 | " (was given %u limit %u)", | ||
189 | arg_cnt_recv, | ||
190 | (unsigned int)(sizeof(rdData)/sizeof(rdData[0])) - 4); | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | |||
195 | LOCK_TAKE(hdw->ctl_lock); do { | ||
196 | |||
197 | wrData[0] = 0; | ||
198 | wrData[1] = cmd; | ||
199 | wrData[2] = 0; | ||
200 | wrData[3] = 0x00060000; | ||
201 | for (idx = 0; idx < arg_cnt_send; idx++) { | ||
202 | wrData[idx+4] = argp[idx]; | ||
203 | } | ||
204 | for (; idx < (sizeof(wrData)/sizeof(wrData[0]))-4; idx++) { | ||
205 | wrData[idx+4] = 0; | ||
206 | } | ||
207 | |||
208 | ret = pvr2_encoder_write_words(hdw,wrData,idx); | ||
209 | if (ret) break; | ||
210 | wrData[0] = IVTV_MBOX_DRIVER_DONE|IVTV_MBOX_DRIVER_BUSY; | ||
211 | ret = pvr2_encoder_write_words(hdw,wrData,1); | ||
212 | if (ret) break; | ||
213 | poll_count = 0; | ||
214 | while (1) { | ||
215 | if (poll_count < 10000000) poll_count++; | ||
216 | ret = pvr2_encoder_read_words(hdw,!0,rdData,1); | ||
217 | if (ret) break; | ||
218 | if (rdData[0] & IVTV_MBOX_FIRMWARE_DONE) { | ||
219 | break; | ||
220 | } | ||
221 | if (poll_count == 100) { | ||
222 | pvr2_trace( | ||
223 | PVR2_TRACE_ERROR_LEGS, | ||
224 | "***WARNING*** device's encoder" | ||
225 | " appears to be stuck" | ||
226 | " (status=0%08x)",rdData[0]); | ||
227 | pvr2_trace( | ||
228 | PVR2_TRACE_ERROR_LEGS, | ||
229 | "Encoder command: 0x%02x",cmd); | ||
230 | for (idx = 4; idx < arg_cnt_send + 4; idx++) { | ||
231 | pvr2_trace( | ||
232 | PVR2_TRACE_ERROR_LEGS, | ||
233 | "Encoder arg%d: 0x%08x", | ||
234 | idx-3,wrData[idx]); | ||
235 | } | ||
236 | pvr2_trace( | ||
237 | PVR2_TRACE_ERROR_LEGS, | ||
238 | "Giving up waiting." | ||
239 | " It is likely that" | ||
240 | " this is a bad idea..."); | ||
241 | ret = -EBUSY; | ||
242 | break; | ||
243 | } | ||
244 | } | ||
245 | if (ret) break; | ||
246 | wrData[0] = 0x7; | ||
247 | ret = pvr2_encoder_read_words( | ||
248 | hdw,0,rdData, | ||
249 | sizeof(rdData)/sizeof(rdData[0])); | ||
250 | if (ret) break; | ||
251 | for (idx = 0; idx < arg_cnt_recv; idx++) { | ||
252 | argp[idx] = rdData[idx+4]; | ||
253 | } | ||
254 | |||
255 | wrData[0] = 0x0; | ||
256 | ret = pvr2_encoder_write_words(hdw,wrData,1); | ||
257 | if (ret) break; | ||
258 | |||
259 | } while(0); LOCK_GIVE(hdw->ctl_lock); | ||
260 | |||
261 | return ret; | ||
262 | } | ||
263 | |||
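To make the mailbox layout driven by pvr2_encoder_cmd() concrete: word 0 holds the handshake flags, word 1 the command, word 2 the return-value slot, word 3 the timeout, and words 4 and up the arguments. The command code and argument in this stand-alone sketch are hypothetical, not taken from the cx2341x command set.

#include <stdio.h>

#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
#define IVTV_MBOX_DRIVER_DONE   0x00000002
#define IVTV_MBOX_DRIVER_BUSY   0x00000001

int main(void)
{
	unsigned int mbox[16] = { 0 };

	mbox[0] = 0;			/* handshake flags, rewritten below */
	mbox[1] = 0x81;			/* command code (hypothetical) */
	mbox[2] = 0;			/* return-value slot */
	mbox[3] = 0x00060000;		/* timeout value used by the driver */
	mbox[4] = 12;			/* first argument (hypothetical) */

	/* Second write in the handshake: mark the mailbox as "go". */
	mbox[0] = IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY;

	/* The driver then polls word 0 until IVTV_MBOX_FIRMWARE_DONE is
	   set, copies results back out of words 4.., and finally writes
	   word 0 back to zero. */
	printf("flags=0x%08x cmd=0x%08x timeout=0x%08x arg0=%u\n",
	       mbox[0], mbox[1], mbox[3], mbox[4]);
	return 0;
}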
264 | |||
265 | static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd, | ||
266 | int args, ...) | ||
267 | { | ||
268 | va_list vl; | ||
269 | unsigned int idx; | ||
270 | u32 data[12]; | ||
271 | |||
272 | if (args > sizeof(data)/sizeof(data[0])) { | ||
273 | pvr2_trace( | ||
274 | PVR2_TRACE_ERROR_LEGS, | ||
275 | "Failed to write cx23416 command" | ||
276 | " - too many arguments" | ||
277 | " (was given %u limit %u)", | ||
278 | args,(unsigned int)(sizeof(data)/sizeof(data[0]))); | ||
279 | return -EINVAL; | ||
280 | } | ||
281 | |||
282 | va_start(vl, args); | ||
283 | for (idx = 0; idx < args; idx++) { | ||
284 | data[idx] = va_arg(vl, u32); | ||
285 | } | ||
286 | va_end(vl); | ||
287 | |||
288 | return pvr2_encoder_cmd(hdw,cmd,args,0,data); | ||
289 | } | ||
290 | |||
291 | int pvr2_encoder_configure(struct pvr2_hdw *hdw) | ||
292 | { | ||
293 | int ret; | ||
294 | pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure" | ||
295 | " (cx2341x module)"); | ||
296 | hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING; | ||
297 | hdw->enc_ctl_state.width = hdw->res_hor_val; | ||
298 | hdw->enc_ctl_state.height = hdw->res_ver_val; | ||
299 | hdw->enc_ctl_state.is_50hz = ((hdw->std_mask_cur & | ||
300 | (V4L2_STD_NTSC|V4L2_STD_PAL_M)) ? | ||
301 | 0 : 1); | ||
302 | |||
303 | ret = 0; | ||
304 | |||
305 | if (!ret) ret = pvr2_encoder_vcmd( | ||
306 | hdw,CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, | ||
307 | 0xf0, 0xf0); | ||
308 | |||
309 | /* setup firmware to notify us about some events (don't know why...) */ | ||
310 | if (!ret) ret = pvr2_encoder_vcmd( | ||
311 | hdw,CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, | ||
312 | 0, 0, 0x10000000, 0xffffffff); | ||
313 | |||
314 | if (!ret) ret = pvr2_encoder_vcmd( | ||
315 | hdw,CX2341X_ENC_SET_VBI_LINE, 5, | ||
316 | 0xffffffff,0,0,0,0); | ||
317 | |||
318 | if (ret) { | ||
319 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
320 | "Failed to configure cx32416"); | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | ret = cx2341x_update(hdw,pvr2_encoder_cmd, | ||
325 | (hdw->enc_cur_valid ? &hdw->enc_cur_state : 0), | ||
326 | &hdw->enc_ctl_state); | ||
327 | if (ret) { | ||
328 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
329 | "Error from cx2341x module code=%d",ret); | ||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | ret = 0; | ||
334 | |||
335 | if (!ret) ret = pvr2_encoder_vcmd( | ||
336 | hdw, CX2341X_ENC_INITIALIZE_INPUT, 0); | ||
337 | |||
338 | if (ret) { | ||
339 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
340 | "Failed to initialize cx32416 video input"); | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG); | ||
345 | memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state, | ||
346 | sizeof(struct cx2341x_mpeg_params)); | ||
347 | hdw->enc_cur_valid = !0; | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | |||
352 | int pvr2_encoder_start(struct pvr2_hdw *hdw) | ||
353 | { | ||
354 | int status; | ||
355 | |||
356 | /* unmask some interrupts */ | ||
357 | pvr2_write_register(hdw, 0x0048, 0xbfffffff); | ||
358 | |||
359 | /* change some GPIO data */ | ||
360 | pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000481); | ||
361 | pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000); | ||
362 | |||
363 | if (hdw->config == pvr2_config_vbi) { | ||
364 | status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2, | ||
365 | 0x01,0x14); | ||
366 | } else if (hdw->config == pvr2_config_mpeg) { | ||
367 | status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2, | ||
368 | 0,0x13); | ||
369 | } else { | ||
370 | status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2, | ||
371 | 0,0x13); | ||
372 | } | ||
373 | if (!status) { | ||
374 | hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_RUN); | ||
375 | } | ||
376 | return status; | ||
377 | } | ||
378 | |||
379 | int pvr2_encoder_stop(struct pvr2_hdw *hdw) | ||
380 | { | ||
381 | int status; | ||
382 | |||
383 | /* mask all interrupts */ | ||
384 | pvr2_write_register(hdw, 0x0048, 0xffffffff); | ||
385 | |||
386 | if (hdw->config == pvr2_config_vbi) { | ||
387 | status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3, | ||
388 | 0x01,0x01,0x14); | ||
389 | } else if (hdw->config == pvr2_config_mpeg) { | ||
390 | status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3, | ||
391 | 0x01,0,0x13); | ||
392 | } else { | ||
393 | status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3, | ||
394 | 0x01,0,0x13); | ||
395 | } | ||
396 | |||
397 | /* change some GPIO data */ | ||
398 | /* Note: Bit d7 of dir appears to control the LED. So we shut it | ||
399 | off here. */ | ||
400 | pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000401); | ||
401 | pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000); | ||
402 | |||
403 | if (!status) { | ||
404 | hdw->subsys_enabled_mask &= ~(1<<PVR2_SUBSYS_B_ENC_RUN); | ||
405 | } | ||
406 | return status; | ||
407 | } | ||
408 | |||
409 | |||
410 | /* | ||
411 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
412 | *** Local Variables: *** | ||
413 | *** mode: c *** | ||
414 | *** fill-column: 70 *** | ||
415 | *** tab-width: 8 *** | ||
416 | *** c-basic-offset: 8 *** | ||
417 | *** End: *** | ||
418 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.h b/drivers/media/video/pvrusb2/pvrusb2-encoder.h new file mode 100644 index 000000000000..01b5a0b89c03 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_ENCODER_H | ||
24 | #define __PVRUSB2_ENCODER_H | ||
25 | |||
26 | struct pvr2_hdw; | ||
27 | |||
28 | int pvr2_encoder_configure(struct pvr2_hdw *); | ||
29 | int pvr2_encoder_start(struct pvr2_hdw *); | ||
30 | int pvr2_encoder_stop(struct pvr2_hdw *); | ||
31 | |||
32 | #endif /* __PVRUSB2_ENCODER_H */ | ||
33 | |||
34 | /* | ||
35 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
36 | *** Local Variables: *** | ||
37 | *** mode: c *** | ||
38 | *** fill-column: 70 *** | ||
39 | *** tab-width: 8 *** | ||
40 | *** c-basic-offset: 8 *** | ||
41 | *** End: *** | ||
42 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h new file mode 100644 index 000000000000..ba2afbfe32c5 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h | |||
@@ -0,0 +1,384 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_HDW_INTERNAL_H | ||
22 | #define __PVRUSB2_HDW_INTERNAL_H | ||
23 | |||
24 | /* | ||
25 | |||
26 | This header sets up all the internal structures and definitions needed to | ||
27 | track and coordinate the driver's interaction with the hardware. ONLY | ||
28 | source files which actually implement part of that whole circus should be | ||
29 | including this header. Higher levels, like the external layers to the | ||
30 | various public APIs (V4L, sysfs, etc) should NOT ever include this | ||
31 | private, internal header. This means that pvrusb2-hdw, pvrusb2-encoder, | ||
32 | etc will include this, but pvrusb2-v4l should not. | ||
33 | |||
34 | */ | ||
35 | |||
36 | #include <linux/config.h> | ||
37 | #include <linux/videodev2.h> | ||
38 | #include <linux/i2c.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include "pvrusb2-hdw.h" | ||
41 | #include "pvrusb2-io.h" | ||
42 | #include <media/cx2341x.h> | ||
43 | |||
44 | /* Legal values for the SRATE state variable */ | ||
45 | #define PVR2_CVAL_SRATE_48 0 | ||
46 | #define PVR2_CVAL_SRATE_44_1 1 | ||
47 | |||
48 | /* Legal values for the AUDIOBITRATE state variable */ | ||
49 | #define PVR2_CVAL_AUDIOBITRATE_384 0 | ||
50 | #define PVR2_CVAL_AUDIOBITRATE_320 1 | ||
51 | #define PVR2_CVAL_AUDIOBITRATE_256 2 | ||
52 | #define PVR2_CVAL_AUDIOBITRATE_224 3 | ||
53 | #define PVR2_CVAL_AUDIOBITRATE_192 4 | ||
54 | #define PVR2_CVAL_AUDIOBITRATE_160 5 | ||
55 | #define PVR2_CVAL_AUDIOBITRATE_128 6 | ||
56 | #define PVR2_CVAL_AUDIOBITRATE_112 7 | ||
57 | #define PVR2_CVAL_AUDIOBITRATE_96 8 | ||
58 | #define PVR2_CVAL_AUDIOBITRATE_80 9 | ||
59 | #define PVR2_CVAL_AUDIOBITRATE_64 10 | ||
60 | #define PVR2_CVAL_AUDIOBITRATE_56 11 | ||
61 | #define PVR2_CVAL_AUDIOBITRATE_48 12 | ||
62 | #define PVR2_CVAL_AUDIOBITRATE_32 13 | ||
63 | #define PVR2_CVAL_AUDIOBITRATE_VBR 14 | ||
64 | |||
65 | /* Legal values for the AUDIOEMPHASIS state variable */ | ||
66 | #define PVR2_CVAL_AUDIOEMPHASIS_NONE 0 | ||
67 | #define PVR2_CVAL_AUDIOEMPHASIS_50_15 1 | ||
68 | #define PVR2_CVAL_AUDIOEMPHASIS_CCITT 2 | ||
69 | |||
70 | /* Legal values for PVR2_CID_HSM */ | ||
71 | #define PVR2_CVAL_HSM_FAIL 0 | ||
72 | #define PVR2_CVAL_HSM_FULL 1 | ||
73 | #define PVR2_CVAL_HSM_HIGH 2 | ||
74 | |||
75 | #define PVR2_VID_ENDPOINT 0x84 | ||
76 | #define PVR2_UNK_ENDPOINT 0x86 /* maybe raw yuv ? */ | ||
77 | #define PVR2_VBI_ENDPOINT 0x88 | ||
78 | |||
79 | #define PVR2_CTL_BUFFSIZE 64 | ||
80 | |||
81 | #define FREQTABLE_SIZE 500 | ||
82 | |||
83 | #define LOCK_TAKE(x) do { mutex_lock(&x##_mutex); x##_held = !0; } while (0) | ||
84 | #define LOCK_GIVE(x) do { x##_held = 0; mutex_unlock(&x##_mutex); } while (0) | ||
85 | |||
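LOCK_TAKE()/LOCK_GIVE() rely on token pasting: LOCK_TAKE(hdw->ctl_lock) expands to a lock of hdw->ctl_lock_mutex plus setting the hdw->ctl_lock_held debug flag. A user-space analogue using pthreads; the struct and field names below are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

/* User-space analogue of the LOCK_TAKE()/LOCK_GIVE() macros above:
   "x" names a field pair, x##_mutex and x##_held (debugging flag). */
#define LOCK_TAKE(x) do { pthread_mutex_lock(&x##_mutex); x##_held = 1; } while (0)
#define LOCK_GIVE(x) do { x##_held = 0; pthread_mutex_unlock(&x##_mutex); } while (0)

struct dev {
	pthread_mutex_t ctl_lock_mutex;
	int ctl_lock_held;
};

int main(void)
{
	struct dev d;

	pthread_mutex_init(&d.ctl_lock_mutex, NULL);
	d.ctl_lock_held = 0;

	LOCK_TAKE(d.ctl_lock);	/* -> d.ctl_lock_mutex / d.ctl_lock_held */
	printf("held=%d\n", d.ctl_lock_held);
	LOCK_GIVE(d.ctl_lock);
	printf("held=%d\n", d.ctl_lock_held);

	pthread_mutex_destroy(&d.ctl_lock_mutex);
	return 0;
}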
86 | struct pvr2_decoder; | ||
87 | |||
88 | typedef int (*pvr2_ctlf_is_dirty)(struct pvr2_ctrl *); | ||
89 | typedef void (*pvr2_ctlf_clear_dirty)(struct pvr2_ctrl *); | ||
90 | typedef int (*pvr2_ctlf_get_value)(struct pvr2_ctrl *,int *); | ||
91 | typedef int (*pvr2_ctlf_set_value)(struct pvr2_ctrl *,int msk,int val); | ||
92 | typedef int (*pvr2_ctlf_val_to_sym)(struct pvr2_ctrl *,int msk,int val, | ||
93 | char *,unsigned int,unsigned int *); | ||
94 | typedef int (*pvr2_ctlf_sym_to_val)(struct pvr2_ctrl *, | ||
95 | const char *,unsigned int, | ||
96 | int *mskp,int *valp); | ||
97 | typedef unsigned int (*pvr2_ctlf_get_v4lflags)(struct pvr2_ctrl *); | ||
98 | |||
99 | /* This structure describes a specific control. A table of these is set up | ||
100 | in pvrusb2-hdw.c. */ | ||
101 | struct pvr2_ctl_info { | ||
102 | /* Control's name suitable for use as an identifier */ | ||
103 | const char *name; | ||
104 | |||
105 | /* Short description of control */ | ||
106 | const char *desc; | ||
107 | |||
108 | /* Control's implementation */ | ||
109 | pvr2_ctlf_get_value get_value; /* Get its value */ | ||
110 | pvr2_ctlf_set_value set_value; /* Set its value */ | ||
111 | pvr2_ctlf_val_to_sym val_to_sym; /* Custom convert value->symbol */ | ||
112 | pvr2_ctlf_sym_to_val sym_to_val; /* Custom convert symbol->value */ | ||
113 | pvr2_ctlf_is_dirty is_dirty; /* Return true if dirty */ | ||
114 | pvr2_ctlf_clear_dirty clear_dirty; /* Clear dirty state */ | ||
115 | pvr2_ctlf_get_v4lflags get_v4lflags;/* Retrieve v4l flags */ | ||
116 | |||
117 | /* Control's type (int, enum, bitmask) */ | ||
118 | enum pvr2_ctl_type type; | ||
119 | |||
120 | /* Associated V4L control ID, if any */ | ||
121 | int v4l_id; | ||
122 | |||
123 | /* Associated driver internal ID, if any */ | ||
124 | int internal_id; | ||
125 | |||
126 | /* Don't implicitly initialize this control's value */ | ||
127 | int skip_init; | ||
128 | |||
129 | /* Starting value for this control */ | ||
130 | int default_value; | ||
131 | |||
132 | /* Type-specific control information */ | ||
133 | union { | ||
134 | struct { /* Integer control */ | ||
135 | long min_value; /* lower limit */ | ||
136 | long max_value; /* upper limit */ | ||
137 | } type_int; | ||
138 | struct { /* enumerated control */ | ||
139 | unsigned int count; /* enum value count */ | ||
140 | const char **value_names; /* symbol names */ | ||
141 | } type_enum; | ||
142 | struct { /* bitmask control */ | ||
143 | unsigned int valid_bits; /* bits in use */ | ||
144 | const char **bit_names; /* symbol name/bit */ | ||
145 | } type_bitmask; | ||
146 | } def; | ||
147 | }; | ||
148 | |||
149 | |||
150 | /* Same as pvr2_ctl_info, but includes storage for the control description */ | ||
151 | #define PVR2_CTLD_INFO_DESC_SIZE 32 | ||
152 | struct pvr2_ctld_info { | ||
153 | struct pvr2_ctl_info info; | ||
154 | char desc[PVR2_CTLD_INFO_DESC_SIZE]; | ||
155 | }; | ||
156 | |||
157 | struct pvr2_ctrl { | ||
158 | const struct pvr2_ctl_info *info; | ||
159 | struct pvr2_hdw *hdw; | ||
160 | }; | ||
161 | |||
162 | |||
163 | struct pvr2_audio_stat { | ||
164 | void *ctxt; | ||
165 | void (*detach)(void *); | ||
166 | int (*status)(void *); | ||
167 | }; | ||
168 | |||
169 | struct pvr2_decoder_ctrl { | ||
170 | void *ctxt; | ||
171 | void (*detach)(void *); | ||
172 | void (*enable)(void *,int); | ||
173 | int (*tuned)(void *); | ||
174 | void (*force_reset)(void *); | ||
175 | }; | ||
176 | |||
177 | #define PVR2_I2C_PEND_DETECT 0x01 /* Need to detect a client type */ | ||
178 | #define PVR2_I2C_PEND_CLIENT 0x02 /* Client needs a specific update */ | ||
179 | #define PVR2_I2C_PEND_REFRESH 0x04 /* Client has specific pending bits */ | ||
180 | #define PVR2_I2C_PEND_STALE 0x08 /* Broadcast pending bits */ | ||
181 | |||
182 | #define PVR2_I2C_PEND_ALL (PVR2_I2C_PEND_DETECT |\ | ||
183 | PVR2_I2C_PEND_CLIENT |\ | ||
184 | PVR2_I2C_PEND_REFRESH |\ | ||
185 | PVR2_I2C_PEND_STALE) | ||
186 | |||
187 | /* Disposition of firmware1 loading situation */ | ||
188 | #define FW1_STATE_UNKNOWN 0 | ||
189 | #define FW1_STATE_MISSING 1 | ||
190 | #define FW1_STATE_FAILED 2 | ||
191 | #define FW1_STATE_RELOAD 3 | ||
192 | #define FW1_STATE_OK 4 | ||
193 | |||
194 | /* Known major hardware variants, keyed from device ID */ | ||
195 | #define PVR2_HDW_TYPE_29XXX 0 | ||
196 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
197 | #define PVR2_HDW_TYPE_24XXX 1 | ||
198 | #endif | ||
199 | |||
200 | typedef int (*pvr2_i2c_func)(struct pvr2_hdw *,u8,u8 *,u16,u8 *, u16); | ||
201 | #define PVR2_I2C_FUNC_CNT 128 | ||
202 | |||
203 | /* This structure contains all state data directly needed to | ||
204 | manipulate the hardware (as opposed to complying with a kernel | ||
205 | interface) */ | ||
206 | struct pvr2_hdw { | ||
207 | /* Underlying USB device handle */ | ||
208 | struct usb_device *usb_dev; | ||
209 | struct usb_interface *usb_intf; | ||
210 | |||
211 | /* Device type, one of PVR2_HDW_TYPE_xxxxx */ | ||
212 | unsigned int hdw_type; | ||
213 | |||
214 | /* Video spigot */ | ||
215 | struct pvr2_stream *vid_stream; | ||
216 | |||
217 | /* Mutex for all hardware state control */ | ||
218 | struct mutex big_lock_mutex; | ||
219 | int big_lock_held; /* For debugging */ | ||
220 | |||
221 | void (*poll_trigger_func)(void *); | ||
222 | void *poll_trigger_data; | ||
223 | |||
224 | char name[32]; | ||
225 | |||
226 | /* I2C stuff */ | ||
227 | struct i2c_adapter i2c_adap; | ||
228 | struct i2c_algorithm i2c_algo; | ||
229 | pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT]; | ||
230 | int i2c_cx25840_hack_state; | ||
231 | int i2c_linked; | ||
232 | unsigned int i2c_pend_types; /* Which types of update are needed */ | ||
233 | unsigned long i2c_pend_mask; /* Change bits we need to scan */ | ||
234 | unsigned long i2c_stale_mask; /* Pending broadcast change bits */ | ||
235 | unsigned long i2c_active_mask; /* All change bits currently in use */ | ||
236 | struct list_head i2c_clients; | ||
237 | struct mutex i2c_list_lock; | ||
238 | |||
239 | /* Frequency table */ | ||
240 | unsigned int freqTable[FREQTABLE_SIZE]; | ||
241 | unsigned int freqProgSlot; | ||
242 | unsigned int freqSlot; | ||
243 | |||
244 | /* Stuff for handling low level control interaction with device */ | ||
245 | struct mutex ctl_lock_mutex; | ||
246 | int ctl_lock_held; /* For debugging */ | ||
247 | struct urb *ctl_write_urb; | ||
248 | struct urb *ctl_read_urb; | ||
249 | unsigned char *ctl_write_buffer; | ||
250 | unsigned char *ctl_read_buffer; | ||
251 | volatile int ctl_write_pend_flag; | ||
252 | volatile int ctl_read_pend_flag; | ||
253 | volatile int ctl_timeout_flag; | ||
254 | struct completion ctl_done; | ||
255 | unsigned char cmd_buffer[PVR2_CTL_BUFFSIZE]; | ||
256 | int cmd_debug_state; // Low level command debugging info | ||
257 | unsigned char cmd_debug_code; // | ||
258 | unsigned int cmd_debug_write_len; // | ||
259 | unsigned int cmd_debug_read_len; // | ||
260 | |||
261 | int flag_ok; // device in known good state | ||
262 | int flag_disconnected; // flag_ok == 0 due to disconnect | ||
263 | int flag_init_ok; // true if structure is fully initialized | ||
264 | int flag_streaming_enabled; // true if streaming should be on | ||
265 | int fw1_state; // current situation with fw1 | ||
266 | |||
267 | int flag_decoder_is_tuned; | ||
268 | |||
269 | struct pvr2_decoder_ctrl *decoder_ctrl; | ||
270 | |||
271 | // CPU firmware info (used to help find / save firmware data) | ||
272 | char *fw_buffer; | ||
273 | unsigned int fw_size; | ||
274 | |||
275 | // Which subsystem pieces have been enabled / configured | ||
276 | unsigned long subsys_enabled_mask; | ||
277 | |||
278 | // Which subsystems are manipulated to enable streaming | ||
279 | unsigned long subsys_stream_mask; | ||
280 | |||
281 | // True if there is a request to trigger logging of state in each | ||
282 | // module. | ||
283 | int log_requested; | ||
284 | |||
285 | /* Tuner / frequency control stuff */ | ||
286 | unsigned int tuner_type; | ||
287 | int tuner_updated; | ||
288 | unsigned int freqVal; | ||
289 | int freqDirty; | ||
290 | |||
291 | /* Video standard handling */ | ||
292 | v4l2_std_id std_mask_eeprom; // Hardware supported selections | ||
293 | v4l2_std_id std_mask_avail; // Which standards we may select from | ||
294 | v4l2_std_id std_mask_cur; // Currently selected standard(s) | ||
295 | unsigned int std_enum_cnt; // # of enumerated standards | ||
296 | int std_enum_cur; // selected standard enumeration value | ||
297 | int std_dirty; // True if std_mask_cur has changed | ||
298 | struct pvr2_ctl_info std_info_enum; | ||
299 | struct pvr2_ctl_info std_info_avail; | ||
300 | struct pvr2_ctl_info std_info_cur; | ||
301 | struct v4l2_standard *std_defs; | ||
302 | const char **std_enum_names; | ||
303 | |||
304 | // Generated string names, one per actual V4L2 standard | ||
305 | const char *std_mask_ptrs[32]; | ||
306 | char std_mask_names[32][10]; | ||
307 | |||
308 | int unit_number; /* ID for driver instance */ | ||
309 | unsigned long serial_number; /* ID for hardware itself */ | ||
310 | |||
311 | /* Minor number used by v4l logic (yes, this is a hack, as there should | ||
312 | be no v4l junk here). There is probably a better way to do this. */ | ||
313 | int v4l_minor_number; | ||
314 | |||
315 | /* Location of eeprom or a negative number if none */ | ||
316 | int eeprom_addr; | ||
317 | |||
318 | enum pvr2_config config; | ||
319 | |||
320 | /* Information about what audio signal we're hearing */ | ||
321 | int flag_stereo; | ||
322 | int flag_bilingual; | ||
323 | struct pvr2_audio_stat *audio_stat; | ||
324 | |||
325 | /* Control state needed for cx2341x module */ | ||
326 | struct cx2341x_mpeg_params enc_cur_state; | ||
327 | struct cx2341x_mpeg_params enc_ctl_state; | ||
328 | /* True if an encoder attribute has changed */ | ||
329 | int enc_stale; | ||
330 | /* True if enc_cur_state is valid */ | ||
331 | int enc_cur_valid; | ||
332 | |||
333 | /* Control state */ | ||
334 | #define VCREATE_DATA(lab) int lab##_val; int lab##_dirty | ||
335 | VCREATE_DATA(brightness); | ||
336 | VCREATE_DATA(contrast); | ||
337 | VCREATE_DATA(saturation); | ||
338 | VCREATE_DATA(hue); | ||
339 | VCREATE_DATA(volume); | ||
340 | VCREATE_DATA(balance); | ||
341 | VCREATE_DATA(bass); | ||
342 | VCREATE_DATA(treble); | ||
343 | VCREATE_DATA(mute); | ||
344 | VCREATE_DATA(input); | ||
345 | VCREATE_DATA(audiomode); | ||
346 | VCREATE_DATA(res_hor); | ||
347 | VCREATE_DATA(res_ver); | ||
348 | VCREATE_DATA(srate); | ||
349 | #undef VCREATE_DATA | ||
350 | |||
351 | struct pvr2_ctld_info *mpeg_ctrl_info; | ||
352 | |||
353 | struct pvr2_ctrl *controls; | ||
354 | unsigned int control_cnt; | ||
355 | }; | ||
356 | |||
357 | int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw); | ||
358 | |||
359 | unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *); | ||
360 | |||
361 | void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw, | ||
362 | unsigned long msk,unsigned long val); | ||
363 | void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw, | ||
364 | unsigned long msk, | ||
365 | unsigned long val); | ||
366 | |||
367 | void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw); | ||
368 | void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw); | ||
369 | |||
370 | int pvr2_i2c_basic_op(struct pvr2_hdw *,u8 i2c_addr, | ||
371 | u8 *wdata,u16 wlen, | ||
372 | u8 *rdata,u16 rlen); | ||
373 | |||
374 | #endif /* __PVRUSB2_HDW_INTERNAL_H */ | ||
375 | |||
376 | /* | ||
377 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
378 | *** Local Variables: *** | ||
379 | *** mode: c *** | ||
380 | *** fill-column: 75 *** | ||
381 | *** tab-width: 8 *** | ||
382 | *** c-basic-offset: 8 *** | ||
383 | *** End: *** | ||
384 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c new file mode 100644 index 000000000000..643c471375da --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c | |||
@@ -0,0 +1,3120 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/errno.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/firmware.h> | ||
26 | #include <linux/videodev2.h> | ||
27 | #include <asm/semaphore.h> | ||
28 | #include "pvrusb2.h" | ||
29 | #include "pvrusb2-std.h" | ||
30 | #include "pvrusb2-util.h" | ||
31 | #include "pvrusb2-hdw.h" | ||
32 | #include "pvrusb2-i2c-core.h" | ||
33 | #include "pvrusb2-tuner.h" | ||
34 | #include "pvrusb2-eeprom.h" | ||
35 | #include "pvrusb2-hdw-internal.h" | ||
36 | #include "pvrusb2-encoder.h" | ||
37 | #include "pvrusb2-debug.h" | ||
38 | |||
39 | struct usb_device_id pvr2_device_table[] = { | ||
40 | [PVR2_HDW_TYPE_29XXX] = { USB_DEVICE(0x2040, 0x2900) }, | ||
41 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
42 | [PVR2_HDW_TYPE_24XXX] = { USB_DEVICE(0x2040, 0x2400) }, | ||
43 | #endif | ||
44 | { } | ||
45 | }; | ||
46 | |||
47 | MODULE_DEVICE_TABLE(usb, pvr2_device_table); | ||
48 | |||
49 | static const char *pvr2_device_names[] = { | ||
50 | [PVR2_HDW_TYPE_29XXX] = "WinTV PVR USB2 Model Category 29xxxx", | ||
51 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
52 | [PVR2_HDW_TYPE_24XXX] = "WinTV PVR USB2 Model Category 24xxxx", | ||
53 | #endif | ||
54 | }; | ||
55 | |||
56 | struct pvr2_string_table { | ||
57 | const char **lst; | ||
58 | unsigned int cnt; | ||
59 | }; | ||
60 | |||
61 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
62 | // Names of other client modules to request for 24xxx model hardware | ||
63 | static const char *pvr2_client_24xxx[] = { | ||
64 | "cx25840", | ||
65 | "tuner", | ||
66 | "tda9887", | ||
67 | "wm8775", | ||
68 | }; | ||
69 | #endif | ||
70 | |||
71 | // Names of other client modules to request for 29xxx model hardware | ||
72 | static const char *pvr2_client_29xxx[] = { | ||
73 | "msp3400", | ||
74 | "saa7115", | ||
75 | "tuner", | ||
76 | "tda9887", | ||
77 | }; | ||
78 | |||
79 | static struct pvr2_string_table pvr2_client_lists[] = { | ||
80 | [PVR2_HDW_TYPE_29XXX] = { | ||
81 | pvr2_client_29xxx, | ||
82 | sizeof(pvr2_client_29xxx)/sizeof(pvr2_client_29xxx[0]), | ||
83 | }, | ||
84 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
85 | [PVR2_HDW_TYPE_24XXX] = { | ||
86 | pvr2_client_24xxx, | ||
87 | sizeof(pvr2_client_24xxx)/sizeof(pvr2_client_24xxx[0]), | ||
88 | }, | ||
89 | #endif | ||
90 | }; | ||
91 | |||
92 | static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = 0}; | ||
93 | DECLARE_MUTEX(pvr2_unit_sem); | ||
94 | |||
95 | static int ctlchg = 0; | ||
96 | static int initusbreset = 1; | ||
97 | static int procreload = 0; | ||
98 | static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 }; | ||
99 | static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 }; | ||
100 | static int video_std[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 }; | ||
101 | static int init_pause_msec = 0; | ||
102 | |||
103 | module_param(ctlchg, int, S_IRUGO|S_IWUSR); | ||
104 | MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value"); | ||
105 | module_param(init_pause_msec, int, S_IRUGO|S_IWUSR); | ||
106 | MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay"); | ||
107 | module_param(initusbreset, int, S_IRUGO|S_IWUSR); | ||
108 | MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe"); | ||
109 | module_param(procreload, int, S_IRUGO|S_IWUSR); | ||
110 | MODULE_PARM_DESC(procreload, | ||
111 | "Attempt init failure recovery with firmware reload"); | ||
112 | module_param_array(tuner, int, NULL, 0444); | ||
113 | MODULE_PARM_DESC(tuner,"specify installed tuner type"); | ||
114 | module_param_array(video_std, int, NULL, 0444); | ||
115 | MODULE_PARM_DESC(video_std,"specify initial video standard"); | ||
116 | module_param_array(tolerance, int, NULL, 0444); | ||
117 | MODULE_PARM_DESC(tolerance,"specify stream error tolerance"); | ||
118 | |||
119 | #define PVR2_CTL_WRITE_ENDPOINT 0x01 | ||
120 | #define PVR2_CTL_READ_ENDPOINT 0x81 | ||
121 | |||
122 | #define PVR2_GPIO_IN 0x9008 | ||
123 | #define PVR2_GPIO_OUT 0x900c | ||
124 | #define PVR2_GPIO_DIR 0x9020 | ||
125 | |||
126 | #define trace_firmware(...) pvr2_trace(PVR2_TRACE_FIRMWARE,__VA_ARGS__) | ||
127 | |||
128 | #define PVR2_FIRMWARE_ENDPOINT 0x02 | ||
129 | |||
130 | /* size of a firmware chunk */ | ||
131 | #define FIRMWARE_CHUNK_SIZE 0x2000 | ||
132 | |||
133 | /* Define the list of additional controls we'll dynamically construct based | ||
134 | on a query of the cx2341x module. */ | ||
135 | struct pvr2_mpeg_ids { | ||
136 | const char *strid; | ||
137 | int id; | ||
138 | }; | ||
139 | static const struct pvr2_mpeg_ids mpeg_ids[] = { | ||
140 | { | ||
141 | .strid = "audio_layer", | ||
142 | .id = V4L2_CID_MPEG_AUDIO_ENCODING, | ||
143 | },{ | ||
144 | .strid = "audio_bitrate", | ||
145 | .id = V4L2_CID_MPEG_AUDIO_L2_BITRATE, | ||
146 | },{ | ||
147 | /* Already using audio_mode elsewhere :-( */ | ||
148 | .strid = "mpeg_audio_mode", | ||
149 | .id = V4L2_CID_MPEG_AUDIO_MODE, | ||
150 | },{ | ||
151 | .strid = "mpeg_audio_mode_extension", | ||
152 | .id = V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, | ||
153 | },{ | ||
154 | .strid = "audio_emphasis", | ||
155 | .id = V4L2_CID_MPEG_AUDIO_EMPHASIS, | ||
156 | },{ | ||
157 | .strid = "audio_crc", | ||
158 | .id = V4L2_CID_MPEG_AUDIO_CRC, | ||
159 | },{ | ||
160 | .strid = "video_aspect", | ||
161 | .id = V4L2_CID_MPEG_VIDEO_ASPECT, | ||
162 | },{ | ||
163 | .strid = "video_b_frames", | ||
164 | .id = V4L2_CID_MPEG_VIDEO_B_FRAMES, | ||
165 | },{ | ||
166 | .strid = "video_gop_size", | ||
167 | .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE, | ||
168 | },{ | ||
169 | .strid = "video_gop_closure", | ||
170 | .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, | ||
171 | },{ | ||
172 | .strid = "video_pulldown", | ||
173 | .id = V4L2_CID_MPEG_VIDEO_PULLDOWN, | ||
174 | },{ | ||
175 | .strid = "video_bitrate_mode", | ||
176 | .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE, | ||
177 | },{ | ||
178 | .strid = "video_bitrate", | ||
179 | .id = V4L2_CID_MPEG_VIDEO_BITRATE, | ||
180 | },{ | ||
181 | .strid = "video_bitrate_peak", | ||
182 | .id = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, | ||
183 | },{ | ||
184 | .strid = "video_temporal_decimation", | ||
185 | .id = V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, | ||
186 | },{ | ||
187 | .strid = "stream_type", | ||
188 | .id = V4L2_CID_MPEG_STREAM_TYPE, | ||
189 | },{ | ||
190 | .strid = "video_spatial_filter_mode", | ||
191 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, | ||
192 | },{ | ||
193 | .strid = "video_spatial_filter", | ||
194 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, | ||
195 | },{ | ||
196 | .strid = "video_luma_spatial_filter_type", | ||
197 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, | ||
198 | },{ | ||
199 | .strid = "video_chroma_spatial_filter_type", | ||
200 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, | ||
201 | },{ | ||
202 | .strid = "video_temporal_filter_mode", | ||
203 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, | ||
204 | },{ | ||
205 | .strid = "video_temporal_filter", | ||
206 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, | ||
207 | },{ | ||
208 | .strid = "video_median_filter_type", | ||
209 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, | ||
210 | },{ | ||
211 | .strid = "video_luma_median_filter_top", | ||
212 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, | ||
213 | },{ | ||
214 | .strid = "video_luma_median_filter_bottom", | ||
215 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, | ||
216 | },{ | ||
217 | .strid = "video_chroma_median_filter_top", | ||
218 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, | ||
219 | },{ | ||
220 | .strid = "video_chroma_median_filter_bottom", | ||
221 | .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, | ||
222 | } | ||
223 | }; | ||
224 | #define MPEGDEF_COUNT (sizeof(mpeg_ids)/sizeof(mpeg_ids[0])) | ||
225 | |||
226 | static const char *control_values_srate[] = { | ||
227 | [PVR2_CVAL_SRATE_48] = "48KHz", | ||
228 | [PVR2_CVAL_SRATE_44_1] = "44.1KHz", | ||
229 | }; | ||
230 | |||
231 | |||
232 | |||
233 | |||
234 | static const char *control_values_input[] = { | ||
235 | [PVR2_CVAL_INPUT_TV] = "television", /*xawtv needs this name*/ | ||
236 | [PVR2_CVAL_INPUT_RADIO] = "radio", | ||
237 | [PVR2_CVAL_INPUT_SVIDEO] = "s-video", | ||
238 | [PVR2_CVAL_INPUT_COMPOSITE] = "composite", | ||
239 | }; | ||
240 | |||
241 | |||
242 | static const char *control_values_audiomode[] = { | ||
243 | [V4L2_TUNER_MODE_MONO] = "Mono", | ||
244 | [V4L2_TUNER_MODE_STEREO] = "Stereo", | ||
245 | [V4L2_TUNER_MODE_LANG1] = "Lang1", | ||
246 | [V4L2_TUNER_MODE_LANG2] = "Lang2", | ||
247 | [V4L2_TUNER_MODE_LANG1_LANG2] = "Lang1+Lang2", | ||
248 | }; | ||
249 | |||
250 | |||
251 | static const char *control_values_hsm[] = { | ||
252 | [PVR2_CVAL_HSM_FAIL] = "Fail", | ||
253 | [PVR2_CVAL_HSM_HIGH] = "High", | ||
254 | [PVR2_CVAL_HSM_FULL] = "Full", | ||
255 | }; | ||
256 | |||
257 | |||
258 | static const char *control_values_subsystem[] = { | ||
259 | [PVR2_SUBSYS_B_ENC_FIRMWARE] = "enc_firmware", | ||
260 | [PVR2_SUBSYS_B_ENC_CFG] = "enc_config", | ||
261 | [PVR2_SUBSYS_B_DIGITIZER_RUN] = "digitizer_run", | ||
262 | [PVR2_SUBSYS_B_USBSTREAM_RUN] = "usbstream_run", | ||
263 | [PVR2_SUBSYS_B_ENC_RUN] = "enc_run", | ||
264 | }; | ||
265 | |||
266 | |||
267 | static int ctrl_channelfreq_get(struct pvr2_ctrl *cptr,int *vp) | ||
268 | { | ||
269 | struct pvr2_hdw *hdw = cptr->hdw; | ||
270 | if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) { | ||
271 | *vp = hdw->freqTable[hdw->freqProgSlot-1]; | ||
272 | } else { | ||
273 | *vp = 0; | ||
274 | } | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static int ctrl_channelfreq_set(struct pvr2_ctrl *cptr,int m,int v) | ||
279 | { | ||
280 | struct pvr2_hdw *hdw = cptr->hdw; | ||
281 | if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) { | ||
282 | hdw->freqTable[hdw->freqProgSlot-1] = v; | ||
283 | } | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static int ctrl_channelprog_get(struct pvr2_ctrl *cptr,int *vp) | ||
288 | { | ||
289 | *vp = cptr->hdw->freqProgSlot; | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static int ctrl_channelprog_set(struct pvr2_ctrl *cptr,int m,int v) | ||
294 | { | ||
295 | struct pvr2_hdw *hdw = cptr->hdw; | ||
296 | if ((v >= 0) && (v <= FREQTABLE_SIZE)) { | ||
297 | hdw->freqProgSlot = v; | ||
298 | } | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | static int ctrl_channel_get(struct pvr2_ctrl *cptr,int *vp) | ||
303 | { | ||
304 | *vp = cptr->hdw->freqSlot; | ||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | static int ctrl_channel_set(struct pvr2_ctrl *cptr,int m,int v) | ||
309 | { | ||
310 | unsigned freq = 0; | ||
311 | struct pvr2_hdw *hdw = cptr->hdw; | ||
312 | hdw->freqSlot = v; | ||
313 | if ((hdw->freqSlot > 0) && (hdw->freqSlot <= FREQTABLE_SIZE)) { | ||
314 | freq = hdw->freqTable[hdw->freqSlot-1]; | ||
315 | } | ||
316 | if (freq && (freq != hdw->freqVal)) { | ||
317 | hdw->freqVal = freq; | ||
318 | hdw->freqDirty = !0; | ||
319 | } | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | static int ctrl_freq_get(struct pvr2_ctrl *cptr,int *vp) | ||
324 | { | ||
325 | *vp = cptr->hdw->freqVal; | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static int ctrl_freq_is_dirty(struct pvr2_ctrl *cptr) | ||
330 | { | ||
331 | return cptr->hdw->freqDirty != 0; | ||
332 | } | ||
333 | |||
334 | static void ctrl_freq_clear_dirty(struct pvr2_ctrl *cptr) | ||
335 | { | ||
336 | cptr->hdw->freqDirty = 0; | ||
337 | } | ||
338 | |||
339 | static int ctrl_freq_set(struct pvr2_ctrl *cptr,int m,int v) | ||
340 | { | ||
341 | struct pvr2_hdw *hdw = cptr->hdw; | ||
342 | hdw->freqVal = v; | ||
343 | hdw->freqDirty = !0; | ||
344 | hdw->freqSlot = 0; | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static int ctrl_cx2341x_is_dirty(struct pvr2_ctrl *cptr) | ||
349 | { | ||
350 | return cptr->hdw->enc_stale != 0; | ||
351 | } | ||
352 | |||
353 | static void ctrl_cx2341x_clear_dirty(struct pvr2_ctrl *cptr) | ||
354 | { | ||
355 | cptr->hdw->enc_stale = 0; | ||
356 | } | ||
357 | |||
358 | static int ctrl_cx2341x_get(struct pvr2_ctrl *cptr,int *vp) | ||
359 | { | ||
360 | int ret; | ||
361 | struct v4l2_ext_controls cs; | ||
362 | struct v4l2_ext_control c1; | ||
363 | memset(&cs,0,sizeof(cs)); | ||
364 | memset(&c1,0,sizeof(c1)); | ||
365 | cs.controls = &c1; | ||
366 | cs.count = 1; | ||
367 | c1.id = cptr->info->v4l_id; | ||
368 | ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs, | ||
369 | VIDIOC_G_EXT_CTRLS); | ||
370 | if (ret) return ret; | ||
371 | *vp = c1.value; | ||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static int ctrl_cx2341x_set(struct pvr2_ctrl *cptr,int m,int v) | ||
376 | { | ||
377 | int ret; | ||
378 | struct v4l2_ext_controls cs; | ||
379 | struct v4l2_ext_control c1; | ||
380 | memset(&cs,0,sizeof(cs)); | ||
381 | memset(&c1,0,sizeof(c1)); | ||
382 | cs.controls = &c1; | ||
383 | cs.count = 1; | ||
384 | c1.id = cptr->info->v4l_id; | ||
385 | c1.value = v; | ||
386 | ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs, | ||
387 | VIDIOC_S_EXT_CTRLS); | ||
388 | if (ret) return ret; | ||
389 | cptr->hdw->enc_stale = !0; | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static unsigned int ctrl_cx2341x_getv4lflags(struct pvr2_ctrl *cptr) | ||
394 | { | ||
395 | struct v4l2_queryctrl qctrl; | ||
396 | struct pvr2_ctl_info *info; | ||
397 | qctrl.id = cptr->info->v4l_id; | ||
398 | cx2341x_ctrl_query(&cptr->hdw->enc_ctl_state,&qctrl); | ||
399 | /* Strip out the const so we can adjust a function pointer. It's | ||
400 | OK to do this here because we know this is a dynamically created | ||
401 | control, so the underlying storage for the info pointer is (a) | ||
402 | private to us, and (b) not in read-only storage. Either we do | ||
403 | this or we significantly complicate the underlying control | ||
404 | implementation. */ | ||
405 | info = (struct pvr2_ctl_info *)(cptr->info); | ||
406 | if (qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY) { | ||
407 | if (info->set_value) { | ||
408 | info->set_value = 0; | ||
409 | } | ||
410 | } else { | ||
411 | if (!(info->set_value)) { | ||
412 | info->set_value = ctrl_cx2341x_set; | ||
413 | } | ||
414 | } | ||
415 | return qctrl.flags; | ||
416 | } | ||
417 | |||
418 | static int ctrl_streamingenabled_get(struct pvr2_ctrl *cptr,int *vp) | ||
419 | { | ||
420 | *vp = cptr->hdw->flag_streaming_enabled; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int ctrl_hsm_get(struct pvr2_ctrl *cptr,int *vp) | ||
425 | { | ||
426 | int result = pvr2_hdw_is_hsm(cptr->hdw); | ||
427 | *vp = PVR2_CVAL_HSM_FULL; | ||
428 | if (result < 0) *vp = PVR2_CVAL_HSM_FAIL; | ||
429 | if (result) *vp = PVR2_CVAL_HSM_HIGH; | ||
430 | return 0; | ||
431 | } | ||
432 | |||
433 | static int ctrl_stdavail_get(struct pvr2_ctrl *cptr,int *vp) | ||
434 | { | ||
435 | *vp = cptr->hdw->std_mask_avail; | ||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | static int ctrl_stdavail_set(struct pvr2_ctrl *cptr,int m,int v) | ||
440 | { | ||
441 | struct pvr2_hdw *hdw = cptr->hdw; | ||
442 | v4l2_std_id ns; | ||
443 | ns = hdw->std_mask_avail; | ||
444 | ns = (ns & ~m) | (v & m); | ||
445 | if (ns == hdw->std_mask_avail) return 0; | ||
446 | hdw->std_mask_avail = ns; | ||
447 | pvr2_hdw_internal_set_std_avail(hdw); | ||
448 | pvr2_hdw_internal_find_stdenum(hdw); | ||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static int ctrl_std_val_to_sym(struct pvr2_ctrl *cptr,int msk,int val, | ||
453 | char *bufPtr,unsigned int bufSize, | ||
454 | unsigned int *len) | ||
455 | { | ||
456 | *len = pvr2_std_id_to_str(bufPtr,bufSize,msk & val); | ||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static int ctrl_std_sym_to_val(struct pvr2_ctrl *cptr, | ||
461 | const char *bufPtr,unsigned int bufSize, | ||
462 | int *mskp,int *valp) | ||
463 | { | ||
464 | int ret; | ||
465 | v4l2_std_id id; | ||
466 | ret = pvr2_std_str_to_id(&id,bufPtr,bufSize); | ||
467 | if (ret < 0) return ret; | ||
468 | if (mskp) *mskp = id; | ||
469 | if (valp) *valp = id; | ||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | static int ctrl_stdcur_get(struct pvr2_ctrl *cptr,int *vp) | ||
474 | { | ||
475 | *vp = cptr->hdw->std_mask_cur; | ||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | static int ctrl_stdcur_set(struct pvr2_ctrl *cptr,int m,int v) | ||
480 | { | ||
481 | struct pvr2_hdw *hdw = cptr->hdw; | ||
482 | v4l2_std_id ns; | ||
483 | ns = hdw->std_mask_cur; | ||
484 | ns = (ns & ~m) | (v & m); | ||
485 | if (ns == hdw->std_mask_cur) return 0; | ||
486 | hdw->std_mask_cur = ns; | ||
487 | hdw->std_dirty = !0; | ||
488 | pvr2_hdw_internal_find_stdenum(hdw); | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static int ctrl_stdcur_is_dirty(struct pvr2_ctrl *cptr) | ||
493 | { | ||
494 | return cptr->hdw->std_dirty != 0; | ||
495 | } | ||
496 | |||
497 | static void ctrl_stdcur_clear_dirty(struct pvr2_ctrl *cptr) | ||
498 | { | ||
499 | cptr->hdw->std_dirty = 0; | ||
500 | } | ||
501 | |||
502 | static int ctrl_signal_get(struct pvr2_ctrl *cptr,int *vp) | ||
503 | { | ||
504 | *vp = ((pvr2_hdw_get_signal_status_internal(cptr->hdw) & | ||
505 | PVR2_SIGNAL_OK) ? 1 : 0); | ||
506 | return 0; | ||
507 | } | ||
508 | |||
509 | static int ctrl_subsys_get(struct pvr2_ctrl *cptr,int *vp) | ||
510 | { | ||
511 | *vp = cptr->hdw->subsys_enabled_mask; | ||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static int ctrl_subsys_set(struct pvr2_ctrl *cptr,int m,int v) | ||
516 | { | ||
517 | pvr2_hdw_subsys_bit_chg_no_lock(cptr->hdw,m,v); | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int ctrl_subsys_stream_get(struct pvr2_ctrl *cptr,int *vp) | ||
522 | { | ||
523 | *vp = cptr->hdw->subsys_stream_mask; | ||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | static int ctrl_subsys_stream_set(struct pvr2_ctrl *cptr,int m,int v) | ||
528 | { | ||
529 | pvr2_hdw_subsys_stream_bit_chg_no_lock(cptr->hdw,m,v); | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static int ctrl_stdenumcur_set(struct pvr2_ctrl *cptr,int m,int v) | ||
534 | { | ||
535 | struct pvr2_hdw *hdw = cptr->hdw; | ||
536 | if (v < 0) return -EINVAL; | ||
537 | if (v > hdw->std_enum_cnt) return -EINVAL; | ||
538 | hdw->std_enum_cur = v; | ||
539 | if (!v) return 0; | ||
540 | v--; | ||
541 | if (hdw->std_mask_cur == hdw->std_defs[v].id) return 0; | ||
542 | hdw->std_mask_cur = hdw->std_defs[v].id; | ||
543 | hdw->std_dirty = !0; | ||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | |||
548 | static int ctrl_stdenumcur_get(struct pvr2_ctrl *cptr,int *vp) | ||
549 | { | ||
550 | *vp = cptr->hdw->std_enum_cur; | ||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | |||
555 | static int ctrl_stdenumcur_is_dirty(struct pvr2_ctrl *cptr) | ||
556 | { | ||
557 | return cptr->hdw->std_dirty != 0; | ||
558 | } | ||
559 | |||
560 | |||
561 | static void ctrl_stdenumcur_clear_dirty(struct pvr2_ctrl *cptr) | ||
562 | { | ||
563 | cptr->hdw->std_dirty = 0; | ||
564 | } | ||
565 | |||
566 | |||
567 | #define DEFINT(vmin,vmax) \ | ||
568 | .type = pvr2_ctl_int, \ | ||
569 | .def.type_int.min_value = vmin, \ | ||
570 | .def.type_int.max_value = vmax | ||
571 | |||
572 | #define DEFENUM(tab) \ | ||
573 | .type = pvr2_ctl_enum, \ | ||
574 | .def.type_enum.count = (sizeof(tab)/sizeof((tab)[0])), \ | ||
575 | .def.type_enum.value_names = tab | ||
576 | |||
577 | #define DEFBOOL \ | ||
578 | .type = pvr2_ctl_bool | ||
579 | |||
580 | #define DEFMASK(msk,tab) \ | ||
581 | .type = pvr2_ctl_bitmask, \ | ||
582 | .def.type_bitmask.valid_bits = msk, \ | ||
583 | .def.type_bitmask.bit_names = tab | ||
584 | |||
585 | #define DEFREF(vname) \ | ||
586 | .set_value = ctrl_set_##vname, \ | ||
587 | .get_value = ctrl_get_##vname, \ | ||
588 | .is_dirty = ctrl_isdirty_##vname, \ | ||
589 | .clear_dirty = ctrl_cleardirty_##vname | ||
590 | |||
591 | |||
592 | #define VCREATE_FUNCS(vname) \ | ||
593 | static int ctrl_get_##vname(struct pvr2_ctrl *cptr,int *vp) \ | ||
594 | {*vp = cptr->hdw->vname##_val; return 0;} \ | ||
595 | static int ctrl_set_##vname(struct pvr2_ctrl *cptr,int m,int v) \ | ||
596 | {cptr->hdw->vname##_val = v; cptr->hdw->vname##_dirty = !0; return 0;} \ | ||
597 | static int ctrl_isdirty_##vname(struct pvr2_ctrl *cptr) \ | ||
598 | {return cptr->hdw->vname##_dirty != 0;} \ | ||
599 | static void ctrl_cleardirty_##vname(struct pvr2_ctrl *cptr) \ | ||
600 | {cptr->hdw->vname##_dirty = 0;} | ||
601 | |||
602 | VCREATE_FUNCS(brightness) | ||
603 | VCREATE_FUNCS(contrast) | ||
604 | VCREATE_FUNCS(saturation) | ||
605 | VCREATE_FUNCS(hue) | ||
606 | VCREATE_FUNCS(volume) | ||
607 | VCREATE_FUNCS(balance) | ||
608 | VCREATE_FUNCS(bass) | ||
609 | VCREATE_FUNCS(treble) | ||
610 | VCREATE_FUNCS(mute) | ||
611 | VCREATE_FUNCS(input) | ||
612 | VCREATE_FUNCS(audiomode) | ||
613 | VCREATE_FUNCS(res_hor) | ||
614 | VCREATE_FUNCS(res_ver) | ||
615 | VCREATE_FUNCS(srate) | ||
616 | |||
617 | #define MIN_FREQ 55250000L | ||
618 | #define MAX_FREQ 850000000L | ||
619 | |||
620 | /* Table definition of all controls which can be manipulated */ | ||
621 | static const struct pvr2_ctl_info control_defs[] = { | ||
622 | { | ||
623 | .v4l_id = V4L2_CID_BRIGHTNESS, | ||
624 | .desc = "Brightness", | ||
625 | .name = "brightness", | ||
626 | .default_value = 128, | ||
627 | DEFREF(brightness), | ||
628 | DEFINT(0,255), | ||
629 | },{ | ||
630 | .v4l_id = V4L2_CID_CONTRAST, | ||
631 | .desc = "Contrast", | ||
632 | .name = "contrast", | ||
633 | .default_value = 68, | ||
634 | DEFREF(contrast), | ||
635 | DEFINT(0,127), | ||
636 | },{ | ||
637 | .v4l_id = V4L2_CID_SATURATION, | ||
638 | .desc = "Saturation", | ||
639 | .name = "saturation", | ||
640 | .default_value = 64, | ||
641 | DEFREF(saturation), | ||
642 | DEFINT(0,127), | ||
643 | },{ | ||
644 | .v4l_id = V4L2_CID_HUE, | ||
645 | .desc = "Hue", | ||
646 | .name = "hue", | ||
647 | .default_value = 0, | ||
648 | DEFREF(hue), | ||
649 | DEFINT(-128,127), | ||
650 | },{ | ||
651 | .v4l_id = V4L2_CID_AUDIO_VOLUME, | ||
652 | .desc = "Volume", | ||
653 | .name = "volume", | ||
654 | .default_value = 65535, | ||
655 | DEFREF(volume), | ||
656 | DEFINT(0,65535), | ||
657 | },{ | ||
658 | .v4l_id = V4L2_CID_AUDIO_BALANCE, | ||
659 | .desc = "Balance", | ||
660 | .name = "balance", | ||
661 | .default_value = 0, | ||
662 | DEFREF(balance), | ||
663 | DEFINT(-32768,32767), | ||
664 | },{ | ||
665 | .v4l_id = V4L2_CID_AUDIO_BASS, | ||
666 | .desc = "Bass", | ||
667 | .name = "bass", | ||
668 | .default_value = 0, | ||
669 | DEFREF(bass), | ||
670 | DEFINT(-32768,32767), | ||
671 | },{ | ||
672 | .v4l_id = V4L2_CID_AUDIO_TREBLE, | ||
673 | .desc = "Treble", | ||
674 | .name = "treble", | ||
675 | .default_value = 0, | ||
676 | DEFREF(treble), | ||
677 | DEFINT(-32768,32767), | ||
678 | },{ | ||
679 | .v4l_id = V4L2_CID_AUDIO_MUTE, | ||
680 | .desc = "Mute", | ||
681 | .name = "mute", | ||
682 | .default_value = 0, | ||
683 | DEFREF(mute), | ||
684 | DEFBOOL, | ||
685 | },{ | ||
686 | .desc = "Video Source", | ||
687 | .name = "input", | ||
688 | .internal_id = PVR2_CID_INPUT, | ||
689 | .default_value = PVR2_CVAL_INPUT_TV, | ||
690 | DEFREF(input), | ||
691 | DEFENUM(control_values_input), | ||
692 | },{ | ||
693 | .desc = "Audio Mode", | ||
694 | .name = "audio_mode", | ||
695 | .internal_id = PVR2_CID_AUDIOMODE, | ||
696 | .default_value = V4L2_TUNER_MODE_STEREO, | ||
697 | DEFREF(audiomode), | ||
698 | DEFENUM(control_values_audiomode), | ||
699 | },{ | ||
700 | .desc = "Horizontal capture resolution", | ||
701 | .name = "resolution_hor", | ||
702 | .internal_id = PVR2_CID_HRES, | ||
703 | .default_value = 720, | ||
704 | DEFREF(res_hor), | ||
705 | DEFINT(320,720), | ||
706 | },{ | ||
707 | .desc = "Vertical capture resolution", | ||
708 | .name = "resolution_ver", | ||
709 | .internal_id = PVR2_CID_VRES, | ||
710 | .default_value = 480, | ||
711 | DEFREF(res_ver), | ||
712 | DEFINT(200,625), | ||
713 | },{ | ||
714 | .v4l_id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, | ||
715 | .desc = "Sample rate", | ||
716 | .name = "srate", | ||
717 | .default_value = PVR2_CVAL_SRATE_48, | ||
718 | DEFREF(srate), | ||
719 | DEFENUM(control_values_srate), | ||
720 | },{ | ||
721 | .desc = "Tuner Frequency (Hz)", | ||
722 | .name = "frequency", | ||
723 | .internal_id = PVR2_CID_FREQUENCY, | ||
724 | .default_value = 175250000L, | ||
725 | .set_value = ctrl_freq_set, | ||
726 | .get_value = ctrl_freq_get, | ||
727 | .is_dirty = ctrl_freq_is_dirty, | ||
728 | .clear_dirty = ctrl_freq_clear_dirty, | ||
729 | DEFINT(MIN_FREQ,MAX_FREQ), | ||
730 | },{ | ||
731 | .desc = "Channel", | ||
732 | .name = "channel", | ||
733 | .set_value = ctrl_channel_set, | ||
734 | .get_value = ctrl_channel_get, | ||
735 | DEFINT(0,FREQTABLE_SIZE), | ||
736 | },{ | ||
737 | .desc = "Channel Program Frequency", | ||
738 | .name = "freq_table_value", | ||
739 | .set_value = ctrl_channelfreq_set, | ||
740 | .get_value = ctrl_channelfreq_get, | ||
741 | DEFINT(MIN_FREQ,MAX_FREQ), | ||
742 | },{ | ||
743 | .desc = "Channel Program ID", | ||
744 | .name = "freq_table_channel", | ||
745 | .set_value = ctrl_channelprog_set, | ||
746 | .get_value = ctrl_channelprog_get, | ||
747 | DEFINT(0,FREQTABLE_SIZE), | ||
748 | },{ | ||
749 | .desc = "Streaming Enabled", | ||
750 | .name = "streaming_enabled", | ||
751 | .get_value = ctrl_streamingenabled_get, | ||
752 | DEFBOOL, | ||
753 | },{ | ||
754 | .desc = "USB Speed", | ||
755 | .name = "usb_speed", | ||
756 | .get_value = ctrl_hsm_get, | ||
757 | DEFENUM(control_values_hsm), | ||
758 | },{ | ||
759 | .desc = "Signal Present", | ||
760 | .name = "signal_present", | ||
761 | .get_value = ctrl_signal_get, | ||
762 | DEFBOOL, | ||
763 | },{ | ||
764 | .desc = "Video Standards Available Mask", | ||
765 | .name = "video_standard_mask_available", | ||
766 | .internal_id = PVR2_CID_STDAVAIL, | ||
767 | .skip_init = !0, | ||
768 | .get_value = ctrl_stdavail_get, | ||
769 | .set_value = ctrl_stdavail_set, | ||
770 | .val_to_sym = ctrl_std_val_to_sym, | ||
771 | .sym_to_val = ctrl_std_sym_to_val, | ||
772 | .type = pvr2_ctl_bitmask, | ||
773 | },{ | ||
774 | .desc = "Video Standards In Use Mask", | ||
775 | .name = "video_standard_mask_active", | ||
776 | .internal_id = PVR2_CID_STDCUR, | ||
777 | .skip_init = !0, | ||
778 | .get_value = ctrl_stdcur_get, | ||
779 | .set_value = ctrl_stdcur_set, | ||
780 | .is_dirty = ctrl_stdcur_is_dirty, | ||
781 | .clear_dirty = ctrl_stdcur_clear_dirty, | ||
782 | .val_to_sym = ctrl_std_val_to_sym, | ||
783 | .sym_to_val = ctrl_std_sym_to_val, | ||
784 | .type = pvr2_ctl_bitmask, | ||
785 | },{ | ||
786 | .desc = "Subsystem enabled mask", | ||
787 | .name = "debug_subsys_mask", | ||
788 | .skip_init = !0, | ||
789 | .get_value = ctrl_subsys_get, | ||
790 | .set_value = ctrl_subsys_set, | ||
791 | DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem), | ||
792 | },{ | ||
793 | .desc = "Subsystem stream mask", | ||
794 | .name = "debug_subsys_stream_mask", | ||
795 | .skip_init = !0, | ||
796 | .get_value = ctrl_subsys_stream_get, | ||
797 | .set_value = ctrl_subsys_stream_set, | ||
798 | DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem), | ||
799 | },{ | ||
800 | .desc = "Video Standard Name", | ||
801 | .name = "video_standard", | ||
802 | .internal_id = PVR2_CID_STDENUM, | ||
803 | .skip_init = !0, | ||
804 | .get_value = ctrl_stdenumcur_get, | ||
805 | .set_value = ctrl_stdenumcur_set, | ||
806 | .is_dirty = ctrl_stdenumcur_is_dirty, | ||
807 | .clear_dirty = ctrl_stdenumcur_clear_dirty, | ||
808 | .type = pvr2_ctl_enum, | ||
809 | } | ||
810 | }; | ||
811 | |||
812 | #define CTRLDEF_COUNT (sizeof(control_defs)/sizeof(control_defs[0])) | ||
813 | |||
814 | |||
815 | const char *pvr2_config_get_name(enum pvr2_config cfg) | ||
816 | { | ||
817 | switch (cfg) { | ||
818 | case pvr2_config_empty: return "empty"; | ||
819 | case pvr2_config_mpeg: return "mpeg"; | ||
820 | case pvr2_config_vbi: return "vbi"; | ||
821 | case pvr2_config_radio: return "radio"; | ||
822 | } | ||
823 | return "<unknown>"; | ||
824 | } | ||
825 | |||
826 | |||
827 | struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *hdw) | ||
828 | { | ||
829 | return hdw->usb_dev; | ||
830 | } | ||
831 | |||
832 | |||
833 | unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *hdw) | ||
834 | { | ||
835 | return hdw->serial_number; | ||
836 | } | ||
837 | |||
838 | |||
839 | struct pvr2_hdw *pvr2_hdw_find(int unit_number) | ||
840 | { | ||
841 | if (unit_number < 0) return 0; | ||
842 | if (unit_number >= PVR_NUM) return 0; | ||
843 | return unit_pointers[unit_number]; | ||
844 | } | ||
845 | |||
846 | |||
847 | int pvr2_hdw_get_unit_number(struct pvr2_hdw *hdw) | ||
848 | { | ||
849 | return hdw->unit_number; | ||
850 | } | ||
851 | |||
852 | |||
853 | /* Attempt to locate one of the given set of files. Messages are logged | ||
854 | as appropriate to what has been found. The return value will be 0 or | ||
855 | greater on success (it will be the index of the file name found) and | ||
856 | fw_entry will be filled in. Otherwise a negative error is returned on | ||
857 | failure. If the return value is -ENOENT then no viable firmware file | ||
858 | could be located. */ | ||
859 | static int pvr2_locate_firmware(struct pvr2_hdw *hdw, | ||
860 | const struct firmware **fw_entry, | ||
861 | const char *fwtypename, | ||
862 | unsigned int fwcount, | ||
863 | const char *fwnames[]) | ||
864 | { | ||
865 | unsigned int idx; | ||
866 | int ret = -EINVAL; | ||
867 | for (idx = 0; idx < fwcount; idx++) { | ||
868 | ret = request_firmware(fw_entry, | ||
869 | fwnames[idx], | ||
870 | &hdw->usb_dev->dev); | ||
871 | if (!ret) { | ||
872 | trace_firmware("Located %s firmware: %s;" | ||
873 | " uploading...", | ||
874 | fwtypename, | ||
875 | fwnames[idx]); | ||
876 | return idx; | ||
877 | } | ||
878 | if (ret == -ENOENT) continue; | ||
879 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
880 | "request_firmware fatal error with code=%d",ret); | ||
881 | return ret; | ||
882 | } | ||
883 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
884 | "***WARNING***" | ||
885 | " Device %s firmware" | ||
886 | " seems to be missing.", | ||
887 | fwtypename); | ||
888 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
889 | "Did you install the pvrusb2 firmware files" | ||
890 | " in their proper location?"); | ||
891 | if (fwcount == 1) { | ||
892 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
893 | "request_firmware unable to locate %s file %s", | ||
894 | fwtypename,fwnames[0]); | ||
895 | } else { | ||
896 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
897 | "request_firmware unable to locate" | ||
898 | " one of the following %s files:", | ||
899 | fwtypename); | ||
900 | for (idx = 0; idx < fwcount; idx++) { | ||
901 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
902 | "request_firmware: Failed to find %s", | ||
903 | fwnames[idx]); | ||
904 | } | ||
905 | } | ||
906 | return ret; | ||
907 | } | ||
908 | |||
909 | |||
910 | /* | ||
911 | * pvr2_upload_firmware1(). | ||
912 | * | ||
913 | * Send the 8051 firmware to the device. After the upload, arrange for | ||
914 | * the device to re-enumerate. | ||
915 | * | ||
916 | * NOTE: the pointer to the firmware data given by request_firmware() | ||
917 | * is not suitable for a USB transaction. | ||
918 | * | ||
919 | */ | ||
920 | int pvr2_upload_firmware1(struct pvr2_hdw *hdw) | ||
921 | { | ||
922 | const struct firmware *fw_entry = 0; | ||
923 | void *fw_ptr; | ||
924 | unsigned int pipe; | ||
925 | int ret; | ||
926 | u16 address; | ||
927 | static const char *fw_files_29xxx[] = { | ||
928 | "v4l-pvrusb2-29xxx-01.fw", | ||
929 | }; | ||
930 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
931 | static const char *fw_files_24xxx[] = { | ||
932 | "v4l-pvrusb2-24xxx-01.fw", | ||
933 | }; | ||
934 | #endif | ||
935 | static const struct pvr2_string_table fw_file_defs[] = { | ||
936 | [PVR2_HDW_TYPE_29XXX] = { | ||
937 | fw_files_29xxx, | ||
938 | sizeof(fw_files_29xxx)/sizeof(fw_files_29xxx[0]), | ||
939 | }, | ||
940 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
941 | [PVR2_HDW_TYPE_24XXX] = { | ||
942 | fw_files_24xxx, | ||
943 | sizeof(fw_files_24xxx)/sizeof(fw_files_24xxx[0]), | ||
944 | }, | ||
945 | #endif | ||
946 | }; | ||
947 | hdw->fw1_state = FW1_STATE_FAILED; // default result | ||
948 | |||
949 | trace_firmware("pvr2_upload_firmware1"); | ||
950 | |||
951 | ret = pvr2_locate_firmware(hdw,&fw_entry,"fx2 controller", | ||
952 | fw_file_defs[hdw->hdw_type].cnt, | ||
953 | fw_file_defs[hdw->hdw_type].lst); | ||
954 | if (ret < 0) { | ||
955 | if (ret == -ENOENT) hdw->fw1_state = FW1_STATE_MISSING; | ||
956 | return ret; | ||
957 | } | ||
958 | |||
959 | usb_settoggle(hdw->usb_dev, 0 & 0xf, !(0 & USB_DIR_IN), 0); | ||
960 | usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f)); | ||
961 | |||
962 | pipe = usb_sndctrlpipe(hdw->usb_dev, 0); | ||
963 | |||
964 | if (fw_entry->size != 0x2000){ | ||
965 | pvr2_trace(PVR2_TRACE_ERROR_LEGS,"wrong fx2 firmware size"); | ||
966 | release_firmware(fw_entry); | ||
967 | return -ENOMEM; | ||
968 | } | ||
969 | |||
970 | fw_ptr = kmalloc(0x800, GFP_KERNEL); | ||
971 | if (fw_ptr == NULL){ | ||
972 | release_firmware(fw_entry); | ||
973 | return -ENOMEM; | ||
974 | } | ||
975 | |||
976 | /* We have to hold the CPU during firmware upload. */ | ||
977 | pvr2_hdw_cpureset_assert(hdw,1); | ||
978 | |||
979 | /* Upload the firmware to addresses 0000-1fff in 2048 (=0x800) byte | ||
980 | chunks. */ | ||
981 | |||
982 | ret = 0; | ||
983 | for(address = 0; address < fw_entry->size; address += 0x800) { | ||
984 | memcpy(fw_ptr, fw_entry->data + address, 0x800); | ||
985 | ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address, | ||
986 | 0, fw_ptr, 0x800, HZ); | ||
987 | } | ||
988 | |||
989 | trace_firmware("Upload done, releasing device's CPU"); | ||
990 | |||
991 | /* Now release the CPU. It will disconnect and reconnect later. */ | ||
992 | pvr2_hdw_cpureset_assert(hdw,0); | ||
993 | |||
994 | kfree(fw_ptr); | ||
995 | release_firmware(fw_entry); | ||
996 | |||
997 | trace_firmware("Upload done (%d bytes sent)",ret); | ||
998 | |||
999 | /* We should have written 8192 bytes */ | ||
1000 | if (ret == 8192) { | ||
1001 | hdw->fw1_state = FW1_STATE_RELOAD; | ||
1002 | return 0; | ||
1003 | } | ||
1004 | |||
1005 | return -EIO; | ||
1006 | } | ||
1007 | |||
1008 | |||
1009 | /* | ||
1010 | * pvr2_upload_firmware2() | ||
1011 | * | ||
1012 | * This uploads encoder firmware on endpoint 2. | ||
1013 | * | ||
1014 | */ | ||
1015 | |||
1016 | int pvr2_upload_firmware2(struct pvr2_hdw *hdw) | ||
1017 | { | ||
1018 | const struct firmware *fw_entry = 0; | ||
1019 | void *fw_ptr; | ||
1020 | unsigned int pipe, fw_len, fw_done; | ||
1021 | int actual_length; | ||
1022 | int ret = 0; | ||
1023 | int fwidx; | ||
1024 | static const char *fw_files[] = { | ||
1025 | CX2341X_FIRM_ENC_FILENAME, | ||
1026 | }; | ||
1027 | |||
1028 | trace_firmware("pvr2_upload_firmware2"); | ||
1029 | |||
1030 | ret = pvr2_locate_firmware(hdw,&fw_entry,"encoder", | ||
1031 | sizeof(fw_files)/sizeof(fw_files[0]), | ||
1032 | fw_files); | ||
1033 | if (ret < 0) return ret; | ||
1034 | fwidx = ret; | ||
1035 | ret = 0; | ||
1036 | /* Since we're about to completely reinitialize the encoder, | ||
1037 | invalidate our cached copy of its configuration state. The | ||
1038 | next time we configure the encoder, we'll do so fully. */ | ||
1039 | hdw->enc_cur_valid = 0; | ||
1040 | |||
1041 | /* First prepare firmware loading */ | ||
1042 | ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/ | ||
1043 | ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/ | ||
1044 | ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/ | ||
1045 | ret |= pvr2_hdw_cmd_deep_reset(hdw); | ||
1046 | ret |= pvr2_write_register(hdw, 0xa064, 0x00000000); /*APU command*/ | ||
1047 | ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000408); /*gpio dir*/ | ||
1048 | ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/ | ||
1049 | ret |= pvr2_write_register(hdw, 0x9058, 0xffffffed); /*VPU ctrl*/ | ||
1050 | ret |= pvr2_write_register(hdw, 0x9054, 0xfffffffd); /*reset hw blocks*/ | ||
1051 | ret |= pvr2_write_register(hdw, 0x07f8, 0x80000800); /*encoder SDRAM refresh*/ | ||
1052 | ret |= pvr2_write_register(hdw, 0x07fc, 0x0000001a); /*encoder SDRAM pre-charge*/ | ||
1053 | ret |= pvr2_write_register(hdw, 0x0700, 0x00000000); /*I2C clock*/ | ||
1054 | ret |= pvr2_write_register(hdw, 0xaa00, 0x00000000); /*unknown*/ | ||
1055 | ret |= pvr2_write_register(hdw, 0xaa04, 0x00057810); /*unknown*/ | ||
1056 | ret |= pvr2_write_register(hdw, 0xaa10, 0x00148500); /*unknown*/ | ||
1057 | ret |= pvr2_write_register(hdw, 0xaa18, 0x00840000); /*unknown*/ | ||
1058 | ret |= pvr2_write_u8(hdw, 0x52, 0); | ||
1059 | ret |= pvr2_write_u16(hdw, 0x0600, 0); | ||
1060 | |||
1061 | if (ret) { | ||
1062 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1063 | "firmware2 upload prep failed, ret=%d",ret); | ||
1064 | release_firmware(fw_entry); | ||
1065 | return ret; | ||
1066 | } | ||
1067 | |||
1068 | /* Now send firmware */ | ||
1069 | |||
1070 | fw_len = fw_entry->size; | ||
1071 | |||
1072 | if (fw_len % FIRMWARE_CHUNK_SIZE) { | ||
1073 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1074 | "size of %s firmware" | ||
1075 | " must be a multiple of 8192B", | ||
1076 | fw_files[fwidx]); | ||
1077 | release_firmware(fw_entry); | ||
1078 | return -1; | ||
1079 | } | ||
1080 | |||
1081 | fw_ptr = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL); | ||
1082 | if (fw_ptr == NULL){ | ||
1083 | release_firmware(fw_entry); | ||
1084 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1085 | "failed to allocate memory for firmware2 upload"); | ||
1086 | return -ENOMEM; | ||
1087 | } | ||
1088 | |||
1089 | pipe = usb_sndbulkpipe(hdw->usb_dev, PVR2_FIRMWARE_ENDPOINT); | ||
1090 | |||
1091 | for (fw_done = 0 ; (fw_done < fw_len) && !ret ; | ||
1092 | fw_done += FIRMWARE_CHUNK_SIZE ) { | ||
1093 | int i; | ||
1094 | memcpy(fw_ptr, fw_entry->data + fw_done, FIRMWARE_CHUNK_SIZE); | ||
1095 | /* Usbsnoop log shows that we must swap bytes... */ | ||
1096 | for (i = 0; i < FIRMWARE_CHUNK_SIZE/4 ; i++) | ||
1097 | ((u32 *)fw_ptr)[i] = ___swab32(((u32 *)fw_ptr)[i]); | ||
1098 | |||
1099 | ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr, | ||
1100 | FIRMWARE_CHUNK_SIZE, | ||
1101 | &actual_length, HZ); | ||
1102 | ret |= (actual_length != FIRMWARE_CHUNK_SIZE); | ||
1103 | } | ||
1104 | |||
1105 | trace_firmware("upload of %s : %i / %i ", | ||
1106 | fw_files[fwidx],fw_done,fw_len); | ||
1107 | |||
1108 | kfree(fw_ptr); | ||
1109 | release_firmware(fw_entry); | ||
1110 | |||
1111 | if (ret) { | ||
1112 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1113 | "firmware2 upload transfer failure"); | ||
1114 | return ret; | ||
1115 | } | ||
1116 | |||
1117 | /* Finish upload */ | ||
1118 | |||
1119 | ret |= pvr2_write_register(hdw, 0x9054, 0xffffffff); /*reset hw blocks*/ | ||
1120 | ret |= pvr2_write_register(hdw, 0x9058, 0xffffffe8); /*VPU ctrl*/ | ||
1121 | ret |= pvr2_write_u16(hdw, 0x0600, 0); | ||
1122 | |||
1123 | if (ret) { | ||
1124 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1125 | "firmware2 upload post-proc failure"); | ||
1126 | } else { | ||
1127 | hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE); | ||
1128 | } | ||
1129 | return ret; | ||
1130 | } | ||
1131 | |||
1132 | |||
1133 | #define FIRMWARE_RECOVERY_BITS \ | ||
1134 | ((1<<PVR2_SUBSYS_B_ENC_CFG) | \ | ||
1135 | (1<<PVR2_SUBSYS_B_ENC_RUN) | \ | ||
1136 | (1<<PVR2_SUBSYS_B_ENC_FIRMWARE) | \ | ||
1137 | (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) | ||
1138 | |||
1139 | /* | ||
1140 | |||
1141 | This single function is key to pretty much everything. The pvrusb2 | ||
1142 | device can logically be viewed as a series of subsystems which can be | ||
1143 | stopped / started or unconfigured / configured. To get things streaming, | ||
1144 | one must configure everything and start everything, but there may be | ||
1145 | various reasons over time to deconfigure something or stop something. | ||
1146 | This function handles all of this activity. Everything EVERYWHERE that | ||
1147 | must affect a subsystem eventually comes here to do the work. | ||
1148 | |||
1149 | The current state of all subsystems is represented by a single bit mask, | ||
1150 | known as subsys_enabled_mask. The bit positions are defined by the | ||
1151 | PVR2_SUBSYS_xxxx macros, with one subsystem per bit position. At any | ||
1152 | time the set of configured or active subsystems can be queried just by | ||
1153 | looking at that mask. To change bits in that mask, this function here | ||
1154 | must be called. The "msk" argument indicates which bit positions to | ||
1155 | change, and the "val" argument defines the new values for the positions | ||
1156 | defined by "msk". | ||
1157 | |||
1158 | There is a priority ordering of starting / stopping things, and for | ||
1159 | multiple requested changes, this function implements that ordering. | ||
1160 | (Thus we will act on a request to load encoder firmware before we | ||
1161 | configure the encoder.) In addition to priority ordering, there is a | ||
1162 | recovery strategy implemented here. If a particular step fails and we | ||
1163 | detect that failure, this function will clear the affected subsystem bits | ||
1164 | and restart. Thus we have a means for recovering from a dead encoder: | ||
1165 | Clear all bits that correspond to subsystems that we need to restart / | ||
1166 | reconfigure and start over. | ||
1167 | |||
1168 | */ | ||
1169 | void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw, | ||
1170 | unsigned long msk,unsigned long val) | ||
1171 | { | ||
1172 | unsigned long nmsk; | ||
1173 | unsigned long vmsk; | ||
1174 | int ret; | ||
1175 | unsigned int tryCount = 0; | ||
1176 | |||
1177 | if (!hdw->flag_ok) return; | ||
1178 | |||
1179 | msk &= PVR2_SUBSYS_ALL; | ||
1180 | nmsk = (hdw->subsys_enabled_mask & ~msk) | (val & msk); | ||
1181 | nmsk &= PVR2_SUBSYS_ALL; | ||
1182 | |||
1183 | for (;;) { | ||
1184 | tryCount++; | ||
1185 | if (!((nmsk ^ hdw->subsys_enabled_mask) & | ||
1186 | PVR2_SUBSYS_ALL)) break; | ||
1187 | if (tryCount > 4) { | ||
1188 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1189 | "Too many retries when configuring device;" | ||
1190 | " giving up"); | ||
1191 | pvr2_hdw_render_useless(hdw); | ||
1192 | break; | ||
1193 | } | ||
1194 | if (tryCount > 1) { | ||
1195 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1196 | "Retrying device reconfiguration"); | ||
1197 | } | ||
1198 | pvr2_trace(PVR2_TRACE_INIT, | ||
1199 | "subsys mask changing 0x%lx:0x%lx" | ||
1200 | " from 0x%lx to 0x%lx", | ||
1201 | msk,val,hdw->subsys_enabled_mask,nmsk); | ||
1202 | |||
1203 | vmsk = (nmsk ^ hdw->subsys_enabled_mask) & | ||
1204 | hdw->subsys_enabled_mask; | ||
1205 | if (vmsk) { | ||
1206 | if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) { | ||
1207 | pvr2_trace(PVR2_TRACE_CTL, | ||
1208 | "/*---TRACE_CTL----*/" | ||
1209 | " pvr2_encoder_stop"); | ||
1210 | ret = pvr2_encoder_stop(hdw); | ||
1211 | if (ret) { | ||
1212 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1213 | "Error recovery initiated"); | ||
1214 | hdw->subsys_enabled_mask &= | ||
1215 | ~FIRMWARE_RECOVERY_BITS; | ||
1216 | continue; | ||
1217 | } | ||
1218 | } | ||
1219 | if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) { | ||
1220 | pvr2_trace(PVR2_TRACE_CTL, | ||
1221 | "/*---TRACE_CTL----*/" | ||
1222 | " pvr2_hdw_cmd_usbstream(0)"); | ||
1223 | pvr2_hdw_cmd_usbstream(hdw,0); | ||
1224 | } | ||
1225 | if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) { | ||
1226 | pvr2_trace(PVR2_TRACE_CTL, | ||
1227 | "/*---TRACE_CTL----*/" | ||
1228 | " decoder disable"); | ||
1229 | if (hdw->decoder_ctrl) { | ||
1230 | hdw->decoder_ctrl->enable( | ||
1231 | hdw->decoder_ctrl->ctxt,0); | ||
1232 | } else { | ||
1233 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1234 | "WARNING:" | ||
1235 | " No decoder present"); | ||
1236 | } | ||
1237 | hdw->subsys_enabled_mask &= | ||
1238 | ~(1<<PVR2_SUBSYS_B_DIGITIZER_RUN); | ||
1239 | } | ||
1240 | if (vmsk & PVR2_SUBSYS_CFG_ALL) { | ||
1241 | hdw->subsys_enabled_mask &= | ||
1242 | ~(vmsk & PVR2_SUBSYS_CFG_ALL); | ||
1243 | } | ||
1244 | } | ||
1245 | vmsk = (nmsk ^ hdw->subsys_enabled_mask) & nmsk; | ||
1246 | if (vmsk) { | ||
1247 | if (vmsk & (1<<PVR2_SUBSYS_B_ENC_FIRMWARE)) { | ||
1248 | pvr2_trace(PVR2_TRACE_CTL, | ||
1249 | "/*---TRACE_CTL----*/" | ||
1250 | " pvr2_upload_firmware2"); | ||
1251 | ret = pvr2_upload_firmware2(hdw); | ||
1252 | if (ret) { | ||
1253 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1254 | "Failure uploading encoder" | ||
1255 | " firmware"); | ||
1256 | pvr2_hdw_render_useless(hdw); | ||
1257 | break; | ||
1258 | } | ||
1259 | } | ||
1260 | if (vmsk & (1<<PVR2_SUBSYS_B_ENC_CFG)) { | ||
1261 | pvr2_trace(PVR2_TRACE_CTL, | ||
1262 | "/*---TRACE_CTL----*/" | ||
1263 | " pvr2_encoder_configure"); | ||
1264 | ret = pvr2_encoder_configure(hdw); | ||
1265 | if (ret) { | ||
1266 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1267 | "Error recovery initiated"); | ||
1268 | hdw->subsys_enabled_mask &= | ||
1269 | ~FIRMWARE_RECOVERY_BITS; | ||
1270 | continue; | ||
1271 | } | ||
1272 | } | ||
1273 | if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) { | ||
1274 | pvr2_trace(PVR2_TRACE_CTL, | ||
1275 | "/*---TRACE_CTL----*/" | ||
1276 | " decoder enable"); | ||
1277 | if (hdw->decoder_ctrl) { | ||
1278 | hdw->decoder_ctrl->enable( | ||
1279 | hdw->decoder_ctrl->ctxt,!0); | ||
1280 | } else { | ||
1281 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1282 | "WARNING:" | ||
1283 | " No decoder present"); | ||
1284 | } | ||
1285 | hdw->subsys_enabled_mask |= | ||
1286 | (1<<PVR2_SUBSYS_B_DIGITIZER_RUN); | ||
1287 | } | ||
1288 | if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) { | ||
1289 | pvr2_trace(PVR2_TRACE_CTL, | ||
1290 | "/*---TRACE_CTL----*/" | ||
1291 | " pvr2_hdw_cmd_usbstream(1)"); | ||
1292 | pvr2_hdw_cmd_usbstream(hdw,!0); | ||
1293 | } | ||
1294 | if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) { | ||
1295 | pvr2_trace(PVR2_TRACE_CTL, | ||
1296 | "/*---TRACE_CTL----*/" | ||
1297 | " pvr2_encoder_start"); | ||
1298 | ret = pvr2_encoder_start(hdw); | ||
1299 | if (ret) { | ||
1300 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1301 | "Error recovery initiated"); | ||
1302 | hdw->subsys_enabled_mask &= | ||
1303 | ~FIRMWARE_RECOVERY_BITS; | ||
1304 | continue; | ||
1305 | } | ||
1306 | } | ||
1307 | } | ||
1308 | } | ||
1309 | } | ||
1310 | |||
1311 | |||
1312 | void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw, | ||
1313 | unsigned long msk,unsigned long val) | ||
1314 | { | ||
1315 | LOCK_TAKE(hdw->big_lock); do { | ||
1316 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,msk,val); | ||
1317 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
1318 | } | ||
1319 | |||
1320 | |||
1321 | void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk) | ||
1322 | { | ||
1323 | pvr2_hdw_subsys_bit_chg(hdw,msk,msk); | ||
1324 | } | ||
1325 | |||
1326 | |||
1327 | void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk) | ||
1328 | { | ||
1329 | pvr2_hdw_subsys_bit_chg(hdw,msk,0); | ||
1330 | } | ||
1331 | |||
1332 | |||
1333 | unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *hdw) | ||
1334 | { | ||
1335 | return hdw->subsys_enabled_mask; | ||
1336 | } | ||
1337 | |||
1338 | |||
1339 | unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *hdw) | ||
1340 | { | ||
1341 | return hdw->subsys_stream_mask; | ||
1342 | } | ||
1343 | |||
1344 | |||
1345 | void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw, | ||
1346 | unsigned long msk, | ||
1347 | unsigned long val) | ||
1348 | { | ||
1349 | unsigned long val2; | ||
1350 | msk &= PVR2_SUBSYS_ALL; | ||
1351 | val2 = ((hdw->subsys_stream_mask & ~msk) | (val & msk)); | ||
1352 | pvr2_trace(PVR2_TRACE_INIT, | ||
1353 | "stream mask changing 0x%lx:0x%lx from 0x%lx to 0x%lx", | ||
1354 | msk,val,hdw->subsys_stream_mask,val2); | ||
1355 | hdw->subsys_stream_mask = val2; | ||
1356 | } | ||
1357 | |||
1358 | |||
1359 | void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw, | ||
1360 | unsigned long msk, | ||
1361 | unsigned long val) | ||
1362 | { | ||
1363 | LOCK_TAKE(hdw->big_lock); do { | ||
1364 | pvr2_hdw_subsys_stream_bit_chg_no_lock(hdw,msk,val); | ||
1365 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
1366 | } | ||
1367 | |||
1368 | |||
1369 | int pvr2_hdw_set_streaming_no_lock(struct pvr2_hdw *hdw,int enableFl) | ||
1370 | { | ||
1371 | if ((!enableFl) == !(hdw->flag_streaming_enabled)) return 0; | ||
1372 | if (enableFl) { | ||
1373 | pvr2_trace(PVR2_TRACE_START_STOP, | ||
1374 | "/*--TRACE_STREAM--*/ enable"); | ||
1375 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,~0); | ||
1376 | } else { | ||
1377 | pvr2_trace(PVR2_TRACE_START_STOP, | ||
1378 | "/*--TRACE_STREAM--*/ disable"); | ||
1379 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0); | ||
1380 | } | ||
1381 | if (!hdw->flag_ok) return -EIO; | ||
1382 | hdw->flag_streaming_enabled = enableFl != 0; | ||
1383 | return 0; | ||
1384 | } | ||
1385 | |||
1386 | |||
1387 | int pvr2_hdw_get_streaming(struct pvr2_hdw *hdw) | ||
1388 | { | ||
1389 | return hdw->flag_streaming_enabled != 0; | ||
1390 | } | ||
1391 | |||
1392 | |||
1393 | int pvr2_hdw_set_streaming(struct pvr2_hdw *hdw,int enable_flag) | ||
1394 | { | ||
1395 | int ret; | ||
1396 | LOCK_TAKE(hdw->big_lock); do { | ||
1397 | ret = pvr2_hdw_set_streaming_no_lock(hdw,enable_flag); | ||
1398 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
1399 | return ret; | ||
1400 | } | ||
1401 | |||
1402 | |||
1403 | int pvr2_hdw_set_stream_type_no_lock(struct pvr2_hdw *hdw, | ||
1404 | enum pvr2_config config) | ||
1405 | { | ||
1406 | unsigned long sm = hdw->subsys_enabled_mask; | ||
1407 | if (!hdw->flag_ok) return -EIO; | ||
1408 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0); | ||
1409 | hdw->config = config; | ||
1410 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,sm); | ||
1411 | return 0; | ||
1412 | } | ||
1413 | |||
1414 | |||
1415 | int pvr2_hdw_set_stream_type(struct pvr2_hdw *hdw,enum pvr2_config config) | ||
1416 | { | ||
1417 | int ret; | ||
1418 | if (!hdw->flag_ok) return -EIO; | ||
1419 | LOCK_TAKE(hdw->big_lock); | ||
1420 | ret = pvr2_hdw_set_stream_type_no_lock(hdw,config); | ||
1421 | LOCK_GIVE(hdw->big_lock); | ||
1422 | return ret; | ||
1423 | } | ||
1424 | |||
1425 | |||
1426 | static int get_default_tuner_type(struct pvr2_hdw *hdw) | ||
1427 | { | ||
1428 | int unit_number = hdw->unit_number; | ||
1429 | int tp = -1; | ||
1430 | if ((unit_number >= 0) && (unit_number < PVR_NUM)) { | ||
1431 | tp = tuner[unit_number]; | ||
1432 | } | ||
1433 | if (tp < 0) return -EINVAL; | ||
1434 | hdw->tuner_type = tp; | ||
1435 | return 0; | ||
1436 | } | ||
1437 | |||
1438 | |||
1439 | static v4l2_std_id get_default_standard(struct pvr2_hdw *hdw) | ||
1440 | { | ||
1441 | int unit_number = hdw->unit_number; | ||
1442 | int tp = 0; | ||
1443 | if ((unit_number >= 0) && (unit_number < PVR_NUM)) { | ||
1444 | tp = video_std[unit_number]; | ||
1445 | } | ||
1446 | return tp; | ||
1447 | } | ||
1448 | |||
1449 | |||
1450 | static unsigned int get_default_error_tolerance(struct pvr2_hdw *hdw) | ||
1451 | { | ||
1452 | int unit_number = hdw->unit_number; | ||
1453 | int tp = 0; | ||
1454 | if ((unit_number >= 0) && (unit_number < PVR_NUM)) { | ||
1455 | tp = tolerance[unit_number]; | ||
1456 | } | ||
1457 | return tp; | ||
1458 | } | ||
1459 | |||
1460 | |||
1461 | static int pvr2_hdw_check_firmware(struct pvr2_hdw *hdw) | ||
1462 | { | ||
1463 | /* Try a harmless request to fetch the eeprom's address over | ||
1464 | endpoint 1. See what happens. Only the full FX2 image can | ||
1465 | respond to this. If this probe fails then likely the FX2 | ||
1466 | firmware needs to be loaded. */ | ||
1467 | int result; | ||
1468 | LOCK_TAKE(hdw->ctl_lock); do { | ||
1469 | hdw->cmd_buffer[0] = 0xeb; | ||
1470 | result = pvr2_send_request_ex(hdw,HZ*1,!0, | ||
1471 | hdw->cmd_buffer,1, | ||
1472 | hdw->cmd_buffer,1); | ||
1473 | if (result < 0) break; | ||
1474 | } while(0); LOCK_GIVE(hdw->ctl_lock); | ||
1475 | if (result) { | ||
1476 | pvr2_trace(PVR2_TRACE_INIT, | ||
1477 | "Probe of device endpoint 1 result status %d", | ||
1478 | result); | ||
1479 | } else { | ||
1480 | pvr2_trace(PVR2_TRACE_INIT, | ||
1481 | "Probe of device endpoint 1 succeeded"); | ||
1482 | } | ||
1483 | return result == 0; | ||
1484 | } | ||
1485 | |||
1486 | static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw) | ||
1487 | { | ||
1488 | char buf[40]; | ||
1489 | unsigned int bcnt; | ||
1490 | v4l2_std_id std1,std2; | ||
1491 | |||
1492 | std1 = get_default_standard(hdw); | ||
1493 | |||
1494 | bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom); | ||
1495 | pvr2_trace(PVR2_TRACE_INIT, | ||
1496 | "Supported video standard(s) reported by eeprom: %.*s", | ||
1497 | bcnt,buf); | ||
1498 | |||
1499 | hdw->std_mask_avail = hdw->std_mask_eeprom; | ||
1500 | |||
1501 | std2 = std1 & ~hdw->std_mask_avail; | ||
1502 | if (std2) { | ||
1503 | bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2); | ||
1504 | pvr2_trace(PVR2_TRACE_INIT, | ||
1505 | "Expanding supported video standards" | ||
1506 | " to include: %.*s", | ||
1507 | bcnt,buf); | ||
1508 | hdw->std_mask_avail |= std2; | ||
1509 | } | ||
1510 | |||
1511 | pvr2_hdw_internal_set_std_avail(hdw); | ||
1512 | |||
1513 | if (std1) { | ||
1514 | bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std1); | ||
1515 | pvr2_trace(PVR2_TRACE_INIT, | ||
1516 | "Initial video standard forced to %.*s", | ||
1517 | bcnt,buf); | ||
1518 | hdw->std_mask_cur = std1; | ||
1519 | hdw->std_dirty = !0; | ||
1520 | pvr2_hdw_internal_find_stdenum(hdw); | ||
1521 | return; | ||
1522 | } | ||
1523 | |||
1524 | if (hdw->std_enum_cnt > 1) { | ||
1525 | // Autoselect the first listed standard | ||
1526 | hdw->std_enum_cur = 1; | ||
1527 | hdw->std_mask_cur = hdw->std_defs[hdw->std_enum_cur-1].id; | ||
1528 | hdw->std_dirty = !0; | ||
1529 | pvr2_trace(PVR2_TRACE_INIT, | ||
1530 | "Initial video standard auto-selected to %s", | ||
1531 | hdw->std_defs[hdw->std_enum_cur-1].name); | ||
1532 | return; | ||
1533 | } | ||
1534 | |||
1535 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1536 | "Unable to select a viable initial video standard"); | ||
1537 | } | ||
1538 | |||
1539 | |||
1540 | static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw) | ||
1541 | { | ||
1542 | int ret; | ||
1543 | unsigned int idx; | ||
1544 | struct pvr2_ctrl *cptr; | ||
1545 | int reloadFl = 0; | ||
1546 | if (!reloadFl) { | ||
1547 | reloadFl = (hdw->usb_intf->cur_altsetting->desc.bNumEndpoints | ||
1548 | == 0); | ||
1549 | if (reloadFl) { | ||
1550 | pvr2_trace(PVR2_TRACE_INIT, | ||
1551 | "USB endpoint config looks strange" | ||
1552 | "; possibly firmware needs to be loaded"); | ||
1553 | } | ||
1554 | } | ||
1555 | if (!reloadFl) { | ||
1556 | reloadFl = !pvr2_hdw_check_firmware(hdw); | ||
1557 | if (reloadFl) { | ||
1558 | pvr2_trace(PVR2_TRACE_INIT, | ||
1559 | "Check for FX2 firmware failed" | ||
1560 | "; possibly firmware needs to be loaded"); | ||
1561 | } | ||
1562 | } | ||
1563 | if (reloadFl) { | ||
1564 | if (pvr2_upload_firmware1(hdw) != 0) { | ||
1565 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1566 | "Failure uploading firmware1"); | ||
1567 | } | ||
1568 | return; | ||
1569 | } | ||
1570 | hdw->fw1_state = FW1_STATE_OK; | ||
1571 | |||
1572 | if (initusbreset) { | ||
1573 | pvr2_hdw_device_reset(hdw); | ||
1574 | } | ||
1575 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1576 | |||
1577 | for (idx = 0; idx < pvr2_client_lists[hdw->hdw_type].cnt; idx++) { | ||
1578 | request_module(pvr2_client_lists[hdw->hdw_type].lst[idx]); | ||
1579 | } | ||
1580 | |||
1581 | pvr2_hdw_cmd_powerup(hdw); | ||
1582 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1583 | |||
1584 | if (pvr2_upload_firmware2(hdw)){ | ||
1585 | pvr2_trace(PVR2_TRACE_ERROR_LEGS,"device unstable!!"); | ||
1586 | pvr2_hdw_render_useless(hdw); | ||
1587 | return; | ||
1588 | } | ||
1589 | |||
1590 | // This step MUST happen after the earlier powerup step. | ||
1591 | pvr2_i2c_core_init(hdw); | ||
1592 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1593 | |||
1594 | for (idx = 0; idx < CTRLDEF_COUNT; idx++) { | ||
1595 | cptr = hdw->controls + idx; | ||
1596 | if (cptr->info->skip_init) continue; | ||
1597 | if (!cptr->info->set_value) continue; | ||
1598 | cptr->info->set_value(cptr,~0,cptr->info->default_value); | ||
1599 | } | ||
1600 | |||
1601 | // Do not use pvr2_reset_ctl_endpoints() here. It is not | ||
1602 | // thread-safe against the normal pvr2_send_request() mechanism. | ||
1603 | // (We should make it thread safe). | ||
1604 | |||
1605 | ret = pvr2_hdw_get_eeprom_addr(hdw); | ||
1606 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1607 | if (ret < 0) { | ||
1608 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1609 | "Unable to determine location of eeprom, skipping"); | ||
1610 | } else { | ||
1611 | hdw->eeprom_addr = ret; | ||
1612 | pvr2_eeprom_analyze(hdw); | ||
1613 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1614 | } | ||
1615 | |||
1616 | pvr2_hdw_setup_std(hdw); | ||
1617 | |||
1618 | if (!get_default_tuner_type(hdw)) { | ||
1619 | pvr2_trace(PVR2_TRACE_INIT, | ||
1620 | "pvr2_hdw_setup: Tuner type overridden to %d", | ||
1621 | hdw->tuner_type); | ||
1622 | } | ||
1623 | |||
1624 | hdw->tuner_updated = !0; | ||
1625 | pvr2_i2c_core_check_stale(hdw); | ||
1626 | hdw->tuner_updated = 0; | ||
1627 | |||
1628 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1629 | |||
1630 | pvr2_hdw_commit_ctl_internal(hdw); | ||
1631 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1632 | |||
1633 | hdw->vid_stream = pvr2_stream_create(); | ||
1634 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1635 | pvr2_trace(PVR2_TRACE_INIT, | ||
1636 | "pvr2_hdw_setup: video stream is %p",hdw->vid_stream); | ||
1637 | if (hdw->vid_stream) { | ||
1638 | idx = get_default_error_tolerance(hdw); | ||
1639 | if (idx) { | ||
1640 | pvr2_trace(PVR2_TRACE_INIT, | ||
1641 | "pvr2_hdw_setup: video stream %p" | ||
1642 | " setting tolerance %u", | ||
1643 | hdw->vid_stream,idx); | ||
1644 | } | ||
1645 | pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev, | ||
1646 | PVR2_VID_ENDPOINT,idx); | ||
1647 | } | ||
1648 | |||
1649 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1650 | |||
1651 | /* Make sure everything is up to date */ | ||
1652 | pvr2_i2c_core_sync(hdw); | ||
1653 | |||
1654 | if (!pvr2_hdw_dev_ok(hdw)) return; | ||
1655 | |||
1656 | hdw->flag_init_ok = !0; | ||
1657 | } | ||
1658 | |||
1659 | |||
1660 | int pvr2_hdw_setup(struct pvr2_hdw *hdw) | ||
1661 | { | ||
1662 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) begin",hdw); | ||
1663 | LOCK_TAKE(hdw->big_lock); do { | ||
1664 | pvr2_hdw_setup_low(hdw); | ||
1665 | pvr2_trace(PVR2_TRACE_INIT, | ||
1666 | "pvr2_hdw_setup(hdw=%p) done, ok=%d init_ok=%d", | ||
1667 | hdw,hdw->flag_ok,hdw->flag_init_ok); | ||
1668 | if (pvr2_hdw_dev_ok(hdw)) { | ||
1669 | if (pvr2_hdw_init_ok(hdw)) { | ||
1670 | pvr2_trace( | ||
1671 | PVR2_TRACE_INFO, | ||
1672 | "Device initialization" | ||
1673 | " completed successfully."); | ||
1674 | break; | ||
1675 | } | ||
1676 | if (hdw->fw1_state == FW1_STATE_RELOAD) { | ||
1677 | pvr2_trace( | ||
1678 | PVR2_TRACE_INFO, | ||
1679 | "Device microcontroller firmware" | ||
1680 | " (re)loaded; it should now reset" | ||
1681 | " and reconnect."); | ||
1682 | break; | ||
1683 | } | ||
1684 | pvr2_trace( | ||
1685 | PVR2_TRACE_ERROR_LEGS, | ||
1686 | "Device initialization was not successful."); | ||
1687 | if (hdw->fw1_state == FW1_STATE_MISSING) { | ||
1688 | pvr2_trace( | ||
1689 | PVR2_TRACE_ERROR_LEGS, | ||
1690 | "Giving up since device" | ||
1691 | " microcontroller firmware" | ||
1692 | " appears to be missing."); | ||
1693 | break; | ||
1694 | } | ||
1695 | } | ||
1696 | if (procreload) { | ||
1697 | pvr2_trace( | ||
1698 | PVR2_TRACE_ERROR_LEGS, | ||
1699 | "Attempting pvrusb2 recovery by reloading" | ||
1700 | " primary firmware."); | ||
1701 | pvr2_trace( | ||
1702 | PVR2_TRACE_ERROR_LEGS, | ||
1703 | "If this works, device should disconnect" | ||
1704 | " and reconnect in a sane state."); | ||
1705 | hdw->fw1_state = FW1_STATE_UNKNOWN; | ||
1706 | pvr2_upload_firmware1(hdw); | ||
1707 | } else { | ||
1708 | pvr2_trace( | ||
1709 | PVR2_TRACE_ERROR_LEGS, | ||
1710 | "***WARNING*** pvrusb2 device hardware" | ||
1711 | " appears to be jammed" | ||
1712 | " and I can't clear it."); | ||
1713 | pvr2_trace( | ||
1714 | PVR2_TRACE_ERROR_LEGS, | ||
1715 | "You might need to power cycle" | ||
1716 | " the pvrusb2 device" | ||
1717 | " in order to recover."); | ||
1718 | } | ||
1719 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
1720 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw); | ||
1721 | return hdw->flag_init_ok; | ||
1722 | } | ||
1723 | |||
1724 | |||
1725 | /* Create and return a structure for interacting with the underlying | ||
1726 | hardware */ | ||
1727 | struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, | ||
1728 | const struct usb_device_id *devid) | ||
1729 | { | ||
1730 | unsigned int idx,cnt1,cnt2; | ||
1731 | struct pvr2_hdw *hdw; | ||
1732 | unsigned int hdw_type; | ||
1733 | int valid_std_mask; | ||
1734 | struct pvr2_ctrl *cptr; | ||
1735 | __u8 ifnum; | ||
1736 | struct v4l2_queryctrl qctrl; | ||
1737 | struct pvr2_ctl_info *ciptr; | ||
1738 | |||
1739 | hdw_type = devid - pvr2_device_table; | ||
1740 | if (hdw_type >= | ||
1741 | sizeof(pvr2_device_names)/sizeof(pvr2_device_names[0])) { | ||
1742 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
1743 | "Bogus device type of %u reported",hdw_type); | ||
1744 | return 0; | ||
1745 | } | ||
1746 | |||
1747 | hdw = kmalloc(sizeof(*hdw),GFP_KERNEL); | ||
1748 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_create: hdw=%p, type \"%s\"", | ||
1749 | hdw,pvr2_device_names[hdw_type]); | ||
1750 | if (!hdw) goto fail; | ||
1751 | memset(hdw,0,sizeof(*hdw)); | ||
1752 | cx2341x_fill_defaults(&hdw->enc_ctl_state); | ||
1753 | |||
1754 | hdw->control_cnt = CTRLDEF_COUNT; | ||
1755 | hdw->control_cnt += MPEGDEF_COUNT; | ||
1756 | hdw->controls = kmalloc(sizeof(struct pvr2_ctrl) * hdw->control_cnt, | ||
1757 | GFP_KERNEL); | ||
1758 | if (!hdw->controls) goto fail; | ||
1759 | memset(hdw->controls,0,sizeof(struct pvr2_ctrl) * hdw->control_cnt); | ||
1760 | hdw->hdw_type = hdw_type; | ||
1761 | for (idx = 0; idx < hdw->control_cnt; idx++) { | ||
1762 | cptr = hdw->controls + idx; | ||
1763 | cptr->hdw = hdw; | ||
1764 | } | ||
1765 | for (idx = 0; idx < 32; idx++) { | ||
1766 | hdw->std_mask_ptrs[idx] = hdw->std_mask_names[idx]; | ||
1767 | } | ||
1768 | for (idx = 0; idx < CTRLDEF_COUNT; idx++) { | ||
1769 | cptr = hdw->controls + idx; | ||
1770 | cptr->info = control_defs+idx; | ||
1771 | } | ||
1772 | /* Define and configure additional controls from cx2341x module. */ | ||
1773 | hdw->mpeg_ctrl_info = kmalloc( | ||
1774 | sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT, GFP_KERNEL); | ||
1775 | if (!hdw->mpeg_ctrl_info) goto fail; | ||
1776 | memset(hdw->mpeg_ctrl_info,0, | ||
1777 | sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT); | ||
1778 | for (idx = 0; idx < MPEGDEF_COUNT; idx++) { | ||
1779 | cptr = hdw->controls + idx + CTRLDEF_COUNT; | ||
1780 | ciptr = &(hdw->mpeg_ctrl_info[idx].info); | ||
1781 | ciptr->desc = hdw->mpeg_ctrl_info[idx].desc; | ||
1782 | ciptr->name = mpeg_ids[idx].strid; | ||
1783 | ciptr->v4l_id = mpeg_ids[idx].id; | ||
1784 | ciptr->skip_init = !0; | ||
1785 | ciptr->get_value = ctrl_cx2341x_get; | ||
1786 | ciptr->get_v4lflags = ctrl_cx2341x_getv4lflags; | ||
1787 | ciptr->is_dirty = ctrl_cx2341x_is_dirty; | ||
1788 | if (!idx) ciptr->clear_dirty = ctrl_cx2341x_clear_dirty; | ||
1789 | qctrl.id = ciptr->v4l_id; | ||
1790 | cx2341x_ctrl_query(&hdw->enc_ctl_state,&qctrl); | ||
1791 | if (!(qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)) { | ||
1792 | ciptr->set_value = ctrl_cx2341x_set; | ||
1793 | } | ||
1794 | strncpy(hdw->mpeg_ctrl_info[idx].desc,qctrl.name, | ||
1795 | PVR2_CTLD_INFO_DESC_SIZE); | ||
1796 | hdw->mpeg_ctrl_info[idx].desc[PVR2_CTLD_INFO_DESC_SIZE-1] = 0; | ||
1797 | ciptr->default_value = qctrl.default_value; | ||
1798 | switch (qctrl.type) { | ||
1799 | default: | ||
1800 | case V4L2_CTRL_TYPE_INTEGER: | ||
1801 | ciptr->type = pvr2_ctl_int; | ||
1802 | ciptr->def.type_int.min_value = qctrl.minimum; | ||
1803 | ciptr->def.type_int.max_value = qctrl.maximum; | ||
1804 | break; | ||
1805 | case V4L2_CTRL_TYPE_BOOLEAN: | ||
1806 | ciptr->type = pvr2_ctl_bool; | ||
1807 | break; | ||
1808 | case V4L2_CTRL_TYPE_MENU: | ||
1809 | ciptr->type = pvr2_ctl_enum; | ||
1810 | ciptr->def.type_enum.value_names = | ||
1811 | cx2341x_ctrl_get_menu(ciptr->v4l_id); | ||
1812 | for (cnt1 = 0; | ||
1813 | ciptr->def.type_enum.value_names[cnt1] != NULL; | ||
1814 | cnt1++) { } | ||
1815 | ciptr->def.type_enum.count = cnt1; | ||
1816 | break; | ||
1817 | } | ||
1818 | cptr->info = ciptr; | ||
1819 | } | ||
1820 | |||
1821 | // Initialize video standard enum dynamic control | ||
1822 | cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDENUM); | ||
1823 | if (cptr) { | ||
1824 | memcpy(&hdw->std_info_enum,cptr->info, | ||
1825 | sizeof(hdw->std_info_enum)); | ||
1826 | cptr->info = &hdw->std_info_enum; | ||
1827 | |||
1828 | } | ||
1829 | // Initialize control data regarding video standard masks | ||
1830 | valid_std_mask = pvr2_std_get_usable(); | ||
1831 | for (idx = 0; idx < 32; idx++) { | ||
1832 | if (!(valid_std_mask & (1 << idx))) continue; | ||
1833 | cnt1 = pvr2_std_id_to_str( | ||
1834 | hdw->std_mask_names[idx], | ||
1835 | sizeof(hdw->std_mask_names[idx])-1, | ||
1836 | 1 << idx); | ||
1837 | hdw->std_mask_names[idx][cnt1] = 0; | ||
1838 | } | ||
1839 | cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDAVAIL); | ||
1840 | if (cptr) { | ||
1841 | memcpy(&hdw->std_info_avail,cptr->info, | ||
1842 | sizeof(hdw->std_info_avail)); | ||
1843 | cptr->info = &hdw->std_info_avail; | ||
1844 | hdw->std_info_avail.def.type_bitmask.bit_names = | ||
1845 | hdw->std_mask_ptrs; | ||
1846 | hdw->std_info_avail.def.type_bitmask.valid_bits = | ||
1847 | valid_std_mask; | ||
1848 | } | ||
1849 | cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR); | ||
1850 | if (cptr) { | ||
1851 | memcpy(&hdw->std_info_cur,cptr->info, | ||
1852 | sizeof(hdw->std_info_cur)); | ||
1853 | cptr->info = &hdw->std_info_cur; | ||
1854 | hdw->std_info_cur.def.type_bitmask.bit_names = | ||
1855 | hdw->std_mask_ptrs; | ||
1856 | hdw->std_info_cur.def.type_bitmask.valid_bits = | ||
1857 | valid_std_mask; | ||
1858 | } | ||
1859 | |||
1860 | hdw->eeprom_addr = -1; | ||
1861 | hdw->unit_number = -1; | ||
1862 | hdw->v4l_minor_number = -1; | ||
1863 | hdw->ctl_write_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL); | ||
1864 | if (!hdw->ctl_write_buffer) goto fail; | ||
1865 | hdw->ctl_read_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL); | ||
1866 | if (!hdw->ctl_read_buffer) goto fail; | ||
1867 | hdw->ctl_write_urb = usb_alloc_urb(0,GFP_KERNEL); | ||
1868 | if (!hdw->ctl_write_urb) goto fail; | ||
1869 | hdw->ctl_read_urb = usb_alloc_urb(0,GFP_KERNEL); | ||
1870 | if (!hdw->ctl_read_urb) goto fail; | ||
1871 | |||
1872 | down(&pvr2_unit_sem); do { | ||
1873 | for (idx = 0; idx < PVR_NUM; idx++) { | ||
1874 | if (unit_pointers[idx]) continue; | ||
1875 | hdw->unit_number = idx; | ||
1876 | unit_pointers[idx] = hdw; | ||
1877 | break; | ||
1878 | } | ||
1879 | } while (0); up(&pvr2_unit_sem); | ||
1880 | |||
1881 | cnt1 = 0; | ||
1882 | cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2"); | ||
1883 | cnt1 += cnt2; | ||
1884 | if (hdw->unit_number >= 0) { | ||
1885 | cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"_%c", | ||
1886 | ('a' + hdw->unit_number)); | ||
1887 | cnt1 += cnt2; | ||
1888 | } | ||
1889 | if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1; | ||
1890 | hdw->name[cnt1] = 0; | ||
1891 | |||
1892 | pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s", | ||
1893 | hdw->unit_number,hdw->name); | ||
1894 | |||
1895 | hdw->tuner_type = -1; | ||
1896 | hdw->flag_ok = !0; | ||
1897 | /* Initialize the mask of subsystems that we will shut down when we | ||
1898 | stop streaming. */ | ||
1899 | hdw->subsys_stream_mask = PVR2_SUBSYS_RUN_ALL; | ||
1900 | hdw->subsys_stream_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG); | ||
1901 | |||
1902 | pvr2_trace(PVR2_TRACE_INIT,"subsys_stream_mask: 0x%lx", | ||
1903 | hdw->subsys_stream_mask); | ||
1904 | |||
1905 | hdw->usb_intf = intf; | ||
1906 | hdw->usb_dev = interface_to_usbdev(intf); | ||
1907 | |||
1908 | ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber; | ||
1909 | usb_set_interface(hdw->usb_dev,ifnum,0); | ||
1910 | |||
1911 | mutex_init(&hdw->ctl_lock_mutex); | ||
1912 | mutex_init(&hdw->big_lock_mutex); | ||
1913 | |||
1914 | return hdw; | ||
1915 | fail: | ||
1916 | if (hdw) { | ||
1917 | if (hdw->ctl_read_urb) usb_free_urb(hdw->ctl_read_urb); | ||
1918 | if (hdw->ctl_write_urb) usb_free_urb(hdw->ctl_write_urb); | ||
1919 | if (hdw->ctl_read_buffer) kfree(hdw->ctl_read_buffer); | ||
1920 | if (hdw->ctl_write_buffer) kfree(hdw->ctl_write_buffer); | ||
1921 | if (hdw->controls) kfree(hdw->controls); | ||
1922 | if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info); | ||
1923 | kfree(hdw); | ||
1924 | } | ||
1925 | return 0; | ||
1926 | } | ||
1927 | |||
1928 | |||
1929 | /* Remove _all_ associations between this driver and the underlying USB | ||
1930 | layer. */ | ||
1931 | void pvr2_hdw_remove_usb_stuff(struct pvr2_hdw *hdw) | ||
1932 | { | ||
1933 | if (hdw->flag_disconnected) return; | ||
1934 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_remove_usb_stuff: hdw=%p",hdw); | ||
1935 | if (hdw->ctl_read_urb) { | ||
1936 | usb_kill_urb(hdw->ctl_read_urb); | ||
1937 | usb_free_urb(hdw->ctl_read_urb); | ||
1938 | hdw->ctl_read_urb = 0; | ||
1939 | } | ||
1940 | if (hdw->ctl_write_urb) { | ||
1941 | usb_kill_urb(hdw->ctl_write_urb); | ||
1942 | usb_free_urb(hdw->ctl_write_urb); | ||
1943 | hdw->ctl_write_urb = 0; | ||
1944 | } | ||
1945 | if (hdw->ctl_read_buffer) { | ||
1946 | kfree(hdw->ctl_read_buffer); | ||
1947 | hdw->ctl_read_buffer = 0; | ||
1948 | } | ||
1949 | if (hdw->ctl_write_buffer) { | ||
1950 | kfree(hdw->ctl_write_buffer); | ||
1951 | hdw->ctl_write_buffer = 0; | ||
1952 | } | ||
1953 | pvr2_hdw_render_useless_unlocked(hdw); | ||
1954 | hdw->flag_disconnected = !0; | ||
1955 | hdw->usb_dev = 0; | ||
1956 | hdw->usb_intf = 0; | ||
1957 | } | ||
1958 | |||
1959 | |||
1960 | /* Destroy hardware interaction structure */ | ||
1961 | void pvr2_hdw_destroy(struct pvr2_hdw *hdw) | ||
1962 | { | ||
1963 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_destroy: hdw=%p",hdw); | ||
1964 | if (hdw->fw_buffer) { | ||
1965 | kfree(hdw->fw_buffer); | ||
1966 | hdw->fw_buffer = 0; | ||
1967 | } | ||
1968 | if (hdw->vid_stream) { | ||
1969 | pvr2_stream_destroy(hdw->vid_stream); | ||
1970 | hdw->vid_stream = 0; | ||
1971 | } | ||
1972 | if (hdw->audio_stat) { | ||
1973 | hdw->audio_stat->detach(hdw->audio_stat->ctxt); | ||
1974 | } | ||
1975 | if (hdw->decoder_ctrl) { | ||
1976 | hdw->decoder_ctrl->detach(hdw->decoder_ctrl->ctxt); | ||
1977 | } | ||
1978 | pvr2_i2c_core_done(hdw); | ||
1979 | pvr2_hdw_remove_usb_stuff(hdw); | ||
1980 | down(&pvr2_unit_sem); do { | ||
1981 | if ((hdw->unit_number >= 0) && | ||
1982 | (hdw->unit_number < PVR_NUM) && | ||
1983 | (unit_pointers[hdw->unit_number] == hdw)) { | ||
1984 | unit_pointers[hdw->unit_number] = 0; | ||
1985 | } | ||
1986 | } while (0); up(&pvr2_unit_sem); | ||
1987 | if (hdw->controls) kfree(hdw->controls); | ||
1988 | if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info); | ||
1989 | if (hdw->std_defs) kfree(hdw->std_defs); | ||
1990 | if (hdw->std_enum_names) kfree(hdw->std_enum_names); | ||
1991 | kfree(hdw); | ||
1992 | } | ||
1993 | |||
1994 | |||
1995 | int pvr2_hdw_init_ok(struct pvr2_hdw *hdw) | ||
1996 | { | ||
1997 | return hdw->flag_init_ok; | ||
1998 | } | ||
1999 | |||
2000 | |||
2001 | int pvr2_hdw_dev_ok(struct pvr2_hdw *hdw) | ||
2002 | { | ||
2003 | return (hdw && hdw->flag_ok); | ||
2004 | } | ||
2005 | |||
2006 | |||
2007 | /* Called when hardware has been unplugged */ | ||
2008 | void pvr2_hdw_disconnect(struct pvr2_hdw *hdw) | ||
2009 | { | ||
2010 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw); | ||
2011 | LOCK_TAKE(hdw->big_lock); | ||
2012 | LOCK_TAKE(hdw->ctl_lock); | ||
2013 | pvr2_hdw_remove_usb_stuff(hdw); | ||
2014 | LOCK_GIVE(hdw->ctl_lock); | ||
2015 | LOCK_GIVE(hdw->big_lock); | ||
2016 | } | ||
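Taken together, pvr2_hdw_create(), pvr2_hdw_setup(), pvr2_hdw_disconnect() and pvr2_hdw_destroy() define the lifecycle a USB probe/disconnect path would drive. A hedged sketch of a hypothetical caller (not the driver's actual probe or disconnect code) looks like this:

	/* Hypothetical caller, for illustration only. */
	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *devid)
	{
		struct pvr2_hdw *hdw = pvr2_hdw_create(intf, devid);
		if (!hdw)
			return -ENOMEM;
		if (!pvr2_hdw_setup(hdw)) {
			/* Not necessarily fatal: firmware may just have been
			   (re)loaded, in which case the device re-enumerates. */
		}
		usb_set_intfdata(intf, hdw);
		return 0;
	}

	static void example_disconnect(struct usb_interface *intf)
	{
		struct pvr2_hdw *hdw = usb_get_intfdata(intf);
		pvr2_hdw_disconnect(hdw);   /* sever ties to the USB layer */
		pvr2_hdw_destroy(hdw);      /* release everything          */
	}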
2017 | |||
2018 | |||
2019 | // Attempt to autoselect an appropriate value for std_enum_cur given | ||
2020 | // whatever is currently in std_mask_cur | ||
2021 | void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw) | ||
2022 | { | ||
2023 | unsigned int idx; | ||
2024 | for (idx = 1; idx < hdw->std_enum_cnt; idx++) { | ||
2025 | if (hdw->std_defs[idx-1].id == hdw->std_mask_cur) { | ||
2026 | hdw->std_enum_cur = idx; | ||
2027 | return; | ||
2028 | } | ||
2029 | } | ||
2030 | hdw->std_enum_cur = 0; | ||
2031 | } | ||
2032 | |||
2033 | |||
2034 | // Calculate correct set of enumerated standards based on currently known | ||
2035 | // set of available standards bits. | ||
2036 | void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw) | ||
2037 | { | ||
2038 | struct v4l2_standard *newstd; | ||
2039 | unsigned int std_cnt; | ||
2040 | unsigned int idx; | ||
2041 | |||
2042 | newstd = pvr2_std_create_enum(&std_cnt,hdw->std_mask_avail); | ||
2043 | |||
2044 | if (hdw->std_defs) { | ||
2045 | kfree(hdw->std_defs); | ||
2046 | hdw->std_defs = 0; | ||
2047 | } | ||
2048 | hdw->std_enum_cnt = 0; | ||
2049 | if (hdw->std_enum_names) { | ||
2050 | kfree(hdw->std_enum_names); | ||
2051 | hdw->std_enum_names = 0; | ||
2052 | } | ||
2053 | |||
2054 | if (!std_cnt) { | ||
2055 | pvr2_trace( | ||
2056 | PVR2_TRACE_ERROR_LEGS, | ||
2057 | "WARNING: Failed to identify any viable standards"); | ||
2058 | } | ||
2059 | hdw->std_enum_names = kmalloc(sizeof(char *)*(std_cnt+1),GFP_KERNEL); | ||
2060 | hdw->std_enum_names[0] = "none"; | ||
2061 | for (idx = 0; idx < std_cnt; idx++) { | ||
2062 | hdw->std_enum_names[idx+1] = | ||
2063 | newstd[idx].name; | ||
2064 | } | ||
2065 | // Set up the dynamic control for this standard | ||
2066 | hdw->std_info_enum.def.type_enum.value_names = hdw->std_enum_names; | ||
2067 | hdw->std_info_enum.def.type_enum.count = std_cnt+1; | ||
2068 | hdw->std_defs = newstd; | ||
2069 | hdw->std_enum_cnt = std_cnt+1; | ||
2070 | hdw->std_enum_cur = 0; | ||
2071 | hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail; | ||
2072 | } | ||
2073 | |||
2074 | |||
2075 | int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw, | ||
2076 | struct v4l2_standard *std, | ||
2077 | unsigned int idx) | ||
2078 | { | ||
2079 | int ret = -EINVAL; | ||
2080 | if (!idx) return ret; | ||
2081 | LOCK_TAKE(hdw->big_lock); do { | ||
2082 | if (idx >= hdw->std_enum_cnt) break; | ||
2083 | idx--; | ||
2084 | memcpy(std,hdw->std_defs+idx,sizeof(*std)); | ||
2085 | ret = 0; | ||
2086 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2087 | return ret; | ||
2088 | } | ||
2089 | |||
2090 | |||
2091 | /* Get the number of defined controls */ | ||
2092 | unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *hdw) | ||
2093 | { | ||
2094 | return hdw->control_cnt; | ||
2095 | } | ||
2096 | |||
2097 | |||
2098 | /* Retrieve a control handle given its index (0..count-1) */ | ||
2099 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *hdw, | ||
2100 | unsigned int idx) | ||
2101 | { | ||
2102 | if (idx >= hdw->control_cnt) return 0; | ||
2103 | return hdw->controls + idx; | ||
2104 | } | ||
2105 | |||
2106 | |||
2107 | /* Retrieve a control handle given its internal control ID */ | ||
2108 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *hdw, | ||
2109 | unsigned int ctl_id) | ||
2110 | { | ||
2111 | struct pvr2_ctrl *cptr; | ||
2112 | unsigned int idx; | ||
2113 | int i; | ||
2114 | |||
2115 | /* This could be made a lot more efficient, but for now... */ | ||
2116 | for (idx = 0; idx < hdw->control_cnt; idx++) { | ||
2117 | cptr = hdw->controls + idx; | ||
2118 | i = cptr->info->internal_id; | ||
2119 | if (i && (i == ctl_id)) return cptr; | ||
2120 | } | ||
2121 | return 0; | ||
2122 | } | ||
2123 | |||
2124 | |||
2125 | /* Given a V4L ID, retrieve the control structure associated with it. */ | ||
2126 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *hdw,unsigned int ctl_id) | ||
2127 | { | ||
2128 | struct pvr2_ctrl *cptr; | ||
2129 | unsigned int idx; | ||
2130 | int i; | ||
2131 | |||
2132 | /* This could be made a lot more efficient, but for now... */ | ||
2133 | for (idx = 0; idx < hdw->control_cnt; idx++) { | ||
2134 | cptr = hdw->controls + idx; | ||
2135 | i = cptr->info->v4l_id; | ||
2136 | if (i && (i == ctl_id)) return cptr; | ||
2137 | } | ||
2138 | return 0; | ||
2139 | } | ||
2140 | |||
2141 | |||
2142 | /* Given a V4L ID for its immediate predecessor, retrieve the control | ||
2143 | structure associated with it. */ | ||
2144 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *hdw, | ||
2145 | unsigned int ctl_id) | ||
2146 | { | ||
2147 | struct pvr2_ctrl *cptr,*cp2; | ||
2148 | unsigned int idx; | ||
2149 | int i; | ||
2150 | |||
2151 | /* This could be made a lot more efficient, but for now... */ | ||
2152 | cp2 = 0; | ||
2153 | for (idx = 0; idx < hdw->control_cnt; idx++) { | ||
2154 | cptr = hdw->controls + idx; | ||
2155 | i = cptr->info->v4l_id; | ||
2156 | if (!i) continue; | ||
2157 | if (i <= ctl_id) continue; | ||
2158 | if (cp2 && (cp2->info->v4l_id < i)) continue; | ||
2159 | cp2 = cptr; | ||
2160 | } | ||
2161 | return cp2; | ||
2163 | } | ||
2164 | |||
2165 | |||
2166 | static const char *get_ctrl_typename(enum pvr2_ctl_type tp) | ||
2167 | { | ||
2168 | switch (tp) { | ||
2169 | case pvr2_ctl_int: return "integer"; | ||
2170 | case pvr2_ctl_enum: return "enum"; | ||
2171 | case pvr2_ctl_bool: return "boolean"; | ||
2172 | case pvr2_ctl_bitmask: return "bitmask"; | ||
2173 | } | ||
2174 | return ""; | ||
2175 | } | ||
2176 | |||
2177 | |||
2178 | /* Commit all control changes made up to this point. Subsystems can be | ||
2179 | indirectly affected by these changes. For a given set of things being | ||
2180 | committed, we'll clear the affected subsystem bits and then once we're | ||
2181 | done committing everything we'll make a request to restore the subsystem | ||
2182 | state(s) back to their previous value before this function was called. | ||
2183 | Thus we can automatically reconfigure affected pieces of the driver as | ||
2184 | controls are changed. */ | ||
2185 | int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw) | ||
2186 | { | ||
2187 | unsigned long saved_subsys_mask = hdw->subsys_enabled_mask; | ||
2188 | unsigned long stale_subsys_mask = 0; | ||
2189 | unsigned int idx; | ||
2190 | struct pvr2_ctrl *cptr; | ||
2191 | int value; | ||
2192 | int commit_flag = 0; | ||
2193 | char buf[100]; | ||
2194 | unsigned int bcnt,ccnt; | ||
2195 | |||
2196 | for (idx = 0; idx < hdw->control_cnt; idx++) { | ||
2197 | cptr = hdw->controls + idx; | ||
2198 | if (cptr->info->is_dirty == 0) continue; | ||
2199 | if (!cptr->info->is_dirty(cptr)) continue; | ||
2200 | if (!commit_flag) { | ||
2201 | commit_flag = !0; | ||
2202 | } | ||
2203 | |||
2204 | bcnt = scnprintf(buf,sizeof(buf),"\"%s\" <-- ", | ||
2205 | cptr->info->name); | ||
2206 | value = 0; | ||
2207 | cptr->info->get_value(cptr,&value); | ||
2208 | pvr2_ctrl_value_to_sym_internal(cptr,~0,value, | ||
2209 | buf+bcnt, | ||
2210 | sizeof(buf)-bcnt,&ccnt); | ||
2211 | bcnt += ccnt; | ||
2212 | bcnt += scnprintf(buf+bcnt,sizeof(buf)-bcnt," <%s>", | ||
2213 | get_ctrl_typename(cptr->info->type)); | ||
2214 | pvr2_trace(PVR2_TRACE_CTL, | ||
2215 | "/*--TRACE_COMMIT--*/ %.*s", | ||
2216 | bcnt,buf); | ||
2217 | } | ||
2218 | |||
2219 | if (!commit_flag) { | ||
2220 | /* Nothing has changed */ | ||
2221 | return 0; | ||
2222 | } | ||
2223 | |||
2224 | /* When video standard changes, reset the hres and vres values - | ||
2225 | but if the user has pending changes there, then let the changes | ||
2226 | take priority. */ | ||
2227 | if (hdw->std_dirty) { | ||
2228 | /* Rewrite the vertical resolution to be appropriate to the | ||
2229 | video standard that has been selected. */ | ||
2230 | int nvres; | ||
2231 | if (hdw->std_mask_cur & V4L2_STD_525_60) { | ||
2232 | nvres = 480; | ||
2233 | } else { | ||
2234 | nvres = 576; | ||
2235 | } | ||
2236 | if (nvres != hdw->res_ver_val) { | ||
2237 | hdw->res_ver_val = nvres; | ||
2238 | hdw->res_ver_dirty = !0; | ||
2239 | } | ||
2240 | } | ||
2241 | |||
2242 | if (hdw->std_dirty || | ||
2243 | 0) { | ||
2244 | /* If any of this changes, then the encoder needs to be | ||
2245 | reconfigured, and we need to reset the stream. */ | ||
2246 | stale_subsys_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG); | ||
2247 | stale_subsys_mask |= hdw->subsys_stream_mask; | ||
2248 | } | ||
2249 | |||
2250 | if (hdw->srate_dirty) { | ||
2251 | /* Write new sample rate into control structure since | ||
2252 | * the master copy is stale. We must track srate | ||
2253 | * separate from the mpeg control structure because | ||
2254 | * other logic also uses this value. */ | ||
2255 | struct v4l2_ext_controls cs; | ||
2256 | struct v4l2_ext_control c1; | ||
2257 | memset(&cs,0,sizeof(cs)); | ||
2258 | memset(&c1,0,sizeof(c1)); | ||
2259 | cs.controls = &c1; | ||
2260 | cs.count = 1; | ||
2261 | c1.id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ; | ||
2262 | c1.value = hdw->srate_val; | ||
2263 | cx2341x_ext_ctrls(&hdw->enc_ctl_state,&cs,VIDIOC_S_EXT_CTRLS); | ||
2264 | } | ||
2265 | |||
2266 | /* Scan i2c core at this point - before we clear all the dirty | ||
2267 | bits. Various parts of the i2c core will notice dirty bits as | ||
2268 | appropriate and arrange to broadcast or directly send updates to | ||
2269 | the client drivers in order to keep everything in sync */ | ||
2270 | pvr2_i2c_core_check_stale(hdw); | ||
2271 | |||
2272 | for (idx = 0; idx < hdw->control_cnt; idx++) { | ||
2273 | cptr = hdw->controls + idx; | ||
2274 | if (!cptr->info->clear_dirty) continue; | ||
2275 | cptr->info->clear_dirty(cptr); | ||
2276 | } | ||
2277 | |||
2278 | /* Now execute i2c core update */ | ||
2279 | pvr2_i2c_core_sync(hdw); | ||
2280 | |||
2281 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,stale_subsys_mask,0); | ||
2282 | pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,saved_subsys_mask); | ||
2283 | |||
2284 | return 0; | ||
2285 | } | ||
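As the comment above pvr2_hdw_commit_ctl_internal() explains, the commit path snapshots the set of running subsystems, works out which ones a pending change invalidates, shuts those down, and then restores the original set so the affected pieces are reconfigured on the way back up. A compressed sketch of that flow; compute_affected() and apply_control_changes() are hypothetical helper names, not the driver's:

	/* Sketch of the save / invalidate / restore pattern used above. */
	unsigned long saved = hdw->subsys_enabled_mask;  /* what was running     */
	unsigned long stale = compute_affected(hdw);     /* what the change hits */

	apply_control_changes(hdw);
	pvr2_hdw_subsys_bit_chg_no_lock(hdw, stale, 0);  /* stop stale subsystems */
	pvr2_hdw_subsys_bit_chg_no_lock(hdw, ~0, saved); /* restore original set  */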
2286 | |||
2287 | |||
2288 | int pvr2_hdw_commit_ctl(struct pvr2_hdw *hdw) | ||
2289 | { | ||
2290 | LOCK_TAKE(hdw->big_lock); do { | ||
2291 | pvr2_hdw_commit_ctl_internal(hdw); | ||
2292 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2293 | return 0; | ||
2294 | } | ||
2295 | |||
2296 | |||
2297 | void pvr2_hdw_poll(struct pvr2_hdw *hdw) | ||
2298 | { | ||
2299 | LOCK_TAKE(hdw->big_lock); do { | ||
2300 | pvr2_i2c_core_sync(hdw); | ||
2301 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2302 | } | ||
2303 | |||
2304 | |||
2305 | void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *hdw, | ||
2306 | void (*func)(void *), | ||
2307 | void *data) | ||
2308 | { | ||
2309 | LOCK_TAKE(hdw->big_lock); do { | ||
2310 | hdw->poll_trigger_func = func; | ||
2311 | hdw->poll_trigger_data = data; | ||
2312 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2313 | } | ||
2314 | |||
2315 | |||
2316 | void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *hdw) | ||
2317 | { | ||
2318 | if (hdw->poll_trigger_func) { | ||
2319 | hdw->poll_trigger_func(hdw->poll_trigger_data); | ||
2320 | } | ||
2321 | } | ||
2322 | |||
2323 | |||
2324 | void pvr2_hdw_poll_trigger(struct pvr2_hdw *hdw) | ||
2325 | { | ||
2326 | LOCK_TAKE(hdw->big_lock); do { | ||
2327 | pvr2_hdw_poll_trigger_unlocked(hdw); | ||
2328 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2329 | } | ||
2330 | |||
2331 | |||
2332 | /* Return name for this driver instance */ | ||
2333 | const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *hdw) | ||
2334 | { | ||
2335 | return hdw->name; | ||
2336 | } | ||
2337 | |||
2338 | |||
2339 | /* Return bit mask indicating signal status */ | ||
2340 | unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *hdw) | ||
2341 | { | ||
2342 | unsigned int msk = 0; | ||
2343 | switch (hdw->input_val) { | ||
2344 | case PVR2_CVAL_INPUT_TV: | ||
2345 | case PVR2_CVAL_INPUT_RADIO: | ||
2346 | if (hdw->decoder_ctrl && | ||
2347 | hdw->decoder_ctrl->tuned(hdw->decoder_ctrl->ctxt)) { | ||
2348 | msk |= PVR2_SIGNAL_OK; | ||
2349 | if (hdw->audio_stat && | ||
2350 | hdw->audio_stat->status(hdw->audio_stat->ctxt)) { | ||
2351 | if (hdw->flag_stereo) { | ||
2352 | msk |= PVR2_SIGNAL_STEREO; | ||
2353 | } | ||
2354 | if (hdw->flag_bilingual) { | ||
2355 | msk |= PVR2_SIGNAL_SAP; | ||
2356 | } | ||
2357 | } | ||
2358 | } | ||
2359 | break; | ||
2360 | default: | ||
2361 | msk |= PVR2_SIGNAL_OK | PVR2_SIGNAL_STEREO; | ||
2362 | } | ||
2363 | return msk; | ||
2364 | } | ||
2365 | |||
2366 | |||
2367 | int pvr2_hdw_is_hsm(struct pvr2_hdw *hdw) | ||
2368 | { | ||
2369 | int result; | ||
2370 | LOCK_TAKE(hdw->ctl_lock); do { | ||
2371 | hdw->cmd_buffer[0] = 0x0b; | ||
2372 | result = pvr2_send_request(hdw, | ||
2373 | hdw->cmd_buffer,1, | ||
2374 | hdw->cmd_buffer,1); | ||
2375 | if (result < 0) break; | ||
2376 | result = (hdw->cmd_buffer[0] != 0); | ||
2377 | } while(0); LOCK_GIVE(hdw->ctl_lock); | ||
2378 | return result; | ||
2379 | } | ||
2380 | |||
2381 | |||
2382 | /* Return bit mask indicating signal status */ | ||
2383 | unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *hdw) | ||
2384 | { | ||
2385 | unsigned int msk = 0; | ||
2386 | LOCK_TAKE(hdw->big_lock); do { | ||
2387 | msk = pvr2_hdw_get_signal_status_internal(hdw); | ||
2388 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2389 | return msk; | ||
2390 | } | ||
2391 | |||
2392 | |||
2393 | /* Get handle to video output stream */ | ||
2394 | struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *hp) | ||
2395 | { | ||
2396 | return hp->vid_stream; | ||
2397 | } | ||
2398 | |||
2399 | |||
2400 | void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw) | ||
2401 | { | ||
2402 | int nr = pvr2_hdw_get_unit_number(hdw); | ||
2403 | LOCK_TAKE(hdw->big_lock); do { | ||
2404 | hdw->log_requested = !0; | ||
2405 | printk(KERN_INFO "pvrusb2: ================= START STATUS CARD #%d =================\n", nr); | ||
2406 | pvr2_i2c_core_check_stale(hdw); | ||
2407 | hdw->log_requested = 0; | ||
2408 | pvr2_i2c_core_sync(hdw); | ||
2409 | pvr2_trace(PVR2_TRACE_INFO,"cx2341x config:"); | ||
2410 | cx2341x_log_status(&hdw->enc_ctl_state, "pvrusb2"); | ||
2411 | printk(KERN_INFO "pvrusb2: ================== END STATUS CARD #%d ==================\n", nr); | ||
2412 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2413 | } | ||
2414 | |||
2415 | void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, int enable_flag) | ||
2416 | { | ||
2417 | int ret; | ||
2418 | u16 address; | ||
2419 | unsigned int pipe; | ||
2420 | LOCK_TAKE(hdw->big_lock); do { | ||
2421 | if ((hdw->fw_buffer == 0) == !enable_flag) break; | ||
2422 | |||
2423 | if (!enable_flag) { | ||
2424 | pvr2_trace(PVR2_TRACE_FIRMWARE, | ||
2425 | "Cleaning up after CPU firmware fetch"); | ||
2426 | kfree(hdw->fw_buffer); | ||
2427 | hdw->fw_buffer = 0; | ||
2428 | hdw->fw_size = 0; | ||
2429 | /* Now release the CPU. It will disconnect and | ||
2430 | reconnect later. */ | ||
2431 | pvr2_hdw_cpureset_assert(hdw,0); | ||
2432 | break; | ||
2433 | } | ||
2434 | |||
2435 | pvr2_trace(PVR2_TRACE_FIRMWARE, | ||
2436 | "Preparing to suck out CPU firmware"); | ||
2437 | hdw->fw_size = 0x2000; | ||
2438 | hdw->fw_buffer = kmalloc(hdw->fw_size,GFP_KERNEL); | ||
2439 | if (!hdw->fw_buffer) { | ||
2440 | hdw->fw_size = 0; | ||
2441 | break; | ||
2442 | } | ||
2443 | |||
2444 | memset(hdw->fw_buffer,0,hdw->fw_size); | ||
2445 | |||
2446 | /* We have to hold the CPU during firmware upload. */ | ||
2447 | pvr2_hdw_cpureset_assert(hdw,1); | ||
2448 | |||
2449 | /* Download the firmware from addresses 0000-1fff in 2048 | ||
2450 | (=0x800) byte chunks. */ | ||
2451 | |||
2452 | pvr2_trace(PVR2_TRACE_FIRMWARE,"Grabbing CPU firmware"); | ||
2453 | pipe = usb_rcvctrlpipe(hdw->usb_dev, 0); | ||
2454 | for(address = 0; address < hdw->fw_size; address += 0x800) { | ||
2455 | ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0xc0, | ||
2456 | address,0, | ||
2457 | hdw->fw_buffer+address,0x800,HZ); | ||
2458 | if (ret < 0) break; | ||
2459 | } | ||
2460 | |||
2461 | pvr2_trace(PVR2_TRACE_FIRMWARE,"Done grabbing CPU firmware"); | ||
2462 | |||
2463 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2464 | } | ||
2465 | |||
2466 | |||
2467 | /* Return true if we're in a mode for retrieving CPU firmware */ | ||
2468 | int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *hdw) | ||
2469 | { | ||
2470 | return hdw->fw_buffer != 0; | ||
2471 | } | ||
2472 | |||
2473 | |||
2474 | int pvr2_hdw_cpufw_get(struct pvr2_hdw *hdw,unsigned int offs, | ||
2475 | char *buf,unsigned int cnt) | ||
2476 | { | ||
2477 | int ret = -EINVAL; | ||
2478 | LOCK_TAKE(hdw->big_lock); do { | ||
2479 | if (!buf) break; | ||
2480 | if (!cnt) break; | ||
2481 | |||
2482 | if (!hdw->fw_buffer) { | ||
2483 | ret = -EIO; | ||
2484 | break; | ||
2485 | } | ||
2486 | |||
2487 | if (offs >= hdw->fw_size) { | ||
2488 | pvr2_trace(PVR2_TRACE_FIRMWARE, | ||
2489 | "Read firmware data offs=%d EOF", | ||
2490 | offs); | ||
2491 | ret = 0; | ||
2492 | break; | ||
2493 | } | ||
2494 | |||
2495 | if (offs + cnt > hdw->fw_size) cnt = hdw->fw_size - offs; | ||
2496 | |||
2497 | memcpy(buf,hdw->fw_buffer+offs,cnt); | ||
2498 | |||
2499 | pvr2_trace(PVR2_TRACE_FIRMWARE, | ||
2500 | "Read firmware data offs=%d cnt=%d", | ||
2501 | offs,cnt); | ||
2502 | ret = cnt; | ||
2503 | } while (0); LOCK_GIVE(hdw->big_lock); | ||
2504 | |||
2505 | return ret; | ||
2506 | } | ||
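The pair of routines above form a simple pull interface for dumping the on-chip 8051 firmware: enable snapshot mode, read the image out in chunks with pvr2_hdw_cpufw_get() until it returns 0, then disable to free the buffer and release the CPU. A hypothetical caller (buffer size and loop are illustrative only) might look like this:

	/* Hypothetical consumer of the firmware-snapshot interface above. */
	char buf[256];
	int got;
	unsigned int offs = 0;

	pvr2_hdw_cpufw_set_enabled(hdw, 1);    /* grab firmware into fw_buffer */
	while ((got = pvr2_hdw_cpufw_get(hdw, offs, buf, sizeof(buf))) > 0) {
		/* consume buf[0..got-1], e.g. copy it out to user space */
		offs += got;
	}
	pvr2_hdw_cpufw_set_enabled(hdw, 0);    /* free snapshot, release CPU   */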
2507 | |||
2508 | |||
2509 | int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *hdw) | ||
2510 | { | ||
2511 | return hdw->v4l_minor_number; | ||
2512 | } | ||
2513 | |||
2514 | |||
2515 | /* Store the v4l minor device number */ | ||
2516 | void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,int v) | ||
2517 | { | ||
2518 | hdw->v4l_minor_number = v; | ||
2519 | } | ||
2520 | |||
2521 | |||
2522 | void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw) | ||
2523 | { | ||
2524 | if (!hdw->usb_dev) return; | ||
2525 | usb_settoggle(hdw->usb_dev, PVR2_CTL_WRITE_ENDPOINT & 0xf, | ||
2526 | !(PVR2_CTL_WRITE_ENDPOINT & USB_DIR_IN), 0); | ||
2527 | usb_settoggle(hdw->usb_dev, PVR2_CTL_READ_ENDPOINT & 0xf, | ||
2528 | !(PVR2_CTL_READ_ENDPOINT & USB_DIR_IN), 0); | ||
2529 | usb_clear_halt(hdw->usb_dev, | ||
2530 | usb_rcvbulkpipe(hdw->usb_dev, | ||
2531 | PVR2_CTL_READ_ENDPOINT & 0x7f)); | ||
2532 | usb_clear_halt(hdw->usb_dev, | ||
2533 | usb_sndbulkpipe(hdw->usb_dev, | ||
2534 | PVR2_CTL_WRITE_ENDPOINT & 0x7f)); | ||
2535 | } | ||
2536 | |||
2537 | |||
2538 | static void pvr2_ctl_write_complete(struct urb *urb, struct pt_regs *regs) | ||
2539 | { | ||
2540 | struct pvr2_hdw *hdw = urb->context; | ||
2541 | hdw->ctl_write_pend_flag = 0; | ||
2542 | if (hdw->ctl_read_pend_flag) return; | ||
2543 | complete(&hdw->ctl_done); | ||
2544 | } | ||
2545 | |||
2546 | |||
2547 | static void pvr2_ctl_read_complete(struct urb *urb, struct pt_regs *regs) | ||
2548 | { | ||
2549 | struct pvr2_hdw *hdw = urb->context; | ||
2550 | hdw->ctl_read_pend_flag = 0; | ||
2551 | if (hdw->ctl_write_pend_flag) return; | ||
2552 | complete(&hdw->ctl_done); | ||
2553 | } | ||
2554 | |||
2555 | |||
2556 | static void pvr2_ctl_timeout(unsigned long data) | ||
2557 | { | ||
2558 | struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; | ||
2559 | if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) { | ||
2560 | hdw->ctl_timeout_flag = !0; | ||
2561 | if (hdw->ctl_write_pend_flag && hdw->ctl_write_urb) { | ||
2562 | usb_unlink_urb(hdw->ctl_write_urb); | ||
2563 | } | ||
2564 | if (hdw->ctl_read_pend_flag && hdw->ctl_read_urb) { | ||
2565 | usb_unlink_urb(hdw->ctl_read_urb); | ||
2566 | } | ||
2567 | } | ||
2568 | } | ||
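One detail worth spelling out about the three handlers above: pvr2_ctl_timeout() never completes ctl_done itself. It only unlinks the outstanding URBs; the USB core then runs their completion callbacks with an error status, and those callbacks are what clear the pending flags and wake the waiter in pvr2_send_request_ex() further down. In comment form:

	/* The timeout path does not complete ctl_done directly.  Unlinking an
	   URB makes the USB core invoke its completion callback (with an error
	   status); that callback clears its pending flag and, once both flags
	   are clear, signals ctl_done so the waiter can observe
	   ctl_timeout_flag and return -ETIMEDOUT. */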
2569 | |||
2570 | |||
2571 | int pvr2_send_request_ex(struct pvr2_hdw *hdw, | ||
2572 | unsigned int timeout,int probe_fl, | ||
2573 | void *write_data,unsigned int write_len, | ||
2574 | void *read_data,unsigned int read_len) | ||
2575 | { | ||
2576 | unsigned int idx; | ||
2577 | int status = 0; | ||
2578 | struct timer_list timer; | ||
2579 | if (!hdw->ctl_lock_held) { | ||
2580 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2581 | "Attempted to execute control transfer" | ||
2582 | " without lock!!"); | ||
2583 | return -EDEADLK; | ||
2584 | } | ||
2585 | if ((!hdw->flag_ok) && !probe_fl) { | ||
2586 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2587 | "Attempted to execute control transfer" | ||
2588 | " when device not ok"); | ||
2589 | return -EIO; | ||
2590 | } | ||
2591 | if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) { | ||
2592 | if (!probe_fl) { | ||
2593 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2594 | "Attempted to execute control transfer" | ||
2595 | " when USB is disconnected"); | ||
2596 | } | ||
2597 | return -ENOTTY; | ||
2598 | } | ||
2599 | |||
2600 | /* Ensure that we have sane parameters */ | ||
2601 | if (!write_data) write_len = 0; | ||
2602 | if (!read_data) read_len = 0; | ||
2603 | if (write_len > PVR2_CTL_BUFFSIZE) { | ||
2604 | pvr2_trace( | ||
2605 | PVR2_TRACE_ERROR_LEGS, | ||
2606 | "Attempted to execute %d byte" | ||
2607 | " control-write transfer (limit=%d)", | ||
2608 | write_len,PVR2_CTL_BUFFSIZE); | ||
2609 | return -EINVAL; | ||
2610 | } | ||
2611 | if (read_len > PVR2_CTL_BUFFSIZE) { | ||
2612 | pvr2_trace( | ||
2613 | PVR2_TRACE_ERROR_LEGS, | ||
2614 | "Attempted to execute %d byte" | ||
2615 | " control-read transfer (limit=%d)", | ||
2616 | read_len,PVR2_CTL_BUFFSIZE); | ||
2617 | return -EINVAL; | ||
2618 | } | ||
2619 | if ((!write_len) && (!read_len)) { | ||
2620 | pvr2_trace( | ||
2621 | PVR2_TRACE_ERROR_LEGS, | ||
2622 | "Attempted to execute null control transfer?"); | ||
2623 | return -EINVAL; | ||
2624 | } | ||
2625 | |||
2626 | |||
2627 | hdw->cmd_debug_state = 1; | ||
2628 | if (write_len) { | ||
2629 | hdw->cmd_debug_code = ((unsigned char *)write_data)[0]; | ||
2630 | } else { | ||
2631 | hdw->cmd_debug_code = 0; | ||
2632 | } | ||
2633 | hdw->cmd_debug_write_len = write_len; | ||
2634 | hdw->cmd_debug_read_len = read_len; | ||
2635 | |||
2636 | /* Initialize common stuff */ | ||
2637 | init_completion(&hdw->ctl_done); | ||
2638 | hdw->ctl_timeout_flag = 0; | ||
2639 | hdw->ctl_write_pend_flag = 0; | ||
2640 | hdw->ctl_read_pend_flag = 0; | ||
2641 | init_timer(&timer); | ||
2642 | timer.expires = jiffies + timeout; | ||
2643 | timer.data = (unsigned long)hdw; | ||
2644 | timer.function = pvr2_ctl_timeout; | ||
2645 | |||
2646 | if (write_len) { | ||
2647 | hdw->cmd_debug_state = 2; | ||
2648 | /* Transfer write data to internal buffer */ | ||
2649 | for (idx = 0; idx < write_len; idx++) { | ||
2650 | hdw->ctl_write_buffer[idx] = | ||
2651 | ((unsigned char *)write_data)[idx]; | ||
2652 | } | ||
2653 | /* Initiate a write request */ | ||
2654 | usb_fill_bulk_urb(hdw->ctl_write_urb, | ||
2655 | hdw->usb_dev, | ||
2656 | usb_sndbulkpipe(hdw->usb_dev, | ||
2657 | PVR2_CTL_WRITE_ENDPOINT), | ||
2658 | hdw->ctl_write_buffer, | ||
2659 | write_len, | ||
2660 | pvr2_ctl_write_complete, | ||
2661 | hdw); | ||
2662 | hdw->ctl_write_urb->actual_length = 0; | ||
2663 | hdw->ctl_write_pend_flag = !0; | ||
2664 | status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL); | ||
2665 | if (status < 0) { | ||
2666 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2667 | "Failed to submit write-control" | ||
2668 | " URB status=%d",status); | ||
2669 | hdw->ctl_write_pend_flag = 0; | ||
2670 | goto done; | ||
2671 | } | ||
2672 | } | ||
2673 | |||
2674 | if (read_len) { | ||
2675 | hdw->cmd_debug_state = 3; | ||
2676 | memset(hdw->ctl_read_buffer,0x43,read_len); | ||
2677 | /* Initiate a read request */ | ||
2678 | usb_fill_bulk_urb(hdw->ctl_read_urb, | ||
2679 | hdw->usb_dev, | ||
2680 | usb_rcvbulkpipe(hdw->usb_dev, | ||
2681 | PVR2_CTL_READ_ENDPOINT), | ||
2682 | hdw->ctl_read_buffer, | ||
2683 | read_len, | ||
2684 | pvr2_ctl_read_complete, | ||
2685 | hdw); | ||
2686 | hdw->ctl_read_urb->actual_length = 0; | ||
2687 | hdw->ctl_read_pend_flag = !0; | ||
2688 | status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL); | ||
2689 | if (status < 0) { | ||
2690 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2691 | "Failed to submit read-control" | ||
2692 | " URB status=%d",status); | ||
2693 | hdw->ctl_read_pend_flag = 0; | ||
2694 | goto done; | ||
2695 | } | ||
2696 | } | ||
2697 | |||
2698 | /* Start timer */ | ||
2699 | add_timer(&timer); | ||
2700 | |||
2701 | /* Now wait for all I/O to complete */ | ||
2702 | hdw->cmd_debug_state = 4; | ||
2703 | while (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) { | ||
2704 | wait_for_completion(&hdw->ctl_done); | ||
2705 | } | ||
2706 | hdw->cmd_debug_state = 5; | ||
2707 | |||
2708 | /* Stop timer */ | ||
2709 | del_timer_sync(&timer); | ||
2710 | |||
2711 | hdw->cmd_debug_state = 6; | ||
2712 | status = 0; | ||
2713 | |||
2714 | if (hdw->ctl_timeout_flag) { | ||
2715 | status = -ETIMEDOUT; | ||
2716 | if (!probe_fl) { | ||
2717 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2718 | "Timed out control transfer"); | ||
2719 | } | ||
2720 | goto done; | ||
2721 | } | ||
2722 | |||
2723 | if (write_len) { | ||
2724 | /* Validate results of write request */ | ||
2725 | if ((hdw->ctl_write_urb->status != 0) && | ||
2726 | (hdw->ctl_write_urb->status != -ENOENT) && | ||
2727 | (hdw->ctl_write_urb->status != -ESHUTDOWN) && | ||
2728 | (hdw->ctl_write_urb->status != -ECONNRESET)) { | ||
2729 | /* USB subsystem is reporting some kind of failure | ||
2730 | on the write */ | ||
2731 | status = hdw->ctl_write_urb->status; | ||
2732 | if (!probe_fl) { | ||
2733 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2734 | "control-write URB failure," | ||
2735 | " status=%d", | ||
2736 | status); | ||
2737 | } | ||
2738 | goto done; | ||
2739 | } | ||
2740 | if (hdw->ctl_write_urb->actual_length < write_len) { | ||
2741 | /* Failed to write enough data */ | ||
2742 | status = -EIO; | ||
2743 | if (!probe_fl) { | ||
2744 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2745 | "control-write URB short," | ||
2746 | " expected=%d got=%d", | ||
2747 | write_len, | ||
2748 | hdw->ctl_write_urb->actual_length); | ||
2749 | } | ||
2750 | goto done; | ||
2751 | } | ||
2752 | } | ||
2753 | if (read_len) { | ||
2754 | /* Validate results of read request */ | ||
2755 | if ((hdw->ctl_read_urb->status != 0) && | ||
2756 | (hdw->ctl_read_urb->status != -ENOENT) && | ||
2757 | (hdw->ctl_read_urb->status != -ESHUTDOWN) && | ||
2758 | (hdw->ctl_read_urb->status != -ECONNRESET)) { | ||
2759 | /* USB subsystem is reporting some kind of failure | ||
2760 | on the read */ | ||
2761 | status = hdw->ctl_read_urb->status; | ||
2762 | if (!probe_fl) { | ||
2763 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2764 | "control-read URB failure," | ||
2765 | " status=%d", | ||
2766 | status); | ||
2767 | } | ||
2768 | goto done; | ||
2769 | } | ||
2770 | if (hdw->ctl_read_urb->actual_length < read_len) { | ||
2771 | /* Failed to read enough data */ | ||
2772 | status = -EIO; | ||
2773 | if (!probe_fl) { | ||
2774 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2775 | "control-read URB short," | ||
2776 | " expected=%d got=%d", | ||
2777 | read_len, | ||
2778 | hdw->ctl_read_urb->actual_length); | ||
2779 | } | ||
2780 | goto done; | ||
2781 | } | ||
2782 | /* Transfer retrieved data out from internal buffer */ | ||
2783 | for (idx = 0; idx < read_len; idx++) { | ||
2784 | ((unsigned char *)read_data)[idx] = | ||
2785 | hdw->ctl_read_buffer[idx]; | ||
2786 | } | ||
2787 | } | ||
2788 | |||
2789 | done: | ||
2790 | |||
2791 | hdw->cmd_debug_state = 0; | ||
2792 | if ((status < 0) && (!probe_fl)) { | ||
2793 | pvr2_hdw_render_useless_unlocked(hdw); | ||
2794 | } | ||
2795 | return status; | ||
2796 | } | ||
2797 | |||
2798 | |||
2799 | int pvr2_send_request(struct pvr2_hdw *hdw, | ||
2800 | void *write_data,unsigned int write_len, | ||
2801 | void *read_data,unsigned int read_len) | ||
2802 | { | ||
2803 | return pvr2_send_request_ex(hdw,HZ*4,0, | ||
2804 | write_data,write_len, | ||
2805 | read_data,read_len); | ||
2806 | } | ||
2807 | |||
2808 | int pvr2_write_register(struct pvr2_hdw *hdw, u16 reg, u32 data) | ||
2809 | { | ||
2810 | int ret; | ||
2811 | |||
2812 | LOCK_TAKE(hdw->ctl_lock); | ||
2813 | |||
2814 | hdw->cmd_buffer[0] = 0x04; /* write register prefix */ | ||
2815 | PVR2_DECOMPOSE_LE(hdw->cmd_buffer,1,data); | ||
2816 | hdw->cmd_buffer[5] = 0; | ||
2817 | hdw->cmd_buffer[6] = (reg >> 8) & 0xff; | ||
2818 | hdw->cmd_buffer[7] = reg & 0xff; | ||
2819 | |||
2820 | |||
2821 | ret = pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 0); | ||
2822 | |||
2823 | LOCK_GIVE(hdw->ctl_lock); | ||
2824 | |||
2825 | return ret; | ||
2826 | } | ||
2827 | |||
2828 | |||
2829 | int pvr2_read_register(struct pvr2_hdw *hdw, u16 reg, u32 *data) | ||
2830 | { | ||
2831 | int ret = 0; | ||
2832 | |||
2833 | LOCK_TAKE(hdw->ctl_lock); | ||
2834 | |||
2835 | hdw->cmd_buffer[0] = 0x05; /* read register prefix */ | ||
2836 | hdw->cmd_buffer[1] = 0; | ||
2837 | hdw->cmd_buffer[2] = 0; | ||
2838 | hdw->cmd_buffer[3] = 0; | ||
2839 | hdw->cmd_buffer[4] = 0; | ||
2840 | hdw->cmd_buffer[5] = 0; | ||
2841 | hdw->cmd_buffer[6] = (reg >> 8) & 0xff; | ||
2842 | hdw->cmd_buffer[7] = reg & 0xff; | ||
2843 | |||
2844 | ret |= pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 4); | ||
2845 | *data = PVR2_COMPOSE_LE(hdw->cmd_buffer,0); | ||
2846 | |||
2847 | LOCK_GIVE(hdw->ctl_lock); | ||
2848 | |||
2849 | return ret; | ||
2850 | } | ||
2851 | |||
2852 | |||
2853 | int pvr2_write_u16(struct pvr2_hdw *hdw, u16 data, int res) | ||
2854 | { | ||
2855 | int ret; | ||
2856 | |||
2857 | LOCK_TAKE(hdw->ctl_lock); | ||
2858 | |||
2859 | hdw->cmd_buffer[0] = (data >> 8) & 0xff; | ||
2860 | hdw->cmd_buffer[1] = data & 0xff; | ||
2861 | |||
2862 | ret = pvr2_send_request(hdw, hdw->cmd_buffer, 2, hdw->cmd_buffer, res); | ||
2863 | |||
2864 | LOCK_GIVE(hdw->ctl_lock); | ||
2865 | |||
2866 | return ret; | ||
2867 | } | ||
2868 | |||
2869 | |||
2870 | int pvr2_write_u8(struct pvr2_hdw *hdw, u8 data, int res) | ||
2871 | { | ||
2872 | int ret; | ||
2873 | |||
2874 | LOCK_TAKE(hdw->ctl_lock); | ||
2875 | |||
2876 | hdw->cmd_buffer[0] = data; | ||
2877 | |||
2878 | ret = pvr2_send_request(hdw, hdw->cmd_buffer, 1, hdw->cmd_buffer, res); | ||
2879 | |||
2880 | LOCK_GIVE(hdw->ctl_lock); | ||
2881 | |||
2882 | return ret; | ||
2883 | } | ||
2884 | |||
2885 | |||
2886 | void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *hdw) | ||
2887 | { | ||
2888 | if (!hdw->flag_ok) return; | ||
2889 | pvr2_trace(PVR2_TRACE_INIT,"render_useless"); | ||
2890 | hdw->flag_ok = 0; | ||
2891 | if (hdw->vid_stream) { | ||
2892 | pvr2_stream_setup(hdw->vid_stream,0,0,0); | ||
2893 | } | ||
2894 | hdw->flag_streaming_enabled = 0; | ||
2895 | hdw->subsys_enabled_mask = 0; | ||
2896 | } | ||
2897 | |||
2898 | |||
2899 | void pvr2_hdw_render_useless(struct pvr2_hdw *hdw) | ||
2900 | { | ||
2901 | LOCK_TAKE(hdw->ctl_lock); | ||
2902 | pvr2_hdw_render_useless_unlocked(hdw); | ||
2903 | LOCK_GIVE(hdw->ctl_lock); | ||
2904 | } | ||
2905 | |||
2906 | |||
2907 | void pvr2_hdw_device_reset(struct pvr2_hdw *hdw) | ||
2908 | { | ||
2909 | int ret; | ||
2910 | pvr2_trace(PVR2_TRACE_INIT,"Performing a device reset..."); | ||
2911 | ret = usb_lock_device_for_reset(hdw->usb_dev,0); | ||
2912 | if (ret == 1) { | ||
2913 | ret = usb_reset_device(hdw->usb_dev); | ||
2914 | usb_unlock_device(hdw->usb_dev); | ||
2915 | } else { | ||
2916 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2917 | "Failed to lock USB device ret=%d",ret); | ||
2918 | } | ||
2919 | if (init_pause_msec) { | ||
2920 | pvr2_trace(PVR2_TRACE_INFO, | ||
2921 | "Waiting %u msec for hardware to settle", | ||
2922 | init_pause_msec); | ||
2923 | msleep(init_pause_msec); | ||
2924 | } | ||
2925 | |||
2926 | } | ||
2927 | |||
2928 | |||
2929 | void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val) | ||
2930 | { | ||
2931 | char da[1]; | ||
2932 | unsigned int pipe; | ||
2933 | int ret; | ||
2934 | |||
2935 | if (!hdw->usb_dev) return; | ||
2936 | |||
2937 | pvr2_trace(PVR2_TRACE_INIT,"cpureset_assert(%d)",val); | ||
2938 | |||
2939 | da[0] = val ? 0x01 : 0x00; | ||
2940 | |||
2941 | /* Write the CPUCS register on the 8051. The lsb of the register | ||
2942 | is the reset bit; a 1 asserts reset while a 0 clears it. */ | ||
2943 | pipe = usb_sndctrlpipe(hdw->usb_dev, 0); | ||
2944 | ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ); | ||
2945 | if (ret < 0) { | ||
2946 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
2947 | "cpureset_assert(%d) error=%d",val,ret); | ||
2948 | pvr2_hdw_render_useless(hdw); | ||
2949 | } | ||
2950 | } | ||
2951 | |||
2952 | |||
2953 | int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *hdw) | ||
2954 | { | ||
2955 | int status; | ||
2956 | LOCK_TAKE(hdw->ctl_lock); do { | ||
2957 | pvr2_trace(PVR2_TRACE_INIT,"Requesting uproc hard reset"); | ||
2958 | hdw->flag_ok = !0; | ||
2959 | hdw->cmd_buffer[0] = 0xdd; | ||
2960 | status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0); | ||
2961 | } while (0); LOCK_GIVE(hdw->ctl_lock); | ||
2962 | return status; | ||
2963 | } | ||
2964 | |||
2965 | |||
2966 | int pvr2_hdw_cmd_powerup(struct pvr2_hdw *hdw) | ||
2967 | { | ||
2968 | int status; | ||
2969 | LOCK_TAKE(hdw->ctl_lock); do { | ||
2970 | pvr2_trace(PVR2_TRACE_INIT,"Requesting powerup"); | ||
2971 | hdw->cmd_buffer[0] = 0xde; | ||
2972 | status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0); | ||
2973 | } while (0); LOCK_GIVE(hdw->ctl_lock); | ||
2974 | return status; | ||
2975 | } | ||
2976 | |||
2977 | |||
2978 | int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw) | ||
2979 | { | ||
2980 | if (!hdw->decoder_ctrl) { | ||
2981 | pvr2_trace(PVR2_TRACE_INIT, | ||
2982 | "Unable to reset decoder: nothing attached"); | ||
2983 | return -ENOTTY; | ||
2984 | } | ||
2985 | |||
2986 | if (!hdw->decoder_ctrl->force_reset) { | ||
2987 | pvr2_trace(PVR2_TRACE_INIT, | ||
2988 | "Unable to reset decoder: not implemented"); | ||
2989 | return -ENOTTY; | ||
2990 | } | ||
2991 | |||
2992 | pvr2_trace(PVR2_TRACE_INIT, | ||
2993 | "Requesting decoder reset"); | ||
2994 | hdw->decoder_ctrl->force_reset(hdw->decoder_ctrl->ctxt); | ||
2995 | return 0; | ||
2996 | } | ||
2997 | |||
2998 | |||
2999 | int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl) | ||
3000 | { | ||
3001 | int status; | ||
3002 | LOCK_TAKE(hdw->ctl_lock); do { | ||
3003 | hdw->cmd_buffer[0] = (runFl ? 0x36 : 0x37); | ||
3004 | status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0); | ||
3005 | } while (0); LOCK_GIVE(hdw->ctl_lock); | ||
3006 | if (!status) { | ||
3007 | hdw->subsys_enabled_mask = | ||
3008 | ((hdw->subsys_enabled_mask & | ||
3009 | ~(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) | | ||
3010 | (runFl ? (1<<PVR2_SUBSYS_B_USBSTREAM_RUN) : 0)); | ||
3011 | } | ||
3012 | return status; | ||
3013 | } | ||
3014 | |||
3015 | |||
3016 | void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw, | ||
3017 | struct pvr2_hdw_debug_info *ptr) | ||
3018 | { | ||
3019 | ptr->big_lock_held = hdw->big_lock_held; | ||
3020 | ptr->ctl_lock_held = hdw->ctl_lock_held; | ||
3021 | ptr->flag_ok = hdw->flag_ok; | ||
3022 | ptr->flag_disconnected = hdw->flag_disconnected; | ||
3023 | ptr->flag_init_ok = hdw->flag_init_ok; | ||
3024 | ptr->flag_streaming_enabled = hdw->flag_streaming_enabled; | ||
3025 | ptr->subsys_flags = hdw->subsys_enabled_mask; | ||
3026 | ptr->cmd_debug_state = hdw->cmd_debug_state; | ||
3027 | ptr->cmd_code = hdw->cmd_debug_code; | ||
3028 | ptr->cmd_debug_write_len = hdw->cmd_debug_write_len; | ||
3029 | ptr->cmd_debug_read_len = hdw->cmd_debug_read_len; | ||
3030 | ptr->cmd_debug_timeout = hdw->ctl_timeout_flag; | ||
3031 | ptr->cmd_debug_write_pend = hdw->ctl_write_pend_flag; | ||
3032 | ptr->cmd_debug_read_pend = hdw->ctl_read_pend_flag; | ||
3033 | ptr->cmd_debug_rstatus = hdw->ctl_read_urb->status; | ||
3034 | ptr->cmd_debug_wstatus = hdw->ctl_write_urb->status; | ||
3035 | } | ||
3036 | |||
3037 | |||
3038 | int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *dp) | ||
3039 | { | ||
3040 | return pvr2_read_register(hdw,PVR2_GPIO_DIR,dp); | ||
3041 | } | ||
3042 | |||
3043 | |||
3044 | int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *dp) | ||
3045 | { | ||
3046 | return pvr2_read_register(hdw,PVR2_GPIO_OUT,dp); | ||
3047 | } | ||
3048 | |||
3049 | |||
3050 | int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *dp) | ||
3051 | { | ||
3052 | return pvr2_read_register(hdw,PVR2_GPIO_IN,dp); | ||
3053 | } | ||
3054 | |||
3055 | |||
3056 | int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val) | ||
3057 | { | ||
3058 | u32 cval,nval; | ||
3059 | int ret; | ||
3060 | if (~msk) { | ||
3061 | ret = pvr2_read_register(hdw,PVR2_GPIO_DIR,&cval); | ||
3062 | if (ret) return ret; | ||
3063 | nval = (cval & ~msk) | (val & msk); | ||
3064 | pvr2_trace(PVR2_TRACE_GPIO, | ||
3065 | "GPIO direction changing 0x%x:0x%x" | ||
3066 | " from 0x%x to 0x%x", | ||
3067 | msk,val,cval,nval); | ||
3068 | } else { | ||
3069 | nval = val; | ||
3070 | pvr2_trace(PVR2_TRACE_GPIO, | ||
3071 | "GPIO direction changing to 0x%x",nval); | ||
3072 | } | ||
3073 | return pvr2_write_register(hdw,PVR2_GPIO_DIR,nval); | ||
3074 | } | ||
3075 | |||
3076 | |||
3077 | int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val) | ||
3078 | { | ||
3079 | u32 cval,nval; | ||
3080 | int ret; | ||
3081 | if (~msk) { | ||
3082 | ret = pvr2_read_register(hdw,PVR2_GPIO_OUT,&cval); | ||
3083 | if (ret) return ret; | ||
3084 | nval = (cval & ~msk) | (val & msk); | ||
3085 | pvr2_trace(PVR2_TRACE_GPIO, | ||
3086 | "GPIO output changing 0x%x:0x%x from 0x%x to 0x%x", | ||
3087 | msk,val,cval,nval); | ||
3088 | } else { | ||
3089 | nval = val; | ||
3090 | pvr2_trace(PVR2_TRACE_GPIO, | ||
3091 | "GPIO output changing to 0x%x",nval); | ||
3092 | } | ||
3093 | return pvr2_write_register(hdw,PVR2_GPIO_OUT,nval); | ||
3094 | } | ||
3095 | |||
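A hedged usage sketch for the two change functions above: because msk selects which bits are touched, a caller can reconfigure a single pin without disturbing the rest. The bit position used here (bit 3) is purely illustrative.

        static int example_gpio_bit3_high(struct pvr2_hdw *hdw)
        {
                int ret;
                ret = pvr2_hdw_gpio_chg_dir(hdw, 0x0008, 0x0008); /* bit 3: output */
                if (ret) return ret;
                return pvr2_hdw_gpio_chg_out(hdw, 0x0008, 0x0008); /* bit 3: high */
        }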
3096 | |||
3097 | int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw) | ||
3098 | { | ||
3099 | int result; | ||
3100 | LOCK_TAKE(hdw->ctl_lock); do { | ||
3101 | hdw->cmd_buffer[0] = 0xeb; | ||
3102 | result = pvr2_send_request(hdw, | ||
3103 | hdw->cmd_buffer,1, | ||
3104 | hdw->cmd_buffer,1); | ||
3105 | if (result < 0) break; | ||
3106 | result = hdw->cmd_buffer[0]; | ||
3107 | } while(0); LOCK_GIVE(hdw->ctl_lock); | ||
3108 | return result; | ||
3109 | } | ||
3110 | |||
3111 | |||
3112 | /* | ||
3113 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
3114 | *** Local Variables: *** | ||
3115 | *** mode: c *** | ||
3116 | *** fill-column: 75 *** | ||
3117 | *** tab-width: 8 *** | ||
3118 | *** c-basic-offset: 8 *** | ||
3119 | *** End: *** | ||
3120 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h new file mode 100644 index 000000000000..63f529154431 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h | |||
@@ -0,0 +1,335 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_HDW_H | ||
22 | #define __PVRUSB2_HDW_H | ||
23 | |||
24 | #include <linux/usb.h> | ||
25 | #include <linux/videodev2.h> | ||
26 | #include "pvrusb2-io.h" | ||
27 | #include "pvrusb2-ctrl.h" | ||
28 | |||
29 | |||
30 | /* Private internal control ids, look these up with | ||
31 | pvr2_hdw_get_ctrl_by_id() - these are NOT visible in V4L */ | ||
32 | #define PVR2_CID_STDENUM 1 | ||
33 | #define PVR2_CID_STDCUR 2 | ||
34 | #define PVR2_CID_STDAVAIL 3 | ||
35 | #define PVR2_CID_INPUT 4 | ||
36 | #define PVR2_CID_AUDIOMODE 5 | ||
37 | #define PVR2_CID_FREQUENCY 6 | ||
38 | #define PVR2_CID_HRES 7 | ||
39 | #define PVR2_CID_VRES 8 | ||
40 | |||
41 | /* Legal values for the INPUT state variable */ | ||
42 | #define PVR2_CVAL_INPUT_TV 0 | ||
43 | #define PVR2_CVAL_INPUT_SVIDEO 1 | ||
44 | #define PVR2_CVAL_INPUT_COMPOSITE 2 | ||
45 | #define PVR2_CVAL_INPUT_RADIO 3 | ||
46 | |||
47 | /* Values that pvr2_hdw_get_signal_status() returns */ | ||
48 | #define PVR2_SIGNAL_OK 0x0001 | ||
49 | #define PVR2_SIGNAL_STEREO 0x0002 | ||
50 | #define PVR2_SIGNAL_SAP 0x0004 | ||
51 | |||
52 | |||
53 | /* Subsystem definitions - these are various pieces that can be | ||
54 | independently stopped / started. Usually you don't want to mess with | ||
55 | this directly (let the driver handle things itself), but it is useful | ||
56 | for debugging. */ | ||
57 | #define PVR2_SUBSYS_B_ENC_FIRMWARE 0 | ||
58 | #define PVR2_SUBSYS_B_ENC_CFG 1 | ||
59 | #define PVR2_SUBSYS_B_DIGITIZER_RUN 2 | ||
60 | #define PVR2_SUBSYS_B_USBSTREAM_RUN 3 | ||
61 | #define PVR2_SUBSYS_B_ENC_RUN 4 | ||
62 | |||
63 | #define PVR2_SUBSYS_CFG_ALL ( \ | ||
64 | (1 << PVR2_SUBSYS_B_ENC_FIRMWARE) | \ | ||
65 | (1 << PVR2_SUBSYS_B_ENC_CFG) ) | ||
66 | #define PVR2_SUBSYS_RUN_ALL ( \ | ||
67 | (1 << PVR2_SUBSYS_B_DIGITIZER_RUN) | \ | ||
68 | (1 << PVR2_SUBSYS_B_USBSTREAM_RUN) | \ | ||
69 | (1 << PVR2_SUBSYS_B_ENC_RUN) ) | ||
70 | #define PVR2_SUBSYS_ALL ( \ | ||
71 | PVR2_SUBSYS_CFG_ALL | \ | ||
72 | PVR2_SUBSYS_RUN_ALL ) | ||
73 | |||
74 | enum pvr2_config { | ||
75 | pvr2_config_empty, | ||
76 | pvr2_config_mpeg, | ||
77 | pvr2_config_vbi, | ||
78 | pvr2_config_radio, | ||
79 | }; | ||
80 | |||
81 | const char *pvr2_config_get_name(enum pvr2_config); | ||
82 | |||
83 | struct pvr2_hdw; | ||
84 | |||
85 | /* Create and return a structure for interacting with the underlying | ||
86 | hardware */ | ||
87 | struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, | ||
88 | const struct usb_device_id *devid); | ||
89 | |||
90 | /* Poll for background activity (if any) */ | ||
91 | void pvr2_hdw_poll(struct pvr2_hdw *); | ||
92 | |||
93 | /* Trigger a poll to take place later at a convenient time */ | ||
94 | void pvr2_hdw_poll_trigger(struct pvr2_hdw *); | ||
95 | void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *); | ||
96 | |||
97 | /* Register a callback used to trigger a future poll */ | ||
98 | void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *, | ||
99 | void (*func)(void *), | ||
100 | void *data); | ||
101 | |||
102 | /* Get pointer to structure given unit number */ | ||
103 | struct pvr2_hdw *pvr2_hdw_find(int unit_number); | ||
104 | |||
105 | /* Destroy hardware interaction structure */ | ||
106 | void pvr2_hdw_destroy(struct pvr2_hdw *); | ||
107 | |||
108 | /* Set up the structure and attempt to put the device into a usable state. | ||
109 | This can be a time-consuming operation, which is why it is not done | ||
110 | internally as part of the create() step. Return value is exactly the | ||
111 | same as pvr2_hdw_init_ok(). */ | ||
112 | int pvr2_hdw_setup(struct pvr2_hdw *); | ||
113 | |||
114 | /* Initialization succeeded */ | ||
115 | int pvr2_hdw_init_ok(struct pvr2_hdw *); | ||
116 | |||
117 | /* Return true if in the ready (normal) state */ | ||
118 | int pvr2_hdw_dev_ok(struct pvr2_hdw *); | ||
119 | |||
120 | /* Return small integer number [1..N] for logical instance number of this | ||
121 | device. This is useful for indexing array-valued module parameters. */ | ||
122 | int pvr2_hdw_get_unit_number(struct pvr2_hdw *); | ||
123 | |||
124 | /* Get pointer to underlying USB device */ | ||
125 | struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *); | ||
126 | |||
127 | /* Retrieve serial number of device */ | ||
128 | unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *); | ||
129 | |||
130 | /* Called when hardware has been unplugged */ | ||
131 | void pvr2_hdw_disconnect(struct pvr2_hdw *); | ||
132 | |||
133 | /* Get the number of defined controls */ | ||
134 | unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *); | ||
135 | |||
136 | /* Retrieve a control handle given its index (0..count-1) */ | ||
137 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *,unsigned int); | ||
138 | |||
139 | /* Retrieve a control handle given its internal ID (if any) */ | ||
140 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *,unsigned int); | ||
141 | |||
142 | /* Retrieve a control handle given its V4L ID (if any) */ | ||
143 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *,unsigned int ctl_id); | ||
144 | |||
145 | /* Retrieve a control handle given the V4L ID of its immediate predecessor (if any) */ | ||
146 | struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *, | ||
147 | unsigned int ctl_id); | ||
148 | |||
149 | /* Commit all control changes made up to this point */ | ||
150 | int pvr2_hdw_commit_ctl(struct pvr2_hdw *); | ||
151 | |||
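A sketch of the intended call pattern for the control functions above; manipulation of an individual struct pvr2_ctrl goes through pvrusb2-ctrl.h, which is not shown here.

        static void example_walk_controls(struct pvr2_hdw *hdw)
        {
                unsigned int idx, cnt = pvr2_hdw_get_ctrl_count(hdw);
                for (idx = 0; idx < cnt; idx++) {
                        struct pvr2_ctrl *cptr =
                                pvr2_hdw_get_ctrl_by_index(hdw, idx);
                        if (!cptr) continue;
                        /* ... inspect / adjust via the pvr2_ctrl API ... */
                }
                pvr2_hdw_commit_ctl(hdw);  /* push any pending changes */
        }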
152 | /* Return name for this driver instance */ | ||
153 | const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *); | ||
154 | |||
155 | /* Return PVR2_SIGNAL_XXXX bit mask indicating signal status */ | ||
156 | unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *); | ||
157 | |||
158 | /* Query device and see if it thinks it is on a high-speed USB link */ | ||
159 | int pvr2_hdw_is_hsm(struct pvr2_hdw *); | ||
160 | |||
161 | /* Turn streaming on/off */ | ||
162 | int pvr2_hdw_set_streaming(struct pvr2_hdw *,int); | ||
163 | |||
164 | /* Find out if streaming is on */ | ||
165 | int pvr2_hdw_get_streaming(struct pvr2_hdw *); | ||
166 | |||
167 | /* Configure the type of stream to generate */ | ||
168 | int pvr2_hdw_set_stream_type(struct pvr2_hdw *, enum pvr2_config); | ||
169 | |||
170 | /* Get handle to video output stream */ | ||
171 | struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *); | ||
172 | |||
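A sketch of bringing up MPEG capture with the calls above; the ordering shown is an assumption drawn from these declarations, not a verified sequence, and error handling is trimmed.

        static int example_start_mpeg(struct pvr2_hdw *hdw)
        {
                int ret = pvr2_hdw_set_stream_type(hdw, pvr2_config_mpeg);
                if (ret) return ret;
                return pvr2_hdw_set_streaming(hdw, !0);  /* non-zero = start */
        }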
173 | /* Emit a video standard struct */ | ||
174 | int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std, | ||
175 | unsigned int idx); | ||
176 | |||
177 | /* Enable / disable various pieces of hardware. Items to change are | ||
178 | identified by bit positions within msk, and new state for each item is | ||
179 | identified by corresponding bit positions within val. */ | ||
180 | void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw, | ||
181 | unsigned long msk,unsigned long val); | ||
182 | |||
183 | /* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,msk) */ | ||
184 | void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk); | ||
185 | |||
186 | /* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,0) */ | ||
187 | void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk); | ||
188 | |||
189 | /* Retrieve mask indicating which pieces of hardware are currently enabled | ||
190 | / configured. */ | ||
191 | unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *); | ||
192 | |||
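A sketch of how the subsystem calls above combine with the PVR2_SUBSYS_* definitions earlier in this header; normally the driver manages this itself and the values here are purely illustrative.

        static void example_subsys_tweak(struct pvr2_hdw *hdw)
        {
                unsigned long cur = pvr2_hdw_subsys_get(hdw);
                if (!(cur & (1 << PVR2_SUBSYS_B_USBSTREAM_RUN))) {
                        /* USB transport is down; bring just that piece up. */
                        pvr2_hdw_subsys_bit_set(
                                hdw, 1 << PVR2_SUBSYS_B_USBSTREAM_RUN);
                }
                /* Or stop everything in the "run" group at once: */
                pvr2_hdw_subsys_bit_clr(hdw, PVR2_SUBSYS_RUN_ALL);
        }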
193 | /* Adjust mask of what gets shut down when streaming is stopped. This is a | ||
194 | debugging aid. */ | ||
195 | void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw, | ||
196 | unsigned long msk,unsigned long val); | ||
197 | |||
198 | /* Retrieve mask indicating which pieces of hardware are disabled when | ||
199 | streaming is turned off. */ | ||
200 | unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *); | ||
201 | |||
202 | |||
203 | /* Enable / disable retrieval of CPU firmware. This must be enabled before | ||
204 | pvr2_hdw_cpufw_get() will function. Note that doing this may prevent | ||
205 | the device from running (and leaving this mode may imply a device | ||
206 | reset). */ | ||
207 | void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *, int enable_flag); | ||
208 | |||
209 | /* Return true if we're in a mode for retrieving CPU firmware */ | ||
210 | int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *); | ||
211 | |||
212 | /* Retrieve a piece of the CPU's firmware at the given offset. Return | ||
213 | value is the number of bytes retrieved, zero if we're past the end, or | ||
214 | an error otherwise (e.g. if firmware retrieval is not enabled). */ | ||
215 | int pvr2_hdw_cpufw_get(struct pvr2_hdw *,unsigned int offs, | ||
216 | char *buf,unsigned int cnt); | ||
217 | |||
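A hedged sketch of a firmware dump loop built on the two calls above; the buffer size and what happens to the bytes are illustrative only.

        static void example_dump_cpufw(struct pvr2_hdw *hdw)
        {
                char buf[64];
                int cnt;
                unsigned int offs = 0;
                pvr2_hdw_cpufw_set_enabled(hdw, !0);
                for (;;) {
                        cnt = pvr2_hdw_cpufw_get(hdw, offs, buf, sizeof(buf));
                        if (cnt <= 0) break;  /* 0: past the end, <0: error */
                        /* ... hand cnt bytes at buf to the caller ... */
                        offs += cnt;
                }
                pvr2_hdw_cpufw_set_enabled(hdw, 0);
        }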
218 | /* Retrieve previously stored v4l minor device number */ | ||
219 | int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *); | ||
220 | |||
221 | /* Store the v4l minor device number */ | ||
222 | void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *,int); | ||
223 | |||
224 | |||
225 | /* The following entry points are all lower level things you normally don't | ||
226 | want to worry about. */ | ||
227 | |||
228 | /* Attempt to recover from a USB foul-up (in practice I find that if you | ||
229 | have to do this, then it's already too late). */ | ||
230 | void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw); | ||
231 | |||
232 | /* Issue a command and get a response from the device. LOTS of higher | ||
233 | level stuff is built on this. */ | ||
234 | int pvr2_send_request(struct pvr2_hdw *, | ||
235 | void *write_ptr,unsigned int write_len, | ||
236 | void *read_ptr,unsigned int read_len); | ||
237 | |||
238 | /* Issue a command and get a response from the device. This extended | ||
239 | version includes a probe flag (which if set means that device errors | ||
240 | should not be logged or treated as fatal) and a timeout in jiffies. | ||
241 | This can be used to non-lethally probe the health of endpoint 1. */ | ||
242 | int pvr2_send_request_ex(struct pvr2_hdw *,unsigned int timeout,int probe_fl, | ||
243 | void *write_ptr,unsigned int write_len, | ||
244 | void *read_ptr,unsigned int read_len); | ||
245 | |||
246 | /* Slightly higher level device communication functions. */ | ||
247 | int pvr2_write_register(struct pvr2_hdw *, u16, u32); | ||
248 | int pvr2_read_register(struct pvr2_hdw *, u16, u32 *); | ||
249 | int pvr2_write_u16(struct pvr2_hdw *, u16, int); | ||
250 | int pvr2_write_u8(struct pvr2_hdw *, u8, int); | ||
251 | |||
252 | /* Call if for any reason we can't talk to the hardware anymore - this will | ||
253 | cause the driver to stop flailing on the device. */ | ||
254 | void pvr2_hdw_render_useless(struct pvr2_hdw *); | ||
255 | void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *); | ||
256 | |||
257 | /* Set / clear 8051's reset bit */ | ||
258 | void pvr2_hdw_cpureset_assert(struct pvr2_hdw *,int); | ||
259 | |||
260 | /* Execute a USB-commanded device reset */ | ||
261 | void pvr2_hdw_device_reset(struct pvr2_hdw *); | ||
262 | |||
263 | /* Execute hard reset command (after this point it's likely that the | ||
264 | encoder will have to be reconfigured). This also clears the "useless" | ||
265 | state. */ | ||
266 | int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *); | ||
267 | |||
268 | /* Execute the power-up command */ | ||
269 | int pvr2_hdw_cmd_powerup(struct pvr2_hdw *); | ||
270 | |||
271 | /* Order decoder to reset */ | ||
272 | int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *); | ||
273 | |||
274 | /* Stop / start video stream transport */ | ||
275 | int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl); | ||
276 | |||
277 | /* Find I2C address of eeprom */ | ||
278 | int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *); | ||
279 | |||
280 | /* Direct manipulation of GPIO bits */ | ||
281 | int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *); | ||
282 | int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *); | ||
283 | int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *); | ||
284 | int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val); | ||
285 | int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val); | ||
286 | |||
287 | /* This data structure is specifically for the next function... */ | ||
288 | struct pvr2_hdw_debug_info { | ||
289 | int big_lock_held; | ||
290 | int ctl_lock_held; | ||
291 | int flag_ok; | ||
292 | int flag_disconnected; | ||
293 | int flag_init_ok; | ||
294 | int flag_streaming_enabled; | ||
295 | unsigned long subsys_flags; | ||
296 | int cmd_debug_state; | ||
297 | int cmd_debug_write_len; | ||
298 | int cmd_debug_read_len; | ||
299 | int cmd_debug_write_pend; | ||
300 | int cmd_debug_read_pend; | ||
301 | int cmd_debug_timeout; | ||
302 | int cmd_debug_rstatus; | ||
303 | int cmd_debug_wstatus; | ||
304 | unsigned char cmd_code; | ||
305 | }; | ||
306 | |||
307 | /* Non-intrusively retrieve internal state info - this is useful for | ||
308 | diagnosing lockups. Note that this operation is completed without any | ||
309 | kind of locking and so it is not atomic and may yield inconsistent | ||
310 | results. This is *purely* a debugging aid. */ | ||
311 | void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw, | ||
312 | struct pvr2_hdw_debug_info *); | ||
313 | |||
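As the comment above stresses, this is only a debugging aid; a sketch of taking a snapshot and reporting a few of the fields follows.

        static void example_report_state(const struct pvr2_hdw *hdw)
        {
                struct pvr2_hdw_debug_info dbg;
                pvr2_hdw_get_debug_info(hdw, &dbg);
                printk(KERN_INFO
                       "pvrusb2: last cmd=0x%02x state=%d ok=%d strm=%d\n",
                       dbg.cmd_code, dbg.cmd_debug_state,
                       dbg.flag_ok, dbg.flag_streaming_enabled);
        }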
314 | /* Cause modules to log their state once */ | ||
315 | void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw); | ||
316 | |||
317 | /* Cause encoder firmware to be uploaded into the device. This is normally | ||
318 | done autonomously, but the interface is exported here because it is also | ||
319 | a debugging aid. */ | ||
320 | int pvr2_upload_firmware2(struct pvr2_hdw *hdw); | ||
321 | |||
322 | /* List of device types that we can match */ | ||
323 | extern struct usb_device_id pvr2_device_table[]; | ||
324 | |||
325 | #endif /* __PVRUSB2_HDW_H */ | ||
326 | |||
327 | /* | ||
328 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
329 | *** Local Variables: *** | ||
330 | *** mode: c *** | ||
331 | *** fill-column: 75 *** | ||
332 | *** tab-width: 8 *** | ||
333 | *** c-basic-offset: 8 *** | ||
334 | *** End: *** | ||
335 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c new file mode 100644 index 000000000000..1dd4f6249b99 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "pvrusb2-i2c-core.h" | ||
23 | #include "pvrusb2-hdw-internal.h" | ||
24 | #include "pvrusb2-debug.h" | ||
25 | #include "pvrusb2-i2c-cmd-v4l2.h" | ||
26 | #include "pvrusb2-audio.h" | ||
27 | #include "pvrusb2-tuner.h" | ||
28 | #include "pvrusb2-demod.h" | ||
29 | #include "pvrusb2-video-v4l.h" | ||
30 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
31 | #include "pvrusb2-cx2584x-v4l.h" | ||
32 | #include "pvrusb2-wm8775.h" | ||
33 | #endif | ||
34 | |||
35 | #define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__) | ||
36 | |||
37 | #define OP_STANDARD 0 | ||
38 | #define OP_BCSH 1 | ||
39 | #define OP_VOLUME 2 | ||
40 | #define OP_FREQ 3 | ||
41 | #define OP_AUDIORATE 4 | ||
42 | #define OP_SIZE 5 | ||
43 | #define OP_LOG 6 | ||
44 | |||
45 | static const struct pvr2_i2c_op * const ops[] = { | ||
46 | [OP_STANDARD] = &pvr2_i2c_op_v4l2_standard, | ||
47 | [OP_BCSH] = &pvr2_i2c_op_v4l2_bcsh, | ||
48 | [OP_VOLUME] = &pvr2_i2c_op_v4l2_volume, | ||
49 | [OP_FREQ] = &pvr2_i2c_op_v4l2_frequency, | ||
50 | [OP_SIZE] = &pvr2_i2c_op_v4l2_size, | ||
51 | [OP_LOG] = &pvr2_i2c_op_v4l2_log, | ||
52 | }; | ||
53 | |||
54 | void pvr2_i2c_probe(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp) | ||
55 | { | ||
56 | int id; | ||
57 | id = cp->client->driver->id; | ||
58 | cp->ctl_mask = ((1 << OP_STANDARD) | | ||
59 | (1 << OP_BCSH) | | ||
60 | (1 << OP_VOLUME) | | ||
61 | (1 << OP_FREQ) | | ||
62 | (1 << OP_SIZE) | | ||
63 | (1 << OP_LOG)); | ||
64 | |||
65 | if (id == I2C_DRIVERID_MSP3400) { | ||
66 | if (pvr2_i2c_msp3400_setup(hdw,cp)) { | ||
67 | return; | ||
68 | } | ||
69 | } | ||
70 | if (id == I2C_DRIVERID_TUNER) { | ||
71 | if (pvr2_i2c_tuner_setup(hdw,cp)) { | ||
72 | return; | ||
73 | } | ||
74 | } | ||
75 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
76 | if (id == I2C_DRIVERID_CX25840) { | ||
77 | if (pvr2_i2c_cx2584x_v4l_setup(hdw,cp)) { | ||
78 | return; | ||
79 | } | ||
80 | } | ||
81 | if (id == I2C_DRIVERID_WM8775) { | ||
82 | if (pvr2_i2c_wm8775_setup(hdw,cp)) { | ||
83 | return; | ||
84 | } | ||
85 | } | ||
86 | #endif | ||
87 | if (id == I2C_DRIVERID_SAA711X) { | ||
88 | if (pvr2_i2c_decoder_v4l_setup(hdw,cp)) { | ||
89 | return; | ||
90 | } | ||
91 | } | ||
92 | if (id == I2C_DRIVERID_TDA9887) { | ||
93 | if (pvr2_i2c_demod_setup(hdw,cp)) { | ||
94 | return; | ||
95 | } | ||
96 | } | ||
97 | } | ||
98 | |||
99 | |||
100 | const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx) | ||
101 | { | ||
102 | if (idx >= sizeof(ops)/sizeof(ops[0])) return 0; | ||
103 | return ops[idx]; | ||
104 | } | ||
105 | |||
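Adding another update type would follow the same pattern as the table above: provide a struct pvr2_i2c_op with check/update callbacks, give it a slot in ops[], and set its bit in the ctl_mask built by pvr2_i2c_probe(). A purely hypothetical sketch (the "audiomode" op and the dirty flag it tests do not exist in this file):

        static int check_audiomode(struct pvr2_hdw *hdw)
        {
                return hdw->audiomode_dirty != 0;  /* assumed dirty flag */
        }

        static void set_audiomode(struct pvr2_hdw *hdw)
        {
                /* push the new value to the chips via pvr2_i2c_core_cmd() */
        }

        static const struct pvr2_i2c_op pvr2_i2c_op_example_audiomode = {
                .check = check_audiomode,
                .update = set_audiomode,
                .name = "v4l2_audiomode",
        };
        /* ...then add a slot pointing at this op in ops[] and OR its bit
           into cp->ctl_mask inside pvr2_i2c_probe(). */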
106 | |||
107 | /* | ||
108 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
109 | *** Local Variables: *** | ||
110 | *** mode: c *** | ||
111 | *** fill-column: 75 *** | ||
112 | *** tab-width: 8 *** | ||
113 | *** c-basic-offset: 8 *** | ||
114 | *** End: *** | ||
115 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c new file mode 100644 index 000000000000..9f81aff2b38a --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c | |||
@@ -0,0 +1,232 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include "pvrusb2-i2c-cmd-v4l2.h" | ||
24 | #include "pvrusb2-hdw-internal.h" | ||
25 | #include "pvrusb2-debug.h" | ||
26 | #include <linux/videodev2.h> | ||
27 | |||
28 | |||
29 | static void set_standard(struct pvr2_hdw *hdw) | ||
30 | { | ||
31 | v4l2_std_id vs; | ||
32 | vs = hdw->std_mask_cur; | ||
33 | pvr2_trace(PVR2_TRACE_CHIPS, | ||
34 | "i2c v4l2 set_standard(0x%llx)",(__u64)vs); | ||
35 | |||
36 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_STD,&vs); | ||
37 | } | ||
38 | |||
39 | |||
40 | static int check_standard(struct pvr2_hdw *hdw) | ||
41 | { | ||
42 | return hdw->std_dirty != 0; | ||
43 | } | ||
44 | |||
45 | |||
46 | const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard = { | ||
47 | .check = check_standard, | ||
48 | .update = set_standard, | ||
49 | .name = "v4l2_standard", | ||
50 | }; | ||
51 | |||
52 | |||
53 | static void set_bcsh(struct pvr2_hdw *hdw) | ||
54 | { | ||
55 | struct v4l2_control ctrl; | ||
56 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_bcsh" | ||
57 | " b=%d c=%d s=%d h=%d", | ||
58 | hdw->brightness_val,hdw->contrast_val, | ||
59 | hdw->saturation_val,hdw->hue_val); | ||
60 | memset(&ctrl,0,sizeof(ctrl)); | ||
61 | ctrl.id = V4L2_CID_BRIGHTNESS; | ||
62 | ctrl.value = hdw->brightness_val; | ||
63 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
64 | ctrl.id = V4L2_CID_CONTRAST; | ||
65 | ctrl.value = hdw->contrast_val; | ||
66 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
67 | ctrl.id = V4L2_CID_SATURATION; | ||
68 | ctrl.value = hdw->saturation_val; | ||
69 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
70 | ctrl.id = V4L2_CID_HUE; | ||
71 | ctrl.value = hdw->hue_val; | ||
72 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
73 | } | ||
74 | |||
75 | |||
76 | static int check_bcsh(struct pvr2_hdw *hdw) | ||
77 | { | ||
78 | return (hdw->brightness_dirty || | ||
79 | hdw->contrast_dirty || | ||
80 | hdw->saturation_dirty || | ||
81 | hdw->hue_dirty); | ||
82 | } | ||
83 | |||
84 | |||
85 | const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh = { | ||
86 | .check = check_bcsh, | ||
87 | .update = set_bcsh, | ||
88 | .name = "v4l2_bcsh", | ||
89 | }; | ||
90 | |||
91 | |||
92 | static void set_volume(struct pvr2_hdw *hdw) | ||
93 | { | ||
94 | struct v4l2_control ctrl; | ||
95 | pvr2_trace(PVR2_TRACE_CHIPS, | ||
96 | "i2c v4l2 set_volume" | ||
97 | "(vol=%d bal=%d bas=%d treb=%d mute=%d)", | ||
98 | hdw->volume_val, | ||
99 | hdw->balance_val, | ||
100 | hdw->bass_val, | ||
101 | hdw->treble_val, | ||
102 | hdw->mute_val); | ||
103 | memset(&ctrl,0,sizeof(ctrl)); | ||
104 | ctrl.id = V4L2_CID_AUDIO_MUTE; | ||
105 | ctrl.value = hdw->mute_val ? 1 : 0; | ||
106 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
107 | ctrl.id = V4L2_CID_AUDIO_VOLUME; | ||
108 | ctrl.value = hdw->volume_val; | ||
109 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
110 | ctrl.id = V4L2_CID_AUDIO_BALANCE; | ||
111 | ctrl.value = hdw->balance_val; | ||
112 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
113 | ctrl.id = V4L2_CID_AUDIO_BASS; | ||
114 | ctrl.value = hdw->bass_val; | ||
115 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
116 | ctrl.id = V4L2_CID_AUDIO_TREBLE; | ||
117 | ctrl.value = hdw->treble_val; | ||
118 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl); | ||
119 | } | ||
120 | |||
121 | |||
122 | static int check_volume(struct pvr2_hdw *hdw) | ||
123 | { | ||
124 | return (hdw->volume_dirty || | ||
125 | hdw->balance_dirty || | ||
126 | hdw->bass_dirty || | ||
127 | hdw->treble_dirty || | ||
128 | hdw->mute_dirty); | ||
129 | } | ||
130 | |||
131 | |||
132 | const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume = { | ||
133 | .check = check_volume, | ||
134 | .update = set_volume, | ||
135 | .name = "v4l2_volume", | ||
136 | }; | ||
137 | |||
138 | |||
139 | static void set_frequency(struct pvr2_hdw *hdw) | ||
140 | { | ||
141 | unsigned long fv; | ||
142 | struct v4l2_frequency freq; | ||
143 | fv = hdw->freqVal; | ||
144 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_freq(%lu)",fv); | ||
145 | memset(&freq,0,sizeof(freq)); | ||
146 | freq.frequency = fv / 62500; | ||
147 | freq.tuner = 0; | ||
148 | freq.type = V4L2_TUNER_ANALOG_TV; | ||
149 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_FREQUENCY,&freq); | ||
150 | } | ||
151 | |||
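The division above converts from Hz to the 62.5 kHz units that V4L2 uses for analog TV tuners; a worked example with an assumed carrier frequency:

        unsigned long hz = 441250000;   /* 441.25 MHz, illustrative value */
        __u32 units = hz / 62500;       /* == 7060, what VIDIOC_S_FREQUENCY sees */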
152 | |||
153 | static int check_frequency(struct pvr2_hdw *hdw) | ||
154 | { | ||
155 | return hdw->freqDirty != 0; | ||
156 | } | ||
157 | |||
158 | |||
159 | const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency = { | ||
160 | .check = check_frequency, | ||
161 | .update = set_frequency, | ||
162 | .name = "v4l2_freq", | ||
163 | }; | ||
164 | |||
165 | |||
166 | static void set_size(struct pvr2_hdw *hdw) | ||
167 | { | ||
168 | struct v4l2_format fmt; | ||
169 | |||
170 | memset(&fmt,0,sizeof(fmt)); | ||
171 | |||
172 | fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; | ||
173 | fmt.fmt.pix.width = hdw->res_hor_val; | ||
174 | fmt.fmt.pix.height = hdw->res_ver_val; | ||
175 | |||
176 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_size(%dx%d)", | ||
177 | fmt.fmt.pix.width,fmt.fmt.pix.height); | ||
178 | |||
179 | pvr2_i2c_core_cmd(hdw,VIDIOC_S_FMT,&fmt); | ||
180 | } | ||
181 | |||
182 | |||
183 | static int check_size(struct pvr2_hdw *hdw) | ||
184 | { | ||
185 | return (hdw->res_hor_dirty || hdw->res_ver_dirty); | ||
186 | } | ||
187 | |||
188 | |||
189 | const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size = { | ||
190 | .check = check_size, | ||
191 | .update = set_size, | ||
192 | .name = "v4l2_size", | ||
193 | }; | ||
194 | |||
195 | |||
196 | static void do_log(struct pvr2_hdw *hdw) | ||
197 | { | ||
198 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 do_log()"); | ||
199 | pvr2_i2c_core_cmd(hdw,VIDIOC_LOG_STATUS,0); | ||
200 | |||
201 | } | ||
202 | |||
203 | |||
204 | static int check_log(struct pvr2_hdw *hdw) | ||
205 | { | ||
206 | return hdw->log_requested != 0; | ||
207 | } | ||
208 | |||
209 | |||
210 | const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log = { | ||
211 | .check = check_log, | ||
212 | .update = do_log, | ||
213 | .name = "v4l2_log", | ||
214 | }; | ||
215 | |||
216 | |||
217 | void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *cp,int fl) | ||
218 | { | ||
219 | pvr2_i2c_client_cmd(cp, | ||
220 | (fl ? VIDIOC_STREAMON : VIDIOC_STREAMOFF),0); | ||
221 | } | ||
222 | |||
223 | |||
224 | /* | ||
225 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
226 | *** Local Variables: *** | ||
227 | *** mode: c *** | ||
228 | *** fill-column: 70 *** | ||
229 | *** tab-width: 8 *** | ||
230 | *** c-basic-offset: 8 *** | ||
231 | *** End: *** | ||
232 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h new file mode 100644 index 000000000000..ecabddba1ec5 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_CMD_V4L2_H | ||
24 | #define __PVRUSB2_CMD_V4L2_H | ||
25 | |||
26 | #include "pvrusb2-i2c-core.h" | ||
27 | |||
28 | extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard; | ||
29 | extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh; | ||
30 | extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume; | ||
31 | extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency; | ||
32 | extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size; | ||
33 | extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log; | ||
34 | |||
35 | void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *,int); | ||
36 | |||
37 | #endif /* __PVRUSB2_CMD_V4L2_H */ | ||
38 | |||
39 | /* | ||
40 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
41 | *** Local Variables: *** | ||
42 | *** mode: c *** | ||
43 | *** fill-column: 70 *** | ||
44 | *** tab-width: 8 *** | ||
45 | *** c-basic-offset: 8 *** | ||
46 | *** End: *** | ||
47 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c new file mode 100644 index 000000000000..c8d0bdee3ff1 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c | |||
@@ -0,0 +1,937 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "pvrusb2-i2c-core.h" | ||
23 | #include "pvrusb2-hdw-internal.h" | ||
24 | #include "pvrusb2-debug.h" | ||
25 | |||
26 | #define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__) | ||
27 | |||
28 | /* | ||
29 | |||
30 | This module attempts to implement a compliant I2C adapter for the pvrusb2 | ||
31 | device. By doing this we can then make use of existing functionality in | ||
32 | V4L (e.g. tuner.c) rather than rolling our own. | ||
33 | |||
34 | */ | ||
35 | |||
36 | static unsigned int i2c_scan = 0; | ||
37 | module_param(i2c_scan, int, S_IRUGO|S_IWUSR); | ||
38 | MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time"); | ||
39 | |||
40 | static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */ | ||
41 | u8 i2c_addr, /* I2C address we're talking to */ | ||
42 | u8 *data, /* Data to write */ | ||
43 | u16 length) /* Size of data to write */ | ||
44 | { | ||
45 | /* Return value - default 0 means success */ | ||
46 | int ret; | ||
47 | |||
48 | |||
49 | if (!data) length = 0; | ||
50 | if (length > (sizeof(hdw->cmd_buffer) - 3)) { | ||
51 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
52 | "Killing an I2C write to %u that is too large" | ||
53 | " (desired=%u limit=%u)", | ||
54 | i2c_addr, | ||
55 | length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3)); | ||
56 | return -ENOTSUPP; | ||
57 | } | ||
58 | |||
59 | LOCK_TAKE(hdw->ctl_lock); | ||
60 | |||
61 | /* Clear the command buffer (likely to be paranoia) */ | ||
62 | memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer)); | ||
63 | |||
64 | /* Set up command buffer for an I2C write */ | ||
65 | hdw->cmd_buffer[0] = 0x08; /* write prefix */ | ||
66 | hdw->cmd_buffer[1] = i2c_addr; /* i2c addr of chip */ | ||
67 | hdw->cmd_buffer[2] = length; /* length of what follows */ | ||
68 | if (length) memcpy(hdw->cmd_buffer + 3, data, length); | ||
69 | |||
70 | /* Do the operation */ | ||
71 | ret = pvr2_send_request(hdw, | ||
72 | hdw->cmd_buffer, | ||
73 | length + 3, | ||
74 | hdw->cmd_buffer, | ||
75 | 1); | ||
76 | if (!ret) { | ||
77 | if (hdw->cmd_buffer[0] != 8) { | ||
78 | ret = -EIO; | ||
79 | if (hdw->cmd_buffer[0] != 7) { | ||
80 | trace_i2c("unexpected status" | ||
81 | " from i2_write[%d]: %d", | ||
82 | i2c_addr,hdw->cmd_buffer[0]); | ||
83 | } | ||
84 | } | ||
85 | } | ||
86 | |||
87 | LOCK_GIVE(hdw->ctl_lock); | ||
88 | |||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */ | ||
93 | u8 i2c_addr, /* I2C address we're talking to */ | ||
94 | u8 *data, /* Data to write */ | ||
95 | u16 dlen, /* Size of data to write */ | ||
96 | u8 *res, /* Where to put data we read */ | ||
97 | u16 rlen) /* Amount of data to read */ | ||
98 | { | ||
99 | /* Return value - default 0 means success */ | ||
100 | int ret; | ||
101 | |||
102 | |||
103 | if (!data) dlen = 0; | ||
104 | if (dlen > (sizeof(hdw->cmd_buffer) - 4)) { | ||
105 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
106 | "Killing an I2C read to %u that has wlen too large" | ||
107 | " (desired=%u limit=%u)", | ||
108 | i2c_addr, | ||
109 | dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4)); | ||
110 | return -ENOTSUPP; | ||
111 | } | ||
112 | if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) { | ||
113 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
114 | "Killing an I2C read to %u that has rlen too large" | ||
115 | " (desired=%u limit=%u)", | ||
116 | i2c_addr, | ||
117 | rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1)); | ||
118 | return -ENOTSUPP; | ||
119 | } | ||
120 | |||
121 | LOCK_TAKE(hdw->ctl_lock); | ||
122 | |||
123 | /* Clear the command buffer (likely to be paranoia) */ | ||
124 | memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer)); | ||
125 | |||
126 | /* Set up command buffer for an I2C write followed by a read */ | ||
127 | hdw->cmd_buffer[0] = 0x09; /* read prefix */ | ||
128 | hdw->cmd_buffer[1] = dlen; /* arg length */ | ||
129 | hdw->cmd_buffer[2] = rlen; /* answer length. Device will send one | ||
130 | more byte (status). */ | ||
131 | hdw->cmd_buffer[3] = i2c_addr; /* i2c addr of chip */ | ||
132 | if (dlen) memcpy(hdw->cmd_buffer + 4, data, dlen); | ||
133 | |||
134 | /* Do the operation */ | ||
135 | ret = pvr2_send_request(hdw, | ||
136 | hdw->cmd_buffer, | ||
137 | 4 + dlen, | ||
138 | hdw->cmd_buffer, | ||
139 | rlen + 1); | ||
140 | if (!ret) { | ||
141 | if (hdw->cmd_buffer[0] != 8) { | ||
142 | ret = -EIO; | ||
143 | if (hdw->cmd_buffer[0] != 7) { | ||
144 | trace_i2c("unexpected status" | ||
145 | " from i2_read[%d]: %d", | ||
146 | i2c_addr,hdw->cmd_buffer[0]); | ||
147 | } | ||
148 | } | ||
149 | } | ||
150 | |||
151 | /* Copy back the result */ | ||
152 | if (res && rlen) { | ||
153 | if (ret) { | ||
154 | /* Error, just blank out the return buffer */ | ||
155 | memset(res, 0, rlen); | ||
156 | } else { | ||
157 | memcpy(res, hdw->cmd_buffer + 1, rlen); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | LOCK_GIVE(hdw->ctl_lock); | ||
162 | |||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | /* This is the common low level entry point for doing I2C operations to the | ||
167 | hardware. */ | ||
168 | int pvr2_i2c_basic_op(struct pvr2_hdw *hdw, | ||
169 | u8 i2c_addr, | ||
170 | u8 *wdata, | ||
171 | u16 wlen, | ||
172 | u8 *rdata, | ||
173 | u16 rlen) | ||
174 | { | ||
175 | if (!rdata) rlen = 0; | ||
176 | if (!wdata) wlen = 0; | ||
177 | if (rlen || !wlen) { | ||
178 | return pvr2_i2c_read(hdw,i2c_addr,wdata,wlen,rdata,rlen); | ||
179 | } else { | ||
180 | return pvr2_i2c_write(hdw,i2c_addr,wdata,wlen); | ||
181 | } | ||
182 | } | ||
183 | |||
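A hedged sketch of using the entry point above from elsewhere in the driver: a pure write, then a write-followed-by-read. The chip address and register bytes are illustrative only.

        static int example_i2c_access(struct pvr2_hdw *hdw)
        {
                u8 wbuf[2] = { 0x00, 0x80 };
                u8 rbuf[1];
                int ret;
                /* write two bytes to the device at i2c address 0x44 */
                ret = pvr2_i2c_basic_op(hdw, 0x44, wbuf, sizeof(wbuf), 0, 0);
                if (ret) return ret;
                /* write a one-byte subaddress, then read one byte back */
                return pvr2_i2c_basic_op(hdw, 0x44, wbuf, 1, rbuf, sizeof(rbuf));
        }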
184 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
185 | |||
186 | /* This is a special entry point that is entered if an I2C operation is | ||
187 | attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this | ||
188 | part doesn't work, but we know it is really there. So let's look for | ||
189 | the autodetect attempt and just return success if we see that. */ | ||
190 | static int i2c_hack_wm8775(struct pvr2_hdw *hdw, | ||
191 | u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen) | ||
192 | { | ||
193 | if (!(rlen || wlen)) { | ||
194 | // This is a probe attempt. Just let it succeed. | ||
195 | return 0; | ||
196 | } | ||
197 | return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen); | ||
198 | } | ||
199 | |||
200 | /* This is a special entry point that is entered if an I2C operation is | ||
201 | attempted to a cx25840 chip on model 24xxx hardware. This chip can | ||
202 | sometimes wedge itself. Worse still, when this happens msp3400 can | ||
203 | falsely detect this part and then the system gets hosed up after msp3400 | ||
204 | gets confused and dies. What we want to do here is try to keep msp3400 | ||
205 | away and also try to notice if the chip is wedged and send a warning to | ||
206 | the system log. */ | ||
207 | static int i2c_hack_cx25840(struct pvr2_hdw *hdw, | ||
208 | u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen) | ||
209 | { | ||
210 | int ret; | ||
211 | unsigned int subaddr; | ||
212 | u8 wbuf[2]; | ||
213 | int state = hdw->i2c_cx25840_hack_state; | ||
214 | |||
215 | if (!(rlen || wlen)) { | ||
216 | // Probe attempt - always just succeed and don't bother the | ||
217 | // hardware (this helps to make the state machine further | ||
218 | // down somewhat easier). | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | if (state == 3) { | ||
223 | return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen); | ||
224 | } | ||
225 | |||
226 | /* We're looking for the exact pattern where the revision register | ||
227 | is being read. The cx25840 module will always look at the | ||
228 | revision register first. Any other pattern of access therefore | ||
229 | has to be a probe attempt from somebody else so we'll reject it. | ||
230 | Normally we could just let each client just probe the part | ||
231 | anyway, but when the cx25840 is wedged, msp3400 will get a false | ||
232 | positive and that just screws things up... */ | ||
233 | |||
234 | if (wlen == 0) { | ||
235 | switch (state) { | ||
236 | case 1: subaddr = 0x0100; break; | ||
237 | case 2: subaddr = 0x0101; break; | ||
238 | default: goto fail; | ||
239 | } | ||
240 | } else if (wlen == 2) { | ||
241 | subaddr = (wdata[0] << 8) | wdata[1]; | ||
242 | switch (subaddr) { | ||
243 | case 0x0100: state = 1; break; | ||
244 | case 0x0101: state = 2; break; | ||
245 | default: goto fail; | ||
246 | } | ||
247 | } else { | ||
248 | goto fail; | ||
249 | } | ||
250 | if (!rlen) goto success; | ||
251 | state = 0; | ||
252 | if (rlen != 1) goto fail; | ||
253 | |||
254 | /* If we get to here then we have a legitimate read for one of the | ||
255 | two revision bytes, so pass it through. */ | ||
256 | wbuf[0] = subaddr >> 8; | ||
257 | wbuf[1] = subaddr; | ||
258 | ret = pvr2_i2c_basic_op(hdw,i2c_addr,wbuf,2,rdata,rlen); | ||
259 | |||
260 | if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) { | ||
261 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
262 | "WARNING: Detected a wedged cx25840 chip;" | ||
263 | " the device will not work."); | ||
264 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
265 | "WARNING: Try power cycling the pvrusb2 device."); | ||
266 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
267 | "WARNING: Disabling further access to the device" | ||
268 | " to prevent other foul-ups."); | ||
269 | // This blocks all further communication with the part. | ||
270 | hdw->i2c_func[0x44] = 0; | ||
271 | pvr2_hdw_render_useless(hdw); | ||
272 | goto fail; | ||
273 | } | ||
274 | |||
275 | /* Success! */ | ||
276 | pvr2_trace(PVR2_TRACE_CHIPS,"cx25840 appears to be OK."); | ||
277 | state = 3; | ||
278 | |||
279 | success: | ||
280 | hdw->i2c_cx25840_hack_state = state; | ||
281 | return 0; | ||
282 | |||
283 | fail: | ||
284 | hdw->i2c_cx25840_hack_state = state; | ||
285 | return -EIO; | ||
286 | } | ||
287 | |||
288 | #endif /* CONFIG_VIDEO_PVRUSB2_24XXX */ | ||
289 | |||
290 | /* This is a very, very limited I2C adapter implementation. We can only | ||
291 | support what we actually know will work on the device... */ | ||
292 | static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
293 | struct i2c_msg msgs[], | ||
294 | int num) | ||
295 | { | ||
296 | int ret = -ENOTSUPP; | ||
297 | pvr2_i2c_func funcp = 0; | ||
298 | struct pvr2_hdw *hdw = (struct pvr2_hdw *)(i2c_adap->algo_data); | ||
299 | |||
300 | if (!num) { | ||
301 | ret = -EINVAL; | ||
302 | goto done; | ||
303 | } | ||
304 | if ((msgs[0].flags & I2C_M_NOSTART)) { | ||
305 | trace_i2c("i2c refusing I2C_M_NOSTART"); | ||
306 | goto done; | ||
307 | } | ||
308 | if (msgs[0].addr < PVR2_I2C_FUNC_CNT) { | ||
309 | funcp = hdw->i2c_func[msgs[0].addr]; | ||
310 | } | ||
311 | if (!funcp) { | ||
312 | ret = -EIO; | ||
313 | goto done; | ||
314 | } | ||
315 | |||
316 | if (num == 1) { | ||
317 | if (msgs[0].flags & I2C_M_RD) { | ||
318 | /* Simple read */ | ||
319 | u16 tcnt,bcnt,offs; | ||
320 | if (!msgs[0].len) { | ||
321 | /* Length == 0 read. This is a probe. */ | ||
322 | if (funcp(hdw,msgs[0].addr,0,0,0,0)) { | ||
323 | ret = -EIO; | ||
324 | goto done; | ||
325 | } | ||
326 | ret = 1; | ||
327 | goto done; | ||
328 | } | ||
329 | /* If the read is short enough we'll do the whole | ||
330 | thing atomically. Otherwise we have no choice | ||
331 | but to break apart the reads. */ | ||
332 | tcnt = msgs[0].len; | ||
333 | offs = 0; | ||
334 | while (tcnt) { | ||
335 | bcnt = tcnt; | ||
336 | if (bcnt > sizeof(hdw->cmd_buffer)-1) { | ||
337 | bcnt = sizeof(hdw->cmd_buffer)-1; | ||
338 | } | ||
339 | if (funcp(hdw,msgs[0].addr,0,0, | ||
340 | msgs[0].buf+offs,bcnt)) { | ||
341 | ret = -EIO; | ||
342 | goto done; | ||
343 | } | ||
344 | offs += bcnt; | ||
345 | tcnt -= bcnt; | ||
346 | } | ||
347 | ret = 1; | ||
348 | goto done; | ||
349 | } else { | ||
350 | /* Simple write */ | ||
351 | ret = 1; | ||
352 | if (funcp(hdw,msgs[0].addr, | ||
353 | msgs[0].buf,msgs[0].len,0,0)) { | ||
354 | ret = -EIO; | ||
355 | } | ||
356 | goto done; | ||
357 | } | ||
358 | } else if (num == 2) { | ||
359 | if (msgs[0].addr != msgs[1].addr) { | ||
360 | trace_i2c("i2c refusing 2 phase transfer with" | ||
361 | " conflicting target addresses"); | ||
362 | ret = -ENOTSUPP; | ||
363 | goto done; | ||
364 | } | ||
365 | if ((!((msgs[0].flags & I2C_M_RD))) && | ||
366 | (msgs[1].flags & I2C_M_RD)) { | ||
367 | u16 tcnt,bcnt,wcnt,offs; | ||
368 | /* Write followed by atomic read. If the read | ||
369 | portion is short enough we'll do the whole thing | ||
370 | atomically. Otherwise we have no choice but to | ||
371 | break apart the reads. */ | ||
372 | tcnt = msgs[1].len; | ||
373 | wcnt = msgs[0].len; | ||
374 | offs = 0; | ||
375 | while (tcnt || wcnt) { | ||
376 | bcnt = tcnt; | ||
377 | if (bcnt > sizeof(hdw->cmd_buffer)-1) { | ||
378 | bcnt = sizeof(hdw->cmd_buffer)-1; | ||
379 | } | ||
380 | if (funcp(hdw,msgs[0].addr, | ||
381 | msgs[0].buf,wcnt, | ||
382 | msgs[1].buf+offs,bcnt)) { | ||
383 | ret = -EIO; | ||
384 | goto done; | ||
385 | } | ||
386 | offs += bcnt; | ||
387 | tcnt -= bcnt; | ||
388 | wcnt = 0; | ||
389 | } | ||
390 | ret = 2; | ||
391 | goto done; | ||
392 | } else { | ||
393 | trace_i2c("i2c refusing complex transfer" | ||
394 | " read0=%d read1=%d", | ||
395 | (msgs[0].flags & I2C_M_RD), | ||
396 | (msgs[1].flags & I2C_M_RD)); | ||
397 | } | ||
398 | } else { | ||
399 | trace_i2c("i2c refusing %d phase transfer",num); | ||
400 | } | ||
401 | |||
402 | done: | ||
403 | if (pvrusb2_debug & PVR2_TRACE_I2C_TRAF) { | ||
404 | unsigned int idx,offs,cnt; | ||
405 | for (idx = 0; idx < num; idx++) { | ||
406 | cnt = msgs[idx].len; | ||
407 | printk(KERN_INFO | ||
408 | "pvrusb2 i2c xfer %u/%u:" | ||
409 | " addr=0x%x len=%d %s%s", | ||
410 | idx+1,num, | ||
411 | msgs[idx].addr, | ||
412 | cnt, | ||
413 | (msgs[idx].flags & I2C_M_RD ? | ||
414 | "read" : "write"), | ||
415 | (msgs[idx].flags & I2C_M_NOSTART ? | ||
416 | " nostart" : "")); | ||
417 | if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) { | ||
418 | if (cnt > 8) cnt = 8; | ||
419 | printk(" ["); | ||
420 | for (offs = 0; offs < (cnt>8?8:cnt); offs++) { | ||
421 | if (offs) printk(" "); | ||
422 | printk("%02x",msgs[idx].buf[offs]); | ||
423 | } | ||
424 | if (offs < cnt) printk(" ..."); | ||
425 | printk("]"); | ||
426 | } | ||
427 | if (idx+1 == num) { | ||
428 | printk(" result=%d",ret); | ||
429 | } | ||
430 | printk("\n"); | ||
431 | } | ||
432 | if (!num) { | ||
433 | printk(KERN_INFO | ||
434 | "pvrusb2 i2c xfer null transfer result=%d\n", | ||
435 | ret); | ||
436 | } | ||
437 | } | ||
438 | return ret; | ||
439 | } | ||
440 | |||
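From a client chip driver's point of view, the only transfer shapes this adapter accepts are a single write, a single read, or a write immediately followed by a read from the same address; anything else is refused. A sketch of the write-then-read case (address, lengths and data are illustrative):

        u8 wbuf[2] = { 0x01, 0x00 }, rbuf[1];
        struct i2c_msg wr_rd[2] = {
                { .addr = 0x44, .flags = 0,        .len = 2, .buf = wbuf },
                { .addr = 0x44, .flags = I2C_M_RD, .len = 1, .buf = rbuf },
        };
        /* i2c_transfer(adap, wr_rd, 2) is serviced by pvr2_i2c_read() above */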
441 | static int pvr2_i2c_control(struct i2c_adapter *adapter, | ||
442 | unsigned int cmd, unsigned long arg) | ||
443 | { | ||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static u32 pvr2_i2c_functionality(struct i2c_adapter *adap) | ||
448 | { | ||
449 | return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA; | ||
450 | } | ||
451 | |||
452 | static int pvr2_i2c_core_singleton(struct i2c_client *cp, | ||
453 | unsigned int cmd,void *arg) | ||
454 | { | ||
455 | int stat; | ||
456 | if (!cp) return -EINVAL; | ||
457 | if (!(cp->driver)) return -EINVAL; | ||
458 | if (!(cp->driver->command)) return -EINVAL; | ||
459 | if (!try_module_get(cp->driver->driver.owner)) return -EAGAIN; | ||
460 | stat = cp->driver->command(cp,cmd,arg); | ||
461 | module_put(cp->driver->driver.owner); | ||
462 | return stat; | ||
463 | } | ||
464 | |||
465 | int pvr2_i2c_client_cmd(struct pvr2_i2c_client *cp,unsigned int cmd,void *arg) | ||
466 | { | ||
467 | int stat; | ||
468 | if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) { | ||
469 | char buf[100]; | ||
470 | unsigned int cnt; | ||
471 | cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG, | ||
472 | buf,sizeof(buf)); | ||
473 | pvr2_trace(PVR2_TRACE_I2C_CMD, | ||
474 | "i2c COMMAND (code=%u 0x%x) to %.*s", | ||
475 | cmd,cmd,cnt,buf); | ||
476 | } | ||
477 | stat = pvr2_i2c_core_singleton(cp->client,cmd,arg); | ||
478 | if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) { | ||
479 | char buf[100]; | ||
480 | unsigned int cnt; | ||
481 | cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG, | ||
482 | buf,sizeof(buf)); | ||
483 | pvr2_trace(PVR2_TRACE_I2C_CMD, | ||
484 | "i2c COMMAND to %.*s (ret=%d)",cnt,buf,stat); | ||
485 | } | ||
486 | return stat; | ||
487 | } | ||
488 | |||
489 | int pvr2_i2c_core_cmd(struct pvr2_hdw *hdw,unsigned int cmd,void *arg) | ||
490 | { | ||
491 | struct list_head *item,*nc; | ||
492 | struct pvr2_i2c_client *cp; | ||
493 | int stat = -EINVAL; | ||
494 | |||
495 | if (!hdw) return stat; | ||
496 | |||
497 | mutex_lock(&hdw->i2c_list_lock); | ||
498 | list_for_each_safe(item,nc,&hdw->i2c_clients) { | ||
499 | cp = list_entry(item,struct pvr2_i2c_client,list); | ||
500 | if (!cp->recv_enable) continue; | ||
501 | mutex_unlock(&hdw->i2c_list_lock); | ||
502 | stat = pvr2_i2c_client_cmd(cp,cmd,arg); | ||
503 | mutex_lock(&hdw->i2c_list_lock); | ||
504 | } | ||
505 | mutex_unlock(&hdw->i2c_list_lock); | ||
506 | return stat; | ||
507 | } | ||
508 | |||
509 | |||
510 | static int handler_check(struct pvr2_i2c_client *cp) | ||
511 | { | ||
512 | struct pvr2_i2c_handler *hp = cp->handler; | ||
513 | if (!hp) return 0; | ||
514 | if (!hp->func_table->check) return 0; | ||
515 | return hp->func_table->check(hp->func_data) != 0; | ||
516 | } | ||
517 | |||
518 | #define BUFSIZE 500 | ||
519 | |||
520 | void pvr2_i2c_core_sync(struct pvr2_hdw *hdw) | ||
521 | { | ||
522 | unsigned long msk; | ||
523 | unsigned int idx; | ||
524 | struct list_head *item,*nc; | ||
525 | struct pvr2_i2c_client *cp; | ||
526 | |||
527 | if (!hdw->i2c_linked) return; | ||
528 | if (!(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL)) { | ||
529 | return; | ||
530 | } | ||
531 | mutex_lock(&hdw->i2c_list_lock); do { | ||
532 | pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync BEGIN"); | ||
533 | if (hdw->i2c_pend_types & PVR2_I2C_PEND_DETECT) { | ||
534 | /* One or more I2C clients have attached since we | ||
535 | last synced. So scan the list and identify the | ||
536 | new clients. */ | ||
537 | char *buf; | ||
538 | unsigned int cnt; | ||
539 | unsigned long amask = 0; | ||
540 | buf = kmalloc(BUFSIZE,GFP_KERNEL); | ||
541 | pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_DETECT"); | ||
542 | hdw->i2c_pend_types &= ~PVR2_I2C_PEND_DETECT; | ||
543 | list_for_each(item,&hdw->i2c_clients) { | ||
544 | cp = list_entry(item,struct pvr2_i2c_client, | ||
545 | list); | ||
546 | if (!cp->detected_flag) { | ||
547 | cp->ctl_mask = 0; | ||
548 | pvr2_i2c_probe(hdw,cp); | ||
549 | cp->detected_flag = !0; | ||
550 | msk = cp->ctl_mask; | ||
551 | cnt = 0; | ||
552 | if (buf) { | ||
553 | cnt = pvr2_i2c_client_describe( | ||
554 | cp, | ||
555 | PVR2_I2C_DETAIL_ALL, | ||
556 | buf,BUFSIZE); | ||
557 | } | ||
558 | trace_i2c("Probed: %.*s",cnt,buf); | ||
559 | if (handler_check(cp)) { | ||
560 | hdw->i2c_pend_types |= | ||
561 | PVR2_I2C_PEND_CLIENT; | ||
562 | } | ||
563 | cp->pend_mask = msk; | ||
564 | hdw->i2c_pend_mask |= msk; | ||
565 | hdw->i2c_pend_types |= | ||
566 | PVR2_I2C_PEND_REFRESH; | ||
567 | } | ||
568 | amask |= cp->ctl_mask; | ||
569 | } | ||
570 | hdw->i2c_active_mask = amask; | ||
571 | if (buf) kfree(buf); | ||
572 | } | ||
573 | if (hdw->i2c_pend_types & PVR2_I2C_PEND_STALE) { | ||
574 | /* Need to do one or more global updates. Arrange | ||
575 | for this to happen. */ | ||
576 | unsigned long m2; | ||
577 | pvr2_trace(PVR2_TRACE_I2C_CORE, | ||
578 | "i2c: PEND_STALE (0x%lx)", | ||
579 | hdw->i2c_stale_mask); | ||
580 | hdw->i2c_pend_types &= ~PVR2_I2C_PEND_STALE; | ||
581 | list_for_each(item,&hdw->i2c_clients) { | ||
582 | cp = list_entry(item,struct pvr2_i2c_client, | ||
583 | list); | ||
584 | m2 = hdw->i2c_stale_mask; | ||
585 | m2 &= cp->ctl_mask; | ||
586 | m2 &= ~cp->pend_mask; | ||
587 | if (m2) { | ||
588 | pvr2_trace(PVR2_TRACE_I2C_CORE, | ||
589 | "i2c: cp=%p setting 0x%lx", | ||
590 | cp,m2); | ||
591 | cp->pend_mask |= m2; | ||
592 | } | ||
593 | } | ||
594 | hdw->i2c_pend_mask |= hdw->i2c_stale_mask; | ||
595 | hdw->i2c_stale_mask = 0; | ||
596 | hdw->i2c_pend_types |= PVR2_I2C_PEND_REFRESH; | ||
597 | } | ||
598 | if (hdw->i2c_pend_types & PVR2_I2C_PEND_CLIENT) { | ||
599 | /* One or more client handlers are asking for an | ||
600 | update. Run through the list of known clients | ||
601 | and update each one. */ | ||
602 | pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_CLIENT"); | ||
603 | hdw->i2c_pend_types &= ~PVR2_I2C_PEND_CLIENT; | ||
604 | list_for_each_safe(item,nc,&hdw->i2c_clients) { | ||
605 | cp = list_entry(item,struct pvr2_i2c_client, | ||
606 | list); | ||
607 | if (!cp->handler) continue; | ||
608 | if (!cp->handler->func_table->update) continue; | ||
609 | pvr2_trace(PVR2_TRACE_I2C_CORE, | ||
610 | "i2c: cp=%p update",cp); | ||
611 | mutex_unlock(&hdw->i2c_list_lock); | ||
612 | cp->handler->func_table->update( | ||
613 | cp->handler->func_data); | ||
614 | mutex_lock(&hdw->i2c_list_lock); | ||
615 | /* If client's update function set some | ||
616 | additional pending bits, account for that | ||
617 | here. */ | ||
618 | if (cp->pend_mask & ~hdw->i2c_pend_mask) { | ||
619 | hdw->i2c_pend_mask |= cp->pend_mask; | ||
620 | hdw->i2c_pend_types |= | ||
621 | PVR2_I2C_PEND_REFRESH; | ||
622 | } | ||
623 | } | ||
624 | } | ||
625 | if (hdw->i2c_pend_types & PVR2_I2C_PEND_REFRESH) { | ||
626 | const struct pvr2_i2c_op *opf; | ||
627 | unsigned long pm; | ||
628 | /* Some actual updates are pending. Walk through | ||
629 | each update type and perform it. */ | ||
630 | pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_REFRESH" | ||
631 | " (0x%lx)",hdw->i2c_pend_mask); | ||
632 | hdw->i2c_pend_types &= ~PVR2_I2C_PEND_REFRESH; | ||
633 | pm = hdw->i2c_pend_mask; | ||
634 | hdw->i2c_pend_mask = 0; | ||
635 | for (idx = 0, msk = 1; pm; idx++, msk <<= 1) { | ||
636 | if (!(pm & msk)) continue; | ||
637 | pm &= ~msk; | ||
638 | list_for_each(item,&hdw->i2c_clients) { | ||
639 | cp = list_entry(item, | ||
640 | struct pvr2_i2c_client, | ||
641 | list); | ||
642 | if (cp->pend_mask & msk) { | ||
643 | cp->pend_mask &= ~msk; | ||
644 | cp->recv_enable = !0; | ||
645 | } else { | ||
646 | cp->recv_enable = 0; | ||
647 | } | ||
648 | } | ||
649 | opf = pvr2_i2c_get_op(idx); | ||
650 | if (!opf) continue; | ||
651 | mutex_unlock(&hdw->i2c_list_lock); | ||
652 | opf->update(hdw); | ||
653 | mutex_lock(&hdw->i2c_list_lock); | ||
654 | } | ||
655 | } | ||
656 | pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync END"); | ||
657 | } while (0); mutex_unlock(&hdw->i2c_list_lock); | ||
658 | } | ||
659 | |||
660 | int pvr2_i2c_core_check_stale(struct pvr2_hdw *hdw) | ||
661 | { | ||
662 | unsigned long msk,sm,pm; | ||
663 | unsigned int idx; | ||
664 | const struct pvr2_i2c_op *opf; | ||
665 | struct list_head *item; | ||
666 | struct pvr2_i2c_client *cp; | ||
667 | unsigned int pt = 0; | ||
668 | |||
669 | pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale BEGIN"); | ||
670 | |||
671 | pm = hdw->i2c_active_mask; | ||
672 | sm = 0; | ||
673 | for (idx = 0, msk = 1; pm; idx++, msk <<= 1) { | ||
674 | if (!(msk & pm)) continue; | ||
675 | pm &= ~msk; | ||
676 | opf = pvr2_i2c_get_op(idx); | ||
677 | if (!opf) continue; | ||
678 | if (opf->check(hdw)) { | ||
679 | sm |= msk; | ||
680 | } | ||
681 | } | ||
682 | if (sm) pt |= PVR2_I2C_PEND_STALE; | ||
683 | |||
684 | list_for_each(item,&hdw->i2c_clients) { | ||
685 | cp = list_entry(item,struct pvr2_i2c_client,list); | ||
686 | if (!handler_check(cp)) continue; | ||
687 | pt |= PVR2_I2C_PEND_CLIENT; | ||
688 | } | ||
689 | |||
690 | if (pt) { | ||
691 | mutex_lock(&hdw->i2c_list_lock); do { | ||
692 | hdw->i2c_pend_types |= pt; | ||
693 | hdw->i2c_stale_mask |= sm; | ||
694 | hdw->i2c_pend_mask |= hdw->i2c_stale_mask; | ||
695 | } while (0); mutex_unlock(&hdw->i2c_list_lock); | ||
696 | } | ||
697 | |||
698 | pvr2_trace(PVR2_TRACE_I2C_CORE, | ||
699 | "i2c: types=0x%x stale=0x%lx pend=0x%lx", | ||
700 | hdw->i2c_pend_types, | ||
701 | hdw->i2c_stale_mask, | ||
702 | hdw->i2c_pend_mask); | ||
703 | pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale END"); | ||
704 | |||
705 | return (hdw->i2c_pend_types & PVR2_I2C_PEND_ALL) != 0; | ||
706 | } | ||
707 | |||
708 | unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *cp, | ||
709 | unsigned int detail, | ||
710 | char *buf,unsigned int maxlen) | ||
711 | { | ||
712 | unsigned int ccnt,bcnt; | ||
713 | int spcfl = 0; | ||
714 | const struct pvr2_i2c_op *opf; | ||
715 | |||
716 | ccnt = 0; | ||
717 | if (detail & PVR2_I2C_DETAIL_DEBUG) { | ||
718 | bcnt = scnprintf(buf,maxlen, | ||
719 | "ctxt=%p ctl_mask=0x%lx", | ||
720 | cp,cp->ctl_mask); | ||
721 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
722 | spcfl = !0; | ||
723 | } | ||
724 | bcnt = scnprintf(buf,maxlen, | ||
725 | "%s%s @ 0x%x", | ||
726 | (spcfl ? " " : ""), | ||
727 | cp->client->name, | ||
728 | cp->client->addr); | ||
729 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
730 | if ((detail & PVR2_I2C_DETAIL_HANDLER) && | ||
731 | cp->handler && cp->handler->func_table->describe) { | ||
732 | bcnt = scnprintf(buf,maxlen," ("); | ||
733 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
734 | bcnt = cp->handler->func_table->describe( | ||
735 | cp->handler->func_data,buf,maxlen); | ||
736 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
737 | bcnt = scnprintf(buf,maxlen,")"); | ||
738 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
739 | } | ||
740 | if ((detail & PVR2_I2C_DETAIL_CTLMASK) && cp->ctl_mask) { | ||
741 | unsigned int idx; | ||
742 | unsigned long msk,sm; | ||
743 | int spcfl; | ||
744 | bcnt = scnprintf(buf,maxlen," ["); | ||
745 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
746 | sm = 0; | ||
747 | spcfl = 0; | ||
748 | for (idx = 0, msk = 1; msk; idx++, msk <<= 1) { | ||
749 | if (!(cp->ctl_mask & msk)) continue; | ||
750 | opf = pvr2_i2c_get_op(idx); | ||
751 | if (opf) { | ||
752 | bcnt = scnprintf(buf,maxlen,"%s%s", | ||
753 | spcfl ? " " : "", | ||
754 | opf->name); | ||
755 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
756 | spcfl = !0; | ||
757 | } else { | ||
758 | sm |= msk; | ||
759 | } | ||
760 | } | ||
761 | if (sm) { | ||
762 | bcnt = scnprintf(buf,maxlen,"%s%lx", | ||
763 | idx != 0 ? " " : "",sm); | ||
764 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
765 | } | ||
766 | bcnt = scnprintf(buf,maxlen,"]"); | ||
767 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
768 | } | ||
769 | return ccnt; | ||
770 | } | ||
771 | |||
772 | unsigned int pvr2_i2c_report(struct pvr2_hdw *hdw, | ||
773 | char *buf,unsigned int maxlen) | ||
774 | { | ||
775 | unsigned int ccnt,bcnt; | ||
776 | struct list_head *item; | ||
777 | struct pvr2_i2c_client *cp; | ||
778 | ccnt = 0; | ||
779 | mutex_lock(&hdw->i2c_list_lock); do { | ||
780 | list_for_each(item,&hdw->i2c_clients) { | ||
781 | cp = list_entry(item,struct pvr2_i2c_client,list); | ||
782 | bcnt = pvr2_i2c_client_describe( | ||
783 | cp, | ||
784 | (PVR2_I2C_DETAIL_HANDLER| | ||
785 | PVR2_I2C_DETAIL_CTLMASK), | ||
786 | buf,maxlen); | ||
787 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
788 | bcnt = scnprintf(buf,maxlen,"\n"); | ||
789 | ccnt += bcnt; buf += bcnt; maxlen -= bcnt; | ||
790 | } | ||
791 | } while (0); mutex_unlock(&hdw->i2c_list_lock); | ||
792 | return ccnt; | ||
793 | } | ||
794 | |||
795 | static int pvr2_i2c_attach_inform(struct i2c_client *client) | ||
796 | { | ||
797 | struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data); | ||
798 | struct pvr2_i2c_client *cp; | ||
799 | int fl = !(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL); | ||
800 | cp = kmalloc(sizeof(*cp),GFP_KERNEL); | ||
801 | trace_i2c("i2c_attach [client=%s @ 0x%x ctxt=%p]", | ||
802 | client->name, | ||
803 | client->addr,cp); | ||
804 | if (!cp) return -ENOMEM; | ||
805 | memset(cp,0,sizeof(*cp)); | ||
806 | INIT_LIST_HEAD(&cp->list); | ||
807 | cp->client = client; | ||
808 | mutex_lock(&hdw->i2c_list_lock); do { | ||
809 | list_add_tail(&cp->list,&hdw->i2c_clients); | ||
810 | hdw->i2c_pend_types |= PVR2_I2C_PEND_DETECT; | ||
811 | } while (0); mutex_unlock(&hdw->i2c_list_lock); | ||
812 | if (fl) pvr2_hdw_poll_trigger_unlocked(hdw); | ||
813 | return 0; | ||
814 | } | ||
815 | |||
816 | static int pvr2_i2c_detach_inform(struct i2c_client *client) | ||
817 | { | ||
818 | struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data); | ||
819 | struct pvr2_i2c_client *cp; | ||
820 | struct list_head *item,*nc; | ||
821 | unsigned long amask = 0; | ||
822 | int foundfl = 0; | ||
823 | mutex_lock(&hdw->i2c_list_lock); do { | ||
824 | list_for_each_safe(item,nc,&hdw->i2c_clients) { | ||
825 | cp = list_entry(item,struct pvr2_i2c_client,list); | ||
826 | if (cp->client == client) { | ||
827 | trace_i2c("pvr2_i2c_detach" | ||
828 | " [client=%s @ 0x%x ctxt=%p]", | ||
829 | client->name, | ||
830 | client->addr,cp); | ||
831 | if (cp->handler && | ||
832 | cp->handler->func_table->detach) { | ||
833 | cp->handler->func_table->detach( | ||
834 | cp->handler->func_data); | ||
835 | } | ||
836 | list_del(&cp->list); | ||
837 | kfree(cp); | ||
838 | foundfl = !0; | ||
839 | continue; | ||
840 | } | ||
841 | amask |= cp->ctl_mask; | ||
842 | } | ||
843 | hdw->i2c_active_mask = amask; | ||
844 | } while (0); mutex_unlock(&hdw->i2c_list_lock); | ||
845 | if (!foundfl) { | ||
846 | trace_i2c("pvr2_i2c_detach [client=%s @ 0x%x ctxt=<unknown>]", | ||
847 | client->name, | ||
848 | client->addr); | ||
849 | } | ||
850 | return 0; | ||
851 | } | ||
852 | |||
853 | static struct i2c_algorithm pvr2_i2c_algo_template = { | ||
854 | .master_xfer = pvr2_i2c_xfer, | ||
855 | .algo_control = pvr2_i2c_control, | ||
856 | .functionality = pvr2_i2c_functionality, | ||
857 | }; | ||
858 | |||
859 | static struct i2c_adapter pvr2_i2c_adap_template = { | ||
860 | .owner = THIS_MODULE, | ||
861 | .class = I2C_CLASS_TV_ANALOG, | ||
862 | .id = I2C_HW_B_BT848, | ||
863 | .client_register = pvr2_i2c_attach_inform, | ||
864 | .client_unregister = pvr2_i2c_detach_inform, | ||
865 | }; | ||
866 | |||
867 | static void do_i2c_scan(struct pvr2_hdw *hdw) | ||
868 | { | ||
869 | struct i2c_msg msg[1]; | ||
870 | int i,rc; | ||
871 | msg[0].addr = 0; | ||
872 | msg[0].flags = I2C_M_RD; | ||
873 | msg[0].len = 0; | ||
874 | msg[0].buf = 0; | ||
875 | printk("%s: i2c scan beginning\n",hdw->name); | ||
876 | for (i = 0; i < 128; i++) { | ||
877 | msg[0].addr = i; | ||
878 | rc = i2c_transfer(&hdw->i2c_adap,msg, | ||
879 | sizeof(msg)/sizeof(msg[0])); | ||
880 | if (rc != 1) continue; | ||
881 | printk("%s: i2c scan: found device @ 0x%x\n",hdw->name,i); | ||
882 | } | ||
883 | printk("%s: i2c scan done.\n",hdw->name); | ||
884 | } | ||
885 | |||
886 | void pvr2_i2c_core_init(struct pvr2_hdw *hdw) | ||
887 | { | ||
888 | unsigned int idx; | ||
889 | |||
890 | // The default action for all possible I2C addresses is just to do | ||
891 | // the transfer normally. | ||
892 | for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) { | ||
893 | hdw->i2c_func[idx] = pvr2_i2c_basic_op; | ||
894 | } | ||
895 | |||
896 | #ifdef CONFIG_VIDEO_PVRUSB2_24XXX | ||
897 | // If however we're dealing with new hardware, insert some hacks in | ||
898 | // the I2C transfer stack to let things work better. | ||
899 | if (hdw->hdw_type == PVR2_HDW_TYPE_24XXX) { | ||
900 | hdw->i2c_func[0x1b] = i2c_hack_wm8775; | ||
901 | hdw->i2c_func[0x44] = i2c_hack_cx25840; | ||
902 | } | ||
903 | #endif | ||
904 | |||
905 | // Configure the adapter and set up everything else related to it. | ||
906 | memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap)); | ||
907 | memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo)); | ||
908 | strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name)); | ||
909 | hdw->i2c_adap.algo = &hdw->i2c_algo; | ||
910 | hdw->i2c_adap.algo_data = hdw; | ||
911 | hdw->i2c_pend_mask = 0; | ||
912 | hdw->i2c_stale_mask = 0; | ||
913 | hdw->i2c_active_mask = 0; | ||
914 | INIT_LIST_HEAD(&hdw->i2c_clients); | ||
915 | mutex_init(&hdw->i2c_list_lock); | ||
916 | hdw->i2c_linked = !0; | ||
917 | i2c_add_adapter(&hdw->i2c_adap); | ||
918 | if (i2c_scan) do_i2c_scan(hdw); | ||
919 | } | ||
920 | |||
921 | void pvr2_i2c_core_done(struct pvr2_hdw *hdw) | ||
922 | { | ||
923 | if (hdw->i2c_linked) { | ||
924 | i2c_del_adapter(&hdw->i2c_adap); | ||
925 | hdw->i2c_linked = 0; | ||
926 | } | ||
927 | } | ||
928 | |||
929 | /* | ||
930 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
931 | *** Local Variables: *** | ||
932 | *** mode: c *** | ||
933 | *** fill-column: 75 *** | ||
934 | *** tab-width: 8 *** | ||
935 | *** c-basic-offset: 8 *** | ||
936 | *** End: *** | ||
937 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h new file mode 100644 index 000000000000..e8af5b0ed3ce --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_I2C_CORE_H | ||
22 | #define __PVRUSB2_I2C_CORE_H | ||
23 | |||
24 | #include <linux/list.h> | ||
25 | #include <linux/i2c.h> | ||
26 | |||
27 | struct pvr2_hdw; | ||
28 | struct pvr2_i2c_client; | ||
29 | struct pvr2_i2c_handler; | ||
30 | struct pvr2_i2c_handler_functions; | ||
31 | struct pvr2_i2c_op; | ||
32 | struct pvr2_i2c_op_functions; | ||
33 | |||
34 | struct pvr2_i2c_client { | ||
35 | struct i2c_client *client; | ||
36 | struct pvr2_i2c_handler *handler; | ||
37 | struct list_head list; | ||
38 | int detected_flag; | ||
39 | int recv_enable; | ||
40 | unsigned long pend_mask; | ||
41 | unsigned long ctl_mask; | ||
42 | }; | ||
43 | |||
44 | struct pvr2_i2c_handler { | ||
45 | void *func_data; | ||
46 | const struct pvr2_i2c_handler_functions *func_table; | ||
47 | }; | ||
48 | |||
49 | struct pvr2_i2c_handler_functions { | ||
50 | void (*detach)(void *); | ||
51 | int (*check)(void *); | ||
52 | void (*update)(void *); | ||
53 | unsigned int (*describe)(void *,char *,unsigned int); | ||
54 | }; | ||
55 | |||
56 | struct pvr2_i2c_op { | ||
57 | int (*check)(struct pvr2_hdw *); | ||
58 | void (*update)(struct pvr2_hdw *); | ||
59 | const char *name; | ||
60 | }; | ||
61 | |||
62 | void pvr2_i2c_core_init(struct pvr2_hdw *); | ||
63 | void pvr2_i2c_core_done(struct pvr2_hdw *); | ||
64 | |||
65 | int pvr2_i2c_client_cmd(struct pvr2_i2c_client *,unsigned int cmd,void *arg); | ||
66 | int pvr2_i2c_core_cmd(struct pvr2_hdw *,unsigned int cmd,void *arg); | ||
67 | |||
68 | int pvr2_i2c_core_check_stale(struct pvr2_hdw *); | ||
69 | void pvr2_i2c_core_sync(struct pvr2_hdw *); | ||
70 | unsigned int pvr2_i2c_report(struct pvr2_hdw *,char *buf,unsigned int maxlen); | ||
71 | #define PVR2_I2C_DETAIL_DEBUG 0x0001 | ||
72 | #define PVR2_I2C_DETAIL_HANDLER 0x0002 | ||
73 | #define PVR2_I2C_DETAIL_CTLMASK 0x0004 | ||
74 | #define PVR2_I2C_DETAIL_ALL (\ | ||
75 | PVR2_I2C_DETAIL_DEBUG |\ | ||
76 | PVR2_I2C_DETAIL_HANDLER |\ | ||
77 | PVR2_I2C_DETAIL_CTLMASK) | ||
78 | unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *, | ||
79 | unsigned int detail_mask, | ||
80 | char *buf,unsigned int maxlen); | ||
81 | |||
82 | void pvr2_i2c_probe(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
83 | const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx); | ||
84 | |||
85 | #endif /* __PVRUSB2_I2C_CORE_H */ | ||
86 | |||
87 | |||
88 | /* | ||
89 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
90 | *** Local Variables: *** | ||
91 | *** mode: c *** | ||
92 | *** fill-column: 75 *** | ||
93 | *** tab-width: 8 *** | ||
94 | *** c-basic-offset: 8 *** | ||
95 | *** End: *** | ||
96 | */ | ||
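The header above defines the plug-in interface that pvr2_i2c_core_sync() drives: a chip-specific handler exposes check/update/describe/detach callbacks through struct pvr2_i2c_handler_functions, and the core calls update() whenever check() reports pending work or a stale control mask selects the client. As a rough sketch of what such a handler might look like, the function table below is hypothetical (the "demo" names and the private context structure are not part of the driver); only the structure layouts and callback signatures come from the header above.

#include <linux/kernel.h>
#include <linux/slab.h>
#include "pvrusb2-i2c-core.h"

/* Hypothetical chip-private state; a real handler would track the
   controls it owns and whatever chip registers need refreshing. */
struct demo_state {
	struct pvr2_i2c_client *client;
	int dirty;
};

static int demo_check(void *vp)
{
	/* Non-zero tells the core that update() should be called. */
	return ((struct demo_state *)vp)->dirty;
}

static void demo_update(void *vp)
{
	/* Push pending settings to the chip, then clear the dirty flag. */
	((struct demo_state *)vp)->dirty = 0;
}

static void demo_detach(void *vp)
{
	/* Client is being unregistered; release the private state. */
	kfree(vp);
}

static unsigned int demo_describe(void *vp, char *buf, unsigned int cnt)
{
	return scnprintf(buf, cnt, "demo");
}

static const struct pvr2_i2c_handler_functions demo_funcs = {
	.detach   = demo_detach,
	.check    = demo_check,
	.update   = demo_update,
	.describe = demo_describe,
};

A chip probe routine would then, presumably from within pvr2_i2c_probe(), allocate a struct pvr2_i2c_handler with .func_table = &demo_funcs and .func_data pointing at the demo_state, store it in the client's handler field and set the client's ctl_mask; from that point pvr2_i2c_core_sync() and pvr2_i2c_core_check_stale() above take care of invoking the callbacks.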
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.c b/drivers/media/video/pvrusb2/pvrusb2-io.c new file mode 100644 index 000000000000..a984c91f571c --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-io.c | |||
@@ -0,0 +1,695 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "pvrusb2-io.h" | ||
23 | #include "pvrusb2-debug.h" | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/mutex.h> | ||
28 | |||
29 | #define BUFFER_SIG 0x47653271 | ||
30 | |||
31 | // #define SANITY_CHECK_BUFFERS | ||
32 | |||
33 | |||
34 | #ifdef SANITY_CHECK_BUFFERS | ||
35 | #define BUFFER_CHECK(bp) do { \ | ||
36 | if ((bp)->signature != BUFFER_SIG) { \ | ||
37 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, \ | ||
38 | "Buffer %p is bad at %s:%d", \ | ||
39 | (bp),__FILE__,__LINE__); \ | ||
40 | pvr2_buffer_describe(bp,"BadSig"); \ | ||
41 | BUG(); \ | ||
42 | } \ | ||
43 | } while (0) | ||
44 | #else | ||
45 | #define BUFFER_CHECK(bp) do {} while(0) | ||
46 | #endif | ||
47 | |||
48 | struct pvr2_stream { | ||
49 | /* Buffers queued for reading */ | ||
50 | struct list_head queued_list; | ||
51 | unsigned int q_count; | ||
52 | unsigned int q_bcount; | ||
53 | /* Buffers with retrieved data */ | ||
54 | struct list_head ready_list; | ||
55 | unsigned int r_count; | ||
56 | unsigned int r_bcount; | ||
57 | /* Buffers available for use */ | ||
58 | struct list_head idle_list; | ||
59 | unsigned int i_count; | ||
60 | unsigned int i_bcount; | ||
61 | /* Pointers to all buffers */ | ||
62 | struct pvr2_buffer **buffers; | ||
63 | /* Array size of buffers */ | ||
64 | unsigned int buffer_slot_count; | ||
65 | /* Total buffers actually in circulation */ | ||
66 | unsigned int buffer_total_count; | ||
68 | /* Target number of buffers to keep in circulation */ | ||
68 | unsigned int buffer_target_count; | ||
70 | /* Executed when ready list becomes non-empty */ | ||
70 | pvr2_stream_callback callback_func; | ||
71 | void *callback_data; | ||
72 | /* Context for transfer endpoint */ | ||
73 | struct usb_device *dev; | ||
74 | int endpoint; | ||
75 | /* Overhead for mutex enforcement */ | ||
76 | spinlock_t list_lock; | ||
77 | struct mutex mutex; | ||
78 | /* Tracking state for tolerating errors */ | ||
79 | unsigned int fail_count; | ||
80 | unsigned int fail_tolerance; | ||
81 | }; | ||
82 | |||
83 | struct pvr2_buffer { | ||
84 | int id; | ||
85 | int signature; | ||
86 | enum pvr2_buffer_state state; | ||
87 | void *ptr; /* Pointer to storage area */ | ||
88 | unsigned int max_count; /* Size of storage area */ | ||
89 | unsigned int used_count; /* Amount of valid data in storage area */ | ||
90 | int status; /* Transfer result status */ | ||
91 | struct pvr2_stream *stream; | ||
92 | struct list_head list_overhead; | ||
93 | struct urb *purb; | ||
94 | }; | ||
95 | |||
96 | const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st) | ||
97 | { | ||
98 | switch (st) { | ||
99 | case pvr2_buffer_state_none: return "none"; | ||
100 | case pvr2_buffer_state_idle: return "idle"; | ||
101 | case pvr2_buffer_state_queued: return "queued"; | ||
102 | case pvr2_buffer_state_ready: return "ready"; | ||
103 | } | ||
104 | return "unknown"; | ||
105 | } | ||
106 | |||
107 | void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg) | ||
108 | { | ||
109 | pvr2_trace(PVR2_TRACE_INFO, | ||
110 | "buffer%s%s %p state=%s id=%d status=%d" | ||
111 | " stream=%p purb=%p sig=0x%x", | ||
112 | (msg ? " " : ""), | ||
113 | (msg ? msg : ""), | ||
114 | bp, | ||
115 | (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"), | ||
116 | (bp ? bp->id : 0), | ||
117 | (bp ? bp->status : 0), | ||
118 | (bp ? bp->stream : 0), | ||
119 | (bp ? bp->purb : 0), | ||
120 | (bp ? bp->signature : 0)); | ||
121 | } | ||
122 | |||
123 | static void pvr2_buffer_remove(struct pvr2_buffer *bp) | ||
124 | { | ||
125 | unsigned int *cnt; | ||
126 | unsigned int *bcnt; | ||
127 | unsigned int ccnt; | ||
128 | struct pvr2_stream *sp = bp->stream; | ||
129 | switch (bp->state) { | ||
130 | case pvr2_buffer_state_idle: | ||
131 | cnt = &sp->i_count; | ||
132 | bcnt = &sp->i_bcount; | ||
133 | ccnt = bp->max_count; | ||
134 | break; | ||
135 | case pvr2_buffer_state_queued: | ||
136 | cnt = &sp->q_count; | ||
137 | bcnt = &sp->q_bcount; | ||
138 | ccnt = bp->max_count; | ||
139 | break; | ||
140 | case pvr2_buffer_state_ready: | ||
141 | cnt = &sp->r_count; | ||
142 | bcnt = &sp->r_bcount; | ||
143 | ccnt = bp->used_count; | ||
144 | break; | ||
145 | default: | ||
146 | return; | ||
147 | } | ||
148 | list_del_init(&bp->list_overhead); | ||
149 | (*cnt)--; | ||
150 | (*bcnt) -= ccnt; | ||
151 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
152 | "/*---TRACE_FLOW---*/" | ||
153 | " bufferPool %8s dec cap=%07d cnt=%02d", | ||
154 | pvr2_buffer_state_decode(bp->state),*bcnt,*cnt); | ||
155 | bp->state = pvr2_buffer_state_none; | ||
156 | } | ||
157 | |||
158 | static void pvr2_buffer_set_none(struct pvr2_buffer *bp) | ||
159 | { | ||
160 | unsigned long irq_flags; | ||
161 | struct pvr2_stream *sp; | ||
162 | BUFFER_CHECK(bp); | ||
163 | sp = bp->stream; | ||
164 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
165 | "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", | ||
166 | bp, | ||
167 | pvr2_buffer_state_decode(bp->state), | ||
168 | pvr2_buffer_state_decode(pvr2_buffer_state_none)); | ||
169 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
170 | pvr2_buffer_remove(bp); | ||
171 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
172 | } | ||
173 | |||
174 | static int pvr2_buffer_set_ready(struct pvr2_buffer *bp) | ||
175 | { | ||
176 | int fl; | ||
177 | unsigned long irq_flags; | ||
178 | struct pvr2_stream *sp; | ||
179 | BUFFER_CHECK(bp); | ||
180 | sp = bp->stream; | ||
181 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
182 | "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", | ||
183 | bp, | ||
184 | pvr2_buffer_state_decode(bp->state), | ||
185 | pvr2_buffer_state_decode(pvr2_buffer_state_ready)); | ||
186 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
187 | fl = (sp->r_count == 0); | ||
188 | pvr2_buffer_remove(bp); | ||
189 | list_add_tail(&bp->list_overhead,&sp->ready_list); | ||
190 | bp->state = pvr2_buffer_state_ready; | ||
191 | (sp->r_count)++; | ||
192 | sp->r_bcount += bp->used_count; | ||
193 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
194 | "/*---TRACE_FLOW---*/" | ||
195 | " bufferPool %8s inc cap=%07d cnt=%02d", | ||
196 | pvr2_buffer_state_decode(bp->state), | ||
197 | sp->r_bcount,sp->r_count); | ||
198 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
199 | return fl; | ||
200 | } | ||
201 | |||
202 | static void pvr2_buffer_set_idle(struct pvr2_buffer *bp) | ||
203 | { | ||
204 | unsigned long irq_flags; | ||
205 | struct pvr2_stream *sp; | ||
206 | BUFFER_CHECK(bp); | ||
207 | sp = bp->stream; | ||
208 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
209 | "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", | ||
210 | bp, | ||
211 | pvr2_buffer_state_decode(bp->state), | ||
212 | pvr2_buffer_state_decode(pvr2_buffer_state_idle)); | ||
213 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
214 | pvr2_buffer_remove(bp); | ||
215 | list_add_tail(&bp->list_overhead,&sp->idle_list); | ||
216 | bp->state = pvr2_buffer_state_idle; | ||
217 | (sp->i_count)++; | ||
218 | sp->i_bcount += bp->max_count; | ||
219 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
220 | "/*---TRACE_FLOW---*/" | ||
221 | " bufferPool %8s inc cap=%07d cnt=%02d", | ||
222 | pvr2_buffer_state_decode(bp->state), | ||
223 | sp->i_bcount,sp->i_count); | ||
224 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
225 | } | ||
226 | |||
227 | static void pvr2_buffer_set_queued(struct pvr2_buffer *bp) | ||
228 | { | ||
229 | unsigned long irq_flags; | ||
230 | struct pvr2_stream *sp; | ||
231 | BUFFER_CHECK(bp); | ||
232 | sp = bp->stream; | ||
233 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
234 | "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", | ||
235 | bp, | ||
236 | pvr2_buffer_state_decode(bp->state), | ||
237 | pvr2_buffer_state_decode(pvr2_buffer_state_queued)); | ||
238 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
239 | pvr2_buffer_remove(bp); | ||
240 | list_add_tail(&bp->list_overhead,&sp->queued_list); | ||
241 | bp->state = pvr2_buffer_state_queued; | ||
242 | (sp->q_count)++; | ||
243 | sp->q_bcount += bp->max_count; | ||
244 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
245 | "/*---TRACE_FLOW---*/" | ||
246 | " bufferPool %8s inc cap=%07d cnt=%02d", | ||
247 | pvr2_buffer_state_decode(bp->state), | ||
248 | sp->q_bcount,sp->q_count); | ||
249 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
250 | } | ||
251 | |||
252 | static void pvr2_buffer_wipe(struct pvr2_buffer *bp) | ||
253 | { | ||
254 | if (bp->state == pvr2_buffer_state_queued) { | ||
255 | usb_kill_urb(bp->purb); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | static int pvr2_buffer_init(struct pvr2_buffer *bp, | ||
260 | struct pvr2_stream *sp, | ||
261 | unsigned int id) | ||
262 | { | ||
263 | memset(bp,0,sizeof(*bp)); | ||
264 | bp->signature = BUFFER_SIG; | ||
265 | bp->id = id; | ||
266 | pvr2_trace(PVR2_TRACE_BUF_POOL, | ||
267 | "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp); | ||
268 | bp->stream = sp; | ||
269 | bp->state = pvr2_buffer_state_none; | ||
270 | INIT_LIST_HEAD(&bp->list_overhead); | ||
271 | bp->purb = usb_alloc_urb(0,GFP_KERNEL); | ||
272 | if (! bp->purb) return -ENOMEM; | ||
273 | #ifdef SANITY_CHECK_BUFFERS | ||
274 | pvr2_buffer_describe(bp,"create"); | ||
275 | #endif | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | static void pvr2_buffer_done(struct pvr2_buffer *bp) | ||
280 | { | ||
281 | #ifdef SANITY_CHECK_BUFFERS | ||
282 | pvr2_buffer_describe(bp,"delete"); | ||
283 | #endif | ||
284 | pvr2_buffer_wipe(bp); | ||
285 | pvr2_buffer_set_none(bp); | ||
286 | bp->signature = 0; | ||
287 | bp->stream = 0; | ||
288 | if (bp->purb) usb_free_urb(bp->purb); | ||
289 | pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/" | ||
290 | " bufferDone %p",bp); | ||
291 | } | ||
292 | |||
293 | static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt) | ||
294 | { | ||
295 | int ret; | ||
296 | unsigned int scnt; | ||
297 | |||
298 | /* Allocate buffers pointer array in multiples of 32 entries */ | ||
299 | if (cnt == sp->buffer_total_count) return 0; | ||
300 | |||
301 | pvr2_trace(PVR2_TRACE_BUF_POOL, | ||
302 | "/*---TRACE_FLOW---*/ poolResize " | ||
303 | " stream=%p cur=%d adj=%+d", | ||
304 | sp, | ||
305 | sp->buffer_total_count, | ||
306 | cnt-sp->buffer_total_count); | ||
307 | |||
308 | scnt = cnt & ~0x1f; | ||
309 | if (cnt > scnt) scnt += 0x20; | ||
310 | |||
311 | if (cnt > sp->buffer_total_count) { | ||
312 | if (scnt > sp->buffer_slot_count) { | ||
313 | struct pvr2_buffer **nb; | ||
314 | nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL); | ||
315 | if (!nb) return -ENOMEM; | ||
316 | if (sp->buffer_slot_count) { | ||
317 | memcpy(nb,sp->buffers, | ||
318 | sp->buffer_slot_count * sizeof(*nb)); | ||
319 | kfree(sp->buffers); | ||
320 | } | ||
321 | sp->buffers = nb; | ||
322 | sp->buffer_slot_count = scnt; | ||
323 | } | ||
324 | while (sp->buffer_total_count < cnt) { | ||
325 | struct pvr2_buffer *bp; | ||
326 | bp = kmalloc(sizeof(*bp),GFP_KERNEL); | ||
327 | if (!bp) return -ENOMEM; | ||
328 | ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count); | ||
329 | if (ret) { | ||
330 | kfree(bp); | ||
331 | return -ENOMEM; | ||
332 | } | ||
333 | sp->buffers[sp->buffer_total_count] = bp; | ||
334 | (sp->buffer_total_count)++; | ||
335 | pvr2_buffer_set_idle(bp); | ||
336 | } | ||
337 | } else { | ||
338 | while (sp->buffer_total_count > cnt) { | ||
339 | struct pvr2_buffer *bp; | ||
340 | bp = sp->buffers[sp->buffer_total_count - 1]; | ||
341 | /* Paranoia */ | ||
342 | sp->buffers[sp->buffer_total_count - 1] = 0; | ||
343 | (sp->buffer_total_count)--; | ||
344 | pvr2_buffer_done(bp); | ||
345 | kfree(bp); | ||
346 | } | ||
347 | if (scnt < sp->buffer_slot_count) { | ||
348 | struct pvr2_buffer **nb = 0; | ||
349 | if (scnt) { | ||
350 | nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL); | ||
351 | if (!nb) return -ENOMEM; | ||
352 | memcpy(nb,sp->buffers,scnt * sizeof(*nb)); | ||
353 | } | ||
354 | kfree(sp->buffers); | ||
355 | sp->buffers = nb; | ||
356 | sp->buffer_slot_count = scnt; | ||
357 | } | ||
358 | } | ||
359 | return 0; | ||
360 | } | ||
361 | |||
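pvr2_stream_buffer_count() above resizes the buffer pointer array in steps of 32 slots so that small adjustments to the target count do not force a reallocation every time. The rounding itself is just "clear the low five bits, then bump to the next multiple if anything was cleared"; a standalone restatement (the helper name and the example values in the comments are illustrative only):

static unsigned int round_up_to_32(unsigned int cnt)
{
	unsigned int scnt = cnt & ~0x1f;	/* e.g. 50 -> 32, 64 -> 64 */
	if (cnt > scnt) scnt += 0x20;		/* e.g. 50 -> 64, 64 stays 64 */
	return scnt;
}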
362 | static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp) | ||
363 | { | ||
364 | struct pvr2_buffer *bp; | ||
365 | unsigned int cnt; | ||
366 | |||
367 | if (sp->buffer_total_count == sp->buffer_target_count) return 0; | ||
368 | |||
369 | pvr2_trace(PVR2_TRACE_BUF_POOL, | ||
370 | "/*---TRACE_FLOW---*/" | ||
371 | " poolCheck stream=%p cur=%d tgt=%d", | ||
372 | sp,sp->buffer_total_count,sp->buffer_target_count); | ||
373 | |||
374 | if (sp->buffer_total_count < sp->buffer_target_count) { | ||
375 | return pvr2_stream_buffer_count(sp,sp->buffer_target_count); | ||
376 | } | ||
377 | |||
378 | cnt = 0; | ||
379 | while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) { | ||
380 | bp = sp->buffers[sp->buffer_total_count - (cnt + 1)]; | ||
381 | if (bp->state != pvr2_buffer_state_idle) break; | ||
382 | cnt++; | ||
383 | } | ||
384 | if (cnt) { | ||
385 | pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt); | ||
386 | } | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static void pvr2_stream_internal_flush(struct pvr2_stream *sp) | ||
392 | { | ||
393 | struct list_head *lp; | ||
394 | struct pvr2_buffer *bp1; | ||
395 | while ((lp = sp->queued_list.next) != &sp->queued_list) { | ||
396 | bp1 = list_entry(lp,struct pvr2_buffer,list_overhead); | ||
397 | pvr2_buffer_wipe(bp1); | ||
398 | /* At this point, we should be guaranteed that no | ||
399 | completion callback may happen on this buffer. But it's | ||
400 | possible that it might have completed after we noticed | ||
401 | it but before we wiped it. So double check its status | ||
402 | here first. */ | ||
403 | if (bp1->state != pvr2_buffer_state_queued) continue; | ||
404 | pvr2_buffer_set_idle(bp1); | ||
405 | } | ||
406 | if (sp->buffer_total_count != sp->buffer_target_count) { | ||
407 | pvr2_stream_achieve_buffer_count(sp); | ||
408 | } | ||
409 | } | ||
410 | |||
411 | static void pvr2_stream_init(struct pvr2_stream *sp) | ||
412 | { | ||
413 | spin_lock_init(&sp->list_lock); | ||
414 | mutex_init(&sp->mutex); | ||
415 | INIT_LIST_HEAD(&sp->queued_list); | ||
416 | INIT_LIST_HEAD(&sp->ready_list); | ||
417 | INIT_LIST_HEAD(&sp->idle_list); | ||
418 | } | ||
419 | |||
420 | static void pvr2_stream_done(struct pvr2_stream *sp) | ||
421 | { | ||
422 | mutex_lock(&sp->mutex); do { | ||
423 | pvr2_stream_internal_flush(sp); | ||
424 | pvr2_stream_buffer_count(sp,0); | ||
425 | } while (0); mutex_unlock(&sp->mutex); | ||
426 | } | ||
427 | |||
428 | static void buffer_complete(struct urb *urb, struct pt_regs *regs) | ||
429 | { | ||
430 | struct pvr2_buffer *bp = urb->context; | ||
431 | struct pvr2_stream *sp; | ||
432 | unsigned long irq_flags; | ||
433 | BUFFER_CHECK(bp); | ||
434 | sp = bp->stream; | ||
435 | bp->used_count = 0; | ||
436 | bp->status = 0; | ||
437 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
438 | "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d", | ||
439 | bp,urb->status,urb->actual_length); | ||
440 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
441 | if ((!(urb->status)) || | ||
442 | (urb->status == -ENOENT) || | ||
443 | (urb->status == -ECONNRESET) || | ||
444 | (urb->status == -ESHUTDOWN)) { | ||
445 | bp->used_count = urb->actual_length; | ||
446 | if (sp->fail_count) { | ||
447 | pvr2_trace(PVR2_TRACE_TOLERANCE, | ||
448 | "stream %p transfer ok" | ||
449 | " - fail count reset",sp); | ||
450 | sp->fail_count = 0; | ||
451 | } | ||
452 | } else if (sp->fail_count < sp->fail_tolerance) { | ||
453 | // We can tolerate this error, because we're below the | ||
454 | // threshold... | ||
455 | (sp->fail_count)++; | ||
456 | pvr2_trace(PVR2_TRACE_TOLERANCE, | ||
457 | "stream %p ignoring error %d" | ||
458 | " - fail count increased to %u", | ||
459 | sp,urb->status,sp->fail_count); | ||
460 | } else { | ||
461 | bp->status = urb->status; | ||
462 | } | ||
463 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
464 | pvr2_buffer_set_ready(bp); | ||
465 | if (sp && sp->callback_func) { | ||
466 | sp->callback_func(sp->callback_data); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | struct pvr2_stream *pvr2_stream_create(void) | ||
471 | { | ||
472 | struct pvr2_stream *sp; | ||
473 | sp = kmalloc(sizeof(*sp),GFP_KERNEL); | ||
474 | if (!sp) return sp; | ||
475 | memset(sp,0,sizeof(*sp)); | ||
476 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp); | ||
477 | pvr2_stream_init(sp); | ||
478 | return sp; | ||
479 | } | ||
480 | |||
481 | void pvr2_stream_destroy(struct pvr2_stream *sp) | ||
482 | { | ||
483 | if (!sp) return; | ||
484 | pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp); | ||
485 | pvr2_stream_done(sp); | ||
486 | kfree(sp); | ||
487 | } | ||
488 | |||
489 | void pvr2_stream_setup(struct pvr2_stream *sp, | ||
490 | struct usb_device *dev, | ||
491 | int endpoint, | ||
492 | unsigned int tolerance) | ||
493 | { | ||
494 | mutex_lock(&sp->mutex); do { | ||
495 | pvr2_stream_internal_flush(sp); | ||
496 | sp->dev = dev; | ||
497 | sp->endpoint = endpoint; | ||
498 | sp->fail_tolerance = tolerance; | ||
499 | } while(0); mutex_unlock(&sp->mutex); | ||
500 | } | ||
501 | |||
502 | void pvr2_stream_set_callback(struct pvr2_stream *sp, | ||
503 | pvr2_stream_callback func, | ||
504 | void *data) | ||
505 | { | ||
506 | unsigned long irq_flags; | ||
507 | mutex_lock(&sp->mutex); do { | ||
508 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
509 | sp->callback_data = data; | ||
510 | sp->callback_func = func; | ||
511 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
512 | } while(0); mutex_unlock(&sp->mutex); | ||
513 | } | ||
514 | |||
515 | /* Query / set the nominal buffer count */ | ||
516 | int pvr2_stream_get_buffer_count(struct pvr2_stream *sp) | ||
517 | { | ||
518 | return sp->buffer_target_count; | ||
519 | } | ||
520 | |||
521 | int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt) | ||
522 | { | ||
523 | int ret; | ||
524 | if (sp->buffer_target_count == cnt) return 0; | ||
525 | mutex_lock(&sp->mutex); do { | ||
526 | sp->buffer_target_count = cnt; | ||
527 | ret = pvr2_stream_achieve_buffer_count(sp); | ||
528 | } while(0); mutex_unlock(&sp->mutex); | ||
529 | return ret; | ||
530 | } | ||
531 | |||
532 | struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp) | ||
533 | { | ||
534 | struct list_head *lp = sp->idle_list.next; | ||
535 | if (lp == &sp->idle_list) return 0; | ||
536 | return list_entry(lp,struct pvr2_buffer,list_overhead); | ||
537 | } | ||
538 | |||
539 | struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp) | ||
540 | { | ||
541 | struct list_head *lp = sp->ready_list.next; | ||
542 | if (lp == &sp->ready_list) return 0; | ||
543 | return list_entry(lp,struct pvr2_buffer,list_overhead); | ||
544 | } | ||
545 | |||
546 | struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id) | ||
547 | { | ||
548 | if (id < 0) return 0; | ||
549 | if (id >= sp->buffer_total_count) return 0; | ||
550 | return sp->buffers[id]; | ||
551 | } | ||
552 | |||
553 | int pvr2_stream_get_ready_count(struct pvr2_stream *sp) | ||
554 | { | ||
555 | return sp->r_count; | ||
556 | } | ||
557 | |||
558 | int pvr2_stream_get_idle_count(struct pvr2_stream *sp) | ||
559 | { | ||
560 | return sp->i_count; | ||
561 | } | ||
562 | |||
563 | void pvr2_stream_flush(struct pvr2_stream *sp) | ||
564 | { | ||
565 | mutex_lock(&sp->mutex); do { | ||
566 | pvr2_stream_internal_flush(sp); | ||
567 | } while(0); mutex_unlock(&sp->mutex); | ||
568 | } | ||
569 | |||
570 | void pvr2_stream_kill(struct pvr2_stream *sp) | ||
571 | { | ||
572 | struct pvr2_buffer *bp; | ||
573 | mutex_lock(&sp->mutex); do { | ||
574 | pvr2_stream_internal_flush(sp); | ||
575 | while ((bp = pvr2_stream_get_ready_buffer(sp)) != 0) { | ||
576 | pvr2_buffer_set_idle(bp); | ||
577 | } | ||
578 | if (sp->buffer_total_count != sp->buffer_target_count) { | ||
579 | pvr2_stream_achieve_buffer_count(sp); | ||
580 | } | ||
581 | } while(0); mutex_unlock(&sp->mutex); | ||
582 | } | ||
583 | |||
584 | int pvr2_buffer_queue(struct pvr2_buffer *bp) | ||
585 | { | ||
586 | #undef SEED_BUFFER | ||
587 | #ifdef SEED_BUFFER | ||
588 | unsigned int idx; | ||
589 | unsigned int val; | ||
590 | #endif | ||
591 | int ret = 0; | ||
592 | struct pvr2_stream *sp; | ||
593 | if (!bp) return -EINVAL; | ||
594 | sp = bp->stream; | ||
595 | mutex_lock(&sp->mutex); do { | ||
596 | pvr2_buffer_wipe(bp); | ||
597 | if (!sp->dev) { | ||
598 | ret = -EIO; | ||
599 | break; | ||
600 | } | ||
601 | pvr2_buffer_set_queued(bp); | ||
602 | #ifdef SEED_BUFFER | ||
603 | for (idx = 0; idx < (bp->max_count) / 4; idx++) { | ||
604 | val = bp->id << 24; | ||
605 | val |= idx; | ||
606 | ((unsigned int *)(bp->ptr))[idx] = val; | ||
607 | } | ||
608 | #endif | ||
609 | bp->status = -EINPROGRESS; | ||
610 | usb_fill_bulk_urb(bp->purb, // struct urb *urb | ||
611 | sp->dev, // struct usb_device *dev | ||
612 | // endpoint (below) | ||
613 | usb_rcvbulkpipe(sp->dev,sp->endpoint), | ||
614 | bp->ptr, // void *transfer_buffer | ||
615 | bp->max_count, // int buffer_length | ||
616 | buffer_complete, | ||
617 | bp); | ||
618 | usb_submit_urb(bp->purb,GFP_KERNEL); | ||
619 | } while(0); mutex_unlock(&sp->mutex); | ||
620 | return ret; | ||
621 | } | ||
622 | |||
623 | int pvr2_buffer_idle(struct pvr2_buffer *bp) | ||
624 | { | ||
625 | struct pvr2_stream *sp; | ||
626 | if (!bp) return -EINVAL; | ||
627 | sp = bp->stream; | ||
628 | mutex_lock(&sp->mutex); do { | ||
629 | pvr2_buffer_wipe(bp); | ||
630 | pvr2_buffer_set_idle(bp); | ||
631 | if (sp->buffer_total_count != sp->buffer_target_count) { | ||
632 | pvr2_stream_achieve_buffer_count(sp); | ||
633 | } | ||
634 | } while(0); mutex_unlock(&sp->mutex); | ||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt) | ||
639 | { | ||
640 | int ret = 0; | ||
641 | unsigned long irq_flags; | ||
642 | struct pvr2_stream *sp; | ||
643 | if (!bp) return -EINVAL; | ||
644 | sp = bp->stream; | ||
645 | mutex_lock(&sp->mutex); do { | ||
646 | spin_lock_irqsave(&sp->list_lock,irq_flags); | ||
647 | if (bp->state != pvr2_buffer_state_idle) { | ||
648 | ret = -EPERM; | ||
649 | } else { | ||
650 | bp->ptr = ptr; | ||
651 | bp->stream->i_bcount -= bp->max_count; | ||
652 | bp->max_count = cnt; | ||
653 | bp->stream->i_bcount += bp->max_count; | ||
654 | pvr2_trace(PVR2_TRACE_BUF_FLOW, | ||
655 | "/*---TRACE_FLOW---*/ bufferPool " | ||
656 | " %8s cap cap=%07d cnt=%02d", | ||
657 | pvr2_buffer_state_decode( | ||
658 | pvr2_buffer_state_idle), | ||
659 | bp->stream->i_bcount,bp->stream->i_count); | ||
660 | } | ||
661 | spin_unlock_irqrestore(&sp->list_lock,irq_flags); | ||
662 | } while(0); mutex_unlock(&sp->mutex); | ||
663 | return ret; | ||
664 | } | ||
665 | |||
666 | unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp) | ||
667 | { | ||
668 | return bp->used_count; | ||
669 | } | ||
670 | |||
671 | int pvr2_buffer_get_status(struct pvr2_buffer *bp) | ||
672 | { | ||
673 | return bp->status; | ||
674 | } | ||
675 | |||
676 | enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *bp) | ||
677 | { | ||
678 | return bp->state; | ||
679 | } | ||
680 | |||
681 | int pvr2_buffer_get_id(struct pvr2_buffer *bp) | ||
682 | { | ||
683 | return bp->id; | ||
684 | } | ||
685 | |||
686 | |||
687 | /* | ||
688 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
689 | *** Local Variables: *** | ||
690 | *** mode: c *** | ||
691 | *** fill-column: 75 *** | ||
692 | *** tab-width: 8 *** | ||
693 | *** c-basic-offset: 8 *** | ||
694 | *** End: *** | ||
695 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.h b/drivers/media/video/pvrusb2/pvrusb2-io.h new file mode 100644 index 000000000000..65e11385b2b3 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-io.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_IO_H | ||
22 | #define __PVRUSB2_IO_H | ||
23 | |||
24 | #include <linux/usb.h> | ||
25 | #include <linux/list.h> | ||
26 | |||
27 | typedef void (*pvr2_stream_callback)(void *); | ||
28 | |||
29 | enum pvr2_buffer_state { | ||
30 | pvr2_buffer_state_none = 0, // Not on any list | ||
31 | pvr2_buffer_state_idle = 1, // Buffer is ready to be used again | ||
32 | pvr2_buffer_state_queued = 2, // Buffer has been queued for filling | ||
33 | pvr2_buffer_state_ready = 3, // Buffer has data available | ||
34 | }; | ||
35 | |||
36 | struct pvr2_stream; | ||
37 | struct pvr2_buffer; | ||
38 | |||
39 | const char *pvr2_buffer_state_decode(enum pvr2_buffer_state); | ||
40 | |||
41 | /* Initialize / tear down stream structure */ | ||
42 | struct pvr2_stream *pvr2_stream_create(void); | ||
43 | void pvr2_stream_destroy(struct pvr2_stream *); | ||
44 | void pvr2_stream_setup(struct pvr2_stream *, | ||
45 | struct usb_device *dev,int endpoint, | ||
46 | unsigned int tolerance); | ||
47 | void pvr2_stream_set_callback(struct pvr2_stream *, | ||
48 | pvr2_stream_callback func, | ||
49 | void *data); | ||
50 | |||
51 | /* Query / set the nominal buffer count */ | ||
52 | int pvr2_stream_get_buffer_count(struct pvr2_stream *); | ||
53 | int pvr2_stream_set_buffer_count(struct pvr2_stream *,unsigned int); | ||
54 | |||
55 | /* Get a pointer to a buffer that is either idle, ready, or identified | ||
56 | by its id. */ | ||
57 | struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *); | ||
58 | struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *); | ||
59 | struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id); | ||
60 | |||
61 | /* Find out how many buffers are idle or ready */ | ||
62 | int pvr2_stream_get_idle_count(struct pvr2_stream *); | ||
63 | int pvr2_stream_get_ready_count(struct pvr2_stream *); | ||
64 | |||
65 | /* Kill all pending operations */ | ||
66 | void pvr2_stream_flush(struct pvr2_stream *); | ||
67 | |||
68 | /* Kill all pending buffers and throw away any ready buffers as well */ | ||
69 | void pvr2_stream_kill(struct pvr2_stream *); | ||
70 | |||
71 | /* Set up the actual storage for a buffer */ | ||
72 | int pvr2_buffer_set_buffer(struct pvr2_buffer *,void *ptr,unsigned int cnt); | ||
73 | |||
74 | /* Find out size of data in the given ready buffer */ | ||
75 | unsigned int pvr2_buffer_get_count(struct pvr2_buffer *); | ||
76 | |||
77 | /* Retrieve completion code for given ready buffer */ | ||
78 | int pvr2_buffer_get_status(struct pvr2_buffer *); | ||
79 | |||
80 | /* Retrieve state of given buffer */ | ||
81 | enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *); | ||
82 | |||
83 | /* Retrieve ID of given buffer */ | ||
84 | int pvr2_buffer_get_id(struct pvr2_buffer *); | ||
85 | |||
86 | /* Start reading into given buffer (kill it if needed) */ | ||
87 | int pvr2_buffer_queue(struct pvr2_buffer *); | ||
88 | |||
89 | /* Move buffer back to idle pool (kill it if needed) */ | ||
90 | int pvr2_buffer_idle(struct pvr2_buffer *); | ||
91 | |||
92 | #endif /* __PVRUSB2_IO_H */ | ||
93 | |||
94 | /* | ||
95 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
96 | *** Local Variables: *** | ||
97 | *** mode: c *** | ||
98 | *** fill-column: 75 *** | ||
99 | *** tab-width: 8 *** | ||
100 | *** c-basic-offset: 8 *** | ||
101 | *** End: *** | ||
102 | */ | ||
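Taken together, the declarations above describe a simple producer/consumer cycle: buffers start idle, move to the queued list when handed to the USB core, return on the ready list when their URB completes, and are then either re-queued or idled by the consumer. The sketch below shows one plausible consumer-side setup, in roughly the order pvrusb2-ioread.c (later in this patch) performs it; the my_* names, the buffer count, the buffer size, and the caller-supplied storage array are placeholders, not part of the driver.

#include <linux/usb.h>
#include "pvrusb2-io.h"

#define MY_BUFS  8		/* placeholder buffer count */
#define MY_BUFSZ 0x4000		/* placeholder per-buffer size */

static void my_notify(void *data)
{
	/* Runs whenever the ready list becomes non-empty; a real consumer
	   would wake whatever thread drains the ready buffers here. */
}

static struct pvr2_stream *my_stream_start(struct usb_device *dev,
					   int endpoint, char **storage)
{
	struct pvr2_stream *sp;
	struct pvr2_buffer *bp;
	int idx;

	sp = pvr2_stream_create();
	if (!sp) return NULL;
	pvr2_stream_setup(sp, dev, endpoint, 0);	/* no error tolerance */
	pvr2_stream_set_callback(sp, my_notify, NULL);
	if (pvr2_stream_set_buffer_count(sp, MY_BUFS) < 0) {
		pvr2_stream_destroy(sp);
		return NULL;
	}
	/* Attach caller-provided storage to each (still idle) buffer... */
	for (idx = 0; idx < MY_BUFS; idx++) {
		bp = pvr2_stream_get_buffer(sp, idx);
		pvr2_buffer_set_buffer(bp, storage[idx], MY_BUFSZ);
	}
	/* ...and queue everything so transfers can begin. */
	while ((bp = pvr2_stream_get_idle_buffer(sp)) != NULL) {
		pvr2_buffer_queue(bp);
	}
	return sp;
}

Afterwards the consumer repeatedly pulls with pvr2_stream_get_ready_buffer(), reads pvr2_buffer_get_count() bytes out of the matching storage slot (checking pvr2_buffer_get_status() when the count is zero), and hands the buffer back with pvr2_buffer_queue(); pvr2_stream_kill() followed by pvr2_stream_destroy() tears everything down.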
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c new file mode 100644 index 000000000000..49da062e3271 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c | |||
@@ -0,0 +1,513 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "pvrusb2-ioread.h" | ||
23 | #include "pvrusb2-debug.h" | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <asm/uaccess.h> | ||
29 | |||
30 | #define BUFFER_COUNT 32 | ||
31 | #define BUFFER_SIZE PAGE_ALIGN(0x4000) | ||
32 | |||
33 | struct pvr2_ioread { | ||
34 | struct pvr2_stream *stream; | ||
35 | char *buffer_storage[BUFFER_COUNT]; | ||
36 | char *sync_key_ptr; | ||
37 | unsigned int sync_key_len; | ||
38 | unsigned int sync_buf_offs; | ||
39 | unsigned int sync_state; | ||
40 | unsigned int sync_trashed_count; | ||
41 | int enabled; // Streaming is on | ||
42 | int spigot_open; // OK to pass data to client | ||
43 | int stream_running; // Passing data to client now | ||
44 | |||
45 | /* State relevant to current buffer being read */ | ||
46 | struct pvr2_buffer *c_buf; | ||
47 | char *c_data_ptr; | ||
48 | unsigned int c_data_len; | ||
49 | unsigned int c_data_offs; | ||
50 | struct mutex mutex; | ||
51 | }; | ||
52 | |||
53 | static int pvr2_ioread_init(struct pvr2_ioread *cp) | ||
54 | { | ||
55 | unsigned int idx; | ||
56 | |||
57 | cp->stream = 0; | ||
58 | mutex_init(&cp->mutex); | ||
59 | |||
60 | for (idx = 0; idx < BUFFER_COUNT; idx++) { | ||
61 | cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL); | ||
62 | if (!(cp->buffer_storage[idx])) break; | ||
63 | } | ||
64 | |||
65 | if (idx < BUFFER_COUNT) { | ||
66 | // An allocation appears to have failed | ||
67 | for (idx = 0; idx < BUFFER_COUNT; idx++) { | ||
68 | if (!(cp->buffer_storage[idx])) continue; | ||
69 | kfree(cp->buffer_storage[idx]); | ||
70 | } | ||
71 | return -ENOMEM; | ||
72 | } | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static void pvr2_ioread_done(struct pvr2_ioread *cp) | ||
77 | { | ||
78 | unsigned int idx; | ||
79 | |||
80 | pvr2_ioread_setup(cp,0); | ||
81 | for (idx = 0; idx < BUFFER_COUNT; idx++) { | ||
82 | if (!(cp->buffer_storage[idx])) continue; | ||
83 | kfree(cp->buffer_storage[idx]); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | struct pvr2_ioread *pvr2_ioread_create(void) | ||
88 | { | ||
89 | struct pvr2_ioread *cp; | ||
90 | cp = kmalloc(sizeof(*cp),GFP_KERNEL); | ||
91 | if (!cp) return 0; | ||
92 | pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp); | ||
93 | memset(cp,0,sizeof(*cp)); | ||
94 | if (pvr2_ioread_init(cp) < 0) { | ||
95 | kfree(cp); | ||
96 | return 0; | ||
97 | } | ||
98 | return cp; | ||
99 | } | ||
100 | |||
101 | void pvr2_ioread_destroy(struct pvr2_ioread *cp) | ||
102 | { | ||
103 | if (!cp) return; | ||
104 | pvr2_ioread_done(cp); | ||
105 | pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp); | ||
106 | if (cp->sync_key_ptr) { | ||
107 | kfree(cp->sync_key_ptr); | ||
108 | cp->sync_key_ptr = 0; | ||
109 | } | ||
110 | kfree(cp); | ||
111 | } | ||
112 | |||
113 | void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp, | ||
114 | const char *sync_key_ptr, | ||
115 | unsigned int sync_key_len) | ||
116 | { | ||
117 | if (!cp) return; | ||
118 | |||
119 | if (!sync_key_ptr) sync_key_len = 0; | ||
120 | if ((sync_key_len == cp->sync_key_len) && | ||
121 | ((!sync_key_len) || | ||
122 | (!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return; | ||
123 | |||
124 | if (sync_key_len != cp->sync_key_len) { | ||
125 | if (cp->sync_key_ptr) { | ||
126 | kfree(cp->sync_key_ptr); | ||
127 | cp->sync_key_ptr = 0; | ||
128 | } | ||
129 | cp->sync_key_len = 0; | ||
130 | if (sync_key_len) { | ||
131 | cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL); | ||
132 | if (cp->sync_key_ptr) { | ||
133 | cp->sync_key_len = sync_key_len; | ||
134 | } | ||
135 | } | ||
136 | } | ||
137 | if (!cp->sync_key_len) return; | ||
138 | memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len); | ||
139 | } | ||
140 | |||
141 | static void pvr2_ioread_stop(struct pvr2_ioread *cp) | ||
142 | { | ||
143 | if (!(cp->enabled)) return; | ||
144 | pvr2_trace(PVR2_TRACE_START_STOP, | ||
145 | "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp); | ||
146 | pvr2_stream_kill(cp->stream); | ||
147 | cp->c_buf = 0; | ||
148 | cp->c_data_ptr = 0; | ||
149 | cp->c_data_len = 0; | ||
150 | cp->c_data_offs = 0; | ||
151 | cp->enabled = 0; | ||
152 | cp->stream_running = 0; | ||
153 | cp->spigot_open = 0; | ||
154 | if (cp->sync_state) { | ||
155 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
156 | "/*---TRACE_READ---*/ sync_state <== 0"); | ||
157 | cp->sync_state = 0; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | static int pvr2_ioread_start(struct pvr2_ioread *cp) | ||
162 | { | ||
163 | int stat; | ||
164 | struct pvr2_buffer *bp; | ||
165 | if (cp->enabled) return 0; | ||
166 | if (!(cp->stream)) return 0; | ||
167 | pvr2_trace(PVR2_TRACE_START_STOP, | ||
168 | "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp); | ||
169 | while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != 0) { | ||
170 | stat = pvr2_buffer_queue(bp); | ||
171 | if (stat < 0) { | ||
172 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
173 | "/*---TRACE_READ---*/" | ||
174 | " pvr2_ioread_start id=%p" | ||
175 | " error=%d", | ||
176 | cp,stat); | ||
177 | pvr2_ioread_stop(cp); | ||
178 | return stat; | ||
179 | } | ||
180 | } | ||
181 | cp->enabled = !0; | ||
182 | cp->c_buf = 0; | ||
183 | cp->c_data_ptr = 0; | ||
184 | cp->c_data_len = 0; | ||
185 | cp->c_data_offs = 0; | ||
186 | cp->stream_running = 0; | ||
187 | if (cp->sync_key_len) { | ||
188 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
189 | "/*---TRACE_READ---*/ sync_state <== 1"); | ||
190 | cp->sync_state = 1; | ||
191 | cp->sync_trashed_count = 0; | ||
192 | cp->sync_buf_offs = 0; | ||
193 | } | ||
194 | cp->spigot_open = 0; | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp) | ||
199 | { | ||
200 | return cp->stream; | ||
201 | } | ||
202 | |||
203 | int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp) | ||
204 | { | ||
205 | int ret; | ||
206 | unsigned int idx; | ||
207 | struct pvr2_buffer *bp; | ||
208 | |||
209 | mutex_lock(&cp->mutex); do { | ||
210 | if (cp->stream) { | ||
211 | pvr2_trace(PVR2_TRACE_START_STOP, | ||
212 | "/*---TRACE_READ---*/" | ||
213 | " pvr2_ioread_setup (tear-down) id=%p",cp); | ||
214 | pvr2_ioread_stop(cp); | ||
215 | pvr2_stream_kill(cp->stream); | ||
216 | pvr2_stream_set_buffer_count(cp->stream,0); | ||
217 | cp->stream = 0; | ||
218 | } | ||
219 | if (sp) { | ||
220 | pvr2_trace(PVR2_TRACE_START_STOP, | ||
221 | "/*---TRACE_READ---*/" | ||
222 | " pvr2_ioread_setup (setup) id=%p",cp); | ||
223 | pvr2_stream_kill(sp); | ||
224 | ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT); | ||
225 | if (ret < 0) return ret; | ||
226 | for (idx = 0; idx < BUFFER_COUNT; idx++) { | ||
227 | bp = pvr2_stream_get_buffer(sp,idx); | ||
228 | pvr2_buffer_set_buffer(bp, | ||
229 | cp->buffer_storage[idx], | ||
230 | BUFFER_SIZE); | ||
231 | } | ||
232 | cp->stream = sp; | ||
233 | } | ||
234 | } while (0); mutex_unlock(&cp->mutex); | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl) | ||
240 | { | ||
241 | int ret = 0; | ||
242 | if ((!fl) == (!(cp->enabled))) return ret; | ||
243 | |||
244 | mutex_lock(&cp->mutex); do { | ||
245 | if (fl) { | ||
246 | ret = pvr2_ioread_start(cp); | ||
247 | } else { | ||
248 | pvr2_ioread_stop(cp); | ||
249 | } | ||
250 | } while (0); mutex_unlock(&cp->mutex); | ||
251 | return ret; | ||
252 | } | ||
253 | |||
254 | int pvr2_ioread_get_enabled(struct pvr2_ioread *cp) | ||
255 | { | ||
256 | return cp->enabled != 0; | ||
257 | } | ||
258 | |||
259 | int pvr2_ioread_get_buffer(struct pvr2_ioread *cp) | ||
260 | { | ||
261 | int stat; | ||
262 | |||
263 | while (cp->c_data_len <= cp->c_data_offs) { | ||
264 | if (cp->c_buf) { | ||
265 | // Flush out current buffer first. | ||
266 | stat = pvr2_buffer_queue(cp->c_buf); | ||
267 | if (stat < 0) { | ||
268 | // Streaming error... | ||
269 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
270 | "/*---TRACE_READ---*/" | ||
271 | " pvr2_ioread_read id=%p" | ||
272 | " queue_error=%d", | ||
273 | cp,stat); | ||
274 | pvr2_ioread_stop(cp); | ||
275 | return 0; | ||
276 | } | ||
277 | cp->c_buf = 0; | ||
278 | cp->c_data_ptr = 0; | ||
279 | cp->c_data_len = 0; | ||
280 | cp->c_data_offs = 0; | ||
281 | } | ||
282 | // Now get a freshly filled buffer. | ||
283 | cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream); | ||
284 | if (!cp->c_buf) break; // Nothing ready; done. | ||
285 | cp->c_data_len = pvr2_buffer_get_count(cp->c_buf); | ||
286 | if (!cp->c_data_len) { | ||
287 | // Nothing transferred. Was there an error? | ||
288 | stat = pvr2_buffer_get_status(cp->c_buf); | ||
289 | if (stat < 0) { | ||
290 | // Streaming error... | ||
291 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
292 | "/*---TRACE_READ---*/" | ||
293 | " pvr2_ioread_read id=%p" | ||
294 | " buffer_error=%d", | ||
295 | cp,stat); | ||
296 | pvr2_ioread_stop(cp); | ||
297 | // Give up. | ||
298 | return 0; | ||
299 | } | ||
300 | // Start over... | ||
301 | continue; | ||
302 | } | ||
303 | cp->c_data_offs = 0; | ||
304 | cp->c_data_ptr = cp->buffer_storage[ | ||
305 | pvr2_buffer_get_id(cp->c_buf)]; | ||
306 | } | ||
307 | return !0; | ||
308 | } | ||
309 | |||
310 | void pvr2_ioread_filter(struct pvr2_ioread *cp) | ||
311 | { | ||
312 | unsigned int idx; | ||
313 | if (!cp->enabled) return; | ||
314 | if (cp->sync_state != 1) return; | ||
315 | |||
316 | // Search the stream for our synchronization key. This is made | ||
317 | // complicated by the fact that in order to be honest with | ||
318 | // ourselves here we must search across buffer boundaries... | ||
319 | mutex_lock(&cp->mutex); while (1) { | ||
320 | // Ensure we have a buffer | ||
321 | if (!pvr2_ioread_get_buffer(cp)) break; | ||
322 | if (!cp->c_data_len) break; | ||
323 | |||
324 | // Now walk the buffer contents until we match the key or | ||
325 | // run out of buffer data. | ||
326 | for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) { | ||
327 | if (cp->sync_buf_offs >= cp->sync_key_len) break; | ||
328 | if (cp->c_data_ptr[idx] == | ||
329 | cp->sync_key_ptr[cp->sync_buf_offs]) { | ||
330 | // Found the next key byte | ||
331 | (cp->sync_buf_offs)++; | ||
332 | } else { | ||
333 | // Whoops, mismatched. Start key over... | ||
334 | cp->sync_buf_offs = 0; | ||
335 | } | ||
336 | } | ||
337 | |||
338 | // Consume what we've walked through | ||
339 | cp->c_data_offs += idx; | ||
340 | cp->sync_trashed_count += idx; | ||
341 | |||
342 | // If we've found the key, then update state and get out. | ||
343 | if (cp->sync_buf_offs >= cp->sync_key_len) { | ||
344 | cp->sync_trashed_count -= cp->sync_key_len; | ||
345 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
346 | "/*---TRACE_READ---*/" | ||
347 | " sync_state <== 2 (skipped %u bytes)", | ||
348 | cp->sync_trashed_count); | ||
349 | cp->sync_state = 2; | ||
350 | cp->sync_buf_offs = 0; | ||
351 | break; | ||
352 | } | ||
353 | |||
354 | if (cp->c_data_offs < cp->c_data_len) { | ||
355 | // Sanity check - should NEVER get here | ||
356 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
357 | "ERROR: pvr2_ioread filter sync problem" | ||
358 | " len=%u offs=%u", | ||
359 | cp->c_data_len,cp->c_data_offs); | ||
360 | // Get out so we don't get stuck in an infinite | ||
361 | // loop. | ||
362 | break; | ||
363 | } | ||
364 | |||
365 | continue; // (for clarity) | ||
366 | } mutex_unlock(&cp->mutex); | ||
367 | } | ||
368 | |||
369 | int pvr2_ioread_avail(struct pvr2_ioread *cp) | ||
370 | { | ||
371 | int ret; | ||
372 | if (!(cp->enabled)) { | ||
373 | // Stream is not enabled; so this is an I/O error | ||
374 | return -EIO; | ||
375 | } | ||
376 | |||
377 | if (cp->sync_state == 1) { | ||
378 | pvr2_ioread_filter(cp); | ||
379 | if (cp->sync_state == 1) return -EAGAIN; | ||
380 | } | ||
381 | |||
382 | ret = 0; | ||
383 | if (cp->stream_running) { | ||
384 | if (!pvr2_stream_get_ready_count(cp->stream)) { | ||
385 | // No data available at all right now. | ||
386 | ret = -EAGAIN; | ||
387 | } | ||
388 | } else { | ||
389 | if (pvr2_stream_get_ready_count(cp->stream) < BUFFER_COUNT/2) { | ||
390 | // Haven't buffered up enough yet; try again later | ||
391 | ret = -EAGAIN; | ||
392 | } | ||
393 | } | ||
394 | |||
395 | if ((!(cp->spigot_open)) != (!(ret == 0))) { | ||
396 | cp->spigot_open = (ret == 0); | ||
397 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
398 | "/*---TRACE_READ---*/ data is %s", | ||
399 | cp->spigot_open ? "available" : "pending"); | ||
400 | } | ||
401 | |||
402 | return ret; | ||
403 | } | ||
404 | |||
405 | int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt) | ||
406 | { | ||
407 | unsigned int copied_cnt; | ||
408 | unsigned int bcnt; | ||
409 | const char *src; | ||
410 | int stat; | ||
411 | int ret = 0; | ||
412 | unsigned int req_cnt = cnt; | ||
413 | |||
414 | if (!cnt) { | ||
415 | pvr2_trace(PVR2_TRACE_TRAP, | ||
416 | "/*---TRACE_READ---*/ pvr2_ioread_read id=%p" | ||
417 | " ZERO Request? Returning zero.",cp); | ||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | stat = pvr2_ioread_avail(cp); | ||
422 | if (stat < 0) return stat; | ||
423 | |||
424 | cp->stream_running = !0; | ||
425 | |||
426 | mutex_lock(&cp->mutex); do { | ||
427 | |||
428 | // Suck data out of the buffers and copy to the user | ||
429 | copied_cnt = 0; | ||
430 | if (!buf) cnt = 0; | ||
431 | while (1) { | ||
432 | if (!pvr2_ioread_get_buffer(cp)) { | ||
433 | ret = -EIO; | ||
434 | break; | ||
435 | } | ||
436 | |||
437 | if (!cnt) break; | ||
438 | |||
439 | if (cp->sync_state == 2) { | ||
440 | // We're repeating the sync key data into | ||
441 | // the stream. | ||
442 | src = cp->sync_key_ptr + cp->sync_buf_offs; | ||
443 | bcnt = cp->sync_key_len - cp->sync_buf_offs; | ||
444 | } else { | ||
445 | // Normal buffer copy | ||
446 | src = cp->c_data_ptr + cp->c_data_offs; | ||
447 | bcnt = cp->c_data_len - cp->c_data_offs; | ||
448 | } | ||
449 | |||
450 | if (!bcnt) break; | ||
451 | |||
452 | // Don't run past user's buffer | ||
453 | if (bcnt > cnt) bcnt = cnt; | ||
454 | |||
455 | if (copy_to_user(buf,src,bcnt)) { | ||
456 | // User supplied a bad pointer? | ||
457 | // Give up - this *will* cause data | ||
458 | // to be lost. | ||
459 | ret = -EFAULT; | ||
460 | break; | ||
461 | } | ||
462 | cnt -= bcnt; | ||
463 | buf += bcnt; | ||
464 | copied_cnt += bcnt; | ||
465 | |||
466 | if (cp->sync_state == 2) { | ||
467 | // Update offset inside sync key that we're | ||
468 | // repeating back out. | ||
469 | cp->sync_buf_offs += bcnt; | ||
470 | if (cp->sync_buf_offs >= cp->sync_key_len) { | ||
471 | // Consumed entire key; switch mode | ||
472 | // to normal. | ||
473 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
474 | "/*---TRACE_READ---*/" | ||
475 | " sync_state <== 0"); | ||
476 | cp->sync_state = 0; | ||
477 | } | ||
478 | } else { | ||
479 | // Update buffer offset. | ||
480 | cp->c_data_offs += bcnt; | ||
481 | } | ||
482 | } | ||
483 | |||
484 | } while (0); mutex_unlock(&cp->mutex); | ||
485 | |||
486 | if (!ret) { | ||
487 | if (copied_cnt) { | ||
488 | // If anything was copied, return that count | ||
489 | ret = copied_cnt; | ||
490 | } else { | ||
491 | // Nothing copied; suggest to the caller that | ||
492 | // another attempt be made later | ||
493 | ret = -EAGAIN; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | pvr2_trace(PVR2_TRACE_DATA_FLOW, | ||
498 | "/*---TRACE_READ---*/ pvr2_ioread_read" | ||
499 | " id=%p request=%d result=%d", | ||
500 | cp,req_cnt,ret); | ||
501 | return ret; | ||
502 | } | ||
503 | |||
504 | |||
505 | /* | ||
506 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
507 | *** Local Variables: *** | ||
508 | *** mode: c *** | ||
509 | *** fill-column: 75 *** | ||
510 | *** tab-width: 8 *** | ||
511 | *** c-basic-offset: 8 *** | ||
512 | *** End: *** | ||
513 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.h b/drivers/media/video/pvrusb2/pvrusb2-ioread.h new file mode 100644 index 000000000000..6b002597f5de --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_IOREAD_H | ||
22 | #define __PVRUSB2_IOREAD_H | ||
23 | |||
24 | #include "pvrusb2-io.h" | ||
25 | |||
26 | struct pvr2_ioread; | ||
27 | |||
28 | struct pvr2_ioread *pvr2_ioread_create(void); | ||
29 | void pvr2_ioread_destroy(struct pvr2_ioread *); | ||
30 | int pvr2_ioread_setup(struct pvr2_ioread *,struct pvr2_stream *); | ||
31 | struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *); | ||
32 | void pvr2_ioread_set_sync_key(struct pvr2_ioread *, | ||
33 | const char *sync_key_ptr, | ||
34 | unsigned int sync_key_len); | ||
35 | int pvr2_ioread_set_enabled(struct pvr2_ioread *,int fl); | ||
36 | int pvr2_ioread_get_enabled(struct pvr2_ioread *); | ||
37 | int pvr2_ioread_read(struct pvr2_ioread *,void __user *buf,unsigned int cnt); | ||
38 | int pvr2_ioread_avail(struct pvr2_ioread *); | ||
39 | |||
40 | #endif /* __PVRUSB2_IOREAD_H */ | ||
41 | |||
42 | /* | ||
43 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
44 | *** Local Variables: *** | ||
45 | *** mode: c *** | ||
46 | *** fill-column: 75 *** | ||
47 | *** tab-width: 8 *** | ||
48 | *** c-basic-offset: 8 *** | ||
49 | *** End: *** | ||
50 | */ | ||
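
The ioread interface above is easiest to follow as a whole. The following caller-side sketch is illustrative only and not part of the patch: the function name feed_reader, the 'stream', 'ubuf' and 'len' parameters, and the key bytes are assumptions made for illustration (and the usual pvrusb2-ioread.h / linux/errno.h includes are presumed). A real caller would sleep or poll on -EAGAIN rather than spin. The intended sequence is: bind a pvr2_ioread wrapper to a stream, register the synchronization key that pvr2_ioread_filter() scans for, enable the wrapper, then read.

	/* Hypothetical usage sketch; nothing here is taken verbatim from the
	   driver itself. */
	static const char sync_key[] = { 0x00, 0x00, 0x01, 0xba };

	int feed_reader(struct pvr2_stream *stream,
	                void __user *ubuf, unsigned int len)
	{
	        struct pvr2_ioread *cp;
	        int ret;

	        cp = pvr2_ioread_create();
	        if (!cp) return -ENOMEM;
	        pvr2_ioread_setup(cp, stream);
	        pvr2_ioread_set_sync_key(cp, sync_key, sizeof(sync_key));
	        pvr2_ioread_set_enabled(cp, !0);

	        do {
	                /* Returns -EAGAIN until the sync key has been seen
	                   and enough data has been buffered. */
	                ret = pvr2_ioread_read(cp, ubuf, len);
	        } while (ret == -EAGAIN);

	        pvr2_ioread_set_enabled(cp, 0);
	        pvr2_ioread_destroy(cp);
	        return ret;
	}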
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c new file mode 100644 index 000000000000..b95248274ed0 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-main.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/moduleparam.h> | ||
29 | #include <linux/smp_lock.h> | ||
30 | #include <linux/usb.h> | ||
31 | #include <linux/videodev2.h> | ||
32 | |||
33 | #include "pvrusb2-hdw.h" | ||
34 | #include "pvrusb2-context.h" | ||
35 | #include "pvrusb2-debug.h" | ||
36 | #include "pvrusb2-v4l2.h" | ||
37 | #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS | ||
38 | #include "pvrusb2-sysfs.h" | ||
39 | #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ | ||
40 | |||
41 | #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" | ||
42 | #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" | ||
43 | #define DRIVER_VERSION "V4L in-tree version" | ||
44 | |||
45 | #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ | ||
46 | PVR2_TRACE_INFO| \ | ||
47 | PVR2_TRACE_TOLERANCE| \ | ||
48 | PVR2_TRACE_TRAP| \ | ||
49 | 0) | ||
50 | |||
51 | int pvrusb2_debug = DEFAULT_DEBUG_MASK; | ||
52 | |||
53 | module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); | ||
54 | MODULE_PARM_DESC(debug, "Debug trace mask"); | ||
55 | |||
56 | #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS | ||
57 | static struct pvr2_sysfs_class *class_ptr = 0; | ||
58 | #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ | ||
59 | |||
60 | static void pvr_setup_attach(struct pvr2_context *pvr) | ||
61 | { | ||
62 | /* Create association with v4l layer */ | ||
63 | pvr2_v4l2_create(pvr); | ||
64 | #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS | ||
65 | pvr2_sysfs_create(pvr,class_ptr); | ||
66 | #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ | ||
67 | } | ||
68 | |||
69 | static int pvr_probe(struct usb_interface *intf, | ||
70 | const struct usb_device_id *devid) | ||
71 | { | ||
72 | struct pvr2_context *pvr; | ||
73 | |||
74 | /* Create underlying hardware interface */ | ||
75 | pvr = pvr2_context_create(intf,devid,pvr_setup_attach); | ||
76 | if (!pvr) { | ||
77 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
78 | "Failed to create hdw handler"); | ||
79 | return -ENOMEM; | ||
80 | } | ||
81 | |||
82 | pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); | ||
83 | |||
84 | usb_set_intfdata(intf, pvr); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * pvr_disconnect() | ||
91 | * | ||
92 | */ | ||
93 | static void pvr_disconnect(struct usb_interface *intf) | ||
94 | { | ||
95 | struct pvr2_context *pvr = usb_get_intfdata(intf); | ||
96 | |||
97 | pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); | ||
98 | |||
99 | usb_set_intfdata (intf, NULL); | ||
100 | pvr2_context_disconnect(pvr); | ||
101 | |||
102 | pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); | ||
103 | |||
104 | } | ||
105 | |||
106 | static struct usb_driver pvr_driver = { | ||
107 | .name = "pvrusb2", | ||
108 | .id_table = pvr2_device_table, | ||
109 | .probe = pvr_probe, | ||
110 | .disconnect = pvr_disconnect | ||
111 | }; | ||
112 | |||
113 | /* | ||
114 | * pvr_init() / pvr_exit() | ||
115 | * | ||
116 | * This code is run to initialize/exit the driver. | ||
117 | * | ||
118 | */ | ||
119 | static int __init pvr_init(void) | ||
120 | { | ||
121 | int ret; | ||
122 | |||
123 | pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); | ||
124 | |||
125 | #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS | ||
126 | class_ptr = pvr2_sysfs_class_create(); | ||
127 | #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ | ||
128 | |||
129 | ret = usb_register(&pvr_driver); | ||
130 | |||
131 | if (ret == 0) | ||
132 | info(DRIVER_DESC " : " DRIVER_VERSION); | ||
133 | if (pvrusb2_debug) info("Debug mask is %d (0x%x)", | ||
134 | pvrusb2_debug,pvrusb2_debug); | ||
135 | |||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static void __exit pvr_exit(void) | ||
140 | { | ||
141 | |||
142 | pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); | ||
143 | |||
144 | #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS | ||
145 | pvr2_sysfs_class_destroy(class_ptr); | ||
146 | #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ | ||
147 | |||
148 | usb_deregister(&pvr_driver); | ||
149 | } | ||
150 | |||
151 | module_init(pvr_init); | ||
152 | module_exit(pvr_exit); | ||
153 | |||
154 | /* Mike Isely <mcisely@pobox.com> 11-Mar-2006: See pvrusb2-hdw.c for | ||
155 | MODULE_DEVICE_TABLE(). We have to declare that attribute there | ||
156 | because that's where the device table actually is now and it seems | ||
157 | that certain gcc configurations get angry if MODULE_DEVICE_TABLE() | ||
158 | is used on what ends up being an external symbol. */ | ||
159 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
160 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
161 | MODULE_LICENSE("GPL"); | ||
162 | |||
163 | |||
164 | /* | ||
165 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
166 | *** Local Variables: *** | ||
167 | *** mode: c *** | ||
168 | *** fill-column: 70 *** | ||
169 | *** tab-width: 8 *** | ||
170 | *** c-basic-offset: 8 *** | ||
171 | *** End: *** | ||
172 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.c b/drivers/media/video/pvrusb2/pvrusb2-std.c new file mode 100644 index 000000000000..134063693643 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-std.c | |||
@@ -0,0 +1,408 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "pvrusb2-std.h" | ||
23 | #include "pvrusb2-debug.h" | ||
24 | #include <asm/string.h> | ||
25 | #include <linux/slab.h> | ||
26 | |||
27 | struct std_name { | ||
28 | const char *name; | ||
29 | v4l2_std_id id; | ||
30 | }; | ||
31 | |||
32 | |||
33 | #define CSTD_PAL \ | ||
34 | (V4L2_STD_PAL_B| \ | ||
35 | V4L2_STD_PAL_B1| \ | ||
36 | V4L2_STD_PAL_G| \ | ||
37 | V4L2_STD_PAL_H| \ | ||
38 | V4L2_STD_PAL_I| \ | ||
39 | V4L2_STD_PAL_D| \ | ||
40 | V4L2_STD_PAL_D1| \ | ||
41 | V4L2_STD_PAL_K| \ | ||
42 | V4L2_STD_PAL_M| \ | ||
43 | V4L2_STD_PAL_N| \ | ||
44 | V4L2_STD_PAL_Nc| \ | ||
45 | V4L2_STD_PAL_60) | ||
46 | |||
47 | #define CSTD_NTSC \ | ||
48 | (V4L2_STD_NTSC_M| \ | ||
49 | V4L2_STD_NTSC_M_JP| \ | ||
50 | V4L2_STD_NTSC_M_KR| \ | ||
51 | V4L2_STD_NTSC_443) | ||
52 | |||
53 | #define CSTD_SECAM \ | ||
54 | (V4L2_STD_SECAM_B| \ | ||
55 | V4L2_STD_SECAM_D| \ | ||
56 | V4L2_STD_SECAM_G| \ | ||
57 | V4L2_STD_SECAM_H| \ | ||
58 | V4L2_STD_SECAM_K| \ | ||
59 | V4L2_STD_SECAM_K1| \ | ||
60 | V4L2_STD_SECAM_L| \ | ||
61 | V4L2_STD_SECAM_LC) | ||
62 | |||
63 | #define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B) | ||
64 | #define TSTD_B1 (V4L2_STD_PAL_B1) | ||
65 | #define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D) | ||
66 | #define TSTD_D1 (V4L2_STD_PAL_D1) | ||
67 | #define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G) | ||
68 | #define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H) | ||
69 | #define TSTD_I (V4L2_STD_PAL_I) | ||
70 | #define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K) | ||
71 | #define TSTD_K1 (V4L2_STD_SECAM_K1) | ||
72 | #define TSTD_L (V4L2_STD_SECAM_L) | ||
73 | #define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M) | ||
74 | #define TSTD_N (V4L2_STD_PAL_N) | ||
75 | #define TSTD_Nc (V4L2_STD_PAL_Nc) | ||
76 | #define TSTD_60 (V4L2_STD_PAL_60) | ||
77 | |||
78 | #define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_SECAM) | ||
79 | |||
80 | /* Mapping of standard bits to color system */ | ||
81 | static const struct std_name std_groups[] = { | ||
82 | {"PAL",CSTD_PAL}, | ||
83 | {"NTSC",CSTD_NTSC}, | ||
84 | {"SECAM",CSTD_SECAM}, | ||
85 | }; | ||
86 | |||
87 | /* Mapping of standard bits to modulation system */ | ||
88 | static const struct std_name std_items[] = { | ||
89 | {"B",TSTD_B}, | ||
90 | {"B1",TSTD_B1}, | ||
91 | {"D",TSTD_D}, | ||
92 | {"D1",TSTD_D1}, | ||
93 | {"G",TSTD_G}, | ||
94 | {"H",TSTD_H}, | ||
95 | {"I",TSTD_I}, | ||
96 | {"K",TSTD_K}, | ||
97 | {"K1",TSTD_K1}, | ||
98 | {"L",TSTD_L}, | ||
99 | {"LC",V4L2_STD_SECAM_LC}, | ||
100 | {"M",TSTD_M}, | ||
101 | {"Mj",V4L2_STD_NTSC_M_JP}, | ||
102 | {"443",V4L2_STD_NTSC_443}, | ||
103 | {"Mk",V4L2_STD_NTSC_M_KR}, | ||
104 | {"N",TSTD_N}, | ||
105 | {"Nc",TSTD_Nc}, | ||
106 | {"60",TSTD_60}, | ||
107 | }; | ||
108 | |||
109 | |||
110 | // Search an array of std_name structures and return a pointer to the | ||
111 | // element with the matching name. | ||
112 | static const struct std_name *find_std_name(const struct std_name *arrPtr, | ||
113 | unsigned int arrSize, | ||
114 | const char *bufPtr, | ||
115 | unsigned int bufSize) | ||
116 | { | ||
117 | unsigned int idx; | ||
118 | const struct std_name *p; | ||
119 | for (idx = 0; idx < arrSize; idx++) { | ||
120 | p = arrPtr + idx; | ||
121 | if (strlen(p->name) != bufSize) continue; | ||
122 | if (!memcmp(bufPtr,p->name,bufSize)) return p; | ||
123 | } | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | |||
128 | int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr, | ||
129 | unsigned int bufSize) | ||
130 | { | ||
131 | v4l2_std_id id = 0; | ||
132 | v4l2_std_id cmsk = 0; | ||
133 | v4l2_std_id t; | ||
134 | int mMode = 0; | ||
135 | unsigned int cnt; | ||
136 | char ch; | ||
137 | const struct std_name *sp; | ||
138 | |||
139 | while (bufSize) { | ||
140 | if (!mMode) { | ||
141 | cnt = 0; | ||
142 | while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++; | ||
143 | if (cnt >= bufSize) return 0; // No more characters | ||
144 | sp = find_std_name( | ||
145 | std_groups, | ||
146 | sizeof(std_groups)/sizeof(std_groups[0]), | ||
147 | bufPtr,cnt); | ||
148 | if (!sp) return 0; // Illegal color system name | ||
149 | cnt++; | ||
150 | bufPtr += cnt; | ||
151 | bufSize -= cnt; | ||
152 | mMode = !0; | ||
153 | cmsk = sp->id; | ||
154 | continue; | ||
155 | } | ||
156 | cnt = 0; | ||
157 | while (cnt < bufSize) { | ||
158 | ch = bufPtr[cnt]; | ||
159 | if (ch == ';') { | ||
160 | mMode = 0; | ||
161 | break; | ||
162 | } | ||
163 | if (ch == '/') break; | ||
164 | cnt++; | ||
165 | } | ||
166 | sp = find_std_name(std_items, | ||
167 | sizeof(std_items)/sizeof(std_items[0]), | ||
168 | bufPtr,cnt); | ||
169 | if (!sp) return 0; // Illegal modulation system ID | ||
170 | t = sp->id & cmsk; | ||
171 | if (!t) return 0; // Specific color + modulation system illegal | ||
172 | id |= t; | ||
173 | if (cnt < bufSize) cnt++; | ||
174 | bufPtr += cnt; | ||
175 | bufSize -= cnt; | ||
176 | } | ||
177 | |||
178 | if (idPtr) *idPtr = id; | ||
179 | return !0; | ||
180 | } | ||
181 | |||
182 | |||
183 | unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize, | ||
184 | v4l2_std_id id) | ||
185 | { | ||
186 | unsigned int idx1,idx2; | ||
187 | const struct std_name *ip,*gp; | ||
188 | int gfl,cfl; | ||
189 | unsigned int c1,c2; | ||
190 | cfl = 0; | ||
191 | c1 = 0; | ||
192 | for (idx1 = 0; | ||
193 | idx1 < sizeof(std_groups)/sizeof(std_groups[0]); | ||
194 | idx1++) { | ||
195 | gp = std_groups + idx1; | ||
196 | gfl = 0; | ||
197 | for (idx2 = 0; | ||
198 | idx2 < sizeof(std_items)/sizeof(std_items[0]); | ||
199 | idx2++) { | ||
200 | ip = std_items + idx2; | ||
201 | if (!(gp->id & ip->id & id)) continue; | ||
202 | if (!gfl) { | ||
203 | if (cfl) { | ||
204 | c2 = scnprintf(bufPtr,bufSize,";"); | ||
205 | c1 += c2; | ||
206 | bufSize -= c2; | ||
207 | bufPtr += c2; | ||
208 | } | ||
209 | cfl = !0; | ||
210 | c2 = scnprintf(bufPtr,bufSize, | ||
211 | "%s-",gp->name); | ||
212 | gfl = !0; | ||
213 | } else { | ||
214 | c2 = scnprintf(bufPtr,bufSize,"/"); | ||
215 | } | ||
216 | c1 += c2; | ||
217 | bufSize -= c2; | ||
218 | bufPtr += c2; | ||
219 | c2 = scnprintf(bufPtr,bufSize, | ||
220 | "%s",ip->name); | ||
221 | c1 += c2; | ||
222 | bufSize -= c2; | ||
223 | bufPtr += c2; | ||
224 | } | ||
225 | } | ||
226 | return c1; | ||
227 | } | ||
228 | |||
229 | |||
230 | // Template data for possible enumerated video standards. Here we group | ||
231 | // standards which share common frame rates and resolutions. | ||
232 | static struct v4l2_standard generic_standards[] = { | ||
233 | { | ||
234 | .id = (TSTD_B|TSTD_B1| | ||
235 | TSTD_D|TSTD_D1| | ||
236 | TSTD_G| | ||
237 | TSTD_H| | ||
238 | TSTD_I| | ||
239 | TSTD_K|TSTD_K1| | ||
240 | TSTD_L| | ||
241 | V4L2_STD_SECAM_LC | | ||
242 | TSTD_N|TSTD_Nc), | ||
243 | .frameperiod = | ||
244 | { | ||
245 | .numerator = 1, | ||
246 | .denominator= 25 | ||
247 | }, | ||
248 | .framelines = 625, | ||
249 | .reserved = {0,0,0,0} | ||
250 | }, { | ||
251 | .id = (TSTD_M| | ||
252 | V4L2_STD_NTSC_M_JP| | ||
253 | V4L2_STD_NTSC_M_KR), | ||
254 | .frameperiod = | ||
255 | { | ||
256 | .numerator = 1001, | ||
257 | .denominator= 30000 | ||
258 | }, | ||
259 | .framelines = 525, | ||
260 | .reserved = {0,0,0,0} | ||
261 | }, { // This is a total wild guess | ||
262 | .id = (TSTD_60), | ||
263 | .frameperiod = | ||
264 | { | ||
265 | .numerator = 1001, | ||
266 | .denominator= 30000 | ||
267 | }, | ||
268 | .framelines = 525, | ||
269 | .reserved = {0,0,0,0} | ||
270 | }, { // This is a total wild guess | ||
271 | .id = V4L2_STD_NTSC_443, | ||
272 | .frameperiod = | ||
273 | { | ||
274 | .numerator = 1001, | ||
275 | .denominator= 30000 | ||
276 | }, | ||
277 | .framelines = 525, | ||
278 | .reserved = {0,0,0,0} | ||
279 | } | ||
280 | }; | ||
281 | |||
282 | #define generic_standards_cnt (sizeof(generic_standards)/sizeof(generic_standards[0])) | ||
283 | |||
284 | static struct v4l2_standard *match_std(v4l2_std_id id) | ||
285 | { | ||
286 | unsigned int idx; | ||
287 | for (idx = 0; idx < generic_standards_cnt; idx++) { | ||
288 | if (generic_standards[idx].id & id) { | ||
289 | return generic_standards + idx; | ||
290 | } | ||
291 | } | ||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id) | ||
296 | { | ||
297 | struct v4l2_standard *template; | ||
298 | int idx; | ||
299 | unsigned int bcnt; | ||
300 | template = match_std(id); | ||
301 | if (!template) return 0; | ||
302 | idx = std->index; | ||
303 | memcpy(std,template,sizeof(*template)); | ||
304 | std->index = idx; | ||
305 | std->id = id; | ||
306 | bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id); | ||
307 | std->name[bcnt] = 0; | ||
308 | pvr2_trace(PVR2_TRACE_INIT,"Set up standard idx=%u name=%s", | ||
309 | std->index,std->name); | ||
310 | return !0; | ||
311 | } | ||
312 | |||
313 | /* These are special cases of combined standards that we should enumerate | ||
314 | separately if the component pieces are present. */ | ||
315 | static v4l2_std_id std_mixes[] = { | ||
316 | V4L2_STD_PAL_B | V4L2_STD_PAL_G, | ||
317 | V4L2_STD_PAL_D | V4L2_STD_PAL_K, | ||
318 | V4L2_STD_SECAM_B | V4L2_STD_SECAM_G, | ||
319 | V4L2_STD_SECAM_D | V4L2_STD_SECAM_K, | ||
320 | }; | ||
321 | |||
322 | struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr, | ||
323 | v4l2_std_id id) | ||
324 | { | ||
325 | unsigned int std_cnt = 0; | ||
326 | unsigned int idx,bcnt,idx2; | ||
327 | v4l2_std_id idmsk,cmsk,fmsk; | ||
328 | struct v4l2_standard *stddefs; | ||
329 | |||
330 | if (pvrusb2_debug & PVR2_TRACE_INIT) { | ||
331 | char buf[50]; | ||
332 | bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id); | ||
333 | pvr2_trace( | ||
334 | PVR2_TRACE_INIT,"Mapping standards mask=0x%x (%.*s)", | ||
335 | (int)id,bcnt,buf); | ||
336 | } | ||
337 | |||
338 | *countptr = 0; | ||
339 | std_cnt = 0; | ||
340 | fmsk = 0; | ||
341 | for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) { | ||
342 | if (!(idmsk & cmsk)) continue; | ||
343 | cmsk &= ~idmsk; | ||
344 | if (match_std(idmsk)) { | ||
345 | std_cnt++; | ||
346 | continue; | ||
347 | } | ||
348 | fmsk |= idmsk; | ||
349 | } | ||
350 | |||
351 | for (idx2 = 0; idx2 < sizeof(std_mixes)/sizeof(std_mixes[0]); idx2++) { | ||
352 | if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++; | ||
353 | } | ||
354 | |||
355 | if (fmsk) { | ||
356 | char buf[50]; | ||
357 | bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk); | ||
358 | pvr2_trace( | ||
359 | PVR2_TRACE_ERROR_LEGS, | ||
360 | "WARNING:" | ||
361 | " Failed to classify the following standard(s): %.*s", | ||
362 | bcnt,buf); | ||
363 | } | ||
364 | |||
365 | pvr2_trace(PVR2_TRACE_INIT,"Setting up %u unique standard(s)", | ||
366 | std_cnt); | ||
367 | if (!std_cnt) return 0; // paranoia | ||
368 | |||
369 | stddefs = kmalloc(sizeof(struct v4l2_standard) * std_cnt, | ||
370 | GFP_KERNEL); | ||
371 | memset(stddefs,0,sizeof(struct v4l2_standard) * std_cnt); | ||
372 | for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx; | ||
373 | |||
374 | idx = 0; | ||
375 | |||
376 | /* Enumerate potential special cases */ | ||
377 | for (idx2 = 0; ((idx2 < sizeof(std_mixes)/sizeof(std_mixes[0])) && | ||
378 | (idx < std_cnt)); idx2++) { | ||
379 | if (!(id & std_mixes[idx2])) continue; | ||
380 | if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++; | ||
381 | } | ||
382 | /* Now enumerate individual pieces */ | ||
383 | for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) { | ||
384 | if (!(idmsk & cmsk)) continue; | ||
385 | cmsk &= ~idmsk; | ||
386 | if (!pvr2_std_fill(stddefs+idx,idmsk)) continue; | ||
387 | idx++; | ||
388 | } | ||
389 | |||
390 | *countptr = std_cnt; | ||
391 | return stddefs; | ||
392 | } | ||
393 | |||
394 | v4l2_std_id pvr2_std_get_usable(void) | ||
395 | { | ||
396 | return CSTD_ALL; | ||
397 | } | ||
398 | |||
399 | |||
400 | /* | ||
401 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
402 | *** Local Variables: *** | ||
403 | *** mode: c *** | ||
404 | *** fill-column: 75 *** | ||
405 | *** tab-width: 8 *** | ||
406 | *** c-basic-offset: 8 *** | ||
407 | *** End: *** | ||
408 | */ | ||
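
For the enumeration side of this file, a short caller-side sketch may help. It is illustrative only: the function name std_enum_example and the standard mask are assumptions, and the usual pvrusb2-std.h and linux/slab.h includes are presumed. Given a device claiming PAL B/G plus NTSC M, the std_mixes handling above causes the combined PAL-B/G entry to be enumerated first, followed by entries for the individual standards.

	/* Hypothetical caller of pvr2_std_create_enum(); the mask below is an
	   example value, not one taken from the driver. */
	static void std_enum_example(void)
	{
	        unsigned int cnt = 0;
	        struct v4l2_standard *defs;

	        defs = pvr2_std_create_enum(&cnt,
	                                    V4L2_STD_PAL_B |
	                                    V4L2_STD_PAL_G |
	                                    V4L2_STD_NTSC_M);
	        if (!defs) return;
	        /* defs[0] is the combined "PAL-B/G" entry; defs[1..cnt-1]
	           cover PAL-B, PAL-G and NTSC-M individually. */
	        kfree(defs);
	}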
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.h b/drivers/media/video/pvrusb2/pvrusb2-std.h new file mode 100644 index 000000000000..07c399375341 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-std.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_STD_H | ||
22 | #define __PVRUSB2_STD_H | ||
23 | |||
24 | #include <linux/videodev2.h> | ||
25 | |||
26 | // Convert string describing one or more video standards into a mask of V4L | ||
27 | // standard bits. Return true if conversion succeeds, otherwise return | ||
28 | // false. String is expected to be of the form: C1-x/y;C2-a/b where C1 and | ||
29 | // C2 are color system names (e.g. "PAL", "NTSC") and x, y, a, and b are | ||
30 | // modulation schemes (e.g. "M", "B", "G", etc). | ||
31 | int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr, | ||
32 | unsigned int bufSize); | ||
33 | |||
34 | // Convert any arbitrary set of video standard bits into an unambiguous | ||
35 | // readable string. Return value is the number of bytes consumed in the | ||
36 | // buffer. The formatted string is of a form that can be parsed by our | ||
37 | // sibling pvr2_std_str_to_id() function. | ||
38 | unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize, | ||
39 | v4l2_std_id id); | ||
40 | |||
41 | // Create an array of suitable v4l2_standard structures given a bit mask of | ||
42 | // video standards to support. The array is allocated from the heap, and | ||
43 | // the number of elements is returned in the first argument. | ||
44 | struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr, | ||
45 | v4l2_std_id id); | ||
46 | |||
47 | // Return mask of which video standard bits are valid | ||
48 | v4l2_std_id pvr2_std_get_usable(void); | ||
49 | |||
50 | #endif /* __PVRUSB2_STD_H */ | ||
51 | |||
52 | /* | ||
53 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
54 | *** Local Variables: *** | ||
55 | *** mode: c *** | ||
56 | *** fill-column: 75 *** | ||
57 | *** tab-width: 8 *** | ||
58 | *** c-basic-offset: 8 *** | ||
59 | *** End: *** | ||
60 | */ | ||
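
A concrete round trip may make the string syntax described above easier to picture. The sketch below is illustrative rather than part of the patch; the function name std_string_example, the input string and the buffer size are arbitrary examples.

	/* "PAL" and "NTSC" are color system names, "B", "G", "I" and "M" are
	   modulation schemes, exactly as the comment above describes. */
	static void std_string_example(void)
	{
	        static const char spec[] = "PAL-B/G/I;NTSC-M";
	        char buf[50];
	        v4l2_std_id id;
	        unsigned int cnt;

	        if (!pvr2_std_str_to_id(&id, spec, sizeof(spec) - 1))
	                return;         /* string did not parse */

	        cnt = pvr2_std_id_to_str(buf, sizeof(buf), id);
	        buf[cnt] = 0;
	        /* buf should now read back as "PAL-B/G/I;NTSC-M" */
	}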
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c new file mode 100644 index 000000000000..c6e6523d74b4 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c | |||
@@ -0,0 +1,865 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/config.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <asm/semaphore.h> | ||
26 | #include "pvrusb2-sysfs.h" | ||
27 | #include "pvrusb2-hdw.h" | ||
28 | #include "pvrusb2-debug.h" | ||
29 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
30 | #include "pvrusb2-debugifc.h" | ||
31 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
32 | |||
33 | #define pvr2_sysfs_trace(...) pvr2_trace(PVR2_TRACE_SYSFS,__VA_ARGS__) | ||
34 | |||
35 | struct pvr2_sysfs { | ||
36 | struct pvr2_channel channel; | ||
37 | struct class_device *class_dev; | ||
38 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
39 | struct pvr2_sysfs_debugifc *debugifc; | ||
40 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
41 | struct pvr2_sysfs_ctl_item *item_first; | ||
42 | struct pvr2_sysfs_ctl_item *item_last; | ||
43 | struct sysfs_ops kops; | ||
44 | struct kobj_type ktype; | ||
45 | struct class_device_attribute attr_v4l_minor_number; | ||
46 | struct class_device_attribute attr_unit_number; | ||
47 | }; | ||
48 | |||
49 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
50 | struct pvr2_sysfs_debugifc { | ||
51 | struct class_device_attribute attr_debugcmd; | ||
52 | struct class_device_attribute attr_debuginfo; | ||
53 | }; | ||
54 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
55 | |||
56 | struct pvr2_sysfs_ctl_item { | ||
57 | struct class_device_attribute attr_name; | ||
58 | struct class_device_attribute attr_type; | ||
59 | struct class_device_attribute attr_min; | ||
60 | struct class_device_attribute attr_max; | ||
61 | struct class_device_attribute attr_enum; | ||
62 | struct class_device_attribute attr_bits; | ||
63 | struct class_device_attribute attr_val; | ||
64 | struct class_device_attribute attr_custom; | ||
65 | struct pvr2_ctrl *cptr; | ||
66 | struct pvr2_sysfs *chptr; | ||
67 | struct pvr2_sysfs_ctl_item *item_next; | ||
68 | struct attribute *attr_gen[7]; | ||
69 | struct attribute_group grp; | ||
70 | char name[80]; | ||
71 | }; | ||
72 | |||
73 | struct pvr2_sysfs_class { | ||
74 | struct class class; | ||
75 | }; | ||
76 | |||
77 | static ssize_t show_name(int id,struct class_device *class_dev,char *buf) | ||
78 | { | ||
79 | struct pvr2_ctrl *cptr; | ||
80 | struct pvr2_sysfs *sfp; | ||
81 | const char *name; | ||
82 | |||
83 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
84 | if (!sfp) return -EINVAL; | ||
85 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
86 | if (!cptr) return -EINVAL; | ||
87 | |||
88 | name = pvr2_ctrl_get_desc(cptr); | ||
89 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_name(cid=%d) is %s",sfp,id,name); | ||
90 | |||
91 | if (!name) return -EINVAL; | ||
92 | |||
93 | return scnprintf(buf,PAGE_SIZE,"%s\n",name); | ||
94 | } | ||
95 | |||
96 | static ssize_t show_type(int id,struct class_device *class_dev,char *buf) | ||
97 | { | ||
98 | struct pvr2_ctrl *cptr; | ||
99 | struct pvr2_sysfs *sfp; | ||
100 | const char *name; | ||
101 | enum pvr2_ctl_type tp; | ||
102 | |||
103 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
104 | if (!sfp) return -EINVAL; | ||
105 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
106 | if (!cptr) return -EINVAL; | ||
107 | |||
108 | tp = pvr2_ctrl_get_type(cptr); | ||
109 | switch (tp) { | ||
110 | case pvr2_ctl_int: name = "integer"; break; | ||
111 | case pvr2_ctl_enum: name = "enum"; break; | ||
112 | case pvr2_ctl_bitmask: name = "bitmask"; break; | ||
113 | case pvr2_ctl_bool: name = "boolean"; break; | ||
114 | default: name = "?"; break; | ||
115 | } | ||
116 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",sfp,id,name); | ||
117 | |||
118 | if (!name) return -EINVAL; | ||
119 | |||
120 | return scnprintf(buf,PAGE_SIZE,"%s\n",name); | ||
121 | } | ||
122 | |||
123 | static ssize_t show_min(int id,struct class_device *class_dev,char *buf) | ||
124 | { | ||
125 | struct pvr2_ctrl *cptr; | ||
126 | struct pvr2_sysfs *sfp; | ||
127 | long val; | ||
128 | |||
129 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
130 | if (!sfp) return -EINVAL; | ||
131 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
132 | if (!cptr) return -EINVAL; | ||
133 | val = pvr2_ctrl_get_min(cptr); | ||
134 | |||
135 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_min(cid=%d) is %ld",sfp,id,val); | ||
136 | |||
137 | return scnprintf(buf,PAGE_SIZE,"%ld\n",val); | ||
138 | } | ||
139 | |||
140 | static ssize_t show_max(int id,struct class_device *class_dev,char *buf) | ||
141 | { | ||
142 | struct pvr2_ctrl *cptr; | ||
143 | struct pvr2_sysfs *sfp; | ||
144 | long val; | ||
145 | |||
146 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
147 | if (!sfp) return -EINVAL; | ||
148 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
149 | if (!cptr) return -EINVAL; | ||
150 | val = pvr2_ctrl_get_max(cptr); | ||
151 | |||
152 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_max(cid=%d) is %ld",sfp,id,val); | ||
153 | |||
154 | return scnprintf(buf,PAGE_SIZE,"%ld\n",val); | ||
155 | } | ||
156 | |||
157 | static ssize_t show_val_norm(int id,struct class_device *class_dev,char *buf) | ||
158 | { | ||
159 | struct pvr2_ctrl *cptr; | ||
160 | struct pvr2_sysfs *sfp; | ||
161 | int val,ret; | ||
162 | unsigned int cnt = 0; | ||
163 | |||
164 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
165 | if (!sfp) return -EINVAL; | ||
166 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
167 | if (!cptr) return -EINVAL; | ||
168 | |||
169 | ret = pvr2_ctrl_get_value(cptr,&val); | ||
170 | if (ret < 0) return ret; | ||
171 | |||
172 | ret = pvr2_ctrl_value_to_sym(cptr,~0,val, | ||
173 | buf,PAGE_SIZE-1,&cnt); | ||
174 | |||
175 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_norm(cid=%d) is %.*s (%d)", | ||
176 | sfp,id,cnt,buf,val); | ||
177 | buf[cnt] = '\n'; | ||
178 | return cnt+1; | ||
179 | } | ||
180 | |||
181 | static ssize_t show_val_custom(int id,struct class_device *class_dev,char *buf) | ||
182 | { | ||
183 | struct pvr2_ctrl *cptr; | ||
184 | struct pvr2_sysfs *sfp; | ||
185 | int val,ret; | ||
186 | unsigned int cnt = 0; | ||
187 | |||
188 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
189 | if (!sfp) return -EINVAL; | ||
190 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
191 | if (!cptr) return -EINVAL; | ||
192 | |||
193 | ret = pvr2_ctrl_get_value(cptr,&val); | ||
194 | if (ret < 0) return ret; | ||
195 | |||
196 | ret = pvr2_ctrl_custom_value_to_sym(cptr,~0,val, | ||
197 | buf,PAGE_SIZE-1,&cnt); | ||
198 | |||
199 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_custom(cid=%d) is %.*s (%d)", | ||
200 | sfp,id,cnt,buf,val); | ||
201 | buf[cnt] = '\n'; | ||
202 | return cnt+1; | ||
203 | } | ||
204 | |||
205 | static ssize_t show_enum(int id,struct class_device *class_dev,char *buf) | ||
206 | { | ||
207 | struct pvr2_ctrl *cptr; | ||
208 | struct pvr2_sysfs *sfp; | ||
209 | long val; | ||
210 | unsigned int bcnt,ccnt,ecnt; | ||
211 | |||
212 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
213 | if (!sfp) return -EINVAL; | ||
214 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
215 | if (!cptr) return -EINVAL; | ||
216 | ecnt = pvr2_ctrl_get_cnt(cptr); | ||
217 | bcnt = 0; | ||
218 | for (val = 0; val < ecnt; val++) { | ||
219 | pvr2_ctrl_get_valname(cptr,val,buf+bcnt,PAGE_SIZE-bcnt,&ccnt); | ||
220 | if (!ccnt) continue; | ||
221 | bcnt += ccnt; | ||
222 | if (bcnt >= PAGE_SIZE) break; | ||
223 | buf[bcnt] = '\n'; | ||
224 | bcnt++; | ||
225 | } | ||
226 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_enum(cid=%d)",sfp,id); | ||
227 | return bcnt; | ||
228 | } | ||
229 | |||
230 | static ssize_t show_bits(int id,struct class_device *class_dev,char *buf) | ||
231 | { | ||
232 | struct pvr2_ctrl *cptr; | ||
233 | struct pvr2_sysfs *sfp; | ||
234 | int valid_bits,msk; | ||
235 | unsigned int bcnt,ccnt; | ||
236 | |||
237 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
238 | if (!sfp) return -EINVAL; | ||
239 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
240 | if (!cptr) return -EINVAL; | ||
241 | valid_bits = pvr2_ctrl_get_mask(cptr); | ||
242 | bcnt = 0; | ||
243 | for (msk = 1; valid_bits; msk <<= 1) { | ||
244 | if (!(msk & valid_bits)) continue; | ||
245 | valid_bits &= ~msk; | ||
246 | pvr2_ctrl_get_valname(cptr,msk,buf+bcnt,PAGE_SIZE-bcnt,&ccnt); | ||
247 | bcnt += ccnt; | ||
248 | if (bcnt >= PAGE_SIZE) break; | ||
249 | buf[bcnt] = '\n'; | ||
250 | bcnt++; | ||
251 | } | ||
252 | pvr2_sysfs_trace("pvr2_sysfs(%p) show_bits(cid=%d)",sfp,id); | ||
253 | return bcnt; | ||
254 | } | ||
255 | |||
256 | static int store_val_any(int id,int customfl,struct pvr2_sysfs *sfp, | ||
257 | const char *buf,unsigned int count) | ||
258 | { | ||
259 | struct pvr2_ctrl *cptr; | ||
260 | int ret; | ||
261 | int mask,val; | ||
262 | |||
263 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id); | ||
264 | if (customfl) { | ||
265 | ret = pvr2_ctrl_custom_sym_to_value(cptr,buf,count,&mask,&val); | ||
266 | } else { | ||
267 | ret = pvr2_ctrl_sym_to_value(cptr,buf,count,&mask,&val); | ||
268 | } | ||
269 | if (ret < 0) return ret; | ||
270 | ret = pvr2_ctrl_set_mask_value(cptr,mask,val); | ||
271 | pvr2_hdw_commit_ctl(sfp->channel.hdw); | ||
272 | return ret; | ||
273 | } | ||
274 | |||
275 | static ssize_t store_val_norm(int id,struct class_device *class_dev, | ||
276 | const char *buf,size_t count) | ||
277 | { | ||
278 | struct pvr2_sysfs *sfp; | ||
279 | int ret; | ||
280 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
281 | ret = store_val_any(id,0,sfp,buf,count); | ||
282 | if (!ret) ret = count; | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | static ssize_t store_val_custom(int id,struct class_device *class_dev, | ||
287 | const char *buf,size_t count) | ||
288 | { | ||
289 | struct pvr2_sysfs *sfp; | ||
290 | int ret; | ||
291 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
292 | ret = store_val_any(id,1,sfp,buf,count); | ||
293 | if (!ret) ret = count; | ||
294 | return ret; | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | Mike Isely <isely@pobox.com> 30-April-2005 | ||
299 | |||
300 | This next batch of horrible preprocessor hackery is needed because the | ||
301 | kernel's class_device_attribute mechanism fails to pass the actual | ||
302 | attribute through to the show / store functions, which means we have no | ||
303 | way to package up any attribute-specific parameters, like for example the | ||
304 | control id. So we work around this brain-damage by encoding the control | ||
305 | id into the show / store functions themselves and pick the function based | ||
306 | on the control id we're setting up. These macros try to ease the pain. | ||
307 | Yuck. | ||
308 | */ | ||
309 | |||
310 | #define CREATE_SHOW_INSTANCE(sf_name,ctl_id) \ | ||
311 | static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,char *buf) \ | ||
312 | { return sf_name(ctl_id,class_dev,buf); } | ||
313 | |||
314 | #define CREATE_STORE_INSTANCE(sf_name,ctl_id) \ | ||
315 | static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,const char *buf,size_t count) \ | ||
316 | { return sf_name(ctl_id,class_dev,buf,count); } | ||
317 | |||
318 | #define CREATE_BATCH(ctl_id) \ | ||
319 | CREATE_SHOW_INSTANCE(show_name,ctl_id) \ | ||
320 | CREATE_SHOW_INSTANCE(show_type,ctl_id) \ | ||
321 | CREATE_SHOW_INSTANCE(show_min,ctl_id) \ | ||
322 | CREATE_SHOW_INSTANCE(show_max,ctl_id) \ | ||
323 | CREATE_SHOW_INSTANCE(show_val_norm,ctl_id) \ | ||
324 | CREATE_SHOW_INSTANCE(show_val_custom,ctl_id) \ | ||
325 | CREATE_SHOW_INSTANCE(show_enum,ctl_id) \ | ||
326 | CREATE_SHOW_INSTANCE(show_bits,ctl_id) \ | ||
327 | CREATE_STORE_INSTANCE(store_val_norm,ctl_id) \ | ||
328 | CREATE_STORE_INSTANCE(store_val_custom,ctl_id) \ | ||
329 | |||
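
To make the macro machinery concrete, here is what two of the ten wrappers produced by CREATE_BATCH(5) look like after preprocessing; this is the generated expansion shown only for illustration, not additional code in the patch.

	/* Expansion of CREATE_SHOW_INSTANCE(show_name,5) and
	   CREATE_STORE_INSTANCE(store_val_norm,5): the control id is baked
	   into the wrapper's name and passed back as the first argument. */
	static ssize_t show_name_5(struct class_device *class_dev,char *buf)
	{ return show_name(5,class_dev,buf); }

	static ssize_t store_val_norm_5(struct class_device *class_dev,
	                                const char *buf,size_t count)
	{ return store_val_norm(5,class_dev,buf,count); }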
330 | CREATE_BATCH(0) | ||
331 | CREATE_BATCH(1) | ||
332 | CREATE_BATCH(2) | ||
333 | CREATE_BATCH(3) | ||
334 | CREATE_BATCH(4) | ||
335 | CREATE_BATCH(5) | ||
336 | CREATE_BATCH(6) | ||
337 | CREATE_BATCH(7) | ||
338 | CREATE_BATCH(8) | ||
339 | CREATE_BATCH(9) | ||
340 | CREATE_BATCH(10) | ||
341 | CREATE_BATCH(11) | ||
342 | CREATE_BATCH(12) | ||
343 | CREATE_BATCH(13) | ||
344 | CREATE_BATCH(14) | ||
345 | CREATE_BATCH(15) | ||
346 | CREATE_BATCH(16) | ||
347 | CREATE_BATCH(17) | ||
348 | CREATE_BATCH(18) | ||
349 | CREATE_BATCH(19) | ||
350 | CREATE_BATCH(20) | ||
351 | CREATE_BATCH(21) | ||
352 | CREATE_BATCH(22) | ||
353 | CREATE_BATCH(23) | ||
354 | CREATE_BATCH(24) | ||
355 | CREATE_BATCH(25) | ||
356 | CREATE_BATCH(26) | ||
357 | CREATE_BATCH(27) | ||
358 | CREATE_BATCH(28) | ||
359 | CREATE_BATCH(29) | ||
360 | CREATE_BATCH(30) | ||
361 | CREATE_BATCH(31) | ||
362 | CREATE_BATCH(32) | ||
363 | CREATE_BATCH(33) | ||
364 | CREATE_BATCH(34) | ||
365 | CREATE_BATCH(35) | ||
366 | CREATE_BATCH(36) | ||
367 | CREATE_BATCH(37) | ||
368 | CREATE_BATCH(38) | ||
369 | CREATE_BATCH(39) | ||
370 | CREATE_BATCH(40) | ||
371 | CREATE_BATCH(41) | ||
372 | CREATE_BATCH(42) | ||
373 | CREATE_BATCH(43) | ||
374 | CREATE_BATCH(44) | ||
375 | CREATE_BATCH(45) | ||
376 | CREATE_BATCH(46) | ||
377 | CREATE_BATCH(47) | ||
378 | CREATE_BATCH(48) | ||
379 | CREATE_BATCH(49) | ||
380 | CREATE_BATCH(50) | ||
381 | CREATE_BATCH(51) | ||
382 | CREATE_BATCH(52) | ||
383 | CREATE_BATCH(53) | ||
384 | CREATE_BATCH(54) | ||
385 | CREATE_BATCH(55) | ||
386 | CREATE_BATCH(56) | ||
387 | CREATE_BATCH(57) | ||
388 | CREATE_BATCH(58) | ||
389 | CREATE_BATCH(59) | ||
390 | |||
391 | struct pvr2_sysfs_func_set { | ||
392 | ssize_t (*show_name)(struct class_device *,char *); | ||
393 | ssize_t (*show_type)(struct class_device *,char *); | ||
394 | ssize_t (*show_min)(struct class_device *,char *); | ||
395 | ssize_t (*show_max)(struct class_device *,char *); | ||
396 | ssize_t (*show_enum)(struct class_device *,char *); | ||
397 | ssize_t (*show_bits)(struct class_device *,char *); | ||
398 | ssize_t (*show_val_norm)(struct class_device *,char *); | ||
399 | ssize_t (*store_val_norm)(struct class_device *, | ||
400 | const char *,size_t); | ||
401 | ssize_t (*show_val_custom)(struct class_device *,char *); | ||
402 | ssize_t (*store_val_custom)(struct class_device *, | ||
403 | const char *,size_t); | ||
404 | }; | ||
405 | |||
406 | #define INIT_BATCH(ctl_id) \ | ||
407 | [ctl_id] = { \ | ||
408 | .show_name = show_name_##ctl_id, \ | ||
409 | .show_type = show_type_##ctl_id, \ | ||
410 | .show_min = show_min_##ctl_id, \ | ||
411 | .show_max = show_max_##ctl_id, \ | ||
412 | .show_enum = show_enum_##ctl_id, \ | ||
413 | .show_bits = show_bits_##ctl_id, \ | ||
414 | .show_val_norm = show_val_norm_##ctl_id, \ | ||
415 | .store_val_norm = store_val_norm_##ctl_id, \ | ||
416 | .show_val_custom = show_val_custom_##ctl_id, \ | ||
417 | .store_val_custom = store_val_custom_##ctl_id, \ | ||
418 | } \ | ||
419 | |||
420 | static struct pvr2_sysfs_func_set funcs[] = { | ||
421 | INIT_BATCH(0), | ||
422 | INIT_BATCH(1), | ||
423 | INIT_BATCH(2), | ||
424 | INIT_BATCH(3), | ||
425 | INIT_BATCH(4), | ||
426 | INIT_BATCH(5), | ||
427 | INIT_BATCH(6), | ||
428 | INIT_BATCH(7), | ||
429 | INIT_BATCH(8), | ||
430 | INIT_BATCH(9), | ||
431 | INIT_BATCH(10), | ||
432 | INIT_BATCH(11), | ||
433 | INIT_BATCH(12), | ||
434 | INIT_BATCH(13), | ||
435 | INIT_BATCH(14), | ||
436 | INIT_BATCH(15), | ||
437 | INIT_BATCH(16), | ||
438 | INIT_BATCH(17), | ||
439 | INIT_BATCH(18), | ||
440 | INIT_BATCH(19), | ||
441 | INIT_BATCH(20), | ||
442 | INIT_BATCH(21), | ||
443 | INIT_BATCH(22), | ||
444 | INIT_BATCH(23), | ||
445 | INIT_BATCH(24), | ||
446 | INIT_BATCH(25), | ||
447 | INIT_BATCH(26), | ||
448 | INIT_BATCH(27), | ||
449 | INIT_BATCH(28), | ||
450 | INIT_BATCH(29), | ||
451 | INIT_BATCH(30), | ||
452 | INIT_BATCH(31), | ||
453 | INIT_BATCH(32), | ||
454 | INIT_BATCH(33), | ||
455 | INIT_BATCH(34), | ||
456 | INIT_BATCH(35), | ||
457 | INIT_BATCH(36), | ||
458 | INIT_BATCH(37), | ||
459 | INIT_BATCH(38), | ||
460 | INIT_BATCH(39), | ||
461 | INIT_BATCH(40), | ||
462 | INIT_BATCH(41), | ||
463 | INIT_BATCH(42), | ||
464 | INIT_BATCH(43), | ||
465 | INIT_BATCH(44), | ||
466 | INIT_BATCH(45), | ||
467 | INIT_BATCH(46), | ||
468 | INIT_BATCH(47), | ||
469 | INIT_BATCH(48), | ||
470 | INIT_BATCH(49), | ||
471 | INIT_BATCH(50), | ||
472 | INIT_BATCH(51), | ||
473 | INIT_BATCH(52), | ||
474 | INIT_BATCH(53), | ||
475 | INIT_BATCH(54), | ||
476 | INIT_BATCH(55), | ||
477 | INIT_BATCH(56), | ||
478 | INIT_BATCH(57), | ||
479 | INIT_BATCH(58), | ||
480 | INIT_BATCH(59), | ||
481 | }; | ||
482 | |||
483 | |||
484 | static void pvr2_sysfs_add_control(struct pvr2_sysfs *sfp,int ctl_id) | ||
485 | { | ||
486 | struct pvr2_sysfs_ctl_item *cip; | ||
487 | struct pvr2_sysfs_func_set *fp; | ||
488 | struct pvr2_ctrl *cptr; | ||
489 | unsigned int cnt,acnt; | ||
490 | |||
491 | if ((ctl_id < 0) || (ctl_id >= (sizeof(funcs)/sizeof(funcs[0])))) { | ||
492 | return; | ||
493 | } | ||
494 | |||
495 | fp = funcs + ctl_id; | ||
496 | cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,ctl_id); | ||
497 | if (!cptr) return; | ||
498 | |||
499 | cip = kmalloc(sizeof(*cip),GFP_KERNEL); | ||
500 | if (!cip) return; | ||
501 | memset(cip,0,sizeof(*cip)); | ||
502 | pvr2_sysfs_trace("Creating pvr2_sysfs_ctl_item id=%p",cip); | ||
503 | |||
504 | cip->cptr = cptr; | ||
505 | |||
506 | cip->chptr = sfp; | ||
507 | cip->item_next = 0; | ||
508 | if (sfp->item_last) { | ||
509 | sfp->item_last->item_next = cip; | ||
510 | } else { | ||
511 | sfp->item_first = cip; | ||
512 | } | ||
513 | sfp->item_last = cip; | ||
514 | |||
515 | cip->attr_name.attr.owner = THIS_MODULE; | ||
516 | cip->attr_name.attr.name = "name"; | ||
517 | cip->attr_name.attr.mode = S_IRUGO; | ||
518 | cip->attr_name.show = fp->show_name; | ||
519 | |||
520 | cip->attr_type.attr.owner = THIS_MODULE; | ||
521 | cip->attr_type.attr.name = "type"; | ||
522 | cip->attr_type.attr.mode = S_IRUGO; | ||
523 | cip->attr_type.show = fp->show_type; | ||
524 | |||
525 | cip->attr_min.attr.owner = THIS_MODULE; | ||
526 | cip->attr_min.attr.name = "min_val"; | ||
527 | cip->attr_min.attr.mode = S_IRUGO; | ||
528 | cip->attr_min.show = fp->show_min; | ||
529 | |||
530 | cip->attr_max.attr.owner = THIS_MODULE; | ||
531 | cip->attr_max.attr.name = "max_val"; | ||
532 | cip->attr_max.attr.mode = S_IRUGO; | ||
533 | cip->attr_max.show = fp->show_max; | ||
534 | |||
535 | cip->attr_val.attr.owner = THIS_MODULE; | ||
536 | cip->attr_val.attr.name = "cur_val"; | ||
537 | cip->attr_val.attr.mode = S_IRUGO; | ||
538 | |||
539 | cip->attr_custom.attr.owner = THIS_MODULE; | ||
540 | cip->attr_custom.attr.name = "custom_val"; | ||
541 | cip->attr_custom.attr.mode = S_IRUGO; | ||
542 | |||
543 | cip->attr_enum.attr.owner = THIS_MODULE; | ||
544 | cip->attr_enum.attr.name = "enum_val"; | ||
545 | cip->attr_enum.attr.mode = S_IRUGO; | ||
546 | cip->attr_enum.show = fp->show_enum; | ||
547 | |||
548 | cip->attr_bits.attr.owner = THIS_MODULE; | ||
549 | cip->attr_bits.attr.name = "bit_val"; | ||
550 | cip->attr_bits.attr.mode = S_IRUGO; | ||
551 | cip->attr_bits.show = fp->show_bits; | ||
552 | |||
553 | if (pvr2_ctrl_is_writable(cptr)) { | ||
554 | cip->attr_val.attr.mode |= S_IWUSR|S_IWGRP; | ||
555 | cip->attr_custom.attr.mode |= S_IWUSR|S_IWGRP; | ||
556 | } | ||
557 | |||
558 | acnt = 0; | ||
559 | cip->attr_gen[acnt++] = &cip->attr_name.attr; | ||
560 | cip->attr_gen[acnt++] = &cip->attr_type.attr; | ||
561 | cip->attr_gen[acnt++] = &cip->attr_val.attr; | ||
562 | cip->attr_val.show = fp->show_val_norm; | ||
563 | cip->attr_val.store = fp->store_val_norm; | ||
564 | if (pvr2_ctrl_has_custom_symbols(cptr)) { | ||
565 | cip->attr_gen[acnt++] = &cip->attr_custom.attr; | ||
566 | cip->attr_custom.show = fp->show_val_custom; | ||
567 | cip->attr_custom.store = fp->store_val_custom; | ||
568 | } | ||
569 | switch (pvr2_ctrl_get_type(cptr)) { | ||
570 | case pvr2_ctl_enum: | ||
571 | // Control is an enumeration | ||
572 | cip->attr_gen[acnt++] = &cip->attr_enum.attr; | ||
573 | break; | ||
574 | case pvr2_ctl_int: | ||
575 | // Control is an integer | ||
576 | cip->attr_gen[acnt++] = &cip->attr_min.attr; | ||
577 | cip->attr_gen[acnt++] = &cip->attr_max.attr; | ||
578 | break; | ||
579 | case pvr2_ctl_bitmask: | ||
580 | // Control is a bitmask | ||
581 | cip->attr_gen[acnt++] = &cip->attr_bits.attr; | ||
582 | break; | ||
583 | default: break; | ||
584 | } | ||
585 | |||
586 | cnt = scnprintf(cip->name,sizeof(cip->name)-1,"ctl_%s", | ||
587 | pvr2_ctrl_get_name(cptr)); | ||
588 | cip->name[cnt] = 0; | ||
589 | cip->grp.name = cip->name; | ||
590 | cip->grp.attrs = cip->attr_gen; | ||
591 | |||
592 | sysfs_create_group(&sfp->class_dev->kobj,&cip->grp); | ||
593 | } | ||
594 | |||
595 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
596 | static ssize_t debuginfo_show(struct class_device *,char *); | ||
597 | static ssize_t debugcmd_show(struct class_device *,char *); | ||
598 | static ssize_t debugcmd_store(struct class_device *,const char *,size_t count); | ||
599 | |||
600 | static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp) | ||
601 | { | ||
602 | struct pvr2_sysfs_debugifc *dip; | ||
603 | dip = kmalloc(sizeof(*dip),GFP_KERNEL); | ||
604 | if (!dip) return; | ||
605 | memset(dip,0,sizeof(*dip)); | ||
606 | dip->attr_debugcmd.attr.owner = THIS_MODULE; | ||
607 | dip->attr_debugcmd.attr.name = "debugcmd"; | ||
608 | dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP; | ||
609 | dip->attr_debugcmd.show = debugcmd_show; | ||
610 | dip->attr_debugcmd.store = debugcmd_store; | ||
611 | dip->attr_debuginfo.attr.owner = THIS_MODULE; | ||
612 | dip->attr_debuginfo.attr.name = "debuginfo"; | ||
613 | dip->attr_debuginfo.attr.mode = S_IRUGO; | ||
614 | dip->attr_debuginfo.show = debuginfo_show; | ||
615 | sfp->debugifc = dip; | ||
616 | class_device_create_file(sfp->class_dev,&dip->attr_debugcmd); | ||
617 | class_device_create_file(sfp->class_dev,&dip->attr_debuginfo); | ||
618 | } | ||
619 | |||
620 | |||
621 | static void pvr2_sysfs_tear_down_debugifc(struct pvr2_sysfs *sfp) | ||
622 | { | ||
623 | if (!sfp->debugifc) return; | ||
624 | class_device_remove_file(sfp->class_dev, | ||
625 | &sfp->debugifc->attr_debuginfo); | ||
626 | class_device_remove_file(sfp->class_dev,&sfp->debugifc->attr_debugcmd); | ||
627 | kfree(sfp->debugifc); | ||
628 | sfp->debugifc = 0; | ||
629 | } | ||
630 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
631 | |||
632 | |||
633 | static void pvr2_sysfs_add_controls(struct pvr2_sysfs *sfp) | ||
634 | { | ||
635 | unsigned int idx,cnt; | ||
636 | cnt = pvr2_hdw_get_ctrl_count(sfp->channel.hdw); | ||
637 | for (idx = 0; idx < cnt; idx++) { | ||
638 | pvr2_sysfs_add_control(sfp,idx); | ||
639 | } | ||
640 | } | ||
641 | |||
642 | |||
643 | static void pvr2_sysfs_tear_down_controls(struct pvr2_sysfs *sfp) | ||
644 | { | ||
645 | struct pvr2_sysfs_ctl_item *cip1,*cip2; | ||
646 | for (cip1 = sfp->item_first; cip1; cip1 = cip2) { | ||
647 | cip2 = cip1->item_next; | ||
648 | sysfs_remove_group(&sfp->class_dev->kobj,&cip1->grp); | ||
649 | pvr2_sysfs_trace("Destroying pvr2_sysfs_ctl_item id=%p",cip1); | ||
650 | kfree(cip1); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | |||
655 | static void pvr2_sysfs_class_release(struct class *class) | ||
656 | { | ||
657 | struct pvr2_sysfs_class *clp; | ||
658 | clp = container_of(class,struct pvr2_sysfs_class,class); | ||
659 | pvr2_sysfs_trace("Destroying pvr2_sysfs_class id=%p",clp); | ||
660 | kfree(clp); | ||
661 | } | ||
662 | |||
663 | |||
664 | static void pvr2_sysfs_release(struct class_device *class_dev) | ||
665 | { | ||
666 | pvr2_sysfs_trace("Releasing class_dev id=%p",class_dev); | ||
667 | kfree(class_dev); | ||
668 | } | ||
669 | |||
670 | |||
671 | static void class_dev_destroy(struct pvr2_sysfs *sfp) | ||
672 | { | ||
673 | if (!sfp->class_dev) return; | ||
674 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
675 | pvr2_sysfs_tear_down_debugifc(sfp); | ||
676 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
677 | pvr2_sysfs_tear_down_controls(sfp); | ||
678 | class_device_remove_file(sfp->class_dev,&sfp->attr_v4l_minor_number); | ||
679 | class_device_remove_file(sfp->class_dev,&sfp->attr_unit_number); | ||
680 | pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev); | ||
681 | sfp->class_dev->class_data = 0; | ||
682 | class_device_unregister(sfp->class_dev); | ||
683 | sfp->class_dev = 0; | ||
684 | } | ||
685 | |||
686 | |||
687 | static ssize_t v4l_minor_number_show(struct class_device *class_dev,char *buf) | ||
688 | { | ||
689 | struct pvr2_sysfs *sfp; | ||
690 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
691 | if (!sfp) return -EINVAL; | ||
692 | return scnprintf(buf,PAGE_SIZE,"%d\n", | ||
693 | pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw)); | ||
694 | } | ||
695 | |||
696 | |||
697 | static ssize_t unit_number_show(struct class_device *class_dev,char *buf) | ||
698 | { | ||
699 | struct pvr2_sysfs *sfp; | ||
700 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
701 | if (!sfp) return -EINVAL; | ||
702 | return scnprintf(buf,PAGE_SIZE,"%d\n", | ||
703 | pvr2_hdw_get_unit_number(sfp->channel.hdw)); | ||
704 | } | ||
705 | |||
706 | |||
707 | static void class_dev_create(struct pvr2_sysfs *sfp, | ||
708 | struct pvr2_sysfs_class *class_ptr) | ||
709 | { | ||
710 | struct usb_device *usb_dev; | ||
711 | struct class_device *class_dev; | ||
712 | usb_dev = pvr2_hdw_get_dev(sfp->channel.hdw); | ||
713 | if (!usb_dev) return; | ||
714 | class_dev = kmalloc(sizeof(*class_dev),GFP_KERNEL); | ||
715 | if (!class_dev) return; | ||
716 | memset(class_dev,0,sizeof(*class_dev)); | ||
717 | |||
718 | pvr2_sysfs_trace("Creating class_dev id=%p",class_dev); | ||
719 | |||
720 | class_dev->class = &class_ptr->class; | ||
721 | if (pvr2_hdw_get_sn(sfp->channel.hdw)) { | ||
722 | snprintf(class_dev->class_id,BUS_ID_SIZE,"sn-%lu", | ||
723 | pvr2_hdw_get_sn(sfp->channel.hdw)); | ||
724 | } else if (pvr2_hdw_get_unit_number(sfp->channel.hdw) >= 0) { | ||
725 | snprintf(class_dev->class_id,BUS_ID_SIZE,"unit-%c", | ||
726 | pvr2_hdw_get_unit_number(sfp->channel.hdw) + 'a'); | ||
727 | } else { | ||
728 | kfree(class_dev); | ||
729 | return; | ||
730 | } | ||
731 | |||
732 | class_dev->dev = &usb_dev->dev; | ||
733 | |||
734 | sfp->class_dev = class_dev; | ||
735 | class_dev->class_data = sfp; | ||
736 | class_device_register(class_dev); | ||
737 | |||
738 | sfp->attr_v4l_minor_number.attr.owner = THIS_MODULE; | ||
739 | sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number"; | ||
740 | sfp->attr_v4l_minor_number.attr.mode = S_IRUGO; | ||
741 | sfp->attr_v4l_minor_number.show = v4l_minor_number_show; | ||
742 | sfp->attr_v4l_minor_number.store = 0; | ||
743 | class_device_create_file(sfp->class_dev,&sfp->attr_v4l_minor_number); | ||
744 | sfp->attr_unit_number.attr.owner = THIS_MODULE; | ||
745 | sfp->attr_unit_number.attr.name = "unit_number"; | ||
746 | sfp->attr_unit_number.attr.mode = S_IRUGO; | ||
747 | sfp->attr_unit_number.show = unit_number_show; | ||
748 | sfp->attr_unit_number.store = 0; | ||
749 | class_device_create_file(sfp->class_dev,&sfp->attr_unit_number); | ||
750 | |||
751 | pvr2_sysfs_add_controls(sfp); | ||
752 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
753 | pvr2_sysfs_add_debugifc(sfp); | ||
754 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
755 | } | ||
756 | |||
757 | |||
758 | static void pvr2_sysfs_internal_check(struct pvr2_channel *chp) | ||
759 | { | ||
760 | struct pvr2_sysfs *sfp; | ||
761 | sfp = container_of(chp,struct pvr2_sysfs,channel); | ||
762 | if (!sfp->channel.mc_head->disconnect_flag) return; | ||
763 | pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_sysfs id=%p",sfp); | ||
764 | class_dev_destroy(sfp); | ||
765 | pvr2_channel_done(&sfp->channel); | ||
766 | kfree(sfp); | ||
767 | } | ||
768 | |||
769 | |||
770 | struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *mp, | ||
771 | struct pvr2_sysfs_class *class_ptr) | ||
772 | { | ||
773 | struct pvr2_sysfs *sfp; | ||
774 | sfp = kmalloc(sizeof(*sfp),GFP_KERNEL); | ||
775 | if (!sfp) return sfp; | ||
776 | memset(sfp,0,sizeof(*sfp)); | ||
777 | pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_sysfs id=%p",sfp); | ||
778 | pvr2_channel_init(&sfp->channel,mp); | ||
779 | sfp->channel.check_func = pvr2_sysfs_internal_check; | ||
780 | |||
781 | class_dev_create(sfp,class_ptr); | ||
782 | return sfp; | ||
783 | } | ||
784 | |||
785 | |||
786 | static int pvr2_sysfs_hotplug(struct class_device *cd,char **envp, | ||
787 | int numenvp,char *buf,int size) | ||
788 | { | ||
789 | /* Even though we don't do anything here, we still need this function | ||
790 | because sysfs will still try to call it. */ | ||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | struct pvr2_sysfs_class *pvr2_sysfs_class_create(void) | ||
795 | { | ||
796 | struct pvr2_sysfs_class *clp; | ||
797 | clp = kmalloc(sizeof(*clp),GFP_KERNEL); | ||
798 | if (!clp) return clp; | ||
799 | memset(clp,0,sizeof(*clp)); | ||
800 | pvr2_sysfs_trace("Creating pvr2_sysfs_class id=%p",clp); | ||
801 | clp->class.name = "pvrusb2"; | ||
802 | clp->class.class_release = pvr2_sysfs_class_release; | ||
803 | clp->class.release = pvr2_sysfs_release; | ||
804 | clp->class.uevent = pvr2_sysfs_hotplug; | ||
805 | if (class_register(&clp->class)) { | ||
806 | pvr2_sysfs_trace( | ||
807 | "Registration failed for pvr2_sysfs_class id=%p",clp); | ||
808 | kfree(clp); | ||
809 | clp = 0; | ||
810 | } | ||
811 | return clp; | ||
812 | } | ||
813 | |||
814 | |||
815 | void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *clp) | ||
816 | { | ||
817 | class_unregister(&clp->class); | ||
818 | } | ||
819 | |||
820 | |||
821 | #ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC | ||
822 | static ssize_t debuginfo_show(struct class_device *class_dev,char *buf) | ||
823 | { | ||
824 | struct pvr2_sysfs *sfp; | ||
825 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
826 | if (!sfp) return -EINVAL; | ||
827 | pvr2_hdw_trigger_module_log(sfp->channel.hdw); | ||
828 | return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE); | ||
829 | } | ||
830 | |||
831 | |||
832 | static ssize_t debugcmd_show(struct class_device *class_dev,char *buf) | ||
833 | { | ||
834 | struct pvr2_sysfs *sfp; | ||
835 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
836 | if (!sfp) return -EINVAL; | ||
837 | return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE); | ||
838 | } | ||
839 | |||
840 | |||
841 | static ssize_t debugcmd_store(struct class_device *class_dev, | ||
842 | const char *buf,size_t count) | ||
843 | { | ||
844 | struct pvr2_sysfs *sfp; | ||
845 | int ret; | ||
846 | |||
847 | sfp = (struct pvr2_sysfs *)class_dev->class_data; | ||
848 | if (!sfp) return -EINVAL; | ||
849 | |||
850 | ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count); | ||
851 | if (ret < 0) return ret; | ||
852 | return count; | ||
853 | } | ||
854 | #endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ | ||
855 | |||
856 | |||
857 | /* | ||
858 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
859 | *** Local Variables: *** | ||
860 | *** mode: c *** | ||
861 | *** fill-column: 75 *** | ||
862 | *** tab-width: 8 *** | ||
863 | *** c-basic-offset: 8 *** | ||
864 | *** End: *** | ||
865 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.h b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h new file mode 100644 index 000000000000..ff9373b47f8f --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_SYSFS_H | ||
22 | #define __PVRUSB2_SYSFS_H | ||
23 | |||
24 | #include <linux/list.h> | ||
25 | #include <linux/sysfs.h> | ||
26 | #include "pvrusb2-context.h" | ||
27 | |||
28 | struct pvr2_sysfs; | ||
29 | struct pvr2_sysfs_class; | ||
30 | |||
31 | struct pvr2_sysfs_class *pvr2_sysfs_class_create(void); | ||
32 | void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *); | ||
33 | |||
34 | struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *, | ||
35 | struct pvr2_sysfs_class *); | ||
36 | |||
37 | #endif /* __PVRUSB2_SYSFS_H */ | ||
38 | |||
39 | /* | ||
40 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
41 | *** Local Variables: *** | ||
42 | *** mode: c *** | ||
43 | *** fill-column: 75 *** | ||
44 | *** tab-width: 8 *** | ||
45 | *** c-basic-offset: 8 *** | ||
46 | *** End: *** | ||
47 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.c b/drivers/media/video/pvrusb2/pvrusb2-tuner.c new file mode 100644 index 000000000000..f4aba8144ce0 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include "pvrusb2.h" | ||
24 | #include "pvrusb2-util.h" | ||
25 | #include "pvrusb2-tuner.h" | ||
26 | #include "pvrusb2-hdw-internal.h" | ||
27 | #include "pvrusb2-debug.h" | ||
28 | #include <linux/videodev2.h> | ||
29 | #include <media/tuner.h> | ||
30 | #include <media/v4l2-common.h> | ||
31 | |||
32 | struct pvr2_tuner_handler { | ||
33 | struct pvr2_hdw *hdw; | ||
34 | struct pvr2_i2c_client *client; | ||
35 | struct pvr2_i2c_handler i2c_handler; | ||
36 | int type_update_fl; | ||
37 | }; | ||
38 | |||
39 | |||
40 | static void set_type(struct pvr2_tuner_handler *ctxt) | ||
41 | { | ||
42 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
43 | struct tuner_setup setup; | ||
44 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c tuner set_type(%d)",hdw->tuner_type); | ||
45 | if (((int)(hdw->tuner_type)) < 0) return; | ||
46 | |||
47 | setup.addr = ADDR_UNSET; | ||
48 | setup.type = hdw->tuner_type; | ||
49 | setup.mode_mask = T_RADIO | T_ANALOG_TV; | ||
50 | /* We may really want mode_mask to be T_ANALOG_TV for now */ | ||
51 | pvr2_i2c_client_cmd(ctxt->client,TUNER_SET_TYPE_ADDR,&setup); | ||
52 | ctxt->type_update_fl = 0; | ||
53 | } | ||
54 | |||
55 | |||
56 | static int tuner_check(struct pvr2_tuner_handler *ctxt) | ||
57 | { | ||
58 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
59 | if (hdw->tuner_updated) ctxt->type_update_fl = !0; | ||
60 | return ctxt->type_update_fl != 0; | ||
61 | } | ||
62 | |||
63 | |||
64 | static void tuner_update(struct pvr2_tuner_handler *ctxt) | ||
65 | { | ||
66 | if (ctxt->type_update_fl) set_type(ctxt); | ||
67 | } | ||
68 | |||
69 | |||
70 | static void pvr2_tuner_detach(struct pvr2_tuner_handler *ctxt) | ||
71 | { | ||
72 | ctxt->client->handler = 0; | ||
73 | kfree(ctxt); | ||
74 | } | ||
75 | |||
76 | |||
77 | static unsigned int pvr2_tuner_describe(struct pvr2_tuner_handler *ctxt,char *buf,unsigned int cnt) | ||
78 | { | ||
79 | return scnprintf(buf,cnt,"handler: pvrusb2-tuner"); | ||
80 | } | ||
81 | |||
82 | |||
83 | static const struct pvr2_i2c_handler_functions tuner_funcs = { | ||
84 | .detach = (void (*)(void *))pvr2_tuner_detach, | ||
85 | .check = (int (*)(void *))tuner_check, | ||
86 | .update = (void (*)(void *))tuner_update, | ||
87 | .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_tuner_describe, | ||
88 | }; | ||
89 | |||
90 | |||
91 | int pvr2_i2c_tuner_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp) | ||
92 | { | ||
93 | struct pvr2_tuner_handler *ctxt; | ||
94 | if (cp->handler) return 0; | ||
95 | |||
96 | ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL); | ||
97 | if (!ctxt) return 0; | ||
98 | memset(ctxt,0,sizeof(*ctxt)); | ||
99 | |||
100 | ctxt->i2c_handler.func_data = ctxt; | ||
101 | ctxt->i2c_handler.func_table = &tuner_funcs; | ||
102 | ctxt->type_update_fl = !0; | ||
103 | ctxt->client = cp; | ||
104 | ctxt->hdw = hdw; | ||
105 | cp->handler = &ctxt->i2c_handler; | ||
106 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tuner handler set up", | ||
107 | cp->client->addr); | ||
108 | return !0; | ||
109 | } | ||
110 | |||
111 | |||
112 | |||
113 | |||
114 | /* | ||
115 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
116 | *** Local Variables: *** | ||
117 | *** mode: c *** | ||
118 | *** fill-column: 70 *** | ||
119 | *** tab-width: 8 *** | ||
120 | *** c-basic-offset: 8 *** | ||
121 | *** End: *** | ||
122 | */ | ||
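
Editorial aside (not part of the patch): tuner_funcs above is an operations table whose entries are cast so that an opaque context pointer can be threaded through pvr2_i2c_handler_functions. The standalone sketch below shows the same "ops table plus void * context" idiom in plain C; all names (widget, widget_ops, tuner_ctx) are invented for illustration. Taking void * directly in the callbacks, as done here, avoids the function-pointer casts used in the patch.

    #include <stdio.h>

    /* Hypothetical operations table: each callback receives an opaque
       context pointer, loosely mirroring pvr2_i2c_handler_functions. */
    struct widget_ops {
            int  (*check)(void *ctx);
            void (*update)(void *ctx);
    };

    struct widget {
            const struct widget_ops *ops;
            void *ctx;
    };

    struct tuner_ctx { int dirty; };

    static int tuner_check(void *ctx)
    {
            return ((struct tuner_ctx *)ctx)->dirty;
    }

    static void tuner_update(void *ctx)
    {
            struct tuner_ctx *t = ctx;
            printf("updating tuner\n");
            t->dirty = 0;
    }

    static const struct widget_ops tuner_widget_ops = {
            .check  = tuner_check,
            .update = tuner_update,
    };

    int main(void)
    {
            struct tuner_ctx t = { .dirty = 1 };
            struct widget w = { .ops = &tuner_widget_ops, .ctx = &t };

            if (w.ops->check(w.ctx))
                    w.ops->update(w.ctx);
            return 0;
    }
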
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.h b/drivers/media/video/pvrusb2/pvrusb2-tuner.h new file mode 100644 index 000000000000..556f12aa9160 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_TUNER_H | ||
22 | #define __PVRUSB2_TUNER_H | ||
23 | |||
24 | #include "pvrusb2-i2c-core.h" | ||
25 | |||
26 | int pvr2_i2c_tuner_setup(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
27 | |||
28 | #endif /* __PVRUSB2_TUNER_H */ | ||
29 | |||
30 | /* | ||
31 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
32 | *** Local Variables: *** | ||
33 | *** mode: c *** | ||
34 | *** fill-column: 70 *** | ||
35 | *** tab-width: 8 *** | ||
36 | *** c-basic-offset: 8 *** | ||
37 | *** End: *** | ||
38 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-util.h b/drivers/media/video/pvrusb2/pvrusb2-util.h new file mode 100644 index 000000000000..e53aee416f56 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-util.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_UTIL_H | ||
22 | #define __PVRUSB2_UTIL_H | ||
23 | |||
24 | #define PVR2_DECOMPOSE_LE(t,i,d) \ | ||
25 | do { \ | ||
26 | (t)[i] = (d) & 0xff;\ | ||
27 | (t)[i+1] = ((d) >> 8) & 0xff;\ | ||
28 | (t)[i+2] = ((d) >> 16) & 0xff;\ | ||
29 | (t)[i+3] = ((d) >> 24) & 0xff;\ | ||
30 | } while(0) | ||
31 | |||
32 | #define PVR2_DECOMPOSE_BE(t,i,d) \ | ||
33 | do { \ | ||
34 | (t)[i+3] = (d) & 0xff;\ | ||
35 | (t)[i+2] = ((d) >> 8) & 0xff;\ | ||
36 | (t)[i+1] = ((d) >> 16) & 0xff;\ | ||
37 | (t)[i] = ((d) >> 24) & 0xff;\ | ||
38 | } while(0) | ||
39 | |||
40 | #define PVR2_COMPOSE_LE(t,i) \ | ||
41 | ((((u32)((t)[i+3])) << 24) | \ | ||
42 | (((u32)((t)[i+2])) << 16) | \ | ||
43 | (((u32)((t)[i+1])) << 8) | \ | ||
44 | ((u32)((t)[i]))) | ||
45 | |||
46 | #define PVR2_COMPOSE_BE(t,i) \ | ||
47 | ((((u32)((t)[i])) << 24) | \ | ||
48 | (((u32)((t)[i+1])) << 16) | \ | ||
49 | (((u32)((t)[i+2])) << 8) | \ | ||
50 | ((u32)((t)[i+3]))) | ||
51 | |||
52 | |||
53 | #endif /* __PVRUSB2_UTIL_H */ | ||
54 | |||
55 | /* | ||
56 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
57 | *** Local Variables: *** | ||
58 | *** mode: c *** | ||
59 | *** fill-column: 75 *** | ||
60 | *** tab-width: 8 *** | ||
61 | *** c-basic-offset: 8 *** | ||
62 | *** End: *** | ||
63 | */ | ||
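
Editorial aside (not part of the patch): the PVR2_DECOMPOSE_* / PVR2_COMPOSE_* macros scatter a 32-bit value into four bytes and gather it back, in little- or big-endian order. A quick round-trip demonstration of the little-endian pair may make the byte ordering concrete; the macros are copied verbatim, with u32 mapped to uint32_t for a userspace build. Purely illustrative.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t u32;

    /* Copied from pvrusb2-util.h: little-endian decompose/compose. */
    #define PVR2_DECOMPOSE_LE(t,i,d) \
        do { \
            (t)[i] = (d) & 0xff;\
            (t)[i+1] = ((d) >> 8) & 0xff;\
            (t)[i+2] = ((d) >> 16) & 0xff;\
            (t)[i+3] = ((d) >> 24) & 0xff;\
        } while(0)

    #define PVR2_COMPOSE_LE(t,i) \
        ((((u32)((t)[i+3])) << 24) | \
         (((u32)((t)[i+2])) << 16) | \
         (((u32)((t)[i+1])) << 8) | \
         ((u32)((t)[i])))

    int main(void)
    {
            unsigned char buf[4];
            u32 val = 0x12345678;

            PVR2_DECOMPOSE_LE(buf, 0, val);
            /* Least significant byte lands first: 78 56 34 12 */
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
            printf("round trip: 0x%08x\n", PVR2_COMPOSE_LE(buf, 0));
            return 0;
    }
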
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c new file mode 100644 index 000000000000..961951010c27 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c | |||
@@ -0,0 +1,1126 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/version.h> | ||
25 | #include "pvrusb2-context.h" | ||
26 | #include "pvrusb2-hdw.h" | ||
27 | #include "pvrusb2.h" | ||
28 | #include "pvrusb2-debug.h" | ||
29 | #include "pvrusb2-v4l2.h" | ||
30 | #include "pvrusb2-ioread.h" | ||
31 | #include <linux/videodev2.h> | ||
32 | #include <media/v4l2-common.h> | ||
33 | |||
34 | struct pvr2_v4l2_dev; | ||
35 | struct pvr2_v4l2_fh; | ||
36 | struct pvr2_v4l2; | ||
37 | |||
38 | /* V4L no longer provides the ability to set/get a private context pointer | ||
39 | (i.e. video_get_drvdata / video_set_drvdata), which means we have to | ||
40 | concoct our own context locating mechanism. Supposedly this is intended | ||
41 | to simplify driver implementation. It's not clear to me how that can | ||
42 | possibly be true. Our solution here is to maintain a lookup table of | ||
43 | our context instances, indexed by the minor device number of the V4L | ||
44 | device. See pvr2_v4l2_open() for some implications of this approach. */ | ||
45 | static struct pvr2_v4l2_dev *devices[256]; | ||
46 | static DEFINE_MUTEX(device_lock); | ||
47 | |||
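
Editorial aside (not part of the patch): the comment above describes the workaround of keeping a table of driver contexts indexed by the V4L minor device number, so that open() can recover its context. The essence is a bounds-checked array lookup; a tiny standalone sketch (hypothetical names, userspace C rather than kernel code — the driver additionally guards the table with device_lock) is:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative only: a fixed table of context pointers indexed by a
       small device number, with a bounds-checked lookup. */
    struct ctx { int id; };

    static struct ctx *table[256];

    static struct ctx *lookup(unsigned int minor)
    {
            if (minor >= sizeof(table)/sizeof(table[0]))
                    return NULL;
            return table[minor];   /* NULL if nothing registered there */
    }

    int main(void)
    {
            static struct ctx c = { .id = 42 };
            table[3] = &c;
            printf("minor 3 -> %d\n", lookup(3) ? lookup(3)->id : -1);
            printf("minor 9 -> %p\n", (void *)lookup(9));
            return 0;
    }
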
48 | struct pvr2_v4l2_dev { | ||
49 | struct pvr2_v4l2 *v4lp; | ||
50 | struct video_device *vdev; | ||
51 | struct pvr2_context_stream *stream; | ||
52 | int ctxt_idx; | ||
53 | enum pvr2_config config; | ||
54 | }; | ||
55 | |||
56 | struct pvr2_v4l2_fh { | ||
57 | struct pvr2_channel channel; | ||
58 | struct pvr2_v4l2_dev *dev_info; | ||
59 | enum v4l2_priority prio; | ||
60 | struct pvr2_ioread *rhp; | ||
61 | struct file *file; | ||
62 | struct pvr2_v4l2 *vhead; | ||
63 | struct pvr2_v4l2_fh *vnext; | ||
64 | struct pvr2_v4l2_fh *vprev; | ||
65 | wait_queue_head_t wait_data; | ||
66 | int fw_mode_flag; | ||
67 | }; | ||
68 | |||
69 | struct pvr2_v4l2 { | ||
70 | struct pvr2_channel channel; | ||
71 | struct pvr2_v4l2_fh *vfirst; | ||
72 | struct pvr2_v4l2_fh *vlast; | ||
73 | |||
74 | struct v4l2_prio_state prio; | ||
75 | |||
76 | /* streams */ | ||
77 | struct pvr2_v4l2_dev video_dev; | ||
78 | }; | ||
79 | |||
80 | static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; | ||
81 | module_param_array(video_nr, int, NULL, 0444); | ||
82 | MODULE_PARM_DESC(video_nr, "Offset for device's minor"); | ||
83 | |||
84 | struct v4l2_capability pvr_capability = { | ||
85 | .driver = "pvrusb2", | ||
86 | .card = "Hauppauge WinTV pvr-usb2", | ||
87 | .bus_info = "usb", | ||
88 | .version = KERNEL_VERSION(0,8,0), | ||
89 | .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE | | ||
90 | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | | ||
91 | V4L2_CAP_READWRITE), | ||
92 | .reserved = {0,0,0,0} | ||
93 | }; | ||
94 | |||
95 | static struct v4l2_tuner pvr_v4l2_tuners[]= { | ||
96 | { | ||
97 | .index = 0, | ||
98 | .name = "TV Tuner", | ||
99 | .type = V4L2_TUNER_ANALOG_TV, | ||
100 | .capability = (V4L2_TUNER_CAP_NORM | | ||
101 | V4L2_TUNER_CAP_STEREO | | ||
102 | V4L2_TUNER_CAP_LANG1 | | ||
103 | V4L2_TUNER_CAP_LANG2), | ||
104 | .rangelow = 0, | ||
105 | .rangehigh = 0, | ||
106 | .rxsubchans = V4L2_TUNER_SUB_STEREO, | ||
107 | .audmode = V4L2_TUNER_MODE_STEREO, | ||
108 | .signal = 0, | ||
109 | .afc = 0, | ||
110 | .reserved = {0,0,0,0} | ||
111 | } | ||
112 | }; | ||
113 | |||
114 | struct v4l2_fmtdesc pvr_fmtdesc [] = { | ||
115 | { | ||
116 | .index = 0, | ||
117 | .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, | ||
118 | .flags = V4L2_FMT_FLAG_COMPRESSED, | ||
119 | .description = "MPEG1/2", | ||
120 | // This should really be V4L2_PIX_FMT_MPEG, but xawtv | ||
121 | // breaks when I do that. | ||
122 | .pixelformat = 0, // V4L2_PIX_FMT_MPEG, | ||
123 | .reserved = { 0, 0, 0, 0 } | ||
124 | } | ||
125 | }; | ||
126 | |||
127 | #define PVR_FORMAT_PIX 0 | ||
128 | #define PVR_FORMAT_VBI 1 | ||
129 | |||
130 | struct v4l2_format pvr_format [] = { | ||
131 | [PVR_FORMAT_PIX] = { | ||
132 | .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, | ||
133 | .fmt = { | ||
134 | .pix = { | ||
135 | .width = 720, | ||
136 | .height = 576, | ||
137 | // This should really be V4L2_PIX_FMT_MPEG, | ||
138 | // but xawtv breaks when I do that. | ||
139 | .pixelformat = 0, // V4L2_PIX_FMT_MPEG, | ||
140 | .field = V4L2_FIELD_INTERLACED, | ||
141 | .bytesperline = 0, // doesn't make sense | ||
142 | // here | ||
143 | //FIXME : Don't know what to put here... | ||
144 | .sizeimage = (32*1024), | ||
145 | .colorspace = 0, // doesn't make sense here | ||
146 | .priv = 0 | ||
147 | } | ||
148 | } | ||
149 | }, | ||
150 | [PVR_FORMAT_VBI] = { | ||
151 | .type = V4L2_BUF_TYPE_VBI_CAPTURE, | ||
152 | .fmt = { | ||
153 | .vbi = { | ||
154 | .sampling_rate = 27000000, | ||
155 | .offset = 248, | ||
156 | .samples_per_line = 1443, | ||
157 | .sample_format = V4L2_PIX_FMT_GREY, | ||
158 | .start = { 0, 0 }, | ||
159 | .count = { 0, 0 }, | ||
160 | .flags = 0, | ||
161 | .reserved = { 0, 0 } | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | }; | ||
166 | |||
167 | /* | ||
168 | * pvr_ioctl() | ||
169 | * | ||
170 | * This is part of Video 4 Linux API. The procedure handles ioctl() calls. | ||
171 | * | ||
172 | */ | ||
173 | static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file, | ||
174 | unsigned int cmd, void *arg) | ||
175 | { | ||
176 | struct pvr2_v4l2_fh *fh = file->private_data; | ||
177 | struct pvr2_v4l2 *vp = fh->vhead; | ||
178 | struct pvr2_v4l2_dev *dev_info = fh->dev_info; | ||
179 | struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; | ||
180 | int ret = -EINVAL; | ||
181 | |||
182 | if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { | ||
183 | v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd); | ||
184 | } | ||
185 | |||
186 | if (!pvr2_hdw_dev_ok(hdw)) { | ||
187 | pvr2_trace(PVR2_TRACE_ERROR_LEGS, | ||
188 | "ioctl failed - bad or no context"); | ||
189 | return -EFAULT; | ||
190 | } | ||
191 | |||
192 | /* check priority */ | ||
193 | switch (cmd) { | ||
194 | case VIDIOC_S_CTRL: | ||
195 | case VIDIOC_S_STD: | ||
196 | case VIDIOC_S_INPUT: | ||
197 | case VIDIOC_S_TUNER: | ||
198 | case VIDIOC_S_FREQUENCY: | ||
199 | ret = v4l2_prio_check(&vp->prio, &fh->prio); | ||
200 | if (ret) | ||
201 | return ret; | ||
202 | } | ||
203 | |||
204 | switch (cmd) { | ||
205 | case VIDIOC_QUERYCAP: | ||
206 | { | ||
207 | struct v4l2_capability *cap = arg; | ||
208 | |||
209 | memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); | ||
210 | |||
211 | ret = 0; | ||
212 | break; | ||
213 | } | ||
214 | |||
215 | case VIDIOC_G_PRIORITY: | ||
216 | { | ||
217 | enum v4l2_priority *p = arg; | ||
218 | |||
219 | *p = v4l2_prio_max(&vp->prio); | ||
220 | ret = 0; | ||
221 | break; | ||
222 | } | ||
223 | |||
224 | case VIDIOC_S_PRIORITY: | ||
225 | { | ||
226 | enum v4l2_priority *prio = arg; | ||
227 | |||
228 | ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio); | ||
229 | break; | ||
230 | } | ||
231 | |||
232 | case VIDIOC_ENUMSTD: | ||
233 | { | ||
234 | struct v4l2_standard *vs = (struct v4l2_standard *)arg; | ||
235 | int idx = vs->index; | ||
236 | ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1); | ||
237 | break; | ||
238 | } | ||
239 | |||
240 | case VIDIOC_G_STD: | ||
241 | { | ||
242 | int val = 0; | ||
243 | ret = pvr2_ctrl_get_value( | ||
244 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val); | ||
245 | *(v4l2_std_id *)arg = val; | ||
246 | break; | ||
247 | } | ||
248 | |||
249 | case VIDIOC_S_STD: | ||
250 | { | ||
251 | ret = pvr2_ctrl_set_value( | ||
252 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR), | ||
253 | *(v4l2_std_id *)arg); | ||
254 | break; | ||
255 | } | ||
256 | |||
257 | case VIDIOC_ENUMINPUT: | ||
258 | { | ||
259 | struct pvr2_ctrl *cptr; | ||
260 | struct v4l2_input *vi = (struct v4l2_input *)arg; | ||
261 | struct v4l2_input tmp; | ||
262 | unsigned int cnt; | ||
263 | |||
264 | cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); | ||
265 | |||
266 | memset(&tmp,0,sizeof(tmp)); | ||
267 | tmp.index = vi->index; | ||
268 | ret = 0; | ||
269 | switch (vi->index) { | ||
270 | case PVR2_CVAL_INPUT_TV: | ||
271 | case PVR2_CVAL_INPUT_RADIO: | ||
272 | tmp.type = V4L2_INPUT_TYPE_TUNER; | ||
273 | break; | ||
274 | case PVR2_CVAL_INPUT_SVIDEO: | ||
275 | case PVR2_CVAL_INPUT_COMPOSITE: | ||
276 | tmp.type = V4L2_INPUT_TYPE_CAMERA; | ||
277 | break; | ||
278 | default: | ||
279 | ret = -EINVAL; | ||
280 | break; | ||
281 | } | ||
282 | if (ret < 0) break; | ||
283 | |||
284 | cnt = 0; | ||
285 | pvr2_ctrl_get_valname(cptr,vi->index, | ||
286 | tmp.name,sizeof(tmp.name)-1,&cnt); | ||
287 | tmp.name[cnt] = 0; | ||
288 | |||
289 | /* Don't bother with audioset, since this driver currently | ||
290 | always switches the audio whenever the video is | ||
291 | switched. */ | ||
292 | |||
293 | /* Handling std is a tougher problem. It doesn't make | ||
294 | sense in cases where a device might be multi-standard. | ||
295 | We could just copy out the current value for the | ||
296 | standard, but it can change over time. For now just | ||
297 | leave it zero. */ | ||
298 | |||
299 | memcpy(vi, &tmp, sizeof(tmp)); | ||
300 | |||
301 | ret = 0; | ||
302 | break; | ||
303 | } | ||
304 | |||
305 | case VIDIOC_G_INPUT: | ||
306 | { | ||
307 | struct pvr2_ctrl *cptr; | ||
308 | struct v4l2_input *vi = (struct v4l2_input *)arg; | ||
309 | int val; | ||
310 | cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT); | ||
311 | val = 0; | ||
312 | ret = pvr2_ctrl_get_value(cptr,&val); | ||
313 | vi->index = val; | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | case VIDIOC_S_INPUT: | ||
318 | { | ||
319 | struct v4l2_input *vi = (struct v4l2_input *)arg; | ||
320 | ret = pvr2_ctrl_set_value( | ||
321 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT), | ||
322 | vi->index); | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | case VIDIOC_ENUMAUDIO: | ||
327 | { | ||
328 | ret = -EINVAL; | ||
329 | break; | ||
330 | } | ||
331 | |||
332 | case VIDIOC_G_AUDIO: | ||
333 | { | ||
334 | ret = -EINVAL; | ||
335 | break; | ||
336 | } | ||
337 | |||
338 | case VIDIOC_S_AUDIO: | ||
339 | { | ||
340 | ret = -EINVAL; | ||
341 | break; | ||
342 | } | ||
343 | case VIDIOC_G_TUNER: | ||
344 | { | ||
345 | struct v4l2_tuner *vt = (struct v4l2_tuner *)arg; | ||
346 | unsigned int status_mask; | ||
347 | int val; | ||
349 | 		if (vt->index != 0) break; | ||
349 | |||
350 | status_mask = pvr2_hdw_get_signal_status(hdw); | ||
351 | |||
352 | memcpy(vt, &pvr_v4l2_tuners[vt->index], | ||
353 | sizeof(struct v4l2_tuner)); | ||
354 | |||
355 | vt->signal = 0; | ||
356 | if (status_mask & PVR2_SIGNAL_OK) { | ||
357 | if (status_mask & PVR2_SIGNAL_STEREO) { | ||
358 | vt->rxsubchans = V4L2_TUNER_SUB_STEREO; | ||
359 | } else { | ||
360 | vt->rxsubchans = V4L2_TUNER_SUB_MONO; | ||
361 | } | ||
362 | if (status_mask & PVR2_SIGNAL_SAP) { | ||
363 | vt->rxsubchans |= (V4L2_TUNER_SUB_LANG1 | | ||
364 | V4L2_TUNER_SUB_LANG2); | ||
365 | } | ||
366 | vt->signal = 65535; | ||
367 | } | ||
368 | |||
369 | val = 0; | ||
370 | ret = pvr2_ctrl_get_value( | ||
371 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE), | ||
372 | &val); | ||
373 | vt->audmode = val; | ||
374 | break; | ||
375 | } | ||
376 | |||
377 | case VIDIOC_S_TUNER: | ||
378 | { | ||
379 | struct v4l2_tuner *vt=(struct v4l2_tuner *)arg; | ||
380 | |||
381 | if (vt->index != 0) | ||
382 | break; | ||
383 | |||
384 | ret = pvr2_ctrl_set_value( | ||
385 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE), | ||
386 | vt->audmode); | ||
387 | 		break; | ||
387 | 	} | ||
388 | |||
389 | case VIDIOC_S_FREQUENCY: | ||
390 | { | ||
391 | const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg; | ||
392 | ret = pvr2_ctrl_set_value( | ||
393 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY), | ||
394 | vf->frequency * 62500); | ||
395 | break; | ||
396 | } | ||
397 | |||
398 | case VIDIOC_G_FREQUENCY: | ||
399 | { | ||
400 | struct v4l2_frequency *vf = (struct v4l2_frequency *)arg; | ||
401 | int val = 0; | ||
402 | ret = pvr2_ctrl_get_value( | ||
403 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY), | ||
404 | &val); | ||
405 | val /= 62500; | ||
406 | vf->frequency = val; | ||
407 | break; | ||
408 | } | ||
409 | |||
410 | case VIDIOC_ENUM_FMT: | ||
411 | { | ||
412 | struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg; | ||
413 | |||
414 | 		/* Only one format is supported: MPEG. */ | ||
415 | if (fd->index != 0) | ||
416 | break; | ||
417 | |||
418 | memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc)); | ||
419 | ret = 0; | ||
420 | break; | ||
421 | } | ||
422 | |||
423 | case VIDIOC_G_FMT: | ||
424 | { | ||
425 | struct v4l2_format *vf = (struct v4l2_format *)arg; | ||
426 | int val; | ||
427 | switch(vf->type) { | ||
428 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: | ||
429 | memcpy(vf, &pvr_format[PVR_FORMAT_PIX], | ||
430 | sizeof(struct v4l2_format)); | ||
431 | val = 0; | ||
432 | pvr2_ctrl_get_value( | ||
433 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES), | ||
434 | &val); | ||
435 | vf->fmt.pix.width = val; | ||
436 | val = 0; | ||
437 | pvr2_ctrl_get_value( | ||
438 | pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES), | ||
439 | &val); | ||
440 | vf->fmt.pix.height = val; | ||
441 | ret = 0; | ||
442 | break; | ||
443 | case V4L2_BUF_TYPE_VBI_CAPTURE: | ||
444 | 			// ????? Still need to figure out how to do VBI correctly | ||
445 | ret = -EINVAL; | ||
446 | break; | ||
447 | default: | ||
448 | ret = -EINVAL; | ||
449 | break; | ||
450 | } | ||
451 | break; | ||
452 | } | ||
453 | |||
454 | case VIDIOC_TRY_FMT: | ||
455 | case VIDIOC_S_FMT: | ||
456 | { | ||
457 | struct v4l2_format *vf = (struct v4l2_format *)arg; | ||
458 | |||
459 | ret = 0; | ||
460 | switch(vf->type) { | ||
461 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: { | ||
462 | int h = vf->fmt.pix.height; | ||
463 | int w = vf->fmt.pix.width; | ||
464 | |||
465 | if (h < 200) { | ||
466 | h = 200; | ||
467 | } else if (h > 625) { | ||
468 | h = 625; | ||
469 | } | ||
470 | if (w < 320) { | ||
471 | w = 320; | ||
472 | } else if (w > 720) { | ||
473 | w = 720; | ||
474 | } | ||
475 | |||
476 | memcpy(vf, &pvr_format[PVR_FORMAT_PIX], | ||
477 | sizeof(struct v4l2_format)); | ||
478 | vf->fmt.pix.width = w; | ||
479 | vf->fmt.pix.height = h; | ||
480 | |||
481 | if (cmd == VIDIOC_S_FMT) { | ||
482 | pvr2_ctrl_set_value( | ||
483 | pvr2_hdw_get_ctrl_by_id(hdw, | ||
484 | PVR2_CID_HRES), | ||
485 | vf->fmt.pix.width); | ||
486 | pvr2_ctrl_set_value( | ||
487 | pvr2_hdw_get_ctrl_by_id(hdw, | ||
488 | PVR2_CID_VRES), | ||
489 | vf->fmt.pix.height); | ||
490 | } | ||
491 | } break; | ||
492 | case V4L2_BUF_TYPE_VBI_CAPTURE: | ||
493 | 			// ????? Still need to figure out how to do VBI correctly | ||
494 | ret = -EINVAL; | ||
495 | break; | ||
496 | default: | ||
497 | ret = -EINVAL; | ||
498 | break; | ||
499 | } | ||
500 | break; | ||
501 | } | ||
502 | |||
503 | case VIDIOC_STREAMON: | ||
504 | { | ||
505 | ret = pvr2_hdw_set_stream_type(hdw,dev_info->config); | ||
506 | if (ret < 0) return ret; | ||
507 | ret = pvr2_hdw_set_streaming(hdw,!0); | ||
508 | break; | ||
509 | } | ||
510 | |||
511 | case VIDIOC_STREAMOFF: | ||
512 | { | ||
513 | ret = pvr2_hdw_set_streaming(hdw,0); | ||
514 | break; | ||
515 | } | ||
516 | |||
517 | case VIDIOC_QUERYCTRL: | ||
518 | { | ||
519 | struct pvr2_ctrl *cptr; | ||
520 | struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg; | ||
521 | ret = 0; | ||
522 | if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) { | ||
523 | cptr = pvr2_hdw_get_ctrl_nextv4l( | ||
524 | hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL)); | ||
525 | if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr); | ||
526 | } else { | ||
527 | cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id); | ||
528 | } | ||
529 | if (!cptr) { | ||
530 | pvr2_trace(PVR2_TRACE_V4LIOCTL, | ||
531 | "QUERYCTRL id=0x%x not implemented here", | ||
532 | vc->id); | ||
533 | ret = -EINVAL; | ||
534 | break; | ||
535 | } | ||
536 | |||
537 | pvr2_trace(PVR2_TRACE_V4LIOCTL, | ||
538 | "QUERYCTRL id=0x%x mapping name=%s (%s)", | ||
539 | vc->id,pvr2_ctrl_get_name(cptr), | ||
540 | pvr2_ctrl_get_desc(cptr)); | ||
541 | strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name)); | ||
542 | vc->flags = pvr2_ctrl_get_v4lflags(cptr); | ||
543 | vc->default_value = pvr2_ctrl_get_def(cptr); | ||
544 | switch (pvr2_ctrl_get_type(cptr)) { | ||
545 | case pvr2_ctl_enum: | ||
546 | vc->type = V4L2_CTRL_TYPE_MENU; | ||
547 | vc->minimum = 0; | ||
548 | vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1; | ||
549 | vc->step = 1; | ||
550 | break; | ||
551 | case pvr2_ctl_bool: | ||
552 | vc->type = V4L2_CTRL_TYPE_BOOLEAN; | ||
553 | vc->minimum = 0; | ||
554 | vc->maximum = 1; | ||
555 | vc->step = 1; | ||
556 | break; | ||
557 | case pvr2_ctl_int: | ||
558 | vc->type = V4L2_CTRL_TYPE_INTEGER; | ||
559 | vc->minimum = pvr2_ctrl_get_min(cptr); | ||
560 | vc->maximum = pvr2_ctrl_get_max(cptr); | ||
561 | vc->step = 1; | ||
562 | break; | ||
563 | default: | ||
564 | pvr2_trace(PVR2_TRACE_V4LIOCTL, | ||
565 | "QUERYCTRL id=0x%x name=%s not mappable", | ||
566 | vc->id,pvr2_ctrl_get_name(cptr)); | ||
567 | ret = -EINVAL; | ||
568 | break; | ||
569 | } | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | case VIDIOC_QUERYMENU: | ||
574 | { | ||
575 | struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg; | ||
576 | unsigned int cnt = 0; | ||
577 | ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id), | ||
578 | vm->index, | ||
579 | vm->name,sizeof(vm->name)-1, | ||
580 | &cnt); | ||
581 | vm->name[cnt] = 0; | ||
582 | break; | ||
583 | } | ||
584 | |||
585 | case VIDIOC_G_CTRL: | ||
586 | { | ||
587 | struct v4l2_control *vc = (struct v4l2_control *)arg; | ||
588 | int val = 0; | ||
589 | ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), | ||
590 | &val); | ||
591 | vc->value = val; | ||
592 | break; | ||
593 | } | ||
594 | |||
595 | case VIDIOC_S_CTRL: | ||
596 | { | ||
597 | struct v4l2_control *vc = (struct v4l2_control *)arg; | ||
598 | ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id), | ||
599 | vc->value); | ||
600 | break; | ||
601 | } | ||
602 | |||
603 | case VIDIOC_G_EXT_CTRLS: | ||
604 | { | ||
605 | struct v4l2_ext_controls *ctls = | ||
606 | (struct v4l2_ext_controls *)arg; | ||
607 | struct v4l2_ext_control *ctrl; | ||
608 | unsigned int idx; | ||
609 | int val; | ||
610 | for (idx = 0; idx < ctls->count; idx++) { | ||
611 | ctrl = ctls->controls + idx; | ||
612 | ret = pvr2_ctrl_get_value( | ||
613 | pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val); | ||
614 | if (ret) { | ||
615 | ctls->error_idx = idx; | ||
616 | break; | ||
617 | } | ||
618 | /* Ensure that if read as a 64 bit value, the user | ||
619 | will still get a hopefully sane value */ | ||
620 | ctrl->value64 = 0; | ||
621 | ctrl->value = val; | ||
622 | } | ||
623 | break; | ||
624 | } | ||
625 | |||
626 | case VIDIOC_S_EXT_CTRLS: | ||
627 | { | ||
628 | struct v4l2_ext_controls *ctls = | ||
629 | (struct v4l2_ext_controls *)arg; | ||
630 | struct v4l2_ext_control *ctrl; | ||
631 | unsigned int idx; | ||
632 | for (idx = 0; idx < ctls->count; idx++) { | ||
633 | ctrl = ctls->controls + idx; | ||
634 | ret = pvr2_ctrl_set_value( | ||
635 | pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id), | ||
636 | ctrl->value); | ||
637 | if (ret) { | ||
638 | ctls->error_idx = idx; | ||
639 | break; | ||
640 | } | ||
641 | } | ||
642 | break; | ||
643 | } | ||
644 | |||
645 | case VIDIOC_TRY_EXT_CTRLS: | ||
646 | { | ||
647 | struct v4l2_ext_controls *ctls = | ||
648 | (struct v4l2_ext_controls *)arg; | ||
649 | struct v4l2_ext_control *ctrl; | ||
650 | struct pvr2_ctrl *pctl; | ||
651 | unsigned int idx; | ||
652 | /* For the moment just validate that the requested control | ||
653 | actually exists. */ | ||
654 | for (idx = 0; idx < ctls->count; idx++) { | ||
655 | ctrl = ctls->controls + idx; | ||
656 | pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id); | ||
657 | if (!pctl) { | ||
658 | ret = -EINVAL; | ||
659 | ctls->error_idx = idx; | ||
660 | break; | ||
661 | } | ||
662 | } | ||
663 | break; | ||
664 | } | ||
665 | |||
666 | case VIDIOC_LOG_STATUS: | ||
667 | { | ||
668 | pvr2_hdw_trigger_module_log(hdw); | ||
669 | ret = 0; | ||
670 | break; | ||
671 | } | ||
672 | |||
673 | default : | ||
674 | ret = v4l_compat_translate_ioctl(inode,file,cmd, | ||
675 | arg,pvr2_v4l2_do_ioctl); | ||
676 | } | ||
677 | |||
678 | pvr2_hdw_commit_ctl(hdw); | ||
679 | |||
680 | if (ret < 0) { | ||
681 | 		if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) { | ||
682 | 			pvr2_trace(PVR2_TRACE_V4LIOCTL, | ||
683 | 				   "pvr2_v4l2_do_ioctl failure, ret=%d" | ||
684 | 				   " command was:",ret); | ||
685 | 			v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), | ||
686 | 					cmd); | ||
687 | 		} | ||
693 | } else { | ||
694 | pvr2_trace(PVR2_TRACE_V4LIOCTL, | ||
695 | "pvr2_v4l2_do_ioctl complete, ret=%d (0x%x)", | ||
696 | ret,ret); | ||
697 | } | ||
698 | return ret; | ||
699 | } | ||
700 | |||
701 | |||
702 | static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) | ||
703 | { | ||
704 | pvr2_trace(PVR2_TRACE_INIT, | ||
705 | "unregistering device video%d [%s]", | ||
706 | dip->vdev->minor,pvr2_config_get_name(dip->config)); | ||
707 | if (dip->ctxt_idx >= 0) { | ||
708 | mutex_lock(&device_lock); | ||
709 | devices[dip->ctxt_idx] = NULL; | ||
710 | dip->ctxt_idx = -1; | ||
711 | mutex_unlock(&device_lock); | ||
712 | } | ||
713 | video_unregister_device(dip->vdev); | ||
714 | } | ||
715 | |||
716 | |||
717 | static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp) | ||
718 | { | ||
719 | pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,-1); | ||
720 | pvr2_v4l2_dev_destroy(&vp->video_dev); | ||
721 | |||
722 | pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp); | ||
723 | pvr2_channel_done(&vp->channel); | ||
724 | kfree(vp); | ||
725 | } | ||
726 | |||
727 | |||
728 | void pvr2_v4l2_internal_check(struct pvr2_channel *chp) | ||
729 | { | ||
730 | struct pvr2_v4l2 *vp; | ||
731 | vp = container_of(chp,struct pvr2_v4l2,channel); | ||
732 | if (!vp->channel.mc_head->disconnect_flag) return; | ||
733 | if (vp->vfirst) return; | ||
734 | pvr2_v4l2_destroy_no_lock(vp); | ||
735 | } | ||
736 | |||
737 | |||
738 | int pvr2_v4l2_ioctl(struct inode *inode, struct file *file, | ||
739 | unsigned int cmd, unsigned long arg) | ||
740 | { | ||
741 | |||
742 | /* Temporary hack : use ivtv api until a v4l2 one is available. */ | ||
743 | #define IVTV_IOC_G_CODEC 0xFFEE7703 | ||
744 | #define IVTV_IOC_S_CODEC 0xFFEE7704 | ||
745 | if (cmd == IVTV_IOC_G_CODEC || cmd == IVTV_IOC_S_CODEC) return 0; | ||
746 | return video_usercopy(inode, file, cmd, arg, pvr2_v4l2_do_ioctl); | ||
747 | } | ||
748 | |||
749 | |||
750 | int pvr2_v4l2_release(struct inode *inode, struct file *file) | ||
751 | { | ||
752 | struct pvr2_v4l2_fh *fhp = file->private_data; | ||
753 | struct pvr2_v4l2 *vp = fhp->vhead; | ||
754 | struct pvr2_context *mp = fhp->vhead->channel.mc_head; | ||
755 | |||
756 | pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release"); | ||
757 | |||
758 | if (fhp->rhp) { | ||
759 | struct pvr2_stream *sp; | ||
760 | struct pvr2_hdw *hdw; | ||
761 | hdw = fhp->channel.mc_head->hdw; | ||
762 | pvr2_hdw_set_streaming(hdw,0); | ||
763 | sp = pvr2_ioread_get_stream(fhp->rhp); | ||
764 | if (sp) pvr2_stream_set_callback(sp,0,0); | ||
765 | pvr2_ioread_destroy(fhp->rhp); | ||
766 | fhp->rhp = 0; | ||
767 | } | ||
768 | v4l2_prio_close(&vp->prio, &fhp->prio); | ||
769 | file->private_data = NULL; | ||
770 | |||
771 | pvr2_context_enter(mp); do { | ||
772 | if (fhp->vnext) { | ||
773 | fhp->vnext->vprev = fhp->vprev; | ||
774 | } else { | ||
775 | vp->vlast = fhp->vprev; | ||
776 | } | ||
777 | if (fhp->vprev) { | ||
778 | fhp->vprev->vnext = fhp->vnext; | ||
779 | } else { | ||
780 | vp->vfirst = fhp->vnext; | ||
781 | } | ||
782 | fhp->vnext = 0; | ||
783 | fhp->vprev = 0; | ||
784 | fhp->vhead = 0; | ||
785 | pvr2_channel_done(&fhp->channel); | ||
786 | pvr2_trace(PVR2_TRACE_STRUCT, | ||
787 | "Destroying pvr_v4l2_fh id=%p",fhp); | ||
788 | kfree(fhp); | ||
789 | if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) { | ||
790 | pvr2_v4l2_destroy_no_lock(vp); | ||
791 | } | ||
792 | } while (0); pvr2_context_exit(mp); | ||
793 | return 0; | ||
794 | } | ||
795 | |||
796 | |||
797 | int pvr2_v4l2_open(struct inode *inode, struct file *file) | ||
798 | { | ||
799 | struct pvr2_v4l2_dev *dip = 0; /* Our own context pointer */ | ||
800 | struct pvr2_v4l2_fh *fhp; | ||
801 | struct pvr2_v4l2 *vp; | ||
802 | struct pvr2_hdw *hdw; | ||
803 | |||
804 | mutex_lock(&device_lock); | ||
805 | /* MCI 7-Jun-2006 Even though we're just doing what amounts to an | ||
806 | atomic read of the device mapping array here, we still need the | ||
807 | mutex. The problem is that there is a tiny race possible when | ||
808 | we register the device. We can't update the device mapping | ||
809 | array until after the device has been registered, owing to the | ||
810 | fact that we can't know the minor device number until after the | ||
811 | registration succeeds. And if another thread tries to open the | ||
812 | device in the window of time after registration but before the | ||
813 | map is updated, then it will get back an erroneous null pointer | ||
814 | and the open will result in a spurious failure. The only way to | ||
815 | prevent that is to (a) be inside the mutex here before we access | ||
816 | the array, and (b) cover the entire registration process later | ||
817 | on with this same mutex. Thus if we get inside the mutex here, | ||
818 | then we can be assured that the registration process actually | ||
819 | completed correctly. This is an unhappy complication from the | ||
820 | use of global data in a driver that lives in a preemptible | ||
821 | environment. It sure would be nice if the video device itself | ||
822 | had a means for storing and retrieving a local context pointer. | ||
823 | Oh wait. It did. But now it's gone. Silly me. */ | ||
824 | { | ||
825 | unsigned int midx = iminor(file->f_dentry->d_inode); | ||
826 | if (midx < sizeof(devices)/sizeof(devices[0])) { | ||
827 | dip = devices[midx]; | ||
828 | } | ||
829 | } | ||
830 | mutex_unlock(&device_lock); | ||
831 | |||
832 | if (!dip) return -ENODEV; /* Should be impossible but I'm paranoid */ | ||
833 | |||
834 | vp = dip->v4lp; | ||
835 | hdw = vp->channel.hdw; | ||
836 | |||
837 | pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open"); | ||
838 | |||
839 | if (!pvr2_hdw_dev_ok(hdw)) { | ||
840 | pvr2_trace(PVR2_TRACE_OPEN_CLOSE, | ||
841 | "pvr2_v4l2_open: hardware not ready"); | ||
842 | return -EIO; | ||
843 | } | ||
844 | |||
845 | fhp = kmalloc(sizeof(*fhp),GFP_KERNEL); | ||
846 | if (!fhp) { | ||
847 | return -ENOMEM; | ||
848 | } | ||
849 | memset(fhp,0,sizeof(*fhp)); | ||
850 | |||
851 | init_waitqueue_head(&fhp->wait_data); | ||
852 | fhp->dev_info = dip; | ||
853 | |||
854 | pvr2_context_enter(vp->channel.mc_head); do { | ||
855 | pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp); | ||
856 | pvr2_channel_init(&fhp->channel,vp->channel.mc_head); | ||
857 | fhp->vnext = 0; | ||
858 | fhp->vprev = vp->vlast; | ||
859 | if (vp->vlast) { | ||
860 | vp->vlast->vnext = fhp; | ||
861 | } else { | ||
862 | vp->vfirst = fhp; | ||
863 | } | ||
864 | vp->vlast = fhp; | ||
865 | fhp->vhead = vp; | ||
866 | } while (0); pvr2_context_exit(vp->channel.mc_head); | ||
867 | |||
868 | fhp->file = file; | ||
869 | file->private_data = fhp; | ||
870 | v4l2_prio_open(&vp->prio,&fhp->prio); | ||
871 | |||
872 | fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw); | ||
873 | |||
874 | return 0; | ||
875 | } | ||
876 | |||
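
Editorial aside (not part of the patch): the long comment in pvr2_v4l2_open() explains why the lookup here and the registration in pvr2_v4l2_dev_init() must run under the same mutex: the context can only be published in the devices[] table after registration has revealed the minor number, and a reader must never see that half-finished window. Stripped of kernel specifics, this is a "register, then publish under the same lock readers take" pattern; a small pthread-based analogue (invented names, illustrative only) looks like:

    #include <pthread.h>
    #include <stdio.h>

    /* Userspace analogue of the open()/registration race: the writer
       publishes a context only while holding the same lock readers take,
       so no reader can observe the window between "device registered"
       and "lookup table updated". */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *table[16];

    static void publish(unsigned int slot, void *ctx)
    {
            pthread_mutex_lock(&table_lock);
            /* ...registration that determines 'slot' would go here... */
            table[slot] = ctx;
            pthread_mutex_unlock(&table_lock);
    }

    static void *lookup(unsigned int slot)
    {
            void *ctx;
            pthread_mutex_lock(&table_lock);
            ctx = (slot < 16) ? table[slot] : NULL;
            pthread_mutex_unlock(&table_lock);
            return ctx;
    }

    int main(void)
    {
            int dummy = 1;
            publish(5, &dummy);
            printf("slot 5: %p\n", lookup(5));
            return 0;
    }
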
877 | |||
878 | static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) | ||
879 | { | ||
880 | wake_up(&fhp->wait_data); | ||
881 | } | ||
882 | |||
883 | static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh) | ||
884 | { | ||
885 | int ret; | ||
886 | struct pvr2_stream *sp; | ||
887 | struct pvr2_hdw *hdw; | ||
888 | if (fh->rhp) return 0; | ||
889 | |||
890 | /* First read() attempt. Try to claim the stream and start | ||
891 | it... */ | ||
892 | if ((ret = pvr2_channel_claim_stream(&fh->channel, | ||
893 | fh->dev_info->stream)) != 0) { | ||
894 | /* Someone else must already have it */ | ||
895 | return ret; | ||
896 | } | ||
897 | |||
898 | fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream); | ||
899 | if (!fh->rhp) { | ||
900 | pvr2_channel_claim_stream(&fh->channel,0); | ||
901 | return -ENOMEM; | ||
902 | } | ||
903 | |||
904 | hdw = fh->channel.mc_head->hdw; | ||
905 | sp = fh->dev_info->stream->stream; | ||
906 | pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); | ||
907 | pvr2_hdw_set_stream_type(hdw,fh->dev_info->config); | ||
908 | pvr2_hdw_set_streaming(hdw,!0); | ||
909 | ret = pvr2_ioread_set_enabled(fh->rhp,!0); | ||
910 | |||
911 | return ret; | ||
912 | } | ||
913 | |||
914 | |||
915 | static ssize_t pvr2_v4l2_read(struct file *file, | ||
916 | char __user *buff, size_t count, loff_t *ppos) | ||
917 | { | ||
918 | struct pvr2_v4l2_fh *fh = file->private_data; | ||
919 | int ret; | ||
920 | |||
921 | if (fh->fw_mode_flag) { | ||
922 | struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; | ||
923 | char *tbuf; | ||
924 | int c1,c2; | ||
925 | int tcnt = 0; | ||
926 | unsigned int offs = *ppos; | ||
927 | |||
928 | tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL); | ||
929 | if (!tbuf) return -ENOMEM; | ||
930 | |||
931 | while (count) { | ||
932 | c1 = count; | ||
933 | if (c1 > PAGE_SIZE) c1 = PAGE_SIZE; | ||
934 | c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1); | ||
935 | if (c2 < 0) { | ||
936 | tcnt = c2; | ||
937 | break; | ||
938 | } | ||
939 | if (!c2) break; | ||
940 | if (copy_to_user(buff,tbuf,c2)) { | ||
941 | tcnt = -EFAULT; | ||
942 | break; | ||
943 | } | ||
944 | offs += c2; | ||
945 | tcnt += c2; | ||
946 | buff += c2; | ||
947 | count -= c2; | ||
948 | *ppos += c2; | ||
949 | } | ||
950 | kfree(tbuf); | ||
951 | return tcnt; | ||
952 | } | ||
953 | |||
954 | if (!fh->rhp) { | ||
955 | ret = pvr2_v4l2_iosetup(fh); | ||
956 | if (ret) { | ||
957 | return ret; | ||
958 | } | ||
959 | } | ||
960 | |||
961 | for (;;) { | ||
962 | ret = pvr2_ioread_read(fh->rhp,buff,count); | ||
963 | if (ret >= 0) break; | ||
964 | if (ret != -EAGAIN) break; | ||
965 | if (file->f_flags & O_NONBLOCK) break; | ||
966 | /* Doing blocking I/O. Wait here. */ | ||
967 | ret = wait_event_interruptible( | ||
968 | fh->wait_data, | ||
969 | pvr2_ioread_avail(fh->rhp) >= 0); | ||
970 | if (ret < 0) break; | ||
971 | } | ||
972 | |||
973 | return ret; | ||
974 | } | ||
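
Editorial aside (not part of the patch): in firmware-extraction mode pvr2_v4l2_read() copies data out in PAGE_SIZE-sized chunks through a bounce buffer, advancing an offset and accumulating a byte count until the source is exhausted or an error occurs. The same bookkeeping, with the kernel specifics replaced by toy stand-ins (fw_get() here is an invented substitute for pvr2_hdw_cpufw_get(), and memcpy stands in for copy_to_user), is sketched below.

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 8  /* stand-in for PAGE_SIZE */

    /* Toy data source: returns bytes produced, 0 at end of data. */
    static int fw_get(unsigned int offs, char *buf, int cnt)
    {
            static const char image[] = "0123456789abcdefghij";
            int avail = (int)sizeof(image) - 1 - (int)offs;
            if (avail <= 0) return 0;
            if (cnt > avail) cnt = avail;
            memcpy(buf, image + offs, cnt);
            return cnt;
    }

    int main(void)
    {
            char out[64], tbuf[CHUNK];
            size_t count = sizeof(out) - 1;
            unsigned int offs = 0;
            int tcnt = 0, c1, c2;

            while (count) {
                    c1 = count > CHUNK ? CHUNK : (int)count;
                    c2 = fw_get(offs, tbuf, c1);
                    if (c2 <= 0) break;            /* end of data (or error) */
                    memcpy(out + tcnt, tbuf, c2);  /* copy_to_user() stand-in */
                    offs += c2;
                    tcnt += c2;
                    count -= c2;
            }
            out[tcnt] = '\0';
            printf("read %d bytes: %s\n", tcnt, out);
            return 0;
    }
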
975 | |||
976 | |||
977 | static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait) | ||
978 | { | ||
979 | unsigned int mask = 0; | ||
980 | struct pvr2_v4l2_fh *fh = file->private_data; | ||
981 | int ret; | ||
982 | |||
983 | if (fh->fw_mode_flag) { | ||
984 | mask |= POLLIN | POLLRDNORM; | ||
985 | return mask; | ||
986 | } | ||
987 | |||
988 | if (!fh->rhp) { | ||
989 | ret = pvr2_v4l2_iosetup(fh); | ||
990 | if (ret) return POLLERR; | ||
991 | } | ||
992 | |||
993 | poll_wait(file,&fh->wait_data,wait); | ||
994 | |||
995 | if (pvr2_ioread_avail(fh->rhp) >= 0) { | ||
996 | mask |= POLLIN | POLLRDNORM; | ||
997 | } | ||
998 | |||
999 | return mask; | ||
1000 | } | ||
1001 | |||
1002 | |||
1003 | static struct file_operations vdev_fops = { | ||
1004 | .owner = THIS_MODULE, | ||
1005 | .open = pvr2_v4l2_open, | ||
1006 | .release = pvr2_v4l2_release, | ||
1007 | .read = pvr2_v4l2_read, | ||
1008 | .ioctl = pvr2_v4l2_ioctl, | ||
1009 | .llseek = no_llseek, | ||
1010 | .poll = pvr2_v4l2_poll, | ||
1011 | }; | ||
1012 | |||
1013 | |||
1014 | #define VID_HARDWARE_PVRUSB2 38 /* FIXME : need a good value */ | ||
1015 | |||
1016 | static struct video_device vdev_template = { | ||
1017 | .owner = THIS_MODULE, | ||
1018 | .type = VID_TYPE_CAPTURE | VID_TYPE_TUNER, | ||
1019 | .type2 = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE | ||
1020 | | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | ||
1021 | | V4L2_CAP_READWRITE), | ||
1022 | .hardware = VID_HARDWARE_PVRUSB2, | ||
1023 | .fops = &vdev_fops, | ||
1024 | }; | ||
1025 | |||
1026 | |||
1027 | static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip, | ||
1028 | struct pvr2_v4l2 *vp, | ||
1029 | enum pvr2_config cfg) | ||
1030 | { | ||
1031 | int mindevnum; | ||
1032 | int unit_number; | ||
1033 | int v4l_type; | ||
1034 | dip->v4lp = vp; | ||
1035 | dip->config = cfg; | ||
1036 | |||
1037 | |||
1038 | switch (cfg) { | ||
1039 | case pvr2_config_mpeg: | ||
1040 | v4l_type = VFL_TYPE_GRABBER; | ||
1041 | dip->stream = &vp->channel.mc_head->video_stream; | ||
1042 | break; | ||
1043 | case pvr2_config_vbi: | ||
1044 | v4l_type = VFL_TYPE_VBI; | ||
1045 | break; | ||
1046 | case pvr2_config_radio: | ||
1047 | v4l_type = VFL_TYPE_RADIO; | ||
1048 | break; | ||
1049 | default: | ||
1050 | /* Bail out (this should be impossible) */ | ||
1051 | err("Failed to set up pvrusb2 v4l dev" | ||
1052 | " due to unrecognized config"); | ||
1053 | return; | ||
1054 | } | ||
1055 | |||
1056 | if (!dip->stream) { | ||
1057 | err("Failed to set up pvrusb2 v4l dev" | ||
1058 | " due to missing stream instance"); | ||
1059 | return; | ||
1060 | } | ||
1061 | |||
1062 | dip->vdev = video_device_alloc(); | ||
1063 | if (!dip->vdev) { | ||
1064 | err("Alloc of pvrusb2 v4l video device failed"); | ||
1065 | return; | ||
1066 | } | ||
1067 | |||
1068 | memcpy(dip->vdev,&vdev_template,sizeof(vdev_template)); | ||
1069 | dip->vdev->release = video_device_release; | ||
1070 | mutex_lock(&device_lock); | ||
1071 | |||
1072 | mindevnum = -1; | ||
1073 | unit_number = pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw); | ||
1074 | if ((unit_number >= 0) && (unit_number < PVR_NUM)) { | ||
1075 | mindevnum = video_nr[unit_number]; | ||
1076 | } | ||
1077 | if ((video_register_device(dip->vdev, v4l_type, mindevnum) < 0) && | ||
1078 | (video_register_device(dip->vdev, v4l_type, -1) < 0)) { | ||
1079 | err("Failed to register pvrusb2 v4l video device"); | ||
1080 | } else { | ||
1081 | pvr2_trace(PVR2_TRACE_INIT, | ||
1082 | "registered device video%d [%s]", | ||
1083 | dip->vdev->minor,pvr2_config_get_name(dip->config)); | ||
1084 | } | ||
1085 | |||
1086 | if ((dip->vdev->minor < sizeof(devices)/sizeof(devices[0])) && | ||
1087 | (devices[dip->vdev->minor] == NULL)) { | ||
1088 | dip->ctxt_idx = dip->vdev->minor; | ||
1089 | devices[dip->ctxt_idx] = dip; | ||
1090 | } | ||
1091 | mutex_unlock(&device_lock); | ||
1092 | |||
1093 | pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, | ||
1094 | dip->vdev->minor); | ||
1095 | } | ||
1096 | |||
1097 | |||
1098 | struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp) | ||
1099 | { | ||
1100 | struct pvr2_v4l2 *vp; | ||
1101 | |||
1102 | vp = kmalloc(sizeof(*vp),GFP_KERNEL); | ||
1103 | if (!vp) return vp; | ||
1104 | memset(vp,0,sizeof(*vp)); | ||
1105 | vp->video_dev.ctxt_idx = -1; | ||
1106 | pvr2_channel_init(&vp->channel,mnp); | ||
1107 | pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp); | ||
1108 | |||
1109 | vp->channel.check_func = pvr2_v4l2_internal_check; | ||
1110 | |||
1111 | /* register streams */ | ||
1112 | pvr2_v4l2_dev_init(&vp->video_dev,vp,pvr2_config_mpeg); | ||
1113 | |||
1114 | |||
1115 | return vp; | ||
1116 | } | ||
1117 | |||
1118 | /* | ||
1119 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
1120 | *** Local Variables: *** | ||
1121 | *** mode: c *** | ||
1122 | *** fill-column: 75 *** | ||
1123 | *** tab-width: 8 *** | ||
1124 | *** c-basic-offset: 8 *** | ||
1125 | *** End: *** | ||
1126 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h new file mode 100644 index 000000000000..9a995e2d2256 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | #ifndef __PVRUSB2_V4L2_H | ||
22 | #define __PVRUSB2_V4L2_H | ||
23 | |||
24 | #include "pvrusb2-context.h" | ||
25 | |||
26 | struct pvr2_v4l2; | ||
27 | |||
28 | struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *); | ||
29 | |||
30 | #endif /* __PVRUSB2_V4L2_H */ | ||
31 | |||
32 | /* | ||
33 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
34 | *** Local Variables: *** | ||
35 | *** mode: c *** | ||
36 | *** fill-column: 75 *** | ||
37 | *** tab-width: 8 *** | ||
38 | *** c-basic-offset: 8 *** | ||
39 | *** End: *** | ||
40 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c new file mode 100644 index 000000000000..e4ec7f25194c --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | |||
25 | This source file is specifically designed to interface with the | ||
26 | saa711x support that is available in V4L starting with | ||
27 | Linux 2.6.15. | ||
28 | |||
29 | */ | ||
30 | |||
31 | #include "pvrusb2-video-v4l.h" | ||
32 | #include "pvrusb2-i2c-cmd-v4l2.h" | ||
33 | |||
34 | |||
35 | #include "pvrusb2-hdw-internal.h" | ||
36 | #include "pvrusb2-debug.h" | ||
37 | #include <linux/videodev2.h> | ||
38 | #include <media/v4l2-common.h> | ||
39 | #include <media/saa7115.h> | ||
40 | #include <linux/errno.h> | ||
41 | #include <linux/slab.h> | ||
42 | |||
43 | struct pvr2_v4l_decoder { | ||
44 | struct pvr2_i2c_handler handler; | ||
45 | struct pvr2_decoder_ctrl ctrl; | ||
46 | struct pvr2_i2c_client *client; | ||
47 | struct pvr2_hdw *hdw; | ||
48 | unsigned long stale_mask; | ||
49 | }; | ||
50 | |||
51 | |||
52 | static void set_input(struct pvr2_v4l_decoder *ctxt) | ||
53 | { | ||
54 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
55 | struct v4l2_routing route; | ||
56 | |||
57 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val); | ||
58 | switch(hdw->input_val) { | ||
59 | case PVR2_CVAL_INPUT_TV: | ||
60 | route.input = SAA7115_COMPOSITE4; | ||
61 | break; | ||
62 | case PVR2_CVAL_INPUT_COMPOSITE: | ||
63 | route.input = SAA7115_COMPOSITE5; | ||
64 | break; | ||
65 | case PVR2_CVAL_INPUT_SVIDEO: | ||
66 | route.input = SAA7115_SVIDEO2; | ||
67 | break; | ||
68 | case PVR2_CVAL_INPUT_RADIO: | ||
69 | // ????? No idea yet what to do here | ||
70 | default: | ||
71 | return; | ||
72 | } | ||
73 | route.output = 0; | ||
74 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route); | ||
75 | } | ||
76 | |||
77 | |||
78 | static int check_input(struct pvr2_v4l_decoder *ctxt) | ||
79 | { | ||
80 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
81 | return hdw->input_dirty != 0; | ||
82 | } | ||
83 | |||
84 | |||
85 | static void set_audio(struct pvr2_v4l_decoder *ctxt) | ||
86 | { | ||
87 | u32 val; | ||
88 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
89 | |||
90 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_audio %d", | ||
91 | hdw->srate_val); | ||
92 | switch (hdw->srate_val) { | ||
93 | default: | ||
94 | case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000: | ||
95 | val = 48000; | ||
96 | break; | ||
97 | case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100: | ||
98 | val = 44100; | ||
99 | break; | ||
100 | case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000: | ||
101 | val = 32000; | ||
102 | break; | ||
103 | } | ||
104 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val); | ||
105 | } | ||
106 | |||
107 | |||
108 | static int check_audio(struct pvr2_v4l_decoder *ctxt) | ||
109 | { | ||
110 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
111 | return hdw->srate_dirty != 0; | ||
112 | } | ||
113 | |||
114 | |||
115 | struct pvr2_v4l_decoder_ops { | ||
116 | void (*update)(struct pvr2_v4l_decoder *); | ||
117 | int (*check)(struct pvr2_v4l_decoder *); | ||
118 | }; | ||
119 | |||
120 | |||
121 | static const struct pvr2_v4l_decoder_ops decoder_ops[] = { | ||
122 | { .update = set_input, .check = check_input}, | ||
123 | { .update = set_audio, .check = check_audio}, | ||
124 | }; | ||
125 | |||
126 | |||
127 | static void decoder_detach(struct pvr2_v4l_decoder *ctxt) | ||
128 | { | ||
129 | ctxt->client->handler = 0; | ||
130 | ctxt->hdw->decoder_ctrl = 0; | ||
131 | kfree(ctxt); | ||
132 | } | ||
133 | |||
134 | |||
135 | static int decoder_check(struct pvr2_v4l_decoder *ctxt) | ||
136 | { | ||
137 | unsigned long msk; | ||
138 | unsigned int idx; | ||
139 | |||
140 | for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]); | ||
141 | idx++) { | ||
142 | msk = 1 << idx; | ||
143 | if (ctxt->stale_mask & msk) continue; | ||
144 | if (decoder_ops[idx].check(ctxt)) { | ||
145 | ctxt->stale_mask |= msk; | ||
146 | } | ||
147 | } | ||
148 | return ctxt->stale_mask != 0; | ||
149 | } | ||
150 | |||
151 | |||
152 | static void decoder_update(struct pvr2_v4l_decoder *ctxt) | ||
153 | { | ||
154 | unsigned long msk; | ||
155 | unsigned int idx; | ||
156 | |||
157 | for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]); | ||
158 | idx++) { | ||
159 | msk = 1 << idx; | ||
160 | if (!(ctxt->stale_mask & msk)) continue; | ||
161 | ctxt->stale_mask &= ~msk; | ||
162 | decoder_ops[idx].update(ctxt); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | |||
167 | static int decoder_detect(struct pvr2_i2c_client *cp) | ||
168 | { | ||
169 | /* Attempt to query the decoder - let's see if it will answer */ | ||
170 | struct v4l2_tuner vt; | ||
171 | int ret; | ||
172 | |||
173 | memset(&vt,0,sizeof(vt)); | ||
174 | ret = pvr2_i2c_client_cmd(cp,VIDIOC_G_TUNER,&vt); | ||
175 | return ret == 0; /* Return true if it answered */ | ||
176 | } | ||
177 | |||
178 | |||
179 | static void decoder_enable(struct pvr2_v4l_decoder *ctxt,int fl) | ||
180 | { | ||
181 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 decoder_enable(%d)",fl); | ||
182 | pvr2_v4l2_cmd_stream(ctxt->client,fl); | ||
183 | } | ||
184 | |||
185 | |||
186 | static int decoder_is_tuned(struct pvr2_v4l_decoder *ctxt) | ||
187 | { | ||
188 | struct v4l2_tuner vt; | ||
189 | int ret; | ||
190 | |||
191 | memset(&vt,0,sizeof(vt)); | ||
192 | ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt); | ||
193 | if (ret < 0) return -EINVAL; | ||
194 | return vt.signal ? 1 : 0; | ||
195 | } | ||
196 | |||
197 | |||
198 | static unsigned int decoder_describe(struct pvr2_v4l_decoder *ctxt,char *buf,unsigned int cnt) | ||
199 | { | ||
200 | return scnprintf(buf,cnt,"handler: pvrusb2-video-v4l"); | ||
201 | } | ||
202 | |||
203 | |||
204 | static const struct pvr2_i2c_handler_functions hfuncs = { | ||
205 | .detach = (void (*)(void *))decoder_detach, | ||
206 | .check = (int (*)(void *))decoder_check, | ||
207 | .update = (void (*)(void *))decoder_update, | ||
208 | .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe, | ||
209 | }; | ||
210 | |||
211 | |||
212 | int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *hdw, | ||
213 | struct pvr2_i2c_client *cp) | ||
214 | { | ||
215 | struct pvr2_v4l_decoder *ctxt; | ||
216 | |||
217 | if (hdw->decoder_ctrl) return 0; | ||
218 | if (cp->handler) return 0; | ||
219 | if (!decoder_detect(cp)) return 0; | ||
220 | |||
221 | ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL); | ||
222 | if (!ctxt) return 0; | ||
223 | memset(ctxt,0,sizeof(*ctxt)); | ||
224 | |||
225 | ctxt->handler.func_data = ctxt; | ||
226 | ctxt->handler.func_table = &hfuncs; | ||
227 | ctxt->ctrl.ctxt = ctxt; | ||
228 | ctxt->ctrl.detach = (void (*)(void *))decoder_detach; | ||
229 | ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable; | ||
230 | ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned; | ||
231 | ctxt->client = cp; | ||
232 | ctxt->hdw = hdw; | ||
233 | ctxt->stale_mask = (1 << (sizeof(decoder_ops)/ | ||
234 | sizeof(decoder_ops[0]))) - 1; | ||
235 | hdw->decoder_ctrl = &ctxt->ctrl; | ||
236 | cp->handler = &ctxt->handler; | ||
237 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x saa711x V4L2 handler set up", | ||
238 | cp->client->addr); | ||
239 | return !0; | ||
240 | } | ||
241 | |||
242 | |||
243 | |||
244 | |||
245 | /* | ||
246 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
247 | *** Local Variables: *** | ||
248 | *** mode: c *** | ||
249 | *** fill-column: 70 *** | ||
250 | *** tab-width: 8 *** | ||
251 | *** c-basic-offset: 8 *** | ||
252 | *** End: *** | ||
253 | */ | ||
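decoder_check() and decoder_update() above implement a small dirty-mask idiom: each entry of decoder_ops owns one bit of stale_mask, check() latches a bit whenever the corresponding setting has gone stale, and update() clears each latched bit as it pushes the new value to the chip. A minimal, self-contained sketch of the idiom (hypothetical names, outside the driver):

#include <stdio.h>

struct ctx {
	unsigned long stale_mask;
	int input_dirty;
	int srate_dirty;
};

static int  chk_input(struct ctx *c) { return c->input_dirty != 0; }
static void upd_input(struct ctx *c) { c->input_dirty = 0; printf("input pushed\n"); }
static int  chk_srate(struct ctx *c) { return c->srate_dirty != 0; }
static void upd_srate(struct ctx *c) { c->srate_dirty = 0; printf("srate pushed\n"); }

static const struct {
	void (*update)(struct ctx *);
	int (*check)(struct ctx *);
} ops[] = {
	{ upd_input, chk_input },
	{ upd_srate, chk_srate },
};

static int check(struct ctx *c)
{
	unsigned int idx;

	for (idx = 0; idx < sizeof(ops)/sizeof(ops[0]); idx++) {
		if (c->stale_mask & (1UL << idx)) continue;	/* already marked stale */
		if (ops[idx].check(c)) c->stale_mask |= 1UL << idx;
	}
	return c->stale_mask != 0;
}

static void update(struct ctx *c)
{
	unsigned int idx;

	for (idx = 0; idx < sizeof(ops)/sizeof(ops[0]); idx++) {
		if (!(c->stale_mask & (1UL << idx))) continue;
		c->stale_mask &= ~(1UL << idx);			/* clear before pushing */
		ops[idx].update(c);
	}
}

int main(void)
{
	struct ctx c = { .stale_mask = 0, .input_dirty = 1, .srate_dirty = 0 };

	if (check(&c))
		update(&c);	/* prints only "input pushed" */
	return 0;
}

Compiled as plain C, only the input bit is latched and pushed, mirroring how the driver defers hardware writes until the corresponding *_dirty flag is set.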
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h new file mode 100644 index 000000000000..2b917fda02e4 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_VIDEO_V4L_H | ||
24 | #define __PVRUSB2_VIDEO_V4L_H | ||
25 | |||
26 | /* | ||
27 | |||
28 | This module connects the pvrusb2 driver to the I2C chip level | ||
29 | driver which handles device video processing. This interface is | ||
30 | used internally by the driver; higher level code should only | ||
31 | interact through the interface provided by pvrusb2-hdw.h. | ||
32 | |||
33 | */ | ||
34 | |||
35 | |||
36 | |||
37 | #include "pvrusb2-i2c-core.h" | ||
38 | |||
39 | int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
40 | |||
41 | |||
42 | #endif /* __PVRUSB2_VIDEO_V4L_H */ | ||
43 | |||
44 | /* | ||
45 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
46 | *** Local Variables: *** | ||
47 | *** mode: c *** | ||
48 | *** fill-column: 70 *** | ||
49 | *** tab-width: 8 *** | ||
50 | *** c-basic-offset: 8 *** | ||
51 | *** End: *** | ||
52 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.c b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c new file mode 100644 index 000000000000..fcad346e3955 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | |||
25 | This source file is specifically designed to interface with the | ||
26 | wm8775. | ||
27 | |||
28 | */ | ||
29 | |||
30 | #include "pvrusb2-wm8775.h" | ||
31 | #include "pvrusb2-i2c-cmd-v4l2.h" | ||
32 | |||
33 | |||
34 | #include "pvrusb2-hdw-internal.h" | ||
35 | #include "pvrusb2-debug.h" | ||
36 | #include <linux/videodev2.h> | ||
37 | #include <media/v4l2-common.h> | ||
38 | #include <linux/errno.h> | ||
39 | #include <linux/slab.h> | ||
40 | |||
41 | struct pvr2_v4l_wm8775 { | ||
42 | struct pvr2_i2c_handler handler; | ||
43 | struct pvr2_i2c_client *client; | ||
44 | struct pvr2_hdw *hdw; | ||
45 | unsigned long stale_mask; | ||
46 | }; | ||
47 | |||
48 | |||
49 | static void set_input(struct pvr2_v4l_wm8775 *ctxt) | ||
50 | { | ||
51 | struct v4l2_routing route; | ||
52 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
53 | int msk = 0; | ||
54 | |||
55 | memset(&route,0,sizeof(route)); | ||
56 | |||
57 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c wm8775 set_input(val=%d msk=0x%x)", | ||
58 | hdw->input_val,msk); | ||
59 | |||
60 | // Always route from the same input (route.input = 2) no matter what | ||
61 | route.input = 2; | ||
62 | pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route); | ||
63 | } | ||
64 | |||
65 | static int check_input(struct pvr2_v4l_wm8775 *ctxt) | ||
66 | { | ||
67 | struct pvr2_hdw *hdw = ctxt->hdw; | ||
68 | return hdw->input_dirty != 0; | ||
69 | } | ||
70 | |||
71 | |||
72 | struct pvr2_v4l_wm8775_ops { | ||
73 | void (*update)(struct pvr2_v4l_wm8775 *); | ||
74 | int (*check)(struct pvr2_v4l_wm8775 *); | ||
75 | }; | ||
76 | |||
77 | |||
78 | static const struct pvr2_v4l_wm8775_ops wm8775_ops[] = { | ||
79 | { .update = set_input, .check = check_input}, | ||
80 | }; | ||
81 | |||
82 | |||
83 | static unsigned int wm8775_describe(struct pvr2_v4l_wm8775 *ctxt, | ||
84 | char *buf,unsigned int cnt) | ||
85 | { | ||
86 | return scnprintf(buf,cnt,"handler: pvrusb2-wm8775"); | ||
87 | } | ||
88 | |||
89 | |||
90 | static void wm8775_detach(struct pvr2_v4l_wm8775 *ctxt) | ||
91 | { | ||
92 | ctxt->client->handler = 0; | ||
93 | kfree(ctxt); | ||
94 | } | ||
95 | |||
96 | |||
97 | static int wm8775_check(struct pvr2_v4l_wm8775 *ctxt) | ||
98 | { | ||
99 | unsigned long msk; | ||
100 | unsigned int idx; | ||
101 | |||
102 | for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]); | ||
103 | idx++) { | ||
104 | msk = 1 << idx; | ||
105 | if (ctxt->stale_mask & msk) continue; | ||
106 | if (wm8775_ops[idx].check(ctxt)) { | ||
107 | ctxt->stale_mask |= msk; | ||
108 | } | ||
109 | } | ||
110 | return ctxt->stale_mask != 0; | ||
111 | } | ||
112 | |||
113 | |||
114 | static void wm8775_update(struct pvr2_v4l_wm8775 *ctxt) | ||
115 | { | ||
116 | unsigned long msk; | ||
117 | unsigned int idx; | ||
118 | |||
119 | for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]); | ||
120 | idx++) { | ||
121 | msk = 1 << idx; | ||
122 | if (!(ctxt->stale_mask & msk)) continue; | ||
123 | ctxt->stale_mask &= ~msk; | ||
124 | wm8775_ops[idx].update(ctxt); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | |||
129 | static const struct pvr2_i2c_handler_functions hfuncs = { | ||
130 | .detach = (void (*)(void *))wm8775_detach, | ||
131 | .check = (int (*)(void *))wm8775_check, | ||
132 | .update = (void (*)(void *))wm8775_update, | ||
133 | .describe = (unsigned int (*)(void *,char *,unsigned int))wm8775_describe, | ||
134 | }; | ||
135 | |||
136 | |||
137 | int pvr2_i2c_wm8775_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp) | ||
138 | { | ||
139 | struct pvr2_v4l_wm8775 *ctxt; | ||
140 | |||
141 | if (cp->handler) return 0; | ||
142 | |||
143 | ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL); | ||
144 | if (!ctxt) return 0; | ||
145 | memset(ctxt,0,sizeof(*ctxt)); | ||
146 | |||
147 | ctxt->handler.func_data = ctxt; | ||
148 | ctxt->handler.func_table = &hfuncs; | ||
149 | ctxt->client = cp; | ||
150 | ctxt->hdw = hdw; | ||
151 | ctxt->stale_mask = (1 << (sizeof(wm8775_ops)/ | ||
152 | sizeof(wm8775_ops[0]))) - 1; | ||
153 | cp->handler = &ctxt->handler; | ||
154 | pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x wm8775 V4L2 handler set up", | ||
155 | cp->client->addr); | ||
156 | return !0; | ||
157 | } | ||
158 | |||
159 | |||
160 | |||
161 | |||
162 | /* | ||
163 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
164 | *** Local Variables: *** | ||
165 | *** mode: c *** | ||
166 | *** fill-column: 70 *** | ||
167 | *** tab-width: 8 *** | ||
168 | *** c-basic-offset: 8 *** | ||
169 | *** End: *** | ||
170 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.h b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h new file mode 100644 index 000000000000..8aaeff4e1e20 --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_WM8775_H | ||
24 | #define __PVRUSB2_WM8775_H | ||
25 | |||
26 | /* | ||
27 | |||
28 | This module connects the pvrusb2 driver to the I2C chip level | ||
29 | driver which performs analog -> digital audio conversion for | ||
30 | external audio inputs. This interface is used internally by the | ||
31 | driver; higher level code should only interact through the | ||
32 | interface provided by pvrusb2-hdw.h. | ||
33 | |||
34 | */ | ||
35 | |||
36 | |||
37 | |||
38 | #include "pvrusb2-i2c-core.h" | ||
39 | |||
40 | int pvr2_i2c_wm8775_setup(struct pvr2_hdw *,struct pvr2_i2c_client *); | ||
41 | |||
42 | |||
43 | #endif /* __PVRUSB2_WM8775_H */ | ||
44 | |||
45 | /* | ||
46 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
47 | *** Local Variables: *** | ||
48 | *** mode: c *** | ||
49 | *** fill-column: 70 *** | ||
50 | *** tab-width: 8 *** | ||
51 | *** c-basic-offset: 8 *** | ||
52 | *** End: *** | ||
53 | */ | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2.h b/drivers/media/video/pvrusb2/pvrusb2.h new file mode 100644 index 000000000000..074533e9c21e --- /dev/null +++ b/drivers/media/video/pvrusb2/pvrusb2.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * | ||
3 | * $Id$ | ||
4 | * | ||
5 | * Copyright (C) 2005 Mike Isely <isely@pobox.com> | ||
6 | * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __PVRUSB2_H | ||
24 | #define __PVRUSB2_H | ||
25 | |||
26 | /* Maximum number of pvrusb2 instances we can track at once. You | ||
27 | might want to increase this; however, driver operation will not | ||
28 | be impaired if it is too small. Instead, additional units just | ||
29 | won't have an ID assigned and it might not be possible to specify | ||
30 | module parameters for those extra units. */ | ||
31 | #define PVR_NUM 20 | ||
32 | |||
33 | #endif /* __PVRUSB2_H */ | ||
34 | |||
35 | /* | ||
36 | Stuff for Emacs to see, in order to encourage consistent editing style: | ||
37 | *** Local Variables: *** | ||
38 | *** mode: c *** | ||
39 | *** fill-column: 70 *** | ||
40 | *** tab-width: 8 *** | ||
41 | *** c-basic-offset: 8 *** | ||
42 | *** End: *** | ||
43 | */ | ||
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c index de7b9e6e932a..afc8f352b8e7 100644 --- a/drivers/media/video/saa7134/saa6752hs.c +++ b/drivers/media/video/saa7134/saa6752hs.c | |||
@@ -432,10 +432,10 @@ static void saa6752hs_old_set_params(struct i2c_client* client, | |||
432 | } | 432 | } |
433 | 433 | ||
434 | static int handle_ctrl(struct saa6752hs_mpeg_params *params, | 434 | static int handle_ctrl(struct saa6752hs_mpeg_params *params, |
435 | struct v4l2_ext_control *ctrl, int cmd) | 435 | struct v4l2_ext_control *ctrl, unsigned int cmd) |
436 | { | 436 | { |
437 | int old = 0, new; | 437 | int old = 0, new; |
438 | int set = cmd == VIDIOC_S_EXT_CTRLS; | 438 | int set = (cmd == VIDIOC_S_EXT_CTRLS); |
439 | 439 | ||
440 | new = ctrl->value; | 440 | new = ctrl->value; |
441 | switch (ctrl->id) { | 441 | switch (ctrl->id) { |
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c index 6be9c1131e1f..c18b31d9928c 100644 --- a/drivers/media/video/stradis.c +++ b/drivers/media/video/stradis.c | |||
@@ -2190,7 +2190,7 @@ static struct pci_driver stradis_driver = { | |||
2190 | .remove = __devexit_p(stradis_remove) | 2190 | .remove = __devexit_p(stradis_remove) |
2191 | }; | 2191 | }; |
2192 | 2192 | ||
2193 | int __init stradis_init(void) | 2193 | static int __init stradis_init(void) |
2194 | { | 2194 | { |
2195 | int retval; | 2195 | int retval; |
2196 | 2196 | ||
@@ -2203,7 +2203,7 @@ int __init stradis_init(void) | |||
2203 | return retval; | 2203 | return retval; |
2204 | } | 2204 | } |
2205 | 2205 | ||
2206 | void __exit stradis_exit(void) | 2206 | static void __exit stradis_exit(void) |
2207 | { | 2207 | { |
2208 | pci_unregister_driver(&stradis_driver); | 2208 | pci_unregister_driver(&stradis_driver); |
2209 | printk(KERN_INFO "stradis: module cleanup complete\n"); | 2209 | printk(KERN_INFO "stradis: module cleanup complete\n"); |
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c index b6ae969563b2..2fadabf99688 100644 --- a/drivers/media/video/tda9887.c +++ b/drivers/media/video/tda9887.c | |||
@@ -22,11 +22,11 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define tda9887_info(fmt, arg...) do {\ | 24 | #define tda9887_info(fmt, arg...) do {\ |
25 | printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \ | 25 | printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \ |
26 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) | 26 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) |
27 | #define tda9887_dbg(fmt, arg...) do {\ | 27 | #define tda9887_dbg(fmt, arg...) do {\ |
28 | if (tuner_debug) \ | 28 | if (tuner_debug) \ |
29 | printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \ | 29 | printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \ |
30 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) | 30 | i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) |
31 | 31 | ||
32 | 32 | ||
@@ -84,8 +84,7 @@ struct tvnorm { | |||
84 | #define cAudioGain6 0x80 // bit c7 | 84 | #define cAudioGain6 0x80 // bit c7 |
85 | 85 | ||
86 | #define cTopMask 0x1f // bit c0:4 | 86 | #define cTopMask 0x1f // bit c0:4 |
87 | #define cTopPalSecamDefault 0x14 // bit c0:4 | 87 | #define cTopDefault 0x10 // bit c0:4 |
88 | #define cTopNtscRadioDefault 0x10 // bit c0:4 | ||
89 | 88 | ||
90 | //// third reg (e) | 89 | //// third reg (e) |
91 | #define cAudioIF_4_5 0x00 // bit e0:1 | 90 | #define cAudioIF_4_5 0x00 // bit e0:1 |
@@ -123,7 +122,7 @@ static struct tvnorm tvnorms[] = { | |||
123 | cQSS ), | 122 | cQSS ), |
124 | .c = ( cDeemphasisON | | 123 | .c = ( cDeemphasisON | |
125 | cDeemphasis50 | | 124 | cDeemphasis50 | |
126 | cTopPalSecamDefault), | 125 | cTopDefault), |
127 | .e = ( cGating_36 | | 126 | .e = ( cGating_36 | |
128 | cAudioIF_5_5 | | 127 | cAudioIF_5_5 | |
129 | cVideoIF_38_90 ), | 128 | cVideoIF_38_90 ), |
@@ -134,7 +133,7 @@ static struct tvnorm tvnorms[] = { | |||
134 | cQSS ), | 133 | cQSS ), |
135 | .c = ( cDeemphasisON | | 134 | .c = ( cDeemphasisON | |
136 | cDeemphasis50 | | 135 | cDeemphasis50 | |
137 | cTopPalSecamDefault), | 136 | cTopDefault), |
138 | .e = ( cGating_36 | | 137 | .e = ( cGating_36 | |
139 | cAudioIF_6_0 | | 138 | cAudioIF_6_0 | |
140 | cVideoIF_38_90 ), | 139 | cVideoIF_38_90 ), |
@@ -145,7 +144,7 @@ static struct tvnorm tvnorms[] = { | |||
145 | cQSS ), | 144 | cQSS ), |
146 | .c = ( cDeemphasisON | | 145 | .c = ( cDeemphasisON | |
147 | cDeemphasis50 | | 146 | cDeemphasis50 | |
148 | cTopPalSecamDefault), | 147 | cTopDefault), |
149 | .e = ( cGating_36 | | 148 | .e = ( cGating_36 | |
150 | cAudioIF_6_5 | | 149 | cAudioIF_6_5 | |
151 | cVideoIF_38_90 ), | 150 | cVideoIF_38_90 ), |
@@ -156,7 +155,7 @@ static struct tvnorm tvnorms[] = { | |||
156 | cQSS ), | 155 | cQSS ), |
157 | .c = ( cDeemphasisON | | 156 | .c = ( cDeemphasisON | |
158 | cDeemphasis75 | | 157 | cDeemphasis75 | |
159 | cTopNtscRadioDefault), | 158 | cTopDefault), |
160 | .e = ( cGating_36 | | 159 | .e = ( cGating_36 | |
161 | cAudioIF_4_5 | | 160 | cAudioIF_4_5 | |
162 | cVideoIF_45_75 ), | 161 | cVideoIF_45_75 ), |
@@ -165,7 +164,7 @@ static struct tvnorm tvnorms[] = { | |||
165 | .name = "SECAM-BGH", | 164 | .name = "SECAM-BGH", |
166 | .b = ( cPositiveAmTV | | 165 | .b = ( cPositiveAmTV | |
167 | cQSS ), | 166 | cQSS ), |
168 | .c = ( cTopPalSecamDefault), | 167 | .c = ( cTopDefault), |
169 | .e = ( cGating_36 | | 168 | .e = ( cGating_36 | |
170 | cAudioIF_5_5 | | 169 | cAudioIF_5_5 | |
171 | cVideoIF_38_90 ), | 170 | cVideoIF_38_90 ), |
@@ -174,7 +173,7 @@ static struct tvnorm tvnorms[] = { | |||
174 | .name = "SECAM-L", | 173 | .name = "SECAM-L", |
175 | .b = ( cPositiveAmTV | | 174 | .b = ( cPositiveAmTV | |
176 | cQSS ), | 175 | cQSS ), |
177 | .c = ( cTopPalSecamDefault), | 176 | .c = ( cTopDefault), |
178 | .e = ( cGating_36 | | 177 | .e = ( cGating_36 | |
179 | cAudioIF_6_5 | | 178 | cAudioIF_6_5 | |
180 | cVideoIF_38_90 ), | 179 | cVideoIF_38_90 ), |
@@ -184,7 +183,7 @@ static struct tvnorm tvnorms[] = { | |||
184 | .b = ( cOutputPort2Inactive | | 183 | .b = ( cOutputPort2Inactive | |
185 | cPositiveAmTV | | 184 | cPositiveAmTV | |
186 | cQSS ), | 185 | cQSS ), |
187 | .c = ( cTopPalSecamDefault), | 186 | .c = ( cTopDefault), |
188 | .e = ( cGating_36 | | 187 | .e = ( cGating_36 | |
189 | cAudioIF_6_5 | | 188 | cAudioIF_6_5 | |
190 | cVideoIF_33_90 ), | 189 | cVideoIF_33_90 ), |
@@ -195,7 +194,7 @@ static struct tvnorm tvnorms[] = { | |||
195 | cQSS ), | 194 | cQSS ), |
196 | .c = ( cDeemphasisON | | 195 | .c = ( cDeemphasisON | |
197 | cDeemphasis50 | | 196 | cDeemphasis50 | |
198 | cTopPalSecamDefault), | 197 | cTopDefault), |
199 | .e = ( cGating_36 | | 198 | .e = ( cGating_36 | |
200 | cAudioIF_6_5 | | 199 | cAudioIF_6_5 | |
201 | cVideoIF_38_90 ), | 200 | cVideoIF_38_90 ), |
@@ -206,7 +205,7 @@ static struct tvnorm tvnorms[] = { | |||
206 | cQSS ), | 205 | cQSS ), |
207 | .c = ( cDeemphasisON | | 206 | .c = ( cDeemphasisON | |
208 | cDeemphasis75 | | 207 | cDeemphasis75 | |
209 | cTopNtscRadioDefault), | 208 | cTopDefault), |
210 | .e = ( cGating_36 | | 209 | .e = ( cGating_36 | |
211 | cAudioIF_4_5 | | 210 | cAudioIF_4_5 | |
212 | cVideoIF_45_75 ), | 211 | cVideoIF_45_75 ), |
@@ -217,7 +216,7 @@ static struct tvnorm tvnorms[] = { | |||
217 | cQSS ), | 216 | cQSS ), |
218 | .c = ( cDeemphasisON | | 217 | .c = ( cDeemphasisON | |
219 | cDeemphasis50 | | 218 | cDeemphasis50 | |
220 | cTopNtscRadioDefault), | 219 | cTopDefault), |
221 | .e = ( cGating_36 | | 220 | .e = ( cGating_36 | |
222 | cAudioIF_4_5 | | 221 | cAudioIF_4_5 | |
223 | cVideoIF_58_75 ), | 222 | cVideoIF_58_75 ), |
@@ -230,7 +229,7 @@ static struct tvnorm radio_stereo = { | |||
230 | cQSS ), | 229 | cQSS ), |
231 | .c = ( cDeemphasisOFF | | 230 | .c = ( cDeemphasisOFF | |
232 | cAudioGain6 | | 231 | cAudioGain6 | |
233 | cTopNtscRadioDefault), | 232 | cTopDefault), |
234 | .e = ( cTunerGainLow | | 233 | .e = ( cTunerGainLow | |
235 | cAudioIF_5_5 | | 234 | cAudioIF_5_5 | |
236 | cRadioIF_38_90 ), | 235 | cRadioIF_38_90 ), |
@@ -242,7 +241,7 @@ static struct tvnorm radio_mono = { | |||
242 | cQSS ), | 241 | cQSS ), |
243 | .c = ( cDeemphasisON | | 242 | .c = ( cDeemphasisON | |
244 | cDeemphasis75 | | 243 | cDeemphasis75 | |
245 | cTopNtscRadioDefault), | 244 | cTopDefault), |
246 | .e = ( cTunerGainLow | | 245 | .e = ( cTunerGainLow | |
247 | cAudioIF_5_5 | | 246 | cAudioIF_5_5 | |
248 | cRadioIF_38_90 ), | 247 | cRadioIF_38_90 ), |
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c index a26ded7d6fae..011413cf34a8 100644 --- a/drivers/media/video/tuner-core.c +++ b/drivers/media/video/tuner-core.c | |||
@@ -40,7 +40,6 @@ static unsigned int no_autodetect = 0; | |||
40 | static unsigned int show_i2c = 0; | 40 | static unsigned int show_i2c = 0; |
41 | 41 | ||
42 | /* insmod options used at runtime => read/write */ | 42 | /* insmod options used at runtime => read/write */ |
43 | static unsigned int tuner_debug_old = 0; | ||
44 | int tuner_debug = 0; | 43 | int tuner_debug = 0; |
45 | 44 | ||
46 | static unsigned int tv_range[2] = { 44, 958 }; | 45 | static unsigned int tv_range[2] = { 44, 958 }; |
@@ -54,8 +53,6 @@ static char ntsc[] = "-"; | |||
54 | module_param(addr, int, 0444); | 53 | module_param(addr, int, 0444); |
55 | module_param(no_autodetect, int, 0444); | 54 | module_param(no_autodetect, int, 0444); |
56 | module_param(show_i2c, int, 0444); | 55 | module_param(show_i2c, int, 0444); |
57 | /* Note: tuner_debug is deprecated and will be removed in 2.6.17 */ | ||
58 | module_param_named(tuner_debug,tuner_debug_old, int, 0444); | ||
59 | module_param_named(debug,tuner_debug, int, 0644); | 56 | module_param_named(debug,tuner_debug, int, 0644); |
60 | module_param_string(pal, pal, sizeof(pal), 0644); | 57 | module_param_string(pal, pal, sizeof(pal), 0644); |
61 | module_param_string(secam, secam, sizeof(secam), 0644); | 58 | module_param_string(secam, secam, sizeof(secam), 0644); |
@@ -442,11 +439,6 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind) | |||
442 | t->audmode = V4L2_TUNER_MODE_STEREO; | 439 | t->audmode = V4L2_TUNER_MODE_STEREO; |
443 | t->mode_mask = T_UNINITIALIZED; | 440 | t->mode_mask = T_UNINITIALIZED; |
444 | t->tuner_status = tuner_status; | 441 | t->tuner_status = tuner_status; |
445 | if (tuner_debug_old) { | ||
446 | tuner_debug = tuner_debug_old; | ||
447 | printk(KERN_ERR "tuner: tuner_debug is deprecated and will be removed in 2.6.17.\n"); | ||
448 | printk(KERN_ERR "tuner: use the debug option instead.\n"); | ||
449 | } | ||
450 | 442 | ||
451 | if (show_i2c) { | 443 | if (show_i2c) { |
452 | unsigned char buffer[16]; | 444 | unsigned char buffer[16]; |
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index f4b3d64ebf73..97f946db8597 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c | |||
@@ -1103,7 +1103,7 @@ const char **v4l2_ctrl_get_menu(u32 id) | |||
1103 | }; | 1103 | }; |
1104 | static const char *mpeg_stream_vbi_fmt[] = { | 1104 | static const char *mpeg_stream_vbi_fmt[] = { |
1105 | "No VBI", | 1105 | "No VBI", |
1106 | "VBI in private packets, IVTV format", | 1106 | "Private packet, IVTV format", |
1107 | NULL | 1107 | NULL |
1108 | }; | 1108 | }; |
1109 | 1109 | ||
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 74714e5bcf03..3ff8378ea660 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -305,10 +305,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port, | |||
305 | } | 305 | } |
306 | 306 | ||
307 | out: | 307 | out: |
308 | if (pp0_array) | 308 | kfree(pp0_array); |
309 | kfree(pp0_array); | 309 | kfree(p0_array); |
310 | if (p0_array) | ||
311 | kfree(p0_array); | ||
312 | return rc; | 310 | return rc; |
313 | } | 311 | } |
314 | 312 | ||
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index af6ec553ff7c..85689ab46cbc 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1378,8 +1378,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc) | |||
1378 | return 0; | 1378 | return 0; |
1379 | 1379 | ||
1380 | out_free_port_info: | 1380 | out_free_port_info: |
1381 | if (hba) | 1381 | kfree(hba); |
1382 | kfree(hba); | ||
1383 | out: | 1382 | out: |
1384 | return error; | 1383 | return error; |
1385 | } | 1384 | } |
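Both hunks above drop the "if (ptr) kfree(ptr)" guard: kfree() already returns immediately when handed a NULL pointer, so testing for NULL first is redundant. A minimal sketch (example_cleanup is a hypothetical helper, not part of either driver):

#include <linux/slab.h>

static void example_cleanup(void *a, void *b)
{
	/* Either pointer may legitimately still be NULL on an error
	 * path; kfree(NULL) is a no-op, so no check is needed. */
	kfree(a);
	kfree(b);
}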
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index febbdd4e0605..c74e5460f834 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c | |||
@@ -1239,7 +1239,6 @@ EXPORT_SYMBOL(i2o_cntxt_list_remove); | |||
1239 | EXPORT_SYMBOL(i2o_cntxt_list_get_ptr); | 1239 | EXPORT_SYMBOL(i2o_cntxt_list_get_ptr); |
1240 | #endif | 1240 | #endif |
1241 | EXPORT_SYMBOL(i2o_msg_get_wait); | 1241 | EXPORT_SYMBOL(i2o_msg_get_wait); |
1242 | EXPORT_SYMBOL(i2o_msg_nop); | ||
1243 | EXPORT_SYMBOL(i2o_find_iop); | 1242 | EXPORT_SYMBOL(i2o_find_iop); |
1244 | EXPORT_SYMBOL(i2o_iop_find_device); | 1243 | EXPORT_SYMBOL(i2o_iop_find_device); |
1245 | EXPORT_SYMBOL(i2o_event_register); | 1244 | EXPORT_SYMBOL(i2o_event_register); |
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c index 1fdf03fd2da7..9706cc19134a 100644 --- a/drivers/misc/ibmasm/module.c +++ b/drivers/misc/ibmasm/module.c | |||
@@ -85,7 +85,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi | |||
85 | } | 85 | } |
86 | memset(sp, 0, sizeof(struct service_processor)); | 86 | memset(sp, 0, sizeof(struct service_processor)); |
87 | 87 | ||
88 | sp->lock = SPIN_LOCK_UNLOCKED; | 88 | spin_lock_init(&sp->lock); |
89 | INIT_LIST_HEAD(&sp->command_queue); | 89 | INIT_LIST_HEAD(&sp->command_queue); |
90 | 90 | ||
91 | pci_set_drvdata(pdev, (void *)sp); | 91 | pci_set_drvdata(pdev, (void *)sp); |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 0d435814aaa1..39edb8250fbc 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -357,6 +357,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
357 | mtd->resume = cfi_intelext_resume; | 357 | mtd->resume = cfi_intelext_resume; |
358 | mtd->flags = MTD_CAP_NORFLASH; | 358 | mtd->flags = MTD_CAP_NORFLASH; |
359 | mtd->name = map->name; | 359 | mtd->name = map->name; |
360 | mtd->writesize = 1; | ||
360 | 361 | ||
361 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; | 362 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; |
362 | 363 | ||
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c index c40b48dabed3..2c3f019197c1 100644 --- a/drivers/mtd/chips/jedec.c +++ b/drivers/mtd/chips/jedec.c | |||
@@ -256,6 +256,7 @@ static struct mtd_info *jedec_probe(struct map_info *map) | |||
256 | MTD->name = map->name; | 256 | MTD->name = map->name; |
257 | MTD->type = MTD_NORFLASH; | 257 | MTD->type = MTD_NORFLASH; |
258 | MTD->flags = MTD_CAP_NORFLASH; | 258 | MTD->flags = MTD_CAP_NORFLASH; |
259 | MTD->writesize = 1; | ||
259 | MTD->erasesize = SectorSize*(map->buswidth); | 260 | MTD->erasesize = SectorSize*(map->buswidth); |
260 | // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize); | 261 | // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize); |
261 | MTD->size = priv->size; | 262 | MTD->size = priv->size; |
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c index a611de9b1515..ac01a949b687 100644 --- a/drivers/mtd/chips/map_absent.c +++ b/drivers/mtd/chips/map_absent.c | |||
@@ -64,7 +64,8 @@ static struct mtd_info *map_absent_probe(struct map_info *map) | |||
64 | mtd->write = map_absent_write; | 64 | mtd->write = map_absent_write; |
65 | mtd->sync = map_absent_sync; | 65 | mtd->sync = map_absent_sync; |
66 | mtd->flags = 0; | 66 | mtd->flags = 0; |
67 | mtd->erasesize = PAGE_SIZE; | 67 | mtd->erasesize = PAGE_SIZE; |
68 | mtd->writesize = 1; | ||
68 | 69 | ||
69 | __module_get(THIS_MODULE); | 70 | __module_get(THIS_MODULE); |
70 | return mtd; | 71 | return mtd; |
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index 763925747db6..3a66680abfd0 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c | |||
@@ -71,6 +71,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map) | |||
71 | mtd->write = mapram_write; | 71 | mtd->write = mapram_write; |
72 | mtd->sync = mapram_nop; | 72 | mtd->sync = mapram_nop; |
73 | mtd->flags = MTD_CAP_RAM; | 73 | mtd->flags = MTD_CAP_RAM; |
74 | mtd->writesize = 1; | ||
74 | 75 | ||
75 | mtd->erasesize = PAGE_SIZE; | 76 | mtd->erasesize = PAGE_SIZE; |
76 | while(mtd->size & (mtd->erasesize - 1)) | 77 | while(mtd->size & (mtd->erasesize - 1)) |
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index bc6ee9ef8a31..1b328b1378fd 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c | |||
@@ -47,6 +47,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map) | |||
47 | mtd->sync = maprom_nop; | 47 | mtd->sync = maprom_nop; |
48 | mtd->flags = MTD_CAP_ROM; | 48 | mtd->flags = MTD_CAP_ROM; |
49 | mtd->erasesize = map->size; | 49 | mtd->erasesize = map->size; |
50 | mtd->writesize = 1; | ||
50 | 51 | ||
51 | __module_get(THIS_MODULE); | 52 | __module_get(THIS_MODULE); |
52 | return mtd; | 53 | return mtd; |
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 0d98c223c5fc..be3f1c136d02 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c | |||
@@ -324,6 +324,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) | |||
324 | 324 | ||
325 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; | 325 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; |
326 | dev->mtd.erasesize = erase_size; | 326 | dev->mtd.erasesize = erase_size; |
327 | dev->mtd.writesize = 1; | ||
327 | dev->mtd.type = MTD_RAM; | 328 | dev->mtd.type = MTD_RAM; |
328 | dev->mtd.flags = MTD_CAP_RAM; | 329 | dev->mtd.flags = MTD_CAP_RAM; |
329 | dev->mtd.erase = block2mtd_erase; | 330 | dev->mtd.erase = block2mtd_erase; |
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c index 4ab7670770e4..08dfb899b272 100644 --- a/drivers/mtd/devices/ms02-nv.c +++ b/drivers/mtd/devices/ms02-nv.c | |||
@@ -225,6 +225,7 @@ static int __init ms02nv_init_one(ulong addr) | |||
225 | mtd->owner = THIS_MODULE; | 225 | mtd->owner = THIS_MODULE; |
226 | mtd->read = ms02nv_read; | 226 | mtd->read = ms02nv_read; |
227 | mtd->write = ms02nv_write; | 227 | mtd->write = ms02nv_write; |
228 | mtd->writesize = 1; | ||
228 | 229 | ||
229 | ret = -EIO; | 230 | ret = -EIO; |
230 | if (add_mtd_device(mtd)) { | 231 | if (add_mtd_device(mtd)) { |
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index a19480d07888..04271d02b6b6 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c | |||
@@ -478,6 +478,7 @@ add_dataflash(struct spi_device *spi, char *name, | |||
478 | device->name = (pdata && pdata->name) ? pdata->name : priv->name; | 478 | device->name = (pdata && pdata->name) ? pdata->name : priv->name; |
479 | device->size = nr_pages * pagesize; | 479 | device->size = nr_pages * pagesize; |
480 | device->erasesize = pagesize; | 480 | device->erasesize = pagesize; |
481 | device->writesize = pagesize; | ||
481 | device->owner = THIS_MODULE; | 482 | device->owner = THIS_MODULE; |
482 | device->type = MTD_DATAFLASH; | 483 | device->type = MTD_DATAFLASH; |
483 | device->flags = MTD_CAP_NORFLASH; | 484 | device->flags = MTD_CAP_NORFLASH; |
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index e09e416667d3..6c7337f9ebbb 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c | |||
@@ -151,6 +151,7 @@ static int register_device(char *name, unsigned long start, unsigned long len) | |||
151 | new->mtd.owner = THIS_MODULE; | 151 | new->mtd.owner = THIS_MODULE; |
152 | new->mtd.type = MTD_RAM; | 152 | new->mtd.type = MTD_RAM; |
153 | new->mtd.erasesize = PAGE_SIZE; | 153 | new->mtd.erasesize = PAGE_SIZE; |
154 | new->mtd.writesize = 1; | ||
154 | 155 | ||
155 | ret = -EAGAIN; | 156 | ret = -EAGAIN; |
156 | if (add_mtd_device(&new->mtd)) { | 157 | if (add_mtd_device(&new->mtd)) { |
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index 666cce1bf60c..f620d74f1004 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c | |||
@@ -778,7 +778,8 @@ static int __init init_pmc551(void) | |||
778 | mtd->type = MTD_RAM; | 778 | mtd->type = MTD_RAM; |
779 | mtd->name = "PMC551 RAM board"; | 779 | mtd->name = "PMC551 RAM board"; |
780 | mtd->erasesize = 0x10000; | 780 | mtd->erasesize = 0x10000; |
781 | mtd->owner = THIS_MODULE; | 781 | mtd->writesize = 1; |
782 | mtd->owner = THIS_MODULE; | ||
782 | 783 | ||
783 | if (add_mtd_device(mtd)) { | 784 | if (add_mtd_device(mtd)) { |
784 | printk(KERN_NOTICE "pmc551: Failed to register new device\n"); | 785 | printk(KERN_NOTICE "pmc551: Failed to register new device\n"); |
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index b3f665e3c38b..542a0c009006 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c | |||
@@ -209,6 +209,7 @@ static int register_device(char *name, unsigned long start, unsigned long length | |||
209 | (*curmtd)->mtdinfo->owner = THIS_MODULE; | 209 | (*curmtd)->mtdinfo->owner = THIS_MODULE; |
210 | (*curmtd)->mtdinfo->type = MTD_RAM; | 210 | (*curmtd)->mtdinfo->type = MTD_RAM; |
211 | (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; | 211 | (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; |
212 | (*curmtd)->mtdinfo->writesize = 1; | ||
212 | 213 | ||
213 | if (add_mtd_device((*curmtd)->mtdinfo)) { | 214 | if (add_mtd_device((*curmtd)->mtdinfo)) { |
214 | E("slram: Failed to register new device\n"); | 215 | E("slram: Failed to register new device\n"); |
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index 2c9cc7f37e92..c26488a1793a 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c | |||
@@ -42,7 +42,6 @@ struct ixp2000_flash_info { | |||
42 | struct map_info map; | 42 | struct map_info map; |
43 | struct mtd_partition *partitions; | 43 | struct mtd_partition *partitions; |
44 | struct resource *res; | 44 | struct resource *res; |
45 | int nr_banks; | ||
46 | }; | 45 | }; |
47 | 46 | ||
48 | static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs) | 47 | static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs) |
@@ -183,7 +182,6 @@ static int ixp2000_flash_probe(struct platform_device *dev) | |||
183 | */ | 182 | */ |
184 | info->map.phys = NO_XIP; | 183 | info->map.phys = NO_XIP; |
185 | 184 | ||
186 | info->nr_banks = ixp_data->nr_banks; | ||
187 | info->map.size = ixp_data->nr_banks * window_size; | 185 | info->map.size = ixp_data->nr_banks * window_size; |
188 | info->map.bankwidth = 1; | 186 | info->map.bankwidth = 1; |
189 | 187 | ||
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 433c3cac3ca9..d6301f08906d 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -182,7 +182,7 @@ static struct physmap_flash_data physmap_flash_data = { | |||
182 | 182 | ||
183 | static struct resource physmap_flash_resource = { | 183 | static struct resource physmap_flash_resource = { |
184 | .start = CONFIG_MTD_PHYSMAP_START, | 184 | .start = CONFIG_MTD_PHYSMAP_START, |
185 | .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN, | 185 | .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1, |
186 | .flags = IORESOURCE_MEM, | 186 | .flags = IORESOURCE_MEM, |
187 | }; | 187 | }; |
188 | 188 | ||
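The physmap change above follows from struct resource describing an inclusive [start, end] byte range: a window of CONFIG_MTD_PHYSMAP_LEN bytes therefore ends at start + len - 1, not start + len. A sketch with placeholder values (EXAMPLE_BASE and EXAMPLE_LEN are not real config symbols):

#include <linux/ioport.h>

#define EXAMPLE_BASE	0x08000000	/* placeholder flash window base */
#define EXAMPLE_LEN	0x00800000	/* placeholder window length in bytes */

static struct resource example_flash_resource = {
	.start	= EXAMPLE_BASE,
	.end	= EXAMPLE_BASE + EXAMPLE_LEN - 1,	/* inclusive end, hence "- 1" */
	.flags	= IORESOURCE_MEM,
};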
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index aa18d45b264b..9a4b59d92525 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -78,7 +78,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig) | |||
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | } | 79 | } |
80 | 80 | ||
81 | if (offset >= 0 && offset < mtd->size) | 81 | if (offset >= 0 && offset <= mtd->size) |
82 | return file->f_pos = offset; | 82 | return file->f_pos = offset; |
83 | 83 | ||
84 | return -EINVAL; | 84 | return -EINVAL; |
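The relaxed bound above allows a seek to exactly mtd->size, so positioning at the end of the device behaves like seeking to the end of a regular file, and a following read reports end-of-device. A small user-space illustration, assuming /dev/mtd0 exists and is readable (placeholder device node):

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd = open("/dev/mtd0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (lseek(fd, 0, SEEK_END) == (off_t)-1)
		perror("lseek");		/* rejected by the old "<" test */
	else if (read(fd, buf, sizeof(buf)) == 0)
		printf("at end of device\n");
	close(fd);
	return 0;
}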
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 27083ed0a017..80a76654d963 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -1176,7 +1176,7 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | |||
1176 | 1176 | ||
1177 | status = chip->waitfunc(mtd, chip); | 1177 | status = chip->waitfunc(mtd, chip); |
1178 | 1178 | ||
1179 | return status; | 1179 | return status & NAND_STATUS_FAIL ? -EIO : 0; |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | /** | 1182 | /** |
@@ -1271,10 +1271,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1271 | sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); | 1271 | sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); |
1272 | buf = nand_transfer_oob(chip, buf, ops); | 1272 | buf = nand_transfer_oob(chip, buf, ops); |
1273 | 1273 | ||
1274 | readlen -= ops->ooblen; | ||
1275 | if (!readlen) | ||
1276 | break; | ||
1277 | |||
1278 | if (!(chip->options & NAND_NO_READRDY)) { | 1274 | if (!(chip->options & NAND_NO_READRDY)) { |
1279 | /* | 1275 | /* |
1280 | * Apply delay or wait for ready/busy pin. Do this | 1276 | * Apply delay or wait for ready/busy pin. Do this |
@@ -1288,6 +1284,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1288 | nand_wait_ready(mtd); | 1284 | nand_wait_ready(mtd); |
1289 | } | 1285 | } |
1290 | 1286 | ||
1287 | readlen -= ops->ooblen; | ||
1288 | if (!readlen) | ||
1289 | break; | ||
1290 | |||
1291 | /* Increment page address */ | 1291 | /* Increment page address */ |
1292 | realpage++; | 1292 | realpage++; |
1293 | 1293 | ||
@@ -1610,13 +1610,13 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
1610 | if (!writelen) | 1610 | if (!writelen) |
1611 | return 0; | 1611 | return 0; |
1612 | 1612 | ||
1613 | chipnr = (int)(to >> chip->chip_shift); | ||
1614 | chip->select_chip(mtd, chipnr); | ||
1615 | |||
1613 | /* Check, if it is write protected */ | 1616 | /* Check, if it is write protected */ |
1614 | if (nand_check_wp(mtd)) | 1617 | if (nand_check_wp(mtd)) |
1615 | return -EIO; | 1618 | return -EIO; |
1616 | 1619 | ||
1617 | chipnr = (int)(to >> chip->chip_shift); | ||
1618 | chip->select_chip(mtd, chipnr); | ||
1619 | |||
1620 | realpage = (int)(to >> chip->page_shift); | 1620 | realpage = (int)(to >> chip->page_shift); |
1621 | page = realpage & chip->pagemask; | 1621 | page = realpage & chip->pagemask; |
1622 | blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; | 1622 | blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; |
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index fe8d38514ba6..e5bd88f2d560 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c | |||
@@ -61,15 +61,15 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip) | |||
61 | 61 | ||
62 | static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) | 62 | static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
63 | { | 63 | { |
64 | struct nand_chip *chip = mtd->priv; | 64 | struct ndfc_controller *ndfc = &ndfc_ctrl; |
65 | 65 | ||
66 | if (cmd == NAND_CMD_NONE) | 66 | if (cmd == NAND_CMD_NONE) |
67 | return; | 67 | return; |
68 | 68 | ||
69 | if (ctrl & NAND_CLE) | 69 | if (ctrl & NAND_CLE) |
70 | writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_CMD); | 70 | writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD); |
71 | else | 71 | else |
72 | writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_ALE); | 72 | writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE); |
73 | } | 73 | } |
74 | 74 | ||
75 | static int ndfc_ready(struct mtd_info *mtd) | 75 | static int ndfc_ready(struct mtd_info *mtd) |
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 2c262fe03d8a..ff5cef24d5bb 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c | |||
@@ -63,8 +63,6 @@ | |||
63 | #include <asm/arch/regs-nand.h> | 63 | #include <asm/arch/regs-nand.h> |
64 | #include <asm/arch/nand.h> | 64 | #include <asm/arch/nand.h> |
65 | 65 | ||
66 | #define PFX "s3c2410-nand: " | ||
67 | |||
68 | #ifdef CONFIG_MTD_NAND_S3C2410_HWECC | 66 | #ifdef CONFIG_MTD_NAND_S3C2410_HWECC |
69 | static int hardware_ecc = 1; | 67 | static int hardware_ecc = 1; |
70 | #else | 68 | #else |
@@ -99,6 +97,12 @@ struct s3c2410_nand_mtd { | |||
99 | int scan_res; | 97 | int scan_res; |
100 | }; | 98 | }; |
101 | 99 | ||
100 | enum s3c_cpu_type { | ||
101 | TYPE_S3C2410, | ||
102 | TYPE_S3C2412, | ||
103 | TYPE_S3C2440, | ||
104 | }; | ||
105 | |||
102 | /* overview of the s3c2410 nand state */ | 106 | /* overview of the s3c2410 nand state */ |
103 | 107 | ||
104 | struct s3c2410_nand_info { | 108 | struct s3c2410_nand_info { |
@@ -112,9 +116,11 @@ struct s3c2410_nand_info { | |||
112 | struct resource *area; | 116 | struct resource *area; |
113 | struct clk *clk; | 117 | struct clk *clk; |
114 | void __iomem *regs; | 118 | void __iomem *regs; |
119 | void __iomem *sel_reg; | ||
120 | int sel_bit; | ||
115 | int mtd_count; | 121 | int mtd_count; |
116 | 122 | ||
117 | unsigned char is_s3c2440; | 123 | enum s3c_cpu_type cpu_type; |
118 | }; | 124 | }; |
119 | 125 | ||
120 | /* conversion functions */ | 126 | /* conversion functions */ |
@@ -148,7 +154,7 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info) | |||
148 | 154 | ||
149 | #define NS_IN_KHZ 1000000 | 155 | #define NS_IN_KHZ 1000000 |
150 | 156 | ||
151 | static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max) | 157 | static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) |
152 | { | 158 | { |
153 | int result; | 159 | int result; |
154 | 160 | ||
@@ -172,53 +178,58 @@ static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max) | |||
172 | 178 | ||
173 | /* controller setup */ | 179 | /* controller setup */ |
174 | 180 | ||
175 | static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, struct platform_device *pdev) | 181 | static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, |
182 | struct platform_device *pdev) | ||
176 | { | 183 | { |
177 | struct s3c2410_platform_nand *plat = to_nand_plat(pdev); | 184 | struct s3c2410_platform_nand *plat = to_nand_plat(pdev); |
178 | unsigned long clkrate = clk_get_rate(info->clk); | 185 | unsigned long clkrate = clk_get_rate(info->clk); |
186 | int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; | ||
179 | int tacls, twrph0, twrph1; | 187 | int tacls, twrph0, twrph1; |
180 | unsigned long cfg; | 188 | unsigned long cfg = 0; |
181 | 189 | ||
182 | /* calculate the timing information for the controller */ | 190 | /* calculate the timing information for the controller */ |
183 | 191 | ||
184 | clkrate /= 1000; /* turn clock into kHz for ease of use */ | 192 | clkrate /= 1000; /* turn clock into kHz for ease of use */ |
185 | 193 | ||
186 | if (plat != NULL) { | 194 | if (plat != NULL) { |
187 | tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4); | 195 | tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max); |
188 | twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8); | 196 | twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8); |
189 | twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8); | 197 | twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8); |
190 | } else { | 198 | } else { |
191 | /* default timings */ | 199 | /* default timings */ |
192 | tacls = 4; | 200 | tacls = tacls_max; |
193 | twrph0 = 8; | 201 | twrph0 = 8; |
194 | twrph1 = 8; | 202 | twrph1 = 8; |
195 | } | 203 | } |
196 | 204 | ||
197 | if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { | 205 | if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { |
198 | printk(KERN_ERR PFX "cannot get timings suitable for board\n"); | 206 | dev_err(info->device, "cannot get suitable timings\n"); |
199 | return -EINVAL; | 207 | return -EINVAL; |
200 | } | 208 | } |
201 | 209 | ||
202 | printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", | 210 | dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", |
203 | tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); | 211 | tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); |
204 | 212 | ||
205 | if (!info->is_s3c2440) { | 213 | switch (info->cpu_type) { |
214 | case TYPE_S3C2410: | ||
206 | cfg = S3C2410_NFCONF_EN; | 215 | cfg = S3C2410_NFCONF_EN; |
207 | cfg |= S3C2410_NFCONF_TACLS(tacls - 1); | 216 | cfg |= S3C2410_NFCONF_TACLS(tacls - 1); |
208 | cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); | 217 | cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); |
209 | cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); | 218 | cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); |
210 | } else { | 219 | break; |
220 | |||
221 | case TYPE_S3C2440: | ||
222 | case TYPE_S3C2412: | ||
211 | cfg = S3C2440_NFCONF_TACLS(tacls - 1); | 223 | cfg = S3C2440_NFCONF_TACLS(tacls - 1); |
212 | cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); | 224 | cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); |
213 | cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); | 225 | cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); |
214 | 226 | ||
215 | /* enable the controller and de-assert nFCE */ | 227 | /* enable the controller and de-assert nFCE */ |
216 | 228 | ||
217 | writel(S3C2440_NFCONT_ENABLE | S3C2440_NFCONT_ENABLE, | 229 | writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); |
218 | info->regs + S3C2440_NFCONT); | ||
219 | } | 230 | } |
220 | 231 | ||
221 | pr_debug(PFX "NF_CONF is 0x%lx\n", cfg); | 232 | dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg); |
222 | 233 | ||
223 | writel(cfg, info->regs + S3C2410_NFCONF); | 234 | writel(cfg, info->regs + S3C2410_NFCONF); |
224 | return 0; | 235 | return 0; |
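The timing setup above converts the board-requested tacls/twrph0/twrph1 values from nanoseconds into controller clock cycles, using the clock rate expressed in kHz (NS_IN_KHZ = 1000000) and the per-SoC maximum field width (tacls_max). The body of s3c_nand_calc_rate() lies outside this hunk; a hedged sketch of the conversion it has to perform, not the driver's exact code, is:

/* wanted: requested delay in ns, clk: controller clock in kHz,
 * max: largest cycle count the register field can hold. */
static int calc_cycles(int wanted, unsigned long clk, int max)
{
	/* one cycle lasts 1000000 / clk ns, so round the request up */
	int result = (wanted * clk + 1000000 - 1) / 1000000;

	if (result < 1)
		result = 1;	/* never program zero cycles */
	if (result > max)
		return -1;	/* board asks for more than the hardware allows */
	return result;
}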
@@ -231,26 +242,21 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip) | |||
231 | struct s3c2410_nand_info *info; | 242 | struct s3c2410_nand_info *info; |
232 | struct s3c2410_nand_mtd *nmtd; | 243 | struct s3c2410_nand_mtd *nmtd; |
233 | struct nand_chip *this = mtd->priv; | 244 | struct nand_chip *this = mtd->priv; |
234 | void __iomem *reg; | ||
235 | unsigned long cur; | 245 | unsigned long cur; |
236 | unsigned long bit; | ||
237 | 246 | ||
238 | nmtd = this->priv; | 247 | nmtd = this->priv; |
239 | info = nmtd->info; | 248 | info = nmtd->info; |
240 | 249 | ||
241 | bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE; | ||
242 | reg = info->regs + ((info->is_s3c2440) ? S3C2440_NFCONT : S3C2410_NFCONF); | ||
243 | |||
244 | if (chip != -1 && allow_clk_stop(info)) | 250 | if (chip != -1 && allow_clk_stop(info)) |
245 | clk_enable(info->clk); | 251 | clk_enable(info->clk); |
246 | 252 | ||
247 | cur = readl(reg); | 253 | cur = readl(info->sel_reg); |
248 | 254 | ||
249 | if (chip == -1) { | 255 | if (chip == -1) { |
250 | cur |= bit; | 256 | cur |= info->sel_bit; |
251 | } else { | 257 | } else { |
252 | if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { | 258 | if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { |
253 | printk(KERN_ERR PFX "chip %d out of range\n", chip); | 259 | dev_err(info->device, "invalid chip %d\n", chip); |
254 | return; | 260 | return; |
255 | } | 261 | } |
256 | 262 | ||
@@ -259,10 +265,10 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip) | |||
259 | (info->platform->select_chip) (nmtd->set, chip); | 265 | (info->platform->select_chip) (nmtd->set, chip); |
260 | } | 266 | } |
261 | 267 | ||
262 | cur &= ~bit; | 268 | cur &= ~info->sel_bit; |
263 | } | 269 | } |
264 | 270 | ||
265 | writel(cur, reg); | 271 | writel(cur, info->sel_reg); |
266 | 272 | ||
267 | if (chip == -1 && allow_clk_stop(info)) | 273 | if (chip == -1 && allow_clk_stop(info)) |
268 | clk_disable(info->clk); | 274 | clk_disable(info->clk); |
@@ -311,15 +317,25 @@ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd, | |||
311 | static int s3c2410_nand_devready(struct mtd_info *mtd) | 317 | static int s3c2410_nand_devready(struct mtd_info *mtd) |
312 | { | 318 | { |
313 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | 319 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); |
314 | |||
315 | if (info->is_s3c2440) | ||
316 | return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; | ||
317 | return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; | 320 | return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; |
318 | } | 321 | } |
319 | 322 | ||
323 | static int s3c2440_nand_devready(struct mtd_info *mtd) | ||
324 | { | ||
325 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | ||
326 | return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; | ||
327 | } | ||
328 | |||
329 | static int s3c2412_nand_devready(struct mtd_info *mtd) | ||
330 | { | ||
331 | struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); | ||
332 | return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY; | ||
333 | } | ||
334 | |||
320 | /* ECC handling functions */ | 335 | /* ECC handling functions */ |
321 | 336 | ||
322 | static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) | 337 | static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, |
338 | u_char *read_ecc, u_char *calc_ecc) | ||
323 | { | 339 | { |
324 | pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc); | 340 | pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc); |
325 | 341 | ||
@@ -487,11 +503,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
487 | struct s3c2410_nand_set *set) | 503 | struct s3c2410_nand_set *set) |
488 | { | 504 | { |
489 | struct nand_chip *chip = &nmtd->chip; | 505 | struct nand_chip *chip = &nmtd->chip; |
506 | void __iomem *regs = info->regs; | ||
490 | 507 | ||
491 | chip->IO_ADDR_R = info->regs + S3C2410_NFDATA; | ||
492 | chip->IO_ADDR_W = info->regs + S3C2410_NFDATA; | ||
493 | chip->cmd_ctrl = s3c2410_nand_hwcontrol; | ||
494 | chip->dev_ready = s3c2410_nand_devready; | ||
495 | chip->write_buf = s3c2410_nand_write_buf; | 508 | chip->write_buf = s3c2410_nand_write_buf; |
496 | chip->read_buf = s3c2410_nand_read_buf; | 509 | chip->read_buf = s3c2410_nand_read_buf; |
497 | chip->select_chip = s3c2410_nand_select_chip; | 510 | chip->select_chip = s3c2410_nand_select_chip; |
@@ -500,11 +513,37 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
500 | chip->options = 0; | 513 | chip->options = 0; |
501 | chip->controller = &info->controller; | 514 | chip->controller = &info->controller; |
502 | 515 | ||
503 | if (info->is_s3c2440) { | 516 | switch (info->cpu_type) { |
504 | chip->IO_ADDR_R = info->regs + S3C2440_NFDATA; | 517 | case TYPE_S3C2410: |
505 | chip->IO_ADDR_W = info->regs + S3C2440_NFDATA; | 518 | chip->IO_ADDR_W = regs + S3C2410_NFDATA; |
506 | chip->cmd_ctrl = s3c2440_nand_hwcontrol; | 519 | info->sel_reg = regs + S3C2410_NFCONF; |
507 | } | 520 | info->sel_bit = S3C2410_NFCONF_nFCE; |
521 | chip->cmd_ctrl = s3c2410_nand_hwcontrol; | ||
522 | chip->dev_ready = s3c2410_nand_devready; | ||
523 | break; | ||
524 | |||
525 | case TYPE_S3C2440: | ||
526 | chip->IO_ADDR_W = regs + S3C2440_NFDATA; | ||
527 | info->sel_reg = regs + S3C2440_NFCONT; | ||
528 | info->sel_bit = S3C2440_NFCONT_nFCE; | ||
529 | chip->cmd_ctrl = s3c2440_nand_hwcontrol; | ||
530 | chip->dev_ready = s3c2440_nand_devready; | ||
531 | break; | ||
532 | |||
533 | case TYPE_S3C2412: | ||
534 | chip->IO_ADDR_W = regs + S3C2440_NFDATA; | ||
535 | info->sel_reg = regs + S3C2440_NFCONT; | ||
536 | info->sel_bit = S3C2412_NFCONT_nFCE0; | ||
537 | chip->cmd_ctrl = s3c2440_nand_hwcontrol; | ||
538 | chip->dev_ready = s3c2412_nand_devready; | ||
539 | |||
540 | if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT) | ||
541 | dev_info(info->device, "System booted from NAND\n"); | ||
542 | |||
543 | break; | ||
544 | } | ||
545 | |||
546 | chip->IO_ADDR_R = chip->IO_ADDR_W; | ||
508 | 547 | ||
509 | nmtd->info = info; | 548 | nmtd->info = info; |
510 | nmtd->mtd.priv = chip; | 549 | nmtd->mtd.priv = chip; |
@@ -512,17 +551,25 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
512 | nmtd->set = set; | 551 | nmtd->set = set; |
513 | 552 | ||
514 | if (hardware_ecc) { | 553 | if (hardware_ecc) { |
515 | chip->ecc.correct = s3c2410_nand_correct_data; | ||
516 | chip->ecc.hwctl = s3c2410_nand_enable_hwecc; | ||
517 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; | 554 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; |
555 | chip->ecc.correct = s3c2410_nand_correct_data; | ||
518 | chip->ecc.mode = NAND_ECC_HW; | 556 | chip->ecc.mode = NAND_ECC_HW; |
519 | chip->ecc.size = 512; | 557 | chip->ecc.size = 512; |
520 | chip->ecc.bytes = 3; | 558 | chip->ecc.bytes = 3; |
521 | chip->ecc.layout = &nand_hw_eccoob; | 559 | chip->ecc.layout = &nand_hw_eccoob; |
522 | 560 | ||
523 | if (info->is_s3c2440) { | 561 | switch (info->cpu_type) { |
524 | chip->ecc.hwctl = s3c2440_nand_enable_hwecc; | 562 | case TYPE_S3C2410: |
525 | chip->ecc.calculate = s3c2440_nand_calculate_ecc; | 563 | chip->ecc.hwctl = s3c2410_nand_enable_hwecc; |
564 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; | ||
565 | break; | ||
566 | |||
567 | case TYPE_S3C2412: | ||
568 | case TYPE_S3C2440: | ||
569 | chip->ecc.hwctl = s3c2440_nand_enable_hwecc; | ||
570 | chip->ecc.calculate = s3c2440_nand_calculate_ecc; | ||
571 | break; | ||
572 | |||
526 | } | 573 | } |
527 | } else { | 574 | } else { |
528 | chip->ecc.mode = NAND_ECC_SOFT; | 575 | chip->ecc.mode = NAND_ECC_SOFT; |
@@ -537,7 +584,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
537 | * nand layer to look for devices | 584 | * nand layer to look for devices |
538 | */ | 585 | */ |
539 | 586 | ||
540 | static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440) | 587 | static int s3c24xx_nand_probe(struct platform_device *pdev, |
588 | enum s3c_cpu_type cpu_type) | ||
541 | { | 589 | { |
542 | struct s3c2410_platform_nand *plat = to_nand_plat(pdev); | 590 | struct s3c2410_platform_nand *plat = to_nand_plat(pdev); |
543 | struct s3c2410_nand_info *info; | 591 | struct s3c2410_nand_info *info; |
@@ -592,7 +640,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440) | |||
592 | info->device = &pdev->dev; | 640 | info->device = &pdev->dev; |
593 | info->platform = plat; | 641 | info->platform = plat; |
594 | info->regs = ioremap(res->start, size); | 642 | info->regs = ioremap(res->start, size); |
595 | info->is_s3c2440 = is_s3c2440; | 643 | info->cpu_type = cpu_type; |
596 | 644 | ||
597 | if (info->regs == NULL) { | 645 | if (info->regs == NULL) { |
598 | dev_err(&pdev->dev, "cannot reserve register region\n"); | 646 | dev_err(&pdev->dev, "cannot reserve register region\n"); |
@@ -699,12 +747,17 @@ static int s3c24xx_nand_resume(struct platform_device *dev) | |||
699 | 747 | ||
700 | static int s3c2410_nand_probe(struct platform_device *dev) | 748 | static int s3c2410_nand_probe(struct platform_device *dev) |
701 | { | 749 | { |
702 | return s3c24xx_nand_probe(dev, 0); | 750 | return s3c24xx_nand_probe(dev, TYPE_S3C2410); |
703 | } | 751 | } |
704 | 752 | ||
705 | static int s3c2440_nand_probe(struct platform_device *dev) | 753 | static int s3c2440_nand_probe(struct platform_device *dev) |
706 | { | 754 | { |
707 | return s3c24xx_nand_probe(dev, 1); | 755 | return s3c24xx_nand_probe(dev, TYPE_S3C2440); |
756 | } | ||
757 | |||
758 | static int s3c2412_nand_probe(struct platform_device *dev) | ||
759 | { | ||
760 | return s3c24xx_nand_probe(dev, TYPE_S3C2412); | ||
708 | } | 761 | } |
709 | 762 | ||
710 | static struct platform_driver s3c2410_nand_driver = { | 763 | static struct platform_driver s3c2410_nand_driver = { |
@@ -729,16 +782,29 @@ static struct platform_driver s3c2440_nand_driver = { | |||
729 | }, | 782 | }, |
730 | }; | 783 | }; |
731 | 784 | ||
785 | static struct platform_driver s3c2412_nand_driver = { | ||
786 | .probe = s3c2412_nand_probe, | ||
787 | .remove = s3c2410_nand_remove, | ||
788 | .suspend = s3c24xx_nand_suspend, | ||
789 | .resume = s3c24xx_nand_resume, | ||
790 | .driver = { | ||
791 | .name = "s3c2412-nand", | ||
792 | .owner = THIS_MODULE, | ||
793 | }, | ||
794 | }; | ||
795 | |||
732 | static int __init s3c2410_nand_init(void) | 796 | static int __init s3c2410_nand_init(void) |
733 | { | 797 | { |
734 | printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); | 798 | printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); |
735 | 799 | ||
800 | platform_driver_register(&s3c2412_nand_driver); | ||
736 | platform_driver_register(&s3c2440_nand_driver); | 801 | platform_driver_register(&s3c2440_nand_driver); |
737 | return platform_driver_register(&s3c2410_nand_driver); | 802 | return platform_driver_register(&s3c2410_nand_driver); |
738 | } | 803 | } |
739 | 804 | ||
740 | static void __exit s3c2410_nand_exit(void) | 805 | static void __exit s3c2410_nand_exit(void) |
741 | { | 806 | { |
807 | platform_driver_unregister(&s3c2412_nand_driver); | ||
742 | platform_driver_unregister(&s3c2440_nand_driver); | 808 | platform_driver_unregister(&s3c2440_nand_driver); |
743 | platform_driver_unregister(&s3c2410_nand_driver); | 809 | platform_driver_unregister(&s3c2410_nand_driver); |
744 | } | 810 | } |
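Editor's note: with the probe wrappers above, 2412 support is just another platform_driver whose .name must match a device registered by board code. A hypothetical board-file fragment showing the other half of that name match (the base address and size are placeholders, not taken from this patch):

	static struct resource s3c2412_nand_resource[] = {
		[0] = {
			.start = 0x4E000000,		/* assumed controller base */
			.end   = 0x4E000000 + 0x40 - 1,
			.flags = IORESOURCE_MEM,
		},
	};

	static struct platform_device s3c_device_nand = {
		.name		= "s3c2412-nand",	/* matches the new driver's .name */
		.id		= -1,
		.num_resources	= ARRAY_SIZE(s3c2412_nand_resource),
		.resource	= s3c2412_nand_resource,
	};

Registering this device from the machine init is then enough for s3c2412_nand_probe() to be called.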
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c index a0b4b1edcb0d..f40081069ab2 100644 --- a/drivers/mtd/nand/ts7250.c +++ b/drivers/mtd/nand/ts7250.c | |||
@@ -97,7 +97,7 @@ static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) | |||
97 | unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE; | 97 | unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE; |
98 | unsigned char bits; | 98 | unsigned char bits; |
99 | 99 | ||
100 | bits = (ctrl & NAND_CNE) << 2; | 100 | bits = (ctrl & NAND_NCE) << 2; |
101 | bits |= ctrl & NAND_CLE; | 101 | bits |= ctrl & NAND_CLE; |
102 | bits |= (ctrl & NAND_ALE) >> 2; | 102 | bits |= (ctrl & NAND_ALE) >> 2; |
103 | 103 | ||
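Editor's note: the ts7250 fix swaps the stale NAND_CNE name for NAND_NCE; the shifts only line up assuming the standard ctrl bit values (NAND_NCE = 0x01, NAND_CLE = 0x02, NAND_ALE = 0x04). A sketch of the resulting packing into a hypothetical 3-bit board latch:

	static unsigned char ts7250_pack_ctrl(unsigned int ctrl)
	{
		unsigned char bits;

		bits  = (ctrl & NAND_NCE) << 2;	/* ctrl bit 0 -> latch bit 2 (nCE) */
		bits |=  ctrl & NAND_CLE;	/* ctrl bit 1 -> latch bit 1 (CLE) */
		bits |= (ctrl & NAND_ALE) >> 2;	/* ctrl bit 2 -> latch bit 0 (ALE) */

		return bits;
	}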
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index bd6983d1afba..db694c832989 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -22,7 +22,7 @@ | |||
22 | * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) | 22 | * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) |
23 | * | 23 | * |
24 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) | 24 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) |
25 | * Copyright (c) 2004-2005 Macq Electronique SA. | 25 | * Copyright (c) 2004-2006 Macq Electronique SA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/config.h> | 28 | #include <linux/config.h> |
@@ -51,7 +51,7 @@ | |||
51 | 51 | ||
52 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \ | 52 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \ |
53 | defined(CONFIG_M5272) || defined(CONFIG_M528x) || \ | 53 | defined(CONFIG_M5272) || defined(CONFIG_M528x) || \ |
54 | defined(CONFIG_M520x) | 54 | defined(CONFIG_M520x) || defined(CONFIG_M532x) |
55 | #include <asm/coldfire.h> | 55 | #include <asm/coldfire.h> |
56 | #include <asm/mcfsim.h> | 56 | #include <asm/mcfsim.h> |
57 | #include "fec.h" | 57 | #include "fec.h" |
@@ -80,6 +80,8 @@ static unsigned int fec_hw[] = { | |||
80 | (MCF_MBAR + 0x1000), | 80 | (MCF_MBAR + 0x1000), |
81 | #elif defined(CONFIG_M520x) | 81 | #elif defined(CONFIG_M520x) |
82 | (MCF_MBAR+0x30000), | 82 | (MCF_MBAR+0x30000), |
83 | #elif defined(CONFIG_M532x) | ||
84 | (MCF_MBAR+0xfc030000), | ||
83 | #else | 85 | #else |
84 | &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec), | 86 | &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec), |
85 | #endif | 87 | #endif |
@@ -143,7 +145,7 @@ typedef struct { | |||
143 | #define TX_RING_MOD_MASK 15 /* for this to work */ | 145 | #define TX_RING_MOD_MASK 15 /* for this to work */ |
144 | 146 | ||
145 | #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) | 147 | #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) |
146 | #error "FEC: descriptor ring size contants too large" | 148 | #error "FEC: descriptor ring size constants too large" |
147 | #endif | 149 | #endif |
148 | 150 | ||
149 | /* Interrupt events/masks. | 151 | /* Interrupt events/masks. |
@@ -167,12 +169,12 @@ typedef struct { | |||
167 | 169 | ||
168 | 170 | ||
169 | /* | 171 | /* |
170 | * The 5270/5271/5280/5282 RX control register also contains maximum frame | 172 | * The 5270/5271/5280/5282/532x RX control register also contains maximum frame |
171 | * size bits. Other FEC hardware does not, so we need to take that into | 173 | * size bits. Other FEC hardware does not, so we need to take that into |
172 | * account when setting it. | 174 | * account when setting it. |
173 | */ | 175 | */ |
174 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 176 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
175 | defined(CONFIG_M520x) | 177 | defined(CONFIG_M520x) || defined(CONFIG_M532x) |
176 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) | 178 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
177 | #else | 179 | #else |
178 | #define OPT_FRAME_SIZE 0 | 180 | #define OPT_FRAME_SIZE 0 |
@@ -308,6 +310,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
308 | struct fec_enet_private *fep; | 310 | struct fec_enet_private *fep; |
309 | volatile fec_t *fecp; | 311 | volatile fec_t *fecp; |
310 | volatile cbd_t *bdp; | 312 | volatile cbd_t *bdp; |
313 | unsigned short status; | ||
311 | 314 | ||
312 | fep = netdev_priv(dev); | 315 | fep = netdev_priv(dev); |
313 | fecp = (volatile fec_t*)dev->base_addr; | 316 | fecp = (volatile fec_t*)dev->base_addr; |
@@ -320,8 +323,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
320 | /* Fill in a Tx ring entry */ | 323 | /* Fill in a Tx ring entry */ |
321 | bdp = fep->cur_tx; | 324 | bdp = fep->cur_tx; |
322 | 325 | ||
326 | status = bdp->cbd_sc; | ||
323 | #ifndef final_version | 327 | #ifndef final_version |
324 | if (bdp->cbd_sc & BD_ENET_TX_READY) { | 328 | if (status & BD_ENET_TX_READY) { |
325 | /* Ooops. All transmit buffers are full. Bail out. | 329 | /* Ooops. All transmit buffers are full. Bail out. |
326 | * This should not happen, since dev->tbusy should be set. | 330 | * This should not happen, since dev->tbusy should be set. |
327 | */ | 331 | */ |
@@ -332,7 +336,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
332 | 336 | ||
333 | /* Clear all of the status flags. | 337 | /* Clear all of the status flags. |
334 | */ | 338 | */ |
335 | bdp->cbd_sc &= ~BD_ENET_TX_STATS; | 339 | status &= ~BD_ENET_TX_STATS; |
336 | 340 | ||
337 | /* Set buffer length and buffer pointer. | 341 | /* Set buffer length and buffer pointer. |
338 | */ | 342 | */ |
@@ -366,21 +370,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
366 | 370 | ||
367 | spin_lock_irq(&fep->lock); | 371 | spin_lock_irq(&fep->lock); |
368 | 372 | ||
369 | /* Send it on its way. Tell FEC its ready, interrupt when done, | 373 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
370 | * its the last BD of the frame, and to put the CRC on the end. | 374 | * it's the last BD of the frame, and to put the CRC on the end. |
371 | */ | 375 | */ |
372 | 376 | ||
373 | bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | 377 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
374 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | 378 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); |
379 | bdp->cbd_sc = status; | ||
375 | 380 | ||
376 | dev->trans_start = jiffies; | 381 | dev->trans_start = jiffies; |
377 | 382 | ||
378 | /* Trigger transmission start */ | 383 | /* Trigger transmission start */ |
379 | fecp->fec_x_des_active = 0x01000000; | 384 | fecp->fec_x_des_active = 0; |
380 | 385 | ||
381 | /* If this was the last BD in the ring, start at the beginning again. | 386 | /* If this was the last BD in the ring, start at the beginning again. |
382 | */ | 387 | */ |
383 | if (bdp->cbd_sc & BD_ENET_TX_WRAP) { | 388 | if (status & BD_ENET_TX_WRAP) { |
384 | bdp = fep->tx_bd_base; | 389 | bdp = fep->tx_bd_base; |
385 | } else { | 390 | } else { |
386 | bdp++; | 391 | bdp++; |
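Editor's note: the transmit-path changes above all follow one pattern: the buffer descriptor lives in memory shared with the FEC, so its status word is read into a local once, modified there, and written back once. A condensed sketch (cbd_t and the BD_ENET_* flags are the driver's own definitions):

	static void fec_queue_tx_bd(volatile cbd_t *bdp)
	{
		unsigned short status = bdp->cbd_sc;		/* single volatile read */

		status &= ~BD_ENET_TX_STATS;			/* drop stale status bits */
		status |= BD_ENET_TX_READY | BD_ENET_TX_INTR |	/* hand BD to the FEC */
			  BD_ENET_TX_LAST  | BD_ENET_TX_TC;

		bdp->cbd_sc = status;				/* single volatile write */
	}

Besides saving uncached descriptor accesses, the controller only ever sees the fully assembled status word.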
@@ -491,43 +496,44 @@ fec_enet_tx(struct net_device *dev) | |||
491 | { | 496 | { |
492 | struct fec_enet_private *fep; | 497 | struct fec_enet_private *fep; |
493 | volatile cbd_t *bdp; | 498 | volatile cbd_t *bdp; |
499 | unsigned short status; | ||
494 | struct sk_buff *skb; | 500 | struct sk_buff *skb; |
495 | 501 | ||
496 | fep = netdev_priv(dev); | 502 | fep = netdev_priv(dev); |
497 | spin_lock(&fep->lock); | 503 | spin_lock(&fep->lock); |
498 | bdp = fep->dirty_tx; | 504 | bdp = fep->dirty_tx; |
499 | 505 | ||
500 | while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) { | 506 | while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { |
501 | if (bdp == fep->cur_tx && fep->tx_full == 0) break; | 507 | if (bdp == fep->cur_tx && fep->tx_full == 0) break; |
502 | 508 | ||
503 | skb = fep->tx_skbuff[fep->skb_dirty]; | 509 | skb = fep->tx_skbuff[fep->skb_dirty]; |
504 | /* Check for errors. */ | 510 | /* Check for errors. */ |
505 | if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 511 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
506 | BD_ENET_TX_RL | BD_ENET_TX_UN | | 512 | BD_ENET_TX_RL | BD_ENET_TX_UN | |
507 | BD_ENET_TX_CSL)) { | 513 | BD_ENET_TX_CSL)) { |
508 | fep->stats.tx_errors++; | 514 | fep->stats.tx_errors++; |
509 | if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ | 515 | if (status & BD_ENET_TX_HB) /* No heartbeat */ |
510 | fep->stats.tx_heartbeat_errors++; | 516 | fep->stats.tx_heartbeat_errors++; |
511 | if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ | 517 | if (status & BD_ENET_TX_LC) /* Late collision */ |
512 | fep->stats.tx_window_errors++; | 518 | fep->stats.tx_window_errors++; |
513 | if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ | 519 | if (status & BD_ENET_TX_RL) /* Retrans limit */ |
514 | fep->stats.tx_aborted_errors++; | 520 | fep->stats.tx_aborted_errors++; |
515 | if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ | 521 | if (status & BD_ENET_TX_UN) /* Underrun */ |
516 | fep->stats.tx_fifo_errors++; | 522 | fep->stats.tx_fifo_errors++; |
517 | if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ | 523 | if (status & BD_ENET_TX_CSL) /* Carrier lost */ |
518 | fep->stats.tx_carrier_errors++; | 524 | fep->stats.tx_carrier_errors++; |
519 | } else { | 525 | } else { |
520 | fep->stats.tx_packets++; | 526 | fep->stats.tx_packets++; |
521 | } | 527 | } |
522 | 528 | ||
523 | #ifndef final_version | 529 | #ifndef final_version |
524 | if (bdp->cbd_sc & BD_ENET_TX_READY) | 530 | if (status & BD_ENET_TX_READY) |
525 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); | 531 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); |
526 | #endif | 532 | #endif |
527 | /* Deferred means some collisions occurred during transmit, | 533 | /* Deferred means some collisions occurred during transmit, |
528 | * but we eventually sent the packet OK. | 534 | * but we eventually sent the packet OK. |
529 | */ | 535 | */ |
530 | if (bdp->cbd_sc & BD_ENET_TX_DEF) | 536 | if (status & BD_ENET_TX_DEF) |
531 | fep->stats.collisions++; | 537 | fep->stats.collisions++; |
532 | 538 | ||
533 | /* Free the sk buffer associated with this last transmit. | 539 | /* Free the sk buffer associated with this last transmit. |
@@ -538,7 +544,7 @@ fec_enet_tx(struct net_device *dev) | |||
538 | 544 | ||
539 | /* Update pointer to next buffer descriptor to be transmitted. | 545 | /* Update pointer to next buffer descriptor to be transmitted. |
540 | */ | 546 | */ |
541 | if (bdp->cbd_sc & BD_ENET_TX_WRAP) | 547 | if (status & BD_ENET_TX_WRAP) |
542 | bdp = fep->tx_bd_base; | 548 | bdp = fep->tx_bd_base; |
543 | else | 549 | else |
544 | bdp++; | 550 | bdp++; |
@@ -568,9 +574,14 @@ fec_enet_rx(struct net_device *dev) | |||
568 | struct fec_enet_private *fep; | 574 | struct fec_enet_private *fep; |
569 | volatile fec_t *fecp; | 575 | volatile fec_t *fecp; |
570 | volatile cbd_t *bdp; | 576 | volatile cbd_t *bdp; |
577 | unsigned short status; | ||
571 | struct sk_buff *skb; | 578 | struct sk_buff *skb; |
572 | ushort pkt_len; | 579 | ushort pkt_len; |
573 | __u8 *data; | 580 | __u8 *data; |
581 | |||
582 | #ifdef CONFIG_M532x | ||
583 | flush_cache_all(); | ||
584 | #endif | ||
574 | 585 | ||
575 | fep = netdev_priv(dev); | 586 | fep = netdev_priv(dev); |
576 | fecp = (volatile fec_t*)dev->base_addr; | 587 | fecp = (volatile fec_t*)dev->base_addr; |
@@ -580,13 +591,13 @@ fec_enet_rx(struct net_device *dev) | |||
580 | */ | 591 | */ |
581 | bdp = fep->cur_rx; | 592 | bdp = fep->cur_rx; |
582 | 593 | ||
583 | while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { | 594 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { |
584 | 595 | ||
585 | #ifndef final_version | 596 | #ifndef final_version |
586 | /* Since we have allocated space to hold a complete frame, | 597 | /* Since we have allocated space to hold a complete frame, |
587 | * the last indicator should be set. | 598 | * the last indicator should be set. |
588 | */ | 599 | */ |
589 | if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0) | 600 | if ((status & BD_ENET_RX_LAST) == 0) |
590 | printk("FEC ENET: rcv is not +last\n"); | 601 | printk("FEC ENET: rcv is not +last\n"); |
591 | #endif | 602 | #endif |
592 | 603 | ||
@@ -594,26 +605,26 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { | |||
594 | goto rx_processing_done; | 605 | goto rx_processing_done; |
595 | 606 | ||
596 | /* Check for errors. */ | 607 | /* Check for errors. */ |
597 | if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | 608 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
598 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | 609 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { |
599 | fep->stats.rx_errors++; | 610 | fep->stats.rx_errors++; |
600 | if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { | 611 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { |
601 | /* Frame too long or too short. */ | 612 | /* Frame too long or too short. */ |
602 | fep->stats.rx_length_errors++; | 613 | fep->stats.rx_length_errors++; |
603 | } | 614 | } |
604 | if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ | 615 | if (status & BD_ENET_RX_NO) /* Frame alignment */ |
605 | fep->stats.rx_frame_errors++; | 616 | fep->stats.rx_frame_errors++; |
606 | if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ | 617 | if (status & BD_ENET_RX_CR) /* CRC Error */ |
607 | fep->stats.rx_crc_errors++; | ||
608 | if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ | ||
609 | fep->stats.rx_crc_errors++; | 618 | fep->stats.rx_crc_errors++; |
619 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ | ||
620 | fep->stats.rx_fifo_errors++; | ||
610 | } | 621 | } |
611 | 622 | ||
612 | /* Report late collisions as a frame error. | 623 | /* Report late collisions as a frame error. |
613 | * On this error, the BD is closed, but we don't know what we | 624 | * On this error, the BD is closed, but we don't know what we |
614 | * have in the buffer. So, just drop this frame on the floor. | 625 | * have in the buffer. So, just drop this frame on the floor. |
615 | */ | 626 | */ |
616 | if (bdp->cbd_sc & BD_ENET_RX_CL) { | 627 | if (status & BD_ENET_RX_CL) { |
617 | fep->stats.rx_errors++; | 628 | fep->stats.rx_errors++; |
618 | fep->stats.rx_frame_errors++; | 629 | fep->stats.rx_frame_errors++; |
619 | goto rx_processing_done; | 630 | goto rx_processing_done; |
@@ -639,9 +650,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { | |||
639 | } else { | 650 | } else { |
640 | skb->dev = dev; | 651 | skb->dev = dev; |
641 | skb_put(skb,pkt_len-4); /* Make room */ | 652 | skb_put(skb,pkt_len-4); /* Make room */ |
642 | eth_copy_and_sum(skb, | 653 | eth_copy_and_sum(skb, data, pkt_len-4, 0); |
643 | (unsigned char *)__va(bdp->cbd_bufaddr), | ||
644 | pkt_len-4, 0); | ||
645 | skb->protocol=eth_type_trans(skb,dev); | 654 | skb->protocol=eth_type_trans(skb,dev); |
646 | netif_rx(skb); | 655 | netif_rx(skb); |
647 | } | 656 | } |
@@ -649,15 +658,16 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { | |||
649 | 658 | ||
650 | /* Clear the status flags for this buffer. | 659 | /* Clear the status flags for this buffer. |
651 | */ | 660 | */ |
652 | bdp->cbd_sc &= ~BD_ENET_RX_STATS; | 661 | status &= ~BD_ENET_RX_STATS; |
653 | 662 | ||
654 | /* Mark the buffer empty. | 663 | /* Mark the buffer empty. |
655 | */ | 664 | */ |
656 | bdp->cbd_sc |= BD_ENET_RX_EMPTY; | 665 | status |= BD_ENET_RX_EMPTY; |
666 | bdp->cbd_sc = status; | ||
657 | 667 | ||
658 | /* Update BD pointer to next entry. | 668 | /* Update BD pointer to next entry. |
659 | */ | 669 | */ |
660 | if (bdp->cbd_sc & BD_ENET_RX_WRAP) | 670 | if (status & BD_ENET_RX_WRAP) |
661 | bdp = fep->rx_bd_base; | 671 | bdp = fep->rx_bd_base; |
662 | else | 672 | else |
663 | bdp++; | 673 | bdp++; |
@@ -667,9 +677,9 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { | |||
667 | * incoming frames. On a heavily loaded network, we should be | 677 | * incoming frames. On a heavily loaded network, we should be |
668 | * able to keep up at the expense of system resources. | 678 | * able to keep up at the expense of system resources. |
669 | */ | 679 | */ |
670 | fecp->fec_r_des_active = 0x01000000; | 680 | fecp->fec_r_des_active = 0; |
671 | #endif | 681 | #endif |
672 | } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */ | 682 | } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */ |
673 | fep->cur_rx = (cbd_t *)bdp; | 683 | fep->cur_rx = (cbd_t *)bdp; |
674 | 684 | ||
675 | #if 0 | 685 | #if 0 |
@@ -680,11 +690,12 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { | |||
680 | * our way back to the interrupt return only to come right back | 690 | * our way back to the interrupt return only to come right back |
681 | * here. | 691 | * here. |
682 | */ | 692 | */ |
683 | fecp->fec_r_des_active = 0x01000000; | 693 | fecp->fec_r_des_active = 0; |
684 | #endif | 694 | #endif |
685 | } | 695 | } |
686 | 696 | ||
687 | 697 | ||
698 | /* called from interrupt context */ | ||
688 | static void | 699 | static void |
689 | fec_enet_mii(struct net_device *dev) | 700 | fec_enet_mii(struct net_device *dev) |
690 | { | 701 | { |
@@ -696,10 +707,12 @@ fec_enet_mii(struct net_device *dev) | |||
696 | fep = netdev_priv(dev); | 707 | fep = netdev_priv(dev); |
697 | ep = fep->hwp; | 708 | ep = fep->hwp; |
698 | mii_reg = ep->fec_mii_data; | 709 | mii_reg = ep->fec_mii_data; |
710 | |||
711 | spin_lock(&fep->lock); | ||
699 | 712 | ||
700 | if ((mip = mii_head) == NULL) { | 713 | if ((mip = mii_head) == NULL) { |
701 | printk("MII and no head!\n"); | 714 | printk("MII and no head!\n"); |
702 | return; | 715 | goto unlock; |
703 | } | 716 | } |
704 | 717 | ||
705 | if (mip->mii_func != NULL) | 718 | if (mip->mii_func != NULL) |
@@ -711,6 +724,9 @@ fec_enet_mii(struct net_device *dev) | |||
711 | 724 | ||
712 | if ((mip = mii_head) != NULL) | 725 | if ((mip = mii_head) != NULL) |
713 | ep->fec_mii_data = mip->mii_regval; | 726 | ep->fec_mii_data = mip->mii_regval; |
727 | |||
728 | unlock: | ||
729 | spin_unlock(&fep->lock); | ||
714 | } | 730 | } |
715 | 731 | ||
716 | static int | 732 | static int |
@@ -728,8 +744,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi | |||
728 | 744 | ||
729 | retval = 0; | 745 | retval = 0; |
730 | 746 | ||
731 | save_flags(flags); | 747 | spin_lock_irqsave(&fep->lock,flags); |
732 | cli(); | ||
733 | 748 | ||
734 | if ((mip = mii_free) != NULL) { | 749 | if ((mip = mii_free) != NULL) { |
735 | mii_free = mip->mii_next; | 750 | mii_free = mip->mii_next; |
@@ -749,7 +764,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi | |||
749 | retval = 1; | 764 | retval = 1; |
750 | } | 765 | } |
751 | 766 | ||
752 | restore_flags(flags); | 767 | spin_unlock_irqrestore(&fep->lock,flags); |
753 | 768 | ||
754 | return(retval); | 769 | return(retval); |
755 | } | 770 | } |
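Editor's note: save_flags()/cli() only masks interrupts on the local CPU and was already deprecated; the replacement pairs IRQ disabling with a real lock, so the MII queue is also safe against other CPUs and against fec_enet_mii() running in interrupt context. A self-contained sketch of the idiom with hypothetical types:

	struct req_queue {
		spinlock_t		lock;
		struct list_head	pending;
	};

	struct req {
		struct list_head	node;
	};

	static void queue_request(struct req_queue *q, struct req *r)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);	/* mask local IRQs and take the lock */
		list_add_tail(&r->node, &q->pending);
		spin_unlock_irqrestore(&q->lock, flags);
	}

The interrupt-side consumer takes plain spin_lock()/spin_unlock() on the same lock, which is exactly what the fec_enet_mii() hunk above adds.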
@@ -1216,7 +1231,7 @@ static phy_info_t const * const phy_info[] = { | |||
1216 | }; | 1231 | }; |
1217 | 1232 | ||
1218 | /* ------------------------------------------------------------------------- */ | 1233 | /* ------------------------------------------------------------------------- */ |
1219 | 1234 | #if !defined(CONFIG_M532x) | |
1219 | | 1234 | #if !defined(CONFIG_M532x)
1220 | #ifdef CONFIG_RPXCLASSIC | 1235 | #ifdef CONFIG_RPXCLASSIC |
1221 | static void | 1236 | static void |
1222 | mii_link_interrupt(void *dev_id); | 1237 | mii_link_interrupt(void *dev_id); |
@@ -1224,6 +1239,7 @@ mii_link_interrupt(void *dev_id); | |||
1224 | static irqreturn_t | 1239 | static irqreturn_t |
1225 | mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs); | 1240 | mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs); |
1226 | #endif | 1241 | #endif |
1242 | #endif | ||
1227 | 1243 | ||
1228 | #if defined(CONFIG_M5272) | 1244 | #if defined(CONFIG_M5272) |
1229 | 1245 | ||
@@ -1384,13 +1400,13 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1384 | { | 1400 | { |
1385 | volatile unsigned char *icrp; | 1401 | volatile unsigned char *icrp; |
1386 | volatile unsigned long *imrp; | 1402 | volatile unsigned long *imrp; |
1387 | int i; | 1403 | int i, ilip; |
1388 | 1404 | ||
1389 | b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0; | 1405 | b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0; |
1390 | icrp = (volatile unsigned char *) (MCF_IPSBAR + b + | 1406 | icrp = (volatile unsigned char *) (MCF_IPSBAR + b + |
1391 | MCFINTC_ICR0); | 1407 | MCFINTC_ICR0); |
1392 | for (i = 23; (i < 36); i++) | 1408 | for (i = 23, ilip = 0x28; (i < 36); i++) |
1393 | icrp[i] = 0x23; | 1409 | icrp[i] = ilip--; |
1394 | 1410 | ||
1395 | imrp = (volatile unsigned long *) (MCF_IPSBAR + b + | 1411 | imrp = (volatile unsigned long *) (MCF_IPSBAR + b + |
1396 | MCFINTC_IMRH); | 1412 | MCFINTC_IMRH); |
@@ -1618,6 +1634,159 @@ static void __inline__ fec_uncache(unsigned long addr) | |||
1618 | 1634 | ||
1619 | /* ------------------------------------------------------------------------- */ | 1635 | /* ------------------------------------------------------------------------- */ |
1620 | 1636 | ||
1637 | #elif defined(CONFIG_M532x) | ||
1638 | /* | ||
1639 | * Code specific for M532x | ||
1640 | */ | ||
1641 | static void __inline__ fec_request_intrs(struct net_device *dev) | ||
1642 | { | ||
1643 | struct fec_enet_private *fep; | ||
1644 | int b; | ||
1645 | static const struct idesc { | ||
1646 | char *name; | ||
1647 | unsigned short irq; | ||
1648 | } *idp, id[] = { | ||
1649 | { "fec(TXF)", 36 }, | ||
1650 | { "fec(TXB)", 37 }, | ||
1651 | { "fec(TXFIFO)", 38 }, | ||
1652 | { "fec(TXCR)", 39 }, | ||
1653 | { "fec(RXF)", 40 }, | ||
1654 | { "fec(RXB)", 41 }, | ||
1655 | { "fec(MII)", 42 }, | ||
1656 | { "fec(LC)", 43 }, | ||
1657 | { "fec(HBERR)", 44 }, | ||
1658 | { "fec(GRA)", 45 }, | ||
1659 | { "fec(EBERR)", 46 }, | ||
1660 | { "fec(BABT)", 47 }, | ||
1661 | { "fec(BABR)", 48 }, | ||
1662 | { NULL }, | ||
1663 | }; | ||
1664 | |||
1665 | fep = netdev_priv(dev); | ||
1666 | b = (fep->index) ? 128 : 64; | ||
1667 | |||
1668 | /* Setup interrupt handlers. */ | ||
1669 | for (idp = id; idp->name; idp++) { | ||
1670 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | ||
1671 | printk("FEC: Could not allocate %s IRQ(%d)!\n", | ||
1672 | idp->name, b+idp->irq); | ||
1673 | } | ||
1674 | |||
1675 | /* Unmask interrupts */ | ||
1676 | MCF_INTC0_ICR36 = 0x2; | ||
1677 | MCF_INTC0_ICR37 = 0x2; | ||
1678 | MCF_INTC0_ICR38 = 0x2; | ||
1679 | MCF_INTC0_ICR39 = 0x2; | ||
1680 | MCF_INTC0_ICR40 = 0x2; | ||
1681 | MCF_INTC0_ICR41 = 0x2; | ||
1682 | MCF_INTC0_ICR42 = 0x2; | ||
1683 | MCF_INTC0_ICR43 = 0x2; | ||
1684 | MCF_INTC0_ICR44 = 0x2; | ||
1685 | MCF_INTC0_ICR45 = 0x2; | ||
1686 | MCF_INTC0_ICR46 = 0x2; | ||
1687 | MCF_INTC0_ICR47 = 0x2; | ||
1688 | MCF_INTC0_ICR48 = 0x2; | ||
1689 | |||
1690 | MCF_INTC0_IMRH &= ~( | ||
1691 | MCF_INTC_IMRH_INT_MASK36 | | ||
1692 | MCF_INTC_IMRH_INT_MASK37 | | ||
1693 | MCF_INTC_IMRH_INT_MASK38 | | ||
1694 | MCF_INTC_IMRH_INT_MASK39 | | ||
1695 | MCF_INTC_IMRH_INT_MASK40 | | ||
1696 | MCF_INTC_IMRH_INT_MASK41 | | ||
1697 | MCF_INTC_IMRH_INT_MASK42 | | ||
1698 | MCF_INTC_IMRH_INT_MASK43 | | ||
1699 | MCF_INTC_IMRH_INT_MASK44 | | ||
1700 | MCF_INTC_IMRH_INT_MASK45 | | ||
1701 | MCF_INTC_IMRH_INT_MASK46 | | ||
1702 | MCF_INTC_IMRH_INT_MASK47 | | ||
1703 | MCF_INTC_IMRH_INT_MASK48 ); | ||
1704 | |||
1705 | /* Set up gpio outputs for MII lines */ | ||
1706 | MCF_GPIO_PAR_FECI2C |= (0 | | ||
1707 | MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC | | ||
1708 | MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO); | ||
1709 | MCF_GPIO_PAR_FEC = (0 | | ||
1710 | MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | | ||
1711 | MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC); | ||
1712 | } | ||
1713 | |||
1714 | static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) | ||
1715 | { | ||
1716 | volatile fec_t *fecp; | ||
1717 | |||
1718 | fecp = fep->hwp; | ||
1719 | fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; | ||
1720 | fecp->fec_x_cntrl = 0x00; | ||
1721 | |||
1722 | /* | ||
1723 | * Set MII speed to 2.5 MHz | ||
1724 | */ | ||
1725 | fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; | ||
1726 | fecp->fec_mii_speed = fep->phy_speed; | ||
1727 | |||
1728 | fec_restart(dev, 0); | ||
1729 | } | ||
1730 | |||
1731 | static void __inline__ fec_get_mac(struct net_device *dev) | ||
1732 | { | ||
1733 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1734 | volatile fec_t *fecp; | ||
1735 | unsigned char *iap, tmpaddr[ETH_ALEN]; | ||
1736 | |||
1737 | fecp = fep->hwp; | ||
1738 | |||
1739 | if (FEC_FLASHMAC) { | ||
1740 | /* | ||
1741 | * Get MAC address from FLASH. | ||
1742 | * If it is all 1's or 0's, use the default. | ||
1743 | */ | ||
1744 | iap = FEC_FLASHMAC; | ||
1745 | if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && | ||
1746 | (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) | ||
1747 | iap = fec_mac_default; | ||
1748 | if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && | ||
1749 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) | ||
1750 | iap = fec_mac_default; | ||
1751 | } else { | ||
1752 | *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; | ||
1753 | *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); | ||
1754 | iap = &tmpaddr[0]; | ||
1755 | } | ||
1756 | |||
1757 | memcpy(dev->dev_addr, iap, ETH_ALEN); | ||
1758 | |||
1759 | /* Adjust MAC if using default MAC address */ | ||
1760 | if (iap == fec_mac_default) | ||
1761 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | ||
1762 | } | ||
1763 | |||
1764 | static void __inline__ fec_enable_phy_intr(void) | ||
1765 | { | ||
1766 | } | ||
1767 | |||
1768 | static void __inline__ fec_disable_phy_intr(void) | ||
1769 | { | ||
1770 | } | ||
1771 | |||
1772 | static void __inline__ fec_phy_ack_intr(void) | ||
1773 | { | ||
1774 | } | ||
1775 | |||
1776 | static void __inline__ fec_localhw_setup(void) | ||
1777 | { | ||
1778 | } | ||
1779 | |||
1780 | /* | ||
1781 | * Do not need to make region uncached on 532x. | ||
1782 | */ | ||
1783 | static void __inline__ fec_uncache(unsigned long addr) | ||
1784 | { | ||
1785 | } | ||
1786 | |||
1787 | /* ------------------------------------------------------------------------- */ | ||
1788 | |||
1789 | |||
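Editor's note: the 532x support block above registers thirteen separate FEC interrupt sources. An abbreviated sketch of the table-driven request_irq() loop it uses (the vector numbers are the ones from the patch; the table is shortened here):

	static const struct {
		const char	*name;
		unsigned short	irq;
	} fec_irqs[] = {
		{ "fec(TXF)", 36 },
		{ "fec(RXF)", 40 },
		{ "fec(MII)", 42 },
		{ NULL, 0 },
	};

	static void fec_request_irqs_sketch(struct net_device *dev, int base)
	{
		int i;

		for (i = 0; fec_irqs[i].name; i++)
			if (request_irq(base + fec_irqs[i].irq, fec_enet_interrupt,
					0, fec_irqs[i].name, dev) != 0)
				printk(KERN_ERR "FEC: could not allocate %s IRQ (%d)\n",
				       fec_irqs[i].name, base + fec_irqs[i].irq);
	}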
1621 | #else | 1790 | #else |
1622 | 1791 | ||
1623 | /* | 1792 | /* |
@@ -1985,9 +2154,12 @@ fec_enet_open(struct net_device *dev) | |||
1985 | mii_do_cmd(dev, fep->phy->config); | 2154 | mii_do_cmd(dev, fep->phy->config); |
1986 | mii_do_cmd(dev, phy_cmd_config); /* display configuration */ | 2155 | mii_do_cmd(dev, phy_cmd_config); /* display configuration */ |
1987 | 2156 | ||
1988 | /* FIXME: use netif_carrier_{on,off} ; this polls | 2157 | /* Poll until the PHY tells us its configuration |
1989 | * until link is up which is wrong... could be | 2158 | * (not link state). |
1990 | * 30 seconds or more we are trapped in here. -jgarzik | 2159 | * Request is initiated by mii_do_cmd above, but answer |
2160 | * comes by interrupt. | ||
2161 | * This should take about 25 usec per register at 2.5 MHz, | ||
2162 | * and we read approximately 5 registers. | ||
1991 | */ | 2163 | */ |
1992 | while(!fep->sequence_done) | 2164 | while(!fep->sequence_done) |
1993 | schedule(); | 2165 | schedule(); |
@@ -2253,15 +2425,11 @@ int __init fec_enet_init(struct net_device *dev) | |||
2253 | */ | 2425 | */ |
2254 | fec_request_intrs(dev); | 2426 | fec_request_intrs(dev); |
2255 | 2427 | ||
2256 | /* Clear and enable interrupts */ | ||
2257 | fecp->fec_ievent = 0xffc00000; | ||
2258 | fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | | ||
2259 | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); | ||
2260 | fecp->fec_hash_table_high = 0; | 2428 | fecp->fec_hash_table_high = 0; |
2261 | fecp->fec_hash_table_low = 0; | 2429 | fecp->fec_hash_table_low = 0; |
2262 | fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; | 2430 | fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; |
2263 | fecp->fec_ecntrl = 2; | 2431 | fecp->fec_ecntrl = 2; |
2264 | fecp->fec_r_des_active = 0x01000000; | 2432 | fecp->fec_r_des_active = 0; |
2265 | 2433 | ||
2266 | dev->base_addr = (unsigned long)fecp; | 2434 | dev->base_addr = (unsigned long)fecp; |
2267 | 2435 | ||
@@ -2281,6 +2449,11 @@ int __init fec_enet_init(struct net_device *dev) | |||
2281 | /* setup MII interface */ | 2449 | /* setup MII interface */ |
2282 | fec_set_mii(dev, fep); | 2450 | fec_set_mii(dev, fep); |
2283 | 2451 | ||
2452 | /* Clear and enable interrupts */ | ||
2453 | fecp->fec_ievent = 0xffc00000; | ||
2454 | fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | | ||
2455 | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); | ||
2456 | |||
2284 | /* Queue up command to detect the PHY and initialize the | 2457 | /* Queue up command to detect the PHY and initialize the |
2285 | * remainder of the interface. | 2458 | * remainder of the interface. |
2286 | */ | 2459 | */ |
@@ -2312,11 +2485,6 @@ fec_restart(struct net_device *dev, int duplex) | |||
2312 | fecp->fec_ecntrl = 1; | 2485 | fecp->fec_ecntrl = 1; |
2313 | udelay(10); | 2486 | udelay(10); |
2314 | 2487 | ||
2315 | /* Enable interrupts we wish to service. | ||
2316 | */ | ||
2317 | fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | | ||
2318 | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); | ||
2319 | |||
2320 | /* Clear any outstanding interrupt. | 2488 | /* Clear any outstanding interrupt. |
2321 | */ | 2489 | */ |
2322 | fecp->fec_ievent = 0xffc00000; | 2490 | fecp->fec_ievent = 0xffc00000; |
@@ -2408,7 +2576,12 @@ fec_restart(struct net_device *dev, int duplex) | |||
2408 | /* And last, enable the transmit and receive processing. | 2576 | /* And last, enable the transmit and receive processing. |
2409 | */ | 2577 | */ |
2410 | fecp->fec_ecntrl = 2; | 2578 | fecp->fec_ecntrl = 2; |
2411 | fecp->fec_r_des_active = 0x01000000; | 2579 | fecp->fec_r_des_active = 0; |
2580 | |||
2581 | /* Enable interrupts we wish to service. | ||
2582 | */ | ||
2583 | fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | | ||
2584 | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); | ||
2412 | } | 2585 | } |
2413 | 2586 | ||
2414 | static void | 2587 | static void |
@@ -2420,9 +2593,16 @@ fec_stop(struct net_device *dev) | |||
2420 | fep = netdev_priv(dev); | 2593 | fep = netdev_priv(dev); |
2421 | fecp = fep->hwp; | 2594 | fecp = fep->hwp; |
2422 | 2595 | ||
2423 | fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */ | 2596 | /* |
2424 | 2597 | ** We cannot expect a graceful transmit stop without link !!! | |
2425 | while(!(fecp->fec_ievent & FEC_ENET_GRA)); | 2598 | */ |
2599 | if (fep->link) | ||
2600 | { | ||
2601 | fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */ | ||
2602 | udelay(10); | ||
2603 | if (!(fecp->fec_ievent & FEC_ENET_GRA)) | ||
2604 | printk("fec_stop : Graceful transmit stop did not complete !\n"); | ||
2605 | } | ||
2426 | 2606 | ||
2427 | /* Whack a reset. We should wait for this. | 2607 | /* Whack a reset. We should wait for this. |
2428 | */ | 2608 | */ |
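Editor's note: the fec_stop() change replaces an unbounded busy-wait on the GRA (graceful stop complete) event with a guarded, bounded attempt: with no link the FEC never raises GRA, so the old loop could spin forever. Condensed, the new logic is:

	static void fec_graceful_stop(volatile fec_t *fecp, int link)
	{
		if (!link)
			return;			/* no link: GRA would never be raised */

		fecp->fec_x_cntrl = 0x01;	/* request graceful transmit stop */
		udelay(10);			/* give the current frame time to drain */

		if (!(fecp->fec_ievent & FEC_ENET_GRA))
			printk(KERN_WARNING "FEC: graceful transmit stop did not complete\n");
	}

The reset that follows in fec_stop() proceeds either way, so a missed GRA only costs a warning.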
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c index c6770377ef87..0cd07150bf4a 100644 --- a/drivers/net/fs_enet/fs_enet-mii.c +++ b/drivers/net/fs_enet/fs_enet-mii.c | |||
@@ -431,8 +431,7 @@ static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi) | |||
431 | return bus; | 431 | return bus; |
432 | 432 | ||
433 | err: | 433 | err: |
434 | if (bus) | 434 | kfree(bus); |
435 | kfree(bus); | ||
436 | return ERR_PTR(ret); | 435 | return ERR_PTR(ret); |
437 | } | 436 | } |
438 | 437 | ||
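Editor's note: this cleanup (and the ipw2200 one below) relies on kfree(NULL) being a defined no-op, so error paths can funnel through one label without per-pointer checks. A condensed sketch of the resulting create_bus()-style error handling:

	static struct fs_enet_mii_bus *create_bus_sketch(void)
	{
		struct fs_enet_mii_bus *bus;
		int ret = -ENOMEM;

		bus = kzalloc(sizeof(*bus), GFP_KERNEL);
		if (bus == NULL)
			goto err;

		/* ... later setup steps set ret and "goto err" on failure ... */

		return bus;
	err:
		kfree(bus);		/* safe even when bus is still NULL */
		return ERR_PTR(ret);
	}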
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 081a8999666e..a8a8f975432f 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1229,12 +1229,6 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) | |||
1229 | return error; | 1229 | return error; |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static void ipw_free_error_log(struct ipw_fw_error *error) | ||
1233 | { | ||
1234 | if (error) | ||
1235 | kfree(error); | ||
1236 | } | ||
1237 | |||
1238 | static ssize_t show_event_log(struct device *d, | 1232 | static ssize_t show_event_log(struct device *d, |
1239 | struct device_attribute *attr, char *buf) | 1233 | struct device_attribute *attr, char *buf) |
1240 | { | 1234 | { |
@@ -1296,10 +1290,9 @@ static ssize_t clear_error(struct device *d, | |||
1296 | const char *buf, size_t count) | 1290 | const char *buf, size_t count) |
1297 | { | 1291 | { |
1298 | struct ipw_priv *priv = dev_get_drvdata(d); | 1292 | struct ipw_priv *priv = dev_get_drvdata(d); |
1299 | if (priv->error) { | 1293 | |
1300 | ipw_free_error_log(priv->error); | 1294 | kfree(priv->error); |
1301 | priv->error = NULL; | 1295 | priv->error = NULL; |
1302 | } | ||
1303 | return count; | 1296 | return count; |
1304 | } | 1297 | } |
1305 | 1298 | ||
@@ -1970,8 +1963,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1970 | struct ipw_fw_error *error = | 1963 | struct ipw_fw_error *error = |
1971 | ipw_alloc_error_log(priv); | 1964 | ipw_alloc_error_log(priv); |
1972 | ipw_dump_error_log(priv, error); | 1965 | ipw_dump_error_log(priv, error); |
1973 | if (error) | 1966 | kfree(error); |
1974 | ipw_free_error_log(error); | ||
1975 | } | 1967 | } |
1976 | #endif | 1968 | #endif |
1977 | } else { | 1969 | } else { |
@@ -11693,10 +11685,8 @@ static void ipw_pci_remove(struct pci_dev *pdev) | |||
11693 | } | 11685 | } |
11694 | } | 11686 | } |
11695 | 11687 | ||
11696 | if (priv->error) { | 11688 | kfree(priv->error); |
11697 | ipw_free_error_log(priv->error); | 11689 | priv->error = NULL; |
11698 | priv->error = NULL; | ||
11699 | } | ||
11700 | 11690 | ||
11701 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 11691 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
11702 | ipw_prom_free(priv); | 11692 | ipw_prom_free(priv); |
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 0e07d9535116..d0f68ab8f041 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c | |||
@@ -157,7 +157,7 @@ MODULE_LICENSE("Dual MPL/GPL"); | |||
157 | 157 | ||
158 | static int pcmcia_schlvl = PCMCIA_SCHLVL; | 158 | static int pcmcia_schlvl = PCMCIA_SCHLVL; |
159 | 159 | ||
160 | static spinlock_t events_lock = SPIN_LOCK_UNLOCKED; | 160 | static DEFINE_SPINLOCK(events_lock); |
161 | 161 | ||
162 | 162 | ||
163 | #define PCMCIA_SOCKET_KEY_5V 1 | 163 | #define PCMCIA_SOCKET_KEY_5V 1 |
@@ -644,7 +644,7 @@ static struct platform_device m8xx_device = { | |||
644 | }; | 644 | }; |
645 | 645 | ||
646 | static u32 pending_events[PCMCIA_SOCKETS_NO]; | 646 | static u32 pending_events[PCMCIA_SOCKETS_NO]; |
647 | static spinlock_t pending_event_lock = SPIN_LOCK_UNLOCKED; | 647 | static DEFINE_SPINLOCK(pending_event_lock); |
648 | 648 | ||
649 | static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs) | 649 | static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs) |
650 | { | 650 | { |
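Editor's note: all of the SPIN_LOCK_UNLOCKED conversions in this series (here, rio-access, rtc-sa1100, rtc-vr41xx, dasd_eer) are the same mechanical change. DEFINE_SPINLOCK() expands to whatever initializer the current configuration needs, so the declaration keeps working when spinlock debugging or lockdep adds fields to spinlock_t:

	/* old style, deprecated: open-codes the initializer */
	static spinlock_t events_lock_old = SPIN_LOCK_UNLOCKED;

	/* new style: declares and initializes in one macro */
	static DEFINE_SPINLOCK(events_lock);

	/* locks embedded in runtime-allocated structures use spin_lock_init() instead */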
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c index b9fab2ae3a36..8b56bbdd011e 100644 --- a/drivers/rapidio/rio-access.c +++ b/drivers/rapidio/rio-access.c | |||
@@ -17,8 +17,8 @@ | |||
17 | * These interrupt-safe spinlocks protect all accesses to RIO | 17 | * These interrupt-safe spinlocks protect all accesses to RIO |
18 | * configuration space and doorbell access. | 18 | * configuration space and doorbell access. |
19 | */ | 19 | */ |
20 | static spinlock_t rio_config_lock = SPIN_LOCK_UNLOCKED; | 20 | static DEFINE_SPINLOCK(rio_config_lock); |
21 | static spinlock_t rio_doorbell_lock = SPIN_LOCK_UNLOCKED; | 21 | static DEFINE_SPINLOCK(rio_doorbell_lock); |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Wrappers for all RIO configuration access functions. They just check | 24 | * Wrappers for all RIO configuration access functions. They just check |
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 5396beec30d0..1cb61a761cb2 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c | |||
@@ -94,7 +94,9 @@ exit_kfree: | |||
94 | kfree(rtc); | 94 | kfree(rtc); |
95 | 95 | ||
96 | exit_idr: | 96 | exit_idr: |
97 | mutex_lock(&idr_lock); | ||
97 | idr_remove(&rtc_idr, id); | 98 | idr_remove(&rtc_idr, id); |
99 | mutex_unlock(&idr_lock); | ||
98 | 100 | ||
99 | exit: | 101 | exit: |
100 | dev_err(dev, "rtc core: unable to register %s, err = %d\n", | 102 | dev_err(dev, "rtc core: unable to register %s, err = %d\n", |
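Editor's note: the rtc class fix takes idr_lock around idr_remove() in the error path, matching the locking already used when the id was handed out; the idr's internal layer cache is not safe against concurrent update. The change amounts to a symmetric helper like this sketch (idr_lock and rtc_idr are the names used in rtc/class.c):

	static void rtc_free_id(int id)
	{
		mutex_lock(&idr_lock);
		idr_remove(&rtc_idr, id);
		mutex_unlock(&idr_lock);
	}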
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c index ecafbad41a24..762521a1419c 100644 --- a/drivers/rtc/rtc-ds1553.c +++ b/drivers/rtc/rtc-ds1553.c | |||
@@ -226,7 +226,7 @@ static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
226 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); | 226 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); |
227 | 227 | ||
228 | if (pdata->irq < 0) | 228 | if (pdata->irq < 0) |
229 | return -ENOIOCTLCMD; | 229 | return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ |
230 | switch (cmd) { | 230 | switch (cmd) { |
231 | case RTC_AIE_OFF: | 231 | case RTC_AIE_OFF: |
232 | pdata->irqen &= ~RTC_AF; | 232 | pdata->irqen &= ~RTC_AF; |
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index ab486fbc828d..9cd1cb304bb2 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | static unsigned long rtc_freq = 1024; | 46 | static unsigned long rtc_freq = 1024; |
47 | static struct rtc_time rtc_alarm; | 47 | static struct rtc_time rtc_alarm; |
48 | static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED; | 48 | static DEFINE_SPINLOCK(sa1100_rtc_lock); |
49 | 49 | ||
50 | static int rtc_update_alarm(struct rtc_time *alrm) | 50 | static int rtc_update_alarm(struct rtc_time *alrm) |
51 | { | 51 | { |
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index 33e029207e26..4b9291dd4443 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c | |||
@@ -93,7 +93,7 @@ static void __iomem *rtc2_base; | |||
93 | 93 | ||
94 | static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */ | 94 | static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */ |
95 | 95 | ||
96 | static spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; | 96 | static DEFINE_SPINLOCK(rtc_lock); |
97 | static char rtc_name[] = "RTC"; | 97 | static char rtc_name[] = "RTC"; |
98 | static unsigned long periodic_frequency; | 98 | static unsigned long periodic_frequency; |
99 | static unsigned long periodic_count; | 99 | static unsigned long periodic_count; |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 2d946b6ca074..2d8af709947f 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -89,7 +89,7 @@ struct eerbuffer { | |||
89 | }; | 89 | }; |
90 | 90 | ||
91 | static LIST_HEAD(bufferlist); | 91 | static LIST_HEAD(bufferlist); |
92 | static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; | 92 | static DEFINE_SPINLOCK(bufferlock); |
93 | static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); | 93 | static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); |
94 | 94 | ||
95 | /* | 95 | /* |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index c84b02aec1f3..96a81cd17617 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -501,7 +501,7 @@ config SCSI_ATA_PIIX | |||
501 | tristate "Intel PIIX/ICH SATA support" | 501 | tristate "Intel PIIX/ICH SATA support" |
502 | depends on SCSI_SATA && PCI | 502 | depends on SCSI_SATA && PCI |
503 | help | 503 | help |
504 | This option enables support for ICH5 Serial ATA. | 504 | This option enables support for ICH5/6/7/8 Serial ATA. |
505 | If PATA support was enabled previously, this enables | 505 | If PATA support was enabled previously, this enables |
506 | support for select Intel PIIX/ICH PATA host controllers. | 506 | support for select Intel PIIX/ICH PATA host controllers. |
507 | 507 | ||
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 4bb77f62b3b9..f05946777718 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #include <asm/io.h> | 48 | #include <asm/io.h> |
49 | 49 | ||
50 | #define DRV_NAME "ahci" | 50 | #define DRV_NAME "ahci" |
51 | #define DRV_VERSION "1.3" | 51 | #define DRV_VERSION "2.0" |
52 | 52 | ||
53 | 53 | ||
54 | enum { | 54 | enum { |
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 521b718763f6..94b1261a259d 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c | |||
@@ -93,7 +93,7 @@ | |||
93 | #include <linux/libata.h> | 93 | #include <linux/libata.h> |
94 | 94 | ||
95 | #define DRV_NAME "ata_piix" | 95 | #define DRV_NAME "ata_piix" |
96 | #define DRV_VERSION "1.10" | 96 | #define DRV_VERSION "2.00" |
97 | 97 | ||
98 | enum { | 98 | enum { |
99 | PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ | 99 | PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ |
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 6c66877be2bf..d1c1c30d123f 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -88,6 +88,10 @@ int libata_fua = 0; | |||
88 | module_param_named(fua, libata_fua, int, 0444); | 88 | module_param_named(fua, libata_fua, int, 0444); |
89 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); | 89 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); |
90 | 90 | ||
91 | static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; | ||
92 | module_param(ata_probe_timeout, int, 0444); | ||
93 | MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); | ||
94 | |||
91 | MODULE_AUTHOR("Jeff Garzik"); | 95 | MODULE_AUTHOR("Jeff Garzik"); |
92 | MODULE_DESCRIPTION("Library module for ATA devices"); | 96 | MODULE_DESCRIPTION("Library module for ATA devices"); |
93 | MODULE_LICENSE("GPL"); | 97 | MODULE_LICENSE("GPL"); |
@@ -777,11 +781,9 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device) | |||
777 | void ata_dev_select(struct ata_port *ap, unsigned int device, | 781 | void ata_dev_select(struct ata_port *ap, unsigned int device, |
778 | unsigned int wait, unsigned int can_sleep) | 782 | unsigned int wait, unsigned int can_sleep) |
779 | { | 783 | { |
780 | if (ata_msg_probe(ap)) { | 784 | if (ata_msg_probe(ap)) |
781 | ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: " | 785 | ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: " |
782 | "device %u, wait %u\n", | 786 | "device %u, wait %u\n", ap->id, device, wait); |
783 | ap->id, device, wait); | ||
784 | } | ||
785 | 787 | ||
786 | if (wait) | 788 | if (wait) |
787 | ata_wait_idle(ap); | 789 | ata_wait_idle(ap); |
@@ -950,7 +952,8 @@ void ata_port_flush_task(struct ata_port *ap) | |||
950 | */ | 952 | */ |
951 | if (!cancel_delayed_work(&ap->port_task)) { | 953 | if (!cancel_delayed_work(&ap->port_task)) { |
952 | if (ata_msg_ctl(ap)) | 954 | if (ata_msg_ctl(ap)) |
953 | ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__); | 955 | ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", |
956 | __FUNCTION__); | ||
954 | flush_workqueue(ata_wq); | 957 | flush_workqueue(ata_wq); |
955 | } | 958 | } |
956 | 959 | ||
@@ -1059,7 +1062,7 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1059 | 1062 | ||
1060 | spin_unlock_irqrestore(ap->lock, flags); | 1063 | spin_unlock_irqrestore(ap->lock, flags); |
1061 | 1064 | ||
1062 | rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL); | 1065 | rc = wait_for_completion_timeout(&wait, ata_probe_timeout); |
1063 | 1066 | ||
1064 | ata_port_flush_task(ap); | 1067 | ata_port_flush_task(ap); |
1065 | 1068 | ||
@@ -1081,7 +1084,7 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1081 | 1084 | ||
1082 | if (ata_msg_warn(ap)) | 1085 | if (ata_msg_warn(ap)) |
1083 | ata_dev_printk(dev, KERN_WARNING, | 1086 | ata_dev_printk(dev, KERN_WARNING, |
1084 | "qc timeout (cmd 0x%x)\n", command); | 1087 | "qc timeout (cmd 0x%x)\n", command); |
1085 | } | 1088 | } |
1086 | 1089 | ||
1087 | spin_unlock_irqrestore(ap->lock, flags); | 1090 | spin_unlock_irqrestore(ap->lock, flags); |
@@ -1093,9 +1096,9 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1093 | 1096 | ||
1094 | if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) { | 1097 | if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) { |
1095 | if (ata_msg_warn(ap)) | 1098 | if (ata_msg_warn(ap)) |
1096 | ata_dev_printk(dev, KERN_WARNING, | 1099 | ata_dev_printk(dev, KERN_WARNING, |
1097 | "zero err_mask for failed " | 1100 | "zero err_mask for failed " |
1098 | "internal command, assuming AC_ERR_OTHER\n"); | 1101 | "internal command, assuming AC_ERR_OTHER\n"); |
1099 | qc->err_mask |= AC_ERR_OTHER; | 1102 | qc->err_mask |= AC_ERR_OTHER; |
1100 | } | 1103 | } |
1101 | 1104 | ||
@@ -1132,6 +1135,33 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1132 | } | 1135 | } |
1133 | 1136 | ||
1134 | /** | 1137 | /** |
1138 | * ata_do_simple_cmd - execute simple internal command | ||
1139 | * @dev: Device to which the command is sent | ||
1140 | * @cmd: Opcode to execute | ||
1141 | * | ||
1142 | * Execute a 'simple' command, that only consists of the opcode | ||
1143 | * 'cmd' itself, without filling any other registers | ||
1144 | * | ||
1145 | * LOCKING: | ||
1146 | * Kernel thread context (may sleep). | ||
1147 | * | ||
1148 | * RETURNS: | ||
1149 | * Zero on success, AC_ERR_* mask on failure | ||
1150 | */ | ||
1151 | unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) | ||
1152 | { | ||
1153 | struct ata_taskfile tf; | ||
1154 | |||
1155 | ata_tf_init(dev, &tf); | ||
1156 | |||
1157 | tf.command = cmd; | ||
1158 | tf.flags |= ATA_TFLAG_DEVICE; | ||
1159 | tf.protocol = ATA_PROT_NODATA; | ||
1160 | |||
1161 | return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | ||
1162 | } | ||
1163 | |||
1164 | /** | ||
1135 | * ata_pio_need_iordy - check if iordy needed | 1165 | * ata_pio_need_iordy - check if iordy needed |
1136 | * @adev: ATA device | 1166 | * @adev: ATA device |
1137 | * | 1167 | * |
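Editor's note: ata_do_simple_cmd() wraps a no-data taskfile around a bare opcode. A hedged sketch of how a caller such as the reworked ata_flush_cache() (see the end of this file's diff) would use it; ata_try_flush_cache(), ata_id_has_flush_ext() and the ATA_CMD_FLUSH* opcodes are existing libata definitions, while the wrapper itself is illustrative only:

	static int flush_cache_sketch(struct ata_device *dev)
	{
		unsigned int err_mask;
		u8 cmd;

		if (!ata_try_flush_cache(dev))
			return 0;			/* nothing to flush on this device */

		if (ata_id_has_flush_ext(dev->id))
			cmd = ATA_CMD_FLUSH_EXT;
		else
			cmd = ATA_CMD_FLUSH;

		err_mask = ata_do_simple_cmd(dev, cmd);
		if (err_mask) {
			ata_dev_printk(dev, KERN_ERR,
				       "flush failed (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}

		return 0;
	}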
@@ -1193,8 +1223,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1193 | int rc; | 1223 | int rc; |
1194 | 1224 | ||
1195 | if (ata_msg_ctl(ap)) | 1225 | if (ata_msg_ctl(ap)) |
1196 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", | 1226 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", |
1197 | __FUNCTION__, ap->id, dev->devno); | 1227 | __FUNCTION__, ap->id, dev->devno); |
1198 | 1228 | ||
1199 | ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ | 1229 | ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ |
1200 | 1230 | ||
@@ -1263,9 +1293,9 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1263 | return 0; | 1293 | return 0; |
1264 | 1294 | ||
1265 | err_out: | 1295 | err_out: |
1266 | if (ata_msg_warn(ap)) | 1296 | if (ata_msg_warn(ap)) |
1267 | ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " | 1297 | ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " |
1268 | "(%s, err_mask=0x%x)\n", reason, err_mask); | 1298 | "(%s, err_mask=0x%x)\n", reason, err_mask); |
1269 | return rc; | 1299 | return rc; |
1270 | } | 1300 | } |
1271 | 1301 | ||
@@ -1318,19 +1348,21 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1318 | int i, rc; | 1348 | int i, rc; |
1319 | 1349 | ||
1320 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { | 1350 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { |
1321 | ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n", | 1351 | ata_dev_printk(dev, KERN_INFO, |
1322 | __FUNCTION__, ap->id, dev->devno); | 1352 | "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n", |
1353 | __FUNCTION__, ap->id, dev->devno); | ||
1323 | return 0; | 1354 | return 0; |
1324 | } | 1355 | } |
1325 | 1356 | ||
1326 | if (ata_msg_probe(ap)) | 1357 | if (ata_msg_probe(ap)) |
1327 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", | 1358 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", |
1328 | __FUNCTION__, ap->id, dev->devno); | 1359 | __FUNCTION__, ap->id, dev->devno); |
1329 | 1360 | ||
1330 | /* print device capabilities */ | 1361 | /* print device capabilities */ |
1331 | if (ata_msg_probe(ap)) | 1362 | if (ata_msg_probe(ap)) |
1332 | ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x " | 1363 | ata_dev_printk(dev, KERN_DEBUG, |
1333 | "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", | 1364 | "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " |
1365 | "85:%04x 86:%04x 87:%04x 88:%04x\n", | ||
1334 | __FUNCTION__, | 1366 | __FUNCTION__, |
1335 | id[49], id[82], id[83], id[84], | 1367 | id[49], id[82], id[83], id[84], |
1336 | id[85], id[86], id[87], id[88]); | 1368 | id[85], id[86], id[87], id[88]); |
@@ -1402,14 +1434,16 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1402 | ata_id_major_version(id), | 1434 | ata_id_major_version(id), |
1403 | ata_mode_string(xfer_mask), | 1435 | ata_mode_string(xfer_mask), |
1404 | (unsigned long long)dev->n_sectors, | 1436 | (unsigned long long)dev->n_sectors, |
1405 | dev->cylinders, dev->heads, dev->sectors); | 1437 | dev->cylinders, dev->heads, |
1438 | dev->sectors); | ||
1406 | } | 1439 | } |
1407 | 1440 | ||
1408 | if (dev->id[59] & 0x100) { | 1441 | if (dev->id[59] & 0x100) { |
1409 | dev->multi_count = dev->id[59] & 0xff; | 1442 | dev->multi_count = dev->id[59] & 0xff; |
1410 | if (ata_msg_info(ap)) | 1443 | if (ata_msg_info(ap)) |
1411 | ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n", | 1444 | ata_dev_printk(dev, KERN_INFO, |
1412 | ap->id, dev->devno, dev->multi_count); | 1445 | "ata%u: dev %u multi count %u\n", |
1446 | ap->id, dev->devno, dev->multi_count); | ||
1413 | } | 1447 | } |
1414 | 1448 | ||
1415 | dev->cdb_len = 16; | 1449 | dev->cdb_len = 16; |
@@ -1422,8 +1456,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1422 | rc = atapi_cdb_len(id); | 1456 | rc = atapi_cdb_len(id); |
1423 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { | 1457 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { |
1424 | if (ata_msg_warn(ap)) | 1458 | if (ata_msg_warn(ap)) |
1425 | ata_dev_printk(dev, KERN_WARNING, | 1459 | ata_dev_printk(dev, KERN_WARNING, |
1426 | "unsupported CDB len\n"); | 1460 | "unsupported CDB len\n"); |
1427 | rc = -EINVAL; | 1461 | rc = -EINVAL; |
1428 | goto err_out_nosup; | 1462 | goto err_out_nosup; |
1429 | } | 1463 | } |
@@ -1466,8 +1500,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info) | |||
1466 | 1500 | ||
1467 | err_out_nosup: | 1501 | err_out_nosup: |
1468 | if (ata_msg_probe(ap)) | 1502 | if (ata_msg_probe(ap)) |
1469 | ata_dev_printk(dev, KERN_DEBUG, | 1503 | ata_dev_printk(dev, KERN_DEBUG, |
1470 | "%s: EXIT, err\n", __FUNCTION__); | 1504 | "%s: EXIT, err\n", __FUNCTION__); |
1471 | return rc; | 1505 | return rc; |
1472 | } | 1506 | } |
1473 | 1507 | ||
@@ -3527,7 +3561,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) | |||
3527 | * Inherited from caller. | 3561 | * Inherited from caller. |
3528 | */ | 3562 | */ |
3529 | 3563 | ||
3530 | void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, | 3564 | void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, |
3531 | unsigned int buflen, int write_data) | 3565 | unsigned int buflen, int write_data) |
3532 | { | 3566 | { |
3533 | struct ata_port *ap = adev->ap; | 3567 | struct ata_port *ap = adev->ap; |
@@ -3573,7 +3607,7 @@ void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, | |||
3573 | * Inherited from caller. | 3607 | * Inherited from caller. |
3574 | */ | 3608 | */ |
3575 | 3609 | ||
3576 | void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, | 3610 | void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, |
3577 | unsigned int buflen, int write_data) | 3611 | unsigned int buflen, int write_data) |
3578 | { | 3612 | { |
3579 | struct ata_port *ap = adev->ap; | 3613 | struct ata_port *ap = adev->ap; |
@@ -3607,7 +3641,7 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, | |||
3607 | * @buflen: buffer length | 3641 | * @buflen: buffer length |
3608 | * @write_data: read/write | 3642 | * @write_data: read/write |
3609 | * | 3643 | * |
3610 | * Transfer data from/to the device data register by PIO. Do the | 3644 | * Transfer data from/to the device data register by PIO. Do the |
3611 | * transfer with interrupts disabled. | 3645 | * transfer with interrupts disabled. |
3612 | * | 3646 | * |
3613 | * LOCKING: | 3647 | * LOCKING: |
@@ -4946,31 +4980,9 @@ int ata_port_offline(struct ata_port *ap) | |||
4946 | return 0; | 4980 | return 0; |
4947 | } | 4981 | } |
4948 | 4982 | ||
4949 | /* | 4983 | int ata_flush_cache(struct ata_device *dev) |
4950 | * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, | ||
4951 | * without filling any other registers | ||
4952 | */ | ||
4953 | static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) | ||
4954 | { | ||
4955 | struct ata_taskfile tf; | ||
4956 | int err; | ||
4957 | |||
4958 | ata_tf_init(dev, &tf); | ||
4959 | |||
4960 | tf.command = cmd; | ||
4961 | tf.flags |= ATA_TFLAG_DEVICE; | ||
4962 | tf.protocol = ATA_PROT_NODATA; | ||
4963 | |||
4964 | err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); | ||
4965 | if (err) | ||
4966 | ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n", | ||
4967 | __FUNCTION__, err); | ||
4968 | |||
4969 | return err; | ||
4970 | } | ||
4971 | |||
4972 | static int ata_flush_cache(struct ata_device *dev) | ||
4973 | { | 4984 | { |
4985 | unsigned int err_mask; | ||
4974 | u8 cmd; | 4986 | u8 cmd; |
4975 | 4987 | ||
4976 | if (!ata_try_flush_cache(dev)) | 4988 | if (!ata_try_flush_cache(dev)) |
@@ -4981,17 +4993,41 @@ static int ata_flush_cache(struct ata_device *dev) | |||
4981 | else | 4993 | else |
4982 | cmd = ATA_CMD_FLUSH; | 4994 | cmd = ATA_CMD_FLUSH; |
4983 | 4995 | ||
4984 | return ata_do_simple_cmd(dev, cmd); | 4996 | err_mask = ata_do_simple_cmd(dev, cmd); |
4997 | if (err_mask) { | ||
4998 | ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); | ||
4999 | return -EIO; | ||
5000 | } | ||
5001 | |||
5002 | return 0; | ||
4985 | } | 5003 | } |
4986 | 5004 | ||
4987 | static int ata_standby_drive(struct ata_device *dev) | 5005 | static int ata_standby_drive(struct ata_device *dev) |
4988 | { | 5006 | { |
4989 | return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); | 5007 | unsigned int err_mask; |
5008 | |||
5009 | err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); | ||
5010 | if (err_mask) { | ||
5011 | ata_dev_printk(dev, KERN_ERR, "failed to standby drive " | ||
5012 | "(err_mask=0x%x)\n", err_mask); | ||
5013 | return -EIO; | ||
5014 | } | ||
5015 | |||
5016 | return 0; | ||
4990 | } | 5017 | } |
4991 | 5018 | ||
4992 | static int ata_start_drive(struct ata_device *dev) | 5019 | static int ata_start_drive(struct ata_device *dev) |
4993 | { | 5020 | { |
4994 | return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE); | 5021 | unsigned int err_mask; |
5022 | |||
5023 | err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE); | ||
5024 | if (err_mask) { | ||
5025 | ata_dev_printk(dev, KERN_ERR, "failed to start drive " | ||
5026 | "(err_mask=0x%x)\n", err_mask); | ||
5027 | return -EIO; | ||
5028 | } | ||
5029 | |||
5030 | return 0; | ||
4995 | } | 5031 | } |
4996 | 5032 | ||
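The hunk above drops the file-local copy of ata_do_simple_cmd() and has the three power-management helpers translate its error mask into a plain errno; the helper itself becomes non-static and is exported through libata.h further down. As a hedged sketch pieced together from the deleted static version above and the new unsigned-int prototype added to libata.h (not copied verbatim from the patch), the shared helper looks roughly like this:

/* Sketch only: issue a single no-data command built from just an opcode
 * and hand the raw error mask back to the caller, who decides how to
 * report it (ata_flush_cache() above turns a non-zero mask into -EIO). */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);		/* start from a clean taskfile */
	tf.command = cmd;		/* opcode only, no other registers */
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}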
4997 | /** | 5033 | /** |
@@ -5212,7 +5248,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, | |||
5212 | ap->msg_enable = 0x00FF; | 5248 | ap->msg_enable = 0x00FF; |
5213 | #elif defined(ATA_DEBUG) | 5249 | #elif defined(ATA_DEBUG) |
5214 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; | 5250 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; |
5215 | #else | 5251 | #else |
5216 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; | 5252 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; |
5217 | #endif | 5253 | #endif |
5218 | 5254 | ||
@@ -5709,6 +5745,7 @@ int ata_pci_device_resume(struct pci_dev *pdev) | |||
5709 | 5745 | ||
5710 | static int __init ata_init(void) | 5746 | static int __init ata_init(void) |
5711 | { | 5747 | { |
5748 | ata_probe_timeout *= HZ; | ||
5712 | ata_wq = create_workqueue("ata"); | 5749 | ata_wq = create_workqueue("ata"); |
5713 | if (!ata_wq) | 5750 | if (!ata_wq) |
5714 | return -ENOMEM; | 5751 | return -ENOMEM; |
@@ -5733,7 +5770,7 @@ module_init(ata_init); | |||
5733 | module_exit(ata_exit); | 5770 | module_exit(ata_exit); |
5734 | 5771 | ||
5735 | static unsigned long ratelimit_time; | 5772 | static unsigned long ratelimit_time; |
5736 | static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED; | 5773 | static DEFINE_SPINLOCK(ata_ratelimit_lock); |
5737 | 5774 | ||
5738 | int ata_ratelimit(void) | 5775 | int ata_ratelimit(void) |
5739 | { | 5776 | { |
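Several files in this series (libata-core.c here, drivers/sn/ioc3.c and the hp680 backlight driver below) convert open-coded lock initializers to the DEFINE_* macros, which declare and statically initialize the lock in one step and keep working when lock debugging adds extra fields. A minimal before/after fragment, shown for illustration rather than taken verbatim from any one file:

#include <linux/spinlock.h>

/* old style, removed by this series:
 *   static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
 *   static rwlock_t ioc3_submodules_lock = RW_LOCK_UNLOCKED;
 */
static DEFINE_SPINLOCK(ata_ratelimit_lock);	/* declare + initialize spinlock */
static DEFINE_RWLOCK(ioc3_submodules_lock);	/* rwlock equivalent */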
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index 823385981a7a..bf5a72aca8a4 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c | |||
@@ -93,6 +93,38 @@ static int ata_ering_map(struct ata_ering *ering, | |||
93 | return rc; | 93 | return rc; |
94 | } | 94 | } |
95 | 95 | ||
96 | static unsigned int ata_eh_dev_action(struct ata_device *dev) | ||
97 | { | ||
98 | struct ata_eh_context *ehc = &dev->ap->eh_context; | ||
99 | |||
100 | return ehc->i.action | ehc->i.dev_action[dev->devno]; | ||
101 | } | ||
102 | |||
103 | static void ata_eh_clear_action(struct ata_device *dev, | ||
104 | struct ata_eh_info *ehi, unsigned int action) | ||
105 | { | ||
106 | int i; | ||
107 | |||
108 | if (!dev) { | ||
109 | ehi->action &= ~action; | ||
110 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
111 | ehi->dev_action[i] &= ~action; | ||
112 | } else { | ||
113 | /* doesn't make sense for port-wide EH actions */ | ||
114 | WARN_ON(!(action & ATA_EH_PERDEV_MASK)); | ||
115 | |||
116 | /* break ehi->action into ehi->dev_action */ | ||
117 | if (ehi->action & action) { | ||
118 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
119 | ehi->dev_action[i] |= ehi->action & action; | ||
120 | ehi->action &= ~action; | ||
121 | } | ||
122 | |||
123 | /* turn off the specified per-dev action */ | ||
124 | ehi->dev_action[dev->devno] &= ~action; | ||
125 | } | ||
126 | } | ||
127 | |||
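The two helpers added above centralize how per-device EH actions are read (ata_eh_dev_action() ORs the port-wide and per-device masks) and cleared (ata_eh_clear_action() first fans a port-wide bit out to every dev_action[] slot, then clears it for the one device that has been handled). A standalone demo of that fan-out-then-clear mask manipulation, using made-up flag values rather than the real ATA_EH_* constants:

#include <stdio.h>

#define NR_DEVICES	2
#define EH_REVALIDATE	(1 << 0)	/* stand-in for a per-device EH action bit */

int main(void)
{
	unsigned int action = EH_REVALIDATE;		/* pending port-wide action */
	unsigned int dev_action[NR_DEVICES] = { 0, 0 };
	int done_devno = 0, i;

	/* break the port-wide bit into per-device bits */
	if (action & EH_REVALIDATE) {
		for (i = 0; i < NR_DEVICES; i++)
			dev_action[i] |= action & EH_REVALIDATE;
		action &= ~EH_REVALIDATE;
	}

	/* clear it only for the device that has been handled */
	dev_action[done_devno] &= ~EH_REVALIDATE;

	printf("port=0x%x dev0=0x%x dev1=0x%x\n",
	       action, dev_action[0], dev_action[1]);	/* port=0x0 dev0=0x0 dev1=0x1 */
	return 0;
}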
96 | /** | 128 | /** |
97 | * ata_scsi_timed_out - SCSI layer time out callback | 129 | * ata_scsi_timed_out - SCSI layer time out callback |
98 | * @cmd: timed out SCSI command | 130 | * @cmd: timed out SCSI command |
@@ -702,32 +734,11 @@ static void ata_eh_detach_dev(struct ata_device *dev) | |||
702 | ap->flags |= ATA_FLAG_SCSI_HOTPLUG; | 734 | ap->flags |= ATA_FLAG_SCSI_HOTPLUG; |
703 | } | 735 | } |
704 | 736 | ||
705 | spin_unlock_irqrestore(ap->lock, flags); | 737 | /* clear per-dev EH actions */ |
706 | } | 738 | ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK); |
707 | 739 | ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK); | |
708 | static void ata_eh_clear_action(struct ata_device *dev, | ||
709 | struct ata_eh_info *ehi, unsigned int action) | ||
710 | { | ||
711 | int i; | ||
712 | 740 | ||
713 | if (!dev) { | 741 | spin_unlock_irqrestore(ap->lock, flags); |
714 | ehi->action &= ~action; | ||
715 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
716 | ehi->dev_action[i] &= ~action; | ||
717 | } else { | ||
718 | /* doesn't make sense for port-wide EH actions */ | ||
719 | WARN_ON(!(action & ATA_EH_PERDEV_MASK)); | ||
720 | |||
721 | /* break ehi->action into ehi->dev_action */ | ||
722 | if (ehi->action & action) { | ||
723 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
724 | ehi->dev_action[i] |= ehi->action & action; | ||
725 | ehi->action &= ~action; | ||
726 | } | ||
727 | |||
728 | /* turn off the specified per-dev action */ | ||
729 | ehi->dev_action[dev->devno] &= ~action; | ||
730 | } | ||
731 | } | 742 | } |
732 | 743 | ||
733 | /** | 744 | /** |
@@ -1592,7 +1603,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap, | |||
1592 | unsigned int action; | 1603 | unsigned int action; |
1593 | 1604 | ||
1594 | dev = &ap->device[i]; | 1605 | dev = &ap->device[i]; |
1595 | action = ehc->i.action | ehc->i.dev_action[dev->devno]; | 1606 | action = ata_eh_dev_action(dev); |
1596 | 1607 | ||
1597 | if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { | 1608 | if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { |
1598 | if (ata_port_offline(ap)) { | 1609 | if (ata_port_offline(ap)) { |
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index 93d18a74c401..2915bca691e8 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c | |||
@@ -222,9 +222,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
222 | && copy_to_user(arg + sizeof(args), argbuf, argsize)) | 222 | && copy_to_user(arg + sizeof(args), argbuf, argsize)) |
223 | rc = -EFAULT; | 223 | rc = -EFAULT; |
224 | error: | 224 | error: |
225 | if (argbuf) | 225 | kfree(argbuf); |
226 | kfree(argbuf); | ||
227 | |||
228 | return rc; | 226 | return rc; |
229 | } | 227 | } |
230 | 228 | ||
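This cleanup (and the matching one in au1100fb.c further down) drops the NULL check in front of kfree(), which is defined to be a no-op when passed NULL, so the error path can free unconditionally. A userspace illustration of the same idiom, relying on the identical guarantee that free(NULL) makes:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *argbuf = NULL;	/* pretend the allocation was skipped or failed */

	free(argbuf);		/* free(NULL), like kfree(NULL), is a defined no-op */
	printf("freeing NULL is safe\n");
	return 0;
}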
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h index bdd488897096..c325679d9b54 100644 --- a/drivers/scsi/libata.h +++ b/drivers/scsi/libata.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #define __LIBATA_H__ | 29 | #define __LIBATA_H__ |
30 | 30 | ||
31 | #define DRV_NAME "libata" | 31 | #define DRV_NAME "libata" |
32 | #define DRV_VERSION "1.30" /* must be exactly four chars */ | 32 | #define DRV_VERSION "2.00" /* must be exactly four chars */ |
33 | 33 | ||
34 | struct ata_scsi_args { | 34 | struct ata_scsi_args { |
35 | struct ata_device *dev; | 35 | struct ata_device *dev; |
@@ -50,6 +50,7 @@ extern void ata_port_flush_task(struct ata_port *ap); | |||
50 | extern unsigned ata_exec_internal(struct ata_device *dev, | 50 | extern unsigned ata_exec_internal(struct ata_device *dev, |
51 | struct ata_taskfile *tf, const u8 *cdb, | 51 | struct ata_taskfile *tf, const u8 *cdb, |
52 | int dma_dir, void *buf, unsigned int buflen); | 52 | int dma_dir, void *buf, unsigned int buflen); |
53 | extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); | ||
53 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | 54 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
54 | int post_reset, u16 *id); | 55 | int post_reset, u16 *id); |
55 | extern int ata_dev_configure(struct ata_device *dev, int print_info); | 56 | extern int ata_dev_configure(struct ata_device *dev, int print_info); |
@@ -64,6 +65,7 @@ extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); | |||
64 | extern void ata_dev_select(struct ata_port *ap, unsigned int device, | 65 | extern void ata_dev_select(struct ata_port *ap, unsigned int device, |
65 | unsigned int wait, unsigned int can_sleep); | 66 | unsigned int wait, unsigned int can_sleep); |
66 | extern void swap_buf_le16(u16 *buf, unsigned int buf_words); | 67 | extern void swap_buf_le16(u16 *buf, unsigned int buf_words); |
68 | extern int ata_flush_cache(struct ata_device *dev); | ||
67 | extern void ata_dev_init(struct ata_device *dev); | 69 | extern void ata_dev_init(struct ata_device *dev); |
68 | extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); | 70 | extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); |
69 | extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); | 71 | extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); |
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index d18e7e0932ef..5cc42c6054eb 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <linux/libata.h> | 44 | #include <linux/libata.h> |
45 | 45 | ||
46 | #define DRV_NAME "sata_nv" | 46 | #define DRV_NAME "sata_nv" |
47 | #define DRV_VERSION "0.9" | 47 | #define DRV_VERSION "2.0" |
48 | 48 | ||
49 | enum { | 49 | enum { |
50 | NV_PORTS = 2, | 50 | NV_PORTS = 2, |
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index bc9f918a7f28..51d86d750e84 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c | |||
@@ -46,12 +46,13 @@ | |||
46 | #include <linux/libata.h> | 46 | #include <linux/libata.h> |
47 | 47 | ||
48 | #define DRV_NAME "sata_sil" | 48 | #define DRV_NAME "sata_sil" |
49 | #define DRV_VERSION "1.0" | 49 | #define DRV_VERSION "2.0" |
50 | 50 | ||
51 | enum { | 51 | enum { |
52 | /* | 52 | /* |
53 | * host flags | 53 | * host flags |
54 | */ | 54 | */ |
55 | SIL_FLAG_NO_SATA_IRQ = (1 << 28), | ||
55 | SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), | 56 | SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), |
56 | SIL_FLAG_MOD15WRITE = (1 << 30), | 57 | SIL_FLAG_MOD15WRITE = (1 << 30), |
57 | 58 | ||
@@ -62,8 +63,9 @@ enum { | |||
62 | * Controller IDs | 63 | * Controller IDs |
63 | */ | 64 | */ |
64 | sil_3112 = 0, | 65 | sil_3112 = 0, |
65 | sil_3512 = 1, | 66 | sil_3112_no_sata_irq = 1, |
66 | sil_3114 = 2, | 67 | sil_3512 = 2, |
68 | sil_3114 = 3, | ||
67 | 69 | ||
68 | /* | 70 | /* |
69 | * Register offsets | 71 | * Register offsets |
@@ -123,8 +125,8 @@ static const struct pci_device_id sil_pci_tbl[] = { | |||
123 | { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, | 125 | { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, |
124 | { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, | 126 | { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, |
125 | { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, | 127 | { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, |
126 | { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, | 128 | { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq }, |
127 | { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, | 129 | { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq }, |
128 | { } /* terminate list */ | 130 | { } /* terminate list */ |
129 | }; | 131 | }; |
130 | 132 | ||
@@ -217,6 +219,16 @@ static const struct ata_port_info sil_port_info[] = { | |||
217 | .udma_mask = 0x3f, /* udma0-5 */ | 219 | .udma_mask = 0x3f, /* udma0-5 */ |
218 | .port_ops = &sil_ops, | 220 | .port_ops = &sil_ops, |
219 | }, | 221 | }, |
222 | /* sil_3112_no_sata_irq */ | ||
223 | { | ||
224 | .sht = &sil_sht, | ||
225 | .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE | | ||
226 | SIL_FLAG_NO_SATA_IRQ, | ||
227 | .pio_mask = 0x1f, /* pio0-4 */ | ||
228 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
229 | .udma_mask = 0x3f, /* udma0-5 */ | ||
230 | .port_ops = &sil_ops, | ||
231 | }, | ||
220 | /* sil_3512 */ | 232 | /* sil_3512 */ |
221 | { | 233 | { |
222 | .sht = &sil_sht, | 234 | .sht = &sil_sht, |
@@ -437,6 +449,10 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance, | |||
437 | if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) | 449 | if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) |
438 | continue; | 450 | continue; |
439 | 451 | ||
452 | /* turn off SATA_IRQ if not supported */ | ||
453 | if (ap->flags & SIL_FLAG_NO_SATA_IRQ) | ||
454 | bmdma2 &= ~SIL_DMA_SATA_IRQ; | ||
455 | |||
440 | if (bmdma2 == 0xffffffff || | 456 | if (bmdma2 == 0xffffffff || |
441 | !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ))) | 457 | !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ))) |
442 | continue; | 458 | continue; |
@@ -474,8 +490,9 @@ static void sil_thaw(struct ata_port *ap) | |||
474 | ata_chk_status(ap); | 490 | ata_chk_status(ap); |
475 | ata_bmdma_irq_clear(ap); | 491 | ata_bmdma_irq_clear(ap); |
476 | 492 | ||
477 | /* turn on SATA IRQ */ | 493 | /* turn on SATA IRQ if supported */ |
478 | writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien); | 494 | if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) |
495 | writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien); | ||
479 | 496 | ||
480 | /* turn on IRQ */ | 497 | /* turn on IRQ */ |
481 | tmp = readl(mmio_base + SIL_SYSCFG); | 498 | tmp = readl(mmio_base + SIL_SYSCFG); |
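The sata_sil changes introduce a dedicated board type so the two ATI SATA PCI IDs (0x1002:0x4379 and 0x1002:0x437a) get SIL_FLAG_NO_SATA_IRQ in their host flags; the flag then lands in ap->flags and gates both sil_thaw() and the interrupt handler. A condensed, illustrative recap of that plumbing (not the driver source verbatim):

/* PCI ID -> board index -> port_info host_flags -> ap->flags */
static const struct pci_device_id example_ids[] = {
	{ 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
	{ }	/* terminate list */
};

static void example_thaw(struct ata_port *ap, void __iomem *mmio_base)
{
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))	/* never enable SATA IRQ here */
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
}

static u32 example_irq_status(struct ata_port *ap, u32 bmdma2)
{
	if (ap->flags & SIL_FLAG_NO_SATA_IRQ)		/* mask stray SATA IRQ status */
		bmdma2 &= ~SIL_DMA_SATA_IRQ;
	return bmdma2;
}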
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index c8b477c67247..b5f8fa955679 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | 32 | ||
33 | #define DRV_NAME "sata_sil24" | 33 | #define DRV_NAME "sata_sil24" |
34 | #define DRV_VERSION "0.24" | 34 | #define DRV_VERSION "0.3" |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * Port request block (PRB) 32 bytes | 37 | * Port request block (PRB) 32 bytes |
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c index c94b870cf378..7566c2cabaf7 100644 --- a/drivers/scsi/sata_svw.c +++ b/drivers/scsi/sata_svw.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #endif /* CONFIG_PPC_OF */ | 54 | #endif /* CONFIG_PPC_OF */ |
55 | 55 | ||
56 | #define DRV_NAME "sata_svw" | 56 | #define DRV_NAME "sata_svw" |
57 | #define DRV_VERSION "1.8" | 57 | #define DRV_VERSION "2.0" |
58 | 58 | ||
59 | enum { | 59 | enum { |
60 | /* Taskfile registers offsets */ | 60 | /* Taskfile registers offsets */ |
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c index f668c997e9af..64f3c1aeed21 100644 --- a/drivers/scsi/sata_uli.c +++ b/drivers/scsi/sata_uli.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/libata.h> | 37 | #include <linux/libata.h> |
38 | 38 | ||
39 | #define DRV_NAME "sata_uli" | 39 | #define DRV_NAME "sata_uli" |
40 | #define DRV_VERSION "0.6" | 40 | #define DRV_VERSION "1.0" |
41 | 41 | ||
42 | enum { | 42 | enum { |
43 | uli_5289 = 0, | 43 | uli_5289 = 0, |
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index 322890b400a6..67c3d2999775 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <asm/io.h> | 47 | #include <asm/io.h> |
48 | 48 | ||
49 | #define DRV_NAME "sata_via" | 49 | #define DRV_NAME "sata_via" |
50 | #define DRV_VERSION "1.2" | 50 | #define DRV_VERSION "2.0" |
51 | 51 | ||
52 | enum board_ids_enum { | 52 | enum board_ids_enum { |
53 | vt6420, | 53 | vt6420, |
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 6d0c4f18e652..616fd9634b4b 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <linux/libata.h> | 47 | #include <linux/libata.h> |
48 | 48 | ||
49 | #define DRV_NAME "sata_vsc" | 49 | #define DRV_NAME "sata_vsc" |
50 | #define DRV_VERSION "1.2" | 50 | #define DRV_VERSION "2.0" |
51 | 51 | ||
52 | enum { | 52 | enum { |
53 | /* Interrupt register offsets (from chip base address) */ | 53 | /* Interrupt register offsets (from chip base address) */ |
@@ -443,16 +443,12 @@ err_out: | |||
443 | } | 443 | } |
444 | 444 | ||
445 | 445 | ||
446 | /* | ||
447 | * Intel 31244 is supposed to be identical. | ||
448 | * Compatibility is untested as of yet. | ||
449 | */ | ||
450 | static const struct pci_device_id vsc_sata_pci_tbl[] = { | 446 | static const struct pci_device_id vsc_sata_pci_tbl[] = { |
451 | { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174, | 447 | { PCI_VENDOR_ID_VITESSE, 0x7174, |
452 | PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, | 448 | PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, |
453 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244, | 449 | { PCI_VENDOR_ID_INTEL, 0x3200, |
454 | PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, | 450 | PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, |
455 | { } | 451 | { } /* terminate list */ |
456 | }; | 452 | }; |
457 | 453 | ||
458 | 454 | ||
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c index 501316b198e5..ed946311d3a4 100644 --- a/drivers/sn/ioc3.c +++ b/drivers/sn/ioc3.c | |||
@@ -26,7 +26,7 @@ static DECLARE_RWSEM(ioc3_devices_rwsem); | |||
26 | 26 | ||
27 | static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES]; | 27 | static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES]; |
28 | static struct ioc3_submodule *ioc3_ethernet; | 28 | static struct ioc3_submodule *ioc3_ethernet; |
29 | static rwlock_t ioc3_submodules_lock = RW_LOCK_UNLOCKED; | 29 | static DEFINE_RWLOCK(ioc3_submodules_lock); |
30 | 30 | ||
31 | /* NIC probing code */ | 31 | /* NIC probing code */ |
32 | 32 | ||
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index d63c3f485853..9ef68cd83bb4 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c | |||
@@ -743,8 +743,7 @@ void __exit au1100fb_cleanup(void) | |||
743 | { | 743 | { |
744 | driver_unregister(&au1100fb_driver); | 744 | driver_unregister(&au1100fb_driver); |
745 | 745 | ||
746 | if (drv_info.opt_mode) | 746 | kfree(drv_info.opt_mode); |
747 | kfree(drv_info.opt_mode); | ||
748 | } | 747 | } |
749 | 748 | ||
750 | module_init(au1100fb_init); | 749 | module_init(au1100fb_init); |
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index a71e984c93d4..ffc72ae3ada8 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | static int hp680bl_suspended; | 28 | static int hp680bl_suspended; |
29 | static int current_intensity = 0; | 29 | static int current_intensity = 0; |
30 | static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED; | 30 | static DEFINE_SPINLOCK(bl_lock); |
31 | static struct backlight_device *hp680_backlight_device; | 31 | static struct backlight_device *hp680_backlight_device; |
32 | 32 | ||
33 | static void hp680bl_send_intensity(struct backlight_device *bd) | 33 | static void hp680bl_send_intensity(struct backlight_device *bd) |
diff --git a/fs/Kconfig b/fs/Kconfig index 6c5051802bd2..6dc8cfd6d80c 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
@@ -1116,7 +1116,7 @@ config JFFS2_SUMMARY | |||
1116 | 1116 | ||
1117 | config JFFS2_FS_XATTR | 1117 | config JFFS2_FS_XATTR |
1118 | bool "JFFS2 XATTR support (EXPERIMENTAL)" | 1118 | bool "JFFS2 XATTR support (EXPERIMENTAL)" |
1119 | depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER | 1119 | depends on JFFS2_FS && EXPERIMENTAL |
1120 | default n | 1120 | default n |
1121 | help | 1121 | help |
1122 | Extended attributes are name:value pairs associated with inodes by | 1122 | Extended attributes are name:value pairs associated with inodes by |
@@ -1722,7 +1722,7 @@ config CIFS_STATS | |||
1722 | mounted by the cifs client to be displayed in /proc/fs/cifs/Stats | 1722 | mounted by the cifs client to be displayed in /proc/fs/cifs/Stats |
1723 | 1723 | ||
1724 | config CIFS_STATS2 | 1724 | config CIFS_STATS2 |
1725 | bool "CIFS extended statistics" | 1725 | bool "Extended statistics" |
1726 | depends on CIFS_STATS | 1726 | depends on CIFS_STATS |
1727 | help | 1727 | help |
1728 | Enabling this option will allow more detailed statistics on SMB | 1728 | Enabling this option will allow more detailed statistics on SMB |
@@ -1735,6 +1735,32 @@ config CIFS_STATS2 | |||
1735 | Unless you are a developer or are doing network performance analysis | 1735 | Unless you are a developer or are doing network performance analysis |
1736 | or tuning, say N. | 1736 | or tuning, say N. |
1737 | 1737 | ||
1738 | config CIFS_WEAK_PW_HASH | ||
1739 | bool "Support legacy servers which use weaker LANMAN security" | ||
1740 | depends on CIFS | ||
1741 | help | ||
1742 | Modern CIFS servers including Samba and most Windows versions | ||
1743 | (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos) | ||
1744 | security mechanisms. These hash the password more securely | ||
1745 | than the mechanisms used in the older LANMAN version of the | ||
1746 | SMB protocol needed to establish sessions with old SMB servers. | ||
1747 | |||
1748 | Enabling this option allows the cifs module to mount to older | ||
1749 | LANMAN based servers such as OS/2 and Windows 95, but such | ||
1750 | mounts may be less secure than mounts using NTLM or more recent | ||
1751 | security mechanisms if you are on a public network. Unless you | ||
1752 | have a need to access old SMB servers (and are on a private | ||
1753 | network) you probably want to say N. Even if this support | ||
1754 | is enabled in the kernel build, it will not be used | ||
1755 | automatically. At runtime LANMAN mounts are disabled but | ||
1756 | can be set to required (or optional) either in | ||
1757 | /proc/fs/cifs (see fs/cifs/README for more detail) or via an | ||
1758 | option on the mount command. This support is disabled by | ||
1759 | default in order to reduce the possibility of a downgrade | ||
1760 | attack. | ||
1761 | |||
1762 | If unsure, say N. | ||
1763 | |||
1738 | config CIFS_XATTR | 1764 | config CIFS_XATTR |
1739 | bool "CIFS extended attributes" | 1765 | bool "CIFS extended attributes" |
1740 | depends on CIFS | 1766 | depends on CIFS |
@@ -1763,6 +1789,16 @@ config CIFS_POSIX | |||
1763 | (such as Samba 3.10 and later) which can negotiate | 1789 | (such as Samba 3.10 and later) which can negotiate |
1764 | CIFS POSIX ACL support. If unsure, say N. | 1790 | CIFS POSIX ACL support. If unsure, say N. |
1765 | 1791 | ||
1792 | config CIFS_DEBUG2 | ||
1793 | bool "Enable additional CIFS debugging routines" | ||
1794 | help | ||
1795 | Enabling this option adds a few more debugging routines | ||
1796 | to the cifs code which slightly increases the size of | ||
1797 | the cifs module and can cause additional logging of debug | ||
1798 | messages in some error paths, slowing performance. This | ||
1799 | option can be turned off unless you are debugging | ||
1800 | cifs problems. If unsure, say N. | ||
1801 | |||
1766 | config CIFS_EXPERIMENTAL | 1802 | config CIFS_EXPERIMENTAL |
1767 | bool "CIFS Experimental Features (EXPERIMENTAL)" | 1803 | bool "CIFS Experimental Features (EXPERIMENTAL)" |
1768 | depends on CIFS && EXPERIMENTAL | 1804 | depends on CIFS && EXPERIMENTAL |
@@ -1778,7 +1814,7 @@ config CIFS_EXPERIMENTAL | |||
1778 | If unsure, say N. | 1814 | If unsure, say N. |
1779 | 1815 | ||
1780 | config CIFS_UPCALL | 1816 | config CIFS_UPCALL |
1781 | bool "CIFS Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)" | 1817 | bool "Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)" |
1782 | depends on CIFS_EXPERIMENTAL | 1818 | depends on CIFS_EXPERIMENTAL |
1783 | select CONNECTOR | 1819 | select CONNECTOR |
1784 | help | 1820 | help |
diff --git a/fs/buffer.c b/fs/buffer.c index 373bb6292bdc..f23bb647db47 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -564,7 +564,7 @@ still_busy: | |||
564 | * Completion handler for block_write_full_page() - pages which are unlocked | 564 | * Completion handler for block_write_full_page() - pages which are unlocked |
565 | * during I/O, and which have PageWriteback cleared upon I/O completion. | 565 | * during I/O, and which have PageWriteback cleared upon I/O completion. |
566 | */ | 566 | */ |
567 | void end_buffer_async_write(struct buffer_head *bh, int uptodate) | 567 | static void end_buffer_async_write(struct buffer_head *bh, int uptodate) |
568 | { | 568 | { |
569 | char b[BDEVNAME_SIZE]; | 569 | char b[BDEVNAME_SIZE]; |
570 | unsigned long flags; | 570 | unsigned long flags; |
@@ -3166,7 +3166,6 @@ EXPORT_SYMBOL(block_sync_page); | |||
3166 | EXPORT_SYMBOL(block_truncate_page); | 3166 | EXPORT_SYMBOL(block_truncate_page); |
3167 | EXPORT_SYMBOL(block_write_full_page); | 3167 | EXPORT_SYMBOL(block_write_full_page); |
3168 | EXPORT_SYMBOL(cont_prepare_write); | 3168 | EXPORT_SYMBOL(cont_prepare_write); |
3169 | EXPORT_SYMBOL(end_buffer_async_write); | ||
3170 | EXPORT_SYMBOL(end_buffer_read_sync); | 3169 | EXPORT_SYMBOL(end_buffer_read_sync); |
3171 | EXPORT_SYMBOL(end_buffer_write_sync); | 3170 | EXPORT_SYMBOL(end_buffer_write_sync); |
3172 | EXPORT_SYMBOL(file_fsync); | 3171 | EXPORT_SYMBOL(file_fsync); |
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 7271bb0257f6..a61d17ed1827 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -1,9 +1,24 @@ | |||
1 | Version 1.44 | ||
2 | ------------ | ||
3 | Rewritten sessionsetup support, including support for legacy SMB | ||
4 | session setup needed for OS/2 and older servers such as Windows 95 and 98. | ||
5 | Fix oops on ls to OS/2 servers. Add support for level 1 FindFirst | ||
6 | so we can do search (ls etc.) to OS/2. Do not send NTCreateX | ||
7 | or recent levels of FindFirst unless server says it supports NT SMBs | ||
8 | (instead use legacy equivalents from LANMAN dialect). Fix to allow | ||
9 | NTLMv2 authentication support (now can use stronger password hashing | ||
10 | on mount if the corresponding /proc/fs/cifs/SecurityFlags is set (0x4004)). | ||
11 | Allow override of global cifs security flags on mount via "sec=" option(s). | ||
12 | |||
1 | Version 1.43 | 13 | Version 1.43 |
2 | ------------ | 14 | ------------ |
3 | POSIX locking to servers which support CIFS POSIX Extensions | 15 | POSIX locking to servers which support CIFS POSIX Extensions |
4 | (disabled by default controlled by proc/fs/cifs/Experimental). | 16 | (disabled by default controlled by proc/fs/cifs/Experimental). |
5 | Handle conversion of long share names (especially Asian languages) | 17 | Handle conversion of long share names (especially Asian languages) |
6 | to Unicode during mount. | 18 | to Unicode during mount. Fix memory leak in sess struct on reconnect. |
19 | Fix rare oops after acpi suspend. Fix O_TRUNC opens to overwrite on | ||
20 | cifs open, which helps in the rare case when setpathinfo fails or the server does | ||
21 | not support it. | ||
7 | 22 | ||
8 | Version 1.42 | 23 | Version 1.42 |
9 | ------------ | 24 | ------------ |
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 58c77254a23b..a26f26ed5a17 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile | |||
@@ -3,4 +3,4 @@ | |||
3 | # | 3 | # |
4 | obj-$(CONFIG_CIFS) += cifs.o | 4 | obj-$(CONFIG_CIFS) += cifs.o |
5 | 5 | ||
6 | cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o | 6 | cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o |
diff --git a/fs/cifs/README b/fs/cifs/README index 0355003f4f0a..7986d0d97ace 100644 --- a/fs/cifs/README +++ b/fs/cifs/README | |||
@@ -443,7 +443,10 @@ A partial list of the supported mount options follows: | |||
443 | SFU does). In the future the bottom 9 bits of the mode | 443 | SFU does). In the future the bottom 9 bits of the mode |
444 | mode also will be emulated using queries of the security | 444 | mode also will be emulated using queries of the security |
445 | descriptor (ACL). | 445 | descriptor (ACL). |
446 | sec Security mode. Allowed values are: | 446 | sign Must use packet signing (helps avoid unwanted data modification |
447 | by intermediate systems in the route). Note that signing | ||
448 | does not work with lanman or plaintext authentication. | ||
449 | sec Security mode. Allowed values are: | ||
447 | none attempt to connection as a null user (no name) | 450 | none attempt to connection as a null user (no name) |
448 | krb5 Use Kerberos version 5 authentication | 451 | krb5 Use Kerberos version 5 authentication |
449 | krb5i Use Kerberos authentication and packet signing | 452 | krb5i Use Kerberos authentication and packet signing |
@@ -453,6 +456,8 @@ sec Security mode. Allowed values are: | |||
453 | server requires signing also can be the default) | 456 | server requires signing also can be the default) |
454 | ntlmv2 Use NTLMv2 password hashing | 457 | ntlmv2 Use NTLMv2 password hashing |
455 | ntlmv2i Use NTLMv2 password hashing with packet signing | 458 | ntlmv2i Use NTLMv2 password hashing with packet signing |
459 | lanman (if configured in kernel config) use older | ||
460 | lanman hash | ||
456 | 461 | ||
457 | The mount.cifs mount helper also accepts a few mount options before -o | 462 | The mount.cifs mount helper also accepts a few mount options before -o |
458 | including: | 463 | including: |
@@ -485,14 +490,34 @@ PacketSigningEnabled If set to one, cifs packet signing is enabled | |||
485 | it. If set to two, cifs packet signing is | 490 | it. If set to two, cifs packet signing is |
486 | required even if the server considers packet | 491 | required even if the server considers packet |
487 | signing optional. (default 1) | 492 | signing optional. (default 1) |
493 | SecurityFlags Flags which control security negotiation and | ||
494 | also packet signing. Authentication (may/must) | ||
495 | flags (e.g. for NTLM and/or NTLMv2) may be combined with | ||
496 | the signing flags. Specifying two different password | ||
497 | hashing mechanisms (as "must use") on the other hand | ||
498 | does not make much sense. Default flags are | ||
499 | 0x07007 | ||
500 | (NTLM, NTLMv2 and packet signing allowed). Maximum | ||
501 | allowable flags if you want to allow mounts to servers | ||
502 | using weaker password hashes is 0x37037 (lanman, | ||
503 | plaintext, ntlm, ntlmv2, signing allowed): | ||
504 | |||
505 | may use packet signing 0x00001 | ||
506 | must use packet signing 0x01001 | ||
507 | may use NTLM (most common password hash) 0x00002 | ||
508 | must use NTLM 0x02002 | ||
509 | may use NTLMv2 0x00004 | ||
510 | must use NTLMv2 0x04004 | ||
511 | may use Kerberos security (not implemented yet) 0x00008 | ||
512 | must use Kerberos (not implemented yet) 0x08008 | ||
513 | may use lanman (weak) password hash 0x00010 | ||
514 | must use lanman password hash 0x10010 | ||
515 | may use plaintext passwords 0x00020 | ||
516 | must use plaintext passwords 0x20020 | ||
517 | (reserved for future packet encryption) 0x00040 | ||
518 | |||
488 | cifsFYI If set to one, additional debug information is | 519 | cifsFYI If set to one, additional debug information is |
489 | logged to the system error log. (default 0) | 520 | logged to the system error log. (default 0) |
490 | ExtendedSecurity If set to one, SPNEGO session establishment | ||
491 | is allowed which enables more advanced | ||
492 | secure CIFS session establishment (default 0) | ||
493 | NTLMV2Enabled If set to one, more secure password hashes | ||
494 | are used when the server supports them and | ||
495 | when kerberos is not negotiated (default 0) | ||
496 | traceSMB If set to one, debug information is logged to the | 521 | traceSMB If set to one, debug information is logged to the |
497 | system error log with the start of smb requests | 522 | system error log with the start of smb requests |
498 | and responses (default 0) | 523 | and responses (default 0) |
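The SecurityFlags table added to the README follows a simple encoding: every "must use" value is the corresponding "may use" bit ORed with the same bit shifted up 12 places, and the quoted maximum of 0x37037 is that pattern applied to all five allowed mechanisms plus signing at once. A small standalone check of that arithmetic (the flag names here are illustrative, not the CIFSSEC_* macros from the cifs headers):

#include <stdio.h>

#define MAY_SIGN	0x00001
#define MAY_NTLM	0x00002
#define MAY_NTLMV2	0x00004
#define MAY_LANMAN	0x00010
#define MAY_PLNTXT	0x00020
#define MUST(may)	((may) | ((may) << 12))	/* e.g. 0x00002 -> 0x02002 */

int main(void)
{
	unsigned int weak_max = MUST(MAY_SIGN | MAY_NTLM | MAY_NTLMV2 |
				     MAY_LANMAN | MAY_PLNTXT);

	printf("must sign   = 0x%05x\n", MUST(MAY_SIGN));	/* 0x01001 */
	printf("must ntlmv2 = 0x%05x\n", MUST(MAY_NTLMV2));	/* 0x04004 */
	printf("weak max    = 0x%05x\n", weak_max);		/* 0x37037 */
	return 0;
}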
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 086ae8f4a207..031cdf293256 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -467,7 +467,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
467 | asn1_open(&ctx, security_blob, length); | 467 | asn1_open(&ctx, security_blob, length); |
468 | 468 | ||
469 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 469 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
470 | cFYI(1, ("Error decoding negTokenInit header ")); | 470 | cFYI(1, ("Error decoding negTokenInit header")); |
471 | return 0; | 471 | return 0; |
472 | } else if ((cls != ASN1_APL) || (con != ASN1_CON) | 472 | } else if ((cls != ASN1_APL) || (con != ASN1_CON) |
473 | || (tag != ASN1_EOC)) { | 473 | || (tag != ASN1_EOC)) { |
@@ -495,7 +495,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
495 | } | 495 | } |
496 | 496 | ||
497 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 497 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
498 | cFYI(1, ("Error decoding negTokenInit ")); | 498 | cFYI(1, ("Error decoding negTokenInit")); |
499 | return 0; | 499 | return 0; |
500 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) | 500 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) |
501 | || (tag != ASN1_EOC)) { | 501 | || (tag != ASN1_EOC)) { |
@@ -505,7 +505,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
505 | } | 505 | } |
506 | 506 | ||
507 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 507 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
508 | cFYI(1, ("Error decoding negTokenInit ")); | 508 | cFYI(1, ("Error decoding negTokenInit")); |
509 | return 0; | 509 | return 0; |
510 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | 510 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) |
511 | || (tag != ASN1_SEQ)) { | 511 | || (tag != ASN1_SEQ)) { |
@@ -515,7 +515,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
515 | } | 515 | } |
516 | 516 | ||
517 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 517 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { |
518 | cFYI(1, ("Error decoding 2nd part of negTokenInit ")); | 518 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); |
519 | return 0; | 519 | return 0; |
520 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) | 520 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON) |
521 | || (tag != ASN1_EOC)) { | 521 | || (tag != ASN1_EOC)) { |
@@ -527,7 +527,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
527 | 527 | ||
528 | if (asn1_header_decode | 528 | if (asn1_header_decode |
529 | (&ctx, &sequence_end, &cls, &con, &tag) == 0) { | 529 | (&ctx, &sequence_end, &cls, &con, &tag) == 0) { |
530 | cFYI(1, ("Error decoding 2nd part of negTokenInit ")); | 530 | cFYI(1, ("Error decoding 2nd part of negTokenInit")); |
531 | return 0; | 531 | return 0; |
532 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | 532 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) |
533 | || (tag != ASN1_SEQ)) { | 533 | || (tag != ASN1_SEQ)) { |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index f4124a32bef8..96abeb738978 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -39,7 +39,7 @@ cifs_dump_mem(char *label, void *data, int length) | |||
39 | char *charptr = data; | 39 | char *charptr = data; |
40 | char buf[10], line[80]; | 40 | char buf[10], line[80]; |
41 | 41 | ||
42 | printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n\n", | 42 | printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n", |
43 | label, length, data); | 43 | label, length, data); |
44 | for (i = 0; i < length; i += 16) { | 44 | for (i = 0; i < length; i += 16) { |
45 | line[0] = 0; | 45 | line[0] = 0; |
@@ -57,6 +57,57 @@ cifs_dump_mem(char *label, void *data, int length) | |||
57 | } | 57 | } |
58 | } | 58 | } |
59 | 59 | ||
60 | #ifdef CONFIG_CIFS_DEBUG2 | ||
61 | void cifs_dump_detail(struct smb_hdr * smb) | ||
62 | { | ||
63 | cERROR(1,("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d", | ||
64 | smb->Command, smb->Status.CifsError, | ||
65 | smb->Flags, smb->Flags2, smb->Mid, smb->Pid)); | ||
66 | cERROR(1,("smb buf %p len %d", smb, smbCalcSize_LE(smb))); | ||
67 | } | ||
68 | |||
69 | |||
70 | void cifs_dump_mids(struct TCP_Server_Info * server) | ||
71 | { | ||
72 | struct list_head *tmp; | ||
73 | struct mid_q_entry * mid_entry; | ||
74 | |||
75 | if(server == NULL) | ||
76 | return; | ||
77 | |||
78 | cERROR(1,("Dump pending requests:")); | ||
79 | spin_lock(&GlobalMid_Lock); | ||
80 | list_for_each(tmp, &server->pending_mid_q) { | ||
81 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | ||
82 | if(mid_entry) { | ||
83 | cERROR(1,("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d", | ||
84 | mid_entry->midState, | ||
85 | (int)mid_entry->command, | ||
86 | mid_entry->pid, | ||
87 | mid_entry->tsk, | ||
88 | mid_entry->mid)); | ||
89 | #ifdef CONFIG_CIFS_STATS2 | ||
90 | cERROR(1,("IsLarge: %d buf: %p time rcv: %ld now: %ld", | ||
91 | mid_entry->largeBuf, | ||
92 | mid_entry->resp_buf, | ||
93 | mid_entry->when_received, | ||
94 | jiffies)); | ||
95 | #endif /* STATS2 */ | ||
96 | cERROR(1,("IsMult: %d IsEnd: %d", mid_entry->multiRsp, | ||
97 | mid_entry->multiEnd)); | ||
98 | if(mid_entry->resp_buf) { | ||
99 | cifs_dump_detail(mid_entry->resp_buf); | ||
100 | cifs_dump_mem("existing buf: ", | ||
101 | mid_entry->resp_buf, | ||
102 | 62 /* fixme */); | ||
103 | } | ||
104 | |||
105 | } | ||
106 | } | ||
107 | spin_unlock(&GlobalMid_Lock); | ||
108 | } | ||
109 | #endif /* CONFIG_CIFS_DEBUG2 */ | ||
110 | |||
60 | #ifdef CONFIG_PROC_FS | 111 | #ifdef CONFIG_PROC_FS |
61 | static int | 112 | static int |
62 | cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset, | 113 | cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset, |
@@ -73,7 +124,6 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset, | |||
73 | 124 | ||
74 | *beginBuffer = buf + offset; | 125 | *beginBuffer = buf + offset; |
75 | 126 | ||
76 | |||
77 | length = | 127 | length = |
78 | sprintf(buf, | 128 | sprintf(buf, |
79 | "Display Internal CIFS Data Structures for Debugging\n" | 129 | "Display Internal CIFS Data Structures for Debugging\n" |
@@ -395,12 +445,12 @@ static read_proc_t traceSMB_read; | |||
395 | static write_proc_t traceSMB_write; | 445 | static write_proc_t traceSMB_write; |
396 | static read_proc_t multiuser_mount_read; | 446 | static read_proc_t multiuser_mount_read; |
397 | static write_proc_t multiuser_mount_write; | 447 | static write_proc_t multiuser_mount_write; |
398 | static read_proc_t extended_security_read; | 448 | static read_proc_t security_flags_read; |
399 | static write_proc_t extended_security_write; | 449 | static write_proc_t security_flags_write; |
400 | static read_proc_t ntlmv2_enabled_read; | 450 | /* static read_proc_t ntlmv2_enabled_read; |
401 | static write_proc_t ntlmv2_enabled_write; | 451 | static write_proc_t ntlmv2_enabled_write; |
402 | static read_proc_t packet_signing_enabled_read; | 452 | static read_proc_t packet_signing_enabled_read; |
403 | static write_proc_t packet_signing_enabled_write; | 453 | static write_proc_t packet_signing_enabled_write;*/ |
404 | static read_proc_t experimEnabled_read; | 454 | static read_proc_t experimEnabled_read; |
405 | static write_proc_t experimEnabled_write; | 455 | static write_proc_t experimEnabled_write; |
406 | static read_proc_t linuxExtensionsEnabled_read; | 456 | static read_proc_t linuxExtensionsEnabled_read; |
@@ -458,10 +508,10 @@ cifs_proc_init(void) | |||
458 | pde->write_proc = multiuser_mount_write; | 508 | pde->write_proc = multiuser_mount_write; |
459 | 509 | ||
460 | pde = | 510 | pde = |
461 | create_proc_read_entry("ExtendedSecurity", 0, proc_fs_cifs, | 511 | create_proc_read_entry("SecurityFlags", 0, proc_fs_cifs, |
462 | extended_security_read, NULL); | 512 | security_flags_read, NULL); |
463 | if (pde) | 513 | if (pde) |
464 | pde->write_proc = extended_security_write; | 514 | pde->write_proc = security_flags_write; |
465 | 515 | ||
466 | pde = | 516 | pde = |
467 | create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs, | 517 | create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs, |
@@ -469,7 +519,7 @@ cifs_proc_init(void) | |||
469 | if (pde) | 519 | if (pde) |
470 | pde->write_proc = lookupFlag_write; | 520 | pde->write_proc = lookupFlag_write; |
471 | 521 | ||
472 | pde = | 522 | /* pde = |
473 | create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs, | 523 | create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs, |
474 | ntlmv2_enabled_read, NULL); | 524 | ntlmv2_enabled_read, NULL); |
475 | if (pde) | 525 | if (pde) |
@@ -479,7 +529,7 @@ cifs_proc_init(void) | |||
479 | create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs, | 529 | create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs, |
480 | packet_signing_enabled_read, NULL); | 530 | packet_signing_enabled_read, NULL); |
481 | if (pde) | 531 | if (pde) |
482 | pde->write_proc = packet_signing_enabled_write; | 532 | pde->write_proc = packet_signing_enabled_write;*/ |
483 | } | 533 | } |
484 | 534 | ||
485 | void | 535 | void |
@@ -496,9 +546,9 @@ cifs_proc_clean(void) | |||
496 | #endif | 546 | #endif |
497 | remove_proc_entry("MultiuserMount", proc_fs_cifs); | 547 | remove_proc_entry("MultiuserMount", proc_fs_cifs); |
498 | remove_proc_entry("OplockEnabled", proc_fs_cifs); | 548 | remove_proc_entry("OplockEnabled", proc_fs_cifs); |
499 | remove_proc_entry("NTLMV2Enabled",proc_fs_cifs); | 549 | /* remove_proc_entry("NTLMV2Enabled",proc_fs_cifs); */ |
500 | remove_proc_entry("ExtendedSecurity",proc_fs_cifs); | 550 | remove_proc_entry("SecurityFlags",proc_fs_cifs); |
501 | remove_proc_entry("PacketSigningEnabled",proc_fs_cifs); | 551 | /* remove_proc_entry("PacketSigningEnabled",proc_fs_cifs); */ |
502 | remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs); | 552 | remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs); |
503 | remove_proc_entry("Experimental",proc_fs_cifs); | 553 | remove_proc_entry("Experimental",proc_fs_cifs); |
504 | remove_proc_entry("LookupCacheEnabled",proc_fs_cifs); | 554 | remove_proc_entry("LookupCacheEnabled",proc_fs_cifs); |
@@ -782,12 +832,12 @@ multiuser_mount_write(struct file *file, const char __user *buffer, | |||
782 | } | 832 | } |
783 | 833 | ||
784 | static int | 834 | static int |
785 | extended_security_read(char *page, char **start, off_t off, | 835 | security_flags_read(char *page, char **start, off_t off, |
786 | int count, int *eof, void *data) | 836 | int count, int *eof, void *data) |
787 | { | 837 | { |
788 | int len; | 838 | int len; |
789 | 839 | ||
790 | len = sprintf(page, "%d\n", extended_security); | 840 | len = sprintf(page, "0x%x\n", extended_security); |
791 | 841 | ||
792 | len -= off; | 842 | len -= off; |
793 | *start = page + off; | 843 | *start = page + off; |
@@ -803,24 +853,52 @@ extended_security_read(char *page, char **start, off_t off, | |||
803 | return len; | 853 | return len; |
804 | } | 854 | } |
805 | static int | 855 | static int |
806 | extended_security_write(struct file *file, const char __user *buffer, | 856 | security_flags_write(struct file *file, const char __user *buffer, |
807 | unsigned long count, void *data) | 857 | unsigned long count, void *data) |
808 | { | 858 | { |
859 | unsigned int flags; | ||
860 | char flags_string[12]; | ||
809 | char c; | 861 | char c; |
810 | int rc; | ||
811 | 862 | ||
812 | rc = get_user(c, buffer); | 863 | if((count < 1) || (count > 11)) |
813 | if (rc) | 864 | return -EINVAL; |
814 | return rc; | 865 | |
815 | if (c == '0' || c == 'n' || c == 'N') | 866 | memset(flags_string, 0, 12); |
816 | extended_security = 0; | 867 | |
817 | else if (c == '1' || c == 'y' || c == 'Y') | 868 | if(copy_from_user(flags_string, buffer, count)) |
818 | extended_security = 1; | 869 | return -EFAULT; |
870 | |||
871 | if(count < 3) { | ||
872 | /* single char or single char followed by null */ | ||
873 | c = flags_string[0]; | ||
874 | if (c == '0' || c == 'n' || c == 'N') | ||
875 | extended_security = CIFSSEC_DEF; /* default */ | ||
876 | else if (c == '1' || c == 'y' || c == 'Y') | ||
877 | extended_security = CIFSSEC_MAX; | ||
878 | return count; | ||
879 | } | ||
880 | /* else we have a number */ | ||
881 | |||
882 | flags = simple_strtoul(flags_string, NULL, 0); | ||
883 | |||
884 | cFYI(1,("sec flags 0x%x", flags)); | ||
885 | |||
886 | if(flags <= 0) { | ||
887 | cERROR(1,("invalid security flags %s",flags_string)); | ||
888 | return -EINVAL; | ||
889 | } | ||
819 | 890 | ||
891 | if(flags & ~CIFSSEC_MASK) { | ||
892 | cERROR(1,("attempt to set unsupported security flags 0x%x", | ||
893 | flags & ~CIFSSEC_MASK)); | ||
894 | return -EINVAL; | ||
895 | } | ||
896 | /* flags look ok - update the global security flags for cifs module */ | ||
897 | extended_security = flags; | ||
820 | return count; | 898 | return count; |
821 | } | 899 | } |
822 | 900 | ||
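The rewritten proc handler above accepts either a single '0'/'1' (falling back to the default or maximum mask) or a hex mask of up to 11 characters, parsed with simple_strtoul() and validated against CIFSSEC_MASK. For illustration, a small userspace program that exercises it by writing a hex mask to the new /proc/fs/cifs/SecurityFlags entry; the value shown is the weak-hash maximum quoted in the README, and whether it is accepted depends on the kernel's CIFSSEC_MASK:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/fs/cifs/SecurityFlags", "w");

	if (!f) {
		perror("SecurityFlags");
		return 1;
	}
	/* allow lanman, plaintext, ntlm, ntlmv2 and signing */
	fprintf(f, "0x37037");
	return fclose(f) ? 1 : 0;
}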
823 | static int | 901 | /* static int |
824 | ntlmv2_enabled_read(char *page, char **start, off_t off, | 902 | ntlmv2_enabled_read(char *page, char **start, off_t off, |
825 | int count, int *eof, void *data) | 903 | int count, int *eof, void *data) |
826 | { | 904 | { |
@@ -855,6 +933,8 @@ ntlmv2_enabled_write(struct file *file, const char __user *buffer, | |||
855 | ntlmv2_support = 0; | 933 | ntlmv2_support = 0; |
856 | else if (c == '1' || c == 'y' || c == 'Y') | 934 | else if (c == '1' || c == 'y' || c == 'Y') |
857 | ntlmv2_support = 1; | 935 | ntlmv2_support = 1; |
936 | else if (c == '2') | ||
937 | ntlmv2_support = 2; | ||
858 | 938 | ||
859 | return count; | 939 | return count; |
860 | } | 940 | } |
@@ -898,7 +978,7 @@ packet_signing_enabled_write(struct file *file, const char __user *buffer, | |||
898 | sign_CIFS_PDUs = 2; | 978 | sign_CIFS_PDUs = 2; |
899 | 979 | ||
900 | return count; | 980 | return count; |
901 | } | 981 | } */ |
902 | 982 | ||
903 | 983 | ||
904 | #endif | 984 | #endif |
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index 4304d9dcfb6c..c26cd0d2c6d5 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h | |||
@@ -24,6 +24,10 @@ | |||
24 | #define _H_CIFS_DEBUG | 24 | #define _H_CIFS_DEBUG |
25 | 25 | ||
26 | void cifs_dump_mem(char *label, void *data, int length); | 26 | void cifs_dump_mem(char *label, void *data, int length); |
27 | #ifdef CONFIG_CIFS_DEBUG2 | ||
28 | void cifs_dump_detail(struct smb_hdr *); | ||
29 | void cifs_dump_mids(struct TCP_Server_Info *); | ||
30 | #endif | ||
27 | extern int traceSMB; /* flag which enables the function below */ | 31 | extern int traceSMB; /* flag which enables the function below */ |
28 | void dump_smb(struct smb_hdr *, int); | 32 | void dump_smb(struct smb_hdr *, int); |
29 | #define CIFS_INFO 0x01 | 33 | #define CIFS_INFO 0x01 |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index d2b128255944..d2a8b2941fc2 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "cifs_unicode.h" | 22 | #include "cifs_unicode.h" |
23 | #include "cifs_uniupr.h" | 23 | #include "cifs_uniupr.h" |
24 | #include "cifspdu.h" | 24 | #include "cifspdu.h" |
25 | #include "cifsglob.h" | ||
25 | #include "cifs_debug.h" | 26 | #include "cifs_debug.h" |
26 | 27 | ||
27 | /* | 28 | /* |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index e7d63737e651..a89efaf78a26 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include "md5.h" | 26 | #include "md5.h" |
27 | #include "cifs_unicode.h" | 27 | #include "cifs_unicode.h" |
28 | #include "cifsproto.h" | 28 | #include "cifsproto.h" |
29 | #include <linux/ctype.h> | ||
30 | #include <linux/random.h> | ||
29 | 31 | ||
30 | /* Calculate and return the CIFS signature based on the mac key and the smb pdu */ | 32 | /* Calculate and return the CIFS signature based on the mac key and the smb pdu */ |
31 | /* the 16 byte signature must be allocated by the caller */ | 33 | /* the 16 byte signature must be allocated by the caller */ |
@@ -35,6 +37,8 @@ | |||
35 | 37 | ||
36 | extern void mdfour(unsigned char *out, unsigned char *in, int n); | 38 | extern void mdfour(unsigned char *out, unsigned char *in, int n); |
37 | extern void E_md4hash(const unsigned char *passwd, unsigned char *p16); | 39 | extern void E_md4hash(const unsigned char *passwd, unsigned char *p16); |
40 | extern void SMBencrypt(unsigned char *passwd, unsigned char *c8, | ||
41 | unsigned char *p24); | ||
38 | 42 | ||
39 | static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, | 43 | static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, |
40 | const char * key, char * signature) | 44 | const char * key, char * signature) |
@@ -45,7 +49,7 @@ static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, | |||
45 | return -EINVAL; | 49 | return -EINVAL; |
46 | 50 | ||
47 | MD5Init(&context); | 51 | MD5Init(&context); |
48 | MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16); | 52 | MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16); |
49 | MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); | 53 | MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); |
50 | MD5Final(signature,&context); | 54 | MD5Final(signature,&context); |
51 | return 0; | 55 | return 0; |
@@ -90,7 +94,7 @@ static int cifs_calc_signature2(const struct kvec * iov, int n_vec, | |||
90 | return -EINVAL; | 94 | return -EINVAL; |
91 | 95 | ||
92 | MD5Init(&context); | 96 | MD5Init(&context); |
93 | MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16); | 97 | MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16); |
94 | for(i=0;i<n_vec;i++) { | 98 | for(i=0;i<n_vec;i++) { |
95 | if(iov[i].iov_base == NULL) { | 99 | if(iov[i].iov_base == NULL) { |
96 | cERROR(1,("null iovec entry")); | 100 | cERROR(1,("null iovec entry")); |
@@ -204,11 +208,12 @@ int cifs_calculate_mac_key(char * key, const char * rn, const char * password) | |||
204 | 208 | ||
205 | E_md4hash(password, temp_key); | 209 | E_md4hash(password, temp_key); |
206 | mdfour(key,temp_key,16); | 210 | mdfour(key,temp_key,16); |
207 | memcpy(key+16,rn, CIFS_SESSION_KEY_SIZE); | 211 | memcpy(key+16,rn, CIFS_SESS_KEY_SIZE); |
208 | return 0; | 212 | return 0; |
209 | } | 213 | } |
210 | 214 | ||
211 | int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_info) | 215 | int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, |
216 | const struct nls_table * nls_info) | ||
212 | { | 217 | { |
213 | char temp_hash[16]; | 218 | char temp_hash[16]; |
214 | struct HMACMD5Context ctx; | 219 | struct HMACMD5Context ctx; |
@@ -225,6 +230,8 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_ | |||
225 | user_name_len = strlen(ses->userName); | 230 | user_name_len = strlen(ses->userName); |
226 | if(user_name_len > MAX_USERNAME_SIZE) | 231 | if(user_name_len > MAX_USERNAME_SIZE) |
227 | return -EINVAL; | 232 | return -EINVAL; |
233 | if(ses->domainName == NULL) | ||
234 | return -EINVAL; /* BB should we use CIFS_LINUX_DOM */ | ||
228 | dom_name_len = strlen(ses->domainName); | 235 | dom_name_len = strlen(ses->domainName); |
229 | if(dom_name_len > MAX_USERNAME_SIZE) | 236 | if(dom_name_len > MAX_USERNAME_SIZE) |
230 | return -EINVAL; | 237 | return -EINVAL; |
@@ -259,16 +266,131 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_ | |||
259 | kfree(unicode_buf); | 266 | kfree(unicode_buf); |
260 | return 0; | 267 | return 0; |
261 | } | 268 | } |
262 | void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_response) | 269 | |
270 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
271 | void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key) | ||
272 | { | ||
273 | int i; | ||
274 | char password_with_pad[CIFS_ENCPWD_SIZE]; | ||
275 | |||
276 | if(ses->server == NULL) | ||
277 | return; | ||
278 | |||
279 | memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); | ||
280 | strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); | ||
281 | |||
282 | if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) | ||
283 | if(extended_security & CIFSSEC_MAY_PLNTXT) { | ||
284 | memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE); | ||
285 | return; | ||
286 | } | ||
287 | |||
288 | /* calculate old style session key */ | ||
289 | /* calling toupper is less broken than repeatedly calling | ||
290 | nls_toupper would be, since the latter will never work for | ||
291 | UTF8, but neither handles multibyte code pages. The only | ||
292 | alternative would be converting to UCS-16 (Unicode) (using a | ||
293 | routine something like UniStrupr), uppercasing, and converting | ||
294 | back from Unicode - which would only be worth doing if we knew | ||
295 | it were utf8. Basically utf8 and other multibyte codepages | ||
296 | each need their own strupper function since a byte at a time | ||
297 | will not work. */ | ||
298 | |||
299 | for(i = 0; i < CIFS_ENCPWD_SIZE; i++) { | ||
300 | password_with_pad[i] = toupper(password_with_pad[i]); | ||
301 | } | ||
302 | |||
303 | SMBencrypt(password_with_pad, ses->server->cryptKey, lnm_session_key); | ||
304 | /* clear password before we return/free memory */ | ||
305 | memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); | ||
306 | } | ||
307 | #endif /* CIFS_WEAK_PW_HASH */ | ||
308 | |||
309 | static int calc_ntlmv2_hash(struct cifsSesInfo *ses, | ||
310 | const struct nls_table * nls_cp) | ||
311 | { | ||
312 | int rc = 0; | ||
313 | int len; | ||
314 | char nt_hash[16]; | ||
315 | struct HMACMD5Context * pctxt; | ||
316 | wchar_t * user; | ||
317 | wchar_t * domain; | ||
318 | |||
319 | pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL); | ||
320 | |||
321 | if(pctxt == NULL) | ||
322 | return -ENOMEM; | ||
323 | |||
324 | /* calculate md4 hash of password */ | ||
325 | E_md4hash(ses->password, nt_hash); | ||
326 | |||
327 | /* convert Domainname to unicode and uppercase */ | ||
328 | hmac_md5_init_limK_to_64(nt_hash, 16, pctxt); | ||
329 | |||
330 | /* convert ses->userName to unicode and uppercase */ | ||
331 | len = strlen(ses->userName); | ||
332 | user = kmalloc(2 + (len * 2), GFP_KERNEL); | ||
333 | if(user == NULL) | ||
334 | goto calc_exit_2; | ||
335 | len = cifs_strtoUCS(user, ses->userName, len, nls_cp); | ||
336 | UniStrupr(user); | ||
337 | hmac_md5_update((char *)user, 2*len, pctxt); | ||
338 | |||
339 | /* convert ses->domainName to unicode and uppercase */ | ||
340 | if(ses->domainName) { | ||
341 | len = strlen(ses->domainName); | ||
342 | |||
343 | domain = kmalloc(2 + (len * 2), GFP_KERNEL); | ||
344 | if(domain == NULL) | ||
345 | goto calc_exit_1; | ||
346 | len = cifs_strtoUCS(domain, ses->domainName, len, nls_cp); | ||
347 | UniStrupr(domain); | ||
348 | |||
349 | hmac_md5_update((char *)domain, 2*len, pctxt); | ||
350 | |||
351 | kfree(domain); | ||
352 | } | ||
353 | calc_exit_1: | ||
354 | kfree(user); | ||
355 | calc_exit_2: | ||
356 | /* BB FIXME what about bytes 24 through 40 of the signing key? | ||
357 | compare with the NTLM example */ | ||
358 | hmac_md5_final(ses->server->mac_signing_key, pctxt); | ||
359 | |||
360 | return rc; | ||
361 | } | ||
362 | |||
363 | void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf, | ||
364 | const struct nls_table * nls_cp) | ||
365 | { | ||
366 | int rc; | ||
367 | struct ntlmv2_resp * buf = (struct ntlmv2_resp *)resp_buf; | ||
368 | |||
369 | buf->blob_signature = cpu_to_le32(0x00000101); | ||
370 | buf->reserved = 0; | ||
371 | buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); | ||
372 | get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); | ||
373 | buf->reserved2 = 0; | ||
374 | buf->names[0].type = 0; | ||
375 | buf->names[0].length = 0; | ||
376 | |||
377 | /* calculate buf->ntlmv2_hash */ | ||
378 | rc = calc_ntlmv2_hash(ses, nls_cp); | ||
379 | if(rc) | ||
380 | cERROR(1,("could not get v2 hash rc %d",rc)); | ||
381 | CalcNTLMv2_response(ses, resp_buf); | ||
382 | } | ||
383 | |||
384 | void CalcNTLMv2_response(const struct cifsSesInfo * ses, char * v2_session_response) | ||
263 | { | 385 | { |
264 | struct HMACMD5Context context; | 386 | struct HMACMD5Context context; |
387 | /* rest of v2 struct already generated */ | ||
265 | memcpy(v2_session_response + 8, ses->server->cryptKey,8); | 388 | memcpy(v2_session_response + 8, ses->server->cryptKey,8); |
266 | /* gen_blob(v2_session_response + 16); */ | ||
267 | hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context); | 389 | hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context); |
268 | 390 | ||
269 | hmac_md5_update(ses->server->cryptKey,8,&context); | 391 | hmac_md5_update(v2_session_response+8, |
270 | /* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */ | 392 | sizeof(struct ntlmv2_resp) - 8, &context); |
271 | 393 | ||
272 | hmac_md5_final(v2_session_response,&context); | 394 | hmac_md5_final(v2_session_response,&context); |
273 | cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */ | 395 | /* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */ |
274 | } | 396 | } |
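The new NTLMv2 path above works in two stages: setup_ntlmv2_rsp() fills in the fixed portion of the response blob (signature 0x0101, NT-format timestamp, random client challenge, empty terminating name entry) and calls calc_ntlmv2_hash() to derive the HMAC-MD5 key into mac_signing_key; CalcNTLMv2_response() then copies the 8-byte server challenge into bytes 8-15 of the buffer, runs HMAC-MD5 over everything from offset 8 to the end of the fixed blob, and writes the 16-byte digest over the front of the buffer. Below is a minimal user-space sketch of just the blob initialization, under stated assumptions: CIFS_ENCPWD_SIZE is assumed to be 16, the struct is a plain-integer replica rather than the kernel's __le types, unix_to_nt() is only an approximation of what cifs_UnixTimeToNT() provides, and the keyed HMAC step is omitted entirely.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>

#define ENCPWD_SIZE 16	/* assumed value of CIFS_ENCPWD_SIZE */

/* plain-integer replica of struct ntlmv2_resp with one empty name entry */
struct ntlmv2_blob {
	unsigned char ntlmv2_hash[ENCPWD_SIZE];
	uint32_t blob_signature;
	uint32_t reserved;
	uint64_t time;
	uint64_t client_chal;
	uint32_t reserved2;
	uint16_t name_type;
	uint16_t name_length;
} __attribute__((packed));

/* Unix seconds to NT time (100ns ticks since 1601); rough stand-in for
   the kernel's cifs_UnixTimeToNT() */
static uint64_t unix_to_nt(time_t t)
{
	return ((uint64_t)t + 11644473600ULL) * 10000000ULL;
}

int main(void)
{
	struct ntlmv2_blob blob;

	srand((unsigned)time(NULL));
	memset(&blob, 0, sizeof(blob));
	blob.blob_signature = 0x00000101;	/* little-endian on the wire */
	blob.time = unix_to_nt(time(NULL));
	blob.client_chal = ((uint64_t)rand() << 32) | (unsigned)rand();
	/* name_type/name_length stay 0: the empty terminating name entry;
	   bytes 0..15 are later overwritten with the HMAC-MD5 digest */

	printf("fixed blob is %zu bytes, client challenge %016llx\n",
	       sizeof(blob), (unsigned long long)blob.client_chal);
	return 0;
}

The sketch only shows field layout and initialization order; byte order and the actual NTLMv2 key derivation are left to the kernel code above.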
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8b4de6eaabd0..c28ede599946 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -56,8 +56,8 @@ unsigned int experimEnabled = 0; | |||
56 | unsigned int linuxExtEnabled = 1; | 56 | unsigned int linuxExtEnabled = 1; |
57 | unsigned int lookupCacheEnabled = 1; | 57 | unsigned int lookupCacheEnabled = 1; |
58 | unsigned int multiuser_mount = 0; | 58 | unsigned int multiuser_mount = 0; |
59 | unsigned int extended_security = 0; | 59 | unsigned int extended_security = CIFSSEC_DEF; |
60 | unsigned int ntlmv2_support = 0; | 60 | /* unsigned int ntlmv2_support = 0; */ |
61 | unsigned int sign_CIFS_PDUs = 1; | 61 | unsigned int sign_CIFS_PDUs = 1; |
62 | extern struct task_struct * oplockThread; /* remove sparse warning */ | 62 | extern struct task_struct * oplockThread; /* remove sparse warning */ |
63 | struct task_struct * oplockThread = NULL; | 63 | struct task_struct * oplockThread = NULL; |
@@ -908,7 +908,7 @@ static int cifs_dnotify_thread(void * dummyarg) | |||
908 | struct cifsSesInfo *ses; | 908 | struct cifsSesInfo *ses; |
909 | 909 | ||
910 | do { | 910 | do { |
911 | if(try_to_freeze()) | 911 | if (try_to_freeze()) |
912 | continue; | 912 | continue; |
913 | set_current_state(TASK_INTERRUPTIBLE); | 913 | set_current_state(TASK_INTERRUPTIBLE); |
914 | schedule_timeout(15*HZ); | 914 | schedule_timeout(15*HZ); |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index d56c0577c710..a6384d83fdef 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | extern struct address_space_operations cifs_addr_ops; | 35 | extern struct address_space_operations cifs_addr_ops; |
36 | extern struct address_space_operations cifs_addr_ops_smallbuf; | ||
36 | 37 | ||
37 | /* Functions related to super block operations */ | 38 | /* Functions related to super block operations */ |
38 | extern struct super_operations cifs_super_ops; | 39 | extern struct super_operations cifs_super_ops; |
@@ -99,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t); | |||
99 | extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); | 100 | extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); |
100 | extern int cifs_ioctl (struct inode * inode, struct file * filep, | 101 | extern int cifs_ioctl (struct inode * inode, struct file * filep, |
101 | unsigned int command, unsigned long arg); | 102 | unsigned int command, unsigned long arg); |
102 | #define CIFS_VERSION "1.43" | 103 | #define CIFS_VERSION "1.44" |
103 | #endif /* _CIFSFS_H */ | 104 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 006eb33bff5f..6d7cf5f3bc0b 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -88,7 +88,8 @@ enum statusEnum { | |||
88 | }; | 88 | }; |
89 | 89 | ||
90 | enum securityEnum { | 90 | enum securityEnum { |
91 | NTLM = 0, /* Legacy NTLM012 auth with NTLM hash */ | 91 | LANMAN = 0, /* Legacy LANMAN auth */ |
92 | NTLM, /* Legacy NTLM012 auth with NTLM hash */ | ||
92 | NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ | 93 | NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ |
93 | RawNTLMSSP, /* NTLMSSP without SPNEGO */ | 94 | RawNTLMSSP, /* NTLMSSP without SPNEGO */ |
94 | NTLMSSP, /* NTLMSSP via SPNEGO */ | 95 | NTLMSSP, /* NTLMSSP via SPNEGO */ |
@@ -157,7 +158,7 @@ struct TCP_Server_Info { | |||
157 | /* 16th byte of RFC1001 workstation name is always null */ | 158 | /* 16th byte of RFC1001 workstation name is always null */ |
158 | char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; | 159 | char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; |
159 | __u32 sequence_number; /* needed for CIFS PDU signature */ | 160 | __u32 sequence_number; /* needed for CIFS PDU signature */ |
160 | char mac_signing_key[CIFS_SESSION_KEY_SIZE + 16]; | 161 | char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; |
161 | }; | 162 | }; |
162 | 163 | ||
163 | /* | 164 | /* |
@@ -179,10 +180,13 @@ struct cifsUidInfo { | |||
179 | struct cifsSesInfo { | 180 | struct cifsSesInfo { |
180 | struct list_head cifsSessionList; | 181 | struct list_head cifsSessionList; |
181 | struct semaphore sesSem; | 182 | struct semaphore sesSem; |
183 | #if 0 | ||
182 | struct cifsUidInfo *uidInfo; /* pointer to user info */ | 184 | struct cifsUidInfo *uidInfo; /* pointer to user info */ |
185 | #endif | ||
183 | struct TCP_Server_Info *server; /* pointer to server info */ | 186 | struct TCP_Server_Info *server; /* pointer to server info */ |
184 | atomic_t inUse; /* # of mounts (tree connections) on this ses */ | 187 | atomic_t inUse; /* # of mounts (tree connections) on this ses */ |
185 | enum statusEnum status; | 188 | enum statusEnum status; |
189 | unsigned overrideSecFlg; /* if non-zero override global sec flags */ | ||
186 | __u16 ipc_tid; /* special tid for connection to IPC share */ | 190 | __u16 ipc_tid; /* special tid for connection to IPC share */ |
187 | __u16 flags; | 191 | __u16 flags; |
188 | char *serverOS; /* name of operating system underlying server */ | 192 | char *serverOS; /* name of operating system underlying server */ |
@@ -194,7 +198,7 @@ struct cifsSesInfo { | |||
194 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for | 198 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for |
195 | TCP names - will ipv6 and sctp addresses fit? */ | 199 | TCP names - will ipv6 and sctp addresses fit? */ |
196 | char userName[MAX_USERNAME_SIZE + 1]; | 200 | char userName[MAX_USERNAME_SIZE + 1]; |
197 | char domainName[MAX_USERNAME_SIZE + 1]; | 201 | char * domainName; |
198 | char * password; | 202 | char * password; |
199 | }; | 203 | }; |
200 | /* session flags */ | 204 | /* session flags */ |
@@ -209,12 +213,12 @@ struct cifsTconInfo { | |||
209 | struct list_head openFileList; | 213 | struct list_head openFileList; |
210 | struct semaphore tconSem; | 214 | struct semaphore tconSem; |
211 | struct cifsSesInfo *ses; /* pointer to session associated with */ | 215 | struct cifsSesInfo *ses; /* pointer to session associated with */ |
212 | char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource (in ASCII not UTF) */ | 216 | char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ |
213 | char *nativeFileSystem; | 217 | char *nativeFileSystem; |
214 | __u16 tid; /* The 2 byte tree id */ | 218 | __u16 tid; /* The 2 byte tree id */ |
215 | __u16 Flags; /* optional support bits */ | 219 | __u16 Flags; /* optional support bits */ |
216 | enum statusEnum tidStatus; | 220 | enum statusEnum tidStatus; |
217 | atomic_t useCount; /* how many mounts (explicit or implicit) to this share */ | 221 | atomic_t useCount; /* how many explicit/implicit mounts to share */ |
218 | #ifdef CONFIG_CIFS_STATS | 222 | #ifdef CONFIG_CIFS_STATS |
219 | atomic_t num_smbs_sent; | 223 | atomic_t num_smbs_sent; |
220 | atomic_t num_writes; | 224 | atomic_t num_writes; |
@@ -254,7 +258,7 @@ struct cifsTconInfo { | |||
254 | spinlock_t stat_lock; | 258 | spinlock_t stat_lock; |
255 | #endif /* CONFIG_CIFS_STATS */ | 259 | #endif /* CONFIG_CIFS_STATS */ |
256 | FILE_SYSTEM_DEVICE_INFO fsDevInfo; | 260 | FILE_SYSTEM_DEVICE_INFO fsDevInfo; |
257 | FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if file system name truncated */ | 261 | FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ |
258 | FILE_SYSTEM_UNIX_INFO fsUnixInfo; | 262 | FILE_SYSTEM_UNIX_INFO fsUnixInfo; |
259 | unsigned retry:1; | 263 | unsigned retry:1; |
260 | unsigned nocase:1; | 264 | unsigned nocase:1; |
@@ -305,7 +309,6 @@ struct cifsFileInfo { | |||
305 | atomic_t wrtPending; /* handle in use - defer close */ | 309 | atomic_t wrtPending; /* handle in use - defer close */ |
306 | struct semaphore fh_sem; /* prevents reopen race after dead ses*/ | 310 | struct semaphore fh_sem; /* prevents reopen race after dead ses*/ |
307 | char * search_resume_name; /* BB removeme BB */ | 311 | char * search_resume_name; /* BB removeme BB */ |
308 | unsigned int resume_name_length; /* BB removeme - field renamed and moved BB */ | ||
309 | struct cifs_search_info srch_inf; | 312 | struct cifs_search_info srch_inf; |
310 | }; | 313 | }; |
311 | 314 | ||
@@ -391,9 +394,9 @@ struct mid_q_entry { | |||
391 | struct smb_hdr *resp_buf; /* response buffer */ | 394 | struct smb_hdr *resp_buf; /* response buffer */ |
392 | int midState; /* wish this were enum but can not pass to wait_event */ | 395 | int midState; /* wish this were enum but can not pass to wait_event */ |
393 | __u8 command; /* smb command code */ | 396 | __u8 command; /* smb command code */ |
394 | unsigned multiPart:1; /* multiple responses to one SMB request */ | ||
395 | unsigned largeBuf:1; /* if valid response, is pointer to large buf */ | 397 | unsigned largeBuf:1; /* if valid response, is pointer to large buf */ |
396 | unsigned multiResp:1; /* multiple trans2 responses for one request */ | 398 | unsigned multiRsp:1; /* multiple trans2 responses for one request */ |
399 | unsigned multiEnd:1; /* both received */ | ||
397 | }; | 400 | }; |
398 | 401 | ||
399 | struct oplock_q_entry { | 402 | struct oplock_q_entry { |
@@ -430,15 +433,35 @@ struct dir_notify_req { | |||
430 | #define CIFS_LARGE_BUFFER 2 | 433 | #define CIFS_LARGE_BUFFER 2 |
431 | #define CIFS_IOVEC 4 /* array of response buffers */ | 434 | #define CIFS_IOVEC 4 /* array of response buffers */ |
432 | 435 | ||
433 | /* Type of session setup needed */ | 436 | /* Security Flags: indicate type of session setup needed */ |
434 | #define CIFS_PLAINTEXT 0 | 437 | #define CIFSSEC_MAY_SIGN 0x00001 |
435 | #define CIFS_LANMAN 1 | 438 | #define CIFSSEC_MAY_NTLM 0x00002 |
436 | #define CIFS_NTLM 2 | 439 | #define CIFSSEC_MAY_NTLMV2 0x00004 |
437 | #define CIFS_NTLMSSP_NEG 3 | 440 | #define CIFSSEC_MAY_KRB5 0x00008 |
438 | #define CIFS_NTLMSSP_AUTH 4 | 441 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
439 | #define CIFS_SPNEGO_INIT 5 | 442 | #define CIFSSEC_MAY_LANMAN 0x00010 |
440 | #define CIFS_SPNEGO_TARG 6 | 443 | #define CIFSSEC_MAY_PLNTXT 0x00020 |
441 | 444 | #endif /* weak passwords */ | |
445 | #define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */ | ||
446 | |||
447 | #define CIFSSEC_MUST_SIGN 0x01001 | ||
448 | /* note that only one of the following can be set, so the | ||
449 | result of setting MUST flags more than once is to require | ||
450 | use of the stronger protocol */ | ||
451 | #define CIFSSEC_MUST_NTLM 0x02002 | ||
452 | #define CIFSSEC_MUST_NTLMV2 0x04004 | ||
453 | #define CIFSSEC_MUST_KRB5 0x08008 | ||
454 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
455 | #define CIFSSEC_MUST_LANMAN 0x10010 | ||
456 | #define CIFSSEC_MUST_PLNTXT 0x20020 | ||
457 | #define CIFSSEC_MASK 0x37037 /* current flags supported if weak */ | ||
458 | #else | ||
459 | #define CIFSSEC_MASK 0x07007 /* flags supported if no weak config */ | ||
460 | #endif /* WEAK_PW_HASH */ | ||
461 | #define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ | ||
462 | |||
463 | #define CIFSSEC_DEF CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2 | ||
464 | #define CIFSSEC_MAX CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2 | ||
442 | /* | 465 | /* |
443 | ***************************************************************** | 466 | ***************************************************************** |
444 | * All constants go here | 467 | * All constants go here |
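The CIFSSEC_* values above encode each mechanism twice: the low bits carry the MAY flags, and every MUST value also contains its MAY bit (CIFSSEC_MUST_NTLMV2 = 0x04004 includes CIFSSEC_MAY_NTLMV2 = 0x00004), which is what lets the negotiate code test (secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2 to distinguish "required" from merely "allowed". A stand-alone sketch of that encoding follows; the numeric values are copied from the hunk above, but the macro names are shortened and parenthesized purely for this sketch (the in-tree CIFSSEC_DEF/CIFSSEC_MAX definitions are unparenthesized OR expressions, so they are only safe on the right-hand side of an assignment).

#include <stdio.h>

/* values taken from the CIFSSEC_* definitions above, parenthesized here
   so the sketch composes safely inside larger expressions */
#define MAY_SIGN    0x00001
#define MAY_NTLM    0x00002
#define MAY_NTLMV2  0x00004
#define MUST_SIGN   0x01001
#define MUST_NTLMV2 0x04004
#define SEC_DEF     (MAY_SIGN | MAY_NTLM | MAY_NTLMV2)	/* ~ CIFSSEC_DEF */

int main(void)
{
	unsigned int secFlags = SEC_DEF;

	/* a MUST flag implies its MAY bit, so this test only succeeds
	   when "must NTLMv2" was really requested */
	if ((secFlags & MUST_NTLMV2) == MUST_NTLMV2)
		printf("NTLMv2 required\n");
	else if (secFlags & MAY_NTLM)
		printf("NTLM allowed (default)\n");

	secFlags = MUST_SIGN | MUST_NTLMV2;	/* roughly CIFSSEC_MAX */
	printf("signing required: %s\n",
	       (secFlags & MUST_SIGN) == MUST_SIGN ? "yes" : "no");
	return 0;
}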
@@ -500,16 +523,16 @@ GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */ | |||
500 | GLOBAL_EXTERN struct list_head GlobalOplock_Q; | 523 | GLOBAL_EXTERN struct list_head GlobalOplock_Q; |
501 | 524 | ||
502 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */ | 525 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */ |
503 | GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q; /* Dir notify response queue */ | 526 | GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */ |
504 | 527 | ||
505 | /* | 528 | /* |
506 | * Global transaction id (XID) information | 529 | * Global transaction id (XID) information |
507 | */ | 530 | */ |
508 | GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ | 531 | GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ |
509 | GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ | 532 | GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ |
510 | GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ | 533 | GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ |
511 | GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above and list operations */ | 534 | GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ |
512 | /* on midQ entries */ | 535 | /* on midQ entries */ |
513 | GLOBAL_EXTERN char Local_System_Name[15]; | 536 | GLOBAL_EXTERN char Local_System_Name[15]; |
514 | 537 | ||
515 | /* | 538 | /* |
@@ -531,7 +554,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount; | |||
531 | GLOBAL_EXTERN atomic_t midCount; | 554 | GLOBAL_EXTERN atomic_t midCount; |
532 | 555 | ||
533 | /* Misc globals */ | 556 | /* Misc globals */ |
534 | GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions | 557 | GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions |
535 | to be established on existing mount if we | 558 | to be established on existing mount if we |
536 | have the uid/password or Kerberos credential | 559 | have the uid/password or Kerberos credential |
537 | or equivalent for current user */ | 560 | or equivalent for current user */ |
@@ -540,8 +563,8 @@ GLOBAL_EXTERN unsigned int experimEnabled; | |||
540 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; | 563 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; |
541 | GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent | 564 | GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent |
542 | with more secure ntlmssp2 challenge/resp */ | 565 | with more secure ntlmssp2 challenge/resp */ |
543 | GLOBAL_EXTERN unsigned int ntlmv2_support; /* better optional password hash */ | ||
544 | GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ | 566 | GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ |
567 | GLOBAL_EXTERN unsigned int secFlags; | ||
545 | GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ | 568 | GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ |
546 | GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */ | 569 | GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */ |
547 | GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ | 570 | GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ |
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index b2233ac05bd2..86239023545b 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h | |||
@@ -16,7 +16,7 @@ | |||
16 | * | 16 | * |
17 | * You should have received a copy of the GNU Lesser General Public License | 17 | * You should have received a copy of the GNU Lesser General Public License |
18 | * along with this library; if not, write to the Free Software | 18 | * along with this library; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #ifndef _CIFSPDU_H | 22 | #ifndef _CIFSPDU_H |
@@ -24,8 +24,14 @@ | |||
24 | 24 | ||
25 | #include <net/sock.h> | 25 | #include <net/sock.h> |
26 | 26 | ||
27 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
28 | #define LANMAN_PROT 0 | ||
29 | #define CIFS_PROT 1 | ||
30 | #else | ||
27 | #define CIFS_PROT 0 | 31 | #define CIFS_PROT 0 |
28 | #define BAD_PROT CIFS_PROT+1 | 32 | #endif |
33 | #define POSIX_PROT CIFS_PROT+1 | ||
34 | #define BAD_PROT 0xFFFF | ||
29 | 35 | ||
30 | /* SMB command codes */ | 36 | /* SMB command codes */ |
31 | /* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses | 37 | /* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses |
@@ -110,7 +116,7 @@ | |||
110 | /* | 116 | /* |
111 | * Size of the session key (crypto key encrypted with the password | 117 | * Size of the session key (crypto key encrypted with the password |
112 | */ | 118 | */ |
113 | #define CIFS_SESSION_KEY_SIZE (24) | 119 | #define CIFS_SESS_KEY_SIZE (24) |
114 | 120 | ||
115 | /* | 121 | /* |
116 | * Maximum user name length | 122 | * Maximum user name length |
@@ -400,6 +406,29 @@ typedef struct negotiate_req { | |||
400 | unsigned char DialectsArray[1]; | 406 | unsigned char DialectsArray[1]; |
401 | } __attribute__((packed)) NEGOTIATE_REQ; | 407 | } __attribute__((packed)) NEGOTIATE_REQ; |
402 | 408 | ||
409 | /* Dialect index is 13 for LANMAN */ | ||
410 | |||
411 | typedef struct lanman_neg_rsp { | ||
412 | struct smb_hdr hdr; /* wct = 13 */ | ||
413 | __le16 DialectIndex; | ||
414 | __le16 SecurityMode; | ||
415 | __le16 MaxBufSize; | ||
416 | __le16 MaxMpxCount; | ||
417 | __le16 MaxNumberVcs; | ||
418 | __le16 RawMode; | ||
419 | __le32 SessionKey; | ||
420 | __le32 ServerTime; | ||
421 | __le16 ServerTimeZone; | ||
422 | __le16 EncryptionKeyLength; | ||
423 | __le16 Reserved; | ||
424 | __u16 ByteCount; | ||
425 | unsigned char EncryptionKey[1]; | ||
426 | } __attribute__((packed)) LANMAN_NEG_RSP; | ||
427 | |||
428 | #define READ_RAW_ENABLE 1 | ||
429 | #define WRITE_RAW_ENABLE 2 | ||
430 | #define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE) | ||
431 | |||
403 | typedef struct negotiate_rsp { | 432 | typedef struct negotiate_rsp { |
404 | struct smb_hdr hdr; /* wct = 17 */ | 433 | struct smb_hdr hdr; /* wct = 17 */ |
405 | __le16 DialectIndex; | 434 | __le16 DialectIndex; |
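The LANMAN_NEG_RSP layout and RAW_ENABLE bits defined above are consumed by the new wct == 13 branch of CIFSSMBNegotiate later in this patch, which only records whether both raw read and raw write were offered. A tiny stand-alone sketch of that mask test, using the bit values from this hunk and a made-up sample RawMode value:

#include <stdio.h>
#include <stdint.h>

#define READ_RAW_ENABLE  1
#define WRITE_RAW_ENABLE 2
#define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)

int main(void)
{
	uint16_t raw_mode = 3;	/* sample value as a server might return */

	/* both bits must be set before the client records raw mode as
	   usable (the driver notes it but does not actually use raw) */
	if ((raw_mode & RAW_ENABLE) == RAW_ENABLE)
		printf("raw read/write supported\n");
	else
		printf("raw mode not fully supported\n");
	return 0;
}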
@@ -509,7 +538,7 @@ typedef union smb_com_session_setup_andx { | |||
509 | /* unsigned char * NativeOS; */ | 538 | /* unsigned char * NativeOS; */ |
510 | /* unsigned char * NativeLanMan; */ | 539 | /* unsigned char * NativeLanMan; */ |
511 | /* unsigned char * PrimaryDomain; */ | 540 | /* unsigned char * PrimaryDomain; */ |
512 | } __attribute__((packed)) resp; /* NTLM response format (with or without extended security */ | 541 | } __attribute__((packed)) resp; /* NTLM response with or without extended sec*/ |
513 | 542 | ||
514 | struct { /* request format */ | 543 | struct { /* request format */ |
515 | struct smb_hdr hdr; /* wct = 10 */ | 544 | struct smb_hdr hdr; /* wct = 10 */ |
@@ -520,8 +549,8 @@ typedef union smb_com_session_setup_andx { | |||
520 | __le16 MaxMpxCount; | 549 | __le16 MaxMpxCount; |
521 | __le16 VcNumber; | 550 | __le16 VcNumber; |
522 | __u32 SessionKey; | 551 | __u32 SessionKey; |
523 | __le16 PassswordLength; | 552 | __le16 PasswordLength; |
524 | __u32 Reserved; | 553 | __u32 Reserved; /* encrypt key len and offset */ |
525 | __le16 ByteCount; | 554 | __le16 ByteCount; |
526 | unsigned char AccountPassword[1]; /* followed by */ | 555 | unsigned char AccountPassword[1]; /* followed by */ |
527 | /* STRING AccountName */ | 556 | /* STRING AccountName */ |
@@ -543,6 +572,26 @@ typedef union smb_com_session_setup_andx { | |||
543 | } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */ | 572 | } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */ |
544 | } __attribute__((packed)) SESSION_SETUP_ANDX; | 573 | } __attribute__((packed)) SESSION_SETUP_ANDX; |
545 | 574 | ||
575 | /* format of NTLMv2 Response ie "case sensitive password" hash when NTLMv2 */ | ||
576 | |||
577 | struct ntlmssp2_name { | ||
578 | __le16 type; | ||
579 | __le16 length; | ||
580 | /* char name[length]; */ | ||
581 | } __attribute__((packed)); | ||
582 | |||
583 | struct ntlmv2_resp { | ||
584 | char ntlmv2_hash[CIFS_ENCPWD_SIZE]; | ||
585 | __le32 blob_signature; | ||
586 | __u32 reserved; | ||
587 | __le64 time; | ||
588 | __u64 client_chal; /* random */ | ||
589 | __u32 reserved2; | ||
590 | struct ntlmssp2_name names[1]; | ||
591 | /* array of name entries could follow ending in minimum 4 byte struct */ | ||
592 | } __attribute__((packed)); | ||
593 | |||
594 | |||
546 | #define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux" | 595 | #define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux" |
547 | 596 | ||
548 | /* Capabilities bits (for NTLM SessSetup request) */ | 597 | /* Capabilities bits (for NTLM SessSetup request) */ |
@@ -573,7 +622,9 @@ typedef struct smb_com_tconx_req { | |||
573 | } __attribute__((packed)) TCONX_REQ; | 622 | } __attribute__((packed)) TCONX_REQ; |
574 | 623 | ||
575 | typedef struct smb_com_tconx_rsp { | 624 | typedef struct smb_com_tconx_rsp { |
576 | struct smb_hdr hdr; /* wct = 3 *//* note that Win2000 has sent wct=7 in some cases on responses. Four unspecified words followed OptionalSupport */ | 625 | struct smb_hdr hdr; /* wct = 3 note that Win2000 has sent wct = 7 |
626 | in some cases on responses. Four unspecified | ||
627 | words followed OptionalSupport */ | ||
577 | __u8 AndXCommand; | 628 | __u8 AndXCommand; |
578 | __u8 AndXReserved; | 629 | __u8 AndXReserved; |
579 | __le16 AndXOffset; | 630 | __le16 AndXOffset; |
@@ -1323,6 +1374,9 @@ struct smb_t2_rsp { | |||
1323 | #define SMB_FILE_MAXIMUM_INFO 0x40d | 1374 | #define SMB_FILE_MAXIMUM_INFO 0x40d |
1324 | 1375 | ||
1325 | /* Find File infolevels */ | 1376 | /* Find File infolevels */ |
1377 | #define SMB_FIND_FILE_INFO_STANDARD 0x001 | ||
1378 | #define SMB_FIND_FILE_QUERY_EA_SIZE 0x002 | ||
1379 | #define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003 | ||
1326 | #define SMB_FIND_FILE_DIRECTORY_INFO 0x101 | 1380 | #define SMB_FIND_FILE_DIRECTORY_INFO 0x101 |
1327 | #define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102 | 1381 | #define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102 |
1328 | #define SMB_FIND_FILE_NAMES_INFO 0x103 | 1382 | #define SMB_FIND_FILE_NAMES_INFO 0x103 |
@@ -1844,13 +1898,13 @@ typedef struct { | |||
1844 | typedef struct { | 1898 | typedef struct { |
1845 | __le32 DeviceType; | 1899 | __le32 DeviceType; |
1846 | __le32 DeviceCharacteristics; | 1900 | __le32 DeviceCharacteristics; |
1847 | } __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info, level 0x104 */ | 1901 | } __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */ |
1848 | 1902 | ||
1849 | typedef struct { | 1903 | typedef struct { |
1850 | __le32 Attributes; | 1904 | __le32 Attributes; |
1851 | __le32 MaxPathNameComponentLength; | 1905 | __le32 MaxPathNameComponentLength; |
1852 | __le32 FileSystemNameLen; | 1906 | __le32 FileSystemNameLen; |
1853 | char FileSystemName[52]; /* do not really need to save this - so potentially get only subset of name */ | 1907 | char FileSystemName[52]; /* do not have to save this - get subset? */ |
1854 | } __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO; | 1908 | } __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO; |
1855 | 1909 | ||
1856 | /******************************************************************************/ | 1910 | /******************************************************************************/ |
@@ -1947,7 +2001,8 @@ typedef struct { | |||
1947 | 2001 | ||
1948 | struct file_allocation_info { | 2002 | struct file_allocation_info { |
1949 | __le64 AllocationSize; /* Note old Samba srvr rounds this up too much */ | 2003 | __le64 AllocationSize; /* Note old Samba srvr rounds this up too much */ |
1950 | } __attribute__((packed)); /* size used on disk, level 0x103 for set, 0x105 for query */ | 2004 | } __attribute__((packed)); /* size used on disk, for level 0x103 for set, |
2005 | 0x105 for query */ | ||
1951 | 2006 | ||
1952 | struct file_end_of_file_info { | 2007 | struct file_end_of_file_info { |
1953 | __le64 FileSize; /* offset to end of file */ | 2008 | __le64 FileSize; /* offset to end of file */ |
@@ -2054,7 +2109,7 @@ typedef struct { | |||
2054 | __le32 ExtFileAttributes; | 2109 | __le32 ExtFileAttributes; |
2055 | __le32 FileNameLength; | 2110 | __le32 FileNameLength; |
2056 | char FileName[1]; | 2111 | char FileName[1]; |
2057 | } __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF response data area */ | 2112 | } __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF resp data */ |
2058 | 2113 | ||
2059 | typedef struct { | 2114 | typedef struct { |
2060 | __le32 NextEntryOffset; | 2115 | __le32 NextEntryOffset; |
@@ -2069,7 +2124,7 @@ typedef struct { | |||
2069 | __le32 FileNameLength; | 2124 | __le32 FileNameLength; |
2070 | __le32 EaSize; /* length of the xattrs */ | 2125 | __le32 EaSize; /* length of the xattrs */ |
2071 | char FileName[1]; | 2126 | char FileName[1]; |
2072 | } __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 FF response data area */ | 2127 | } __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */ |
2073 | 2128 | ||
2074 | typedef struct { | 2129 | typedef struct { |
2075 | __le32 NextEntryOffset; | 2130 | __le32 NextEntryOffset; |
@@ -2086,7 +2141,7 @@ typedef struct { | |||
2086 | __le32 Reserved; | 2141 | __le32 Reserved; |
2087 | __u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/ | 2142 | __u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/ |
2088 | char FileName[1]; | 2143 | char FileName[1]; |
2089 | } __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF response data area */ | 2144 | } __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */ |
2090 | 2145 | ||
2091 | typedef struct { | 2146 | typedef struct { |
2092 | __le32 NextEntryOffset; | 2147 | __le32 NextEntryOffset; |
@@ -2104,7 +2159,22 @@ typedef struct { | |||
2104 | __u8 Reserved; | 2159 | __u8 Reserved; |
2105 | __u8 ShortName[12]; | 2160 | __u8 ShortName[12]; |
2106 | char FileName[1]; | 2161 | char FileName[1]; |
2107 | } __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FF response data area */ | 2162 | } __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */ |
2163 | |||
2164 | typedef struct { | ||
2165 | __u32 ResumeKey; | ||
2166 | __le16 CreationDate; /* SMB Date */ | ||
2167 | __le16 CreationTime; /* SMB Time */ | ||
2168 | __le16 LastAccessDate; | ||
2169 | __le16 LastAccessTime; | ||
2170 | __le16 LastWriteDate; | ||
2171 | __le16 LastWriteTime; | ||
2172 | __le32 DataSize; /* File Size (EOF) */ | ||
2173 | __le32 AllocationSize; | ||
2174 | __le16 Attributes; /* verify not u32 */ | ||
2175 | __u8 FileNameLength; | ||
2176 | char FileName[1]; | ||
2177 | } __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */ | ||
2108 | 2178 | ||
2109 | 2179 | ||
2110 | struct win_dev { | 2180 | struct win_dev { |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 310ea2f0e0bf..a5ddc62d6fe6 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -64,14 +64,12 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb); | |||
64 | extern void header_assemble(struct smb_hdr *, char /* command */ , | 64 | extern void header_assemble(struct smb_hdr *, char /* command */ , |
65 | const struct cifsTconInfo *, int /* length of | 65 | const struct cifsTconInfo *, int /* length of |
66 | fixed section (word count) in two byte units */); | 66 | fixed section (word count) in two byte units */); |
67 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
68 | extern int small_smb_init_no_tc(const int smb_cmd, const int wct, | 67 | extern int small_smb_init_no_tc(const int smb_cmd, const int wct, |
69 | struct cifsSesInfo *ses, | 68 | struct cifsSesInfo *ses, |
70 | void ** request_buf); | 69 | void ** request_buf); |
71 | extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, | 70 | extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, |
72 | const int stage, int * pNTLMv2_flg, | 71 | const int stage, |
73 | const struct nls_table *nls_cp); | 72 | const struct nls_table *nls_cp); |
74 | #endif | ||
75 | extern __u16 GetNextMid(struct TCP_Server_Info *server); | 73 | extern __u16 GetNextMid(struct TCP_Server_Info *server); |
76 | extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, | 74 | extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, |
77 | struct cifsTconInfo *); | 75 | struct cifsTconInfo *); |
@@ -285,8 +283,14 @@ extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, | |||
285 | extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key, | 283 | extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key, |
286 | __u32 expected_sequence_number); | 284 | __u32 expected_sequence_number); |
287 | extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass); | 285 | extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass); |
288 | extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *); | 286 | extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, |
289 | extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * ); | 287 | const struct nls_table *); |
288 | extern void CalcNTLMv2_response(const struct cifsSesInfo *, char * ); | ||
289 | extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *, | ||
290 | const struct nls_table *); | ||
291 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
292 | extern void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key); | ||
293 | #endif /* CIFS_WEAK_PW_HASH */ | ||
290 | extern int CIFSSMBCopy(int xid, | 294 | extern int CIFSSMBCopy(int xid, |
291 | struct cifsTconInfo *source_tcon, | 295 | struct cifsTconInfo *source_tcon, |
292 | const char *fromName, | 296 | const char *fromName, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 925881e00ff2..19678c575dfc 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -44,8 +44,11 @@ static struct { | |||
44 | int index; | 44 | int index; |
45 | char *name; | 45 | char *name; |
46 | } protocols[] = { | 46 | } protocols[] = { |
47 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
48 | {LANMAN_PROT, "\2LM1.2X002"}, | ||
49 | #endif /* weak password hashing for legacy clients */ | ||
47 | {CIFS_PROT, "\2NT LM 0.12"}, | 50 | {CIFS_PROT, "\2NT LM 0.12"}, |
48 | {CIFS_PROT, "\2POSIX 2"}, | 51 | {POSIX_PROT, "\2POSIX 2"}, |
49 | {BAD_PROT, "\2"} | 52 | {BAD_PROT, "\2"} |
50 | }; | 53 | }; |
51 | #else | 54 | #else |
@@ -53,11 +56,29 @@ static struct { | |||
53 | int index; | 56 | int index; |
54 | char *name; | 57 | char *name; |
55 | } protocols[] = { | 58 | } protocols[] = { |
59 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
60 | {LANMAN_PROT, "\2LM1.2X002"}, | ||
61 | #endif /* weak password hashing for legacy clients */ | ||
56 | {CIFS_PROT, "\2NT LM 0.12"}, | 62 | {CIFS_PROT, "\2NT LM 0.12"}, |
57 | {BAD_PROT, "\2"} | 63 | {BAD_PROT, "\2"} |
58 | }; | 64 | }; |
59 | #endif | 65 | #endif |
60 | 66 | ||
67 | /* define the number of elements in the cifs dialect array */ | ||
68 | #ifdef CONFIG_CIFS_POSIX | ||
69 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
70 | #define CIFS_NUM_PROT 3 | ||
71 | #else | ||
72 | #define CIFS_NUM_PROT 2 | ||
73 | #endif /* CIFS_WEAK_PW_HASH */ | ||
74 | #else /* not posix */ | ||
75 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
76 | #define CIFS_NUM_PROT 2 | ||
77 | #else | ||
78 | #define CIFS_NUM_PROT 1 | ||
79 | #endif /* CONFIG_CIFS_WEAK_PW_HASH */ | ||
80 | #endif /* CIFS_POSIX */ | ||
81 | |||
61 | 82 | ||
62 | /* Mark as invalid, all open files on tree connections since they | 83 | /* Mark as invalid, all open files on tree connections since they |
63 | were closed when session to server was lost */ | 84 | were closed when session to server was lost */ |
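CIFS_NUM_PROT above sizes the loop that the rewritten CIFSSMBNegotiate (later in this file) uses to pack the dialect table into DialectsArray: each entry is a \2-prefixed, NUL-terminated name, and ByteCount is the sum of the string lengths plus their terminating NULs. Below is a hedged user-space sketch of that packing for the non-POSIX, weak-hash table shown above; an ordinary char buffer stands in for the SMB frame, and the 16-byte strncpy bound mirrors the kernel loop.

#include <stdio.h>
#include <string.h>

static struct {
	int index;
	const char *name;
} protocols[] = {
	{0, "\2LM1.2X002"},	/* LANMAN_PROT */
	{1, "\2NT LM 0.12"},	/* CIFS_PROT */
};
#define NUM_PROT 2		/* mirrors CIFS_NUM_PROT for this table */

int main(void)
{
	char dialects[64] = { 0 };
	unsigned int count = 0;
	int i;

	for (i = 0; i < NUM_PROT; i++) {
		strncpy(dialects + count, protocols[i].name, 16);
		count += strlen(protocols[i].name) + 1;	/* keep the NUL */
	}
	printf("ByteCount would be %u\n", count);	/* 11 + 12 = 23 */
	return 0;
}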
@@ -188,7 +209,6 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
188 | return rc; | 209 | return rc; |
189 | } | 210 | } |
190 | 211 | ||
191 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
192 | int | 212 | int |
193 | small_smb_init_no_tc(const int smb_command, const int wct, | 213 | small_smb_init_no_tc(const int smb_command, const int wct, |
194 | struct cifsSesInfo *ses, void **request_buf) | 214 | struct cifsSesInfo *ses, void **request_buf) |
@@ -214,7 +234,6 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
214 | 234 | ||
215 | return rc; | 235 | return rc; |
216 | } | 236 | } |
217 | #endif /* CONFIG_CIFS_EXPERIMENTAL */ | ||
218 | 237 | ||
219 | /* If the return code is zero, this function must fill in request_buf pointer */ | 238 | /* If the return code is zero, this function must fill in request_buf pointer */ |
220 | static int | 239 | static int |
@@ -322,7 +341,8 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
322 | /* potential retries of smb operations it turns out we can determine */ | 341 | /* potential retries of smb operations it turns out we can determine */ |
323 | /* from the mid flags when the request buffer can be resent without */ | 342 | /* from the mid flags when the request buffer can be resent without */ |
324 | /* having to use a second distinct buffer for the response */ | 343 | /* having to use a second distinct buffer for the response */ |
325 | *response_buf = *request_buf; | 344 | if(response_buf) |
345 | *response_buf = *request_buf; | ||
326 | 346 | ||
327 | header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, | 347 | header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, |
328 | wct /*wct */ ); | 348 | wct /*wct */ ); |
@@ -373,8 +393,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
373 | NEGOTIATE_RSP *pSMBr; | 393 | NEGOTIATE_RSP *pSMBr; |
374 | int rc = 0; | 394 | int rc = 0; |
375 | int bytes_returned; | 395 | int bytes_returned; |
396 | int i; | ||
376 | struct TCP_Server_Info * server; | 397 | struct TCP_Server_Info * server; |
377 | u16 count; | 398 | u16 count; |
399 | unsigned int secFlags; | ||
378 | 400 | ||
379 | if(ses->server) | 401 | if(ses->server) |
380 | server = ses->server; | 402 | server = ses->server; |
@@ -386,101 +408,200 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
386 | (void **) &pSMB, (void **) &pSMBr); | 408 | (void **) &pSMB, (void **) &pSMBr); |
387 | if (rc) | 409 | if (rc) |
388 | return rc; | 410 | return rc; |
411 | |||
412 | /* if any of auth flags (ie not sign or seal) are overridden use them */ | ||
413 | if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) | ||
414 | secFlags = ses->overrideSecFlg; | ||
415 | else /* if override flags set only sign/seal, OR them with global auth flags */ | ||
416 | secFlags = extended_security | ses->overrideSecFlg; | ||
417 | |||
418 | cFYI(1,("secFlags 0x%x",secFlags)); | ||
419 | |||
389 | pSMB->hdr.Mid = GetNextMid(server); | 420 | pSMB->hdr.Mid = GetNextMid(server); |
390 | pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; | 421 | pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; |
391 | if (extended_security) | 422 | if((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) |
392 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; | 423 | pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; |
393 | 424 | ||
394 | count = strlen(protocols[0].name) + 1; | 425 | count = 0; |
395 | strncpy(pSMB->DialectsArray, protocols[0].name, 30); | 426 | for(i=0;i<CIFS_NUM_PROT;i++) { |
396 | /* null guaranteed to be at end of source and target buffers anyway */ | 427 | strncpy(pSMB->DialectsArray+count, protocols[i].name, 16); |
397 | 428 | count += strlen(protocols[i].name) + 1; | |
429 | /* null at end of source and target buffers anyway */ | ||
430 | } | ||
398 | pSMB->hdr.smb_buf_length += count; | 431 | pSMB->hdr.smb_buf_length += count; |
399 | pSMB->ByteCount = cpu_to_le16(count); | 432 | pSMB->ByteCount = cpu_to_le16(count); |
400 | 433 | ||
401 | rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, | 434 | rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, |
402 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 435 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
403 | if (rc == 0) { | 436 | if (rc != 0) |
404 | server->secMode = pSMBr->SecurityMode; | 437 | goto neg_err_exit; |
405 | if((server->secMode & SECMODE_USER) == 0) | 438 | |
406 | cFYI(1,("share mode security")); | 439 | cFYI(1,("Dialect: %d", pSMBr->DialectIndex)); |
407 | server->secType = NTLM; /* BB override default for | 440 | /* Check wct = 1 error case */ |
408 | NTLMv2 or kerberos v5 */ | 441 | if((pSMBr->hdr.WordCount < 13) || (pSMBr->DialectIndex == BAD_PROT)) { |
409 | /* one byte - no need to convert this or EncryptionKeyLen | 442 | /* core returns wct = 1, but we do not ask for core - otherwise |
410 | from little endian */ | 443 | small wct just comes when dialect index is -1 indicating we |
411 | server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); | 444 | could not negotiate a common dialect */ |
412 | /* probably no need to store and check maxvcs */ | 445 | rc = -EOPNOTSUPP; |
413 | server->maxBuf = | 446 | goto neg_err_exit; |
414 | min(le32_to_cpu(pSMBr->MaxBufferSize), | 447 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
448 | } else if((pSMBr->hdr.WordCount == 13) | ||
449 | && (pSMBr->DialectIndex == LANMAN_PROT)) { | ||
450 | struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr; | ||
451 | |||
452 | if((secFlags & CIFSSEC_MAY_LANMAN) || | ||
453 | (secFlags & CIFSSEC_MAY_PLNTXT)) | ||
454 | server->secType = LANMAN; | ||
455 | else { | ||
456 | cERROR(1, ("mount failed weak security disabled" | ||
457 | " in /proc/fs/cifs/SecurityFlags")); | ||
458 | rc = -EOPNOTSUPP; | ||
459 | goto neg_err_exit; | ||
460 | } | ||
461 | server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode); | ||
462 | server->maxReq = le16_to_cpu(rsp->MaxMpxCount); | ||
463 | server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), | ||
464 | (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); | ||
465 | GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey); | ||
466 | /* even though we do not use raw we might as well set this | ||
467 | accurately, in case we ever find a need for it */ | ||
468 | if((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { | ||
469 | server->maxRw = 0xFF00; | ||
470 | server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE; | ||
471 | } else { | ||
472 | server->maxRw = 0;/* we do not need to use raw anyway */ | ||
473 | server->capabilities = CAP_MPX_MODE; | ||
474 | } | ||
475 | server->timeZone = le16_to_cpu(rsp->ServerTimeZone); | ||
476 | |||
477 | /* BB get server time for time conversions and add | ||
478 | code to use it and timezone since this is not UTC */ | ||
479 | |||
480 | if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { | ||
481 | memcpy(server->cryptKey, rsp->EncryptionKey, | ||
482 | CIFS_CRYPTO_KEY_SIZE); | ||
483 | } else if (server->secMode & SECMODE_PW_ENCRYPT) { | ||
484 | rc = -EIO; /* need cryptkey unless plain text */ | ||
485 | goto neg_err_exit; | ||
486 | } | ||
487 | |||
488 | cFYI(1,("LANMAN negotiated")); | ||
489 | /* we will not end up setting signing flags - as no signing | ||
490 | was available in LANMAN and the server did not return the flags */ | ||
491 | goto signing_check; | ||
492 | #else /* weak security disabled */ | ||
493 | } else if(pSMBr->hdr.WordCount == 13) { | ||
494 | cERROR(1,("mount failed, cifs module not built " | ||
495 | "with CIFS_WEAK_PW_HASH support")); | ||
496 | rc = -EOPNOTSUPP; | ||
497 | #endif /* WEAK_PW_HASH */ | ||
498 | goto neg_err_exit; | ||
499 | } else if(pSMBr->hdr.WordCount != 17) { | ||
500 | /* unknown wct */ | ||
501 | rc = -EOPNOTSUPP; | ||
502 | goto neg_err_exit; | ||
503 | } | ||
504 | /* else wct == 17 NTLM */ | ||
505 | server->secMode = pSMBr->SecurityMode; | ||
506 | if((server->secMode & SECMODE_USER) == 0) | ||
507 | cFYI(1,("share mode security")); | ||
508 | |||
509 | if((server->secMode & SECMODE_PW_ENCRYPT) == 0) | ||
510 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
511 | if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) | ||
512 | #endif /* CIFS_WEAK_PW_HASH */ | ||
513 | cERROR(1,("Server requests plain text password" | ||
514 | " but client support disabled")); | ||
515 | |||
516 | if((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2) | ||
517 | server->secType = NTLMv2; | ||
518 | else if(secFlags & CIFSSEC_MAY_NTLM) | ||
519 | server->secType = NTLM; | ||
520 | else if(secFlags & CIFSSEC_MAY_NTLMV2) | ||
521 | server->secType = NTLMv2; | ||
522 | /* else krb5 ... any others ... */ | ||
523 | |||
524 | /* one byte, so no need to convert this or EncryptionKeyLen from | ||
525 | little endian */ | ||
526 | server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); | ||
527 | /* probably no need to store and check maxvcs */ | ||
528 | server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), | ||
415 | (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); | 529 | (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); |
416 | server->maxRw = le32_to_cpu(pSMBr->MaxRawSize); | 530 | server->maxRw = le32_to_cpu(pSMBr->MaxRawSize); |
417 | cFYI(0, ("Max buf = %d", ses->server->maxBuf)); | 531 | cFYI(0, ("Max buf = %d", ses->server->maxBuf)); |
418 | GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); | 532 | GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); |
419 | server->capabilities = le32_to_cpu(pSMBr->Capabilities); | 533 | server->capabilities = le32_to_cpu(pSMBr->Capabilities); |
420 | server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone); | 534 | server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone); |
421 | /* BB with UTC do we ever need to be using srvr timezone? */ | 535 | if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { |
422 | if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { | 536 | memcpy(server->cryptKey, pSMBr->u.EncryptionKey, |
423 | memcpy(server->cryptKey, pSMBr->u.EncryptionKey, | 537 | CIFS_CRYPTO_KEY_SIZE); |
424 | CIFS_CRYPTO_KEY_SIZE); | 538 | } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) |
425 | } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) | 539 | && (pSMBr->EncryptionKeyLength == 0)) { |
426 | && (pSMBr->EncryptionKeyLength == 0)) { | 540 | /* decode security blob */ |
427 | /* decode security blob */ | 541 | } else if (server->secMode & SECMODE_PW_ENCRYPT) { |
428 | } else | 542 | rc = -EIO; /* no crypt key only if plain text pwd */ |
429 | rc = -EIO; | 543 | goto neg_err_exit; |
544 | } | ||
430 | 545 | ||
431 | /* BB might be helpful to save off the domain of server here */ | 546 | /* BB might be helpful to save off the domain of server here */ |
432 | 547 | ||
433 | if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) && | 548 | if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) && |
434 | (server->capabilities & CAP_EXTENDED_SECURITY)) { | 549 | (server->capabilities & CAP_EXTENDED_SECURITY)) { |
435 | count = pSMBr->ByteCount; | 550 | count = pSMBr->ByteCount; |
436 | if (count < 16) | 551 | if (count < 16) |
437 | rc = -EIO; | 552 | rc = -EIO; |
438 | else if (count == 16) { | 553 | else if (count == 16) { |
439 | server->secType = RawNTLMSSP; | 554 | server->secType = RawNTLMSSP; |
440 | if (server->socketUseCount.counter > 1) { | 555 | if (server->socketUseCount.counter > 1) { |
441 | if (memcmp | 556 | if (memcmp(server->server_GUID, |
442 | (server->server_GUID, | 557 | pSMBr->u.extended_response. |
443 | pSMBr->u.extended_response. | 558 | GUID, 16) != 0) { |
444 | GUID, 16) != 0) { | 559 | cFYI(1, ("server UID changed")); |
445 | cFYI(1, ("server UID changed")); | ||
446 | memcpy(server-> | ||
447 | server_GUID, | ||
448 | pSMBr->u. | ||
449 | extended_response. | ||
450 | GUID, 16); | ||
451 | } | ||
452 | } else | ||
453 | memcpy(server->server_GUID, | 560 | memcpy(server->server_GUID, |
454 | pSMBr->u.extended_response. | 561 | pSMBr->u.extended_response.GUID, |
455 | GUID, 16); | 562 | 16); |
456 | } else { | ||
457 | rc = decode_negTokenInit(pSMBr->u. | ||
458 | extended_response. | ||
459 | SecurityBlob, | ||
460 | count - 16, | ||
461 | &server->secType); | ||
462 | if(rc == 1) { | ||
463 | /* BB Need to fill struct for sessetup here */ | ||
464 | rc = -EOPNOTSUPP; | ||
465 | } else { | ||
466 | rc = -EINVAL; | ||
467 | } | 563 | } |
564 | } else | ||
565 | memcpy(server->server_GUID, | ||
566 | pSMBr->u.extended_response.GUID, 16); | ||
567 | } else { | ||
568 | rc = decode_negTokenInit(pSMBr->u.extended_response. | ||
569 | SecurityBlob, | ||
570 | count - 16, | ||
571 | &server->secType); | ||
572 | if(rc == 1) { | ||
573 | /* BB Need to fill struct for sessetup here */ | ||
574 | rc = -EOPNOTSUPP; | ||
575 | } else { | ||
576 | rc = -EINVAL; | ||
468 | } | 577 | } |
469 | } else | ||
470 | server->capabilities &= ~CAP_EXTENDED_SECURITY; | ||
471 | if(sign_CIFS_PDUs == FALSE) { | ||
472 | if(server->secMode & SECMODE_SIGN_REQUIRED) | ||
473 | cERROR(1, | ||
474 | ("Server requires /proc/fs/cifs/PacketSigningEnabled")); | ||
475 | server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | ||
476 | } else if(sign_CIFS_PDUs == 1) { | ||
477 | if((server->secMode & SECMODE_SIGN_REQUIRED) == 0) | ||
478 | server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | ||
479 | } | 578 | } |
480 | 579 | } else | |
580 | server->capabilities &= ~CAP_EXTENDED_SECURITY; | ||
581 | |||
582 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
583 | signing_check: | ||
584 | #endif | ||
585 | if(sign_CIFS_PDUs == FALSE) { | ||
586 | if(server->secMode & SECMODE_SIGN_REQUIRED) | ||
587 | cERROR(1,("Server requires " | ||
588 | "/proc/fs/cifs/PacketSigningEnabled to be on")); | ||
589 | server->secMode &= | ||
590 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | ||
591 | } else if(sign_CIFS_PDUs == 1) { | ||
592 | if((server->secMode & SECMODE_SIGN_REQUIRED) == 0) | ||
593 | server->secMode &= | ||
594 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | ||
595 | } else if(sign_CIFS_PDUs == 2) { | ||
596 | if((server->secMode & | ||
597 | (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { | ||
598 | cERROR(1,("signing required but server lacks support")); | ||
599 | } | ||
481 | } | 600 | } |
482 | 601 | neg_err_exit: | |
483 | cifs_buf_release(pSMB); | 602 | cifs_buf_release(pSMB); |
603 | |||
604 | cFYI(1,("negprot rc %d",rc)); | ||
484 | return rc; | 605 | return rc; |
485 | } | 606 | } |
486 | 607 | ||
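The signing_check block above implements a three-valued /proc/fs/cifs/PacketSigningEnabled policy: 0 strips both signing bits even if the server requires signing (after logging an error), 1 keeps signing only when the server requires it, and 2 merely warns when the server offers no signing support. A stand-alone sketch of that decision follows; the SECMODE_SIGN_ENABLED/SECMODE_SIGN_REQUIRED values of 0x04/0x08 are an assumption here, not taken from this hunk.

#include <stdio.h>
#include <stdint.h>

#define SECMODE_SIGN_ENABLED  0x04	/* assumed values */
#define SECMODE_SIGN_REQUIRED 0x08

static uint8_t apply_signing_policy(uint8_t sec_mode, int sign_pdus)
{
	if (sign_pdus == 0) {
		if (sec_mode & SECMODE_SIGN_REQUIRED)
			printf("server requires signing, disabled locally\n");
		sec_mode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
	} else if (sign_pdus == 1) {
		/* sign only if the server insists */
		if ((sec_mode & SECMODE_SIGN_REQUIRED) == 0)
			sec_mode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
	} else if (sign_pdus == 2) {
		if ((sec_mode & (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0)
			printf("signing required but server lacks support\n");
	}
	return sec_mode;
}

int main(void)
{
	printf("policy 1, server optional -> secMode 0x%x\n",
	       apply_signing_policy(SECMODE_SIGN_ENABLED, 1));
	printf("policy 1, server required -> secMode 0x%x\n",
	       apply_signing_policy(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED, 1));
	return 0;
}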
@@ -2239,7 +2360,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, | |||
2239 | } | 2360 | } |
2240 | symlinkinfo[buflen] = 0; /* just in case so the caller | 2361 | symlinkinfo[buflen] = 0; /* just in case so the caller |
2241 | does not go off the end of the buffer */ | 2362 | does not go off the end of the buffer */ |
2242 | cFYI(1,("readlink result - %s ",symlinkinfo)); | 2363 | cFYI(1,("readlink result - %s",symlinkinfo)); |
2243 | } | 2364 | } |
2244 | } | 2365 | } |
2245 | qreparse_out: | 2366 | qreparse_out: |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index bae1479318d1..876eb9ef85fe 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -49,8 +49,6 @@ | |||
49 | 49 | ||
50 | static DECLARE_COMPLETION(cifsd_complete); | 50 | static DECLARE_COMPLETION(cifsd_complete); |
51 | 51 | ||
52 | extern void SMBencrypt(unsigned char *passwd, unsigned char *c8, | ||
53 | unsigned char *p24); | ||
54 | extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, | 52 | extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, |
55 | unsigned char *p24); | 53 | unsigned char *p24); |
56 | 54 | ||
@@ -70,6 +68,7 @@ struct smb_vol { | |||
70 | gid_t linux_gid; | 68 | gid_t linux_gid; |
71 | mode_t file_mode; | 69 | mode_t file_mode; |
72 | mode_t dir_mode; | 70 | mode_t dir_mode; |
71 | unsigned secFlg; | ||
73 | unsigned rw:1; | 72 | unsigned rw:1; |
74 | unsigned retry:1; | 73 | unsigned retry:1; |
75 | unsigned intr:1; | 74 | unsigned intr:1; |
@@ -83,12 +82,7 @@ struct smb_vol { | |||
83 | unsigned remap:1; /* set to remap seven reserved chars in filenames */ | 82 | unsigned remap:1; /* set to remap seven reserved chars in filenames */ |
84 | unsigned posix_paths:1; /* unset to not ask for posix pathnames. */ | 83 | unsigned posix_paths:1; /* unset to not ask for posix pathnames. */ |
85 | unsigned sfu_emul:1; | 84 | unsigned sfu_emul:1; |
86 | unsigned krb5:1; | ||
87 | unsigned ntlm:1; | ||
88 | unsigned ntlmv2:1; | ||
89 | unsigned nullauth:1; /* attempt to authenticate with null user */ | 85 | unsigned nullauth:1; /* attempt to authenticate with null user */ |
90 | unsigned sign:1; | ||
91 | unsigned seal:1; /* encrypt */ | ||
92 | unsigned nocase; /* request case insensitive filenames */ | 86 | unsigned nocase; /* request case insensitive filenames */ |
93 | unsigned nobrl; /* disable sending byte range locks to srv */ | 87 | unsigned nobrl; /* disable sending byte range locks to srv */ |
94 | unsigned int rsize; | 88 | unsigned int rsize; |
@@ -369,21 +363,21 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
369 | continue; | 363 | continue; |
370 | if (bigbuf == NULL) { | 364 | if (bigbuf == NULL) { |
371 | bigbuf = cifs_buf_get(); | 365 | bigbuf = cifs_buf_get(); |
372 | if(bigbuf == NULL) { | 366 | if (!bigbuf) { |
373 | cERROR(1,("No memory for large SMB response")); | 367 | cERROR(1, ("No memory for large SMB response")); |
374 | msleep(3000); | 368 | msleep(3000); |
375 | /* retry will check if exiting */ | 369 | /* retry will check if exiting */ |
376 | continue; | 370 | continue; |
377 | } | 371 | } |
378 | } else if(isLargeBuf) { | 372 | } else if (isLargeBuf) { |
379 | /* we are reusing a dirtry large buf, clear its start */ | 373 | /* we are reusing a dirty large buf, clear its start */ |
380 | memset(bigbuf, 0, sizeof (struct smb_hdr)); | 374 | memset(bigbuf, 0, sizeof (struct smb_hdr)); |
381 | } | 375 | } |
382 | 376 | ||
383 | if (smallbuf == NULL) { | 377 | if (smallbuf == NULL) { |
384 | smallbuf = cifs_small_buf_get(); | 378 | smallbuf = cifs_small_buf_get(); |
385 | if(smallbuf == NULL) { | 379 | if (!smallbuf) { |
386 | cERROR(1,("No memory for SMB response")); | 380 | cERROR(1, ("No memory for SMB response")); |
387 | msleep(1000); | 381 | msleep(1000); |
388 | /* retry will check if exiting */ | 382 | /* retry will check if exiting */ |
389 | continue; | 383 | continue; |
@@ -403,12 +397,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
403 | kernel_recvmsg(csocket, &smb_msg, | 397 | kernel_recvmsg(csocket, &smb_msg, |
404 | &iov, 1, 4, 0 /* BB see socket.h flags */); | 398 | &iov, 1, 4, 0 /* BB see socket.h flags */); |
405 | 399 | ||
406 | if(server->tcpStatus == CifsExiting) { | 400 | if (server->tcpStatus == CifsExiting) { |
407 | break; | 401 | break; |
408 | } else if (server->tcpStatus == CifsNeedReconnect) { | 402 | } else if (server->tcpStatus == CifsNeedReconnect) { |
409 | cFYI(1,("Reconnect after server stopped responding")); | 403 | cFYI(1, ("Reconnect after server stopped responding")); |
410 | cifs_reconnect(server); | 404 | cifs_reconnect(server); |
411 | cFYI(1,("call to reconnect done")); | 405 | cFYI(1, ("call to reconnect done")); |
412 | csocket = server->ssocket; | 406 | csocket = server->ssocket; |
413 | continue; | 407 | continue; |
414 | } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { | 408 | } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { |
@@ -417,15 +411,15 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
417 | tcpStatus CifsNeedReconnect if server hung */ | 411 | tcpStatus CifsNeedReconnect if server hung */ |
418 | continue; | 412 | continue; |
419 | } else if (length <= 0) { | 413 | } else if (length <= 0) { |
420 | if(server->tcpStatus == CifsNew) { | 414 | if (server->tcpStatus == CifsNew) { |
421 | cFYI(1,("tcp session abend after SMBnegprot")); | 415 | cFYI(1, ("tcp session abend after SMBnegprot")); |
422 | /* some servers kill the TCP session rather than | 416 | /* some servers kill the TCP session rather than |
423 | returning an SMB negprot error, in which | 417 | returning an SMB negprot error, in which |
424 | case reconnecting here is not going to help, | 418 | case reconnecting here is not going to help, |
425 | and so simply return error to mount */ | 419 | and so simply return error to mount */ |
426 | break; | 420 | break; |
427 | } | 421 | } |
428 | if(length == -EINTR) { | 422 | if (!try_to_freeze() && (length == -EINTR)) { |
429 | cFYI(1,("cifsd thread killed")); | 423 | cFYI(1,("cifsd thread killed")); |
430 | break; | 424 | break; |
431 | } | 425 | } |
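Note on the hunk above: previously any -EINTR from kernel_recvmsg() made cifsd assume it had been signalled and exit the demultiplex loop, which also meant the thread never called into the freezer. The new test runs try_to_freeze() first, so an -EINTR that was only the freezer's fake signal results in a freeze/thaw and another pass around the loop, and only a genuine signal (for example at module unload) still terminates the thread. A minimal restatement of the intended control flow (illustrative only, not additional patch content):

	if (!try_to_freeze() && (length == -EINTR)) {
		cFYI(1, ("cifsd thread killed"));
		break;			/* real signal: exit the thread */
	}
	/* froze and thawed, or some other receive error: retry the loop */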
@@ -585,9 +579,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
585 | /* merge response - fix up 1st*/ | 579 | /* merge response - fix up 1st*/ |
586 | if(coalesce_t2(smb_buffer, | 580 | if(coalesce_t2(smb_buffer, |
587 | mid_entry->resp_buf)) { | 581 | mid_entry->resp_buf)) { |
582 | mid_entry->multiRsp = 1; | ||
588 | break; | 583 | break; |
589 | } else { | 584 | } else { |
590 | /* all parts received */ | 585 | /* all parts received */ |
586 | mid_entry->multiEnd = 1; | ||
591 | goto multi_t2_fnd; | 587 | goto multi_t2_fnd; |
592 | } | 588 | } |
593 | } else { | 589 | } else { |
@@ -632,9 +628,14 @@ multi_t2_fnd: | |||
632 | wake_up_process(task_to_wake); | 628 | wake_up_process(task_to_wake); |
633 | } else if ((is_valid_oplock_break(smb_buffer, server) == FALSE) | 629 | } else if ((is_valid_oplock_break(smb_buffer, server) == FALSE) |
634 | && (isMultiRsp == FALSE)) { | 630 | && (isMultiRsp == FALSE)) { |
635 | cERROR(1, ("No task to wake, unknown frame rcvd!")); | 631 | cERROR(1, ("No task to wake, unknown frame rcvd! NumMids %d", midCount.counter)); |
636 | cifs_dump_mem("Received Data is: ",(char *)smb_buffer, | 632 | cifs_dump_mem("Received Data is: ",(char *)smb_buffer, |
637 | sizeof(struct smb_hdr)); | 633 | sizeof(struct smb_hdr)); |
634 | #ifdef CONFIG_CIFS_DEBUG2 | ||
635 | cifs_dump_detail(smb_buffer); | ||
636 | cifs_dump_mids(server); | ||
637 | #endif /* CIFS_DEBUG2 */ | ||
638 | |||
638 | } | 639 | } |
639 | } /* end while !EXITING */ | 640 | } /* end while !EXITING */ |
640 | 641 | ||
@@ -784,7 +785,6 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol) | |||
784 | 785 | ||
785 | /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ | 786 | /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ |
786 | vol->rw = TRUE; | 787 | vol->rw = TRUE; |
787 | vol->ntlm = TRUE; | ||
788 | /* default is always to request posix paths. */ | 788 | /* default is always to request posix paths. */ |
789 | vol->posix_paths = 1; | 789 | vol->posix_paths = 1; |
790 | 790 | ||
@@ -915,30 +915,35 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol) | |||
915 | cERROR(1,("no security value specified")); | 915 | cERROR(1,("no security value specified")); |
916 | continue; | 916 | continue; |
917 | } else if (strnicmp(value, "krb5i", 5) == 0) { | 917 | } else if (strnicmp(value, "krb5i", 5) == 0) { |
918 | vol->sign = 1; | 918 | vol->secFlg |= CIFSSEC_MAY_KRB5 | |
919 | vol->krb5 = 1; | 919 | CIFSSEC_MUST_SIGN; |
920 | } else if (strnicmp(value, "krb5p", 5) == 0) { | 920 | } else if (strnicmp(value, "krb5p", 5) == 0) { |
921 | /* vol->seal = 1; | 921 | /* vol->secFlg |= CIFSSEC_MUST_SEAL | |
922 | vol->krb5 = 1; */ | 922 | CIFSSEC_MAY_KRB5; */ |
923 | cERROR(1,("Krb5 cifs privacy not supported")); | 923 | cERROR(1,("Krb5 cifs privacy not supported")); |
924 | return 1; | 924 | return 1; |
925 | } else if (strnicmp(value, "krb5", 4) == 0) { | 925 | } else if (strnicmp(value, "krb5", 4) == 0) { |
926 | vol->krb5 = 1; | 926 | vol->secFlg |= CIFSSEC_MAY_KRB5; |
927 | } else if (strnicmp(value, "ntlmv2i", 7) == 0) { | 927 | } else if (strnicmp(value, "ntlmv2i", 7) == 0) { |
928 | vol->ntlmv2 = 1; | 928 | vol->secFlg |= CIFSSEC_MAY_NTLMV2 | |
929 | vol->sign = 1; | 929 | CIFSSEC_MUST_SIGN; |
930 | } else if (strnicmp(value, "ntlmv2", 6) == 0) { | 930 | } else if (strnicmp(value, "ntlmv2", 6) == 0) { |
931 | vol->ntlmv2 = 1; | 931 | vol->secFlg |= CIFSSEC_MAY_NTLMV2; |
932 | } else if (strnicmp(value, "ntlmi", 5) == 0) { | 932 | } else if (strnicmp(value, "ntlmi", 5) == 0) { |
933 | vol->ntlm = 1; | 933 | vol->secFlg |= CIFSSEC_MAY_NTLM | |
934 | vol->sign = 1; | 934 | CIFSSEC_MUST_SIGN; |
935 | } else if (strnicmp(value, "ntlm", 4) == 0) { | 935 | } else if (strnicmp(value, "ntlm", 4) == 0) { |
936 | /* ntlm is default so can be turned off too */ | 936 | /* ntlm is default so can be turned off too */ |
937 | vol->ntlm = 1; | 937 | vol->secFlg |= CIFSSEC_MAY_NTLM; |
938 | } else if (strnicmp(value, "nontlm", 6) == 0) { | 938 | } else if (strnicmp(value, "nontlm", 6) == 0) { |
939 | vol->ntlm = 0; | 939 | /* BB is there a better way to do this? */ |
940 | vol->secFlg |= CIFSSEC_MAY_NTLMV2; | ||
941 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
942 | } else if (strnicmp(value, "lanman", 6) == 0) { | ||
943 | vol->secFlg |= CIFSSEC_MAY_LANMAN; | ||
944 | #endif | ||
940 | } else if (strnicmp(value, "none", 4) == 0) { | 945 | } else if (strnicmp(value, "none", 4) == 0) { |
941 | vol->nullauth = 1; | 946 | vol->nullauth = 1; |
942 | } else { | 947 | } else { |
943 | cERROR(1,("bad security option: %s", value)); | 948 | cERROR(1,("bad security option: %s", value)); |
944 | return 1; | 949 | return 1; |
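Reviewer note on the security-option hunk above: the separate krb5/ntlm/ntlmv2/sign/seal bitfields dropped from struct smb_vol earlier in this diff are folded into a single secFlg word, where CIFSSEC_MAY_* bits mark mechanisms the user permits and CIFSSEC_MUST_* bits mark requirements the mount should fail without. The flag values themselves live in a header that is not part of this hunk. A small sketch of how a caller might interrogate the combined word; the two helper functions are illustrative and not part of the patch, only the flag names and the final assignment come from the hunk:

	static int cifs_must_sign(unsigned int secFlg)
	{
		return (secFlg & CIFSSEC_MUST_SIGN) != 0;
	}

	static int cifs_may_use_krb5(unsigned int secFlg)
	{
		return (secFlg & CIFSSEC_MAY_KRB5) != 0;
	}

	/* "sec=krb5i" now sets both bits in one assignment: */
	vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;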
@@ -976,7 +981,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol) | |||
976 | } | 981 | } |
977 | /* BB are there cases in which a comma can be valid in | 982 | /* BB are there cases in which a comma can be valid in |
978 | a domain name and need special handling? */ | 983 | a domain name and need special handling? */ |
979 | if (strnlen(value, 65) < 65) { | 984 | if (strnlen(value, 256) < 256) { |
980 | vol->domainname = value; | 985 | vol->domainname = value; |
981 | cFYI(1, ("Domain name set")); | 986 | cFYI(1, ("Domain name set")); |
982 | } else { | 987 | } else { |
@@ -1168,6 +1173,10 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol) | |||
1168 | vol->no_psx_acl = 0; | 1173 | vol->no_psx_acl = 0; |
1169 | } else if (strnicmp(data, "noacl",5) == 0) { | 1174 | } else if (strnicmp(data, "noacl",5) == 0) { |
1170 | vol->no_psx_acl = 1; | 1175 | vol->no_psx_acl = 1; |
1176 | } else if (strnicmp(data, "sign",4) == 0) { | ||
1177 | vol->secFlg |= CIFSSEC_MUST_SIGN; | ||
1178 | /* } else if (strnicmp(data, "seal",4) == 0) { | ||
1179 | vol->secFlg |= CIFSSEC_MUST_SEAL; */ | ||
1171 | } else if (strnicmp(data, "direct",6) == 0) { | 1180 | } else if (strnicmp(data, "direct",6) == 0) { |
1172 | vol->direct_io = 1; | 1181 | vol->direct_io = 1; |
1173 | } else if (strnicmp(data, "forcedirectio",13) == 0) { | 1182 | } else if (strnicmp(data, "forcedirectio",13) == 0) { |
@@ -1762,11 +1771,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1762 | if (volume_info.username) | 1771 | if (volume_info.username) |
1763 | strncpy(pSesInfo->userName, | 1772 | strncpy(pSesInfo->userName, |
1764 | volume_info.username,MAX_USERNAME_SIZE); | 1773 | volume_info.username,MAX_USERNAME_SIZE); |
1765 | if (volume_info.domainname) | 1774 | if (volume_info.domainname) { |
1766 | strncpy(pSesInfo->domainName, | 1775 | int len = strlen(volume_info.domainname); |
1767 | volume_info.domainname,MAX_USERNAME_SIZE); | 1776 | pSesInfo->domainName = |
1777 | kmalloc(len + 1, GFP_KERNEL); | ||
1778 | if(pSesInfo->domainName) | ||
1779 | strcpy(pSesInfo->domainName, | ||
1780 | volume_info.domainname); | ||
1781 | } | ||
1768 | pSesInfo->linux_uid = volume_info.linux_uid; | 1782 | pSesInfo->linux_uid = volume_info.linux_uid; |
1783 | pSesInfo->overrideSecFlg = volume_info.secFlg; | ||
1769 | down(&pSesInfo->sesSem); | 1784 | down(&pSesInfo->sesSem); |
1785 | /* BB FIXME need to pass vol->secFlgs BB */ | ||
1770 | rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls); | 1786 | rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls); |
1771 | up(&pSesInfo->sesSem); | 1787 | up(&pSesInfo->sesSem); |
1772 | if(!rc) | 1788 | if(!rc) |
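The hunk above stops copying the domain into the fixed-size domainName array and instead allocates pSesInfo->domainName to the exact length needed, which is presumably what allows the strnlen limit on the mount option to grow from 65 to 256 earlier in this file. A shorter equivalent of the open-coded kmalloc/strcpy pair, assuming kstrdup() (available in this kernel series) is acceptable in this path:

	if (volume_info.domainname) {
		pSesInfo->domainName =
			kstrdup(volume_info.domainname, GFP_KERNEL);
		if (pSesInfo->domainName == NULL)
			cFYI(1, ("unable to allocate domain name"));
	}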
@@ -1980,7 +1996,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1980 | 1996 | ||
1981 | static int | 1997 | static int |
1982 | CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | 1998 | CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, |
1983 | char session_key[CIFS_SESSION_KEY_SIZE], | 1999 | char session_key[CIFS_SESS_KEY_SIZE], |
1984 | const struct nls_table *nls_codepage) | 2000 | const struct nls_table *nls_codepage) |
1985 | { | 2001 | { |
1986 | struct smb_hdr *smb_buffer; | 2002 | struct smb_hdr *smb_buffer; |
@@ -2038,15 +2054,15 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2038 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | 2054 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); |
2039 | 2055 | ||
2040 | pSMB->req_no_secext.CaseInsensitivePasswordLength = | 2056 | pSMB->req_no_secext.CaseInsensitivePasswordLength = |
2041 | cpu_to_le16(CIFS_SESSION_KEY_SIZE); | 2057 | cpu_to_le16(CIFS_SESS_KEY_SIZE); |
2042 | 2058 | ||
2043 | pSMB->req_no_secext.CaseSensitivePasswordLength = | 2059 | pSMB->req_no_secext.CaseSensitivePasswordLength = |
2044 | cpu_to_le16(CIFS_SESSION_KEY_SIZE); | 2060 | cpu_to_le16(CIFS_SESS_KEY_SIZE); |
2045 | bcc_ptr = pByteArea(smb_buffer); | 2061 | bcc_ptr = pByteArea(smb_buffer); |
2046 | memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE); | 2062 | memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE); |
2047 | bcc_ptr += CIFS_SESSION_KEY_SIZE; | 2063 | bcc_ptr += CIFS_SESS_KEY_SIZE; |
2048 | memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE); | 2064 | memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE); |
2049 | bcc_ptr += CIFS_SESSION_KEY_SIZE; | 2065 | bcc_ptr += CIFS_SESS_KEY_SIZE; |
2050 | 2066 | ||
2051 | if (ses->capabilities & CAP_UNICODE) { | 2067 | if (ses->capabilities & CAP_UNICODE) { |
2052 | if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */ | 2068 | if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */ |
@@ -2054,7 +2070,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2054 | bcc_ptr++; | 2070 | bcc_ptr++; |
2055 | } | 2071 | } |
2056 | if(user == NULL) | 2072 | if(user == NULL) |
2057 | bytes_returned = 0; /* skill null user */ | 2073 | bytes_returned = 0; /* skip null user */ |
2058 | else | 2074 | else |
2059 | bytes_returned = | 2075 | bytes_returned = |
2060 | cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, | 2076 | cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, |
@@ -2162,8 +2178,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2162 | if (remaining_words > 0) { | 2178 | if (remaining_words > 0) { |
2163 | len = UniStrnlen((wchar_t *)bcc_ptr, | 2179 | len = UniStrnlen((wchar_t *)bcc_ptr, |
2164 | remaining_words-1); | 2180 | remaining_words-1); |
2165 | if(ses->serverNOS) | 2181 | kfree(ses->serverNOS); |
2166 | kfree(ses->serverNOS); | ||
2167 | ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL); | 2182 | ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL); |
2168 | if(ses->serverNOS == NULL) | 2183 | if(ses->serverNOS == NULL) |
2169 | goto sesssetup_nomem; | 2184 | goto sesssetup_nomem; |
@@ -2203,12 +2218,10 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2203 | /* if these kcallocs fail not much we | 2218 | /* if these kcallocs fail not much we |
2204 | can do, but better to not fail the | 2219 | can do, but better to not fail the |
2205 | sesssetup itself */ | 2220 | sesssetup itself */ |
2206 | if(ses->serverDomain) | 2221 | kfree(ses->serverDomain); |
2207 | kfree(ses->serverDomain); | ||
2208 | ses->serverDomain = | 2222 | ses->serverDomain = |
2209 | kzalloc(2, GFP_KERNEL); | 2223 | kzalloc(2, GFP_KERNEL); |
2210 | if(ses->serverNOS) | 2224 | kfree(ses->serverNOS); |
2211 | kfree(ses->serverNOS); | ||
2212 | ses->serverNOS = | 2225 | ses->serverNOS = |
2213 | kzalloc(2, GFP_KERNEL); | 2226 | kzalloc(2, GFP_KERNEL); |
2214 | } | 2227 | } |
@@ -2217,8 +2230,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2217 | if (((long) bcc_ptr + len) - (long) | 2230 | if (((long) bcc_ptr + len) - (long) |
2218 | pByteArea(smb_buffer_response) | 2231 | pByteArea(smb_buffer_response) |
2219 | <= BCC(smb_buffer_response)) { | 2232 | <= BCC(smb_buffer_response)) { |
2220 | if(ses->serverOS) | 2233 | kfree(ses->serverOS); |
2221 | kfree(ses->serverOS); | ||
2222 | ses->serverOS = kzalloc(len + 1,GFP_KERNEL); | 2234 | ses->serverOS = kzalloc(len + 1,GFP_KERNEL); |
2223 | if(ses->serverOS == NULL) | 2235 | if(ses->serverOS == NULL) |
2224 | goto sesssetup_nomem; | 2236 | goto sesssetup_nomem; |
@@ -2229,8 +2241,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2229 | bcc_ptr++; | 2241 | bcc_ptr++; |
2230 | 2242 | ||
2231 | len = strnlen(bcc_ptr, 1024); | 2243 | len = strnlen(bcc_ptr, 1024); |
2232 | if(ses->serverNOS) | 2244 | kfree(ses->serverNOS); |
2233 | kfree(ses->serverNOS); | ||
2234 | ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); | 2245 | ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); |
2235 | if(ses->serverNOS == NULL) | 2246 | if(ses->serverNOS == NULL) |
2236 | goto sesssetup_nomem; | 2247 | goto sesssetup_nomem; |
@@ -2274,292 +2285,6 @@ sesssetup_nomem: /* do not return an error on nomem for the info strings, | |||
2274 | } | 2285 | } |
2275 | 2286 | ||
2276 | static int | 2287 | static int |
2277 | CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses, | ||
2278 | char *SecurityBlob,int SecurityBlobLength, | ||
2279 | const struct nls_table *nls_codepage) | ||
2280 | { | ||
2281 | struct smb_hdr *smb_buffer; | ||
2282 | struct smb_hdr *smb_buffer_response; | ||
2283 | SESSION_SETUP_ANDX *pSMB; | ||
2284 | SESSION_SETUP_ANDX *pSMBr; | ||
2285 | char *bcc_ptr; | ||
2286 | char *user; | ||
2287 | char *domain; | ||
2288 | int rc = 0; | ||
2289 | int remaining_words = 0; | ||
2290 | int bytes_returned = 0; | ||
2291 | int len; | ||
2292 | __u32 capabilities; | ||
2293 | __u16 count; | ||
2294 | |||
2295 | cFYI(1, ("In spnego sesssetup ")); | ||
2296 | if(ses == NULL) | ||
2297 | return -EINVAL; | ||
2298 | user = ses->userName; | ||
2299 | domain = ses->domainName; | ||
2300 | |||
2301 | smb_buffer = cifs_buf_get(); | ||
2302 | if (smb_buffer == NULL) { | ||
2303 | return -ENOMEM; | ||
2304 | } | ||
2305 | smb_buffer_response = smb_buffer; | ||
2306 | pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer; | ||
2307 | |||
2308 | /* send SMBsessionSetup here */ | ||
2309 | header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX, | ||
2310 | NULL /* no tCon exists yet */ , 12 /* wct */ ); | ||
2311 | |||
2312 | smb_buffer->Mid = GetNextMid(ses->server); | ||
2313 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | ||
2314 | pSMB->req.AndXCommand = 0xFF; | ||
2315 | if(ses->server->maxBuf > 64*1024) | ||
2316 | ses->server->maxBuf = (64*1023); | ||
2317 | pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf); | ||
2318 | pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); | ||
2319 | |||
2320 | if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | ||
2321 | smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | ||
2322 | |||
2323 | capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | | ||
2324 | CAP_EXTENDED_SECURITY; | ||
2325 | if (ses->capabilities & CAP_UNICODE) { | ||
2326 | smb_buffer->Flags2 |= SMBFLG2_UNICODE; | ||
2327 | capabilities |= CAP_UNICODE; | ||
2328 | } | ||
2329 | if (ses->capabilities & CAP_STATUS32) { | ||
2330 | smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; | ||
2331 | capabilities |= CAP_STATUS32; | ||
2332 | } | ||
2333 | if (ses->capabilities & CAP_DFS) { | ||
2334 | smb_buffer->Flags2 |= SMBFLG2_DFS; | ||
2335 | capabilities |= CAP_DFS; | ||
2336 | } | ||
2337 | pSMB->req.Capabilities = cpu_to_le32(capabilities); | ||
2338 | |||
2339 | pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength); | ||
2340 | bcc_ptr = pByteArea(smb_buffer); | ||
2341 | memcpy(bcc_ptr, SecurityBlob, SecurityBlobLength); | ||
2342 | bcc_ptr += SecurityBlobLength; | ||
2343 | |||
2344 | if (ses->capabilities & CAP_UNICODE) { | ||
2345 | if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode strings */ | ||
2346 | *bcc_ptr = 0; | ||
2347 | bcc_ptr++; | ||
2348 | } | ||
2349 | bytes_returned = | ||
2350 | cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, nls_codepage); | ||
2351 | bcc_ptr += 2 * bytes_returned; /* convert num of 16 bit words to bytes */ | ||
2352 | bcc_ptr += 2; /* trailing null */ | ||
2353 | if (domain == NULL) | ||
2354 | bytes_returned = | ||
2355 | cifs_strtoUCS((__le16 *) bcc_ptr, | ||
2356 | "CIFS_LINUX_DOM", 32, nls_codepage); | ||
2357 | else | ||
2358 | bytes_returned = | ||
2359 | cifs_strtoUCS((__le16 *) bcc_ptr, domain, 64, | ||
2360 | nls_codepage); | ||
2361 | bcc_ptr += 2 * bytes_returned; | ||
2362 | bcc_ptr += 2; | ||
2363 | bytes_returned = | ||
2364 | cifs_strtoUCS((__le16 *) bcc_ptr, "Linux version ", | ||
2365 | 32, nls_codepage); | ||
2366 | bcc_ptr += 2 * bytes_returned; | ||
2367 | bytes_returned = | ||
2368 | cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release, 32, | ||
2369 | nls_codepage); | ||
2370 | bcc_ptr += 2 * bytes_returned; | ||
2371 | bcc_ptr += 2; | ||
2372 | bytes_returned = | ||
2373 | cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, | ||
2374 | 64, nls_codepage); | ||
2375 | bcc_ptr += 2 * bytes_returned; | ||
2376 | bcc_ptr += 2; | ||
2377 | } else { | ||
2378 | strncpy(bcc_ptr, user, 200); | ||
2379 | bcc_ptr += strnlen(user, 200); | ||
2380 | *bcc_ptr = 0; | ||
2381 | bcc_ptr++; | ||
2382 | if (domain == NULL) { | ||
2383 | strcpy(bcc_ptr, "CIFS_LINUX_DOM"); | ||
2384 | bcc_ptr += strlen("CIFS_LINUX_DOM") + 1; | ||
2385 | } else { | ||
2386 | strncpy(bcc_ptr, domain, 64); | ||
2387 | bcc_ptr += strnlen(domain, 64); | ||
2388 | *bcc_ptr = 0; | ||
2389 | bcc_ptr++; | ||
2390 | } | ||
2391 | strcpy(bcc_ptr, "Linux version "); | ||
2392 | bcc_ptr += strlen("Linux version "); | ||
2393 | strcpy(bcc_ptr, system_utsname.release); | ||
2394 | bcc_ptr += strlen(system_utsname.release) + 1; | ||
2395 | strcpy(bcc_ptr, CIFS_NETWORK_OPSYS); | ||
2396 | bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1; | ||
2397 | } | ||
2398 | count = (long) bcc_ptr - (long) pByteArea(smb_buffer); | ||
2399 | smb_buffer->smb_buf_length += count; | ||
2400 | pSMB->req.ByteCount = cpu_to_le16(count); | ||
2401 | |||
2402 | rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, | ||
2403 | &bytes_returned, 1); | ||
2404 | if (rc) { | ||
2405 | /* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */ | ||
2406 | } else if ((smb_buffer_response->WordCount == 3) | ||
2407 | || (smb_buffer_response->WordCount == 4)) { | ||
2408 | __u16 action = le16_to_cpu(pSMBr->resp.Action); | ||
2409 | __u16 blob_len = | ||
2410 | le16_to_cpu(pSMBr->resp.SecurityBlobLength); | ||
2411 | if (action & GUEST_LOGIN) | ||
2412 | cFYI(1, (" Guest login")); /* BB do we want to set anything in SesInfo struct ? */ | ||
2413 | if (ses) { | ||
2414 | ses->Suid = smb_buffer_response->Uid; /* UID left in wire format (le) */ | ||
2415 | cFYI(1, ("UID = %d ", ses->Suid)); | ||
2416 | bcc_ptr = pByteArea(smb_buffer_response); /* response can have either 3 or 4 word count - Samba sends 3 */ | ||
2417 | |||
2418 | /* BB Fix below to make endian neutral !! */ | ||
2419 | |||
2420 | if ((pSMBr->resp.hdr.WordCount == 3) | ||
2421 | || ((pSMBr->resp.hdr.WordCount == 4) | ||
2422 | && (blob_len < | ||
2423 | pSMBr->resp.ByteCount))) { | ||
2424 | if (pSMBr->resp.hdr.WordCount == 4) { | ||
2425 | bcc_ptr += | ||
2426 | blob_len; | ||
2427 | cFYI(1, | ||
2428 | ("Security Blob Length %d ", | ||
2429 | blob_len)); | ||
2430 | } | ||
2431 | |||
2432 | if (smb_buffer->Flags2 & SMBFLG2_UNICODE) { | ||
2433 | if ((long) (bcc_ptr) % 2) { | ||
2434 | remaining_words = | ||
2435 | (BCC(smb_buffer_response) | ||
2436 | - 1) / 2; | ||
2437 | bcc_ptr++; /* Unicode strings must be word aligned */ | ||
2438 | } else { | ||
2439 | remaining_words = | ||
2440 | BCC | ||
2441 | (smb_buffer_response) / 2; | ||
2442 | } | ||
2443 | len = | ||
2444 | UniStrnlen((wchar_t *) bcc_ptr, | ||
2445 | remaining_words - 1); | ||
2446 | /* We look for obvious messed up bcc or strings in response so we do not go off | ||
2447 | the end since (at least) WIN2K and Windows XP have a major bug in not null | ||
2448 | terminating last Unicode string in response */ | ||
2449 | if(ses->serverOS) | ||
2450 | kfree(ses->serverOS); | ||
2451 | ses->serverOS = | ||
2452 | kzalloc(2 * (len + 1), GFP_KERNEL); | ||
2453 | cifs_strfromUCS_le(ses->serverOS, | ||
2454 | (__le16 *) | ||
2455 | bcc_ptr, len, | ||
2456 | nls_codepage); | ||
2457 | bcc_ptr += 2 * (len + 1); | ||
2458 | remaining_words -= len + 1; | ||
2459 | ses->serverOS[2 * len] = 0; | ||
2460 | ses->serverOS[1 + (2 * len)] = 0; | ||
2461 | if (remaining_words > 0) { | ||
2462 | len = UniStrnlen((wchar_t *)bcc_ptr, | ||
2463 | remaining_words | ||
2464 | - 1); | ||
2465 | if(ses->serverNOS) | ||
2466 | kfree(ses->serverNOS); | ||
2467 | ses->serverNOS = | ||
2468 | kzalloc(2 * (len + 1), | ||
2469 | GFP_KERNEL); | ||
2470 | cifs_strfromUCS_le(ses->serverNOS, | ||
2471 | (__le16 *)bcc_ptr, | ||
2472 | len, | ||
2473 | nls_codepage); | ||
2474 | bcc_ptr += 2 * (len + 1); | ||
2475 | ses->serverNOS[2 * len] = 0; | ||
2476 | ses->serverNOS[1 + (2 * len)] = 0; | ||
2477 | remaining_words -= len + 1; | ||
2478 | if (remaining_words > 0) { | ||
2479 | len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); | ||
2480 | /* last string not null terminated (e.g.Windows XP/2000) */ | ||
2481 | if(ses->serverDomain) | ||
2482 | kfree(ses->serverDomain); | ||
2483 | ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL); | ||
2484 | cifs_strfromUCS_le(ses->serverDomain, | ||
2485 | (__le16 *)bcc_ptr, | ||
2486 | len, nls_codepage); | ||
2487 | bcc_ptr += 2*(len+1); | ||
2488 | ses->serverDomain[2*len] = 0; | ||
2489 | ses->serverDomain[1+(2*len)] = 0; | ||
2490 | } /* else no more room so create dummy domain string */ | ||
2491 | else { | ||
2492 | if(ses->serverDomain) | ||
2493 | kfree(ses->serverDomain); | ||
2494 | ses->serverDomain = | ||
2495 | kzalloc(2,GFP_KERNEL); | ||
2496 | } | ||
2497 | } else {/* no room use dummy domain&NOS */ | ||
2498 | if(ses->serverDomain) | ||
2499 | kfree(ses->serverDomain); | ||
2500 | ses->serverDomain = kzalloc(2, GFP_KERNEL); | ||
2501 | if(ses->serverNOS) | ||
2502 | kfree(ses->serverNOS); | ||
2503 | ses->serverNOS = kzalloc(2, GFP_KERNEL); | ||
2504 | } | ||
2505 | } else { /* ASCII */ | ||
2506 | |||
2507 | len = strnlen(bcc_ptr, 1024); | ||
2508 | if (((long) bcc_ptr + len) - (long) | ||
2509 | pByteArea(smb_buffer_response) | ||
2510 | <= BCC(smb_buffer_response)) { | ||
2511 | if(ses->serverOS) | ||
2512 | kfree(ses->serverOS); | ||
2513 | ses->serverOS = kzalloc(len + 1, GFP_KERNEL); | ||
2514 | strncpy(ses->serverOS, bcc_ptr, len); | ||
2515 | |||
2516 | bcc_ptr += len; | ||
2517 | bcc_ptr[0] = 0; /* null terminate the string */ | ||
2518 | bcc_ptr++; | ||
2519 | |||
2520 | len = strnlen(bcc_ptr, 1024); | ||
2521 | if(ses->serverNOS) | ||
2522 | kfree(ses->serverNOS); | ||
2523 | ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); | ||
2524 | strncpy(ses->serverNOS, bcc_ptr, len); | ||
2525 | bcc_ptr += len; | ||
2526 | bcc_ptr[0] = 0; | ||
2527 | bcc_ptr++; | ||
2528 | |||
2529 | len = strnlen(bcc_ptr, 1024); | ||
2530 | if(ses->serverDomain) | ||
2531 | kfree(ses->serverDomain); | ||
2532 | ses->serverDomain = kzalloc(len + 1, GFP_KERNEL); | ||
2533 | strncpy(ses->serverDomain, bcc_ptr, len); | ||
2534 | bcc_ptr += len; | ||
2535 | bcc_ptr[0] = 0; | ||
2536 | bcc_ptr++; | ||
2537 | } else | ||
2538 | cFYI(1, | ||
2539 | ("Variable field of length %d extends beyond end of smb ", | ||
2540 | len)); | ||
2541 | } | ||
2542 | } else { | ||
2543 | cERROR(1, | ||
2544 | (" Security Blob Length extends beyond end of SMB")); | ||
2545 | } | ||
2546 | } else { | ||
2547 | cERROR(1, ("No session structure passed in.")); | ||
2548 | } | ||
2549 | } else { | ||
2550 | cERROR(1, | ||
2551 | (" Invalid Word count %d: ", | ||
2552 | smb_buffer_response->WordCount)); | ||
2553 | rc = -EIO; | ||
2554 | } | ||
2555 | |||
2556 | if (smb_buffer) | ||
2557 | cifs_buf_release(smb_buffer); | ||
2558 | |||
2559 | return rc; | ||
2560 | } | ||
2561 | |||
2562 | static int | ||
2563 | CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | 2288 | CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, |
2564 | struct cifsSesInfo *ses, int * pNTLMv2_flag, | 2289 | struct cifsSesInfo *ses, int * pNTLMv2_flag, |
2565 | const struct nls_table *nls_codepage) | 2290 | const struct nls_table *nls_codepage) |
@@ -2635,8 +2360,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | |||
2635 | /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128; | 2360 | /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128; |
2636 | if(sign_CIFS_PDUs) | 2361 | if(sign_CIFS_PDUs) |
2637 | negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN; | 2362 | negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN; |
2638 | if(ntlmv2_support) | 2363 | /* if(ntlmv2_support) |
2639 | negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2; | 2364 | negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;*/ |
2640 | /* setup pointers to domain name and workstation name */ | 2365 | /* setup pointers to domain name and workstation name */ |
2641 | bcc_ptr += SecurityBlobLength; | 2366 | bcc_ptr += SecurityBlobLength; |
2642 | 2367 | ||
@@ -2783,8 +2508,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | |||
2783 | bcc_ptr, | 2508 | bcc_ptr, |
2784 | remaining_words | 2509 | remaining_words |
2785 | - 1); | 2510 | - 1); |
2786 | if(ses->serverNOS) | 2511 | kfree(ses->serverNOS); |
2787 | kfree(ses->serverNOS); | ||
2788 | ses->serverNOS = | 2512 | ses->serverNOS = |
2789 | kzalloc(2 * (len + 1), | 2513 | kzalloc(2 * (len + 1), |
2790 | GFP_KERNEL); | 2514 | GFP_KERNEL); |
@@ -2802,8 +2526,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | |||
2802 | if (remaining_words > 0) { | 2526 | if (remaining_words > 0) { |
2803 | len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); | 2527 | len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); |
2804 | /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ | 2528 | /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ |
2805 | if(ses->serverDomain) | 2529 | kfree(ses->serverDomain); |
2806 | kfree(ses->serverDomain); | ||
2807 | ses->serverDomain = | 2530 | ses->serverDomain = |
2808 | kzalloc(2 * | 2531 | kzalloc(2 * |
2809 | (len + | 2532 | (len + |
@@ -2822,19 +2545,16 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | |||
2822 | = 0; | 2545 | = 0; |
2823 | } /* else no more room so create dummy domain string */ | 2546 | } /* else no more room so create dummy domain string */ |
2824 | else { | 2547 | else { |
2825 | if(ses->serverDomain) | 2548 | kfree(ses->serverDomain); |
2826 | kfree(ses->serverDomain); | ||
2827 | ses->serverDomain = | 2549 | ses->serverDomain = |
2828 | kzalloc(2, | 2550 | kzalloc(2, |
2829 | GFP_KERNEL); | 2551 | GFP_KERNEL); |
2830 | } | 2552 | } |
2831 | } else { /* no room so create dummy domain and NOS string */ | 2553 | } else { /* no room so create dummy domain and NOS string */ |
2832 | if(ses->serverDomain); | 2554 | kfree(ses->serverDomain); |
2833 | kfree(ses->serverDomain); | ||
2834 | ses->serverDomain = | 2555 | ses->serverDomain = |
2835 | kzalloc(2, GFP_KERNEL); | 2556 | kzalloc(2, GFP_KERNEL); |
2836 | if(ses->serverNOS) | 2557 | kfree(ses->serverNOS); |
2837 | kfree(ses->serverNOS); | ||
2838 | ses->serverNOS = | 2558 | ses->serverNOS = |
2839 | kzalloc(2, GFP_KERNEL); | 2559 | kzalloc(2, GFP_KERNEL); |
2840 | } | 2560 | } |
@@ -2856,8 +2576,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | |||
2856 | bcc_ptr++; | 2576 | bcc_ptr++; |
2857 | 2577 | ||
2858 | len = strnlen(bcc_ptr, 1024); | 2578 | len = strnlen(bcc_ptr, 1024); |
2859 | if(ses->serverNOS) | 2579 | kfree(ses->serverNOS); |
2860 | kfree(ses->serverNOS); | ||
2861 | ses->serverNOS = | 2580 | ses->serverNOS = |
2862 | kzalloc(len + 1, | 2581 | kzalloc(len + 1, |
2863 | GFP_KERNEL); | 2582 | GFP_KERNEL); |
@@ -2867,8 +2586,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, | |||
2867 | bcc_ptr++; | 2586 | bcc_ptr++; |
2868 | 2587 | ||
2869 | len = strnlen(bcc_ptr, 1024); | 2588 | len = strnlen(bcc_ptr, 1024); |
2870 | if(ses->serverDomain) | 2589 | kfree(ses->serverDomain); |
2871 | kfree(ses->serverDomain); | ||
2872 | ses->serverDomain = | 2590 | ses->serverDomain = |
2873 | kzalloc(len + 1, | 2591 | kzalloc(len + 1, |
2874 | GFP_KERNEL); | 2592 | GFP_KERNEL); |
@@ -2994,14 +2712,14 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
2994 | SecurityBlob->LmChallengeResponse.Buffer = 0; | 2712 | SecurityBlob->LmChallengeResponse.Buffer = 0; |
2995 | 2713 | ||
2996 | SecurityBlob->NtChallengeResponse.Length = | 2714 | SecurityBlob->NtChallengeResponse.Length = |
2997 | cpu_to_le16(CIFS_SESSION_KEY_SIZE); | 2715 | cpu_to_le16(CIFS_SESS_KEY_SIZE); |
2998 | SecurityBlob->NtChallengeResponse.MaximumLength = | 2716 | SecurityBlob->NtChallengeResponse.MaximumLength = |
2999 | cpu_to_le16(CIFS_SESSION_KEY_SIZE); | 2717 | cpu_to_le16(CIFS_SESS_KEY_SIZE); |
3000 | memcpy(bcc_ptr, ntlm_session_key, CIFS_SESSION_KEY_SIZE); | 2718 | memcpy(bcc_ptr, ntlm_session_key, CIFS_SESS_KEY_SIZE); |
3001 | SecurityBlob->NtChallengeResponse.Buffer = | 2719 | SecurityBlob->NtChallengeResponse.Buffer = |
3002 | cpu_to_le32(SecurityBlobLength); | 2720 | cpu_to_le32(SecurityBlobLength); |
3003 | SecurityBlobLength += CIFS_SESSION_KEY_SIZE; | 2721 | SecurityBlobLength += CIFS_SESS_KEY_SIZE; |
3004 | bcc_ptr += CIFS_SESSION_KEY_SIZE; | 2722 | bcc_ptr += CIFS_SESS_KEY_SIZE; |
3005 | 2723 | ||
3006 | if (ses->capabilities & CAP_UNICODE) { | 2724 | if (ses->capabilities & CAP_UNICODE) { |
3007 | if (domain == NULL) { | 2725 | if (domain == NULL) { |
@@ -3190,8 +2908,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
3190 | bcc_ptr, | 2908 | bcc_ptr, |
3191 | remaining_words | 2909 | remaining_words |
3192 | - 1); | 2910 | - 1); |
3193 | if(ses->serverNOS) | 2911 | kfree(ses->serverNOS); |
3194 | kfree(ses->serverNOS); | ||
3195 | ses->serverNOS = | 2912 | ses->serverNOS = |
3196 | kzalloc(2 * (len + 1), | 2913 | kzalloc(2 * (len + 1), |
3197 | GFP_KERNEL); | 2914 | GFP_KERNEL); |
@@ -3244,8 +2961,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
3244 | if(ses->serverDomain) | 2961 | if(ses->serverDomain) |
3245 | kfree(ses->serverDomain); | 2962 | kfree(ses->serverDomain); |
3246 | ses->serverDomain = kzalloc(2, GFP_KERNEL); | 2963 | ses->serverDomain = kzalloc(2, GFP_KERNEL); |
3247 | if(ses->serverNOS) | 2964 | kfree(ses->serverNOS); |
3248 | kfree(ses->serverNOS); | ||
3249 | ses->serverNOS = kzalloc(2, GFP_KERNEL); | 2965 | ses->serverNOS = kzalloc(2, GFP_KERNEL); |
3250 | } | 2966 | } |
3251 | } else { /* ASCII */ | 2967 | } else { /* ASCII */ |
@@ -3263,8 +2979,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, | |||
3263 | bcc_ptr++; | 2979 | bcc_ptr++; |
3264 | 2980 | ||
3265 | len = strnlen(bcc_ptr, 1024); | 2981 | len = strnlen(bcc_ptr, 1024); |
3266 | if(ses->serverNOS) | 2982 | kfree(ses->serverNOS); |
3267 | kfree(ses->serverNOS); | ||
3268 | ses->serverNOS = kzalloc(len+1,GFP_KERNEL); | 2983 | ses->serverNOS = kzalloc(len+1,GFP_KERNEL); |
3269 | strncpy(ses->serverNOS, bcc_ptr, len); | 2984 | strncpy(ses->serverNOS, bcc_ptr, len); |
3270 | bcc_ptr += len; | 2985 | bcc_ptr += len; |
@@ -3340,22 +3055,33 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3340 | bcc_ptr = &pSMB->Password[0]; | 3055 | bcc_ptr = &pSMB->Password[0]; |
3341 | if((ses->server->secMode) & SECMODE_USER) { | 3056 | if((ses->server->secMode) & SECMODE_USER) { |
3342 | pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ | 3057 | pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ |
3058 | *bcc_ptr = 0; /* password is null byte */ | ||
3343 | bcc_ptr++; /* skip password */ | 3059 | bcc_ptr++; /* skip password */ |
3060 | /* already aligned so no need to do it below */ | ||
3344 | } else { | 3061 | } else { |
3345 | pSMB->PasswordLength = cpu_to_le16(CIFS_SESSION_KEY_SIZE); | 3062 | pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); |
3346 | /* BB FIXME add code to fail this if NTLMv2 or Kerberos | 3063 | /* BB FIXME add code to fail this if NTLMv2 or Kerberos |
3347 | specified as required (when that support is added to | 3064 | specified as required (when that support is added to |
3348 | the vfs in the future) as only NTLM or the much | 3065 | the vfs in the future) as only NTLM or the much |
3349 | weaker LANMAN (which we do not send) is accepted | 3066 | weaker LANMAN (which we do not send by default) is accepted |
3350 | by Samba (not sure whether other servers allow | 3067 | by Samba (not sure whether other servers allow |
3351 | NTLMv2 password here) */ | 3068 | NTLMv2 password here) */ |
3069 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
3070 | if((extended_security & CIFSSEC_MAY_LANMAN) && | ||
3071 | (ses->server->secType == LANMAN)) | ||
3072 | calc_lanman_hash(ses, bcc_ptr); | ||
3073 | else | ||
3074 | #endif /* CIFS_WEAK_PW_HASH */ | ||
3352 | SMBNTencrypt(ses->password, | 3075 | SMBNTencrypt(ses->password, |
3353 | ses->server->cryptKey, | 3076 | ses->server->cryptKey, |
3354 | bcc_ptr); | 3077 | bcc_ptr); |
3355 | 3078 | ||
3356 | bcc_ptr += CIFS_SESSION_KEY_SIZE; | 3079 | bcc_ptr += CIFS_SESS_KEY_SIZE; |
3357 | *bcc_ptr = 0; | 3080 | if(ses->capabilities & CAP_UNICODE) { |
3358 | bcc_ptr++; /* align */ | 3081 | /* must align unicode strings */ |
3082 | *bcc_ptr = 0; /* null byte password */ | ||
3083 | bcc_ptr++; | ||
3084 | } | ||
3359 | } | 3085 | } |
3360 | 3086 | ||
3361 | if(ses->server->secMode & | 3087 | if(ses->server->secMode & |
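Note on the CIFSTCon() hunk above: the tree-connect password field now gets an explicit null byte in the SECMODE_USER case, and the trailing null after the encrypted password is appended only when unicode alignment requires it. When CONFIG_CIFS_WEAK_PW_HASH is compiled in, the weaker LANMAN hash is generated only if the user allowed it and the server actually negotiated LANMAN; otherwise the NTLM hash is sent as before. The decision point, condensed from the hunk (no new names introduced):

	if ((extended_security & CIFSSEC_MAY_LANMAN) &&
	    (ses->server->secType == LANMAN))
		calc_lanman_hash(ses, bcc_ptr);	/* weak hash, opt-in only */
	else
		SMBNTencrypt(ses->password, ses->server->cryptKey, bcc_ptr);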
@@ -3429,7 +3155,10 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3429 | } | 3155 | } |
3430 | /* else do not bother copying these informational fields */ | 3156 | /* else do not bother copying these informational fields */ |
3431 | } | 3157 | } |
3432 | tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); | 3158 | if(smb_buffer_response->WordCount == 3) |
3159 | tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); | ||
3160 | else | ||
3161 | tcon->Flags = 0; | ||
3433 | cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags)); | 3162 | cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags)); |
3434 | } else if ((rc == 0) && tcon == NULL) { | 3163 | } else if ((rc == 0) && tcon == NULL) { |
3435 | /* all we need to save for IPC$ connection */ | 3164 | /* all we need to save for IPC$ connection */ |
@@ -3494,7 +3223,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, | |||
3494 | struct nls_table * nls_info) | 3223 | struct nls_table * nls_info) |
3495 | { | 3224 | { |
3496 | int rc = 0; | 3225 | int rc = 0; |
3497 | char ntlm_session_key[CIFS_SESSION_KEY_SIZE]; | 3226 | char ntlm_session_key[CIFS_SESS_KEY_SIZE]; |
3498 | int ntlmv2_flag = FALSE; | 3227 | int ntlmv2_flag = FALSE; |
3499 | int first_time = 0; | 3228 | int first_time = 0; |
3500 | 3229 | ||
@@ -3526,20 +3255,13 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo, | |||
3526 | pSesInfo->server->secMode, | 3255 | pSesInfo->server->secMode, |
3527 | pSesInfo->server->capabilities, | 3256 | pSesInfo->server->capabilities, |
3528 | pSesInfo->server->timeZone)); | 3257 | pSesInfo->server->timeZone)); |
3529 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 3258 | if(experimEnabled < 2) |
3530 | if(experimEnabled > 1) | 3259 | rc = CIFS_SessSetup(xid, pSesInfo, |
3531 | rc = CIFS_SessSetup(xid, pSesInfo, CIFS_NTLM /* type */, | 3260 | first_time, nls_info); |
3532 | &ntlmv2_flag, nls_info); | 3261 | else if (extended_security |
3533 | else | ||
3534 | #endif | ||
3535 | if (extended_security | ||
3536 | && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) | 3262 | && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) |
3537 | && (pSesInfo->server->secType == NTLMSSP)) { | 3263 | && (pSesInfo->server->secType == NTLMSSP)) { |
3538 | cFYI(1, ("New style sesssetup")); | 3264 | rc = -EOPNOTSUPP; |
3539 | rc = CIFSSpnegoSessSetup(xid, pSesInfo, | ||
3540 | NULL /* security blob */, | ||
3541 | 0 /* blob length */, | ||
3542 | nls_info); | ||
3543 | } else if (extended_security | 3265 | } else if (extended_security |
3544 | && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) | 3266 | && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) |
3545 | && (pSesInfo->server->secType == RawNTLMSSP)) { | 3267 | && (pSesInfo->server->secType == RawNTLMSSP)) { |
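End-of-file note for connect.c: the CIFSSpnegoSessSetup() routine removed above largely duplicated the server-string parsing of the other session-setup variants, and the final hunk reroutes the default case through the consolidated CIFS_SessSetup() while extended-security NTLMSSP temporarily reports -EOPNOTSUPP instead of calling the deleted function. The new dispatch, condensed from the hunk for readability (no names added):

	if (experimEnabled < 2)
		rc = CIFS_SessSetup(xid, pSesInfo, first_time, nls_info);
	else if (extended_security &&
		 (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) &&
		 (pSesInfo->server->secType == NTLMSSP))
		rc = -EOPNOTSUPP;	/* old in-kernel SPNEGO path is gone */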
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 82315edc77d7..ba4cbe9b0684 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -113,7 +113,7 @@ cifs_bp_rename_retry: | |||
113 | full_path[namelen+2] = 0; | 113 | full_path[namelen+2] = 0; |
114 | BB remove above eight lines BB */ | 114 | BB remove above eight lines BB */ |
115 | 115 | ||
116 | /* Inode operations in similar order to how they appear in the Linux file fs.h */ | 116 | /* Inode operations in similar order to how they appear in Linux file fs.h */ |
117 | 117 | ||
118 | int | 118 | int |
119 | cifs_create(struct inode *inode, struct dentry *direntry, int mode, | 119 | cifs_create(struct inode *inode, struct dentry *direntry, int mode, |
@@ -178,11 +178,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
178 | FreeXid(xid); | 178 | FreeXid(xid); |
179 | return -ENOMEM; | 179 | return -ENOMEM; |
180 | } | 180 | } |
181 | 181 | if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS) | |
182 | rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, | 182 | rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, |
183 | desiredAccess, CREATE_NOT_DIR, | 183 | desiredAccess, CREATE_NOT_DIR, |
184 | &fileHandle, &oplock, buf, cifs_sb->local_nls, | 184 | &fileHandle, &oplock, buf, cifs_sb->local_nls, |
185 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 185 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
186 | else | ||
187 | rc = -EIO; /* no NT SMB support, fall into legacy open below */ | ||
188 | |||
186 | if(rc == -EIO) { | 189 | if(rc == -EIO) { |
187 | /* old server, retry the open legacy style */ | 190 | /* old server, retry the open legacy style */ |
188 | rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, | 191 | rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, |
@@ -191,7 +194,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
191 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 194 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
192 | } | 195 | } |
193 | if (rc) { | 196 | if (rc) { |
194 | cFYI(1, ("cifs_create returned 0x%x ", rc)); | 197 | cFYI(1, ("cifs_create returned 0x%x", rc)); |
195 | } else { | 198 | } else { |
196 | /* If Open reported that we actually created a file | 199 | /* If Open reported that we actually created a file |
197 | then we now have to set the mode if possible */ | 200 | then we now have to set the mode if possible */ |
@@ -369,6 +372,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
369 | cifs_sb->mnt_cifs_flags & | 372 | cifs_sb->mnt_cifs_flags & |
370 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 373 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
371 | 374 | ||
375 | /* BB FIXME - add handling for backlevel servers | ||
376 | which need legacy open and check for all | ||
377 | calls to SMBOpen for fallback to | ||
378 | SMBLegacyOpen */ | ||
372 | if(!rc) { | 379 | if(!rc) { |
373 | /* BB Do not bother to decode buf since no | 380 | /* BB Do not bother to decode buf since no |
374 | local inode yet to put timestamps in, | 381 | local inode yet to put timestamps in, |
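Note on the dir.c change above (cifs_open() in file.c below gets the same treatment): rather than always issuing the NT-style CIFSSMBOpen and waiting for the server to reject it, the open is attempted only when the tree connection advertises CAP_NT_SMBS, and otherwise rc is primed to -EIO so control drops directly into the existing SMBLegacyOpen retry. The gate, as it appears in cifs_create():

	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
				 desiredAccess, CREATE_NOT_DIR,
				 &fileHandle, &oplock, buf,
				 cifs_sb->local_nls,
				 cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO;	/* fall into the SMBLegacyOpen path below */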
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c index 633a93811328..d91a3d44e9e3 100644 --- a/fs/cifs/fcntl.c +++ b/fs/cifs/fcntl.c | |||
@@ -91,14 +91,14 @@ int cifs_dir_notify(struct file * file, unsigned long arg) | |||
91 | if(full_path == NULL) { | 91 | if(full_path == NULL) { |
92 | rc = -ENOMEM; | 92 | rc = -ENOMEM; |
93 | } else { | 93 | } else { |
94 | cERROR(1,("cifs dir notify on file %s with arg 0x%lx",full_path,arg)); /* BB removeme BB */ | 94 | cFYI(1,("dir notify on file %s Arg 0x%lx",full_path,arg)); |
95 | rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, | 95 | rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, |
96 | GENERIC_READ | SYNCHRONIZE, 0 /* create options */, | 96 | GENERIC_READ | SYNCHRONIZE, 0 /* create options */, |
97 | &netfid, &oplock,NULL, cifs_sb->local_nls, | 97 | &netfid, &oplock,NULL, cifs_sb->local_nls, |
98 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 98 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
99 | /* BB fixme - add this handle to a notify handle list */ | 99 | /* BB fixme - add this handle to a notify handle list */ |
100 | if(rc) { | 100 | if(rc) { |
101 | cERROR(1,("Could not open directory for notify")); /* BB remove BB */ | 101 | cFYI(1,("Could not open directory for notify")); |
102 | } else { | 102 | } else { |
103 | filter = convert_to_cifs_notify_flags(arg); | 103 | filter = convert_to_cifs_notify_flags(arg); |
104 | if(filter != 0) { | 104 | if(filter != 0) { |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index b4a18c1cab0a..e9c1573f6aa7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -110,7 +110,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, | |||
110 | &pCifsInode->openFileList); | 110 | &pCifsInode->openFileList); |
111 | } | 111 | } |
112 | write_unlock(&GlobalSMBSeslock); | 112 | write_unlock(&GlobalSMBSeslock); |
113 | write_unlock(&file->f_owner.lock); | ||
114 | if (pCifsInode->clientCanCacheRead) { | 113 | if (pCifsInode->clientCanCacheRead) { |
115 | /* we have the inode open somewhere else | 114 | /* we have the inode open somewhere else |
116 | no need to discard cache data */ | 115 | no need to discard cache data */ |
@@ -201,7 +200,7 @@ int cifs_open(struct inode *inode, struct file *file) | |||
201 | } else { | 200 | } else { |
202 | if (file->f_flags & O_EXCL) | 201 | if (file->f_flags & O_EXCL) |
203 | cERROR(1, ("could not find file instance for " | 202 | cERROR(1, ("could not find file instance for " |
204 | "new file %p ", file)); | 203 | "new file %p", file)); |
205 | } | 204 | } |
206 | } | 205 | } |
207 | 206 | ||
@@ -260,10 +259,15 @@ int cifs_open(struct inode *inode, struct file *file) | |||
260 | rc = -ENOMEM; | 259 | rc = -ENOMEM; |
261 | goto out; | 260 | goto out; |
262 | } | 261 | } |
263 | rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess, | 262 | |
264 | CREATE_NOT_DIR, &netfid, &oplock, buf, | 263 | if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS) |
264 | rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, | ||
265 | desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf, | ||
265 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags | 266 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags |
266 | & CIFS_MOUNT_MAP_SPECIAL_CHR); | 267 | & CIFS_MOUNT_MAP_SPECIAL_CHR); |
268 | else | ||
269 | rc = -EIO; /* no NT SMB support, fall into legacy open below */ | ||
270 | |||
267 | if (rc == -EIO) { | 271 | if (rc == -EIO) { |
268 | /* Old server, try legacy style OpenX */ | 272 | /* Old server, try legacy style OpenX */ |
269 | rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, | 273 | rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, |
@@ -272,7 +276,7 @@ int cifs_open(struct inode *inode, struct file *file) | |||
272 | & CIFS_MOUNT_MAP_SPECIAL_CHR); | 276 | & CIFS_MOUNT_MAP_SPECIAL_CHR); |
273 | } | 277 | } |
274 | if (rc) { | 278 | if (rc) { |
275 | cFYI(1, ("cifs_open returned 0x%x ", rc)); | 279 | cFYI(1, ("cifs_open returned 0x%x", rc)); |
276 | goto out; | 280 | goto out; |
277 | } | 281 | } |
278 | file->private_data = | 282 | file->private_data = |
@@ -282,7 +286,6 @@ int cifs_open(struct inode *inode, struct file *file) | |||
282 | goto out; | 286 | goto out; |
283 | } | 287 | } |
284 | pCifsFile = cifs_init_private(file->private_data, inode, file, netfid); | 288 | pCifsFile = cifs_init_private(file->private_data, inode, file, netfid); |
285 | write_lock(&file->f_owner.lock); | ||
286 | write_lock(&GlobalSMBSeslock); | 289 | write_lock(&GlobalSMBSeslock); |
287 | list_add(&pCifsFile->tlist, &pTcon->openFileList); | 290 | list_add(&pCifsFile->tlist, &pTcon->openFileList); |
288 | 291 | ||
@@ -293,7 +296,6 @@ int cifs_open(struct inode *inode, struct file *file) | |||
293 | &oplock, buf, full_path, xid); | 296 | &oplock, buf, full_path, xid); |
294 | } else { | 297 | } else { |
295 | write_unlock(&GlobalSMBSeslock); | 298 | write_unlock(&GlobalSMBSeslock); |
296 | write_unlock(&file->f_owner.lock); | ||
297 | } | 299 | } |
298 | 300 | ||
299 | if (oplock & CIFS_CREATE_ACTION) { | 301 | if (oplock & CIFS_CREATE_ACTION) { |
@@ -409,8 +411,8 @@ static int cifs_reopen_file(struct inode *inode, struct file *file, | |||
409 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 411 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
410 | if (rc) { | 412 | if (rc) { |
411 | up(&pCifsFile->fh_sem); | 413 | up(&pCifsFile->fh_sem); |
412 | cFYI(1, ("cifs_open returned 0x%x ", rc)); | 414 | cFYI(1, ("cifs_open returned 0x%x", rc)); |
413 | cFYI(1, ("oplock: %d ", oplock)); | 415 | cFYI(1, ("oplock: %d", oplock)); |
414 | } else { | 416 | } else { |
415 | pCifsFile->netfid = netfid; | 417 | pCifsFile->netfid = netfid; |
416 | pCifsFile->invalidHandle = FALSE; | 418 | pCifsFile->invalidHandle = FALSE; |
@@ -472,7 +474,6 @@ int cifs_close(struct inode *inode, struct file *file) | |||
472 | pTcon = cifs_sb->tcon; | 474 | pTcon = cifs_sb->tcon; |
473 | if (pSMBFile) { | 475 | if (pSMBFile) { |
474 | pSMBFile->closePend = TRUE; | 476 | pSMBFile->closePend = TRUE; |
475 | write_lock(&file->f_owner.lock); | ||
476 | if (pTcon) { | 477 | if (pTcon) { |
477 | /* no sense reconnecting to close a file that is | 478 | /* no sense reconnecting to close a file that is |
478 | already closed */ | 479 | already closed */ |
@@ -487,23 +488,18 @@ int cifs_close(struct inode *inode, struct file *file) | |||
487 | the struct would be in each open file, | 488 | the struct would be in each open file, |
488 | but this should give enough time to | 489 | but this should give enough time to |
489 | clear the socket */ | 490 | clear the socket */ |
490 | write_unlock(&file->f_owner.lock); | ||
491 | cERROR(1,("close with pending writes")); | 491 | cERROR(1,("close with pending writes")); |
492 | msleep(timeout); | 492 | msleep(timeout); |
493 | write_lock(&file->f_owner.lock); | ||
494 | timeout *= 4; | 493 | timeout *= 4; |
495 | } | 494 | } |
496 | write_unlock(&file->f_owner.lock); | ||
497 | rc = CIFSSMBClose(xid, pTcon, | 495 | rc = CIFSSMBClose(xid, pTcon, |
498 | pSMBFile->netfid); | 496 | pSMBFile->netfid); |
499 | write_lock(&file->f_owner.lock); | ||
500 | } | 497 | } |
501 | } | 498 | } |
502 | write_lock(&GlobalSMBSeslock); | 499 | write_lock(&GlobalSMBSeslock); |
503 | list_del(&pSMBFile->flist); | 500 | list_del(&pSMBFile->flist); |
504 | list_del(&pSMBFile->tlist); | 501 | list_del(&pSMBFile->tlist); |
505 | write_unlock(&GlobalSMBSeslock); | 502 | write_unlock(&GlobalSMBSeslock); |
506 | write_unlock(&file->f_owner.lock); | ||
507 | kfree(pSMBFile->search_resume_name); | 503 | kfree(pSMBFile->search_resume_name); |
508 | kfree(file->private_data); | 504 | kfree(file->private_data); |
509 | file->private_data = NULL; | 505 | file->private_data = NULL; |
@@ -531,7 +527,7 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
531 | (struct cifsFileInfo *)file->private_data; | 527 | (struct cifsFileInfo *)file->private_data; |
532 | char *ptmp; | 528 | char *ptmp; |
533 | 529 | ||
534 | cFYI(1, ("Closedir inode = 0x%p with ", inode)); | 530 | cFYI(1, ("Closedir inode = 0x%p", inode)); |
535 | 531 | ||
536 | xid = GetXid(); | 532 | xid = GetXid(); |
537 | 533 | ||
@@ -605,7 +601,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
605 | } | 601 | } |
606 | if (pfLock->fl_flags & FL_ACCESS) | 602 | if (pfLock->fl_flags & FL_ACCESS) |
607 | cFYI(1, ("Process suspended by mandatory locking - " | 603 | cFYI(1, ("Process suspended by mandatory locking - " |
608 | "not implemented yet ")); | 604 | "not implemented yet")); |
609 | if (pfLock->fl_flags & FL_LEASE) | 605 | if (pfLock->fl_flags & FL_LEASE) |
610 | cFYI(1, ("Lease on file - not implemented yet")); | 606 | cFYI(1, ("Lease on file - not implemented yet")); |
611 | if (pfLock->fl_flags & | 607 | if (pfLock->fl_flags & |
@@ -1375,7 +1371,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1375 | 1371 | ||
1376 | xid = GetXid(); | 1372 | xid = GetXid(); |
1377 | 1373 | ||
1378 | cFYI(1, ("Sync file - name: %s datasync: 0x%x ", | 1374 | cFYI(1, ("Sync file - name: %s datasync: 0x%x", |
1379 | dentry->d_name.name, datasync)); | 1375 | dentry->d_name.name, datasync)); |
1380 | 1376 | ||
1381 | rc = filemap_fdatawrite(inode->i_mapping); | 1377 | rc = filemap_fdatawrite(inode->i_mapping); |
@@ -1404,7 +1400,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1404 | /* fill in rpages then | 1400 | /* fill in rpages then |
1405 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ | 1401 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ |
1406 | 1402 | ||
1407 | /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index)); | 1403 | /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index)); |
1408 | 1404 | ||
1409 | #if 0 | 1405 | #if 0 |
1410 | if (rc < 0) | 1406 | if (rc < 0) |
@@ -1836,7 +1832,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page, | |||
1836 | if (rc < 0) | 1832 | if (rc < 0) |
1837 | goto io_error; | 1833 | goto io_error; |
1838 | else | 1834 | else |
1839 | cFYI(1, ("Bytes read %d ",rc)); | 1835 | cFYI(1, ("Bytes read %d",rc)); |
1840 | 1836 | ||
1841 | file->f_dentry->d_inode->i_atime = | 1837 | file->f_dentry->d_inode->i_atime = |
1842 | current_fs_time(file->f_dentry->d_inode->i_sb); | 1838 | current_fs_time(file->f_dentry->d_inode->i_sb); |
@@ -1957,3 +1953,19 @@ struct address_space_operations cifs_addr_ops = { | |||
1957 | /* .sync_page = cifs_sync_page, */ | 1953 | /* .sync_page = cifs_sync_page, */ |
1958 | /* .direct_IO = */ | 1954 | /* .direct_IO = */ |
1959 | }; | 1955 | }; |
1956 | |||
1957 | /* | ||
1958 | * cifs_readpages requires the server to support a buffer large enough to | ||
1959 | * contain the header plus one complete page of data. Otherwise, we need | ||
1960 | * to leave cifs_readpages out of the address space operations. | ||
1961 | */ | ||
1962 | struct address_space_operations cifs_addr_ops_smallbuf = { | ||
1963 | .readpage = cifs_readpage, | ||
1964 | .writepage = cifs_writepage, | ||
1965 | .writepages = cifs_writepages, | ||
1966 | .prepare_write = cifs_prepare_write, | ||
1967 | .commit_write = cifs_commit_write, | ||
1968 | .set_page_dirty = __set_page_dirty_nobuffers, | ||
1969 | /* .sync_page = cifs_sync_page, */ | ||
1970 | /* .direct_IO = */ | ||
1971 | }; | ||
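Note on the new cifs_addr_ops_smallbuf table above: the code it replaces (visible in the inode.c hunks that follow) cleared the method with inode->i_data.a_ops->readpages = NULL, which writes through a pointer to the single shared cifs_addr_ops structure and so disables readpages for every cifs inode on the system, and it also compared against a hardcoded 4096 rather than the real page size. Keeping a second, readpages-less operations table lets each inode select the right set without mutating shared state:

	if (pTcon->ses->server->maxBuf <
	    PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
	else
		inode->i_data.a_ops = &cifs_addr_ops;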
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 4093764ef461..b88147c1dc27 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -41,7 +41,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
41 | char *tmp_path; | 41 | char *tmp_path; |
42 | 42 | ||
43 | pTcon = cifs_sb->tcon; | 43 | pTcon = cifs_sb->tcon; |
44 | cFYI(1, ("Getting info on %s ", search_path)); | 44 | cFYI(1, ("Getting info on %s", search_path)); |
45 | /* could have done a find first instead but this returns more info */ | 45 | /* could have done a find first instead but this returns more info */ |
46 | rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData, | 46 | rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData, |
47 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & | 47 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & |
@@ -97,9 +97,9 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
97 | inode = *pinode; | 97 | inode = *pinode; |
98 | cifsInfo = CIFS_I(inode); | 98 | cifsInfo = CIFS_I(inode); |
99 | 99 | ||
100 | cFYI(1, ("Old time %ld ", cifsInfo->time)); | 100 | cFYI(1, ("Old time %ld", cifsInfo->time)); |
101 | cifsInfo->time = jiffies; | 101 | cifsInfo->time = jiffies; |
102 | cFYI(1, ("New time %ld ", cifsInfo->time)); | 102 | cFYI(1, ("New time %ld", cifsInfo->time)); |
103 | /* this is ok to set on every inode revalidate */ | 103 | /* this is ok to set on every inode revalidate */ |
104 | atomic_set(&cifsInfo->inUse,1); | 104 | atomic_set(&cifsInfo->inUse,1); |
105 | 105 | ||
@@ -180,11 +180,12 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
180 | else /* not direct, send byte range locks */ | 180 | else /* not direct, send byte range locks */ |
181 | inode->i_fop = &cifs_file_ops; | 181 | inode->i_fop = &cifs_file_ops; |
182 | 182 | ||
183 | inode->i_data.a_ops = &cifs_addr_ops; | ||
184 | /* check if server can support readpages */ | 183 | /* check if server can support readpages */ |
185 | if(pTcon->ses->server->maxBuf < | 184 | if(pTcon->ses->server->maxBuf < |
186 | 4096 + MAX_CIFS_HDR_SIZE) | 185 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) |
187 | inode->i_data.a_ops->readpages = NULL; | 186 | inode->i_data.a_ops = &cifs_addr_ops_smallbuf; |
187 | else | ||
188 | inode->i_data.a_ops = &cifs_addr_ops; | ||
188 | } else if (S_ISDIR(inode->i_mode)) { | 189 | } else if (S_ISDIR(inode->i_mode)) { |
189 | cFYI(1, ("Directory inode")); | 190 | cFYI(1, ("Directory inode")); |
190 | inode->i_op = &cifs_dir_inode_ops; | 191 | inode->i_op = &cifs_dir_inode_ops; |
@@ -421,23 +422,23 @@ int cifs_get_inode_info(struct inode **pinode, | |||
421 | inode = *pinode; | 422 | inode = *pinode; |
422 | cifsInfo = CIFS_I(inode); | 423 | cifsInfo = CIFS_I(inode); |
423 | cifsInfo->cifsAttrs = attr; | 424 | cifsInfo->cifsAttrs = attr; |
424 | cFYI(1, ("Old time %ld ", cifsInfo->time)); | 425 | cFYI(1, ("Old time %ld", cifsInfo->time)); |
425 | cifsInfo->time = jiffies; | 426 | cifsInfo->time = jiffies; |
426 | cFYI(1, ("New time %ld ", cifsInfo->time)); | 427 | cFYI(1, ("New time %ld", cifsInfo->time)); |
427 | 428 | ||
428 | /* blksize needs to be multiple of two. So safer to default to | 429 | /* blksize needs to be multiple of two. So safer to default to |
429 | blksize and blkbits set in superblock so 2**blkbits and blksize | 430 | blksize and blkbits set in superblock so 2**blkbits and blksize |
430 | will match rather than setting to: | 431 | will match rather than setting to: |
431 | (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/ | 432 | (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/ |
432 | 433 | ||
433 | /* Linux can not store file creation time unfortunately so we ignore it */ | 434 | /* Linux can not store file creation time so ignore it */ |
434 | inode->i_atime = | 435 | inode->i_atime = |
435 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime)); | 436 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime)); |
436 | inode->i_mtime = | 437 | inode->i_mtime = |
437 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime)); | 438 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime)); |
438 | inode->i_ctime = | 439 | inode->i_ctime = |
439 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); | 440 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); |
440 | cFYI(0, ("Attributes came in as 0x%x ", attr)); | 441 | cFYI(0, ("Attributes came in as 0x%x", attr)); |
441 | 442 | ||
442 | /* set default mode. will override for dirs below */ | 443 | /* set default mode. will override for dirs below */ |
443 | if (atomic_read(&cifsInfo->inUse) == 0) | 444 | if (atomic_read(&cifsInfo->inUse) == 0) |
@@ -519,10 +520,11 @@ int cifs_get_inode_info(struct inode **pinode, | |||
519 | else /* not direct, send byte range locks */ | 520 | else /* not direct, send byte range locks */ |
520 | inode->i_fop = &cifs_file_ops; | 521 | inode->i_fop = &cifs_file_ops; |
521 | 522 | ||
522 | inode->i_data.a_ops = &cifs_addr_ops; | ||
523 | if(pTcon->ses->server->maxBuf < | 523 | if(pTcon->ses->server->maxBuf < |
524 | 4096 + MAX_CIFS_HDR_SIZE) | 524 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) |
525 | inode->i_data.a_ops->readpages = NULL; | 525 | inode->i_data.a_ops = &cifs_addr_ops_smallbuf; |
526 | else | ||
527 | inode->i_data.a_ops = &cifs_addr_ops; | ||
526 | } else if (S_ISDIR(inode->i_mode)) { | 528 | } else if (S_ISDIR(inode->i_mode)) { |
527 | cFYI(1, ("Directory inode")); | 529 | cFYI(1, ("Directory inode")); |
528 | inode->i_op = &cifs_dir_inode_ops; | 530 | inode->i_op = &cifs_dir_inode_ops; |
@@ -731,7 +733,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
731 | rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls, | 733 | rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls, |
732 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 734 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
733 | if (rc) { | 735 | if (rc) { |
734 | cFYI(1, ("cifs_mkdir returned 0x%x ", rc)); | 736 | cFYI(1, ("cifs_mkdir returned 0x%x", rc)); |
735 | d_drop(direntry); | 737 | d_drop(direntry); |
736 | } else { | 738 | } else { |
737 | inode->i_nlink++; | 739 | inode->i_nlink++; |
@@ -798,7 +800,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
798 | char *full_path = NULL; | 800 | char *full_path = NULL; |
799 | struct cifsInodeInfo *cifsInode; | 801 | struct cifsInodeInfo *cifsInode; |
800 | 802 | ||
801 | cFYI(1, ("cifs_rmdir, inode = 0x%p with ", inode)); | 803 | cFYI(1, ("cifs_rmdir, inode = 0x%p", inode)); |
802 | 804 | ||
803 | xid = GetXid(); | 805 | xid = GetXid(); |
804 | 806 | ||
@@ -1121,7 +1123,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1121 | 1123 | ||
1122 | xid = GetXid(); | 1124 | xid = GetXid(); |
1123 | 1125 | ||
1124 | cFYI(1, ("In cifs_setattr, name = %s attrs->iavalid 0x%x ", | 1126 | cFYI(1, ("setattr on file %s attrs->iavalid 0x%x", |
1125 | direntry->d_name.name, attrs->ia_valid)); | 1127 | direntry->d_name.name, attrs->ia_valid)); |
1126 | 1128 | ||
1127 | cifs_sb = CIFS_SB(direntry->d_inode->i_sb); | 1129 | cifs_sb = CIFS_SB(direntry->d_inode->i_sb); |
@@ -1157,6 +1159,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1157 | when the local oplock break takes longer to flush | 1159 | when the local oplock break takes longer to flush |
1158 | writebehind data than the SMB timeout for the SetPathInfo | 1160 | writebehind data than the SMB timeout for the SetPathInfo |
1159 | request would allow */ | 1161 | request would allow */ |
1162 | |||
1160 | open_file = find_writable_file(cifsInode); | 1163 | open_file = find_writable_file(cifsInode); |
1161 | if (open_file) { | 1164 | if (open_file) { |
1162 | __u16 nfid = open_file->netfid; | 1165 | __u16 nfid = open_file->netfid; |
@@ -1289,7 +1292,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1289 | it may be useful to Windows - but we do | 1292 | it may be useful to Windows - but we do |
1290 | not want to set ctime unless some other | 1293 | not want to set ctime unless some other |
1291 | timestamp is changing */ | 1294 | timestamp is changing */ |
1292 | cFYI(1, ("CIFS - CTIME changed ")); | 1295 | cFYI(1, ("CIFS - CTIME changed")); |
1293 | time_buf.ChangeTime = | 1296 | time_buf.ChangeTime = |
1294 | cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime)); | 1297 | cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime)); |
1295 | } else | 1298 | } else |
@@ -1356,7 +1359,7 @@ cifs_setattr_exit: | |||
1356 | 1359 | ||
1357 | void cifs_delete_inode(struct inode *inode) | 1360 | void cifs_delete_inode(struct inode *inode) |
1358 | { | 1361 | { |
1359 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p ", inode)); | 1362 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode)); |
1360 | /* may have to add back in if and when safe distributed caching of | 1363 | /* may have to add back in if and when safe distributed caching of |
1361 | directories added e.g. via FindNotify */ | 1364 | directories added e.g. via FindNotify */ |
1362 | } | 1365 | } |
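
The inode.c hunks above stop clearing the readpages method on the shared cifs_addr_ops table (which mutated a structure common to every mount) and instead select one of two address_space operation tables per inode, based on whether the server's negotiated buffer can hold a full page plus the SMB header. A minimal standalone sketch of that selection pattern follows; the types, constants, and the pick_addr_ops() helper are illustrative stand-ins, not the real kernel structures.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved; the names
 * mirror the patch but this is not the actual CIFS code. */
#define PAGE_CACHE_SIZE   4096
#define MAX_CIFS_HDR_SIZE 256

struct address_space_ops {
	const char *name;
	int has_readpages;	/* only the large-buffer table supports readpages */
};

static const struct address_space_ops cifs_addr_ops          = { "cifs_addr_ops", 1 };
static const struct address_space_ops cifs_addr_ops_smallbuf = { "cifs_addr_ops_smallbuf", 0 };

/* Choose a per-inode ops table instead of mutating a shared one: servers
 * whose maximum buffer cannot fit PAGE_CACHE_SIZE plus the SMB header get
 * the small-buffer table with readpages disabled. */
static const struct address_space_ops *pick_addr_ops(unsigned int server_max_buf)
{
	if (server_max_buf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		return &cifs_addr_ops_smallbuf;
	return &cifs_addr_ops;
}

int main(void)
{
	printf("%s\n", pick_addr_ops(4096)->name);	/* too small  -> smallbuf */
	printf("%s\n", pick_addr_ops(16644)->name);	/* big enough -> full ops */
	return 0;
}
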
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 2ec99f833142..a57f5d6e6213 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -167,7 +167,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
167 | return -ENOMEM; | 167 | return -ENOMEM; |
168 | } | 168 | } |
169 | 169 | ||
170 | cFYI(1, ("Full path: %s ", full_path)); | 170 | cFYI(1, ("Full path: %s", full_path)); |
171 | cFYI(1, ("symname is %s", symname)); | 171 | cFYI(1, ("symname is %s", symname)); |
172 | 172 | ||
173 | /* BB what if DFS and this volume is on different share? BB */ | 173 | /* BB what if DFS and this volume is on different share? BB */ |
@@ -186,8 +186,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
186 | inode->i_sb,xid); | 186 | inode->i_sb,xid); |
187 | 187 | ||
188 | if (rc != 0) { | 188 | if (rc != 0) { |
189 | cFYI(1, | 189 | cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d", |
190 | ("Create symlink worked but get_inode_info failed with rc = %d ", | ||
191 | rc)); | 190 | rc)); |
192 | } else { | 191 | } else { |
193 | if (pTcon->nocase) | 192 | if (pTcon->nocase) |
@@ -289,7 +288,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) | |||
289 | else { | 288 | else { |
290 | cFYI(1,("num referral: %d",num_referrals)); | 289 | cFYI(1,("num referral: %d",num_referrals)); |
291 | if(referrals) { | 290 | if(referrals) { |
292 | cFYI(1,("referral string: %s ",referrals)); | 291 | cFYI(1,("referral string: %s",referrals)); |
293 | strncpy(tmpbuffer, referrals, len-1); | 292 | strncpy(tmpbuffer, referrals, len-1); |
294 | } | 293 | } |
295 | } | 294 | } |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index fafd056426e4..22c937e5884f 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -101,6 +101,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) | |||
101 | kfree(buf_to_free->serverDomain); | 101 | kfree(buf_to_free->serverDomain); |
102 | kfree(buf_to_free->serverNOS); | 102 | kfree(buf_to_free->serverNOS); |
103 | kfree(buf_to_free->password); | 103 | kfree(buf_to_free->password); |
104 | kfree(buf_to_free->domainName); | ||
104 | kfree(buf_to_free); | 105 | kfree(buf_to_free); |
105 | } | 106 | } |
106 | 107 | ||
@@ -499,11 +500,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
499 | if(pSMBr->ByteCount > sizeof(struct file_notify_information)) { | 500 | if(pSMBr->ByteCount > sizeof(struct file_notify_information)) { |
500 | data_offset = le32_to_cpu(pSMBr->DataOffset); | 501 | data_offset = le32_to_cpu(pSMBr->DataOffset); |
501 | 502 | ||
502 | pnotify = (struct file_notify_information *)((char *)&pSMBr->hdr.Protocol | 503 | pnotify = (struct file_notify_information *) |
503 | + data_offset); | 504 | ((char *)&pSMBr->hdr.Protocol + data_offset); |
504 | cFYI(1,("dnotify on %s with action: 0x%x",pnotify->FileName, | 505 | cFYI(1,("dnotify on %s Action: 0x%x",pnotify->FileName, |
505 | pnotify->Action)); /* BB removeme BB */ | 506 | pnotify->Action)); /* BB removeme BB */ |
506 | /* cifs_dump_mem("Received notify Data is: ",buf,sizeof(struct smb_hdr)+60); */ | 507 | /* cifs_dump_mem("Rcvd notify Data: ",buf, |
508 | sizeof(struct smb_hdr)+60); */ | ||
507 | return TRUE; | 509 | return TRUE; |
508 | } | 510 | } |
509 | if(pSMBr->hdr.Status.CifsError) { | 511 | if(pSMBr->hdr.Status.CifsError) { |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 5de74d216fdd..b66eff5dc624 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -84,11 +84,11 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { | |||
84 | 84 | ||
85 | static const struct smb_to_posix_error mapping_table_ERRSRV[] = { | 85 | static const struct smb_to_posix_error mapping_table_ERRSRV[] = { |
86 | {ERRerror, -EIO}, | 86 | {ERRerror, -EIO}, |
87 | {ERRbadpw, -EPERM}, | 87 | {ERRbadpw, -EACCES}, /* was EPERM */ |
88 | {ERRbadtype, -EREMOTE}, | 88 | {ERRbadtype, -EREMOTE}, |
89 | {ERRaccess, -EACCES}, | 89 | {ERRaccess, -EACCES}, |
90 | {ERRinvtid, -ENXIO}, | 90 | {ERRinvtid, -ENXIO}, |
91 | {ERRinvnetname, -ENODEV}, | 91 | {ERRinvnetname, -ENXIO}, |
92 | {ERRinvdevice, -ENXIO}, | 92 | {ERRinvdevice, -ENXIO}, |
93 | {ERRqfull, -ENOSPC}, | 93 | {ERRqfull, -ENOSPC}, |
94 | {ERRqtoobig, -ENOSPC}, | 94 | {ERRqtoobig, -ENOSPC}, |
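
The netmisc.c hunk above retunes two entries of the SMB-to-POSIX error mapping: a bad password now surfaces as -EACCES rather than -EPERM, and an invalid network name as -ENXIO rather than -ENODEV. A rough sketch of how such a table is typically consumed is shown below; the numeric SMB codes and the map_smb_error() helper are illustrative placeholders, not the kernel's actual definitions.

#include <errno.h>
#include <stdio.h>

/* Illustrative subset of an ERRSRV-class SMB error -> POSIX errno table;
 * the codes below are placeholders for demonstration only. */
struct smb_to_posix_error {
	unsigned short smb_err;
	int posix_err;
};

static const struct smb_to_posix_error errsrv_map[] = {
	{ 1, -EIO },	/* ERRerror */
	{ 2, -EACCES },	/* ERRbadpw (was -EPERM before this patch) */
	{ 6, -ENXIO },	/* ERRinvnetname (was -ENODEV) */
	{ 0, 0 }	/* terminator */
};

static int map_smb_error(unsigned short smb_err)
{
	const struct smb_to_posix_error *e;

	for (e = errsrv_map; e->smb_err != 0; e++)
		if (e->smb_err == smb_err)
			return e->posix_err;
	return -EIO;	/* default for unrecognized codes */
}

int main(void)
{
	printf("bad password maps to %d\n", map_smb_error(2));
	return 0;
}
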
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c deleted file mode 100644 index 115359cc7a32..000000000000 --- a/fs/cifs/ntlmssp.c +++ /dev/null | |||
@@ -1,143 +0,0 @@ | |||
1 | /* | ||
2 | * fs/cifs/ntlmssp.h | ||
3 | * | ||
4 | * Copyright (c) International Business Machines Corp., 2006 | ||
5 | * Author(s): Steve French (sfrench@us.ibm.com) | ||
6 | * | ||
7 | * This library is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU Lesser General Public License as published | ||
9 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This library is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
15 | * the GNU Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public License | ||
18 | * along with this library; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #include "cifspdu.h" | ||
23 | #include "cifsglob.h" | ||
24 | #include "cifsproto.h" | ||
25 | #include "cifs_unicode.h" | ||
26 | #include "cifs_debug.h" | ||
27 | #include "ntlmssp.h" | ||
28 | #include "nterr.h" | ||
29 | |||
30 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
31 | static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB) | ||
32 | { | ||
33 | __u32 capabilities = 0; | ||
34 | |||
35 | /* init fields common to all four types of SessSetup */ | ||
36 | /* note that header is initialized to zero in header_assemble */ | ||
37 | pSMB->req.AndXCommand = 0xFF; | ||
38 | pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf); | ||
39 | pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); | ||
40 | |||
41 | /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ | ||
42 | |||
43 | /* BB verify whether signing required on neg or just on auth frame | ||
44 | (and NTLM case) */ | ||
45 | |||
46 | capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | | ||
47 | CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; | ||
48 | |||
49 | if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | ||
50 | pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | ||
51 | |||
52 | if (ses->capabilities & CAP_UNICODE) { | ||
53 | pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE; | ||
54 | capabilities |= CAP_UNICODE; | ||
55 | } | ||
56 | if (ses->capabilities & CAP_STATUS32) { | ||
57 | pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS; | ||
58 | capabilities |= CAP_STATUS32; | ||
59 | } | ||
60 | if (ses->capabilities & CAP_DFS) { | ||
61 | pSMB->req.hdr.Flags2 |= SMBFLG2_DFS; | ||
62 | capabilities |= CAP_DFS; | ||
63 | } | ||
64 | |||
65 | /* BB check whether to init vcnum BB */ | ||
66 | return capabilities; | ||
67 | } | ||
68 | int | ||
69 | CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type, | ||
70 | int * pNTLMv2_flg, const struct nls_table *nls_cp) | ||
71 | { | ||
72 | int rc = 0; | ||
73 | int wct; | ||
74 | struct smb_hdr *smb_buffer; | ||
75 | char *bcc_ptr; | ||
76 | SESSION_SETUP_ANDX *pSMB; | ||
77 | __u32 capabilities; | ||
78 | |||
79 | if(ses == NULL) | ||
80 | return -EINVAL; | ||
81 | |||
82 | cFYI(1,("SStp type: %d",type)); | ||
83 | if(type < CIFS_NTLM) { | ||
84 | #ifndef CONFIG_CIFS_WEAK_PW_HASH | ||
85 | /* LANMAN and plaintext are less secure and off by default. | ||
86 | So we make this explicitly be turned on in kconfig (in the | ||
87 | build) and turned on at runtime (changed from the default) | ||
88 | in proc/fs/cifs or via mount parm. Unfortunately this is | ||
89 | needed for old Win (e.g. Win95), some obscure NAS and OS/2 */ | ||
90 | return -EOPNOTSUPP; | ||
91 | #endif | ||
92 | wct = 10; /* lanman 2 style sessionsetup */ | ||
93 | } else if(type < CIFS_NTLMSSP_NEG) | ||
94 | wct = 13; /* old style NTLM sessionsetup */ | ||
95 | else /* same size for negotiate or auth, NTLMSSP or extended security */ | ||
96 | wct = 12; | ||
97 | |||
98 | rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses, | ||
99 | (void **)&smb_buffer); | ||
100 | if(rc) | ||
101 | return rc; | ||
102 | |||
103 | pSMB = (SESSION_SETUP_ANDX *)smb_buffer; | ||
104 | |||
105 | capabilities = cifs_ssetup_hdr(ses, pSMB); | ||
106 | bcc_ptr = pByteArea(smb_buffer); | ||
107 | if(type > CIFS_NTLM) { | ||
108 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | ||
109 | capabilities |= CAP_EXTENDED_SECURITY; | ||
110 | pSMB->req.Capabilities = cpu_to_le32(capabilities); | ||
111 | /* BB set password lengths */ | ||
112 | } else if(type < CIFS_NTLM) /* lanman */ { | ||
113 | /* no capabilities flags in old lanman negotiation */ | ||
114 | /* pSMB->old_req.PasswordLength = */ /* BB fixme BB */ | ||
115 | } else /* type CIFS_NTLM */ { | ||
116 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | ||
117 | pSMB->req_no_secext.CaseInsensitivePasswordLength = | ||
118 | cpu_to_le16(CIFS_SESSION_KEY_SIZE); | ||
119 | pSMB->req_no_secext.CaseSensitivePasswordLength = | ||
120 | cpu_to_le16(CIFS_SESSION_KEY_SIZE); | ||
121 | } | ||
122 | |||
123 | |||
124 | /* copy session key */ | ||
125 | |||
126 | /* if Unicode, align strings to two byte boundary */ | ||
127 | |||
128 | /* copy user name */ /* BB Do we need to special case null user name? */ | ||
129 | |||
130 | /* copy domain name */ | ||
131 | |||
132 | /* copy Linux version */ | ||
133 | |||
134 | /* copy network operating system name */ | ||
135 | |||
136 | /* update bcc and smb buffer length */ | ||
137 | |||
138 | /* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */ | ||
139 | /* SMB request buf freed in SendReceive2 */ | ||
140 | |||
141 | return rc; | ||
142 | } | ||
143 | #endif /* CONFIG_CIFS_EXPERIMENTAL */ | ||
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index b689c5035124..03bbcb377913 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
22 | */ | 22 | */ |
23 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
24 | #include <linux/pagemap.h> | ||
24 | #include <linux/stat.h> | 25 | #include <linux/stat.h> |
25 | #include <linux/smp_lock.h> | 26 | #include <linux/smp_lock.h> |
26 | #include "cifspdu.h" | 27 | #include "cifspdu.h" |
@@ -31,8 +32,8 @@ | |||
31 | #include "cifs_fs_sb.h" | 32 | #include "cifs_fs_sb.h" |
32 | #include "cifsfs.h" | 33 | #include "cifsfs.h" |
33 | 34 | ||
34 | /* BB fixme - add debug wrappers around this function to disable it fixme BB */ | 35 | #ifdef CONFIG_CIFS_DEBUG2 |
35 | /* static void dump_cifs_file_struct(struct file *file, char *label) | 36 | static void dump_cifs_file_struct(struct file *file, char *label) |
36 | { | 37 | { |
37 | struct cifsFileInfo * cf; | 38 | struct cifsFileInfo * cf; |
38 | 39 | ||
@@ -53,7 +54,8 @@ | |||
53 | } | 54 | } |
54 | 55 | ||
55 | } | 56 | } |
56 | } */ | 57 | } |
58 | #endif /* DEBUG2 */ | ||
57 | 59 | ||
58 | /* Returns one if new inode created (which therefore needs to be hashed) */ | 60 | /* Returns one if new inode created (which therefore needs to be hashed) */ |
59 | /* Might check in the future if inode number changed so we can rehash inode */ | 61 | /* Might check in the future if inode number changed so we can rehash inode */ |
@@ -107,32 +109,52 @@ static int construct_dentry(struct qstr *qstring, struct file *file, | |||
107 | return rc; | 109 | return rc; |
108 | } | 110 | } |
109 | 111 | ||
110 | static void fill_in_inode(struct inode *tmp_inode, | 112 | static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, |
111 | FILE_DIRECTORY_INFO *pfindData, int *pobject_type, int isNewInode) | 113 | char * buf, int *pobject_type, int isNewInode) |
112 | { | 114 | { |
113 | loff_t local_size; | 115 | loff_t local_size; |
114 | struct timespec local_mtime; | 116 | struct timespec local_mtime; |
115 | 117 | ||
116 | struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); | 118 | struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); |
117 | struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb); | 119 | struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb); |
118 | __u32 attr = le32_to_cpu(pfindData->ExtFileAttributes); | 120 | __u32 attr; |
119 | __u64 allocation_size = le64_to_cpu(pfindData->AllocationSize); | 121 | __u64 allocation_size; |
120 | __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile); | 122 | __u64 end_of_file; |
121 | |||
122 | cifsInfo->cifsAttrs = attr; | ||
123 | cifsInfo->time = jiffies; | ||
124 | 123 | ||
125 | /* save mtime and size */ | 124 | /* save mtime and size */ |
126 | local_mtime = tmp_inode->i_mtime; | 125 | local_mtime = tmp_inode->i_mtime; |
127 | local_size = tmp_inode->i_size; | 126 | local_size = tmp_inode->i_size; |
128 | 127 | ||
128 | if(new_buf_type) { | ||
129 | FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf; | ||
130 | |||
131 | attr = le32_to_cpu(pfindData->ExtFileAttributes); | ||
132 | allocation_size = le64_to_cpu(pfindData->AllocationSize); | ||
133 | end_of_file = le64_to_cpu(pfindData->EndOfFile); | ||
134 | tmp_inode->i_atime = | ||
135 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime)); | ||
136 | tmp_inode->i_mtime = | ||
137 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime)); | ||
138 | tmp_inode->i_ctime = | ||
139 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); | ||
140 | } else { /* legacy, OS2 and DOS style */ | ||
141 | FIND_FILE_STANDARD_INFO * pfindData = | ||
142 | (FIND_FILE_STANDARD_INFO *)buf; | ||
143 | |||
144 | attr = le16_to_cpu(pfindData->Attributes); | ||
145 | allocation_size = le32_to_cpu(pfindData->AllocationSize); | ||
146 | end_of_file = le32_to_cpu(pfindData->DataSize); | ||
147 | tmp_inode->i_atime = CURRENT_TIME; | ||
148 | /* tmp_inode->i_mtime = BB FIXME - add dos time handling | ||
149 | tmp_inode->i_ctime = 0; BB FIXME */ | ||
150 | |||
151 | } | ||
152 | |||
129 | /* Linux can not store file creation time unfortunately so ignore it */ | 153 | /* Linux can not store file creation time unfortunately so ignore it */ |
130 | tmp_inode->i_atime = | 154 | |
131 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime)); | 155 | cifsInfo->cifsAttrs = attr; |
132 | tmp_inode->i_mtime = | 156 | cifsInfo->time = jiffies; |
133 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime)); | 157 | |
134 | tmp_inode->i_ctime = | ||
135 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); | ||
136 | /* treat dos attribute of read-only as read-only mode bit e.g. 555? */ | 158 | /* treat dos attribute of read-only as read-only mode bit e.g. 555? */ |
137 | /* 2767 perms - indicate mandatory locking */ | 159 | /* 2767 perms - indicate mandatory locking */ |
138 | /* BB fill in uid and gid here? with help from winbind? | 160 | /* BB fill in uid and gid here? with help from winbind? |
@@ -215,11 +237,13 @@ static void fill_in_inode(struct inode *tmp_inode, | |||
215 | else | 237 | else |
216 | tmp_inode->i_fop = &cifs_file_ops; | 238 | tmp_inode->i_fop = &cifs_file_ops; |
217 | 239 | ||
218 | tmp_inode->i_data.a_ops = &cifs_addr_ops; | ||
219 | if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && | 240 | if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && |
220 | (cifs_sb->tcon->ses->server->maxBuf < | 241 | (cifs_sb->tcon->ses->server->maxBuf < |
221 | 4096 + MAX_CIFS_HDR_SIZE)) | 242 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)) |
222 | tmp_inode->i_data.a_ops->readpages = NULL; | 243 | tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf; |
244 | else | ||
245 | tmp_inode->i_data.a_ops = &cifs_addr_ops; | ||
246 | |||
223 | if(isNewInode) | 247 | if(isNewInode) |
224 | return; /* No sense invalidating pages for new inode | 248 | return; /* No sense invalidating pages for new inode |
225 | since have not started caching readahead file | 249 | since have not started caching readahead file |
@@ -338,11 +362,12 @@ static void unix_fill_in_inode(struct inode *tmp_inode, | |||
338 | else | 362 | else |
339 | tmp_inode->i_fop = &cifs_file_ops; | 363 | tmp_inode->i_fop = &cifs_file_ops; |
340 | 364 | ||
341 | tmp_inode->i_data.a_ops = &cifs_addr_ops; | ||
342 | if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && | 365 | if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && |
343 | (cifs_sb->tcon->ses->server->maxBuf < | 366 | (cifs_sb->tcon->ses->server->maxBuf < |
344 | 4096 + MAX_CIFS_HDR_SIZE)) | 367 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)) |
345 | tmp_inode->i_data.a_ops->readpages = NULL; | 368 | tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf; |
369 | else | ||
370 | tmp_inode->i_data.a_ops = &cifs_addr_ops; | ||
346 | 371 | ||
347 | if(isNewInode) | 372 | if(isNewInode) |
348 | return; /* No sense invalidating pages for new inode since we | 373 | return; /* No sense invalidating pages for new inode since we |
@@ -415,7 +440,10 @@ static int initiate_cifs_search(const int xid, struct file *file) | |||
415 | ffirst_retry: | 440 | ffirst_retry: |
416 | /* test for Unix extensions */ | 441 | /* test for Unix extensions */ |
417 | if (pTcon->ses->capabilities & CAP_UNIX) { | 442 | if (pTcon->ses->capabilities & CAP_UNIX) { |
418 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; | 443 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; |
444 | } else if ((pTcon->ses->capabilities & | ||
445 | (CAP_NT_SMBS | CAP_NT_FIND)) == 0) { | ||
446 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; | ||
419 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { | 447 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { |
420 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; | 448 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; |
421 | } else /* not srvinos - BB fixme add check for backlevel? */ { | 449 | } else /* not srvinos - BB fixme add check for backlevel? */ { |
@@ -451,12 +479,19 @@ static int cifs_unicode_bytelen(char *str) | |||
451 | return len << 1; | 479 | return len << 1; |
452 | } | 480 | } |
453 | 481 | ||
454 | static char *nxt_dir_entry(char *old_entry, char *end_of_smb) | 482 | static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) |
455 | { | 483 | { |
456 | char * new_entry; | 484 | char * new_entry; |
457 | FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; | 485 | FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; |
458 | 486 | ||
459 | new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); | 487 | if(level == SMB_FIND_FILE_INFO_STANDARD) { |
488 | FIND_FILE_STANDARD_INFO * pfData; | ||
489 | pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo; | ||
490 | |||
491 | new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + | ||
492 | pfData->FileNameLength; | ||
493 | } else | ||
494 | new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); | ||
460 | cFYI(1,("new entry %p old entry %p",new_entry,old_entry)); | 495 | cFYI(1,("new entry %p old entry %p",new_entry,old_entry)); |
461 | /* validate that new_entry is not past end of SMB */ | 496 | /* validate that new_entry is not past end of SMB */ |
462 | if(new_entry >= end_of_smb) { | 497 | if(new_entry >= end_of_smb) { |
@@ -464,7 +499,10 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb) | |||
464 | ("search entry %p began after end of SMB %p old entry %p", | 499 | ("search entry %p began after end of SMB %p old entry %p", |
465 | new_entry, end_of_smb, old_entry)); | 500 | new_entry, end_of_smb, old_entry)); |
466 | return NULL; | 501 | return NULL; |
467 | } else if (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb) { | 502 | } else if(((level == SMB_FIND_FILE_INFO_STANDARD) && |
503 | (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) || | ||
504 | ((level != SMB_FIND_FILE_INFO_STANDARD) && | ||
505 | (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) { | ||
468 | cERROR(1,("search entry %p extends after end of SMB %p", | 506 | cERROR(1,("search entry %p extends after end of SMB %p", |
469 | new_entry, end_of_smb)); | 507 | new_entry, end_of_smb)); |
470 | return NULL; | 508 | return NULL; |
@@ -482,7 +520,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) | |||
482 | char * filename = NULL; | 520 | char * filename = NULL; |
483 | int len = 0; | 521 | int len = 0; |
484 | 522 | ||
485 | if(cfile->srch_inf.info_level == 0x202) { | 523 | if(cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) { |
486 | FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry; | 524 | FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry; |
487 | filename = &pFindData->FileName[0]; | 525 | filename = &pFindData->FileName[0]; |
488 | if(cfile->srch_inf.unicode) { | 526 | if(cfile->srch_inf.unicode) { |
@@ -491,26 +529,34 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) | |||
491 | /* BB should we make this strnlen of PATH_MAX? */ | 529 | /* BB should we make this strnlen of PATH_MAX? */ |
492 | len = strnlen(filename, 5); | 530 | len = strnlen(filename, 5); |
493 | } | 531 | } |
494 | } else if(cfile->srch_inf.info_level == 0x101) { | 532 | } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) { |
495 | FILE_DIRECTORY_INFO * pFindData = | 533 | FILE_DIRECTORY_INFO * pFindData = |
496 | (FILE_DIRECTORY_INFO *)current_entry; | 534 | (FILE_DIRECTORY_INFO *)current_entry; |
497 | filename = &pFindData->FileName[0]; | 535 | filename = &pFindData->FileName[0]; |
498 | len = le32_to_cpu(pFindData->FileNameLength); | 536 | len = le32_to_cpu(pFindData->FileNameLength); |
499 | } else if(cfile->srch_inf.info_level == 0x102) { | 537 | } else if(cfile->srch_inf.info_level == |
538 | SMB_FIND_FILE_FULL_DIRECTORY_INFO) { | ||
500 | FILE_FULL_DIRECTORY_INFO * pFindData = | 539 | FILE_FULL_DIRECTORY_INFO * pFindData = |
501 | (FILE_FULL_DIRECTORY_INFO *)current_entry; | 540 | (FILE_FULL_DIRECTORY_INFO *)current_entry; |
502 | filename = &pFindData->FileName[0]; | 541 | filename = &pFindData->FileName[0]; |
503 | len = le32_to_cpu(pFindData->FileNameLength); | 542 | len = le32_to_cpu(pFindData->FileNameLength); |
504 | } else if(cfile->srch_inf.info_level == 0x105) { | 543 | } else if(cfile->srch_inf.info_level == |
544 | SMB_FIND_FILE_ID_FULL_DIR_INFO) { | ||
505 | SEARCH_ID_FULL_DIR_INFO * pFindData = | 545 | SEARCH_ID_FULL_DIR_INFO * pFindData = |
506 | (SEARCH_ID_FULL_DIR_INFO *)current_entry; | 546 | (SEARCH_ID_FULL_DIR_INFO *)current_entry; |
507 | filename = &pFindData->FileName[0]; | 547 | filename = &pFindData->FileName[0]; |
508 | len = le32_to_cpu(pFindData->FileNameLength); | 548 | len = le32_to_cpu(pFindData->FileNameLength); |
509 | } else if(cfile->srch_inf.info_level == 0x104) { | 549 | } else if(cfile->srch_inf.info_level == |
550 | SMB_FIND_FILE_BOTH_DIRECTORY_INFO) { | ||
510 | FILE_BOTH_DIRECTORY_INFO * pFindData = | 551 | FILE_BOTH_DIRECTORY_INFO * pFindData = |
511 | (FILE_BOTH_DIRECTORY_INFO *)current_entry; | 552 | (FILE_BOTH_DIRECTORY_INFO *)current_entry; |
512 | filename = &pFindData->FileName[0]; | 553 | filename = &pFindData->FileName[0]; |
513 | len = le32_to_cpu(pFindData->FileNameLength); | 554 | len = le32_to_cpu(pFindData->FileNameLength); |
555 | } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) { | ||
556 | FIND_FILE_STANDARD_INFO * pFindData = | ||
557 | (FIND_FILE_STANDARD_INFO *)current_entry; | ||
558 | filename = &pFindData->FileName[0]; | ||
559 | len = le32_to_cpu(pFindData->FileNameLength); | ||
514 | } else { | 560 | } else { |
515 | cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); | 561 | cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); |
516 | } | 562 | } |
@@ -597,7 +643,9 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
597 | . and .. for the root of a drive and for those we need | 643 | . and .. for the root of a drive and for those we need |
598 | to start two entries earlier */ | 644 | to start two entries earlier */ |
599 | 645 | ||
600 | /* dump_cifs_file_struct(file, "In fce ");*/ | 646 | #ifdef CONFIG_CIFS_DEBUG2 |
647 | dump_cifs_file_struct(file, "In fce "); | ||
648 | #endif | ||
601 | if(((index_to_find < cifsFile->srch_inf.index_of_last_entry) && | 649 | if(((index_to_find < cifsFile->srch_inf.index_of_last_entry) && |
602 | is_dir_changed(file)) || | 650 | is_dir_changed(file)) || |
603 | (index_to_find < first_entry_in_buffer)) { | 651 | (index_to_find < first_entry_in_buffer)) { |
@@ -644,10 +692,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
644 | first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry | 692 | first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry |
645 | - cifsFile->srch_inf.entries_in_buffer; | 693 | - cifsFile->srch_inf.entries_in_buffer; |
646 | pos_in_buf = index_to_find - first_entry_in_buffer; | 694 | pos_in_buf = index_to_find - first_entry_in_buffer; |
647 | cFYI(1,("found entry - pos_in_buf %d",pos_in_buf)); | 695 | cFYI(1,("found entry - pos_in_buf %d",pos_in_buf)); |
696 | |||
648 | for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) { | 697 | for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) { |
649 | /* go entry by entry figuring out which is first */ | 698 | /* go entry by entry figuring out which is first */ |
650 | current_entry = nxt_dir_entry(current_entry,end_of_smb); | 699 | current_entry = nxt_dir_entry(current_entry,end_of_smb, |
700 | cifsFile->srch_inf.info_level); | ||
651 | } | 701 | } |
652 | if((current_entry == NULL) && (i < pos_in_buf)) { | 702 | if((current_entry == NULL) && (i < pos_in_buf)) { |
653 | /* BB fixme - check if we should flag this error */ | 703 | /* BB fixme - check if we should flag this error */ |
@@ -674,7 +724,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
674 | /* inode num, inode type and filename returned */ | 724 | /* inode num, inode type and filename returned */ |
675 | static int cifs_get_name_from_search_buf(struct qstr *pqst, | 725 | static int cifs_get_name_from_search_buf(struct qstr *pqst, |
676 | char *current_entry, __u16 level, unsigned int unicode, | 726 | char *current_entry, __u16 level, unsigned int unicode, |
677 | struct cifs_sb_info * cifs_sb, ino_t *pinum) | 727 | struct cifs_sb_info * cifs_sb, int max_len, ino_t *pinum) |
678 | { | 728 | { |
679 | int rc = 0; | 729 | int rc = 0; |
680 | unsigned int len = 0; | 730 | unsigned int len = 0; |
@@ -718,10 +768,22 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, | |||
718 | (FILE_BOTH_DIRECTORY_INFO *)current_entry; | 768 | (FILE_BOTH_DIRECTORY_INFO *)current_entry; |
719 | filename = &pFindData->FileName[0]; | 769 | filename = &pFindData->FileName[0]; |
720 | len = le32_to_cpu(pFindData->FileNameLength); | 770 | len = le32_to_cpu(pFindData->FileNameLength); |
771 | } else if(level == SMB_FIND_FILE_INFO_STANDARD) { | ||
772 | FIND_FILE_STANDARD_INFO * pFindData = | ||
773 | (FIND_FILE_STANDARD_INFO *)current_entry; | ||
774 | filename = &pFindData->FileName[0]; | ||
775 | /* one byte length, no name conversion */ | ||
776 | len = (unsigned int)pFindData->FileNameLength; | ||
721 | } else { | 777 | } else { |
722 | cFYI(1,("Unknown findfirst level %d",level)); | 778 | cFYI(1,("Unknown findfirst level %d",level)); |
723 | return -EINVAL; | 779 | return -EINVAL; |
724 | } | 780 | } |
781 | |||
782 | if(len > max_len) { | ||
783 | cERROR(1,("bad search response length %d past smb end", len)); | ||
784 | return -EINVAL; | ||
785 | } | ||
786 | |||
725 | if(unicode) { | 787 | if(unicode) { |
726 | /* BB fixme - test with long names */ | 788 | /* BB fixme - test with long names */ |
727 | /* Note converted filename can be longer than in unicode */ | 789 | /* Note converted filename can be longer than in unicode */ |
@@ -741,7 +803,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, | |||
741 | } | 803 | } |
742 | 804 | ||
743 | static int cifs_filldir(char *pfindEntry, struct file *file, | 805 | static int cifs_filldir(char *pfindEntry, struct file *file, |
744 | filldir_t filldir, void *direntry, char *scratch_buf) | 806 | filldir_t filldir, void *direntry, char *scratch_buf, int max_len) |
745 | { | 807 | { |
746 | int rc = 0; | 808 | int rc = 0; |
747 | struct qstr qstring; | 809 | struct qstr qstring; |
@@ -777,6 +839,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file, | |||
777 | rc = cifs_get_name_from_search_buf(&qstring,pfindEntry, | 839 | rc = cifs_get_name_from_search_buf(&qstring,pfindEntry, |
778 | pCifsF->srch_inf.info_level, | 840 | pCifsF->srch_inf.info_level, |
779 | pCifsF->srch_inf.unicode,cifs_sb, | 841 | pCifsF->srch_inf.unicode,cifs_sb, |
842 | max_len, | ||
780 | &inum /* returned */); | 843 | &inum /* returned */); |
781 | 844 | ||
782 | if(rc) | 845 | if(rc) |
@@ -798,13 +861,16 @@ static int cifs_filldir(char *pfindEntry, struct file *file, | |||
798 | /* we pass in rc below, indicating whether it is a new inode, | 861 | /* we pass in rc below, indicating whether it is a new inode, |
799 | so we can figure out whether to invalidate the inode cached | 862 | so we can figure out whether to invalidate the inode cached |
800 | data if the file has changed */ | 863 | data if the file has changed */ |
801 | if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) { | 864 | if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) |
802 | unix_fill_in_inode(tmp_inode, | 865 | unix_fill_in_inode(tmp_inode, |
803 | (FILE_UNIX_INFO *)pfindEntry,&obj_type, rc); | 866 | (FILE_UNIX_INFO *)pfindEntry, |
804 | } else { | 867 | &obj_type, rc); |
805 | fill_in_inode(tmp_inode, | 868 | else if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) |
806 | (FILE_DIRECTORY_INFO *)pfindEntry,&obj_type, rc); | 869 | fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */, |
807 | } | 870 | pfindEntry, &obj_type, rc); |
871 | else | ||
872 | fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc); | ||
873 | |||
808 | 874 | ||
809 | rc = filldir(direntry,qstring.name,qstring.len,file->f_pos, | 875 | rc = filldir(direntry,qstring.name,qstring.len,file->f_pos, |
810 | tmp_inode->i_ino,obj_type); | 876 | tmp_inode->i_ino,obj_type); |
@@ -864,6 +930,12 @@ static int cifs_save_resume_key(const char *current_entry, | |||
864 | filename = &pFindData->FileName[0]; | 930 | filename = &pFindData->FileName[0]; |
865 | len = le32_to_cpu(pFindData->FileNameLength); | 931 | len = le32_to_cpu(pFindData->FileNameLength); |
866 | cifsFile->srch_inf.resume_key = pFindData->FileIndex; | 932 | cifsFile->srch_inf.resume_key = pFindData->FileIndex; |
933 | } else if(level == SMB_FIND_FILE_INFO_STANDARD) { | ||
934 | FIND_FILE_STANDARD_INFO * pFindData = | ||
935 | (FIND_FILE_STANDARD_INFO *)current_entry; | ||
936 | filename = &pFindData->FileName[0]; | ||
937 | /* one byte length, no name conversion */ | ||
938 | len = (unsigned int)pFindData->FileNameLength; | ||
867 | } else { | 939 | } else { |
868 | cFYI(1,("Unknown findfirst level %d",level)); | 940 | cFYI(1,("Unknown findfirst level %d",level)); |
869 | return -EINVAL; | 941 | return -EINVAL; |
@@ -884,6 +956,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
884 | int num_to_fill = 0; | 956 | int num_to_fill = 0; |
885 | char * tmp_buf = NULL; | 957 | char * tmp_buf = NULL; |
886 | char * end_of_smb; | 958 | char * end_of_smb; |
959 | int max_len; | ||
887 | 960 | ||
888 | xid = GetXid(); | 961 | xid = GetXid(); |
889 | 962 | ||
@@ -909,7 +982,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
909 | case 1: | 982 | case 1: |
910 | if (filldir(direntry, "..", 2, file->f_pos, | 983 | if (filldir(direntry, "..", 2, file->f_pos, |
911 | file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { | 984 | file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { |
912 | cERROR(1, ("Filldir for parent dir failed ")); | 985 | cERROR(1, ("Filldir for parent dir failed")); |
913 | rc = -ENOMEM; | 986 | rc = -ENOMEM; |
914 | break; | 987 | break; |
915 | } | 988 | } |
@@ -959,10 +1032,11 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
959 | goto rddir2_exit; | 1032 | goto rddir2_exit; |
960 | } | 1033 | } |
961 | cFYI(1,("loop through %d times filling dir for net buf %p", | 1034 | cFYI(1,("loop through %d times filling dir for net buf %p", |
962 | num_to_fill,cifsFile->srch_inf.ntwrk_buf_start)); | 1035 | num_to_fill,cifsFile->srch_inf.ntwrk_buf_start)); |
963 | end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + | 1036 | max_len = smbCalcSize((struct smb_hdr *) |
964 | smbCalcSize((struct smb_hdr *) | 1037 | cifsFile->srch_inf.ntwrk_buf_start); |
965 | cifsFile->srch_inf.ntwrk_buf_start); | 1038 | end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; |
1039 | |||
966 | /* To be safe - for UCS to UTF-8 with strings loaded | 1040 | /* To be safe - for UCS to UTF-8 with strings loaded |
967 | with the rare long characters alloc more to account for | 1041 | with the rare long characters alloc more to account for |
968 | such multibyte target UTF-8 characters. cifs_unicode.c, | 1042 | such multibyte target UTF-8 characters. cifs_unicode.c, |
@@ -977,17 +1051,19 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
977 | } | 1051 | } |
978 | /* if buggy server returns . and .. late do | 1052 | /* if buggy server returns . and .. late do |
979 | we want to check for that here? */ | 1053 | we want to check for that here? */ |
980 | rc = cifs_filldir(current_entry, file, | 1054 | rc = cifs_filldir(current_entry, file, |
981 | filldir, direntry,tmp_buf); | 1055 | filldir, direntry, tmp_buf, max_len); |
982 | file->f_pos++; | 1056 | file->f_pos++; |
983 | if(file->f_pos == cifsFile->srch_inf.index_of_last_entry) { | 1057 | if(file->f_pos == |
1058 | cifsFile->srch_inf.index_of_last_entry) { | ||
984 | cFYI(1,("last entry in buf at pos %lld %s", | 1059 | cFYI(1,("last entry in buf at pos %lld %s", |
985 | file->f_pos,tmp_buf)); /* BB removeme BB */ | 1060 | file->f_pos,tmp_buf)); |
986 | cifs_save_resume_key(current_entry,cifsFile); | 1061 | cifs_save_resume_key(current_entry,cifsFile); |
987 | break; | 1062 | break; |
988 | } else | 1063 | } else |
989 | current_entry = nxt_dir_entry(current_entry, | 1064 | current_entry = |
990 | end_of_smb); | 1065 | nxt_dir_entry(current_entry, end_of_smb, |
1066 | cifsFile->srch_inf.info_level); | ||
991 | } | 1067 | } |
992 | kfree(tmp_buf); | 1068 | kfree(tmp_buf); |
993 | break; | 1069 | break; |
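
The readdir.c changes above thread the search info level into nxt_dir_entry() because legacy FIND_FILE_STANDARD_INFO records (the OS/2 and DOS style level added here) carry no NextEntryOffset field: the next record begins at the fixed structure size plus a one-byte file name length, while the newer levels keep using the offset embedded in the record. A simplified, bounds-checked sketch of that walk follows; the two record layouts and constants are illustrative stand-ins for the definitions in cifspdu.h.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the two on-the-wire record shapes. */
struct nt_dir_info {			/* modern levels: offset to next record */
	uint32_t next_entry_offset;
	/* many more fields in the real FILE_DIRECTORY_INFO */
};

struct std_dir_info {			/* legacy level: no offset field */
	uint8_t file_name_length;	/* one byte, no name conversion */
	char    file_name[1];		/* variable length */
};

enum { INFO_STANDARD = 1, INFO_NT_STYLE = 0x101 };

/* Advance to the next directory entry, refusing to step past the end of
 * the received SMB, mirroring the logic the patch adds. */
static char *next_entry(char *cur, char *end_of_smb, int level)
{
	char *next;

	if (level == INFO_STANDARD) {
		struct std_dir_info *std = (struct std_dir_info *)cur;
		next = cur + sizeof(*std) + std->file_name_length;
	} else {
		struct nt_dir_info *nt = (struct nt_dir_info *)cur;
		next = cur + nt->next_entry_offset;
	}

	return (next >= end_of_smb) ? NULL : next;
}

int main(void)
{
	char buf[64] = { 0 };
	struct std_dir_info *first = (struct std_dir_info *)buf;

	first->file_name_length = 3;	/* e.g. a three-character name */
	printf("next entry at offset %ld\n",
	       (long)(next_entry(buf, buf + sizeof(buf), INFO_STANDARD) - buf));
	return 0;
}
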
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c new file mode 100644 index 000000000000..7202d534ef0b --- /dev/null +++ b/fs/cifs/sess.c | |||
@@ -0,0 +1,538 @@ | |||
1 | /* | ||
2 | * fs/cifs/sess.c | ||
3 | * | ||
4 | * SMB/CIFS session setup handling routines | ||
5 | * | ||
6 | * Copyright (c) International Business Machines Corp., 2006 | ||
7 | * Author(s): Steve French (sfrench@us.ibm.com) | ||
8 | * | ||
9 | * This library is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU Lesser General Public License as published | ||
11 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This library is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
17 | * the GNU Lesser General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU Lesser General Public License | ||
20 | * along with this library; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #include "cifspdu.h" | ||
25 | #include "cifsglob.h" | ||
26 | #include "cifsproto.h" | ||
27 | #include "cifs_unicode.h" | ||
28 | #include "cifs_debug.h" | ||
29 | #include "ntlmssp.h" | ||
30 | #include "nterr.h" | ||
31 | #include <linux/utsname.h> | ||
32 | |||
33 | extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, | ||
34 | unsigned char *p24); | ||
35 | |||
36 | static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB) | ||
37 | { | ||
38 | __u32 capabilities = 0; | ||
39 | |||
40 | /* init fields common to all four types of SessSetup */ | ||
41 | /* note that header is initialized to zero in header_assemble */ | ||
42 | pSMB->req.AndXCommand = 0xFF; | ||
43 | pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf); | ||
44 | pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); | ||
45 | |||
46 | /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ | ||
47 | |||
48 | /* BB verify whether signing required on neg or just on auth frame | ||
49 | (and NTLM case) */ | ||
50 | |||
51 | capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | | ||
52 | CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; | ||
53 | |||
54 | if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | ||
55 | pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | ||
56 | |||
57 | if (ses->capabilities & CAP_UNICODE) { | ||
58 | pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE; | ||
59 | capabilities |= CAP_UNICODE; | ||
60 | } | ||
61 | if (ses->capabilities & CAP_STATUS32) { | ||
62 | pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS; | ||
63 | capabilities |= CAP_STATUS32; | ||
64 | } | ||
65 | if (ses->capabilities & CAP_DFS) { | ||
66 | pSMB->req.hdr.Flags2 |= SMBFLG2_DFS; | ||
67 | capabilities |= CAP_DFS; | ||
68 | } | ||
69 | if (ses->capabilities & CAP_UNIX) { | ||
70 | capabilities |= CAP_UNIX; | ||
71 | } | ||
72 | |||
73 | /* BB check whether to init vcnum BB */ | ||
74 | return capabilities; | ||
75 | } | ||
76 | |||
77 | static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses, | ||
78 | const struct nls_table * nls_cp) | ||
79 | { | ||
80 | char * bcc_ptr = *pbcc_area; | ||
81 | int bytes_ret = 0; | ||
82 | |||
83 | /* BB FIXME add check that strings total less | ||
84 | than 335 or will need to send them as arrays */ | ||
85 | |||
86 | /* unicode strings, must be word aligned before the call */ | ||
87 | /* if ((long) bcc_ptr % 2) { | ||
88 | *bcc_ptr = 0; | ||
89 | bcc_ptr++; | ||
90 | } */ | ||
91 | /* copy user */ | ||
92 | if(ses->userName == NULL) { | ||
93 | /* BB what about null user mounts - check that we do this BB */ | ||
94 | } else { /* 300 should be long enough for any conceivable user name */ | ||
95 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, | ||
96 | 300, nls_cp); | ||
97 | } | ||
98 | bcc_ptr += 2 * bytes_ret; | ||
99 | bcc_ptr += 2; /* account for null termination */ | ||
100 | /* copy domain */ | ||
101 | if(ses->domainName == NULL) | ||
102 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, | ||
103 | "CIFS_LINUX_DOM", 32, nls_cp); | ||
104 | else | ||
105 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName, | ||
106 | 256, nls_cp); | ||
107 | bcc_ptr += 2 * bytes_ret; | ||
108 | bcc_ptr += 2; /* account for null terminator */ | ||
109 | |||
110 | /* Copy OS version */ | ||
111 | bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32, | ||
112 | nls_cp); | ||
113 | bcc_ptr += 2 * bytes_ret; | ||
114 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release, | ||
115 | 32, nls_cp); | ||
116 | bcc_ptr += 2 * bytes_ret; | ||
117 | bcc_ptr += 2; /* trailing null */ | ||
118 | |||
119 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, | ||
120 | 32, nls_cp); | ||
121 | bcc_ptr += 2 * bytes_ret; | ||
122 | bcc_ptr += 2; /* trailing null */ | ||
123 | |||
124 | *pbcc_area = bcc_ptr; | ||
125 | } | ||
126 | |||
127 | static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses, | ||
128 | const struct nls_table * nls_cp) | ||
129 | { | ||
130 | char * bcc_ptr = *pbcc_area; | ||
131 | |||
132 | /* copy user */ | ||
133 | /* BB what about null user mounts - check that we do this BB */ | ||
134 | /* copy user */ | ||
135 | if(ses->userName == NULL) { | ||
136 | /* BB what about null user mounts - check that we do this BB */ | ||
137 | } else { /* 300 should be long enough for any conceivable user name */ | ||
138 | strncpy(bcc_ptr, ses->userName, 300); | ||
139 | } | ||
140 | /* BB improve check for overflow */ | ||
141 | bcc_ptr += strnlen(ses->userName, 300); | ||
142 | *bcc_ptr = 0; | ||
143 | bcc_ptr++; /* account for null termination */ | ||
144 | |||
145 | /* copy domain */ | ||
146 | |||
147 | if(ses->domainName == NULL) { | ||
148 | strcpy(bcc_ptr, "CIFS_LINUX_DOM"); | ||
149 | bcc_ptr += 14; /* strlen(CIFS_LINUX_DOM) */ | ||
150 | } else { | ||
151 | strncpy(bcc_ptr, ses->domainName, 256); | ||
152 | bcc_ptr += strnlen(ses->domainName, 256); | ||
153 | } | ||
154 | *bcc_ptr = 0; | ||
155 | bcc_ptr++; | ||
156 | |||
157 | /* BB check for overflow here */ | ||
158 | |||
159 | strcpy(bcc_ptr, "Linux version "); | ||
160 | bcc_ptr += strlen("Linux version "); | ||
161 | strcpy(bcc_ptr, system_utsname.release); | ||
162 | bcc_ptr += strlen(system_utsname.release) + 1; | ||
163 | |||
164 | strcpy(bcc_ptr, CIFS_NETWORK_OPSYS); | ||
165 | bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1; | ||
166 | |||
167 | *pbcc_area = bcc_ptr; | ||
168 | } | ||
169 | |||
170 | static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses, | ||
171 | const struct nls_table * nls_cp) | ||
172 | { | ||
173 | int rc = 0; | ||
174 | int words_left, len; | ||
175 | char * data = *pbcc_area; | ||
176 | |||
177 | |||
178 | |||
179 | cFYI(1,("bleft %d",bleft)); | ||
180 | |||
181 | |||
182 | /* word align, if bytes remaining is not even */ | ||
183 | if(bleft % 2) { | ||
184 | bleft--; | ||
185 | data++; | ||
186 | } | ||
187 | words_left = bleft / 2; | ||
188 | |||
189 | /* save off server operating system */ | ||
190 | len = UniStrnlen((wchar_t *) data, words_left); | ||
191 | |||
192 | /* We look for obvious messed up bcc or strings in response so we do not go off | ||
193 | the end since (at least) WIN2K and Windows XP have a major bug in not null | ||
194 | terminating last Unicode string in response */ | ||
195 | if(len >= words_left) | ||
196 | return rc; | ||
197 | |||
198 | if(ses->serverOS) | ||
199 | kfree(ses->serverOS); | ||
200 | /* UTF-8 string will not grow more than four times as big as UCS-16 */ | ||
201 | ses->serverOS = kzalloc(4 * len, GFP_KERNEL); | ||
202 | if(ses->serverOS != NULL) { | ||
203 | cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len, | ||
204 | nls_cp); | ||
205 | } | ||
206 | data += 2 * (len + 1); | ||
207 | words_left -= len + 1; | ||
208 | |||
209 | /* save off server network operating system */ | ||
210 | len = UniStrnlen((wchar_t *) data, words_left); | ||
211 | |||
212 | if(len >= words_left) | ||
213 | return rc; | ||
214 | |||
215 | if(ses->serverNOS) | ||
216 | kfree(ses->serverNOS); | ||
217 | ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */ | ||
218 | if(ses->serverNOS != NULL) { | ||
219 | cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len, | ||
220 | nls_cp); | ||
221 | if(strncmp(ses->serverNOS, "NT LAN Manager 4",16) == 0) { | ||
222 | cFYI(1,("NT4 server")); | ||
223 | ses->flags |= CIFS_SES_NT4; | ||
224 | } | ||
225 | } | ||
226 | data += 2 * (len + 1); | ||
227 | words_left -= len + 1; | ||
228 | |||
229 | /* save off server domain */ | ||
230 | len = UniStrnlen((wchar_t *) data, words_left); | ||
231 | |||
232 | if(len > words_left) | ||
233 | return rc; | ||
234 | |||
235 | if(ses->serverDomain) | ||
236 | kfree(ses->serverDomain); | ||
237 | ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */ | ||
238 | if(ses->serverDomain != NULL) { | ||
239 | cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len, | ||
240 | nls_cp); | ||
241 | ses->serverDomain[2*len] = 0; | ||
242 | ses->serverDomain[(2*len) + 1] = 0; | ||
243 | } | ||
244 | data += 2 * (len + 1); | ||
245 | words_left -= len + 1; | ||
246 | |||
247 | cFYI(1,("words left: %d",words_left)); | ||
248 | |||
249 | return rc; | ||
250 | } | ||
251 | |||
252 | static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses, | ||
253 | const struct nls_table * nls_cp) | ||
254 | { | ||
255 | int rc = 0; | ||
256 | int len; | ||
257 | char * bcc_ptr = *pbcc_area; | ||
258 | |||
259 | cFYI(1,("decode sessetup ascii. bleft %d", bleft)); | ||
260 | |||
261 | len = strnlen(bcc_ptr, bleft); | ||
262 | if(len >= bleft) | ||
263 | return rc; | ||
264 | |||
265 | if(ses->serverOS) | ||
266 | kfree(ses->serverOS); | ||
267 | |||
268 | ses->serverOS = kzalloc(len + 1, GFP_KERNEL); | ||
269 | if(ses->serverOS) | ||
270 | strncpy(ses->serverOS, bcc_ptr, len); | ||
271 | |||
272 | bcc_ptr += len + 1; | ||
273 | bleft -= len + 1; | ||
274 | |||
275 | len = strnlen(bcc_ptr, bleft); | ||
276 | if(len >= bleft) | ||
277 | return rc; | ||
278 | |||
279 | if(ses->serverNOS) | ||
280 | kfree(ses->serverNOS); | ||
281 | |||
282 | ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); | ||
283 | if(ses->serverNOS) | ||
284 | strncpy(ses->serverNOS, bcc_ptr, len); | ||
285 | |||
286 | bcc_ptr += len + 1; | ||
287 | bleft -= len + 1; | ||
288 | |||
289 | len = strnlen(bcc_ptr, bleft); | ||
290 | if(len > bleft) | ||
291 | return rc; | ||
292 | |||
293 | if(ses->serverDomain) | ||
294 | kfree(ses->serverDomain); | ||
295 | |||
296 | ses->serverDomain = kzalloc(len + 1, GFP_KERNEL); | ||
297 | if(ses->serverOS) | ||
298 | strncpy(ses->serverOS, bcc_ptr, len); | ||
299 | |||
300 | bcc_ptr += len + 1; | ||
301 | bleft -= len + 1; | ||
302 | |||
303 | cFYI(1,("ascii: bytes left %d",bleft)); | ||
304 | |||
305 | return rc; | ||
306 | } | ||
307 | |||
308 | int | ||
309 | CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, | ||
310 | const struct nls_table *nls_cp) | ||
311 | { | ||
312 | int rc = 0; | ||
313 | int wct; | ||
314 | struct smb_hdr *smb_buf; | ||
315 | char *bcc_ptr; | ||
316 | char *str_area; | ||
317 | SESSION_SETUP_ANDX *pSMB; | ||
318 | __u32 capabilities; | ||
319 | int count; | ||
320 | int resp_buf_type = 0; | ||
321 | struct kvec iov[2]; | ||
322 | enum securityEnum type; | ||
323 | __u16 action; | ||
324 | int bytes_remaining; | ||
325 | |||
326 | if(ses == NULL) | ||
327 | return -EINVAL; | ||
328 | |||
329 | type = ses->server->secType; | ||
330 | |||
331 | cFYI(1,("sess setup type %d",type)); | ||
332 | if(type == LANMAN) { | ||
333 | #ifndef CONFIG_CIFS_WEAK_PW_HASH | ||
334 | /* LANMAN and plaintext are less secure and off by default. | ||
335 | So we make this explicitly be turned on in kconfig (in the | ||
336 | build) and turned on at runtime (changed from the default) | ||
337 | in proc/fs/cifs or via mount parm. Unfortunately this is | ||
338 | needed for old Win (e.g. Win95), some obscure NAS and OS/2 */ | ||
339 | return -EOPNOTSUPP; | ||
340 | #endif | ||
341 | wct = 10; /* lanman 2 style sessionsetup */ | ||
342 | } else if((type == NTLM) || (type == NTLMv2)) { | ||
343 | /* For NTLMv2 failures eventually may need to retry NTLM */ | ||
344 | wct = 13; /* old style NTLM sessionsetup */ | ||
345 | } else /* same size for negotiate or auth, NTLMSSP or extended security */ | ||
346 | wct = 12; | ||
347 | |||
348 | rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses, | ||
349 | (void **)&smb_buf); | ||
350 | if(rc) | ||
351 | return rc; | ||
352 | |||
353 | pSMB = (SESSION_SETUP_ANDX *)smb_buf; | ||
354 | |||
355 | capabilities = cifs_ssetup_hdr(ses, pSMB); | ||
356 | |||
357 | /* we will send the SMB in two pieces, | ||
358 | a fixed length beginning part, and a | ||
359 | second part which will include the strings | ||
360 | and rest of bcc area, in order to avoid having | ||
361 | to do a large buffer 17K allocation */ | ||
362 | iov[0].iov_base = (char *)pSMB; | ||
363 | iov[0].iov_len = smb_buf->smb_buf_length + 4; | ||
364 | |||
365 | /* 2000 big enough to fit max user, domain, NOS name etc. */ | ||
366 | str_area = kmalloc(2000, GFP_KERNEL); | ||
367 | bcc_ptr = str_area; | ||
368 | |||
369 | if(type == LANMAN) { | ||
370 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
371 | char lnm_session_key[CIFS_SESS_KEY_SIZE]; | ||
372 | |||
373 | /* no capabilities flags in old lanman negotiation */ | ||
374 | |||
375 | pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE; | ||
376 | /* BB calculate hash with password */ | ||
377 | /* and copy into bcc */ | ||
378 | |||
379 | calc_lanman_hash(ses, lnm_session_key); | ||
380 | |||
381 | /* #ifdef CONFIG_CIFS_DEBUG2 | ||
382 | cifs_dump_mem("cryptkey: ",ses->server->cryptKey, | ||
383 | CIFS_SESS_KEY_SIZE); | ||
384 | #endif */ | ||
385 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); | ||
386 | bcc_ptr += CIFS_SESS_KEY_SIZE; | ||
387 | |||
388 | /* can not sign if LANMAN negotiated so no need | ||
389 | to calculate signing key? but what if server | ||
390 | changed to do higher than lanman dialect and | ||
391 | we reconnected would we ever calc signing_key? */ | ||
392 | |||
393 | cFYI(1,("Negotiating LANMAN setting up strings")); | ||
394 | /* Unicode not allowed for LANMAN dialects */ | ||
395 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
396 | #endif | ||
397 | } else if (type == NTLM) { | ||
398 | char ntlm_session_key[CIFS_SESS_KEY_SIZE]; | ||
399 | |||
400 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | ||
401 | pSMB->req_no_secext.CaseInsensitivePasswordLength = | ||
402 | cpu_to_le16(CIFS_SESS_KEY_SIZE); | ||
403 | pSMB->req_no_secext.CaseSensitivePasswordLength = | ||
404 | cpu_to_le16(CIFS_SESS_KEY_SIZE); | ||
405 | |||
406 | /* calculate session key */ | ||
407 | SMBNTencrypt(ses->password, ses->server->cryptKey, | ||
408 | ntlm_session_key); | ||
409 | |||
410 | if(first_time) /* should this be moved into common code | ||
411 | with similar ntlmv2 path? */ | ||
412 | cifs_calculate_mac_key(ses->server->mac_signing_key, | ||
413 | ntlm_session_key, ses->password); | ||
414 | /* copy session key */ | ||
415 | |||
416 | memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE); | ||
417 | bcc_ptr += CIFS_SESS_KEY_SIZE; | ||
418 | memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE); | ||
419 | bcc_ptr += CIFS_SESS_KEY_SIZE; | ||
420 | if(ses->capabilities & CAP_UNICODE) { | ||
421 | /* unicode strings must be word aligned */ | ||
422 | if (iov[0].iov_len % 2) { | ||
423 | *bcc_ptr = 0; | ||
424 | bcc_ptr++; | ||
425 | } | ||
426 | unicode_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
427 | } else | ||
428 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
429 | } else if (type == NTLMv2) { | ||
430 | char * v2_sess_key = | ||
431 | kmalloc(sizeof(struct ntlmv2_resp), GFP_KERNEL); | ||
432 | |||
433 | /* BB FIXME change all users of v2_sess_key to | ||
434 | struct ntlmv2_resp */ | ||
435 | |||
436 | if(v2_sess_key == NULL) { | ||
437 | cifs_small_buf_release(smb_buf); | ||
438 | return -ENOMEM; | ||
439 | } | ||
440 | |||
441 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | ||
442 | |||
443 | /* LM2 password would be here if we supported it */ | ||
444 | pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; | ||
445 | /* cpu_to_le16(LM2_SESS_KEY_SIZE); */ | ||
446 | |||
447 | pSMB->req_no_secext.CaseSensitivePasswordLength = | ||
448 | cpu_to_le16(sizeof(struct ntlmv2_resp)); | ||
449 | |||
450 | /* calculate session key */ | ||
451 | setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); | ||
452 | if(first_time) /* should this be moved into common code | ||
453 | with similar ntlmv2 path? */ | ||
454 | /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key, | ||
455 | response BB FIXME, v2_sess_key); */ | ||
456 | |||
457 | /* copy session key */ | ||
458 | |||
459 | /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE); | ||
460 | bcc_ptr += LM2_SESS_KEY_SIZE; */ | ||
461 | memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp)); | ||
462 | bcc_ptr += sizeof(struct ntlmv2_resp); | ||
463 | kfree(v2_sess_key); | ||
464 | if(ses->capabilities & CAP_UNICODE) { | ||
465 | if(iov[0].iov_len % 2) { | ||
466 | *bcc_ptr = 0; | ||
467 | } bcc_ptr++; | ||
468 | unicode_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
469 | } else | ||
470 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
471 | } else /* NTLMSSP or SPNEGO */ { | ||
472 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | ||
473 | capabilities |= CAP_EXTENDED_SECURITY; | ||
474 | pSMB->req.Capabilities = cpu_to_le32(capabilities); | ||
475 | /* BB set password lengths */ | ||
476 | } | ||
477 | |||
478 | count = (long) bcc_ptr - (long) str_area; | ||
479 | smb_buf->smb_buf_length += count; | ||
480 | |||
481 | BCC_LE(smb_buf) = cpu_to_le16(count); | ||
482 | |||
483 | iov[1].iov_base = str_area; | ||
484 | iov[1].iov_len = count; | ||
485 | rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0); | ||
486 | /* SMB request buf freed in SendReceive2 */ | ||
487 | |||
488 | cFYI(1,("ssetup rc from sendrecv2 is %d",rc)); | ||
489 | if(rc) | ||
490 | goto ssetup_exit; | ||
491 | |||
492 | pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base; | ||
493 | smb_buf = (struct smb_hdr *)iov[0].iov_base; | ||
494 | |||
495 | if((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) { | ||
496 | rc = -EIO; | ||
497 | cERROR(1,("bad word count %d", smb_buf->WordCount)); | ||
498 | goto ssetup_exit; | ||
499 | } | ||
500 | action = le16_to_cpu(pSMB->resp.Action); | ||
501 | if (action & GUEST_LOGIN) | ||
502 | cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */ | ||
503 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ | ||
504 | cFYI(1, ("UID = %d ", ses->Suid)); | ||
505 | /* response can have either 3 or 4 word count - Samba sends 3 */ | ||
506 | /* and lanman response is 3 */ | ||
507 | bytes_remaining = BCC(smb_buf); | ||
508 | bcc_ptr = pByteArea(smb_buf); | ||
509 | |||
510 | if(smb_buf->WordCount == 4) { | ||
511 | __u16 blob_len; | ||
512 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); | ||
513 | bcc_ptr += blob_len; | ||
514 | if(blob_len > bytes_remaining) { | ||
515 | cERROR(1,("bad security blob length %d", blob_len)); | ||
516 | rc = -EINVAL; | ||
517 | goto ssetup_exit; | ||
518 | } | ||
519 | bytes_remaining -= blob_len; | ||
520 | } | ||
521 | |||
522 | /* BB check if Unicode and decode strings */ | ||
523 | if(smb_buf->Flags2 & SMBFLG2_UNICODE) | ||
524 | rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining, | ||
525 | ses, nls_cp); | ||
526 | else | ||
527 | rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,nls_cp); | ||
528 | |||
529 | ssetup_exit: | ||
530 | kfree(str_area); | ||
531 | if(resp_buf_type == CIFS_SMALL_BUFFER) { | ||
532 | cFYI(1,("ssetup freeing small buf %p", iov[0].iov_base)); | ||
533 | cifs_small_buf_release(iov[0].iov_base); | ||
534 | } else if(resp_buf_type == CIFS_LARGE_BUFFER) | ||
535 | cifs_buf_release(iov[0].iov_base); | ||
536 | |||
537 | return rc; | ||
538 | } | ||
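The NTLM and NTLMv2 branches above only append UTF-16LE (unicode) strings at an even offset, emitting a single NUL pad byte when iov[0].iov_len is odd, so that the strings stay word aligned and the byte count later taken as bcc_ptr - str_area remains correct. A minimal standalone sketch of that alignment rule; the helper name is illustrative and not part of the CIFS code:

    #include <stddef.h>

    /* Illustrative helper: pad the byte area to a 2-byte boundary before
     * appending UTF-16LE strings, as the session-setup paths above do
     * when iov[0].iov_len is odd. */
    static char *pad_for_unicode(char *p, size_t bytes_so_far)
    {
            if (bytes_so_far & 1)
                    *p++ = 0;       /* one NUL pad byte */
            return p;
    }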
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 6103bcdfb16d..f518c5e45035 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/random.h> | 30 | #include <linux/random.h> |
31 | #include "cifs_unicode.h" | 31 | #include "cifs_unicode.h" |
32 | #include "cifspdu.h" | 32 | #include "cifspdu.h" |
33 | #include "cifsglob.h" | ||
33 | #include "md5.h" | 34 | #include "md5.h" |
34 | #include "cifs_debug.h" | 35 | #include "cifs_debug.h" |
35 | #include "cifsencrypt.h" | 36 | #include "cifsencrypt.h" |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 3da80409466c..17ba329e2b3d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -654,8 +654,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
654 | 654 | ||
655 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { | 655 | if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
656 | up(&ses->server->tcpSem); | 656 | up(&ses->server->tcpSem); |
657 | cERROR(1, | 657 | cERROR(1, ("Illegal length, greater than maximum frame, %d", |
658 | ("Illegal length, greater than maximum frame, %d ", | ||
659 | in_buf->smb_buf_length)); | 658 | in_buf->smb_buf_length)); |
660 | DeleteMidQEntry(midQ); | 659 | DeleteMidQEntry(midQ); |
661 | /* If not lock req, update # of requests on wire to server */ | 660 | /* If not lock req, update # of requests on wire to server */ |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 7f96b5cb6781..8c9b28dff119 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/suspend.h> | 34 | #include <linux/suspend.h> |
35 | #include <linux/pagemap.h> | 35 | #include <linux/pagemap.h> |
36 | #include <linux/kthread.h> | 36 | #include <linux/kthread.h> |
37 | #include <linux/poison.h> | ||
37 | #include <linux/proc_fs.h> | 38 | #include <linux/proc_fs.h> |
38 | 39 | ||
39 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
@@ -1675,7 +1676,7 @@ static void journal_free_journal_head(struct journal_head *jh) | |||
1675 | { | 1676 | { |
1676 | #ifdef CONFIG_JBD_DEBUG | 1677 | #ifdef CONFIG_JBD_DEBUG |
1677 | atomic_dec(&nr_journal_heads); | 1678 | atomic_dec(&nr_journal_heads); |
1678 | memset(jh, 0x5b, sizeof(*jh)); | 1679 | memset(jh, JBD_POISON_FREE, sizeof(*jh)); |
1679 | #endif | 1680 | #endif |
1680 | kmem_cache_free(journal_head_cache, jh); | 1681 | kmem_cache_free(journal_head_cache, jh); |
1681 | } | 1682 | } |
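The jbd hunk above replaces the hard-coded 0x5b fill with the shared JBD_POISON_FREE constant from <linux/poison.h>, so freed journal heads carry a recognizable pattern. A small userspace sketch of the idea, with an illustrative poison value standing in for the kernel constant:

    #include <string.h>

    #define POISON_FREE_BYTE 0x5b           /* illustrative; the kernel uses JBD_POISON_FREE */

    struct journal_head_stub { void *b_bh; int b_jcount; };

    /* Fill the structure with a poison pattern before freeing it, so a
     * later use-after-free shows up as an obvious 0x5b5b5b5b value. */
    static void poison_then_free(struct journal_head_stub *jh)
    {
            memset(jh, POISON_FREE_BYTE, sizeof(*jh));
            /* ...then return jh to its allocator */
    }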
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 320dd48b834e..9c2077e7e081 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c | |||
@@ -267,6 +267,8 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) | |||
267 | } | 267 | } |
268 | 268 | ||
269 | rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0); | 269 | rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0); |
270 | if (!value && rc == -ENODATA) | ||
271 | rc = 0; | ||
270 | if (value) | 272 | if (value) |
271 | kfree(value); | 273 | kfree(value); |
272 | if (!rc) { | 274 | if (!rc) { |
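The jffs2_set_acl() change above treats -ENODATA as success when the caller is removing an ACL (value is NULL): deleting an xattr that was never stored is not an error. A hedged sketch of that rule, using stand-ins for the inode type and the lower-level setxattr call (not the real jffs2 signatures):

    #include <errno.h>
    #include <stddef.h>

    struct inode_stub;                                              /* stand-in type */
    int lower_setxattr(struct inode_stub *, const void *, size_t);  /* stand-in for do_jffs2_setxattr() */

    static int set_or_clear_acl(struct inode_stub *inode, const void *value, size_t size)
    {
            int rc = lower_setxattr(inode, value, size);

            /* Removing an ACL that does not exist is treated as success. */
            if (!value && rc == -ENODATA)
                    rc = 0;
            return rc;
    }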
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index b8886f048eaa..ad0121088dde 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
@@ -225,7 +225,6 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
225 | at the end of the linked list. Stash it and continue | 225 | at the end of the linked list. Stash it and continue |
226 | from the beginning of the list */ | 226 | from the beginning of the list */ |
227 | ic = (struct jffs2_inode_cache *)(*prev); | 227 | ic = (struct jffs2_inode_cache *)(*prev); |
228 | BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE); | ||
229 | prev = &ic->nodes; | 228 | prev = &ic->nodes; |
230 | continue; | 229 | continue; |
231 | } | 230 | } |
@@ -249,7 +248,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
249 | 248 | ||
250 | /* PARANOIA */ | 249 | /* PARANOIA */ |
251 | if (!ic) { | 250 | if (!ic) { |
252 | printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n"); | 251 | JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref" |
252 | " not found in remove_node_refs()!!\n"); | ||
253 | return; | 253 | return; |
254 | } | 254 | } |
255 | 255 | ||
@@ -274,8 +274,19 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
274 | printk("\n"); | 274 | printk("\n"); |
275 | }); | 275 | }); |
276 | 276 | ||
277 | if (ic->nodes == (void *)ic && ic->nlink == 0) | 277 | switch (ic->class) { |
278 | jffs2_del_ino_cache(c, ic); | 278 | #ifdef CONFIG_JFFS2_FS_XATTR |
279 | case RAWNODE_CLASS_XATTR_DATUM: | ||
280 | jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); | ||
281 | break; | ||
282 | case RAWNODE_CLASS_XATTR_REF: | ||
283 | jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); | ||
284 | break; | ||
285 | #endif | ||
286 | default: | ||
287 | if (ic->nodes == (void *)ic && ic->nlink == 0) | ||
288 | jffs2_del_ino_cache(c, ic); | ||
289 | } | ||
279 | } | 290 | } |
280 | 291 | ||
281 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 292 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
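Both the erase path above and the nodemgmt.c hunk further below stop assuming that a raw node's owner is an inode cache: the owner may now also be an xattr datum or an xattr ref, and a shared leading class field selects the release routine. A simplified sketch of that discriminated-header dispatch; the types and enumerators are illustrative, not the jffs2 definitions:

    enum owner_class { CLASS_INODE_CACHE, CLASS_XATTR_DATUM, CLASS_XATTR_REF };

    struct owner_head { enum owner_class class; };  /* common first member */

    static void release_owner(struct owner_head *ic)
    {
            switch (ic->class) {
            case CLASS_XATTR_DATUM:
                    /* release the xattr datum (jffs2_release_xattr_datum) */
                    break;
            case CLASS_XATTR_REF:
                    /* release the xattr ref (jffs2_release_xattr_ref) */
                    break;
            default:
                    /* inode cache: delete it only once it has no nodes
                     * left and its link count is zero */
                    break;
            }
    }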
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 2900ec3ec3af..97caa77d60cf 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
@@ -227,8 +227,6 @@ void jffs2_clear_inode (struct inode *inode) | |||
227 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 227 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
228 | 228 | ||
229 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); | 229 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); |
230 | |||
231 | jffs2_xattr_delete_inode(c, f->inocache); | ||
232 | jffs2_do_clear_inode(c, f); | 230 | jffs2_do_clear_inode(c, f); |
233 | } | 231 | } |
234 | 232 | ||
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 477c526d638b..daff3341ff92 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c | |||
@@ -165,6 +165,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
165 | D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n", | 165 | D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n", |
166 | ic->ino)); | 166 | ic->ino)); |
167 | spin_unlock(&c->inocache_lock); | 167 | spin_unlock(&c->inocache_lock); |
168 | jffs2_xattr_delete_inode(c, ic); | ||
168 | continue; | 169 | continue; |
169 | } | 170 | } |
170 | switch(ic->state) { | 171 | switch(ic->state) { |
@@ -275,13 +276,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
275 | * We can decide whether this node is inode or xattr by ic->class. */ | 276 | * We can decide whether this node is inode or xattr by ic->class. */ |
276 | if (ic->class == RAWNODE_CLASS_XATTR_DATUM | 277 | if (ic->class == RAWNODE_CLASS_XATTR_DATUM |
277 | || ic->class == RAWNODE_CLASS_XATTR_REF) { | 278 | || ic->class == RAWNODE_CLASS_XATTR_REF) { |
278 | BUG_ON(raw->next_in_ino != (void *)ic); | ||
279 | spin_unlock(&c->erase_completion_lock); | 279 | spin_unlock(&c->erase_completion_lock); |
280 | 280 | ||
281 | if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { | 281 | if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { |
282 | ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic); | 282 | ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw); |
283 | } else { | 283 | } else { |
284 | ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic); | 284 | ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw); |
285 | } | 285 | } |
286 | goto release_sem; | 286 | goto release_sem; |
287 | } | 287 | } |
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h index 935fec1b1201..b98594992eed 100644 --- a/fs/jffs2/jffs2_fs_sb.h +++ b/fs/jffs2/jffs2_fs_sb.h | |||
@@ -119,8 +119,11 @@ struct jffs2_sb_info { | |||
119 | #ifdef CONFIG_JFFS2_FS_XATTR | 119 | #ifdef CONFIG_JFFS2_FS_XATTR |
120 | #define XATTRINDEX_HASHSIZE (57) | 120 | #define XATTRINDEX_HASHSIZE (57) |
121 | uint32_t highest_xid; | 121 | uint32_t highest_xid; |
122 | uint32_t highest_xseqno; | ||
122 | struct list_head xattrindex[XATTRINDEX_HASHSIZE]; | 123 | struct list_head xattrindex[XATTRINDEX_HASHSIZE]; |
123 | struct list_head xattr_unchecked; | 124 | struct list_head xattr_unchecked; |
125 | struct list_head xattr_dead_list; | ||
126 | struct jffs2_xattr_ref *xref_dead_list; | ||
124 | struct jffs2_xattr_ref *xref_temp; | 127 | struct jffs2_xattr_ref *xref_temp; |
125 | struct rw_semaphore xattr_sem; | 128 | struct rw_semaphore xattr_sem; |
126 | uint32_t xdatum_mem_usage; | 129 | uint32_t xdatum_mem_usage; |
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 4889d0700c0e..8310c95478e9 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c | |||
@@ -291,6 +291,7 @@ struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void) | |||
291 | 291 | ||
292 | memset(xd, 0, sizeof(struct jffs2_xattr_datum)); | 292 | memset(xd, 0, sizeof(struct jffs2_xattr_datum)); |
293 | xd->class = RAWNODE_CLASS_XATTR_DATUM; | 293 | xd->class = RAWNODE_CLASS_XATTR_DATUM; |
294 | xd->node = (void *)xd; | ||
294 | INIT_LIST_HEAD(&xd->xindex); | 295 | INIT_LIST_HEAD(&xd->xindex); |
295 | return xd; | 296 | return xd; |
296 | } | 297 | } |
@@ -309,6 +310,7 @@ struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void) | |||
309 | 310 | ||
310 | memset(ref, 0, sizeof(struct jffs2_xattr_ref)); | 311 | memset(ref, 0, sizeof(struct jffs2_xattr_ref)); |
311 | ref->class = RAWNODE_CLASS_XATTR_REF; | 312 | ref->class = RAWNODE_CLASS_XATTR_REF; |
313 | ref->node = (void *)ref; | ||
312 | return ref; | 314 | return ref; |
313 | } | 315 | } |
314 | 316 | ||
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 927dfe42ba76..7675b33396c7 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -906,6 +906,9 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | |||
906 | { | 906 | { |
907 | struct jffs2_inode_cache **prev; | 907 | struct jffs2_inode_cache **prev; |
908 | 908 | ||
909 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
910 | BUG_ON(old->xref); | ||
911 | #endif | ||
909 | dbg_inocache("del %p (ino #%u)\n", old, old->ino); | 912 | dbg_inocache("del %p (ino #%u)\n", old, old->ino); |
910 | spin_lock(&c->inocache_lock); | 913 | spin_lock(&c->inocache_lock); |
911 | 914 | ||
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index ac0c350ed7d7..d88376992ed9 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
@@ -683,19 +683,26 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
683 | spin_lock(&c->erase_completion_lock); | 683 | spin_lock(&c->erase_completion_lock); |
684 | 684 | ||
685 | ic = jffs2_raw_ref_to_ic(ref); | 685 | ic = jffs2_raw_ref_to_ic(ref); |
686 | /* It seems we should never call jffs2_mark_node_obsolete() for | ||
687 | XATTR nodes.... yet. Make sure we notice if/when we change | ||
688 | that :) */ | ||
689 | BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE); | ||
690 | for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) | 686 | for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) |
691 | ; | 687 | ; |
692 | 688 | ||
693 | *p = ref->next_in_ino; | 689 | *p = ref->next_in_ino; |
694 | ref->next_in_ino = NULL; | 690 | ref->next_in_ino = NULL; |
695 | 691 | ||
696 | if (ic->nodes == (void *)ic && ic->nlink == 0) | 692 | switch (ic->class) { |
697 | jffs2_del_ino_cache(c, ic); | 693 | #ifdef CONFIG_JFFS2_FS_XATTR |
698 | 694 | case RAWNODE_CLASS_XATTR_DATUM: | |
695 | jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); | ||
696 | break; | ||
697 | case RAWNODE_CLASS_XATTR_REF: | ||
698 | jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); | ||
699 | break; | ||
700 | #endif | ||
701 | default: | ||
702 | if (ic->nodes == (void *)ic && ic->nlink == 0) | ||
703 | jffs2_del_ino_cache(c, ic); | ||
704 | break; | ||
705 | } | ||
699 | spin_unlock(&c->erase_completion_lock); | 706 | spin_unlock(&c->erase_completion_lock); |
700 | } | 707 | } |
701 | 708 | ||
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 5fec012b02ed..cc1899268c43 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -968,6 +968,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | |||
968 | struct jffs2_full_dirent *fd, *fds; | 968 | struct jffs2_full_dirent *fd, *fds; |
969 | int deleted; | 969 | int deleted; |
970 | 970 | ||
971 | jffs2_xattr_delete_inode(c, f->inocache); | ||
971 | down(&f->sem); | 972 | down(&f->sem); |
972 | deleted = f->inocache && !f->inocache->nlink; | 973 | deleted = f->inocache && !f->inocache->nlink; |
973 | 974 | ||
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 61618080b86f..2bfdc33752d3 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -317,20 +317,23 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
317 | struct jffs2_summary *s) | 317 | struct jffs2_summary *s) |
318 | { | 318 | { |
319 | struct jffs2_xattr_datum *xd; | 319 | struct jffs2_xattr_datum *xd; |
320 | uint32_t totlen, crc; | 320 | uint32_t xid, version, totlen, crc; |
321 | int err; | 321 | int err; |
322 | 322 | ||
323 | crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4); | 323 | crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4); |
324 | if (crc != je32_to_cpu(rx->node_crc)) { | 324 | if (crc != je32_to_cpu(rx->node_crc)) { |
325 | if (je32_to_cpu(rx->node_crc) != 0xffffffff) | 325 | JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", |
326 | JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | 326 | ofs, je32_to_cpu(rx->node_crc), crc); |
327 | ofs, je32_to_cpu(rx->node_crc), crc); | ||
328 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) | 327 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) |
329 | return err; | 328 | return err; |
330 | return 0; | 329 | return 0; |
331 | } | 330 | } |
332 | 331 | ||
333 | totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len)); | 332 | xid = je32_to_cpu(rx->xid); |
333 | version = je32_to_cpu(rx->version); | ||
334 | |||
335 | totlen = PAD(sizeof(struct jffs2_raw_xattr) | ||
336 | + rx->name_len + 1 + je16_to_cpu(rx->value_len)); | ||
334 | if (totlen != je32_to_cpu(rx->totlen)) { | 337 | if (totlen != je32_to_cpu(rx->totlen)) { |
335 | JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", | 338 | JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", |
336 | ofs, je32_to_cpu(rx->totlen), totlen); | 339 | ofs, je32_to_cpu(rx->totlen), totlen); |
@@ -339,22 +342,24 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
339 | return 0; | 342 | return 0; |
340 | } | 343 | } |
341 | 344 | ||
342 | xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version)); | 345 | xd = jffs2_setup_xattr_datum(c, xid, version); |
343 | if (IS_ERR(xd)) { | 346 | if (IS_ERR(xd)) |
344 | if (PTR_ERR(xd) == -EEXIST) { | ||
345 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen))))) | ||
346 | return err; | ||
347 | return 0; | ||
348 | } | ||
349 | return PTR_ERR(xd); | 347 | return PTR_ERR(xd); |
350 | } | ||
351 | xd->xprefix = rx->xprefix; | ||
352 | xd->name_len = rx->name_len; | ||
353 | xd->value_len = je16_to_cpu(rx->value_len); | ||
354 | xd->data_crc = je32_to_cpu(rx->data_crc); | ||
355 | 348 | ||
356 | xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); | 349 | if (xd->version > version) { |
357 | /* FIXME */ xd->node->next_in_ino = (void *)xd; | 350 | struct jffs2_raw_node_ref *raw |
351 | = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); | ||
352 | raw->next_in_ino = xd->node->next_in_ino; | ||
353 | xd->node->next_in_ino = raw; | ||
354 | } else { | ||
355 | xd->version = version; | ||
356 | xd->xprefix = rx->xprefix; | ||
357 | xd->name_len = rx->name_len; | ||
358 | xd->value_len = je16_to_cpu(rx->value_len); | ||
359 | xd->data_crc = je32_to_cpu(rx->data_crc); | ||
360 | |||
361 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd); | ||
362 | } | ||
358 | 363 | ||
359 | if (jffs2_sum_active()) | 364 | if (jffs2_sum_active()) |
360 | jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); | 365 | jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); |
@@ -373,9 +378,8 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
373 | 378 | ||
374 | crc = crc32(0, rr, sizeof(*rr) - 4); | 379 | crc = crc32(0, rr, sizeof(*rr) - 4); |
375 | if (crc != je32_to_cpu(rr->node_crc)) { | 380 | if (crc != je32_to_cpu(rr->node_crc)) { |
376 | if (je32_to_cpu(rr->node_crc) != 0xffffffff) | 381 | JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", |
377 | JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | 382 | ofs, je32_to_cpu(rr->node_crc), crc); |
378 | ofs, je32_to_cpu(rr->node_crc), crc); | ||
379 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) | 383 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) |
380 | return err; | 384 | return err; |
381 | return 0; | 385 | return 0; |
@@ -395,6 +399,7 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
395 | return -ENOMEM; | 399 | return -ENOMEM; |
396 | 400 | ||
397 | /* BEFORE jffs2_build_xattr_subsystem() called, | 401 | /* BEFORE jffs2_build_xattr_subsystem() called, |
402 | * and AFTER xattr_ref is marked as a dead xref, | ||
398 | * ref->xid is used to store 32bit xid, xd is not used | 403 | * ref->xid is used to store 32bit xid, xd is not used |
399 | * ref->ino is used to store 32bit inode-number, ic is not used | 404 | * ref->ino is used to store 32bit inode-number, ic is not used |
400 | * Those variables are declared as a union, thus using those | 405 | * Those variables are declared as a union, thus using those |
@@ -404,11 +409,13 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
404 | */ | 409 | */ |
405 | ref->ino = je32_to_cpu(rr->ino); | 410 | ref->ino = je32_to_cpu(rr->ino); |
406 | ref->xid = je32_to_cpu(rr->xid); | 411 | ref->xid = je32_to_cpu(rr->xid); |
412 | ref->xseqno = je32_to_cpu(rr->xseqno); | ||
413 | if (ref->xseqno > c->highest_xseqno) | ||
414 | c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); | ||
407 | ref->next = c->xref_temp; | 415 | ref->next = c->xref_temp; |
408 | c->xref_temp = ref; | 416 | c->xref_temp = ref; |
409 | 417 | ||
410 | ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL); | 418 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref); |
411 | /* FIXME */ ref->node->next_in_ino = (void *)ref; | ||
412 | 419 | ||
413 | if (jffs2_sum_active()) | 420 | if (jffs2_sum_active()) |
414 | jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset); | 421 | jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset); |
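With the scan.c change above, an xattr node found on flash no longer simply overwrites the cached datum: only the highest version refreshes xd's metadata and becomes xd->node, while older copies still get raw refs, spliced behind the newest one so the garbage collector can reclaim them. A rough sketch of that decision using stand-in structures (the real linking is done by jffs2_link_node_ref()):

    #include <stdint.h>

    struct raw_ref { struct raw_ref *next_in_ino; };
    struct xdatum  { struct raw_ref *node; uint32_t version; };

    /* raw is the freshly linked ref for the node just scanned. */
    static void record_scanned_version(struct xdatum *xd, struct raw_ref *raw, uint32_t version)
    {
            if (xd->version > version) {
                    /* Older copy: keep its ref, but thread it behind the
                     * newest node so GC can find and erase it later. */
                    raw->next_in_ino = xd->node->next_in_ino;
                    xd->node->next_in_ino = raw;
            } else {
                    /* Newest copy so far: refresh the cached metadata; in
                     * the real code the link helper also makes it xd->node. */
                    xd->version = version;
            }
    }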
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index be1acc3dad97..c19bd476e8ec 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, | 5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, |
6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, | 6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, |
7 | * University of Szeged, Hungary | 7 | * University of Szeged, Hungary |
8 | * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com> | 8 | * 2006 KaiGai Kohei <kaigai@ak.jp.nec.com> |
9 | * | 9 | * |
10 | * For licensing information, see the file 'LICENCE' in this directory. | 10 | * For licensing information, see the file 'LICENCE' in this directory. |
11 | * | 11 | * |
@@ -310,8 +310,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
310 | #ifdef CONFIG_JFFS2_FS_XATTR | 310 | #ifdef CONFIG_JFFS2_FS_XATTR |
311 | case JFFS2_NODETYPE_XATTR: { | 311 | case JFFS2_NODETYPE_XATTR: { |
312 | struct jffs2_sum_xattr_mem *temp; | 312 | struct jffs2_sum_xattr_mem *temp; |
313 | if (je32_to_cpu(node->x.version) == 0xffffffff) | ||
314 | return 0; | ||
315 | temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); | 313 | temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); |
316 | if (!temp) | 314 | if (!temp) |
317 | goto no_mem; | 315 | goto no_mem; |
@@ -327,10 +325,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
327 | } | 325 | } |
328 | case JFFS2_NODETYPE_XREF: { | 326 | case JFFS2_NODETYPE_XREF: { |
329 | struct jffs2_sum_xref_mem *temp; | 327 | struct jffs2_sum_xref_mem *temp; |
330 | |||
331 | if (je32_to_cpu(node->r.ino) == 0xffffffff | ||
332 | && je32_to_cpu(node->r.xid) == 0xffffffff) | ||
333 | return 0; | ||
334 | temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); | 328 | temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); |
335 | if (!temp) | 329 | if (!temp) |
336 | goto no_mem; | 330 | goto no_mem; |
@@ -483,22 +477,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras | |||
483 | 477 | ||
484 | xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), | 478 | xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), |
485 | je32_to_cpu(spx->version)); | 479 | je32_to_cpu(spx->version)); |
486 | if (IS_ERR(xd)) { | 480 | if (IS_ERR(xd)) |
487 | if (PTR_ERR(xd) == -EEXIST) { | ||
488 | /* a newer version of xd exists */ | ||
489 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen)))) | ||
490 | return err; | ||
491 | sp += JFFS2_SUMMARY_XATTR_SIZE; | ||
492 | break; | ||
493 | } | ||
494 | JFFS2_NOTICE("allocation of xattr_datum failed\n"); | ||
495 | return PTR_ERR(xd); | 481 | return PTR_ERR(xd); |
482 | if (xd->version > je32_to_cpu(spx->version)) { | ||
483 | /* node is not the newest one */ | ||
484 | struct jffs2_raw_node_ref *raw | ||
485 | = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, | ||
486 | PAD(je32_to_cpu(spx->totlen)), NULL); | ||
487 | raw->next_in_ino = xd->node->next_in_ino; | ||
488 | xd->node->next_in_ino = raw; | ||
489 | } else { | ||
490 | xd->version = je32_to_cpu(spx->version); | ||
491 | sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, | ||
492 | PAD(je32_to_cpu(spx->totlen)), (void *)xd); | ||
496 | } | 493 | } |
497 | |||
498 | xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, | ||
499 | PAD(je32_to_cpu(spx->totlen)), NULL); | ||
500 | /* FIXME */ xd->node->next_in_ino = (void *)xd; | ||
501 | |||
502 | *pseudo_random += je32_to_cpu(spx->xid); | 494 | *pseudo_random += je32_to_cpu(spx->xid); |
503 | sp += JFFS2_SUMMARY_XATTR_SIZE; | 495 | sp += JFFS2_SUMMARY_XATTR_SIZE; |
504 | 496 | ||
@@ -519,14 +511,11 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras | |||
519 | JFFS2_NOTICE("allocation of xattr_datum failed\n"); | 511 | JFFS2_NOTICE("allocation of xattr_datum failed\n"); |
520 | return -ENOMEM; | 512 | return -ENOMEM; |
521 | } | 513 | } |
522 | ref->ino = 0xfffffffe; | ||
523 | ref->xid = 0xfffffffd; | ||
524 | ref->next = c->xref_temp; | 514 | ref->next = c->xref_temp; |
525 | c->xref_temp = ref; | 515 | c->xref_temp = ref; |
526 | 516 | ||
527 | ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, | 517 | sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, |
528 | PAD(sizeof(struct jffs2_raw_xref)), NULL); | 518 | PAD(sizeof(struct jffs2_raw_xref)), (void *)ref); |
529 | /* FIXME */ ref->node->next_in_ino = (void *)ref; | ||
530 | 519 | ||
531 | *pseudo_random += ref->node->flash_offset; | 520 | *pseudo_random += ref->node->flash_offset; |
532 | sp += JFFS2_SUMMARY_XREF_SIZE; | 521 | sp += JFFS2_SUMMARY_XREF_SIZE; |
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index 2d82e250be34..18e66dbf23b4 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c | |||
@@ -23,18 +23,15 @@ | |||
23 | * xattr_datum_hashkey(xprefix, xname, xvalue, xsize) | 23 | * xattr_datum_hashkey(xprefix, xname, xvalue, xsize) |
24 | * is used to calculate the xdatum hashkey. The remainder of the hashkey modulo XATTRINDEX_HASHSIZE is | 24 | * is used to calculate the xdatum hashkey. The remainder of the hashkey modulo XATTRINDEX_HASHSIZE is |
25 | * the index of the xattr name/value pair cache (c->xattrindex). | 25 | * the index of the xattr name/value pair cache (c->xattrindex). |
26 | * is_xattr_datum_unchecked(c, xd) | ||
27 | * returns 1, if xdatum contains any unchecked raw nodes. if all raw nodes are not | ||
28 | * unchecked, it returns 0. | ||
26 | * unload_xattr_datum(c, xd) | 29 | * unload_xattr_datum(c, xd) |
27 | * is used to release xattr name/value pair and detach from c->xattrindex. | 30 | * is used to release xattr name/value pair and detach from c->xattrindex. |
28 | * reclaim_xattr_datum(c) | 31 | * reclaim_xattr_datum(c) |
29 | * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when | 32 | * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when |
30 | * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold | 33 | * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold |
31 | * is hard coded as 32KiB. | 34 | * is hard coded as 32KiB. |
32 | * delete_xattr_datum_node(c, xd) | ||
33 | * is used to delete a jffs2 node is dominated by xdatum. When EBS(Erase Block Summary) is | ||
34 | * enabled, it overwrites the obsolete node by myself. | ||
35 | * delete_xattr_datum(c, xd) | ||
36 | * is used to delete jffs2_xattr_datum object. It must be called with 0-value of reference | ||
37 | * counter. (It means how many jffs2_xattr_ref object refers this xdatum.) | ||
38 | * do_verify_xattr_datum(c, xd) | 35 | * do_verify_xattr_datum(c, xd) |
39 | * is used to load the xdatum informations without name/value pair from the medium. | 36 | * is used to load the xdatum informations without name/value pair from the medium. |
40 | * It's necessary once, because those informations are not collected during mounting | 37 | * It's necessary once, because those informations are not collected during mounting |
@@ -53,8 +50,10 @@ | |||
53 | * is used to write xdatum to medium. xd->version will be incremented. | 50 | * is used to write xdatum to medium. xd->version will be incremented. |
54 | * create_xattr_datum(c, xprefix, xname, xvalue, xsize) | 51 | * create_xattr_datum(c, xprefix, xname, xvalue, xsize) |
55 | * is used to create new xdatum and write to medium. | 52 | * is used to create new xdatum and write to medium. |
53 | * delete_xattr_datum(c, xd) | ||
54 | * is used to delete an xdatum. It marks xd with JFFS2_XFLAGS_DEAD, and allows | ||
55 | * GC to reclaim those physical nodes. | ||
56 | * -------------------------------------------------- */ | 56 | * -------------------------------------------------- */ |
57 | |||
58 | static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) | 57 | static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) |
59 | { | 58 | { |
60 | int name_len = strlen(xname); | 59 | int name_len = strlen(xname); |
@@ -62,6 +61,22 @@ static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char * | |||
62 | return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize); | 61 | return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize); |
63 | } | 62 | } |
64 | 63 | ||
64 | static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
65 | { | ||
66 | struct jffs2_raw_node_ref *raw; | ||
67 | int rc = 0; | ||
68 | |||
69 | spin_lock(&c->erase_completion_lock); | ||
70 | for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { | ||
71 | if (ref_flags(raw) == REF_UNCHECKED) { | ||
72 | rc = 1; | ||
73 | break; | ||
74 | } | ||
75 | } | ||
76 | spin_unlock(&c->erase_completion_lock); | ||
77 | return rc; | ||
78 | } | ||
79 | |||
65 | static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | 80 | static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) |
66 | { | 81 | { |
67 | /* must be called under down_write(xattr_sem) */ | 82 | /* must be called under down_write(xattr_sem) */ |
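The is_xattr_datum_unchecked() helper added above walks the datum's raw-ref chain, which is circular: the walk ends when next_in_ino points back at the owning xdatum, which is also why jffs2_alloc_xattr_datum() now initialises xd->node = (void *)xd. A minimal sketch of that walk, with illustrative types and an illustrative flag bit standing in for REF_UNCHECKED:

    #define REF_UNCHECKED_FLAG 0x1                  /* illustrative stand-in */

    struct raw_ref { struct raw_ref *next_in_ino; unsigned flags; };
    struct xdatum  { struct raw_ref *node; };

    static int any_ref_unchecked(struct xdatum *xd)
    {
            struct raw_ref *raw;

            /* The chain is circular; it terminates at the owner itself. */
            for (raw = xd->node; raw != (void *)xd; raw = raw->next_in_ino)
                    if (raw->flags & REF_UNCHECKED_FLAG)
                            return 1;
            return 0;
    }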
@@ -107,77 +122,33 @@ static void reclaim_xattr_datum(struct jffs2_sb_info *c) | |||
107 | before, c->xdatum_mem_usage, before - c->xdatum_mem_usage); | 122 | before, c->xdatum_mem_usage, before - c->xdatum_mem_usage); |
108 | } | 123 | } |
109 | 124 | ||
110 | static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
111 | { | ||
112 | /* must be called under down_write(xattr_sem) */ | ||
113 | struct jffs2_raw_xattr rx; | ||
114 | size_t length; | ||
115 | int rc; | ||
116 | |||
117 | if (!xd->node) { | ||
118 | JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid); | ||
119 | return; | ||
120 | } | ||
121 | if (jffs2_sum_active()) { | ||
122 | memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr)); | ||
123 | rc = jffs2_flash_read(c, ref_offset(xd->node), | ||
124 | sizeof(struct jffs2_unknown_node), | ||
125 | &length, (char *)&rx); | ||
126 | if (rc || length != sizeof(struct jffs2_unknown_node)) { | ||
127 | JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", | ||
128 | rc, sizeof(struct jffs2_unknown_node), | ||
129 | length, ref_offset(xd->node)); | ||
130 | } | ||
131 | rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx), | ||
132 | &length, (char *)&rx); | ||
133 | if (rc || length != sizeof(struct jffs2_raw_xattr)) { | ||
134 | JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n", | ||
135 | rc, sizeof(rx), length, ref_offset(xd->node)); | ||
136 | } | ||
137 | } | ||
138 | spin_lock(&c->erase_completion_lock); | ||
139 | xd->node->next_in_ino = NULL; | ||
140 | spin_unlock(&c->erase_completion_lock); | ||
141 | jffs2_mark_node_obsolete(c, xd->node); | ||
142 | xd->node = NULL; | ||
143 | } | ||
144 | |||
145 | static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
146 | { | ||
147 | /* must be called under down_write(xattr_sem) */ | ||
148 | BUG_ON(xd->refcnt); | ||
149 | |||
150 | unload_xattr_datum(c, xd); | ||
151 | if (xd->node) { | ||
152 | delete_xattr_datum_node(c, xd); | ||
153 | xd->node = NULL; | ||
154 | } | ||
155 | jffs2_free_xattr_datum(xd); | ||
156 | } | ||
157 | |||
158 | static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | 125 | static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) |
159 | { | 126 | { |
160 | /* must be called under down_write(xattr_sem) */ | 127 | /* must be called under down_write(xattr_sem) */ |
161 | struct jffs2_eraseblock *jeb; | 128 | struct jffs2_eraseblock *jeb; |
129 | struct jffs2_raw_node_ref *raw; | ||
162 | struct jffs2_raw_xattr rx; | 130 | struct jffs2_raw_xattr rx; |
163 | size_t readlen; | 131 | size_t readlen; |
164 | uint32_t crc, totlen; | 132 | uint32_t crc, offset, totlen; |
165 | int rc; | 133 | int rc; |
166 | 134 | ||
167 | BUG_ON(!xd->node); | 135 | spin_lock(&c->erase_completion_lock); |
168 | BUG_ON(ref_flags(xd->node) != REF_UNCHECKED); | 136 | offset = ref_offset(xd->node); |
137 | if (ref_flags(xd->node) == REF_PRISTINE) | ||
138 | goto complete; | ||
139 | spin_unlock(&c->erase_completion_lock); | ||
169 | 140 | ||
170 | rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx); | 141 | rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx); |
171 | if (rc || readlen != sizeof(rx)) { | 142 | if (rc || readlen != sizeof(rx)) { |
172 | JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", | 143 | JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", |
173 | rc, sizeof(rx), readlen, ref_offset(xd->node)); | 144 | rc, sizeof(rx), readlen, offset); |
174 | return rc ? rc : -EIO; | 145 | return rc ? rc : -EIO; |
175 | } | 146 | } |
176 | crc = crc32(0, &rx, sizeof(rx) - 4); | 147 | crc = crc32(0, &rx, sizeof(rx) - 4); |
177 | if (crc != je32_to_cpu(rx.node_crc)) { | 148 | if (crc != je32_to_cpu(rx.node_crc)) { |
178 | if (je32_to_cpu(rx.node_crc) != 0xffffffff) | 149 | JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", |
179 | JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | 150 | offset, je32_to_cpu(rx.node_crc), crc); |
180 | ref_offset(xd->node), je32_to_cpu(rx.hdr_crc), crc); | 151 | xd->flags |= JFFS2_XFLAGS_INVALID; |
181 | return EIO; | 152 | return EIO; |
182 | } | 153 | } |
183 | totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); | 154 | totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); |
@@ -188,11 +159,12 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat | |||
188 | || je32_to_cpu(rx.version) != xd->version) { | 159 | || je32_to_cpu(rx.version) != xd->version) { |
189 | JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, " | 160 | JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, " |
190 | "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n", | 161 | "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n", |
191 | ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK, | 162 | offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK, |
192 | je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR, | 163 | je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR, |
193 | je32_to_cpu(rx.totlen), totlen, | 164 | je32_to_cpu(rx.totlen), totlen, |
194 | je32_to_cpu(rx.xid), xd->xid, | 165 | je32_to_cpu(rx.xid), xd->xid, |
195 | je32_to_cpu(rx.version), xd->version); | 166 | je32_to_cpu(rx.version), xd->version); |
167 | xd->flags |= JFFS2_XFLAGS_INVALID; | ||
196 | return EIO; | 168 | return EIO; |
197 | } | 169 | } |
198 | xd->xprefix = rx.xprefix; | 170 | xd->xprefix = rx.xprefix; |
@@ -200,14 +172,17 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat | |||
200 | xd->value_len = je16_to_cpu(rx.value_len); | 172 | xd->value_len = je16_to_cpu(rx.value_len); |
201 | xd->data_crc = je32_to_cpu(rx.data_crc); | 173 | xd->data_crc = je32_to_cpu(rx.data_crc); |
202 | 174 | ||
203 | /* This JFFS2_NODETYPE_XATTR node is checked */ | ||
204 | jeb = &c->blocks[ref_offset(xd->node) / c->sector_size]; | ||
205 | totlen = PAD(je32_to_cpu(rx.totlen)); | ||
206 | |||
207 | spin_lock(&c->erase_completion_lock); | 175 | spin_lock(&c->erase_completion_lock); |
208 | c->unchecked_size -= totlen; c->used_size += totlen; | 176 | complete: |
209 | jeb->unchecked_size -= totlen; jeb->used_size += totlen; | 177 | for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { |
210 | xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE; | 178 | jeb = &c->blocks[ref_offset(raw) / c->sector_size]; |
179 | totlen = PAD(ref_totlen(c, jeb, raw)); | ||
180 | if (ref_flags(raw) == REF_UNCHECKED) { | ||
181 | c->unchecked_size -= totlen; c->used_size += totlen; | ||
182 | jeb->unchecked_size -= totlen; jeb->used_size += totlen; | ||
183 | } | ||
184 | raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL); | ||
185 | } | ||
211 | spin_unlock(&c->erase_completion_lock); | 186 | spin_unlock(&c->erase_completion_lock); |
212 | 187 | ||
213 | /* unchecked xdatum is chained with c->xattr_unchecked */ | 188 | /* unchecked xdatum is chained with c->xattr_unchecked */ |
@@ -227,7 +202,6 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum | |||
227 | uint32_t crc, length; | 202 | uint32_t crc, length; |
228 | int i, ret, retry = 0; | 203 | int i, ret, retry = 0; |
229 | 204 | ||
230 | BUG_ON(!xd->node); | ||
231 | BUG_ON(ref_flags(xd->node) != REF_PRISTINE); | 205 | BUG_ON(ref_flags(xd->node) != REF_PRISTINE); |
232 | BUG_ON(!list_empty(&xd->xindex)); | 206 | BUG_ON(!list_empty(&xd->xindex)); |
233 | retry: | 207 | retry: |
@@ -253,6 +227,7 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum | |||
253 | " at %#08x, read: 0x%08x calculated: 0x%08x\n", | 227 | " at %#08x, read: 0x%08x calculated: 0x%08x\n", |
254 | ref_offset(xd->node), xd->data_crc, crc); | 228 | ref_offset(xd->node), xd->data_crc, crc); |
255 | kfree(data); | 229 | kfree(data); |
230 | xd->flags |= JFFS2_XFLAGS_INVALID; | ||
256 | return EIO; | 231 | return EIO; |
257 | } | 232 | } |
258 | 233 | ||
@@ -286,16 +261,14 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x | |||
286 | * rc > 0 : Unrecoverable error, this node should be deleted. | 261 | * rc > 0 : Unrecoverable error, this node should be deleted. |
287 | */ | 262 | */ |
288 | int rc = 0; | 263 | int rc = 0; |
289 | BUG_ON(xd->xname); | 264 | |
290 | if (!xd->node) | 265 | BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD); |
266 | if (xd->xname) | ||
267 | return 0; | ||
268 | if (xd->flags & JFFS2_XFLAGS_INVALID) | ||
291 | return EIO; | 269 | return EIO; |
292 | if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) { | 270 | if (unlikely(is_xattr_datum_unchecked(c, xd))) |
293 | rc = do_verify_xattr_datum(c, xd); | 271 | rc = do_verify_xattr_datum(c, xd); |
294 | if (rc > 0) { | ||
295 | list_del_init(&xd->xindex); | ||
296 | delete_xattr_datum_node(c, xd); | ||
297 | } | ||
298 | } | ||
299 | if (!rc) | 272 | if (!rc) |
300 | rc = do_load_xattr_datum(c, xd); | 273 | rc = do_load_xattr_datum(c, xd); |
301 | return rc; | 274 | return rc; |
@@ -304,7 +277,6 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x | |||
304 | static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | 277 | static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) |
305 | { | 278 | { |
306 | /* must be called under down_write(xattr_sem) */ | 279 | /* must be called under down_write(xattr_sem) */ |
307 | struct jffs2_raw_node_ref *raw; | ||
308 | struct jffs2_raw_xattr rx; | 280 | struct jffs2_raw_xattr rx; |
309 | struct kvec vecs[2]; | 281 | struct kvec vecs[2]; |
310 | size_t length; | 282 | size_t length; |
@@ -312,14 +284,16 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x | |||
312 | uint32_t phys_ofs = write_ofs(c); | 284 | uint32_t phys_ofs = write_ofs(c); |
313 | 285 | ||
314 | BUG_ON(!xd->xname); | 286 | BUG_ON(!xd->xname); |
287 | BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID)); | ||
315 | 288 | ||
316 | vecs[0].iov_base = &rx; | 289 | vecs[0].iov_base = &rx; |
317 | vecs[0].iov_len = PAD(sizeof(rx)); | 290 | vecs[0].iov_len = sizeof(rx); |
318 | vecs[1].iov_base = xd->xname; | 291 | vecs[1].iov_base = xd->xname; |
319 | vecs[1].iov_len = xd->name_len + 1 + xd->value_len; | 292 | vecs[1].iov_len = xd->name_len + 1 + xd->value_len; |
320 | totlen = vecs[0].iov_len + vecs[1].iov_len; | 293 | totlen = vecs[0].iov_len + vecs[1].iov_len; |
321 | 294 | ||
322 | /* Setup raw-xattr */ | 295 | /* Setup raw-xattr */ |
296 | memset(&rx, 0, sizeof(rx)); | ||
323 | rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 297 | rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
324 | rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR); | 298 | rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR); |
325 | rx.totlen = cpu_to_je32(PAD(totlen)); | 299 | rx.totlen = cpu_to_je32(PAD(totlen)); |
@@ -343,14 +317,8 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x | |||
343 | 317 | ||
344 | return rc; | 318 | return rc; |
345 | } | 319 | } |
346 | |||
347 | /* success */ | 320 | /* success */ |
348 | raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL); | 321 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd); |
349 | /* FIXME */ raw->next_in_ino = (void *)xd; | ||
350 | |||
351 | if (xd->node) | ||
352 | delete_xattr_datum_node(c, xd); | ||
353 | xd->node = raw; | ||
354 | 322 | ||
355 | dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n", | 323 | dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n", |
356 | xd->xid, xd->version, xd->xprefix, xd->xname); | 324 | xd->xid, xd->version, xd->xprefix, xd->xname); |
@@ -377,7 +345,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, | |||
377 | && xd->value_len==xsize | 345 | && xd->value_len==xsize |
378 | && !strcmp(xd->xname, xname) | 346 | && !strcmp(xd->xname, xname) |
379 | && !memcmp(xd->xvalue, xvalue, xsize)) { | 347 | && !memcmp(xd->xvalue, xvalue, xsize)) { |
380 | xd->refcnt++; | 348 | atomic_inc(&xd->refcnt); |
381 | return xd; | 349 | return xd; |
382 | } | 350 | } |
383 | } | 351 | } |
@@ -397,7 +365,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, | |||
397 | strcpy(data, xname); | 365 | strcpy(data, xname); |
398 | memcpy(data + name_len + 1, xvalue, xsize); | 366 | memcpy(data + name_len + 1, xvalue, xsize); |
399 | 367 | ||
400 | xd->refcnt = 1; | 368 | atomic_set(&xd->refcnt, 1); |
401 | xd->xid = ++c->highest_xid; | 369 | xd->xid = ++c->highest_xid; |
402 | xd->flags |= JFFS2_XFLAGS_HOT; | 370 | xd->flags |= JFFS2_XFLAGS_HOT; |
403 | xd->xprefix = xprefix; | 371 | xd->xprefix = xprefix; |
@@ -426,20 +394,36 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, | |||
426 | return xd; | 394 | return xd; |
427 | } | 395 | } |
428 | 396 | ||
397 | static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
398 | { | ||
399 | /* must be called under down_write(xattr_sem) */ | ||
400 | BUG_ON(atomic_read(&xd->refcnt)); | ||
401 | |||
402 | unload_xattr_datum(c, xd); | ||
403 | xd->flags |= JFFS2_XFLAGS_DEAD; | ||
404 | spin_lock(&c->erase_completion_lock); | ||
405 | if (xd->node == (void *)xd) { | ||
406 | BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID)); | ||
407 | jffs2_free_xattr_datum(xd); | ||
408 | } else { | ||
409 | list_add(&xd->xindex, &c->xattr_dead_list); | ||
410 | } | ||
411 | spin_unlock(&c->erase_completion_lock); | ||
412 | dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version); | ||
413 | } | ||
414 | |||
429 | /* -------- xref related functions ------------------ | 415 | /* -------- xref related functions ------------------ |
430 | * verify_xattr_ref(c, ref) | 416 | * verify_xattr_ref(c, ref) |
431 | * is used to load xref information from medium. Because summary data does not | 417 | * is used to load xref information from medium. Because summary data does not |
432 | * contain xid/ino, it's necessary to verify once while mounting process. | 418 | * contain xid/ino, it's necessary to verify once while mounting process. |
433 | * delete_xattr_ref_node(c, ref) | ||
434 | * is used to delete a jffs2 node is dominated by xref. When EBS is enabled, | ||
435 | * it overwrites the obsolete node by myself. | ||
436 | * delete_xattr_ref(c, ref) | ||
437 | * is used to delete jffs2_xattr_ref object. If the reference counter of xdatum | ||
438 | * is refered by this xref become 0, delete_xattr_datum() is called later. | ||
439 | * save_xattr_ref(c, ref) | 419 | * save_xattr_ref(c, ref) |
440 | * is used to write xref to medium. | 420 | * is used to write xref to medium. If the delete marker is set, it writes |
421 | * a deletion record for the xref to the medium. | ||
441 | * create_xattr_ref(c, ic, xd) | 422 | * create_xattr_ref(c, ic, xd) |
442 | * is used to create a new xref and write to medium. | 423 | * is used to create a new xref and write to medium. |
424 | * delete_xattr_ref(c, ref) | ||
425 | * is used to delete a jffs2_xattr_ref. It marks the xref with XREF_DELETE_MARKER, | ||
426 | * and allows GC to reclaim those physical nodes. | ||
443 | * jffs2_xattr_delete_inode(c, ic) | 427 | * jffs2_xattr_delete_inode(c, ic) |
444 | * is called to remove xrefs related to obsolete inode when inode is unlinked. | 428 | * is called to remove xrefs related to obsolete inode when inode is unlinked. |
445 | * jffs2_xattr_free_inode(c, ic) | 429 | * jffs2_xattr_free_inode(c, ic) |
@@ -450,25 +434,29 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, | |||
450 | static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | 434 | static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) |
451 | { | 435 | { |
452 | struct jffs2_eraseblock *jeb; | 436 | struct jffs2_eraseblock *jeb; |
437 | struct jffs2_raw_node_ref *raw; | ||
453 | struct jffs2_raw_xref rr; | 438 | struct jffs2_raw_xref rr; |
454 | size_t readlen; | 439 | size_t readlen; |
455 | uint32_t crc, totlen; | 440 | uint32_t crc, offset, totlen; |
456 | int rc; | 441 | int rc; |
457 | 442 | ||
458 | BUG_ON(ref_flags(ref->node) != REF_UNCHECKED); | 443 | spin_lock(&c->erase_completion_lock); |
444 | if (ref_flags(ref->node) != REF_UNCHECKED) | ||
445 | goto complete; | ||
446 | offset = ref_offset(ref->node); | ||
447 | spin_unlock(&c->erase_completion_lock); | ||
459 | 448 | ||
460 | rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr); | 449 | rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr); |
461 | if (rc || sizeof(rr) != readlen) { | 450 | if (rc || sizeof(rr) != readlen) { |
462 | JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n", | 451 | JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n", |
463 | rc, sizeof(rr), readlen, ref_offset(ref->node)); | 452 | rc, sizeof(rr), readlen, offset); |
464 | return rc ? rc : -EIO; | 453 | return rc ? rc : -EIO; |
465 | } | 454 | } |
466 | /* obsolete node */ | 455 | /* obsolete node */ |
467 | crc = crc32(0, &rr, sizeof(rr) - 4); | 456 | crc = crc32(0, &rr, sizeof(rr) - 4); |
468 | if (crc != je32_to_cpu(rr.node_crc)) { | 457 | if (crc != je32_to_cpu(rr.node_crc)) { |
469 | if (je32_to_cpu(rr.node_crc) != 0xffffffff) | 458 | JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", |
470 | JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | 459 | offset, je32_to_cpu(rr.node_crc), crc); |
471 | ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc); | ||
472 | return EIO; | 460 | return EIO; |
473 | } | 461 | } |
474 | if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK | 462 | if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK |
@@ -476,22 +464,28 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref | |||
476 | || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) { | 464 | || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) { |
477 | JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, " | 465 | JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, " |
478 | "nodetype=%#04x/%#04x, totlen=%u/%zu\n", | 466 | "nodetype=%#04x/%#04x, totlen=%u/%zu\n", |
479 | ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, | 467 | offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, |
480 | je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, | 468 | je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, |
481 | je32_to_cpu(rr.totlen), PAD(sizeof(rr))); | 469 | je32_to_cpu(rr.totlen), PAD(sizeof(rr))); |
482 | return EIO; | 470 | return EIO; |
483 | } | 471 | } |
484 | ref->ino = je32_to_cpu(rr.ino); | 472 | ref->ino = je32_to_cpu(rr.ino); |
485 | ref->xid = je32_to_cpu(rr.xid); | 473 | ref->xid = je32_to_cpu(rr.xid); |
486 | 474 | ref->xseqno = je32_to_cpu(rr.xseqno); | |
487 | /* fixup superblock/eraseblock info */ | 475 | if (ref->xseqno > c->highest_xseqno) |
488 | jeb = &c->blocks[ref_offset(ref->node) / c->sector_size]; | 476 | c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); |
489 | totlen = PAD(sizeof(rr)); | ||
490 | 477 | ||
491 | spin_lock(&c->erase_completion_lock); | 478 | spin_lock(&c->erase_completion_lock); |
492 | c->unchecked_size -= totlen; c->used_size += totlen; | 479 | complete: |
493 | jeb->unchecked_size -= totlen; jeb->used_size += totlen; | 480 | for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) { |
494 | ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE; | 481 | jeb = &c->blocks[ref_offset(raw) / c->sector_size]; |
482 | totlen = PAD(ref_totlen(c, jeb, raw)); | ||
483 | if (ref_flags(raw) == REF_UNCHECKED) { | ||
484 | c->unchecked_size -= totlen; c->used_size += totlen; | ||
485 | jeb->unchecked_size -= totlen; jeb->used_size += totlen; | ||
486 | } | ||
487 | raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL); | ||
488 | } | ||
495 | spin_unlock(&c->erase_completion_lock); | 489 | spin_unlock(&c->erase_completion_lock); |
496 | 490 | ||
497 | dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n", | 491 | dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n", |
@@ -499,58 +493,12 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref | |||
499 | return 0; | 493 | return 0; |
500 | } | 494 | } |
501 | 495 | ||
502 | static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | ||
503 | { | ||
504 | struct jffs2_raw_xref rr; | ||
505 | size_t length; | ||
506 | int rc; | ||
507 | |||
508 | if (jffs2_sum_active()) { | ||
509 | memset(&rr, 0xff, sizeof(rr)); | ||
510 | rc = jffs2_flash_read(c, ref_offset(ref->node), | ||
511 | sizeof(struct jffs2_unknown_node), | ||
512 | &length, (char *)&rr); | ||
513 | if (rc || length != sizeof(struct jffs2_unknown_node)) { | ||
514 | JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", | ||
515 | rc, sizeof(struct jffs2_unknown_node), | ||
516 | length, ref_offset(ref->node)); | ||
517 | } | ||
518 | rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr), | ||
519 | &length, (char *)&rr); | ||
520 | if (rc || length != sizeof(struct jffs2_raw_xref)) { | ||
521 | JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n", | ||
522 | rc, sizeof(rr), length, ref_offset(ref->node)); | ||
523 | } | ||
524 | } | ||
525 | spin_lock(&c->erase_completion_lock); | ||
526 | ref->node->next_in_ino = NULL; | ||
527 | spin_unlock(&c->erase_completion_lock); | ||
528 | jffs2_mark_node_obsolete(c, ref->node); | ||
529 | ref->node = NULL; | ||
530 | } | ||
531 | |||
532 | static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | ||
533 | { | ||
534 | /* must be called under down_write(xattr_sem) */ | ||
535 | struct jffs2_xattr_datum *xd; | ||
536 | |||
537 | BUG_ON(!ref->node); | ||
538 | delete_xattr_ref_node(c, ref); | ||
539 | |||
540 | xd = ref->xd; | ||
541 | xd->refcnt--; | ||
542 | if (!xd->refcnt) | ||
543 | delete_xattr_datum(c, xd); | ||
544 | jffs2_free_xattr_ref(ref); | ||
545 | } | ||
546 | |||
547 | static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | 496 | static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) |
548 | { | 497 | { |
549 | /* must be called under down_write(xattr_sem) */ | 498 | /* must be called under down_write(xattr_sem) */ |
550 | struct jffs2_raw_node_ref *raw; | ||
551 | struct jffs2_raw_xref rr; | 499 | struct jffs2_raw_xref rr; |
552 | size_t length; | 500 | size_t length; |
553 | uint32_t phys_ofs = write_ofs(c); | 501 | uint32_t xseqno, phys_ofs = write_ofs(c); |
554 | int ret; | 502 | int ret; |
555 | 503 | ||
556 | rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 504 | rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
@@ -558,8 +506,16 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | |||
558 | rr.totlen = cpu_to_je32(PAD(sizeof(rr))); | 506 | rr.totlen = cpu_to_je32(PAD(sizeof(rr))); |
559 | rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4)); | 507 | rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4)); |
560 | 508 | ||
561 | rr.ino = cpu_to_je32(ref->ic->ino); | 509 | xseqno = (c->highest_xseqno += 2); |
562 | rr.xid = cpu_to_je32(ref->xd->xid); | 510 | if (is_xattr_ref_dead(ref)) { |
511 | xseqno |= XREF_DELETE_MARKER; | ||
512 | rr.ino = cpu_to_je32(ref->ino); | ||
513 | rr.xid = cpu_to_je32(ref->xid); | ||
514 | } else { | ||
515 | rr.ino = cpu_to_je32(ref->ic->ino); | ||
516 | rr.xid = cpu_to_je32(ref->xd->xid); | ||
517 | } | ||
518 | rr.xseqno = cpu_to_je32(xseqno); | ||
563 | rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4)); | 519 | rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4)); |
564 | 520 | ||
565 | ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr); | 521 | ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr); |
@@ -572,12 +528,9 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | |||
572 | 528 | ||
573 | return ret; | 529 | return ret; |
574 | } | 530 | } |
575 | 531 | /* success */ | |
576 | raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL); | 532 | ref->xseqno = xseqno; |
577 | /* FIXME */ raw->next_in_ino = (void *)ref; | 533 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref); |
578 | if (ref->node) | ||
579 | delete_xattr_ref_node(c, ref); | ||
580 | ref->node = raw; | ||
581 | 534 | ||
582 | dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid); | 535 | dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid); |
583 | 536 | ||
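save_xattr_ref() above now stamps every xref record with a sequence number: c->highest_xseqno is advanced by two, so live records are always even, and a dead xref additionally gets the low bit set as a delete marker. The scanner can then pick the newest record for an (ino, xid) pair and recognise tombstones. A compact sketch of that numbering, assuming the marker is the low bit as XREF_DELETE_MARKER suggests:

    #include <stdint.h>

    #define DELETE_MARKER 0x00000001u       /* illustrative stand-in for XREF_DELETE_MARKER */

    static uint32_t next_xseqno(uint32_t *highest, int deleting)
    {
            uint32_t seq = (*highest += 2); /* always even for live records */

            if (deleting)
                    seq |= DELETE_MARKER;   /* odd value marks a tombstone */
            return seq;
    }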
@@ -610,6 +563,27 @@ static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct | |||
610 | return ref; /* success */ | 563 | return ref; /* success */ |
611 | } | 564 | } |
612 | 565 | ||
566 | static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | ||
567 | { | ||
568 | /* must be called under down_write(xattr_sem) */ | ||
569 | struct jffs2_xattr_datum *xd; | ||
570 | |||
571 | xd = ref->xd; | ||
572 | ref->xseqno |= XREF_DELETE_MARKER; | ||
573 | ref->ino = ref->ic->ino; | ||
574 | ref->xid = ref->xd->xid; | ||
575 | spin_lock(&c->erase_completion_lock); | ||
576 | ref->next = c->xref_dead_list; | ||
577 | c->xref_dead_list = ref; | ||
578 | spin_unlock(&c->erase_completion_lock); | ||
579 | |||
580 | dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n", | ||
581 | ref->ino, ref->xid, ref->xseqno); | ||
582 | |||
583 | if (atomic_dec_and_test(&xd->refcnt)) | ||
584 | delete_xattr_datum(c, xd); | ||
585 | } | ||
586 | |||
613 | void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 587 | void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) |
614 | { | 588 | { |
615 | /* It's called from jffs2_clear_inode() on inode removing. | 589 | /* It's called from jffs2_clear_inode() on inode removing. |
@@ -638,8 +612,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i | |||
638 | for (ref = ic->xref; ref; ref = _ref) { | 612 | for (ref = ic->xref; ref; ref = _ref) { |
639 | _ref = ref->next; | 613 | _ref = ref->next; |
640 | xd = ref->xd; | 614 | xd = ref->xd; |
641 | xd->refcnt--; | 615 | if (atomic_dec_and_test(&xd->refcnt)) { |
642 | if (!xd->refcnt) { | ||
643 | unload_xattr_datum(c, xd); | 616 | unload_xattr_datum(c, xd); |
644 | jffs2_free_xattr_datum(xd); | 617 | jffs2_free_xattr_datum(xd); |
645 | } | 618 | } |
@@ -655,7 +628,7 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac | |||
655 | * duplicate name/value pairs. If duplicate name/value pair would be found, | 628 | * duplicate name/value pairs. If duplicate name/value pair would be found, |
656 | * one will be removed. | 629 | * one will be removed. |
657 | */ | 630 | */ |
658 | struct jffs2_xattr_ref *ref, *cmp, **pref; | 631 | struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp; |
659 | int rc = 0; | 632 | int rc = 0; |
660 | 633 | ||
661 | if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED)) | 634 | if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED)) |
@@ -673,13 +646,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac | |||
673 | } else if (unlikely(rc < 0)) | 646 | } else if (unlikely(rc < 0)) |
674 | goto out; | 647 | goto out; |
675 | } | 648 | } |
676 | for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) { | 649 | for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) { |
677 | if (!cmp->xd->xname) { | 650 | if (!cmp->xd->xname) { |
678 | ref->xd->flags |= JFFS2_XFLAGS_BIND; | 651 | ref->xd->flags |= JFFS2_XFLAGS_BIND; |
679 | rc = load_xattr_datum(c, cmp->xd); | 652 | rc = load_xattr_datum(c, cmp->xd); |
680 | ref->xd->flags &= ~JFFS2_XFLAGS_BIND; | 653 | ref->xd->flags &= ~JFFS2_XFLAGS_BIND; |
681 | if (unlikely(rc > 0)) { | 654 | if (unlikely(rc > 0)) { |
682 | *pref = cmp->next; | 655 | *pcmp = cmp->next; |
683 | delete_xattr_ref(c, cmp); | 656 | delete_xattr_ref(c, cmp); |
684 | goto retry; | 657 | goto retry; |
685 | } else if (unlikely(rc < 0)) | 658 | } else if (unlikely(rc < 0)) |
@@ -687,8 +660,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac | |||
687 | } | 660 | } |
688 | if (ref->xd->xprefix == cmp->xd->xprefix | 661 | if (ref->xd->xprefix == cmp->xd->xprefix |
689 | && !strcmp(ref->xd->xname, cmp->xd->xname)) { | 662 | && !strcmp(ref->xd->xname, cmp->xd->xname)) { |
690 | *pref = cmp->next; | 663 | if (ref->xseqno > cmp->xseqno) { |
691 | delete_xattr_ref(c, cmp); | 664 | *pcmp = cmp->next; |
665 | delete_xattr_ref(c, cmp); | ||
666 | } else { | ||
667 | *pref = ref->next; | ||
668 | delete_xattr_ref(c, ref); | ||
669 | } | ||
692 | goto retry; | 670 | goto retry; |
693 | } | 671 | } |
694 | } | 672 | } |
@@ -719,9 +697,13 @@ void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c) | |||
719 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) | 697 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) |
720 | INIT_LIST_HEAD(&c->xattrindex[i]); | 698 | INIT_LIST_HEAD(&c->xattrindex[i]); |
721 | INIT_LIST_HEAD(&c->xattr_unchecked); | 699 | INIT_LIST_HEAD(&c->xattr_unchecked); |
700 | INIT_LIST_HEAD(&c->xattr_dead_list); | ||
701 | c->xref_dead_list = NULL; | ||
722 | c->xref_temp = NULL; | 702 | c->xref_temp = NULL; |
723 | 703 | ||
724 | init_rwsem(&c->xattr_sem); | 704 | init_rwsem(&c->xattr_sem); |
705 | c->highest_xid = 0; | ||
706 | c->highest_xseqno = 0; | ||
725 | c->xdatum_mem_usage = 0; | 707 | c->xdatum_mem_usage = 0; |
726 | c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ | 708 | c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ |
727 | } | 709 | } |
@@ -751,7 +733,11 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) | |||
751 | _ref = ref->next; | 733 | _ref = ref->next; |
752 | jffs2_free_xattr_ref(ref); | 734 | jffs2_free_xattr_ref(ref); |
753 | } | 735 | } |
754 | c->xref_temp = NULL; | 736 | |
737 | for (ref=c->xref_dead_list; ref; ref = _ref) { | ||
738 | _ref = ref->next; | ||
739 | jffs2_free_xattr_ref(ref); | ||
740 | } | ||
755 | 741 | ||
756 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) { | 742 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) { |
757 | list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { | 743 | list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { |
@@ -761,100 +747,143 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) | |||
761 | jffs2_free_xattr_datum(xd); | 747 | jffs2_free_xattr_datum(xd); |
762 | } | 748 | } |
763 | } | 749 | } |
750 | |||
751 | list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) { | ||
752 | list_del(&xd->xindex); | ||
753 | jffs2_free_xattr_datum(xd); | ||
754 | } | ||
764 | } | 755 | } |
765 | 756 | ||
757 | #define XREF_TMPHASH_SIZE (128) | ||
766 | void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) | 758 | void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) |
767 | { | 759 | { |
768 | struct jffs2_xattr_ref *ref, *_ref; | 760 | struct jffs2_xattr_ref *ref, *_ref; |
761 | struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE]; | ||
769 | struct jffs2_xattr_datum *xd, *_xd; | 762 | struct jffs2_xattr_datum *xd, *_xd; |
770 | struct jffs2_inode_cache *ic; | 763 | struct jffs2_inode_cache *ic; |
771 | int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0; | 764 | struct jffs2_raw_node_ref *raw; |
765 | int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0; | ||
766 | int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0; | ||
772 | 767 | ||
773 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); | 768 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); |
774 | 769 | ||
775 | /* Phase.1 */ | 770 | /* Phase.1 : Merge same xref */ |
771 | for (i=0; i < XREF_TMPHASH_SIZE; i++) | ||
772 | xref_tmphash[i] = NULL; | ||
776 | for (ref=c->xref_temp; ref; ref=_ref) { | 773 | for (ref=c->xref_temp; ref; ref=_ref) { |
774 | struct jffs2_xattr_ref *tmp; | ||
775 | |||
777 | _ref = ref->next; | 776 | _ref = ref->next; |
778 | /* checking REF_UNCHECKED nodes */ | ||
779 | if (ref_flags(ref->node) != REF_PRISTINE) { | 777 | if (ref_flags(ref->node) != REF_PRISTINE) { |
780 | if (verify_xattr_ref(c, ref)) { | 778 | if (verify_xattr_ref(c, ref)) { |
781 | delete_xattr_ref_node(c, ref); | 779 | BUG_ON(ref->node->next_in_ino != (void *)ref); |
780 | ref->node->next_in_ino = NULL; | ||
781 | jffs2_mark_node_obsolete(c, ref->node); | ||
782 | jffs2_free_xattr_ref(ref); | 782 | jffs2_free_xattr_ref(ref); |
783 | continue; | 783 | continue; |
784 | } | 784 | } |
785 | } | 785 | } |
786 | /* At this point, ref->xid and ref->ino contain XID and inode number. | 786 | |
787 | ref->xd and ref->ic are not valid yet. */ | 787 | i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE; |
788 | xd = jffs2_find_xattr_datum(c, ref->xid); | 788 | for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) { |
789 | ic = jffs2_get_ino_cache(c, ref->ino); | 789 | if (tmp->ino == ref->ino && tmp->xid == ref->xid) |
790 | if (!xd || !ic) { | 790 | break; |
791 | if (ref_flags(ref->node) != REF_UNCHECKED) | 791 | } |
792 | JFFS2_WARNING("xref(ino=%u, xid=%u) is orphan. \n", | 792 | if (tmp) { |
793 | ref->ino, ref->xid); | 793 | raw = ref->node; |
794 | delete_xattr_ref_node(c, ref); | 794 | if (ref->xseqno > tmp->xseqno) { |
795 | tmp->xseqno = ref->xseqno; | ||
796 | raw->next_in_ino = tmp->node; | ||
797 | tmp->node = raw; | ||
798 | } else { | ||
799 | raw->next_in_ino = tmp->node->next_in_ino; | ||
800 | tmp->node->next_in_ino = raw; | ||
801 | } | ||
795 | jffs2_free_xattr_ref(ref); | 802 | jffs2_free_xattr_ref(ref); |
796 | continue; | 803 | continue; |
804 | } else { | ||
805 | ref->next = xref_tmphash[i]; | ||
806 | xref_tmphash[i] = ref; | ||
797 | } | 807 | } |
798 | ref->xd = xd; | ||
799 | ref->ic = ic; | ||
800 | xd->refcnt++; | ||
801 | ref->next = ic->xref; | ||
802 | ic->xref = ref; | ||
803 | xref_count++; | ||
804 | } | 808 | } |
805 | c->xref_temp = NULL; | 809 | c->xref_temp = NULL; |
806 | /* After this, ref->xid/ino are NEVER used. */ | ||
807 | 810 | ||
808 | /* Phase.2 */ | 811 | /* Phase.2 : Bind xref with inode_cache and xattr_datum */ |
812 | for (i=0; i < XREF_TMPHASH_SIZE; i++) { | ||
813 | for (ref=xref_tmphash[i]; ref; ref=_ref) { | ||
814 | xref_count++; | ||
815 | _ref = ref->next; | ||
816 | if (is_xattr_ref_dead(ref)) { | ||
817 | ref->next = c->xref_dead_list; | ||
818 | c->xref_dead_list = ref; | ||
819 | xref_dead_count++; | ||
820 | continue; | ||
821 | } | ||
822 | /* At this point, ref->xid and ref->ino contain XID and inode number. | ||
823 | ref->xd and ref->ic are not valid yet. */ | ||
824 | xd = jffs2_find_xattr_datum(c, ref->xid); | ||
825 | ic = jffs2_get_ino_cache(c, ref->ino); | ||
826 | if (!xd || !ic) { | ||
827 | dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n", | ||
828 | ref->ino, ref->xid, ref->xseqno); | ||
829 | ref->xseqno |= XREF_DELETE_MARKER; | ||
830 | ref->next = c->xref_dead_list; | ||
831 | c->xref_dead_list = ref; | ||
832 | xref_orphan_count++; | ||
833 | continue; | ||
834 | } | ||
835 | ref->xd = xd; | ||
836 | ref->ic = ic; | ||
837 | atomic_inc(&xd->refcnt); | ||
838 | ref->next = ic->xref; | ||
839 | ic->xref = ref; | ||
840 | } | ||
841 | } | ||
842 | |||
843 | /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */ | ||
809 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) { | 844 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) { |
810 | list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { | 845 | list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { |
846 | xdatum_count++; | ||
811 | list_del_init(&xd->xindex); | 847 | list_del_init(&xd->xindex); |
812 | if (!xd->refcnt) { | 848 | if (!atomic_read(&xd->refcnt)) { |
813 | if (ref_flags(xd->node) != REF_UNCHECKED) | 849 | dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n", |
814 | JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n", | 850 | xd->xid, xd->version); |
815 | xd->xid, xd->version, ref_offset(xd->node)); | 851 | xd->flags |= JFFS2_XFLAGS_DEAD; |
816 | delete_xattr_datum(c, xd); | 852 | list_add(&xd->xindex, &c->xattr_unchecked); |
853 | xdatum_orphan_count++; | ||
817 | continue; | 854 | continue; |
818 | } | 855 | } |
819 | if (ref_flags(xd->node) != REF_PRISTINE) { | 856 | if (is_xattr_datum_unchecked(c, xd)) { |
820 | dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n", | 857 | dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n", |
821 | xd->xid, ref_offset(xd->node)); | 858 | xd->xid, xd->version); |
822 | list_add(&xd->xindex, &c->xattr_unchecked); | 859 | list_add(&xd->xindex, &c->xattr_unchecked); |
823 | xdatum_unchecked_count++; | 860 | xdatum_unchecked_count++; |
824 | } | 861 | } |
825 | xdatum_count++; | ||
826 | } | 862 | } |
827 | } | 863 | } |
828 | /* build complete */ | 864 | /* build complete */ |
829 | JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and " | 865 | JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum" |
830 | "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count); | 866 | " (%u unchecked, %u orphan) and " |
867 | "%u of xref (%u dead, %u orphan) found.\n", | ||
868 | xdatum_count, xdatum_unchecked_count, xdatum_orphan_count, | ||
869 | xref_count, xref_dead_count, xref_orphan_count); | ||
831 | } | 870 | } |
832 | 871 | ||
833 | struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, | 872 | struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, |
834 | uint32_t xid, uint32_t version) | 873 | uint32_t xid, uint32_t version) |
835 | { | 874 | { |
836 | struct jffs2_xattr_datum *xd, *_xd; | 875 | struct jffs2_xattr_datum *xd; |
837 | 876 | ||
838 | _xd = jffs2_find_xattr_datum(c, xid); | 877 | xd = jffs2_find_xattr_datum(c, xid); |
839 | if (_xd) { | 878 | if (!xd) { |
840 | dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n", | 879 | xd = jffs2_alloc_xattr_datum(); |
841 | xid, version, _xd->version, ref_offset(_xd->node)); | 880 | if (!xd) |
842 | if (version < _xd->version) | 881 | return ERR_PTR(-ENOMEM); |
843 | return ERR_PTR(-EEXIST); | 882 | xd->xid = xid; |
844 | } | 883 | xd->version = version; |
845 | xd = jffs2_alloc_xattr_datum(); | 884 | if (xd->xid > c->highest_xid) |
846 | if (!xd) | 885 | c->highest_xid = xd->xid; |
847 | return ERR_PTR(-ENOMEM); | 886 | list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]); |
848 | xd->xid = xid; | ||
849 | xd->version = version; | ||
850 | if (xd->xid > c->highest_xid) | ||
851 | c->highest_xid = xd->xid; | ||
852 | list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]); | ||
853 | |||
854 | if (_xd) { | ||
855 | list_del_init(&_xd->xindex); | ||
856 | delete_xattr_datum_node(c, _xd); | ||
857 | jffs2_free_xattr_datum(_xd); | ||
858 | } | 887 | } |
859 | return xd; | 888 | return xd; |
860 | } | 889 | } |
@@ -1080,9 +1109,23 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, | |||
1080 | goto out; | 1109 | goto out; |
1081 | } | 1110 | } |
1082 | if (!buffer) { | 1111 | if (!buffer) { |
1083 | *pref = ref->next; | 1112 | ref->ino = ic->ino; |
1084 | delete_xattr_ref(c, ref); | 1113 | ref->xid = xd->xid; |
1085 | rc = 0; | 1114 | ref->xseqno |= XREF_DELETE_MARKER; |
1115 | rc = save_xattr_ref(c, ref); | ||
1116 | if (!rc) { | ||
1117 | *pref = ref->next; | ||
1118 | spin_lock(&c->erase_completion_lock); | ||
1119 | ref->next = c->xref_dead_list; | ||
1120 | c->xref_dead_list = ref; | ||
1121 | spin_unlock(&c->erase_completion_lock); | ||
1122 | if (atomic_dec_and_test(&xd->refcnt)) | ||
1123 | delete_xattr_datum(c, xd); | ||
1124 | } else { | ||
1125 | ref->ic = ic; | ||
1126 | ref->xd = xd; | ||
1127 | ref->xseqno &= ~XREF_DELETE_MARKER; | ||
1128 | } | ||
1086 | goto out; | 1129 | goto out; |
1087 | } | 1130 | } |
1088 | goto found; | 1131 | goto found; |
@@ -1094,7 +1137,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, | |||
1094 | goto out; | 1137 | goto out; |
1095 | } | 1138 | } |
1096 | if (!buffer) { | 1139 | if (!buffer) { |
1097 | rc = -EINVAL; | 1140 | rc = -ENODATA; |
1098 | goto out; | 1141 | goto out; |
1099 | } | 1142 | } |
1100 | found: | 1143 | found: |
@@ -1110,16 +1153,14 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, | |||
1110 | request = PAD(sizeof(struct jffs2_raw_xref)); | 1153 | request = PAD(sizeof(struct jffs2_raw_xref)); |
1111 | rc = jffs2_reserve_space(c, request, &length, | 1154 | rc = jffs2_reserve_space(c, request, &length, |
1112 | ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE); | 1155 | ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE); |
1156 | down_write(&c->xattr_sem); | ||
1113 | if (rc) { | 1157 | if (rc) { |
1114 | JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); | 1158 | JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); |
1115 | down_write(&c->xattr_sem); | 1159 | if (atomic_dec_and_test(&xd->refcnt)) |
1116 | xd->refcnt--; | ||
1117 | if (!xd->refcnt) | ||
1118 | delete_xattr_datum(c, xd); | 1160 | delete_xattr_datum(c, xd); |
1119 | up_write(&c->xattr_sem); | 1161 | up_write(&c->xattr_sem); |
1120 | return rc; | 1162 | return rc; |
1121 | } | 1163 | } |
1122 | down_write(&c->xattr_sem); | ||
1123 | if (ref) | 1164 | if (ref) |
1124 | *pref = ref->next; | 1165 | *pref = ref->next; |
1125 | newref = create_xattr_ref(c, ic, xd); | 1166 | newref = create_xattr_ref(c, ic, xd); |
@@ -1129,8 +1170,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, | |||
1129 | ic->xref = ref; | 1170 | ic->xref = ref; |
1130 | } | 1171 | } |
1131 | rc = PTR_ERR(newref); | 1172 | rc = PTR_ERR(newref); |
1132 | xd->refcnt--; | 1173 | if (atomic_dec_and_test(&xd->refcnt)) |
1133 | if (!xd->refcnt) | ||
1134 | delete_xattr_datum(c, xd); | 1174 | delete_xattr_datum(c, xd); |
1135 | } else if (ref) { | 1175 | } else if (ref) { |
1136 | delete_xattr_ref(c, ref); | 1176 | delete_xattr_ref(c, ref); |
@@ -1142,38 +1182,40 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, | |||
1142 | } | 1182 | } |
1143 | 1183 | ||
1144 | /* -------- garbage collector functions ------------- | 1184 | /* -------- garbage collector functions ------------- |
1145 | * jffs2_garbage_collect_xattr_datum(c, xd) | 1185 | * jffs2_garbage_collect_xattr_datum(c, xd, raw) |
1146 | * is used to move xdatum into new node. | 1186 | * is used to move xdatum into new node. |
1147 | * jffs2_garbage_collect_xattr_ref(c, ref) | 1187 | * jffs2_garbage_collect_xattr_ref(c, ref, raw) |
1148 | * is used to move xref into new node. | 1188 | * is used to move xref into new node. |
1149 | * jffs2_verify_xattr(c) | 1189 | * jffs2_verify_xattr(c) |
1150 | * is used to call do_verify_xattr_datum() before garbage collecting. | 1190 | * is used to call do_verify_xattr_datum() before garbage collecting. |
1191 | * jffs2_release_xattr_datum(c, xd) | ||
1192 | * is used to release an in-memory object of xdatum. | ||
1193 | * jffs2_release_xattr_ref(c, ref) | ||
1194 | * is used to release an in-memory object of xref. | ||
1151 | * -------------------------------------------------- */ | 1195 | * -------------------------------------------------- */ |
1152 | int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | 1196 | int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, |
1197 | struct jffs2_raw_node_ref *raw) | ||
1153 | { | 1198 | { |
1154 | uint32_t totlen, length, old_ofs; | 1199 | uint32_t totlen, length, old_ofs; |
1155 | int rc = -EINVAL; | 1200 | int rc = 0; |
1156 | 1201 | ||
1157 | down_write(&c->xattr_sem); | 1202 | down_write(&c->xattr_sem); |
1158 | BUG_ON(!xd->node); | 1203 | if (xd->node != raw) |
1159 | 1204 | goto out; | |
1160 | old_ofs = ref_offset(xd->node); | 1205 | if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID)) |
1161 | totlen = ref_totlen(c, c->gcblock, xd->node); | ||
1162 | if (totlen < sizeof(struct jffs2_raw_xattr)) | ||
1163 | goto out; | 1206 | goto out; |
1164 | 1207 | ||
1165 | if (!xd->xname) { | 1208 | rc = load_xattr_datum(c, xd); |
1166 | rc = load_xattr_datum(c, xd); | 1209 | if (unlikely(rc)) { |
1167 | if (unlikely(rc > 0)) { | 1210 | rc = (rc > 0) ? 0 : rc; |
1168 | delete_xattr_datum_node(c, xd); | 1211 | goto out; |
1169 | rc = 0; | ||
1170 | goto out; | ||
1171 | } else if (unlikely(rc < 0)) | ||
1172 | goto out; | ||
1173 | } | 1212 | } |
1213 | old_ofs = ref_offset(xd->node); | ||
1214 | totlen = PAD(sizeof(struct jffs2_raw_xattr) | ||
1215 | + xd->name_len + 1 + xd->value_len); | ||
1174 | rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); | 1216 | rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); |
1175 | if (rc || length < totlen) { | 1217 | if (rc) { |
1176 | JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen); | 1218 | JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen); |
1177 | rc = rc ? rc : -EBADFD; | 1219 | rc = rc ? rc : -EBADFD; |
1178 | goto out; | 1220 | goto out; |
1179 | } | 1221 | } |
@@ -1182,27 +1224,32 @@ int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xatt | |||
1182 | dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n", | 1224 | dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n", |
1183 | xd->xid, xd->version, old_ofs, ref_offset(xd->node)); | 1225 | xd->xid, xd->version, old_ofs, ref_offset(xd->node)); |
1184 | out: | 1226 | out: |
1227 | if (!rc) | ||
1228 | jffs2_mark_node_obsolete(c, raw); | ||
1185 | up_write(&c->xattr_sem); | 1229 | up_write(&c->xattr_sem); |
1186 | return rc; | 1230 | return rc; |
1187 | } | 1231 | } |
1188 | 1232 | ||
1189 | 1233 | int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, | |
1190 | int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | 1234 | struct jffs2_raw_node_ref *raw) |
1191 | { | 1235 | { |
1192 | uint32_t totlen, length, old_ofs; | 1236 | uint32_t totlen, length, old_ofs; |
1193 | int rc = -EINVAL; | 1237 | int rc = 0; |
1194 | 1238 | ||
1195 | down_write(&c->xattr_sem); | 1239 | down_write(&c->xattr_sem); |
1196 | BUG_ON(!ref->node); | 1240 | BUG_ON(!ref->node); |
1197 | 1241 | ||
1242 | if (ref->node != raw) | ||
1243 | goto out; | ||
1244 | if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref)) | ||
1245 | goto out; | ||
1246 | |||
1198 | old_ofs = ref_offset(ref->node); | 1247 | old_ofs = ref_offset(ref->node); |
1199 | totlen = ref_totlen(c, c->gcblock, ref->node); | 1248 | totlen = ref_totlen(c, c->gcblock, ref->node); |
1200 | if (totlen != sizeof(struct jffs2_raw_xref)) | ||
1201 | goto out; | ||
1202 | 1249 | ||
1203 | rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE); | 1250 | rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE); |
1204 | if (rc || length < totlen) { | 1251 | if (rc) { |
1205 | JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n", | 1252 | JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n", |
1206 | __FUNCTION__, rc, totlen); | 1253 | __FUNCTION__, rc, totlen); |
1207 | rc = rc ? rc : -EBADFD; | 1254 | rc = rc ? rc : -EBADFD; |
1208 | goto out; | 1255 | goto out; |
@@ -1212,6 +1259,8 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ | |||
1212 | dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n", | 1259 | dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n", |
1213 | ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); | 1260 | ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); |
1214 | out: | 1261 | out: |
1262 | if (!rc) | ||
1263 | jffs2_mark_node_obsolete(c, raw); | ||
1215 | up_write(&c->xattr_sem); | 1264 | up_write(&c->xattr_sem); |
1216 | return rc; | 1265 | return rc; |
1217 | } | 1266 | } |
@@ -1219,20 +1268,59 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ | |||
1219 | int jffs2_verify_xattr(struct jffs2_sb_info *c) | 1268 | int jffs2_verify_xattr(struct jffs2_sb_info *c) |
1220 | { | 1269 | { |
1221 | struct jffs2_xattr_datum *xd, *_xd; | 1270 | struct jffs2_xattr_datum *xd, *_xd; |
1271 | struct jffs2_eraseblock *jeb; | ||
1272 | struct jffs2_raw_node_ref *raw; | ||
1273 | uint32_t totlen; | ||
1222 | int rc; | 1274 | int rc; |
1223 | 1275 | ||
1224 | down_write(&c->xattr_sem); | 1276 | down_write(&c->xattr_sem); |
1225 | list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { | 1277 | list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { |
1226 | rc = do_verify_xattr_datum(c, xd); | 1278 | rc = do_verify_xattr_datum(c, xd); |
1227 | if (rc == 0) { | 1279 | if (rc < 0) |
1228 | list_del_init(&xd->xindex); | 1280 | continue; |
1229 | break; | 1281 | list_del_init(&xd->xindex); |
1230 | } else if (rc > 0) { | 1282 | spin_lock(&c->erase_completion_lock); |
1231 | list_del_init(&xd->xindex); | 1283 | for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { |
1232 | delete_xattr_datum_node(c, xd); | 1284 | if (ref_flags(raw) != REF_UNCHECKED) |
1285 | continue; | ||
1286 | jeb = &c->blocks[ref_offset(raw) / c->sector_size]; | ||
1287 | totlen = PAD(ref_totlen(c, jeb, raw)); | ||
1288 | c->unchecked_size -= totlen; c->used_size += totlen; | ||
1289 | jeb->unchecked_size -= totlen; jeb->used_size += totlen; | ||
1290 | raw->flash_offset = ref_offset(raw) | ||
1291 | | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL); | ||
1233 | } | 1292 | } |
1293 | if (xd->flags & JFFS2_XFLAGS_DEAD) | ||
1294 | list_add(&xd->xindex, &c->xattr_dead_list); | ||
1295 | spin_unlock(&c->erase_completion_lock); | ||
1234 | } | 1296 | } |
1235 | up_write(&c->xattr_sem); | 1297 | up_write(&c->xattr_sem); |
1236 | |||
1237 | return list_empty(&c->xattr_unchecked) ? 1 : 0; | 1298 | return list_empty(&c->xattr_unchecked) ? 1 : 0; |
1238 | } | 1299 | } |
1300 | |||
1301 | void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
1302 | { | ||
1303 | /* must be called under spin_lock(&c->erase_completion_lock) */ | ||
1304 | if (atomic_read(&xd->refcnt) || xd->node != (void *)xd) | ||
1305 | return; | ||
1306 | |||
1307 | list_del(&xd->xindex); | ||
1308 | jffs2_free_xattr_datum(xd); | ||
1309 | } | ||
1310 | |||
1311 | void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | ||
1312 | { | ||
1313 | /* must be called under spin_lock(&c->erase_completion_lock) */ | ||
1314 | struct jffs2_xattr_ref *tmp, **ptmp; | ||
1315 | |||
1316 | if (ref->node != (void *)ref) | ||
1317 | return; | ||
1318 | |||
1319 | for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) { | ||
1320 | if (ref == tmp) { | ||
1321 | *ptmp = tmp->next; | ||
1322 | break; | ||
1323 | } | ||
1324 | } | ||
1325 | jffs2_free_xattr_ref(ref); | ||
1326 | } | ||
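The xattr.c hunks above convert the open-coded "xd->refcnt--; if (!xd->refcnt)" pairs into atomic_dec_and_test(), so the decrement and the zero test happen as one step and two racing final puts cannot both miss the free. A minimal userspace sketch of the same free-on-last-put pattern, with C11 atomics standing in for the kernel's atomic_t helpers (all names below are illustrative, not taken from JFFS2):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct datum {
	atomic_int refcnt;
	int payload;
};

static struct datum *datum_create(int payload)
{
	struct datum *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	atomic_init(&d->refcnt, 1);	/* creator holds the first reference */
	d->payload = payload;
	return d;
}

static void datum_get(struct datum *d)
{
	atomic_fetch_add(&d->refcnt, 1);
}

static void datum_put(struct datum *d)
{
	/* analogue of atomic_dec_and_test(): free only on the last put */
	if (atomic_fetch_sub(&d->refcnt, 1) == 1) {
		printf("freeing datum %d\n", d->payload);
		free(d);
	}
}

int main(void)
{
	struct datum *d = datum_create(42);

	if (!d)
		return 1;
	datum_get(d);	/* a second user takes a reference */
	datum_put(d);	/* first put: still alive */
	datum_put(d);	/* last put: freed here */
	return 0;
}

The design point is that the "did I drop the last reference?" answer comes from the atomic operation itself rather than from a separate, racy read of the counter.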
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h index 2c199856c582..06a5c69dcf8b 100644 --- a/fs/jffs2/xattr.h +++ b/fs/jffs2/xattr.h | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */ | 17 | #define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */ |
18 | #define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */ | 18 | #define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */ |
19 | #define JFFS2_XFLAGS_DEAD (0x40) /* This datum is already dead */ | ||
20 | #define JFFS2_XFLAGS_INVALID (0x80) /* This datum contains crc error */ | ||
19 | 21 | ||
20 | struct jffs2_xattr_datum | 22 | struct jffs2_xattr_datum |
21 | { | 23 | { |
@@ -23,10 +25,10 @@ struct jffs2_xattr_datum | |||
23 | struct jffs2_raw_node_ref *node; | 25 | struct jffs2_raw_node_ref *node; |
24 | uint8_t class; | 26 | uint8_t class; |
25 | uint8_t flags; | 27 | uint8_t flags; |
26 | uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */ | 28 | uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */ |
27 | 29 | ||
28 | struct list_head xindex; /* chained from c->xattrindex[n] */ | 30 | struct list_head xindex; /* chained from c->xattrindex[n] */ |
29 | uint32_t refcnt; /* # of xattr_ref refers this */ | 31 | atomic_t refcnt; /* # of xattr_ref refers this */ |
30 | uint32_t xid; | 32 | uint32_t xid; |
31 | uint32_t version; | 33 | uint32_t version; |
32 | 34 | ||
@@ -47,6 +49,7 @@ struct jffs2_xattr_ref | |||
47 | uint8_t flags; /* Currently unused */ | 49 | uint8_t flags; /* Currently unused */ |
48 | u16 unused; | 50 | u16 unused; |
49 | 51 | ||
52 | uint32_t xseqno; | ||
50 | union { | 53 | union { |
51 | struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */ | 54 | struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */ |
52 | uint32_t ino; /* only used in scanning/building */ | 55 | uint32_t ino; /* only used in scanning/building */ |
@@ -58,6 +61,12 @@ struct jffs2_xattr_ref | |||
58 | struct jffs2_xattr_ref *next; /* chained from ic->xref_list */ | 61 | struct jffs2_xattr_ref *next; /* chained from ic->xref_list */ |
59 | }; | 62 | }; |
60 | 63 | ||
64 | #define XREF_DELETE_MARKER (0x00000001) | ||
65 | static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref) | ||
66 | { | ||
67 | return ((ref->xseqno & XREF_DELETE_MARKER) != 0); | ||
68 | } | ||
69 | |||
61 | #ifdef CONFIG_JFFS2_FS_XATTR | 70 | #ifdef CONFIG_JFFS2_FS_XATTR |
62 | 71 | ||
63 | extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c); | 72 | extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c); |
@@ -70,9 +79,13 @@ extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c | |||
70 | extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | 79 | extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); |
71 | extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | 80 | extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); |
72 | 81 | ||
73 | extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd); | 82 | extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, |
74 | extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref); | 83 | struct jffs2_raw_node_ref *raw); |
84 | extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, | ||
85 | struct jffs2_raw_node_ref *raw); | ||
75 | extern int jffs2_verify_xattr(struct jffs2_sb_info *c); | 86 | extern int jffs2_verify_xattr(struct jffs2_sb_info *c); |
87 | extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd); | ||
88 | extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref); | ||
76 | 89 | ||
77 | extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, | 90 | extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, |
78 | char *buffer, size_t size); | 91 | char *buffer, size_t size); |
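The new XREF_DELETE_MARKER bit and is_xattr_ref_dead() helper work because save_xattr_ref() advances c->highest_xseqno in steps of two, leaving bit 0 of every sequence number free to flag a dead xref. A standalone sketch of that low-bit tagging scheme, under the assumption that sequence numbers are only ever handed out in even increments (identifiers are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DELETE_MARKER 0x00000001u

static uint32_t highest_seqno;

static uint32_t next_seqno(int dead)
{
	uint32_t s = (highest_seqno += 2);	/* even values only */

	if (dead)
		s |= DELETE_MARKER;		/* bit 0 marks a dead reference */
	return s;
}

static int is_dead(uint32_t seqno)
{
	return (seqno & DELETE_MARKER) != 0;
}

int main(void)
{
	uint32_t live = next_seqno(0);
	uint32_t dead = next_seqno(1);

	printf("live=%u dead? %d\n", (unsigned)live, is_dead(live));
	printf("dead=%u dead? %d\n", (unsigned)dead, is_dead(dead));
	return 0;
}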
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 402005c35ab3..8ca9707be6c9 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -909,7 +909,7 @@ int __init nfs_init_directcache(void) | |||
909 | * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures | 909 | * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures |
910 | * | 910 | * |
911 | */ | 911 | */ |
912 | void __exit nfs_destroy_directcache(void) | 912 | void nfs_destroy_directcache(void) |
913 | { | 913 | { |
914 | if (kmem_cache_destroy(nfs_direct_cachep)) | 914 | if (kmem_cache_destroy(nfs_direct_cachep)) |
915 | printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n"); | 915 | printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n"); |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 51bc88b662fe..c5b916605fb0 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1132,7 +1132,7 @@ static int __init nfs_init_inodecache(void) | |||
1132 | return 0; | 1132 | return 0; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | static void __exit nfs_destroy_inodecache(void) | 1135 | static void nfs_destroy_inodecache(void) |
1136 | { | 1136 | { |
1137 | if (kmem_cache_destroy(nfs_inode_cachep)) | 1137 | if (kmem_cache_destroy(nfs_inode_cachep)) |
1138 | printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n"); | 1138 | printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n"); |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index bd2815e2dec1..4fe51c1292bb 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -31,15 +31,15 @@ extern struct svc_version nfs4_callback_version1; | |||
31 | 31 | ||
32 | /* pagelist.c */ | 32 | /* pagelist.c */ |
33 | extern int __init nfs_init_nfspagecache(void); | 33 | extern int __init nfs_init_nfspagecache(void); |
34 | extern void __exit nfs_destroy_nfspagecache(void); | 34 | extern void nfs_destroy_nfspagecache(void); |
35 | extern int __init nfs_init_readpagecache(void); | 35 | extern int __init nfs_init_readpagecache(void); |
36 | extern void __exit nfs_destroy_readpagecache(void); | 36 | extern void nfs_destroy_readpagecache(void); |
37 | extern int __init nfs_init_writepagecache(void); | 37 | extern int __init nfs_init_writepagecache(void); |
38 | extern void __exit nfs_destroy_writepagecache(void); | 38 | extern void nfs_destroy_writepagecache(void); |
39 | 39 | ||
40 | #ifdef CONFIG_NFS_DIRECTIO | 40 | #ifdef CONFIG_NFS_DIRECTIO |
41 | extern int __init nfs_init_directcache(void); | 41 | extern int __init nfs_init_directcache(void); |
42 | extern void __exit nfs_destroy_directcache(void); | 42 | extern void nfs_destroy_directcache(void); |
43 | #else | 43 | #else |
44 | #define nfs_init_directcache() (0) | 44 | #define nfs_init_directcache() (0) |
45 | #define nfs_destroy_directcache() do {} while(0) | 45 | #define nfs_destroy_directcache() do {} while(0) |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ef9429643ebc..d89f6fb3b3a3 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -390,7 +390,7 @@ int __init nfs_init_nfspagecache(void) | |||
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
392 | 392 | ||
393 | void __exit nfs_destroy_nfspagecache(void) | 393 | void nfs_destroy_nfspagecache(void) |
394 | { | 394 | { |
395 | if (kmem_cache_destroy(nfs_page_cachep)) | 395 | if (kmem_cache_destroy(nfs_page_cachep)) |
396 | printk(KERN_INFO "nfs_page: not all structures were freed\n"); | 396 | printk(KERN_INFO "nfs_page: not all structures were freed\n"); |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 41c2ffee24f5..32cf3773af0c 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -711,7 +711,7 @@ int __init nfs_init_readpagecache(void) | |||
711 | return 0; | 711 | return 0; |
712 | } | 712 | } |
713 | 713 | ||
714 | void __exit nfs_destroy_readpagecache(void) | 714 | void nfs_destroy_readpagecache(void) |
715 | { | 715 | { |
716 | mempool_destroy(nfs_rdata_mempool); | 716 | mempool_destroy(nfs_rdata_mempool); |
717 | if (kmem_cache_destroy(nfs_rdata_cachep)) | 717 | if (kmem_cache_destroy(nfs_rdata_cachep)) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b383fdd3a15c..8fccb9cb173b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1551,7 +1551,7 @@ int __init nfs_init_writepagecache(void) | |||
1551 | return 0; | 1551 | return 0; |
1552 | } | 1552 | } |
1553 | 1553 | ||
1554 | void __exit nfs_destroy_writepagecache(void) | 1554 | void nfs_destroy_writepagecache(void) |
1555 | { | 1555 | { |
1556 | mempool_destroy(nfs_commit_mempool); | 1556 | mempool_destroy(nfs_commit_mempool); |
1557 | mempool_destroy(nfs_wdata_mempool); | 1557 | mempool_destroy(nfs_wdata_mempool); |
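The NFS hunks drop __exit from the various cache-destroy helpers. A plausible reason for this kind of change, stated here as an assumption rather than taken from the patch itself, is that the destroy routines are also reachable from an __init error-unwind path, and __exit code is discarded when the code is built into the kernel rather than as a module. A hedged kernel-style sketch of that pattern (module and symbol names are invented for illustration):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_cache_up;

static int example_create_cache(void)
{
	example_cache_up = 1;
	return 0;
}

/* Deliberately not __exit: also called from the __init failure path below,
 * which would reference discarded code in a built-in (non-modular) build. */
static void example_destroy_cache(void)
{
	example_cache_up = 0;
}

static int example_start_service(void)
{
	return -ENOMEM;			/* pretend a later init step failed */
}

static int __init example_init(void)
{
	int err = example_create_cache();

	if (err)
		return err;
	err = example_start_service();
	if (err)
		example_destroy_cache();	/* error unwind uses the helper */
	return err;
}

static void __exit example_exit(void)
{
	example_destroy_cache();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");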
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1630b5670dc2..7c7d01672d35 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -123,7 +123,7 @@ static void release_stateid(struct nfs4_stateid *stp, int flags); | |||
123 | */ | 123 | */ |
124 | 124 | ||
125 | /* recall_lock protects the del_recall_lru */ | 125 | /* recall_lock protects the del_recall_lru */ |
126 | static spinlock_t recall_lock = SPIN_LOCK_UNLOCKED; | 126 | static DEFINE_SPINLOCK(recall_lock); |
127 | static struct list_head del_recall_lru; | 127 | static struct list_head del_recall_lru; |
128 | 128 | ||
129 | static void | 129 | static void |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 21f38accd039..1d26cfcd9f84 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -54,7 +54,7 @@ static DECLARE_RWSEM(o2hb_callback_sem); | |||
54 | * multiple hb threads are watching multiple regions. A node is live | 54 | * multiple hb threads are watching multiple regions. A node is live |
55 | * whenever any of the threads sees activity from the node in its region. | 55 | * whenever any of the threads sees activity from the node in its region. |
56 | */ | 56 | */ |
57 | static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED; | 57 | static DEFINE_SPINLOCK(o2hb_live_lock); |
58 | static struct list_head o2hb_live_slots[O2NM_MAX_NODES]; | 58 | static struct list_head o2hb_live_slots[O2NM_MAX_NODES]; |
59 | static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 59 | static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
60 | static LIST_HEAD(o2hb_node_events); | 60 | static LIST_HEAD(o2hb_node_events); |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 0f60cc0d3985..1591eb37a723 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -108,7 +108,7 @@ | |||
108 | ##args); \ | 108 | ##args); \ |
109 | } while (0) | 109 | } while (0) |
110 | 110 | ||
111 | static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED; | 111 | static DEFINE_RWLOCK(o2net_handler_lock); |
112 | static struct rb_root o2net_handler_tree = RB_ROOT; | 112 | static struct rb_root o2net_handler_tree = RB_ROOT; |
113 | 113 | ||
114 | static struct o2net_node o2net_nodes[O2NM_MAX_NODES]; | 114 | static struct o2net_node o2net_nodes[O2NM_MAX_NODES]; |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index ba27c5c5e959..b8c23f7ba67e 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -88,7 +88,7 @@ out_free: | |||
88 | * | 88 | * |
89 | */ | 89 | */ |
90 | 90 | ||
91 | spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED; | 91 | DEFINE_SPINLOCK(dlm_domain_lock); |
92 | LIST_HEAD(dlm_domains); | 92 | LIST_HEAD(dlm_domains); |
93 | static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); | 93 | static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); |
94 | 94 | ||
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index d6f89577e25f..5ca57ec650c7 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #define MLOG_MASK_PREFIX ML_DLM | 53 | #define MLOG_MASK_PREFIX ML_DLM |
54 | #include "cluster/masklog.h" | 54 | #include "cluster/masklog.h" |
55 | 55 | ||
56 | static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED; | 56 | static DEFINE_SPINLOCK(dlm_cookie_lock); |
57 | static u64 dlm_next_cookie = 1; | 57 | static u64 dlm_next_cookie = 1; |
58 | 58 | ||
59 | static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | 59 | static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index da399013516f..29b2845f370d 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -98,8 +98,8 @@ static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); | |||
98 | 98 | ||
99 | static u64 dlm_get_next_mig_cookie(void); | 99 | static u64 dlm_get_next_mig_cookie(void); |
100 | 100 | ||
101 | static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED; | 101 | static DEFINE_SPINLOCK(dlm_reco_state_lock); |
102 | static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED; | 102 | static DEFINE_SPINLOCK(dlm_mig_cookie_lock); |
103 | static u64 dlm_mig_cookie = 1; | 103 | static u64 dlm_mig_cookie = 1; |
104 | 104 | ||
105 | static u64 dlm_get_next_mig_cookie(void) | 105 | static u64 dlm_get_next_mig_cookie(void) |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 64cd52860c87..4acd37286bdd 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -242,7 +242,7 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type, | |||
242 | mlog_exit_void(); | 242 | mlog_exit_void(); |
243 | } | 243 | } |
244 | 244 | ||
245 | static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED; | 245 | static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); |
246 | 246 | ||
247 | static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res, | 247 | static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res, |
248 | struct ocfs2_dlm_debug *dlm_debug) | 248 | struct ocfs2_dlm_debug *dlm_debug) |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 3fe8781c22cb..910a601b2e98 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | #include "buffer_head_io.h" | 50 | #include "buffer_head_io.h" |
51 | 51 | ||
52 | spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED; | 52 | DEFINE_SPINLOCK(trans_inc_lock); |
53 | 53 | ||
54 | static int ocfs2_force_read_journal(struct inode *inode); | 54 | static int ocfs2_force_read_journal(struct inode *inode); |
55 | static int ocfs2_recover_node(struct ocfs2_super *osb, | 55 | static int ocfs2_recover_node(struct ocfs2_super *osb, |
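The ocfs2 and nfsd hunks replace run-time lock initializers (SPIN_LOCK_UNLOCKED, RW_LOCK_UNLOCKED) with DEFINE_SPINLOCK() and DEFINE_RWLOCK(), which declare and statically initialize the lock in a single statement. A minimal sketch of the converted form, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* replaces "= SPIN_LOCK_UNLOCKED" */
static DEFINE_RWLOCK(example_rwlock);	/* replaces "= RW_LOCK_UNLOCKED" */
static LIST_HEAD(example_list);

static void example_add(struct list_head *entry)
{
	spin_lock(&example_lock);
	list_add_tail(entry, &example_list);
	spin_unlock(&example_lock);
}

Beyond brevity, working cleanly with the spinlock debugging infrastructure is the motivation usually cited for these tree-wide conversions.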
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c index ee42765a8553..cf70fe2075b8 100644 --- a/fs/ocfs2/vote.c +++ b/fs/ocfs2/vote.c | |||
@@ -988,9 +988,7 @@ int ocfs2_request_mount_vote(struct ocfs2_super *osb) | |||
988 | } | 988 | } |
989 | 989 | ||
990 | bail: | 990 | bail: |
991 | if (request) | 991 | kfree(request); |
992 | kfree(request); | ||
993 | |||
994 | return status; | 992 | return status; |
995 | } | 993 | } |
996 | 994 | ||
@@ -1021,9 +1019,7 @@ int ocfs2_request_umount_vote(struct ocfs2_super *osb) | |||
1021 | } | 1019 | } |
1022 | 1020 | ||
1023 | bail: | 1021 | bail: |
1024 | if (request) | 1022 | kfree(request); |
1025 | kfree(request); | ||
1026 | |||
1027 | return status; | 1023 | return status; |
1028 | } | 1024 | } |
1029 | 1025 | ||
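The vote.c cleanup drops the "if (request)" guard because kfree(NULL) is a no-op, so the bail: label can free unconditionally whether or not the allocation ever happened. Userspace free(NULL) behaves the same way, which the sketch below leans on (function and variable names are invented):

#include <stdio.h>
#include <stdlib.h>

static int do_request(int fail_early)
{
	char *request = NULL;
	int status = -1;

	if (fail_early)
		goto bail;	/* request is still NULL here */

	request = malloc(64);
	if (!request)
		goto bail;
	status = 0;		/* ... use request ... */

bail:
	free(request);		/* safe even when request == NULL */
	return status;
}

int main(void)
{
	printf("early failure: %d\n", do_request(1));
	printf("normal path:   %d\n", do_request(0));
	return 0;
}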
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 0137ec4c1368..0a163a4f7764 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -122,6 +122,11 @@ struct mem_size_stats | |||
122 | unsigned long private_dirty; | 122 | unsigned long private_dirty; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) | ||
126 | { | ||
127 | return NULL; | ||
128 | } | ||
129 | |||
125 | static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss) | 130 | static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss) |
126 | { | 131 | { |
127 | struct proc_maps_private *priv = m->private; | 132 | struct proc_maps_private *priv = m->private; |
@@ -158,22 +163,23 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats | |||
158 | pad_len_spaces(m, len); | 163 | pad_len_spaces(m, len); |
159 | seq_path(m, file->f_vfsmnt, file->f_dentry, "\n"); | 164 | seq_path(m, file->f_vfsmnt, file->f_dentry, "\n"); |
160 | } else { | 165 | } else { |
161 | if (mm) { | 166 | const char *name = arch_vma_name(vma); |
162 | if (vma->vm_start <= mm->start_brk && | 167 | if (!name) { |
168 | if (mm) { | ||
169 | if (vma->vm_start <= mm->start_brk && | ||
163 | vma->vm_end >= mm->brk) { | 170 | vma->vm_end >= mm->brk) { |
164 | pad_len_spaces(m, len); | 171 | name = "[heap]"; |
165 | seq_puts(m, "[heap]"); | 172 | } else if (vma->vm_start <= mm->start_stack && |
166 | } else { | 173 | vma->vm_end >= mm->start_stack) { |
167 | if (vma->vm_start <= mm->start_stack && | 174 | name = "[stack]"; |
168 | vma->vm_end >= mm->start_stack) { | ||
169 | |||
170 | pad_len_spaces(m, len); | ||
171 | seq_puts(m, "[stack]"); | ||
172 | } | 175 | } |
176 | } else { | ||
177 | name = "[vdso]"; | ||
173 | } | 178 | } |
174 | } else { | 179 | } |
180 | if (name) { | ||
175 | pad_len_spaces(m, len); | 181 | pad_len_spaces(m, len); |
176 | seq_puts(m, "[vdso]"); | 182 | seq_puts(m, name); |
177 | } | 183 | } |
178 | } | 184 | } |
179 | seq_putc(m, '\n'); | 185 | seq_putc(m, '\n'); |
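The arch_vma_name() stub added to task_mmu.c is defined __attribute__((weak)), so the generic "return NULL" version is used unless some architecture supplies a strong definition of the same symbol at link time. A small userspace sketch of the weak-default idiom (the struct is opaque and the names are placeholders):

#include <stdio.h>

struct vm_area_struct;		/* opaque here */

/* weak default, as in fs/proc/task_mmu.c; a strong definition in another
 * object file would override it at link time */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	(void)vma;
	return NULL;
}

int main(void)
{
	const char *name = arch_vma_name(NULL);

	/* with no strong override linked in, the weak stub is used */
	printf("vma name: %s\n", name ? name : "[anonymous]");
	return 0;
}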
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index f2dbdf5a8769..259bd196099d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -605,39 +605,12 @@ static void ufs_set_inode_ops(struct inode *inode) | |||
605 | ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); | 605 | ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); |
606 | } | 606 | } |
607 | 607 | ||
608 | void ufs_read_inode (struct inode * inode) | 608 | static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) |
609 | { | 609 | { |
610 | struct ufs_inode_info *ufsi = UFS_I(inode); | 610 | struct ufs_inode_info *ufsi = UFS_I(inode); |
611 | struct super_block * sb; | 611 | struct super_block *sb = inode->i_sb; |
612 | struct ufs_sb_private_info * uspi; | ||
613 | struct ufs_inode * ufs_inode; | ||
614 | struct ufs2_inode *ufs2_inode; | ||
615 | struct buffer_head * bh; | ||
616 | mode_t mode; | 612 | mode_t mode; |
617 | unsigned i; | 613 | unsigned i; |
618 | unsigned flags; | ||
619 | |||
620 | UFSD("ENTER, ino %lu\n", inode->i_ino); | ||
621 | |||
622 | sb = inode->i_sb; | ||
623 | uspi = UFS_SB(sb)->s_uspi; | ||
624 | flags = UFS_SB(sb)->s_flags; | ||
625 | |||
626 | if (inode->i_ino < UFS_ROOTINO || | ||
627 | inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { | ||
628 | ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); | ||
629 | goto bad_inode; | ||
630 | } | ||
631 | |||
632 | bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); | ||
633 | if (!bh) { | ||
634 | ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); | ||
635 | goto bad_inode; | ||
636 | } | ||
637 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) | ||
638 | goto ufs2_inode; | ||
639 | |||
640 | ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino)); | ||
641 | 614 | ||
642 | /* | 615 | /* |
643 | * Copy data to the in-core inode. | 616 | * Copy data to the in-core inode. |
@@ -661,14 +634,11 @@ void ufs_read_inode (struct inode * inode) | |||
661 | inode->i_atime.tv_nsec = 0; | 634 | inode->i_atime.tv_nsec = 0; |
662 | inode->i_ctime.tv_nsec = 0; | 635 | inode->i_ctime.tv_nsec = 0; |
663 | inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); | 636 | inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); |
664 | inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */ | ||
665 | inode->i_version++; | ||
666 | ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); | 637 | ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); |
667 | ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen); | 638 | ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen); |
668 | ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); | 639 | ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); |
669 | ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); | 640 | ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); |
670 | ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; | 641 | |
671 | ufsi->i_dir_start_lookup = 0; | ||
672 | 642 | ||
673 | if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { | 643 | if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { |
674 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) | 644 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) |
@@ -677,24 +647,16 @@ void ufs_read_inode (struct inode * inode) | |||
677 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | 647 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) |
678 | ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; | 648 | ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; |
679 | } | 649 | } |
680 | ufsi->i_osync = 0; | 650 | } |
681 | |||
682 | ufs_set_inode_ops(inode); | ||
683 | |||
684 | brelse (bh); | ||
685 | |||
686 | UFSD("EXIT\n"); | ||
687 | return; | ||
688 | 651 | ||
689 | bad_inode: | 652 | static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) |
690 | make_bad_inode(inode); | 653 | { |
691 | return; | 654 | struct ufs_inode_info *ufsi = UFS_I(inode); |
655 | struct super_block *sb = inode->i_sb; | ||
656 | mode_t mode; | ||
657 | unsigned i; | ||
692 | 658 | ||
693 | ufs2_inode : | ||
694 | UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); | 659 | UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); |
695 | |||
696 | ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino)); | ||
697 | |||
698 | /* | 660 | /* |
699 | * Copy data to the in-core inode. | 661 | * Copy data to the in-core inode. |
700 | */ | 662 | */ |
@@ -717,26 +679,64 @@ ufs2_inode : | |||
717 | inode->i_atime.tv_nsec = 0; | 679 | inode->i_atime.tv_nsec = 0; |
718 | inode->i_ctime.tv_nsec = 0; | 680 | inode->i_ctime.tv_nsec = 0; |
719 | inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); | 681 | inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); |
720 | inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/ | ||
721 | |||
722 | inode->i_version++; | ||
723 | ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); | 682 | ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); |
724 | ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen); | 683 | ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen); |
725 | /* | 684 | /* |
726 | ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); | 685 | ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); |
727 | ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); | 686 | ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); |
728 | */ | 687 | */ |
729 | ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift; | ||
730 | 688 | ||
731 | if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { | 689 | if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { |
732 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) | 690 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) |
733 | ufsi->i_u1.u2_i_data[i] = | 691 | ufsi->i_u1.u2_i_data[i] = |
734 | ufs2_inode->ui_u2.ui_addr.ui_db[i]; | 692 | ufs2_inode->ui_u2.ui_addr.ui_db[i]; |
735 | } | 693 | } else { |
736 | else { | ||
737 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | 694 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) |
738 | ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; | 695 | ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; |
739 | } | 696 | } |
697 | } | ||
698 | |||
699 | void ufs_read_inode(struct inode * inode) | ||
700 | { | ||
701 | struct ufs_inode_info *ufsi = UFS_I(inode); | ||
702 | struct super_block * sb; | ||
703 | struct ufs_sb_private_info * uspi; | ||
704 | struct buffer_head * bh; | ||
705 | |||
706 | UFSD("ENTER, ino %lu\n", inode->i_ino); | ||
707 | |||
708 | sb = inode->i_sb; | ||
709 | uspi = UFS_SB(sb)->s_uspi; | ||
710 | |||
711 | if (inode->i_ino < UFS_ROOTINO || | ||
712 | inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { | ||
713 | ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", | ||
714 | inode->i_ino); | ||
715 | goto bad_inode; | ||
716 | } | ||
717 | |||
718 | bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); | ||
719 | if (!bh) { | ||
720 | ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", | ||
721 | inode->i_ino); | ||
722 | goto bad_inode; | ||
723 | } | ||
724 | if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { | ||
725 | struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; | ||
726 | |||
727 | ufs2_read_inode(inode, | ||
728 | ufs2_inode + ufs_inotofsbo(inode->i_ino)); | ||
729 | } else { | ||
730 | struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; | ||
731 | |||
732 | ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); | ||
733 | } | ||
734 | |||
735 | inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/ | ||
736 | inode->i_version++; | ||
737 | ufsi->i_lastfrag = | ||
738 | (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; | ||
739 | ufsi->i_dir_start_lookup = 0; | ||
740 | ufsi->i_osync = 0; | 740 | ufsi->i_osync = 0; |
741 | 741 | ||
742 | ufs_set_inode_ops(inode); | 742 | ufs_set_inode_ops(inode); |
@@ -745,6 +745,9 @@ ufs2_inode : | |||
745 | 745 | ||
746 | UFSD("EXIT\n"); | 746 | UFSD("EXIT\n"); |
747 | return; | 747 | return; |
748 | |||
749 | bad_inode: | ||
750 | make_bad_inode(inode); | ||
748 | } | 751 | } |
749 | 752 | ||
750 | static int ufs_update_inode(struct inode * inode, int do_sync) | 753 | static int ufs_update_inode(struct inode * inode, int do_sync) |
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 12810baeb5d4..d9180020de63 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
@@ -419,16 +419,15 @@ xfs_vn_link( | |||
419 | int error; | 419 | int error; |
420 | 420 | ||
421 | ip = old_dentry->d_inode; /* inode being linked to */ | 421 | ip = old_dentry->d_inode; /* inode being linked to */ |
422 | if (S_ISDIR(ip->i_mode)) | ||
423 | return -EPERM; | ||
424 | |||
425 | tdvp = vn_from_inode(dir); | 422 | tdvp = vn_from_inode(dir); |
426 | vp = vn_from_inode(ip); | 423 | vp = vn_from_inode(ip); |
427 | 424 | ||
425 | VN_HOLD(vp); | ||
428 | error = bhv_vop_link(tdvp, vp, dentry, NULL); | 426 | error = bhv_vop_link(tdvp, vp, dentry, NULL); |
429 | if (likely(!error)) { | 427 | if (unlikely(error)) { |
428 | VN_RELE(vp); | ||
429 | } else { | ||
430 | VMODIFY(tdvp); | 430 | VMODIFY(tdvp); |
431 | VN_HOLD(vp); | ||
432 | xfs_validate_fields(ip, &vattr); | 431 | xfs_validate_fields(ip, &vattr); |
433 | d_instantiate(dentry, ip); | 432 | d_instantiate(dentry, ip); |
434 | } | 433 | } |
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index aa26ab906c88..028eb17ec2ed 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h | |||
@@ -140,9 +140,7 @@ BUFFER_FNS(PrivateStart, unwritten); | |||
140 | #define current_pid() (current->pid) | 140 | #define current_pid() (current->pid) |
141 | #define current_fsuid(cred) (current->fsuid) | 141 | #define current_fsuid(cred) (current->fsuid) |
142 | #define current_fsgid(cred) (current->fsgid) | 142 | #define current_fsgid(cred) (current->fsgid) |
143 | #define current_set_flags(f) (current->flags |= (f)) | ||
144 | #define current_test_flags(f) (current->flags & (f)) | 143 | #define current_test_flags(f) (current->flags & (f)) |
145 | #define current_clear_flags(f) (current->flags & ~(f)) | ||
146 | #define current_set_flags_nested(sp, f) \ | 144 | #define current_set_flags_nested(sp, f) \ |
147 | (*(sp) = current->flags, current->flags |= (f)) | 145 | (*(sp) = current->flags, current->flags |= (f)) |
148 | #define current_clear_flags_nested(sp, f) \ | 146 | #define current_clear_flags_nested(sp, f) \ |
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 35c6a01963a7..c42b3221b20c 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h | |||
@@ -93,7 +93,7 @@ typedef enum { | |||
93 | */ | 93 | */ |
94 | static inline struct bhv_vnode *vn_from_inode(struct inode *inode) | 94 | static inline struct bhv_vnode *vn_from_inode(struct inode *inode) |
95 | { | 95 | { |
96 | return (bhv_vnode_t *)list_entry(inode, bhv_vnode_t, v_inode); | 96 | return container_of(inode, bhv_vnode_t, v_inode); |
97 | } | 97 | } |
98 | static inline struct inode *vn_to_inode(struct bhv_vnode *vnode) | 98 | static inline struct inode *vn_to_inode(struct bhv_vnode *vnode) |
99 | { | 99 | { |
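vn_from_inode() now uses container_of() instead of a cast through list_entry(); both recover the address of the enclosing structure from a pointer to an embedded member. A simplified userspace sketch of the idiom follows; the real kernel macro adds a typeof-based type check that is omitted here, and the structure layout is invented:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inode { unsigned long i_ino; };

struct vnode {
	int v_flag;
	struct inode v_inode;	/* embedded member */
};

static struct vnode *vn_from_inode(struct inode *inode)
{
	return container_of(inode, struct vnode, v_inode);
}

int main(void)
{
	struct vnode v = { .v_flag = 1, .v_inode = { .i_ino = 42 } };
	struct vnode *back = vn_from_inode(&v.v_inode);

	printf("recovered vnode flag=%d ino=%lu\n", back->v_flag,
	       back->v_inode.i_ino);
	return 0;
}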
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h index 1d8ff103201c..6e6e56fb352d 100644 --- a/fs/xfs/xfs_behavior.h +++ b/fs/xfs/xfs_behavior.h | |||
@@ -78,15 +78,12 @@ | |||
78 | * | 78 | * |
79 | */ | 79 | */ |
80 | 80 | ||
81 | struct bhv_head_lock; | ||
82 | |||
83 | /* | 81 | /* |
84 | * Behavior head. Head of the chain of behaviors. | 82 | * Behavior head. Head of the chain of behaviors. |
85 | * Contained within each virtualized object data structure. | 83 | * Contained within each virtualized object data structure. |
86 | */ | 84 | */ |
87 | typedef struct bhv_head { | 85 | typedef struct bhv_head { |
88 | struct bhv_desc *bh_first; /* first behavior in chain */ | 86 | struct bhv_desc *bh_first; /* first behavior in chain */ |
89 | struct bhv_head_lock *bh_lockp; /* pointer to lock info struct */ | ||
90 | } bhv_head_t; | 87 | } bhv_head_t; |
91 | 88 | ||
92 | /* | 89 | /* |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 5fa0adb7e173..86c1bf0bba9e 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -1961,9 +1961,9 @@ xfs_iunlink_remove( | |||
1961 | xfs_agino_t agino; | 1961 | xfs_agino_t agino; |
1962 | xfs_agino_t next_agino; | 1962 | xfs_agino_t next_agino; |
1963 | xfs_buf_t *last_ibp; | 1963 | xfs_buf_t *last_ibp; |
1964 | xfs_dinode_t *last_dip; | 1964 | xfs_dinode_t *last_dip = NULL; |
1965 | short bucket_index; | 1965 | short bucket_index; |
1966 | int offset, last_offset; | 1966 | int offset, last_offset = 0; |
1967 | int error; | 1967 | int error; |
1968 | int agi_ok; | 1968 | int agi_ok; |
1969 | 1969 | ||
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index d8f5d4cbe8b7..e730328636c3 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -1740,10 +1740,10 @@ xlog_write(xfs_mount_t * mp, | |||
1740 | xlog_in_core_t **commit_iclog, | 1740 | xlog_in_core_t **commit_iclog, |
1741 | uint flags) | 1741 | uint flags) |
1742 | { | 1742 | { |
1743 | xlog_t *log = mp->m_log; | 1743 | xlog_t *log = mp->m_log; |
1744 | xlog_ticket_t *ticket = (xlog_ticket_t *)tic; | 1744 | xlog_ticket_t *ticket = (xlog_ticket_t *)tic; |
1745 | xlog_in_core_t *iclog = NULL; /* ptr to current in-core log */ | ||
1745 | xlog_op_header_t *logop_head; /* ptr to log operation header */ | 1746 | xlog_op_header_t *logop_head; /* ptr to log operation header */ |
1746 | xlog_in_core_t *iclog; /* ptr to current in-core log */ | ||
1747 | __psint_t ptr; /* copy address into data region */ | 1747 | __psint_t ptr; /* copy address into data region */ |
1748 | int len; /* # xlog_write() bytes 2 still copy */ | 1748 | int len; /* # xlog_write() bytes 2 still copy */ |
1749 | int index; /* region index currently copying */ | 1749 | int index; /* region index currently copying */ |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 55b4237c2153..3cb678e3a132 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -990,6 +990,8 @@ xlog_find_zeroed( | |||
990 | xfs_daddr_t num_scan_bblks; | 990 | xfs_daddr_t num_scan_bblks; |
991 | int error, log_bbnum = log->l_logBBsize; | 991 | int error, log_bbnum = log->l_logBBsize; |
992 | 992 | ||
993 | *blk_no = 0; | ||
994 | |||
993 | /* check totally zeroed log */ | 995 | /* check totally zeroed log */ |
994 | bp = xlog_get_bp(log, 1); | 996 | bp = xlog_get_bp(log, 1); |
995 | if (!bp) | 997 | if (!bp) |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 10dbf203c62f..4be5c0b2d296 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -1721,15 +1721,14 @@ xfs_mount_log_sbunit( | |||
1721 | * is present to prevent thrashing). | 1721 | * is present to prevent thrashing). |
1722 | */ | 1722 | */ |
1723 | 1723 | ||
1724 | #ifdef CONFIG_HOTPLUG_CPU | ||
1724 | /* | 1725 | /* |
1725 | * hot-plug CPU notifier support. | 1726 | * hot-plug CPU notifier support. |
1726 | * | 1727 | * |
1727 | * We cannot use the hotcpu_register() function because it does | 1728 | * We need a notifier per filesystem as we need to be able to identify |
1728 | * not allow notifier instances. We need a notifier per filesystem | 1729 | * the filesystem to balance the counters out. This is achieved by |
1729 | * as we need to be able to identify the filesystem to balance | 1730 | * having a notifier block embedded in the xfs_mount_t and doing pointer |
1730 | * the counters out. This is achieved by having a notifier block | 1731 | * magic to get the mount pointer from the notifier block address. |
1731 | * embedded in the xfs_mount_t and doing pointer magic to get the | ||
1732 | * mount pointer from the notifier block address. | ||
1733 | */ | 1732 | */ |
1734 | STATIC int | 1733 | STATIC int |
1735 | xfs_icsb_cpu_notify( | 1734 | xfs_icsb_cpu_notify( |
@@ -1779,6 +1778,7 @@ xfs_icsb_cpu_notify( | |||
1779 | 1778 | ||
1780 | return NOTIFY_OK; | 1779 | return NOTIFY_OK; |
1781 | } | 1780 | } |
1781 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
1782 | 1782 | ||
1783 | int | 1783 | int |
1784 | xfs_icsb_init_counters( | 1784 | xfs_icsb_init_counters( |
@@ -1791,9 +1791,11 @@ xfs_icsb_init_counters( | |||
1791 | if (mp->m_sb_cnts == NULL) | 1791 | if (mp->m_sb_cnts == NULL) |
1792 | return -ENOMEM; | 1792 | return -ENOMEM; |
1793 | 1793 | ||
1794 | #ifdef CONFIG_HOTPLUG_CPU | ||
1794 | mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; | 1795 | mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; |
1795 | mp->m_icsb_notifier.priority = 0; | 1796 | mp->m_icsb_notifier.priority = 0; |
1796 | register_cpu_notifier(&mp->m_icsb_notifier); | 1797 | register_hotcpu_notifier(&mp->m_icsb_notifier); |
1798 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
1797 | 1799 | ||
1798 | for_each_online_cpu(i) { | 1800 | for_each_online_cpu(i) { |
1799 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); | 1801 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
@@ -1812,7 +1814,7 @@ xfs_icsb_destroy_counters( | |||
1812 | xfs_mount_t *mp) | 1814 | xfs_mount_t *mp) |
1813 | { | 1815 | { |
1814 | if (mp->m_sb_cnts) { | 1816 | if (mp->m_sb_cnts) { |
1815 | unregister_cpu_notifier(&mp->m_icsb_notifier); | 1817 | unregister_hotcpu_notifier(&mp->m_icsb_notifier); |
1816 | free_percpu(mp->m_sb_cnts); | 1818 | free_percpu(mp->m_sb_cnts); |
1817 | } | 1819 | } |
1818 | } | 1820 | } |
@@ -2026,7 +2028,7 @@ xfs_icsb_balance_counter( | |||
2026 | xfs_sb_field_t field, | 2028 | xfs_sb_field_t field, |
2027 | int flags) | 2029 | int flags) |
2028 | { | 2030 | { |
2029 | uint64_t count, resid = 0; | 2031 | uint64_t count, resid; |
2030 | int weight = num_online_cpus(); | 2032 | int weight = num_online_cpus(); |
2031 | int s; | 2033 | int s; |
2032 | 2034 | ||
@@ -2058,6 +2060,7 @@ xfs_icsb_balance_counter( | |||
2058 | break; | 2060 | break; |
2059 | default: | 2061 | default: |
2060 | BUG(); | 2062 | BUG(); |
2063 | count = resid = 0; /* quiet, gcc */ | ||
2061 | break; | 2064 | break; |
2062 | } | 2065 | } |
2063 | 2066 | ||
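
The comment rewrite above keeps the key point: each xfs_mount_t embeds its own notifier_block, and the callback recovers the owning mount from that embedded block. Together with the new CONFIG_HOTPLUG_CPU guards and the register_hotcpu_notifier()/unregister_hotcpu_notifier() wrappers, the pattern looks roughly like the sketch below (my_mount and friends are placeholder names, not the XFS symbols):

    #include <linux/cpu.h>
    #include <linux/notifier.h>
    #include <linux/kernel.h>

    struct my_mount {
        struct notifier_block cpu_notifier;    /* embedded, one per mount */
        /* ... per-cpu superblock counters ... */
    };

    static int my_cpu_notify(struct notifier_block *nb,
                             unsigned long action, void *hcpu)
    {
        /* "pointer magic": recover the mount from the embedded block */
        struct my_mount *mp = container_of(nb, struct my_mount, cpu_notifier);

        switch (action) {
        case CPU_ONLINE:
        case CPU_DEAD:
            /* rebalance mp's counters for the CPU in 'hcpu' */
            pr_debug("balancing counters for mount %p\n", mp);
            break;
        }
        return NOTIFY_OK;
    }

    static void my_mount_init(struct my_mount *mp)
    {
        mp->cpu_notifier.notifier_call = my_cpu_notify;
        mp->cpu_notifier.priority = 0;
        register_hotcpu_notifier(&mp->cpu_notifier); /* no-op without CPU hotplug */
    }
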
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 0c1e42b037ef..5a0b678956e0 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -1929,7 +1929,7 @@ xfs_growfs_rt( | |||
1929 | /* | 1929 | /* |
1930 | * Initial error checking. | 1930 | * Initial error checking. |
1931 | */ | 1931 | */ |
1932 | if (mp->m_rtdev_targp || mp->m_rbmip == NULL || | 1932 | if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || |
1933 | (nrblocks = in->newblocks) <= sbp->sb_rblocks || | 1933 | (nrblocks = in->newblocks) <= sbp->sb_rblocks || |
1934 | (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) | 1934 | (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) |
1935 | return XFS_ERROR(EINVAL); | 1935 | return XFS_ERROR(EINVAL); |
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index cb65c3a603f5..9dc88b380608 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -338,8 +338,6 @@ typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *); | |||
338 | typedef struct xfs_trans { | 338 | typedef struct xfs_trans { |
339 | unsigned int t_magic; /* magic number */ | 339 | unsigned int t_magic; /* magic number */ |
340 | xfs_log_callback_t t_logcb; /* log callback struct */ | 340 | xfs_log_callback_t t_logcb; /* log callback struct */ |
341 | struct xfs_trans *t_forw; /* async list pointers */ | ||
342 | struct xfs_trans *t_back; /* async list pointers */ | ||
343 | unsigned int t_type; /* transaction type */ | 341 | unsigned int t_type; /* transaction type */ |
344 | unsigned int t_log_res; /* amt of log space resvd */ | 342 | unsigned int t_log_res; /* amt of log space resvd */ |
345 | unsigned int t_log_count; /* count for perm log res */ | 343 | unsigned int t_log_count; /* count for perm log res */ |
@@ -364,9 +362,11 @@ typedef struct xfs_trans { | |||
364 | long t_res_fdblocks_delta; /* on-disk only chg */ | 362 | long t_res_fdblocks_delta; /* on-disk only chg */ |
365 | long t_frextents_delta;/* superblock freextents chg*/ | 363 | long t_frextents_delta;/* superblock freextents chg*/ |
366 | long t_res_frextents_delta; /* on-disk only chg */ | 364 | long t_res_frextents_delta; /* on-disk only chg */ |
365 | #ifdef DEBUG | ||
367 | long t_ag_freeblks_delta; /* debugging counter */ | 366 | long t_ag_freeblks_delta; /* debugging counter */ |
368 | long t_ag_flist_delta; /* debugging counter */ | 367 | long t_ag_flist_delta; /* debugging counter */ |
369 | long t_ag_btree_delta; /* debugging counter */ | 368 | long t_ag_btree_delta; /* debugging counter */ |
369 | #endif | ||
370 | long t_dblocks_delta;/* superblock dblocks change */ | 370 | long t_dblocks_delta;/* superblock dblocks change */ |
371 | long t_agcount_delta;/* superblock agcount change */ | 371 | long t_agcount_delta;/* superblock agcount change */ |
372 | long t_imaxpct_delta;/* superblock imaxpct change */ | 372 | long t_imaxpct_delta;/* superblock imaxpct change */ |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 00a6b7dc24a0..23cfa5837728 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -2603,8 +2603,7 @@ xfs_link( | |||
2603 | vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); | 2603 | vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); |
2604 | 2604 | ||
2605 | target_namelen = VNAMELEN(dentry); | 2605 | target_namelen = VNAMELEN(dentry); |
2606 | if (VN_ISDIR(src_vp)) | 2606 | ASSERT(!VN_ISDIR(src_vp)); |
2607 | return XFS_ERROR(EPERM); | ||
2608 | 2607 | ||
2609 | sip = xfs_vtoi(src_vp); | 2608 | sip = xfs_vtoi(src_vp); |
2610 | tdp = XFS_BHVTOI(target_dir_bdp); | 2609 | tdp = XFS_BHVTOI(target_dir_bdp); |
@@ -2699,9 +2698,8 @@ xfs_link( | |||
2699 | xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); | 2698 | xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); |
2700 | 2699 | ||
2701 | error = xfs_bumplink(tp, sip); | 2700 | error = xfs_bumplink(tp, sip); |
2702 | if (error) { | 2701 | if (error) |
2703 | goto abort_return; | 2702 | goto abort_return; |
2704 | } | ||
2705 | 2703 | ||
2706 | /* | 2704 | /* |
2707 | * If this is a synchronous mount, make sure that the | 2705 | * If this is a synchronous mount, make sure that the |
@@ -2719,9 +2717,8 @@ xfs_link( | |||
2719 | } | 2717 | } |
2720 | 2718 | ||
2721 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); | 2719 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); |
2722 | if (error) { | 2720 | if (error) |
2723 | goto std_return; | 2721 | goto std_return; |
2724 | } | ||
2725 | 2722 | ||
2726 | /* Fall through to std_return with error = 0. */ | 2723 | /* Fall through to std_return with error = 0. */ |
2727 | std_return: | 2724 | std_return: |
@@ -2742,6 +2739,8 @@ std_return: | |||
2742 | xfs_trans_cancel(tp, cancel_flags); | 2739 | xfs_trans_cancel(tp, cancel_flags); |
2743 | goto std_return; | 2740 | goto std_return; |
2744 | } | 2741 | } |
2742 | |||
2743 | |||
2745 | /* | 2744 | /* |
2746 | * xfs_mkdir | 2745 | * xfs_mkdir |
2747 | * | 2746 | * |
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h index dba70c62a16c..457c34b6eb09 100644 --- a/include/asm-alpha/core_t2.h +++ b/include/asm-alpha/core_t2.h | |||
@@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr) | |||
435 | set_hae(msb); \ | 435 | set_hae(msb); \ |
436 | } | 436 | } |
437 | 437 | ||
438 | static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED; | 438 | static DEFINE_SPINLOCK(t2_hae_lock); |
439 | 439 | ||
440 | __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) | 440 | __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) |
441 | { | 441 | { |
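
SPIN_LOCK_UNLOCKED as a static initializer was being retired in favour of DEFINE_SPINLOCK(), which declares and initializes the lock in one step and is friendlier to the spinlock debugging work of the time. The equivalent pattern, with a made-up lock name:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_hae_lock);    /* illustrative name only */

    static void my_update(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&my_hae_lock, flags);
        /* ... touch state shared with interrupt context ... */
        spin_unlock_irqrestore(&my_hae_lock, flags);
    }
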
diff --git a/include/asm-arm/arch-s3c2410/regs-nand.h b/include/asm-arm/arch-s3c2410/regs-nand.h index 7cff235e667a..c1470c695c33 100644 --- a/include/asm-arm/arch-s3c2410/regs-nand.h +++ b/include/asm-arm/arch-s3c2410/regs-nand.h | |||
@@ -39,10 +39,19 @@ | |||
39 | #define S3C2440_NFESTAT1 S3C2410_NFREG(0x28) | 39 | #define S3C2440_NFESTAT1 S3C2410_NFREG(0x28) |
40 | #define S3C2440_NFMECC0 S3C2410_NFREG(0x2C) | 40 | #define S3C2440_NFMECC0 S3C2410_NFREG(0x2C) |
41 | #define S3C2440_NFMECC1 S3C2410_NFREG(0x30) | 41 | #define S3C2440_NFMECC1 S3C2410_NFREG(0x30) |
42 | #define S3C2440_NFSECC S3C2410_NFREG(0x34) | 42 | #define S3C2440_NFSECC S3C2410_NFREG(0x34) |

43 | #define S3C2440_NFSBLK S3C2410_NFREG(0x38) | 43 | #define S3C2440_NFSBLK S3C2410_NFREG(0x38) |
44 | #define S3C2440_NFEBLK S3C2410_NFREG(0x3C) | 44 | #define S3C2440_NFEBLK S3C2410_NFREG(0x3C) |
45 | 45 | ||
46 | #define S3C2412_NFSBLK S3C2410_NFREG(0x20) | ||
47 | #define S3C2412_NFEBLK S3C2410_NFREG(0x24) | ||
48 | #define S3C2412_NFSTAT S3C2410_NFREG(0x28) | ||
49 | #define S3C2412_NFMECC_ERR0 S3C2410_NFREG(0x2C) | ||
50 | #define S3C2412_NFMECC_ERR1 S3C2410_NFREG(0x30) | ||
51 | #define S3C2412_NFMECC0 S3C2410_NFREG(0x34) | ||
52 | #define S3C2412_NFMECC1 S3C2410_NFREG(0x38) | ||
53 | #define S3C2412_NFSECC S3C2410_NFREG(0x3C) | ||
54 | |||
46 | #define S3C2410_NFCONF_EN (1<<15) | 55 | #define S3C2410_NFCONF_EN (1<<15) |
47 | #define S3C2410_NFCONF_512BYTE (1<<14) | 56 | #define S3C2410_NFCONF_512BYTE (1<<14) |
48 | #define S3C2410_NFCONF_4STEP (1<<13) | 57 | #define S3C2410_NFCONF_4STEP (1<<13) |
@@ -77,5 +86,42 @@ | |||
77 | #define S3C2440_NFSTAT_RnB_CHANGE (1<<2) | 86 | #define S3C2440_NFSTAT_RnB_CHANGE (1<<2) |
78 | #define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3) | 87 | #define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3) |
79 | 88 | ||
89 | #define S3C2412_NFCONF_NANDBOOT (1<<31) | ||
90 | #define S3C2412_NFCONF_ECCCLKCON (1<<30) | ||
91 | #define S3C2412_NFCONF_ECC_MLC (1<<24) | ||
92 | #define S3C2412_NFCONF_TACLS_MASK (7<<12) /* 1 extra bit of Tacls */ | ||
93 | |||
94 | #define S3C2412_NFCONT_ECC4_DIRWR (1<<18) | ||
95 | #define S3C2412_NFCONT_LOCKTIGHT (1<<17) | ||
96 | #define S3C2412_NFCONT_SOFTLOCK (1<<16) | ||
97 | #define S3C2412_NFCONT_ECC4_ENCINT (1<<13) | ||
98 | #define S3C2412_NFCONT_ECC4_DECINT (1<<12) | ||
99 | #define S3C2412_NFCONT_MAIN_ECC_LOCK (1<<7) | ||
100 | #define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5) | ||
101 | #define S3C2412_NFCONT_nFCE1 (1<<2) | ||
102 | #define S3C2412_NFCONT_nFCE0 (1<<1) | ||
103 | |||
104 | #define S3C2412_NFSTAT_ECC_ENCDONE (1<<7) | ||
105 | #define S3C2412_NFSTAT_ECC_DECDONE (1<<6) | ||
106 | #define S3C2412_NFSTAT_ILLEGAL_ACCESS (1<<5) | ||
107 | #define S3C2412_NFSTAT_RnB_CHANGE (1<<4) | ||
108 | #define S3C2412_NFSTAT_nFCE1 (1<<3) | ||
109 | #define S3C2412_NFSTAT_nFCE0 (1<<2) | ||
110 | #define S3C2412_NFSTAT_Res1 (1<<1) | ||
111 | #define S3C2412_NFSTAT_READY (1<<0) | ||
112 | |||
113 | #define S3C2412_NFECCERR_SERRDATA(x) (((x) >> 21) & 0xf) | ||
114 | #define S3C2412_NFECCERR_SERRBIT(x) (((x) >> 18) & 0x7) | ||
115 | #define S3C2412_NFECCERR_MERRDATA(x) (((x) >> 7) & 0x3ff) | ||
116 | #define S3C2412_NFECCERR_MERRBIT(x) (((x) >> 4) & 0x7) | ||
117 | #define S3C2412_NFECCERR_SPARE_ERR(x) (((x) >> 2) & 0x3) | ||
118 | #define S3C2412_NFECCERR_MAIN_ERR(x) (((x) >> 2) & 0x3) | ||
119 | #define S3C2412_NFECCERR_NONE (0) | ||
120 | #define S3C2412_NFECCERR_1BIT (1) | ||
121 | #define S3C2412_NFECCERR_MULTIBIT (2) | ||
122 | #define S3C2412_NFECCERR_ECCAREA (3) | ||
123 | |||
124 | |||
125 | |||
80 | #endif /* __ASM_ARM_REGS_NAND */ | 126 | #endif /* __ASM_ARM_REGS_NAND */ |
81 | 127 | ||
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 845cb67ad8ea..8ceab7bcd8b4 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -51,4 +51,10 @@ | |||
51 | __ret; \ | 51 | __ret; \ |
52 | }) | 52 | }) |
53 | 53 | ||
54 | #ifdef CONFIG_SMP | ||
55 | # define WARN_ON_SMP(x) WARN_ON(x) | ||
56 | #else | ||
57 | # define WARN_ON_SMP(x) do { } while (0) | ||
58 | #endif | ||
59 | |||
54 | #endif | 60 | #endif |
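
WARN_ON_SMP(x) gives callers a check that fires only on SMP builds; note that the UP stub is an empty statement, so the argument is not even evaluated there. A small, hypothetical use:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    struct foo {            /* made-up structure for illustration */
        spinlock_t lock;
        int count;
    };

    /* Caller must hold f->lock.  spin_is_locked() is only meaningful on SMP,
     * so WARN_ON_SMP() lets UP kernels drop the check entirely. */
    static void foo_bump_locked(struct foo *f)
    {
        WARN_ON_SMP(!spin_is_locked(&f->lock));
        f->count++;
    }
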
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h index e7252c216ca8..b1bc7b1b64b0 100644 --- a/include/asm-i386/cpu.h +++ b/include/asm-i386/cpu.h | |||
@@ -7,8 +7,6 @@ | |||
7 | #include <linux/nodemask.h> | 7 | #include <linux/nodemask.h> |
8 | #include <linux/percpu.h> | 8 | #include <linux/percpu.h> |
9 | 9 | ||
10 | #include <asm/node.h> | ||
11 | |||
12 | struct i386_cpu { | 10 | struct i386_cpu { |
13 | struct cpu cpu; | 11 | struct cpu cpu; |
14 | }; | 12 | }; |
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h index 4153d80e4d2b..1eac92cb5b16 100644 --- a/include/asm-i386/elf.h +++ b/include/asm-i386/elf.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
11 | #include <asm/system.h> /* for savesegment */ | 11 | #include <asm/system.h> /* for savesegment */ |
12 | #include <asm/auxvec.h> | 12 | #include <asm/auxvec.h> |
13 | #include <asm/desc.h> | ||
13 | 14 | ||
14 | #include <linux/utsname.h> | 15 | #include <linux/utsname.h> |
15 | 16 | ||
@@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct | |||
129 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | 130 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) |
130 | #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) | 131 | #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) |
131 | 132 | ||
132 | #define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL)) | 133 | #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) |
133 | #define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE) | 134 | #define VDSO_BASE ((unsigned long)current->mm->context.vdso) |
134 | #define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall) | 135 | |
136 | #ifdef CONFIG_COMPAT_VDSO | ||
137 | # define VDSO_COMPAT_BASE VDSO_HIGH_BASE | ||
138 | # define VDSO_PRELINK VDSO_HIGH_BASE | ||
139 | #else | ||
140 | # define VDSO_COMPAT_BASE VDSO_BASE | ||
141 | # define VDSO_PRELINK 0 | ||
142 | #endif | ||
143 | |||
144 | #define VDSO_COMPAT_SYM(x) \ | ||
145 | (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK) | ||
146 | |||
147 | #define VDSO_SYM(x) \ | ||
148 | (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK) | ||
149 | |||
150 | #define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) | ||
151 | #define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE) | ||
152 | |||
135 | extern void __kernel_vsyscall; | 153 | extern void __kernel_vsyscall; |
136 | 154 | ||
155 | #define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) | ||
156 | |||
157 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
158 | struct linux_binprm; | ||
159 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
160 | int executable_stack); | ||
161 | |||
162 | extern unsigned int vdso_enabled; | ||
163 | |||
137 | #define ARCH_DLINFO \ | 164 | #define ARCH_DLINFO \ |
138 | do { \ | 165 | do if (vdso_enabled) { \ |
139 | NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \ | 166 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ |
140 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \ | 167 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \ |
141 | } while (0) | 168 | } while (0) |
142 | 169 | ||
143 | /* | 170 | /* |
@@ -148,15 +175,15 @@ do { \ | |||
148 | * Dumping its extra ELF program headers includes all the other information | 175 | * Dumping its extra ELF program headers includes all the other information |
149 | * a debugger needs to easily find how the vsyscall DSO was being used. | 176 | * a debugger needs to easily find how the vsyscall DSO was being used. |
150 | */ | 177 | */ |
151 | #define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum) | 178 | #define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum) |
152 | #define ELF_CORE_WRITE_EXTRA_PHDRS \ | 179 | #define ELF_CORE_WRITE_EXTRA_PHDRS \ |
153 | do { \ | 180 | do { \ |
154 | const struct elf_phdr *const vsyscall_phdrs = \ | 181 | const struct elf_phdr *const vsyscall_phdrs = \ |
155 | (const struct elf_phdr *) (VSYSCALL_BASE \ | 182 | (const struct elf_phdr *) (VDSO_HIGH_BASE \ |
156 | + VSYSCALL_EHDR->e_phoff); \ | 183 | + VDSO_HIGH_EHDR->e_phoff); \ |
157 | int i; \ | 184 | int i; \ |
158 | Elf32_Off ofs = 0; \ | 185 | Elf32_Off ofs = 0; \ |
159 | for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ | 186 | for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \ |
160 | struct elf_phdr phdr = vsyscall_phdrs[i]; \ | 187 | struct elf_phdr phdr = vsyscall_phdrs[i]; \ |
161 | if (phdr.p_type == PT_LOAD) { \ | 188 | if (phdr.p_type == PT_LOAD) { \ |
162 | BUG_ON(ofs != 0); \ | 189 | BUG_ON(ofs != 0); \ |
@@ -174,10 +201,10 @@ do { \ | |||
174 | #define ELF_CORE_WRITE_EXTRA_DATA \ | 201 | #define ELF_CORE_WRITE_EXTRA_DATA \ |
175 | do { \ | 202 | do { \ |
176 | const struct elf_phdr *const vsyscall_phdrs = \ | 203 | const struct elf_phdr *const vsyscall_phdrs = \ |
177 | (const struct elf_phdr *) (VSYSCALL_BASE \ | 204 | (const struct elf_phdr *) (VDSO_HIGH_BASE \ |
178 | + VSYSCALL_EHDR->e_phoff); \ | 205 | + VDSO_HIGH_EHDR->e_phoff); \ |
179 | int i; \ | 206 | int i; \ |
180 | for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ | 207 | for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \ |
181 | if (vsyscall_phdrs[i].p_type == PT_LOAD) \ | 208 | if (vsyscall_phdrs[i].p_type == PT_LOAD) \ |
182 | DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ | 209 | DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ |
183 | PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ | 210 | PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ |
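
The reworked ARCH_DLINFO uses the "do if (cond) { ... } while (0)" shape: the aux entries are emitted only when vdso_enabled is set, while the macro still expands to a single statement that stays safe under an un-braced if/else at the call site. A stand-alone illustration of that macro idiom (not the kernel macro itself):

    #include <stdio.h>

    static int vdso_enabled = 1;

    #define EMIT_AUX_ENTRIES()          \
    do if (vdso_enabled) {              \
        puts("AT_SYSINFO");             \
        puts("AT_SYSINFO_EHDR");        \
    } while (0)

    int main(void)
    {
        if (vdso_enabled)
            EMIT_AUX_ENTRIES();    /* expands safely as one statement */
        else
            puts("no vDSO aux entries");
        return 0;
    }
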
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h index f7e068f4d2f9..a48cc3f7ccc6 100644 --- a/include/asm-i386/fixmap.h +++ b/include/asm-i386/fixmap.h | |||
@@ -51,7 +51,7 @@ | |||
51 | */ | 51 | */ |
52 | enum fixed_addresses { | 52 | enum fixed_addresses { |
53 | FIX_HOLE, | 53 | FIX_HOLE, |
54 | FIX_VSYSCALL, | 54 | FIX_VDSO, |
55 | #ifdef CONFIG_X86_LOCAL_APIC | 55 | #ifdef CONFIG_X86_LOCAL_APIC |
56 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | 56 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ |
57 | #endif | 57 | #endif |
@@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx, | |||
115 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | 115 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) |
116 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | 116 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) |
117 | 117 | ||
118 | /* | ||
119 | * This is the range that is readable by user mode, and things | ||
120 | * acting like user mode such as get_user_pages. | ||
121 | */ | ||
122 | #define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL)) | ||
123 | #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) | ||
124 | |||
125 | |||
126 | extern void __this_fixmap_does_not_exist(void); | 118 | extern void __this_fixmap_does_not_exist(void); |
127 | 119 | ||
128 | /* | 120 | /* |
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h index f431a0b86d4c..8358dd3df7aa 100644 --- a/include/asm-i386/mmu.h +++ b/include/asm-i386/mmu.h | |||
@@ -12,6 +12,7 @@ typedef struct { | |||
12 | int size; | 12 | int size; |
13 | struct semaphore sem; | 13 | struct semaphore sem; |
14 | void *ldt; | 14 | void *ldt; |
15 | void *vdso; | ||
15 | } mm_context_t; | 16 | } mm_context_t; |
16 | 17 | ||
17 | #endif | 18 | #endif |
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h deleted file mode 100644 index e13c6ffa72ae..000000000000 --- a/include/asm-i386/node.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | #ifndef _ASM_I386_NODE_H_ | ||
2 | #define _ASM_I386_NODE_H_ | ||
3 | |||
4 | #include <linux/device.h> | ||
5 | #include <linux/mmzone.h> | ||
6 | #include <linux/node.h> | ||
7 | #include <linux/topology.h> | ||
8 | #include <linux/nodemask.h> | ||
9 | |||
10 | struct i386_node { | ||
11 | struct node node; | ||
12 | }; | ||
13 | extern struct i386_node node_devices[MAX_NUMNODES]; | ||
14 | |||
15 | static inline int arch_register_node(int num){ | ||
16 | int p_node; | ||
17 | struct node *parent = NULL; | ||
18 | |||
19 | if (!node_online(num)) | ||
20 | return 0; | ||
21 | p_node = parent_node(num); | ||
22 | |||
23 | if (p_node != num) | ||
24 | parent = &node_devices[p_node].node; | ||
25 | |||
26 | return register_node(&node_devices[num].node, num, parent); | ||
27 | } | ||
28 | |||
29 | #endif /* _ASM_I386_NODE_H_ */ | ||
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index e3a552fa5538..f5bf544c729a 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h | |||
@@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
96 | 96 | ||
97 | #ifndef __ASSEMBLY__ | 97 | #ifndef __ASSEMBLY__ |
98 | 98 | ||
99 | struct vm_area_struct; | ||
100 | |||
99 | /* | 101 | /* |
100 | * This much address space is reserved for vmalloc() and iomap() | 102 | * This much address space is reserved for vmalloc() and iomap() |
101 | * as well as fixmap mappings. | 103 | * as well as fixmap mappings. |
@@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr); | |||
139 | #include <asm-generic/memory_model.h> | 141 | #include <asm-generic/memory_model.h> |
140 | #include <asm-generic/page.h> | 142 | #include <asm-generic/page.h> |
141 | 143 | ||
144 | #define __HAVE_ARCH_GATE_AREA 1 | ||
142 | #endif /* __KERNEL__ */ | 145 | #endif /* __KERNEL__ */ |
143 | 146 | ||
144 | #endif /* _I386_PAGE_H */ | 147 | #endif /* _I386_PAGE_H */ |
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 55ea992da329..b32346d62e10 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h | |||
@@ -71,8 +71,12 @@ struct cpuinfo_x86 { | |||
71 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | 71 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ |
72 | #endif | 72 | #endif |
73 | unsigned char x86_max_cores; /* cpuid returned max cores value */ | 73 | unsigned char x86_max_cores; /* cpuid returned max cores value */ |
74 | unsigned char booted_cores; /* number of cores as seen by OS */ | ||
75 | unsigned char apicid; | 74 | unsigned char apicid; |
75 | #ifdef CONFIG_SMP | ||
76 | unsigned char booted_cores; /* number of cores as seen by OS */ | ||
77 | __u8 phys_proc_id; /* Physical processor id. */ | ||
78 | __u8 cpu_core_id; /* Core id */ | ||
79 | #endif | ||
76 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 80 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
77 | 81 | ||
78 | #define X86_VENDOR_INTEL 0 | 82 | #define X86_VENDOR_INTEL 0 |
@@ -104,8 +108,6 @@ extern struct cpuinfo_x86 cpu_data[]; | |||
104 | #define current_cpu_data boot_cpu_data | 108 | #define current_cpu_data boot_cpu_data |
105 | #endif | 109 | #endif |
106 | 110 | ||
107 | extern int phys_proc_id[NR_CPUS]; | ||
108 | extern int cpu_core_id[NR_CPUS]; | ||
109 | extern int cpu_llc_id[NR_CPUS]; | 111 | extern int cpu_llc_id[NR_CPUS]; |
110 | extern char ignore_fpu_irq; | 112 | extern char ignore_fpu_irq; |
111 | 113 | ||
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index fdbc7f422ea5..2833fa2c0dd0 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h | |||
@@ -37,6 +37,7 @@ struct thread_info { | |||
37 | 0-0xBFFFFFFF for user-thead | 37 | 0-0xBFFFFFFF for user-thead |
38 | 0-0xFFFFFFFF for kernel-thread | 38 | 0-0xFFFFFFFF for kernel-thread |
39 | */ | 39 | */ |
40 | void *sysenter_return; | ||
40 | struct restart_block restart_block; | 41 | struct restart_block restart_block; |
41 | 42 | ||
42 | unsigned long previous_esp; /* ESP of the previous stack in case | 43 | unsigned long previous_esp; /* ESP of the previous stack in case |
@@ -83,17 +84,15 @@ struct thread_info { | |||
83 | #define init_stack (init_thread_union.stack) | 84 | #define init_stack (init_thread_union.stack) |
84 | 85 | ||
85 | 86 | ||
87 | /* how to get the current stack pointer from C */ | ||
88 | register unsigned long current_stack_pointer asm("esp") __attribute_used__; | ||
89 | |||
86 | /* how to get the thread information struct from C */ | 90 | /* how to get the thread information struct from C */ |
87 | static inline struct thread_info *current_thread_info(void) | 91 | static inline struct thread_info *current_thread_info(void) |
88 | { | 92 | { |
89 | struct thread_info *ti; | 93 | return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); |
90 | __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); | ||
91 | return ti; | ||
92 | } | 94 | } |
93 | 95 | ||
94 | /* how to get the current stack pointer from C */ | ||
95 | register unsigned long current_stack_pointer asm("esp") __attribute_used__; | ||
96 | |||
97 | /* thread information allocation */ | 96 | /* thread information allocation */ |
98 | #ifdef CONFIG_DEBUG_STACK_USAGE | 97 | #ifdef CONFIG_DEBUG_STACK_USAGE |
99 | #define alloc_thread_info(tsk) \ | 98 | #define alloc_thread_info(tsk) \ |
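
current_thread_info() still relies on thread_info sitting at the base of the kernel stack: masking the stack pointer with ~(THREAD_SIZE - 1) rounds down to that base. The rewrite simply reuses the existing current_stack_pointer register variable (now declared before its first use) instead of open-coded inline asm. The arithmetic itself, as a plain user-space sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL    /* i386 default: two 4K pages */

    /* Any address within the stack, rounded down to a THREAD_SIZE boundary,
     * lands on the thread_info at the bottom of that stack. */
    static uintptr_t thread_info_base(uintptr_t sp)
    {
        return sp & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t sp = 0xc030f9a4UL;    /* arbitrary example value */
        printf("thread_info at %#lx\n", (unsigned long)thread_info_base(sp));
        return 0;
    }
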
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index b94e5eeef917..6adbd9b1ae88 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
@@ -28,10 +28,8 @@ | |||
28 | #define _ASM_I386_TOPOLOGY_H | 28 | #define _ASM_I386_TOPOLOGY_H |
29 | 29 | ||
30 | #ifdef CONFIG_X86_HT | 30 | #ifdef CONFIG_X86_HT |
31 | #define topology_physical_package_id(cpu) \ | 31 | #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) |
32 | (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) | 32 | #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) |
33 | #define topology_core_id(cpu) \ | ||
34 | (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu]) | ||
35 | #define topology_core_siblings(cpu) (cpu_core_map[cpu]) | 33 | #define topology_core_siblings(cpu) (cpu_core_map[cpu]) |
36 | #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) | 34 | #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) |
37 | #endif | 35 | #endif |
@@ -114,4 +112,9 @@ extern unsigned long node_remap_size[]; | |||
114 | 112 | ||
115 | extern cpumask_t cpu_coregroup_map(int cpu); | 113 | extern cpumask_t cpu_coregroup_map(int cpu); |
116 | 114 | ||
115 | #ifdef CONFIG_SMP | ||
116 | #define mc_capable() (boot_cpu_data.x86_max_cores > 1) | ||
117 | #define smt_capable() (smp_num_siblings > 1) | ||
118 | #endif | ||
119 | |||
117 | #endif /* _ASM_I386_TOPOLOGY_H */ | 120 | #endif /* _ASM_I386_TOPOLOGY_H */ |
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index d480f2e38215..69f0f1df6722 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h | |||
@@ -78,8 +78,8 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) | |||
78 | return user_mode_vm(&info->regs); | 78 | return user_mode_vm(&info->regs); |
79 | #else | 79 | #else |
80 | return info->regs.eip < PAGE_OFFSET | 80 | return info->regs.eip < PAGE_OFFSET |
81 | || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL) | 81 | || (info->regs.eip >= __fix_to_virt(FIX_VDSO) |
82 | && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE) | 82 | && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) |
83 | || info->regs.esp < PAGE_OFFSET; | 83 | || info->regs.esp < PAGE_OFFSET; |
84 | #endif | 84 | #endif |
85 | } | 85 | } |
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h index a140310bf84d..2fb337b0e9b7 100644 --- a/include/asm-ia64/nodedata.h +++ b/include/asm-ia64/nodedata.h | |||
@@ -46,6 +46,18 @@ struct ia64_node_data { | |||
46 | */ | 46 | */ |
47 | #define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid]) | 47 | #define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid]) |
48 | 48 | ||
49 | /* | ||
50 | * LOCAL_DATA_ADDR - This is to calculate the address of other node's | ||
51 | * "local_node_data" at hot-plug phase. The local_node_data | ||
52 | * is pointed by per_cpu_page. Kernel usually use it for | ||
53 | * just executing cpu. However, when new node is hot-added, | ||
54 | * the addresses of local data for other nodes are necessary | ||
55 | * to update all of them. | ||
56 | */ | ||
57 | #define LOCAL_DATA_ADDR(pgdat) \ | ||
58 | ((struct ia64_node_data *)((u64)(pgdat) + \ | ||
59 | L1_CACHE_ALIGN(sizeof(struct pglist_data)))) | ||
60 | |||
49 | #endif /* CONFIG_NUMA */ | 61 | #endif /* CONFIG_NUMA */ |
50 | 62 | ||
51 | #endif /* _ASM_IA64_NODEDATA_H */ | 63 | #endif /* _ASM_IA64_NODEDATA_H */ |
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index 616b5ed2aa72..937c21257523 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h | |||
@@ -112,6 +112,7 @@ void build_cpu_to_node_map(void); | |||
112 | #define topology_core_id(cpu) (cpu_data(cpu)->core_id) | 112 | #define topology_core_id(cpu) (cpu_data(cpu)->core_id) |
113 | #define topology_core_siblings(cpu) (cpu_core_map[cpu]) | 113 | #define topology_core_siblings(cpu) (cpu_core_map[cpu]) |
114 | #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) | 114 | #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) |
115 | #define smt_capable() (smp_num_siblings > 1) | ||
115 | #endif | 116 | #endif |
116 | 117 | ||
117 | #include <asm-generic/topology.h> | 118 | #include <asm-generic/topology.h> |
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h index 1e19c457de7d..47258e86e8c4 100644 --- a/include/asm-m68knommu/ptrace.h +++ b/include/asm-m68knommu/ptrace.h | |||
@@ -46,11 +46,9 @@ struct pt_regs { | |||
46 | #else | 46 | #else |
47 | unsigned short sr; | 47 | unsigned short sr; |
48 | unsigned long pc; | 48 | unsigned long pc; |
49 | #ifndef NO_FORMAT_VEC | ||
50 | unsigned format : 4; /* frame format specifier */ | 49 | unsigned format : 4; /* frame format specifier */ |
51 | unsigned vector : 12; /* vector offset */ | 50 | unsigned vector : 12; /* vector offset */ |
52 | #endif | 51 | #endif |
53 | #endif | ||
54 | }; | 52 | }; |
55 | 53 | ||
56 | /* | 54 | /* |
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h index 92f3e5507d22..bbc3844b086f 100644 --- a/include/asm-powerpc/topology.h +++ b/include/asm-powerpc/topology.h | |||
@@ -93,5 +93,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev, | |||
93 | 93 | ||
94 | #endif /* CONFIG_NUMA */ | 94 | #endif /* CONFIG_NUMA */ |
95 | 95 | ||
96 | #ifdef CONFIG_SMP | ||
97 | #include <asm/cputable.h> | ||
98 | #define smt_capable() (cpu_has_feature(CPU_FTR_SMT)) | ||
99 | #endif | ||
100 | |||
96 | #endif /* __KERNEL__ */ | 101 | #endif /* __KERNEL__ */ |
97 | #endif /* _ASM_POWERPC_TOPOLOGY_H */ | 102 | #endif /* _ASM_POWERPC_TOPOLOGY_H */ |
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h index 0e234e201bd6..98a6c613589d 100644 --- a/include/asm-sparc64/topology.h +++ b/include/asm-sparc64/topology.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef _ASM_SPARC64_TOPOLOGY_H | 1 | #ifndef _ASM_SPARC64_TOPOLOGY_H |
2 | #define _ASM_SPARC64_TOPOLOGY_H | 2 | #define _ASM_SPARC64_TOPOLOGY_H |
3 | 3 | ||
4 | #include <asm/spitfire.h> | ||
5 | #define smt_capable() (tlb_type == hypervisor) | ||
6 | |||
4 | #include <asm-generic/topology.h> | 7 | #include <asm-generic/topology.h> |
5 | 8 | ||
6 | #endif /* _ASM_SPARC64_TOPOLOGY_H */ | 9 | #endif /* _ASM_SPARC64_TOPOLOGY_H */ |
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index 1b2ac55d3204..931877462788 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h | |||
@@ -124,7 +124,7 @@ asmlinkage void IRQ_NAME(nr); \ | |||
124 | __asm__( \ | 124 | __asm__( \ |
125 | "\n.p2align\n" \ | 125 | "\n.p2align\n" \ |
126 | "IRQ" #nr "_interrupt:\n\t" \ | 126 | "IRQ" #nr "_interrupt:\n\t" \ |
127 | "push $" #nr "-256 ; " \ | 127 | "push $~(" #nr ") ; " \ |
128 | "jmp common_interrupt"); | 128 | "jmp common_interrupt"); |
129 | 129 | ||
130 | #if defined(CONFIG_X86_IO_APIC) | 130 | #if defined(CONFIG_X86_IO_APIC) |
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index c4e46e7fa7ba..6e7a2e976b04 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h | |||
@@ -59,6 +59,8 @@ extern int __node_distance(int, int); | |||
59 | #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) | 59 | #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) |
60 | #define topology_core_siblings(cpu) (cpu_core_map[cpu]) | 60 | #define topology_core_siblings(cpu) (cpu_core_map[cpu]) |
61 | #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) | 61 | #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) |
62 | #define mc_capable() (boot_cpu_data.x86_max_cores > 1) | ||
63 | #define smt_capable() (smp_num_siblings > 1) | ||
62 | #endif | 64 | #endif |
63 | 65 | ||
64 | #include <asm-generic/topology.h> | 66 | #include <asm-generic/topology.h> |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 90d6df1551ed..88b5dfd8ee12 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -528,12 +528,18 @@ static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; } | |||
528 | 528 | ||
529 | #ifdef CONFIG_ACPI_NUMA | 529 | #ifdef CONFIG_ACPI_NUMA |
530 | int acpi_get_pxm(acpi_handle handle); | 530 | int acpi_get_pxm(acpi_handle handle); |
531 | int acpi_get_node(acpi_handle *handle); | ||
531 | #else | 532 | #else |
532 | static inline int acpi_get_pxm(acpi_handle handle) | 533 | static inline int acpi_get_pxm(acpi_handle handle) |
533 | { | 534 | { |
534 | return 0; | 535 | return 0; |
535 | } | 536 | } |
537 | static inline int acpi_get_node(acpi_handle *handle) | ||
538 | { | ||
539 | return 0; | ||
540 | } | ||
536 | #endif | 541 | #endif |
542 | extern int acpi_paddr_to_node(u64 start_addr, u64 size); | ||
537 | 543 | ||
538 | extern int pnpacpi_disabled; | 544 | extern int pnpacpi_disabled; |
539 | 545 | ||
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index fb7e9b7ccbe3..737e407d0cd1 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -149,7 +149,6 @@ void create_empty_buffers(struct page *, unsigned long, | |||
149 | unsigned long b_state); | 149 | unsigned long b_state); |
150 | void end_buffer_read_sync(struct buffer_head *bh, int uptodate); | 150 | void end_buffer_read_sync(struct buffer_head *bh, int uptodate); |
151 | void end_buffer_write_sync(struct buffer_head *bh, int uptodate); | 151 | void end_buffer_write_sync(struct buffer_head *bh, int uptodate); |
152 | void end_buffer_async_write(struct buffer_head *bh, int uptodate); | ||
153 | 152 | ||
154 | /* Things to do with buffers at mapping->private_list */ | 153 | /* Things to do with buffers at mapping->private_list */ |
155 | void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); | 154 | void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); |
@@ -214,6 +213,7 @@ int nobh_truncate_page(struct address_space *, loff_t); | |||
214 | int nobh_writepage(struct page *page, get_block_t *get_block, | 213 | int nobh_writepage(struct page *page, get_block_t *get_block, |
215 | struct writeback_control *wbc); | 214 | struct writeback_control *wbc); |
216 | 215 | ||
216 | void buffer_init(void); | ||
217 | 217 | ||
218 | /* | 218 | /* |
219 | * inline definitions | 219 | * inline definitions |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 08d50c53aab4..a3caf6866bae 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -31,17 +31,23 @@ struct cpu { | |||
31 | struct sys_device sysdev; | 31 | struct sys_device sysdev; |
32 | }; | 32 | }; |
33 | 33 | ||
34 | extern int register_cpu(struct cpu *, int, struct node *); | 34 | extern int register_cpu(struct cpu *cpu, int num); |
35 | extern struct sys_device *get_cpu_sysdev(unsigned cpu); | 35 | extern struct sys_device *get_cpu_sysdev(unsigned cpu); |
36 | #ifdef CONFIG_HOTPLUG_CPU | 36 | #ifdef CONFIG_HOTPLUG_CPU |
37 | extern void unregister_cpu(struct cpu *, struct node *); | 37 | extern void unregister_cpu(struct cpu *cpu); |
38 | #endif | 38 | #endif |
39 | struct notifier_block; | 39 | struct notifier_block; |
40 | 40 | ||
41 | #ifdef CONFIG_SMP | 41 | #ifdef CONFIG_SMP |
42 | /* Need to know about CPUs going up/down? */ | 42 | /* Need to know about CPUs going up/down? */ |
43 | extern int register_cpu_notifier(struct notifier_block *nb); | 43 | extern int register_cpu_notifier(struct notifier_block *nb); |
44 | #ifdef CONFIG_HOTPLUG_CPU | ||
44 | extern void unregister_cpu_notifier(struct notifier_block *nb); | 45 | extern void unregister_cpu_notifier(struct notifier_block *nb); |
46 | #else | ||
47 | static inline void unregister_cpu_notifier(struct notifier_block *nb) | ||
48 | { | ||
49 | } | ||
50 | #endif | ||
45 | extern int current_in_cpu_hotplug(void); | 51 | extern int current_in_cpu_hotplug(void); |
46 | 52 | ||
47 | int cpu_up(unsigned int cpu); | 53 | int cpu_up(unsigned int cpu); |
@@ -73,6 +79,8 @@ extern int lock_cpu_hotplug_interruptible(void); | |||
73 | { .notifier_call = fn, .priority = pri }; \ | 79 | { .notifier_call = fn, .priority = pri }; \ |
74 | register_cpu_notifier(&fn##_nb); \ | 80 | register_cpu_notifier(&fn##_nb); \ |
75 | } | 81 | } |
82 | #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) | ||
83 | #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) | ||
76 | int cpu_down(unsigned int cpu); | 84 | int cpu_down(unsigned int cpu); |
77 | #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) | 85 | #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) |
78 | #else | 86 | #else |
@@ -80,6 +88,8 @@ int cpu_down(unsigned int cpu); | |||
80 | #define unlock_cpu_hotplug() do { } while (0) | 88 | #define unlock_cpu_hotplug() do { } while (0) |
81 | #define lock_cpu_hotplug_interruptible() 0 | 89 | #define lock_cpu_hotplug_interruptible() 0 |
82 | #define hotcpu_notifier(fn, pri) | 90 | #define hotcpu_notifier(fn, pri) |
91 | #define register_hotcpu_notifier(nb) | ||
92 | #define unregister_hotcpu_notifier(nb) | ||
83 | 93 | ||
84 | /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ | 94 | /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ |
85 | static inline int cpu_is_offline(int cpu) { return 0; } | 95 | static inline int cpu_is_offline(int cpu) { return 0; } |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 78b236ca04f8..272010a6078a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -20,7 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | #ifndef DMAENGINE_H | 21 | #ifndef DMAENGINE_H |
22 | #define DMAENGINE_H | 22 | #define DMAENGINE_H |
23 | #include <linux/config.h> | 23 | |
24 | #ifdef CONFIG_DMA_ENGINE | 24 | #ifdef CONFIG_DMA_ENGINE |
25 | 25 | ||
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
diff --git a/include/linux/futex.h b/include/linux/futex.h index 966a5b3da439..34c3a215f2cd 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
@@ -12,6 +12,9 @@ | |||
12 | #define FUTEX_REQUEUE 3 | 12 | #define FUTEX_REQUEUE 3 |
13 | #define FUTEX_CMP_REQUEUE 4 | 13 | #define FUTEX_CMP_REQUEUE 4 |
14 | #define FUTEX_WAKE_OP 5 | 14 | #define FUTEX_WAKE_OP 5 |
15 | #define FUTEX_LOCK_PI 6 | ||
16 | #define FUTEX_UNLOCK_PI 7 | ||
17 | #define FUTEX_TRYLOCK_PI 8 | ||
15 | 18 | ||
16 | /* | 19 | /* |
17 | * Support for robust futexes: the kernel cleans up held futexes at | 20 | * Support for robust futexes: the kernel cleans up held futexes at |
@@ -90,18 +93,21 @@ struct robust_list_head { | |||
90 | */ | 93 | */ |
91 | #define ROBUST_LIST_LIMIT 2048 | 94 | #define ROBUST_LIST_LIMIT 2048 |
92 | 95 | ||
93 | long do_futex(unsigned long uaddr, int op, int val, | 96 | long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, |
94 | unsigned long timeout, unsigned long uaddr2, int val2, | 97 | u32 __user *uaddr2, u32 val2, u32 val3); |
95 | int val3); | ||
96 | 98 | ||
97 | extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); | 99 | extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); |
98 | 100 | ||
99 | #ifdef CONFIG_FUTEX | 101 | #ifdef CONFIG_FUTEX |
100 | extern void exit_robust_list(struct task_struct *curr); | 102 | extern void exit_robust_list(struct task_struct *curr); |
103 | extern void exit_pi_state_list(struct task_struct *curr); | ||
101 | #else | 104 | #else |
102 | static inline void exit_robust_list(struct task_struct *curr) | 105 | static inline void exit_robust_list(struct task_struct *curr) |
103 | { | 106 | { |
104 | } | 107 | } |
108 | static inline void exit_pi_state_list(struct task_struct *curr) | ||
109 | { | ||
110 | } | ||
105 | #endif | 111 | #endif |
106 | 112 | ||
107 | #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ | 113 | #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e127ef7e8da8..3a256957fb56 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -87,6 +87,7 @@ extern struct group_info init_groups; | |||
87 | .lock_depth = -1, \ | 87 | .lock_depth = -1, \ |
88 | .prio = MAX_PRIO-20, \ | 88 | .prio = MAX_PRIO-20, \ |
89 | .static_prio = MAX_PRIO-20, \ | 89 | .static_prio = MAX_PRIO-20, \ |
90 | .normal_prio = MAX_PRIO-20, \ | ||
90 | .policy = SCHED_NORMAL, \ | 91 | .policy = SCHED_NORMAL, \ |
91 | .cpus_allowed = CPU_MASK_ALL, \ | 92 | .cpus_allowed = CPU_MASK_ALL, \ |
92 | .mm = NULL, \ | 93 | .mm = NULL, \ |
@@ -122,6 +123,8 @@ extern struct group_info init_groups; | |||
122 | .journal_info = NULL, \ | 123 | .journal_info = NULL, \ |
123 | .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ | 124 | .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ |
124 | .fs_excl = ATOMIC_INIT(0), \ | 125 | .fs_excl = ATOMIC_INIT(0), \ |
126 | .pi_lock = SPIN_LOCK_UNLOCKED, \ | ||
127 | INIT_RT_MUTEXES(tsk) \ | ||
125 | } | 128 | } |
126 | 129 | ||
127 | 130 | ||
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index cd6bd001ba4e..edfc733b1575 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new, | |||
105 | int adjust_resource(struct resource *res, unsigned long start, | 105 | int adjust_resource(struct resource *res, unsigned long start, |
106 | unsigned long size); | 106 | unsigned long size); |
107 | 107 | ||
108 | /* get registered SYSTEM_RAM resources in specified area */ | ||
109 | extern int find_next_system_ram(struct resource *res); | ||
110 | |||
108 | /* Convenience shorthand with allocation */ | 111 | /* Convenience shorthand with allocation */ |
109 | #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) | 112 | #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) |
110 | #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) | 113 | #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) |
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 5653b2f23b6a..d09fbeabf1dc 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
@@ -210,11 +210,7 @@ struct kernel_ipmi_msg | |||
210 | #include <linux/list.h> | 210 | #include <linux/list.h> |
211 | #include <linux/module.h> | 211 | #include <linux/module.h> |
212 | #include <linux/device.h> | 212 | #include <linux/device.h> |
213 | |||
214 | #ifdef CONFIG_PROC_FS | ||
215 | #include <linux/proc_fs.h> | 213 | #include <linux/proc_fs.h> |
216 | extern struct proc_dir_entry *proc_ipmi_root; | ||
217 | #endif /* CONFIG_PROC_FS */ | ||
218 | 214 | ||
219 | /* Opaque type for a IPMI message user. One of these is needed to | 215 | /* Opaque type for a IPMI message user. One of these is needed to |
220 | send and receive messages. */ | 216 | send and receive messages. */ |
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index c6f70660b371..c9c760700bc3 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h | |||
@@ -186,6 +186,7 @@ struct jffs2_raw_xref | |||
186 | jint32_t hdr_crc; | 186 | jint32_t hdr_crc; |
187 | jint32_t ino; /* inode number */ | 187 | jint32_t ino; /* inode number */ |
188 | jint32_t xid; /* XATTR identifier number */ | 188 | jint32_t xid; /* XATTR identifier number */ |
189 | jint32_t xseqno; /* xref sequencial number */ | ||
189 | jint32_t node_crc; | 190 | jint32_t node_crc; |
190 | } __attribute__((packed)); | 191 | } __attribute__((packed)); |
191 | 192 | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index 20b1cf527c60..f4284bf89758 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
32 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
33 | #include <asm/scatterlist.h> | ||
33 | #include <asm/io.h> | 34 | #include <asm/io.h> |
34 | #include <linux/ata.h> | 35 | #include <linux/ata.h> |
35 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
@@ -887,6 +888,9 @@ static inline unsigned int ata_tag_internal(unsigned int tag) | |||
887 | return tag == ATA_MAX_QUEUE - 1; | 888 | return tag == ATA_MAX_QUEUE - 1; |
888 | } | 889 | } |
889 | 890 | ||
891 | /* | ||
892 | * device helpers | ||
893 | */ | ||
890 | static inline unsigned int ata_class_enabled(unsigned int class) | 894 | static inline unsigned int ata_class_enabled(unsigned int class) |
891 | { | 895 | { |
892 | return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; | 896 | return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; |
@@ -917,6 +921,17 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev) | |||
917 | return ata_class_absent(dev->class); | 921 | return ata_class_absent(dev->class); |
918 | } | 922 | } |
919 | 923 | ||
924 | /* | ||
925 | * port helpers | ||
926 | */ | ||
927 | static inline int ata_port_max_devices(const struct ata_port *ap) | ||
928 | { | ||
929 | if (ap->flags & ATA_FLAG_SLAVE_POSS) | ||
930 | return 2; | ||
931 | return 1; | ||
932 | } | ||
933 | |||
934 | |||
920 | static inline u8 ata_chk_status(struct ata_port *ap) | 935 | static inline u8 ata_chk_status(struct ata_port *ap) |
921 | { | 936 | { |
922 | return ap->ops->check_status(ap); | 937 | return ap->ops->check_status(ap); |
diff --git a/include/linux/list.h b/include/linux/list.h index 37ca31b21bb7..6b74adf5297f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -4,18 +4,11 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <linux/stddef.h> | 6 | #include <linux/stddef.h> |
7 | #include <linux/poison.h> | ||
7 | #include <linux/prefetch.h> | 8 | #include <linux/prefetch.h> |
8 | #include <asm/system.h> | 9 | #include <asm/system.h> |
9 | 10 | ||
10 | /* | 11 | /* |
11 | * These are non-NULL pointers that will result in page faults | ||
12 | * under normal circumstances, used to verify that nobody uses | ||
13 | * non-initialized list entries. | ||
14 | */ | ||
15 | #define LIST_POISON1 ((void *) 0x00100100) | ||
16 | #define LIST_POISON2 ((void *) 0x00200200) | ||
17 | |||
18 | /* | ||
19 | * Simple doubly linked list implementation. | 12 | * Simple doubly linked list implementation. |
20 | * | 13 | * |
21 | * Some of the internal functions ("__xxx") are useful when | 14 | * Some of the internal functions ("__xxx") are useful when |
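
LIST_POISON1/LIST_POISON2 themselves are unchanged; they simply move into the new <linux/poison.h>. Their job is to leave non-NULL but unmapped-looking values in a deleted entry so that a use-after-delete faults at a recognizable address. Roughly how the list code applies them (an approximation of list_del(), not a new API):

    #include <linux/list.h>
    #include <linux/poison.h>

    static inline void example_list_del(struct list_head *entry)
    {
        /* unlink ... */
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        /* ... then poison the dangling pointers */
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
    }
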
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 911206386171..218501cfaeb9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -63,6 +63,76 @@ extern int online_pages(unsigned long, unsigned long); | |||
63 | /* reasonably generic interface to expand the physical pages in a zone */ | 63 | /* reasonably generic interface to expand the physical pages in a zone */ |
64 | extern int __add_pages(struct zone *zone, unsigned long start_pfn, | 64 | extern int __add_pages(struct zone *zone, unsigned long start_pfn, |
65 | unsigned long nr_pages); | 65 | unsigned long nr_pages); |
66 | |||
67 | #ifdef CONFIG_NUMA | ||
68 | extern int memory_add_physaddr_to_nid(u64 start); | ||
69 | #else | ||
70 | static inline int memory_add_physaddr_to_nid(u64 start) | ||
71 | { | ||
72 | return 0; | ||
73 | } | ||
74 | #endif | ||
75 | |||
76 | #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION | ||
77 | /* | ||
78 | * For supporting node-hotadd, we have to allocate a new pgdat. | ||
79 | * | ||
80 | * If an arch has generic style NODE_DATA(), | ||
81 | * node_data[nid] = kzalloc() works well. But it depends on the architecture. | ||
82 | * | ||
83 | * In general, generic_alloc_nodedata() is used. | ||
84 | * Now, arch_free_nodedata() is just defined for error path of node_hot_add. | ||
85 | * | ||
86 | */ | ||
87 | extern pg_data_t *arch_alloc_nodedata(int nid); | ||
88 | extern void arch_free_nodedata(pg_data_t *pgdat); | ||
89 | extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); | ||
90 | |||
91 | #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ | ||
92 | |||
93 | #define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) | ||
94 | #define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) | ||
95 | |||
96 | #ifdef CONFIG_NUMA | ||
97 | /* | ||
98 | * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. | ||
99 | * XXX: kmalloc_node() can't work well to get new node's memory at this time. | ||
100 | * Because, pgdat for the new node is not allocated/initialized yet itself. | ||
101 | * To use new node's memory, more consideration will be necessary. | ||
102 | */ | ||
103 | #define generic_alloc_nodedata(nid) \ | ||
104 | ({ \ | ||
105 | kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ | ||
106 | }) | ||
107 | /* | ||
108 | * This definition is just for error path in node hotadd. | ||
109 | * For node hotremove, we have to replace this. | ||
110 | */ | ||
111 | #define generic_free_nodedata(pgdat) kfree(pgdat) | ||
112 | |||
113 | extern pg_data_t *node_data[]; | ||
114 | static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) | ||
115 | { | ||
116 | node_data[nid] = pgdat; | ||
117 | } | ||
118 | |||
119 | #else /* !CONFIG_NUMA */ | ||
120 | |||
121 | /* never called */ | ||
122 | static inline pg_data_t *generic_alloc_nodedata(int nid) | ||
123 | { | ||
124 | BUG(); | ||
125 | return NULL; | ||
126 | } | ||
127 | static inline void generic_free_nodedata(pg_data_t *pgdat) | ||
128 | { | ||
129 | } | ||
130 | static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) | ||
131 | { | ||
132 | } | ||
133 | #endif /* CONFIG_NUMA */ | ||
134 | #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ | ||
135 | |||
66 | #else /* ! CONFIG_MEMORY_HOTPLUG */ | 136 | #else /* ! CONFIG_MEMORY_HOTPLUG */ |
67 | /* | 137 | /* |
68 | * Stub functions for when hotplug is off | 138 | * Stub functions for when hotplug is off |
@@ -99,7 +169,8 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn, | |||
99 | return -ENOSYS; | 169 | return -ENOSYS; |
100 | } | 170 | } |
101 | 171 | ||
102 | extern int add_memory(u64 start, u64 size); | 172 | extern int add_memory(int nid, u64 start, u64 size); |
173 | extern int arch_add_memory(int nid, u64 start, u64 size); | ||
103 | extern int remove_memory(u64 start, u64 size); | 174 | extern int remove_memory(u64 start, u64 size); |
104 | 175 | ||
105 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ | 176 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index a929ea197e48..c41a1299b8cf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1030,13 +1030,20 @@ static inline void vm_stat_account(struct mm_struct *mm, | |||
1030 | } | 1030 | } |
1031 | #endif /* CONFIG_PROC_FS */ | 1031 | #endif /* CONFIG_PROC_FS */ |
1032 | 1032 | ||
1033 | static inline void | ||
1034 | debug_check_no_locks_freed(const void *from, unsigned long len) | ||
1035 | { | ||
1036 | mutex_debug_check_no_locks_freed(from, len); | ||
1037 | rt_mutex_debug_check_no_locks_freed(from, len); | ||
1038 | } | ||
1039 | |||
1033 | #ifndef CONFIG_DEBUG_PAGEALLOC | 1040 | #ifndef CONFIG_DEBUG_PAGEALLOC |
1034 | static inline void | 1041 | static inline void |
1035 | kernel_map_pages(struct page *page, int numpages, int enable) | 1042 | kernel_map_pages(struct page *page, int numpages, int enable) |
1036 | { | 1043 | { |
1037 | if (!PageHighMem(page) && !enable) | 1044 | if (!PageHighMem(page) && !enable) |
1038 | mutex_debug_check_no_locks_freed(page_address(page), | 1045 | debug_check_no_locks_freed(page_address(page), |
1039 | numpages * PAGE_SIZE); | 1046 | numpages * PAGE_SIZE); |
1040 | } | 1047 | } |
1041 | #endif | 1048 | #endif |
1042 | 1049 | ||
@@ -1065,5 +1072,7 @@ void drop_slab(void); | |||
1065 | extern int randomize_va_space; | 1072 | extern int randomize_va_space; |
1066 | #endif | 1073 | #endif |
1067 | 1074 | ||
1075 | const char *arch_vma_name(struct vm_area_struct *vma); | ||
1076 | |||
1068 | #endif /* __KERNEL__ */ | 1077 | #endif /* __KERNEL__ */ |
1069 | #endif /* _LINUX_MM_H */ | 1078 | #endif /* _LINUX_MM_H */ |
diff --git a/include/linux/node.h b/include/linux/node.h index 254dc3de650b..81dcec84cd8f 100644 --- a/include/linux/node.h +++ b/include/linux/node.h | |||
@@ -26,8 +26,25 @@ struct node { | |||
26 | struct sys_device sysdev; | 26 | struct sys_device sysdev; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | extern struct node node_devices[]; | ||
30 | |||
29 | extern int register_node(struct node *, int, struct node *); | 31 | extern int register_node(struct node *, int, struct node *); |
30 | extern void unregister_node(struct node *node); | 32 | extern void unregister_node(struct node *node); |
33 | extern int register_one_node(int nid); | ||
34 | extern void unregister_one_node(int nid); | ||
35 | #ifdef CONFIG_NUMA | ||
36 | extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); | ||
37 | extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); | ||
38 | #else | ||
39 | static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) | ||
40 | { | ||
41 | return 0; | ||
42 | } | ||
43 | static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) | ||
44 | { | ||
45 | return 0; | ||
46 | } | ||
47 | #endif | ||
31 | 48 | ||
32 | #define to_node(sys_device) container_of(sys_device, struct node, sysdev) | 49 | #define to_node(sys_device) container_of(sys_device, struct node, sysdev) |
33 | 50 | ||
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h new file mode 100644 index 000000000000..135742cfada5 --- /dev/null +++ b/include/linux/nsc_gpio.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /** | ||
2 | nsc_gpio.c | ||
3 | |||
4 | National Semiconductor GPIO common access methods. | ||
5 | |||
6 | struct nsc_gpio_ops abstracts the low-level access | ||
7 | operations for the GPIO units on 2 NSC chip families; the GEODE | ||
8 | integrated CPU, and the PC-8736[03456] integrated PC-peripheral | ||
9 | chips. | ||
10 | |||
11 | The GPIO units on these chips have the same pin architecture, but | ||
12 | the access methods differ. Thus, scx200_gpio and pc8736x_gpio | ||
13 | implement their own versions of these routines; and use the common | ||
14 | file-operations routines implemented in nsc_gpio module. | ||
15 | |||
16 | Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com> | ||
17 | |||
18 | NB: this work was tested on the Geode SC-1100 and PC-87366 chips. | ||
19 | NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond. | ||
20 | */ | ||
21 | |||
22 | struct nsc_gpio_ops { | ||
23 | struct module* owner; | ||
24 | u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits); | ||
25 | void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); | ||
26 | int (*gpio_get) (unsigned iminor); | ||
27 | void (*gpio_set) (unsigned iminor, int state); | ||
28 | void (*gpio_set_high)(unsigned iminor); | ||
29 | void (*gpio_set_low) (unsigned iminor); | ||
30 | void (*gpio_change) (unsigned iminor); | ||
31 | int (*gpio_current) (unsigned iminor); | ||
32 | struct device* dev; /* for dev_dbg() support, set in init */ | ||
33 | }; | ||
34 | |||
35 | extern ssize_t nsc_gpio_write(struct file *file, const char __user *data, | ||
36 | size_t len, loff_t *ppos); | ||
37 | |||
38 | extern ssize_t nsc_gpio_read(struct file *file, char __user *buf, | ||
39 | size_t len, loff_t *ppos); | ||
40 | |||
41 | extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index); | ||
42 | |||
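A wiring sketch (not part of the patch) of how a chip driver might fill in the nsc_gpio_ops vtable declared above so the shared nsc_gpio_read()/nsc_gpio_write() helpers can dispatch to chip-specific accessors. It assumes the scx200_gpio_* accessors declared in include/linux/scx200_gpio.h (also touched in this series); the in-tree drivers may wrap these rather than reference them directly.

/* Sketch only: not taken from the patch itself. */
#include <linux/module.h>
#include <linux/nsc_gpio.h>
#include <linux/scx200_gpio.h>

static struct nsc_gpio_ops scx200_gpio_ops = {
	.owner		= THIS_MODULE,
	.gpio_config	= scx200_gpio_configure,
	.gpio_dump	= nsc_gpio_dump,	/* shared dump helper from nsc_gpio */
	.gpio_get	= scx200_gpio_get,
	.gpio_set	= scx200_gpio_set,
	.gpio_set_high	= scx200_gpio_set_high,
	.gpio_set_low	= scx200_gpio_set_low,
	.gpio_change	= scx200_gpio_change,
	.gpio_current	= scx200_gpio_current,
	/* .dev is filled in at probe time so dev_dbg() has a device */
};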
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c2fd2d19938b..9ae6b1a75366 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1202,6 +1202,7 @@ | |||
1202 | #define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF | 1202 | #define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF |
1203 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 | 1203 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 |
1204 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 | 1204 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 |
1205 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 | ||
1205 | #define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 | 1206 | #define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 |
1206 | #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 | 1207 | #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 |
1207 | #define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 | 1208 | #define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 |
@@ -2170,7 +2171,6 @@ | |||
2170 | #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 | 2171 | #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 |
2171 | #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e | 2172 | #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e |
2172 | #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 | 2173 | #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 |
2173 | #define PCI_DEVICE_ID_INTEL_GD31244 0x3200 | ||
2174 | #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 | 2174 | #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 |
2175 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 | 2175 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
2176 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 | 2176 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 |
diff --git a/include/linux/plist.h b/include/linux/plist.h new file mode 100644 index 000000000000..3404faef542c --- /dev/null +++ b/include/linux/plist.h | |||
@@ -0,0 +1,247 @@ | |||
1 | /* | ||
2 | * Descending-priority-sorted double-linked list | ||
3 | * | ||
4 | * (C) 2002-2003 Intel Corp | ||
5 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>. | ||
6 | * | ||
7 | * 2001-2005 (c) MontaVista Software, Inc. | ||
8 | * Daniel Walker <dwalker@mvista.com> | ||
9 | * | ||
10 | * (C) 2005 Thomas Gleixner <tglx@linutronix.de> | ||
11 | * | ||
12 | * Simplifications of the original code by | ||
13 | * Oleg Nesterov <oleg@tv-sign.ru> | ||
14 | * | ||
15 | * Licensed under the FSF's GNU Public License v2 or later. | ||
16 | * | ||
17 | * Based on simple lists (include/linux/list.h). | ||
18 | * | ||
19 | * This is a priority-sorted list of nodes; each node has a | ||
20 | * priority from INT_MIN (highest) to INT_MAX (lowest). | ||
21 | * | ||
22 | * Addition is O(K), removal is O(1), change of priority of a node is | ||
23 | * O(K) and K is the number of RT priority levels used in the system. | ||
24 | * (1 <= K <= 99) | ||
25 | * | ||
26 | * This list is really a list of lists: | ||
27 | * | ||
28 | * - The tier 1 list is the prio_list, different priority nodes. | ||
29 | * | ||
30 | * - The tier 2 list is the node_list, serialized nodes. | ||
31 | * | ||
32 | * Simple ASCII art explanation: | ||
33 | * | ||
34 | * |HEAD | | ||
35 | * | | | ||
36 | * |prio_list.prev|<------------------------------------| | ||
37 | * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-| | ||
38 | * |10 | |10| |21| |21| |21| |40| (prio) | ||
39 | * | | | | | | | | | | | | | ||
40 | * | | | | | | | | | | | | | ||
41 | * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| | ||
42 | * |node_list.prev|<------------------------------------| | ||
43 | * | ||
44 | * The nodes on the prio_list list are sorted by priority to simplify | ||
45 | * the insertion of new nodes. There are no nodes with duplicate | ||
46 | * priorities on the list. | ||
47 | * | ||
48 | * The nodes on the node_list are ordered by priority and can contain | ||
49 | * entries which have the same priority. Those entries are ordered | ||
50 | * FIFO. | ||
51 | * | ||
52 | * Addition means: look for the prio_list node in the prio_list | ||
53 | * for the priority of the node and insert it before the node_list | ||
54 | * entry of the next prio_list node. If it is the first node of | ||
55 | * that priority, add it to the prio_list in the right position and | ||
56 | * insert it into the serialized node_list list | ||
57 | * | ||
58 | * Removal means remove it from the node_list and remove it from | ||
59 | * the prio_list if the node_list list_head is non empty. In case | ||
60 | * of removal from the prio_list it must be checked whether other | ||
61 | * entries of the same priority are on the list or not. If there | ||
62 | * is another entry of the same priority then this entry has to | ||
63 | * replace the removed entry on the prio_list. If the entry which | ||
64 | * is removed is the only entry of this priority then a simple | ||
65 | * remove from both lists is sufficient. | ||
66 | * | ||
67 | * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX | ||
68 | * is lowest priority. | ||
69 | * | ||
70 | * No locking is done, up to the caller. | ||
71 | * | ||
72 | */ | ||
73 | #ifndef _LINUX_PLIST_H_ | ||
74 | #define _LINUX_PLIST_H_ | ||
75 | |||
76 | #include <linux/list.h> | ||
77 | #include <linux/spinlock_types.h> | ||
78 | |||
79 | struct plist_head { | ||
80 | struct list_head prio_list; | ||
81 | struct list_head node_list; | ||
82 | #ifdef CONFIG_DEBUG_PI_LIST | ||
83 | spinlock_t *lock; | ||
84 | #endif | ||
85 | }; | ||
86 | |||
87 | struct plist_node { | ||
88 | int prio; | ||
89 | struct plist_head plist; | ||
90 | }; | ||
91 | |||
92 | #ifdef CONFIG_DEBUG_PI_LIST | ||
93 | # define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock | ||
94 | #else | ||
95 | # define PLIST_HEAD_LOCK_INIT(_lock) | ||
96 | #endif | ||
97 | |||
98 | /** | ||
99 | * #PLIST_HEAD_INIT - static struct plist_head initializer | ||
100 | * | ||
101 | * @head: struct plist_head variable name | ||
102 | */ | ||
103 | #define PLIST_HEAD_INIT(head, _lock) \ | ||
104 | { \ | ||
105 | .prio_list = LIST_HEAD_INIT((head).prio_list), \ | ||
106 | .node_list = LIST_HEAD_INIT((head).node_list), \ | ||
107 | PLIST_HEAD_LOCK_INIT(&(_lock)) \ | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * #PLIST_NODE_INIT - static struct plist_node initializer | ||
112 | * | ||
113 | * @node: struct plist_node variable name | ||
114 | * @__prio: initial node priority | ||
115 | */ | ||
116 | #define PLIST_NODE_INIT(node, __prio) \ | ||
117 | { \ | ||
118 | .prio = (__prio), \ | ||
119 | .plist = PLIST_HEAD_INIT((node).plist, NULL), \ | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * plist_head_init - dynamic struct plist_head initializer | ||
124 | * | ||
125 | * @head: &struct plist_head pointer | ||
126 | */ | ||
127 | static inline void | ||
128 | plist_head_init(struct plist_head *head, spinlock_t *lock) | ||
129 | { | ||
130 | INIT_LIST_HEAD(&head->prio_list); | ||
131 | INIT_LIST_HEAD(&head->node_list); | ||
132 | #ifdef CONFIG_DEBUG_PI_LIST | ||
133 | head->lock = lock; | ||
134 | #endif | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * plist_node_init - Dynamic struct plist_node initializer | ||
139 | * | ||
140 | * @node: &struct plist_node pointer | ||
141 | * @prio: initial node priority | ||
142 | */ | ||
143 | static inline void plist_node_init(struct plist_node *node, int prio) | ||
144 | { | ||
145 | node->prio = prio; | ||
146 | plist_head_init(&node->plist, NULL); | ||
147 | } | ||
148 | |||
149 | extern void plist_add(struct plist_node *node, struct plist_head *head); | ||
150 | extern void plist_del(struct plist_node *node, struct plist_head *head); | ||
151 | |||
152 | /** | ||
153 | * plist_for_each - iterate over the plist | ||
154 | * | ||
155 | * @pos: the type * to use as a loop counter. | ||
156 | * @head: the head for your list. | ||
157 | */ | ||
158 | #define plist_for_each(pos, head) \ | ||
159 | list_for_each_entry(pos, &(head)->node_list, plist.node_list) | ||
160 | |||
161 | /** | ||
162 | * plist_for_each_safe - iterate over the plist, safe | ||
163 | * against removal of list entry | ||
164 | * | ||
165 | * @pos: the type * to use as a loop counter. | ||
166 | * @n: another type * to use as temporary storage | ||
167 | * @head: the head for your list. | ||
168 | */ | ||
169 | #define plist_for_each_safe(pos, n, head) \ | ||
170 | list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) | ||
171 | |||
172 | /** | ||
173 | * plist_for_each_entry - iterate over list of given type | ||
174 | * | ||
175 | * @pos: the type * to use as a loop counter. | ||
176 | * @head: the head for your list. | ||
177 | * @member: the name of the list_struct within the struct. | ||
178 | */ | ||
179 | #define plist_for_each_entry(pos, head, mem) \ | ||
180 | list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) | ||
181 | |||
182 | /** | ||
183 | * plist_for_each_entry_safe - iterate over list of given type safe against | ||
184 | * removal of list entry | ||
185 | * | ||
186 | * @pos: the type * to use as a loop counter. | ||
187 | * @n: another type * to use as temporary storage | ||
188 | * @head: the head for your list. | ||
189 | * @m: the name of the list_struct within the struct. | ||
190 | */ | ||
191 | #define plist_for_each_entry_safe(pos, n, head, m) \ | ||
192 | list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) | ||
193 | |||
194 | /** | ||
195 | * plist_head_empty - return !0 if a plist_head is empty | ||
196 | * | ||
197 | * @head: &struct plist_head pointer | ||
198 | */ | ||
199 | static inline int plist_head_empty(const struct plist_head *head) | ||
200 | { | ||
201 | return list_empty(&head->node_list); | ||
202 | } | ||
203 | |||
204 | /** | ||
205 | * plist_node_empty - return !0 if plist_node is not on a list | ||
206 | * | ||
207 | * @node: &struct plist_node pointer | ||
208 | */ | ||
209 | static inline int plist_node_empty(const struct plist_node *node) | ||
210 | { | ||
211 | return plist_head_empty(&node->plist); | ||
212 | } | ||
213 | |||
214 | /* All functions below assume the plist_head is not empty. */ | ||
215 | |||
216 | /** | ||
217 | * plist_first_entry - get the struct for the first entry | ||
218 | * | ||
219 | * @ptr: the &struct plist_head pointer. | ||
220 | * @type: the type of the struct this is embedded in. | ||
221 | * @member: the name of the list_struct within the struct. | ||
222 | */ | ||
223 | #ifdef CONFIG_DEBUG_PI_LIST | ||
224 | # define plist_first_entry(head, type, member) \ | ||
225 | ({ \ | ||
226 | WARN_ON(plist_head_empty(head)); \ | ||
227 | container_of(plist_first(head), type, member); \ | ||
228 | }) | ||
229 | #else | ||
230 | # define plist_first_entry(head, type, member) \ | ||
231 | container_of(plist_first(head), type, member) | ||
232 | #endif | ||
233 | |||
234 | /** | ||
235 | * plist_first - return the first node (and thus, highest priority) | ||
236 | * | ||
237 | * @head: the &struct plist_head pointer | ||
238 | * | ||
239 | * Assumes the plist is _not_ empty. | ||
240 | */ | ||
241 | static inline struct plist_node* plist_first(const struct plist_head *head) | ||
242 | { | ||
243 | return list_entry(head->node_list.next, | ||
244 | struct plist_node, plist.node_list); | ||
245 | } | ||
246 | |||
247 | #endif | ||
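A usage sketch (not part of the patch) for the plist API declared above, assuming a kernel build context; "struct waiter", my_lock and my_head are made-up names. Nodes sort by ->prio, lowest numeric value (highest priority) first, and entries of equal priority stay FIFO.

/* Sketch only: illustrates the API declared in this header. */
#include <linux/kernel.h>
#include <linux/plist.h>
#include <linux/spinlock.h>

struct waiter {
	struct plist_node node;
	int id;
};

static DEFINE_SPINLOCK(my_lock);	/* only consulted under CONFIG_DEBUG_PI_LIST */
static struct plist_head my_head;

static void plist_demo(void)
{
	struct waiter a = { .id = 1 }, b = { .id = 2 };
	struct waiter *pos;

	plist_head_init(&my_head, &my_lock);

	plist_node_init(&a.node, 10);	/* larger prio value ... */
	plist_node_init(&b.node, 5);	/* ... so b (prio 5) sorts ahead of a */

	plist_add(&a.node, &my_head);
	plist_add(&b.node, &my_head);

	/* Walks b (prio 5) first, then a (prio 10). */
	plist_for_each_entry(pos, &my_head, node)
		printk(KERN_INFO "waiter %d, prio %d\n", pos->id, pos->node.prio);

	/* Highest-priority entry, without removing it: */
	pos = plist_first_entry(&my_head, struct waiter, node);

	plist_del(&b.node, &my_head);
	plist_del(&a.node, &my_head);
}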
diff --git a/include/linux/poison.h b/include/linux/poison.h new file mode 100644 index 000000000000..a5347c02432e --- /dev/null +++ b/include/linux/poison.h | |||
@@ -0,0 +1,58 @@ | |||
1 | #ifndef _LINUX_POISON_H | ||
2 | #define _LINUX_POISON_H | ||
3 | |||
4 | /********** include/linux/list.h **********/ | ||
5 | /* | ||
6 | * These are non-NULL pointers that will result in page faults | ||
7 | * under normal circumstances, used to verify that nobody uses | ||
8 | * non-initialized list entries. | ||
9 | */ | ||
10 | #define LIST_POISON1 ((void *) 0x00100100) | ||
11 | #define LIST_POISON2 ((void *) 0x00200200) | ||
12 | |||
13 | /********** mm/slab.c **********/ | ||
14 | /* | ||
15 | * Magic nums for obj red zoning. | ||
16 | * Placed in the first word before and the first word after an obj. | ||
17 | */ | ||
18 | #define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ | ||
19 | #define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ | ||
20 | |||
21 | /* ...and for poisoning */ | ||
22 | #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ | ||
23 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ | ||
24 | #define POISON_END 0xa5 /* end-byte of poisoning */ | ||
25 | |||
26 | /********** arch/$ARCH/mm/init.c **********/ | ||
27 | #define POISON_FREE_INITMEM 0xcc | ||
28 | |||
29 | /********** arch/x86_64/mm/init.c **********/ | ||
30 | #define POISON_FREE_INITDATA 0xba | ||
31 | |||
32 | /********** arch/ia64/hp/common/sba_iommu.c **********/ | ||
33 | /* | ||
34 | * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a | ||
35 | * value of "SBAIOMMU POISON\0" for spill-over poisoning. | ||
36 | */ | ||
37 | |||
38 | /********** fs/jbd/journal.c **********/ | ||
39 | #define JBD_POISON_FREE 0x5b | ||
40 | |||
41 | /********** drivers/base/dmapool.c **********/ | ||
42 | #define POOL_POISON_FREED 0xa7 /* !inuse */ | ||
43 | #define POOL_POISON_ALLOCATED 0xa9 /* !initted */ | ||
44 | |||
45 | /********** drivers/atm/ **********/ | ||
46 | #define ATM_POISON_FREE 0x12 | ||
47 | |||
48 | /********** kernel/mutexes **********/ | ||
49 | #define MUTEX_DEBUG_INIT 0x11 | ||
50 | #define MUTEX_DEBUG_FREE 0x22 | ||
51 | |||
52 | /********** security/ **********/ | ||
53 | #define KEY_DESTROY 0xbd | ||
54 | |||
55 | /********** sound/oss/ **********/ | ||
56 | #define OSS_POISON_FREE 0xAB | ||
57 | |||
58 | #endif | ||
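An illustration (not part of the patch) of how the free-poisoning values above are typically used: fill the object with POISON_FREE on free, cap it with POISON_END, then verify the pattern on the next allocation so any write after the free stands out. poison_obj()/verify_poison() are made-up names; mm/slab.c has its own equivalents.

/* Sketch only, kernel context assumed. */
#include <linux/poison.h>
#include <linux/string.h>
#include <linux/types.h>

static void poison_obj(void *obj, size_t size)
{
	memset(obj, POISON_FREE, size - 1);
	((unsigned char *)obj)[size - 1] = POISON_END;
}

static int verify_poison(const void *obj, size_t size)
{
	const unsigned char *p = obj;
	size_t i;

	for (i = 0; i < size - 1; i++)
		if (p[i] != POISON_FREE)
			return 0;	/* someone wrote here after the free */
	return p[size - 1] == POISON_END;
}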
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6312758393b6..48dfe00070c7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -258,6 +258,7 @@ extern void rcu_init(void); | |||
258 | extern void rcu_check_callbacks(int cpu, int user); | 258 | extern void rcu_check_callbacks(int cpu, int user); |
259 | extern void rcu_restart_cpu(int cpu); | 259 | extern void rcu_restart_cpu(int cpu); |
260 | extern long rcu_batches_completed(void); | 260 | extern long rcu_batches_completed(void); |
261 | extern long rcu_batches_completed_bh(void); | ||
261 | 262 | ||
262 | /* Exported interfaces */ | 263 | /* Exported interfaces */ |
263 | extern void FASTCALL(call_rcu(struct rcu_head *head, | 264 | extern void FASTCALL(call_rcu(struct rcu_head *head, |
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h new file mode 100644 index 000000000000..fa4a3b82ba70 --- /dev/null +++ b/include/linux/rtmutex.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * RT Mutexes: blocking mutual exclusion locks with PI support | ||
3 | * | ||
4 | * started by Ingo Molnar and Thomas Gleixner: | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
7 | * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
8 | * | ||
9 | * This file contains the public data structure and API definitions. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_RT_MUTEX_H | ||
13 | #define __LINUX_RT_MUTEX_H | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <linux/plist.h> | ||
17 | #include <linux/spinlock_types.h> | ||
18 | |||
19 | /* | ||
20 | * The rt_mutex structure | ||
21 | * | ||
22 | * @wait_lock: spinlock to protect the structure | ||
23 | * @wait_list: plist head to enqueue waiters in priority order | ||
24 | * @owner: the mutex owner | ||
25 | */ | ||
26 | struct rt_mutex { | ||
27 | spinlock_t wait_lock; | ||
28 | struct plist_head wait_list; | ||
29 | struct task_struct *owner; | ||
30 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
31 | int save_state; | ||
32 | struct list_head held_list_entry; | ||
33 | unsigned long acquire_ip; | ||
34 | const char *name, *file; | ||
35 | int line; | ||
36 | void *magic; | ||
37 | #endif | ||
38 | }; | ||
39 | |||
40 | struct rt_mutex_waiter; | ||
41 | struct hrtimer_sleeper; | ||
42 | |||
43 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
44 | extern int rt_mutex_debug_check_no_locks_freed(const void *from, | ||
45 | unsigned long len); | ||
46 | extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); | ||
47 | #else | ||
48 | static inline int rt_mutex_debug_check_no_locks_freed(const void *from, | ||
49 | unsigned long len) | ||
50 | { | ||
51 | return 0; | ||
52 | } | ||
53 | # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) | ||
54 | #endif | ||
55 | |||
56 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
57 | # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ | ||
58 | , .name = #mutexname, .file = __FILE__, .line = __LINE__ | ||
59 | # define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__) | ||
60 | extern void rt_mutex_debug_task_free(struct task_struct *tsk); | ||
61 | #else | ||
62 | # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) | ||
63 | # define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) | ||
64 | # define rt_mutex_debug_task_free(t) do { } while (0) | ||
65 | #endif | ||
66 | |||
67 | #define __RT_MUTEX_INITIALIZER(mutexname) \ | ||
68 | { .wait_lock = SPIN_LOCK_UNLOCKED \ | ||
69 | , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ | ||
70 | , .owner = NULL \ | ||
71 | __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} | ||
72 | |||
73 | #define DEFINE_RT_MUTEX(mutexname) \ | ||
74 | struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) | ||
75 | |||
76 | /*** | ||
77 | * rt_mutex_is_locked - is the mutex locked | ||
78 | * @lock: the mutex to be queried | ||
79 | * | ||
80 | * Returns 1 if the mutex is locked, 0 if unlocked. | ||
81 | */ | ||
82 | static inline int rt_mutex_is_locked(struct rt_mutex *lock) | ||
83 | { | ||
84 | return lock->owner != NULL; | ||
85 | } | ||
86 | |||
87 | extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); | ||
88 | extern void rt_mutex_destroy(struct rt_mutex *lock); | ||
89 | |||
90 | extern void rt_mutex_lock(struct rt_mutex *lock); | ||
91 | extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, | ||
92 | int detect_deadlock); | ||
93 | extern int rt_mutex_timed_lock(struct rt_mutex *lock, | ||
94 | struct hrtimer_sleeper *timeout, | ||
95 | int detect_deadlock); | ||
96 | |||
97 | extern int rt_mutex_trylock(struct rt_mutex *lock); | ||
98 | |||
99 | extern void rt_mutex_unlock(struct rt_mutex *lock); | ||
100 | |||
101 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
102 | # define INIT_RT_MUTEX_DEBUG(tsk) \ | ||
103 | .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \ | ||
104 | .held_list_lock = SPIN_LOCK_UNLOCKED | ||
105 | #else | ||
106 | # define INIT_RT_MUTEX_DEBUG(tsk) | ||
107 | #endif | ||
108 | |||
109 | #ifdef CONFIG_RT_MUTEXES | ||
110 | # define INIT_RT_MUTEXES(tsk) \ | ||
111 | .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ | ||
112 | INIT_RT_MUTEX_DEBUG(tsk) | ||
113 | #else | ||
114 | # define INIT_RT_MUTEXES(tsk) | ||
115 | #endif | ||
116 | |||
117 | #endif | ||
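A usage sketch (not part of the patch) for the rt_mutex API declared above, assuming a kernel build context; my_rt_lock and the critical sections are placeholders. The difference from a plain mutex is that the owner is boosted to the priority of its highest-priority waiter, tracked through the plist of waiters.

/* Sketch only: illustrates the API declared in this header. */
#include <linux/errno.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(my_rt_lock);

static void critical_section(void)
{
	rt_mutex_lock(&my_rt_lock);
	/* ... touch data protected by my_rt_lock; while we hold it, a
	 * higher-priority waiter boosts our priority via pi_waiters ... */
	rt_mutex_unlock(&my_rt_lock);
}

static int try_critical_section(void)
{
	if (!rt_mutex_trylock(&my_rt_lock))
		return -EBUSY;	/* contended; caller decides how to back off */
	/* ... */
	rt_mutex_unlock(&my_rt_lock);
	return 0;
}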
diff --git a/include/linux/sched.h b/include/linux/sched.h index 122a25c1b997..821f0481ebe1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -73,6 +73,7 @@ struct sched_param { | |||
73 | #include <linux/seccomp.h> | 73 | #include <linux/seccomp.h> |
74 | #include <linux/rcupdate.h> | 74 | #include <linux/rcupdate.h> |
75 | #include <linux/futex.h> | 75 | #include <linux/futex.h> |
76 | #include <linux/rtmutex.h> | ||
76 | 77 | ||
77 | #include <linux/time.h> | 78 | #include <linux/time.h> |
78 | #include <linux/param.h> | 79 | #include <linux/param.h> |
@@ -83,6 +84,7 @@ struct sched_param { | |||
83 | #include <asm/processor.h> | 84 | #include <asm/processor.h> |
84 | 85 | ||
85 | struct exec_domain; | 86 | struct exec_domain; |
87 | struct futex_pi_state; | ||
86 | 88 | ||
87 | /* | 89 | /* |
88 | * List of flags we want to share for kernel threads, | 90 | * List of flags we want to share for kernel threads, |
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void); | |||
123 | extern unsigned long nr_uninterruptible(void); | 125 | extern unsigned long nr_uninterruptible(void); |
124 | extern unsigned long nr_active(void); | 126 | extern unsigned long nr_active(void); |
125 | extern unsigned long nr_iowait(void); | 127 | extern unsigned long nr_iowait(void); |
128 | extern unsigned long weighted_cpuload(const int cpu); | ||
126 | 129 | ||
127 | 130 | ||
128 | /* | 131 | /* |
@@ -494,8 +497,11 @@ struct signal_struct { | |||
494 | 497 | ||
495 | #define MAX_PRIO (MAX_RT_PRIO + 40) | 498 | #define MAX_PRIO (MAX_RT_PRIO + 40) |
496 | 499 | ||
497 | #define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO)) | 500 | #define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO) |
501 | #define rt_task(p) rt_prio((p)->prio) | ||
498 | #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) | 502 | #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) |
503 | #define has_rt_policy(p) \ | ||
504 | unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH) | ||
499 | 505 | ||
500 | /* | 506 | /* |
501 | * Some day this will be a full-fledged user tracking system.. | 507 | * Some day this will be a full-fledged user tracking system.. |
@@ -558,9 +564,9 @@ enum idle_type | |||
558 | /* | 564 | /* |
559 | * sched-domains (multiprocessor balancing) declarations: | 565 | * sched-domains (multiprocessor balancing) declarations: |
560 | */ | 566 | */ |
561 | #ifdef CONFIG_SMP | ||
562 | #define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ | 567 | #define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ |
563 | 568 | ||
569 | #ifdef CONFIG_SMP | ||
564 | #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ | 570 | #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ |
565 | #define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ | 571 | #define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ |
566 | #define SD_BALANCE_EXEC 4 /* Balance on exec */ | 572 | #define SD_BALANCE_EXEC 4 /* Balance on exec */ |
@@ -569,6 +575,11 @@ enum idle_type | |||
569 | #define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ | 575 | #define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ |
570 | #define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ | 576 | #define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ |
571 | #define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ | 577 | #define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ |
578 | #define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ | ||
579 | |||
580 | #define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \ | ||
581 | ? SD_POWERSAVINGS_BALANCE : 0) | ||
582 | |||
572 | 583 | ||
573 | struct sched_group { | 584 | struct sched_group { |
574 | struct sched_group *next; /* Must be a circular list */ | 585 | struct sched_group *next; /* Must be a circular list */ |
@@ -638,7 +649,7 @@ struct sched_domain { | |||
638 | #endif | 649 | #endif |
639 | }; | 650 | }; |
640 | 651 | ||
641 | extern void partition_sched_domains(cpumask_t *partition1, | 652 | extern int partition_sched_domains(cpumask_t *partition1, |
642 | cpumask_t *partition2); | 653 | cpumask_t *partition2); |
643 | 654 | ||
644 | /* | 655 | /* |
@@ -713,10 +724,13 @@ struct task_struct { | |||
713 | 724 | ||
714 | int lock_depth; /* BKL lock depth */ | 725 | int lock_depth; /* BKL lock depth */ |
715 | 726 | ||
716 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 727 | #ifdef CONFIG_SMP |
728 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | ||
717 | int oncpu; | 729 | int oncpu; |
718 | #endif | 730 | #endif |
719 | int prio, static_prio; | 731 | #endif |
732 | int load_weight; /* for niceness load balancing purposes */ | ||
733 | int prio, static_prio, normal_prio; | ||
720 | struct list_head run_list; | 734 | struct list_head run_list; |
721 | prio_array_t *array; | 735 | prio_array_t *array; |
722 | 736 | ||
@@ -843,6 +857,20 @@ struct task_struct { | |||
843 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ | 857 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ |
844 | spinlock_t alloc_lock; | 858 | spinlock_t alloc_lock; |
845 | 859 | ||
860 | /* Protection of the PI data structures: */ | ||
861 | spinlock_t pi_lock; | ||
862 | |||
863 | #ifdef CONFIG_RT_MUTEXES | ||
864 | /* PI waiters blocked on a rt_mutex held by this task */ | ||
865 | struct plist_head pi_waiters; | ||
866 | /* Deadlock detection and priority inheritance handling */ | ||
867 | struct rt_mutex_waiter *pi_blocked_on; | ||
868 | # ifdef CONFIG_DEBUG_RT_MUTEXES | ||
869 | spinlock_t held_list_lock; | ||
870 | struct list_head held_list_head; | ||
871 | # endif | ||
872 | #endif | ||
873 | |||
846 | #ifdef CONFIG_DEBUG_MUTEXES | 874 | #ifdef CONFIG_DEBUG_MUTEXES |
847 | /* mutex deadlock detection */ | 875 | /* mutex deadlock detection */ |
848 | struct mutex_waiter *blocked_on; | 876 | struct mutex_waiter *blocked_on; |
@@ -888,6 +916,8 @@ struct task_struct { | |||
888 | #ifdef CONFIG_COMPAT | 916 | #ifdef CONFIG_COMPAT |
889 | struct compat_robust_list_head __user *compat_robust_list; | 917 | struct compat_robust_list_head __user *compat_robust_list; |
890 | #endif | 918 | #endif |
919 | struct list_head pi_state_list; | ||
920 | struct futex_pi_state *pi_state_cache; | ||
891 | 921 | ||
892 | atomic_t fs_excl; /* holding fs exclusive resources */ | 922 | atomic_t fs_excl; /* holding fs exclusive resources */ |
893 | struct rcu_head rcu; | 923 | struct rcu_head rcu; |
@@ -955,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t) | |||
955 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ | 985 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ |
956 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ | 986 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ |
957 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 987 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
988 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | ||
958 | 989 | ||
959 | /* | 990 | /* |
960 | * Only the _current_ task can read/write to tsk->flags, but other | 991 | * Only the _current_ task can read/write to tsk->flags, but other |
@@ -1009,6 +1040,19 @@ static inline void idle_task_exit(void) {} | |||
1009 | #endif | 1040 | #endif |
1010 | 1041 | ||
1011 | extern void sched_idle_next(void); | 1042 | extern void sched_idle_next(void); |
1043 | |||
1044 | #ifdef CONFIG_RT_MUTEXES | ||
1045 | extern int rt_mutex_getprio(task_t *p); | ||
1046 | extern void rt_mutex_setprio(task_t *p, int prio); | ||
1047 | extern void rt_mutex_adjust_pi(task_t *p); | ||
1048 | #else | ||
1049 | static inline int rt_mutex_getprio(task_t *p) | ||
1050 | { | ||
1051 | return p->normal_prio; | ||
1052 | } | ||
1053 | # define rt_mutex_adjust_pi(p) do { } while (0) | ||
1054 | #endif | ||
1055 | |||
1012 | extern void set_user_nice(task_t *p, long nice); | 1056 | extern void set_user_nice(task_t *p, long nice); |
1013 | extern int task_prio(const task_t *p); | 1057 | extern int task_prio(const task_t *p); |
1014 | extern int task_nice(const task_t *p); | 1058 | extern int task_nice(const task_t *p); |
@@ -1408,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) | |||
1408 | extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); | 1452 | extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); |
1409 | extern long sched_getaffinity(pid_t pid, cpumask_t *mask); | 1453 | extern long sched_getaffinity(pid_t pid, cpumask_t *mask); |
1410 | 1454 | ||
1455 | #include <linux/sysdev.h> | ||
1456 | extern int sched_mc_power_savings, sched_smt_power_savings; | ||
1457 | extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings; | ||
1458 | extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); | ||
1459 | |||
1411 | extern void normalize_rt_tasks(void); | 1460 | extern void normalize_rt_tasks(void); |
1412 | 1461 | ||
1413 | #ifdef CONFIG_PM | 1462 | #ifdef CONFIG_PM |
diff --git a/include/linux/scx200.h b/include/linux/scx200.h index a22f9e173ad2..693c0557e70b 100644 --- a/include/linux/scx200.h +++ b/include/linux/scx200.h | |||
@@ -49,10 +49,3 @@ extern unsigned scx200_cb_base; | |||
49 | #define SCx200_REV 0x3d /* Revision Register */ | 49 | #define SCx200_REV 0x3d /* Revision Register */ |
50 | #define SCx200_CBA 0x3e /* Configuration Base Address Register */ | 50 | #define SCx200_CBA 0x3e /* Configuration Base Address Register */ |
51 | #define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ | 51 | #define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ |
52 | |||
53 | /* | ||
54 | Local variables: | ||
55 | compile-command: "make -C ../.. bzImage modules" | ||
56 | c-basic-offset: 8 | ||
57 | End: | ||
58 | */ | ||
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h index 30cdd648ba79..90dd069cc145 100644 --- a/include/linux/scx200_gpio.h +++ b/include/linux/scx200_gpio.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/spinlock.h> | 1 | #include <linux/spinlock.h> |
2 | 2 | ||
3 | u32 scx200_gpio_configure(int index, u32 set, u32 clear); | 3 | u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear); |
4 | 4 | ||
5 | extern unsigned scx200_gpio_base; | 5 | extern unsigned scx200_gpio_base; |
6 | extern long scx200_gpio_shadow[2]; | 6 | extern long scx200_gpio_shadow[2]; |
@@ -17,7 +17,7 @@ extern long scx200_gpio_shadow[2]; | |||
17 | 17 | ||
18 | /* returns the value of the GPIO pin */ | 18 | /* returns the value of the GPIO pin */ |
19 | 19 | ||
20 | static inline int scx200_gpio_get(int index) { | 20 | static inline int scx200_gpio_get(unsigned index) { |
21 | __SCx200_GPIO_BANK; | 21 | __SCx200_GPIO_BANK; |
22 | __SCx200_GPIO_IOADDR + 0x04; | 22 | __SCx200_GPIO_IOADDR + 0x04; |
23 | __SCx200_GPIO_INDEX; | 23 | __SCx200_GPIO_INDEX; |
@@ -29,7 +29,7 @@ static inline int scx200_gpio_get(int index) { | |||
29 | driven if the GPIO is configured as an output, it might not be the | 29 | driven if the GPIO is configured as an output, it might not be the |
30 | state of the GPIO right now if the GPIO is configured as an input) */ | 30 | state of the GPIO right now if the GPIO is configured as an input) */ |
31 | 31 | ||
32 | static inline int scx200_gpio_current(int index) { | 32 | static inline int scx200_gpio_current(unsigned index) { |
33 | __SCx200_GPIO_BANK; | 33 | __SCx200_GPIO_BANK; |
34 | __SCx200_GPIO_INDEX; | 34 | __SCx200_GPIO_INDEX; |
35 | 35 | ||
@@ -38,7 +38,7 @@ static inline int scx200_gpio_current(int index) { | |||
38 | 38 | ||
39 | /* drive the GPIO signal high */ | 39 | /* drive the GPIO signal high */ |
40 | 40 | ||
41 | static inline void scx200_gpio_set_high(int index) { | 41 | static inline void scx200_gpio_set_high(unsigned index) { |
42 | __SCx200_GPIO_BANK; | 42 | __SCx200_GPIO_BANK; |
43 | __SCx200_GPIO_IOADDR; | 43 | __SCx200_GPIO_IOADDR; |
44 | __SCx200_GPIO_SHADOW; | 44 | __SCx200_GPIO_SHADOW; |
@@ -49,7 +49,7 @@ static inline void scx200_gpio_set_high(int index) { | |||
49 | 49 | ||
50 | /* drive the GPIO signal low */ | 50 | /* drive the GPIO signal low */ |
51 | 51 | ||
52 | static inline void scx200_gpio_set_low(int index) { | 52 | static inline void scx200_gpio_set_low(unsigned index) { |
53 | __SCx200_GPIO_BANK; | 53 | __SCx200_GPIO_BANK; |
54 | __SCx200_GPIO_IOADDR; | 54 | __SCx200_GPIO_IOADDR; |
55 | __SCx200_GPIO_SHADOW; | 55 | __SCx200_GPIO_SHADOW; |
@@ -60,7 +60,7 @@ static inline void scx200_gpio_set_low(int index) { | |||
60 | 60 | ||
61 | /* drive the GPIO signal to state */ | 61 | /* drive the GPIO signal to state */ |
62 | 62 | ||
63 | static inline void scx200_gpio_set(int index, int state) { | 63 | static inline void scx200_gpio_set(unsigned index, int state) { |
64 | __SCx200_GPIO_BANK; | 64 | __SCx200_GPIO_BANK; |
65 | __SCx200_GPIO_IOADDR; | 65 | __SCx200_GPIO_IOADDR; |
66 | __SCx200_GPIO_SHADOW; | 66 | __SCx200_GPIO_SHADOW; |
@@ -73,7 +73,7 @@ static inline void scx200_gpio_set(int index, int state) { | |||
73 | } | 73 | } |
74 | 74 | ||
75 | /* toggle the GPIO signal */ | 75 | /* toggle the GPIO signal */ |
76 | static inline void scx200_gpio_change(int index) { | 76 | static inline void scx200_gpio_change(unsigned index) { |
77 | __SCx200_GPIO_BANK; | 77 | __SCx200_GPIO_BANK; |
78 | __SCx200_GPIO_IOADDR; | 78 | __SCx200_GPIO_IOADDR; |
79 | __SCx200_GPIO_SHADOW; | 79 | __SCx200_GPIO_SHADOW; |
@@ -87,10 +87,3 @@ static inline void scx200_gpio_change(int index) { | |||
87 | #undef __SCx200_GPIO_SHADOW | 87 | #undef __SCx200_GPIO_SHADOW |
88 | #undef __SCx200_GPIO_INDEX | 88 | #undef __SCx200_GPIO_INDEX |
89 | #undef __SCx200_GPIO_OUT | 89 | #undef __SCx200_GPIO_OUT |
90 | |||
91 | /* | ||
92 | Local variables: | ||
93 | compile-command: "make -C ../.. bzImage modules" | ||
94 | c-basic-offset: 8 | ||
95 | End: | ||
96 | */ | ||
diff --git a/include/linux/swap.h b/include/linux/swap.h index dc3f3aa0c83e..c41e2d6d1acc 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -199,6 +199,8 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) | |||
199 | } | 199 | } |
200 | #endif | 200 | #endif |
201 | 201 | ||
202 | extern int kswapd_run(int nid); | ||
203 | |||
202 | #ifdef CONFIG_MMU | 204 | #ifdef CONFIG_MMU |
203 | /* linux/mm/shmem.c */ | 205 | /* linux/mm/shmem.c */ |
204 | extern int shmem_unuse(swp_entry_t entry, struct page *page); | 206 | extern int shmem_unuse(swp_entry_t entry, struct page *page); |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 33785b79d548..008f04c56737 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -174,9 +174,9 @@ asmlinkage long sys_waitid(int which, pid_t pid, | |||
174 | int options, struct rusage __user *ru); | 174 | int options, struct rusage __user *ru); |
175 | asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); | 175 | asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); |
176 | asmlinkage long sys_set_tid_address(int __user *tidptr); | 176 | asmlinkage long sys_set_tid_address(int __user *tidptr); |
177 | asmlinkage long sys_futex(u32 __user *uaddr, int op, int val, | 177 | asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, |
178 | struct timespec __user *utime, u32 __user *uaddr2, | 178 | struct timespec __user *utime, u32 __user *uaddr2, |
179 | int val3); | 179 | u32 val3); |
180 | 180 | ||
181 | asmlinkage long sys_init_module(void __user *umod, unsigned long len, | 181 | asmlinkage long sys_init_module(void __user *umod, unsigned long len, |
182 | const char __user *uargs); | 182 | const char __user *uargs); |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 349ef908a222..46e4d8f2771f 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -149,6 +149,7 @@ enum | |||
149 | KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ | 149 | KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ |
150 | KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ | 150 | KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ |
151 | KERN_COMPAT_LOG=73, /* int: print compat layer messages */ | 151 | KERN_COMPAT_LOG=73, /* int: print compat layer messages */ |
152 | KERN_MAX_LOCK_DEPTH=74, | ||
152 | }; | 153 | }; |
153 | 154 | ||
154 | 155 | ||
@@ -189,6 +190,7 @@ enum | |||
189 | VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ | 190 | VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ |
190 | VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ | 191 | VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ |
191 | VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ | 192 | VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ |
193 | VM_VDSO_ENABLED=34, /* map VDSO into new processes? */ | ||
192 | }; | 194 | }; |
193 | 195 | ||
194 | 196 | ||
diff --git a/include/linux/topology.h b/include/linux/topology.h index a305ae2e44b6..ec1eca85290a 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -134,7 +134,8 @@ | |||
134 | .flags = SD_LOAD_BALANCE \ | 134 | .flags = SD_LOAD_BALANCE \ |
135 | | SD_BALANCE_NEWIDLE \ | 135 | | SD_BALANCE_NEWIDLE \ |
136 | | SD_BALANCE_EXEC \ | 136 | | SD_BALANCE_EXEC \ |
137 | | SD_WAKE_AFFINE, \ | 137 | | SD_WAKE_AFFINE \ |
138 | | BALANCE_FOR_POWER, \ | ||
138 | .last_balance = jiffies, \ | 139 | .last_balance = jiffies, \ |
139 | .balance_interval = 1, \ | 140 | .balance_interval = 1, \ |
140 | .nr_balance_failed = 0, \ | 141 | .nr_balance_failed = 0, \ |
diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h index 074c4008ad52..d91d88f93c8b 100644 --- a/include/media/cx2341x.h +++ b/include/media/cx2341x.h | |||
@@ -89,9 +89,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params, | |||
89 | struct v4l2_queryctrl *qctrl); | 89 | struct v4l2_queryctrl *qctrl); |
90 | const char **cx2341x_ctrl_get_menu(u32 id); | 90 | const char **cx2341x_ctrl_get_menu(u32 id); |
91 | int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, | 91 | int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, |
92 | struct v4l2_ext_controls *ctrls, int cmd); | 92 | struct v4l2_ext_controls *ctrls, unsigned int cmd); |
93 | void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p); | 93 | void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p); |
94 | void cx2341x_log_status(struct cx2341x_mpeg_params *p, int cardid); | 94 | void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix); |
95 | 95 | ||
96 | /* Firmware names */ | 96 | /* Firmware names */ |
97 | #define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw" | 97 | #define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw" |
diff --git a/init/Kconfig b/init/Kconfig index df55b3665601..f70f2fd273c2 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -339,9 +339,14 @@ config BASE_FULL | |||
339 | kernel data structures. This saves memory on small machines, | 339 | kernel data structures. This saves memory on small machines, |
340 | but may reduce performance. | 340 | but may reduce performance. |
341 | 341 | ||
342 | config RT_MUTEXES | ||
343 | boolean | ||
344 | select PLIST | ||
345 | |||
342 | config FUTEX | 346 | config FUTEX |
343 | bool "Enable futex support" if EMBEDDED | 347 | bool "Enable futex support" if EMBEDDED |
344 | default y | 348 | default y |
349 | select RT_MUTEXES | ||
345 | help | 350 | help |
346 | Disabling this option will cause the kernel to be built without | 351 | Disabling this option will cause the kernel to be built without |
347 | support for "fast userspace mutexes". The resulting kernel may not | 352 | support for "fast userspace mutexes". The resulting kernel may not |
diff --git a/init/main.c b/init/main.c index 80af1a52485f..0d57f6ccb63a 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/mempolicy.h> | 48 | #include <linux/mempolicy.h> |
49 | #include <linux/key.h> | 49 | #include <linux/key.h> |
50 | #include <linux/unwind.h> | 50 | #include <linux/unwind.h> |
51 | #include <linux/buffer_head.h> | ||
51 | 52 | ||
52 | #include <asm/io.h> | 53 | #include <asm/io.h> |
53 | #include <asm/bugs.h> | 54 | #include <asm/bugs.h> |
@@ -80,7 +81,6 @@ extern void mca_init(void); | |||
80 | extern void sbus_init(void); | 81 | extern void sbus_init(void); |
81 | extern void sysctl_init(void); | 82 | extern void sysctl_init(void); |
82 | extern void signals_init(void); | 83 | extern void signals_init(void); |
83 | extern void buffer_init(void); | ||
84 | extern void pidhash_init(void); | 84 | extern void pidhash_init(void); |
85 | extern void pidmap_init(void); | 85 | extern void pidmap_init(void); |
86 | extern void prio_tree_init(void); | 86 | extern void prio_tree_init(void); |
diff --git a/kernel/Makefile b/kernel/Makefile index 752bd7d383af..82fb182f6f61 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -16,6 +16,9 @@ obj-$(CONFIG_FUTEX) += futex.o | |||
16 | ifeq ($(CONFIG_COMPAT),y) | 16 | ifeq ($(CONFIG_COMPAT),y) |
17 | obj-$(CONFIG_FUTEX) += futex_compat.o | 17 | obj-$(CONFIG_FUTEX) += futex_compat.o |
18 | endif | 18 | endif |
19 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o | ||
20 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o | ||
21 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o | ||
19 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o | 22 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o |
20 | obj-$(CONFIG_SMP) += cpu.o spinlock.o | 23 | obj-$(CONFIG_SMP) += cpu.o spinlock.o |
21 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o | 24 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o |
diff --git a/kernel/acct.c b/kernel/acct.c index 368c4f03fe0e..126ca43d5d2b 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -521,6 +521,7 @@ static void do_acct_process(struct file *file) | |||
521 | 521 | ||
522 | /** | 522 | /** |
523 | * acct_init_pacct - initialize a new pacct_struct | 523 | * acct_init_pacct - initialize a new pacct_struct |
524 | * @pacct: per-process accounting info struct to initialize | ||
524 | */ | 525 | */ |
525 | void acct_init_pacct(struct pacct_struct *pacct) | 526 | void acct_init_pacct(struct pacct_struct *pacct) |
526 | { | 527 | { |
@@ -576,7 +577,7 @@ void acct_collect(long exitcode, int group_dead) | |||
576 | * | 577 | * |
577 | * handles process accounting for an exiting task | 578 | * handles process accounting for an exiting task |
578 | */ | 579 | */ |
579 | void acct_process() | 580 | void acct_process(void) |
580 | { | 581 | { |
581 | struct file *file = NULL; | 582 | struct file *file = NULL; |
582 | 583 | ||
diff --git a/kernel/audit.c b/kernel/audit.c index 7dfac7031bd7..82443fb433ef 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
@@ -818,7 +818,7 @@ err: | |||
818 | */ | 818 | */ |
819 | unsigned int audit_serial(void) | 819 | unsigned int audit_serial(void) |
820 | { | 820 | { |
821 | static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED; | 821 | static DEFINE_SPINLOCK(serial_lock); |
822 | static unsigned int serial = 0; | 822 | static unsigned int serial = 0; |
823 | 823 | ||
824 | unsigned long flags; | 824 | unsigned long flags; |
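The hunk above swaps the deprecated SPIN_LOCK_UNLOCKED static initializer for DEFINE_SPINLOCK(). A short sketch of the same idiom with made-up names, mirroring what audit_serial() does:

/* Sketch only, kernel context assumed. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_serial;

static unsigned int demo_next_serial(void)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&demo_lock, flags);
	ret = ++demo_serial;
	spin_unlock_irqrestore(&demo_lock, flags);

	return ret;
}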
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 9ebd96fda295..dc5e3f01efe7 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
@@ -658,8 +658,7 @@ static void audit_log_task_context(struct audit_buffer *ab) | |||
658 | return; | 658 | return; |
659 | 659 | ||
660 | error_path: | 660 | error_path: |
661 | if (ctx) | 661 | kfree(ctx); |
662 | kfree(ctx); | ||
663 | audit_panic("error in audit_log_task_context"); | 662 | audit_panic("error in audit_log_task_context"); |
664 | return; | 663 | return; |
665 | } | 664 | } |
@@ -1367,7 +1366,7 @@ int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) | |||
1367 | * @mqdes: MQ descriptor | 1366 | * @mqdes: MQ descriptor |
1368 | * @msg_len: Message length | 1367 | * @msg_len: Message length |
1369 | * @msg_prio: Message priority | 1368 | * @msg_prio: Message priority |
1370 | * @abs_timeout: Message timeout in absolute time | 1369 | * @u_abs_timeout: Message timeout in absolute time |
1371 | * | 1370 | * |
1372 | * Returns 0 for success or NULL context or < 0 on error. | 1371 | * Returns 0 for success or NULL context or < 0 on error. |
1373 | */ | 1372 | */ |
@@ -1409,8 +1408,8 @@ int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, | |||
1409 | * __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive | 1408 | * __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive |
1410 | * @mqdes: MQ descriptor | 1409 | * @mqdes: MQ descriptor |
1411 | * @msg_len: Message length | 1410 | * @msg_len: Message length |
1412 | * @msg_prio: Message priority | 1411 | * @u_msg_prio: Message priority |
1413 | * @abs_timeout: Message timeout in absolute time | 1412 | * @u_abs_timeout: Message timeout in absolute time |
1414 | * | 1413 | * |
1415 | * Returns 0 for success or NULL context or < 0 on error. | 1414 | * Returns 0 for success or NULL context or < 0 on error. |
1416 | */ | 1415 | */ |
@@ -1558,7 +1557,6 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp) | |||
1558 | * @uid: msgq user id | 1557 | * @uid: msgq user id |
1559 | * @gid: msgq group id | 1558 | * @gid: msgq group id |
1560 | * @mode: msgq mode (permissions) | 1559 | * @mode: msgq mode (permissions) |
1561 | * @ipcp: in-kernel IPC permissions | ||
1562 | * | 1560 | * |
1563 | * Returns 0 for success or NULL context or < 0 on error. | 1561 | * Returns 0 for success or NULL context or < 0 on error. |
1564 | */ | 1562 | */ |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 03dcd981846a..70fbf2e83766 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -18,7 +18,7 @@ | |||
18 | /* This protects CPUs going up and down... */ | 18 | /* This protects CPUs going up and down... */ |
19 | static DEFINE_MUTEX(cpucontrol); | 19 | static DEFINE_MUTEX(cpucontrol); |
20 | 20 | ||
21 | static BLOCKING_NOTIFIER_HEAD(cpu_chain); | 21 | static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); |
22 | 22 | ||
23 | #ifdef CONFIG_HOTPLUG_CPU | 23 | #ifdef CONFIG_HOTPLUG_CPU |
24 | static struct task_struct *lock_cpu_hotplug_owner; | 24 | static struct task_struct *lock_cpu_hotplug_owner; |
@@ -69,10 +69,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible); | |||
69 | #endif /* CONFIG_HOTPLUG_CPU */ | 69 | #endif /* CONFIG_HOTPLUG_CPU */ |
70 | 70 | ||
71 | /* Need to know about CPUs going up/down? */ | 71 | /* Need to know about CPUs going up/down? */ |
72 | int register_cpu_notifier(struct notifier_block *nb) | 72 | int __cpuinit register_cpu_notifier(struct notifier_block *nb) |
73 | { | 73 | { |
74 | return blocking_notifier_chain_register(&cpu_chain, nb); | 74 | return blocking_notifier_chain_register(&cpu_chain, nb); |
75 | } | 75 | } |
76 | |||
77 | #ifdef CONFIG_HOTPLUG_CPU | ||
78 | |||
76 | EXPORT_SYMBOL(register_cpu_notifier); | 79 | EXPORT_SYMBOL(register_cpu_notifier); |
77 | 80 | ||
78 | void unregister_cpu_notifier(struct notifier_block *nb) | 81 | void unregister_cpu_notifier(struct notifier_block *nb) |
@@ -81,7 +84,6 @@ void unregister_cpu_notifier(struct notifier_block *nb) | |||
81 | } | 84 | } |
82 | EXPORT_SYMBOL(unregister_cpu_notifier); | 85 | EXPORT_SYMBOL(unregister_cpu_notifier); |
83 | 86 | ||
84 | #ifdef CONFIG_HOTPLUG_CPU | ||
85 | static inline void check_for_tasks(int cpu) | 87 | static inline void check_for_tasks(int cpu) |
86 | { | 88 | { |
87 | struct task_struct *p; | 89 | struct task_struct *p; |
diff --git a/kernel/exit.c b/kernel/exit.c index 304ef637be6c..ab06b9f88f64 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -926,9 +926,18 @@ fastcall NORET_TYPE void do_exit(long code) | |||
926 | tsk->mempolicy = NULL; | 926 | tsk->mempolicy = NULL; |
927 | #endif | 927 | #endif |
928 | /* | 928 | /* |
929 | * This must happen late, after the PID is not | ||
930 | * hashed anymore: | ||
931 | */ | ||
932 | if (unlikely(!list_empty(&tsk->pi_state_list))) | ||
933 | exit_pi_state_list(tsk); | ||
934 | if (unlikely(current->pi_state_cache)) | ||
935 | kfree(current->pi_state_cache); | ||
936 | /* | ||
929 | * If DEBUG_MUTEXES is on, make sure we are holding no locks: | 937 | * If DEBUG_MUTEXES is on, make sure we are holding no locks: |
930 | */ | 938 | */ |
931 | mutex_debug_check_no_locks_held(tsk); | 939 | mutex_debug_check_no_locks_held(tsk); |
940 | rt_mutex_debug_check_no_locks_held(tsk); | ||
932 | 941 | ||
933 | if (tsk->io_context) | 942 | if (tsk->io_context) |
934 | exit_io_context(); | 943 | exit_io_context(); |
diff --git a/kernel/fork.c b/kernel/fork.c index 9b4e54ef0225..628198a4f28a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -104,6 +104,7 @@ static kmem_cache_t *mm_cachep; | |||
104 | void free_task(struct task_struct *tsk) | 104 | void free_task(struct task_struct *tsk) |
105 | { | 105 | { |
106 | free_thread_info(tsk->thread_info); | 106 | free_thread_info(tsk->thread_info); |
107 | rt_mutex_debug_task_free(tsk); | ||
107 | free_task_struct(tsk); | 108 | free_task_struct(tsk); |
108 | } | 109 | } |
109 | EXPORT_SYMBOL(free_task); | 110 | EXPORT_SYMBOL(free_task); |
@@ -913,6 +914,19 @@ asmlinkage long sys_set_tid_address(int __user *tidptr) | |||
913 | return current->pid; | 914 | return current->pid; |
914 | } | 915 | } |
915 | 916 | ||
917 | static inline void rt_mutex_init_task(struct task_struct *p) | ||
918 | { | ||
919 | #ifdef CONFIG_RT_MUTEXES | ||
920 | spin_lock_init(&p->pi_lock); | ||
921 | plist_head_init(&p->pi_waiters, &p->pi_lock); | ||
922 | p->pi_blocked_on = NULL; | ||
923 | # ifdef CONFIG_DEBUG_RT_MUTEXES | ||
924 | spin_lock_init(&p->held_list_lock); | ||
925 | INIT_LIST_HEAD(&p->held_list_head); | ||
926 | # endif | ||
927 | #endif | ||
928 | } | ||
929 | |||
916 | /* | 930 | /* |
917 | * This creates a new process as a copy of the old one, | 931 | * This creates a new process as a copy of the old one, |
918 | * but does not actually start it yet. | 932 | * but does not actually start it yet. |
@@ -1034,6 +1048,8 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1034 | mpol_fix_fork_child_flag(p); | 1048 | mpol_fix_fork_child_flag(p); |
1035 | #endif | 1049 | #endif |
1036 | 1050 | ||
1051 | rt_mutex_init_task(p); | ||
1052 | |||
1037 | #ifdef CONFIG_DEBUG_MUTEXES | 1053 | #ifdef CONFIG_DEBUG_MUTEXES |
1038 | p->blocked_on = NULL; /* not blocked yet */ | 1054 | p->blocked_on = NULL; /* not blocked yet */ |
1039 | #endif | 1055 | #endif |
@@ -1076,6 +1092,9 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1076 | #ifdef CONFIG_COMPAT | 1092 | #ifdef CONFIG_COMPAT |
1077 | p->compat_robust_list = NULL; | 1093 | p->compat_robust_list = NULL; |
1078 | #endif | 1094 | #endif |
1095 | INIT_LIST_HEAD(&p->pi_state_list); | ||
1096 | p->pi_state_cache = NULL; | ||
1097 | |||
1079 | /* | 1098 | /* |
1080 | * sigaltstack should be cleared when sharing the same VM | 1099 | * sigaltstack should be cleared when sharing the same VM |
1081 | */ | 1100 | */ |
diff --git a/kernel/futex.c b/kernel/futex.c index e1a380c77a5a..6c91f938005d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -12,6 +12,10 @@ | |||
12 | * (C) Copyright 2006 Red Hat Inc, All Rights Reserved | 12 | * (C) Copyright 2006 Red Hat Inc, All Rights Reserved |
13 | * Thanks to Thomas Gleixner for suggestions, analysis and fixes. | 13 | * Thanks to Thomas Gleixner for suggestions, analysis and fixes. |
14 | * | 14 | * |
15 | * PI-futex support started by Ingo Molnar and Thomas Gleixner | ||
16 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
17 | * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
18 | * | ||
15 | * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly | 19 | * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly |
16 | * enough at me, Linus for the original (flawed) idea, Matthew | 20 | * enough at me, Linus for the original (flawed) idea, Matthew |
17 | * Kirkwood for proof-of-concept implementation. | 21 | * Kirkwood for proof-of-concept implementation. |
@@ -46,6 +50,8 @@ | |||
46 | #include <linux/signal.h> | 50 | #include <linux/signal.h> |
47 | #include <asm/futex.h> | 51 | #include <asm/futex.h> |
48 | 52 | ||
53 | #include "rtmutex_common.h" | ||
54 | |||
49 | #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) | 55 | #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) |
50 | 56 | ||
51 | /* | 57 | /* |
@@ -63,7 +69,7 @@ union futex_key { | |||
63 | int offset; | 69 | int offset; |
64 | } shared; | 70 | } shared; |
65 | struct { | 71 | struct { |
66 | unsigned long uaddr; | 72 | unsigned long address; |
67 | struct mm_struct *mm; | 73 | struct mm_struct *mm; |
68 | int offset; | 74 | int offset; |
69 | } private; | 75 | } private; |
@@ -75,6 +81,27 @@ union futex_key { | |||
75 | }; | 81 | }; |
76 | 82 | ||
77 | /* | 83 | /* |
84 | * Priority Inheritance state: | ||
85 | */ | ||
86 | struct futex_pi_state { | ||
87 | /* | ||
88 | * list of 'owned' pi_state instances - these have to be | ||
89 | * cleaned up in do_exit() if the task exits prematurely: | ||
90 | */ | ||
91 | struct list_head list; | ||
92 | |||
93 | /* | ||
94 | * The PI object: | ||
95 | */ | ||
96 | struct rt_mutex pi_mutex; | ||
97 | |||
98 | struct task_struct *owner; | ||
99 | atomic_t refcount; | ||
100 | |||
101 | union futex_key key; | ||
102 | }; | ||
103 | |||
104 | /* | ||
78 | * We use this hashed waitqueue instead of a normal wait_queue_t, so | 105 | * We use this hashed waitqueue instead of a normal wait_queue_t, so |
79 | * we can wake only the relevant ones (hashed queues may be shared). | 106 | * we can wake only the relevant ones (hashed queues may be shared). |
80 | * | 107 | * |
@@ -87,15 +114,19 @@ struct futex_q { | |||
87 | struct list_head list; | 114 | struct list_head list; |
88 | wait_queue_head_t waiters; | 115 | wait_queue_head_t waiters; |
89 | 116 | ||
90 | /* Which hash list lock to use. */ | 117 | /* Which hash list lock to use: */ |
91 | spinlock_t *lock_ptr; | 118 | spinlock_t *lock_ptr; |
92 | 119 | ||
93 | /* Key which the futex is hashed on. */ | 120 | /* Key which the futex is hashed on: */ |
94 | union futex_key key; | 121 | union futex_key key; |
95 | 122 | ||
96 | /* For fd, sigio sent using these. */ | 123 | /* For fd, sigio sent using these: */ |
97 | int fd; | 124 | int fd; |
98 | struct file *filp; | 125 | struct file *filp; |
126 | |||
127 | /* Optional priority inheritance state: */ | ||
128 | struct futex_pi_state *pi_state; | ||
129 | struct task_struct *task; | ||
99 | }; | 130 | }; |
100 | 131 | ||
101 | /* | 132 | /* |
@@ -144,8 +175,9 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2) | |||
144 | * | 175 | * |
145 | * Should be called with &current->mm->mmap_sem but NOT any spinlocks. | 176 | * Should be called with &current->mm->mmap_sem but NOT any spinlocks. |
146 | */ | 177 | */ |
147 | static int get_futex_key(unsigned long uaddr, union futex_key *key) | 178 | static int get_futex_key(u32 __user *uaddr, union futex_key *key) |
148 | { | 179 | { |
180 | unsigned long address = (unsigned long)uaddr; | ||
149 | struct mm_struct *mm = current->mm; | 181 | struct mm_struct *mm = current->mm; |
150 | struct vm_area_struct *vma; | 182 | struct vm_area_struct *vma; |
151 | struct page *page; | 183 | struct page *page; |
@@ -154,16 +186,16 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key) | |||
154 | /* | 186 | /* |
155 | * The futex address must be "naturally" aligned. | 187 | * The futex address must be "naturally" aligned. |
156 | */ | 188 | */ |
157 | key->both.offset = uaddr % PAGE_SIZE; | 189 | key->both.offset = address % PAGE_SIZE; |
158 | if (unlikely((key->both.offset % sizeof(u32)) != 0)) | 190 | if (unlikely((key->both.offset % sizeof(u32)) != 0)) |
159 | return -EINVAL; | 191 | return -EINVAL; |
160 | uaddr -= key->both.offset; | 192 | address -= key->both.offset; |
161 | 193 | ||
162 | /* | 194 | /* |
163 | * The futex is hashed differently depending on whether | 195 | * The futex is hashed differently depending on whether |
164 | * it's in a shared or private mapping. So check vma first. | 196 | * it's in a shared or private mapping. So check vma first. |
165 | */ | 197 | */ |
166 | vma = find_extend_vma(mm, uaddr); | 198 | vma = find_extend_vma(mm, address); |
167 | if (unlikely(!vma)) | 199 | if (unlikely(!vma)) |
168 | return -EFAULT; | 200 | return -EFAULT; |
169 | 201 | ||
@@ -184,7 +216,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key) | |||
184 | */ | 216 | */ |
185 | if (likely(!(vma->vm_flags & VM_MAYSHARE))) { | 217 | if (likely(!(vma->vm_flags & VM_MAYSHARE))) { |
186 | key->private.mm = mm; | 218 | key->private.mm = mm; |
187 | key->private.uaddr = uaddr; | 219 | key->private.address = address; |
188 | return 0; | 220 | return 0; |
189 | } | 221 | } |
190 | 222 | ||
@@ -194,7 +226,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key) | |||
194 | key->shared.inode = vma->vm_file->f_dentry->d_inode; | 226 | key->shared.inode = vma->vm_file->f_dentry->d_inode; |
195 | key->both.offset++; /* Bit 0 of offset indicates inode-based key. */ | 227 | key->both.offset++; /* Bit 0 of offset indicates inode-based key. */ |
196 | if (likely(!(vma->vm_flags & VM_NONLINEAR))) { | 228 | if (likely(!(vma->vm_flags & VM_NONLINEAR))) { |
197 | key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT) | 229 | key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT) |
198 | + vma->vm_pgoff); | 230 | + vma->vm_pgoff); |
199 | return 0; | 231 | return 0; |
200 | } | 232 | } |
@@ -205,7 +237,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key) | |||
205 | * from swap. But that's a lot of code to duplicate here | 237 | * from swap. But that's a lot of code to duplicate here |
206 | * for a rare case, so we simply fetch the page. | 238 | * for a rare case, so we simply fetch the page. |
207 | */ | 239 | */ |
208 | err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL); | 240 | err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL); |
209 | if (err >= 0) { | 241 | if (err >= 0) { |
210 | key->shared.pgoff = | 242 | key->shared.pgoff = |
211 | page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 243 | page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
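
For illustration only (not part of the patch): the alignment and offset arithmetic that get_futex_key() applies above can be mirrored in userspace before issuing a futex call. The helper name futex_page_offset() is invented for this sketch.

    #include <stdint.h>
    #include <unistd.h>

    /* Hypothetical userspace mirror of the offset math in get_futex_key():
     * a futex word must be naturally (u32) aligned, and the key records the
     * word's offset within its page. */
    static int futex_page_offset(const void *uaddr, unsigned long *offset)
    {
            unsigned long address = (unsigned long)(uintptr_t)uaddr;
            unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);

            *offset = address % page_size;
            if (*offset % sizeof(uint32_t) != 0)
                    return -1;      /* the kernel would return -EINVAL */
            return 0;
    }
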
@@ -246,18 +278,244 @@ static void drop_key_refs(union futex_key *key) | |||
246 | } | 278 | } |
247 | } | 279 | } |
248 | 280 | ||
249 | static inline int get_futex_value_locked(int *dest, int __user *from) | 281 | static inline int get_futex_value_locked(u32 *dest, u32 __user *from) |
250 | { | 282 | { |
251 | int ret; | 283 | int ret; |
252 | 284 | ||
253 | inc_preempt_count(); | 285 | inc_preempt_count(); |
254 | ret = __copy_from_user_inatomic(dest, from, sizeof(int)); | 286 | ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); |
255 | dec_preempt_count(); | 287 | dec_preempt_count(); |
256 | 288 | ||
257 | return ret ? -EFAULT : 0; | 289 | return ret ? -EFAULT : 0; |
258 | } | 290 | } |
259 | 291 | ||
260 | /* | 292 | /* |
293 | * Fault handling. Called with current->mm->mmap_sem held. | ||
294 | */ | ||
295 | static int futex_handle_fault(unsigned long address, int attempt) | ||
296 | { | ||
297 | struct vm_area_struct * vma; | ||
298 | struct mm_struct *mm = current->mm; | ||
299 | |||
300 | if (attempt >= 2 || !(vma = find_vma(mm, address)) || | ||
301 | vma->vm_start > address || !(vma->vm_flags & VM_WRITE)) | ||
302 | return -EFAULT; | ||
303 | |||
304 | switch (handle_mm_fault(mm, vma, address, 1)) { | ||
305 | case VM_FAULT_MINOR: | ||
306 | current->min_flt++; | ||
307 | break; | ||
308 | case VM_FAULT_MAJOR: | ||
309 | current->maj_flt++; | ||
310 | break; | ||
311 | default: | ||
312 | return -EFAULT; | ||
313 | } | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * PI code: | ||
319 | */ | ||
320 | static int refill_pi_state_cache(void) | ||
321 | { | ||
322 | struct futex_pi_state *pi_state; | ||
323 | |||
324 | if (likely(current->pi_state_cache)) | ||
325 | return 0; | ||
326 | |||
327 | pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL); | ||
328 | |||
329 | if (!pi_state) | ||
330 | return -ENOMEM; | ||
331 | |||
332 | memset(pi_state, 0, sizeof(*pi_state)); | ||
333 | INIT_LIST_HEAD(&pi_state->list); | ||
334 | /* pi_mutex gets initialized later */ | ||
335 | pi_state->owner = NULL; | ||
336 | atomic_set(&pi_state->refcount, 1); | ||
337 | |||
338 | current->pi_state_cache = pi_state; | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | static struct futex_pi_state * alloc_pi_state(void) | ||
344 | { | ||
345 | struct futex_pi_state *pi_state = current->pi_state_cache; | ||
346 | |||
347 | WARN_ON(!pi_state); | ||
348 | current->pi_state_cache = NULL; | ||
349 | |||
350 | return pi_state; | ||
351 | } | ||
352 | |||
353 | static void free_pi_state(struct futex_pi_state *pi_state) | ||
354 | { | ||
355 | if (!atomic_dec_and_test(&pi_state->refcount)) | ||
356 | return; | ||
357 | |||
358 | /* | ||
359 | * If pi_state->owner is NULL, the owner is most probably dying | ||
360 | * and has cleaned up the pi_state already | ||
361 | */ | ||
362 | if (pi_state->owner) { | ||
363 | spin_lock_irq(&pi_state->owner->pi_lock); | ||
364 | list_del_init(&pi_state->list); | ||
365 | spin_unlock_irq(&pi_state->owner->pi_lock); | ||
366 | |||
367 | rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); | ||
368 | } | ||
369 | |||
370 | if (current->pi_state_cache) | ||
371 | kfree(pi_state); | ||
372 | else { | ||
373 | /* | ||
374 | * pi_state->list is already empty. | ||
375 | * clear pi_state->owner. | ||
376 | * refcount is at 0 - put it back to 1. | ||
377 | */ | ||
378 | pi_state->owner = NULL; | ||
379 | atomic_set(&pi_state->refcount, 1); | ||
380 | current->pi_state_cache = pi_state; | ||
381 | } | ||
382 | } | ||
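
A rough userspace analogue of the refill_pi_state_cache()/free_pi_state() pairing above, assuming C11 atomics and a per-thread cache; struct pi_state_like and put_state() are made-up names and this is a sketch of the caching idea, not the kernel code.

    #include <stdatomic.h>
    #include <stdlib.h>

    /* Sketch only: drop a reference, and instead of always freeing on the
     * last put, park one object per thread so the next allocation cannot
     * fail. */
    struct pi_state_like {
            atomic_int refcount;
    };

    static _Thread_local struct pi_state_like *pi_cache;

    static void put_state(struct pi_state_like *s)
    {
            if (atomic_fetch_sub(&s->refcount, 1) != 1)
                    return;                 /* other references still exist */

            if (pi_cache) {
                    free(s);                /* cache slot already occupied */
            } else {
                    atomic_store(&s->refcount, 1);  /* recycle at refcount 1 */
                    pi_cache = s;
            }
    }
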
383 | |||
384 | /* | ||
385 | * Look up the task based on what TID userspace gave us. | ||
386 | * We don't trust it. | ||
387 | */ | ||
388 | static struct task_struct * futex_find_get_task(pid_t pid) | ||
389 | { | ||
390 | struct task_struct *p; | ||
391 | |||
392 | read_lock(&tasklist_lock); | ||
393 | p = find_task_by_pid(pid); | ||
394 | if (!p) | ||
395 | goto out_unlock; | ||
396 | if ((current->euid != p->euid) && (current->euid != p->uid)) { | ||
397 | p = NULL; | ||
398 | goto out_unlock; | ||
399 | } | ||
400 | if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) { | ||
401 | p = NULL; | ||
402 | goto out_unlock; | ||
403 | } | ||
404 | get_task_struct(p); | ||
405 | out_unlock: | ||
406 | read_unlock(&tasklist_lock); | ||
407 | |||
408 | return p; | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * This task is holding PI mutexes at exit time => bad. | ||
413 | * Kernel cleans up PI-state, but userspace is likely hosed. | ||
414 | * (Robust-futex cleanup is separate and might save the day for userspace.) | ||
415 | */ | ||
416 | void exit_pi_state_list(struct task_struct *curr) | ||
417 | { | ||
418 | struct futex_hash_bucket *hb; | ||
419 | struct list_head *next, *head = &curr->pi_state_list; | ||
420 | struct futex_pi_state *pi_state; | ||
421 | union futex_key key; | ||
422 | |||
423 | /* | ||
424 | * We are a ZOMBIE and nobody can enqueue itself on | ||
425 | * pi_state_list anymore, but we have to be careful | ||
426 | * versus waiters unqueueing themselves | ||
427 | */ | ||
428 | spin_lock_irq(&curr->pi_lock); | ||
429 | while (!list_empty(head)) { | ||
430 | |||
431 | next = head->next; | ||
432 | pi_state = list_entry(next, struct futex_pi_state, list); | ||
433 | key = pi_state->key; | ||
434 | spin_unlock_irq(&curr->pi_lock); | ||
435 | |||
436 | hb = hash_futex(&key); | ||
437 | spin_lock(&hb->lock); | ||
438 | |||
439 | spin_lock_irq(&curr->pi_lock); | ||
440 | if (head->next != next) { | ||
441 | spin_unlock(&hb->lock); | ||
442 | continue; | ||
443 | } | ||
444 | |||
445 | list_del_init(&pi_state->list); | ||
446 | |||
447 | WARN_ON(pi_state->owner != curr); | ||
448 | |||
449 | pi_state->owner = NULL; | ||
450 | spin_unlock_irq(&curr->pi_lock); | ||
451 | |||
452 | rt_mutex_unlock(&pi_state->pi_mutex); | ||
453 | |||
454 | spin_unlock(&hb->lock); | ||
455 | |||
456 | spin_lock_irq(&curr->pi_lock); | ||
457 | } | ||
458 | spin_unlock_irq(&curr->pi_lock); | ||
459 | } | ||
460 | |||
461 | static int | ||
462 | lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | ||
463 | { | ||
464 | struct futex_pi_state *pi_state = NULL; | ||
465 | struct futex_q *this, *next; | ||
466 | struct list_head *head; | ||
467 | struct task_struct *p; | ||
468 | pid_t pid; | ||
469 | |||
470 | head = &hb->chain; | ||
471 | |||
472 | list_for_each_entry_safe(this, next, head, list) { | ||
473 | if (match_futex (&this->key, &me->key)) { | ||
474 | /* | ||
475 | * Another waiter already exists - bump up | ||
476 | * the refcount and return its pi_state: | ||
477 | */ | ||
478 | pi_state = this->pi_state; | ||
479 | atomic_inc(&pi_state->refcount); | ||
480 | me->pi_state = pi_state; | ||
481 | |||
482 | return 0; | ||
483 | } | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * We are the first waiter - try to look up the real owner and | ||
488 | * attach the new pi_state to it: | ||
489 | */ | ||
490 | pid = uval & FUTEX_TID_MASK; | ||
491 | p = futex_find_get_task(pid); | ||
492 | if (!p) | ||
493 | return -ESRCH; | ||
494 | |||
495 | pi_state = alloc_pi_state(); | ||
496 | |||
497 | /* | ||
498 | * Initialize the pi_mutex in locked state and make 'p' | ||
499 | * the owner of it: | ||
500 | */ | ||
501 | rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); | ||
502 | |||
503 | /* Store the key for possible exit cleanups: */ | ||
504 | pi_state->key = me->key; | ||
505 | |||
506 | spin_lock_irq(&p->pi_lock); | ||
507 | list_add(&pi_state->list, &p->pi_state_list); | ||
508 | pi_state->owner = p; | ||
509 | spin_unlock_irq(&p->pi_lock); | ||
510 | |||
511 | put_task_struct(p); | ||
512 | |||
513 | me->pi_state = pi_state; | ||
514 | |||
515 | return 0; | ||
516 | } | ||
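
The TID extraction above relies on the PI futex word layout. A small userspace decoder follows, assuming the FUTEX_* bit definitions exported by <linux/futex.h> on kernels carrying this patch; decode_futex_word() is an illustrative name.

    #include <linux/futex.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decode a PI futex word the way lookup_pi_state() reads it: the low
     * bits are the owner's TID, the top bits flag waiters and a dead owner. */
    static void decode_futex_word(uint32_t uval)
    {
            printf("owner tid : %u\n", uval & FUTEX_TID_MASK);
            printf("waiters   : %s\n", (uval & FUTEX_WAITERS) ? "yes" : "no");
            printf("owner died: %s\n", (uval & FUTEX_OWNER_DIED) ? "yes" : "no");
    }
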
517 | |||
518 | /* | ||
261 | * The hash bucket lock must be held when this is called. | 519 | * The hash bucket lock must be held when this is called. |
262 | * Afterwards, the futex_q must not be accessed. | 520 | * Afterwards, the futex_q must not be accessed. |
263 | */ | 521 | */ |
@@ -284,16 +542,80 @@ static void wake_futex(struct futex_q *q) | |||
284 | q->lock_ptr = NULL; | 542 | q->lock_ptr = NULL; |
285 | } | 543 | } |
286 | 544 | ||
545 | static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) | ||
546 | { | ||
547 | struct task_struct *new_owner; | ||
548 | struct futex_pi_state *pi_state = this->pi_state; | ||
549 | u32 curval, newval; | ||
550 | |||
551 | if (!pi_state) | ||
552 | return -EINVAL; | ||
553 | |||
554 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); | ||
555 | |||
556 | /* | ||
557 | * This happens when we have stolen the lock and the original | ||
558 | * pending owner did not enqueue itself back on the rt_mutex. | ||
559 | * That's not a tragedy. It simply means that a lock waiter | ||
560 | * is on its way. We make the futex_q waiter the pending owner. | ||
561 | */ | ||
562 | if (!new_owner) | ||
563 | new_owner = this->task; | ||
564 | |||
565 | /* | ||
566 | * We pass it to the next owner. (The WAITERS bit is always | ||
567 | * kept enabled while there is PI state around. We must also | ||
568 | * preserve the owner died bit.) | ||
569 | */ | ||
570 | newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; | ||
571 | |||
572 | inc_preempt_count(); | ||
573 | curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); | ||
574 | dec_preempt_count(); | ||
575 | |||
576 | if (curval == -EFAULT) | ||
577 | return -EFAULT; | ||
578 | if (curval != uval) | ||
579 | return -EINVAL; | ||
580 | |||
581 | list_del_init(&pi_state->owner->pi_state_list); | ||
582 | list_add(&pi_state->list, &new_owner->pi_state_list); | ||
583 | pi_state->owner = new_owner; | ||
584 | rt_mutex_unlock(&pi_state->pi_mutex); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
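
The cmpxchg above installs a value composed as in this sketch (constants again from <linux/futex.h>; handover_value() is a hypothetical helper): the next owner's TID goes in the low bits, FUTEX_WAITERS stays set while PI state exists, and the owner-died bit is carried over.

    #include <linux/futex.h>
    #include <stdint.h>

    /* Value wake_futex_pi() hands to the futex word for the next owner. */
    static uint32_t handover_value(uint32_t uval, uint32_t new_owner_tid)
    {
            return (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner_tid;
    }
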
588 | |||
589 | static int unlock_futex_pi(u32 __user *uaddr, u32 uval) | ||
590 | { | ||
591 | u32 oldval; | ||
592 | |||
593 | /* | ||
594 | * There is no waiter, so we unlock the futex. The owner-died | ||
595 | * bit need not be preserved here. We are the owner: | ||
596 | */ | ||
597 | inc_preempt_count(); | ||
598 | oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0); | ||
599 | dec_preempt_count(); | ||
600 | |||
601 | if (oldval == -EFAULT) | ||
602 | return oldval; | ||
603 | if (oldval != uval) | ||
604 | return -EAGAIN; | ||
605 | |||
606 | return 0; | ||
607 | } | ||
608 | |||
287 | /* | 609 | /* |
288 | * Wake up all waiters hashed on the physical page that is mapped | 610 | * Wake up all waiters hashed on the physical page that is mapped |
289 | * to this virtual address: | 611 | * to this virtual address: |
290 | */ | 612 | */ |
291 | static int futex_wake(unsigned long uaddr, int nr_wake) | 613 | static int futex_wake(u32 __user *uaddr, int nr_wake) |
292 | { | 614 | { |
293 | union futex_key key; | 615 | struct futex_hash_bucket *hb; |
294 | struct futex_hash_bucket *bh; | ||
295 | struct list_head *head; | ||
296 | struct futex_q *this, *next; | 616 | struct futex_q *this, *next; |
617 | struct list_head *head; | ||
618 | union futex_key key; | ||
297 | int ret; | 619 | int ret; |
298 | 620 | ||
299 | down_read(&current->mm->mmap_sem); | 621 | down_read(&current->mm->mmap_sem); |
@@ -302,19 +624,21 @@ static int futex_wake(unsigned long uaddr, int nr_wake) | |||
302 | if (unlikely(ret != 0)) | 624 | if (unlikely(ret != 0)) |
303 | goto out; | 625 | goto out; |
304 | 626 | ||
305 | bh = hash_futex(&key); | 627 | hb = hash_futex(&key); |
306 | spin_lock(&bh->lock); | 628 | spin_lock(&hb->lock); |
307 | head = &bh->chain; | 629 | head = &hb->chain; |
308 | 630 | ||
309 | list_for_each_entry_safe(this, next, head, list) { | 631 | list_for_each_entry_safe(this, next, head, list) { |
310 | if (match_futex (&this->key, &key)) { | 632 | if (match_futex (&this->key, &key)) { |
633 | if (this->pi_state) | ||
634 | return -EINVAL; | ||
311 | wake_futex(this); | 635 | wake_futex(this); |
312 | if (++ret >= nr_wake) | 636 | if (++ret >= nr_wake) |
313 | break; | 637 | break; |
314 | } | 638 | } |
315 | } | 639 | } |
316 | 640 | ||
317 | spin_unlock(&bh->lock); | 641 | spin_unlock(&hb->lock); |
318 | out: | 642 | out: |
319 | up_read(&current->mm->mmap_sem); | 643 | up_read(&current->mm->mmap_sem); |
320 | return ret; | 644 | return ret; |
@@ -324,10 +648,12 @@ out: | |||
324 | * Wake up all waiters hashed on the physical page that is mapped | 648 | * Wake up all waiters hashed on the physical page that is mapped |
325 | * to this virtual address: | 649 | * to this virtual address: |
326 | */ | 650 | */ |
327 | static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op) | 651 | static int |
652 | futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2, | ||
653 | int nr_wake, int nr_wake2, int op) | ||
328 | { | 654 | { |
329 | union futex_key key1, key2; | 655 | union futex_key key1, key2; |
330 | struct futex_hash_bucket *bh1, *bh2; | 656 | struct futex_hash_bucket *hb1, *hb2; |
331 | struct list_head *head; | 657 | struct list_head *head; |
332 | struct futex_q *this, *next; | 658 | struct futex_q *this, *next; |
333 | int ret, op_ret, attempt = 0; | 659 | int ret, op_ret, attempt = 0; |
@@ -342,27 +668,29 @@ retryfull: | |||
342 | if (unlikely(ret != 0)) | 668 | if (unlikely(ret != 0)) |
343 | goto out; | 669 | goto out; |
344 | 670 | ||
345 | bh1 = hash_futex(&key1); | 671 | hb1 = hash_futex(&key1); |
346 | bh2 = hash_futex(&key2); | 672 | hb2 = hash_futex(&key2); |
347 | 673 | ||
348 | retry: | 674 | retry: |
349 | if (bh1 < bh2) | 675 | if (hb1 < hb2) |
350 | spin_lock(&bh1->lock); | 676 | spin_lock(&hb1->lock); |
351 | spin_lock(&bh2->lock); | 677 | spin_lock(&hb2->lock); |
352 | if (bh1 > bh2) | 678 | if (hb1 > hb2) |
353 | spin_lock(&bh1->lock); | 679 | spin_lock(&hb1->lock); |
354 | 680 | ||
355 | op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2); | 681 | op_ret = futex_atomic_op_inuser(op, uaddr2); |
356 | if (unlikely(op_ret < 0)) { | 682 | if (unlikely(op_ret < 0)) { |
357 | int dummy; | 683 | u32 dummy; |
358 | 684 | ||
359 | spin_unlock(&bh1->lock); | 685 | spin_unlock(&hb1->lock); |
360 | if (bh1 != bh2) | 686 | if (hb1 != hb2) |
361 | spin_unlock(&bh2->lock); | 687 | spin_unlock(&hb2->lock); |
362 | 688 | ||
363 | #ifndef CONFIG_MMU | 689 | #ifndef CONFIG_MMU |
364 | /* we don't get EFAULT from MMU faults if we don't have an MMU, | 690 | /* |
365 | * but we might get them from range checking */ | 691 | * we don't get EFAULT from MMU faults if we don't have an MMU, |
692 | * but we might get them from range checking | ||
693 | */ | ||
366 | ret = op_ret; | 694 | ret = op_ret; |
367 | goto out; | 695 | goto out; |
368 | #endif | 696 | #endif |
@@ -372,47 +700,34 @@ retry: | |||
372 | goto out; | 700 | goto out; |
373 | } | 701 | } |
374 | 702 | ||
375 | /* futex_atomic_op_inuser needs to both read and write | 703 | /* |
704 | * futex_atomic_op_inuser needs to both read and write | ||
376 | * *(int __user *)uaddr2, but we can't modify it | 705 | * *(int __user *)uaddr2, but we can't modify it |
377 | * non-atomically. Therefore, if get_user below is not | 706 | * non-atomically. Therefore, if get_user below is not |
378 | * enough, we need to handle the fault ourselves, while | 707 | * enough, we need to handle the fault ourselves, while |
379 | * still holding the mmap_sem. */ | 708 | * still holding the mmap_sem. |
709 | */ | ||
380 | if (attempt++) { | 710 | if (attempt++) { |
381 | struct vm_area_struct * vma; | 711 | if (futex_handle_fault((unsigned long)uaddr2, |
382 | struct mm_struct *mm = current->mm; | 712 | attempt)) |
383 | |||
384 | ret = -EFAULT; | ||
385 | if (attempt >= 2 || | ||
386 | !(vma = find_vma(mm, uaddr2)) || | ||
387 | vma->vm_start > uaddr2 || | ||
388 | !(vma->vm_flags & VM_WRITE)) | ||
389 | goto out; | ||
390 | |||
391 | switch (handle_mm_fault(mm, vma, uaddr2, 1)) { | ||
392 | case VM_FAULT_MINOR: | ||
393 | current->min_flt++; | ||
394 | break; | ||
395 | case VM_FAULT_MAJOR: | ||
396 | current->maj_flt++; | ||
397 | break; | ||
398 | default: | ||
399 | goto out; | 713 | goto out; |
400 | } | ||
401 | goto retry; | 714 | goto retry; |
402 | } | 715 | } |
403 | 716 | ||
404 | /* If we would have faulted, release mmap_sem, | 717 | /* |
405 | * fault it in and start all over again. */ | 718 | * If we would have faulted, release mmap_sem, |
719 | * fault it in and start all over again. | ||
720 | */ | ||
406 | up_read(&current->mm->mmap_sem); | 721 | up_read(&current->mm->mmap_sem); |
407 | 722 | ||
408 | ret = get_user(dummy, (int __user *)uaddr2); | 723 | ret = get_user(dummy, uaddr2); |
409 | if (ret) | 724 | if (ret) |
410 | return ret; | 725 | return ret; |
411 | 726 | ||
412 | goto retryfull; | 727 | goto retryfull; |
413 | } | 728 | } |
414 | 729 | ||
415 | head = &bh1->chain; | 730 | head = &hb1->chain; |
416 | 731 | ||
417 | list_for_each_entry_safe(this, next, head, list) { | 732 | list_for_each_entry_safe(this, next, head, list) { |
418 | if (match_futex (&this->key, &key1)) { | 733 | if (match_futex (&this->key, &key1)) { |
@@ -423,7 +738,7 @@ retry: | |||
423 | } | 738 | } |
424 | 739 | ||
425 | if (op_ret > 0) { | 740 | if (op_ret > 0) { |
426 | head = &bh2->chain; | 741 | head = &hb2->chain; |
427 | 742 | ||
428 | op_ret = 0; | 743 | op_ret = 0; |
429 | list_for_each_entry_safe(this, next, head, list) { | 744 | list_for_each_entry_safe(this, next, head, list) { |
@@ -436,9 +751,9 @@ retry: | |||
436 | ret += op_ret; | 751 | ret += op_ret; |
437 | } | 752 | } |
438 | 753 | ||
439 | spin_unlock(&bh1->lock); | 754 | spin_unlock(&hb1->lock); |
440 | if (bh1 != bh2) | 755 | if (hb1 != hb2) |
441 | spin_unlock(&bh2->lock); | 756 | spin_unlock(&hb2->lock); |
442 | out: | 757 | out: |
443 | up_read(&current->mm->mmap_sem); | 758 | up_read(&current->mm->mmap_sem); |
444 | return ret; | 759 | return ret; |
@@ -448,11 +763,11 @@ out: | |||
448 | * Requeue all waiters hashed on one physical page to another | 763 | * Requeue all waiters hashed on one physical page to another |
449 | * physical page. | 764 | * physical page. |
450 | */ | 765 | */ |
451 | static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2, | 766 | static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2, |
452 | int nr_wake, int nr_requeue, int *valp) | 767 | int nr_wake, int nr_requeue, u32 *cmpval) |
453 | { | 768 | { |
454 | union futex_key key1, key2; | 769 | union futex_key key1, key2; |
455 | struct futex_hash_bucket *bh1, *bh2; | 770 | struct futex_hash_bucket *hb1, *hb2; |
456 | struct list_head *head1; | 771 | struct list_head *head1; |
457 | struct futex_q *this, *next; | 772 | struct futex_q *this, *next; |
458 | int ret, drop_count = 0; | 773 | int ret, drop_count = 0; |
@@ -467,68 +782,72 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2, | |||
467 | if (unlikely(ret != 0)) | 782 | if (unlikely(ret != 0)) |
468 | goto out; | 783 | goto out; |
469 | 784 | ||
470 | bh1 = hash_futex(&key1); | 785 | hb1 = hash_futex(&key1); |
471 | bh2 = hash_futex(&key2); | 786 | hb2 = hash_futex(&key2); |
472 | 787 | ||
473 | if (bh1 < bh2) | 788 | if (hb1 < hb2) |
474 | spin_lock(&bh1->lock); | 789 | spin_lock(&hb1->lock); |
475 | spin_lock(&bh2->lock); | 790 | spin_lock(&hb2->lock); |
476 | if (bh1 > bh2) | 791 | if (hb1 > hb2) |
477 | spin_lock(&bh1->lock); | 792 | spin_lock(&hb1->lock); |
478 | 793 | ||
479 | if (likely(valp != NULL)) { | 794 | if (likely(cmpval != NULL)) { |
480 | int curval; | 795 | u32 curval; |
481 | 796 | ||
482 | ret = get_futex_value_locked(&curval, (int __user *)uaddr1); | 797 | ret = get_futex_value_locked(&curval, uaddr1); |
483 | 798 | ||
484 | if (unlikely(ret)) { | 799 | if (unlikely(ret)) { |
485 | spin_unlock(&bh1->lock); | 800 | spin_unlock(&hb1->lock); |
486 | if (bh1 != bh2) | 801 | if (hb1 != hb2) |
487 | spin_unlock(&bh2->lock); | 802 | spin_unlock(&hb2->lock); |
488 | 803 | ||
489 | /* If we would have faulted, release mmap_sem, fault | 804 | /* |
805 | * If we would have faulted, release mmap_sem, fault | ||
490 | * it in and start all over again. | 806 | * it in and start all over again. |
491 | */ | 807 | */ |
492 | up_read(&current->mm->mmap_sem); | 808 | up_read(&current->mm->mmap_sem); |
493 | 809 | ||
494 | ret = get_user(curval, (int __user *)uaddr1); | 810 | ret = get_user(curval, uaddr1); |
495 | 811 | ||
496 | if (!ret) | 812 | if (!ret) |
497 | goto retry; | 813 | goto retry; |
498 | 814 | ||
499 | return ret; | 815 | return ret; |
500 | } | 816 | } |
501 | if (curval != *valp) { | 817 | if (curval != *cmpval) { |
502 | ret = -EAGAIN; | 818 | ret = -EAGAIN; |
503 | goto out_unlock; | 819 | goto out_unlock; |
504 | } | 820 | } |
505 | } | 821 | } |
506 | 822 | ||
507 | head1 = &bh1->chain; | 823 | head1 = &hb1->chain; |
508 | list_for_each_entry_safe(this, next, head1, list) { | 824 | list_for_each_entry_safe(this, next, head1, list) { |
509 | if (!match_futex (&this->key, &key1)) | 825 | if (!match_futex (&this->key, &key1)) |
510 | continue; | 826 | continue; |
511 | if (++ret <= nr_wake) { | 827 | if (++ret <= nr_wake) { |
512 | wake_futex(this); | 828 | wake_futex(this); |
513 | } else { | 829 | } else { |
514 | list_move_tail(&this->list, &bh2->chain); | 830 | /* |
515 | this->lock_ptr = &bh2->lock; | 831 | * If key1 and key2 hash to the same bucket, no need to |
832 | * requeue. | ||
833 | */ | ||
834 | if (likely(head1 != &hb2->chain)) { | ||
835 | list_move_tail(&this->list, &hb2->chain); | ||
836 | this->lock_ptr = &hb2->lock; | ||
837 | } | ||
516 | this->key = key2; | 838 | this->key = key2; |
517 | get_key_refs(&key2); | 839 | get_key_refs(&key2); |
518 | drop_count++; | 840 | drop_count++; |
519 | 841 | ||
520 | if (ret - nr_wake >= nr_requeue) | 842 | if (ret - nr_wake >= nr_requeue) |
521 | break; | 843 | break; |
522 | /* Make sure to stop if key1 == key2 */ | ||
523 | if (head1 == &bh2->chain && head1 != &next->list) | ||
524 | head1 = &this->list; | ||
525 | } | 844 | } |
526 | } | 845 | } |
527 | 846 | ||
528 | out_unlock: | 847 | out_unlock: |
529 | spin_unlock(&bh1->lock); | 848 | spin_unlock(&hb1->lock); |
530 | if (bh1 != bh2) | 849 | if (hb1 != hb2) |
531 | spin_unlock(&bh2->lock); | 850 | spin_unlock(&hb2->lock); |
532 | 851 | ||
533 | /* drop_key_refs() must be called outside the spinlocks. */ | 852 | /* drop_key_refs() must be called outside the spinlocks. */ |
534 | while (--drop_count >= 0) | 853 | while (--drop_count >= 0) |
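
As a usage sketch (not part of the patch), FUTEX_CMP_REQUEUE is typically driven like this from a condition-variable broadcast path; cmp_requeue() is a hypothetical wrapper, and nr_requeue rides in the utime slot exactly as the syscall entry later in this patch shows.

    #include <limits.h>
    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical wrapper: wake one waiter on cond_word and requeue the
     * rest onto mutex_word, but only if *cond_word still equals
     * expected_val (otherwise the kernel returns -EAGAIN and the caller
     * re-reads the condition word and retries). */
    static long cmp_requeue(uint32_t *cond_word, uint32_t *mutex_word,
                            uint32_t expected_val)
    {
            return syscall(SYS_futex, cond_word, FUTEX_CMP_REQUEUE, 1,
                           (void *)(unsigned long)INT_MAX, /* nr_requeue in utime */
                           mutex_word, expected_val);
    }
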
@@ -543,7 +862,7 @@ out: | |||
543 | static inline struct futex_hash_bucket * | 862 | static inline struct futex_hash_bucket * |
544 | queue_lock(struct futex_q *q, int fd, struct file *filp) | 863 | queue_lock(struct futex_q *q, int fd, struct file *filp) |
545 | { | 864 | { |
546 | struct futex_hash_bucket *bh; | 865 | struct futex_hash_bucket *hb; |
547 | 866 | ||
548 | q->fd = fd; | 867 | q->fd = fd; |
549 | q->filp = filp; | 868 | q->filp = filp; |
@@ -551,23 +870,24 @@ queue_lock(struct futex_q *q, int fd, struct file *filp) | |||
551 | init_waitqueue_head(&q->waiters); | 870 | init_waitqueue_head(&q->waiters); |
552 | 871 | ||
553 | get_key_refs(&q->key); | 872 | get_key_refs(&q->key); |
554 | bh = hash_futex(&q->key); | 873 | hb = hash_futex(&q->key); |
555 | q->lock_ptr = &bh->lock; | 874 | q->lock_ptr = &hb->lock; |
556 | 875 | ||
557 | spin_lock(&bh->lock); | 876 | spin_lock(&hb->lock); |
558 | return bh; | 877 | return hb; |
559 | } | 878 | } |
560 | 879 | ||
561 | static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh) | 880 | static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
562 | { | 881 | { |
563 | list_add_tail(&q->list, &bh->chain); | 882 | list_add_tail(&q->list, &hb->chain); |
564 | spin_unlock(&bh->lock); | 883 | q->task = current; |
884 | spin_unlock(&hb->lock); | ||
565 | } | 885 | } |
566 | 886 | ||
567 | static inline void | 887 | static inline void |
568 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh) | 888 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) |
569 | { | 889 | { |
570 | spin_unlock(&bh->lock); | 890 | spin_unlock(&hb->lock); |
571 | drop_key_refs(&q->key); | 891 | drop_key_refs(&q->key); |
572 | } | 892 | } |
573 | 893 | ||
@@ -579,16 +899,17 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh) | |||
579 | /* The key must be already stored in q->key. */ | 899 | /* The key must be already stored in q->key. */ |
580 | static void queue_me(struct futex_q *q, int fd, struct file *filp) | 900 | static void queue_me(struct futex_q *q, int fd, struct file *filp) |
581 | { | 901 | { |
582 | struct futex_hash_bucket *bh; | 902 | struct futex_hash_bucket *hb; |
583 | bh = queue_lock(q, fd, filp); | 903 | |
584 | __queue_me(q, bh); | 904 | hb = queue_lock(q, fd, filp); |
905 | __queue_me(q, hb); | ||
585 | } | 906 | } |
586 | 907 | ||
587 | /* Return 1 if we were still queued (ie. 0 means we were woken) */ | 908 | /* Return 1 if we were still queued (ie. 0 means we were woken) */ |
588 | static int unqueue_me(struct futex_q *q) | 909 | static int unqueue_me(struct futex_q *q) |
589 | { | 910 | { |
590 | int ret = 0; | ||
591 | spinlock_t *lock_ptr; | 911 | spinlock_t *lock_ptr; |
912 | int ret = 0; | ||
592 | 913 | ||
593 | /* In the common case we don't take the spinlock, which is nice. */ | 914 | /* In the common case we don't take the spinlock, which is nice. */ |
594 | retry: | 915 | retry: |
@@ -614,6 +935,9 @@ static int unqueue_me(struct futex_q *q) | |||
614 | } | 935 | } |
615 | WARN_ON(list_empty(&q->list)); | 936 | WARN_ON(list_empty(&q->list)); |
616 | list_del(&q->list); | 937 | list_del(&q->list); |
938 | |||
939 | BUG_ON(q->pi_state); | ||
940 | |||
617 | spin_unlock(lock_ptr); | 941 | spin_unlock(lock_ptr); |
618 | ret = 1; | 942 | ret = 1; |
619 | } | 943 | } |
@@ -622,21 +946,42 @@ static int unqueue_me(struct futex_q *q) | |||
622 | return ret; | 946 | return ret; |
623 | } | 947 | } |
624 | 948 | ||
625 | static int futex_wait(unsigned long uaddr, int val, unsigned long time) | 949 | /* |
950 | * PI futexes cannot be requeued and must remove themselves from the | ||
951 | * hash bucket. The hash bucket lock is held on entry and dropped here. | ||
952 | */ | ||
953 | static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb) | ||
626 | { | 954 | { |
627 | DECLARE_WAITQUEUE(wait, current); | 955 | WARN_ON(list_empty(&q->list)); |
628 | int ret, curval; | 956 | list_del(&q->list); |
957 | |||
958 | BUG_ON(!q->pi_state); | ||
959 | free_pi_state(q->pi_state); | ||
960 | q->pi_state = NULL; | ||
961 | |||
962 | spin_unlock(&hb->lock); | ||
963 | |||
964 | drop_key_refs(&q->key); | ||
965 | } | ||
966 | |||
967 | static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time) | ||
968 | { | ||
969 | struct task_struct *curr = current; | ||
970 | DECLARE_WAITQUEUE(wait, curr); | ||
971 | struct futex_hash_bucket *hb; | ||
629 | struct futex_q q; | 972 | struct futex_q q; |
630 | struct futex_hash_bucket *bh; | 973 | u32 uval; |
974 | int ret; | ||
631 | 975 | ||
976 | q.pi_state = NULL; | ||
632 | retry: | 977 | retry: |
633 | down_read(&current->mm->mmap_sem); | 978 | down_read(&curr->mm->mmap_sem); |
634 | 979 | ||
635 | ret = get_futex_key(uaddr, &q.key); | 980 | ret = get_futex_key(uaddr, &q.key); |
636 | if (unlikely(ret != 0)) | 981 | if (unlikely(ret != 0)) |
637 | goto out_release_sem; | 982 | goto out_release_sem; |
638 | 983 | ||
639 | bh = queue_lock(&q, -1, NULL); | 984 | hb = queue_lock(&q, -1, NULL); |
640 | 985 | ||
641 | /* | 986 | /* |
642 | * Access the page AFTER the futex is queued. | 987 | * Access the page AFTER the futex is queued. |
@@ -658,37 +1003,35 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time) | |||
658 | * We hold the mmap semaphore, so the mapping cannot have changed | 1003 | * We hold the mmap semaphore, so the mapping cannot have changed |
659 | * since we looked it up in get_futex_key. | 1004 | * since we looked it up in get_futex_key. |
660 | */ | 1005 | */ |
661 | 1006 | ret = get_futex_value_locked(&uval, uaddr); | |
662 | ret = get_futex_value_locked(&curval, (int __user *)uaddr); | ||
663 | 1007 | ||
664 | if (unlikely(ret)) { | 1008 | if (unlikely(ret)) { |
665 | queue_unlock(&q, bh); | 1009 | queue_unlock(&q, hb); |
666 | 1010 | ||
667 | /* If we would have faulted, release mmap_sem, fault it in and | 1011 | /* |
1012 | * If we would have faulted, release mmap_sem, fault it in and | ||
668 | * start all over again. | 1013 | * start all over again. |
669 | */ | 1014 | */ |
670 | up_read(&current->mm->mmap_sem); | 1015 | up_read(&curr->mm->mmap_sem); |
671 | 1016 | ||
672 | ret = get_user(curval, (int __user *)uaddr); | 1017 | ret = get_user(uval, uaddr); |
673 | 1018 | ||
674 | if (!ret) | 1019 | if (!ret) |
675 | goto retry; | 1020 | goto retry; |
676 | return ret; | 1021 | return ret; |
677 | } | 1022 | } |
678 | if (curval != val) { | 1023 | ret = -EWOULDBLOCK; |
679 | ret = -EWOULDBLOCK; | 1024 | if (uval != val) |
680 | queue_unlock(&q, bh); | 1025 | goto out_unlock_release_sem; |
681 | goto out_release_sem; | ||
682 | } | ||
683 | 1026 | ||
684 | /* Only actually queue if *uaddr contained val. */ | 1027 | /* Only actually queue if *uaddr contained val. */ |
685 | __queue_me(&q, bh); | 1028 | __queue_me(&q, hb); |
686 | 1029 | ||
687 | /* | 1030 | /* |
688 | * Now the futex is queued and we have checked the data, we | 1031 | * Now the futex is queued and we have checked the data, we |
689 | * don't want to hold mmap_sem while we sleep. | 1032 | * don't want to hold mmap_sem while we sleep. |
690 | */ | 1033 | */ |
691 | up_read(&current->mm->mmap_sem); | 1034 | up_read(&curr->mm->mmap_sem); |
692 | 1035 | ||
693 | /* | 1036 | /* |
694 | * There might have been scheduling since the queue_me(), as we | 1037 | * There might have been scheduling since the queue_me(), as we |
@@ -720,12 +1063,421 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time) | |||
720 | return 0; | 1063 | return 0; |
721 | if (time == 0) | 1064 | if (time == 0) |
722 | return -ETIMEDOUT; | 1065 | return -ETIMEDOUT; |
723 | /* We expect signal_pending(current), but another thread may | 1066 | /* |
724 | * have handled it for us already. */ | 1067 | * We expect signal_pending(current), but another thread may |
1068 | * have handled it for us already. | ||
1069 | */ | ||
725 | return -EINTR; | 1070 | return -EINTR; |
726 | 1071 | ||
1072 | out_unlock_release_sem: | ||
1073 | queue_unlock(&q, hb); | ||
1074 | |||
727 | out_release_sem: | 1075 | out_release_sem: |
1076 | up_read(&curr->mm->mmap_sem); | ||
1077 | return ret; | ||
1078 | } | ||
1079 | |||
1080 | /* | ||
1081 | * Userspace tried a 0 -> TID atomic transition of the futex value | ||
1082 | * and failed. The kernel side here does the whole locking operation: | ||
1083 | * if there are waiters then it will block, it does PI, etc. (Due to | ||
1084 | * races the kernel might see a 0 value of the futex too.) | ||
1085 | */ | ||
1086 | static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, | ||
1087 | struct hrtimer_sleeper *to) | ||
1088 | { | ||
1089 | struct task_struct *curr = current; | ||
1090 | struct futex_hash_bucket *hb; | ||
1091 | u32 uval, newval, curval; | ||
1092 | struct futex_q q; | ||
1093 | int ret, attempt = 0; | ||
1094 | |||
1095 | if (refill_pi_state_cache()) | ||
1096 | return -ENOMEM; | ||
1097 | |||
1098 | q.pi_state = NULL; | ||
1099 | retry: | ||
1100 | down_read(&curr->mm->mmap_sem); | ||
1101 | |||
1102 | ret = get_futex_key(uaddr, &q.key); | ||
1103 | if (unlikely(ret != 0)) | ||
1104 | goto out_release_sem; | ||
1105 | |||
1106 | hb = queue_lock(&q, -1, NULL); | ||
1107 | |||
1108 | retry_locked: | ||
1109 | /* | ||
1110 | * To avoid races, we attempt to take the lock here again | ||
1111 | * (by doing a 0 -> TID atomic cmpxchg), while holding all | ||
1112 | * the locks. It will most likely not succeed. | ||
1113 | */ | ||
1114 | newval = current->pid; | ||
1115 | |||
1116 | inc_preempt_count(); | ||
1117 | curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval); | ||
1118 | dec_preempt_count(); | ||
1119 | |||
1120 | if (unlikely(curval == -EFAULT)) | ||
1121 | goto uaddr_faulted; | ||
1122 | |||
1123 | /* We own the lock already */ | ||
1124 | if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) { | ||
1125 | if (!detect && 0) | ||
1126 | force_sig(SIGKILL, current); | ||
1127 | ret = -EDEADLK; | ||
1128 | goto out_unlock_release_sem; | ||
1129 | } | ||
1130 | |||
1131 | /* | ||
1132 | * Surprise - we got the lock. Just return | ||
1133 | * to userspace: | ||
1134 | */ | ||
1135 | if (unlikely(!curval)) | ||
1136 | goto out_unlock_release_sem; | ||
1137 | |||
1138 | uval = curval; | ||
1139 | newval = uval | FUTEX_WAITERS; | ||
1140 | |||
1141 | inc_preempt_count(); | ||
1142 | curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); | ||
1143 | dec_preempt_count(); | ||
1144 | |||
1145 | if (unlikely(curval == -EFAULT)) | ||
1146 | goto uaddr_faulted; | ||
1147 | if (unlikely(curval != uval)) | ||
1148 | goto retry_locked; | ||
1149 | |||
1150 | /* | ||
1151 | * We don't have the lock. Look up the PI state (or create it if | ||
1152 | * we are the first waiter): | ||
1153 | */ | ||
1154 | ret = lookup_pi_state(uval, hb, &q); | ||
1155 | |||
1156 | if (unlikely(ret)) { | ||
1157 | /* | ||
1158 | * There were no waiters and the owner task lookup | ||
1159 | * failed. When the OWNER_DIED bit is set, then we | ||
1160 | * know that this is a robust futex and we actually | ||
1161 | * take the lock. This is safe as we are protected by | ||
1162 | * the hash bucket lock. We also set the waiters bit | ||
1163 | * unconditionally here, to simplify glibc handling of | ||
1164 | * multiple tasks racing to acquire the lock and | ||
1165 | * cleanup the problems which were left by the dead | ||
1166 | * clean up the problems which were left by the dead | ||
1167 | */ | ||
1168 | if (curval & FUTEX_OWNER_DIED) { | ||
1169 | uval = newval; | ||
1170 | newval = current->pid | | ||
1171 | FUTEX_OWNER_DIED | FUTEX_WAITERS; | ||
1172 | |||
1173 | inc_preempt_count(); | ||
1174 | curval = futex_atomic_cmpxchg_inatomic(uaddr, | ||
1175 | uval, newval); | ||
1176 | dec_preempt_count(); | ||
1177 | |||
1178 | if (unlikely(curval == -EFAULT)) | ||
1179 | goto uaddr_faulted; | ||
1180 | if (unlikely(curval != uval)) | ||
1181 | goto retry_locked; | ||
1182 | ret = 0; | ||
1183 | } | ||
1184 | goto out_unlock_release_sem; | ||
1185 | } | ||
1186 | |||
1187 | /* | ||
1188 | * Only actually queue now that the atomic ops are done: | ||
1189 | */ | ||
1190 | __queue_me(&q, hb); | ||
1191 | |||
1192 | /* | ||
1193 | * Now the futex is queued and we have checked the data, we | ||
1194 | * don't want to hold mmap_sem while we sleep. | ||
1195 | */ | ||
1196 | up_read(&curr->mm->mmap_sem); | ||
1197 | |||
1198 | WARN_ON(!q.pi_state); | ||
1199 | /* | ||
1200 | * Block on the PI mutex: | ||
1201 | */ | ||
1202 | if (!trylock) | ||
1203 | ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); | ||
1204 | else { | ||
1205 | ret = rt_mutex_trylock(&q.pi_state->pi_mutex); | ||
1206 | /* Fixup the trylock return value: */ | ||
1207 | ret = ret ? 0 : -EWOULDBLOCK; | ||
1208 | } | ||
1209 | |||
1210 | down_read(&curr->mm->mmap_sem); | ||
1211 | hb = queue_lock(&q, -1, NULL); | ||
1212 | |||
1213 | /* | ||
1214 | * Got the lock. We might not be the anticipated owner if we | ||
1215 | * did a lock-steal - fix up the PI-state in that case. | ||
1216 | */ | ||
1217 | if (!ret && q.pi_state->owner != curr) { | ||
1218 | u32 newtid = current->pid | FUTEX_WAITERS; | ||
1219 | |||
1220 | /* Owner died? */ | ||
1221 | if (q.pi_state->owner != NULL) { | ||
1222 | spin_lock_irq(&q.pi_state->owner->pi_lock); | ||
1223 | list_del_init(&q.pi_state->list); | ||
1224 | spin_unlock_irq(&q.pi_state->owner->pi_lock); | ||
1225 | } else | ||
1226 | newtid |= FUTEX_OWNER_DIED; | ||
1227 | |||
1228 | q.pi_state->owner = current; | ||
1229 | |||
1230 | spin_lock_irq(&current->pi_lock); | ||
1231 | list_add(&q.pi_state->list, &current->pi_state_list); | ||
1232 | spin_unlock_irq(&current->pi_lock); | ||
1233 | |||
1234 | /* Unqueue and drop the lock */ | ||
1235 | unqueue_me_pi(&q, hb); | ||
1236 | up_read(&curr->mm->mmap_sem); | ||
1237 | /* | ||
1238 | * We own it, so we have to replace the pending owner | ||
1239 | * TID. This must be atomic as we have to preserve the | ||
1240 | * owner died bit here. | ||
1241 | */ | ||
1242 | ret = get_user(uval, uaddr); | ||
1243 | while (!ret) { | ||
1244 | newval = (uval & FUTEX_OWNER_DIED) | newtid; | ||
1245 | curval = futex_atomic_cmpxchg_inatomic(uaddr, | ||
1246 | uval, newval); | ||
1247 | if (curval == -EFAULT) | ||
1248 | ret = -EFAULT; | ||
1249 | if (curval == uval) | ||
1250 | break; | ||
1251 | uval = curval; | ||
1252 | } | ||
1253 | } else { | ||
1254 | /* | ||
1255 | * Catch the rare case, where the lock was released | ||
1256 | * when we were on the way back before we locked | ||
1257 | * the hash bucket. | ||
1258 | */ | ||
1259 | if (ret && q.pi_state->owner == curr) { | ||
1260 | if (rt_mutex_trylock(&q.pi_state->pi_mutex)) | ||
1261 | ret = 0; | ||
1262 | } | ||
1263 | /* Unqueue and drop the lock */ | ||
1264 | unqueue_me_pi(&q, hb); | ||
1265 | up_read(&curr->mm->mmap_sem); | ||
1266 | } | ||
1267 | |||
1268 | if (!detect && ret == -EDEADLK && 0) | ||
1269 | force_sig(SIGKILL, current); | ||
1270 | |||
1271 | return ret; | ||
1272 | |||
1273 | out_unlock_release_sem: | ||
1274 | queue_unlock(&q, hb); | ||
1275 | |||
1276 | out_release_sem: | ||
1277 | up_read(&curr->mm->mmap_sem); | ||
1278 | return ret; | ||
1279 | |||
1280 | uaddr_faulted: | ||
1281 | /* | ||
1282 | * We have to r/w *(int __user *)uaddr, but we can't modify it | ||
1283 | * non-atomically. Therefore, if get_user below is not | ||
1284 | * enough, we need to handle the fault ourselves, while | ||
1285 | * still holding the mmap_sem. | ||
1286 | */ | ||
1287 | if (attempt++) { | ||
1288 | if (futex_handle_fault((unsigned long)uaddr, attempt)) | ||
1289 | goto out_unlock_release_sem; | ||
1290 | |||
1291 | goto retry_locked; | ||
1292 | } | ||
1293 | |||
1294 | queue_unlock(&q, hb); | ||
1295 | up_read(&curr->mm->mmap_sem); | ||
1296 | |||
1297 | ret = get_user(uval, uaddr); | ||
1298 | if (!ret && (uval != -EFAULT)) | ||
1299 | goto retry; | ||
1300 | |||
1301 | return ret; | ||
1302 | } | ||
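
A hedged sketch of the userspace fast path this slowpath backs up, using GCC atomic builtins; pi_lock() is an invented name, and a production implementation (for example glibc's PI mutexes) also has to handle EINTR, timeouts and the owner-died case.

    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Try the 0 -> TID transition with one cmpxchg; only on contention
     * enter the kernel, which sets FUTEX_WAITERS and blocks the caller on
     * the rt_mutex created above. */
    static int pi_lock(uint32_t *futex_word)
    {
            uint32_t expected = 0;
            uint32_t tid = (uint32_t)syscall(SYS_gettid);

            if (__atomic_compare_exchange_n(futex_word, &expected, tid, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                    return 0;       /* uncontended, no syscall needed */

            return (int)syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0,
                                NULL, NULL, 0);
    }
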
1303 | |||
1304 | /* | ||
1305 | * Restart handler | ||
1306 | */ | ||
1307 | static long futex_lock_pi_restart(struct restart_block *restart) | ||
1308 | { | ||
1309 | struct hrtimer_sleeper timeout, *to = NULL; | ||
1310 | int ret; | ||
1311 | |||
1312 | restart->fn = do_no_restart_syscall; | ||
1313 | |||
1314 | if (restart->arg2 || restart->arg3) { | ||
1315 | to = &timeout; | ||
1316 | hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS); | ||
1317 | hrtimer_init_sleeper(to, current); | ||
1318 | to->timer.expires.tv64 = ((u64)restart->arg1 << 32) | | ||
1319 | (u64) restart->arg0; | ||
1320 | } | ||
1321 | |||
1322 | pr_debug("lock_pi restart: %p, %d (%d)\n", | ||
1323 | (u32 __user *)restart->arg0, current->pid); | ||
1324 | |||
1325 | ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1, | ||
1326 | 0, to); | ||
1327 | |||
1328 | if (ret != -EINTR) | ||
1329 | return ret; | ||
1330 | |||
1331 | restart->fn = futex_lock_pi_restart; | ||
1332 | |||
1333 | /* The other values are filled in */ | ||
1334 | return -ERESTART_RESTARTBLOCK; | ||
1335 | } | ||
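
Only the split/reassemble arithmetic of the restart arguments is sketched here (pack_expiry() and unpack_expiry() are invented names): the point is that a 64-bit absolute expiry has to survive in two unsigned long restart slots on 32-bit architectures before the re-sleep.

    #include <stdint.h>

    /* Split a 64-bit expiry across two unsigned long slots and put it back. */
    static void pack_expiry(uint64_t expires, unsigned long *lo, unsigned long *hi)
    {
            *lo = (unsigned long)(expires & 0xFFFFFFFFULL);
            *hi = (unsigned long)(expires >> 32);
    }

    static uint64_t unpack_expiry(unsigned long lo, unsigned long hi)
    {
            return ((uint64_t)hi << 32) | (uint64_t)(uint32_t)lo;
    }
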
1336 | |||
1337 | /* | ||
1338 | * Called from the syscall entry below. | ||
1339 | */ | ||
1340 | static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, | ||
1341 | long nsec, int trylock) | ||
1342 | { | ||
1343 | struct hrtimer_sleeper timeout, *to = NULL; | ||
1344 | struct restart_block *restart; | ||
1345 | int ret; | ||
1346 | |||
1347 | if (sec != MAX_SCHEDULE_TIMEOUT) { | ||
1348 | to = &timeout; | ||
1349 | hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS); | ||
1350 | hrtimer_init_sleeper(to, current); | ||
1351 | to->timer.expires = ktime_set(sec, nsec); | ||
1352 | } | ||
1353 | |||
1354 | ret = do_futex_lock_pi(uaddr, detect, trylock, to); | ||
1355 | |||
1356 | if (ret != -EINTR) | ||
1357 | return ret; | ||
1358 | |||
1359 | pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid); | ||
1360 | |||
1361 | restart = &current_thread_info()->restart_block; | ||
1362 | restart->fn = futex_lock_pi_restart; | ||
1363 | restart->arg0 = (unsigned long) uaddr; | ||
1364 | restart->arg1 = detect; | ||
1365 | if (to) { | ||
1366 | restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF; | ||
1367 | restart->arg3 = to->timer.expires.tv64 >> 32; | ||
1368 | } else | ||
1369 | restart->arg2 = restart->arg3 = 0; | ||
1370 | |||
1371 | return -ERESTART_RESTARTBLOCK; | ||
1372 | } | ||
1373 | |||
1374 | /* | ||
1375 | * Userspace attempted a TID -> 0 atomic transition, and failed. | ||
1376 | * This is the in-kernel slowpath: we look up the PI state (if any), | ||
1377 | * and do the rt-mutex unlock. | ||
1378 | */ | ||
1379 | static int futex_unlock_pi(u32 __user *uaddr) | ||
1380 | { | ||
1381 | struct futex_hash_bucket *hb; | ||
1382 | struct futex_q *this, *next; | ||
1383 | u32 uval; | ||
1384 | struct list_head *head; | ||
1385 | union futex_key key; | ||
1386 | int ret, attempt = 0; | ||
1387 | |||
1388 | retry: | ||
1389 | if (get_user(uval, uaddr)) | ||
1390 | return -EFAULT; | ||
1391 | /* | ||
1392 | * We release only a lock we actually own: | ||
1393 | */ | ||
1394 | if ((uval & FUTEX_TID_MASK) != current->pid) | ||
1395 | return -EPERM; | ||
1396 | /* | ||
1397 | * First take all the futex related locks: | ||
1398 | */ | ||
1399 | down_read(&current->mm->mmap_sem); | ||
1400 | |||
1401 | ret = get_futex_key(uaddr, &key); | ||
1402 | if (unlikely(ret != 0)) | ||
1403 | goto out; | ||
1404 | |||
1405 | hb = hash_futex(&key); | ||
1406 | spin_lock(&hb->lock); | ||
1407 | |||
1408 | retry_locked: | ||
1409 | /* | ||
1410 | * To avoid races, try to do the TID -> 0 atomic transition | ||
1411 | * again. If it succeeds then we can return without waking | ||
1412 | * anyone else up: | ||
1413 | */ | ||
1414 | inc_preempt_count(); | ||
1415 | uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); | ||
1416 | dec_preempt_count(); | ||
1417 | |||
1418 | if (unlikely(uval == -EFAULT)) | ||
1419 | goto pi_faulted; | ||
1420 | /* | ||
1421 | * Rare case: we managed to release the lock atomically, | ||
1422 | * no need to wake anyone else up: | ||
1423 | */ | ||
1424 | if (unlikely(uval == current->pid)) | ||
1425 | goto out_unlock; | ||
1426 | |||
1427 | /* | ||
1428 | * Ok, other tasks may need to be woken up - check waiters | ||
1429 | * and do the wakeup if necessary: | ||
1430 | */ | ||
1431 | head = &hb->chain; | ||
1432 | |||
1433 | list_for_each_entry_safe(this, next, head, list) { | ||
1434 | if (!match_futex (&this->key, &key)) | ||
1435 | continue; | ||
1436 | ret = wake_futex_pi(uaddr, uval, this); | ||
1437 | /* | ||
1438 | * The atomic access to the futex value | ||
1439 | * generated a pagefault, so retry the | ||
1440 | * user-access and the wakeup: | ||
1441 | */ | ||
1442 | if (ret == -EFAULT) | ||
1443 | goto pi_faulted; | ||
1444 | goto out_unlock; | ||
1445 | } | ||
1446 | /* | ||
1447 | * No waiters - kernel unlocks the futex: | ||
1448 | */ | ||
1449 | ret = unlock_futex_pi(uaddr, uval); | ||
1450 | if (ret == -EFAULT) | ||
1451 | goto pi_faulted; | ||
1452 | |||
1453 | out_unlock: | ||
1454 | spin_unlock(&hb->lock); | ||
1455 | out: | ||
728 | up_read(&current->mm->mmap_sem); | 1456 | up_read(&current->mm->mmap_sem); |
1457 | |||
1458 | return ret; | ||
1459 | |||
1460 | pi_faulted: | ||
1461 | /* | ||
1462 | * We have to r/w *(int __user *)uaddr, but we can't modify it | ||
1463 | * non-atomically. Therefore, if get_user below is not | ||
1464 | * enough, we need to handle the fault ourselves, while | ||
1465 | * still holding the mmap_sem. | ||
1466 | */ | ||
1467 | if (attempt++) { | ||
1468 | if (futex_handle_fault((unsigned long)uaddr, attempt)) | ||
1469 | goto out_unlock; | ||
1470 | |||
1471 | goto retry_locked; | ||
1472 | } | ||
1473 | |||
1474 | spin_unlock(&hb->lock); | ||
1475 | up_read(&current->mm->mmap_sem); | ||
1476 | |||
1477 | ret = get_user(uval, uaddr); | ||
1478 | if (!ret && (uval != -EFAULT)) | ||
1479 | goto retry; | ||
1480 | |||
729 | return ret; | 1481 | return ret; |
730 | } | 1482 | } |
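
The matching userspace unlock sketch (same caveats as the pi_lock() sketch earlier): try the TID -> 0 transition first and only call FUTEX_UNLOCK_PI when the word no longer holds just our TID, i.e. when the kernel must pick the next owner.

    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Release an uncontended PI lock in userspace; fall back to the kernel
     * slowpath above when waiters exist or the owner-died bit is set. */
    static int pi_unlock(uint32_t *futex_word)
    {
            uint32_t tid = (uint32_t)syscall(SYS_gettid);
            uint32_t expected = tid;

            if (__atomic_compare_exchange_n(futex_word, &expected, 0, 0,
                                            __ATOMIC_RELEASE, __ATOMIC_RELAXED))
                    return 0;       /* nobody was waiting */

            return (int)syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0,
                                NULL, NULL, 0);
    }
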
731 | 1483 | ||
@@ -735,6 +1487,7 @@ static int futex_close(struct inode *inode, struct file *filp) | |||
735 | 1487 | ||
736 | unqueue_me(q); | 1488 | unqueue_me(q); |
737 | kfree(q); | 1489 | kfree(q); |
1490 | |||
738 | return 0; | 1491 | return 0; |
739 | } | 1492 | } |
740 | 1493 | ||
@@ -766,7 +1519,7 @@ static struct file_operations futex_fops = { | |||
766 | * Signal allows caller to avoid the race which would occur if they | 1519 | * Signal allows caller to avoid the race which would occur if they |
767 | * set the sigio stuff up afterwards. | 1520 | * set the sigio stuff up afterwards. |
768 | */ | 1521 | */ |
769 | static int futex_fd(unsigned long uaddr, int signal) | 1522 | static int futex_fd(u32 __user *uaddr, int signal) |
770 | { | 1523 | { |
771 | struct futex_q *q; | 1524 | struct futex_q *q; |
772 | struct file *filp; | 1525 | struct file *filp; |
@@ -803,6 +1556,7 @@ static int futex_fd(unsigned long uaddr, int signal) | |||
803 | err = -ENOMEM; | 1556 | err = -ENOMEM; |
804 | goto error; | 1557 | goto error; |
805 | } | 1558 | } |
1559 | q->pi_state = NULL; | ||
806 | 1560 | ||
807 | down_read(&current->mm->mmap_sem); | 1561 | down_read(&current->mm->mmap_sem); |
808 | err = get_futex_key(uaddr, &q->key); | 1562 | err = get_futex_key(uaddr, &q->key); |
@@ -840,7 +1594,7 @@ error: | |||
840 | * Implementation: user-space maintains a per-thread list of locks it | 1594 | * Implementation: user-space maintains a per-thread list of locks it |
841 | * is holding. Upon do_exit(), the kernel carefully walks this list, | 1595 | * is holding. Upon do_exit(), the kernel carefully walks this list, |
842 | * and marks all locks that are owned by this thread with the | 1596 | * and marks all locks that are owned by this thread with the |
843 | * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is | 1597 | * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is |
844 | * always manipulated with the lock held, so the list is private and | 1598 | * always manipulated with the lock held, so the list is private and |
845 | * per-thread. Userspace also maintains a per-thread 'list_op_pending' | 1599 | * per-thread. Userspace also maintains a per-thread 'list_op_pending' |
846 | * field, to allow the kernel to clean up if the thread dies after | 1600 | * field, to allow the kernel to clean up if the thread dies after |
@@ -915,7 +1669,7 @@ err_unlock: | |||
915 | */ | 1669 | */ |
916 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) | 1670 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) |
917 | { | 1671 | { |
918 | u32 uval; | 1672 | u32 uval, nval; |
919 | 1673 | ||
920 | retry: | 1674 | retry: |
921 | if (get_user(uval, uaddr)) | 1675 | if (get_user(uval, uaddr)) |
@@ -932,12 +1686,16 @@ retry: | |||
932 | * thread-death.) The rest of the cleanup is done in | 1686 | * thread-death.) The rest of the cleanup is done in |
933 | * userspace. | 1687 | * userspace. |
934 | */ | 1688 | */ |
935 | if (futex_atomic_cmpxchg_inatomic(uaddr, uval, | 1689 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, |
936 | uval | FUTEX_OWNER_DIED) != uval) | 1690 | uval | FUTEX_OWNER_DIED); |
1691 | if (nval == -EFAULT) | ||
1692 | return -1; | ||
1693 | |||
1694 | if (nval != uval) | ||
937 | goto retry; | 1695 | goto retry; |
938 | 1696 | ||
939 | if (uval & FUTEX_WAITERS) | 1697 | if (uval & FUTEX_WAITERS) |
940 | futex_wake((unsigned long)uaddr, 1); | 1698 | futex_wake(uaddr, 1); |
941 | } | 1699 | } |
942 | return 0; | 1700 | return 0; |
943 | } | 1701 | } |
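
From userspace, the OWNER_DIED marking done above is visible to whoever acquires the lock next; a minimal check follows, with the helper name lock_needs_recovery() made up for the sketch.

    #include <linux/futex.h>
    #include <stdint.h>

    /* After acquiring a robust lock, a nonzero result means the previous
     * owner died while holding it, so the protected data may need
     * recovery before use. */
    static int lock_needs_recovery(uint32_t uval_after_acquire)
    {
            return (uval_after_acquire & FUTEX_OWNER_DIED) != 0;
    }
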
@@ -978,7 +1736,7 @@ void exit_robust_list(struct task_struct *curr) | |||
978 | while (entry != &head->list) { | 1736 | while (entry != &head->list) { |
979 | /* | 1737 | /* |
980 | * A pending lock might already be on the list, so | 1738 | * A pending lock might already be on the list, so |
981 | * dont process it twice: | 1739 | * don't process it twice: |
982 | */ | 1740 | */ |
983 | if (entry != pending) | 1741 | if (entry != pending) |
984 | if (handle_futex_death((void *)entry + futex_offset, | 1742 | if (handle_futex_death((void *)entry + futex_offset, |
@@ -999,8 +1757,8 @@ void exit_robust_list(struct task_struct *curr) | |||
999 | } | 1757 | } |
1000 | } | 1758 | } |
1001 | 1759 | ||
1002 | long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, | 1760 | long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, |
1003 | unsigned long uaddr2, int val2, int val3) | 1761 | u32 __user *uaddr2, u32 val2, u32 val3) |
1004 | { | 1762 | { |
1005 | int ret; | 1763 | int ret; |
1006 | 1764 | ||
@@ -1024,6 +1782,15 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, | |||
1024 | case FUTEX_WAKE_OP: | 1782 | case FUTEX_WAKE_OP: |
1025 | ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); | 1783 | ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); |
1026 | break; | 1784 | break; |
1785 | case FUTEX_LOCK_PI: | ||
1786 | ret = futex_lock_pi(uaddr, val, timeout, val2, 0); | ||
1787 | break; | ||
1788 | case FUTEX_UNLOCK_PI: | ||
1789 | ret = futex_unlock_pi(uaddr); | ||
1790 | break; | ||
1791 | case FUTEX_TRYLOCK_PI: | ||
1792 | ret = futex_lock_pi(uaddr, 0, timeout, val2, 1); | ||
1793 | break; | ||
1027 | default: | 1794 | default: |
1028 | ret = -ENOSYS; | 1795 | ret = -ENOSYS; |
1029 | } | 1796 | } |
@@ -1031,29 +1798,33 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, | |||
1031 | } | 1798 | } |
1032 | 1799 | ||
1033 | 1800 | ||
1034 | asmlinkage long sys_futex(u32 __user *uaddr, int op, int val, | 1801 | asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, |
1035 | struct timespec __user *utime, u32 __user *uaddr2, | 1802 | struct timespec __user *utime, u32 __user *uaddr2, |
1036 | int val3) | 1803 | u32 val3) |
1037 | { | 1804 | { |
1038 | struct timespec t; | 1805 | struct timespec t; |
1039 | unsigned long timeout = MAX_SCHEDULE_TIMEOUT; | 1806 | unsigned long timeout = MAX_SCHEDULE_TIMEOUT; |
1040 | int val2 = 0; | 1807 | u32 val2 = 0; |
1041 | 1808 | ||
1042 | if (utime && (op == FUTEX_WAIT)) { | 1809 | if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { |
1043 | if (copy_from_user(&t, utime, sizeof(t)) != 0) | 1810 | if (copy_from_user(&t, utime, sizeof(t)) != 0) |
1044 | return -EFAULT; | 1811 | return -EFAULT; |
1045 | if (!timespec_valid(&t)) | 1812 | if (!timespec_valid(&t)) |
1046 | return -EINVAL; | 1813 | return -EINVAL; |
1047 | timeout = timespec_to_jiffies(&t) + 1; | 1814 | if (op == FUTEX_WAIT) |
1815 | timeout = timespec_to_jiffies(&t) + 1; | ||
1816 | else { | ||
1817 | timeout = t.tv_sec; | ||
1818 | val2 = t.tv_nsec; | ||
1819 | } | ||
1048 | } | 1820 | } |
1049 | /* | 1821 | /* |
1050 | * requeue parameter in 'utime' if op == FUTEX_REQUEUE. | 1822 | * requeue parameter in 'utime' if op == FUTEX_REQUEUE. |
1051 | */ | 1823 | */ |
1052 | if (op >= FUTEX_REQUEUE) | 1824 | if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) |
1053 | val2 = (int) (unsigned long) utime; | 1825 | val2 = (u32) (unsigned long) utime; |
1054 | 1826 | ||
1055 | return do_futex((unsigned long)uaddr, op, val, timeout, | 1827 | return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3); |
1056 | (unsigned long)uaddr2, val2, val3); | ||
1057 | } | 1828 | } |
1058 | 1829 | ||
1059 | static int futexfs_get_sb(struct file_system_type *fs_type, | 1830 | static int futexfs_get_sb(struct file_system_type *fs_type, |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 1ab6a0ea3d14..d1d92b441fb7 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -129,16 +129,20 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, | |||
129 | unsigned long timeout = MAX_SCHEDULE_TIMEOUT; | 129 | unsigned long timeout = MAX_SCHEDULE_TIMEOUT; |
130 | int val2 = 0; | 130 | int val2 = 0; |
131 | 131 | ||
132 | if (utime && (op == FUTEX_WAIT)) { | 132 | if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { |
133 | if (get_compat_timespec(&t, utime)) | 133 | if (get_compat_timespec(&t, utime)) |
134 | return -EFAULT; | 134 | return -EFAULT; |
135 | if (!timespec_valid(&t)) | 135 | if (!timespec_valid(&t)) |
136 | return -EINVAL; | 136 | return -EINVAL; |
137 | timeout = timespec_to_jiffies(&t) + 1; | 137 | if (op == FUTEX_WAIT) |
138 | timeout = timespec_to_jiffies(&t) + 1; | ||
139 | else { | ||
140 | timeout = t.tv_sec; | ||
141 | val2 = t.tv_nsec; | ||
142 | } | ||
138 | } | 143 | } |
139 | if (op >= FUTEX_REQUEUE) | 144 | if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) |
140 | val2 = (int) (unsigned long) utime; | 145 | val2 = (int) (unsigned long) utime; |
141 | 146 | ||
142 | return do_futex((unsigned long)uaddr, op, val, timeout, | 147 | return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3); |
143 | (unsigned long)uaddr2, val2, val3); | ||
144 | } | 148 | } |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 55601b3ce60e..8d3dc29ef41a 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -833,7 +833,7 @@ static void migrate_hrtimers(int cpu) | |||
833 | } | 833 | } |
834 | #endif /* CONFIG_HOTPLUG_CPU */ | 834 | #endif /* CONFIG_HOTPLUG_CPU */ |
835 | 835 | ||
836 | static int hrtimer_cpu_notify(struct notifier_block *self, | 836 | static int __devinit hrtimer_cpu_notify(struct notifier_block *self, |
837 | unsigned long action, void *hcpu) | 837 | unsigned long action, void *hcpu) |
838 | { | 838 | { |
839 | long cpu = (long)hcpu; | 839 | long cpu = (long)hcpu; |
@@ -857,7 +857,7 @@ static int hrtimer_cpu_notify(struct notifier_block *self, | |||
857 | return NOTIFY_OK; | 857 | return NOTIFY_OK; |
858 | } | 858 | } |
859 | 859 | ||
860 | static struct notifier_block hrtimers_nb = { | 860 | static struct notifier_block __devinitdata hrtimers_nb = { |
861 | .notifier_call = hrtimer_cpu_notify, | 861 | .notifier_call = hrtimer_cpu_notify, |
862 | }; | 862 | }; |
863 | 863 | ||
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c index 036b6285b15c..e38e4bac97ca 100644 --- a/kernel/mutex-debug.c +++ b/kernel/mutex-debug.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/poison.h> | ||
19 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
20 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
@@ -381,7 +382,7 @@ void debug_mutex_set_owner(struct mutex *lock, | |||
381 | 382 | ||
382 | void debug_mutex_init_waiter(struct mutex_waiter *waiter) | 383 | void debug_mutex_init_waiter(struct mutex_waiter *waiter) |
383 | { | 384 | { |
384 | memset(waiter, 0x11, sizeof(*waiter)); | 385 | memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); |
385 | waiter->magic = waiter; | 386 | waiter->magic = waiter; |
386 | INIT_LIST_HEAD(&waiter->list); | 387 | INIT_LIST_HEAD(&waiter->list); |
387 | } | 388 | } |
@@ -397,7 +398,7 @@ void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) | |||
397 | void debug_mutex_free_waiter(struct mutex_waiter *waiter) | 398 | void debug_mutex_free_waiter(struct mutex_waiter *waiter) |
398 | { | 399 | { |
399 | DEBUG_WARN_ON(!list_empty(&waiter->list)); | 400 | DEBUG_WARN_ON(!list_empty(&waiter->list)); |
400 | memset(waiter, 0x22, sizeof(*waiter)); | 401 | memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter)); |
401 | } | 402 | } |
402 | 403 | ||
403 | void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, | 404 | void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index fc311a4673a2..857b4fa09124 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -38,13 +38,22 @@ config PM_DEBUG | |||
38 | 38 | ||
39 | config PM_TRACE | 39 | config PM_TRACE |
40 | bool "Suspend/resume event tracing" | 40 | bool "Suspend/resume event tracing" |
41 | depends on PM && PM_DEBUG && X86_32 | 41 | depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL |
42 | default y | 42 | default n |
43 | ---help--- | 43 | ---help--- |
44 | This enables some cheesy code to save the last PM event point in the | 44 | This enables some cheesy code to save the last PM event point in the |
45 | RTC across reboots, so that you can debug a machine that just hangs | 45 | RTC across reboots, so that you can debug a machine that just hangs |
46 | during suspend (or more commonly, during resume). | 46 | during suspend (or more commonly, during resume). |
47 | 47 | ||
48 | To use this debugging feature you should attempt to suspend the machine, | ||
49 | then reboot it, then run | ||
50 | |||
51 | dmesg -s 1000000 | grep 'hash matches' | ||
52 | |||
53 | CAUTION: this option will cause your machine's real-time clock to be | ||
54 | set to an invalid time after a resume. | ||
55 | |||
56 | |||
48 | config SOFTWARE_SUSPEND | 57 | config SOFTWARE_SUSPEND |
49 | bool "Software Suspend" | 58 | bool "Software Suspend" |
50 | depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP) | 59 | depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP) |
diff --git a/kernel/profile.c b/kernel/profile.c index 68afe121e507..5a730fdb1a2c 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -299,7 +299,7 @@ out: | |||
299 | } | 299 | } |
300 | 300 | ||
301 | #ifdef CONFIG_HOTPLUG_CPU | 301 | #ifdef CONFIG_HOTPLUG_CPU |
302 | static int profile_cpu_callback(struct notifier_block *info, | 302 | static int __devinit profile_cpu_callback(struct notifier_block *info, |
303 | unsigned long action, void *__cpu) | 303 | unsigned long action, void *__cpu) |
304 | { | 304 | { |
305 | int node, cpu = (unsigned long)__cpu; | 305 | int node, cpu = (unsigned long)__cpu; |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 20e9710fc21c..f464f5ae3f11 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -182,6 +182,15 @@ long rcu_batches_completed(void) | |||
182 | return rcu_ctrlblk.completed; | 182 | return rcu_ctrlblk.completed; |
183 | } | 183 | } |
184 | 184 | ||
185 | /* | ||
186 | * Return the number of RCU batches processed thus far. Useful | ||
187 | * for debug and statistics. | ||
188 | */ | ||
189 | long rcu_batches_completed_bh(void) | ||
190 | { | ||
191 | return rcu_bh_ctrlblk.completed; | ||
192 | } | ||
193 | |||
185 | static void rcu_barrier_callback(struct rcu_head *notused) | 194 | static void rcu_barrier_callback(struct rcu_head *notused) |
186 | { | 195 | { |
187 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | 196 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) |
@@ -539,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu) | |||
539 | tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); | 548 | tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); |
540 | } | 549 | } |
541 | 550 | ||
542 | static int rcu_cpu_notify(struct notifier_block *self, | 551 | static int __devinit rcu_cpu_notify(struct notifier_block *self, |
543 | unsigned long action, void *hcpu) | 552 | unsigned long action, void *hcpu) |
544 | { | 553 | { |
545 | long cpu = (long)hcpu; | 554 | long cpu = (long)hcpu; |
@@ -556,7 +565,7 @@ static int rcu_cpu_notify(struct notifier_block *self, | |||
556 | return NOTIFY_OK; | 565 | return NOTIFY_OK; |
557 | } | 566 | } |
558 | 567 | ||
559 | static struct notifier_block rcu_nb = { | 568 | static struct notifier_block __devinitdata rcu_nb = { |
560 | .notifier_call = rcu_cpu_notify, | 569 | .notifier_call = rcu_cpu_notify, |
561 | }; | 570 | }; |
562 | 571 | ||
@@ -619,6 +628,7 @@ module_param(qlowmark, int, 0); | |||
619 | module_param(rsinterval, int, 0); | 628 | module_param(rsinterval, int, 0); |
620 | #endif | 629 | #endif |
621 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | 630 | EXPORT_SYMBOL_GPL(rcu_batches_completed); |
631 | EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | ||
622 | EXPORT_SYMBOL_GPL(call_rcu); | 632 | EXPORT_SYMBOL_GPL(call_rcu); |
623 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 633 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
624 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 634 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
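The new rcu_batches_completed_bh() export mirrors rcu_batches_completed() for the bh flavor, so a caller can bound how many grace-period batches elapsed across a region. A minimal sketch of that pattern, assuming kernel context; the helper name is hypothetical and schedule_timeout_uninterruptible() is the same primitive the torture test itself uses:

    /* Wait until at least n rcu_bh batches have completed (sketch only). */
    static void wait_for_rcu_bh_batches(long n)
    {
            long start = rcu_batches_completed_bh();

            while (rcu_batches_completed_bh() - start < n)
                    schedule_timeout_uninterruptible(1);    /* let batches advance */
    }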
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 8154e7589d12..4d1c3d247127 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Read-Copy Update /proc-based torture test facility | 2 | * Read-Copy Update module-based torture test facility |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -53,6 +53,7 @@ static int stat_interval; /* Interval between stats, in seconds. */ | |||
53 | static int verbose; /* Print more debug info. */ | 53 | static int verbose; /* Print more debug info. */ |
54 | static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ | 54 | static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ |
55 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ | 55 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ |
56 | static char *torture_type = "rcu"; /* What to torture. */ | ||
56 | 57 | ||
57 | module_param(nreaders, int, 0); | 58 | module_param(nreaders, int, 0); |
58 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | 59 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); |
@@ -64,13 +65,16 @@ module_param(test_no_idle_hz, bool, 0); | |||
64 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | 65 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); |
65 | module_param(shuffle_interval, int, 0); | 66 | module_param(shuffle_interval, int, 0); |
66 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | 67 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); |
67 | #define TORTURE_FLAG "rcutorture: " | 68 | module_param(torture_type, charp, 0); |
69 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh)"); | ||
70 | |||
71 | #define TORTURE_FLAG "-torture:" | ||
68 | #define PRINTK_STRING(s) \ | 72 | #define PRINTK_STRING(s) \ |
69 | do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0) | 73 | do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0) |
70 | #define VERBOSE_PRINTK_STRING(s) \ | 74 | #define VERBOSE_PRINTK_STRING(s) \ |
71 | do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0) | 75 | do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0) |
72 | #define VERBOSE_PRINTK_ERRSTRING(s) \ | 76 | #define VERBOSE_PRINTK_ERRSTRING(s) \ |
73 | do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0) | 77 | do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) |
74 | 78 | ||
75 | static char printk_buf[4096]; | 79 | static char printk_buf[4096]; |
76 | 80 | ||
@@ -139,28 +143,6 @@ rcu_torture_free(struct rcu_torture *p) | |||
139 | spin_unlock_bh(&rcu_torture_lock); | 143 | spin_unlock_bh(&rcu_torture_lock); |
140 | } | 144 | } |
141 | 145 | ||
142 | static void | ||
143 | rcu_torture_cb(struct rcu_head *p) | ||
144 | { | ||
145 | int i; | ||
146 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); | ||
147 | |||
148 | if (fullstop) { | ||
149 | /* Test is ending, just drop callbacks on the floor. */ | ||
150 | /* The next initialization will pick up the pieces. */ | ||
151 | return; | ||
152 | } | ||
153 | i = rp->rtort_pipe_count; | ||
154 | if (i > RCU_TORTURE_PIPE_LEN) | ||
155 | i = RCU_TORTURE_PIPE_LEN; | ||
156 | atomic_inc(&rcu_torture_wcount[i]); | ||
157 | if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { | ||
158 | rp->rtort_mbtest = 0; | ||
159 | rcu_torture_free(rp); | ||
160 | } else | ||
161 | call_rcu(p, rcu_torture_cb); | ||
162 | } | ||
163 | |||
164 | struct rcu_random_state { | 146 | struct rcu_random_state { |
165 | unsigned long rrs_state; | 147 | unsigned long rrs_state; |
166 | unsigned long rrs_count; | 148 | unsigned long rrs_count; |
@@ -191,6 +173,119 @@ rcu_random(struct rcu_random_state *rrsp) | |||
191 | } | 173 | } |
192 | 174 | ||
193 | /* | 175 | /* |
176 | * Operations vector for selecting different types of tests. | ||
177 | */ | ||
178 | |||
179 | struct rcu_torture_ops { | ||
180 | void (*init)(void); | ||
181 | void (*cleanup)(void); | ||
182 | int (*readlock)(void); | ||
183 | void (*readunlock)(int idx); | ||
184 | int (*completed)(void); | ||
185 | void (*deferredfree)(struct rcu_torture *p); | ||
186 | int (*stats)(char *page); | ||
187 | char *name; | ||
188 | }; | ||
189 | static struct rcu_torture_ops *cur_ops = NULL; | ||
190 | |||
191 | /* | ||
192 | * Definitions for rcu torture testing. | ||
193 | */ | ||
194 | |||
195 | static int rcu_torture_read_lock(void) | ||
196 | { | ||
197 | rcu_read_lock(); | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static void rcu_torture_read_unlock(int idx) | ||
202 | { | ||
203 | rcu_read_unlock(); | ||
204 | } | ||
205 | |||
206 | static int rcu_torture_completed(void) | ||
207 | { | ||
208 | return rcu_batches_completed(); | ||
209 | } | ||
210 | |||
211 | static void | ||
212 | rcu_torture_cb(struct rcu_head *p) | ||
213 | { | ||
214 | int i; | ||
215 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); | ||
216 | |||
217 | if (fullstop) { | ||
218 | /* Test is ending, just drop callbacks on the floor. */ | ||
219 | /* The next initialization will pick up the pieces. */ | ||
220 | return; | ||
221 | } | ||
222 | i = rp->rtort_pipe_count; | ||
223 | if (i > RCU_TORTURE_PIPE_LEN) | ||
224 | i = RCU_TORTURE_PIPE_LEN; | ||
225 | atomic_inc(&rcu_torture_wcount[i]); | ||
226 | if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { | ||
227 | rp->rtort_mbtest = 0; | ||
228 | rcu_torture_free(rp); | ||
229 | } else | ||
230 | cur_ops->deferredfree(rp); | ||
231 | } | ||
232 | |||
233 | static void rcu_torture_deferred_free(struct rcu_torture *p) | ||
234 | { | ||
235 | call_rcu(&p->rtort_rcu, rcu_torture_cb); | ||
236 | } | ||
237 | |||
238 | static struct rcu_torture_ops rcu_ops = { | ||
239 | .init = NULL, | ||
240 | .cleanup = NULL, | ||
241 | .readlock = rcu_torture_read_lock, | ||
242 | .readunlock = rcu_torture_read_unlock, | ||
243 | .completed = rcu_torture_completed, | ||
244 | .deferredfree = rcu_torture_deferred_free, | ||
245 | .stats = NULL, | ||
246 | .name = "rcu" | ||
247 | }; | ||
248 | |||
249 | /* | ||
250 | * Definitions for rcu_bh torture testing. | ||
251 | */ | ||
252 | |||
253 | static int rcu_bh_torture_read_lock(void) | ||
254 | { | ||
255 | rcu_read_lock_bh(); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static void rcu_bh_torture_read_unlock(int idx) | ||
260 | { | ||
261 | rcu_read_unlock_bh(); | ||
262 | } | ||
263 | |||
264 | static int rcu_bh_torture_completed(void) | ||
265 | { | ||
266 | return rcu_batches_completed_bh(); | ||
267 | } | ||
268 | |||
269 | static void rcu_bh_torture_deferred_free(struct rcu_torture *p) | ||
270 | { | ||
271 | call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); | ||
272 | } | ||
273 | |||
274 | static struct rcu_torture_ops rcu_bh_ops = { | ||
275 | .init = NULL, | ||
276 | .cleanup = NULL, | ||
277 | .readlock = rcu_bh_torture_read_lock, | ||
278 | .readunlock = rcu_bh_torture_read_unlock, | ||
279 | .completed = rcu_bh_torture_completed, | ||
280 | .deferredfree = rcu_bh_torture_deferred_free, | ||
281 | .stats = NULL, | ||
282 | .name = "rcu_bh" | ||
283 | }; | ||
284 | |||
285 | static struct rcu_torture_ops *torture_ops[] = | ||
286 | { &rcu_ops, &rcu_bh_ops, NULL }; | ||
287 | |||
288 | /* | ||
194 | * RCU torture writer kthread. Repeatedly substitutes a new structure | 289 | * RCU torture writer kthread. Repeatedly substitutes a new structure |
195 | * for that pointed to by rcu_torture_current, freeing the old structure | 290 | * for that pointed to by rcu_torture_current, freeing the old structure |
196 | * after a series of grace periods (the "pipeline"). | 291 | * after a series of grace periods (the "pipeline"). |
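The rcu_torture_ops hunk above collects the flavor-specific pieces (read-side entry/exit, completion counter, deferred free, optional init/cleanup/stats) into a small operations vector selected by name from torture_ops[]. A hedged sketch of how a further flavor would plug in; the my_* names are hypothetical, and the completed/deferredfree hooks are simply reused from the rcu flavor for illustration:

    static int my_torture_read_lock(void)
    {
            rcu_read_lock();                /* flavor's read-side entry */
            return 0;                       /* no nesting index needed */
    }

    static void my_torture_read_unlock(int idx)
    {
            rcu_read_unlock();              /* flavor's read-side exit */
    }

    static struct rcu_torture_ops my_ops = {
            .readlock       = my_torture_read_lock,
            .readunlock     = my_torture_read_unlock,
            .completed      = rcu_torture_completed,        /* from the patch */
            .deferredfree   = rcu_torture_deferred_free,    /* from the patch */
            .name           = "my_rcu"
    };

    /* &my_ops would then be appended to torture_ops[] so that loading the
     * module with torture_type=my_rcu selects this flavor. */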
@@ -209,8 +304,6 @@ rcu_torture_writer(void *arg) | |||
209 | 304 | ||
210 | do { | 305 | do { |
211 | schedule_timeout_uninterruptible(1); | 306 | schedule_timeout_uninterruptible(1); |
212 | if (rcu_batches_completed() == oldbatch) | ||
213 | continue; | ||
214 | if ((rp = rcu_torture_alloc()) == NULL) | 307 | if ((rp = rcu_torture_alloc()) == NULL) |
215 | continue; | 308 | continue; |
216 | rp->rtort_pipe_count = 0; | 309 | rp->rtort_pipe_count = 0; |
@@ -225,10 +318,10 @@ rcu_torture_writer(void *arg) | |||
225 | i = RCU_TORTURE_PIPE_LEN; | 318 | i = RCU_TORTURE_PIPE_LEN; |
226 | atomic_inc(&rcu_torture_wcount[i]); | 319 | atomic_inc(&rcu_torture_wcount[i]); |
227 | old_rp->rtort_pipe_count++; | 320 | old_rp->rtort_pipe_count++; |
228 | call_rcu(&old_rp->rtort_rcu, rcu_torture_cb); | 321 | cur_ops->deferredfree(old_rp); |
229 | } | 322 | } |
230 | rcu_torture_current_version++; | 323 | rcu_torture_current_version++; |
231 | oldbatch = rcu_batches_completed(); | 324 | oldbatch = cur_ops->completed(); |
232 | } while (!kthread_should_stop() && !fullstop); | 325 | } while (!kthread_should_stop() && !fullstop); |
233 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 326 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); |
234 | while (!kthread_should_stop()) | 327 | while (!kthread_should_stop()) |
@@ -246,6 +339,7 @@ static int | |||
246 | rcu_torture_reader(void *arg) | 339 | rcu_torture_reader(void *arg) |
247 | { | 340 | { |
248 | int completed; | 341 | int completed; |
342 | int idx; | ||
249 | DEFINE_RCU_RANDOM(rand); | 343 | DEFINE_RCU_RANDOM(rand); |
250 | struct rcu_torture *p; | 344 | struct rcu_torture *p; |
251 | int pipe_count; | 345 | int pipe_count; |
@@ -254,12 +348,12 @@ rcu_torture_reader(void *arg) | |||
254 | set_user_nice(current, 19); | 348 | set_user_nice(current, 19); |
255 | 349 | ||
256 | do { | 350 | do { |
257 | rcu_read_lock(); | 351 | idx = cur_ops->readlock(); |
258 | completed = rcu_batches_completed(); | 352 | completed = cur_ops->completed(); |
259 | p = rcu_dereference(rcu_torture_current); | 353 | p = rcu_dereference(rcu_torture_current); |
260 | if (p == NULL) { | 354 | if (p == NULL) { |
261 | /* Wait for rcu_torture_writer to get underway */ | 355 | /* Wait for rcu_torture_writer to get underway */ |
262 | rcu_read_unlock(); | 356 | cur_ops->readunlock(idx); |
263 | schedule_timeout_interruptible(HZ); | 357 | schedule_timeout_interruptible(HZ); |
264 | continue; | 358 | continue; |
265 | } | 359 | } |
@@ -273,14 +367,14 @@ rcu_torture_reader(void *arg) | |||
273 | pipe_count = RCU_TORTURE_PIPE_LEN; | 367 | pipe_count = RCU_TORTURE_PIPE_LEN; |
274 | } | 368 | } |
275 | ++__get_cpu_var(rcu_torture_count)[pipe_count]; | 369 | ++__get_cpu_var(rcu_torture_count)[pipe_count]; |
276 | completed = rcu_batches_completed() - completed; | 370 | completed = cur_ops->completed() - completed; |
277 | if (completed > RCU_TORTURE_PIPE_LEN) { | 371 | if (completed > RCU_TORTURE_PIPE_LEN) { |
278 | /* Should not happen, but... */ | 372 | /* Should not happen, but... */ |
279 | completed = RCU_TORTURE_PIPE_LEN; | 373 | completed = RCU_TORTURE_PIPE_LEN; |
280 | } | 374 | } |
281 | ++__get_cpu_var(rcu_torture_batch)[completed]; | 375 | ++__get_cpu_var(rcu_torture_batch)[completed]; |
282 | preempt_enable(); | 376 | preempt_enable(); |
283 | rcu_read_unlock(); | 377 | cur_ops->readunlock(idx); |
284 | schedule(); | 378 | schedule(); |
285 | } while (!kthread_should_stop() && !fullstop); | 379 | } while (!kthread_should_stop() && !fullstop); |
286 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | 380 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); |
@@ -311,7 +405,7 @@ rcu_torture_printk(char *page) | |||
311 | if (pipesummary[i] != 0) | 405 | if (pipesummary[i] != 0) |
312 | break; | 406 | break; |
313 | } | 407 | } |
314 | cnt += sprintf(&page[cnt], "rcutorture: "); | 408 | cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); |
315 | cnt += sprintf(&page[cnt], | 409 | cnt += sprintf(&page[cnt], |
316 | "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d " | 410 | "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d " |
317 | "rtmbe: %d", | 411 | "rtmbe: %d", |
@@ -324,7 +418,7 @@ rcu_torture_printk(char *page) | |||
324 | atomic_read(&n_rcu_torture_mberror)); | 418 | atomic_read(&n_rcu_torture_mberror)); |
325 | if (atomic_read(&n_rcu_torture_mberror) != 0) | 419 | if (atomic_read(&n_rcu_torture_mberror) != 0) |
326 | cnt += sprintf(&page[cnt], " !!!"); | 420 | cnt += sprintf(&page[cnt], " !!!"); |
327 | cnt += sprintf(&page[cnt], "\nrcutorture: "); | 421 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); |
328 | if (i > 1) { | 422 | if (i > 1) { |
329 | cnt += sprintf(&page[cnt], "!!! "); | 423 | cnt += sprintf(&page[cnt], "!!! "); |
330 | atomic_inc(&n_rcu_torture_error); | 424 | atomic_inc(&n_rcu_torture_error); |
@@ -332,17 +426,19 @@ rcu_torture_printk(char *page) | |||
332 | cnt += sprintf(&page[cnt], "Reader Pipe: "); | 426 | cnt += sprintf(&page[cnt], "Reader Pipe: "); |
333 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) | 427 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
334 | cnt += sprintf(&page[cnt], " %ld", pipesummary[i]); | 428 | cnt += sprintf(&page[cnt], " %ld", pipesummary[i]); |
335 | cnt += sprintf(&page[cnt], "\nrcutorture: "); | 429 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); |
336 | cnt += sprintf(&page[cnt], "Reader Batch: "); | 430 | cnt += sprintf(&page[cnt], "Reader Batch: "); |
337 | for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++) | 431 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
338 | cnt += sprintf(&page[cnt], " %ld", batchsummary[i]); | 432 | cnt += sprintf(&page[cnt], " %ld", batchsummary[i]); |
339 | cnt += sprintf(&page[cnt], "\nrcutorture: "); | 433 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); |
340 | cnt += sprintf(&page[cnt], "Free-Block Circulation: "); | 434 | cnt += sprintf(&page[cnt], "Free-Block Circulation: "); |
341 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { | 435 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
342 | cnt += sprintf(&page[cnt], " %d", | 436 | cnt += sprintf(&page[cnt], " %d", |
343 | atomic_read(&rcu_torture_wcount[i])); | 437 | atomic_read(&rcu_torture_wcount[i])); |
344 | } | 438 | } |
345 | cnt += sprintf(&page[cnt], "\n"); | 439 | cnt += sprintf(&page[cnt], "\n"); |
440 | if (cur_ops->stats != NULL) | ||
441 | cnt += cur_ops->stats(&page[cnt]); | ||
346 | return cnt; | 442 | return cnt; |
347 | } | 443 | } |
348 | 444 | ||
@@ -444,11 +540,11 @@ rcu_torture_shuffle(void *arg) | |||
444 | static inline void | 540 | static inline void |
445 | rcu_torture_print_module_parms(char *tag) | 541 | rcu_torture_print_module_parms(char *tag) |
446 | { | 542 | { |
447 | printk(KERN_ALERT TORTURE_FLAG "--- %s: nreaders=%d " | 543 | printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d " |
448 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " | 544 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
449 | "shuffle_interval = %d\n", | 545 | "shuffle_interval = %d\n", |
450 | tag, nrealreaders, stat_interval, verbose, test_no_idle_hz, | 546 | torture_type, tag, nrealreaders, stat_interval, verbose, |
451 | shuffle_interval); | 547 | test_no_idle_hz, shuffle_interval); |
452 | } | 548 | } |
453 | 549 | ||
454 | static void | 550 | static void |
@@ -493,6 +589,9 @@ rcu_torture_cleanup(void) | |||
493 | rcu_barrier(); | 589 | rcu_barrier(); |
494 | 590 | ||
495 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ | 591 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
592 | |||
593 | if (cur_ops->cleanup != NULL) | ||
594 | cur_ops->cleanup(); | ||
496 | if (atomic_read(&n_rcu_torture_error)) | 595 | if (atomic_read(&n_rcu_torture_error)) |
497 | rcu_torture_print_module_parms("End of test: FAILURE"); | 596 | rcu_torture_print_module_parms("End of test: FAILURE"); |
498 | else | 597 | else |
@@ -508,6 +607,20 @@ rcu_torture_init(void) | |||
508 | 607 | ||
509 | /* Process args and tell the world that the torturer is on the job. */ | 608 | /* Process args and tell the world that the torturer is on the job. */ |
510 | 609 | ||
610 | for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) { | ||
611 | cur_ops = torture_ops[i]; | ||
612 | if (strcmp(torture_type, cur_ops->name) == 0) { | ||
613 | break; | ||
614 | } | ||
615 | } | ||
616 | if (cur_ops == NULL) { | ||
617 | printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", | ||
618 | torture_type); | ||
619 | return (-EINVAL); | ||
620 | } | ||
621 | if (cur_ops->init != NULL) | ||
622 | cur_ops->init(); /* no "goto unwind" prior to this point!!! */ | ||
623 | |||
511 | if (nreaders >= 0) | 624 | if (nreaders >= 0) |
512 | nrealreaders = nreaders; | 625 | nrealreaders = nreaders; |
513 | else | 626 | else |
diff --git a/kernel/resource.c b/kernel/resource.c index e3080fcc66a3..2404f9b0bc47 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -232,6 +232,44 @@ int release_resource(struct resource *old) | |||
232 | 232 | ||
233 | EXPORT_SYMBOL(release_resource); | 233 | EXPORT_SYMBOL(release_resource); |
234 | 234 | ||
235 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
236 | /* | ||
237 | * Finds the lowest memory resource that exists within [res->start, res->end). | ||
238 | * The caller must specify res->start, res->end and res->flags. | ||
239 | * If found, returns 0, res is overwritten, if not found, returns -1. | ||
240 | */ | ||
241 | int find_next_system_ram(struct resource *res) | ||
242 | { | ||
243 | resource_size_t start, end; | ||
244 | struct resource *p; | ||
245 | |||
246 | BUG_ON(!res); | ||
247 | |||
248 | start = res->start; | ||
249 | end = res->end; | ||
250 | |||
251 | read_lock(&resource_lock); | ||
252 | for (p = iomem_resource.child; p ; p = p->sibling) { | ||
253 | /* system ram is just marked as IORESOURCE_MEM */ | ||
254 | if (p->flags != res->flags) | ||
255 | continue; | ||
256 | if (p->start > end) { | ||
257 | p = NULL; | ||
258 | break; | ||
259 | } | ||
260 | if (p->start >= start) | ||
261 | break; | ||
262 | } | ||
263 | read_unlock(&resource_lock); | ||
264 | if (!p) | ||
265 | return -1; | ||
266 | /* copy data */ | ||
267 | res->start = p->start; | ||
268 | res->end = p->end; | ||
269 | return 0; | ||
270 | } | ||
271 | #endif | ||
272 | |||
235 | /* | 273 | /* |
236 | * Find empty slot in the resource tree given range and alignment. | 274 | * Find empty slot in the resource tree given range and alignment. |
237 | */ | 275 | */ |
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c new file mode 100644 index 000000000000..4aa8a2c9f453 --- /dev/null +++ b/kernel/rtmutex-debug.c | |||
@@ -0,0 +1,513 @@ | |||
1 | /* | ||
2 | * RT-Mutexes: blocking mutual exclusion locks with PI support | ||
3 | * | ||
4 | * started by Ingo Molnar and Thomas Gleixner: | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
7 | * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
8 | * | ||
9 | * This code is based on the rt.c implementation in the preempt-rt tree. | ||
10 | * Portions of said code are | ||
11 | * | ||
12 | * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey | ||
13 | * Copyright (C) 2006 Esben Nielsen | ||
14 | * Copyright (C) 2006 Kihon Technologies Inc., | ||
15 | * Steven Rostedt <rostedt@goodmis.org> | ||
16 | * | ||
17 | * See rt.c in preempt-rt for proper credits and further information | ||
18 | */ | ||
19 | #include <linux/config.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/kallsyms.h> | ||
25 | #include <linux/syscalls.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/plist.h> | ||
28 | #include <linux/fs.h> | ||
29 | |||
30 | #include "rtmutex_common.h" | ||
31 | |||
32 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
33 | # include "rtmutex-debug.h" | ||
34 | #else | ||
35 | # include "rtmutex.h" | ||
36 | #endif | ||
37 | |||
38 | # define TRACE_WARN_ON(x) WARN_ON(x) | ||
39 | # define TRACE_BUG_ON(x) BUG_ON(x) | ||
40 | |||
41 | # define TRACE_OFF() \ | ||
42 | do { \ | ||
43 | if (rt_trace_on) { \ | ||
44 | rt_trace_on = 0; \ | ||
45 | console_verbose(); \ | ||
46 | if (spin_is_locked(¤t->pi_lock)) \ | ||
47 | spin_unlock(¤t->pi_lock); \ | ||
48 | if (spin_is_locked(¤t->held_list_lock)) \ | ||
49 | spin_unlock(¤t->held_list_lock); \ | ||
50 | } \ | ||
51 | } while (0) | ||
52 | |||
53 | # define TRACE_OFF_NOLOCK() \ | ||
54 | do { \ | ||
55 | if (rt_trace_on) { \ | ||
56 | rt_trace_on = 0; \ | ||
57 | console_verbose(); \ | ||
58 | } \ | ||
59 | } while (0) | ||
60 | |||
61 | # define TRACE_BUG_LOCKED() \ | ||
62 | do { \ | ||
63 | TRACE_OFF(); \ | ||
64 | BUG(); \ | ||
65 | } while (0) | ||
66 | |||
67 | # define TRACE_WARN_ON_LOCKED(c) \ | ||
68 | do { \ | ||
69 | if (unlikely(c)) { \ | ||
70 | TRACE_OFF(); \ | ||
71 | WARN_ON(1); \ | ||
72 | } \ | ||
73 | } while (0) | ||
74 | |||
75 | # define TRACE_BUG_ON_LOCKED(c) \ | ||
76 | do { \ | ||
77 | if (unlikely(c)) \ | ||
78 | TRACE_BUG_LOCKED(); \ | ||
79 | } while (0) | ||
80 | |||
81 | #ifdef CONFIG_SMP | ||
82 | # define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c) | ||
83 | #else | ||
84 | # define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0) | ||
85 | #endif | ||
86 | |||
87 | /* | ||
88 | * deadlock detection flag. We turn it off when we detect | ||
89 | * the first problem because we don't want to recurse back | ||
90 | * into the tracing code when doing error printk or | ||
91 | * executing a BUG(): | ||
92 | */ | ||
93 | int rt_trace_on = 1; | ||
94 | |||
95 | void deadlock_trace_off(void) | ||
96 | { | ||
97 | rt_trace_on = 0; | ||
98 | } | ||
99 | |||
100 | static void printk_task(task_t *p) | ||
101 | { | ||
102 | if (p) | ||
103 | printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); | ||
104 | else | ||
105 | printk("<none>"); | ||
106 | } | ||
107 | |||
108 | static void printk_task_short(task_t *p) | ||
109 | { | ||
110 | if (p) | ||
111 | printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio); | ||
112 | else | ||
113 | printk("<none>"); | ||
114 | } | ||
115 | |||
116 | static void printk_lock(struct rt_mutex *lock, int print_owner) | ||
117 | { | ||
118 | if (lock->name) | ||
119 | printk(" [%p] {%s}\n", | ||
120 | lock, lock->name); | ||
121 | else | ||
122 | printk(" [%p] {%s:%d}\n", | ||
123 | lock, lock->file, lock->line); | ||
124 | |||
125 | if (print_owner && rt_mutex_owner(lock)) { | ||
126 | printk(".. ->owner: %p\n", lock->owner); | ||
127 | printk(".. held by: "); | ||
128 | printk_task(rt_mutex_owner(lock)); | ||
129 | printk("\n"); | ||
130 | } | ||
131 | if (rt_mutex_owner(lock)) { | ||
132 | printk("... acquired at: "); | ||
133 | print_symbol("%s\n", lock->acquire_ip); | ||
134 | } | ||
135 | } | ||
136 | |||
137 | static void printk_waiter(struct rt_mutex_waiter *w) | ||
138 | { | ||
139 | printk("-------------------------\n"); | ||
140 | printk("| waiter struct %p:\n", w); | ||
141 | printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n", | ||
142 | w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next, | ||
143 | w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next, | ||
144 | w->list_entry.prio); | ||
145 | printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n", | ||
146 | w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next, | ||
147 | w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next, | ||
148 | w->pi_list_entry.prio); | ||
149 | printk("\n| lock:\n"); | ||
150 | printk_lock(w->lock, 1); | ||
151 | printk("| w->ti->task:\n"); | ||
152 | printk_task(w->task); | ||
153 | printk("| blocked at: "); | ||
154 | print_symbol("%s\n", w->ip); | ||
155 | printk("-------------------------\n"); | ||
156 | } | ||
157 | |||
158 | static void show_task_locks(task_t *p) | ||
159 | { | ||
160 | switch (p->state) { | ||
161 | case TASK_RUNNING: printk("R"); break; | ||
162 | case TASK_INTERRUPTIBLE: printk("S"); break; | ||
163 | case TASK_UNINTERRUPTIBLE: printk("D"); break; | ||
164 | case TASK_STOPPED: printk("T"); break; | ||
165 | case EXIT_ZOMBIE: printk("Z"); break; | ||
166 | case EXIT_DEAD: printk("X"); break; | ||
167 | default: printk("?"); break; | ||
168 | } | ||
169 | printk_task(p); | ||
170 | if (p->pi_blocked_on) { | ||
171 | struct rt_mutex *lock = p->pi_blocked_on->lock; | ||
172 | |||
173 | printk(" blocked on:"); | ||
174 | printk_lock(lock, 1); | ||
175 | } else | ||
176 | printk(" (not blocked)\n"); | ||
177 | } | ||
178 | |||
179 | void rt_mutex_show_held_locks(task_t *task, int verbose) | ||
180 | { | ||
181 | struct list_head *curr, *cursor = NULL; | ||
182 | struct rt_mutex *lock; | ||
183 | task_t *t; | ||
184 | unsigned long flags; | ||
185 | int count = 0; | ||
186 | |||
187 | if (!rt_trace_on) | ||
188 | return; | ||
189 | |||
190 | if (verbose) { | ||
191 | printk("------------------------------\n"); | ||
192 | printk("| showing all locks held by: | ("); | ||
193 | printk_task_short(task); | ||
194 | printk("):\n"); | ||
195 | printk("------------------------------\n"); | ||
196 | } | ||
197 | |||
198 | next: | ||
199 | spin_lock_irqsave(&task->held_list_lock, flags); | ||
200 | list_for_each(curr, &task->held_list_head) { | ||
201 | if (cursor && curr != cursor) | ||
202 | continue; | ||
203 | lock = list_entry(curr, struct rt_mutex, held_list_entry); | ||
204 | t = rt_mutex_owner(lock); | ||
205 | WARN_ON(t != task); | ||
206 | count++; | ||
207 | cursor = curr->next; | ||
208 | spin_unlock_irqrestore(&task->held_list_lock, flags); | ||
209 | |||
210 | printk("\n#%03d: ", count); | ||
211 | printk_lock(lock, 0); | ||
212 | goto next; | ||
213 | } | ||
214 | spin_unlock_irqrestore(&task->held_list_lock, flags); | ||
215 | |||
216 | printk("\n"); | ||
217 | } | ||
218 | |||
219 | void rt_mutex_show_all_locks(void) | ||
220 | { | ||
221 | task_t *g, *p; | ||
222 | int count = 10; | ||
223 | int unlock = 1; | ||
224 | |||
225 | printk("\n"); | ||
226 | printk("----------------------\n"); | ||
227 | printk("| showing all tasks: |\n"); | ||
228 | printk("----------------------\n"); | ||
229 | |||
230 | /* | ||
231 | * Here we try to get the tasklist_lock as hard as possible, | ||
232 | * if not successful after 2 seconds we ignore it (but keep | ||
233 | * trying). This is to enable a debug printout even if a | ||
234 | * tasklist_lock-holding task deadlocks or crashes. | ||
235 | */ | ||
236 | retry: | ||
237 | if (!read_trylock(&tasklist_lock)) { | ||
238 | if (count == 10) | ||
239 | printk("hm, tasklist_lock locked, retrying... "); | ||
240 | if (count) { | ||
241 | count--; | ||
242 | printk(" #%d", 10-count); | ||
243 | mdelay(200); | ||
244 | goto retry; | ||
245 | } | ||
246 | printk(" ignoring it.\n"); | ||
247 | unlock = 0; | ||
248 | } | ||
249 | if (count != 10) | ||
250 | printk(" locked it.\n"); | ||
251 | |||
252 | do_each_thread(g, p) { | ||
253 | show_task_locks(p); | ||
254 | if (!unlock) | ||
255 | if (read_trylock(&tasklist_lock)) | ||
256 | unlock = 1; | ||
257 | } while_each_thread(g, p); | ||
258 | |||
259 | printk("\n"); | ||
260 | |||
261 | printk("-----------------------------------------\n"); | ||
262 | printk("| showing all locks held in the system: |\n"); | ||
263 | printk("-----------------------------------------\n"); | ||
264 | |||
265 | do_each_thread(g, p) { | ||
266 | rt_mutex_show_held_locks(p, 0); | ||
267 | if (!unlock) | ||
268 | if (read_trylock(&tasklist_lock)) | ||
269 | unlock = 1; | ||
270 | } while_each_thread(g, p); | ||
271 | |||
272 | |||
273 | printk("=============================================\n\n"); | ||
274 | |||
275 | if (unlock) | ||
276 | read_unlock(&tasklist_lock); | ||
277 | } | ||
278 | |||
279 | void rt_mutex_debug_check_no_locks_held(task_t *task) | ||
280 | { | ||
281 | struct rt_mutex_waiter *w; | ||
282 | struct list_head *curr; | ||
283 | struct rt_mutex *lock; | ||
284 | |||
285 | if (!rt_trace_on) | ||
286 | return; | ||
287 | if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) { | ||
288 | printk("BUG: PI priority boost leaked!\n"); | ||
289 | printk_task(task); | ||
290 | printk("\n"); | ||
291 | } | ||
292 | if (list_empty(&task->held_list_head)) | ||
293 | return; | ||
294 | |||
295 | spin_lock(&task->pi_lock); | ||
296 | plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) { | ||
297 | TRACE_OFF(); | ||
298 | |||
299 | printk("hm, PI interest held at exit time? Task:\n"); | ||
300 | printk_task(task); | ||
301 | printk_waiter(w); | ||
302 | return; | ||
303 | } | ||
304 | spin_unlock(&task->pi_lock); | ||
305 | |||
306 | list_for_each(curr, &task->held_list_head) { | ||
307 | lock = list_entry(curr, struct rt_mutex, held_list_entry); | ||
308 | |||
309 | printk("BUG: %s/%d, lock held at task exit time!\n", | ||
310 | task->comm, task->pid); | ||
311 | printk_lock(lock, 1); | ||
312 | if (rt_mutex_owner(lock) != task) | ||
313 | printk("exiting task is not even the owner??\n"); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len) | ||
318 | { | ||
319 | const void *to = from + len; | ||
320 | struct list_head *curr; | ||
321 | struct rt_mutex *lock; | ||
322 | unsigned long flags; | ||
323 | void *lock_addr; | ||
324 | |||
325 | if (!rt_trace_on) | ||
326 | return 0; | ||
327 | |||
328 | spin_lock_irqsave(¤t->held_list_lock, flags); | ||
329 | list_for_each(curr, ¤t->held_list_head) { | ||
330 | lock = list_entry(curr, struct rt_mutex, held_list_entry); | ||
331 | lock_addr = lock; | ||
332 | if (lock_addr < from || lock_addr >= to) | ||
333 | continue; | ||
334 | TRACE_OFF(); | ||
335 | |||
336 | printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n", | ||
337 | current->comm, current->pid, lock, from, to); | ||
338 | dump_stack(); | ||
339 | printk_lock(lock, 1); | ||
340 | if (rt_mutex_owner(lock) != current) | ||
341 | printk("freeing task is not even the owner??\n"); | ||
342 | return 1; | ||
343 | } | ||
344 | spin_unlock_irqrestore(¤t->held_list_lock, flags); | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | void rt_mutex_debug_task_free(struct task_struct *task) | ||
350 | { | ||
351 | WARN_ON(!plist_head_empty(&task->pi_waiters)); | ||
352 | WARN_ON(task->pi_blocked_on); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * We fill out the fields in the waiter to store the information about | ||
357 | * the deadlock. We print when we return. act_waiter can be NULL in | ||
358 | * case of a remove waiter operation. | ||
359 | */ | ||
360 | void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter, | ||
361 | struct rt_mutex *lock) | ||
362 | { | ||
363 | struct task_struct *task; | ||
364 | |||
365 | if (!rt_trace_on || detect || !act_waiter) | ||
366 | return; | ||
367 | |||
368 | task = rt_mutex_owner(act_waiter->lock); | ||
369 | if (task && task != current) { | ||
370 | act_waiter->deadlock_task_pid = task->pid; | ||
371 | act_waiter->deadlock_lock = lock; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) | ||
376 | { | ||
377 | struct task_struct *task; | ||
378 | |||
379 | if (!waiter->deadlock_lock || !rt_trace_on) | ||
380 | return; | ||
381 | |||
382 | task = find_task_by_pid(waiter->deadlock_task_pid); | ||
383 | if (!task) | ||
384 | return; | ||
385 | |||
386 | TRACE_OFF_NOLOCK(); | ||
387 | |||
388 | printk("\n============================================\n"); | ||
389 | printk( "[ BUG: circular locking deadlock detected! ]\n"); | ||
390 | printk( "--------------------------------------------\n"); | ||
391 | printk("%s/%d is deadlocking current task %s/%d\n\n", | ||
392 | task->comm, task->pid, current->comm, current->pid); | ||
393 | |||
394 | printk("\n1) %s/%d is trying to acquire this lock:\n", | ||
395 | current->comm, current->pid); | ||
396 | printk_lock(waiter->lock, 1); | ||
397 | |||
398 | printk("... trying at: "); | ||
399 | print_symbol("%s\n", waiter->ip); | ||
400 | |||
401 | printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid); | ||
402 | printk_lock(waiter->deadlock_lock, 1); | ||
403 | |||
404 | rt_mutex_show_held_locks(current, 1); | ||
405 | rt_mutex_show_held_locks(task, 1); | ||
406 | |||
407 | printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid); | ||
408 | show_stack(task, NULL); | ||
409 | printk("\n%s/%d's [current] stackdump:\n\n", | ||
410 | current->comm, current->pid); | ||
411 | dump_stack(); | ||
412 | rt_mutex_show_all_locks(); | ||
413 | printk("[ turning off deadlock detection." | ||
414 | "Please report this trace. ]\n\n"); | ||
415 | local_irq_disable(); | ||
416 | } | ||
417 | |||
418 | void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__) | ||
419 | { | ||
420 | unsigned long flags; | ||
421 | |||
422 | if (rt_trace_on) { | ||
423 | TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry)); | ||
424 | |||
425 | spin_lock_irqsave(¤t->held_list_lock, flags); | ||
426 | list_add_tail(&lock->held_list_entry, ¤t->held_list_head); | ||
427 | spin_unlock_irqrestore(¤t->held_list_lock, flags); | ||
428 | |||
429 | lock->acquire_ip = ip; | ||
430 | } | ||
431 | } | ||
432 | |||
433 | void debug_rt_mutex_unlock(struct rt_mutex *lock) | ||
434 | { | ||
435 | unsigned long flags; | ||
436 | |||
437 | if (rt_trace_on) { | ||
438 | TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); | ||
439 | TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry)); | ||
440 | |||
441 | spin_lock_irqsave(¤t->held_list_lock, flags); | ||
442 | list_del_init(&lock->held_list_entry); | ||
443 | spin_unlock_irqrestore(¤t->held_list_lock, flags); | ||
444 | } | ||
445 | } | ||
446 | |||
447 | void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, | ||
448 | struct task_struct *powner __IP_DECL__) | ||
449 | { | ||
450 | unsigned long flags; | ||
451 | |||
452 | if (rt_trace_on) { | ||
453 | TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry)); | ||
454 | |||
455 | spin_lock_irqsave(&powner->held_list_lock, flags); | ||
456 | list_add_tail(&lock->held_list_entry, &powner->held_list_head); | ||
457 | spin_unlock_irqrestore(&powner->held_list_lock, flags); | ||
458 | |||
459 | lock->acquire_ip = ip; | ||
460 | } | ||
461 | } | ||
462 | |||
463 | void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) | ||
464 | { | ||
465 | unsigned long flags; | ||
466 | |||
467 | if (rt_trace_on) { | ||
468 | struct task_struct *owner = rt_mutex_owner(lock); | ||
469 | |||
470 | TRACE_WARN_ON_LOCKED(!owner); | ||
471 | TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry)); | ||
472 | |||
473 | spin_lock_irqsave(&owner->held_list_lock, flags); | ||
474 | list_del_init(&lock->held_list_entry); | ||
475 | spin_unlock_irqrestore(&owner->held_list_lock, flags); | ||
476 | } | ||
477 | } | ||
478 | |||
479 | void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) | ||
480 | { | ||
481 | memset(waiter, 0x11, sizeof(*waiter)); | ||
482 | plist_node_init(&waiter->list_entry, MAX_PRIO); | ||
483 | plist_node_init(&waiter->pi_list_entry, MAX_PRIO); | ||
484 | } | ||
485 | |||
486 | void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) | ||
487 | { | ||
488 | TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry)); | ||
489 | TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); | ||
490 | TRACE_WARN_ON(waiter->task); | ||
491 | memset(waiter, 0x22, sizeof(*waiter)); | ||
492 | } | ||
493 | |||
494 | void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) | ||
495 | { | ||
496 | void *addr = lock; | ||
497 | |||
498 | if (rt_trace_on) { | ||
499 | rt_mutex_debug_check_no_locks_freed(addr, | ||
500 | sizeof(struct rt_mutex)); | ||
501 | INIT_LIST_HEAD(&lock->held_list_entry); | ||
502 | lock->name = name; | ||
503 | } | ||
504 | } | ||
505 | |||
506 | void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) | ||
507 | { | ||
508 | } | ||
509 | |||
510 | void rt_mutex_deadlock_account_unlock(struct task_struct *task) | ||
511 | { | ||
512 | } | ||
513 | |||
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h new file mode 100644 index 000000000000..7612fbc62d70 --- /dev/null +++ b/kernel/rtmutex-debug.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * RT-Mutexes: blocking mutual exclusion locks with PI support | ||
3 | * | ||
4 | * started by Ingo Molnar and Thomas Gleixner: | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
7 | * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
8 | * | ||
9 | * This file contains macros used solely by rtmutex.c. Debug version. | ||
10 | */ | ||
11 | |||
12 | #define __IP_DECL__ , unsigned long ip | ||
13 | #define __IP__ , ip | ||
14 | #define __RET_IP__ , (unsigned long)__builtin_return_address(0) | ||
15 | |||
16 | extern void | ||
17 | rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); | ||
18 | extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); | ||
19 | extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); | ||
20 | extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); | ||
21 | extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); | ||
22 | extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__); | ||
23 | extern void debug_rt_mutex_unlock(struct rt_mutex *lock); | ||
24 | extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, | ||
25 | struct task_struct *powner __IP_DECL__); | ||
26 | extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); | ||
27 | extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter, | ||
28 | struct rt_mutex *lock); | ||
29 | extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); | ||
30 | # define debug_rt_mutex_reset_waiter(w) \ | ||
31 | do { (w)->deadlock_lock = NULL; } while (0) | ||
32 | |||
33 | static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, | ||
34 | int detect) | ||
35 | { | ||
36 | return (waiter != NULL); | ||
37 | } | ||
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c new file mode 100644 index 000000000000..e82c2f848249 --- /dev/null +++ b/kernel/rtmutex-tester.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /* | ||
2 | * RT-Mutex-tester: scriptable tester for rt mutexes | ||
3 | * | ||
4 | * started by Thomas Gleixner: | ||
5 | * | ||
6 | * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/config.h> | ||
10 | #include <linux/kthread.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/smp_lock.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/sysdev.h> | ||
16 | #include <linux/timer.h> | ||
17 | |||
18 | #include "rtmutex.h" | ||
19 | |||
20 | #define MAX_RT_TEST_THREADS 8 | ||
21 | #define MAX_RT_TEST_MUTEXES 8 | ||
22 | |||
23 | static spinlock_t rttest_lock; | ||
24 | static atomic_t rttest_event; | ||
25 | |||
26 | struct test_thread_data { | ||
27 | int opcode; | ||
28 | int opdata; | ||
29 | int mutexes[MAX_RT_TEST_MUTEXES]; | ||
30 | int bkl; | ||
31 | int event; | ||
32 | struct sys_device sysdev; | ||
33 | }; | ||
34 | |||
35 | static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; | ||
36 | static task_t *threads[MAX_RT_TEST_THREADS]; | ||
37 | static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES]; | ||
38 | |||
39 | enum test_opcodes { | ||
40 | RTTEST_NOP = 0, | ||
41 | RTTEST_SCHEDOT, /* 1 Sched other, data = nice */ | ||
42 | RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */ | ||
43 | RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */ | ||
44 | RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */ | ||
45 | RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */ | ||
46 | RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */ | ||
47 | RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */ | ||
48 | RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */ | ||
49 | RTTEST_LOCKBKL, /* 9 Lock BKL */ | ||
50 | RTTEST_UNLOCKBKL, /* 10 Unlock BKL */ | ||
51 | RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */ | ||
52 | RTTEST_RESETEVENT = 98, /* 98 Reset event counter */ | ||
53 | RTTEST_RESET = 99, /* 99 Reset all pending operations */ | ||
54 | }; | ||
55 | |||
56 | static int handle_op(struct test_thread_data *td, int lockwakeup) | ||
57 | { | ||
58 | int i, id, ret = -EINVAL; | ||
59 | |||
60 | switch(td->opcode) { | ||
61 | |||
62 | case RTTEST_NOP: | ||
63 | return 0; | ||
64 | |||
65 | case RTTEST_LOCKCONT: | ||
66 | td->mutexes[td->opdata] = 1; | ||
67 | td->event = atomic_add_return(1, &rttest_event); | ||
68 | return 0; | ||
69 | |||
70 | case RTTEST_RESET: | ||
71 | for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) { | ||
72 | if (td->mutexes[i] == 4) { | ||
73 | rt_mutex_unlock(&mutexes[i]); | ||
74 | td->mutexes[i] = 0; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | if (!lockwakeup && td->bkl == 4) { | ||
79 | unlock_kernel(); | ||
80 | td->bkl = 0; | ||
81 | } | ||
82 | return 0; | ||
83 | |||
84 | case RTTEST_RESETEVENT: | ||
85 | atomic_set(&rttest_event, 0); | ||
86 | return 0; | ||
87 | |||
88 | default: | ||
89 | if (lockwakeup) | ||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | switch(td->opcode) { | ||
94 | |||
95 | case RTTEST_LOCK: | ||
96 | case RTTEST_LOCKNOWAIT: | ||
97 | id = td->opdata; | ||
98 | if (id < 0 || id >= MAX_RT_TEST_MUTEXES) | ||
99 | return ret; | ||
100 | |||
101 | td->mutexes[id] = 1; | ||
102 | td->event = atomic_add_return(1, &rttest_event); | ||
103 | rt_mutex_lock(&mutexes[id]); | ||
104 | td->event = atomic_add_return(1, &rttest_event); | ||
105 | td->mutexes[id] = 4; | ||
106 | return 0; | ||
107 | |||
108 | case RTTEST_LOCKINT: | ||
109 | case RTTEST_LOCKINTNOWAIT: | ||
110 | id = td->opdata; | ||
111 | if (id < 0 || id >= MAX_RT_TEST_MUTEXES) | ||
112 | return ret; | ||
113 | |||
114 | td->mutexes[id] = 1; | ||
115 | td->event = atomic_add_return(1, &rttest_event); | ||
116 | ret = rt_mutex_lock_interruptible(&mutexes[id], 0); | ||
117 | td->event = atomic_add_return(1, &rttest_event); | ||
118 | td->mutexes[id] = ret ? 0 : 4; | ||
119 | return ret ? -EINTR : 0; | ||
120 | |||
121 | case RTTEST_UNLOCK: | ||
122 | id = td->opdata; | ||
123 | if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) | ||
124 | return ret; | ||
125 | |||
126 | td->event = atomic_add_return(1, &rttest_event); | ||
127 | rt_mutex_unlock(&mutexes[id]); | ||
128 | td->event = atomic_add_return(1, &rttest_event); | ||
129 | td->mutexes[id] = 0; | ||
130 | return 0; | ||
131 | |||
132 | case RTTEST_LOCKBKL: | ||
133 | if (td->bkl) | ||
134 | return 0; | ||
135 | td->bkl = 1; | ||
136 | lock_kernel(); | ||
137 | td->bkl = 4; | ||
138 | return 0; | ||
139 | |||
140 | case RTTEST_UNLOCKBKL: | ||
141 | if (td->bkl != 4) | ||
142 | break; | ||
143 | unlock_kernel(); | ||
144 | td->bkl = 0; | ||
145 | return 0; | ||
146 | |||
147 | default: | ||
148 | break; | ||
149 | } | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Schedule replacement for rtsem_down(). Only called for threads with | ||
155 | * PF_MUTEX_TESTER set. | ||
156 | * | ||
157 | * This allows us to have finegrained control over the event flow. | ||
158 | * | ||
159 | */ | ||
160 | void schedule_rt_mutex_test(struct rt_mutex *mutex) | ||
161 | { | ||
162 | int tid, op, dat; | ||
163 | struct test_thread_data *td; | ||
164 | |||
165 | /* We have to lookup the task */ | ||
166 | for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) { | ||
167 | if (threads[tid] == current) | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | BUG_ON(tid == MAX_RT_TEST_THREADS); | ||
172 | |||
173 | td = &thread_data[tid]; | ||
174 | |||
175 | op = td->opcode; | ||
176 | dat = td->opdata; | ||
177 | |||
178 | switch (op) { | ||
179 | case RTTEST_LOCK: | ||
180 | case RTTEST_LOCKINT: | ||
181 | case RTTEST_LOCKNOWAIT: | ||
182 | case RTTEST_LOCKINTNOWAIT: | ||
183 | if (mutex != &mutexes[dat]) | ||
184 | break; | ||
185 | |||
186 | if (td->mutexes[dat] != 1) | ||
187 | break; | ||
188 | |||
189 | td->mutexes[dat] = 2; | ||
190 | td->event = atomic_add_return(1, &rttest_event); | ||
191 | break; | ||
192 | |||
193 | case RTTEST_LOCKBKL: | ||
194 | default: | ||
195 | break; | ||
196 | } | ||
197 | |||
198 | schedule(); | ||
199 | |||
200 | |||
201 | switch (op) { | ||
202 | case RTTEST_LOCK: | ||
203 | case RTTEST_LOCKINT: | ||
204 | if (mutex != &mutexes[dat]) | ||
205 | return; | ||
206 | |||
207 | if (td->mutexes[dat] != 2) | ||
208 | return; | ||
209 | |||
210 | td->mutexes[dat] = 3; | ||
211 | td->event = atomic_add_return(1, &rttest_event); | ||
212 | break; | ||
213 | |||
214 | case RTTEST_LOCKNOWAIT: | ||
215 | case RTTEST_LOCKINTNOWAIT: | ||
216 | if (mutex != &mutexes[dat]) | ||
217 | return; | ||
218 | |||
219 | if (td->mutexes[dat] != 2) | ||
220 | return; | ||
221 | |||
222 | td->mutexes[dat] = 1; | ||
223 | td->event = atomic_add_return(1, &rttest_event); | ||
224 | return; | ||
225 | |||
226 | case RTTEST_LOCKBKL: | ||
227 | return; | ||
228 | default: | ||
229 | return; | ||
230 | } | ||
231 | |||
232 | td->opcode = 0; | ||
233 | |||
234 | for (;;) { | ||
235 | set_current_state(TASK_INTERRUPTIBLE); | ||
236 | |||
237 | if (td->opcode > 0) { | ||
238 | int ret; | ||
239 | |||
240 | set_current_state(TASK_RUNNING); | ||
241 | ret = handle_op(td, 1); | ||
242 | set_current_state(TASK_INTERRUPTIBLE); | ||
243 | if (td->opcode == RTTEST_LOCKCONT) | ||
244 | break; | ||
245 | td->opcode = ret; | ||
246 | } | ||
247 | |||
248 | /* Wait for the next command to be executed */ | ||
249 | schedule(); | ||
250 | } | ||
251 | |||
252 | /* Restore previous command and data */ | ||
253 | td->opcode = op; | ||
254 | td->opdata = dat; | ||
255 | } | ||
256 | |||
257 | static int test_func(void *data) | ||
258 | { | ||
259 | struct test_thread_data *td = data; | ||
260 | int ret; | ||
261 | |||
262 | current->flags |= PF_MUTEX_TESTER; | ||
263 | allow_signal(SIGHUP); | ||
264 | |||
265 | for(;;) { | ||
266 | |||
267 | set_current_state(TASK_INTERRUPTIBLE); | ||
268 | |||
269 | if (td->opcode > 0) { | ||
270 | set_current_state(TASK_RUNNING); | ||
271 | ret = handle_op(td, 0); | ||
272 | set_current_state(TASK_INTERRUPTIBLE); | ||
273 | td->opcode = ret; | ||
274 | } | ||
275 | |||
276 | /* Wait for the next command to be executed */ | ||
277 | schedule(); | ||
278 | |||
279 | if (signal_pending(current)) | ||
280 | flush_signals(current); | ||
281 | |||
282 | if(kthread_should_stop()) | ||
283 | break; | ||
284 | } | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * sysfs_test_command - interface for test commands | ||
290 | * @dev: thread reference | ||
291 | * @buf: command for actual step | ||
292 | * @count: length of buffer | ||
293 | * | ||
294 | * command syntax: | ||
295 | * | ||
296 | * opcode:data | ||
297 | */ | ||
298 | static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, | ||
299 | size_t count) | ||
300 | { | ||
301 | struct sched_param schedpar; | ||
302 | struct test_thread_data *td; | ||
303 | char cmdbuf[32]; | ||
304 | int op, dat, tid, ret; | ||
305 | |||
306 | td = container_of(dev, struct test_thread_data, sysdev); | ||
307 | tid = td->sysdev.id; | ||
308 | |||
309 | /* strings from sysfs write are not 0 terminated! */ | ||
310 | if (count >= sizeof(cmdbuf)) | ||
311 | return -EINVAL; | ||
312 | |||
313 | /* strip off the trailing \n: */ | ||
314 | if (buf[count-1] == '\n') | ||
315 | count--; | ||
316 | if (count < 1) | ||
317 | return -EINVAL; | ||
318 | |||
319 | memcpy(cmdbuf, buf, count); | ||
320 | cmdbuf[count] = 0; | ||
321 | |||
322 | if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2) | ||
323 | return -EINVAL; | ||
324 | |||
325 | switch (op) { | ||
326 | case RTTEST_SCHEDOT: | ||
327 | schedpar.sched_priority = 0; | ||
328 | ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar); | ||
329 | if (ret) | ||
330 | return ret; | ||
331 | set_user_nice(current, 0); | ||
332 | break; | ||
333 | |||
334 | case RTTEST_SCHEDRT: | ||
335 | schedpar.sched_priority = dat; | ||
336 | ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar); | ||
337 | if (ret) | ||
338 | return ret; | ||
339 | break; | ||
340 | |||
341 | case RTTEST_SIGNAL: | ||
342 | send_sig(SIGHUP, threads[tid], 0); | ||
343 | break; | ||
344 | |||
345 | default: | ||
346 | if (td->opcode > 0) | ||
347 | return -EBUSY; | ||
348 | td->opdata = dat; | ||
349 | td->opcode = op; | ||
350 | wake_up_process(threads[tid]); | ||
351 | } | ||
352 | |||
353 | return count; | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * sysfs_test_status - sysfs interface for rt tester | ||
358 | * @dev: thread to query | ||
359 | * @buf: char buffer to be filled with thread status info | ||
360 | */ | ||
361 | static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) | ||
362 | { | ||
363 | struct test_thread_data *td; | ||
364 | char *curr = buf; | ||
365 | task_t *tsk; | ||
366 | int i; | ||
367 | |||
368 | td = container_of(dev, struct test_thread_data, sysdev); | ||
369 | tsk = threads[td->sysdev.id]; | ||
370 | |||
371 | spin_lock(&rttest_lock); | ||
372 | |||
373 | curr += sprintf(curr, | ||
374 | "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:", | ||
375 | td->opcode, td->event, tsk->state, | ||
376 | (MAX_RT_PRIO - 1) - tsk->prio, | ||
377 | (MAX_RT_PRIO - 1) - tsk->normal_prio, | ||
378 | tsk->pi_blocked_on, td->bkl); | ||
379 | |||
380 | for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--) | ||
381 | curr += sprintf(curr, "%d", td->mutexes[i]); | ||
382 | |||
383 | spin_unlock(&rttest_lock); | ||
384 | |||
385 | curr += sprintf(curr, ", T: %p, R: %p\n", tsk, | ||
386 | mutexes[td->sysdev.id].owner); | ||
387 | |||
388 | return curr - buf; | ||
389 | } | ||
390 | |||
391 | static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL); | ||
392 | static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command); | ||
393 | |||
394 | static struct sysdev_class rttest_sysclass = { | ||
395 | set_kset_name("rttest"), | ||
396 | }; | ||
397 | |||
398 | static int init_test_thread(int id) | ||
399 | { | ||
400 | thread_data[id].sysdev.cls = &rttest_sysclass; | ||
401 | thread_data[id].sysdev.id = id; | ||
402 | |||
403 | threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id); | ||
404 | if (IS_ERR(threads[id])) | ||
405 | return PTR_ERR(threads[id]); | ||
406 | |||
407 | return sysdev_register(&thread_data[id].sysdev); | ||
408 | } | ||
409 | |||
410 | static int init_rttest(void) | ||
411 | { | ||
412 | int ret, i; | ||
413 | |||
414 | spin_lock_init(&rttest_lock); | ||
415 | |||
416 | for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) | ||
417 | rt_mutex_init(&mutexes[i]); | ||
418 | |||
419 | ret = sysdev_class_register(&rttest_sysclass); | ||
420 | if (ret) | ||
421 | return ret; | ||
422 | |||
423 | for (i = 0; i < MAX_RT_TEST_THREADS; i++) { | ||
424 | ret = init_test_thread(i); | ||
425 | if (ret) | ||
426 | break; | ||
427 | ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status); | ||
428 | if (ret) | ||
429 | break; | ||
430 | ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command); | ||
431 | if (ret) | ||
432 | break; | ||
433 | } | ||
434 | |||
435 | printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" ); | ||
436 | |||
437 | return ret; | ||
438 | } | ||
439 | |||
440 | device_initcall(init_rttest); | ||
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c new file mode 100644 index 000000000000..45d61016da57 --- /dev/null +++ b/kernel/rtmutex.c | |||
@@ -0,0 +1,990 @@ | |||
1 | /* | ||
2 | * RT-Mutexes: simple blocking mutual exclusion locks with PI support | ||
3 | * | ||
4 | * started by Ingo Molnar and Thomas Gleixner. | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
7 | * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
8 | * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt | ||
9 | * Copyright (C) 2006 Esben Nielsen | ||
10 | */ | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/timer.h> | ||
15 | |||
16 | #include "rtmutex_common.h" | ||
17 | |||
18 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
19 | # include "rtmutex-debug.h" | ||
20 | #else | ||
21 | # include "rtmutex.h" | ||
22 | #endif | ||
23 | |||
24 | /* | ||
25 | * lock->owner state tracking: | ||
26 | * | ||
27 | * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1 | ||
28 | * are used to keep track of the "owner is pending" and "lock has | ||
29 | * waiters" state. | ||
30 | * | ||
31 | * owner bit1 bit0 | ||
32 | * NULL 0 0 lock is free (fast acquire possible) | ||
33 | * NULL 0 1 invalid state | ||
34 | * NULL 1 0 Transitional State* | ||
35 | * NULL 1 1 invalid state | ||
36 | * taskpointer 0 0 lock is held (fast release possible) | ||
37 | * taskpointer 0 1 task is pending owner | ||
38 | * taskpointer 1 0 lock is held and has waiters | ||
39 | * taskpointer 1 1 task is pending owner and lock has more waiters | ||
40 | * | ||
41 | * Pending ownership is assigned to the top (highest priority) | ||
42 | * waiter of the lock, when the lock is released. The thread is woken | ||
43 | * up and can now take the lock. Until the lock is taken (bit 0 | ||
44 | * cleared) a competing higher priority thread can steal the lock | ||
45 | * which puts the woken up thread back on the waiters list. | ||
46 | * | ||
47 | * The fast atomic compare exchange based acquire and release is only | ||
48 | * possible when bit 0 and 1 of lock->owner are 0. | ||
49 | * | ||
50 | * (*) There is a brief window where the owner can be NULL and the | ||
51 | * "lock has waiters" bit is set. This can happen when grabbing the lock. | ||
52 | * To prevent a cmpxchg of the owner releasing the lock, we need to set this | ||
53 | * bit before looking at the lock, hence the reason this is a transitional | ||
54 | * state. | ||
55 | */ | ||
56 | |||
57 | static void | ||
58 | rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner, | ||
59 | unsigned long mask) | ||
60 | { | ||
61 | unsigned long val = (unsigned long)owner | mask; | ||
62 | |||
63 | if (rt_mutex_has_waiters(lock)) | ||
64 | val |= RT_MUTEX_HAS_WAITERS; | ||
65 | |||
66 | lock->owner = (struct task_struct *)val; | ||
67 | } | ||
68 | |||
69 | static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) | ||
70 | { | ||
71 | lock->owner = (struct task_struct *) | ||
72 | ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); | ||
73 | } | ||
74 | |||
75 | static void fixup_rt_mutex_waiters(struct rt_mutex *lock) | ||
76 | { | ||
77 | if (!rt_mutex_has_waiters(lock)) | ||
78 | clear_rt_mutex_waiters(lock); | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * We can speed up the acquire/release, if the architecture | ||
83 | * supports cmpxchg and if there's no debugging state to be set up | ||
84 | */ | ||
85 | #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES) | ||
86 | # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) | ||
87 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) | ||
88 | { | ||
89 | unsigned long owner, *p = (unsigned long *) &lock->owner; | ||
90 | |||
91 | do { | ||
92 | owner = *p; | ||
93 | } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); | ||
94 | } | ||
95 | #else | ||
96 | # define rt_mutex_cmpxchg(l,c,n) (0) | ||
97 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) | ||
98 | { | ||
99 | lock->owner = (struct task_struct *) | ||
100 | ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); | ||
101 | } | ||
102 | #endif | ||
103 | |||
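The pointer packing described in the state-tracking comment above can be illustrated with a minimal standalone userspace sketch (not part of the patch; the constants mirror RT_MUTEX_OWNER_PENDING / RT_MUTEX_HAS_WAITERS / RT_MUTEX_OWNER_MASKALL defined later in rtmutex_common.h, and the struct is a stand-in for task_struct):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OWNER_PENDING 1UL   /* mirrors RT_MUTEX_OWNER_PENDING */
    #define HAS_WAITERS   2UL   /* mirrors RT_MUTEX_HAS_WAITERS   */
    #define MASKALL       3UL   /* mirrors RT_MUTEX_OWNER_MASKALL */

    struct fake_task { int pid; };   /* stand-in for task_struct */

    int main(void)
    {
        struct fake_task t = { .pid = 42 };

        /* pack: owner pointer plus the two state bits; the struct is
         * at least 4-byte aligned, so the low two bits are free */
        uintptr_t owner = (uintptr_t)&t | OWNER_PENDING | HAS_WAITERS;

        /* unpack: mask the state bits off to recover the task pointer */
        struct fake_task *p = (struct fake_task *)(owner & ~MASKALL);
        assert(p == &t);

        printf("pid=%d pending=%lu has_waiters=%lu\n", p->pid,
               (unsigned long)(owner & OWNER_PENDING),
               (unsigned long)!!(owner & HAS_WAITERS));
        return 0;
    }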
104 | /* | ||
105 | * Calculate task priority from the waiter list priority | ||
106 | * | ||
107 | * Return task->normal_prio when the waiter list is empty or when | ||
108 | * the waiter is not allowed to do priority boosting | ||
109 | */ | ||
110 | int rt_mutex_getprio(struct task_struct *task) | ||
111 | { | ||
112 | if (likely(!task_has_pi_waiters(task))) | ||
113 | return task->normal_prio; | ||
114 | |||
115 | return min(task_top_pi_waiter(task)->pi_list_entry.prio, | ||
116 | task->normal_prio); | ||
117 | } | ||
118 | |||
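A small numeric illustration of the rule rt_mutex_getprio() implements (standalone sketch, not part of the patch; kernel prio values where lower means stronger, and the numbers are made up):

    #include <stdio.h>

    /* mirrors rt_mutex_getprio(): boost only when a waiter is stronger */
    static int getprio(int normal_prio, int top_waiter_prio, int has_pi_waiters)
    {
        if (!has_pi_waiters)
            return normal_prio;
        return top_waiter_prio < normal_prio ? top_waiter_prio : normal_prio;
    }

    int main(void)
    {
        printf("%d\n", getprio(120, 98, 1));  /* boosted to 98          */
        printf("%d\n", getprio(120, 98, 0));  /* no waiters: stays 120  */
        printf("%d\n", getprio(49, 98, 1));   /* already stronger: 49   */
        return 0;
    }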
119 | /* | ||
120 | * Adjust the priority of a task, after its pi_waiters got modified. | ||
121 | * | ||
122 | * This can be both boosting and unboosting. task->pi_lock must be held. | ||
123 | */ | ||
124 | static void __rt_mutex_adjust_prio(struct task_struct *task) | ||
125 | { | ||
126 | int prio = rt_mutex_getprio(task); | ||
127 | |||
128 | if (task->prio != prio) | ||
129 | rt_mutex_setprio(task, prio); | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * Adjust task priority (undo boosting). Called from the exit path of | ||
134 | * rt_mutex_slowunlock() and rt_mutex_slowlock(). | ||
135 | * | ||
136 | * (Note: We do this outside of the protection of lock->wait_lock to | ||
137 | * allow the lock to be taken while or before we readjust the priority | ||
138 | * of task. We do not use the spin_xx_mutex() variants here as we are | ||
139 | * outside of the debug path.) | ||
140 | */ | ||
141 | static void rt_mutex_adjust_prio(struct task_struct *task) | ||
142 | { | ||
143 | unsigned long flags; | ||
144 | |||
145 | spin_lock_irqsave(&task->pi_lock, flags); | ||
146 | __rt_mutex_adjust_prio(task); | ||
147 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Max number of times we'll walk the boosting chain: | ||
152 | */ | ||
153 | int max_lock_depth = 1024; | ||
154 | |||
155 | /* | ||
156 | * Adjust the priority chain. Also used for deadlock detection. | ||
157 | * Decreases task's usage by one - may thus free the task. | ||
158 | * Returns 0 or -EDEADLK. | ||
159 | */ | ||
160 | static int rt_mutex_adjust_prio_chain(task_t *task, | ||
161 | int deadlock_detect, | ||
162 | struct rt_mutex *orig_lock, | ||
163 | struct rt_mutex_waiter *orig_waiter, | ||
164 | struct task_struct *top_task | ||
165 | __IP_DECL__) | ||
166 | { | ||
167 | struct rt_mutex *lock; | ||
168 | struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; | ||
169 | int detect_deadlock, ret = 0, depth = 0; | ||
170 | unsigned long flags; | ||
171 | |||
172 | detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter, | ||
173 | deadlock_detect); | ||
174 | |||
175 | /* | ||
176 | * The (de)boosting is a step by step approach with a lot of | ||
177 | * pitfalls. We want this to be preemptible and we want to hold a | ||
178 | * maximum of two locks per step. So we have to check | ||
179 | * carefully whether things change under us. | ||
180 | */ | ||
181 | again: | ||
182 | if (++depth > max_lock_depth) { | ||
183 | static int prev_max; | ||
184 | |||
185 | /* | ||
186 | * Print this only once. If the admin changes the limit, | ||
187 | * print a new message when reaching the limit again. | ||
188 | */ | ||
189 | if (prev_max != max_lock_depth) { | ||
190 | prev_max = max_lock_depth; | ||
191 | printk(KERN_WARNING "Maximum lock depth %d reached " | ||
192 | "task: %s (%d)\n", max_lock_depth, | ||
193 | top_task->comm, top_task->pid); | ||
194 | } | ||
195 | put_task_struct(task); | ||
196 | |||
197 | return deadlock_detect ? -EDEADLK : 0; | ||
198 | } | ||
199 | retry: | ||
200 | /* | ||
201 | * The task cannot go away as we did a get_task_struct() before! | ||
202 | */ | ||
203 | spin_lock_irqsave(&task->pi_lock, flags); | ||
204 | |||
205 | waiter = task->pi_blocked_on; | ||
206 | /* | ||
207 | * Check whether the end of the boosting chain has been | ||
208 | * reached or the state of the chain has changed while we | ||
209 | * dropped the locks. | ||
210 | */ | ||
211 | if (!waiter || !waiter->task) | ||
212 | goto out_unlock_pi; | ||
213 | |||
214 | if (top_waiter && (!task_has_pi_waiters(task) || | ||
215 | top_waiter != task_top_pi_waiter(task))) | ||
216 | goto out_unlock_pi; | ||
217 | |||
218 | /* | ||
219 | * When deadlock detection is off, we check whether further | ||
220 | * priority adjustment is necessary. | ||
221 | */ | ||
222 | if (!detect_deadlock && waiter->list_entry.prio == task->prio) | ||
223 | goto out_unlock_pi; | ||
224 | |||
225 | lock = waiter->lock; | ||
226 | if (!spin_trylock(&lock->wait_lock)) { | ||
227 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
228 | cpu_relax(); | ||
229 | goto retry; | ||
230 | } | ||
231 | |||
232 | /* Deadlock detection */ | ||
233 | if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { | ||
234 | debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); | ||
235 | spin_unlock(&lock->wait_lock); | ||
236 | ret = deadlock_detect ? -EDEADLK : 0; | ||
237 | goto out_unlock_pi; | ||
238 | } | ||
239 | |||
240 | top_waiter = rt_mutex_top_waiter(lock); | ||
241 | |||
242 | /* Requeue the waiter */ | ||
243 | plist_del(&waiter->list_entry, &lock->wait_list); | ||
244 | waiter->list_entry.prio = task->prio; | ||
245 | plist_add(&waiter->list_entry, &lock->wait_list); | ||
246 | |||
247 | /* Release the task */ | ||
248 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
249 | put_task_struct(task); | ||
250 | |||
251 | /* Grab the next task */ | ||
252 | task = rt_mutex_owner(lock); | ||
253 | spin_lock_irqsave(&task->pi_lock, flags); | ||
254 | |||
255 | if (waiter == rt_mutex_top_waiter(lock)) { | ||
256 | /* Boost the owner */ | ||
257 | plist_del(&top_waiter->pi_list_entry, &task->pi_waiters); | ||
258 | waiter->pi_list_entry.prio = waiter->list_entry.prio; | ||
259 | plist_add(&waiter->pi_list_entry, &task->pi_waiters); | ||
260 | __rt_mutex_adjust_prio(task); | ||
261 | |||
262 | } else if (top_waiter == waiter) { | ||
263 | /* Deboost the owner */ | ||
264 | plist_del(&waiter->pi_list_entry, &task->pi_waiters); | ||
265 | waiter = rt_mutex_top_waiter(lock); | ||
266 | waiter->pi_list_entry.prio = waiter->list_entry.prio; | ||
267 | plist_add(&waiter->pi_list_entry, &task->pi_waiters); | ||
268 | __rt_mutex_adjust_prio(task); | ||
269 | } | ||
270 | |||
271 | get_task_struct(task); | ||
272 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
273 | |||
274 | top_waiter = rt_mutex_top_waiter(lock); | ||
275 | spin_unlock(&lock->wait_lock); | ||
276 | |||
277 | if (!detect_deadlock && waiter != top_waiter) | ||
278 | goto out_put_task; | ||
279 | |||
280 | goto again; | ||
281 | |||
282 | out_unlock_pi: | ||
283 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
284 | out_put_task: | ||
285 | put_task_struct(task); | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * Optimization: check if we can steal the lock from the | ||
291 | * assigned pending owner [which might not have taken the | ||
292 | * lock yet]: | ||
293 | */ | ||
294 | static inline int try_to_steal_lock(struct rt_mutex *lock) | ||
295 | { | ||
296 | struct task_struct *pendowner = rt_mutex_owner(lock); | ||
297 | struct rt_mutex_waiter *next; | ||
298 | unsigned long flags; | ||
299 | |||
300 | if (!rt_mutex_owner_pending(lock)) | ||
301 | return 0; | ||
302 | |||
303 | if (pendowner == current) | ||
304 | return 1; | ||
305 | |||
306 | spin_lock_irqsave(&pendowner->pi_lock, flags); | ||
307 | if (current->prio >= pendowner->prio) { | ||
308 | spin_unlock_irqrestore(&pendowner->pi_lock, flags); | ||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * Check if a waiter is enqueued on the pending owner's | ||
314 | * pi_waiters list. Remove it and readjust the pending owner's | ||
315 | * priority. | ||
316 | */ | ||
317 | if (likely(!rt_mutex_has_waiters(lock))) { | ||
318 | spin_unlock_irqrestore(&pendowner->pi_lock, flags); | ||
319 | return 1; | ||
320 | } | ||
321 | |||
322 | /* No chain handling, pending owner is not blocked on anything: */ | ||
323 | next = rt_mutex_top_waiter(lock); | ||
324 | plist_del(&next->pi_list_entry, &pendowner->pi_waiters); | ||
325 | __rt_mutex_adjust_prio(pendowner); | ||
326 | spin_unlock_irqrestore(&pendowner->pi_lock, flags); | ||
327 | |||
328 | /* | ||
329 | * We are going to steal the lock and a waiter was | ||
330 | * enqueued on the pending owner's pi_waiters queue. So | ||
331 | * we have to enqueue this waiter into the | ||
332 | * current->pi_waiters list. This covers the case | ||
333 | * where current is boosted because it holds another | ||
334 | * lock and gets unboosted because the booster is | ||
335 | * interrupted, so we would delay a waiter with higher | ||
336 | * priority than current->normal_prio. | ||
337 | * | ||
338 | * Note: in the rare case of a SCHED_OTHER task changing | ||
339 | * its priority and thus stealing the lock, next->task | ||
340 | * might be current: | ||
341 | */ | ||
342 | if (likely(next->task != current)) { | ||
343 | spin_lock_irqsave(¤t->pi_lock, flags); | ||
344 | plist_add(&next->pi_list_entry, ¤t->pi_waiters); | ||
345 | __rt_mutex_adjust_prio(current); | ||
346 | spin_unlock_irqrestore(¤t->pi_lock, flags); | ||
347 | } | ||
348 | return 1; | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Try to take an rt-mutex | ||
353 | * | ||
354 | * This fails | ||
355 | * - when the lock has a real owner | ||
356 | * - when a different pending owner exists and has higher priority than current | ||
357 | * | ||
358 | * Must be called with lock->wait_lock held. | ||
359 | */ | ||
360 | static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) | ||
361 | { | ||
362 | /* | ||
363 | * We have to be careful here if the atomic speedups are | ||
364 | * enabled, such that, when | ||
365 | * - no other waiter is on the lock | ||
366 | * - the lock has been released since we did the cmpxchg | ||
367 | * the lock can be released or taken while we are doing the | ||
368 | * checks and marking the lock with RT_MUTEX_HAS_WAITERS. | ||
369 | * | ||
370 | * The atomic acquire/release aware variant of | ||
371 | * mark_rt_mutex_waiters uses a cmpxchg loop. After setting | ||
372 | * the WAITERS bit, the atomic release / acquire can not | ||
373 | * happen anymore and lock->wait_lock protects us from the | ||
374 | * non-atomic case. | ||
375 | * | ||
376 | * Note that this might set lock->owner = | ||
377 | * RT_MUTEX_HAS_WAITERS in the case the lock is not contended | ||
378 | * any more. This is fixed up when we take the ownership. | ||
379 | * This is the transitional state explained at the top of this file. | ||
380 | */ | ||
381 | mark_rt_mutex_waiters(lock); | ||
382 | |||
383 | if (rt_mutex_owner(lock) && !try_to_steal_lock(lock)) | ||
384 | return 0; | ||
385 | |||
386 | /* We got the lock. */ | ||
387 | debug_rt_mutex_lock(lock __IP__); | ||
388 | |||
389 | rt_mutex_set_owner(lock, current, 0); | ||
390 | |||
391 | rt_mutex_deadlock_account_lock(lock, current); | ||
392 | |||
393 | return 1; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Task blocks on lock. | ||
398 | * | ||
399 | * Prepare waiter and propagate pi chain | ||
400 | * | ||
401 | * This must be called with lock->wait_lock held. | ||
402 | */ | ||
403 | static int task_blocks_on_rt_mutex(struct rt_mutex *lock, | ||
404 | struct rt_mutex_waiter *waiter, | ||
405 | int detect_deadlock | ||
406 | __IP_DECL__) | ||
407 | { | ||
408 | struct rt_mutex_waiter *top_waiter = waiter; | ||
409 | task_t *owner = rt_mutex_owner(lock); | ||
410 | int boost = 0, res; | ||
411 | unsigned long flags; | ||
412 | |||
413 | spin_lock_irqsave(¤t->pi_lock, flags); | ||
414 | __rt_mutex_adjust_prio(current); | ||
415 | waiter->task = current; | ||
416 | waiter->lock = lock; | ||
417 | plist_node_init(&waiter->list_entry, current->prio); | ||
418 | plist_node_init(&waiter->pi_list_entry, current->prio); | ||
419 | |||
420 | /* Get the top priority waiter on the lock */ | ||
421 | if (rt_mutex_has_waiters(lock)) | ||
422 | top_waiter = rt_mutex_top_waiter(lock); | ||
423 | plist_add(&waiter->list_entry, &lock->wait_list); | ||
424 | |||
425 | current->pi_blocked_on = waiter; | ||
426 | |||
427 | spin_unlock_irqrestore(¤t->pi_lock, flags); | ||
428 | |||
429 | if (waiter == rt_mutex_top_waiter(lock)) { | ||
430 | spin_lock_irqsave(&owner->pi_lock, flags); | ||
431 | plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); | ||
432 | plist_add(&waiter->pi_list_entry, &owner->pi_waiters); | ||
433 | |||
434 | __rt_mutex_adjust_prio(owner); | ||
435 | if (owner->pi_blocked_on) { | ||
436 | boost = 1; | ||
437 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ | ||
438 | get_task_struct(owner); | ||
439 | } | ||
440 | spin_unlock_irqrestore(&owner->pi_lock, flags); | ||
441 | } | ||
442 | else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) { | ||
443 | spin_lock_irqsave(&owner->pi_lock, flags); | ||
444 | if (owner->pi_blocked_on) { | ||
445 | boost = 1; | ||
446 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ | ||
447 | get_task_struct(owner); | ||
448 | } | ||
449 | spin_unlock_irqrestore(&owner->pi_lock, flags); | ||
450 | } | ||
451 | if (!boost) | ||
452 | return 0; | ||
453 | |||
454 | spin_unlock(&lock->wait_lock); | ||
455 | |||
456 | res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, | ||
457 | current __IP__); | ||
458 | |||
459 | spin_lock(&lock->wait_lock); | ||
460 | |||
461 | return res; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * Wake up the next waiter on the lock. | ||
466 | * | ||
467 | * Remove the top waiter from the current task's waiter list and from | ||
468 | * the lock waiter list. Set it as pending owner. Then wake it up. | ||
469 | * | ||
470 | * Called with lock->wait_lock held. | ||
471 | */ | ||
472 | static void wakeup_next_waiter(struct rt_mutex *lock) | ||
473 | { | ||
474 | struct rt_mutex_waiter *waiter; | ||
475 | struct task_struct *pendowner; | ||
476 | unsigned long flags; | ||
477 | |||
478 | spin_lock_irqsave(¤t->pi_lock, flags); | ||
479 | |||
480 | waiter = rt_mutex_top_waiter(lock); | ||
481 | plist_del(&waiter->list_entry, &lock->wait_list); | ||
482 | |||
483 | /* | ||
484 | * Remove it from current->pi_waiters. We do not adjust a | ||
485 | * possible priority boost right now. We execute wakeup in the | ||
486 | * boosted mode and go back to normal after releasing | ||
487 | * lock->wait_lock. | ||
488 | */ | ||
489 | plist_del(&waiter->pi_list_entry, ¤t->pi_waiters); | ||
490 | pendowner = waiter->task; | ||
491 | waiter->task = NULL; | ||
492 | |||
493 | rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); | ||
494 | |||
495 | spin_unlock_irqrestore(¤t->pi_lock, flags); | ||
496 | |||
497 | /* | ||
498 | * Clear the pi_blocked_on variable and enqueue a possible | ||
499 | * waiter into the pi_waiters list of the pending owner. This | ||
500 | * ensures that, should the pending owner get unboosted, no | ||
501 | * waiter with higher priority than pending-owner->normal_prio | ||
502 | * is left blocked on the unboosted (pending) owner. | ||
503 | */ | ||
504 | spin_lock_irqsave(&pendowner->pi_lock, flags); | ||
505 | |||
506 | WARN_ON(!pendowner->pi_blocked_on); | ||
507 | WARN_ON(pendowner->pi_blocked_on != waiter); | ||
508 | WARN_ON(pendowner->pi_blocked_on->lock != lock); | ||
509 | |||
510 | pendowner->pi_blocked_on = NULL; | ||
511 | |||
512 | if (rt_mutex_has_waiters(lock)) { | ||
513 | struct rt_mutex_waiter *next; | ||
514 | |||
515 | next = rt_mutex_top_waiter(lock); | ||
516 | plist_add(&next->pi_list_entry, &pendowner->pi_waiters); | ||
517 | } | ||
518 | spin_unlock_irqrestore(&pendowner->pi_lock, flags); | ||
519 | |||
520 | wake_up_process(pendowner); | ||
521 | } | ||
522 | |||
523 | /* | ||
524 | * Remove a waiter from a lock | ||
525 | * | ||
526 | * Must be called with lock->wait_lock held | ||
527 | */ | ||
528 | static void remove_waiter(struct rt_mutex *lock, | ||
529 | struct rt_mutex_waiter *waiter __IP_DECL__) | ||
530 | { | ||
531 | int first = (waiter == rt_mutex_top_waiter(lock)); | ||
532 | int boost = 0; | ||
533 | task_t *owner = rt_mutex_owner(lock); | ||
534 | unsigned long flags; | ||
535 | |||
536 | spin_lock_irqsave(¤t->pi_lock, flags); | ||
537 | plist_del(&waiter->list_entry, &lock->wait_list); | ||
538 | waiter->task = NULL; | ||
539 | current->pi_blocked_on = NULL; | ||
540 | spin_unlock_irqrestore(¤t->pi_lock, flags); | ||
541 | |||
542 | if (first && owner != current) { | ||
543 | |||
544 | spin_lock_irqsave(&owner->pi_lock, flags); | ||
545 | |||
546 | plist_del(&waiter->pi_list_entry, &owner->pi_waiters); | ||
547 | |||
548 | if (rt_mutex_has_waiters(lock)) { | ||
549 | struct rt_mutex_waiter *next; | ||
550 | |||
551 | next = rt_mutex_top_waiter(lock); | ||
552 | plist_add(&next->pi_list_entry, &owner->pi_waiters); | ||
553 | } | ||
554 | __rt_mutex_adjust_prio(owner); | ||
555 | |||
556 | if (owner->pi_blocked_on) { | ||
557 | boost = 1; | ||
558 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ | ||
559 | get_task_struct(owner); | ||
560 | } | ||
561 | spin_unlock_irqrestore(&owner->pi_lock, flags); | ||
562 | } | ||
563 | |||
564 | WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); | ||
565 | |||
566 | if (!boost) | ||
567 | return; | ||
568 | |||
569 | spin_unlock(&lock->wait_lock); | ||
570 | |||
571 | rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__); | ||
572 | |||
573 | spin_lock(&lock->wait_lock); | ||
574 | } | ||
575 | |||
576 | /* | ||
577 | * Recheck the pi chain, in case we got a priority setting | ||
578 | * | ||
579 | * Called from sched_setscheduler | ||
580 | */ | ||
581 | void rt_mutex_adjust_pi(struct task_struct *task) | ||
582 | { | ||
583 | struct rt_mutex_waiter *waiter; | ||
584 | unsigned long flags; | ||
585 | |||
586 | spin_lock_irqsave(&task->pi_lock, flags); | ||
587 | |||
588 | waiter = task->pi_blocked_on; | ||
589 | if (!waiter || waiter->list_entry.prio == task->prio) { | ||
590 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
591 | return; | ||
592 | } | ||
593 | |||
594 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ | ||
595 | get_task_struct(task); | ||
596 | spin_unlock_irqrestore(&task->pi_lock, flags); | ||
597 | |||
598 | rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__); | ||
599 | } | ||
600 | |||
601 | /* | ||
602 | * Slow path lock function: | ||
603 | */ | ||
604 | static int __sched | ||
605 | rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||
606 | struct hrtimer_sleeper *timeout, | ||
607 | int detect_deadlock __IP_DECL__) | ||
608 | { | ||
609 | struct rt_mutex_waiter waiter; | ||
610 | int ret = 0; | ||
611 | |||
612 | debug_rt_mutex_init_waiter(&waiter); | ||
613 | waiter.task = NULL; | ||
614 | |||
615 | spin_lock(&lock->wait_lock); | ||
616 | |||
617 | /* Try to acquire the lock again: */ | ||
618 | if (try_to_take_rt_mutex(lock __IP__)) { | ||
619 | spin_unlock(&lock->wait_lock); | ||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | set_current_state(state); | ||
624 | |||
625 | /* Setup the timer, when timeout != NULL */ | ||
626 | if (unlikely(timeout)) | ||
627 | hrtimer_start(&timeout->timer, timeout->timer.expires, | ||
628 | HRTIMER_ABS); | ||
629 | |||
630 | for (;;) { | ||
631 | /* Try to acquire the lock: */ | ||
632 | if (try_to_take_rt_mutex(lock __IP__)) | ||
633 | break; | ||
634 | |||
635 | /* | ||
636 | * TASK_INTERRUPTIBLE checks for signals and | ||
637 | * timeout. Ignored otherwise. | ||
638 | */ | ||
639 | if (unlikely(state == TASK_INTERRUPTIBLE)) { | ||
640 | /* Signal pending? */ | ||
641 | if (signal_pending(current)) | ||
642 | ret = -EINTR; | ||
643 | if (timeout && !timeout->task) | ||
644 | ret = -ETIMEDOUT; | ||
645 | if (ret) | ||
646 | break; | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * waiter.task is NULL the first time we come here and | ||
651 | * when we have been woken up by the previous owner | ||
652 | * but the lock got stolen by a higher prio task. | ||
653 | */ | ||
654 | if (!waiter.task) { | ||
655 | ret = task_blocks_on_rt_mutex(lock, &waiter, | ||
656 | detect_deadlock __IP__); | ||
657 | /* | ||
658 | * If we got woken up by the owner then start loop | ||
659 | * all over without going into schedule to try | ||
660 | * to get the lock now: | ||
661 | */ | ||
662 | if (unlikely(!waiter.task)) | ||
663 | continue; | ||
664 | |||
665 | if (unlikely(ret)) | ||
666 | break; | ||
667 | } | ||
668 | |||
669 | spin_unlock(&lock->wait_lock); | ||
670 | |||
671 | debug_rt_mutex_print_deadlock(&waiter); | ||
672 | |||
673 | if (waiter.task) | ||
674 | schedule_rt_mutex(lock); | ||
675 | |||
676 | spin_lock(&lock->wait_lock); | ||
677 | set_current_state(state); | ||
678 | } | ||
679 | |||
680 | set_current_state(TASK_RUNNING); | ||
681 | |||
682 | if (unlikely(waiter.task)) | ||
683 | remove_waiter(lock, &waiter __IP__); | ||
684 | |||
685 | /* | ||
686 | * try_to_take_rt_mutex() sets the waiter bit | ||
687 | * unconditionally. We might have to fix that up. | ||
688 | */ | ||
689 | fixup_rt_mutex_waiters(lock); | ||
690 | |||
691 | spin_unlock(&lock->wait_lock); | ||
692 | |||
693 | /* Remove pending timer: */ | ||
694 | if (unlikely(timeout)) | ||
695 | hrtimer_cancel(&timeout->timer); | ||
696 | |||
697 | /* | ||
698 | * Readjust priority, when we did not get the lock. We might | ||
699 | * have been the pending owner and boosted. Since we did not | ||
700 | * take the lock, the PI boost has to go. | ||
701 | */ | ||
702 | if (unlikely(ret)) | ||
703 | rt_mutex_adjust_prio(current); | ||
704 | |||
705 | debug_rt_mutex_free_waiter(&waiter); | ||
706 | |||
707 | return ret; | ||
708 | } | ||
709 | |||
710 | /* | ||
711 | * Slow path try-lock function: | ||
712 | */ | ||
713 | static inline int | ||
714 | rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__) | ||
715 | { | ||
716 | int ret = 0; | ||
717 | |||
718 | spin_lock(&lock->wait_lock); | ||
719 | |||
720 | if (likely(rt_mutex_owner(lock) != current)) { | ||
721 | |||
722 | ret = try_to_take_rt_mutex(lock __IP__); | ||
723 | /* | ||
724 | * try_to_take_rt_mutex() sets the lock waiters | ||
725 | * bit unconditionally. Clean this up. | ||
726 | */ | ||
727 | fixup_rt_mutex_waiters(lock); | ||
728 | } | ||
729 | |||
730 | spin_unlock(&lock->wait_lock); | ||
731 | |||
732 | return ret; | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * Slow path to release a rt-mutex: | ||
737 | */ | ||
738 | static void __sched | ||
739 | rt_mutex_slowunlock(struct rt_mutex *lock) | ||
740 | { | ||
741 | spin_lock(&lock->wait_lock); | ||
742 | |||
743 | debug_rt_mutex_unlock(lock); | ||
744 | |||
745 | rt_mutex_deadlock_account_unlock(current); | ||
746 | |||
747 | if (!rt_mutex_has_waiters(lock)) { | ||
748 | lock->owner = NULL; | ||
749 | spin_unlock(&lock->wait_lock); | ||
750 | return; | ||
751 | } | ||
752 | |||
753 | wakeup_next_waiter(lock); | ||
754 | |||
755 | spin_unlock(&lock->wait_lock); | ||
756 | |||
757 | /* Undo pi boosting if necessary: */ | ||
758 | rt_mutex_adjust_prio(current); | ||
759 | } | ||
760 | |||
761 | /* | ||
762 | * debug aware fast / slowpath lock,trylock,unlock | ||
763 | * | ||
764 | * The atomic acquire/release ops are compiled away, when either the | ||
765 | * architecture does not support cmpxchg or when debugging is enabled. | ||
766 | */ | ||
767 | static inline int | ||
768 | rt_mutex_fastlock(struct rt_mutex *lock, int state, | ||
769 | int detect_deadlock, | ||
770 | int (*slowfn)(struct rt_mutex *lock, int state, | ||
771 | struct hrtimer_sleeper *timeout, | ||
772 | int detect_deadlock __IP_DECL__)) | ||
773 | { | ||
774 | if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { | ||
775 | rt_mutex_deadlock_account_lock(lock, current); | ||
776 | return 0; | ||
777 | } else | ||
778 | return slowfn(lock, state, NULL, detect_deadlock __RET_IP__); | ||
779 | } | ||
780 | |||
781 | static inline int | ||
782 | rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, | ||
783 | struct hrtimer_sleeper *timeout, int detect_deadlock, | ||
784 | int (*slowfn)(struct rt_mutex *lock, int state, | ||
785 | struct hrtimer_sleeper *timeout, | ||
786 | int detect_deadlock __IP_DECL__)) | ||
787 | { | ||
788 | if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { | ||
789 | rt_mutex_deadlock_account_lock(lock, current); | ||
790 | return 0; | ||
791 | } else | ||
792 | return slowfn(lock, state, timeout, detect_deadlock __RET_IP__); | ||
793 | } | ||
794 | |||
795 | static inline int | ||
796 | rt_mutex_fasttrylock(struct rt_mutex *lock, | ||
797 | int (*slowfn)(struct rt_mutex *lock __IP_DECL__)) | ||
798 | { | ||
799 | if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { | ||
800 | rt_mutex_deadlock_account_lock(lock, current); | ||
801 | return 1; | ||
802 | } | ||
803 | return slowfn(lock __RET_IP__); | ||
804 | } | ||
805 | |||
806 | static inline void | ||
807 | rt_mutex_fastunlock(struct rt_mutex *lock, | ||
808 | void (*slowfn)(struct rt_mutex *lock)) | ||
809 | { | ||
810 | if (likely(rt_mutex_cmpxchg(lock, current, NULL))) | ||
811 | rt_mutex_deadlock_account_unlock(current); | ||
812 | else | ||
813 | slowfn(lock); | ||
814 | } | ||
815 | |||
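The fast-path/slow-path split implemented by the helpers above can be sketched in isolation. This is a userspace approximation only (not part of the patch): C11 atomics stand in for the kernel's cmpxchg(), and the stand-in slowpath merely spins where rt_mutex_slowlock() enqueues a waiter and sleeps.

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(void *) owner;        /* NULL means the lock is free */

    static void slow_lock(void *me)
    {
        void *expected;

        /* stand-in for rt_mutex_slowlock(): the kernel enqueues a waiter
         * and sleeps; plain spinning is enough for this sketch. */
        do {
            expected = NULL;
        } while (!atomic_compare_exchange_weak(&owner, &expected, me));
    }

    static void lock(void *me)
    {
        void *expected = NULL;

        /* fast path: a single cmpxchg from "free" to "owned by me" */
        if (atomic_compare_exchange_strong(&owner, &expected, me))
            return;
        slow_lock(me);                   /* contended: take the slow path */
    }

    int main(void)
    {
        int task;                        /* stands in for "current" */

        lock(&task);
        printf("owner=%p\n", atomic_load(&owner));
        /* release for the sketch; the kernel's fast unlock is a cmpxchg
         * from "owned by me" back to NULL, falling back to the slowpath
         * when waiter bits are set. */
        atomic_store(&owner, NULL);
        return 0;
    }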
816 | /** | ||
817 | * rt_mutex_lock - lock a rt_mutex | ||
818 | * | ||
819 | * @lock: the rt_mutex to be locked | ||
820 | */ | ||
821 | void __sched rt_mutex_lock(struct rt_mutex *lock) | ||
822 | { | ||
823 | might_sleep(); | ||
824 | |||
825 | rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock); | ||
826 | } | ||
827 | EXPORT_SYMBOL_GPL(rt_mutex_lock); | ||
828 | |||
829 | /** | ||
830 | * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly | ||
831 | * | ||
832 | * @lock: the rt_mutex to be locked | ||
833 | * @detect_deadlock: deadlock detection on/off | ||
834 | * | ||
835 | * Returns: | ||
836 | * 0 on success | ||
837 | * -EINTR when interrupted by a signal | ||
838 | * -EDEADLK when the lock would deadlock (when deadlock detection is on) | ||
839 | */ | ||
840 | int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, | ||
841 | int detect_deadlock) | ||
842 | { | ||
843 | might_sleep(); | ||
844 | |||
845 | return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, | ||
846 | detect_deadlock, rt_mutex_slowlock); | ||
847 | } | ||
848 | EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); | ||
849 | |||
850 | /** | ||
851 | * rt_mutex_timed_lock - lock a rt_mutex interruptibly; | ||
852 | * the timeout structure is provided | ||
853 | * by the caller | ||
854 | * | ||
855 | * @lock: the rt_mutex to be locked | ||
856 | * @timeout: timeout structure or NULL (no timeout) | ||
857 | * @detect_deadlock: deadlock detection on/off | ||
858 | * | ||
859 | * Returns: | ||
860 | * 0 on success | ||
861 | * -EINTR when interrupted by a signal | ||
862 | * -ETIMEDOUT when the timeout expired | ||
863 | * -EDEADLK when the lock would deadlock (when deadlock detection is on) | ||
864 | */ | ||
865 | int | ||
866 | rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, | ||
867 | int detect_deadlock) | ||
868 | { | ||
869 | might_sleep(); | ||
870 | |||
871 | return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, | ||
872 | detect_deadlock, rt_mutex_slowlock); | ||
873 | } | ||
874 | EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); | ||
875 | |||
876 | /** | ||
877 | * rt_mutex_trylock - try to lock a rt_mutex | ||
878 | * | ||
879 | * @lock: the rt_mutex to be locked | ||
880 | * | ||
881 | * Returns 1 on success and 0 on contention | ||
882 | */ | ||
883 | int __sched rt_mutex_trylock(struct rt_mutex *lock) | ||
884 | { | ||
885 | return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); | ||
886 | } | ||
887 | EXPORT_SYMBOL_GPL(rt_mutex_trylock); | ||
888 | |||
889 | /** | ||
890 | * rt_mutex_unlock - unlock a rt_mutex | ||
891 | * | ||
892 | * @lock: the rt_mutex to be unlocked | ||
893 | */ | ||
894 | void __sched rt_mutex_unlock(struct rt_mutex *lock) | ||
895 | { | ||
896 | rt_mutex_fastunlock(lock, rt_mutex_slowunlock); | ||
897 | } | ||
898 | EXPORT_SYMBOL_GPL(rt_mutex_unlock); | ||
899 | |||
900 | /*** | ||
901 | * rt_mutex_destroy - mark a mutex unusable | ||
902 | * @lock: the mutex to be destroyed | ||
903 | * | ||
904 | * This function marks the mutex uninitialized, and any subsequent | ||
905 | * use of the mutex is forbidden. The mutex must not be locked when | ||
906 | * this function is called. | ||
907 | */ | ||
908 | void rt_mutex_destroy(struct rt_mutex *lock) | ||
909 | { | ||
910 | WARN_ON(rt_mutex_is_locked(lock)); | ||
911 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
912 | lock->magic = NULL; | ||
913 | #endif | ||
914 | } | ||
915 | |||
916 | EXPORT_SYMBOL_GPL(rt_mutex_destroy); | ||
917 | |||
918 | /** | ||
919 | * __rt_mutex_init - initialize the rt lock | ||
920 | * | ||
921 | * @lock: the rt lock to be initialized | ||
922 | * | ||
923 | * Initialize the rt lock to unlocked state. | ||
924 | * | ||
925 | * Initializing a locked rt lock is not allowed | ||
926 | */ | ||
927 | void __rt_mutex_init(struct rt_mutex *lock, const char *name) | ||
928 | { | ||
929 | lock->owner = NULL; | ||
930 | spin_lock_init(&lock->wait_lock); | ||
931 | plist_head_init(&lock->wait_list, &lock->wait_lock); | ||
932 | |||
933 | debug_rt_mutex_init(lock, name); | ||
934 | } | ||
935 | EXPORT_SYMBOL_GPL(__rt_mutex_init); | ||
936 | |||
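For orientation, a hedged sketch of how kernel-side code would drive the API exported above (illustrative only, not part of the patch; demo_lock, demo_counter and demo_init are made-up names, and the usual module boilerplate plus the accompanying include/linux/rtmutex.h are assumed):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/rtmutex.h>

    static struct rt_mutex demo_lock;    /* illustrative lock         */
    static int demo_counter;             /* the data it protects      */

    static int __init demo_init(void)
    {
        __rt_mutex_init(&demo_lock, "demo_lock");

        rt_mutex_lock(&demo_lock);       /* may sleep; PI-boosts the owner if we block */
        demo_counter++;
        rt_mutex_unlock(&demo_lock);

        if (rt_mutex_trylock(&demo_lock)) {   /* returns 1 on success, 0 on contention */
            demo_counter++;
            rt_mutex_unlock(&demo_lock);
        }
        return 0;
    }
    module_init(demo_init);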
937 | /** | ||
938 | * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a | ||
939 | * proxy owner | ||
940 | * | ||
941 | * @lock: the rt_mutex to be locked | ||
942 | * @proxy_owner: the task to set as owner | ||
943 | * | ||
944 | * No locking. Caller has to do serializing itself | ||
945 | * Special API call for PI-futex support | ||
946 | */ | ||
947 | void rt_mutex_init_proxy_locked(struct rt_mutex *lock, | ||
948 | struct task_struct *proxy_owner) | ||
949 | { | ||
950 | __rt_mutex_init(lock, NULL); | ||
951 | debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__); | ||
952 | rt_mutex_set_owner(lock, proxy_owner, 0); | ||
953 | rt_mutex_deadlock_account_lock(lock, proxy_owner); | ||
954 | } | ||
955 | |||
956 | /** | ||
957 | * rt_mutex_proxy_unlock - release a lock on behalf of owner | ||
958 | * | ||
959 | * @lock: the rt_mutex to be unlocked | ||
960 | * | ||
961 | * No locking. Caller has to do serializing itself | ||
962 | * Special API call for PI-futex support | ||
963 | */ | ||
964 | void rt_mutex_proxy_unlock(struct rt_mutex *lock, | ||
965 | struct task_struct *proxy_owner) | ||
966 | { | ||
967 | debug_rt_mutex_proxy_unlock(lock); | ||
968 | rt_mutex_set_owner(lock, NULL, 0); | ||
969 | rt_mutex_deadlock_account_unlock(proxy_owner); | ||
970 | } | ||
971 | |||
972 | /** | ||
973 | * rt_mutex_next_owner - return the next owner of the lock | ||
974 | * | ||
975 | * @lock: the rt lock to query | ||
976 | * | ||
977 | * Returns the next owner of the lock or NULL | ||
978 | * | ||
979 | * Caller has to serialize against other accessors to the lock | ||
980 | * itself. | ||
981 | * | ||
982 | * Special API call for PI-futex support | ||
983 | */ | ||
984 | struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) | ||
985 | { | ||
986 | if (!rt_mutex_has_waiters(lock)) | ||
987 | return NULL; | ||
988 | |||
989 | return rt_mutex_top_waiter(lock)->task; | ||
990 | } | ||
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h new file mode 100644 index 000000000000..1e0fca13ff72 --- /dev/null +++ b/kernel/rtmutex.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * RT-Mutexes: blocking mutual exclusion locks with PI support | ||
3 | * | ||
4 | * started by Ingo Molnar and Thomas Gleixner: | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
7 | * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
8 | * | ||
9 | * This file contains macros used solely by rtmutex.c. | ||
10 | * Non-debug version. | ||
11 | */ | ||
12 | |||
13 | #define __IP_DECL__ | ||
14 | #define __IP__ | ||
15 | #define __RET_IP__ | ||
16 | #define rt_mutex_deadlock_check(l) (0) | ||
17 | #define rt_mutex_deadlock_account_lock(m, t) do { } while (0) | ||
18 | #define rt_mutex_deadlock_account_unlock(l) do { } while (0) | ||
19 | #define debug_rt_mutex_init_waiter(w) do { } while (0) | ||
20 | #define debug_rt_mutex_free_waiter(w) do { } while (0) | ||
21 | #define debug_rt_mutex_lock(l) do { } while (0) | ||
22 | #define debug_rt_mutex_proxy_lock(l,p) do { } while (0) | ||
23 | #define debug_rt_mutex_proxy_unlock(l) do { } while (0) | ||
24 | #define debug_rt_mutex_unlock(l) do { } while (0) | ||
25 | #define debug_rt_mutex_init(m, n) do { } while (0) | ||
26 | #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) | ||
27 | #define debug_rt_mutex_print_deadlock(w) do { } while (0) | ||
28 | #define debug_rt_mutex_detect_deadlock(w,d) (d) | ||
29 | #define debug_rt_mutex_reset_waiter(w) do { } while (0) | ||
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h new file mode 100644 index 000000000000..9c75856e791e --- /dev/null +++ b/kernel/rtmutex_common.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * RT Mutexes: blocking mutual exclusion locks with PI support | ||
3 | * | ||
4 | * started by Ingo Molnar and Thomas Gleixner: | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
7 | * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> | ||
8 | * | ||
9 | * This file contains the private data structure and API definitions. | ||
10 | */ | ||
11 | |||
12 | #ifndef __KERNEL_RTMUTEX_COMMON_H | ||
13 | #define __KERNEL_RTMUTEX_COMMON_H | ||
14 | |||
15 | #include <linux/rtmutex.h> | ||
16 | |||
17 | /* | ||
18 | * The rtmutex in-kernel tester is independent of rtmutex debugging. We | ||
19 | * call schedule_rt_mutex_test() instead of schedule() for the tasks which | ||
20 | * belong to the tester. That way we can delay the wakeup path of those | ||
21 | * threads to provoke lock stealing and testing of complex boosting scenarios. | ||
22 | */ | ||
23 | #ifdef CONFIG_RT_MUTEX_TESTER | ||
24 | |||
25 | extern void schedule_rt_mutex_test(struct rt_mutex *lock); | ||
26 | |||
27 | #define schedule_rt_mutex(_lock) \ | ||
28 | do { \ | ||
29 | if (!(current->flags & PF_MUTEX_TESTER)) \ | ||
30 | schedule(); \ | ||
31 | else \ | ||
32 | schedule_rt_mutex_test(_lock); \ | ||
33 | } while (0) | ||
34 | |||
35 | #else | ||
36 | # define schedule_rt_mutex(_lock) schedule() | ||
37 | #endif | ||
38 | |||
39 | /* | ||
40 | * This is the control structure for tasks blocked on a rt_mutex, | ||
41 | * which is allocated on the kernel stack of the blocked task. | ||
42 | * | ||
43 | * @list_entry: pi node to enqueue into the mutex waiters list | ||
44 | * @pi_list_entry: pi node to enqueue into the mutex owner waiters list | ||
45 | * @task: task reference to the blocked task | ||
46 | */ | ||
47 | struct rt_mutex_waiter { | ||
48 | struct plist_node list_entry; | ||
49 | struct plist_node pi_list_entry; | ||
50 | struct task_struct *task; | ||
51 | struct rt_mutex *lock; | ||
52 | #ifdef CONFIG_DEBUG_RT_MUTEXES | ||
53 | unsigned long ip; | ||
54 | pid_t deadlock_task_pid; | ||
55 | struct rt_mutex *deadlock_lock; | ||
56 | #endif | ||
57 | }; | ||
58 | |||
59 | /* | ||
60 | * Various helpers to access the waiters-plist: | ||
61 | */ | ||
62 | static inline int rt_mutex_has_waiters(struct rt_mutex *lock) | ||
63 | { | ||
64 | return !plist_head_empty(&lock->wait_list); | ||
65 | } | ||
66 | |||
67 | static inline struct rt_mutex_waiter * | ||
68 | rt_mutex_top_waiter(struct rt_mutex *lock) | ||
69 | { | ||
70 | struct rt_mutex_waiter *w; | ||
71 | |||
72 | w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter, | ||
73 | list_entry); | ||
74 | BUG_ON(w->lock != lock); | ||
75 | |||
76 | return w; | ||
77 | } | ||
78 | |||
79 | static inline int task_has_pi_waiters(struct task_struct *p) | ||
80 | { | ||
81 | return !plist_head_empty(&p->pi_waiters); | ||
82 | } | ||
83 | |||
84 | static inline struct rt_mutex_waiter * | ||
85 | task_top_pi_waiter(struct task_struct *p) | ||
86 | { | ||
87 | return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter, | ||
88 | pi_list_entry); | ||
89 | } | ||
90 | |||
91 | /* | ||
92 | * lock->owner state tracking: | ||
93 | */ | ||
94 | #define RT_MUTEX_OWNER_PENDING 1UL | ||
95 | #define RT_MUTEX_HAS_WAITERS 2UL | ||
96 | #define RT_MUTEX_OWNER_MASKALL 3UL | ||
97 | |||
98 | static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) | ||
99 | { | ||
100 | return (struct task_struct *) | ||
101 | ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL); | ||
102 | } | ||
103 | |||
104 | static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock) | ||
105 | { | ||
106 | return (struct task_struct *) | ||
107 | ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); | ||
108 | } | ||
109 | |||
110 | static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock) | ||
111 | { | ||
112 | return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * PI-futex support (proxy locking functions, etc.): | ||
117 | */ | ||
118 | extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); | ||
119 | extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, | ||
120 | struct task_struct *proxy_owner); | ||
121 | extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, | ||
122 | struct task_struct *proxy_owner); | ||
123 | #endif | ||
diff --git a/kernel/sched.c b/kernel/sched.c index a856040c200a..2629c1711fd6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -168,15 +168,21 @@ | |||
168 | */ | 168 | */ |
169 | 169 | ||
170 | #define SCALE_PRIO(x, prio) \ | 170 | #define SCALE_PRIO(x, prio) \ |
171 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE) | 171 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) |
172 | 172 | ||
173 | static unsigned int task_timeslice(task_t *p) | 173 | static unsigned int static_prio_timeslice(int static_prio) |
174 | { | 174 | { |
175 | if (p->static_prio < NICE_TO_PRIO(0)) | 175 | if (static_prio < NICE_TO_PRIO(0)) |
176 | return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio); | 176 | return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio); |
177 | else | 177 | else |
178 | return SCALE_PRIO(DEF_TIMESLICE, p->static_prio); | 178 | return SCALE_PRIO(DEF_TIMESLICE, static_prio); |
179 | } | 179 | } |
180 | |||
181 | static inline unsigned int task_timeslice(task_t *p) | ||
182 | { | ||
183 | return static_prio_timeslice(p->static_prio); | ||
184 | } | ||
185 | |||
180 | #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ | 186 | #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ |
181 | < (long long) (sd)->cache_hot_time) | 187 | < (long long) (sd)->cache_hot_time) |
182 | 188 | ||
@@ -184,13 +190,11 @@ static unsigned int task_timeslice(task_t *p) | |||
184 | * These are the runqueue data structures: | 190 | * These are the runqueue data structures: |
185 | */ | 191 | */ |
186 | 192 | ||
187 | #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long)) | ||
188 | |||
189 | typedef struct runqueue runqueue_t; | 193 | typedef struct runqueue runqueue_t; |
190 | 194 | ||
191 | struct prio_array { | 195 | struct prio_array { |
192 | unsigned int nr_active; | 196 | unsigned int nr_active; |
193 | unsigned long bitmap[BITMAP_SIZE]; | 197 | DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ |
194 | struct list_head queue[MAX_PRIO]; | 198 | struct list_head queue[MAX_PRIO]; |
195 | }; | 199 | }; |
196 | 200 | ||
@@ -209,6 +213,7 @@ struct runqueue { | |||
209 | * remote CPUs use both these fields when doing load calculation. | 213 | * remote CPUs use both these fields when doing load calculation. |
210 | */ | 214 | */ |
211 | unsigned long nr_running; | 215 | unsigned long nr_running; |
216 | unsigned long raw_weighted_load; | ||
212 | #ifdef CONFIG_SMP | 217 | #ifdef CONFIG_SMP |
213 | unsigned long cpu_load[3]; | 218 | unsigned long cpu_load[3]; |
214 | #endif | 219 | #endif |
@@ -239,7 +244,6 @@ struct runqueue { | |||
239 | 244 | ||
240 | task_t *migration_thread; | 245 | task_t *migration_thread; |
241 | struct list_head migration_queue; | 246 | struct list_head migration_queue; |
242 | int cpu; | ||
243 | #endif | 247 | #endif |
244 | 248 | ||
245 | #ifdef CONFIG_SCHEDSTATS | 249 | #ifdef CONFIG_SCHEDSTATS |
@@ -351,11 +355,30 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) | |||
351 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ | 355 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
352 | 356 | ||
353 | /* | 357 | /* |
358 | * __task_rq_lock - lock the runqueue a given task resides on. | ||
359 | * Must be called with interrupts disabled. | ||
360 | */ | ||
361 | static inline runqueue_t *__task_rq_lock(task_t *p) | ||
362 | __acquires(rq->lock) | ||
363 | { | ||
364 | struct runqueue *rq; | ||
365 | |||
366 | repeat_lock_task: | ||
367 | rq = task_rq(p); | ||
368 | spin_lock(&rq->lock); | ||
369 | if (unlikely(rq != task_rq(p))) { | ||
370 | spin_unlock(&rq->lock); | ||
371 | goto repeat_lock_task; | ||
372 | } | ||
373 | return rq; | ||
374 | } | ||
375 | |||
376 | /* | ||
354 | * task_rq_lock - lock the runqueue a given task resides on and disable | 377 | * task_rq_lock - lock the runqueue a given task resides on and disable |
355 | * interrupts. Note the ordering: we can safely lookup the task_rq without | 378 | * interrupts. Note the ordering: we can safely lookup the task_rq without |
356 | * explicitly disabling preemption. | 379 | * explicitly disabling preemption. |
357 | */ | 380 | */ |
358 | static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) | 381 | static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) |
359 | __acquires(rq->lock) | 382 | __acquires(rq->lock) |
360 | { | 383 | { |
361 | struct runqueue *rq; | 384 | struct runqueue *rq; |
@@ -371,6 +394,12 @@ repeat_lock_task: | |||
371 | return rq; | 394 | return rq; |
372 | } | 395 | } |
373 | 396 | ||
397 | static inline void __task_rq_unlock(runqueue_t *rq) | ||
398 | __releases(rq->lock) | ||
399 | { | ||
400 | spin_unlock(&rq->lock); | ||
401 | } | ||
402 | |||
374 | static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) | 403 | static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) |
375 | __releases(rq->lock) | 404 | __releases(rq->lock) |
376 | { | 405 | { |
@@ -634,7 +663,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) | |||
634 | } | 663 | } |
635 | 664 | ||
636 | /* | 665 | /* |
637 | * effective_prio - return the priority that is based on the static | 666 | * __normal_prio - return the priority that is based on the static |
638 | * priority but is modified by bonuses/penalties. | 667 | * priority but is modified by bonuses/penalties. |
639 | * | 668 | * |
640 | * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] | 669 | * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] |
@@ -647,13 +676,11 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) | |||
647 | * | 676 | * |
648 | * Both properties are important to certain workloads. | 677 | * Both properties are important to certain workloads. |
649 | */ | 678 | */ |
650 | static int effective_prio(task_t *p) | 679 | |
680 | static inline int __normal_prio(task_t *p) | ||
651 | { | 681 | { |
652 | int bonus, prio; | 682 | int bonus, prio; |
653 | 683 | ||
654 | if (rt_task(p)) | ||
655 | return p->prio; | ||
656 | |||
657 | bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; | 684 | bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; |
658 | 685 | ||
659 | prio = p->static_prio - bonus; | 686 | prio = p->static_prio - bonus; |
@@ -665,6 +692,106 @@ static int effective_prio(task_t *p) | |||
665 | } | 692 | } |
666 | 693 | ||
667 | /* | 694 | /* |
695 | * To aid in avoiding the subversion of "niceness" due to uneven distribution | ||
696 | * of tasks with abnormal "nice" values across CPUs, the contribution that | ||
697 | * each task makes to its run queue's load is weighted according to its | ||
698 | * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a | ||
699 | * scaled version of the new time slice allocation that they receive on time | ||
700 | * slice expiry etc. | ||
701 | */ | ||
702 | |||
703 | /* | ||
704 | * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE | ||
705 | * If static_prio_timeslice() is ever changed to break this assumption then | ||
706 | * this code will need modification | ||
707 | */ | ||
708 | #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE | ||
709 | #define LOAD_WEIGHT(lp) \ | ||
710 | (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO) | ||
711 | #define PRIO_TO_LOAD_WEIGHT(prio) \ | ||
712 | LOAD_WEIGHT(static_prio_timeslice(prio)) | ||
713 | #define RTPRIO_TO_LOAD_WEIGHT(rp) \ | ||
714 | (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) | ||
715 | |||
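A quick standalone check of the weighting arithmetic above (not part of the patch; SCALE stands in for SCHED_LOAD_SCALE and the timeslice values are hypothetical). The point it shows: a nice-0 task contributes exactly one SCALE of load, and other tasks contribute in proportion to their timeslice.

    #include <stdio.h>

    #define SCALE           128   /* stand-in for SCHED_LOAD_SCALE     */
    #define NICE0_SLICE     100   /* stand-in for DEF_TIMESLICE, in ms */
    #define LOAD_WEIGHT(lp) (((lp) * SCALE) / NICE0_SLICE)

    int main(void)
    {
        /* nice 0: timeslice == NICE0_SLICE, so weight == SCALE */
        printf("nice-0 slice -> weight %d\n", LOAD_WEIGHT(NICE0_SLICE));
        /* a hypothetical large timeslice weighs proportionally more */
        printf("8x slice     -> weight %d\n", LOAD_WEIGHT(8 * NICE0_SLICE));
        /* a hypothetical tiny timeslice weighs proportionally less */
        printf("5ms slice    -> weight %d\n", LOAD_WEIGHT(5));
        return 0;
    }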
716 | static void set_load_weight(task_t *p) | ||
717 | { | ||
718 | if (has_rt_policy(p)) { | ||
719 | #ifdef CONFIG_SMP | ||
720 | if (p == task_rq(p)->migration_thread) | ||
721 | /* | ||
722 | * The migration thread does the actual balancing. | ||
723 | * Giving its load any weight will skew balancing | ||
724 | * adversely. | ||
725 | */ | ||
726 | p->load_weight = 0; | ||
727 | else | ||
728 | #endif | ||
729 | p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority); | ||
730 | } else | ||
731 | p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); | ||
732 | } | ||
733 | |||
734 | static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) | ||
735 | { | ||
736 | rq->raw_weighted_load += p->load_weight; | ||
737 | } | ||
738 | |||
739 | static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) | ||
740 | { | ||
741 | rq->raw_weighted_load -= p->load_weight; | ||
742 | } | ||
743 | |||
744 | static inline void inc_nr_running(task_t *p, runqueue_t *rq) | ||
745 | { | ||
746 | rq->nr_running++; | ||
747 | inc_raw_weighted_load(rq, p); | ||
748 | } | ||
749 | |||
750 | static inline void dec_nr_running(task_t *p, runqueue_t *rq) | ||
751 | { | ||
752 | rq->nr_running--; | ||
753 | dec_raw_weighted_load(rq, p); | ||
754 | } | ||
755 | |||
756 | /* | ||
757 | * Calculate the expected normal priority: i.e. priority | ||
758 | * without taking RT-inheritance into account. Might be | ||
759 | * boosted by interactivity modifiers. Changes upon fork, | ||
760 | * setprio syscalls, and whenever the interactivity | ||
761 | * estimator recalculates. | ||
762 | */ | ||
763 | static inline int normal_prio(task_t *p) | ||
764 | { | ||
765 | int prio; | ||
766 | |||
767 | if (has_rt_policy(p)) | ||
768 | prio = MAX_RT_PRIO-1 - p->rt_priority; | ||
769 | else | ||
770 | prio = __normal_prio(p); | ||
771 | return prio; | ||
772 | } | ||
773 | |||
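To make the RT branch of normal_prio() concrete, a minimal sketch of the mapping (assuming MAX_RT_PRIO is 100 as in mainline): a higher user-visible rt_priority maps to a numerically lower, i.e. stronger, kernel prio.

    #include <stdio.h>

    #define MAX_RT_PRIO 100   /* assumed value, as in mainline */

    int main(void)
    {
        int rt_priority;

        /* rt_priority 1 -> prio 98, 50 -> 49, 99 -> 0 */
        for (rt_priority = 1; rt_priority <= 99; rt_priority += 49)
            printf("rt_priority %2d -> prio %2d\n",
                   rt_priority, MAX_RT_PRIO - 1 - rt_priority);
        return 0;
    }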
774 | /* | ||
775 | * Calculate the current priority, i.e. the priority | ||
776 | * taken into account by the scheduler. This value might | ||
777 | * be boosted by RT tasks, or might be boosted by | ||
778 | * interactivity modifiers. Will be RT if the task got | ||
779 | * RT-boosted. If not then it returns p->normal_prio. | ||
780 | */ | ||
781 | static int effective_prio(task_t *p) | ||
782 | { | ||
783 | p->normal_prio = normal_prio(p); | ||
784 | /* | ||
785 | * If we are RT tasks or we were boosted to RT priority, | ||
786 | * keep the priority unchanged. Otherwise, update priority | ||
787 | * to the normal priority: | ||
788 | */ | ||
789 | if (!rt_prio(p->prio)) | ||
790 | return p->normal_prio; | ||
791 | return p->prio; | ||
792 | } | ||
793 | |||
794 | /* | ||
668 | * __activate_task - move a task to the runqueue. | 795 | * __activate_task - move a task to the runqueue. |
669 | */ | 796 | */ |
670 | static void __activate_task(task_t *p, runqueue_t *rq) | 797 | static void __activate_task(task_t *p, runqueue_t *rq) |
@@ -674,7 +801,7 @@ static void __activate_task(task_t *p, runqueue_t *rq) | |||
674 | if (batch_task(p)) | 801 | if (batch_task(p)) |
675 | target = rq->expired; | 802 | target = rq->expired; |
676 | enqueue_task(p, target); | 803 | enqueue_task(p, target); |
677 | rq->nr_running++; | 804 | inc_nr_running(p, rq); |
678 | } | 805 | } |
679 | 806 | ||
680 | /* | 807 | /* |
@@ -683,39 +810,45 @@ static void __activate_task(task_t *p, runqueue_t *rq) | |||
683 | static inline void __activate_idle_task(task_t *p, runqueue_t *rq) | 810 | static inline void __activate_idle_task(task_t *p, runqueue_t *rq) |
684 | { | 811 | { |
685 | enqueue_task_head(p, rq->active); | 812 | enqueue_task_head(p, rq->active); |
686 | rq->nr_running++; | 813 | inc_nr_running(p, rq); |
687 | } | 814 | } |
688 | 815 | ||
816 | /* | ||
817 | * Recalculate p->normal_prio and p->prio after having slept, | ||
818 | * updating the sleep-average too: | ||
819 | */ | ||
689 | static int recalc_task_prio(task_t *p, unsigned long long now) | 820 | static int recalc_task_prio(task_t *p, unsigned long long now) |
690 | { | 821 | { |
691 | /* Caller must always ensure 'now >= p->timestamp' */ | 822 | /* Caller must always ensure 'now >= p->timestamp' */ |
692 | unsigned long long __sleep_time = now - p->timestamp; | 823 | unsigned long sleep_time = now - p->timestamp; |
693 | unsigned long sleep_time; | ||
694 | 824 | ||
695 | if (batch_task(p)) | 825 | if (batch_task(p)) |
696 | sleep_time = 0; | 826 | sleep_time = 0; |
697 | else { | ||
698 | if (__sleep_time > NS_MAX_SLEEP_AVG) | ||
699 | sleep_time = NS_MAX_SLEEP_AVG; | ||
700 | else | ||
701 | sleep_time = (unsigned long)__sleep_time; | ||
702 | } | ||
703 | 827 | ||
704 | if (likely(sleep_time > 0)) { | 828 | if (likely(sleep_time > 0)) { |
705 | /* | 829 | /* |
706 | * User tasks that sleep a long time are categorised as | 830 | * This ceiling is set to the lowest priority that would allow |
707 | * idle. They will only have their sleep_avg increased to a | 831 | * a task to be reinserted into the active array on timeslice |
708 | * level that makes them just interactive priority to stay | 832 | * completion. |
709 | * active yet prevent them suddenly becoming cpu hogs and | ||
710 | * starving other processes. | ||
711 | */ | 833 | */ |
712 | if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) { | 834 | unsigned long ceiling = INTERACTIVE_SLEEP(p); |
713 | unsigned long ceiling; | ||
714 | 835 | ||
715 | ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG - | 836 | if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) { |
716 | DEF_TIMESLICE); | 837 | /* |
717 | if (p->sleep_avg < ceiling) | 838 | * Prevents user tasks from achieving best priority |
718 | p->sleep_avg = ceiling; | 839 | * with one single large enough sleep. |
840 | */ | ||
841 | p->sleep_avg = ceiling; | ||
842 | /* | ||
843 | * Using INTERACTIVE_SLEEP() as a ceiling places a | ||
844 | * nice(0) task 1ms sleep away from promotion, and | ||
845 | * gives it 700ms to round-robin with no chance of | ||
846 | * being demoted. This is more than generous, so | ||
847 | * mark this sleep as non-interactive to prevent the | ||
848 | * on-runqueue bonus logic from intervening should | ||
849 | * this task not receive cpu immediately. | ||
850 | */ | ||
851 | p->sleep_type = SLEEP_NONINTERACTIVE; | ||
719 | } else { | 852 | } else { |
720 | /* | 853 | /* |
721 | * Tasks waking from uninterruptible sleep are | 854 | * Tasks waking from uninterruptible sleep are |
@@ -723,12 +856,12 @@ static int recalc_task_prio(task_t *p, unsigned long long now) | |||
723 | * are likely to be waiting on I/O | 856 | * are likely to be waiting on I/O |
724 | */ | 857 | */ |
725 | if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) { | 858 | if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) { |
726 | if (p->sleep_avg >= INTERACTIVE_SLEEP(p)) | 859 | if (p->sleep_avg >= ceiling) |
727 | sleep_time = 0; | 860 | sleep_time = 0; |
728 | else if (p->sleep_avg + sleep_time >= | 861 | else if (p->sleep_avg + sleep_time >= |
729 | INTERACTIVE_SLEEP(p)) { | 862 | ceiling) { |
730 | p->sleep_avg = INTERACTIVE_SLEEP(p); | 863 | p->sleep_avg = ceiling; |
731 | sleep_time = 0; | 864 | sleep_time = 0; |
732 | } | 865 | } |
733 | } | 866 | } |
734 | 867 | ||
@@ -742,9 +875,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now) | |||
742 | */ | 875 | */ |
743 | p->sleep_avg += sleep_time; | 876 | p->sleep_avg += sleep_time; |
744 | 877 | ||
745 | if (p->sleep_avg > NS_MAX_SLEEP_AVG) | ||
746 | p->sleep_avg = NS_MAX_SLEEP_AVG; | ||
747 | } | 878 | } |
879 | if (p->sleep_avg > NS_MAX_SLEEP_AVG) | ||
880 | p->sleep_avg = NS_MAX_SLEEP_AVG; | ||
748 | } | 881 | } |
749 | 882 | ||
750 | return effective_prio(p); | 883 | return effective_prio(p); |
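The rewritten block clamps the sleep credit in two places: a long sleeper whose average is still below INTERACTIVE_SLEEP(p) is lifted only to that ceiling (and flagged SLEEP_NONINTERACTIVE so the on-runqueue bonus logic stays out of the way), and the NS_MAX_SLEEP_AVG cap now sits outside the sleep_time > 0 branch so batch tasks get clamped as well. A stripped-down sketch of just that arithmetic, ignoring the kernel-thread and uninterruptible-sleep special cases:

    /* Illustrative only - mirrors the clamping order of the hunk above. */
    static unsigned long update_sleep_avg(unsigned long sleep_avg,
                                          unsigned long sleep_time,
                                          unsigned long ceiling,  /* INTERACTIVE_SLEEP(p) */
                                          unsigned long max_avg)  /* NS_MAX_SLEEP_AVG */
    {
            if (sleep_time > 0) {
                    if (sleep_time > ceiling && sleep_avg < ceiling)
                            sleep_avg = ceiling;  /* one huge sleep cannot max out the bonus */
                    else
                            sleep_avg += sleep_time;
            }
            if (sleep_avg > max_avg)              /* applied even when sleep_time == 0 */
                    sleep_avg = max_avg;
            return sleep_avg;
    }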
@@ -805,7 +938,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local) | |||
805 | */ | 938 | */ |
806 | static void deactivate_task(struct task_struct *p, runqueue_t *rq) | 939 | static void deactivate_task(struct task_struct *p, runqueue_t *rq) |
807 | { | 940 | { |
808 | rq->nr_running--; | 941 | dec_nr_running(p, rq); |
809 | dequeue_task(p, p->array); | 942 | dequeue_task(p, p->array); |
810 | p->array = NULL; | 943 | p->array = NULL; |
811 | } | 944 | } |
@@ -860,6 +993,12 @@ inline int task_curr(const task_t *p) | |||
860 | return cpu_curr(task_cpu(p)) == p; | 993 | return cpu_curr(task_cpu(p)) == p; |
861 | } | 994 | } |
862 | 995 | ||
996 | /* Used instead of source_load when we know the type == 0 */ | ||
997 | unsigned long weighted_cpuload(const int cpu) | ||
998 | { | ||
999 | return cpu_rq(cpu)->raw_weighted_load; | ||
1000 | } | ||
1001 | |||
863 | #ifdef CONFIG_SMP | 1002 | #ifdef CONFIG_SMP |
864 | typedef struct { | 1003 | typedef struct { |
865 | struct list_head list; | 1004 | struct list_head list; |
@@ -949,7 +1088,8 @@ void kick_process(task_t *p) | |||
949 | } | 1088 | } |
950 | 1089 | ||
951 | /* | 1090 | /* |
952 | * Return a low guess at the load of a migration-source cpu. | 1091 | * Return a low guess at the load of a migration-source cpu weighted |
1092 | * according to the scheduling class and "nice" value. | ||
953 | * | 1093 | * |
954 | * We want to under-estimate the load of migration sources, to | 1094 | * We want to under-estimate the load of migration sources, to |
955 | * balance conservatively. | 1095 | * balance conservatively. |
@@ -957,24 +1097,36 @@ void kick_process(task_t *p) | |||
957 | static inline unsigned long source_load(int cpu, int type) | 1097 | static inline unsigned long source_load(int cpu, int type) |
958 | { | 1098 | { |
959 | runqueue_t *rq = cpu_rq(cpu); | 1099 | runqueue_t *rq = cpu_rq(cpu); |
960 | unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; | 1100 | |
961 | if (type == 0) | 1101 | if (type == 0) |
962 | return load_now; | 1102 | return rq->raw_weighted_load; |
963 | 1103 | ||
964 | return min(rq->cpu_load[type-1], load_now); | 1104 | return min(rq->cpu_load[type-1], rq->raw_weighted_load); |
965 | } | 1105 | } |
966 | 1106 | ||
967 | /* | 1107 | /* |
968 | * Return a high guess at the load of a migration-target cpu | 1108 | * Return a high guess at the load of a migration-target cpu weighted |
1109 | * according to the scheduling class and "nice" value. | ||
969 | */ | 1110 | */ |
970 | static inline unsigned long target_load(int cpu, int type) | 1111 | static inline unsigned long target_load(int cpu, int type) |
971 | { | 1112 | { |
972 | runqueue_t *rq = cpu_rq(cpu); | 1113 | runqueue_t *rq = cpu_rq(cpu); |
973 | unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; | 1114 | |
974 | if (type == 0) | 1115 | if (type == 0) |
975 | return load_now; | 1116 | return rq->raw_weighted_load; |
1117 | |||
1118 | return max(rq->cpu_load[type-1], rq->raw_weighted_load); | ||
1119 | } | ||
1120 | |||
1121 | /* | ||
1122 | * Return the average load per task on the cpu's run queue | ||
1123 | */ | ||
1124 | static inline unsigned long cpu_avg_load_per_task(int cpu) | ||
1125 | { | ||
1126 | runqueue_t *rq = cpu_rq(cpu); | ||
1127 | unsigned long n = rq->nr_running; | ||
976 | 1128 | ||
977 | return max(rq->cpu_load[type-1], load_now); | 1129 | return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; |
978 | } | 1130 | } |
979 | 1131 | ||
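cpu_avg_load_per_task() converts the weighted figure back into "load per runnable task", and defaults to SCHED_LOAD_SCALE - roughly the weight of one nice-0 task under this scheme - when the queue is empty, so callers never divide by zero. A worked example with made-up numbers (assume SCHED_LOAD_SCALE is 1024; the exact value does not change the shape of the calculation):

    /* Illustrative arithmetic only.
     * A CPU runs three tasks with load_weight 1024, 1024 and 512:
     *   raw_weighted_load     = 1024 + 1024 + 512 = 2560
     *   nr_running            = 3
     *   cpu_avg_load_per_task = 2560 / 3 = 853
     * An idle CPU reports 1024, i.e. "one average nice-0 task".
     */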
980 | /* | 1132 | /* |
@@ -1047,7 +1199,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | |||
1047 | cpus_and(tmp, group->cpumask, p->cpus_allowed); | 1199 | cpus_and(tmp, group->cpumask, p->cpus_allowed); |
1048 | 1200 | ||
1049 | for_each_cpu_mask(i, tmp) { | 1201 | for_each_cpu_mask(i, tmp) { |
1050 | load = source_load(i, 0); | 1202 | load = weighted_cpuload(i); |
1051 | 1203 | ||
1052 | if (load < min_load || (load == min_load && i == this_cpu)) { | 1204 | if (load < min_load || (load == min_load && i == this_cpu)) { |
1053 | min_load = load; | 1205 | min_load = load; |
@@ -1074,9 +1226,15 @@ static int sched_balance_self(int cpu, int flag) | |||
1074 | struct task_struct *t = current; | 1226 | struct task_struct *t = current; |
1075 | struct sched_domain *tmp, *sd = NULL; | 1227 | struct sched_domain *tmp, *sd = NULL; |
1076 | 1228 | ||
1077 | for_each_domain(cpu, tmp) | 1229 | for_each_domain(cpu, tmp) { |
1230 | /* | ||
1231 | * If power savings logic is enabled for a domain, stop there. | ||
1232 | */ | ||
1233 | if (tmp->flags & SD_POWERSAVINGS_BALANCE) | ||
1234 | break; | ||
1078 | if (tmp->flags & flag) | 1235 | if (tmp->flags & flag) |
1079 | sd = tmp; | 1236 | sd = tmp; |
1237 | } | ||
1080 | 1238 | ||
1081 | while (sd) { | 1239 | while (sd) { |
1082 | cpumask_t span; | 1240 | cpumask_t span; |
@@ -1226,17 +1384,19 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync) | |||
1226 | 1384 | ||
1227 | if (this_sd->flags & SD_WAKE_AFFINE) { | 1385 | if (this_sd->flags & SD_WAKE_AFFINE) { |
1228 | unsigned long tl = this_load; | 1386 | unsigned long tl = this_load; |
1387 | unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu); | ||
1388 | |||
1229 | /* | 1389 | /* |
1230 | * If sync wakeup then subtract the (maximum possible) | 1390 | * If sync wakeup then subtract the (maximum possible) |
1231 | * effect of the currently running task from the load | 1391 | * effect of the currently running task from the load |
1232 | * of the current CPU: | 1392 | * of the current CPU: |
1233 | */ | 1393 | */ |
1234 | if (sync) | 1394 | if (sync) |
1235 | tl -= SCHED_LOAD_SCALE; | 1395 | tl -= current->load_weight; |
1236 | 1396 | ||
1237 | if ((tl <= load && | 1397 | if ((tl <= load && |
1238 | tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) || | 1398 | tl + target_load(cpu, idx) <= tl_per_task) || |
1239 | 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) { | 1399 | 100*(tl + p->load_weight) <= imbalance*load) { |
1240 | /* | 1400 | /* |
1241 | * This domain has SD_WAKE_AFFINE and | 1401 | * This domain has SD_WAKE_AFFINE and |
1242 | * p is cache cold in this domain, and | 1402 | * p is cache cold in this domain, and |
@@ -1353,6 +1513,12 @@ void fastcall sched_fork(task_t *p, int clone_flags) | |||
1353 | * event cannot wake it up and insert it on the runqueue either. | 1513 | * event cannot wake it up and insert it on the runqueue either. |
1354 | */ | 1514 | */ |
1355 | p->state = TASK_RUNNING; | 1515 | p->state = TASK_RUNNING; |
1516 | |||
1517 | /* | ||
1518 | * Make sure we do not leak PI boosting priority to the child: | ||
1519 | */ | ||
1520 | p->prio = current->normal_prio; | ||
1521 | |||
1356 | INIT_LIST_HEAD(&p->run_list); | 1522 | INIT_LIST_HEAD(&p->run_list); |
1357 | p->array = NULL; | 1523 | p->array = NULL; |
1358 | #ifdef CONFIG_SCHEDSTATS | 1524 | #ifdef CONFIG_SCHEDSTATS |
@@ -1432,10 +1598,11 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) | |||
1432 | __activate_task(p, rq); | 1598 | __activate_task(p, rq); |
1433 | else { | 1599 | else { |
1434 | p->prio = current->prio; | 1600 | p->prio = current->prio; |
1601 | p->normal_prio = current->normal_prio; | ||
1435 | list_add_tail(&p->run_list, ¤t->run_list); | 1602 | list_add_tail(&p->run_list, ¤t->run_list); |
1436 | p->array = current->array; | 1603 | p->array = current->array; |
1437 | p->array->nr_active++; | 1604 | p->array->nr_active++; |
1438 | rq->nr_running++; | 1605 | inc_nr_running(p, rq); |
1439 | } | 1606 | } |
1440 | set_need_resched(); | 1607 | set_need_resched(); |
1441 | } else | 1608 | } else |
@@ -1653,7 +1820,8 @@ unsigned long nr_uninterruptible(void) | |||
1653 | 1820 | ||
1654 | unsigned long long nr_context_switches(void) | 1821 | unsigned long long nr_context_switches(void) |
1655 | { | 1822 | { |
1656 | unsigned long long i, sum = 0; | 1823 | int i; |
1824 | unsigned long long sum = 0; | ||
1657 | 1825 | ||
1658 | for_each_possible_cpu(i) | 1826 | for_each_possible_cpu(i) |
1659 | sum += cpu_rq(i)->nr_switches; | 1827 | sum += cpu_rq(i)->nr_switches; |
@@ -1691,9 +1859,6 @@ unsigned long nr_active(void) | |||
1691 | /* | 1859 | /* |
1692 | * double_rq_lock - safely lock two runqueues | 1860 | * double_rq_lock - safely lock two runqueues |
1693 | * | 1861 | * |
1694 | * We must take them in cpu order to match code in | ||
1695 | * dependent_sleeper and wake_dependent_sleeper. | ||
1696 | * | ||
1697 | * Note this does not disable interrupts like task_rq_lock, | 1862 | * Note this does not disable interrupts like task_rq_lock, |
1698 | * you need to do so manually before calling. | 1863 | * you need to do so manually before calling. |
1699 | */ | 1864 | */ |
@@ -1705,7 +1870,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) | |||
1705 | spin_lock(&rq1->lock); | 1870 | spin_lock(&rq1->lock); |
1706 | __acquire(rq2->lock); /* Fake it out ;) */ | 1871 | __acquire(rq2->lock); /* Fake it out ;) */ |
1707 | } else { | 1872 | } else { |
1708 | if (rq1->cpu < rq2->cpu) { | 1873 | if (rq1 < rq2) { |
1709 | spin_lock(&rq1->lock); | 1874 | spin_lock(&rq1->lock); |
1710 | spin_lock(&rq2->lock); | 1875 | spin_lock(&rq2->lock); |
1711 | } else { | 1876 | } else { |
@@ -1741,7 +1906,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) | |||
1741 | __acquires(this_rq->lock) | 1906 | __acquires(this_rq->lock) |
1742 | { | 1907 | { |
1743 | if (unlikely(!spin_trylock(&busiest->lock))) { | 1908 | if (unlikely(!spin_trylock(&busiest->lock))) { |
1744 | if (busiest->cpu < this_rq->cpu) { | 1909 | if (busiest < this_rq) { |
1745 | spin_unlock(&this_rq->lock); | 1910 | spin_unlock(&this_rq->lock); |
1746 | spin_lock(&busiest->lock); | 1911 | spin_lock(&busiest->lock); |
1747 | spin_lock(&this_rq->lock); | 1912 | spin_lock(&this_rq->lock); |
@@ -1804,9 +1969,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, | |||
1804 | runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) | 1969 | runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) |
1805 | { | 1970 | { |
1806 | dequeue_task(p, src_array); | 1971 | dequeue_task(p, src_array); |
1807 | src_rq->nr_running--; | 1972 | dec_nr_running(p, src_rq); |
1808 | set_task_cpu(p, this_cpu); | 1973 | set_task_cpu(p, this_cpu); |
1809 | this_rq->nr_running++; | 1974 | inc_nr_running(p, this_rq); |
1810 | enqueue_task(p, this_array); | 1975 | enqueue_task(p, this_array); |
1811 | p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) | 1976 | p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) |
1812 | + this_rq->timestamp_last_tick; | 1977 | + this_rq->timestamp_last_tick; |
@@ -1853,26 +2018,42 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, | |||
1853 | return 1; | 2018 | return 1; |
1854 | } | 2019 | } |
1855 | 2020 | ||
2021 | #define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio) | ||
1856 | /* | 2022 | /* |
1857 | * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq, | 2023 | * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted |
1858 | * as part of a balancing operation within "domain". Returns the number of | 2024 | * load from busiest to this_rq, as part of a balancing operation within |
1859 | * tasks moved. | 2025 | * "domain". Returns the number of tasks moved. |
1860 | * | 2026 | * |
1861 | * Called with both runqueues locked. | 2027 | * Called with both runqueues locked. |
1862 | */ | 2028 | */ |
1863 | static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, | 2029 | static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, |
1864 | unsigned long max_nr_move, struct sched_domain *sd, | 2030 | unsigned long max_nr_move, unsigned long max_load_move, |
1865 | enum idle_type idle, int *all_pinned) | 2031 | struct sched_domain *sd, enum idle_type idle, |
2032 | int *all_pinned) | ||
1866 | { | 2033 | { |
1867 | prio_array_t *array, *dst_array; | 2034 | prio_array_t *array, *dst_array; |
1868 | struct list_head *head, *curr; | 2035 | struct list_head *head, *curr; |
1869 | int idx, pulled = 0, pinned = 0; | 2036 | int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio; |
2037 | int busiest_best_prio_seen; | ||
2038 | int skip_for_load; /* skip the task based on weighted load issues */ | ||
2039 | long rem_load_move; | ||
1870 | task_t *tmp; | 2040 | task_t *tmp; |
1871 | 2041 | ||
1872 | if (max_nr_move == 0) | 2042 | if (max_nr_move == 0 || max_load_move == 0) |
1873 | goto out; | 2043 | goto out; |
1874 | 2044 | ||
2045 | rem_load_move = max_load_move; | ||
1875 | pinned = 1; | 2046 | pinned = 1; |
2047 | this_best_prio = rq_best_prio(this_rq); | ||
2048 | busiest_best_prio = rq_best_prio(busiest); | ||
2049 | /* | ||
2050 | * Enable handling of the case where there is more than one task | ||
2051 | * with the best priority. If the current running task is one | ||
2052 | * of those with prio==busiest_best_prio we know it won't be moved | ||
2053 | * and therefore it's safe to override the skip (based on load) of | ||
2054 | * any task we find with that prio. | ||
2055 | */ | ||
2056 | busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio; | ||
1876 | 2057 | ||
1877 | /* | 2058 | /* |
1878 | * We first consider expired tasks. Those will likely not be | 2059 | * We first consider expired tasks. Those will likely not be |
@@ -1912,7 +2093,17 @@ skip_queue: | |||
1912 | 2093 | ||
1913 | curr = curr->prev; | 2094 | curr = curr->prev; |
1914 | 2095 | ||
1915 | if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { | 2096 | /* |
2097 | * To help distribute high priority tasks across CPUs we don't |
2098 | * skip a task if it will be the highest priority task (i.e. smallest | ||
2099 | * prio value) on its new queue regardless of its load weight | ||
2100 | */ | ||
2101 | skip_for_load = tmp->load_weight > rem_load_move; | ||
2102 | if (skip_for_load && idx < this_best_prio) | ||
2103 | skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio; | ||
2104 | if (skip_for_load || | ||
2105 | !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { | ||
2106 | busiest_best_prio_seen |= idx == busiest_best_prio; | ||
1916 | if (curr != head) | 2107 | if (curr != head) |
1917 | goto skip_queue; | 2108 | goto skip_queue; |
1918 | idx++; | 2109 | idx++; |
@@ -1926,9 +2117,15 @@ skip_queue: | |||
1926 | 2117 | ||
1927 | pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); | 2118 | pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); |
1928 | pulled++; | 2119 | pulled++; |
2120 | rem_load_move -= tmp->load_weight; | ||
1929 | 2121 | ||
1930 | /* We only want to steal up to the prescribed number of tasks. */ | 2122 | /* |
1931 | if (pulled < max_nr_move) { | 2123 | * We only want to steal up to the prescribed number of tasks |
2124 | * and the prescribed amount of weighted load. | ||
2125 | */ | ||
2126 | if (pulled < max_nr_move && rem_load_move > 0) { | ||
2127 | if (idx < this_best_prio) | ||
2128 | this_best_prio = idx; | ||
1932 | if (curr != head) | 2129 | if (curr != head) |
1933 | goto skip_queue; | 2130 | goto skip_queue; |
1934 | idx++; | 2131 | idx++; |
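The candidate filter added above reads as a small predicate: a task is normally skipped when its load_weight alone exceeds the weighted load still to be moved, but the skip is overridden when the task would become the top priority on the destination queue - unless it shares the source queue's best prio and an equal-prio task is already known to stay behind. Restated as a standalone sketch:

    /* Sketch of the skip decision above; `seen` is busiest_best_prio_seen.
     * (The kernel also updates that flag when can_migrate_task() refuses the
     * task; that part is omitted here.) */
    static int skip_for_load(unsigned int load_weight, long rem_load_move,
                             int idx, int this_best_prio,
                             int busiest_best_prio, int seen)
    {
            int skip = load_weight > rem_load_move;

            /* Never skip a task that would be best prio on the new queue,
             * unless an equal-prio task is guaranteed to stay on the source. */
            if (skip && idx < this_best_prio)
                    skip = !seen && idx == busiest_best_prio;

            return skip;
    }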
@@ -1949,7 +2146,7 @@ out: | |||
1949 | 2146 | ||
1950 | /* | 2147 | /* |
1951 | * find_busiest_group finds and returns the busiest CPU group within the | 2148 | * find_busiest_group finds and returns the busiest CPU group within the |
1952 | * domain. It calculates and returns the number of tasks which should be | 2149 | * domain. It calculates and returns the amount of weighted load which should be |
1953 | * moved to restore balance via the imbalance parameter. | 2150 | * moved to restore balance via the imbalance parameter. |
1954 | */ | 2151 | */ |
1955 | static struct sched_group * | 2152 | static struct sched_group * |
@@ -1959,9 +2156,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1959 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 2156 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
1960 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 2157 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
1961 | unsigned long max_pull; | 2158 | unsigned long max_pull; |
2159 | unsigned long busiest_load_per_task, busiest_nr_running; | ||
2160 | unsigned long this_load_per_task, this_nr_running; | ||
1962 | int load_idx; | 2161 | int load_idx; |
2162 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
2163 | int power_savings_balance = 1; | ||
2164 | unsigned long leader_nr_running = 0, min_load_per_task = 0; | ||
2165 | unsigned long min_nr_running = ULONG_MAX; | ||
2166 | struct sched_group *group_min = NULL, *group_leader = NULL; | ||
2167 | #endif | ||
1963 | 2168 | ||
1964 | max_load = this_load = total_load = total_pwr = 0; | 2169 | max_load = this_load = total_load = total_pwr = 0; |
2170 | busiest_load_per_task = busiest_nr_running = 0; | ||
2171 | this_load_per_task = this_nr_running = 0; | ||
1965 | if (idle == NOT_IDLE) | 2172 | if (idle == NOT_IDLE) |
1966 | load_idx = sd->busy_idx; | 2173 | load_idx = sd->busy_idx; |
1967 | else if (idle == NEWLY_IDLE) | 2174 | else if (idle == NEWLY_IDLE) |
@@ -1970,16 +2177,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1970 | load_idx = sd->idle_idx; | 2177 | load_idx = sd->idle_idx; |
1971 | 2178 | ||
1972 | do { | 2179 | do { |
1973 | unsigned long load; | 2180 | unsigned long load, group_capacity; |
1974 | int local_group; | 2181 | int local_group; |
1975 | int i; | 2182 | int i; |
2183 | unsigned long sum_nr_running, sum_weighted_load; | ||
1976 | 2184 | ||
1977 | local_group = cpu_isset(this_cpu, group->cpumask); | 2185 | local_group = cpu_isset(this_cpu, group->cpumask); |
1978 | 2186 | ||
1979 | /* Tally up the load of all CPUs in the group */ | 2187 | /* Tally up the load of all CPUs in the group */ |
1980 | avg_load = 0; | 2188 | sum_weighted_load = sum_nr_running = avg_load = 0; |
1981 | 2189 | ||
1982 | for_each_cpu_mask(i, group->cpumask) { | 2190 | for_each_cpu_mask(i, group->cpumask) { |
2191 | runqueue_t *rq = cpu_rq(i); | ||
2192 | |||
1983 | if (*sd_idle && !idle_cpu(i)) | 2193 | if (*sd_idle && !idle_cpu(i)) |
1984 | *sd_idle = 0; | 2194 | *sd_idle = 0; |
1985 | 2195 | ||
@@ -1990,6 +2200,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1990 | load = source_load(i, load_idx); | 2200 | load = source_load(i, load_idx); |
1991 | 2201 | ||
1992 | avg_load += load; | 2202 | avg_load += load; |
2203 | sum_nr_running += rq->nr_running; | ||
2204 | sum_weighted_load += rq->raw_weighted_load; | ||
1993 | } | 2205 | } |
1994 | 2206 | ||
1995 | total_load += avg_load; | 2207 | total_load += avg_load; |
@@ -1998,17 +2210,80 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
1998 | /* Adjust by relative CPU power of the group */ | 2210 | /* Adjust by relative CPU power of the group */ |
1999 | avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; | 2211 | avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; |
2000 | 2212 | ||
2213 | group_capacity = group->cpu_power / SCHED_LOAD_SCALE; | ||
2214 | |||
2001 | if (local_group) { | 2215 | if (local_group) { |
2002 | this_load = avg_load; | 2216 | this_load = avg_load; |
2003 | this = group; | 2217 | this = group; |
2004 | } else if (avg_load > max_load) { | 2218 | this_nr_running = sum_nr_running; |
2219 | this_load_per_task = sum_weighted_load; | ||
2220 | } else if (avg_load > max_load && | ||
2221 | sum_nr_running > group_capacity) { | ||
2005 | max_load = avg_load; | 2222 | max_load = avg_load; |
2006 | busiest = group; | 2223 | busiest = group; |
2224 | busiest_nr_running = sum_nr_running; | ||
2225 | busiest_load_per_task = sum_weighted_load; | ||
2007 | } | 2226 | } |
2227 | |||
2228 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
2229 | /* | ||
2230 | * Busy processors will not participate in power savings | ||
2231 | * balance. | ||
2232 | */ | ||
2233 | if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | ||
2234 | goto group_next; | ||
2235 | |||
2236 | /* | ||
2237 | * If the local group is idle or completely loaded | ||
2238 | * no need to do power savings balance at this domain | ||
2239 | */ | ||
2240 | if (local_group && (this_nr_running >= group_capacity || | ||
2241 | !this_nr_running)) | ||
2242 | power_savings_balance = 0; | ||
2243 | |||
2244 | /* | ||
2245 | * If a group is already running at full capacity or idle, | ||
2246 | * don't include that group in power savings calculations | ||
2247 | */ | ||
2248 | if (!power_savings_balance || sum_nr_running >= group_capacity | ||
2249 | || !sum_nr_running) | ||
2250 | goto group_next; | ||
2251 | |||
2252 | /* | ||
2253 | * Calculate the group which has the least non-idle load. | ||
2254 | * This is the group from where we need to pick up the load | ||
2255 | * for saving power | ||
2256 | */ | ||
2257 | if ((sum_nr_running < min_nr_running) || | ||
2258 | (sum_nr_running == min_nr_running && | ||
2259 | first_cpu(group->cpumask) < | ||
2260 | first_cpu(group_min->cpumask))) { | ||
2261 | group_min = group; | ||
2262 | min_nr_running = sum_nr_running; | ||
2263 | min_load_per_task = sum_weighted_load / | ||
2264 | sum_nr_running; | ||
2265 | } | ||
2266 | |||
2267 | /* | ||
2268 | * Calculate the group which is almost near its | ||
2269 | * capacity but still has some space to pick up some load | ||
2270 | * from other group and save more power | ||
2271 | */ | ||
2272 | if (sum_nr_running <= group_capacity - 1) | ||
2273 | if (sum_nr_running > leader_nr_running || | ||
2274 | (sum_nr_running == leader_nr_running && | ||
2275 | first_cpu(group->cpumask) > | ||
2276 | first_cpu(group_leader->cpumask))) { | ||
2277 | group_leader = group; | ||
2278 | leader_nr_running = sum_nr_running; | ||
2279 | } | ||
2280 | |||
2281 | group_next: | ||
2282 | #endif | ||
2008 | group = group->next; | 2283 | group = group->next; |
2009 | } while (group != sd->groups); | 2284 | } while (group != sd->groups); |
2010 | 2285 | ||
2011 | if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE) | 2286 | if (!busiest || this_load >= max_load || busiest_nr_running == 0) |
2012 | goto out_balanced; | 2287 | goto out_balanced; |
2013 | 2288 | ||
2014 | avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; | 2289 | avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; |
@@ -2017,6 +2292,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2017 | 100*max_load <= sd->imbalance_pct*this_load) | 2292 | 100*max_load <= sd->imbalance_pct*this_load) |
2018 | goto out_balanced; | 2293 | goto out_balanced; |
2019 | 2294 | ||
2295 | busiest_load_per_task /= busiest_nr_running; | ||
2020 | /* | 2296 | /* |
2021 | * We're trying to get all the cpus to the average_load, so we don't | 2297 | * We're trying to get all the cpus to the average_load, so we don't |
2022 | * want to push ourselves above the average load, nor do we wish to | 2298 | * want to push ourselves above the average load, nor do we wish to |
@@ -2028,21 +2304,50 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2028 | * by pulling tasks to us. Be careful of negative numbers as they'll | 2304 | * by pulling tasks to us. Be careful of negative numbers as they'll |
2029 | * appear as very large values with unsigned longs. | 2305 | * appear as very large values with unsigned longs. |
2030 | */ | 2306 | */ |
2307 | if (max_load <= busiest_load_per_task) | ||
2308 | goto out_balanced; | ||
2309 | |||
2310 | /* | ||
2311 | * In the presence of smp nice balancing, certain scenarios can have | ||
2312 | * max load less than avg load (as we skip the groups at or below |
2313 | * their cpu_power, while calculating max_load..) |
2314 | */ | ||
2315 | if (max_load < avg_load) { | ||
2316 | *imbalance = 0; | ||
2317 | goto small_imbalance; | ||
2318 | } | ||
2031 | 2319 | ||
2032 | /* Don't want to pull so many tasks that a group would go idle */ | 2320 | /* Don't want to pull so many tasks that a group would go idle */ |
2033 | max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE); | 2321 | max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); |
2034 | 2322 | ||
2035 | /* How much load to actually move to equalise the imbalance */ | 2323 | /* How much load to actually move to equalise the imbalance */ |
2036 | *imbalance = min(max_pull * busiest->cpu_power, | 2324 | *imbalance = min(max_pull * busiest->cpu_power, |
2037 | (avg_load - this_load) * this->cpu_power) | 2325 | (avg_load - this_load) * this->cpu_power) |
2038 | / SCHED_LOAD_SCALE; | 2326 | / SCHED_LOAD_SCALE; |
2039 | 2327 | ||
2040 | if (*imbalance < SCHED_LOAD_SCALE) { | 2328 | /* |
2041 | unsigned long pwr_now = 0, pwr_move = 0; | 2329 | * if *imbalance is less than the average load per runnable task |
2330 | * there is no guarantee that any tasks will be moved so we'll have |
2331 | * a think about bumping its value to force at least one task to be | ||
2332 | * moved | ||
2333 | */ | ||
2334 | if (*imbalance < busiest_load_per_task) { | ||
2335 | unsigned long pwr_now, pwr_move; | ||
2042 | unsigned long tmp; | 2336 | unsigned long tmp; |
2337 | unsigned int imbn; | ||
2338 | |||
2339 | small_imbalance: | ||
2340 | pwr_move = pwr_now = 0; | ||
2341 | imbn = 2; | ||
2342 | if (this_nr_running) { | ||
2343 | this_load_per_task /= this_nr_running; | ||
2344 | if (busiest_load_per_task > this_load_per_task) | ||
2345 | imbn = 1; | ||
2346 | } else | ||
2347 | this_load_per_task = SCHED_LOAD_SCALE; | ||
2043 | 2348 | ||
2044 | if (max_load - this_load >= SCHED_LOAD_SCALE*2) { | 2349 | if (max_load - this_load >= busiest_load_per_task * imbn) { |
2045 | *imbalance = 1; | 2350 | *imbalance = busiest_load_per_task; |
2046 | return busiest; | 2351 | return busiest; |
2047 | } | 2352 | } |
2048 | 2353 | ||
@@ -2052,39 +2357,47 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
2052 | * moving them. | 2357 | * moving them. |
2053 | */ | 2358 | */ |
2054 | 2359 | ||
2055 | pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load); | 2360 | pwr_now += busiest->cpu_power * |
2056 | pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load); | 2361 | min(busiest_load_per_task, max_load); |
2362 | pwr_now += this->cpu_power * | ||
2363 | min(this_load_per_task, this_load); | ||
2057 | pwr_now /= SCHED_LOAD_SCALE; | 2364 | pwr_now /= SCHED_LOAD_SCALE; |
2058 | 2365 | ||
2059 | /* Amount of load we'd subtract */ | 2366 | /* Amount of load we'd subtract */ |
2060 | tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power; | 2367 | tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power; |
2061 | if (max_load > tmp) | 2368 | if (max_load > tmp) |
2062 | pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE, | 2369 | pwr_move += busiest->cpu_power * |
2063 | max_load - tmp); | 2370 | min(busiest_load_per_task, max_load - tmp); |
2064 | 2371 | ||
2065 | /* Amount of load we'd add */ | 2372 | /* Amount of load we'd add */ |
2066 | if (max_load*busiest->cpu_power < | 2373 | if (max_load*busiest->cpu_power < |
2067 | SCHED_LOAD_SCALE*SCHED_LOAD_SCALE) | 2374 | busiest_load_per_task*SCHED_LOAD_SCALE) |
2068 | tmp = max_load*busiest->cpu_power/this->cpu_power; | 2375 | tmp = max_load*busiest->cpu_power/this->cpu_power; |
2069 | else | 2376 | else |
2070 | tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power; | 2377 | tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power; |
2071 | pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp); | 2378 | pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp); |
2072 | pwr_move /= SCHED_LOAD_SCALE; | 2379 | pwr_move /= SCHED_LOAD_SCALE; |
2073 | 2380 | ||
2074 | /* Move if we gain throughput */ | 2381 | /* Move if we gain throughput */ |
2075 | if (pwr_move <= pwr_now) | 2382 | if (pwr_move <= pwr_now) |
2076 | goto out_balanced; | 2383 | goto out_balanced; |
2077 | 2384 | ||
2078 | *imbalance = 1; | 2385 | *imbalance = busiest_load_per_task; |
2079 | return busiest; | ||
2080 | } | 2386 | } |
2081 | 2387 | ||
2082 | /* Get rid of the scaling factor, rounding down as we divide */ | ||
2083 | *imbalance = *imbalance / SCHED_LOAD_SCALE; | ||
2084 | return busiest; | 2388 | return busiest; |
2085 | 2389 | ||
2086 | out_balanced: | 2390 | out_balanced: |
2391 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
2392 | if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | ||
2393 | goto ret; | ||
2087 | 2394 | ||
2395 | if (this == group_leader && group_leader != group_min) { | ||
2396 | *imbalance = min_load_per_task; | ||
2397 | return group_min; | ||
2398 | } | ||
2399 | ret: | ||
2400 | #endif | ||
2088 | *imbalance = 0; | 2401 | *imbalance = 0; |
2089 | return NULL; | 2402 | return NULL; |
2090 | } | 2403 | } |
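Two consequences of the rework above are easy to miss. First, *imbalance is now expressed in weighted load and is no longer divided by SCHED_LOAD_SCALE on the way out, so it is directly comparable with rem_load_move in move_tasks(). Second, when the computed figure is smaller than the busiest group's average load per task, the old "*imbalance = 1" escape hatch becomes a real bump to busiest_load_per_task so that at least one task is worth moving. A condensed sketch of the sizing step (power-savings balancing and the pwr_now/pwr_move refinement are deliberately left out; min() as in the kernel):

    /* Sketch only - all quantities are weighted load. */
    static unsigned long size_imbalance(unsigned long max_load, unsigned long avg_load,
                                        unsigned long this_load,
                                        unsigned long busiest_load_per_task,
                                        unsigned long busiest_power,
                                        unsigned long this_power)
    {
            unsigned long max_pull, imbalance;

            /* Don't pull so much that the busiest group would go idle. */
            max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);

            imbalance = min(max_pull * busiest_power,
                            (avg_load - this_load) * this_power) / SCHED_LOAD_SCALE;

            /* Too small to guarantee movement: aim for one "average" task. */
            if (imbalance < busiest_load_per_task)
                    imbalance = busiest_load_per_task;

            return imbalance;
    }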
@@ -2093,18 +2406,21 @@ out_balanced: | |||
2093 | * find_busiest_queue - find the busiest runqueue among the cpus in group. | 2406 | * find_busiest_queue - find the busiest runqueue among the cpus in group. |
2094 | */ | 2407 | */ |
2095 | static runqueue_t *find_busiest_queue(struct sched_group *group, | 2408 | static runqueue_t *find_busiest_queue(struct sched_group *group, |
2096 | enum idle_type idle) | 2409 | enum idle_type idle, unsigned long imbalance) |
2097 | { | 2410 | { |
2098 | unsigned long load, max_load = 0; | 2411 | unsigned long max_load = 0; |
2099 | runqueue_t *busiest = NULL; | 2412 | runqueue_t *busiest = NULL, *rqi; |
2100 | int i; | 2413 | int i; |
2101 | 2414 | ||
2102 | for_each_cpu_mask(i, group->cpumask) { | 2415 | for_each_cpu_mask(i, group->cpumask) { |
2103 | load = source_load(i, 0); | 2416 | rqi = cpu_rq(i); |
2417 | |||
2418 | if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance) | ||
2419 | continue; | ||
2104 | 2420 | ||
2105 | if (load > max_load) { | 2421 | if (rqi->raw_weighted_load > max_load) { |
2106 | max_load = load; | 2422 | max_load = rqi->raw_weighted_load; |
2107 | busiest = cpu_rq(i); | 2423 | busiest = rqi; |
2108 | } | 2424 | } |
2109 | } | 2425 | } |
2110 | 2426 | ||
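find_busiest_queue() now ranks runqueues by raw_weighted_load and takes the requested imbalance so it can ignore queues that only look busy because of a single heavily weighted task: if a CPU runs exactly one task and that task's weight already exceeds what we want to move, pulling it would merely relocate the imbalance. For example (weights hypothetical), with an imbalance request of 512 a queue holding one nice -20 task of weight 3000 is passed over, while a queue holding three nice-0 tasks totalling 3072 remains a valid victim, because individual tasks there can still fit the budget.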
@@ -2117,6 +2433,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group, | |||
2117 | */ | 2433 | */ |
2118 | #define MAX_PINNED_INTERVAL 512 | 2434 | #define MAX_PINNED_INTERVAL 512 |
2119 | 2435 | ||
2436 | #define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0) | ||
2120 | /* | 2437 | /* |
2121 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | 2438 | * Check this_cpu to ensure it is balanced within domain. Attempt to move |
2122 | * tasks if there is an imbalance. | 2439 | * tasks if there is an imbalance. |
@@ -2133,7 +2450,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2133 | int active_balance = 0; | 2450 | int active_balance = 0; |
2134 | int sd_idle = 0; | 2451 | int sd_idle = 0; |
2135 | 2452 | ||
2136 | if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER) | 2453 | if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && |
2454 | !sched_smt_power_savings) | ||
2137 | sd_idle = 1; | 2455 | sd_idle = 1; |
2138 | 2456 | ||
2139 | schedstat_inc(sd, lb_cnt[idle]); | 2457 | schedstat_inc(sd, lb_cnt[idle]); |
@@ -2144,7 +2462,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2144 | goto out_balanced; | 2462 | goto out_balanced; |
2145 | } | 2463 | } |
2146 | 2464 | ||
2147 | busiest = find_busiest_queue(group, idle); | 2465 | busiest = find_busiest_queue(group, idle, imbalance); |
2148 | if (!busiest) { | 2466 | if (!busiest) { |
2149 | schedstat_inc(sd, lb_nobusyq[idle]); | 2467 | schedstat_inc(sd, lb_nobusyq[idle]); |
2150 | goto out_balanced; | 2468 | goto out_balanced; |
@@ -2164,6 +2482,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2164 | */ | 2482 | */ |
2165 | double_rq_lock(this_rq, busiest); | 2483 | double_rq_lock(this_rq, busiest); |
2166 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2484 | nr_moved = move_tasks(this_rq, this_cpu, busiest, |
2485 | minus_1_or_zero(busiest->nr_running), | ||
2167 | imbalance, sd, idle, &all_pinned); | 2486 | imbalance, sd, idle, &all_pinned); |
2168 | double_rq_unlock(this_rq, busiest); | 2487 | double_rq_unlock(this_rq, busiest); |
2169 | 2488 | ||
@@ -2221,7 +2540,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
2221 | sd->balance_interval *= 2; | 2540 | sd->balance_interval *= 2; |
2222 | } | 2541 | } |
2223 | 2542 | ||
2224 | if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER) | 2543 | if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
2544 | !sched_smt_power_savings) | ||
2225 | return -1; | 2545 | return -1; |
2226 | return nr_moved; | 2546 | return nr_moved; |
2227 | 2547 | ||
@@ -2236,7 +2556,7 @@ out_one_pinned: | |||
2236 | (sd->balance_interval < sd->max_interval)) | 2556 | (sd->balance_interval < sd->max_interval)) |
2237 | sd->balance_interval *= 2; | 2557 | sd->balance_interval *= 2; |
2238 | 2558 | ||
2239 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) | 2559 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) |
2240 | return -1; | 2560 | return -1; |
2241 | return 0; | 2561 | return 0; |
2242 | } | 2562 | } |
@@ -2257,7 +2577,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2257 | int nr_moved = 0; | 2577 | int nr_moved = 0; |
2258 | int sd_idle = 0; | 2578 | int sd_idle = 0; |
2259 | 2579 | ||
2260 | if (sd->flags & SD_SHARE_CPUPOWER) | 2580 | if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) |
2261 | sd_idle = 1; | 2581 | sd_idle = 1; |
2262 | 2582 | ||
2263 | schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); | 2583 | schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); |
@@ -2267,7 +2587,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2267 | goto out_balanced; | 2587 | goto out_balanced; |
2268 | } | 2588 | } |
2269 | 2589 | ||
2270 | busiest = find_busiest_queue(group, NEWLY_IDLE); | 2590 | busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance); |
2271 | if (!busiest) { | 2591 | if (!busiest) { |
2272 | schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); | 2592 | schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); |
2273 | goto out_balanced; | 2593 | goto out_balanced; |
@@ -2282,6 +2602,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2282 | /* Attempt to move tasks */ | 2602 | /* Attempt to move tasks */ |
2283 | double_lock_balance(this_rq, busiest); | 2603 | double_lock_balance(this_rq, busiest); |
2284 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2604 | nr_moved = move_tasks(this_rq, this_cpu, busiest, |
2605 | minus_1_or_zero(busiest->nr_running), | ||
2285 | imbalance, sd, NEWLY_IDLE, NULL); | 2606 | imbalance, sd, NEWLY_IDLE, NULL); |
2286 | spin_unlock(&busiest->lock); | 2607 | spin_unlock(&busiest->lock); |
2287 | } | 2608 | } |
@@ -2297,7 +2618,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
2297 | 2618 | ||
2298 | out_balanced: | 2619 | out_balanced: |
2299 | schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); | 2620 | schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); |
2300 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) | 2621 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) |
2301 | return -1; | 2622 | return -1; |
2302 | sd->nr_balance_failed = 0; | 2623 | sd->nr_balance_failed = 0; |
2303 | return 0; | 2624 | return 0; |
@@ -2352,17 +2673,19 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) | |||
2352 | double_lock_balance(busiest_rq, target_rq); | 2673 | double_lock_balance(busiest_rq, target_rq); |
2353 | 2674 | ||
2354 | /* Search for an sd spanning us and the target CPU. */ | 2675 | /* Search for an sd spanning us and the target CPU. */ |
2355 | for_each_domain(target_cpu, sd) | 2676 | for_each_domain(target_cpu, sd) { |
2356 | if ((sd->flags & SD_LOAD_BALANCE) && | 2677 | if ((sd->flags & SD_LOAD_BALANCE) && |
2357 | cpu_isset(busiest_cpu, sd->span)) | 2678 | cpu_isset(busiest_cpu, sd->span)) |
2358 | break; | 2679 | break; |
2680 | } | ||
2359 | 2681 | ||
2360 | if (unlikely(sd == NULL)) | 2682 | if (unlikely(sd == NULL)) |
2361 | goto out; | 2683 | goto out; |
2362 | 2684 | ||
2363 | schedstat_inc(sd, alb_cnt); | 2685 | schedstat_inc(sd, alb_cnt); |
2364 | 2686 | ||
2365 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL)) | 2687 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, |
2688 | RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL)) | ||
2366 | schedstat_inc(sd, alb_pushed); | 2689 | schedstat_inc(sd, alb_pushed); |
2367 | else | 2690 | else |
2368 | schedstat_inc(sd, alb_failed); | 2691 | schedstat_inc(sd, alb_failed); |
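Because move_tasks() now also caps on weighted load, active_load_balance() has to pass a load budget alongside its "move exactly one task" request. RTPRIO_TO_LOAD_WEIGHT(100) serves as an effectively unlimited budget: it corresponds to the weight of a maximum-rt-priority task, so no single task's load_weight should exceed it and the forced migration is never vetoed on load grounds. (The macro itself is defined outside this hunk; this reading is inferred from the call site.)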
@@ -2390,7 +2713,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq, | |||
2390 | struct sched_domain *sd; | 2713 | struct sched_domain *sd; |
2391 | int i; | 2714 | int i; |
2392 | 2715 | ||
2393 | this_load = this_rq->nr_running * SCHED_LOAD_SCALE; | 2716 | this_load = this_rq->raw_weighted_load; |
2394 | /* Update our load */ | 2717 | /* Update our load */ |
2395 | for (i = 0; i < 3; i++) { | 2718 | for (i = 0; i < 3; i++) { |
2396 | unsigned long new_load = this_load; | 2719 | unsigned long new_load = this_load; |
@@ -2691,48 +3014,35 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq) | |||
2691 | resched_task(rq->idle); | 3014 | resched_task(rq->idle); |
2692 | } | 3015 | } |
2693 | 3016 | ||
2694 | static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | 3017 | /* |
3018 | * Called with interrupt disabled and this_rq's runqueue locked. | ||
3019 | */ | ||
3020 | static void wake_sleeping_dependent(int this_cpu) | ||
2695 | { | 3021 | { |
2696 | struct sched_domain *tmp, *sd = NULL; | 3022 | struct sched_domain *tmp, *sd = NULL; |
2697 | cpumask_t sibling_map; | ||
2698 | int i; | 3023 | int i; |
2699 | 3024 | ||
2700 | for_each_domain(this_cpu, tmp) | 3025 | for_each_domain(this_cpu, tmp) { |
2701 | if (tmp->flags & SD_SHARE_CPUPOWER) | 3026 | if (tmp->flags & SD_SHARE_CPUPOWER) { |
2702 | sd = tmp; | 3027 | sd = tmp; |
3028 | break; | ||
3029 | } | ||
3030 | } | ||
2703 | 3031 | ||
2704 | if (!sd) | 3032 | if (!sd) |
2705 | return; | 3033 | return; |
2706 | 3034 | ||
2707 | /* | 3035 | for_each_cpu_mask(i, sd->span) { |
2708 | * Unlock the current runqueue because we have to lock in | ||
2709 | * CPU order to avoid deadlocks. Caller knows that we might | ||
2710 | * unlock. We keep IRQs disabled. | ||
2711 | */ | ||
2712 | spin_unlock(&this_rq->lock); | ||
2713 | |||
2714 | sibling_map = sd->span; | ||
2715 | |||
2716 | for_each_cpu_mask(i, sibling_map) | ||
2717 | spin_lock(&cpu_rq(i)->lock); | ||
2718 | /* | ||
2719 | * We clear this CPU from the mask. This both simplifies the | ||
2720 | * inner loop and keps this_rq locked when we exit: | ||
2721 | */ | ||
2722 | cpu_clear(this_cpu, sibling_map); | ||
2723 | |||
2724 | for_each_cpu_mask(i, sibling_map) { | ||
2725 | runqueue_t *smt_rq = cpu_rq(i); | 3036 | runqueue_t *smt_rq = cpu_rq(i); |
2726 | 3037 | ||
3038 | if (i == this_cpu) | ||
3039 | continue; | ||
3040 | if (unlikely(!spin_trylock(&smt_rq->lock))) | ||
3041 | continue; | ||
3042 | |||
2727 | wakeup_busy_runqueue(smt_rq); | 3043 | wakeup_busy_runqueue(smt_rq); |
3044 | spin_unlock(&smt_rq->lock); | ||
2728 | } | 3045 | } |
2729 | |||
2730 | for_each_cpu_mask(i, sibling_map) | ||
2731 | spin_unlock(&cpu_rq(i)->lock); | ||
2732 | /* | ||
2733 | * We exit with this_cpu's rq still held and IRQs | ||
2734 | * still disabled: | ||
2735 | */ | ||
2736 | } | 3046 | } |
2737 | 3047 | ||
2738 | /* | 3048 | /* |
@@ -2745,52 +3055,46 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) | |||
2745 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; | 3055 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; |
2746 | } | 3056 | } |
2747 | 3057 | ||
2748 | static int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | 3058 | /* |
3059 | * To minimise lock contention and not have to drop this_rq's runlock we only | ||
3060 | * trylock the sibling runqueues and bypass those runqueues if we fail to | ||
3061 | * acquire their lock. As we only trylock the normal locking order does not | ||
3062 | * need to be obeyed. | ||
3063 | */ | ||
3064 | static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) | ||
2749 | { | 3065 | { |
2750 | struct sched_domain *tmp, *sd = NULL; | 3066 | struct sched_domain *tmp, *sd = NULL; |
2751 | cpumask_t sibling_map; | ||
2752 | prio_array_t *array; | ||
2753 | int ret = 0, i; | 3067 | int ret = 0, i; |
2754 | task_t *p; | ||
2755 | 3068 | ||
2756 | for_each_domain(this_cpu, tmp) | 3069 | /* kernel/rt threads do not participate in dependent sleeping */ |
2757 | if (tmp->flags & SD_SHARE_CPUPOWER) | 3070 | if (!p->mm || rt_task(p)) |
3071 | return 0; | ||
3072 | |||
3073 | for_each_domain(this_cpu, tmp) { | ||
3074 | if (tmp->flags & SD_SHARE_CPUPOWER) { | ||
2758 | sd = tmp; | 3075 | sd = tmp; |
3076 | break; | ||
3077 | } | ||
3078 | } | ||
2759 | 3079 | ||
2760 | if (!sd) | 3080 | if (!sd) |
2761 | return 0; | 3081 | return 0; |
2762 | 3082 | ||
2763 | /* | 3083 | for_each_cpu_mask(i, sd->span) { |
2764 | * The same locking rules and details apply as for | 3084 | runqueue_t *smt_rq; |
2765 | * wake_sleeping_dependent(): | 3085 | task_t *smt_curr; |
2766 | */ | ||
2767 | spin_unlock(&this_rq->lock); | ||
2768 | sibling_map = sd->span; | ||
2769 | for_each_cpu_mask(i, sibling_map) | ||
2770 | spin_lock(&cpu_rq(i)->lock); | ||
2771 | cpu_clear(this_cpu, sibling_map); | ||
2772 | 3086 | ||
2773 | /* | 3087 | if (i == this_cpu) |
2774 | * Establish next task to be run - it might have gone away because | 3088 | continue; |
2775 | * we released the runqueue lock above: | ||
2776 | */ | ||
2777 | if (!this_rq->nr_running) | ||
2778 | goto out_unlock; | ||
2779 | array = this_rq->active; | ||
2780 | if (!array->nr_active) | ||
2781 | array = this_rq->expired; | ||
2782 | BUG_ON(!array->nr_active); | ||
2783 | 3089 | ||
2784 | p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next, | 3090 | smt_rq = cpu_rq(i); |
2785 | task_t, run_list); | 3091 | if (unlikely(!spin_trylock(&smt_rq->lock))) |
3092 | continue; | ||
2786 | 3093 | ||
2787 | for_each_cpu_mask(i, sibling_map) { | 3094 | smt_curr = smt_rq->curr; |
2788 | runqueue_t *smt_rq = cpu_rq(i); | ||
2789 | task_t *smt_curr = smt_rq->curr; | ||
2790 | 3095 | ||
2791 | /* Kernel threads do not participate in dependent sleeping */ | 3096 | if (!smt_curr->mm) |
2792 | if (!p->mm || !smt_curr->mm || rt_task(p)) | 3097 | goto unlock; |
2793 | goto check_smt_task; | ||
2794 | 3098 | ||
2795 | /* | 3099 | /* |
2796 | * If a user task with lower static priority than the | 3100 | * If a user task with lower static priority than the |
@@ -2808,49 +3112,24 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | |||
2808 | if ((jiffies % DEF_TIMESLICE) > | 3112 | if ((jiffies % DEF_TIMESLICE) > |
2809 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | 3113 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) |
2810 | ret = 1; | 3114 | ret = 1; |
2811 | } else | 3115 | } else { |
2812 | if (smt_curr->static_prio < p->static_prio && | 3116 | if (smt_curr->static_prio < p->static_prio && |
2813 | !TASK_PREEMPTS_CURR(p, smt_rq) && | 3117 | !TASK_PREEMPTS_CURR(p, smt_rq) && |
2814 | smt_slice(smt_curr, sd) > task_timeslice(p)) | 3118 | smt_slice(smt_curr, sd) > task_timeslice(p)) |
2815 | ret = 1; | 3119 | ret = 1; |
2816 | |||
2817 | check_smt_task: | ||
2818 | if ((!smt_curr->mm && smt_curr != smt_rq->idle) || | ||
2819 | rt_task(smt_curr)) | ||
2820 | continue; | ||
2821 | if (!p->mm) { | ||
2822 | wakeup_busy_runqueue(smt_rq); | ||
2823 | continue; | ||
2824 | } | ||
2825 | |||
2826 | /* | ||
2827 | * Reschedule a lower priority task on the SMT sibling for | ||
2828 | * it to be put to sleep, or wake it up if it has been put to | ||
2829 | * sleep for priority reasons to see if it should run now. | ||
2830 | */ | ||
2831 | if (rt_task(p)) { | ||
2832 | if ((jiffies % DEF_TIMESLICE) > | ||
2833 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | ||
2834 | resched_task(smt_curr); | ||
2835 | } else { | ||
2836 | if (TASK_PREEMPTS_CURR(p, smt_rq) && | ||
2837 | smt_slice(p, sd) > task_timeslice(smt_curr)) | ||
2838 | resched_task(smt_curr); | ||
2839 | else | ||
2840 | wakeup_busy_runqueue(smt_rq); | ||
2841 | } | 3120 | } |
3121 | unlock: | ||
3122 | spin_unlock(&smt_rq->lock); | ||
2842 | } | 3123 | } |
2843 | out_unlock: | ||
2844 | for_each_cpu_mask(i, sibling_map) | ||
2845 | spin_unlock(&cpu_rq(i)->lock); | ||
2846 | return ret; | 3124 | return ret; |
2847 | } | 3125 | } |
2848 | #else | 3126 | #else |
2849 | static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | 3127 | static inline void wake_sleeping_dependent(int this_cpu) |
2850 | { | 3128 | { |
2851 | } | 3129 | } |
2852 | 3130 | ||
2853 | static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | 3131 | static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq, |
3132 | task_t *p) | ||
2854 | { | 3133 | { |
2855 | return 0; | 3134 | return 0; |
2856 | } | 3135 | } |
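dependent_sleeper() is now handed the already-chosen next task and only trylocks sibling runqueues, so it never has to drop this_rq's lock; siblings whose lock cannot be taken are simply skipped for this pass. The throttling test itself still pivots on smt_slice(), shown in context above: a sibling may keep shadowing a lower-priority thread only for the fraction of its timeslice that SMT sharing is judged to cost. A worked example with assumed numbers - a per_cpu_gain of 25 and a 100 ms remaining slice, neither of which is necessarily the default: smt_slice() then hands back 100 * (100 - 25) / 100 = 75 ms for the sibling's higher-priority task; if that still exceeds next's own timeslice, next is parked (ret = 1) and this CPU idles rather than degrade the sibling.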
@@ -2972,32 +3251,13 @@ need_resched_nonpreemptible: | |||
2972 | 3251 | ||
2973 | cpu = smp_processor_id(); | 3252 | cpu = smp_processor_id(); |
2974 | if (unlikely(!rq->nr_running)) { | 3253 | if (unlikely(!rq->nr_running)) { |
2975 | go_idle: | ||
2976 | idle_balance(cpu, rq); | 3254 | idle_balance(cpu, rq); |
2977 | if (!rq->nr_running) { | 3255 | if (!rq->nr_running) { |
2978 | next = rq->idle; | 3256 | next = rq->idle; |
2979 | rq->expired_timestamp = 0; | 3257 | rq->expired_timestamp = 0; |
2980 | wake_sleeping_dependent(cpu, rq); | 3258 | wake_sleeping_dependent(cpu); |
2981 | /* | ||
2982 | * wake_sleeping_dependent() might have released | ||
2983 | * the runqueue, so break out if we got new | ||
2984 | * tasks meanwhile: | ||
2985 | */ | ||
2986 | if (!rq->nr_running) | ||
2987 | goto switch_tasks; | ||
2988 | } | ||
2989 | } else { | ||
2990 | if (dependent_sleeper(cpu, rq)) { | ||
2991 | next = rq->idle; | ||
2992 | goto switch_tasks; | 3259 | goto switch_tasks; |
2993 | } | 3260 | } |
2994 | /* | ||
2995 | * dependent_sleeper() releases and reacquires the runqueue | ||
2996 | * lock, hence go into the idle loop if the rq went | ||
2997 | * empty meanwhile: | ||
2998 | */ | ||
2999 | if (unlikely(!rq->nr_running)) | ||
3000 | goto go_idle; | ||
3001 | } | 3261 | } |
3002 | 3262 | ||
3003 | array = rq->active; | 3263 | array = rq->active; |
@@ -3035,6 +3295,8 @@ go_idle: | |||
3035 | } | 3295 | } |
3036 | } | 3296 | } |
3037 | next->sleep_type = SLEEP_NORMAL; | 3297 | next->sleep_type = SLEEP_NORMAL; |
3298 | if (dependent_sleeper(cpu, rq, next)) | ||
3299 | next = rq->idle; | ||
3038 | switch_tasks: | 3300 | switch_tasks: |
3039 | if (next == rq->idle) | 3301 | if (next == rq->idle) |
3040 | schedstat_inc(rq, sched_goidle); | 3302 | schedstat_inc(rq, sched_goidle); |
@@ -3478,12 +3740,65 @@ long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) | |||
3478 | 3740 | ||
3479 | EXPORT_SYMBOL(sleep_on_timeout); | 3741 | EXPORT_SYMBOL(sleep_on_timeout); |
3480 | 3742 | ||
3743 | #ifdef CONFIG_RT_MUTEXES | ||
3744 | |||
3745 | /* | ||
3746 | * rt_mutex_setprio - set the current priority of a task | ||
3747 | * @p: task | ||
3748 | * @prio: prio value (kernel-internal form) | ||
3749 | * | ||
3750 | * This function changes the 'effective' priority of a task. It does | ||
3751 | * not touch ->normal_prio like __setscheduler(). | ||
3752 | * | ||
3753 | * Used by the rt_mutex code to implement priority inheritance logic. | ||
3754 | */ | ||
3755 | void rt_mutex_setprio(task_t *p, int prio) | ||
3756 | { | ||
3757 | unsigned long flags; | ||
3758 | prio_array_t *array; | ||
3759 | runqueue_t *rq; | ||
3760 | int oldprio; | ||
3761 | |||
3762 | BUG_ON(prio < 0 || prio > MAX_PRIO); | ||
3763 | |||
3764 | rq = task_rq_lock(p, &flags); | ||
3765 | |||
3766 | oldprio = p->prio; | ||
3767 | array = p->array; | ||
3768 | if (array) | ||
3769 | dequeue_task(p, array); | ||
3770 | p->prio = prio; | ||
3771 | |||
3772 | if (array) { | ||
3773 | /* | ||
3774 | * If changing to an RT priority then queue it | ||
3775 | * in the active array! | ||
3776 | */ | ||
3777 | if (rt_task(p)) | ||
3778 | array = rq->active; | ||
3779 | enqueue_task(p, array); | ||
3780 | /* | ||
3781 | * Reschedule if we are currently running on this runqueue and | ||
3782 | * our priority decreased, or if we are not currently running on | ||
3783 | * this runqueue and our priority is higher than the current's | ||
3784 | */ | ||
3785 | if (task_running(rq, p)) { | ||
3786 | if (p->prio > oldprio) | ||
3787 | resched_task(rq->curr); | ||
3788 | } else if (TASK_PREEMPTS_CURR(p, rq)) | ||
3789 | resched_task(rq->curr); | ||
3790 | } | ||
3791 | task_rq_unlock(rq, &flags); | ||
3792 | } | ||
3793 | |||
3794 | #endif | ||
3795 | |||
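rt_mutex_setprio() is the scheduler half of priority inheritance: it rewrites only p->prio, leaves p->normal_prio and p->static_prio untouched, and requeues the task (into the active array when boosting into the RT range). The rt_mutex side that calls it is not part of this section; schematically the pairing it expects looks like the sketch below, where task_top_pi_waiter_prio() is a hypothetical stand-in for "kernel-internal prio of the highest-priority waiter" (lower value = higher priority):

    /* Sketch only - not the rt_mutex implementation. */
    static void boost_owner(task_t *owner)
    {
            int prio = min(task_top_pi_waiter_prio(owner), owner->normal_prio);

            if (prio != owner->prio)
                    rt_mutex_setprio(owner, prio);   /* inherit the top waiter's prio */
    }

    static void deboost_owner(task_t *owner)
    {
            /* no boosting waiters left: fall back to the task's own priority */
            rt_mutex_setprio(owner, owner->normal_prio);
    }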
3481 | void set_user_nice(task_t *p, long nice) | 3796 | void set_user_nice(task_t *p, long nice) |
3482 | { | 3797 | { |
3483 | unsigned long flags; | 3798 | unsigned long flags; |
3484 | prio_array_t *array; | 3799 | prio_array_t *array; |
3485 | runqueue_t *rq; | 3800 | runqueue_t *rq; |
3486 | int old_prio, new_prio, delta; | 3801 | int old_prio, delta; |
3487 | 3802 | ||
3488 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) | 3803 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) |
3489 | return; | 3804 | return; |
@@ -3498,22 +3813,25 @@ void set_user_nice(task_t *p, long nice) | |||
3498 | * it won't have any effect on scheduling until the task is | 3813 | * it won't have any effect on scheduling until the task is |
3499 | * not SCHED_NORMAL/SCHED_BATCH: | 3814 | * not SCHED_NORMAL/SCHED_BATCH: |
3500 | */ | 3815 | */ |
3501 | if (rt_task(p)) { | 3816 | if (has_rt_policy(p)) { |
3502 | p->static_prio = NICE_TO_PRIO(nice); | 3817 | p->static_prio = NICE_TO_PRIO(nice); |
3503 | goto out_unlock; | 3818 | goto out_unlock; |
3504 | } | 3819 | } |
3505 | array = p->array; | 3820 | array = p->array; |
3506 | if (array) | 3821 | if (array) { |
3507 | dequeue_task(p, array); | 3822 | dequeue_task(p, array); |
3823 | dec_raw_weighted_load(rq, p); | ||
3824 | } | ||
3508 | 3825 | ||
3509 | old_prio = p->prio; | ||
3510 | new_prio = NICE_TO_PRIO(nice); | ||
3511 | delta = new_prio - old_prio; | ||
3512 | p->static_prio = NICE_TO_PRIO(nice); | 3826 | p->static_prio = NICE_TO_PRIO(nice); |
3513 | p->prio += delta; | 3827 | set_load_weight(p); |
3828 | old_prio = p->prio; | ||
3829 | p->prio = effective_prio(p); | ||
3830 | delta = p->prio - old_prio; | ||
3514 | 3831 | ||
3515 | if (array) { | 3832 | if (array) { |
3516 | enqueue_task(p, array); | 3833 | enqueue_task(p, array); |
3834 | inc_raw_weighted_load(rq, p); | ||
3517 | /* | 3835 | /* |
3518 | * If the task increased its priority or is running and | 3836 | * If the task increased its priority or is running and |
3519 | * lowered its priority, then reschedule its CPU: | 3837 | * lowered its priority, then reschedule its CPU: |
@@ -3524,7 +3842,6 @@ void set_user_nice(task_t *p, long nice) | |||
3524 | out_unlock: | 3842 | out_unlock: |
3525 | task_rq_unlock(rq, &flags); | 3843 | task_rq_unlock(rq, &flags); |
3526 | } | 3844 | } |
3527 | |||
3528 | EXPORT_SYMBOL(set_user_nice); | 3845 | EXPORT_SYMBOL(set_user_nice); |
3529 | 3846 | ||
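set_user_nice() now refreshes three related quantities instead of one: static_prio, the derived load_weight (via set_load_weight(), whose body lies outside this section) and the effective prio, while keeping rq->raw_weighted_load consistent by pairing the dequeue with dec_raw_weighted_load() and the enqueue with inc_raw_weighted_load(). One plausible shape for the weight mapping - an assumption, not quoted from the patch - scales the weight by the task's default timeslice so that a nice-0 task weighs exactly SCHED_LOAD_SCALE; static_prio_timeslice() below stands for "the default timeslice for this static priority":

    /* Assumed sketch of the nice -> load_weight mapping. */
    static void set_load_weight_sketch(task_t *p)
    {
            if (has_rt_policy(p))
                    p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
            else
                    p->load_weight = static_prio_timeslice(p->static_prio) *
                                        SCHED_LOAD_SCALE / DEF_TIMESLICE;
    }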
3530 | /* | 3847 | /* |
@@ -3639,16 +3956,15 @@ static void __setscheduler(struct task_struct *p, int policy, int prio) | |||
3639 | BUG_ON(p->array); | 3956 | BUG_ON(p->array); |
3640 | p->policy = policy; | 3957 | p->policy = policy; |
3641 | p->rt_priority = prio; | 3958 | p->rt_priority = prio; |
3642 | if (policy != SCHED_NORMAL && policy != SCHED_BATCH) { | 3959 | p->normal_prio = normal_prio(p); |
3643 | p->prio = MAX_RT_PRIO-1 - p->rt_priority; | 3960 | /* we are holding p->pi_lock already */ |
3644 | } else { | 3961 | p->prio = rt_mutex_getprio(p); |
3645 | p->prio = p->static_prio; | 3962 | /* |
3646 | /* | 3963 | * SCHED_BATCH tasks are treated as perpetual CPU hogs: |
3647 | * SCHED_BATCH tasks are treated as perpetual CPU hogs: | 3964 | */ |
3648 | */ | 3965 | if (policy == SCHED_BATCH) |
3649 | if (policy == SCHED_BATCH) | 3966 | p->sleep_avg = 0; |
3650 | p->sleep_avg = 0; | 3967 | set_load_weight(p); |
3651 | } | ||
3652 | } | 3968 | } |
3653 | 3969 | ||
3654 | /** | 3970 | /** |
@@ -3667,6 +3983,8 @@ int sched_setscheduler(struct task_struct *p, int policy, | |||
3667 | unsigned long flags; | 3983 | unsigned long flags; |
3668 | runqueue_t *rq; | 3984 | runqueue_t *rq; |
3669 | 3985 | ||
3986 | /* may grab non-irq protected spin_locks */ | ||
3987 | BUG_ON(in_interrupt()); | ||
3670 | recheck: | 3988 | recheck: |
3671 | /* double check policy once rq lock held */ | 3989 | /* double check policy once rq lock held */ |
3672 | if (policy < 0) | 3990 | if (policy < 0) |
@@ -3715,14 +4033,20 @@ recheck: | |||
3715 | if (retval) | 4033 | if (retval) |
3716 | return retval; | 4034 | return retval; |
3717 | /* | 4035 | /* |
4036 | * make sure no PI-waiters arrive (or leave) while we are | ||
4037 | * changing the priority of the task: | ||
4038 | */ | ||
4039 | spin_lock_irqsave(&p->pi_lock, flags); | ||
4040 | /* | ||
3718 | * To be able to change p->policy safely, the appropriate | 4041 | * To be able to change p->policy safely, the appropriate |
3719 | * runqueue lock must be held. | 4042 | * runqueue lock must be held. |
3720 | */ | 4043 | */ |
3721 | rq = task_rq_lock(p, &flags); | 4044 | rq = __task_rq_lock(p); |
3722 | /* recheck policy now with rq lock held */ | 4045 | /* recheck policy now with rq lock held */ |
3723 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 4046 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
3724 | policy = oldpolicy = -1; | 4047 | policy = oldpolicy = -1; |
3725 | task_rq_unlock(rq, &flags); | 4048 | __task_rq_unlock(rq); |
4049 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
3726 | goto recheck; | 4050 | goto recheck; |
3727 | } | 4051 | } |
3728 | array = p->array; | 4052 | array = p->array; |
@@ -3743,7 +4067,11 @@ recheck: | |||
3743 | } else if (TASK_PREEMPTS_CURR(p, rq)) | 4067 | } else if (TASK_PREEMPTS_CURR(p, rq)) |
3744 | resched_task(rq->curr); | 4068 | resched_task(rq->curr); |
3745 | } | 4069 | } |
3746 | task_rq_unlock(rq, &flags); | 4070 | __task_rq_unlock(rq); |
4071 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
4072 | |||
4073 | rt_mutex_adjust_pi(p); | ||
4074 | |||
3747 | return 0; | 4075 | return 0; |
3748 | } | 4076 | } |
3749 | EXPORT_SYMBOL_GPL(sched_setscheduler); | 4077 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
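With p->pi_lock taken before the runqueue lock and rt_mutex_adjust_pi() called after both are released, sched_setscheduler() must not be used from interrupt context any more - hence the new BUG_ON(in_interrupt()). A minimal in-kernel usage sketch, with an arbitrary priority value:

    /* Sketch: promote a kernel thread to SCHED_FIFO from process context. */
    static int make_thread_fifo(struct task_struct *tsk)
    {
            struct sched_param param = { .sched_priority = 50 };    /* arbitrary */

            return sched_setscheduler(tsk, SCHED_FIFO, &param);
    }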
@@ -3765,8 +4093,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
3765 | read_unlock_irq(&tasklist_lock); | 4093 | read_unlock_irq(&tasklist_lock); |
3766 | return -ESRCH; | 4094 | return -ESRCH; |
3767 | } | 4095 | } |
3768 | retval = sched_setscheduler(p, policy, &lparam); | 4096 | get_task_struct(p); |
3769 | read_unlock_irq(&tasklist_lock); | 4097 | read_unlock_irq(&tasklist_lock); |
4098 | retval = sched_setscheduler(p, policy, &lparam); | ||
4099 | put_task_struct(p); | ||
3770 | return retval; | 4100 | return retval; |
3771 | } | 4101 | } |
3772 | 4102 | ||
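Because sched_setscheduler() can now block on p->pi_lock and end up in rt_mutex_adjust_pi(), it may no longer be called with tasklist_lock held; the caller instead pins the task with a reference before dropping the lock. The resulting shape of do_sched_setscheduler(), as a fragment (declarations as in the surrounding function; find_process_by_pid() is assumed to be the pid lookup helper already used in sched.c):

    read_lock_irq(&tasklist_lock);
    p = find_process_by_pid(pid);
    if (!p) {
        read_unlock_irq(&tasklist_lock);
        return -ESRCH;
    }
    get_task_struct(p);                /* keep p alive once the lock is gone */
    read_unlock_irq(&tasklist_lock);

    retval = sched_setscheduler(p, policy, &lparam);   /* may take p->pi_lock */
    put_task_struct(p);
    return retval;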
@@ -4378,7 +4708,7 @@ void __devinit init_idle(task_t *idle, int cpu) | |||
4378 | idle->timestamp = sched_clock(); | 4708 | idle->timestamp = sched_clock(); |
4379 | idle->sleep_avg = 0; | 4709 | idle->sleep_avg = 0; |
4380 | idle->array = NULL; | 4710 | idle->array = NULL; |
4381 | idle->prio = MAX_PRIO; | 4711 | idle->prio = idle->normal_prio = MAX_PRIO; |
4382 | idle->state = TASK_RUNNING; | 4712 | idle->state = TASK_RUNNING; |
4383 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 4713 | idle->cpus_allowed = cpumask_of_cpu(cpu); |
4384 | set_task_cpu(idle, cpu); | 4714 | set_task_cpu(idle, cpu); |
@@ -4474,13 +4804,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed); | |||
4474 | * | 4804 | * |
4475 | * So we race with normal scheduler movements, but that's OK, as long | 4805 | * So we race with normal scheduler movements, but that's OK, as long |
4476 | * as the task is no longer on this CPU. | 4806 | * as the task is no longer on this CPU. |
4807 | * | ||
4808 | * Returns non-zero if task was successfully migrated. | ||
4477 | */ | 4809 | */ |
4478 | static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | 4810 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
4479 | { | 4811 | { |
4480 | runqueue_t *rq_dest, *rq_src; | 4812 | runqueue_t *rq_dest, *rq_src; |
4813 | int ret = 0; | ||
4481 | 4814 | ||
4482 | if (unlikely(cpu_is_offline(dest_cpu))) | 4815 | if (unlikely(cpu_is_offline(dest_cpu))) |
4483 | return; | 4816 | return ret; |
4484 | 4817 | ||
4485 | rq_src = cpu_rq(src_cpu); | 4818 | rq_src = cpu_rq(src_cpu); |
4486 | rq_dest = cpu_rq(dest_cpu); | 4819 | rq_dest = cpu_rq(dest_cpu); |
@@ -4508,9 +4841,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
4508 | if (TASK_PREEMPTS_CURR(p, rq_dest)) | 4841 | if (TASK_PREEMPTS_CURR(p, rq_dest)) |
4509 | resched_task(rq_dest->curr); | 4842 | resched_task(rq_dest->curr); |
4510 | } | 4843 | } |
4511 | 4844 | ret = 1; | |
4512 | out: | 4845 | out: |
4513 | double_rq_unlock(rq_src, rq_dest); | 4846 | double_rq_unlock(rq_src, rq_dest); |
4847 | return ret; | ||
4514 | } | 4848 | } |
4515 | 4849 | ||
4516 | /* | 4850 | /* |
@@ -4580,9 +4914,12 @@ wait_to_die: | |||
4580 | /* Figure out where task on dead CPU should go, use force if necessary. */ | 4914 | /* Figure out where task on dead CPU should go, use force if necessary. */ |
4581 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | 4915 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) |
4582 | { | 4916 | { |
4917 | runqueue_t *rq; | ||
4918 | unsigned long flags; | ||
4583 | int dest_cpu; | 4919 | int dest_cpu; |
4584 | cpumask_t mask; | 4920 | cpumask_t mask; |
4585 | 4921 | ||
4922 | restart: | ||
4586 | /* On same node? */ | 4923 | /* On same node? */ |
4587 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 4924 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); |
4588 | cpus_and(mask, mask, tsk->cpus_allowed); | 4925 | cpus_and(mask, mask, tsk->cpus_allowed); |
@@ -4594,8 +4931,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | |||
4594 | 4931 | ||
4595 | /* No more Mr. Nice Guy. */ | 4932 | /* No more Mr. Nice Guy. */ |
4596 | if (dest_cpu == NR_CPUS) { | 4933 | if (dest_cpu == NR_CPUS) { |
4934 | rq = task_rq_lock(tsk, &flags); | ||
4597 | cpus_setall(tsk->cpus_allowed); | 4935 | cpus_setall(tsk->cpus_allowed); |
4598 | dest_cpu = any_online_cpu(tsk->cpus_allowed); | 4936 | dest_cpu = any_online_cpu(tsk->cpus_allowed); |
4937 | task_rq_unlock(rq, &flags); | ||
4599 | 4938 | ||
4600 | /* | 4939 | /* |
4601 | * Don't tell them about moving exiting tasks or | 4940 | * Don't tell them about moving exiting tasks or |
@@ -4607,7 +4946,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | |||
4607 | "longer affine to cpu%d\n", | 4946 | "longer affine to cpu%d\n", |
4608 | tsk->pid, tsk->comm, dead_cpu); | 4947 | tsk->pid, tsk->comm, dead_cpu); |
4609 | } | 4948 | } |
4610 | __migrate_task(tsk, dead_cpu, dest_cpu); | 4949 | if (!__migrate_task(tsk, dead_cpu, dest_cpu)) |
4950 | goto restart; | ||
4611 | } | 4951 | } |
4612 | 4952 | ||
4613 | /* | 4953 | /* |
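For example, if the chosen dest_cpu is itself hot-unplugged between the CPU selection and the double runqueue lock inside __migrate_task(), the function now returns 0 instead of silently doing nothing, and move_task_off_dead_cpu() jumps back to its restart: label to redo the selection (same node first, then any allowed CPU, then the widened last-resort mask). The loop only exits once a migration actually succeeds, so the task is guaranteed to be off the dead CPU when the function returns.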
@@ -4734,8 +5074,9 @@ static void migrate_dead_tasks(unsigned int dead_cpu) | |||
4734 | * migration_call - callback that gets triggered when a CPU is added. | 5074 | * migration_call - callback that gets triggered when a CPU is added. |
4735 | * Here we can start up the necessary migration thread for the new CPU. | 5075 | * Here we can start up the necessary migration thread for the new CPU. |
4736 | */ | 5076 | */ |
4737 | static int migration_call(struct notifier_block *nfb, unsigned long action, | 5077 | static int __cpuinit migration_call(struct notifier_block *nfb, |
4738 | void *hcpu) | 5078 | unsigned long action, |
5079 | void *hcpu) | ||
4739 | { | 5080 | { |
4740 | int cpu = (long)hcpu; | 5081 | int cpu = (long)hcpu; |
4741 | struct task_struct *p; | 5082 | struct task_struct *p; |
@@ -4805,7 +5146,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, | |||
4805 | /* Register at highest priority so that task migration (migrate_all_tasks) | 5146 | /* Register at highest priority so that task migration (migrate_all_tasks) |
4806 | * happens before everything else. | 5147 | * happens before everything else. |
4807 | */ | 5148 | */ |
4808 | static struct notifier_block migration_notifier = { | 5149 | static struct notifier_block __cpuinitdata migration_notifier = { |
4809 | .notifier_call = migration_call, | 5150 | .notifier_call = migration_call, |
4810 | .priority = 10 | 5151 | .priority = 10 |
4811 | }; | 5152 | }; |
@@ -5606,6 +5947,7 @@ static cpumask_t sched_domain_node_span(int node) | |||
5606 | } | 5947 | } |
5607 | #endif | 5948 | #endif |
5608 | 5949 | ||
5950 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | ||
5609 | /* | 5951 | /* |
5610 | * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we | 5952 | * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we |
5611 | * can switch it on easily if needed. | 5953 | * can switch it on easily if needed. |
@@ -5621,7 +5963,7 @@ static int cpu_to_cpu_group(int cpu) | |||
5621 | 5963 | ||
5622 | #ifdef CONFIG_SCHED_MC | 5964 | #ifdef CONFIG_SCHED_MC |
5623 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 5965 | static DEFINE_PER_CPU(struct sched_domain, core_domains); |
5624 | static struct sched_group sched_group_core[NR_CPUS]; | 5966 | static struct sched_group *sched_group_core_bycpu[NR_CPUS]; |
5625 | #endif | 5967 | #endif |
5626 | 5968 | ||
5627 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 5969 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
@@ -5637,7 +5979,7 @@ static int cpu_to_core_group(int cpu) | |||
5637 | #endif | 5979 | #endif |
5638 | 5980 | ||
5639 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 5981 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); |
5640 | static struct sched_group sched_group_phys[NR_CPUS]; | 5982 | static struct sched_group *sched_group_phys_bycpu[NR_CPUS]; |
5641 | static int cpu_to_phys_group(int cpu) | 5983 | static int cpu_to_phys_group(int cpu) |
5642 | { | 5984 | { |
5643 | #if defined(CONFIG_SCHED_MC) | 5985 | #if defined(CONFIG_SCHED_MC) |
@@ -5694,13 +6036,74 @@ next_sg: | |||
5694 | } | 6036 | } |
5695 | #endif | 6037 | #endif |
5696 | 6038 | ||
6039 | /* Free memory allocated for various sched_group structures */ | ||
6040 | static void free_sched_groups(const cpumask_t *cpu_map) | ||
6041 | { | ||
6042 | int cpu; | ||
6043 | #ifdef CONFIG_NUMA | ||
6044 | int i; | ||
6045 | |||
6046 | for_each_cpu_mask(cpu, *cpu_map) { | ||
6047 | struct sched_group *sched_group_allnodes | ||
6048 | = sched_group_allnodes_bycpu[cpu]; | ||
6049 | struct sched_group **sched_group_nodes | ||
6050 | = sched_group_nodes_bycpu[cpu]; | ||
6051 | |||
6052 | if (sched_group_allnodes) { | ||
6053 | kfree(sched_group_allnodes); | ||
6054 | sched_group_allnodes_bycpu[cpu] = NULL; | ||
6055 | } | ||
6056 | |||
6057 | if (!sched_group_nodes) | ||
6058 | continue; | ||
6059 | |||
6060 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
6061 | cpumask_t nodemask = node_to_cpumask(i); | ||
6062 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | ||
6063 | |||
6064 | cpus_and(nodemask, nodemask, *cpu_map); | ||
6065 | if (cpus_empty(nodemask)) | ||
6066 | continue; | ||
6067 | |||
6068 | if (sg == NULL) | ||
6069 | continue; | ||
6070 | sg = sg->next; | ||
6071 | next_sg: | ||
6072 | oldsg = sg; | ||
6073 | sg = sg->next; | ||
6074 | kfree(oldsg); | ||
6075 | if (oldsg != sched_group_nodes[i]) | ||
6076 | goto next_sg; | ||
6077 | } | ||
6078 | kfree(sched_group_nodes); | ||
6079 | sched_group_nodes_bycpu[cpu] = NULL; | ||
6080 | } | ||
6081 | #endif | ||
6082 | for_each_cpu_mask(cpu, *cpu_map) { | ||
6083 | if (sched_group_phys_bycpu[cpu]) { | ||
6084 | kfree(sched_group_phys_bycpu[cpu]); | ||
6085 | sched_group_phys_bycpu[cpu] = NULL; | ||
6086 | } | ||
6087 | #ifdef CONFIG_SCHED_MC | ||
6088 | if (sched_group_core_bycpu[cpu]) { | ||
6089 | kfree(sched_group_core_bycpu[cpu]); | ||
6090 | sched_group_core_bycpu[cpu] = NULL; | ||
6091 | } | ||
6092 | #endif | ||
6093 | } | ||
6094 | } | ||
6095 | |||
5697 | /* | 6096 | /* |
5698 | * Build sched domains for a given set of cpus and attach the sched domains | 6097 | * Build sched domains for a given set of cpus and attach the sched domains |
5699 | * to the individual cpus | 6098 | * to the individual cpus |
5700 | */ | 6099 | */ |
5701 | void build_sched_domains(const cpumask_t *cpu_map) | 6100 | static int build_sched_domains(const cpumask_t *cpu_map) |
5702 | { | 6101 | { |
5703 | int i; | 6102 | int i; |
6103 | struct sched_group *sched_group_phys = NULL; | ||
6104 | #ifdef CONFIG_SCHED_MC | ||
6105 | struct sched_group *sched_group_core = NULL; | ||
6106 | #endif | ||
5704 | #ifdef CONFIG_NUMA | 6107 | #ifdef CONFIG_NUMA |
5705 | struct sched_group **sched_group_nodes = NULL; | 6108 | struct sched_group **sched_group_nodes = NULL; |
5706 | struct sched_group *sched_group_allnodes = NULL; | 6109 | struct sched_group *sched_group_allnodes = NULL; |
@@ -5708,11 +6111,11 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5708 | /* | 6111 | /* |
5709 | * Allocate the per-node list of sched groups | 6112 | * Allocate the per-node list of sched groups |
5710 | */ | 6113 | */ |
5711 | sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES, | 6114 | sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES, |
5712 | GFP_ATOMIC); | 6115 | GFP_KERNEL); |
5713 | if (!sched_group_nodes) { | 6116 | if (!sched_group_nodes) { |
5714 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 6117 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
5715 | return; | 6118 | return -ENOMEM; |
5716 | } | 6119 | } |
5717 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | 6120 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; |
5718 | #endif | 6121 | #endif |
@@ -5738,7 +6141,7 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5738 | if (!sched_group_allnodes) { | 6141 | if (!sched_group_allnodes) { |
5739 | printk(KERN_WARNING | 6142 | printk(KERN_WARNING |
5740 | "Can not alloc allnodes sched group\n"); | 6143 | "Can not alloc allnodes sched group\n"); |
5741 | break; | 6144 | goto error; |
5742 | } | 6145 | } |
5743 | sched_group_allnodes_bycpu[i] | 6146 | sched_group_allnodes_bycpu[i] |
5744 | = sched_group_allnodes; | 6147 | = sched_group_allnodes; |
@@ -5759,6 +6162,18 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5759 | cpus_and(sd->span, sd->span, *cpu_map); | 6162 | cpus_and(sd->span, sd->span, *cpu_map); |
5760 | #endif | 6163 | #endif |
5761 | 6164 | ||
6165 | if (!sched_group_phys) { | ||
6166 | sched_group_phys | ||
6167 | = kmalloc(sizeof(struct sched_group) * NR_CPUS, | ||
6168 | GFP_KERNEL); | ||
6169 | if (!sched_group_phys) { | ||
6170 | printk (KERN_WARNING "Can not alloc phys sched" | ||
6171 | "group\n"); | ||
6172 | goto error; | ||
6173 | } | ||
6174 | sched_group_phys_bycpu[i] = sched_group_phys; | ||
6175 | } | ||
6176 | |||
5762 | p = sd; | 6177 | p = sd; |
5763 | sd = &per_cpu(phys_domains, i); | 6178 | sd = &per_cpu(phys_domains, i); |
5764 | group = cpu_to_phys_group(i); | 6179 | group = cpu_to_phys_group(i); |
@@ -5768,6 +6183,18 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5768 | sd->groups = &sched_group_phys[group]; | 6183 | sd->groups = &sched_group_phys[group]; |
5769 | 6184 | ||
5770 | #ifdef CONFIG_SCHED_MC | 6185 | #ifdef CONFIG_SCHED_MC |
6186 | if (!sched_group_core) { | ||
6187 | sched_group_core | ||
6188 | = kmalloc(sizeof(struct sched_group) * NR_CPUS, | ||
6189 | GFP_KERNEL); | ||
6190 | if (!sched_group_core) { | ||
6191 | printk (KERN_WARNING "Can not alloc core sched" | ||
6192 | "group\n"); | ||
6193 | goto error; | ||
6194 | } | ||
6195 | sched_group_core_bycpu[i] = sched_group_core; | ||
6196 | } | ||
6197 | |||
5771 | p = sd; | 6198 | p = sd; |
5772 | sd = &per_cpu(core_domains, i); | 6199 | sd = &per_cpu(core_domains, i); |
5773 | group = cpu_to_core_group(i); | 6200 | group = cpu_to_core_group(i); |
@@ -5851,24 +6278,21 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5851 | domainspan = sched_domain_node_span(i); | 6278 | domainspan = sched_domain_node_span(i); |
5852 | cpus_and(domainspan, domainspan, *cpu_map); | 6279 | cpus_and(domainspan, domainspan, *cpu_map); |
5853 | 6280 | ||
5854 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | 6281 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); |
6282 | if (!sg) { | ||
6283 | printk(KERN_WARNING "Can not alloc domain group for " | ||
6284 | "node %d\n", i); | ||
6285 | goto error; | ||
6286 | } | ||
5855 | sched_group_nodes[i] = sg; | 6287 | sched_group_nodes[i] = sg; |
5856 | for_each_cpu_mask(j, nodemask) { | 6288 | for_each_cpu_mask(j, nodemask) { |
5857 | struct sched_domain *sd; | 6289 | struct sched_domain *sd; |
5858 | sd = &per_cpu(node_domains, j); | 6290 | sd = &per_cpu(node_domains, j); |
5859 | sd->groups = sg; | 6291 | sd->groups = sg; |
5860 | if (sd->groups == NULL) { | ||
5861 | /* Turn off balancing if we have no groups */ | ||
5862 | sd->flags = 0; | ||
5863 | } | ||
5864 | } | ||
5865 | if (!sg) { | ||
5866 | printk(KERN_WARNING | ||
5867 | "Can not alloc domain group for node %d\n", i); | ||
5868 | continue; | ||
5869 | } | 6292 | } |
5870 | sg->cpu_power = 0; | 6293 | sg->cpu_power = 0; |
5871 | sg->cpumask = nodemask; | 6294 | sg->cpumask = nodemask; |
6295 | sg->next = sg; | ||
5872 | cpus_or(covered, covered, nodemask); | 6296 | cpus_or(covered, covered, nodemask); |
5873 | prev = sg; | 6297 | prev = sg; |
5874 | 6298 | ||
@@ -5887,54 +6311,90 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5887 | if (cpus_empty(tmp)) | 6311 | if (cpus_empty(tmp)) |
5888 | continue; | 6312 | continue; |
5889 | 6313 | ||
5890 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | 6314 | sg = kmalloc_node(sizeof(struct sched_group), |
6315 | GFP_KERNEL, i); | ||
5891 | if (!sg) { | 6316 | if (!sg) { |
5892 | printk(KERN_WARNING | 6317 | printk(KERN_WARNING |
5893 | "Can not alloc domain group for node %d\n", j); | 6318 | "Can not alloc domain group for node %d\n", j); |
5894 | break; | 6319 | goto error; |
5895 | } | 6320 | } |
5896 | sg->cpu_power = 0; | 6321 | sg->cpu_power = 0; |
5897 | sg->cpumask = tmp; | 6322 | sg->cpumask = tmp; |
6323 | sg->next = prev->next; | ||
5898 | cpus_or(covered, covered, tmp); | 6324 | cpus_or(covered, covered, tmp); |
5899 | prev->next = sg; | 6325 | prev->next = sg; |
5900 | prev = sg; | 6326 | prev = sg; |
5901 | } | 6327 | } |
5902 | prev->next = sched_group_nodes[i]; | ||
5903 | } | 6328 | } |
5904 | #endif | 6329 | #endif |
5905 | 6330 | ||
5906 | /* Calculate CPU power for physical packages and nodes */ | 6331 | /* Calculate CPU power for physical packages and nodes */ |
6332 | #ifdef CONFIG_SCHED_SMT | ||
5907 | for_each_cpu_mask(i, *cpu_map) { | 6333 | for_each_cpu_mask(i, *cpu_map) { |
5908 | int power; | ||
5909 | struct sched_domain *sd; | 6334 | struct sched_domain *sd; |
5910 | #ifdef CONFIG_SCHED_SMT | ||
5911 | sd = &per_cpu(cpu_domains, i); | 6335 | sd = &per_cpu(cpu_domains, i); |
5912 | power = SCHED_LOAD_SCALE; | 6336 | sd->groups->cpu_power = SCHED_LOAD_SCALE; |
5913 | sd->groups->cpu_power = power; | 6337 | } |
5914 | #endif | 6338 | #endif |
5915 | #ifdef CONFIG_SCHED_MC | 6339 | #ifdef CONFIG_SCHED_MC |
6340 | for_each_cpu_mask(i, *cpu_map) { | ||
6341 | int power; | ||
6342 | struct sched_domain *sd; | ||
5916 | sd = &per_cpu(core_domains, i); | 6343 | sd = &per_cpu(core_domains, i); |
5917 | power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) | 6344 | if (sched_smt_power_savings) |
6345 | power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask); | ||
6346 | else | ||
6347 | power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) | ||
5918 | * SCHED_LOAD_SCALE / 10; | 6348 | * SCHED_LOAD_SCALE / 10; |
5919 | sd->groups->cpu_power = power; | 6349 | sd->groups->cpu_power = power; |
6350 | } | ||
6351 | #endif | ||
5920 | 6352 | ||
6353 | for_each_cpu_mask(i, *cpu_map) { | ||
6354 | struct sched_domain *sd; | ||
6355 | #ifdef CONFIG_SCHED_MC | ||
5921 | sd = &per_cpu(phys_domains, i); | 6356 | sd = &per_cpu(phys_domains, i); |
6357 | if (i != first_cpu(sd->groups->cpumask)) | ||
6358 | continue; | ||
5922 | 6359 | ||
5923 | /* | 6360 | sd->groups->cpu_power = 0; |
5924 | * This has to be < 2 * SCHED_LOAD_SCALE | 6361 | if (sched_mc_power_savings || sched_smt_power_savings) { |
5925 | * Lets keep it SCHED_LOAD_SCALE, so that | 6362 | int j; |
5926 | * while calculating NUMA group's cpu_power | 6363 | |
5927 | * we can simply do | 6364 | for_each_cpu_mask(j, sd->groups->cpumask) { |
5928 | * numa_group->cpu_power += phys_group->cpu_power; | 6365 | struct sched_domain *sd1; |
5929 | * | 6366 | sd1 = &per_cpu(core_domains, j); |
5930 | * See "only add power once for each physical pkg" | 6367 | /* |
5931 | * comment below | 6368 | * for each core we will add once |
5932 | */ | 6369 | * to the group in physical domain |
5933 | sd->groups->cpu_power = SCHED_LOAD_SCALE; | 6370 | */ |
6371 | if (j != first_cpu(sd1->groups->cpumask)) | ||
6372 | continue; | ||
6373 | |||
6374 | if (sched_smt_power_savings) | ||
6375 | sd->groups->cpu_power += sd1->groups->cpu_power; | ||
6376 | else | ||
6377 | sd->groups->cpu_power += SCHED_LOAD_SCALE; | ||
6378 | } | ||
6379 | } else | ||
6380 | /* | ||
6381 | * This has to be < 2 * SCHED_LOAD_SCALE | ||
6382 | * Lets keep it SCHED_LOAD_SCALE, so that | ||
6383 | * while calculating NUMA group's cpu_power | ||
6384 | * we can simply do | ||
6385 | * numa_group->cpu_power += phys_group->cpu_power; | ||
6386 | * | ||
6387 | * See "only add power once for each physical pkg" | ||
6388 | * comment below | ||
6389 | */ | ||
6390 | sd->groups->cpu_power = SCHED_LOAD_SCALE; | ||
5934 | #else | 6391 | #else |
6392 | int power; | ||
5935 | sd = &per_cpu(phys_domains, i); | 6393 | sd = &per_cpu(phys_domains, i); |
5936 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | 6394 | if (sched_smt_power_savings) |
5937 | (cpus_weight(sd->groups->cpumask)-1) / 10; | 6395 | power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask); |
6396 | else | ||
6397 | power = SCHED_LOAD_SCALE; | ||
5938 | sd->groups->cpu_power = power; | 6398 | sd->groups->cpu_power = power; |
5939 | #endif | 6399 | #endif |
5940 | } | 6400 | } |
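Worked numbers for the new cpu_power rules, assuming SCHED_LOAD_SCALE is 128 (its usual value) and a package of two cores with two SMT siblings each: an SMT group is always rated 128; a core group is rated 128 + (2-1)*128/10 = 140 by default, or 2*128 = 256 when sched_smt_power_savings is set; the physical group stays at a flat 128 by default, but once either knob is set it becomes the sum of its cores' contributions, 2*128 = 256 with only sched_mc_power_savings, or 2*256 = 512 when sched_smt_power_savings is set too. The inflated package rating makes the balancer treat one package as "not yet full" for longer, the intent being to concentrate load there and let the sibling package stay idle for power savings.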
@@ -5962,13 +6422,20 @@ void build_sched_domains(const cpumask_t *cpu_map) | |||
5962 | * Tune cache-hot values: | 6422 | * Tune cache-hot values: |
5963 | */ | 6423 | */ |
5964 | calibrate_migration_costs(cpu_map); | 6424 | calibrate_migration_costs(cpu_map); |
6425 | |||
6426 | return 0; | ||
6427 | |||
6428 | error: | ||
6429 | free_sched_groups(cpu_map); | ||
6430 | return -ENOMEM; | ||
5965 | } | 6431 | } |
5966 | /* | 6432 | /* |
5967 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 6433 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
5968 | */ | 6434 | */ |
5969 | static void arch_init_sched_domains(const cpumask_t *cpu_map) | 6435 | static int arch_init_sched_domains(const cpumask_t *cpu_map) |
5970 | { | 6436 | { |
5971 | cpumask_t cpu_default_map; | 6437 | cpumask_t cpu_default_map; |
6438 | int err; | ||
5972 | 6439 | ||
5973 | /* | 6440 | /* |
5974 | * Setup mask for cpus without special case scheduling requirements. | 6441 | * Setup mask for cpus without special case scheduling requirements. |
@@ -5977,51 +6444,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map) | |||
5977 | */ | 6444 | */ |
5978 | cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); | 6445 | cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); |
5979 | 6446 | ||
5980 | build_sched_domains(&cpu_default_map); | 6447 | err = build_sched_domains(&cpu_default_map); |
6448 | |||
6449 | return err; | ||
5981 | } | 6450 | } |
5982 | 6451 | ||
5983 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) | 6452 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) |
5984 | { | 6453 | { |
5985 | #ifdef CONFIG_NUMA | 6454 | free_sched_groups(cpu_map); |
5986 | int i; | ||
5987 | int cpu; | ||
5988 | |||
5989 | for_each_cpu_mask(cpu, *cpu_map) { | ||
5990 | struct sched_group *sched_group_allnodes | ||
5991 | = sched_group_allnodes_bycpu[cpu]; | ||
5992 | struct sched_group **sched_group_nodes | ||
5993 | = sched_group_nodes_bycpu[cpu]; | ||
5994 | |||
5995 | if (sched_group_allnodes) { | ||
5996 | kfree(sched_group_allnodes); | ||
5997 | sched_group_allnodes_bycpu[cpu] = NULL; | ||
5998 | } | ||
5999 | |||
6000 | if (!sched_group_nodes) | ||
6001 | continue; | ||
6002 | |||
6003 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
6004 | cpumask_t nodemask = node_to_cpumask(i); | ||
6005 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | ||
6006 | |||
6007 | cpus_and(nodemask, nodemask, *cpu_map); | ||
6008 | if (cpus_empty(nodemask)) | ||
6009 | continue; | ||
6010 | |||
6011 | if (sg == NULL) | ||
6012 | continue; | ||
6013 | sg = sg->next; | ||
6014 | next_sg: | ||
6015 | oldsg = sg; | ||
6016 | sg = sg->next; | ||
6017 | kfree(oldsg); | ||
6018 | if (oldsg != sched_group_nodes[i]) | ||
6019 | goto next_sg; | ||
6020 | } | ||
6021 | kfree(sched_group_nodes); | ||
6022 | sched_group_nodes_bycpu[cpu] = NULL; | ||
6023 | } | ||
6024 | #endif | ||
6025 | } | 6455 | } |
6026 | 6456 | ||
6027 | /* | 6457 | /* |
@@ -6046,9 +6476,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
6046 | * correct sched domains | 6476 | * correct sched domains |
6047 | * Call with hotplug lock held | 6477 | * Call with hotplug lock held |
6048 | */ | 6478 | */ |
6049 | void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) | 6479 | int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) |
6050 | { | 6480 | { |
6051 | cpumask_t change_map; | 6481 | cpumask_t change_map; |
6482 | int err = 0; | ||
6052 | 6483 | ||
6053 | cpus_and(*partition1, *partition1, cpu_online_map); | 6484 | cpus_and(*partition1, *partition1, cpu_online_map); |
6054 | cpus_and(*partition2, *partition2, cpu_online_map); | 6485 | cpus_and(*partition2, *partition2, cpu_online_map); |
@@ -6057,10 +6488,86 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) | |||
6057 | /* Detach sched domains from all of the affected cpus */ | 6488 | /* Detach sched domains from all of the affected cpus */ |
6058 | detach_destroy_domains(&change_map); | 6489 | detach_destroy_domains(&change_map); |
6059 | if (!cpus_empty(*partition1)) | 6490 | if (!cpus_empty(*partition1)) |
6060 | build_sched_domains(partition1); | 6491 | err = build_sched_domains(partition1); |
6061 | if (!cpus_empty(*partition2)) | 6492 | if (!err && !cpus_empty(*partition2)) |
6062 | build_sched_domains(partition2); | 6493 | err = build_sched_domains(partition2); |
6494 | |||
6495 | return err; | ||
6496 | } | ||
6497 | |||
6498 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
6499 | int arch_reinit_sched_domains(void) | ||
6500 | { | ||
6501 | int err; | ||
6502 | |||
6503 | lock_cpu_hotplug(); | ||
6504 | detach_destroy_domains(&cpu_online_map); | ||
6505 | err = arch_init_sched_domains(&cpu_online_map); | ||
6506 | unlock_cpu_hotplug(); | ||
6507 | |||
6508 | return err; | ||
6509 | } | ||
6510 | |||
6511 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | ||
6512 | { | ||
6513 | int ret; | ||
6514 | |||
6515 | if (buf[0] != '0' && buf[0] != '1') | ||
6516 | return -EINVAL; | ||
6517 | |||
6518 | if (smt) | ||
6519 | sched_smt_power_savings = (buf[0] == '1'); | ||
6520 | else | ||
6521 | sched_mc_power_savings = (buf[0] == '1'); | ||
6522 | |||
6523 | ret = arch_reinit_sched_domains(); | ||
6524 | |||
6525 | return ret ? ret : count; | ||
6526 | } | ||
6527 | |||
6528 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | ||
6529 | { | ||
6530 | int err = 0; | ||
6531 | #ifdef CONFIG_SCHED_SMT | ||
6532 | if (smt_capable()) | ||
6533 | err = sysfs_create_file(&cls->kset.kobj, | ||
6534 | &attr_sched_smt_power_savings.attr); | ||
6535 | #endif | ||
6536 | #ifdef CONFIG_SCHED_MC | ||
6537 | if (!err && mc_capable()) | ||
6538 | err = sysfs_create_file(&cls->kset.kobj, | ||
6539 | &attr_sched_mc_power_savings.attr); | ||
6540 | #endif | ||
6541 | return err; | ||
6542 | } | ||
6543 | #endif | ||
6544 | |||
6545 | #ifdef CONFIG_SCHED_MC | ||
6546 | static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) | ||
6547 | { | ||
6548 | return sprintf(page, "%u\n", sched_mc_power_savings); | ||
6549 | } | ||
6550 | static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count) | ||
6551 | { | ||
6552 | return sched_power_savings_store(buf, count, 0); | ||
6553 | } | ||
6554 | SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, | ||
6555 | sched_mc_power_savings_store); | ||
6556 | #endif | ||
6557 | |||
6558 | #ifdef CONFIG_SCHED_SMT | ||
6559 | static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) | ||
6560 | { | ||
6561 | return sprintf(page, "%u\n", sched_smt_power_savings); | ||
6562 | } | ||
6563 | static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count) | ||
6564 | { | ||
6565 | return sched_power_savings_store(buf, count, 1); | ||
6063 | } | 6566 | } |
6567 | SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, | ||
6568 | sched_smt_power_savings_store); | ||
6569 | #endif | ||
6570 | |||
6064 | 6571 | ||
6065 | #ifdef CONFIG_HOTPLUG_CPU | 6572 | #ifdef CONFIG_HOTPLUG_CPU |
6066 | /* | 6573 | /* |
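The two SYSDEV_ATTR() definitions expose the knobs on the cpu sysdev class, and writing them triggers a full teardown and rebuild of the scheduler domains via arch_reinit_sched_domains(). A small userspace sketch of toggling one of them; the /sys path is the one the cpu sysdev class is expected to produce, and is an assumption rather than something shown in this patch:

    #include <fcntl.h>
    #include <unistd.h>

    /* Write "1" (enable) or "0" (disable) to the multi-core knob. */
    int set_mc_power_savings(int on)
    {
        int fd = open("/sys/devices/system/cpu/sched_mc_power_savings", O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, on ? "1" : "0", 1);
        close(fd);
        return n == 1 ? 0 : -1;
    }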
@@ -6143,7 +6650,6 @@ void __init sched_init(void) | |||
6143 | rq->push_cpu = 0; | 6650 | rq->push_cpu = 0; |
6144 | rq->migration_thread = NULL; | 6651 | rq->migration_thread = NULL; |
6145 | INIT_LIST_HEAD(&rq->migration_queue); | 6652 | INIT_LIST_HEAD(&rq->migration_queue); |
6146 | rq->cpu = i; | ||
6147 | #endif | 6653 | #endif |
6148 | atomic_set(&rq->nr_iowait, 0); | 6654 | atomic_set(&rq->nr_iowait, 0); |
6149 | 6655 | ||
@@ -6158,6 +6664,7 @@ void __init sched_init(void) | |||
6158 | } | 6664 | } |
6159 | } | 6665 | } |
6160 | 6666 | ||
6667 | set_load_weight(&init_task); | ||
6161 | /* | 6668 | /* |
6162 | * The boot idle thread does lazy MMU switching as well: | 6669 | * The boot idle thread does lazy MMU switching as well: |
6163 | */ | 6670 | */ |
@@ -6204,11 +6711,12 @@ void normalize_rt_tasks(void) | |||
6204 | runqueue_t *rq; | 6711 | runqueue_t *rq; |
6205 | 6712 | ||
6206 | read_lock_irq(&tasklist_lock); | 6713 | read_lock_irq(&tasklist_lock); |
6207 | for_each_process (p) { | 6714 | for_each_process(p) { |
6208 | if (!rt_task(p)) | 6715 | if (!rt_task(p)) |
6209 | continue; | 6716 | continue; |
6210 | 6717 | ||
6211 | rq = task_rq_lock(p, &flags); | 6718 | spin_lock_irqsave(&p->pi_lock, flags); |
6719 | rq = __task_rq_lock(p); | ||
6212 | 6720 | ||
6213 | array = p->array; | 6721 | array = p->array; |
6214 | if (array) | 6722 | if (array) |
@@ -6219,7 +6727,8 @@ void normalize_rt_tasks(void) | |||
6219 | resched_task(rq->curr); | 6727 | resched_task(rq->curr); |
6220 | } | 6728 | } |
6221 | 6729 | ||
6222 | task_rq_unlock(rq, &flags); | 6730 | __task_rq_unlock(rq); |
6731 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
6223 | } | 6732 | } |
6224 | read_unlock_irq(&tasklist_lock); | 6733 | read_unlock_irq(&tasklist_lock); |
6225 | } | 6734 | } |
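Both sched_setscheduler() and normalize_rt_tasks() now follow the same nesting rule introduced by the PI code: take p->pi_lock first, then the task's runqueue lock, and release in reverse order, so PI waiters can neither arrive nor leave while the priority is being rewritten. The canonical shape, lifted from the hunks above:

    spin_lock_irqsave(&p->pi_lock, flags);    /* freeze PI waiter changes  */
    rq = __task_rq_lock(p);                   /* then the task's runqueue  */

    /* ... dequeue p, change p->prio, requeue and maybe resched ... */

    __task_rq_unlock(rq);
    spin_unlock_irqrestore(&p->pi_lock, flags);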
diff --git a/kernel/softirq.c b/kernel/softirq.c index 9e2f1c6e73d7..8f03e3b89b55 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu) | |||
446 | } | 446 | } |
447 | #endif /* CONFIG_HOTPLUG_CPU */ | 447 | #endif /* CONFIG_HOTPLUG_CPU */ |
448 | 448 | ||
449 | static int cpu_callback(struct notifier_block *nfb, | 449 | static int __devinit cpu_callback(struct notifier_block *nfb, |
450 | unsigned long action, | 450 | unsigned long action, |
451 | void *hcpu) | 451 | void *hcpu) |
452 | { | 452 | { |
@@ -486,7 +486,7 @@ static int cpu_callback(struct notifier_block *nfb, | |||
486 | return NOTIFY_OK; | 486 | return NOTIFY_OK; |
487 | } | 487 | } |
488 | 488 | ||
489 | static struct notifier_block cpu_nfb = { | 489 | static struct notifier_block __devinitdata cpu_nfb = { |
490 | .notifier_call = cpu_callback | 490 | .notifier_call = cpu_callback |
491 | }; | 491 | }; |
492 | 492 | ||
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index b5c3b94e01ce..6b76caa22981 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu) | |||
104 | /* | 104 | /* |
105 | * Create/destroy watchdog threads as CPUs come and go: | 105 | * Create/destroy watchdog threads as CPUs come and go: |
106 | */ | 106 | */ |
107 | static int | 107 | static int __devinit |
108 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 108 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
109 | { | 109 | { |
110 | int hotcpu = (unsigned long)hcpu; | 110 | int hotcpu = (unsigned long)hcpu; |
@@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
142 | return NOTIFY_OK; | 142 | return NOTIFY_OK; |
143 | } | 143 | } |
144 | 144 | ||
145 | static struct notifier_block cpu_nfb = { | 145 | static struct notifier_block __devinitdata cpu_nfb = { |
146 | .notifier_call = cpu_callback | 146 | .notifier_call = cpu_callback |
147 | }; | 147 | }; |
148 | 148 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f1a4eb1a655e..93a2c5398648 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -133,6 +133,10 @@ extern int acct_parm[]; | |||
133 | extern int no_unaligned_warning; | 133 | extern int no_unaligned_warning; |
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | #ifdef CONFIG_RT_MUTEXES | ||
137 | extern int max_lock_depth; | ||
138 | #endif | ||
139 | |||
136 | static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, | 140 | static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, |
137 | ctl_table *, void **); | 141 | ctl_table *, void **); |
138 | static int proc_doutsstring(ctl_table *table, int write, struct file *filp, | 142 | static int proc_doutsstring(ctl_table *table, int write, struct file *filp, |
@@ -688,6 +692,17 @@ static ctl_table kern_table[] = { | |||
688 | .proc_handler = &proc_dointvec, | 692 | .proc_handler = &proc_dointvec, |
689 | }, | 693 | }, |
690 | #endif | 694 | #endif |
695 | #ifdef CONFIG_RT_MUTEXES | ||
696 | { | ||
697 | .ctl_name = KERN_MAX_LOCK_DEPTH, | ||
698 | .procname = "max_lock_depth", | ||
699 | .data = &max_lock_depth, | ||
700 | .maxlen = sizeof(int), | ||
701 | .mode = 0644, | ||
702 | .proc_handler = &proc_dointvec, | ||
703 | }, | ||
704 | #endif | ||
705 | |||
691 | { .ctl_name = 0 } | 706 | { .ctl_name = 0 } |
692 | }; | 707 | }; |
693 | 708 | ||
@@ -928,6 +943,18 @@ static ctl_table vm_table[] = { | |||
928 | .strategy = &sysctl_jiffies, | 943 | .strategy = &sysctl_jiffies, |
929 | }, | 944 | }, |
930 | #endif | 945 | #endif |
946 | #ifdef CONFIG_X86_32 | ||
947 | { | ||
948 | .ctl_name = VM_VDSO_ENABLED, | ||
949 | .procname = "vdso_enabled", | ||
950 | .data = &vdso_enabled, | ||
951 | .maxlen = sizeof(vdso_enabled), | ||
952 | .mode = 0644, | ||
953 | .proc_handler = &proc_dointvec, | ||
954 | .strategy = &sysctl_intvec, | ||
955 | .extra1 = &zero, | ||
956 | }, | ||
957 | #endif | ||
931 | { .ctl_name = 0 } | 958 | { .ctl_name = 0 } |
932 | }; | 959 | }; |
933 | 960 | ||
diff --git a/kernel/timer.c b/kernel/timer.c index 5bb6b7976eec..5a8960253063 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1652,7 +1652,7 @@ static void __devinit migrate_timers(int cpu) | |||
1652 | } | 1652 | } |
1653 | #endif /* CONFIG_HOTPLUG_CPU */ | 1653 | #endif /* CONFIG_HOTPLUG_CPU */ |
1654 | 1654 | ||
1655 | static int timer_cpu_notify(struct notifier_block *self, | 1655 | static int __devinit timer_cpu_notify(struct notifier_block *self, |
1656 | unsigned long action, void *hcpu) | 1656 | unsigned long action, void *hcpu) |
1657 | { | 1657 | { |
1658 | long cpu = (long)hcpu; | 1658 | long cpu = (long)hcpu; |
@@ -1672,7 +1672,7 @@ static int timer_cpu_notify(struct notifier_block *self, | |||
1672 | return NOTIFY_OK; | 1672 | return NOTIFY_OK; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | static struct notifier_block timers_nb = { | 1675 | static struct notifier_block __devinitdata timers_nb = { |
1676 | .notifier_call = timer_cpu_notify, | 1676 | .notifier_call = timer_cpu_notify, |
1677 | }; | 1677 | }; |
1678 | 1678 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 565cf7a1febd..59f0b42bd89e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -559,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) | |||
559 | } | 559 | } |
560 | 560 | ||
561 | /* We're holding the cpucontrol mutex here */ | 561 | /* We're holding the cpucontrol mutex here */ |
562 | static int workqueue_cpu_callback(struct notifier_block *nfb, | 562 | static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, |
563 | unsigned long action, | 563 | unsigned long action, |
564 | void *hcpu) | 564 | void *hcpu) |
565 | { | 565 | { |
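The softirq, softlockup, timer, workqueue, page-writeback, page_alloc and slab hunks all make the same mechanical change: CPU-hotplug notifier callbacks gain __devinit/__cpuinit and their notifier_block instances __devinitdata/__cpuinitdata, so on kernels built without the corresponding hotplug support the callbacks and their data can land in init sections and be freed after boot. A sketch of the resulting pattern; the names here are illustrative, not from the patch:

    static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
                                         unsigned long action, void *hcpu)
    {
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
            /* bring up per-CPU state for 'cpu' */
            break;
        case CPU_DEAD:
            /* tear it down again */
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block __cpuinitdata my_cpu_nfb = {
        .notifier_call = my_cpu_callback,
    };

    /* registered once during boot, e.g. from an __init function:
     *     register_cpu_notifier(&my_cpu_nfb);
     */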
diff --git a/lib/Kconfig b/lib/Kconfig index 3de93357f5ab..f6299342b882 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -86,4 +86,10 @@ config TEXTSEARCH_BM | |||
86 | config TEXTSEARCH_FSM | 86 | config TEXTSEARCH_FSM |
87 | tristate | 87 | tristate |
88 | 88 | ||
89 | # | ||
90 | # plist support is select#ed if needed | ||
91 | # | ||
92 | config PLIST | ||
93 | boolean | ||
94 | |||
89 | endmenu | 95 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8bab0102ac73..5330911ebd30 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -107,6 +107,24 @@ config DEBUG_MUTEXES | |||
107 | This allows mutex semantics violations and mutex related deadlocks | 107 | This allows mutex semantics violations and mutex related deadlocks |
108 | (lockups) to be detected and reported automatically. | 108 | (lockups) to be detected and reported automatically. |
109 | 109 | ||
110 | config DEBUG_RT_MUTEXES | ||
111 | bool "RT Mutex debugging, deadlock detection" | ||
112 | depends on DEBUG_KERNEL && RT_MUTEXES | ||
113 | help | ||
114 | This allows rt mutex semantics violations and rt mutex related | ||
115 | deadlocks (lockups) to be detected and reported automatically. | ||
116 | |||
117 | config DEBUG_PI_LIST | ||
118 | bool | ||
119 | default y | ||
120 | depends on DEBUG_RT_MUTEXES | ||
121 | |||
122 | config RT_MUTEX_TESTER | ||
123 | bool "Built-in scriptable tester for rt-mutexes" | ||
124 | depends on DEBUG_KERNEL && RT_MUTEXES | ||
125 | help | ||
126 | This option enables a rt-mutex tester. | ||
127 | |||
110 | config DEBUG_SPINLOCK | 128 | config DEBUG_SPINLOCK |
111 | bool "Spinlock debugging" | 129 | bool "Spinlock debugging" |
112 | depends on DEBUG_KERNEL | 130 | depends on DEBUG_KERNEL |
diff --git a/lib/Makefile b/lib/Makefile index 79358ad1f113..10c13c9d7824 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -25,6 +25,7 @@ lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o | |||
25 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | 25 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o |
26 | lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 26 | lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
27 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 27 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
28 | obj-$(CONFIG_PLIST) += plist.o | ||
28 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | 29 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o |
29 | 30 | ||
30 | ifneq ($(CONFIG_HAVE_DEC_LOCK),y) | 31 | ifneq ($(CONFIG_HAVE_DEC_LOCK),y) |
diff --git a/lib/plist.c b/lib/plist.c new file mode 100644 index 000000000000..3074a02272f3 --- /dev/null +++ b/lib/plist.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * lib/plist.c | ||
3 | * | ||
4 | * Descending-priority-sorted double-linked list | ||
5 | * | ||
6 | * (C) 2002-2003 Intel Corp | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>. | ||
8 | * | ||
9 | * 2001-2005 (c) MontaVista Software, Inc. | ||
10 | * Daniel Walker <dwalker@mvista.com> | ||
11 | * | ||
12 | * (C) 2005 Thomas Gleixner <tglx@linutronix.de> | ||
13 | * | ||
14 | * Simplifications of the original code by | ||
15 | * Oleg Nesterov <oleg@tv-sign.ru> | ||
16 | * | ||
17 | * Licensed under the FSF's GNU Public License v2 or later. | ||
18 | * | ||
19 | * Based on simple lists (include/linux/list.h). | ||
20 | * | ||
21 | * This file contains the add / del functions which are considered to | ||
22 | * be too large to inline. See include/linux/plist.h for further | ||
23 | * information. | ||
24 | */ | ||
25 | |||
26 | #include <linux/plist.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_PI_LIST | ||
30 | |||
31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, | ||
32 | struct list_head *n) | ||
33 | { | ||
34 | if (n->prev != p || p->next != n) { | ||
35 | printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev); | ||
36 | printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev); | ||
37 | printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev); | ||
38 | WARN_ON(1); | ||
39 | } | ||
40 | } | ||
41 | |||
42 | static void plist_check_list(struct list_head *top) | ||
43 | { | ||
44 | struct list_head *prev = top, *next = top->next; | ||
45 | |||
46 | plist_check_prev_next(top, prev, next); | ||
47 | while (next != top) { | ||
48 | prev = next; | ||
49 | next = prev->next; | ||
50 | plist_check_prev_next(top, prev, next); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static void plist_check_head(struct plist_head *head) | ||
55 | { | ||
56 | WARN_ON(!head->lock); | ||
57 | if (head->lock) | ||
58 | WARN_ON_SMP(!spin_is_locked(head->lock)); | ||
59 | plist_check_list(&head->prio_list); | ||
60 | plist_check_list(&head->node_list); | ||
61 | } | ||
62 | |||
63 | #else | ||
64 | # define plist_check_head(h) do { } while (0) | ||
65 | #endif | ||
66 | |||
67 | /** | ||
68 | * plist_add - add @node to @head | ||
69 | * | ||
70 | * @node: &struct plist_node pointer | ||
71 | * @head: &struct plist_head pointer | ||
72 | */ | ||
73 | void plist_add(struct plist_node *node, struct plist_head *head) | ||
74 | { | ||
75 | struct plist_node *iter; | ||
76 | |||
77 | plist_check_head(head); | ||
78 | WARN_ON(!plist_node_empty(node)); | ||
79 | |||
80 | list_for_each_entry(iter, &head->prio_list, plist.prio_list) { | ||
81 | if (node->prio < iter->prio) | ||
82 | goto lt_prio; | ||
83 | else if (node->prio == iter->prio) { | ||
84 | iter = list_entry(iter->plist.prio_list.next, | ||
85 | struct plist_node, plist.prio_list); | ||
86 | goto eq_prio; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | lt_prio: | ||
91 | list_add_tail(&node->plist.prio_list, &iter->plist.prio_list); | ||
92 | eq_prio: | ||
93 | list_add_tail(&node->plist.node_list, &iter->plist.node_list); | ||
94 | |||
95 | plist_check_head(head); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * plist_del - Remove a @node from plist. | ||
100 | * | ||
101 | * @node: &struct plist_node pointer - entry to be removed | ||
102 | * @head: &struct plist_head pointer - list head | ||
103 | */ | ||
104 | void plist_del(struct plist_node *node, struct plist_head *head) | ||
105 | { | ||
106 | plist_check_head(head); | ||
107 | |||
108 | if (!list_empty(&node->plist.prio_list)) { | ||
109 | struct plist_node *next = plist_first(&node->plist); | ||
110 | |||
111 | list_move_tail(&next->plist.prio_list, &node->plist.prio_list); | ||
112 | list_del_init(&node->plist.prio_list); | ||
113 | } | ||
114 | |||
115 | list_del_init(&node->plist.node_list); | ||
116 | |||
117 | plist_check_head(head); | ||
118 | } | ||
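lib/plist.c only carries the two out-of-line operations; the head/node types and their initializers live in the new include/linux/plist.h. A usage sketch under that assumption (plist_head_init(), plist_node_init() and plist_first() are taken from the header, and a lower numeric prio means higher priority, matching the node->prio < iter->prio test above):

    #include <linux/plist.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(waiter_lock);
    static struct plist_head waiters;
    static struct plist_node a, b;

    static void plist_demo(void)
    {
        plist_head_init(&waiters, &waiter_lock); /* lock backs DEBUG_PI_LIST checks */
        plist_node_init(&a, 10);
        plist_node_init(&b, 5);

        spin_lock(&waiter_lock);
        plist_add(&a, &waiters);
        plist_add(&b, &waiters);
        /* b (prio 5) now sorts ahead of a (prio 10) */
        plist_del(plist_first(&waiters), &waiters);
        spin_unlock(&waiter_lock);
    }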
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 02a16eacb72d..d84560c076d8 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c | |||
@@ -63,10 +63,10 @@ | |||
63 | bytes, which is the maximum length that can be coded. inflate_fast() | 63 | bytes, which is the maximum length that can be coded. inflate_fast() |
64 | requires strm->avail_out >= 258 for each loop to avoid checking for | 64 | requires strm->avail_out >= 258 for each loop to avoid checking for |
65 | output space. | 65 | output space. |
66 | |||
67 | - @start: inflate()'s starting value for strm->avail_out | ||
66 | */ | 68 | */ |
67 | void inflate_fast(strm, start) | 69 | void inflate_fast(z_streamp strm, unsigned start) |
68 | z_streamp strm; | ||
69 | unsigned start; /* inflate()'s starting value for strm->avail_out */ | ||
70 | { | 70 | { |
71 | struct inflate_state *state; | 71 | struct inflate_state *state; |
72 | unsigned char *in; /* local strm->next_in */ | 72 | unsigned char *in; /* local strm->next_in */ |
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c index da665fbb16aa..3fe6ce5b53e5 100644 --- a/lib/zlib_inflate/inftrees.c +++ b/lib/zlib_inflate/inftrees.c | |||
@@ -20,13 +20,8 @@ | |||
20 | table index bits. It will differ if the request is greater than the | 20 | table index bits. It will differ if the request is greater than the |
21 | longest code or if it is less than the shortest code. | 21 | longest code or if it is less than the shortest code. |
22 | */ | 22 | */ |
23 | int zlib_inflate_table(type, lens, codes, table, bits, work) | 23 | int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes, |
24 | codetype type; | 24 | code **table, unsigned *bits, unsigned short *work) |
25 | unsigned short *lens; | ||
26 | unsigned codes; | ||
27 | code **table; | ||
28 | unsigned *bits; | ||
29 | unsigned short *work; | ||
30 | { | 25 | { |
31 | unsigned len; /* a code's length in bits */ | 26 | unsigned len; /* a code's length in bits */ |
32 | unsigned sym; /* index of code symbols */ | 27 | unsigned sym; /* index of code symbols */ |
diff --git a/mm/Kconfig b/mm/Kconfig index 66e65ab39426..e76c023eb0bb 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -116,6 +116,7 @@ config SPARSEMEM_EXTREME | |||
116 | config MEMORY_HOTPLUG | 116 | config MEMORY_HOTPLUG |
117 | bool "Allow for memory hot-add" | 117 | bool "Allow for memory hot-add" |
118 | depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND | 118 | depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND |
119 | depends on (IA64 || X86 || PPC64) | ||
119 | 120 | ||
120 | comment "Memory hotplug is currently incompatible with Software Suspend" | 121 | comment "Memory hotplug is currently incompatible with Software Suspend" |
121 | depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND | 122 | depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND |
diff --git a/mm/filemap.c b/mm/filemap.c index 9c7334bafda8..d504d6e98886 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -2095,14 +2095,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, | |||
2095 | do { | 2095 | do { |
2096 | unsigned long index; | 2096 | unsigned long index; |
2097 | unsigned long offset; | 2097 | unsigned long offset; |
2098 | unsigned long maxlen; | ||
2099 | size_t copied; | 2098 | size_t copied; |
2100 | 2099 | ||
2101 | offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ | 2100 | offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ |
2102 | index = pos >> PAGE_CACHE_SHIFT; | 2101 | index = pos >> PAGE_CACHE_SHIFT; |
2103 | bytes = PAGE_CACHE_SIZE - offset; | 2102 | bytes = PAGE_CACHE_SIZE - offset; |
2104 | if (bytes > count) | 2103 | |
2105 | bytes = count; | 2104 | /* Limit the size of the copy to the caller's write size */ |
2105 | bytes = min(bytes, count); | ||
2106 | |||
2107 | /* | ||
2108 | * Limit the size of the copy to that of the current segment, | ||
2109 | * because fault_in_pages_readable() doesn't know how to walk | ||
2110 | * segments. | ||
2111 | */ | ||
2112 | bytes = min(bytes, cur_iov->iov_len - iov_base); | ||
2106 | 2113 | ||
2107 | /* | 2114 | /* |
2108 | * Bring in the user page that we will copy from _first_. | 2115 | * Bring in the user page that we will copy from _first_. |
@@ -2110,10 +2117,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, | |||
2110 | * same page as we're writing to, without it being marked | 2117 | * same page as we're writing to, without it being marked |
2111 | * up-to-date. | 2118 | * up-to-date. |
2112 | */ | 2119 | */ |
2113 | maxlen = cur_iov->iov_len - iov_base; | 2120 | fault_in_pages_readable(buf, bytes); |
2114 | if (maxlen > bytes) | ||
2115 | maxlen = bytes; | ||
2116 | fault_in_pages_readable(buf, maxlen); | ||
2117 | 2121 | ||
2118 | page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); | 2122 | page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); |
2119 | if (!page) { | 2123 | if (!page) { |
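The net effect of the two min() steps in generic_file_buffered_write() is bytes = min(PAGE_CACHE_SIZE - offset, count, cur_iov->iov_len - iov_base): the copy is clamped to the current page, to what the caller asked for, and to the current iovec segment before fault_in_pages_readable() runs, since that helper cannot walk across segment boundaries. Previously only the fault-in length was limited to the segment, so the prefaulted range could be shorter than the region actually copied afterwards.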
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 841a077d5aeb..ea4038838b0a 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/memory_hotplug.h> | 21 | #include <linux/memory_hotplug.h> |
22 | #include <linux/highmem.h> | 22 | #include <linux/highmem.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/ioport.h> | ||
24 | 25 | ||
25 | #include <asm/tlbflush.h> | 26 | #include <asm/tlbflush.h> |
26 | 27 | ||
@@ -126,6 +127,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
126 | unsigned long i; | 127 | unsigned long i; |
127 | unsigned long flags; | 128 | unsigned long flags; |
128 | unsigned long onlined_pages = 0; | 129 | unsigned long onlined_pages = 0; |
130 | struct resource res; | ||
131 | u64 section_end; | ||
132 | unsigned long start_pfn; | ||
129 | struct zone *zone; | 133 | struct zone *zone; |
130 | int need_zonelists_rebuild = 0; | 134 | int need_zonelists_rebuild = 0; |
131 | 135 | ||
@@ -148,10 +152,27 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
148 | if (!populated_zone(zone)) | 152 | if (!populated_zone(zone)) |
149 | need_zonelists_rebuild = 1; | 153 | need_zonelists_rebuild = 1; |
150 | 154 | ||
151 | for (i = 0; i < nr_pages; i++) { | 155 | res.start = (u64)pfn << PAGE_SHIFT; |
152 | struct page *page = pfn_to_page(pfn + i); | 156 | res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1; |
153 | online_page(page); | 157 | res.flags = IORESOURCE_MEM; /* we just need system ram */ |
154 | onlined_pages++; | 158 | section_end = res.end; |
159 | |||
160 | while (find_next_system_ram(&res) >= 0) { | ||
161 | start_pfn = (unsigned long)(res.start >> PAGE_SHIFT); | ||
162 | nr_pages = (unsigned long) | ||
163 | ((res.end + 1 - res.start) >> PAGE_SHIFT); | ||
164 | |||
165 | if (PageReserved(pfn_to_page(start_pfn))) { | ||
166 | /* this region's page is not onlined now */ | ||
167 | for (i = 0; i < nr_pages; i++) { | ||
168 | struct page *page = pfn_to_page(start_pfn + i); | ||
169 | online_page(page); | ||
170 | onlined_pages++; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | res.start = res.end + 1; | ||
175 | res.end = section_end; | ||
155 | } | 176 | } |
156 | zone->present_pages += onlined_pages; | 177 | zone->present_pages += onlined_pages; |
157 | zone->zone_pgdat->node_present_pages += onlined_pages; | 178 | zone->zone_pgdat->node_present_pages += onlined_pages; |
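online_pages() no longer assumes the whole [pfn, pfn + nr_pages) span is backed by RAM; it walks the iomem tree for "System RAM" pieces inside the requested range and onlines only those. The iteration idiom in isolation, with find_next_system_ram() assumed to narrow res to the next System RAM chunk at or above res.start and to return a negative value when none is left:

    struct resource res;
    u64 section_end;

    res.start = (u64)pfn << PAGE_SHIFT;
    res.end   = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
    res.flags = IORESOURCE_MEM;
    section_end = res.end;

    while (find_next_system_ram(&res) >= 0) {
        /* [res.start, res.end] is one System RAM piece: online it here */
        res.start = res.end + 1;      /* continue after this piece      */
        res.end   = section_end;      /* restore the search upper bound */
    }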
@@ -163,3 +184,100 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
163 | vm_total_pages = nr_free_pagecache_pages(); | 184 | vm_total_pages = nr_free_pagecache_pages(); |
164 | return 0; | 185 | return 0; |
165 | } | 186 | } |
187 | |||
188 | static pg_data_t *hotadd_new_pgdat(int nid, u64 start) | ||
189 | { | ||
190 | struct pglist_data *pgdat; | ||
191 | unsigned long zones_size[MAX_NR_ZONES] = {0}; | ||
192 | unsigned long zholes_size[MAX_NR_ZONES] = {0}; | ||
193 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
194 | |||
195 | pgdat = arch_alloc_nodedata(nid); | ||
196 | if (!pgdat) | ||
197 | return NULL; | ||
198 | |||
199 | arch_refresh_nodedata(nid, pgdat); | ||
200 | |||
201 | /* we can use NODE_DATA(nid) from here */ | ||
202 | |||
203 | /* init node's zones as empty zones, we don't have any present pages.*/ | ||
204 | free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size); | ||
205 | |||
206 | return pgdat; | ||
207 | } | ||
208 | |||
209 | static void rollback_node_hotadd(int nid, pg_data_t *pgdat) | ||
210 | { | ||
211 | arch_refresh_nodedata(nid, NULL); | ||
212 | arch_free_nodedata(pgdat); | ||
213 | return; | ||
214 | } | ||
215 | |||
216 | /* add this memory to iomem resource */ | ||
217 | static void register_memory_resource(u64 start, u64 size) | ||
218 | { | ||
219 | struct resource *res; | ||
220 | |||
221 | res = kzalloc(sizeof(struct resource), GFP_KERNEL); | ||
222 | BUG_ON(!res); | ||
223 | |||
224 | res->name = "System RAM"; | ||
225 | res->start = start; | ||
226 | res->end = start + size - 1; | ||
227 | res->flags = IORESOURCE_MEM; | ||
228 | if (request_resource(&iomem_resource, res) < 0) { | ||
229 | printk("System RAM resource %llx - %llx cannot be added\n", | ||
230 | (unsigned long long)res->start, (unsigned long long)res->end); | ||
231 | kfree(res); | ||
232 | } | ||
233 | } | ||
234 | |||
235 | |||
236 | |||
237 | int add_memory(int nid, u64 start, u64 size) | ||
238 | { | ||
239 | pg_data_t *pgdat = NULL; | ||
240 | int new_pgdat = 0; | ||
241 | int ret; | ||
242 | |||
243 | if (!node_online(nid)) { | ||
244 | pgdat = hotadd_new_pgdat(nid, start); | ||
245 | if (!pgdat) | ||
246 | return -ENOMEM; | ||
247 | new_pgdat = 1; | ||
248 | ret = kswapd_run(nid); | ||
249 | if (ret) | ||
250 | goto error; | ||
251 | } | ||
252 | |||
253 | /* call arch's memory hotadd */ | ||
254 | ret = arch_add_memory(nid, start, size); | ||
255 | |||
256 | if (ret < 0) | ||
257 | goto error; | ||
258 | |||
259 | /* we online node here. we can't roll back from here. */ | ||
260 | node_set_online(nid); | ||
261 | |||
262 | if (new_pgdat) { | ||
263 | ret = register_one_node(nid); | ||
264 | /* | ||
265 | * If sysfs file of new node can't create, cpu on the node | ||
266 | * can't be hot-added. There is no rollback way now. | ||
267 | * So, check by BUG_ON() to catch it reluctantly.. | ||
268 | */ | ||
269 | BUG_ON(ret); | ||
270 | } | ||
271 | |||
272 | /* register this memory as resource */ | ||
273 | register_memory_resource(start, size); | ||
274 | |||
275 | return ret; | ||
276 | error: | ||
277 | /* rollback pgdat allocation and others */ | ||
278 | if (new_pgdat) | ||
279 | rollback_node_hotadd(nid, pgdat); | ||
280 | |||
281 | return ret; | ||
282 | } | ||
283 | EXPORT_SYMBOL_GPL(add_memory); | ||
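add_memory() is exported for whatever discovers new memory (an ACPI memory-device driver, a probe interface, a hypervisor mechanism); the caller supplies the target node and the physical range, and onlining the pages is a separate later step that ends up in online_pages() above. A hypothetical caller, just to show the calling convention; the address, size and node below are made up:

    u64 start = 0x100000000ULL;      /* assumed physical base of the new memory   */
    u64 size  = 128ULL << 20;        /* assumed 128 MB, section-aligned           */
    int nid   = 0;                   /* assumed target node                       */
    int err;

    err = add_memory(nid, start, size);
    if (err)
        printk(KERN_ERR "memory hot-add of %llu bytes at %llx failed: %d\n",
               (unsigned long long)size, (unsigned long long)start, err);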
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 8ccf6f1b1473..4ec7026c7bab 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -516,14 +516,14 @@ static void set_ratelimit(void) | |||
516 | ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE; | 516 | ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE; |
517 | } | 517 | } |
518 | 518 | ||
519 | static int | 519 | static int __cpuinit |
520 | ratelimit_handler(struct notifier_block *self, unsigned long u, void *v) | 520 | ratelimit_handler(struct notifier_block *self, unsigned long u, void *v) |
521 | { | 521 | { |
522 | set_ratelimit(); | 522 | set_ratelimit(); |
523 | return 0; | 523 | return 0; |
524 | } | 524 | } |
525 | 525 | ||
526 | static struct notifier_block ratelimit_nb = { | 526 | static struct notifier_block __cpuinitdata ratelimit_nb = { |
527 | .notifier_call = ratelimit_handler, | 527 | .notifier_call = ratelimit_handler, |
528 | .next = NULL, | 528 | .next = NULL, |
529 | }; | 529 | }; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9f86191bb632..084a2de7e52a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -446,8 +446,8 @@ static void __free_pages_ok(struct page *page, unsigned int order) | |||
446 | 446 | ||
447 | arch_free_page(page, order); | 447 | arch_free_page(page, order); |
448 | if (!PageHighMem(page)) | 448 | if (!PageHighMem(page)) |
449 | mutex_debug_check_no_locks_freed(page_address(page), | 449 | debug_check_no_locks_freed(page_address(page), |
450 | PAGE_SIZE<<order); | 450 | PAGE_SIZE<<order); |
451 | 451 | ||
452 | for (i = 0 ; i < (1 << order) ; ++i) | 452 | for (i = 0 ; i < (1 << order) ; ++i) |
453 | reserved += free_pages_check(page + i); | 453 | reserved += free_pages_check(page + i); |
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu) | |||
2009 | } | 2009 | } |
2010 | } | 2010 | } |
2011 | 2011 | ||
2012 | static int pageset_cpuup_callback(struct notifier_block *nfb, | 2012 | static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, |
2013 | unsigned long action, | 2013 | unsigned long action, |
2014 | void *hcpu) | 2014 | void *hcpu) |
2015 | { | 2015 | { |
@@ -2031,7 +2031,7 @@ static int pageset_cpuup_callback(struct notifier_block *nfb, | |||
2031 | return ret; | 2031 | return ret; |
2032 | } | 2032 | } |
2033 | 2033 | ||
2034 | static struct notifier_block pageset_notifier = | 2034 | static struct notifier_block __cpuinitdata pageset_notifier = |
2035 | { &pageset_cpuup_callback, NULL, 0 }; | 2035 | { &pageset_cpuup_callback, NULL, 0 }; |
2036 | 2036 | ||
2037 | void __init setup_per_cpu_pageset(void) | 2037 | void __init setup_per_cpu_pageset(void) |
@@ -89,6 +89,7 @@ | |||
89 | #include <linux/config.h> | 89 | #include <linux/config.h> |
90 | #include <linux/slab.h> | 90 | #include <linux/slab.h> |
91 | #include <linux/mm.h> | 91 | #include <linux/mm.h> |
92 | #include <linux/poison.h> | ||
92 | #include <linux/swap.h> | 93 | #include <linux/swap.h> |
93 | #include <linux/cache.h> | 94 | #include <linux/cache.h> |
94 | #include <linux/interrupt.h> | 95 | #include <linux/interrupt.h> |
@@ -106,6 +107,7 @@ | |||
106 | #include <linux/nodemask.h> | 107 | #include <linux/nodemask.h> |
107 | #include <linux/mempolicy.h> | 108 | #include <linux/mempolicy.h> |
108 | #include <linux/mutex.h> | 109 | #include <linux/mutex.h> |
110 | #include <linux/rtmutex.h> | ||
109 | 111 | ||
110 | #include <asm/uaccess.h> | 112 | #include <asm/uaccess.h> |
111 | #include <asm/cacheflush.h> | 113 | #include <asm/cacheflush.h> |
@@ -492,17 +494,6 @@ struct kmem_cache { | |||
492 | #endif | 494 | #endif |
493 | 495 | ||
494 | #if DEBUG | 496 | #if DEBUG |
495 | /* | ||
496 | * Magic nums for obj red zoning. | ||
497 | * Placed in the first word before and the first word after an obj. | ||
498 | */ | ||
499 | #define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ | ||
500 | #define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ | ||
501 | |||
502 | /* ...and for poisoning */ | ||
503 | #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ | ||
504 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ | ||
505 | #define POISON_END 0xa5 /* end-byte of poisoning */ | ||
506 | 497 | ||
507 | /* | 498 | /* |
508 | * memory layout of objects: | 499 | * memory layout of objects: |
@@ -1083,7 +1074,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | |||
1083 | 1074 | ||
1084 | #endif | 1075 | #endif |
1085 | 1076 | ||
1086 | static int cpuup_callback(struct notifier_block *nfb, | 1077 | static int __devinit cpuup_callback(struct notifier_block *nfb, |
1087 | unsigned long action, void *hcpu) | 1078 | unsigned long action, void *hcpu) |
1088 | { | 1079 | { |
1089 | long cpu = (long)hcpu; | 1080 | long cpu = (long)hcpu; |
@@ -1265,7 +1256,9 @@ bad: | |||
1265 | return NOTIFY_BAD; | 1256 | return NOTIFY_BAD; |
1266 | } | 1257 | } |
1267 | 1258 | ||
1268 | static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; | 1259 | static struct notifier_block __cpuinitdata cpucache_notifier = { |
1260 | &cpuup_callback, NULL, 0 | ||
1261 | }; | ||
1269 | 1262 | ||
1270 | /* | 1263 | /* |
1271 | * swap the static kmem_list3 with kmalloced memory | 1264 | * swap the static kmem_list3 with kmalloced memory |
@@ -3405,7 +3398,7 @@ void kfree(const void *objp) | |||
3405 | local_irq_save(flags); | 3398 | local_irq_save(flags); |
3406 | kfree_debugcheck(objp); | 3399 | kfree_debugcheck(objp); |
3407 | c = virt_to_cache(objp); | 3400 | c = virt_to_cache(objp); |
3408 | mutex_debug_check_no_locks_freed(objp, obj_size(c)); | 3401 | debug_check_no_locks_freed(objp, obj_size(c)); |
3409 | __cache_free(c, (void *)objp); | 3402 | __cache_free(c, (void *)objp); |
3410 | local_irq_restore(flags); | 3403 | local_irq_restore(flags); |
3411 | } | 3404 | } |
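Both the page allocator and kfree() now call the generic debug_check_no_locks_freed() instead of the mutex-only mutex_debug_check_no_locks_freed(), so freeing memory that still contains a held lock of any tracked type can be flagged. A rough sketch of what that check does, using a hypothetical my_obj structure (the header location and the lock-debugging config dependency are assumptions; as the hunks show, the allocator already performs the check itself):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_obj {
            spinlock_t lock;        /* lock embedded in the object being freed */
            int value;
    };

    static void my_obj_free(struct my_obj *obj)
    {
            /*
             * With lock debugging enabled, this complains if a tracked lock
             * inside the range [obj, obj + sizeof(*obj)) is still held.
             * kfree() now performs the same check internally.
             */
            debug_check_no_locks_freed(obj, sizeof(*obj));
            kfree(obj);
    }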
diff --git a/mm/sparse.c b/mm/sparse.c index e0a3fe48aa37..c7a2b3a0e46b 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -45,7 +45,7 @@ static struct mem_section *sparse_index_alloc(int nid) | |||
45 | 45 | ||
46 | static int sparse_index_init(unsigned long section_nr, int nid) | 46 | static int sparse_index_init(unsigned long section_nr, int nid) |
47 | { | 47 | { |
48 | static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED; | 48 | static DEFINE_SPINLOCK(index_init_lock); |
49 | unsigned long root = SECTION_NR_TO_ROOT(section_nr); | 49 | unsigned long root = SECTION_NR_TO_ROOT(section_nr); |
50 | struct mem_section *section; | 50 | struct mem_section *section; |
51 | int ret = 0; | 51 | int ret = 0; |
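This sparse.c hunk is the first of a series (net/ipv6/route.c, net/sunrpc/auth_gss and the net/tipc files below follow suit) that replaces SPIN_LOCK_UNLOCKED / RW_LOCK_UNLOCKED initializers with DEFINE_SPINLOCK()/DEFINE_RWLOCK() for file-scope locks, and open-coded assignment of SPIN_LOCK_UNLOCKED with spin_lock_init() for locks embedded in other structures. A minimal sketch of the two idioms, with my_static_lock and my_obj as hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_static_lock);    /* file-scope lock, statically initialized */
    static DEFINE_RWLOCK(my_static_rwlock);

    struct my_obj {
            spinlock_t lock;                   /* lock embedded in a runtime-allocated object */
    };

    static void my_obj_setup(struct my_obj *obj)
    {
            spin_lock_init(&obj->lock);        /* proper runtime initialization */
    }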
diff --git a/mm/vmscan.c b/mm/vmscan.c index 72babac71dea..eeacb0d695c3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/notifier.h> | 34 | #include <linux/notifier.h> |
35 | #include <linux/rwsem.h> | 35 | #include <linux/rwsem.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/kthread.h> | ||
37 | 38 | ||
38 | #include <asm/tlbflush.h> | 39 | #include <asm/tlbflush.h> |
39 | #include <asm/div64.h> | 40 | #include <asm/div64.h> |
@@ -1223,7 +1224,6 @@ static int kswapd(void *p) | |||
1223 | }; | 1224 | }; |
1224 | cpumask_t cpumask; | 1225 | cpumask_t cpumask; |
1225 | 1226 | ||
1226 | daemonize("kswapd%d", pgdat->node_id); | ||
1227 | cpumask = node_to_cpumask(pgdat->node_id); | 1227 | cpumask = node_to_cpumask(pgdat->node_id); |
1228 | if (!cpus_empty(cpumask)) | 1228 | if (!cpus_empty(cpumask)) |
1229 | set_cpus_allowed(tsk, cpumask); | 1229 | set_cpus_allowed(tsk, cpumask); |
@@ -1450,7 +1450,7 @@ out: | |||
1450 | not required for correctness. So if the last cpu in a node goes | 1450 | not required for correctness. So if the last cpu in a node goes |
1451 | away, we get changed to run anywhere: as the first one comes back, | 1451 | away, we get changed to run anywhere: as the first one comes back, |
1452 | restore their cpu bindings. */ | 1452 | restore their cpu bindings. */ |
1453 | static int cpu_callback(struct notifier_block *nfb, | 1453 | static int __devinit cpu_callback(struct notifier_block *nfb, |
1454 | unsigned long action, void *hcpu) | 1454 | unsigned long action, void *hcpu) |
1455 | { | 1455 | { |
1456 | pg_data_t *pgdat; | 1456 | pg_data_t *pgdat; |
@@ -1468,20 +1468,35 @@ static int cpu_callback(struct notifier_block *nfb, | |||
1468 | } | 1468 | } |
1469 | #endif /* CONFIG_HOTPLUG_CPU */ | 1469 | #endif /* CONFIG_HOTPLUG_CPU */ |
1470 | 1470 | ||
1471 | /* | ||
1472 | * This kswapd start function will be called by init and node-hot-add. | ||
1473 | * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added. | ||
1474 | */ | ||
1475 | int kswapd_run(int nid) | ||
1476 | { | ||
1477 | pg_data_t *pgdat = NODE_DATA(nid); | ||
1478 | int ret = 0; | ||
1479 | |||
1480 | if (pgdat->kswapd) | ||
1481 | return 0; | ||
1482 | |||
1483 | pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); | ||
1484 | if (IS_ERR(pgdat->kswapd)) { | ||
1485 | /* failure at boot is fatal */ | ||
1486 | BUG_ON(system_state == SYSTEM_BOOTING); | ||
1487 | printk("Failed to start kswapd on node %d\n",nid); | ||
1488 | ret = -1; | ||
1489 | } | ||
1490 | return ret; | ||
1491 | } | ||
1492 | |||
1471 | static int __init kswapd_init(void) | 1493 | static int __init kswapd_init(void) |
1472 | { | 1494 | { |
1473 | pg_data_t *pgdat; | 1495 | int nid; |
1474 | 1496 | ||
1475 | swap_setup(); | 1497 | swap_setup(); |
1476 | for_each_online_pgdat(pgdat) { | 1498 | for_each_online_node(nid) |
1477 | pid_t pid; | 1499 | kswapd_run(nid); |
1478 | |||
1479 | pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); | ||
1480 | BUG_ON(pid < 0); | ||
1481 | read_lock(&tasklist_lock); | ||
1482 | pgdat->kswapd = find_task_by_pid(pid); | ||
1483 | read_unlock(&tasklist_lock); | ||
1484 | } | ||
1485 | hotcpu_notifier(cpu_callback, 0); | 1500 | hotcpu_notifier(cpu_callback, 0); |
1486 | return 0; | 1501 | return 0; |
1487 | } | 1502 | } |
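kswapd startup moves from kernel_thread() + daemonize() + a tasklist walk to the kthread API: kthread_run() creates the thread, names it, and hands back its task_struct (or an ERR_PTR on failure), which kswapd_run() stores directly in pgdat->kswapd; the same helper can then be reused by node hot-add, as its comment notes. A stripped-down sketch of the same pattern, with my_thread_fn/my_task as hypothetical names:

    #include <linux/init.h>
    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/sched.h>

    static struct task_struct *my_task;

    static int my_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* do some periodic work, then sleep for a second */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }

    static int __init my_thread_start(void)
    {
            my_task = kthread_run(my_thread_fn, NULL, "my_thread/%d", 0);
            if (IS_ERR(my_task))
                    return PTR_ERR(my_task);
            return 0;
    }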
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8a777932786d..e728980160d2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -349,7 +349,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif, | |||
349 | (strict & RT6_SELECT_F_REACHABLE) && | 349 | (strict & RT6_SELECT_F_REACHABLE) && |
350 | last && last != rt0) { | 350 | last && last != rt0) { |
351 | /* no entries matched; do round-robin */ | 351 | /* no entries matched; do round-robin */ |
352 | static spinlock_t lock = SPIN_LOCK_UNLOCKED; | 352 | static DEFINE_SPINLOCK(lock); |
353 | spin_lock(&lock); | 353 | spin_lock(&lock); |
354 | *head = rt0->u.next; | 354 | *head = rt0->u.next; |
355 | rt0->u.next = last->u.next; | 355 | rt0->u.next = last->u.next; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index f43311221a72..2f312164d6d5 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
@@ -70,7 +70,7 @@ | |||
70 | # define RPCDBG_FACILITY RPCDBG_AUTH | 70 | # define RPCDBG_FACILITY RPCDBG_AUTH |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED; | 73 | DEFINE_SPINLOCK(krb5_seq_lock); |
74 | 74 | ||
75 | u32 | 75 | u32 |
76 | gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, | 76 | gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 54128040a124..1bb75703f384 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -117,7 +117,7 @@ struct bclink { | |||
117 | static struct bcbearer *bcbearer = NULL; | 117 | static struct bcbearer *bcbearer = NULL; |
118 | static struct bclink *bclink = NULL; | 118 | static struct bclink *bclink = NULL; |
119 | static struct link *bcl = NULL; | 119 | static struct link *bcl = NULL; |
120 | static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED; | 120 | static DEFINE_SPINLOCK(bc_lock); |
121 | 121 | ||
122 | char tipc_bclink_name[] = "multicast-link"; | 122 | char tipc_bclink_name[] = "multicast-link"; |
123 | 123 | ||
@@ -796,7 +796,7 @@ int tipc_bclink_init(void) | |||
796 | memset(bclink, 0, sizeof(struct bclink)); | 796 | memset(bclink, 0, sizeof(struct bclink)); |
797 | INIT_LIST_HEAD(&bcl->waiting_ports); | 797 | INIT_LIST_HEAD(&bcl->waiting_ports); |
798 | bcl->next_out_no = 1; | 798 | bcl->next_out_no = 1; |
799 | bclink->node.lock = SPIN_LOCK_UNLOCKED; | 799 | spin_lock_init(&bclink->node.lock); |
800 | bcl->owner = &bclink->node; | 800 | bcl->owner = &bclink->node; |
801 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; | 801 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; |
802 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 802 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 4fa24b5e8914..7ef17a449cfd 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -566,7 +566,7 @@ restart: | |||
566 | b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr, | 566 | b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr, |
567 | bcast_scope, 2); | 567 | bcast_scope, 2); |
568 | } | 568 | } |
569 | b_ptr->publ.lock = SPIN_LOCK_UNLOCKED; | 569 | spin_lock_init(&b_ptr->publ.lock); |
570 | write_unlock_bh(&tipc_net_lock); | 570 | write_unlock_bh(&tipc_net_lock); |
571 | info("Enabled bearer <%s>, discovery domain %s, priority %u\n", | 571 | info("Enabled bearer <%s>, discovery domain %s, priority %u\n", |
572 | name, addr_string_fill(addr_string, bcast_scope), priority); | 572 | name, addr_string_fill(addr_string, bcast_scope), priority); |
diff --git a/net/tipc/config.c b/net/tipc/config.c index 3ec502fac8c3..285e1bc2d880 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -63,7 +63,7 @@ struct manager { | |||
63 | 63 | ||
64 | static struct manager mng = { 0}; | 64 | static struct manager mng = { 0}; |
65 | 65 | ||
66 | static spinlock_t config_lock = SPIN_LOCK_UNLOCKED; | 66 | static DEFINE_SPINLOCK(config_lock); |
67 | 67 | ||
68 | static const void *req_tlv_area; /* request message TLV area */ | 68 | static const void *req_tlv_area; /* request message TLV area */ |
69 | static int req_tlv_space; /* request message TLV area size */ | 69 | static int req_tlv_space; /* request message TLV area size */ |
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c index 26ef95d5fe38..55130655e1ed 100644 --- a/net/tipc/dbg.c +++ b/net/tipc/dbg.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #define MAX_STRING 512 | 41 | #define MAX_STRING 512 |
42 | 42 | ||
43 | static char print_string[MAX_STRING]; | 43 | static char print_string[MAX_STRING]; |
44 | static spinlock_t print_lock = SPIN_LOCK_UNLOCKED; | 44 | static DEFINE_SPINLOCK(print_lock); |
45 | 45 | ||
46 | static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; | 46 | static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; |
47 | struct print_buf *TIPC_CONS = &cons_buf; | 47 | struct print_buf *TIPC_CONS = &cons_buf; |
diff --git a/net/tipc/handler.c b/net/tipc/handler.c index 966f70a1b608..ae6ddf00a1aa 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c | |||
@@ -44,7 +44,7 @@ struct queue_item { | |||
44 | 44 | ||
45 | static kmem_cache_t *tipc_queue_item_cache; | 45 | static kmem_cache_t *tipc_queue_item_cache; |
46 | static struct list_head signal_queue_head; | 46 | static struct list_head signal_queue_head; |
47 | static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED; | 47 | static DEFINE_SPINLOCK(qitem_lock); |
48 | static int handler_enabled = 0; | 48 | static int handler_enabled = 0; |
49 | 49 | ||
50 | static void process_signal_queue(unsigned long dummy); | 50 | static void process_signal_queue(unsigned long dummy); |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 38571306aba5..a6926ff07bcc 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -101,7 +101,7 @@ struct name_table { | |||
101 | 101 | ||
102 | static struct name_table table = { NULL } ; | 102 | static struct name_table table = { NULL } ; |
103 | static atomic_t rsv_publ_ok = ATOMIC_INIT(0); | 103 | static atomic_t rsv_publ_ok = ATOMIC_INIT(0); |
104 | rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED; | 104 | DEFINE_RWLOCK(tipc_nametbl_lock); |
105 | 105 | ||
106 | 106 | ||
107 | static int hash(int x) | 107 | static int hash(int x) |
@@ -172,7 +172,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea | |||
172 | } | 172 | } |
173 | 173 | ||
174 | memset(nseq, 0, sizeof(*nseq)); | 174 | memset(nseq, 0, sizeof(*nseq)); |
175 | nseq->lock = SPIN_LOCK_UNLOCKED; | 175 | spin_lock_init(&nseq->lock); |
176 | nseq->type = type; | 176 | nseq->type = type; |
177 | nseq->sseqs = sseq; | 177 | nseq->sseqs = sseq; |
178 | dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n", | 178 | dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n", |
diff --git a/net/tipc/net.c b/net/tipc/net.c index f7c8223ddf7d..e5a359ab4930 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -115,7 +115,7 @@ | |||
115 | * - A local spin_lock protecting the queue of subscriber events. | 115 | * - A local spin_lock protecting the queue of subscriber events. |
116 | */ | 116 | */ |
117 | 117 | ||
118 | rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED; | 118 | DEFINE_RWLOCK(tipc_net_lock); |
119 | struct network tipc_net = { NULL }; | 119 | struct network tipc_net = { NULL }; |
120 | 120 | ||
121 | struct node *tipc_net_select_remote_node(u32 addr, u32 ref) | 121 | struct node *tipc_net_select_remote_node(u32 addr, u32 ref) |
diff --git a/net/tipc/node.c b/net/tipc/node.c index ce9678efa98a..861322b935da 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -77,7 +77,7 @@ struct node *tipc_node_create(u32 addr) | |||
77 | 77 | ||
78 | memset(n_ptr, 0, sizeof(*n_ptr)); | 78 | memset(n_ptr, 0, sizeof(*n_ptr)); |
79 | n_ptr->addr = addr; | 79 | n_ptr->addr = addr; |
80 | n_ptr->lock = SPIN_LOCK_UNLOCKED; | 80 | spin_lock_init(&n_ptr->lock); |
81 | INIT_LIST_HEAD(&n_ptr->nsub); | 81 | INIT_LIST_HEAD(&n_ptr->nsub); |
82 | n_ptr->owner = c_ptr; | 82 | n_ptr->owner = c_ptr; |
83 | tipc_cltr_attach_node(c_ptr, n_ptr); | 83 | tipc_cltr_attach_node(c_ptr, n_ptr); |
diff --git a/net/tipc/port.c b/net/tipc/port.c index 47d97404e3ee..3251c8d8e53c 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -57,8 +57,8 @@ | |||
57 | static struct sk_buff *msg_queue_head = NULL; | 57 | static struct sk_buff *msg_queue_head = NULL; |
58 | static struct sk_buff *msg_queue_tail = NULL; | 58 | static struct sk_buff *msg_queue_tail = NULL; |
59 | 59 | ||
60 | spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED; | 60 | DEFINE_SPINLOCK(tipc_port_list_lock); |
61 | static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED; | 61 | static DEFINE_SPINLOCK(queue_lock); |
62 | 62 | ||
63 | static LIST_HEAD(ports); | 63 | static LIST_HEAD(ports); |
64 | static void port_handle_node_down(unsigned long ref); | 64 | static void port_handle_node_down(unsigned long ref); |
diff --git a/net/tipc/ref.c b/net/tipc/ref.c index d2f0cce10e20..596d3c8ff750 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c | |||
@@ -63,7 +63,7 @@ | |||
63 | 63 | ||
64 | struct ref_table tipc_ref_table = { NULL }; | 64 | struct ref_table tipc_ref_table = { NULL }; |
65 | 65 | ||
66 | static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED; | 66 | static DEFINE_RWLOCK(ref_table_lock); |
67 | 67 | ||
68 | /** | 68 | /** |
69 | * tipc_ref_table_init - create reference table for objects | 69 | * tipc_ref_table_init - create reference table for objects |
@@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start) | |||
87 | index_mask = sz - 1; | 87 | index_mask = sz - 1; |
88 | for (i = sz - 1; i >= 0; i--) { | 88 | for (i = sz - 1; i >= 0; i--) { |
89 | table[i].object = NULL; | 89 | table[i].object = NULL; |
90 | table[i].lock = SPIN_LOCK_UNLOCKED; | 90 | spin_lock_init(&table[i].lock); |
91 | table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; | 91 | table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; |
92 | } | 92 | } |
93 | tipc_ref_table.entries = table; | 93 | tipc_ref_table.entries = table; |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index fc171875660c..e19b4bcd67ec 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -457,7 +457,7 @@ int tipc_subscr_start(void) | |||
457 | int res = -1; | 457 | int res = -1; |
458 | 458 | ||
459 | memset(&topsrv, 0, sizeof (topsrv)); | 459 | memset(&topsrv, 0, sizeof (topsrv)); |
460 | topsrv.lock = SPIN_LOCK_UNLOCKED; | 460 | spin_lock_init(&topsrv.lock); |
461 | INIT_LIST_HEAD(&topsrv.subscriber_list); | 461 | INIT_LIST_HEAD(&topsrv.subscriber_list); |
462 | 462 | ||
463 | spin_lock_bh(&topsrv.lock); | 463 | spin_lock_bh(&topsrv.lock); |
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c index 3f3f933976e9..1e3ae57c7228 100644 --- a/net/tipc/user_reg.c +++ b/net/tipc/user_reg.c | |||
@@ -67,7 +67,7 @@ struct tipc_user { | |||
67 | 67 | ||
68 | static struct tipc_user *users = NULL; | 68 | static struct tipc_user *users = NULL; |
69 | static u32 next_free_user = MAX_USERID + 1; | 69 | static u32 next_free_user = MAX_USERID + 1; |
70 | static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED; | 70 | static DEFINE_SPINLOCK(reg_lock); |
71 | 71 | ||
72 | /** | 72 | /** |
73 | * reg_init - create TIPC user registry (but don't activate it) | 73 | * reg_init - create TIPC user registry (but don't activate it) |
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index ac5f275b0283..b0d067be7390 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
@@ -13,11 +13,6 @@ space := $(empty) $(empty) | |||
13 | depfile = $(subst $(comma),_,$(@D)/.$(@F).d) | 13 | depfile = $(subst $(comma),_,$(@D)/.$(@F).d) |
14 | 14 | ||
15 | ### | 15 | ### |
16 | # basetarget equals the filename of the target with no extension. | ||
17 | # So 'foo/bar.o' becomes 'bar' | ||
18 | basetarget = $(basename $(notdir $@)) | ||
19 | |||
20 | ### | ||
21 | # Escape single quote for use in echo statements | 16 | # Escape single quote for use in echo statements |
22 | escsq = $(subst $(squote),'\$(squote)',$1) | 17 | escsq = $(subst $(squote),'\$(squote)',$1) |
23 | 18 | ||
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 3cb445cc7432..02a7eea5fdbc 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -117,7 +117,7 @@ $(real-objs-m:.o=.lst): quiet_modtag := [M] | |||
117 | $(obj-m) : quiet_modtag := [M] | 117 | $(obj-m) : quiet_modtag := [M] |
118 | 118 | ||
119 | # Default for not multi-part modules | 119 | # Default for not multi-part modules |
120 | modname = $(basetarget) | 120 | modname = $(*F) |
121 | 121 | ||
122 | $(multi-objs-m) : modname = $(modname-multi) | 122 | $(multi-objs-m) : modname = $(modname-multi) |
123 | $(multi-objs-m:.o=.i) : modname = $(modname-multi) | 123 | $(multi-objs-m:.o=.i) : modname = $(modname-multi) |
diff --git a/scripts/Makefile.host b/scripts/Makefile.host index 18ecd4d5df7f..2b066d12af2c 100644 --- a/scripts/Makefile.host +++ b/scripts/Makefile.host | |||
@@ -80,10 +80,8 @@ obj-dirs += $(host-objdirs) | |||
80 | ##### | 80 | ##### |
81 | # Handle options to gcc. Support building with separate output directory | 81 | # Handle options to gcc. Support building with separate output directory |
82 | 82 | ||
83 | _hostc_flags = $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) \ | 83 | _hostc_flags = $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(HOSTCFLAGS_$(*F).o) |
84 | $(HOSTCFLAGS_$(basetarget).o) | 84 | _hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) $(HOSTCXXFLAGS_$(*F).o) |
85 | _hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \ | ||
86 | $(HOSTCXXFLAGS_$(basetarget).o) | ||
87 | 85 | ||
88 | ifeq ($(KBUILD_SRC),) | 86 | ifeq ($(KBUILD_SRC),) |
89 | __hostc_flags = $(_hostc_flags) | 87 | __hostc_flags = $(_hostc_flags) |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index fc498fee68ed..2cb4935e85d1 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -82,12 +82,12 @@ obj-dirs := $(addprefix $(obj)/,$(obj-dirs)) | |||
82 | # than one module. In that case KBUILD_MODNAME will be set to foo_bar, | 82 | # than one module. In that case KBUILD_MODNAME will be set to foo_bar, |
83 | # where foo and bar are the name of the modules. | 83 | # where foo and bar are the name of the modules. |
84 | name-fix = $(subst $(comma),_,$(subst -,_,$1)) | 84 | name-fix = $(subst $(comma),_,$(subst -,_,$1)) |
85 | basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))" | 85 | basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(*F)))" |
86 | modname_flags = $(if $(filter 1,$(words $(modname))),\ | 86 | modname_flags = $(if $(filter 1,$(words $(modname))),\ |
87 | -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))") | 87 | -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))") |
88 | 88 | ||
89 | _c_flags = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(basetarget).o) | 89 | _c_flags = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(*F).o) |
90 | _a_flags = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o) | 90 | _a_flags = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o) |
91 | _cpp_flags = $(CPPFLAGS) $(EXTRA_CPPFLAGS) $(CPPFLAGS_$(@F)) | 91 | _cpp_flags = $(CPPFLAGS) $(EXTRA_CPPFLAGS) $(CPPFLAGS_$(@F)) |
92 | 92 | ||
93 | # If building the kernel in a separate objtree expand all occurrences | 93 | # If building the kernel in a separate objtree expand all occurrences |
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index e83613e0e827..576cce5e387f 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost | |||
@@ -72,7 +72,7 @@ $(modules:.ko=.mod.c): __modpost ; | |||
72 | # Step 5), compile all *.mod.c files | 72 | # Step 5), compile all *.mod.c files |
73 | 73 | ||
74 | # modname is set to make c_flags define KBUILD_MODNAME | 74 | # modname is set to make c_flags define KBUILD_MODNAME |
75 | modname = $(basetarget) | 75 | modname = $(*F) |
76 | 76 | ||
77 | quiet_cmd_cc_o_c = CC $@ | 77 | quiet_cmd_cc_o_c = CC $@ |
78 | cmd_cc_o_c = $(CC) $(c_flags) $(CFLAGS_MODULE) \ | 78 | cmd_cc_o_c = $(CC) $(c_flags) $(CFLAGS_MODULE) \ |
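The Kbuild changes in this patch drop the basetarget helper in favour of GNU make's $(*F) automatic variable. For an object built by a pattern rule, e.g. foo/bar.o, the stem is foo/bar and $(*F) (the file part of the stem) expands to bar, the same value basetarget computed from $(basename $(notdir $@)), so per-file overrides such as CFLAGS_bar.o and the KBUILD_BASENAME/modname defines keep working as before.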
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh new file mode 100644 index 000000000000..43098afe7431 --- /dev/null +++ b/scripts/rt-tester/check-all.sh | |||
@@ -0,0 +1,22 @@ | |||
1 | |||
2 | |||
3 | function testit () | ||
4 | { | ||
5 | printf "%-30s: " $1 | ||
6 | ./rt-tester.py $1 | grep Pass | ||
7 | } | ||
8 | |||
9 | testit t2-l1-2rt-sameprio.tst | ||
10 | testit t2-l1-pi.tst | ||
11 | testit t2-l1-signal.tst | ||
12 | #testit t2-l2-2rt-deadlock.tst | ||
13 | testit t3-l1-pi-1rt.tst | ||
14 | testit t3-l1-pi-2rt.tst | ||
15 | testit t3-l1-pi-3rt.tst | ||
16 | testit t3-l1-pi-signal.tst | ||
17 | testit t3-l1-pi-steal.tst | ||
18 | testit t3-l2-pi.tst | ||
19 | testit t4-l2-pi-deboost.tst | ||
20 | testit t5-l4-pi-boost-deboost.tst | ||
21 | testit t5-l4-pi-boost-deboost-setsched.tst | ||
22 | |||
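check-all.sh is a thin wrapper that feeds each test case to rt-tester.py and prints only its Pass line per test. It assumes the in-kernel rt-mutex tester (enabled by a config option added elsewhere in this series, presumably RT_MUTEX_TESTER) is active, that it is run from scripts/rt-tester/, and that the caller has permission to write the /sys/devices/system/rttest files.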
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py new file mode 100644 index 000000000000..4c79660793cf --- /dev/null +++ b/scripts/rt-tester/rt-tester.py | |||
@@ -0,0 +1,222 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # | ||
3 | # rt-mutex tester | ||
4 | # | ||
5 | # (C) 2006 Thomas Gleixner <tglx@linutronix.de> | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | import os | ||
12 | import sys | ||
13 | import getopt | ||
14 | import shutil | ||
15 | import string | ||
16 | |||
17 | # Globals | ||
18 | quiet = 0 | ||
19 | test = 0 | ||
20 | comments = 0 | ||
21 | |||
22 | sysfsprefix = "/sys/devices/system/rttest/rttest" | ||
23 | statusfile = "/status" | ||
24 | commandfile = "/command" | ||
25 | |||
26 | # Command opcodes | ||
27 | cmd_opcodes = { | ||
28 | "schedother" : "1", | ||
29 | "schedfifo" : "2", | ||
30 | "lock" : "3", | ||
31 | "locknowait" : "4", | ||
32 | "lockint" : "5", | ||
33 | "lockintnowait" : "6", | ||
34 | "lockcont" : "7", | ||
35 | "unlock" : "8", | ||
36 | "lockbkl" : "9", | ||
37 | "unlockbkl" : "10", | ||
38 | "signal" : "11", | ||
39 | "resetevent" : "98", | ||
40 | "reset" : "99", | ||
41 | } | ||
42 | |||
43 | test_opcodes = { | ||
44 | "prioeq" : ["P" , "eq" , None], | ||
45 | "priolt" : ["P" , "lt" , None], | ||
46 | "priogt" : ["P" , "gt" , None], | ||
47 | "nprioeq" : ["N" , "eq" , None], | ||
48 | "npriolt" : ["N" , "lt" , None], | ||
49 | "npriogt" : ["N" , "gt" , None], | ||
50 | "unlocked" : ["M" , "eq" , 0], | ||
51 | "trylock" : ["M" , "eq" , 1], | ||
52 | "blocked" : ["M" , "eq" , 2], | ||
53 | "blockedwake" : ["M" , "eq" , 3], | ||
54 | "locked" : ["M" , "eq" , 4], | ||
55 | "opcodeeq" : ["O" , "eq" , None], | ||
56 | "opcodelt" : ["O" , "lt" , None], | ||
57 | "opcodegt" : ["O" , "gt" , None], | ||
58 | "eventeq" : ["E" , "eq" , None], | ||
59 | "eventlt" : ["E" , "lt" , None], | ||
60 | "eventgt" : ["E" , "gt" , None], | ||
61 | } | ||
62 | |||
63 | # Print usage information | ||
64 | def usage(): | ||
65 | print "rt-tester.py <-c -h -q -t> <testfile>" | ||
66 | print " -c display comments after first command" | ||
67 | print " -h help" | ||
68 | print " -q quiet mode" | ||
69 | print " -t test mode (syntax check)" | ||
70 | print " testfile: read test specification from testfile" | ||
71 | print " otherwise from stdin" | ||
72 | return | ||
73 | |||
74 | # Print progress when not in quiet mode | ||
75 | def progress(str): | ||
76 | if not quiet: | ||
77 | print str | ||
78 | |||
79 | # Analyse a status value | ||
80 | def analyse(val, top, arg): | ||
81 | |||
82 | intval = int(val) | ||
83 | |||
84 | if top[0] == "M": | ||
85 | intval = intval / (10 ** int(arg)) | ||
86 | intval = intval % 10 | ||
87 | argval = top[2] | ||
88 | elif top[0] == "O": | ||
89 | argval = int(cmd_opcodes.get(arg, arg)) | ||
90 | else: | ||
91 | argval = int(arg) | ||
92 | |||
93 | # progress("%d %s %d" %(intval, top[1], argval)) | ||
94 | |||
95 | if top[1] == "eq" and intval == argval: | ||
96 | return 1 | ||
97 | if top[1] == "lt" and intval < argval: | ||
98 | return 1 | ||
99 | if top[1] == "gt" and intval > argval: | ||
100 | return 1 | ||
101 | return 0 | ||
102 | |||
103 | # Parse the commandline | ||
104 | try: | ||
105 | (options, arguments) = getopt.getopt(sys.argv[1:],'chqt') | ||
106 | except getopt.GetoptError, ex: | ||
107 | usage() | ||
108 | sys.exit(1) | ||
109 | |||
110 | # Parse commandline options | ||
111 | for option, value in options: | ||
112 | if option == "-c": | ||
113 | comments = 1 | ||
114 | elif option == "-q": | ||
115 | quiet = 1 | ||
116 | elif option == "-t": | ||
117 | test = 1 | ||
118 | elif option == '-h': | ||
119 | usage() | ||
120 | sys.exit(0) | ||
121 | |||
122 | # Select the input source | ||
123 | if arguments: | ||
124 | try: | ||
125 | fd = open(arguments[0]) | ||
126 | except Exception,ex: | ||
127 | sys.stderr.write("File not found %s\n" %(arguments[0])) | ||
128 | sys.exit(1) | ||
129 | else: | ||
130 | fd = sys.stdin | ||
131 | |||
132 | linenr = 0 | ||
133 | |||
134 | # Read the test patterns | ||
135 | while 1: | ||
136 | |||
137 | linenr = linenr + 1 | ||
138 | line = fd.readline() | ||
139 | if not len(line): | ||
140 | break | ||
141 | |||
142 | line = line.strip() | ||
143 | parts = line.split(":") | ||
144 | |||
145 | if not parts or len(parts) < 1: | ||
146 | continue | ||
147 | |||
148 | if len(parts[0]) == 0: | ||
149 | continue | ||
150 | |||
151 | if parts[0].startswith("#"): | ||
152 | if comments > 1: | ||
153 | progress(line) | ||
154 | continue | ||
155 | |||
156 | if comments == 1: | ||
157 | comments = 2 | ||
158 | |||
159 | progress(line) | ||
160 | |||
161 | cmd = parts[0].strip().lower() | ||
162 | opc = parts[1].strip().lower() | ||
163 | tid = parts[2].strip() | ||
164 | dat = parts[3].strip() | ||
165 | |||
166 | try: | ||
167 | # Test or wait for a status value | ||
168 | if cmd == "t" or cmd == "w": | ||
169 | testop = test_opcodes[opc] | ||
170 | |||
171 | fname = "%s%s%s" %(sysfsprefix, tid, statusfile) | ||
172 | if test: | ||
173 | print fname | ||
174 | continue | ||
175 | |||
176 | while 1: | ||
177 | query = 1 | ||
178 | fsta = open(fname, 'r') | ||
179 | status = fsta.readline().strip() | ||
180 | fsta.close() | ||
181 | stat = status.split(",") | ||
182 | for s in stat: | ||
183 | s = s.strip() | ||
184 | if s.startswith(testop[0]): | ||
185 | # Separate status value | ||
186 | val = s[2:].strip() | ||
187 | query = analyse(val, testop, dat) | ||
188 | break | ||
189 | if query or cmd == "t": | ||
190 | break | ||
191 | |||
192 | progress(" " + status) | ||
193 | |||
194 | if not query: | ||
195 | sys.stderr.write("Test failed in line %d\n" %(linenr)) | ||
196 | sys.exit(1) | ||
197 | |||
198 | # Issue a command to the tester | ||
199 | elif cmd == "c": | ||
200 | cmdnr = cmd_opcodes[opc] | ||
201 | # Build command string and sys filename | ||
202 | cmdstr = "%s:%s" %(cmdnr, dat) | ||
203 | fname = "%s%s%s" %(sysfsprefix, tid, commandfile) | ||
204 | if test: | ||
205 | print fname | ||
206 | continue | ||
207 | fcmd = open(fname, 'w') | ||
208 | fcmd.write(cmdstr) | ||
209 | fcmd.close() | ||
210 | |||
211 | except Exception,ex: | ||
212 | sys.stderr.write(str(ex)) | ||
213 | sys.stderr.write("\nSyntax error in line %d\n" %(linenr)) | ||
214 | if not test: | ||
215 | fd.close() | ||
216 | sys.exit(1) | ||
217 | |||
218 | # Normal exit pass | ||
219 | print "Pass" | ||
220 | sys.exit(0) | ||
221 | |||
222 | |||
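rt-tester.py drives one sysfs node per test thread: a C (command) line is translated through cmd_opcodes and written as an "opcode:argument" string to /sys/devices/system/rttest/rttest<N>/command (for example, locknowait on lock 0 for thread 0 becomes the string 4:0 written to rttest0/command), while T (test) and W (wait) lines poll the matching .../status file and compare the field whose prefix letter matches the test opcode (P, N, M, O or E) against the expected value, with W retrying until the condition holds and T failing the run immediately if it does not.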
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst new file mode 100644 index 000000000000..8821f27cc8be --- /dev/null +++ b/scripts/rt-tester/t2-l1-2rt-sameprio.tst | |||
@@ -0,0 +1,99 @@ | |||
1 | # | ||
2 | # RT-Mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal 0 | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 2 threads 1 lock | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedfifo: 0: 80 | ||
60 | C: schedfifo: 1: 80 | ||
61 | |||
62 | # T0 lock L0 | ||
63 | C: locknowait: 0: 0 | ||
64 | C: locknowait: 1: 0 | ||
65 | W: locked: 0: 0 | ||
66 | W: blocked: 1: 0 | ||
67 | T: prioeq: 0: 80 | ||
68 | |||
69 | # T0 unlock L0 | ||
70 | C: unlock: 0: 0 | ||
71 | W: locked: 1: 0 | ||
72 | |||
73 | # Verify T0 | ||
74 | W: unlocked: 0: 0 | ||
75 | T: prioeq: 0: 80 | ||
76 | |||
77 | # Unlock | ||
78 | C: unlock: 1: 0 | ||
79 | W: unlocked: 1: 0 | ||
80 | |||
81 | # T1,T0 lock L0 | ||
82 | C: locknowait: 1: 0 | ||
83 | C: locknowait: 0: 0 | ||
84 | W: locked: 1: 0 | ||
85 | W: blocked: 0: 0 | ||
86 | T: prioeq: 1: 80 | ||
87 | |||
88 | # T1 unlock L0 | ||
89 | C: unlock: 1: 0 | ||
90 | W: locked: 0: 0 | ||
91 | |||
92 | # Verify T1 | ||
93 | W: unlocked: 1: 0 | ||
94 | T: prioeq: 1: 80 | ||
95 | |||
96 | # Unlock and exit | ||
97 | C: unlock: 0: 0 | ||
98 | W: unlocked: 0: 0 | ||
99 | |||
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst new file mode 100644 index 000000000000..cde1f189a02b --- /dev/null +++ b/scripts/rt-tester/t2-l1-pi.tst | |||
@@ -0,0 +1,82 @@ | |||
1 | # | ||
2 | # RT-Mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal 0 | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 2 threads 1 lock with priority inversion | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedfifo: 1: 80 | ||
61 | |||
62 | # T0 lock L0 | ||
63 | C: locknowait: 0: 0 | ||
64 | W: locked: 0: 0 | ||
65 | |||
66 | # T1 lock L0 | ||
67 | C: locknowait: 1: 0 | ||
68 | W: blocked: 1: 0 | ||
69 | T: prioeq: 0: 80 | ||
70 | |||
71 | # T0 unlock L0 | ||
72 | C: unlock: 0: 0 | ||
73 | W: locked: 1: 0 | ||
74 | |||
75 | # Verify T1 | ||
76 | W: unlocked: 0: 0 | ||
77 | T: priolt: 0: 1 | ||
78 | |||
79 | # Unlock and exit | ||
80 | C: unlock: 1: 0 | ||
81 | W: unlocked: 1: 0 | ||
82 | |||
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst new file mode 100644 index 000000000000..3ab0bfc49950 --- /dev/null +++ b/scripts/rt-tester/t2-l1-signal.tst | |||
@@ -0,0 +1,77 @@ | |||
1 | # | ||
2 | # RT-Mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal 0 | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 2 threads 1 lock with priority inversion | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedother: 1: 0 | ||
61 | |||
62 | # T0 lock L0 | ||
63 | C: locknowait: 0: 0 | ||
64 | W: locked: 0: 0 | ||
65 | |||
66 | # T1 lock L0 | ||
67 | C: lockintnowait: 1: 0 | ||
68 | W: blocked: 1: 0 | ||
69 | |||
70 | # Interrupt T1 | ||
71 | C: signal: 1: 0 | ||
72 | W: unlocked: 1: 0 | ||
73 | T: opcodeeq: 1: -4 | ||
74 | |||
75 | # Unlock and exit | ||
76 | C: unlock: 0: 0 | ||
77 | W: unlocked: 0: 0 | ||
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst new file mode 100644 index 000000000000..f4b5d5d6215f --- /dev/null +++ b/scripts/rt-tester/t2-l2-2rt-deadlock.tst | |||
@@ -0,0 +1,89 @@ | |||
1 | # | ||
2 | # RT-Mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal 0 | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 2 threads 2 lock | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedfifo: 0: 80 | ||
60 | C: schedfifo: 1: 80 | ||
61 | |||
62 | # T0 lock L0 | ||
63 | C: locknowait: 0: 0 | ||
64 | W: locked: 0: 0 | ||
65 | |||
66 | # T1 lock L1 | ||
67 | C: locknowait: 1: 1 | ||
68 | W: locked: 1: 1 | ||
69 | |||
70 | # T0 lock L1 | ||
71 | C: lockintnowait: 0: 1 | ||
72 | W: blocked: 0: 1 | ||
73 | |||
74 | # T1 lock L0 | ||
75 | C: lockintnowait: 1: 0 | ||
76 | W: blocked: 1: 0 | ||
77 | |||
78 | # Make deadlock go away | ||
79 | C: signal: 1: 0 | ||
80 | W: unlocked: 1: 0 | ||
81 | C: signal: 0: 0 | ||
82 | W: unlocked: 0: 1 | ||
83 | |||
84 | # Unlock and exit | ||
85 | C: unlock: 0: 0 | ||
86 | W: unlocked: 0: 0 | ||
87 | C: unlock: 1: 1 | ||
88 | W: unlocked: 1: 1 | ||
89 | |||
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst new file mode 100644 index 000000000000..63440ca2cce9 --- /dev/null +++ b/scripts/rt-tester/t3-l1-pi-1rt.tst | |||
@@ -0,0 +1,92 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 3 threads 1 lock PI | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedother: 1: 0 | ||
61 | C: schedfifo: 2: 82 | ||
62 | |||
63 | # T0 lock L0 | ||
64 | C: locknowait: 0: 0 | ||
65 | W: locked: 0: 0 | ||
66 | |||
67 | # T1 lock L0 | ||
68 | C: locknowait: 1: 0 | ||
69 | W: blocked: 1: 0 | ||
70 | T: priolt: 0: 1 | ||
71 | |||
72 | # T2 lock L0 | ||
73 | C: locknowait: 2: 0 | ||
74 | W: blocked: 2: 0 | ||
75 | T: prioeq: 0: 82 | ||
76 | |||
77 | # T0 unlock L0 | ||
78 | C: unlock: 0: 0 | ||
79 | |||
80 | # Wait until T2 got the lock | ||
81 | W: locked: 2: 0 | ||
82 | W: unlocked: 0: 0 | ||
83 | T: priolt: 0: 1 | ||
84 | |||
85 | # T2 unlock L0 | ||
86 | C: unlock: 2: 0 | ||
87 | |||
88 | W: unlocked: 2: 0 | ||
89 | W: locked: 1: 0 | ||
90 | |||
91 | C: unlock: 1: 0 | ||
92 | W: unlocked: 1: 0 | ||
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst new file mode 100644 index 000000000000..e5816fe67df3 --- /dev/null +++ b/scripts/rt-tester/t3-l1-pi-2rt.tst | |||
@@ -0,0 +1,93 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 3 threads 1 lock PI | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedfifo: 1: 81 | ||
61 | C: schedfifo: 2: 82 | ||
62 | |||
63 | # T0 lock L0 | ||
64 | C: locknowait: 0: 0 | ||
65 | W: locked: 0: 0 | ||
66 | |||
67 | # T1 lock L0 | ||
68 | C: locknowait: 1: 0 | ||
69 | W: blocked: 1: 0 | ||
70 | T: prioeq: 0: 81 | ||
71 | |||
72 | # T2 lock L0 | ||
73 | C: locknowait: 2: 0 | ||
74 | W: blocked: 2: 0 | ||
75 | T: prioeq: 0: 82 | ||
76 | T: prioeq: 1: 81 | ||
77 | |||
78 | # T0 unlock L0 | ||
79 | C: unlock: 0: 0 | ||
80 | |||
81 | # Wait until T2 got the lock | ||
82 | W: locked: 2: 0 | ||
83 | W: unlocked: 0: 0 | ||
84 | T: priolt: 0: 1 | ||
85 | |||
86 | # T2 unlock L0 | ||
87 | C: unlock: 2: 0 | ||
88 | |||
89 | W: unlocked: 2: 0 | ||
90 | W: locked: 1: 0 | ||
91 | |||
92 | C: unlock: 1: 0 | ||
93 | W: unlocked: 1: 0 | ||
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst new file mode 100644 index 000000000000..718b82b5d3bb --- /dev/null +++ b/scripts/rt-tester/t3-l1-pi-3rt.tst | |||
@@ -0,0 +1,92 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 3 threads 1 lock PI | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedfifo: 0: 80 | ||
60 | C: schedfifo: 1: 81 | ||
61 | C: schedfifo: 2: 82 | ||
62 | |||
63 | # T0 lock L0 | ||
64 | C: locknowait: 0: 0 | ||
65 | W: locked: 0: 0 | ||
66 | |||
67 | # T1 lock L0 | ||
68 | C: locknowait: 1: 0 | ||
69 | W: blocked: 1: 0 | ||
70 | T: prioeq: 0: 81 | ||
71 | |||
72 | # T2 lock L0 | ||
73 | C: locknowait: 2: 0 | ||
74 | W: blocked: 2: 0 | ||
75 | T: prioeq: 0: 82 | ||
76 | |||
77 | # T0 unlock L0 | ||
78 | C: unlock: 0: 0 | ||
79 | |||
80 | # Wait until T2 got the lock | ||
81 | W: locked: 2: 0 | ||
82 | W: unlocked: 0: 0 | ||
83 | T: prioeq: 0: 80 | ||
84 | |||
85 | # T2 unlock L0 | ||
86 | C: unlock: 2: 0 | ||
87 | |||
88 | W: locked: 1: 0 | ||
89 | W: unlocked: 2: 0 | ||
90 | |||
91 | C: unlock: 1: 0 | ||
92 | W: unlocked: 1: 0 | ||
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst new file mode 100644 index 000000000000..c6e213563498 --- /dev/null +++ b/scripts/rt-tester/t3-l1-pi-signal.tst | |||
@@ -0,0 +1,98 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # Reset event counter | ||
53 | C: resetevent: 0: 0 | ||
54 | W: opcodeeq: 0: 0 | ||
55 | |||
56 | # Set priorities | ||
57 | C: schedother: 0: 0 | ||
58 | C: schedfifo: 1: 80 | ||
59 | C: schedfifo: 2: 81 | ||
60 | |||
61 | # T0 lock L0 | ||
62 | C: lock: 0: 0 | ||
63 | W: locked: 0: 0 | ||
64 | |||
65 | # T1 lock L0, no wait in the wakeup path | ||
66 | C: locknowait: 1: 0 | ||
67 | W: blocked: 1: 0 | ||
68 | T: prioeq: 0: 80 | ||
69 | T: prioeq: 1: 80 | ||
70 | |||
71 | # T2 lock L0 interruptible, no wait in the wakeup path | ||
72 | C: lockintnowait: 2: 0 | ||
73 | W: blocked: 2: 0 | ||
74 | T: prioeq: 0: 81 | ||
75 | T: prioeq: 1: 80 | ||
76 | |||
77 | # Interrupt T2 | ||
78 | C: signal: 2: 2 | ||
79 | W: unlocked: 2: 0 | ||
80 | T: prioeq: 1: 80 | ||
81 | T: prioeq: 0: 80 | ||
82 | |||
83 | T: locked: 0: 0 | ||
84 | T: blocked: 1: 0 | ||
85 | |||
86 | # T0 unlock L0 | ||
87 | C: unlock: 0: 0 | ||
88 | |||
89 | # Wait until T1 has locked L0 and exit | ||
90 | W: locked: 1: 0 | ||
91 | W: unlocked: 0: 0 | ||
92 | T: priolt: 0: 1 | ||
93 | |||
94 | C: unlock: 1: 0 | ||
95 | W: unlocked: 1: 0 | ||
96 | |||
97 | |||
98 | |||
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst new file mode 100644 index 000000000000..f53749d59d79 --- /dev/null +++ b/scripts/rt-tester/t3-l1-pi-steal.tst | |||
@@ -0,0 +1,96 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 3 threads 1 lock PI steal pending ownership | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedfifo: 1: 80 | ||
61 | C: schedfifo: 2: 81 | ||
62 | |||
63 | # T0 lock L0 | ||
64 | C: lock: 0: 0 | ||
65 | W: locked: 0: 0 | ||
66 | |||
67 | # T1 lock L0 | ||
68 | C: lock: 1: 0 | ||
69 | W: blocked: 1: 0 | ||
70 | T: prioeq: 0: 80 | ||
71 | |||
72 | # T0 unlock L0 | ||
73 | C: unlock: 0: 0 | ||
74 | |||
75 | # Wait until T1 is in the wakeup loop | ||
76 | W: blockedwake: 1: 0 | ||
77 | T: priolt: 0: 1 | ||
78 | |||
79 | # T2 lock L0 | ||
80 | C: lock: 2: 0 | ||
81 | # T1 leave wakeup loop | ||
82 | C: lockcont: 1: 0 | ||
83 | |||
84 | # T2 must have the lock and T1 must be blocked | ||
85 | W: locked: 2: 0 | ||
86 | W: blocked: 1: 0 | ||
87 | |||
88 | # T2 unlock L0 | ||
89 | C: unlock: 2: 0 | ||
90 | |||
91 | # Wait until T1 is in the wakeup loop and let it run | ||
92 | W: blockedwake: 1: 0 | ||
93 | C: lockcont: 1: 0 | ||
94 | W: locked: 1: 0 | ||
95 | C: unlock: 1: 0 | ||
96 | W: unlocked: 1: 0 | ||
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst new file mode 100644 index 000000000000..cdc3e4fd7bac --- /dev/null +++ b/scripts/rt-tester/t3-l2-pi.tst | |||
@@ -0,0 +1,92 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 3 threads 2 lock PI | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedother: 1: 0 | ||
61 | C: schedfifo: 2: 82 | ||
62 | |||
63 | # T0 lock L0 | ||
64 | C: locknowait: 0: 0 | ||
65 | W: locked: 0: 0 | ||
66 | |||
67 | # T1 lock L0 | ||
68 | C: locknowait: 1: 0 | ||
69 | W: blocked: 1: 0 | ||
70 | T: priolt: 0: 1 | ||
71 | |||
72 | # T2 lock L0 | ||
73 | C: locknowait: 2: 0 | ||
74 | W: blocked: 2: 0 | ||
75 | T: prioeq: 0: 82 | ||
76 | |||
77 | # T0 unlock L0 | ||
78 | C: unlock: 0: 0 | ||
79 | |||
80 | # Wait until T2 got the lock | ||
81 | W: locked: 2: 0 | ||
82 | W: unlocked: 0: 0 | ||
83 | T: priolt: 0: 1 | ||
84 | |||
85 | # T2 unlock L0 | ||
86 | C: unlock: 2: 0 | ||
87 | |||
88 | W: unlocked: 2: 0 | ||
89 | W: locked: 1: 0 | ||
90 | |||
91 | C: unlock: 1: 0 | ||
92 | W: unlocked: 1: 0 | ||
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst new file mode 100644 index 000000000000..baa14137f473 --- /dev/null +++ b/scripts/rt-tester/t4-l2-pi-deboost.tst | |||
@@ -0,0 +1,123 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl dont care | ||
43 | # blockedbkl dont care | ||
44 | # unlockedbkl dont care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 4 threads 2 lock PI | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedother: 1: 0 | ||
61 | C: schedfifo: 2: 82 | ||
62 | C: schedfifo: 3: 83 | ||
63 | |||
64 | # T0 lock L0 | ||
65 | C: locknowait: 0: 0 | ||
66 | W: locked: 0: 0 | ||
67 | |||
68 | # T1 lock L1 | ||
69 | C: locknowait: 1: 1 | ||
70 | W: locked: 1: 1 | ||
71 | |||
72 | # T3 lock L0 | ||
73 | C: lockintnowait: 3: 0 | ||
74 | W: blocked: 3: 0 | ||
75 | T: prioeq: 0: 83 | ||
76 | |||
77 | # T0 lock L1 | ||
78 | C: lock: 0: 1 | ||
79 | W: blocked: 0: 1 | ||
80 | T: prioeq: 1: 83 | ||
81 | |||
82 | # T1 unlock L1 | ||
83 | C: unlock: 1: 1 | ||
84 | |||
85 | # Wait until T0 is in the wakeup code | ||
86 | W: blockedwake: 0: 1 | ||
87 | |||
88 | # Verify that T1 is unboosted | ||
89 | W: unlocked: 1: 1 | ||
90 | T: priolt: 1: 1 | ||
91 | |||
92 | # T2 lock L1 (T0 is boosted and pending owner!) | ||
93 | C: locknowait: 2: 1 | ||
94 | W: blocked: 2: 1 | ||
95 | T: prioeq: 0: 83 | ||
96 | |||
97 | # Interrupt T3 and wait until T3 has returned | ||
98 | C: signal: 3: 0 | ||
99 | W: unlocked: 3: 0 | ||
100 | |||
101 | # Verify prio of T0 (still pending owner, | ||
102 | # but T2 is enqueued due to the previous boost by T3) | ||
103 | T: prioeq: 0: 82 | ||
104 | |||
105 | # Let T0 continue | ||
106 | C: lockcont: 0: 1 | ||
107 | W: locked: 0: 1 | ||
108 | |||
109 | # Unlock L1 and let T2 get L1 | ||
110 | C: unlock: 0: 1 | ||
111 | W: locked: 2: 1 | ||
112 | |||
113 | # Verify that T0 is unboosted | ||
114 | W: unlocked: 0: 1 | ||
115 | T: priolt: 0: 1 | ||
116 | |||
117 | # Unlock everything and exit | ||
118 | C: unlock: 2: 1 | ||
119 | W: unlocked: 2: 1 | ||
120 | |||
121 | C: unlock: 0: 0 | ||
122 | W: unlocked: 0: 0 | ||
123 | |||
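The prioeq/priolt expectations in the deboost script above follow the usual priority-inheritance rule: a lock owner's (or pending owner's) effective priority is the maximum of its own normal priority and the priority of the highest-priority waiter on any lock it holds. A small conceptual sketch of that rule — not the kernel's rt-mutex code, which lives in kernel/rtmutex.c and uses the inverted kernel-internal priority scale — using the user-visible SCHED_FIFO convention of the .tst files (larger number = higher priority):

	/* Conceptual sketch of the boosting rule exercised above. */
	struct waiter { int prio; struct waiter *next; };

	struct task {
		int normal_prio;        /* priority without boosting           */
		struct waiter *waiters; /* waiters on locks this task owns     */
	};

	static int effective_prio(const struct task *t)
	{
		int prio = t->normal_prio;
		const struct waiter *w;

		for (w = t->waiters; w; w = w->next)
			if (w->prio > prio)
				prio = w->prio;   /* boost to the top waiter */
		return prio;
	}

With T0 at normal priority 0 and T3 (83) waiting, this yields 83; once T3 is interrupted and only T2 (82) remains, it drops to 82, matching the expectations in the script.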
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst new file mode 100644 index 000000000000..e6ec0c81b54d --- /dev/null +++ b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst | |||
@@ -0,0 +1,183 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl don't care | ||
43 | # blockedbkl don't care | ||
44 | # unlockedbkl don't care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 5 threads 4 lock PI - modify priority of blocked threads | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedfifo: 1: 81 | ||
61 | C: schedfifo: 2: 82 | ||
62 | C: schedfifo: 3: 83 | ||
63 | C: schedfifo: 4: 84 | ||
64 | |||
65 | # T0 lock L0 | ||
66 | C: locknowait: 0: 0 | ||
67 | W: locked: 0: 0 | ||
68 | |||
69 | # T1 lock L1 | ||
70 | C: locknowait: 1: 1 | ||
71 | W: locked: 1: 1 | ||
72 | |||
73 | # T1 lock L0 | ||
74 | C: lockintnowait: 1: 0 | ||
75 | W: blocked: 1: 0 | ||
76 | T: prioeq: 0: 81 | ||
77 | |||
78 | # T2 lock L2 | ||
79 | C: locknowait: 2: 2 | ||
80 | W: locked: 2: 2 | ||
81 | |||
82 | # T2 lock L1 | ||
83 | C: lockintnowait: 2: 1 | ||
84 | W: blocked: 2: 1 | ||
85 | T: prioeq: 0: 82 | ||
86 | T: prioeq: 1: 82 | ||
87 | |||
88 | # T3 lock L3 | ||
89 | C: locknowait: 3: 3 | ||
90 | W: locked: 3: 3 | ||
91 | |||
92 | # T3 lock L2 | ||
93 | C: lockintnowait: 3: 2 | ||
94 | W: blocked: 3: 2 | ||
95 | T: prioeq: 0: 83 | ||
96 | T: prioeq: 1: 83 | ||
97 | T: prioeq: 2: 83 | ||
98 | |||
99 | # T4 lock L3 | ||
100 | C: lockintnowait: 4: 3 | ||
101 | W: blocked: 4: 3 | ||
102 | T: prioeq: 0: 84 | ||
103 | T: prioeq: 1: 84 | ||
104 | T: prioeq: 2: 84 | ||
105 | T: prioeq: 3: 84 | ||
106 | |||
107 | # Reduce prio of T4 | ||
108 | C: schedfifo: 4: 80 | ||
109 | T: prioeq: 0: 83 | ||
110 | T: prioeq: 1: 83 | ||
111 | T: prioeq: 2: 83 | ||
112 | T: prioeq: 3: 83 | ||
113 | T: prioeq: 4: 80 | ||
114 | |||
115 | # Increase prio of T4 | ||
116 | C: schedfifo: 4: 84 | ||
117 | T: prioeq: 0: 84 | ||
118 | T: prioeq: 1: 84 | ||
119 | T: prioeq: 2: 84 | ||
120 | T: prioeq: 3: 84 | ||
121 | T: prioeq: 4: 84 | ||
122 | |||
123 | # Reduce prio of T3 | ||
124 | C: schedfifo: 3: 80 | ||
125 | T: prioeq: 0: 84 | ||
126 | T: prioeq: 1: 84 | ||
127 | T: prioeq: 2: 84 | ||
128 | T: prioeq: 3: 84 | ||
129 | T: prioeq: 4: 84 | ||
130 | |||
131 | # Increase prio of T3 | ||
132 | C: schedfifo: 3: 85 | ||
133 | T: prioeq: 0: 85 | ||
134 | T: prioeq: 1: 85 | ||
135 | T: prioeq: 2: 85 | ||
136 | T: prioeq: 3: 85 | ||
137 | T: prioeq: 4: 84 | ||
138 | |||
139 | # Reduce prio of T3 | ||
140 | C: schedfifo: 3: 83 | ||
141 | T: prioeq: 0: 84 | ||
142 | T: prioeq: 1: 84 | ||
143 | T: prioeq: 2: 84 | ||
144 | T: prioeq: 3: 84 | ||
145 | T: prioeq: 4: 84 | ||
146 | |||
147 | # Signal T4 | ||
148 | C: signal: 4: 0 | ||
149 | W: unlocked: 4: 3 | ||
150 | T: prioeq: 0: 83 | ||
151 | T: prioeq: 1: 83 | ||
152 | T: prioeq: 2: 83 | ||
153 | T: prioeq: 3: 83 | ||
154 | |||
155 | # Signal T3 | ||
156 | C: signal: 3: 0 | ||
157 | W: unlocked: 3: 2 | ||
158 | T: prioeq: 0: 82 | ||
159 | T: prioeq: 1: 82 | ||
160 | T: prioeq: 2: 82 | ||
161 | |||
162 | # Signal T2 | ||
163 | C: signal: 2: 0 | ||
164 | W: unlocked: 2: 1 | ||
165 | T: prioeq: 0: 81 | ||
166 | T: prioeq: 1: 81 | ||
167 | |||
168 | # Signal T1 | ||
169 | C: signal: 1: 0 | ||
170 | W: unlocked: 1: 0 | ||
171 | T: priolt: 0: 1 | ||
172 | |||
173 | # Unlock and exit | ||
174 | C: unlock: 3: 3 | ||
175 | C: unlock: 2: 2 | ||
176 | C: unlock: 1: 1 | ||
177 | C: unlock: 0: 0 | ||
178 | |||
179 | W: unlocked: 3: 3 | ||
180 | W: unlocked: 2: 2 | ||
181 | W: unlocked: 1: 1 | ||
182 | W: unlocked: 0: 0 | ||
183 | |||
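The setsched part of this script checks that changing the priority of a thread that is itself blocked re-evaluates the whole lock chain: the new top-waiter priority has to be propagated from the lock to its owner, from that owner to the lock it blocks on, and so on, which is why raising or lowering T4 or T3 changes the expected priority of every thread up the chain. A conceptual propagation loop, under the same simplified model as the earlier sketch and again not the kernel's rtmutex chain walk:

	/* Sketch: propagate a priority change along the chain
	 *   waiter -> lock -> owner -> lock the owner blocks on -> ...
	 */
	struct lock;

	struct task {
		int normal_prio;
		int prio;                 /* current effective priority          */
		struct lock *blocked_on;  /* lock this task waits for, or NULL   */
	};

	struct lock {
		struct task *owner;
		int top_waiter_prio;      /* highest priority among waiters      */
	};

	static void propagate_prio_change(struct task *t)
	{
		while (t) {
			struct lock *l = t->blocked_on;

			if (!l || !l->owner)
				break;
			/* re-derive the owner's boost from its top waiter */
			if (t->prio > l->top_waiter_prio)
				l->top_waiter_prio = t->prio;
			/* a real implementation must also handle the case where
			 * t was the top waiter and got deboosted, which requires
			 * rescanning the wait list (that is what the "Reduce prio
			 * of T4" expectations above exercise). */
			l->owner->prio = l->owner->normal_prio > l->top_waiter_prio ?
					 l->owner->normal_prio : l->top_waiter_prio;
			t = l->owner;     /* continue up the chain */
		}
	}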
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst new file mode 100644 index 000000000000..ca64f8bbf4bc --- /dev/null +++ b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst | |||
@@ -0,0 +1,143 @@ | |||
1 | # | ||
2 | # rt-mutex test | ||
3 | # | ||
4 | # Op: C(ommand)/T(est)/W(ait) | ||
5 | # | opcode | ||
6 | # | | threadid: 0-7 | ||
7 | # | | | opcode argument | ||
8 | # | | | | | ||
9 | # C: lock: 0: 0 | ||
10 | # | ||
11 | # Commands | ||
12 | # | ||
13 | # opcode opcode argument | ||
14 | # schedother nice value | ||
15 | # schedfifo priority | ||
16 | # lock lock nr (0-7) | ||
17 | # locknowait lock nr (0-7) | ||
18 | # lockint lock nr (0-7) | ||
19 | # lockintnowait lock nr (0-7) | ||
20 | # lockcont lock nr (0-7) | ||
21 | # unlock lock nr (0-7) | ||
22 | # lockbkl lock nr (0-7) | ||
23 | # unlockbkl lock nr (0-7) | ||
24 | # signal thread to signal (0-7) | ||
25 | # reset 0 | ||
26 | # resetevent 0 | ||
27 | # | ||
28 | # Tests / Wait | ||
29 | # | ||
30 | # opcode opcode argument | ||
31 | # | ||
32 | # prioeq priority | ||
33 | # priolt priority | ||
34 | # priogt priority | ||
35 | # nprioeq normal priority | ||
36 | # npriolt normal priority | ||
37 | # npriogt normal priority | ||
38 | # locked lock nr (0-7) | ||
39 | # blocked lock nr (0-7) | ||
40 | # blockedwake lock nr (0-7) | ||
41 | # unlocked lock nr (0-7) | ||
42 | # lockedbkl don't care | ||
43 | # blockedbkl don't care | ||
44 | # unlockedbkl don't care | ||
45 | # opcodeeq command opcode or number | ||
46 | # opcodelt number | ||
47 | # opcodegt number | ||
48 | # eventeq number | ||
49 | # eventgt number | ||
50 | # eventlt number | ||
51 | |||
52 | # | ||
53 | # 5 threads 4 lock PI | ||
54 | # | ||
55 | C: resetevent: 0: 0 | ||
56 | W: opcodeeq: 0: 0 | ||
57 | |||
58 | # Set schedulers | ||
59 | C: schedother: 0: 0 | ||
60 | C: schedfifo: 1: 81 | ||
61 | C: schedfifo: 2: 82 | ||
62 | C: schedfifo: 3: 83 | ||
63 | C: schedfifo: 4: 84 | ||
64 | |||
65 | # T0 lock L0 | ||
66 | C: locknowait: 0: 0 | ||
67 | W: locked: 0: 0 | ||
68 | |||
69 | # T1 lock L1 | ||
70 | C: locknowait: 1: 1 | ||
71 | W: locked: 1: 1 | ||
72 | |||
73 | # T1 lock L0 | ||
74 | C: lockintnowait: 1: 0 | ||
75 | W: blocked: 1: 0 | ||
76 | T: prioeq: 0: 81 | ||
77 | |||
78 | # T2 lock L2 | ||
79 | C: locknowait: 2: 2 | ||
80 | W: locked: 2: 2 | ||
81 | |||
82 | # T2 lock L1 | ||
83 | C: lockintnowait: 2: 1 | ||
84 | W: blocked: 2: 1 | ||
85 | T: prioeq: 0: 82 | ||
86 | T: prioeq: 1: 82 | ||
87 | |||
88 | # T3 lock L3 | ||
89 | C: locknowait: 3: 3 | ||
90 | W: locked: 3: 3 | ||
91 | |||
92 | # T3 lock L2 | ||
93 | C: lockintnowait: 3: 2 | ||
94 | W: blocked: 3: 2 | ||
95 | T: prioeq: 0: 83 | ||
96 | T: prioeq: 1: 83 | ||
97 | T: prioeq: 2: 83 | ||
98 | |||
99 | # T4 lock L3 | ||
100 | C: lockintnowait: 4: 3 | ||
101 | W: blocked: 4: 3 | ||
102 | T: prioeq: 0: 84 | ||
103 | T: prioeq: 1: 84 | ||
104 | T: prioeq: 2: 84 | ||
105 | T: prioeq: 3: 84 | ||
106 | |||
107 | # Signal T4 | ||
108 | C: signal: 4: 0 | ||
109 | W: unlocked: 4: 3 | ||
110 | T: prioeq: 0: 83 | ||
111 | T: prioeq: 1: 83 | ||
112 | T: prioeq: 2: 83 | ||
113 | T: prioeq: 3: 83 | ||
114 | |||
115 | # Signal T3 | ||
116 | C: signal: 3: 0 | ||
117 | W: unlocked: 3: 2 | ||
118 | T: prioeq: 0: 82 | ||
119 | T: prioeq: 1: 82 | ||
120 | T: prioeq: 2: 82 | ||
121 | |||
122 | # Signal T2 | ||
123 | C: signal: 2: 0 | ||
124 | W: unlocked: 2: 1 | ||
125 | T: prioeq: 0: 81 | ||
126 | T: prioeq: 1: 81 | ||
127 | |||
128 | # Signal T1 | ||
129 | C: signal: 1: 0 | ||
130 | W: unlocked: 1: 0 | ||
131 | T: priolt: 0: 1 | ||
132 | |||
133 | # Unlock and exit | ||
134 | C: unlock: 3: 3 | ||
135 | C: unlock: 2: 2 | ||
136 | C: unlock: 1: 1 | ||
137 | C: unlock: 0: 0 | ||
138 | |||
139 | W: unlocked: 3: 3 | ||
140 | W: unlocked: 2: 2 | ||
141 | W: unlocked: 1: 1 | ||
142 | W: unlocked: 0: 0 | ||
143 | |||
diff --git a/security/keys/key.c b/security/keys/key.c index 43295ca37b5d..80de8c3e9cc3 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/poison.h> | ||
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
16 | #include <linux/security.h> | 17 | #include <linux/security.h> |
@@ -988,7 +989,7 @@ void unregister_key_type(struct key_type *ktype) | |||
988 | if (key->type == ktype) { | 989 | if (key->type == ktype) { |
989 | if (ktype->destroy) | 990 | if (ktype->destroy) |
990 | ktype->destroy(key); | 991 | ktype->destroy(key); |
991 | memset(&key->payload, 0xbd, sizeof(key->payload)); | 992 | memset(&key->payload, KEY_DESTROY, sizeof(key->payload)); |
992 | } | 993 | } |
993 | } | 994 | } |
994 | 995 | ||
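The change above replaces the open-coded 0xbd pattern with a named constant from the newly included <linux/poison.h>; the OSS driver hunks further below do the same for 0xAB via OSS_POISON_FREE. Judging only from the values in the removed lines, the centralized definitions presumably amount to something like the following (a sketch of assumed contents, not a quote of the actual header):

	/* Assumed definitions, inferred from the constants they replace
	 * in this patch; check <linux/poison.h> for the real names/values.
	 */
	#define KEY_DESTROY        0xbd   /* key payload after key type unregistration */
	#define OSS_POISON_FREE    0xAB   /* freed OSS driver state                    */

Keeping the poison bytes in one header makes the "obviously bogus" values greppable instead of scattering magic numbers across subsystems.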
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index ac7f2b2e3924..28832e689800 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -1532,8 +1532,9 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm) | |||
1532 | /* Default to the current task SID. */ | 1532 | /* Default to the current task SID. */ |
1533 | bsec->sid = tsec->sid; | 1533 | bsec->sid = tsec->sid; |
1534 | 1534 | ||
1535 | /* Reset create and sockcreate SID on execve. */ | 1535 | /* Reset fs, key, and sock SIDs on execve. */ |
1536 | tsec->create_sid = 0; | 1536 | tsec->create_sid = 0; |
1537 | tsec->keycreate_sid = 0; | ||
1537 | tsec->sockcreate_sid = 0; | 1538 | tsec->sockcreate_sid = 0; |
1538 | 1539 | ||
1539 | if (tsec->exec_sid) { | 1540 | if (tsec->exec_sid) { |
@@ -2586,9 +2587,10 @@ static int selinux_task_alloc_security(struct task_struct *tsk) | |||
2586 | tsec2->osid = tsec1->osid; | 2587 | tsec2->osid = tsec1->osid; |
2587 | tsec2->sid = tsec1->sid; | 2588 | tsec2->sid = tsec1->sid; |
2588 | 2589 | ||
2589 | /* Retain the exec, create, and sock SIDs across fork */ | 2590 | /* Retain the exec, fs, key, and sock SIDs across fork */ |
2590 | tsec2->exec_sid = tsec1->exec_sid; | 2591 | tsec2->exec_sid = tsec1->exec_sid; |
2591 | tsec2->create_sid = tsec1->create_sid; | 2592 | tsec2->create_sid = tsec1->create_sid; |
2593 | tsec2->keycreate_sid = tsec1->keycreate_sid; | ||
2592 | tsec2->sockcreate_sid = tsec1->sockcreate_sid; | 2594 | tsec2->sockcreate_sid = tsec1->sockcreate_sid; |
2593 | 2595 | ||
2594 | /* Retain ptracer SID across fork, if any. | 2596 | /* Retain ptracer SID across fork, if any. |
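The two hunks above wire the new keycreate_sid field into execve (reset alongside create_sid and sockcreate_sid) and fork (inherited alongside them). Presumably the key-allocation path then uses it as an override for the new key's SID, falling back to the task SID when it is unset; a self-contained sketch of that pattern (assumed behaviour, not quoted from hooks.c):

	/* Sketch (assumed): the keycreate SID, when set, overrides the task
	 * SID for newly allocated keys; clearing it on execve, as the first
	 * hunk above does, prevents it from leaking across a program change.
	 */
	struct task_security_sketch {
		unsigned int sid;            /* current task SID              */
		unsigned int keycreate_sid;  /* 0 = unset, fall back to sid   */
	};

	static unsigned int new_key_sid(const struct task_security_sketch *tsec)
	{
		return tsec->keycreate_sid ? tsec->keycreate_sid : tsec->sid;
	}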
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c index 1a921ee71aba..2343dedd44ae 100644 --- a/sound/oss/via82cxxx_audio.c +++ b/sound/oss/via82cxxx_audio.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/poison.h> | ||
27 | #include <linux/init.h> | 28 | #include <linux/init.h> |
28 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
29 | #include <linux/proc_fs.h> | 30 | #include <linux/proc_fs.h> |
@@ -3522,7 +3523,7 @@ err_out_have_mixer: | |||
3522 | 3523 | ||
3523 | err_out_kfree: | 3524 | err_out_kfree: |
3524 | #ifndef VIA_NDEBUG | 3525 | #ifndef VIA_NDEBUG |
3525 | memset (card, 0xAB, sizeof (*card)); /* poison memory */ | 3526 | memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */ |
3526 | #endif | 3527 | #endif |
3527 | kfree (card); | 3528 | kfree (card); |
3528 | 3529 | ||
@@ -3559,7 +3560,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev) | |||
3559 | via_ac97_cleanup (card); | 3560 | via_ac97_cleanup (card); |
3560 | 3561 | ||
3561 | #ifndef VIA_NDEBUG | 3562 | #ifndef VIA_NDEBUG |
3562 | memset (card, 0xAB, sizeof (*card)); /* poison memory */ | 3563 | memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */ |
3563 | #endif | 3564 | #endif |
3564 | kfree (card); | 3565 | kfree (card); |
3565 | 3566 | ||
diff --git a/sound/ppc/toonie.c b/sound/ppc/toonie.c deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/sound/ppc/toonie.c +++ /dev/null | |||