116 files changed, 1828 insertions, 1269 deletions
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index c2371c5a98f9..48a3955f05fc 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -77,7 +77,8 @@ documentation files are also added which explain how to use the feature.
 When a kernel change causes the interface that the kernel exposes to
 userspace to change, it is recommended that you send the information or
 a patch to the manual pages explaining the change to the manual pages
-maintainer at mtk.manpages@gmail.com.
+maintainer at mtk.manpages@gmail.com, and CC the list
+linux-api@vger.kernel.org.
 
 Here is a list of files that are in the kernel source tree that are
 required reading:
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index da10e0714241..21f0795af20f 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -67,6 +67,8 @@ kernel patches.
 
 19: All new userspace interfaces are documented in Documentation/ABI/.
     See Documentation/ABI/README for more information.
+    Patches that change userspace interfaces should be CCed to
+    linux-api@vger.kernel.org.
 
 20: Check that it all passes `make headers_check'.
 
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 0bd32748a467..c6841eee9598 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -168,10 +168,10 @@ if ($#ARGV < 0) {
 mkdir $ARGV[0],0777;
 $state = 0;
 while (<STDIN>) {
-    if (/^\.TH \"[^\"]*\" 4 \"([^\"]*)\"/) {
+    if (/^\.TH \"[^\"]*\" 9 \"([^\"]*)\"/) {
     if ($state == 1) { close OUT }
     $state = 1;
-    $fn = "$ARGV[0]/$1.4";
+    $fn = "$ARGV[0]/$1.9";
     print STDERR "Creating $fn\n";
     open OUT, ">$fn" or die "can't open $fn: $!\n";
     print OUT $_;
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 88bcb8767335..9d8eb553884c 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -1,151 +1,242 @@
+=============
+CFS Scheduler
+=============
 
-This is the CFS scheduler.
-
-80% of CFS's design can be summed up in a single sentence: CFS basically
-models an "ideal, precise multi-tasking CPU" on real hardware.
-
-"Ideal multi-tasking CPU" is a (non-existent :-)) CPU that has 100%
-physical power and which can run each task at precise equal speed, in
-parallel, each at 1/nr_running speed. For example: if there are 2 tasks
-running then it runs each at 50% physical power - totally in parallel.
-
-On real hardware, we can run only a single task at once, so while that
-one task runs, the other tasks that are waiting for the CPU are at a
-disadvantage - the current task gets an unfair amount of CPU time. In
-CFS this fairness imbalance is expressed and tracked via the per-task
-p->wait_runtime (nanosec-unit) value. "wait_runtime" is the amount of
-time the task should now run on the CPU for it to become completely fair
-and balanced.
-
-( small detail: on 'ideal' hardware, the p->wait_runtime value would
-always be zero - no task would ever get 'out of balance' from the
-'ideal' share of CPU time. )
-
-CFS's task picking logic is based on this p->wait_runtime value and it
-is thus very simple: it always tries to run the task with the largest
-p->wait_runtime value. In other words, CFS tries to run the task with
-the 'gravest need' for more CPU time. So CFS always tries to split up
-CPU time between runnable tasks as close to 'ideal multitasking
-hardware' as possible.
-
-Most of the rest of CFS's design just falls out of this really simple
-concept, with a few add-on embellishments like nice levels,
-multiprocessing and various algorithm variants to recognize sleepers.
-
-In practice it works like this: the system runs a task a bit, and when
-the task schedules (or a scheduler tick happens) the task's CPU usage is
-'accounted for': the (small) time it just spent using the physical CPU
-is deducted from p->wait_runtime. [minus the 'fair share' it would have
-gotten anyway]. Once p->wait_runtime gets low enough so that another
-task becomes the 'leftmost task' of the time-ordered rbtree it maintains
-(plus a small amount of 'granularity' distance relative to the leftmost
-task so that we do not over-schedule tasks and trash the cache) then the
-new leftmost task is picked and the current task is preempted.
-
-The rq->fair_clock value tracks the 'CPU time a runnable task would have
-fairly gotten, had it been runnable during that time'. So by using
-rq->fair_clock values we can accurately timestamp and measure the
-'expected CPU time' a task should have gotten. All runnable tasks are
-sorted in the rbtree by the "rq->fair_clock - p->wait_runtime" key, and
-CFS picks the 'leftmost' task and sticks to it. As the system progresses
-forwards, newly woken tasks are put into the tree more and more to the
-right - slowly but surely giving a chance for every task to become the
-'leftmost task' and thus get on the CPU within a deterministic amount of
-time.
-
-Some implementation details:
-
-- the introduction of Scheduling Classes: an extensible hierarchy of
-scheduler modules. These modules encapsulate scheduling policy
-details and are handled by the scheduler core without the core
-code assuming about them too much.
-
-- sched_fair.c implements the 'CFS desktop scheduler': it is a
-replacement for the vanilla scheduler's SCHED_OTHER interactivity
-code.
-
-I'd like to give credit to Con Kolivas for the general approach here:
-he has proven via RSDL/SD that 'fair scheduling' is possible and that
-it results in better desktop scheduling. Kudos Con!
-
-The CFS patch uses a completely different approach and implementation
-from RSDL/SD. My goal was to make CFS's interactivity quality exceed
-that of RSDL/SD, which is a high standard to meet :-) Testing
-feedback is welcome to decide this one way or another. [ and, in any
-case, all of SD's logic could be added via a kernel/sched_sd.c module
-as well, if Con is interested in such an approach. ]
-
-CFS's design is quite radical: it does not use runqueues, it uses a
-time-ordered rbtree to build a 'timeline' of future task execution,
-and thus has no 'array switch' artifacts (by which both the vanilla
-scheduler and RSDL/SD are affected).
-
-CFS uses nanosecond granularity accounting and does not rely on any
-jiffies or other HZ detail. Thus the CFS scheduler has no notion of
-'timeslices' and has no heuristics whatsoever. There is only one
-central tunable (you have to switch on CONFIG_SCHED_DEBUG):
-
-/proc/sys/kernel/sched_granularity_ns
-
-which can be used to tune the scheduler from 'desktop' (low
-latencies) to 'server' (good batching) workloads. It defaults to a
-setting suitable for desktop workloads. SCHED_BATCH is handled by the
-CFS scheduler module too.
-
-Due to its design, the CFS scheduler is not prone to any of the
-'attacks' that exist today against the heuristics of the stock
-scheduler: fiftyp.c, thud.c, chew.c, ring-test.c, massive_intr.c all
-work fine and do not impact interactivity and produce the expected
-behavior.
-
-the CFS scheduler has a much stronger handling of nice levels and
-SCHED_BATCH: both types of workloads should be isolated much more
-agressively than under the vanilla scheduler.
-
-( another detail: due to nanosec accounting and timeline sorting,
-sched_yield() support is very simple under CFS, and in fact under
-CFS sched_yield() behaves much better than under any other
-scheduler i have tested so far. )
-
-- sched_rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler
-way than the vanilla scheduler does. It uses 100 runqueues (for all
-100 RT priority levels, instead of 140 in the vanilla scheduler)
-and it needs no expired array.
-
-- reworked/sanitized SMP load-balancing: the runqueue-walking
-assumptions are gone from the load-balancing code now, and
-iterators of the scheduling modules are used. The balancing code got
-quite a bit simpler as a result.
-
-
-Group scheduler extension to CFS
-================================
-
-Normally the scheduler operates on individual tasks and strives to provide
-fair CPU time to each task. Sometimes, it may be desirable to group tasks
-and provide fair CPU time to each such task group. For example, it may
-be desirable to first provide fair CPU time to each user on the system
-and then to each task belonging to a user.
-
-CONFIG_FAIR_GROUP_SCHED strives to achieve exactly that. It lets
-SCHED_NORMAL/BATCH tasks be be grouped and divides CPU time fairly among such
-groups. At present, there are two (mutually exclusive) mechanisms to group
-tasks for CPU bandwidth control purpose:
-
-- Based on user id (CONFIG_FAIR_USER_SCHED)
-In this option, tasks are grouped according to their user id.
-- Based on "cgroup" pseudo filesystem (CONFIG_FAIR_CGROUP_SCHED)
-This options lets the administrator create arbitrary groups
-of tasks, using the "cgroup" pseudo filesystem. See
-Documentation/cgroups.txt for more information about this
-filesystem.
 
-Only one of these options to group tasks can be chosen and not both.
+1. OVERVIEW
+
+CFS stands for "Completely Fair Scheduler," and is the new "desktop" process
+scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the
+replacement for the previous vanilla scheduler's SCHED_OTHER interactivity
+code.
+
+80% of CFS's design can be summed up in a single sentence: CFS basically models
+an "ideal, precise multi-tasking CPU" on real hardware.
+
+"Ideal multi-tasking CPU" is a (non-existent :-)) CPU that has 100% physical
+power and which can run each task at precise equal speed, in parallel, each at
+1/nr_running speed. For example: if there are 2 tasks running, then it runs
+each at 50% physical power --- i.e., actually in parallel.
+
+On real hardware, we can run only a single task at once, so we have to
+introduce the concept of "virtual runtime." The virtual runtime of a task
+specifies when its next timeslice would start execution on the ideal
+multi-tasking CPU described above. In practice, the virtual runtime of a task
+is its actual runtime normalized to the total number of running tasks.
+
+
+
+2. FEW IMPLEMENTATION DETAILS
+
+In CFS the virtual runtime is expressed and tracked via the per-task
+p->se.vruntime (nanosec-unit) value. This way, it's possible to accurately
+timestamp and measure the "expected CPU time" a task should have gotten.
+
+[ small detail: on "ideal" hardware, at any time all tasks would have the same
+p->se.vruntime value --- i.e., tasks would execute simultaneously and no task
+would ever get "out of balance" from the "ideal" share of CPU time. ]
+
+CFS's task picking logic is based on this p->se.vruntime value and it is thus
+very simple: it always tries to run the task with the smallest p->se.vruntime
+value (i.e., the task which executed least so far). CFS always tries to split
+up CPU time between runnable tasks as close to "ideal multitasking hardware" as
+possible.
+
+Most of the rest of CFS's design just falls out of this really simple concept,
+with a few add-on embellishments like nice levels, multiprocessing and various
+algorithm variants to recognize sleepers.
+
+
+
+3. THE RBTREE
+
+CFS's design is quite radical: it does not use the old data structures for the
+runqueues, but it uses a time-ordered rbtree to build a "timeline" of future
+task execution, and thus has no "array switch" artifacts (by which both the
+previous vanilla scheduler and RSDL/SD are affected).
+
+CFS also maintains the rq->cfs.min_vruntime value, which is a monotonic
+increasing value tracking the smallest vruntime among all tasks in the
+runqueue. The total amount of work done by the system is tracked using
+min_vruntime; that value is used to place newly activated entities on the left
+side of the tree as much as possible.
+
+The total number of running tasks in the runqueue is accounted through the
+rq->cfs.load value, which is the sum of the weights of the tasks queued on the
+runqueue.
+
+CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
+p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to
+account for possible wraparounds). CFS picks the "leftmost" task from this
+tree and sticks to it.
+As the system progresses forwards, the executed tasks are put into the tree
+more and more to the right --- slowly but surely giving a chance for every task
+to become the "leftmost task" and thus get on the CPU within a deterministic
+amount of time.
+
+Summing up, CFS works like this: it runs a task a bit, and when the task
+schedules (or a scheduler tick happens) the task's CPU usage is "accounted
+for": the (small) time it just spent using the physical CPU is added to
+p->se.vruntime. Once p->se.vruntime gets high enough so that another task
+becomes the "leftmost task" of the time-ordered rbtree it maintains (plus a
+small amount of "granularity" distance relative to the leftmost task so that we
+do not over-schedule tasks and trash the cache), then the new leftmost task is
+picked and the current task is preempted.
+
+
+
+4. SOME FEATURES OF CFS
+
+CFS uses nanosecond granularity accounting and does not rely on any jiffies or
+other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
+way the previous scheduler had, and has no heuristics whatsoever. There is
+only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):
+
+/proc/sys/kernel/sched_granularity_ns
+
+which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
+"server" (i.e., good batching) workloads. It defaults to a setting suitable
+for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
+
+Due to its design, the CFS scheduler is not prone to any of the "attacks" that
+exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
+chew.c, ring-test.c, massive_intr.c all work fine and do not impact
+interactivity and produce the expected behavior.
+
+The CFS scheduler has a much stronger handling of nice levels and SCHED_BATCH
+than the previous vanilla scheduler: both types of workloads are isolated much
+more aggressively.
+
+SMP load-balancing has been reworked/sanitized: the runqueue-walking
+assumptions are gone from the load-balancing code now, and iterators of the
+scheduling modules are used. The balancing code got quite a bit simpler as a
+result.
+
+
+
+5. Scheduling policies
+
+CFS implements three scheduling policies:
+
+- SCHED_NORMAL (traditionally called SCHED_OTHER): The scheduling
+policy that is used for regular tasks.
+
+- SCHED_BATCH: Does not preempt nearly as often as regular tasks
+would, thereby allowing tasks to run longer and make better use of
+caches but at the cost of interactivity. This is well suited for
+batch jobs.
+
+- SCHED_IDLE: This is even weaker than nice 19, but its not a true
+idle timer scheduler in order to avoid to get into priority
+inversion problems which would deadlock the machine.
+
+SCHED_FIFO/_RR are implemented in sched_rt.c and are as specified by
+POSIX.
+
+The command chrt from util-linux-ng 2.13.1.1 can set all of these except
+SCHED_IDLE.
 
-Group scheduler tunables:
 
-When CONFIG_FAIR_USER_SCHED is defined, a directory is created in sysfs for
-each new user and a "cpu_share" file is added in that directory.
+
+6. SCHEDULING CLASSES
+
+The new CFS scheduler has been designed in such a way to introduce "Scheduling
+Classes," an extensible hierarchy of scheduler modules. These modules
+encapsulate scheduling policy details and are handled by the scheduler core
+without the core code assuming too much about them.
+
+sched_fair.c implements the CFS scheduler described above.
+
+sched_rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler way than
+the previous vanilla scheduler did. It uses 100 runqueues (for all 100 RT
+priority levels, instead of 140 in the previous scheduler) and it needs no
+expired array.
+
+Scheduling classes are implemented through the sched_class structure, which
+contains hooks to functions that must be called whenever an interesting event
+occurs.
+
+This is the (partial) list of the hooks:
+
+- enqueue_task(...)
+
+Called when a task enters a runnable state.
+It puts the scheduling entity (task) into the red-black tree and
+increments the nr_running variable.
+
+- dequeue_tree(...)
+
+When a task is no longer runnable, this function is called to keep the
+corresponding scheduling entity out of the red-black tree. It decrements
+the nr_running variable.
+
+- yield_task(...)
+
+This function is basically just a dequeue followed by an enqueue, unless the
+compat_yield sysctl is turned on; in that case, it places the scheduling
+entity at the right-most end of the red-black tree.
+
+- check_preempt_curr(...)
+
+This function checks if a task that entered the runnable state should
+preempt the currently running task.
+
+- pick_next_task(...)
+
+This function chooses the most appropriate task eligible to run next.
+
+- set_curr_task(...)
+
+This function is called when a task changes its scheduling class or changes
+its task group.
+
+- task_tick(...)
+
+This function is mostly called from time tick functions; it might lead to
+process switch. This drives the running preemption.
+
+- task_new(...)
+
+The core scheduler gives the scheduling module an opportunity to manage new
+task startup. The CFS scheduling module uses it for group scheduling, while
+the scheduling module for a real-time task does not use it.
+
+
+
+7. GROUP SCHEDULER EXTENSIONS TO CFS
+
+Normally, the scheduler operates on individual tasks and strives to provide
+fair CPU time to each task. Sometimes, it may be desirable to group tasks and
+provide fair CPU time to each such task group. For example, it may be
+desirable to first provide fair CPU time to each user on the system and then to
+each task belonging to a user.
+
+CONFIG_GROUP_SCHED strives to achieve exactly that. It lets tasks to be
+grouped and divides CPU time fairly among such groups.
+
+CONFIG_RT_GROUP_SCHED permits to group real-time (i.e., SCHED_FIFO and
+SCHED_RR) tasks.
+
+CONFIG_FAIR_GROUP_SCHED permits to group CFS (i.e., SCHED_NORMAL and
+SCHED_BATCH) tasks.
+
+At present, there are two (mutually exclusive) mechanisms to group tasks for
+CPU bandwidth control purposes:
+
+- Based on user id (CONFIG_USER_SCHED)
+
+With this option, tasks are grouped according to their user id.
+
+- Based on "cgroup" pseudo filesystem (CONFIG_CGROUP_SCHED)
+
+This options needs CONFIG_CGROUPS to be defined, and lets the administrator
+create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See
+Documentation/cgroups.txt for more information about this filesystem.
+
+Only one of these options to group tasks can be chosen and not both.
+
+When CONFIG_USER_SCHED is defined, a directory is created in sysfs for each new
+user and a "cpu_share" file is added in that directory.
 
 # cd /sys/kernel/uids
 # cat 512/cpu_share   # Display user 512's CPU share
@@ -155,16 +246,14 @@ each new user and a "cpu_share" file is added in that directory.
 2048
 #
 
-CPU bandwidth between two users are divided in the ratio of their CPU shares.
-For ex: if you would like user "root" to get twice the bandwidth of user
-"guest", then set the cpu_share for both the users such that "root"'s
-cpu_share is twice "guest"'s cpu_share
-
+CPU bandwidth between two users is divided in the ratio of their CPU shares.
+For example: if you would like user "root" to get twice the bandwidth of user
+"guest," then set the cpu_share for both the users such that "root"'s cpu_share
+is twice "guest"'s cpu_share.
 
-When CONFIG_FAIR_CGROUP_SCHED is defined, a "cpu.shares" file is created
-for each group created using the pseudo filesystem. See example steps
-below to create task groups and modify their CPU share using the "cgroups"
-pseudo filesystem
+When CONFIG_CGROUP_SCHED is defined, a "cpu.shares" file is created for each
+group created using the pseudo filesystem. See example steps below to create
+task groups and modify their CPU share using the "cgroups" pseudo filesystem.
 
 # mkdir /dev/cpuctl
 # mount -t cgroup -ocpu none /dev/cpuctl
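
The rewritten document above only shows chrt for switching between the SCHED_NORMAL, SCHED_BATCH and SCHED_IDLE policies it describes. As an illustrative aside (not part of this commit), here is a minimal user-space sketch that selects a policy programmatically; it assumes glibc's <sched.h> with _GNU_SOURCE providing the SCHED_BATCH and SCHED_IDLE constants.

/*
 * Illustrative user-space sketch (not from this patch): request one of the
 * CFS policies described in section 5 above.  SCHED_BATCH and SCHED_IDLE
 * take a static priority of 0; only SCHED_FIFO/SCHED_RR use sched_priority.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sched_param sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 0;	/* must be 0 for the non-realtime policies */

	/* Ask for batch scheduling for the calling process (pid 0 == self). */
	if (sched_setscheduler(0, SCHED_BATCH, &sp) == -1) {
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}

	printf("policy is now %d (SCHED_BATCH == %d)\n",
	       sched_getscheduler(0), SCHED_BATCH);
	return 0;
}

Passing SCHED_IDLE instead of SCHED_BATCH works the same way, which is also the usual fallback given that the chrt version mentioned above cannot set SCHED_IDLE.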
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 89c7f32abf9f..53449cb99b17 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -46,7 +46,7 @@
 45 -> Pinnacle PCTV DVB-T (em2870)
 46 -> Compro, VideoMate U3 (em2870) [185b:2870]
 47 -> KWorld DVB-T 305U (em2880) [eb1a:e305]
-48 -> KWorld DVB-T 310U (em2880)
+48 -> KWorld DVB-T 310U (em2880) [eb1a:e310]
 49 -> MSI DigiVox A/D (em2880) [eb1a:e310]
 50 -> MSI DigiVox A/D II (em2880) [eb1a:e320]
 51 -> Terratec Hybrid XS Secam (em2880) [0ccd:004c]
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 0f03900c48fb..9a3e4d797fa8 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -190,6 +190,7 @@ pac7311 093a:260f SnakeCam
 pac7311 093a:2621 PAC731x
 pac7311 093a:2624 PAC7302
 pac7311 093a:2626 Labtec 2200
+pac7311 093a:262a Webcam 300k
 zc3xx 0ac8:0302 Z-star Vimicro zc0302
 vc032x 0ac8:0321 Vimicro generic vc0321
 vc032x 0ac8:0323 Vimicro Vc0323
diff --git a/MAINTAINERS b/MAINTAINERS
index 3596d1782264..8dae4555f10e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1198,9 +1198,7 @@ M: hpa@zytor.com
 S: Maintained
 
 CPUSETS
-P: Paul Jackson
 P: Paul Menage
-M: pj@sgi.com
 M: menage@google.com
 L: linux-kernel@vger.kernel.org
 W: http://www.bullopensource.org/cpuset/
@@ -2706,6 +2704,7 @@ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
 P: Michael Kerrisk
 M: mtk.manpages@gmail.com
 W: http://www.kernel.org/doc/man-pages
+L: linux-man@vger.kernel.org
 S: Supported
 
 MARVELL LIBERTAS WIRELESS DRIVER
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 27
-EXTRAVERSION = -rc8
+EXTRAVERSION = -rc9
 NAME = Rotary Wombat
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 83df541650fc..06b6fdab639f 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -149,6 +149,9 @@ smp_callin(void)
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 
+	/* inform the notifiers about the new cpu */
+	notify_cpu_starting(cpuid);
+
 	/* Must have completely accurate bogos. */
 	local_irq_enable();
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e9842f6767f9..e42a749a56dd 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -277,6 +277,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Enable local interrupts.
 	 */
+	notify_cpu_starting(cpu);
 	local_irq_enable();
 	local_fiq_enable();
 
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 952a24b2f5a9..52e16c6436f9 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -178,6 +178,7 @@ void __init smp_callin(void)
 	unmask_irq(IPI_INTR_VECT);
 	unmask_irq(TIMER0_INTR_VECT);
 	preempt_disable();
+	notify_cpu_starting(cpu);
 	local_irq_enable();
 
 	cpu_set(cpu, cpu_online_map);
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d8f05e504fbf..1dcbb85fc4ee 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -401,6 +401,7 @@ smp_callin (void)
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
+	notify_cpu_starting(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2c03ac1d005f..fc2994811f15 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -498,6 +498,8 @@ static void __init smp_online(void)
 {
 	int cpu_id = smp_processor_id();
 
+	notify_cpu_starting(cpu_id);
+
 	local_irq_enable();
 
 	/* Get our bogomips. */
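
All of the architecture hunks above add the same call: notify_cpu_starting() runs on the incoming CPU before it is marked online, so that subsystems can hook the new CPU_STARTING notification. The following sketch of such a consumer is illustrative only; it assumes the CPU_STARTING/CPU_STARTING_FROZEN actions and register_cpu_notifier() from <linux/cpu.h> in this kernel series, and the demo_* names are made up.

/*
 * Illustrative sketch (not part of this patch): a module-style consumer of
 * the CPU_STARTING event raised by the notify_cpu_starting() calls added
 * in the smp_callin()/secondary-start paths above.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int __cpuinit demo_cpu_callback(struct notifier_block *nb,
				       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		/*
		 * Runs on the incoming CPU, before it is marked online and
		 * while interrupts are still disabled, so per-CPU state can
		 * be prepared without racing against the scheduler.
		 */
		pr_info("cpu %u is starting\n", cpu);
		break;
	case CPU_ONLINE:
		pr_info("cpu %u is now online\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_cpu_notifier __cpuinitdata = {
	.notifier_call = demo_cpu_callback,
};

static int __init demo_init(void)
{
	register_cpu_notifier(&demo_cpu_notifier);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");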
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d72..1e06d233fa83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -211,6 +211,7 @@ config MIPS_MALTA
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SYS_SUPPORTS_MIPS_CMP if BROKEN	# because SYNC_R4K is broken
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_SMARTMIPS
 	help
@@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC
 	depends on CPU_MIPS32_R2
 	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.
 
-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
-	default y
+	default n
 	help
 	  To support multiple TC microthreads acting as "CPUs" within
 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
 	  during interrupt handling. To support legacy drivers and interrupt
 	  controller management code, SMTC has a "backstop" to track and
 	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.
 
 config MIPS_MT_SMTC_IRQAFF
 	bool "Support IRQ affinity API"
@@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF
 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
 	  for SMTC Linux kernel. Requires platform support, of which
 	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code. Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.
 
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
@@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD
 	  "exit" syscall notifying other kernel modules the SP program is
 	  exiting. You probably want to say yes here.
 
+config MIPS_CMP
+	bool "MIPS CMP framework support"
+	depends on SYS_SUPPORTS_MIPS_CMP
+	select SYNC_R4K if BROKEN
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_SCHED_SMT if SMP
+	select WEAK_ORDERING
+	default n
+	help
+	  This is a placeholder option for the GCMP work. It will need to
+	  be handled differently...
+
 config SB1_PASS_1_WORKAROUNDS
 	bool
 	depends on CPU_SB1_PASS_1
@@ -1693,6 +1688,9 @@ config SMP
 config SMP_UP
 	bool
 
+config SYS_SUPPORTS_MIPS_CMP
+	bool
+
 config SYS_SUPPORTS_SMP
 	bool
 
@@ -1740,17 +1738,6 @@ config NR_CPUS
 	  performance should round up your number of processors to the next
 	  power of two.
 
-config MIPS_CMP
-	bool "MIPS CMP framework support"
-	depends on SMP
-	select SYNC_R4K
-	select SYS_SUPPORTS_SCHED_SMT
-	select WEAK_ORDERING
-	default n
-	help
-	  This is a placeholder option for the GCMP work. It will need to
-	  be handled differently...
-
 source "kernel/time/Kconfig"
 
 #
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..25775cb54000 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@ | |||
12 | 12 | ||
13 | #include <asm/smtc_ipi.h> | 13 | #include <asm/smtc_ipi.h> |
14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
15 | #include <asm/cevt-r4k.h> | ||
16 | |||
17 | /* | ||
18 | * The SMTC Kernel for the 34K, 1004K, et. al. replaces several | ||
19 | * of these routines with SMTC-specific variants. | ||
20 | */ | ||
21 | |||
22 | #ifndef CONFIG_MIPS_MT_SMTC | ||
15 | 23 | ||
16 | static int mips_next_event(unsigned long delta, | 24 | static int mips_next_event(unsigned long delta, |
17 | struct clock_event_device *evt) | 25 | struct clock_event_device *evt) |
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta, | |||
19 | unsigned int cnt; | 27 | unsigned int cnt; |
20 | int res; | 28 | int res; |
21 | 29 | ||
22 | #ifdef CONFIG_MIPS_MT_SMTC | ||
23 | { | ||
24 | unsigned long flags, vpflags; | ||
25 | local_irq_save(flags); | ||
26 | vpflags = dvpe(); | ||
27 | #endif | ||
28 | cnt = read_c0_count(); | 30 | cnt = read_c0_count(); |
29 | cnt += delta; | 31 | cnt += delta; |
30 | write_c0_compare(cnt); | 32 | write_c0_compare(cnt); |
31 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; | 33 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; |
32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
33 | evpe(vpflags); | ||
34 | local_irq_restore(flags); | ||
35 | } | ||
36 | #endif | ||
37 | return res; | 34 | return res; |
38 | } | 35 | } |
39 | 36 | ||
40 | static void mips_set_mode(enum clock_event_mode mode, | 37 | #endif /* CONFIG_MIPS_MT_SMTC */ |
41 | struct clock_event_device *evt) | 38 | |
39 | void mips_set_clock_mode(enum clock_event_mode mode, | ||
40 | struct clock_event_device *evt) | ||
42 | { | 41 | { |
43 | /* Nothing to do ... */ | 42 | /* Nothing to do ... */ |
44 | } | 43 | } |
45 | 44 | ||
46 | static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 45 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
47 | static int cp0_timer_irq_installed; | 46 | int cp0_timer_irq_installed; |
48 | 47 | ||
49 | /* | 48 | #ifndef CONFIG_MIPS_MT_SMTC |
50 | * Timer ack for an R4k-compatible timer of a known frequency. | ||
51 | */ | ||
52 | static void c0_timer_ack(void) | ||
53 | { | ||
54 | write_c0_compare(read_c0_compare()); | ||
55 | } | ||
56 | 49 | ||
57 | /* | 50 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
58 | * Possibly handle a performance counter interrupt. | ||
59 | * Return true if the timer interrupt should not be checked | ||
60 | */ | ||
61 | static inline int handle_perf_irq(int r2) | ||
62 | { | ||
63 | /* | ||
64 | * The performance counter overflow interrupt may be shared with the | ||
65 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
66 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
67 | * and we can't reliably determine if a counter interrupt has also | ||
68 | * happened (!r2) then don't check for a timer interrupt. | ||
69 | */ | ||
70 | return (cp0_perfcount_irq < 0) && | ||
71 | perf_irq() == IRQ_HANDLED && | ||
72 | !r2; | ||
73 | } | ||
74 | |||
75 | static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
76 | { | 51 | { |
77 | const int r2 = cpu_has_mips_r2; | 52 | const int r2 = cpu_has_mips_r2; |
78 | struct clock_event_device *cd; | 53 | struct clock_event_device *cd; |
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | |||
93 | * interrupt. Being the paranoiacs we are we check anyway. | 68 | * interrupt. Being the paranoiacs we are we check anyway. |
94 | */ | 69 | */ |
95 | if (!r2 || (read_c0_cause() & (1 << 30))) { | 70 | if (!r2 || (read_c0_cause() & (1 << 30))) { |
96 | c0_timer_ack(); | 71 | /* Clear Count/Compare Interrupt */ |
97 | #ifdef CONFIG_MIPS_MT_SMTC | 72 | write_c0_compare(read_c0_compare()); |
98 | if (cpu_data[cpu].vpe_id) | ||
99 | goto out; | ||
100 | cpu = 0; | ||
101 | #endif | ||
102 | cd = &per_cpu(mips_clockevent_device, cpu); | 73 | cd = &per_cpu(mips_clockevent_device, cpu); |
103 | cd->event_handler(cd); | 74 | cd->event_handler(cd); |
104 | } | 75 | } |
@@ -107,65 +78,16 @@ out: | |||
107 | return IRQ_HANDLED; | 78 | return IRQ_HANDLED; |
108 | } | 79 | } |
109 | 80 | ||
110 | static struct irqaction c0_compare_irqaction = { | 81 | #endif /* Not CONFIG_MIPS_MT_SMTC */ |
82 | |||
83 | struct irqaction c0_compare_irqaction = { | ||
111 | .handler = c0_compare_interrupt, | 84 | .handler = c0_compare_interrupt, |
112 | #ifdef CONFIG_MIPS_MT_SMTC | ||
113 | .flags = IRQF_DISABLED, | ||
114 | #else | ||
115 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 85 | .flags = IRQF_DISABLED | IRQF_PERCPU, |
116 | #endif | ||
117 | .name = "timer", | 86 | .name = "timer", |
118 | }; | 87 | }; |
119 | 88 | ||
120 | #ifdef CONFIG_MIPS_MT_SMTC | ||
121 | DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | ||
122 | |||
123 | static void smtc_set_mode(enum clock_event_mode mode, | ||
124 | struct clock_event_device *evt) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static void mips_broadcast(cpumask_t mask) | ||
129 | { | ||
130 | unsigned int cpu; | ||
131 | |||
132 | for_each_cpu_mask(cpu, mask) | ||
133 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
134 | } | ||
135 | |||
136 | static void setup_smtc_dummy_clockevent_device(void) | ||
137 | { | ||
138 | //uint64_t mips_freq = mips_hpt_^frequency; | ||
139 | unsigned int cpu = smp_processor_id(); | ||
140 | struct clock_event_device *cd; | ||
141 | 89 | ||
142 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 90 | void mips_event_handler(struct clock_event_device *dev) |
143 | |||
144 | cd->name = "SMTC"; | ||
145 | cd->features = CLOCK_EVT_FEAT_DUMMY; | ||
146 | |||
147 | /* Calculate the min / max delta */ | ||
148 | cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
149 | cd->shift = 0; //32; | ||
150 | cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); | ||
151 | cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); | ||
152 | |||
153 | cd->rating = 200; | ||
154 | cd->irq = 17; //-1; | ||
155 | // if (cpu) | ||
156 | // cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); | ||
157 | // else | ||
158 | cd->cpumask = cpumask_of_cpu(cpu); | ||
159 | |||
160 | cd->set_mode = smtc_set_mode; | ||
161 | |||
162 | cd->broadcast = mips_broadcast; | ||
163 | |||
164 | clockevents_register_device(cd); | ||
165 | } | ||
166 | #endif | ||
167 | |||
168 | static void mips_event_handler(struct clock_event_device *dev) | ||
169 | { | 91 | { |
170 | } | 92 | } |
171 | 93 | ||
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void) | |||
177 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; | 99 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; |
178 | } | 100 | } |
179 | 101 | ||
180 | static int c0_compare_int_usable(void) | 102 | /* |
103 | * Compare interrupt can be routed and latched outside the core, | ||
104 | * so a single execution hazard barrier may not be enough to give | ||
105 | * it time to clear as seen in the Cause register. 4 time the | ||
106 | * pipeline depth seems reasonably conservative, and empirically | ||
107 | * works better in configurations with high CPU/bus clock ratios. | ||
108 | */ | ||
109 | |||
110 | #define compare_change_hazard() \ | ||
111 | do { \ | ||
112 | irq_disable_hazard(); \ | ||
113 | irq_disable_hazard(); \ | ||
114 | irq_disable_hazard(); \ | ||
115 | irq_disable_hazard(); \ | ||
116 | } while (0) | ||
117 | |||
118 | int c0_compare_int_usable(void) | ||
181 | { | 119 | { |
182 | unsigned int delta; | 120 | unsigned int delta; |
183 | unsigned int cnt; | 121 | unsigned int cnt; |
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void) | |||
187 | */ | 125 | */ |
188 | if (c0_compare_int_pending()) { | 126 | if (c0_compare_int_pending()) { |
189 | write_c0_compare(read_c0_count()); | 127 | write_c0_compare(read_c0_count()); |
190 | irq_disable_hazard(); | 128 | compare_change_hazard(); |
191 | if (c0_compare_int_pending()) | 129 | if (c0_compare_int_pending()) |
192 | return 0; | 130 | return 0; |
193 | } | 131 | } |
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void) | |||
196 | cnt = read_c0_count(); | 134 | cnt = read_c0_count(); |
197 | cnt += delta; | 135 | cnt += delta; |
198 | write_c0_compare(cnt); | 136 | write_c0_compare(cnt); |
199 | irq_disable_hazard(); | 137 | compare_change_hazard(); |
200 | if ((int)(read_c0_count() - cnt) < 0) | 138 | if ((int)(read_c0_count() - cnt) < 0) |
201 | break; | 139 | break; |
202 | /* increase delta if the timer was already expired */ | 140 | /* increase delta if the timer was already expired */ |
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void) | |||
205 | while ((int)(read_c0_count() - cnt) <= 0) | 143 | while ((int)(read_c0_count() - cnt) <= 0) |
206 | ; /* Wait for expiry */ | 144 | ; /* Wait for expiry */ |
207 | 145 | ||
146 | compare_change_hazard(); | ||
208 | if (!c0_compare_int_pending()) | 147 | if (!c0_compare_int_pending()) |
209 | return 0; | 148 | return 0; |
210 | 149 | ||
211 | write_c0_compare(read_c0_count()); | 150 | write_c0_compare(read_c0_count()); |
212 | irq_disable_hazard(); | 151 | compare_change_hazard(); |
213 | if (c0_compare_int_pending()) | 152 | if (c0_compare_int_pending()) |
214 | return 0; | 153 | return 0; |
215 | 154 | ||
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void) | |||
219 | return 1; | 158 | return 1; |
220 | } | 159 | } |
221 | 160 | ||
161 | #ifndef CONFIG_MIPS_MT_SMTC | ||
162 | |||
222 | int __cpuinit mips_clockevent_init(void) | 163 | int __cpuinit mips_clockevent_init(void) |
223 | { | 164 | { |
224 | uint64_t mips_freq = mips_hpt_frequency; | 165 | uint64_t mips_freq = mips_hpt_frequency; |
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void) | |||
229 | if (!cpu_has_counter || !mips_hpt_frequency) | 170 | if (!cpu_has_counter || !mips_hpt_frequency) |
230 | return -ENXIO; | 171 | return -ENXIO; |
231 | 172 | ||
232 | #ifdef CONFIG_MIPS_MT_SMTC | ||
233 | setup_smtc_dummy_clockevent_device(); | ||
234 | |||
235 | /* | ||
236 | * On SMTC we only register VPE0's compare interrupt as clockevent | ||
237 | * device. | ||
238 | */ | ||
239 | if (cpu) | ||
240 | return 0; | ||
241 | #endif | ||
242 | |||
243 | if (!c0_compare_int_usable()) | 173 | if (!c0_compare_int_usable()) |
244 | return -ENXIO; | 174 | return -ENXIO; |
245 | 175 | ||
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void) | |||
265 | 195 | ||
266 | cd->rating = 300; | 196 | cd->rating = 300; |
267 | cd->irq = irq; | 197 | cd->irq = irq; |
268 | #ifdef CONFIG_MIPS_MT_SMTC | ||
269 | cd->cpumask = CPU_MASK_ALL; | ||
270 | #else | ||
271 | cd->cpumask = cpumask_of_cpu(cpu); | 198 | cd->cpumask = cpumask_of_cpu(cpu); |
272 | #endif | ||
273 | cd->set_next_event = mips_next_event; | 199 | cd->set_next_event = mips_next_event; |
274 | cd->set_mode = mips_set_mode; | 200 | cd->set_mode = mips_set_clock_mode; |
275 | cd->event_handler = mips_event_handler; | 201 | cd->event_handler = mips_event_handler; |
276 | 202 | ||
277 | clockevents_register_device(cd); | 203 | clockevents_register_device(cd); |
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void) | |||
281 | 207 | ||
282 | cp0_timer_irq_installed = 1; | 208 | cp0_timer_irq_installed = 1; |
283 | 209 | ||
284 | #ifdef CONFIG_MIPS_MT_SMTC | ||
285 | #define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) | ||
286 | setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); | ||
287 | #else | ||
288 | setup_irq(irq, &c0_compare_irqaction); | 210 | setup_irq(irq, &c0_compare_irqaction); |
289 | #endif | ||
290 | 211 | ||
291 | return 0; | 212 | return 0; |
292 | } | 213 | } |
214 | |||
215 | #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||
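
The cevt-r4k.c code above keeps relying on a signed-difference test, (int)(read_c0_count() - cnt) > 0, to decide whether a programmed Compare value has already slipped into the past on the free-running 32-bit Count register. The stand-alone sketch below (ordinary user-space C, not kernel code) demonstrates why that test stays correct across the counter wrap; the helper name deadline_missed() is made up for illustration.

/*
 * Stand-alone sketch (not from this patch): the wrap-safe "did the deadline
 * already pass?" test used by mips_next_event() and c0_compare_int_usable().
 * The signed difference (int32_t)(count - compare) is positive exactly when
 * 'compare' lies in the past, even across the 0xffffffff -> 0 wrap, as long
 * as the distance is below 2^31 counter cycles.
 */
#include <stdint.h>
#include <stdio.h>

static int deadline_missed(uint32_t count, uint32_t compare)
{
	return (int32_t)(count - compare) > 0;
}

int main(void)
{
	/* A deadline 1000 cycles ahead of a counter that is about to wrap. */
	uint32_t compare = 0xfffffff0u + 1000u;		/* wraps to 0x000003d8 */

	printf("%d\n", deadline_missed(0xfffffff5u, compare));	/* 0: still in the future */
	printf("%d\n", deadline_missed(0x00000400u, compare));	/* 1: already missed */
	return 0;
}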
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
7 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
8 | * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl | ||
9 | */ | ||
10 | #include <linux/clockchips.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | |||
14 | #include <asm/smtc_ipi.h> | ||
15 | #include <asm/time.h> | ||
16 | #include <asm/cevt-r4k.h> | ||
17 | |||
18 | /* | ||
19 | * Variant clock event timer support for SMTC on MIPS 34K, 1004K | ||
20 | * or other MIPS MT cores. | ||
21 | * | ||
22 | * Notes on SMTC Support: | ||
23 | * | ||
24 | * SMTC has multiple microthread TCs pretending to be Linux CPUs. | ||
25 | * But there's only one Count/Compare pair per VPE, and Compare | ||
26 | * interrupts are taken opportunisitically by available TCs | ||
27 | * bound to the VPE with the Count register. The new timer | ||
28 | * framework provides for global broadcasts, but we really | ||
29 | * want VPE-level multicasts for best behavior. So instead | ||
30 | * of invoking the high-level clock-event broadcast code, | ||
31 | * this version of SMTC support uses the historical SMTC | ||
32 | * multicast mechanisms "under the hood", appearing to the | ||
33 | * generic clock layer as if the interrupts are per-CPU. | ||
34 | * | ||
35 | * The approach taken here is to maintain a set of NR_CPUS | ||
36 | * virtual timers, and track which "CPU" needs to be alerted | ||
37 | * at each event. | ||
38 | * | ||
39 | * It's unlikely that we'll see a MIPS MT core with more than | ||
40 | * 2 VPEs, but we *know* that we won't need to handle more | ||
41 | * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements | ||
42 | * is always going to be overkill, but always going to be enough. | ||
43 | */ | ||
44 | |||
45 | unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; | ||
46 | static int smtc_nextinvpe[NR_CPUS]; | ||
47 | |||
48 | /* | ||
49 | * Timestamps stored are absolute values to be programmed | ||
50 | * into Count register. Valid timestamps will never be zero. | ||
51 | * If a Zero Count value is actually calculated, it is converted | ||
52 | * to be a 1, which will introduce 1 or two CPU cycles of error | ||
53 | * roughly once every four billion events, which at 1000 HZ means | ||
54 | * about once every 50 days. If that's actually a problem, one | ||
55 | * could alternate squashing 0 to 1 and to -1. | ||
56 | */ | ||
57 | |||
58 | #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) | ||
59 | #define ISVALID(x) ((x) != 0L) | ||
60 | |||
61 | /* | ||
62 | * Time comparison is subtle, as it's really truncated | ||
63 | * modular arithmetic. | ||
64 | */ | ||
65 | |||
66 | #define IS_SOONER(a, b, reference) \ | ||
67 | (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) | ||
68 | |||
69 | /* | ||
70 | * CATCHUP_INCREMENT, used when the function falls behind the counter. | ||
71 | * Could be an increasing function instead of a constant; | ||
72 | */ | ||
73 | |||
74 | #define CATCHUP_INCREMENT 64 | ||
75 | |||
76 | static int mips_next_event(unsigned long delta, | ||
77 | struct clock_event_device *evt) | ||
78 | { | ||
79 | unsigned long flags; | ||
80 | unsigned int mtflags; | ||
81 | unsigned long timestamp, reference, previous; | ||
82 | unsigned long nextcomp = 0L; | ||
83 | int vpe = current_cpu_data.vpe_id; | ||
84 | int cpu = smp_processor_id(); | ||
85 | local_irq_save(flags); | ||
86 | mtflags = dmt(); | ||
87 | |||
88 | /* | ||
89 | * Maintain the per-TC virtual timer | ||
90 | * and program the per-VPE shared Count register | ||
91 | * as appropriate here... | ||
92 | */ | ||
93 | reference = (unsigned long)read_c0_count(); | ||
94 | timestamp = MAKEVALID(reference + delta); | ||
95 | /* | ||
96 | * To really model the clock, we have to catch the case | ||
97 | * where the current next-in-VPE timestamp is the old | ||
98 | * timestamp for the calling CPE, but the new value is | ||
99 | * in fact later. In that case, we have to do a full | ||
100 | * scan and discover the new next-in-VPE CPU id and | ||
101 | * timestamp. | ||
102 | */ | ||
103 | previous = smtc_nexttime[vpe][cpu]; | ||
104 | if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) | ||
105 | && IS_SOONER(previous, timestamp, reference)) { | ||
106 | int i; | ||
107 | int soonest = cpu; | ||
108 | |||
109 | /* | ||
110 | * Update timestamp array here, so that new | ||
111 | * value gets considered along with those of | ||
112 | * other virtual CPUs on the VPE. | ||
113 | */ | ||
114 | smtc_nexttime[vpe][cpu] = timestamp; | ||
115 | for_each_online_cpu(i) { | ||
116 | if (ISVALID(smtc_nexttime[vpe][i]) | ||
117 | && IS_SOONER(smtc_nexttime[vpe][i], | ||
118 | smtc_nexttime[vpe][soonest], reference)) { | ||
119 | soonest = i; | ||
120 | } | ||
121 | } | ||
122 | smtc_nextinvpe[vpe] = soonest; | ||
123 | nextcomp = smtc_nexttime[vpe][soonest]; | ||
124 | /* | ||
125 | * Otherwise, we don't have to process the whole array rank, | ||
126 | * we just have to see if the event horizon has gotten closer. | ||
127 | */ | ||
128 | } else { | ||
129 | if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || | ||
130 | IS_SOONER(timestamp, | ||
131 | smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { | ||
132 | smtc_nextinvpe[vpe] = cpu; | ||
133 | nextcomp = timestamp; | ||
134 | } | ||
135 | /* | ||
136 | * Since next-in-VPE may me the same as the executing | ||
137 | * virtual CPU, we update the array *after* checking | ||
138 | * its value. | ||
139 | */ | ||
140 | smtc_nexttime[vpe][cpu] = timestamp; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * It may be that, in fact, we don't need to update Compare, | ||
145 | * but if we do, we want to make sure we didn't fall into | ||
146 | * a crack just behind Count. | ||
147 | */ | ||
148 | if (ISVALID(nextcomp)) { | ||
149 | write_c0_compare(nextcomp); | ||
150 | ehb(); | ||
151 | /* | ||
152 | * We never return an error, we just make sure | ||
153 | * that we trigger the handlers as quickly as | ||
154 | * we can if we fell behind. | ||
155 | */ | ||
156 | while ((nextcomp - (unsigned long)read_c0_count()) | ||
157 | > (unsigned long)LONG_MAX) { | ||
158 | nextcomp += CATCHUP_INCREMENT; | ||
159 | write_c0_compare(nextcomp); | ||
160 | ehb(); | ||
161 | } | ||
162 | } | ||
163 | emt(mtflags); | ||
164 | local_irq_restore(flags); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
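A standalone sketch (not kernel code) of the fall-behind test used in the catch-up loop above: if the unsigned distance from Count to the programmed Compare exceeds LONG_MAX, Compare landed in the "past" half of the 32-bit ring and is pushed forward by CATCHUP_INCREMENT until it is ahead again.

#include <stdint.h>
#include <stdio.h>

#define CATCHUP_INCREMENT 64u

static int compare_is_behind(uint32_t compare, uint32_t count)
{
	return (uint32_t)(compare - count) > (uint32_t)INT32_MAX;
}

int main(void)
{
	uint32_t count = 1000;    /* simulated Count value */
	uint32_t compare = 990;   /* Compare programmed 10 cycles too late */

	while (compare_is_behind(compare, count)) {
		compare += CATCHUP_INCREMENT;
		printf("bumped Compare to %u\n", compare);
	}
	return 0;
}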
168 | |||
169 | void smtc_distribute_timer(int vpe) | ||
170 | { | ||
171 | unsigned long flags; | ||
172 | unsigned int mtflags; | ||
173 | int cpu; | ||
174 | struct clock_event_device *cd; | ||
175 | unsigned long nextstamp = 0L; | ||
176 | unsigned long reference; | ||
177 | |||
178 | |||
179 | repeat: | ||
180 | for_each_online_cpu(cpu) { | ||
181 | /* | ||
182 | * Find virtual CPUs within the current VPE who have | ||
183 | * unserviced timer requests whose time is now past. | ||
184 | */ | ||
185 | local_irq_save(flags); | ||
186 | mtflags = dmt(); | ||
187 | if (cpu_data[cpu].vpe_id == vpe && | ||
188 | ISVALID(smtc_nexttime[vpe][cpu])) { | ||
189 | reference = (unsigned long)read_c0_count(); | ||
190 | if ((smtc_nexttime[vpe][cpu] - reference) | ||
191 | > (unsigned long)LONG_MAX) { | ||
192 | smtc_nexttime[vpe][cpu] = 0L; | ||
193 | emt(mtflags); | ||
194 | local_irq_restore(flags); | ||
195 | /* | ||
196 | * We don't send IPIs to ourselves. | ||
197 | */ | ||
198 | if (cpu != smp_processor_id()) { | ||
199 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
200 | } else { | ||
201 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
202 | cd->event_handler(cd); | ||
203 | } | ||
204 | } else { | ||
205 | /* Local to VPE but Valid Time not yet reached. */ | ||
206 | if (!ISVALID(nextstamp) || | ||
207 | IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, | ||
208 | reference)) { | ||
209 | smtc_nextinvpe[vpe] = cpu; | ||
210 | nextstamp = smtc_nexttime[vpe][cpu]; | ||
211 | } | ||
212 | emt(mtflags); | ||
213 | local_irq_restore(flags); | ||
214 | } | ||
215 | } else { | ||
216 | emt(mtflags); | ||
217 | local_irq_restore(flags); | ||
218 | |||
219 | } | ||
220 | } | ||
221 | /* Reprogram for interrupt at next soonest timestamp for VPE */ | ||
222 | if (ISVALID(nextstamp)) { | ||
223 | write_c0_compare(nextstamp); | ||
224 | ehb(); | ||
225 | if ((nextstamp - (unsigned long)read_c0_count()) | ||
226 | > (unsigned long)LONG_MAX) | ||
227 | goto repeat; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | |||
232 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
233 | { | ||
234 | int cpu = smp_processor_id(); | ||
235 | |||
236 | /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ | ||
237 | handle_perf_irq(1); | ||
238 | |||
239 | if (read_c0_cause() & (1 << 30)) { | ||
240 | /* Clear Count/Compare Interrupt */ | ||
241 | write_c0_compare(read_c0_compare()); | ||
242 | smtc_distribute_timer(cpu_data[cpu].vpe_id); | ||
243 | } | ||
244 | return IRQ_HANDLED; | ||
245 | } | ||
246 | |||
247 | |||
248 | int __cpuinit mips_clockevent_init(void) | ||
249 | { | ||
250 | uint64_t mips_freq = mips_hpt_frequency; | ||
251 | unsigned int cpu = smp_processor_id(); | ||
252 | struct clock_event_device *cd; | ||
253 | unsigned int irq; | ||
254 | int i; | ||
255 | int j; | ||
256 | |||
257 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
258 | return -ENXIO; | ||
259 | if (cpu == 0) { | ||
260 | for (i = 0; i < num_possible_cpus(); i++) { | ||
261 | smtc_nextinvpe[i] = 0; | ||
262 | for (j = 0; j < num_possible_cpus(); j++) | ||
263 | smtc_nexttime[i][j] = 0L; | ||
264 | } | ||
265 | /* | ||
266 | * SMTC also can't have the usability test | ||
267 | * run by secondary TCs once Compare is in use. | ||
268 | */ | ||
269 | if (!c0_compare_int_usable()) | ||
270 | return -ENXIO; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * With vectored interrupts, things get platform-specific. | ||
275 | * get_c0_compare_int is a hook to allow a platform to return the | ||
276 | * interrupt number of its liking. | ||
277 | */ | ||
278 | irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
279 | if (get_c0_compare_int) | ||
280 | irq = get_c0_compare_int(); | ||
281 | |||
282 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
283 | |||
284 | cd->name = "MIPS"; | ||
285 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
286 | |||
287 | /* Calculate the min / max delta */ | ||
288 | cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
289 | cd->shift = 32; | ||
290 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
291 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
292 | |||
293 | cd->rating = 300; | ||
294 | cd->irq = irq; | ||
295 | cd->cpumask = cpumask_of_cpu(cpu); | ||
296 | cd->set_next_event = mips_next_event; | ||
297 | cd->set_mode = mips_set_clock_mode; | ||
298 | cd->event_handler = mips_event_handler; | ||
299 | |||
300 | clockevents_register_device(cd); | ||
301 | |||
302 | /* | ||
303 | * On SMTC we only want to do the data structure | ||
304 | * initialization and IRQ setup once. | ||
305 | */ | ||
306 | if (cpu) | ||
307 | return 0; | ||
308 | /* | ||
309 | * And we need the hwmask associated with the c0_compare | ||
310 | * vector to be initialized. | ||
311 | */ | ||
312 | irq_hwmask[irq] = (0x100 << cp0_compare_irq); | ||
313 | if (cp0_timer_irq_installed) | ||
314 | return 0; | ||
315 | |||
316 | cp0_timer_irq_installed = 1; | ||
317 | |||
318 | setup_irq(irq, &c0_compare_irqaction); | ||
319 | |||
320 | return 0; | ||
321 | } | ||
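The mult/shift pair programmed above implements a fixed-point nanoseconds-to-cycles conversion. A rough standalone sketch, assuming div_sc() computes (freq << shift) / NSEC_PER_SEC (the usual description of the generic clockevents helper):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t freq = 100000000ULL;   /* e.g. a 100 MHz Count rate */
	unsigned int shift = 32;
	uint64_t mult = (freq << shift) / NSEC_PER_SEC;

	uint64_t delta_ns = 1000000;    /* ask for an event 1 ms out */
	uint64_t cycles = (delta_ns * mult) >> shift;

	/* expect roughly 100000 cycles for 1 ms at 100 MHz */
	printf("mult=%llu cycles=%llu\n",
	       (unsigned long long)mult, (unsigned long long)cycles);
	return 0;
}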
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 11c92dc53791..e621fda8ab37 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -54,14 +54,18 @@ extern void r4k_wait(void); | |||
54 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes | 54 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes |
55 | * using this version a gamble. | 55 | * using this version a gamble. |
56 | */ | 56 | */ |
57 | static void r4k_wait_irqoff(void) | 57 | void r4k_wait_irqoff(void) |
58 | { | 58 | { |
59 | local_irq_disable(); | 59 | local_irq_disable(); |
60 | if (!need_resched()) | 60 | if (!need_resched()) |
61 | __asm__(" .set mips3 \n" | 61 | __asm__(" .set push \n" |
62 | " .set mips3 \n" | ||
62 | " wait \n" | 63 | " wait \n" |
63 | " .set mips0 \n"); | 64 | " .set pop \n"); |
64 | local_irq_enable(); | 65 | local_irq_enable(); |
66 | __asm__(" .globl __pastwait \n" | ||
67 | "__pastwait: \n"); | ||
68 | return; | ||
65 | } | 69 | } |
66 | 70 | ||
67 | /* | 71 | /* |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e29598ae939d..ffa331029e08 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit) | |||
79 | 79 | ||
80 | FEXPORT(restore_all) # restore full frame | 80 | FEXPORT(restore_all) # restore full frame |
81 | #ifdef CONFIG_MIPS_MT_SMTC | 81 | #ifdef CONFIG_MIPS_MT_SMTC |
82 | /* Detect and execute deferred IPI "interrupts" */ | ||
83 | LONG_L s0, TI_REGS($28) | ||
84 | LONG_S sp, TI_REGS($28) | ||
85 | jal deferred_smtc_ipi | ||
86 | LONG_S s0, TI_REGS($28) | ||
87 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 82 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
88 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | 83 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ |
89 | mfc0 v0, CP0_TCSTATUS | 84 | mfc0 v0, CP0_TCSTATUS |
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame | |||
112 | xor t0, t0, t3 | 107 | xor t0, t0, t3 |
113 | mtc0 t0, CP0_TCCONTEXT | 108 | mtc0 t0, CP0_TCCONTEXT |
114 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 109 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
110 | /* Detect and execute deferred IPI "interrupts" */ | ||
111 | LONG_L s0, TI_REGS($28) | ||
112 | LONG_S sp, TI_REGS($28) | ||
113 | jal deferred_smtc_ipi | ||
114 | LONG_S s0, TI_REGS($28) | ||
115 | #endif /* CONFIG_MIPS_MT_SMTC */ | 115 | #endif /* CONFIG_MIPS_MT_SMTC */ |
116 | .set noat | 116 | .set noat |
117 | RESTORE_TEMP | 117 | RESTORE_TEMP |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index f886dd7f708e..01dcbe38fa01 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp) | |||
282 | and t0, a0, t1 | 282 | and t0, a0, t1 |
283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
284 | mfc0 t2, CP0_TCCONTEXT | 284 | mfc0 t2, CP0_TCCONTEXT |
285 | or t0, t0, t2 | 285 | or t2, t0, t2 |
286 | mtc0 t0, CP0_TCCONTEXT | 286 | mtc0 t2, CP0_TCCONTEXT |
287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
288 | xor t1, t1, t0 | 288 | xor t1, t1, t0 |
289 | mtc0 t1, CP0_STATUS | 289 | mtc0 t1, CP0_STATUS |
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 361364501d34..492a0a8d70fb 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/irqflags.h> | 22 | #include <asm/irqflags.h> |
23 | #include <asm/regdef.h> | 23 | #include <asm/regdef.h> |
24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
25 | #include <asm/pgtable-bits.h> | ||
25 | #include <asm/mipsregs.h> | 26 | #include <asm/mipsregs.h> |
26 | #include <asm/stackframe.h> | 27 | #include <asm/stackframe.h> |
27 | 28 | ||
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index df4d3f2f740c..dc9eb72ed9de 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c | |||
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh); | |||
159 | /* | 159 | /* |
160 | * FPU Use Factor empirically derived from experiments on 34K | 160 | * FPU Use Factor empirically derived from experiments on 34K |
161 | */ | 161 | */ |
162 | #define FPUSEFACTOR 333 | 162 | #define FPUSEFACTOR 2000 |
163 | 163 | ||
164 | static __init int mt_fp_affinity_init(void) | 164 | static __init int mt_fp_affinity_init(void) |
165 | { | 165 | { |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ce7684335a41..22fc19bbe87f 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void) | |||
55 | while (1) { | 55 | while (1) { |
56 | tick_nohz_stop_sched_tick(1); | 56 | tick_nohz_stop_sched_tick(1); |
57 | while (!need_resched()) { | 57 | while (!need_resched()) { |
58 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | 58 | #ifdef CONFIG_MIPS_MT_SMTC |
59 | extern void smtc_idle_loop_hook(void); | 59 | extern void smtc_idle_loop_hook(void); |
60 | 60 | ||
61 | smtc_idle_loop_hook(); | 61 | smtc_idle_loop_hook(); |
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
145 | */ | 145 | */ |
146 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | 146 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); |
147 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 147 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
148 | |||
149 | #ifdef CONFIG_MIPS_MT_SMTC | ||
150 | /* | ||
151 | * SMTC restores TCStatus after Status, and the CU bits | ||
152 | * are aliased there. | ||
153 | */ | ||
154 | childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); | ||
155 | #endif | ||
148 | clear_tsk_thread_flag(p, TIF_USEDFPU); | 156 | clear_tsk_thread_flag(p, TIF_USEDFPU); |
149 | 157 | ||
150 | #ifdef CONFIG_MIPS_MT_FPAFF | 158 | #ifdef CONFIG_MIPS_MT_FPAFF |
151 | clear_tsk_thread_flag(p, TIF_FPUBOUND); | 159 | clear_tsk_thread_flag(p, TIF_FPUBOUND); |
152 | |||
153 | /* | ||
154 | * FPU affinity support is cleaner if we track the | ||
155 | * user-visible CPU affinity from the very beginning. | ||
156 | * The generic cpus_allowed mask will already have | ||
157 | * been copied from the parent before copy_thread | ||
158 | * is invoked. | ||
159 | */ | ||
160 | p->thread.user_cpus_allowed = p->cpus_allowed; | ||
161 | #endif /* CONFIG_MIPS_MT_FPAFF */ | 160 | #endif /* CONFIG_MIPS_MT_FPAFF */ |
162 | 161 | ||
163 | if (clone_flags & CLONE_SETTLS) | 162 | if (clone_flags & CLONE_SETTLS) |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 35234b92b9a5..96ffc9c6d194 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
238 | case FPC_EIR: { /* implementation / version register */ | 238 | case FPC_EIR: { /* implementation / version register */ |
239 | unsigned int flags; | 239 | unsigned int flags; |
240 | #ifdef CONFIG_MIPS_MT_SMTC | 240 | #ifdef CONFIG_MIPS_MT_SMTC |
241 | unsigned int irqflags; | 241 | unsigned long irqflags; |
242 | unsigned int mtflags; | 242 | unsigned int mtflags; |
243 | #endif /* CONFIG_MIPS_MT_SMTC */ | 243 | #endif /* CONFIG_MIPS_MT_SMTC */ |
244 | 244 | ||
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 4410f172b8ab..7b59cfb7e602 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -121,6 +121,8 @@ asmlinkage __cpuinit void start_secondary(void) | |||
121 | cpu = smp_processor_id(); | 121 | cpu = smp_processor_id(); |
122 | cpu_data[cpu].udelay_val = loops_per_jiffy; | 122 | cpu_data[cpu].udelay_val = loops_per_jiffy; |
123 | 123 | ||
124 | notify_cpu_starting(cpu); | ||
125 | |||
124 | mp_ops->smp_finish(); | 126 | mp_ops->smp_finish(); |
125 | set_cpu_sibling_map(cpu); | 127 | set_cpu_sibling_map(cpu); |
126 | 128 | ||
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a516286532ab..897fb2b4751c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -1,4 +1,21 @@ | |||
1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | 1 | /* |
2 | * This program is free software; you can redistribute it and/or | ||
3 | * modify it under the terms of the GNU General Public License | ||
4 | * as published by the Free Software Foundation; either version 2 | ||
5 | * of the License, or (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2004 Mips Technologies, Inc | ||
17 | * Copyright (C) 2008 Kevin D. Kissell | ||
18 | */ | ||
2 | 19 | ||
3 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
4 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -21,7 +38,6 @@ | |||
21 | #include <asm/time.h> | 38 | #include <asm/time.h> |
22 | #include <asm/addrspace.h> | 39 | #include <asm/addrspace.h> |
23 | #include <asm/smtc.h> | 40 | #include <asm/smtc.h> |
24 | #include <asm/smtc_ipi.h> | ||
25 | #include <asm/smtc_proc.h> | 41 | #include <asm/smtc_proc.h> |
26 | 42 | ||
27 | /* | 43 | /* |
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; | |||
58 | 74 | ||
59 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 75 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
60 | 76 | ||
61 | /* | ||
62 | * Clock interrupt "latch" buffers, per "CPU" | ||
63 | */ | ||
64 | |||
65 | static atomic_t ipi_timer_latch[NR_CPUS]; | ||
66 | 77 | ||
67 | /* | 78 | /* |
68 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | 79 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS]; | |||
70 | 81 | ||
71 | #define IPIBUF_PER_CPU 4 | 82 | #define IPIBUF_PER_CPU 4 |
72 | 83 | ||
73 | static struct smtc_ipi_q IPIQ[NR_CPUS]; | 84 | struct smtc_ipi_q IPIQ[NR_CPUS]; |
74 | static struct smtc_ipi_q freeIPIq; | 85 | static struct smtc_ipi_q freeIPIq; |
75 | 86 | ||
76 | 87 | ||
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) | |||
282 | * phys_cpu_present_map and the logical/physical mappings. | 293 | * phys_cpu_present_map and the logical/physical mappings. |
283 | */ | 294 | */ |
284 | 295 | ||
285 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | 296 | int __init smtc_build_cpu_map(int start_cpu_slot) |
286 | { | 297 | { |
287 | int i, ntcs; | 298 | int i, ntcs; |
288 | 299 | ||
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
325 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | 336 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() |
326 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | 337 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) |
327 | | TCSTATUS_A); | 338 | | TCSTATUS_A); |
328 | write_tc_c0_tccontext(0); | 339 | /* |
340 | * TCContext gets an offset from the base of the IPIQ array | ||
341 | * to be used in low-level code to detect the presence of | ||
342 | * an active IPI queue | ||
343 | */ | ||
344 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | ||
329 | /* Bind tc to vpe */ | 345 | /* Bind tc to vpe */ |
330 | write_tc_c0_tcbind(vpe); | 346 | write_tc_c0_tcbind(vpe); |
331 | /* In general, all TCs should have the same cpu_data indications */ | 347 | /* In general, all TCs should have the same cpu_data indications */ |
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
336 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | 352 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
337 | cpu_data[cpu].vpe_id = vpe; | 353 | cpu_data[cpu].vpe_id = vpe; |
338 | cpu_data[cpu].tc_id = tc; | 354 | cpu_data[cpu].tc_id = tc; |
355 | /* Multi-core SMTC hasn't been tested, but be prepared */ | ||
356 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | ||
339 | } | 357 | } |
340 | 358 | ||
359 | /* | ||
360 | * Tweak to get Count registers into as close a sync as possible. | ||
361 | * Value seems good for 34K-class cores. | ||
362 | */ | ||
363 | |||
364 | #define CP0_SKEW 8 | ||
341 | 365 | ||
342 | void mipsmt_prepare_cpus(void) | 366 | void smtc_prepare_cpus(int cpus) |
343 | { | 367 | { |
344 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; | 368 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; |
345 | unsigned long flags; | 369 | unsigned long flags; |
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) | |||
363 | IPIQ[i].head = IPIQ[i].tail = NULL; | 387 | IPIQ[i].head = IPIQ[i].tail = NULL; |
364 | spin_lock_init(&IPIQ[i].lock); | 388 | spin_lock_init(&IPIQ[i].lock); |
365 | IPIQ[i].depth = 0; | 389 | IPIQ[i].depth = 0; |
366 | atomic_set(&ipi_timer_latch[i], 0); | ||
367 | } | 390 | } |
368 | 391 | ||
369 | /* cpu_data index starts at zero */ | 392 | /* cpu_data index starts at zero */ |
370 | cpu = 0; | 393 | cpu = 0; |
371 | cpu_data[cpu].vpe_id = 0; | 394 | cpu_data[cpu].vpe_id = 0; |
372 | cpu_data[cpu].tc_id = 0; | 395 | cpu_data[cpu].tc_id = 0; |
396 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; | ||
373 | cpu++; | 397 | cpu++; |
374 | 398 | ||
375 | /* Report on boot-time options */ | 399 | /* Report on boot-time options */ |
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) | |||
484 | write_vpe_c0_compare(0); | 508 | write_vpe_c0_compare(0); |
485 | /* Propagate Config7 */ | 509 | /* Propagate Config7 */ |
486 | write_vpe_c0_config7(read_c0_config7()); | 510 | write_vpe_c0_config7(read_c0_config7()); |
487 | write_vpe_c0_count(read_c0_count()); | 511 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); |
512 | ehb(); | ||
488 | } | 513 | } |
489 | /* enable multi-threading within VPE */ | 514 | /* enable multi-threading within VPE */ |
490 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | 515 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); |
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void) | |||
556 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | 581 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) |
557 | { | 582 | { |
558 | extern u32 kernelsp[NR_CPUS]; | 583 | extern u32 kernelsp[NR_CPUS]; |
559 | long flags; | 584 | unsigned long flags; |
560 | int mtflags; | 585 | int mtflags; |
561 | 586 | ||
562 | LOCK_MT_PRA(); | 587 | LOCK_MT_PRA(); |
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | |||
585 | 610 | ||
586 | void smtc_init_secondary(void) | 611 | void smtc_init_secondary(void) |
587 | { | 612 | { |
588 | /* | ||
589 | * Start timer on secondary VPEs if necessary. | ||
590 | * plat_timer_setup has already have been invoked by init/main | ||
591 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
592 | * SMTC init code assigns TCs consecutively and in ascending order | ||
593 | * across available VPEs. | ||
594 | */ | ||
595 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
596 | ((read_c0_tcbind() & TCBIND_CURVPE) | ||
597 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
598 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
599 | } | ||
600 | |||
601 | local_irq_enable(); | 613 | local_irq_enable(); |
602 | } | 614 | } |
603 | 615 | ||
604 | void smtc_smp_finish(void) | 616 | void smtc_smp_finish(void) |
605 | { | 617 | { |
618 | int cpu = smp_processor_id(); | ||
619 | |||
620 | /* | ||
621 | * Lowest-numbered CPU per VPE starts a clock tick. | ||
622 | * Like per_cpu_trap_init() hack, this assumes that | ||
623 | * SMTC init code assigns TCs consecutively and | ||
624 | * in ascending order across available VPEs. | ||
625 | */ | ||
626 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | ||
627 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
628 | |||
606 | printk("TC %d going on-line as CPU %d\n", | 629 | printk("TC %d going on-line as CPU %d\n", |
607 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | 630 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
608 | } | 631 | } |
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
753 | { | 776 | { |
754 | int tcstatus; | 777 | int tcstatus; |
755 | struct smtc_ipi *pipi; | 778 | struct smtc_ipi *pipi; |
756 | long flags; | 779 | unsigned long flags; |
757 | int mtflags; | 780 | int mtflags; |
781 | unsigned long tcrestart; | ||
782 | extern void r4k_wait_irqoff(void), __pastwait(void); | ||
758 | 783 | ||
759 | if (cpu == smp_processor_id()) { | 784 | if (cpu == smp_processor_id()) { |
760 | printk("Cannot Send IPI to self!\n"); | 785 | printk("Cannot Send IPI to self!\n"); |
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
771 | pipi->arg = (void *)action; | 796 | pipi->arg = (void *)action; |
772 | pipi->dest = cpu; | 797 | pipi->dest = cpu; |
773 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | 798 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
774 | if (type == SMTC_CLOCK_TICK) | ||
775 | atomic_inc(&ipi_timer_latch[cpu]); | ||
776 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | 799 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
777 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 800 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
778 | LOCK_CORE_PRA(); | 801 | LOCK_CORE_PRA(); |
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
800 | 823 | ||
801 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | 824 | if ((tcstatus & TCSTATUS_IXMT) != 0) { |
802 | /* | 825 | /* |
803 | * Spin-waiting here can deadlock, | 826 | * If we're in the irq-off version of the wait |
804 | * so we queue the message for the target TC. | 827 | * loop, we need to force exit from the wait and |
828 | * do a direct post of the IPI. | ||
829 | */ | ||
830 | if (cpu_wait == r4k_wait_irqoff) { | ||
831 | tcrestart = read_tc_c0_tcrestart(); | ||
832 | if (tcrestart >= (unsigned long)r4k_wait_irqoff | ||
833 | && tcrestart < (unsigned long)__pastwait) { | ||
834 | write_tc_c0_tcrestart(__pastwait); | ||
835 | tcstatus &= ~TCSTATUS_IXMT; | ||
836 | write_tc_c0_tcstatus(tcstatus); | ||
837 | goto postdirect; | ||
838 | } | ||
839 | } | ||
840 | /* | ||
841 | * Otherwise we queue the message for the target TC | ||
842 | * to pick up when it does a local_irq_restore() | ||
805 | */ | 843 | */ |
806 | write_tc_c0_tchalt(0); | 844 | write_tc_c0_tchalt(0); |
807 | UNLOCK_CORE_PRA(); | 845 | UNLOCK_CORE_PRA(); |
808 | /* Try to reduce redundant timer interrupt messages */ | ||
809 | if (type == SMTC_CLOCK_TICK) { | ||
810 | if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ | ||
811 | smtc_ipi_nq(&freeIPIq, pipi); | ||
812 | return; | ||
813 | } | ||
814 | } | ||
815 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 846 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
816 | } else { | 847 | } else { |
817 | if (type == SMTC_CLOCK_TICK) | 848 | postdirect: |
818 | atomic_inc(&ipi_timer_latch[cpu]); | ||
819 | post_direct_ipi(cpu, pipi); | 849 | post_direct_ipi(cpu, pipi); |
820 | write_tc_c0_tchalt(0); | 850 | write_tc_c0_tchalt(0); |
821 | UNLOCK_CORE_PRA(); | 851 | UNLOCK_CORE_PRA(); |
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) | |||
883 | smp_call_function_interrupt(); | 913 | smp_call_function_interrupt(); |
884 | } | 914 | } |
885 | 915 | ||
886 | DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | 916 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
887 | 917 | ||
888 | void ipi_decode(struct smtc_ipi *pipi) | 918 | void ipi_decode(struct smtc_ipi *pipi) |
889 | { | 919 | { |
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
891 | struct clock_event_device *cd; | 921 | struct clock_event_device *cd; |
892 | void *arg_copy = pipi->arg; | 922 | void *arg_copy = pipi->arg; |
893 | int type_copy = pipi->type; | 923 | int type_copy = pipi->type; |
894 | int ticks; | ||
895 | |||
896 | smtc_ipi_nq(&freeIPIq, pipi); | 924 | smtc_ipi_nq(&freeIPIq, pipi); |
897 | switch (type_copy) { | 925 | switch (type_copy) { |
898 | case SMTC_CLOCK_TICK: | 926 | case SMTC_CLOCK_TICK: |
899 | irq_enter(); | 927 | irq_enter(); |
900 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; | 928 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; |
901 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 929 | cd = &per_cpu(mips_clockevent_device, cpu); |
902 | ticks = atomic_read(&ipi_timer_latch[cpu]); | 930 | cd->event_handler(cd); |
903 | atomic_sub(ticks, &ipi_timer_latch[cpu]); | ||
904 | while (ticks) { | ||
905 | cd->event_handler(cd); | ||
906 | ticks--; | ||
907 | } | ||
908 | irq_exit(); | 931 | irq_exit(); |
909 | break; | 932 | break; |
910 | 933 | ||
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
937 | } | 960 | } |
938 | } | 961 | } |
939 | 962 | ||
963 | /* | ||
964 | * Similar to smtc_ipi_replay(), but invoked from context restore, | ||
965 | * so it reuses the current exception frame rather than set up a | ||
966 | * new one with self_ipi. | ||
967 | */ | ||
968 | |||
940 | void deferred_smtc_ipi(void) | 969 | void deferred_smtc_ipi(void) |
941 | { | 970 | { |
942 | struct smtc_ipi *pipi; | 971 | int cpu = smp_processor_id(); |
943 | unsigned long flags; | ||
944 | /* DEBUG */ | ||
945 | int q = smp_processor_id(); | ||
946 | 972 | ||
947 | /* | 973 | /* |
948 | * Test is not atomic, but much faster than a dequeue, | 974 | * Test is not atomic, but much faster than a dequeue, |
949 | * and the vast majority of invocations will have a null queue. | 975 | * and the vast majority of invocations will have a null queue. |
976 | * If irqs were disabled when this was called, then any IPIs queued | ||
977 | * after our last test will be taken on the next irq_enable/restore. | ||
978 | * If interrupts were enabled, then any IPIs added after the | ||
979 | * last test will be taken directly. | ||
950 | */ | 980 | */ |
951 | if (IPIQ[q].head != NULL) { | 981 | |
952 | while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | 982 | while (IPIQ[cpu].head != NULL) { |
953 | /* ipi_decode() should be called with interrupts off */ | 983 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
954 | local_irq_save(flags); | 984 | struct smtc_ipi *pipi; |
985 | unsigned long flags; | ||
986 | |||
987 | /* | ||
988 | * It may be possible we'll come in with interrupts | ||
989 | * already enabled. | ||
990 | */ | ||
991 | local_irq_save(flags); | ||
992 | |||
993 | spin_lock(&q->lock); | ||
994 | pipi = __smtc_ipi_dq(q); | ||
995 | spin_unlock(&q->lock); | ||
996 | if (pipi != NULL) | ||
955 | ipi_decode(pipi); | 997 | ipi_decode(pipi); |
956 | local_irq_restore(flags); | 998 | /* |
957 | } | 999 | * The use of the __raw_local restore isn't |
1000 | * as obviously necessary here as in smtc_ipi_replay(), | ||
1001 | * but it's more efficient, given that we're already | ||
1002 | * running down the IPI queue. | ||
1003 | */ | ||
1004 | __raw_local_irq_restore(flags); | ||
958 | } | 1005 | } |
959 | } | 1006 | } |
960 | 1007 | ||
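A minimal, single-threaded sketch of the drain pattern used by deferred_smtc_ipi() above (illustrative only: irq_save()/irq_restore() are stand-ins for the kernel primitives, and the real code dequeues under q->lock). The point is that the head test is repeated after every restore, so a message queued after the final test is taken as a normal interrupt rather than being lost.

#include <stdio.h>
#include <stdlib.h>

struct msg { struct msg *next; int payload; };
struct queue { struct msg *head; };

static void irq_save(unsigned long *flags) { *flags = 0; }    /* stand-in */
static void irq_restore(unsigned long flags) { (void)flags; } /* stand-in */

static struct msg *dequeue(struct queue *q)
{
	struct msg *m = q->head;

	if (m)
		q->head = m->next;
	return m;
}

static void drain(struct queue *q)
{
	while (q->head != NULL) {
		unsigned long flags;
		struct msg *m;

		irq_save(&flags);
		m = dequeue(q);
		irq_restore(flags);
		if (m) {
			printf("handled deferred message %d\n", m->payload);
			free(m);
		}
	}
}

int main(void)
{
	struct queue q = { NULL };
	int i;

	for (i = 3; i > 0; i--) {
		struct msg *m = malloc(sizeof(*m));

		if (!m)
			return 1;
		m->payload = i;
		m->next = q.head;
		q.head = m;
	}
	drain(&q);
	return 0;
}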
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | |||
975 | struct smtc_ipi *pipi; | 1022 | struct smtc_ipi *pipi; |
976 | unsigned long tcstatus; | 1023 | unsigned long tcstatus; |
977 | int sent; | 1024 | int sent; |
978 | long flags; | 1025 | unsigned long flags; |
979 | unsigned int mtflags; | 1026 | unsigned int mtflags; |
980 | unsigned int vpflags; | 1027 | unsigned int vpflags; |
981 | 1028 | ||
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
1066 | 1113 | ||
1067 | /* | 1114 | /* |
1068 | * SMTC-specific hacks invoked from elsewhere in the kernel. | 1115 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
1069 | * | ||
1070 | * smtc_ipi_replay is called from raw_local_irq_restore which is only ever | ||
1071 | * called with interrupts disabled. We do rely on interrupts being disabled | ||
1072 | * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would | ||
1073 | * result in a recursive call to raw_local_irq_restore(). | ||
1074 | */ | 1116 | */ |
1075 | 1117 | ||
1076 | static void __smtc_ipi_replay(void) | 1118 | /* |
1119 | * smtc_ipi_replay is called from raw_local_irq_restore | ||
1120 | */ | ||
1121 | |||
1122 | void smtc_ipi_replay(void) | ||
1077 | { | 1123 | { |
1078 | unsigned int cpu = smp_processor_id(); | 1124 | unsigned int cpu = smp_processor_id(); |
1079 | 1125 | ||
1080 | /* | 1126 | /* |
1081 | * To the extent that we've ever turned interrupts off, | 1127 | * To the extent that we've ever turned interrupts off, |
1082 | * we may have accumulated deferred IPIs. This is subtle. | 1128 | * we may have accumulated deferred IPIs. This is subtle. |
1083 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
1084 | * exact number - but we'll also disable interrupts | ||
1085 | * and create a window of failure where a new IPI gets | ||
1086 | * queued after we test the depth but before we re-enable | ||
1087 | * interrupts. So long as IXMT never gets set, however, | ||
1088 | * we should be OK: If we pick up something and dispatch | 1129 | * we should be OK: If we pick up something and dispatch |
1089 | * it here, that's great. If we see nothing, but concurrent | 1130 | * it here, that's great. If we see nothing, but concurrent |
1090 | * with this operation, another TC sends us an IPI, IXMT | 1131 | * with this operation, another TC sends us an IPI, IXMT |
1091 | * is clear, and we'll handle it as a real pseudo-interrupt | 1132 | * is clear, and we'll handle it as a real pseudo-interrupt |
1092 | * and not a pseudo-pseudo interrupt. | 1133 | * and not a pseudo-pseudo interrupt. The important thing |
1134 | * is to do the last check for queued messages *after* the | ||
1135 | * re-enabling of interrupts. | ||
1093 | */ | 1136 | */ |
1094 | if (IPIQ[cpu].depth > 0) { | 1137 | while (IPIQ[cpu].head != NULL) { |
1095 | while (1) { | 1138 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
1096 | struct smtc_ipi_q *q = &IPIQ[cpu]; | 1139 | struct smtc_ipi *pipi; |
1097 | struct smtc_ipi *pipi; | 1140 | unsigned long flags; |
1098 | extern void self_ipi(struct smtc_ipi *); | 1141 | |
1099 | 1142 | /* | |
1100 | spin_lock(&q->lock); | 1143 | * It's just possible we'll come in with interrupts |
1101 | pipi = __smtc_ipi_dq(q); | 1144 | * already enabled. |
1102 | spin_unlock(&q->lock); | 1145 | */ |
1103 | if (!pipi) | 1146 | local_irq_save(flags); |
1104 | break; | 1147 | |
1148 | spin_lock(&q->lock); | ||
1149 | pipi = __smtc_ipi_dq(q); | ||
1150 | spin_unlock(&q->lock); | ||
1151 | /* | ||
1152 | * But use a raw restore here to avoid recursion. | ||
1153 | */ | ||
1154 | __raw_local_irq_restore(flags); | ||
1105 | 1155 | ||
1156 | if (pipi) { | ||
1106 | self_ipi(pipi); | 1157 | self_ipi(pipi); |
1107 | smtc_cpu_stats[cpu].selfipis++; | 1158 | smtc_cpu_stats[cpu].selfipis++; |
1108 | } | 1159 | } |
1109 | } | 1160 | } |
1110 | } | 1161 | } |
1111 | 1162 | ||
1112 | void smtc_ipi_replay(void) | ||
1113 | { | ||
1114 | raw_local_irq_disable(); | ||
1115 | __smtc_ipi_replay(); | ||
1116 | } | ||
1117 | |||
1118 | EXPORT_SYMBOL(smtc_ipi_replay); | 1163 | EXPORT_SYMBOL(smtc_ipi_replay); |
1119 | 1164 | ||
1120 | void smtc_idle_loop_hook(void) | 1165 | void smtc_idle_loop_hook(void) |
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) | |||
1193 | } | 1238 | } |
1194 | } | 1239 | } |
1195 | 1240 | ||
1196 | /* | ||
1197 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
1198 | */ | ||
1199 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
1200 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
1201 | if ((tc != smp_processor_id()) && | ||
1202 | atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { | ||
1203 | if (clock_hang_reported[tc] == 0) { | ||
1204 | pdb_msg += sprintf(pdb_msg, | ||
1205 | "TC %d looks hung with timer latch at %d\n", | ||
1206 | tc, atomic_read(&ipi_timer_latch[tc])); | ||
1207 | clock_hang_reported[tc]++; | ||
1208 | } | ||
1209 | } | ||
1210 | } | ||
1211 | emt(mtflags); | 1241 | emt(mtflags); |
1212 | local_irq_restore(flags); | 1242 | local_irq_restore(flags); |
1213 | if (pdb_msg != &id_ho_db_msg[0]) | 1243 | if (pdb_msg != &id_ho_db_msg[0]) |
1214 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | 1244 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); |
1215 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 1245 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
1216 | 1246 | ||
1217 | /* | 1247 | smtc_ipi_replay(); |
1218 | * Replay any accumulated deferred IPIs. If "Instant Replay" | ||
1219 | * is in use, there should never be any. | ||
1220 | */ | ||
1221 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | ||
1222 | { | ||
1223 | unsigned long flags; | ||
1224 | |||
1225 | local_irq_save(flags); | ||
1226 | __smtc_ipi_replay(); | ||
1227 | local_irq_restore(flags); | ||
1228 | } | ||
1229 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ | ||
1230 | } | 1248 | } |
1231 | 1249 | ||
1232 | void smtc_soft_dump(void) | 1250 | void smtc_soft_dump(void) |
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) | |||
1242 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | 1260 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); |
1243 | } | 1261 | } |
1244 | smtc_ipi_qdump(); | 1262 | smtc_ipi_qdump(); |
1245 | printk("Timer IPI Backlogs:\n"); | ||
1246 | for (i=0; i < NR_CPUS; i++) { | ||
1247 | printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); | ||
1248 | } | ||
1249 | printk("%d Recoveries of \"stolen\" FPU\n", | 1263 | printk("%d Recoveries of \"stolen\" FPU\n", |
1250 | atomic_read(&smtc_fpu_recoveries)); | 1264 | atomic_read(&smtc_fpu_recoveries)); |
1251 | } | 1265 | } |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 5fd0cd020af5..b602ac6eb47d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void) | |||
825 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { | 825 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { |
826 | cpumask_t tmask; | 826 | cpumask_t tmask; |
827 | 827 | ||
828 | cpus_and(tmask, current->thread.user_cpus_allowed, | 828 | current->thread.user_cpus_allowed |
829 | mt_fpu_cpumask); | 829 | = current->cpus_allowed; |
830 | cpus_and(tmask, current->cpus_allowed, | ||
831 | mt_fpu_cpumask); | ||
830 | set_cpus_allowed(current, tmask); | 832 | set_cpus_allowed(current, tmask); |
831 | set_thread_flag(TIF_FPUBOUND); | 833 | set_thread_flag(TIF_FPUBOUND); |
832 | } | 834 | } |
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 3b7dd722c32a..cef2db8d2225 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile | |||
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o | |||
15 | obj-$(CONFIG_PCI) += malta-pci.o | 15 | obj-$(CONFIG_PCI) += malta-pci.o |
16 | 16 | ||
17 | # FIXME FIXME FIXME | 17 | # FIXME FIXME FIXME |
18 | obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o | 18 | obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o |
19 | 19 | ||
20 | EXTRA_CFLAGS += -Werror | 20 | EXTRA_CFLAGS += -Werror |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 5ea705e49454..f84a46a8ae6e 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c | |||
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void) | |||
84 | 84 | ||
85 | static void __init msmtc_smp_setup(void) | 85 | static void __init msmtc_smp_setup(void) |
86 | { | 86 | { |
87 | mipsmt_build_cpu_map(0); | 87 | /* |
88 | * we won't get the definitive value until | ||
89 | * we've run smtc_prepare_cpus later, but | ||
90 | * we would appear to need an upper bound now. | ||
91 | */ | ||
92 | smp_num_siblings = smtc_build_cpu_map(0); | ||
88 | } | 93 | } |
89 | 94 | ||
90 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) | 95 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) |
91 | { | 96 | { |
92 | mipsmt_prepare_cpus(); | 97 | smtc_prepare_cpus(max_cpus); |
93 | } | 98 | } |
94 | 99 | ||
95 | struct plat_smp_ops msmtc_smp_ops = { | 100 | struct plat_smp_ops msmtc_smp_ops = { |
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile index f18ba9201bbc..7b45f199d92a 100644 --- a/arch/mips/sibyte/swarm/Makefile +++ b/arch/mips/sibyte/swarm/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o | 1 | obj-y := platform.o setup.o rtc_xicor1241.o \ |
2 | rtc_m41t81.o | ||
2 | 3 | ||
3 | obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o | 4 | obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o |
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c new file mode 100644 index 000000000000..dd0e5b9b64e8 --- /dev/null +++ b/arch/mips/sibyte/swarm/platform.c | |||
@@ -0,0 +1,81 @@ | |||
1 | #include <linux/err.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/io.h> | ||
5 | #include <linux/platform_device.h> | ||
6 | #include <linux/ata_platform.h> | ||
7 | |||
8 | #include <asm/sibyte/board.h> | ||
9 | #include <asm/sibyte/sb1250_genbus.h> | ||
10 | #include <asm/sibyte/sb1250_regs.h> | ||
11 | |||
12 | #define DRV_NAME "pata-swarm" | ||
13 | |||
14 | #define SWARM_IDE_SHIFT 5 | ||
15 | #define SWARM_IDE_BASE 0x1f0 | ||
16 | #define SWARM_IDE_CTRL 0x3f6 | ||
17 | |||
18 | static struct resource swarm_pata_resource[] = { | ||
19 | { | ||
20 | .name = "Swarm GenBus IDE", | ||
21 | .flags = IORESOURCE_MEM, | ||
22 | }, { | ||
23 | .name = "Swarm GenBus IDE", | ||
24 | .flags = IORESOURCE_MEM, | ||
25 | }, { | ||
26 | .name = "Swarm GenBus IDE", | ||
27 | .flags = IORESOURCE_IRQ, | ||
28 | .start = K_INT_GB_IDE, | ||
29 | .end = K_INT_GB_IDE, | ||
30 | }, | ||
31 | }; | ||
32 | |||
33 | static struct pata_platform_info pata_platform_data = { | ||
34 | .ioport_shift = SWARM_IDE_SHIFT, | ||
35 | }; | ||
36 | |||
37 | static struct platform_device swarm_pata_device = { | ||
38 | .name = "pata_platform", | ||
39 | .id = -1, | ||
40 | .resource = swarm_pata_resource, | ||
41 | .num_resources = ARRAY_SIZE(swarm_pata_resource), | ||
42 | .dev = { | ||
43 | .platform_data = &pata_platform_data, | ||
44 | .coherent_dma_mask = ~0, /* grumble */ | ||
45 | }, | ||
46 | }; | ||
47 | |||
48 | static int __init swarm_pata_init(void) | ||
49 | { | ||
50 | u8 __iomem *base; | ||
51 | phys_t offset, size; | ||
52 | struct resource *r; | ||
53 | |||
54 | if (!SIBYTE_HAVE_IDE) | ||
55 | return -ENODEV; | ||
56 | |||
57 | base = ioremap(A_IO_EXT_BASE, 0x800); | ||
58 | offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); | ||
59 | size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); | ||
60 | iounmap(base); | ||
61 | |||
62 | offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE; | ||
63 | size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE; | ||
64 | if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) { | ||
65 | pr_info(DRV_NAME ": PATA interface at GenBus disabled\n"); | ||
66 | |||
67 | return -EBUSY; | ||
68 | } | ||
69 | |||
70 | pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS); | ||
71 | |||
72 | r = swarm_pata_resource; | ||
73 | r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT); | ||
74 | r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1; | ||
75 | r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT); | ||
76 | r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1; | ||
77 | |||
78 | return platform_device_register(&swarm_pata_device); | ||
79 | } | ||
80 | |||
81 | device_initcall(swarm_pata_init); | ||
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 5337ca7bb649..c27b10a1bd79 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -453,6 +453,7 @@ int __devinit start_secondary(void *unused) | |||
453 | secondary_cpu_time_init(); | 453 | secondary_cpu_time_init(); |
454 | 454 | ||
455 | ipi_call_lock(); | 455 | ipi_call_lock(); |
456 | notify_cpu_starting(cpu); | ||
456 | cpu_set(cpu, cpu_online_map); | 457 | cpu_set(cpu, cpu_online_map); |
457 | /* Update sibling maps */ | 458 | /* Update sibling maps */ |
458 | base = cpu_first_thread_in_core(cpu); | 459 | base = cpu_first_thread_in_core(cpu); |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 00b9b4dec5eb..9e8b1f9b8f4d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -585,6 +585,8 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
585 | /* Enable pfault pseudo page faults on this cpu. */ | 585 | /* Enable pfault pseudo page faults on this cpu. */ |
586 | pfault_init(); | 586 | pfault_init(); |
587 | 587 | ||
588 | /* call cpu notifiers */ | ||
589 | notify_cpu_starting(smp_processor_id()); | ||
588 | /* Mark this cpu as online */ | 590 | /* Mark this cpu as online */ |
589 | spin_lock(&call_lock); | 591 | spin_lock(&call_lock); |
590 | cpu_set(smp_processor_id(), cpu_online_map); | 592 | cpu_set(smp_processor_id(), cpu_online_map); |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ca114fe46ffb..06acb1a18bbc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -169,6 +169,8 @@ void init_cpu_timer(void) | |||
169 | 169 | ||
170 | static void clock_comparator_interrupt(__u16 code) | 170 | static void clock_comparator_interrupt(__u16 code) |
171 | { | 171 | { |
172 | if (S390_lowcore.clock_comparator == -1ULL) | ||
173 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
172 | } | 174 | } |
173 | 175 | ||
174 | static void etr_timing_alert(struct etr_irq_parm *); | 176 | static void etr_timing_alert(struct etr_irq_parm *); |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index fc6ab6094df8..0953cee05efc 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -1,14 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/lib/delay.c | ||
3 | * Precise Delay Loops for S390 | 2 | * Precise Delay Loops for S390 |
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999,2008 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, |
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
8 | * | ||
9 | * Derived from "arch/i386/lib/delay.c" | ||
10 | * Copyright (C) 1993 Linus Torvalds | ||
11 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
12 | */ | 7 | */ |
13 | 8 | ||
14 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
@@ -29,30 +24,31 @@ void __delay(unsigned long loops) | |||
29 | asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); | 24 | asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); |
30 | } | 25 | } |
31 | 26 | ||
32 | /* | 27 | static void __udelay_disabled(unsigned long usecs) |
33 | * Waits for 'usecs' microseconds using the TOD clock comparator. | ||
34 | */ | ||
35 | void __udelay(unsigned long usecs) | ||
36 | { | 28 | { |
37 | u64 end, time, old_cc = 0; | 29 | unsigned long mask, cr0, cr0_saved; |
38 | unsigned long flags, cr0, mask, dummy; | 30 | u64 clock_saved; |
39 | int irq_context; | ||
40 | 31 | ||
41 | irq_context = in_interrupt(); | 32 | clock_saved = local_tick_disable(); |
42 | if (!irq_context) | 33 | set_clock_comparator(get_clock() + ((u64) usecs << 12)); |
43 | local_bh_disable(); | 34 | __ctl_store(cr0_saved, 0, 0); |
44 | local_irq_save(flags); | 35 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; |
45 | if (raw_irqs_disabled_flags(flags)) { | 36 | __ctl_load(cr0 , 0, 0); |
46 | old_cc = local_tick_disable(); | 37 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; |
47 | S390_lowcore.clock_comparator = -1ULL; | 38 | trace_hardirqs_on(); |
48 | __ctl_store(cr0, 0, 0); | 39 | __load_psw_mask(mask); |
49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | 40 | local_irq_disable(); |
50 | __ctl_load(dummy , 0, 0); | 41 | __ctl_load(cr0_saved, 0, 0); |
51 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | 42 | local_tick_enable(clock_saved); |
52 | } else | 43 | set_clock_comparator(S390_lowcore.clock_comparator); |
53 | mask = psw_kernel_bits | PSW_MASK_WAIT | | 44 | } |
54 | PSW_MASK_EXT | PSW_MASK_IO; | ||
55 | 45 | ||
46 | static void __udelay_enabled(unsigned long usecs) | ||
47 | { | ||
48 | unsigned long mask; | ||
49 | u64 end, time; | ||
50 | |||
51 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO; | ||
56 | end = get_clock() + ((u64) usecs << 12); | 52 | end = get_clock() + ((u64) usecs << 12); |
57 | do { | 53 | do { |
58 | time = end < S390_lowcore.clock_comparator ? | 54 | time = end < S390_lowcore.clock_comparator ? |
@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs) | |||
62 | __load_psw_mask(mask); | 58 | __load_psw_mask(mask); |
63 | local_irq_disable(); | 59 | local_irq_disable(); |
64 | } while (get_clock() < end); | 60 | } while (get_clock() < end); |
61 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
62 | } | ||
65 | 63 | ||
66 | if (raw_irqs_disabled_flags(flags)) { | 64 | /* |
67 | __ctl_load(cr0, 0, 0); | 65 | * Waits for 'usecs' microseconds using the TOD clock comparator. |
68 | local_tick_enable(old_cc); | 66 | */ |
67 | void __udelay(unsigned long usecs) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | preempt_disable(); | ||
72 | local_irq_save(flags); | ||
73 | if (in_irq()) { | ||
74 | __udelay_disabled(usecs); | ||
75 | goto out; | ||
76 | } | ||
77 | if (in_softirq()) { | ||
78 | if (raw_irqs_disabled_flags(flags)) | ||
79 | __udelay_disabled(usecs); | ||
80 | else | ||
81 | __udelay_enabled(usecs); | ||
82 | goto out; | ||
69 | } | 83 | } |
70 | if (!irq_context) | 84 | if (raw_irqs_disabled_flags(flags)) { |
85 | local_bh_disable(); | ||
86 | __udelay_disabled(usecs); | ||
71 | _local_bh_enable(); | 87 | _local_bh_enable(); |
72 | set_clock_comparator(S390_lowcore.clock_comparator); | 88 | goto out; |
89 | } | ||
90 | __udelay_enabled(usecs); | ||
91 | out: | ||
73 | local_irq_restore(flags); | 92 | local_irq_restore(flags); |
93 | preempt_enable(); | ||
74 | } | 94 | } |
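Side note on the (u64) usecs << 12 scaling used in both helpers: the s390 TOD clock steps bit 51 once per microsecond, so one microsecond corresponds to 2^12 TOD clock units. A trivial standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t usecs = 250;
	uint64_t tod_units = usecs << 12;   /* 250 us -> 1024000 TOD units */

	printf("%llu us = %llu TOD clock units\n",
	       (unsigned long long)usecs, (unsigned long long)tod_units);
	return 0;
}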
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 60c50841143e..001778f9adaf 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -82,6 +82,8 @@ asmlinkage void __cpuinit start_secondary(void) | |||
82 | 82 | ||
83 | preempt_disable(); | 83 | preempt_disable(); |
84 | 84 | ||
85 | notify_cpu_starting(smp_processor_id()); | ||
86 | |||
85 | local_irq_enable(); | 87 | local_irq_enable(); |
86 | 88 | ||
87 | calibrate_delay(); | 89 | calibrate_delay(); |
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 69596402a500..446767e8f569 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c | |||
@@ -88,6 +88,7 @@ void __init smp4d_callin(void) | |||
88 | local_flush_cache_all(); | 88 | local_flush_cache_all(); |
89 | local_flush_tlb_all(); | 89 | local_flush_tlb_all(); |
90 | 90 | ||
91 | notify_cpu_starting(cpuid); | ||
91 | /* | 92 | /* |
92 | * Unblock the master CPU _only_ when the scheduler state | 93 | * Unblock the master CPU _only_ when the scheduler state |
93 | * of all secondary CPUs will be up-to-date, so after | 94 | * of all secondary CPUs will be up-to-date, so after |
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index a14a76ac7f36..9964890dc1db 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c | |||
@@ -71,6 +71,8 @@ void __cpuinit smp4m_callin(void) | |||
71 | local_flush_cache_all(); | 71 | local_flush_cache_all(); |
72 | local_flush_tlb_all(); | 72 | local_flush_tlb_all(); |
73 | 73 | ||
74 | notify_cpu_starting(cpuid); | ||
75 | |||
74 | /* Get our local ticker going. */ | 76 | /* Get our local ticker going. */ |
75 | smp_setup_percpu_timer(); | 77 | smp_setup_percpu_timer(); |
76 | 78 | ||
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index be2d50c3aa95..045772142844 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c | |||
@@ -85,6 +85,7 @@ static int idle_proc(void *cpup) | |||
85 | while (!cpu_isset(cpu, smp_commenced_mask)) | 85 | while (!cpu_isset(cpu, smp_commenced_mask)) |
86 | cpu_relax(); | 86 | cpu_relax(); |
87 | 87 | ||
88 | notify_cpu_starting(cpu); | ||
88 | cpu_set(cpu, cpu_online_map); | 89 | cpu_set(cpu, cpu_online_map); |
89 | default_idle(); | 90 | default_idle(); |
90 | return 0; | 91 | return 0; |
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index a1310c52fc0c..857e492c571e 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c | |||
@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) | |||
492 | continue; | 492 | continue; |
493 | } | 493 | } |
494 | sh_symtab = sec_symtab->symtab; | 494 | sh_symtab = sec_symtab->symtab; |
495 | sym_strtab = sec->link->strtab; | 495 | sym_strtab = sec_symtab->link->strtab; |
496 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { | 496 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { |
497 | Elf32_Rel *rel; | 497 | Elf32_Rel *rel; |
498 | Elf32_Sym *sym; | 498 | Elf32_Sym *sym; |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index bfd10fd211cd..c102af85df9c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -1605,6 +1605,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { | |||
1605 | */ | 1605 | */ |
1606 | { | 1606 | { |
1607 | .callback = dmi_ignore_irq0_timer_override, | 1607 | .callback = dmi_ignore_irq0_timer_override, |
1608 | .ident = "HP nx6115 laptop", | ||
1609 | .matches = { | ||
1610 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
1611 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"), | ||
1612 | }, | ||
1613 | }, | ||
1614 | { | ||
1615 | .callback = dmi_ignore_irq0_timer_override, | ||
1608 | .ident = "HP NX6125 laptop", | 1616 | .ident = "HP NX6125 laptop", |
1609 | .matches = { | 1617 | .matches = { |
1610 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | 1618 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
@@ -1619,6 +1627,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { | |||
1619 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), | 1627 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), |
1620 | }, | 1628 | }, |
1621 | }, | 1629 | }, |
1630 | { | ||
1631 | .callback = dmi_ignore_irq0_timer_override, | ||
1632 | .ident = "HP 6715b laptop", | ||
1633 | .matches = { | ||
1634 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
1635 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), | ||
1636 | }, | ||
1637 | }, | ||
1622 | {} | 1638 | {} |
1623 | }; | 1639 | }; |
1624 | 1640 | ||
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 8282a2139681..10435a120d22 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -455,12 +455,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) | |||
455 | return NOTIFY_DONE; | 455 | return NOTIFY_DONE; |
456 | 456 | ||
457 | case DIE_NMI_IPI: | 457 | case DIE_NMI_IPI: |
458 | if (atomic_read(&kgdb_active) != -1) { | 458 | /* Just ignore, we will handle the roundup on DIE_NMI. */ |
459 | /* KGDB CPU roundup */ | ||
460 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
461 | was_in_debug_nmi[raw_smp_processor_id()] = 1; | ||
462 | touch_nmi_watchdog(); | ||
463 | } | ||
464 | return NOTIFY_DONE; | 459 | return NOTIFY_DONE; |
465 | 460 | ||
466 | case DIE_NMIUNKNOWN: | 461 | case DIE_NMIUNKNOWN: |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 49285f8fd4d5..be33a5442d82 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -626,7 +626,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
626 | struct pci_dev *dev; | 626 | struct pci_dev *dev; |
627 | void *gatt; | 627 | void *gatt; |
628 | int i, error; | 628 | int i, error; |
629 | unsigned long start_pfn, end_pfn; | ||
630 | 629 | ||
631 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); | 630 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); |
632 | aper_size = aper_base = info->aper_size = 0; | 631 | aper_size = aper_base = info->aper_size = 0; |
@@ -672,12 +671,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
672 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", | 671 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", |
673 | aper_base, aper_size>>10); | 672 | aper_base, aper_size>>10); |
674 | 673 | ||
675 | /* need to map that range */ | ||
676 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); | ||
677 | if (end_pfn > max_low_pfn_mapped) { | ||
678 | start_pfn = (aper_base>>PAGE_SHIFT); | ||
679 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
680 | } | ||
681 | return 0; | 674 | return 0; |
682 | 675 | ||
683 | nommu: | 676 | nommu: |
@@ -727,7 +720,8 @@ void __init gart_iommu_init(void) | |||
727 | { | 720 | { |
728 | struct agp_kern_info info; | 721 | struct agp_kern_info info; |
729 | unsigned long iommu_start; | 722 | unsigned long iommu_start; |
730 | unsigned long aper_size; | 723 | unsigned long aper_base, aper_size; |
724 | unsigned long start_pfn, end_pfn; | ||
731 | unsigned long scratch; | 725 | unsigned long scratch; |
732 | long i; | 726 | long i; |
733 | 727 | ||
@@ -765,8 +759,16 @@ void __init gart_iommu_init(void) | |||
765 | return; | 759 | return; |
766 | } | 760 | } |
767 | 761 | ||
762 | /* need to map that range */ | ||
763 | aper_size = info.aper_size << 20; | ||
764 | aper_base = info.aper_base; | ||
765 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); | ||
766 | if (end_pfn > max_low_pfn_mapped) { | ||
767 | start_pfn = (aper_base>>PAGE_SHIFT); | ||
768 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
769 | } | ||
770 | |||
768 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); | 771 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); |
769 | aper_size = info.aper_size * 1024 * 1024; | ||
770 | iommu_size = check_iommu_size(info.aper_base, aper_size); | 772 | iommu_size = check_iommu_size(info.aper_base, aper_size); |
771 | iommu_pages = iommu_size >> PAGE_SHIFT; | 773 | iommu_pages = iommu_size >> PAGE_SHIFT; |
772 | 774 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7985c5b3f916..0b8261c3cac2 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -257,6 +257,7 @@ static void __cpuinit smp_callin(void) | |||
257 | end_local_APIC_setup(); | 257 | end_local_APIC_setup(); |
258 | map_cpu_to_logical_apicid(); | 258 | map_cpu_to_logical_apicid(); |
259 | 259 | ||
260 | notify_cpu_starting(cpuid); | ||
260 | /* | 261 | /* |
261 | * Get our bogomips. | 262 | * Get our bogomips. |
262 | * | 263 | * |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index ee0fba092157..199a5f4a873c 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -448,6 +448,8 @@ static void __init start_secondary(void *unused) | |||
448 | 448 | ||
449 | VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); | 449 | VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); |
450 | 450 | ||
451 | notify_cpu_starting(cpuid); | ||
452 | |||
451 | /* enable interrupts */ | 453 | /* enable interrupts */ |
452 | local_irq_enable(); | 454 | local_irq_enable(); |
453 | 455 | ||
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 084109507c9f..8dd3336efd7e 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
165 | "firmware_node"); | 165 | "firmware_node"); |
166 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, | 166 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, |
167 | "physical_node"); | 167 | "physical_node"); |
168 | if (acpi_dev->wakeup.flags.valid) | 168 | if (acpi_dev->wakeup.flags.valid) { |
169 | device_set_wakeup_capable(dev, true); | 169 | device_set_wakeup_capable(dev, true); |
170 | device_set_wakeup_enable(dev, | ||
171 | acpi_dev->wakeup.state.enabled); | ||
172 | } | ||
170 | } | 173 | } |
171 | 174 | ||
172 | return 0; | 175 | return 0; |
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 4ebbba2b6b19..bf5b04de02d1 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c | |||
@@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | static void physical_device_enable_wakeup(struct acpi_device *adev) | ||
381 | { | ||
382 | struct device *dev = acpi_get_physical_device(adev->handle); | ||
383 | |||
384 | if (dev && device_can_wakeup(dev)) | ||
385 | device_set_wakeup_enable(dev, adev->wakeup.state.enabled); | ||
386 | } | ||
387 | |||
380 | static ssize_t | 388 | static ssize_t |
381 | acpi_system_write_wakeup_device(struct file *file, | 389 | acpi_system_write_wakeup_device(struct file *file, |
382 | const char __user * buffer, | 390 | const char __user * buffer, |
@@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file, | |||
411 | } | 419 | } |
412 | } | 420 | } |
413 | if (found_dev) { | 421 | if (found_dev) { |
422 | physical_device_enable_wakeup(found_dev); | ||
414 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | 423 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { |
415 | struct acpi_device *dev = container_of(node, | 424 | struct acpi_device *dev = container_of(node, |
416 | struct | 425 | struct |
@@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file, | |||
428 | dev->pnp.bus_id, found_dev->pnp.bus_id); | 437 | dev->pnp.bus_id, found_dev->pnp.bus_id); |
429 | dev->wakeup.state.enabled = | 438 | dev->wakeup.state.enabled = |
430 | found_dev->wakeup.state.enabled; | 439 | found_dev->wakeup.state.enabled; |
440 | physical_device_enable_wakeup(dev); | ||
431 | } | 441 | } |
432 | } | 442 | } |
433 | } | 443 | } |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 94df91771243..0778d99aea7c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data) | |||
364 | int i; | 364 | int i; |
365 | 365 | ||
366 | status_block = dma_readl(dw, RAW.BLOCK); | 366 | status_block = dma_readl(dw, RAW.BLOCK); |
367 | status_xfer = dma_readl(dw, RAW.BLOCK); | 367 | status_xfer = dma_readl(dw, RAW.XFER); |
368 | status_err = dma_readl(dw, RAW.ERROR); | 368 | status_err = dma_readl(dw, RAW.ERROR); |
369 | 369 | ||
370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", | 370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", |
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 8e93a797c93d..052879a6f853 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -780,10 +780,6 @@ config BLK_DEV_IDEDMA_PMAC | |||
780 | to transfer data to and from memory. Saying Y is safe and improves | 780 | to transfer data to and from memory. Saying Y is safe and improves |
781 | performance. | 781 | performance. |
782 | 782 | ||
783 | config BLK_DEV_IDE_SWARM | ||
784 | tristate "IDE for Sibyte evaluation boards" | ||
785 | depends on SIBYTE_SB1xxx_SOC | ||
786 | |||
787 | config BLK_DEV_IDE_AU1XXX | 783 | config BLK_DEV_IDE_AU1XXX |
788 | bool "IDE for AMD Alchemy Au1200" | 784 | bool "IDE for AMD Alchemy Au1200" |
789 | depends on SOC_AU1200 | 785 | depends on SOC_AU1200 |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 49a8c589e346..f16bb4667238 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -1661,7 +1661,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) | |||
1661 | cdi->mask &= ~CDC_PLAY_AUDIO; | 1661 | cdi->mask &= ~CDC_PLAY_AUDIO; |
1662 | 1662 | ||
1663 | mechtype = buf[8 + 6] >> 5; | 1663 | mechtype = buf[8 + 6] >> 5; |
1664 | if (mechtype == mechtype_caddy || mechtype == mechtype_popup) | 1664 | if (mechtype == mechtype_caddy || |
1665 | mechtype == mechtype_popup || | ||
1666 | (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE)) | ||
1665 | cdi->mask |= CDC_CLOSE_TRAY; | 1667 | cdi->mask |= CDC_CLOSE_TRAY; |
1666 | 1668 | ||
1667 | if (cdi->sanyo_slot > 0) { | 1669 | if (cdi->sanyo_slot > 0) { |
@@ -1859,6 +1861,8 @@ static const struct cd_list_entry ide_cd_quirks_list[] = { | |||
1859 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, | 1861 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1860 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, | 1862 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1861 | { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, | 1863 | { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1864 | { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, | ||
1865 | { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE }, | ||
1862 | { NULL, NULL, 0 } | 1866 | { NULL, NULL, 0 } |
1863 | }; | 1867 | }; |
1864 | 1868 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index adc682755857..3fa07c0aeaa4 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -211,7 +211,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq) | |||
211 | xcount = bcount & 0xffff; | 211 | xcount = bcount & 0xffff; |
212 | if (is_trm290) | 212 | if (is_trm290) |
213 | xcount = ((xcount >> 2) - 1) << 16; | 213 | xcount = ((xcount >> 2) - 1) << 16; |
214 | if (xcount == 0x0000) { | 214 | else if (xcount == 0x0000) { |
215 | /* | 215 | /* |
216 | * Most chipsets correctly interpret a length of 0x0000 as 64KB, | 216 | * Most chipsets correctly interpret a length of 0x0000 as 64KB, |
217 | * but at least one (e.g. CS5530) misinterprets it as zero (!). | 217 | * but at least one (e.g. CS5530) misinterprets it as zero (!). |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 994e41099b42..a51a30e9eab3 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -1492,7 +1492,7 @@ static struct device_attribute *ide_port_attrs[] = { | |||
1492 | 1492 | ||
1493 | static int ide_sysfs_register_port(ide_hwif_t *hwif) | 1493 | static int ide_sysfs_register_port(ide_hwif_t *hwif) |
1494 | { | 1494 | { |
1495 | int i, rc; | 1495 | int i, uninitialized_var(rc); |
1496 | 1496 | ||
1497 | for (i = 0; ide_port_attrs[i]; i++) { | 1497 | for (i = 0; ide_port_attrs[i]; i++) { |
1498 | rc = device_create_file(hwif->portdev, ide_port_attrs[i]); | 1498 | rc = device_create_file(hwif->portdev, ide_port_attrs[i]); |
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile index 677c7b2bac92..5873fa0b8769 100644 --- a/drivers/ide/mips/Makefile +++ b/drivers/ide/mips/Makefile | |||
@@ -1,4 +1,3 @@ | |||
1 | obj-$(CONFIG_BLK_DEV_IDE_SWARM) += swarm.o | ||
2 | obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o | 1 | obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o |
3 | 2 | ||
4 | EXTRA_CFLAGS := -Idrivers/ide | 3 | EXTRA_CFLAGS := -Idrivers/ide |
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c deleted file mode 100644 index 39c9ee995857..000000000000 --- a/drivers/ide/mips/swarm.c +++ /dev/null | |||
@@ -1,197 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001, 2002, 2003 Broadcom Corporation | ||
3 | * Copyright (C) 2004 MontaVista Software Inc. | ||
4 | * Author: Manish Lachwani, mlachwani@mvista.com | ||
5 | * Copyright (C) 2004 MIPS Technologies, Inc. All rights reserved. | ||
6 | * Author: Maciej W. Rozycki <macro@mips.com> | ||
7 | * Copyright (c) 2006, 2008 Maciej W. Rozycki | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version 2 | ||
12 | * of the License, or (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * Derived loosely from ide-pmac.c, so: | ||
26 | * Copyright (C) 1998 Paul Mackerras. | ||
27 | * Copyright (C) 1995-1998 Mark Lord | ||
28 | */ | ||
29 | |||
30 | /* | ||
31 | * Boards with SiByte processors so far have supported IDE devices via | ||
32 | * the Generic Bus, PCI bus, and built-in PCMCIA interface. In all | ||
33 | * cases, byte-swapping must be avoided for these devices (whereas | ||
34 | * other PCI devices, for example, will require swapping). Any | ||
35 | * SiByte-targeted kernel including IDE support will include this | ||
36 | * file. Probing of a Generic Bus for an IDE device is controlled by | ||
37 | * the definition of "SIBYTE_HAVE_IDE", which is provided by | ||
38 | * <asm/sibyte/board.h> for Broadcom boards. | ||
39 | */ | ||
40 | |||
41 | #include <linux/ide.h> | ||
42 | #include <linux/ioport.h> | ||
43 | #include <linux/kernel.h> | ||
44 | #include <linux/types.h> | ||
45 | #include <linux/platform_device.h> | ||
46 | |||
47 | #include <asm/io.h> | ||
48 | |||
49 | #include <asm/sibyte/board.h> | ||
50 | #include <asm/sibyte/sb1250_genbus.h> | ||
51 | #include <asm/sibyte/sb1250_regs.h> | ||
52 | |||
53 | #define DRV_NAME "ide-swarm" | ||
54 | |||
55 | static char swarm_ide_string[] = DRV_NAME; | ||
56 | |||
57 | static struct resource swarm_ide_resource = { | ||
58 | .name = "SWARM GenBus IDE", | ||
59 | .flags = IORESOURCE_MEM, | ||
60 | }; | ||
61 | |||
62 | static struct platform_device *swarm_ide_dev; | ||
63 | |||
64 | static const struct ide_port_info swarm_port_info = { | ||
65 | .name = DRV_NAME, | ||
66 | .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, | ||
67 | }; | ||
68 | |||
69 | /* | ||
70 | * swarm_ide_probe - if the board header indicates the existence of | ||
71 | * Generic Bus IDE, allocate a HWIF for it. | ||
72 | */ | ||
73 | static int __devinit swarm_ide_probe(struct device *dev) | ||
74 | { | ||
75 | u8 __iomem *base; | ||
76 | struct ide_host *host; | ||
77 | phys_t offset, size; | ||
78 | int i, rc; | ||
79 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; | ||
80 | |||
81 | if (!SIBYTE_HAVE_IDE) | ||
82 | return -ENODEV; | ||
83 | |||
84 | base = ioremap(A_IO_EXT_BASE, 0x800); | ||
85 | offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); | ||
86 | size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); | ||
87 | iounmap(base); | ||
88 | |||
89 | offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE; | ||
90 | size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE; | ||
91 | if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) { | ||
92 | printk(KERN_INFO DRV_NAME | ||
93 | ": IDE interface at GenBus disabled\n"); | ||
94 | return -EBUSY; | ||
95 | } | ||
96 | |||
97 | printk(KERN_INFO DRV_NAME ": IDE interface at GenBus slot %i\n", | ||
98 | IDE_CS); | ||
99 | |||
100 | swarm_ide_resource.start = offset; | ||
101 | swarm_ide_resource.end = offset + size - 1; | ||
102 | if (request_resource(&iomem_resource, &swarm_ide_resource)) { | ||
103 | printk(KERN_ERR DRV_NAME | ||
104 | ": can't request I/O memory resource\n"); | ||
105 | return -EBUSY; | ||
106 | } | ||
107 | |||
108 | base = ioremap(offset, size); | ||
109 | |||
110 | memset(&hw, 0, sizeof(hw)); | ||
111 | for (i = 0; i <= 7; i++) | ||
112 | hw.io_ports_array[i] = | ||
113 | (unsigned long)(base + ((0x1f0 + i) << 5)); | ||
114 | hw.io_ports.ctl_addr = | ||
115 | (unsigned long)(base + (0x3f6 << 5)); | ||
116 | hw.irq = K_INT_GB_IDE; | ||
117 | hw.chipset = ide_generic; | ||
118 | |||
119 | rc = ide_host_add(&swarm_port_info, hws, &host); | ||
120 | if (rc) | ||
121 | goto err; | ||
122 | |||
123 | dev_set_drvdata(dev, host); | ||
124 | |||
125 | return 0; | ||
126 | err: | ||
127 | release_resource(&swarm_ide_resource); | ||
128 | iounmap(base); | ||
129 | return rc; | ||
130 | } | ||
131 | |||
132 | static struct device_driver swarm_ide_driver = { | ||
133 | .name = swarm_ide_string, | ||
134 | .bus = &platform_bus_type, | ||
135 | .probe = swarm_ide_probe, | ||
136 | }; | ||
137 | |||
138 | static void swarm_ide_platform_release(struct device *device) | ||
139 | { | ||
140 | struct platform_device *pldev; | ||
141 | |||
142 | /* free device */ | ||
143 | pldev = to_platform_device(device); | ||
144 | kfree(pldev); | ||
145 | } | ||
146 | |||
147 | static int __devinit swarm_ide_init_module(void) | ||
148 | { | ||
149 | struct platform_device *pldev; | ||
150 | int err; | ||
151 | |||
152 | printk(KERN_INFO "SWARM IDE driver\n"); | ||
153 | |||
154 | if (driver_register(&swarm_ide_driver)) { | ||
155 | printk(KERN_ERR "Driver registration failed\n"); | ||
156 | err = -ENODEV; | ||
157 | goto out; | ||
158 | } | ||
159 | |||
160 | if (!(pldev = kzalloc(sizeof (*pldev), GFP_KERNEL))) { | ||
161 | err = -ENOMEM; | ||
162 | goto out_unregister_driver; | ||
163 | } | ||
164 | |||
165 | pldev->name = swarm_ide_string; | ||
166 | pldev->id = 0; | ||
167 | pldev->dev.release = swarm_ide_platform_release; | ||
168 | |||
169 | if (platform_device_register(pldev)) { | ||
170 | err = -ENODEV; | ||
171 | goto out_free_pldev; | ||
172 | } | ||
173 | |||
174 | if (!pldev->dev.driver) { | ||
175 | /* | ||
176 | * The driver was not bound to this device, there was | ||
177 | * no hardware at this address. Unregister it, as the | ||
178 | * release function will take care of freeing the | ||
179 | * allocated structure | ||
180 | */ | ||
181 | platform_device_unregister (pldev); | ||
182 | } | ||
183 | |||
184 | swarm_ide_dev = pldev; | ||
185 | |||
186 | return 0; | ||
187 | |||
188 | out_free_pldev: | ||
189 | kfree(pldev); | ||
190 | |||
191 | out_unregister_driver: | ||
192 | driver_unregister(&swarm_ide_driver); | ||
193 | out: | ||
194 | return err; | ||
195 | } | ||
196 | |||
197 | module_init(swarm_ide_init_module); | ||
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c index be0e12144b8b..34935155c1c0 100644 --- a/drivers/leds/leds-fsg.c +++ b/drivers/leds/leds-fsg.c | |||
@@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
161 | { | 161 | { |
162 | int ret; | 162 | int ret; |
163 | 163 | ||
164 | /* Map the LED chip select address space */ | ||
165 | latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); | ||
166 | if (!latch_address) { | ||
167 | ret = -ENOMEM; | ||
168 | goto failremap; | ||
169 | } | ||
170 | |||
171 | latch_value = 0xffff; | ||
172 | *latch_address = latch_value; | ||
173 | |||
164 | ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); | 174 | ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); |
165 | if (ret < 0) | 175 | if (ret < 0) |
166 | goto failwlan; | 176 | goto failwlan; |
@@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
185 | if (ret < 0) | 195 | if (ret < 0) |
186 | goto failring; | 196 | goto failring; |
187 | 197 | ||
188 | /* Map the LED chip select address space */ | ||
189 | latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); | ||
190 | if (!latch_address) { | ||
191 | ret = -ENOMEM; | ||
192 | goto failremap; | ||
193 | } | ||
194 | |||
195 | latch_value = 0xffff; | ||
196 | *latch_address = latch_value; | ||
197 | |||
198 | return ret; | 198 | return ret; |
199 | 199 | ||
200 | failremap: | ||
201 | led_classdev_unregister(&fsg_ring_led); | ||
202 | failring: | 200 | failring: |
203 | led_classdev_unregister(&fsg_sync_led); | 201 | led_classdev_unregister(&fsg_sync_led); |
204 | failsync: | 202 | failsync: |
@@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
210 | failwan: | 208 | failwan: |
211 | led_classdev_unregister(&fsg_wlan_led); | 209 | led_classdev_unregister(&fsg_wlan_led); |
212 | failwlan: | 210 | failwlan: |
211 | iounmap(latch_address); | ||
212 | failremap: | ||
213 | 213 | ||
214 | return ret; | 214 | return ret; |
215 | } | 215 | } |
216 | 216 | ||
217 | static int fsg_led_remove(struct platform_device *pdev) | 217 | static int fsg_led_remove(struct platform_device *pdev) |
218 | { | 218 | { |
219 | iounmap(latch_address); | ||
220 | |||
221 | led_classdev_unregister(&fsg_wlan_led); | 219 | led_classdev_unregister(&fsg_wlan_led); |
222 | led_classdev_unregister(&fsg_wan_led); | 220 | led_classdev_unregister(&fsg_wan_led); |
223 | led_classdev_unregister(&fsg_sata_led); | 221 | led_classdev_unregister(&fsg_sata_led); |
@@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev) | |||
225 | led_classdev_unregister(&fsg_sync_led); | 223 | led_classdev_unregister(&fsg_sync_led); |
226 | led_classdev_unregister(&fsg_ring_led); | 224 | led_classdev_unregister(&fsg_ring_led); |
227 | 225 | ||
226 | iounmap(latch_address); | ||
227 | |||
228 | return 0; | 228 | return 0; |
229 | } | 229 | } |
230 | 230 | ||
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 146c06972863..f508729123b5 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c | |||
@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
248 | const struct i2c_device_id *id) | 248 | const struct i2c_device_id *id) |
249 | { | 249 | { |
250 | struct pca955x_led *pca955x; | 250 | struct pca955x_led *pca955x; |
251 | int i; | ||
252 | int err = -ENODEV; | ||
253 | struct pca955x_chipdef *chip; | 251 | struct pca955x_chipdef *chip; |
254 | struct i2c_adapter *adapter; | 252 | struct i2c_adapter *adapter; |
255 | struct led_platform_data *pdata; | 253 | struct led_platform_data *pdata; |
254 | int i, err; | ||
256 | 255 | ||
257 | chip = &pca955x_chipdefs[id->driver_data]; | 256 | chip = &pca955x_chipdefs[id->driver_data]; |
258 | adapter = to_i2c_adapter(client->dev.parent); | 257 | adapter = to_i2c_adapter(client->dev.parent); |
@@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
282 | } | 281 | } |
283 | } | 282 | } |
284 | 283 | ||
284 | pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL); | ||
285 | if (!pca955x) | ||
286 | return -ENOMEM; | ||
287 | |||
288 | i2c_set_clientdata(client, pca955x); | ||
289 | |||
285 | for (i = 0; i < chip->bits; i++) { | 290 | for (i = 0; i < chip->bits; i++) { |
286 | pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL); | 291 | pca955x[i].chipdef = chip; |
287 | if (!pca955x) { | 292 | pca955x[i].client = client; |
288 | err = -ENOMEM; | 293 | pca955x[i].led_num = i; |
289 | goto exit; | ||
290 | } | ||
291 | 294 | ||
292 | pca955x->chipdef = chip; | ||
293 | pca955x->client = client; | ||
294 | pca955x->led_num = i; | ||
295 | /* Platform data can specify LED names and default triggers */ | 295 | /* Platform data can specify LED names and default triggers */ |
296 | if (pdata) { | 296 | if (pdata) { |
297 | if (pdata->leds[i].name) | 297 | if (pdata->leds[i].name) |
298 | snprintf(pca955x->name, 32, "pca955x:%s", | 298 | snprintf(pca955x[i].name, |
299 | pdata->leds[i].name); | 299 | sizeof(pca955x[i].name), "pca955x:%s", |
300 | pdata->leds[i].name); | ||
300 | if (pdata->leds[i].default_trigger) | 301 | if (pdata->leds[i].default_trigger) |
301 | pca955x->led_cdev.default_trigger = | 302 | pca955x[i].led_cdev.default_trigger = |
302 | pdata->leds[i].default_trigger; | 303 | pdata->leds[i].default_trigger; |
303 | } else { | 304 | } else { |
304 | snprintf(pca955x->name, 32, "pca955x:%d", i); | 305 | snprintf(pca955x[i].name, sizeof(pca955x[i].name), |
306 | "pca955x:%d", i); | ||
305 | } | 307 | } |
306 | spin_lock_init(&pca955x->lock); | ||
307 | 308 | ||
308 | pca955x->led_cdev.name = pca955x->name; | 309 | spin_lock_init(&pca955x[i].lock); |
309 | pca955x->led_cdev.brightness_set = | ||
310 | pca955x_led_set; | ||
311 | 310 | ||
312 | /* | 311 | pca955x[i].led_cdev.name = pca955x[i].name; |
313 | * Client data is a pointer to the _first_ pca955x_led | 312 | pca955x[i].led_cdev.brightness_set = pca955x_led_set; |
314 | * struct | ||
315 | */ | ||
316 | if (i == 0) | ||
317 | i2c_set_clientdata(client, pca955x); | ||
318 | 313 | ||
319 | INIT_WORK(&(pca955x->work), pca955x_led_work); | 314 | INIT_WORK(&pca955x[i].work, pca955x_led_work); |
320 | 315 | ||
321 | led_classdev_register(&client->dev, &(pca955x->led_cdev)); | 316 | err = led_classdev_register(&client->dev, &pca955x[i].led_cdev); |
317 | if (err < 0) | ||
318 | goto exit; | ||
322 | } | 319 | } |
323 | 320 | ||
324 | /* Turn off LEDs */ | 321 | /* Turn off LEDs */ |
@@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
336 | pca955x_write_psc(client, 1, 0); | 333 | pca955x_write_psc(client, 1, 0); |
337 | 334 | ||
338 | return 0; | 335 | return 0; |
336 | |||
339 | exit: | 337 | exit: |
338 | while (i--) { | ||
339 | led_classdev_unregister(&pca955x[i].led_cdev); | ||
340 | cancel_work_sync(&pca955x[i].work); | ||
341 | } | ||
342 | |||
343 | kfree(pca955x); | ||
344 | i2c_set_clientdata(client, NULL); | ||
345 | |||
340 | return err; | 346 | return err; |
341 | } | 347 | } |
342 | 348 | ||
343 | static int __devexit pca955x_remove(struct i2c_client *client) | 349 | static int __devexit pca955x_remove(struct i2c_client *client) |
344 | { | 350 | { |
345 | struct pca955x_led *pca955x = i2c_get_clientdata(client); | 351 | struct pca955x_led *pca955x = i2c_get_clientdata(client); |
346 | int leds = pca955x->chipdef->bits; | ||
347 | int i; | 352 | int i; |
348 | 353 | ||
349 | for (i = 0; i < leds; i++) { | 354 | for (i = 0; i < pca955x->chipdef->bits; i++) { |
350 | led_classdev_unregister(&(pca955x->led_cdev)); | 355 | led_classdev_unregister(&pca955x[i].led_cdev); |
351 | cancel_work_sync(&(pca955x->work)); | 356 | cancel_work_sync(&pca955x[i].work); |
352 | kfree(pca955x); | ||
353 | pca955x = pca955x + 1; | ||
354 | } | 357 | } |
355 | 358 | ||
359 | kfree(pca955x); | ||
360 | i2c_set_clientdata(client, NULL); | ||
361 | |||
356 | return 0; | 362 | return 0; |
357 | } | 363 | } |
358 | 364 | ||
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h index 216025cf5d4b..2c5b6282b569 100644 --- a/drivers/media/common/tuners/tuner-xc2028.h +++ b/drivers/media/common/tuners/tuner-xc2028.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include "dvb_frontend.h" | 10 | #include "dvb_frontend.h" |
11 | 11 | ||
12 | #define XC2028_DEFAULT_FIRMWARE "xc3028-v27.fw" | 12 | #define XC2028_DEFAULT_FIRMWARE "xc3028-v27.fw" |
13 | #define XC3028L_DEFAULT_FIRMWARE "xc3028L-v36.fw" | ||
13 | 14 | ||
14 | /* Demodulator IF (kHz) */ | 15 | /* Demodulator IF (kHz) */ |
15 | #define XC3028_FE_DEFAULT 0 /* Don't load SCODE */ | 16 | #define XC3028_FE_DEFAULT 0 /* Don't load SCODE */ |
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c index 4eed783f4bce..a127a4175c40 100644 --- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c | |||
@@ -491,6 +491,7 @@ static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { | |||
491 | .demod_address = 0x53, | 491 | .demod_address = 0x53, |
492 | .invert = 1, | 492 | .invert = 1, |
493 | .repeated_start_workaround = 1, | 493 | .repeated_start_workaround = 1, |
494 | .serial_mpeg = 1, | ||
494 | }; | 495 | }; |
495 | 496 | ||
496 | static struct itd1000_config skystar2_rev2_7_itd1000_config = { | 497 | static struct itd1000_config skystar2_rev2_7_itd1000_config = { |
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index 069d847ba887..0c733c66a441 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c | |||
@@ -364,15 +364,16 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | |||
364 | enum dmx_success success) | 364 | enum dmx_success success) |
365 | { | 365 | { |
366 | struct dmxdev_filter *dmxdevfilter = filter->priv; | 366 | struct dmxdev_filter *dmxdevfilter = filter->priv; |
367 | unsigned long flags; | ||
367 | int ret; | 368 | int ret; |
368 | 369 | ||
369 | if (dmxdevfilter->buffer.error) { | 370 | if (dmxdevfilter->buffer.error) { |
370 | wake_up(&dmxdevfilter->buffer.queue); | 371 | wake_up(&dmxdevfilter->buffer.queue); |
371 | return 0; | 372 | return 0; |
372 | } | 373 | } |
373 | spin_lock(&dmxdevfilter->dev->lock); | 374 | spin_lock_irqsave(&dmxdevfilter->dev->lock, flags); |
374 | if (dmxdevfilter->state != DMXDEV_STATE_GO) { | 375 | if (dmxdevfilter->state != DMXDEV_STATE_GO) { |
375 | spin_unlock(&dmxdevfilter->dev->lock); | 376 | spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags); |
376 | return 0; | 377 | return 0; |
377 | } | 378 | } |
378 | del_timer(&dmxdevfilter->timer); | 379 | del_timer(&dmxdevfilter->timer); |
@@ -391,7 +392,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | |||
391 | } | 392 | } |
392 | if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) | 393 | if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) |
393 | dmxdevfilter->state = DMXDEV_STATE_DONE; | 394 | dmxdevfilter->state = DMXDEV_STATE_DONE; |
394 | spin_unlock(&dmxdevfilter->dev->lock); | 395 | spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags); |
395 | wake_up(&dmxdevfilter->buffer.queue); | 396 | wake_up(&dmxdevfilter->buffer.queue); |
396 | return 0; | 397 | return 0; |
397 | } | 398 | } |
@@ -403,11 +404,12 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | |||
403 | { | 404 | { |
404 | struct dmxdev_filter *dmxdevfilter = feed->priv; | 405 | struct dmxdev_filter *dmxdevfilter = feed->priv; |
405 | struct dvb_ringbuffer *buffer; | 406 | struct dvb_ringbuffer *buffer; |
407 | unsigned long flags; | ||
406 | int ret; | 408 | int ret; |
407 | 409 | ||
408 | spin_lock(&dmxdevfilter->dev->lock); | 410 | spin_lock_irqsave(&dmxdevfilter->dev->lock, flags); |
409 | if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) { | 411 | if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) { |
410 | spin_unlock(&dmxdevfilter->dev->lock); | 412 | spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags); |
411 | return 0; | 413 | return 0; |
412 | } | 414 | } |
413 | 415 | ||
@@ -417,7 +419,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | |||
417 | else | 419 | else |
418 | buffer = &dmxdevfilter->dev->dvr_buffer; | 420 | buffer = &dmxdevfilter->dev->dvr_buffer; |
419 | if (buffer->error) { | 421 | if (buffer->error) { |
420 | spin_unlock(&dmxdevfilter->dev->lock); | 422 | spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags); |
421 | wake_up(&buffer->queue); | 423 | wake_up(&buffer->queue); |
422 | return 0; | 424 | return 0; |
423 | } | 425 | } |
@@ -428,7 +430,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | |||
428 | dvb_ringbuffer_flush(buffer); | 430 | dvb_ringbuffer_flush(buffer); |
429 | buffer->error = ret; | 431 | buffer->error = ret; |
430 | } | 432 | } |
431 | spin_unlock(&dmxdevfilter->dev->lock); | 433 | spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags); |
432 | wake_up(&buffer->queue); | 434 | wake_up(&buffer->queue); |
433 | return 0; | 435 | return 0; |
434 | } | 436 | } |
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index e2eca0b1fe7c..a2c1fd5d2f67 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c | |||
@@ -399,7 +399,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
399 | void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, | 399 | void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, |
400 | size_t count) | 400 | size_t count) |
401 | { | 401 | { |
402 | spin_lock(&demux->lock); | 402 | unsigned long flags; |
403 | |||
404 | spin_lock_irqsave(&demux->lock, flags); | ||
403 | 405 | ||
404 | while (count--) { | 406 | while (count--) { |
405 | if (buf[0] == 0x47) | 407 | if (buf[0] == 0x47) |
@@ -407,16 +409,17 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, | |||
407 | buf += 188; | 409 | buf += 188; |
408 | } | 410 | } |
409 | 411 | ||
410 | spin_unlock(&demux->lock); | 412 | spin_unlock_irqrestore(&demux->lock, flags); |
411 | } | 413 | } |
412 | 414 | ||
413 | EXPORT_SYMBOL(dvb_dmx_swfilter_packets); | 415 | EXPORT_SYMBOL(dvb_dmx_swfilter_packets); |
414 | 416 | ||
415 | void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) | 417 | void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) |
416 | { | 418 | { |
419 | unsigned long flags; | ||
417 | int p = 0, i, j; | 420 | int p = 0, i, j; |
418 | 421 | ||
419 | spin_lock(&demux->lock); | 422 | spin_lock_irqsave(&demux->lock, flags); |
420 | 423 | ||
421 | if (demux->tsbufp) { | 424 | if (demux->tsbufp) { |
422 | i = demux->tsbufp; | 425 | i = demux->tsbufp; |
@@ -449,17 +452,18 @@ void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) | |||
449 | } | 452 | } |
450 | 453 | ||
451 | bailout: | 454 | bailout: |
452 | spin_unlock(&demux->lock); | 455 | spin_unlock_irqrestore(&demux->lock, flags); |
453 | } | 456 | } |
454 | 457 | ||
455 | EXPORT_SYMBOL(dvb_dmx_swfilter); | 458 | EXPORT_SYMBOL(dvb_dmx_swfilter); |
456 | 459 | ||
457 | void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) | 460 | void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) |
458 | { | 461 | { |
462 | unsigned long flags; | ||
459 | int p = 0, i, j; | 463 | int p = 0, i, j; |
460 | u8 tmppack[188]; | 464 | u8 tmppack[188]; |
461 | 465 | ||
462 | spin_lock(&demux->lock); | 466 | spin_lock_irqsave(&demux->lock, flags); |
463 | 467 | ||
464 | if (demux->tsbufp) { | 468 | if (demux->tsbufp) { |
465 | i = demux->tsbufp; | 469 | i = demux->tsbufp; |
@@ -500,7 +504,7 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) | |||
500 | } | 504 | } |
501 | 505 | ||
502 | bailout: | 506 | bailout: |
503 | spin_unlock(&demux->lock); | 507 | spin_unlock_irqrestore(&demux->lock, flags); |
504 | } | 508 | } |
505 | 509 | ||
506 | EXPORT_SYMBOL(dvb_dmx_swfilter_204); | 510 | EXPORT_SYMBOL(dvb_dmx_swfilter_204); |
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c index 747d3fa2e5e5..2e9fd2893ede 100644 --- a/drivers/media/dvb/frontends/s5h1420.c +++ b/drivers/media/dvb/frontends/s5h1420.c | |||
@@ -59,7 +59,7 @@ struct s5h1420_state { | |||
59 | * it does not support repeated-start, workaround: write addr-1 | 59 | * it does not support repeated-start, workaround: write addr-1 |
60 | * and then read | 60 | * and then read |
61 | */ | 61 | */ |
62 | u8 shadow[255]; | 62 | u8 shadow[256]; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static u32 s5h1420_getsymbolrate(struct s5h1420_state* state); | 65 | static u32 s5h1420_getsymbolrate(struct s5h1420_state* state); |
@@ -94,8 +94,11 @@ static u8 s5h1420_readreg(struct s5h1420_state *state, u8 reg) | |||
94 | if (ret != 3) | 94 | if (ret != 3) |
95 | return ret; | 95 | return ret; |
96 | } else { | 96 | } else { |
97 | ret = i2c_transfer(state->i2c, &msg[1], 2); | 97 | ret = i2c_transfer(state->i2c, &msg[1], 1); |
98 | if (ret != 2) | 98 | if (ret != 1) |
99 | return ret; | ||
100 | ret = i2c_transfer(state->i2c, &msg[2], 1); | ||
101 | if (ret != 1) | ||
99 | return ret; | 102 | return ret; |
100 | } | 103 | } |
101 | 104 | ||
@@ -823,7 +826,7 @@ static int s5h1420_init (struct dvb_frontend* fe) | |||
823 | struct s5h1420_state* state = fe->demodulator_priv; | 826 | struct s5h1420_state* state = fe->demodulator_priv; |
824 | 827 | ||
825 | /* disable power down and do reset */ | 828 | /* disable power down and do reset */ |
826 | state->CON_1_val = 0x10; | 829 | state->CON_1_val = state->config->serial_mpeg << 4; |
827 | s5h1420_writereg(state, 0x02, state->CON_1_val); | 830 | s5h1420_writereg(state, 0x02, state->CON_1_val); |
828 | msleep(10); | 831 | msleep(10); |
829 | s5h1420_reset(state); | 832 | s5h1420_reset(state); |
diff --git a/drivers/media/dvb/frontends/s5h1420.h b/drivers/media/dvb/frontends/s5h1420.h index 4c913f142bc4..ff308136d865 100644 --- a/drivers/media/dvb/frontends/s5h1420.h +++ b/drivers/media/dvb/frontends/s5h1420.h | |||
@@ -32,10 +32,12 @@ struct s5h1420_config | |||
32 | u8 demod_address; | 32 | u8 demod_address; |
33 | 33 | ||
34 | /* does the inversion require inversion? */ | 34 | /* does the inversion require inversion? */ |
35 | u8 invert : 1; | 35 | u8 invert:1; |
36 | 36 | ||
37 | u8 repeated_start_workaround : 1; | 37 | u8 repeated_start_workaround:1; |
38 | u8 cdclk_polarity : 1; /* 1 == falling edge, 0 == rising edge */ | 38 | u8 cdclk_polarity:1; /* 1 == falling edge, 0 == rising edge */ |
39 | |||
40 | u8 serial_mpeg:1; | ||
39 | }; | 41 | }; |
40 | 42 | ||
41 | #if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE)) | 43 | #if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE)) |
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c index cc5efb643f33..9da260fe3fd1 100644 --- a/drivers/media/dvb/siano/sms-cards.c +++ b/drivers/media/dvb/siano/sms-cards.c | |||
@@ -40,6 +40,8 @@ struct usb_device_id smsusb_id_table[] = { | |||
40 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B }, | 40 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B }, |
41 | { USB_DEVICE(0x2040, 0x5500), | 41 | { USB_DEVICE(0x2040, 0x5500), |
42 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | 42 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, |
43 | { USB_DEVICE(0x2040, 0x5510), | ||
44 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
43 | { USB_DEVICE(0x2040, 0x5580), | 45 | { USB_DEVICE(0x2040, 0x5580), |
44 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | 46 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, |
45 | { USB_DEVICE(0x2040, 0x5590), | 47 | { USB_DEVICE(0x2040, 0x5590), |
@@ -87,7 +89,7 @@ static struct sms_board sms_boards[] = { | |||
87 | .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw", | 89 | .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw", |
88 | }, | 90 | }, |
89 | [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = { | 91 | [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = { |
90 | .name = "Hauppauge WinTV-Nova-T-MiniStick", | 92 | .name = "Hauppauge WinTV MiniStick", |
91 | .type = SMS_NOVA_B0, | 93 | .type = SMS_NOVA_B0, |
92 | .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-01.fw", | 94 | .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-01.fw", |
93 | }, | 95 | }, |
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 6ae4cc860efe..933eaef41ead 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c | |||
@@ -3431,7 +3431,7 @@ static int radio_open(struct inode *inode, struct file *file) | |||
3431 | dprintk("bttv: open minor=%d\n",minor); | 3431 | dprintk("bttv: open minor=%d\n",minor); |
3432 | 3432 | ||
3433 | for (i = 0; i < bttv_num; i++) { | 3433 | for (i = 0; i < bttv_num; i++) { |
3434 | if (bttvs[i].radio_dev->minor == minor) { | 3434 | if (bttvs[i].radio_dev && bttvs[i].radio_dev->minor == minor) { |
3435 | btv = &bttvs[i]; | 3435 | btv = &bttvs[i]; |
3436 | break; | 3436 | break; |
3437 | } | 3437 | } |
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index c149b7d712e5..5405c30dbb04 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/mm.h> | ||
22 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
23 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c index a4574740350d..a8a199047cbd 100644 --- a/drivers/media/video/cpia2/cpia2_usb.c +++ b/drivers/media/video/cpia2/cpia2_usb.c | |||
@@ -632,7 +632,7 @@ int cpia2_usb_transfer_cmd(struct camera_data *cam, | |||
632 | static int submit_urbs(struct camera_data *cam) | 632 | static int submit_urbs(struct camera_data *cam) |
633 | { | 633 | { |
634 | struct urb *urb; | 634 | struct urb *urb; |
635 | int fx, err, i; | 635 | int fx, err, i, j; |
636 | 636 | ||
637 | for(i=0; i<NUM_SBUF; ++i) { | 637 | for(i=0; i<NUM_SBUF; ++i) { |
638 | if (cam->sbuf[i].data) | 638 | if (cam->sbuf[i].data) |
@@ -657,6 +657,9 @@ static int submit_urbs(struct camera_data *cam) | |||
657 | } | 657 | } |
658 | urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); | 658 | urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); |
659 | if (!urb) { | 659 | if (!urb) { |
660 | ERR("%s: usb_alloc_urb error!\n", __func__); | ||
661 | for (j = 0; j < i; j++) | ||
662 | usb_free_urb(cam->sbuf[j].urb); | ||
660 | return -ENOMEM; | 663 | return -ENOMEM; |
661 | } | 664 | } |
662 | 665 | ||
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c index 8fe5f38c4d7c..3cb9734ec07b 100644 --- a/drivers/media/video/cx18/cx18-cards.c +++ b/drivers/media/video/cx18/cx18-cards.c | |||
@@ -163,7 +163,7 @@ static const struct cx18_card cx18_card_h900 = { | |||
163 | }, | 163 | }, |
164 | .audio_inputs = { | 164 | .audio_inputs = { |
165 | { CX18_CARD_INPUT_AUD_TUNER, | 165 | { CX18_CARD_INPUT_AUD_TUNER, |
166 | CX18_AV_AUDIO8, 0 }, | 166 | CX18_AV_AUDIO5, 0 }, |
167 | { CX18_CARD_INPUT_LINE_IN1, | 167 | { CX18_CARD_INPUT_LINE_IN1, |
168 | CX18_AV_AUDIO_SERIAL1, 0 }, | 168 | CX18_AV_AUDIO_SERIAL1, 0 }, |
169 | }, | 169 | }, |
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c index 3c006103c1eb..ac3292d7646c 100644 --- a/drivers/media/video/em28xx/em28xx-audio.c +++ b/drivers/media/video/em28xx/em28xx-audio.c | |||
@@ -117,10 +117,10 @@ static void em28xx_audio_isocirq(struct urb *urb) | |||
117 | 117 | ||
118 | if (oldptr + length >= runtime->buffer_size) { | 118 | if (oldptr + length >= runtime->buffer_size) { |
119 | unsigned int cnt = | 119 | unsigned int cnt = |
120 | runtime->buffer_size - oldptr - 1; | 120 | runtime->buffer_size - oldptr; |
121 | memcpy(runtime->dma_area + oldptr * stride, cp, | 121 | memcpy(runtime->dma_area + oldptr * stride, cp, |
122 | cnt * stride); | 122 | cnt * stride); |
123 | memcpy(runtime->dma_area, cp + cnt, | 123 | memcpy(runtime->dma_area, cp + cnt * stride, |
124 | length * stride - cnt * stride); | 124 | length * stride - cnt * stride); |
125 | } else { | 125 | } else { |
126 | memcpy(runtime->dma_area + oldptr * stride, cp, | 126 | memcpy(runtime->dma_area + oldptr * stride, cp, |
@@ -161,8 +161,14 @@ static int em28xx_init_audio_isoc(struct em28xx *dev) | |||
161 | 161 | ||
162 | memset(dev->adev->transfer_buffer[i], 0x80, sb_size); | 162 | memset(dev->adev->transfer_buffer[i], 0x80, sb_size); |
163 | urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC); | 163 | urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC); |
164 | if (!urb) | 164 | if (!urb) { |
165 | em28xx_errdev("usb_alloc_urb failed!\n"); | ||
166 | for (j = 0; j < i; j++) { | ||
167 | usb_free_urb(dev->adev->urb[j]); | ||
168 | kfree(dev->adev->transfer_buffer[j]); | ||
169 | } | ||
165 | return -ENOMEM; | 170 | return -ENOMEM; |
171 | } | ||
166 | 172 | ||
167 | urb->dev = dev->udev; | 173 | urb->dev = dev->udev; |
168 | urb->context = dev; | 174 | urb->context = dev; |
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 452da70e719f..de943cf6c169 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c | |||
@@ -93,28 +93,6 @@ struct em28xx_board em28xx_boards[] = { | |||
93 | .amux = 0, | 93 | .amux = 0, |
94 | } }, | 94 | } }, |
95 | }, | 95 | }, |
96 | [EM2800_BOARD_KWORLD_USB2800] = { | ||
97 | .name = "Kworld USB2800", | ||
98 | .valid = EM28XX_BOARD_NOT_VALIDATED, | ||
99 | .is_em2800 = 1, | ||
100 | .vchannels = 3, | ||
101 | .tuner_type = TUNER_PHILIPS_FCV1236D, | ||
102 | .tda9887_conf = TDA9887_PRESENT, | ||
103 | .decoder = EM28XX_SAA7113, | ||
104 | .input = { { | ||
105 | .type = EM28XX_VMUX_TELEVISION, | ||
106 | .vmux = SAA7115_COMPOSITE2, | ||
107 | .amux = 0, | ||
108 | }, { | ||
109 | .type = EM28XX_VMUX_COMPOSITE1, | ||
110 | .vmux = SAA7115_COMPOSITE0, | ||
111 | .amux = 1, | ||
112 | }, { | ||
113 | .type = EM28XX_VMUX_SVIDEO, | ||
114 | .vmux = SAA7115_SVIDEO3, | ||
115 | .amux = 1, | ||
116 | } }, | ||
117 | }, | ||
118 | [EM2820_BOARD_KWORLD_PVRTV2800RF] = { | 96 | [EM2820_BOARD_KWORLD_PVRTV2800RF] = { |
119 | .name = "Kworld PVR TV 2800 RF", | 97 | .name = "Kworld PVR TV 2800 RF", |
120 | .is_em2800 = 0, | 98 | .is_em2800 = 0, |
@@ -599,7 +577,7 @@ struct em28xx_board em28xx_boards[] = { | |||
599 | }, { | 577 | }, { |
600 | .type = EM28XX_VMUX_COMPOSITE1, | 578 | .type = EM28XX_VMUX_COMPOSITE1, |
601 | .vmux = TVP5150_COMPOSITE1, | 579 | .vmux = TVP5150_COMPOSITE1, |
602 | .amux = 1, | 580 | .amux = 3, |
603 | }, { | 581 | }, { |
604 | .type = EM28XX_VMUX_SVIDEO, | 582 | .type = EM28XX_VMUX_SVIDEO, |
605 | .vmux = TVP5150_SVIDEO, | 583 | .vmux = TVP5150_SVIDEO, |
@@ -952,22 +930,23 @@ struct em28xx_board em28xx_boards[] = { | |||
952 | }, | 930 | }, |
953 | [EM2880_BOARD_KWORLD_DVB_310U] = { | 931 | [EM2880_BOARD_KWORLD_DVB_310U] = { |
954 | .name = "KWorld DVB-T 310U", | 932 | .name = "KWorld DVB-T 310U", |
955 | .valid = EM28XX_BOARD_NOT_VALIDATED, | ||
956 | .vchannels = 3, | 933 | .vchannels = 3, |
957 | .tuner_type = TUNER_XC2028, | 934 | .tuner_type = TUNER_XC2028, |
935 | .has_dvb = 1, | ||
936 | .mts_firmware = 1, | ||
958 | .decoder = EM28XX_TVP5150, | 937 | .decoder = EM28XX_TVP5150, |
959 | .input = { { | 938 | .input = { { |
960 | .type = EM28XX_VMUX_TELEVISION, | 939 | .type = EM28XX_VMUX_TELEVISION, |
961 | .vmux = TVP5150_COMPOSITE0, | 940 | .vmux = TVP5150_COMPOSITE0, |
962 | .amux = 0, | 941 | .amux = EM28XX_AMUX_VIDEO, |
963 | }, { | 942 | }, { |
964 | .type = EM28XX_VMUX_COMPOSITE1, | 943 | .type = EM28XX_VMUX_COMPOSITE1, |
965 | .vmux = TVP5150_COMPOSITE1, | 944 | .vmux = TVP5150_COMPOSITE1, |
966 | .amux = 1, | 945 | .amux = EM28XX_AMUX_AC97_LINE_IN, |
967 | }, { | 946 | }, { /* S-video has not been tested yet */ |
968 | .type = EM28XX_VMUX_SVIDEO, | 947 | .type = EM28XX_VMUX_SVIDEO, |
969 | .vmux = TVP5150_SVIDEO, | 948 | .vmux = TVP5150_SVIDEO, |
970 | .amux = 1, | 949 | .amux = EM28XX_AMUX_AC97_LINE_IN, |
971 | } }, | 950 | } }, |
972 | }, | 951 | }, |
973 | [EM2881_BOARD_DNT_DA2_HYBRID] = { | 952 | [EM2881_BOARD_DNT_DA2_HYBRID] = { |
@@ -1282,6 +1261,7 @@ static struct em28xx_reg_seq em2882_terratec_hybrid_xs_digital[] = { | |||
1282 | static struct em28xx_hash_table em28xx_eeprom_hash [] = { | 1261 | static struct em28xx_hash_table em28xx_eeprom_hash [] = { |
1283 | /* P/N: SA 60002070465 Tuner: TVF7533-MF */ | 1262 | /* P/N: SA 60002070465 Tuner: TVF7533-MF */ |
1284 | {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, | 1263 | {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, |
1264 | {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, | ||
1285 | }; | 1265 | }; |
1286 | 1266 | ||
1287 | /* I2C devicelist hash table for devices with generic USB IDs */ | 1267 | /* I2C devicelist hash table for devices with generic USB IDs */ |
@@ -1552,9 +1532,12 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) | |||
1552 | /* djh - Not sure which demod we need here */ | 1532 | /* djh - Not sure which demod we need here */ |
1553 | ctl->demod = XC3028_FE_DEFAULT; | 1533 | ctl->demod = XC3028_FE_DEFAULT; |
1554 | break; | 1534 | break; |
1535 | case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600: | ||
1536 | ctl->demod = XC3028_FE_DEFAULT; | ||
1537 | ctl->fname = XC3028L_DEFAULT_FIRMWARE; | ||
1538 | break; | ||
1555 | case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: | 1539 | case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950: |
1556 | case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: | 1540 | case EM2880_BOARD_PINNACLE_PCTV_HD_PRO: |
1557 | case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600: | ||
1558 | /* FIXME: Better to specify the needed IF */ | 1541 | /* FIXME: Better to specify the needed IF */ |
1559 | ctl->demod = XC3028_FE_DEFAULT; | 1542 | ctl->demod = XC3028_FE_DEFAULT; |
1560 | break; | 1543 | break; |
@@ -1764,6 +1747,20 @@ void em28xx_card_setup(struct em28xx *dev) | |||
1764 | break; | 1747 | break; |
1765 | case EM2820_BOARD_UNKNOWN: | 1748 | case EM2820_BOARD_UNKNOWN: |
1766 | case EM2800_BOARD_UNKNOWN: | 1749 | case EM2800_BOARD_UNKNOWN: |
1750 | /* | ||
1751 | * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD. | ||
1752 | * | ||
1753 | * This occurs because they share identical USB vendor and | ||
1754 | * product IDs. | ||
1755 | * | ||
1756 | * What we do here is look up the EEPROM hash of the K-WORLD | ||
1757 | * and if it is found then we decide that we do not have | ||
1758 | * a DIGIVOX and reset the device to the K-WORLD instead. | ||
1759 | * | ||
1760 | * This solution is only valid if they do not share eeprom | ||
1761 | * hash identities, which has not yet been determined. | ||
1762 | */ | ||
1763 | case EM2880_BOARD_MSI_DIGIVOX_AD: | ||
1767 | if (!em28xx_hint_board(dev)) | 1764 | if (!em28xx_hint_board(dev)) |
1768 | em28xx_set_model(dev); | 1765 | em28xx_set_model(dev); |
1769 | break; | 1766 | break; |
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index 4b992bc0083c..d2b1a1a52689 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c | |||
@@ -452,6 +452,15 @@ static int dvb_init(struct em28xx *dev) | |||
452 | goto out_free; | 452 | goto out_free; |
453 | } | 453 | } |
454 | break; | 454 | break; |
455 | case EM2880_BOARD_KWORLD_DVB_310U: | ||
456 | dvb->frontend = dvb_attach(zl10353_attach, | ||
457 | &em28xx_zl10353_with_xc3028, | ||
458 | &dev->i2c_adap); | ||
459 | if (attach_xc3028(0x61, dev) < 0) { | ||
460 | result = -EINVAL; | ||
461 | goto out_free; | ||
462 | } | ||
463 | break; | ||
455 | default: | 464 | default: |
456 | printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card" | 465 | printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card" |
457 | " isn't supported yet\n", | 466 | " isn't supported yet\n", |
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 7be69284da03..ac95c55887df 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c | |||
@@ -459,6 +459,7 @@ static int create_urbs(struct gspca_dev *gspca_dev, | |||
459 | urb = usb_alloc_urb(npkt, GFP_KERNEL); | 459 | urb = usb_alloc_urb(npkt, GFP_KERNEL); |
460 | if (!urb) { | 460 | if (!urb) { |
461 | err("usb_alloc_urb failed"); | 461 | err("usb_alloc_urb failed"); |
462 | destroy_urbs(gspca_dev); | ||
462 | return -ENOMEM; | 463 | return -ENOMEM; |
463 | } | 464 | } |
464 | urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev, | 465 | urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev, |
@@ -468,8 +469,8 @@ static int create_urbs(struct gspca_dev *gspca_dev, | |||
468 | 469 | ||
469 | if (urb->transfer_buffer == NULL) { | 470 | if (urb->transfer_buffer == NULL) { |
470 | usb_free_urb(urb); | 471 | usb_free_urb(urb); |
471 | destroy_urbs(gspca_dev); | ||
472 | err("usb_buffer_urb failed"); | 472 | err("usb_buffer_urb failed"); |
473 | destroy_urbs(gspca_dev); | ||
473 | return -ENOMEM; | 474 | return -ENOMEM; |
474 | } | 475 | } |
475 | gspca_dev->urb[n] = urb; | 476 | gspca_dev->urb[n] = urb; |
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c index d4be51843286..ba865b7f1ed8 100644 --- a/drivers/media/video/gspca/pac7311.c +++ b/drivers/media/video/gspca/pac7311.c | |||
@@ -1063,6 +1063,7 @@ static __devinitdata struct usb_device_id device_table[] = { | |||
1063 | {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302}, | 1063 | {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302}, |
1064 | {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302}, | 1064 | {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302}, |
1065 | {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302}, | 1065 | {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302}, |
1066 | {USB_DEVICE(0x093a, 0x262a), .driver_info = SENSOR_PAC7302}, | ||
1066 | {} | 1067 | {} |
1067 | }; | 1068 | }; |
1068 | MODULE_DEVICE_TABLE(usb, device_table); | 1069 | MODULE_DEVICE_TABLE(usb, device_table); |
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c index 5dd78c6766ea..12b81ae526b7 100644 --- a/drivers/media/video/gspca/sonixb.c +++ b/drivers/media/video/gspca/sonixb.c | |||
@@ -232,7 +232,7 @@ static struct ctrl sd_ctrls[] = { | |||
232 | static struct v4l2_pix_format vga_mode[] = { | 232 | static struct v4l2_pix_format vga_mode[] = { |
233 | {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | 233 | {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, |
234 | .bytesperline = 160, | 234 | .bytesperline = 160, |
235 | .sizeimage = 160 * 120 * 5 / 4, | 235 | .sizeimage = 160 * 120, |
236 | .colorspace = V4L2_COLORSPACE_SRGB, | 236 | .colorspace = V4L2_COLORSPACE_SRGB, |
237 | .priv = 2 | MODE_RAW}, | 237 | .priv = 2 | MODE_RAW}, |
238 | {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, | 238 | {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, |
@@ -264,7 +264,7 @@ static struct v4l2_pix_format sif_mode[] = { | |||
264 | .priv = 1 | MODE_REDUCED_SIF}, | 264 | .priv = 1 | MODE_REDUCED_SIF}, |
265 | {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, | 265 | {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, |
266 | .bytesperline = 176, | 266 | .bytesperline = 176, |
267 | .sizeimage = 176 * 144 * 5 / 4, | 267 | .sizeimage = 176 * 144, |
268 | .colorspace = V4L2_COLORSPACE_SRGB, | 268 | .colorspace = V4L2_COLORSPACE_SRGB, |
269 | .priv = 1 | MODE_RAW}, | 269 | .priv = 1 | MODE_RAW}, |
270 | {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, | 270 | {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, |
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index d75b1d20b318..572b0f363b64 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c | |||
@@ -707,6 +707,7 @@ static void i2c_w8(struct gspca_dev *gspca_dev, | |||
707 | 0x08, 0, /* value, index */ | 707 | 0x08, 0, /* value, index */ |
708 | gspca_dev->usb_buf, 8, | 708 | gspca_dev->usb_buf, 8, |
709 | 500); | 709 | 500); |
710 | msleep(2); | ||
710 | } | 711 | } |
711 | 712 | ||
712 | /* read 5 bytes in gspca_dev->usb_buf */ | 713 | /* read 5 bytes in gspca_dev->usb_buf */ |
@@ -976,13 +977,13 @@ static int sd_init(struct gspca_dev *gspca_dev) | |||
976 | case BRIDGE_SN9C105: | 977 | case BRIDGE_SN9C105: |
977 | if (regF1 != 0x11) | 978 | if (regF1 != 0x11) |
978 | return -ENODEV; | 979 | return -ENODEV; |
979 | reg_w(gspca_dev, 0x02, regGpio, 2); | 980 | reg_w(gspca_dev, 0x01, regGpio, 2); |
980 | break; | 981 | break; |
981 | case BRIDGE_SN9C120: | 982 | case BRIDGE_SN9C120: |
982 | if (regF1 != 0x12) | 983 | if (regF1 != 0x12) |
983 | return -ENODEV; | 984 | return -ENODEV; |
984 | regGpio[1] = 0x70; | 985 | regGpio[1] = 0x70; |
985 | reg_w(gspca_dev, 0x02, regGpio, 2); | 986 | reg_w(gspca_dev, 0x01, regGpio, 2); |
986 | break; | 987 | break; |
987 | default: | 988 | default: |
988 | /* case BRIDGE_SN9C110: */ | 989 | /* case BRIDGE_SN9C110: */ |
@@ -1183,7 +1184,7 @@ static void sd_start(struct gspca_dev *gspca_dev) | |||
1183 | static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; | 1184 | static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; |
1184 | static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ | 1185 | static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ |
1185 | static const __u8 CE_ov76xx[] = | 1186 | static const __u8 CE_ov76xx[] = |
1186 | { 0x32, 0xdd, 0x32, 0xdd }; /* OV7630/48 */ | 1187 | { 0x32, 0xdd, 0x32, 0xdd }; |
1187 | 1188 | ||
1188 | sn9c1xx = sn_tb[(int) sd->sensor]; | 1189 | sn9c1xx = sn_tb[(int) sd->sensor]; |
1189 | configure_gpio(gspca_dev, sn9c1xx); | 1190 | configure_gpio(gspca_dev, sn9c1xx); |
@@ -1223,8 +1224,15 @@ static void sd_start(struct gspca_dev *gspca_dev) | |||
1223 | reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def); | 1224 | reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def); |
1224 | for (i = 0; i < 8; i++) | 1225 | for (i = 0; i < 8; i++) |
1225 | reg_w(gspca_dev, 0x84, reg84, sizeof reg84); | 1226 | reg_w(gspca_dev, 0x84, reg84, sizeof reg84); |
1227 | switch (sd->sensor) { | ||
1228 | case SENSOR_OV7660: | ||
1229 | reg_w1(gspca_dev, 0x9a, 0x05); | ||
1230 | break; | ||
1231 | default: | ||
1226 | reg_w1(gspca_dev, 0x9a, 0x08); | 1232 | reg_w1(gspca_dev, 0x9a, 0x08); |
1227 | reg_w1(gspca_dev, 0x99, 0x59); | 1233 | reg_w1(gspca_dev, 0x99, 0x59); |
1234 | break; | ||
1235 | } | ||
1228 | 1236 | ||
1229 | mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; | 1237 | mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; |
1230 | if (mode) | 1238 | if (mode) |
@@ -1275,8 +1283,8 @@ static void sd_start(struct gspca_dev *gspca_dev) | |||
1275 | /* reg1 = 0x44; */ | 1283 | /* reg1 = 0x44; */ |
1276 | /* reg1 = 0x46; (done) */ | 1284 | /* reg1 = 0x46; (done) */ |
1277 | } else { | 1285 | } else { |
1278 | reg17 = 0x22; /* 640 MCKSIZE */ | 1286 | reg17 = 0xa2; /* 640 */ |
1279 | reg1 = 0x06; | 1287 | reg1 = 0x44; |
1280 | } | 1288 | } |
1281 | break; | 1289 | break; |
1282 | } | 1290 | } |
@@ -1285,6 +1293,7 @@ static void sd_start(struct gspca_dev *gspca_dev) | |||
1285 | switch (sd->sensor) { | 1293 | switch (sd->sensor) { |
1286 | case SENSOR_OV7630: | 1294 | case SENSOR_OV7630: |
1287 | case SENSOR_OV7648: | 1295 | case SENSOR_OV7648: |
1296 | case SENSOR_OV7660: | ||
1288 | reg_w(gspca_dev, 0xce, CE_ov76xx, 4); | 1297 | reg_w(gspca_dev, 0xce, CE_ov76xx, 4); |
1289 | break; | 1298 | break; |
1290 | default: | 1299 | default: |
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c index cfbc9ebc5c5d..95fcfcb9e31b 100644 --- a/drivers/media/video/gspca/spca561.c +++ b/drivers/media/video/gspca/spca561.c | |||
@@ -225,7 +225,7 @@ static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode) | |||
225 | reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01)); | 225 | reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01)); |
226 | do { | 226 | do { |
227 | reg_r(gspca_dev, 0x8803, 1); | 227 | reg_r(gspca_dev, 0x8803, 1); |
228 | if (!gspca_dev->usb_buf) | 228 | if (!gspca_dev->usb_buf[0]) |
229 | break; | 229 | break; |
230 | } while (--retry); | 230 | } while (--retry); |
231 | if (retry == 0) | 231 | if (retry == 0) |
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c index 8d7c27e6ac77..d61ef727e0c2 100644 --- a/drivers/media/video/gspca/zc3xx.c +++ b/drivers/media/video/gspca/zc3xx.c | |||
@@ -6576,8 +6576,8 @@ static int setlightfreq(struct gspca_dev *gspca_dev) | |||
6576 | cs2102_60HZ, cs2102_60HZScale}, | 6576 | cs2102_60HZ, cs2102_60HZScale}, |
6577 | /* SENSOR_CS2102K 1 */ | 6577 | /* SENSOR_CS2102K 1 */ |
6578 | {cs2102_NoFliker, cs2102_NoFlikerScale, | 6578 | {cs2102_NoFliker, cs2102_NoFlikerScale, |
6579 | cs2102_50HZ, cs2102_50HZScale, | 6579 | NULL, NULL, /* currently disabled */ |
6580 | cs2102_60HZ, cs2102_60HZScale}, | 6580 | NULL, NULL}, |
6581 | /* SENSOR_GC0305 2 */ | 6581 | /* SENSOR_GC0305 2 */ |
6582 | {gc0305_NoFliker, gc0305_NoFliker, | 6582 | {gc0305_NoFliker, gc0305_NoFliker, |
6583 | gc0305_50HZ, gc0305_50HZ, | 6583 | gc0305_50HZ, gc0305_50HZ, |
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c index 3d3c48db45d9..c6852402c5e9 100644 --- a/drivers/media/video/ov511.c +++ b/drivers/media/video/ov511.c | |||
@@ -3591,7 +3591,7 @@ static int | |||
3591 | ov51x_init_isoc(struct usb_ov511 *ov) | 3591 | ov51x_init_isoc(struct usb_ov511 *ov) |
3592 | { | 3592 | { |
3593 | struct urb *urb; | 3593 | struct urb *urb; |
3594 | int fx, err, n, size; | 3594 | int fx, err, n, i, size; |
3595 | 3595 | ||
3596 | PDEBUG(3, "*** Initializing capture ***"); | 3596 | PDEBUG(3, "*** Initializing capture ***"); |
3597 | 3597 | ||
@@ -3662,6 +3662,8 @@ ov51x_init_isoc(struct usb_ov511 *ov) | |||
3662 | urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); | 3662 | urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL); |
3663 | if (!urb) { | 3663 | if (!urb) { |
3664 | err("init isoc: usb_alloc_urb ret. NULL"); | 3664 | err("init isoc: usb_alloc_urb ret. NULL"); |
3665 | for (i = 0; i < n; i++) | ||
3666 | usb_free_urb(ov->sbuf[i].urb); | ||
3665 | return -ENOMEM; | 3667 | return -ENOMEM; |
3666 | } | 3668 | } |
3667 | ov->sbuf[n].urb = urb; | 3669 | ov->sbuf[n].urb = urb; |
@@ -5651,7 +5653,7 @@ static ssize_t show_exposure(struct device *cd, | |||
5651 | if (!ov->dev) | 5653 | if (!ov->dev) |
5652 | return -ENODEV; | 5654 | return -ENODEV; |
5653 | sensor_get_exposure(ov, &exp); | 5655 | sensor_get_exposure(ov, &exp); |
5654 | return sprintf(buf, "%d\n", exp >> 8); | 5656 | return sprintf(buf, "%d\n", exp); |
5655 | } | 5657 | } |
5656 | static DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL); | 5658 | static DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL); |
5657 | 5659 | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c index 88e175168438..cbe2a3417851 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c +++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c | |||
@@ -489,6 +489,8 @@ static const struct pvr2_device_desc pvr2_device_751xx = { | |||
489 | struct usb_device_id pvr2_device_table[] = { | 489 | struct usb_device_id pvr2_device_table[] = { |
490 | { USB_DEVICE(0x2040, 0x2900), | 490 | { USB_DEVICE(0x2040, 0x2900), |
491 | .driver_info = (kernel_ulong_t)&pvr2_device_29xxx}, | 491 | .driver_info = (kernel_ulong_t)&pvr2_device_29xxx}, |
492 | { USB_DEVICE(0x2040, 0x2950), /* Logically identical to 2900 */ | ||
493 | .driver_info = (kernel_ulong_t)&pvr2_device_29xxx}, | ||
492 | { USB_DEVICE(0x2040, 0x2400), | 494 | { USB_DEVICE(0x2040, 0x2400), |
493 | .driver_info = (kernel_ulong_t)&pvr2_device_24xxx}, | 495 | .driver_info = (kernel_ulong_t)&pvr2_device_24xxx}, |
494 | { USB_DEVICE(0x1164, 0x0622), | 496 | { USB_DEVICE(0x1164, 0x0622), |
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index b1d09d8e2b85..92b83feae366 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c | |||
@@ -669,7 +669,7 @@ static void s2255_fillbuff(struct s2255_dev *dev, struct s2255_buffer *buf, | |||
669 | (unsigned long)vbuf, pos); | 669 | (unsigned long)vbuf, pos); |
670 | /* tell v4l buffer was filled */ | 670 | /* tell v4l buffer was filled */ |
671 | 671 | ||
672 | buf->vb.field_count++; | 672 | buf->vb.field_count = dev->frame_count[chn] * 2; |
673 | do_gettimeofday(&ts); | 673 | do_gettimeofday(&ts); |
674 | buf->vb.ts = ts; | 674 | buf->vb.ts = ts; |
675 | buf->vb.state = VIDEOBUF_DONE; | 675 | buf->vb.state = VIDEOBUF_DONE; |
@@ -1268,6 +1268,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) | |||
1268 | dev->last_frame[chn] = -1; | 1268 | dev->last_frame[chn] = -1; |
1269 | dev->bad_payload[chn] = 0; | 1269 | dev->bad_payload[chn] = 0; |
1270 | dev->cur_frame[chn] = 0; | 1270 | dev->cur_frame[chn] = 0; |
1271 | dev->frame_count[chn] = 0; | ||
1271 | for (j = 0; j < SYS_FRAMES; j++) { | 1272 | for (j = 0; j < SYS_FRAMES; j++) { |
1272 | dev->buffer[chn].frame[j].ulState = 0; | 1273 | dev->buffer[chn].frame[j].ulState = 0; |
1273 | dev->buffer[chn].frame[j].cur_size = 0; | 1274 | dev->buffer[chn].frame[j].cur_size = 0; |
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c index 6ef3e5297de8..feab12aa2c7b 100644 --- a/drivers/media/video/uvc/uvc_ctrl.c +++ b/drivers/media/video/uvc/uvc_ctrl.c | |||
@@ -592,7 +592,7 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video, | |||
592 | if (ctrl == NULL) | 592 | if (ctrl == NULL) |
593 | return -EINVAL; | 593 | return -EINVAL; |
594 | 594 | ||
595 | data = kmalloc(8, GFP_KERNEL); | 595 | data = kmalloc(ctrl->info->size, GFP_KERNEL); |
596 | if (data == NULL) | 596 | if (data == NULL) |
597 | return -ENOMEM; | 597 | return -ENOMEM; |
598 | 598 | ||
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c index 168baabe4659..11edf79f57be 100644 --- a/drivers/media/video/w9968cf.c +++ b/drivers/media/video/w9968cf.c | |||
@@ -911,7 +911,6 @@ static int w9968cf_start_transfer(struct w9968cf_device* cam) | |||
911 | 911 | ||
912 | for (i = 0; i < W9968CF_URBS; i++) { | 912 | for (i = 0; i < W9968CF_URBS; i++) { |
913 | urb = usb_alloc_urb(W9968CF_ISO_PACKETS, GFP_KERNEL); | 913 | urb = usb_alloc_urb(W9968CF_ISO_PACKETS, GFP_KERNEL); |
914 | cam->urb[i] = urb; | ||
915 | if (!urb) { | 914 | if (!urb) { |
916 | for (j = 0; j < i; j++) | 915 | for (j = 0; j < i; j++) |
917 | usb_free_urb(cam->urb[j]); | 916 | usb_free_urb(cam->urb[j]); |
@@ -919,6 +918,7 @@ static int w9968cf_start_transfer(struct w9968cf_device* cam) | |||
919 | return -ENOMEM; | 918 | return -ENOMEM; |
920 | } | 919 | } |
921 | 920 | ||
921 | cam->urb[i] = urb; | ||
922 | urb->dev = udev; | 922 | urb->dev = udev; |
923 | urb->context = (void*)cam; | 923 | urb->context = (void*)cam; |
924 | urb->pipe = usb_rcvisocpipe(udev, 1); | 924 | urb->pipe = usb_rcvisocpipe(udev, 1); |
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c index 95c79ad80487..54ac3fe26ec2 100644 --- a/drivers/media/video/wm8739.c +++ b/drivers/media/video/wm8739.c | |||
@@ -274,10 +274,8 @@ static int wm8739_probe(struct i2c_client *client, | |||
274 | client->addr << 1, client->adapter->name); | 274 | client->addr << 1, client->adapter->name); |
275 | 275 | ||
276 | state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL); | 276 | state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL); |
277 | if (state == NULL) { | 277 | if (state == NULL) |
278 | kfree(client); | ||
279 | return -ENOMEM; | 278 | return -ENOMEM; |
280 | } | ||
281 | state->vol_l = 0x17; /* 0dB */ | 279 | state->vol_l = 0x17; /* 0dB */ |
282 | state->vol_r = 0x17; /* 0dB */ | 280 | state->vol_r = 0x17; /* 0dB */ |
283 | state->muted = 0; | 281 | state->muted = 0; |
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c index d842a7cb99d2..3282be730298 100644 --- a/drivers/media/video/zoran_card.c +++ b/drivers/media/video/zoran_card.c | |||
@@ -988,7 +988,7 @@ zoran_open_init_params (struct zoran *zr) | |||
988 | zr->v4l_grab_seq = 0; | 988 | zr->v4l_grab_seq = 0; |
989 | zr->v4l_settings.width = 192; | 989 | zr->v4l_settings.width = 192; |
990 | zr->v4l_settings.height = 144; | 990 | zr->v4l_settings.height = 144; |
991 | zr->v4l_settings.format = &zoran_formats[4]; /* YUY2 - YUV-4:2:2 packed */ | 991 | zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */ |
992 | zr->v4l_settings.bytesperline = | 992 | zr->v4l_settings.bytesperline = |
993 | zr->v4l_settings.width * | 993 | zr->v4l_settings.width * |
994 | ((zr->v4l_settings.format->depth + 7) / 8); | 994 | ((zr->v4l_settings.format->depth + 7) / 8); |
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c index ec6f59674b10..2dab9eea4def 100644 --- a/drivers/media/video/zoran_driver.c +++ b/drivers/media/video/zoran_driver.c | |||
@@ -134,7 +134,7 @@ const struct zoran_format zoran_formats[] = { | |||
134 | }, { | 134 | }, { |
135 | .name = "16-bit RGB BE", | 135 | .name = "16-bit RGB BE", |
136 | ZFMT(-1, | 136 | ZFMT(-1, |
137 | V4L2_PIX_FMT_RGB565, V4L2_COLORSPACE_SRGB), | 137 | V4L2_PIX_FMT_RGB565X, V4L2_COLORSPACE_SRGB), |
138 | .depth = 16, | 138 | .depth = 16, |
139 | .flags = ZORAN_FORMAT_CAPTURE | | 139 | .flags = ZORAN_FORMAT_CAPTURE | |
140 | ZORAN_FORMAT_OVERLAY, | 140 | ZORAN_FORMAT_OVERLAY, |
@@ -2737,7 +2737,8 @@ zoran_do_ioctl (struct inode *inode, | |||
2737 | fh->v4l_settings.format->fourcc; | 2737 | fh->v4l_settings.format->fourcc; |
2738 | fmt->fmt.pix.colorspace = | 2738 | fmt->fmt.pix.colorspace = |
2739 | fh->v4l_settings.format->colorspace; | 2739 | fh->v4l_settings.format->colorspace; |
2740 | fmt->fmt.pix.bytesperline = 0; | 2740 | fmt->fmt.pix.bytesperline = |
2741 | fh->v4l_settings.bytesperline; | ||
2741 | if (BUZ_MAX_HEIGHT < | 2742 | if (BUZ_MAX_HEIGHT < |
2742 | (fh->v4l_settings.height * 2)) | 2743 | (fh->v4l_settings.height * 2)) |
2743 | fmt->fmt.pix.field = | 2744 | fmt->fmt.pix.field = |
@@ -2833,13 +2834,6 @@ zoran_do_ioctl (struct inode *inode, | |||
2833 | fmt->fmt.pix.pixelformat, | 2834 | fmt->fmt.pix.pixelformat, |
2834 | (char *) &printformat); | 2835 | (char *) &printformat); |
2835 | 2836 | ||
2836 | if (fmt->fmt.pix.bytesperline > 0) { | ||
2837 | dprintk(5, | ||
2838 | KERN_ERR "%s: bpl not supported\n", | ||
2839 | ZR_DEVNAME(zr)); | ||
2840 | return -EINVAL; | ||
2841 | } | ||
2842 | |||
2843 | /* we can be requested to do JPEG/raw playback/capture */ | 2837 | /* we can be requested to do JPEG/raw playback/capture */ |
2844 | if (! | 2838 | if (! |
2845 | (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || | 2839 | (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || |
@@ -2923,6 +2917,7 @@ zoran_do_ioctl (struct inode *inode, | |||
2923 | fh->jpg_buffers.buffer_size = | 2917 | fh->jpg_buffers.buffer_size = |
2924 | zoran_v4l2_calc_bufsize(&fh-> | 2918 | zoran_v4l2_calc_bufsize(&fh-> |
2925 | jpg_settings); | 2919 | jpg_settings); |
2920 | fmt->fmt.pix.bytesperline = 0; | ||
2926 | fmt->fmt.pix.sizeimage = | 2921 | fmt->fmt.pix.sizeimage = |
2927 | fh->jpg_buffers.buffer_size; | 2922 | fh->jpg_buffers.buffer_size; |
2928 | 2923 | ||
@@ -2979,6 +2974,8 @@ zoran_do_ioctl (struct inode *inode, | |||
2979 | 2974 | ||
2980 | /* tell the user the | 2975 | /* tell the user the |
2981 | * results/missing stuff */ | 2976 | * results/missing stuff */ |
2977 | fmt->fmt.pix.bytesperline = | ||
2978 | fh->v4l_settings.bytesperline; | ||
2982 | fmt->fmt.pix.sizeimage = | 2979 | fmt->fmt.pix.sizeimage = |
2983 | fh->v4l_settings.height * | 2980 | fh->v4l_settings.height * |
2984 | fh->v4l_settings.bytesperline; | 2981 | fh->v4l_settings.bytesperline; |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 917035e16da4..00008967ef7a 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -426,8 +426,6 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) | |||
426 | host->sg = NULL; | 426 | host->sg = NULL; |
427 | host->data = data; | 427 | host->data = data; |
428 | 428 | ||
429 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) | ||
430 | | MCI_BLKLEN(data->blksz)); | ||
431 | dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", | 429 | dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", |
432 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); | 430 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); |
433 | 431 | ||
@@ -483,6 +481,10 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
483 | if (data->blocks > 1 && data->blksz & 3) | 481 | if (data->blocks > 1 && data->blksz & 3) |
484 | goto fail; | 482 | goto fail; |
485 | atmci_set_timeout(host, data); | 483 | atmci_set_timeout(host, data); |
484 | |||
485 | /* Must set block count/size before sending command */ | ||
486 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) | ||
487 | | MCI_BLKLEN(data->blksz)); | ||
486 | } | 488 | } |
487 | 489 | ||
488 | iflags = MCI_CMDRDY; | 490 | iflags = MCI_CMDRDY; |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 0b6095ba3ce9..bcd2bc477af2 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -396,7 +396,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
396 | u32 extcnf_ctrl; | 396 | u32 extcnf_ctrl; |
397 | u32 timeout = PHY_CFG_TIMEOUT; | 397 | u32 timeout = PHY_CFG_TIMEOUT; |
398 | 398 | ||
399 | WARN_ON(preempt_count()); | 399 | might_sleep(); |
400 | 400 | ||
401 | if (!mutex_trylock(&nvm_mutex)) { | 401 | if (!mutex_trylock(&nvm_mutex)) { |
402 | WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", | 402 | WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index f118252f3a9f..52e2743b04ec 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -422,6 +422,12 @@ done: | |||
422 | return err; | 422 | return err; |
423 | } | 423 | } |
424 | 424 | ||
425 | static int rtc_dev_fasync(int fd, struct file *file, int on) | ||
426 | { | ||
427 | struct rtc_device *rtc = file->private_data; | ||
428 | return fasync_helper(fd, file, on, &rtc->async_queue); | ||
429 | } | ||
430 | |||
425 | static int rtc_dev_release(struct inode *inode, struct file *file) | 431 | static int rtc_dev_release(struct inode *inode, struct file *file) |
426 | { | 432 | { |
427 | struct rtc_device *rtc = file->private_data; | 433 | struct rtc_device *rtc = file->private_data; |
@@ -434,16 +440,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file) | |||
434 | if (rtc->ops->release) | 440 | if (rtc->ops->release) |
435 | rtc->ops->release(rtc->dev.parent); | 441 | rtc->ops->release(rtc->dev.parent); |
436 | 442 | ||
443 | if (file->f_flags & FASYNC) | ||
444 | rtc_dev_fasync(-1, file, 0); | ||
445 | |||
437 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); | 446 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); |
438 | return 0; | 447 | return 0; |
439 | } | 448 | } |
440 | 449 | ||
441 | static int rtc_dev_fasync(int fd, struct file *file, int on) | ||
442 | { | ||
443 | struct rtc_device *rtc = file->private_data; | ||
444 | return fasync_helper(fd, file, on, &rtc->async_queue); | ||
445 | } | ||
446 | |||
447 | static const struct file_operations rtc_dev_fops = { | 450 | static const struct file_operations rtc_dev_fops = { |
448 | .owner = THIS_MODULE, | 451 | .owner = THIS_MODULE, |
449 | .llseek = no_llseek, | 452 | .llseek = no_llseek, |
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 1679e2f91c94..a0b6b46e7466 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 447 | { |
448 | char s[80]; | 448 | char s[80]; |
449 | 449 | ||
450 | sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); | 450 | sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); |
451 | |||
452 | switch (irq_ptr->qib.qfmt) { | 451 | switch (irq_ptr->qib.qfmt) { |
453 | case QDIO_QETH_QFMT: | 452 | case QDIO_QETH_QFMT: |
454 | sprintf(s + strlen(s), "OSADE "); | 453 | sprintf(s + strlen(s), "OSA "); |
455 | break; | 454 | break; |
456 | case QDIO_ZFCP_QFMT: | 455 | case QDIO_ZFCP_QFMT: |
457 | sprintf(s + strlen(s), "ZFCP "); | 456 | sprintf(s + strlen(s), "ZFCP "); |
458 | break; | 457 | break; |
459 | case QDIO_IQDIO_QFMT: | 458 | case QDIO_IQDIO_QFMT: |
460 | sprintf(s + strlen(s), "HiperSockets "); | 459 | sprintf(s + strlen(s), "HS "); |
461 | break; | 460 | break; |
462 | } | 461 | } |
463 | sprintf(s + strlen(s), "using: "); | 462 | sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); |
464 | 463 | sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); | |
465 | if (!is_thinint_irq(irq_ptr)) | 464 | sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); |
466 | sprintf(s + strlen(s), "no"); | 465 | sprintf(s + strlen(s), "PCI:%d ", |
467 | sprintf(s + strlen(s), "AdapterInterrupts "); | 466 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); |
468 | if (!(irq_ptr->sch_token != 0)) | 467 | sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); |
469 | sprintf(s + strlen(s), "no"); | 468 | sprintf(s + strlen(s), "SIGA:"); |
470 | sprintf(s + strlen(s), "QEBSM "); | 469 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); |
471 | if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) | 470 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); |
472 | sprintf(s + strlen(s), "no"); | 471 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); |
473 | sprintf(s + strlen(s), "OutboundPCI "); | 472 | sprintf(s + strlen(s), "%s", |
474 | if (!css_general_characteristics.aif_tdd) | 473 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); |
475 | sprintf(s + strlen(s), "no"); | 474 | sprintf(s + strlen(s), "%s", |
476 | sprintf(s + strlen(s), "TDD\n"); | 475 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); |
477 | printk(KERN_INFO "qdio: %s", s); | 476 | sprintf(s + strlen(s), "%s", |
478 | 477 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); | |
479 | memset(s, 0, sizeof(s)); | ||
480 | sprintf(s, "%s SIGA required: ", cdev->dev.bus_id); | ||
481 | if (irq_ptr->siga_flag.input) | ||
482 | sprintf(s + strlen(s), "Read "); | ||
483 | if (irq_ptr->siga_flag.output) | ||
484 | sprintf(s + strlen(s), "Write "); | ||
485 | if (irq_ptr->siga_flag.sync) | ||
486 | sprintf(s + strlen(s), "Sync "); | ||
487 | if (!irq_ptr->siga_flag.no_sync_ti) | ||
488 | sprintf(s + strlen(s), "SyncAI "); | ||
489 | if (!irq_ptr->siga_flag.no_sync_out_ti) | ||
490 | sprintf(s + strlen(s), "SyncOutAI "); | ||
491 | if (!irq_ptr->siga_flag.no_sync_out_pci) | ||
492 | sprintf(s + strlen(s), "SyncOutPCI"); | ||
493 | sprintf(s + strlen(s), "\n"); | 478 | sprintf(s + strlen(s), "\n"); |
494 | printk(KERN_INFO "qdio: %s", s); | 479 | printk(KERN_INFO "%s", s); |
495 | } | 480 | } |
496 | 481 | ||
497 | int __init qdio_setup_init(void) | 482 | int __init qdio_setup_init(void) |
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index c4eaacd6e553..b872bfaf4bd2 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c | |||
@@ -427,7 +427,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) | |||
427 | goto msg_rejected; | 427 | goto msg_rejected; |
428 | } | 428 | } |
429 | 429 | ||
430 | if (t->speed_hz < orion_spi->min_speed) { | 430 | if (t->speed_hz && t->speed_hz < orion_spi->min_speed) { |
431 | dev_err(&spi->dev, | 431 | dev_err(&spi->dev, |
432 | "message rejected : " | 432 | "message rejected : " |
433 | "device min speed (%d Hz) exceeds " | 433 | "device min speed (%d Hz) exceeds " |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index c6299e8a041d..9cbff84b787d 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -2400,11 +2400,15 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) | |||
2400 | 2400 | ||
2401 | if (!fbcon_is_inactive(vc, info)) { | 2401 | if (!fbcon_is_inactive(vc, info)) { |
2402 | if (ops->blank_state != blank) { | 2402 | if (ops->blank_state != blank) { |
2403 | int ret = 1; | ||
2404 | |||
2403 | ops->blank_state = blank; | 2405 | ops->blank_state = blank; |
2404 | fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); | 2406 | fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); |
2405 | ops->cursor_flash = (!blank); | 2407 | ops->cursor_flash = (!blank); |
2406 | 2408 | ||
2407 | if (fb_blank(info, blank)) | 2409 | if (info->fbops->fb_blank) |
2410 | ret = info->fbops->fb_blank(blank, info); | ||
2411 | if (ret) | ||
2408 | fbcon_generic_blank(vc, info, blank); | 2412 | fbcon_generic_blank(vc, info, blank); |
2409 | } | 2413 | } |
2410 | 2414 | ||
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h new file mode 100644 index 000000000000..fa4328f9124f --- /dev/null +++ b/include/asm-mips/cevt-r4k.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2008 Kevin D. Kissell | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * Definitions used for common event timer implementation | ||
11 | * for MIPS 4K-type processors and their MIPS MT variants. | ||
12 | * Avoids unsightly extern declarations in C files. | ||
13 | */ | ||
14 | #ifndef __ASM_CEVT_R4K_H | ||
15 | #define __ASM_CEVT_R4K_H | ||
16 | |||
17 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | ||
18 | |||
19 | void mips_event_handler(struct clock_event_device *dev); | ||
20 | int c0_compare_int_usable(void); | ||
21 | void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *); | ||
22 | irqreturn_t c0_compare_interrupt(int, void *); | ||
23 | |||
24 | extern struct irqaction c0_compare_irqaction; | ||
25 | extern int cp0_timer_irq_installed; | ||
26 | |||
27 | /* | ||
28 | * Possibly handle a performance counter interrupt. | ||
29 | * Return true if the timer interrupt should not be checked | ||
30 | */ | ||
31 | |||
32 | static inline int handle_perf_irq(int r2) | ||
33 | { | ||
34 | /* | ||
35 | * The performance counter overflow interrupt may be shared with the | ||
36 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
37 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
38 | * and we can't reliably determine if a counter interrupt has also | ||
39 | * happened (!r2) then don't check for a timer interrupt. | ||
40 | */ | ||
41 | return (cp0_perfcount_irq < 0) && | ||
42 | perf_irq() == IRQ_HANDLED && | ||
43 | !r2; | ||
44 | } | ||
45 | |||
46 | #endif /* __ASM_CEVT_R4K_H */ | ||
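The handle_perf_irq() helper added above documents when a shared performance-counter/timer interrupt should make the handler skip the timer check. As a rough, hedged sketch of how a c0 compare handler might consume that result (this is illustrative only, not part of the patch; the handler name is invented, while cpu_has_mips_r2, read_c0_cause() and CAUSEF_TI come from the usual MIPS headers):

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;

	/* Perf counter consumed the interrupt and we cannot tell whether
	 * the timer also fired: leave the timer alone on this pass. */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	/* On R2 cores Cause.TI says whether the timer really fired. */
	if (r2 && !(read_c0_cause() & CAUSEF_TI))
		return IRQ_NONE;

	/* ... acknowledge by rewriting Compare and invoke the clockevent
	 * handler registered in mips_clockevent_device ... */
	return IRQ_HANDLED;
}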
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h index 881e8866501d..701ec0ba8fa9 100644 --- a/include/asm-mips/irqflags.h +++ b/include/asm-mips/irqflags.h | |||
@@ -38,8 +38,17 @@ __asm__( | |||
38 | " .set pop \n" | 38 | " .set pop \n" |
39 | " .endm"); | 39 | " .endm"); |
40 | 40 | ||
41 | extern void smtc_ipi_replay(void); | ||
42 | |||
41 | static inline void raw_local_irq_enable(void) | 43 | static inline void raw_local_irq_enable(void) |
42 | { | 44 | { |
45 | #ifdef CONFIG_MIPS_MT_SMTC | ||
46 | /* | ||
47 | * SMTC kernel needs to do a software replay of queued | ||
48 | * IPIs, at the cost of call overhead on each local_irq_enable() | ||
49 | */ | ||
50 | smtc_ipi_replay(); | ||
51 | #endif | ||
43 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
44 | "raw_local_irq_enable" | 53 | "raw_local_irq_enable" |
45 | : /* no outputs */ | 54 | : /* no outputs */ |
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void) | |||
47 | : "memory"); | 56 | : "memory"); |
48 | } | 57 | } |
49 | 58 | ||
59 | |||
50 | /* | 60 | /* |
51 | * For cli() we have to insert nops to make sure that the new value | 61 | * For cli() we have to insert nops to make sure that the new value |
52 | * has actually arrived in the status register before the end of this | 62 | * has actually arrived in the status register before the end of this |
@@ -185,15 +195,14 @@ __asm__( | |||
185 | " .set pop \n" | 195 | " .set pop \n" |
186 | " .endm \n"); | 196 | " .endm \n"); |
187 | 197 | ||
188 | extern void smtc_ipi_replay(void); | ||
189 | 198 | ||
190 | static inline void raw_local_irq_restore(unsigned long flags) | 199 | static inline void raw_local_irq_restore(unsigned long flags) |
191 | { | 200 | { |
192 | unsigned long __tmp1; | 201 | unsigned long __tmp1; |
193 | 202 | ||
194 | #ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | 203 | #ifdef CONFIG_MIPS_MT_SMTC |
195 | /* | 204 | /* |
196 | * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred | 205 | * SMTC kernel needs to do a software replay of queued |
197 | * IPIs, at the cost of branch and call overhead on each | 206 | * IPIs, at the cost of branch and call overhead on each |
198 | * local_irq_restore() | 207 | * local_irq_restore() |
199 | */ | 208 | */ |
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags) | |||
208 | : "memory"); | 217 | : "memory"); |
209 | } | 218 | } |
210 | 219 | ||
220 | static inline void __raw_local_irq_restore(unsigned long flags) | ||
221 | { | ||
222 | unsigned long __tmp1; | ||
223 | |||
224 | __asm__ __volatile__( | ||
225 | "raw_local_irq_restore\t%0" | ||
226 | : "=r" (__tmp1) | ||
227 | : "0" (flags) | ||
228 | : "memory"); | ||
229 | } | ||
230 | |||
211 | static inline int raw_irqs_disabled_flags(unsigned long flags) | 231 | static inline int raw_irqs_disabled_flags(unsigned long flags) |
212 | { | 232 | { |
213 | #ifdef CONFIG_MIPS_MT_SMTC | 233 | #ifdef CONFIG_MIPS_MT_SMTC |
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h index a46f8e258e6b..979866000da4 100644 --- a/include/asm-mips/mipsregs.h +++ b/include/asm-mips/mipsregs.h | |||
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \ | |||
1462 | { \ | 1462 | { \ |
1463 | unsigned int res; \ | 1463 | unsigned int res; \ |
1464 | unsigned int omt; \ | 1464 | unsigned int omt; \ |
1465 | unsigned int flags; \ | 1465 | unsigned long flags; \ |
1466 | \ | 1466 | \ |
1467 | local_irq_save(flags); \ | 1467 | local_irq_save(flags); \ |
1468 | omt = __dmt(); \ | 1468 | omt = __dmt(); \ |
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \ | |||
1480 | { \ | 1480 | { \ |
1481 | unsigned int res; \ | 1481 | unsigned int res; \ |
1482 | unsigned int omt; \ | 1482 | unsigned int omt; \ |
1483 | unsigned int flags; \ | 1483 | unsigned long flags; \ |
1484 | \ | 1484 | \ |
1485 | local_irq_save(flags); \ | 1485 | local_irq_save(flags); \ |
1486 | omt = __dmt(); \ | 1486 | omt = __dmt(); \ |
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \ | |||
1498 | { \ | 1498 | { \ |
1499 | unsigned int res; \ | 1499 | unsigned int res; \ |
1500 | unsigned int omt; \ | 1500 | unsigned int omt; \ |
1501 | unsigned int flags; \ | 1501 | unsigned long flags; \ |
1502 | \ | 1502 | \ |
1503 | local_irq_save(flags); \ | 1503 | local_irq_save(flags); \ |
1504 | \ | 1504 | \ |
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h index 3639b28f80db..ea60bf08dcb0 100644 --- a/include/asm-mips/smtc.h +++ b/include/asm-mips/smtc.h | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <asm/mips_mt.h> | 8 | #include <asm/mips_mt.h> |
9 | #include <asm/smtc_ipi.h> | ||
9 | 10 | ||
10 | /* | 11 | /* |
11 | * System-wide SMTC status information | 12 | * System-wide SMTC status information |
@@ -38,14 +39,15 @@ struct mm_struct; | |||
38 | struct task_struct; | 39 | struct task_struct; |
39 | 40 | ||
40 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | 41 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); |
41 | 42 | void self_ipi(struct smtc_ipi *); | |
42 | void smtc_flush_tlb_asid(unsigned long asid); | 43 | void smtc_flush_tlb_asid(unsigned long asid); |
43 | extern int mipsmt_build_cpu_map(int startslot); | 44 | extern int smtc_build_cpu_map(int startslot); |
44 | extern void mipsmt_prepare_cpus(void); | 45 | extern void smtc_prepare_cpus(int cpus); |
45 | extern void smtc_smp_finish(void); | 46 | extern void smtc_smp_finish(void); |
46 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | 47 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); |
47 | extern void smtc_cpus_done(void); | 48 | extern void smtc_cpus_done(void); |
48 | 49 | ||
50 | |||
49 | /* | 51 | /* |
50 | * Sharing the TLB between multiple VPEs means that the | 52 | * Sharing the TLB between multiple VPEs means that the |
51 | * "random" index selection function is not allowed to | 53 | * "random" index selection function is not allowed to |
diff --git a/include/asm-mips/sn/mapped_kernel.h b/include/asm-mips/sn/mapped_kernel.h index c3dd5d0d525f..721496a0bb92 100644 --- a/include/asm-mips/sn/mapped_kernel.h +++ b/include/asm-mips/sn/mapped_kernel.h | |||
@@ -5,6 +5,8 @@ | |||
5 | #ifndef __ASM_SN_MAPPED_KERNEL_H | 5 | #ifndef __ASM_SN_MAPPED_KERNEL_H |
6 | #define __ASM_SN_MAPPED_KERNEL_H | 6 | #define __ASM_SN_MAPPED_KERNEL_H |
7 | 7 | ||
8 | #include <linux/mmzone.h> | ||
9 | |||
8 | /* | 10 | /* |
9 | * Note on how mapped kernels work: the text and data section is | 11 | * Note on how mapped kernels work: the text and data section is |
10 | * compiled at cksseg segment (LOADADDR = 0xc001c000), and the | 12 | * compiled at cksseg segment (LOADADDR = 0xc001c000), and the |
@@ -29,10 +31,8 @@ | |||
29 | #define MAPPED_ADDR_RO_TO_PHYS(x) (x - REP_BASE) | 31 | #define MAPPED_ADDR_RO_TO_PHYS(x) (x - REP_BASE) |
30 | #define MAPPED_ADDR_RW_TO_PHYS(x) (x - REP_BASE - 16777216) | 32 | #define MAPPED_ADDR_RW_TO_PHYS(x) (x - REP_BASE - 16777216) |
31 | 33 | ||
32 | #define MAPPED_KERN_RO_PHYSBASE(n) \ | 34 | #define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr) |
33 | (PLAT_NODE_DATA(n)->kern_vars.kv_ro_baseaddr) | 35 | #define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr) |
34 | #define MAPPED_KERN_RW_PHYSBASE(n) \ | ||
35 | (PLAT_NODE_DATA(n)->kern_vars.kv_rw_baseaddr) | ||
36 | 36 | ||
37 | #define MAPPED_KERN_RO_TO_PHYS(x) \ | 37 | #define MAPPED_KERN_RO_TO_PHYS(x) \ |
38 | ((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \ | 38 | ((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \ |
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index 051e1af0bb95..4c37c4e5f72e 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h | |||
@@ -297,14 +297,31 @@ | |||
297 | #ifdef CONFIG_MIPS_MT_SMTC | 297 | #ifdef CONFIG_MIPS_MT_SMTC |
298 | .set mips32r2 | 298 | .set mips32r2 |
299 | /* | 299 | /* |
300 | * This may not really be necessary if ints are already | 300 | * We need to make sure the read-modify-write |
301 | * inhibited here. | 301 | * of Status below isn't perturbed by an interrupt |
302 | * or cross-TC access, so we need to do at least a DMT, | ||
303 | * protected by an interrupt-inhibit. But setting IXMT | ||
304 | * also creates a few-cycle window where an IPI could | ||
305 | * be queued and not be detected before potentially | ||
306 | * returning to a WAIT or user-mode loop. It must be | ||
307 | * replayed. | ||
308 | * | ||
309 | * We're in the middle of a context switch, and | ||
310 | * we can't dispatch it directly without trashing | ||
311 | * some registers, so we'll try to detect this unlikely | ||
312 | * case and program a software interrupt in the VPE, | ||
313 | * as would be done for a cross-VPE IPI. To accommodate | ||
314 | * the handling of that case, we're doing a DVPE instead | ||
315 | * of just a DMT here to protect against other threads. | ||
316 | * This is a lot of cruft to cover a tiny window. | ||
317 | * If you can find a better design, implement it! | ||
318 | * | ||
302 | */ | 319 | */ |
303 | mfc0 v0, CP0_TCSTATUS | 320 | mfc0 v0, CP0_TCSTATUS |
304 | ori v0, TCSTATUS_IXMT | 321 | ori v0, TCSTATUS_IXMT |
305 | mtc0 v0, CP0_TCSTATUS | 322 | mtc0 v0, CP0_TCSTATUS |
306 | _ehb | 323 | _ehb |
307 | DMT 5 # dmt a1 | 324 | DVPE 5 # dvpe a1 |
308 | jal mips_ihb | 325 | jal mips_ihb |
309 | #endif /* CONFIG_MIPS_MT_SMTC */ | 326 | #endif /* CONFIG_MIPS_MT_SMTC */ |
310 | mfc0 a0, CP0_STATUS | 327 | mfc0 a0, CP0_STATUS |
@@ -325,17 +342,50 @@ | |||
325 | */ | 342 | */ |
326 | LONG_L v1, PT_TCSTATUS(sp) | 343 | LONG_L v1, PT_TCSTATUS(sp) |
327 | _ehb | 344 | _ehb |
328 | mfc0 v0, CP0_TCSTATUS | 345 | mfc0 a0, CP0_TCSTATUS |
329 | andi v1, TCSTATUS_IXMT | 346 | andi v1, TCSTATUS_IXMT |
330 | /* We know that TCStatus.IXMT should be set from above */ | 347 | bnez v1, 0f |
331 | xori v0, v0, TCSTATUS_IXMT | 348 | |
332 | or v0, v0, v1 | 349 | /* |
333 | mtc0 v0, CP0_TCSTATUS | 350 | * We'd like to detect any IPIs queued in the tiny window |
334 | _ehb | 351 | * above and request a software interrupt to service them |
335 | andi a1, a1, VPECONTROL_TE | 352 | * when we ERET. |
353 | * | ||
354 | * Computing the offset into the IPIQ array of the executing | ||
355 | * TC's IPI queue in-line would be tedious. We use part of | ||
356 | * the TCContext register to hold 16 bits of offset that we | ||
357 | * can add in-line to find the queue head. | ||
358 | */ | ||
359 | mfc0 v0, CP0_TCCONTEXT | ||
360 | la a2, IPIQ | ||
361 | srl v0, v0, 16 | ||
362 | addu a2, a2, v0 | ||
363 | LONG_L v0, 0(a2) | ||
364 | beqz v0, 0f | ||
365 | /* | ||
366 | * If we have a queue, provoke dispatch within the VPE by setting C_SW1 | ||
367 | */ | ||
368 | mfc0 v0, CP0_CAUSE | ||
369 | ori v0, v0, C_SW1 | ||
370 | mtc0 v0, CP0_CAUSE | ||
371 | 0: | ||
372 | /* | ||
373 | * This test should really never branch but | ||
374 | * let's be prudent here. Having atomized | ||
375 | * the shared register modifications, we can | ||
376 | * now EVPE, and must do so before interrupts | ||
377 | * are potentially re-enabled. | ||
378 | */ | ||
379 | andi a1, a1, MVPCONTROL_EVP | ||
336 | beqz a1, 1f | 380 | beqz a1, 1f |
337 | emt | 381 | evpe |
338 | 1: | 382 | 1: |
383 | /* We know that TCStatus.IXMT should be set from above */ | ||
384 | xori a0, a0, TCSTATUS_IXMT | ||
385 | or a0, a0, v1 | ||
386 | mtc0 a0, CP0_TCSTATUS | ||
387 | _ehb | ||
388 | |||
339 | .set mips0 | 389 | .set mips0 |
340 | #endif /* CONFIG_MIPS_MT_SMTC */ | 390 | #endif /* CONFIG_MIPS_MT_SMTC */ |
341 | LONG_L v1, PT_EPC(sp) | 391 | LONG_L v1, PT_EPC(sp) |
diff --git a/include/linux/completion.h b/include/linux/completion.h index 02ef8835999c..4a6b604ef7e4 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
@@ -10,6 +10,18 @@ | |||
10 | 10 | ||
11 | #include <linux/wait.h> | 11 | #include <linux/wait.h> |
12 | 12 | ||
13 | /** | ||
14 | * struct completion - structure used to maintain state for a "completion" | ||
15 | * | ||
16 | * This is the opaque structure used to maintain the state for a "completion". | ||
17 | * Completions currently use a FIFO to queue threads that have to wait for | ||
18 | * the "completion" event. | ||
19 | * | ||
20 | * See also: complete(), wait_for_completion() (and friends _timeout, | ||
21 | * _interruptible, _interruptible_timeout, and _killable), init_completion(), | ||
22 | * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and | ||
23 | * INIT_COMPLETION(). | ||
24 | */ | ||
13 | struct completion { | 25 | struct completion { |
14 | unsigned int done; | 26 | unsigned int done; |
15 | wait_queue_head_t wait; | 27 | wait_queue_head_t wait; |
@@ -21,6 +33,14 @@ struct completion { | |||
21 | #define COMPLETION_INITIALIZER_ONSTACK(work) \ | 33 | #define COMPLETION_INITIALIZER_ONSTACK(work) \ |
22 | ({ init_completion(&work); work; }) | 34 | ({ init_completion(&work); work; }) |
23 | 35 | ||
36 | /** | ||
37 | * DECLARE_COMPLETION: - declare and initialize a completion structure | ||
38 | * @work: identifier for the completion structure | ||
39 | * | ||
40 | * This macro declares and initializes a completion structure. Generally used | ||
41 | * for static declarations. You should use the _ONSTACK variant for automatic | ||
42 | * variables. | ||
43 | */ | ||
24 | #define DECLARE_COMPLETION(work) \ | 44 | #define DECLARE_COMPLETION(work) \ |
25 | struct completion work = COMPLETION_INITIALIZER(work) | 45 | struct completion work = COMPLETION_INITIALIZER(work) |
26 | 46 | ||
@@ -29,6 +49,13 @@ struct completion { | |||
29 | * completions - so we use the _ONSTACK() variant for those that | 49 | * completions - so we use the _ONSTACK() variant for those that |
30 | * are on the kernel stack: | 50 | * are on the kernel stack: |
31 | */ | 51 | */ |
52 | /** | ||
53 | * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure | ||
54 | * @work: identifier for the completion structure | ||
55 | * | ||
56 | * This macro declares and initializes a completion structure on the kernel | ||
57 | * stack. | ||
58 | */ | ||
32 | #ifdef CONFIG_LOCKDEP | 59 | #ifdef CONFIG_LOCKDEP |
33 | # define DECLARE_COMPLETION_ONSTACK(work) \ | 60 | # define DECLARE_COMPLETION_ONSTACK(work) \ |
34 | struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) | 61 | struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) |
@@ -36,6 +63,13 @@ struct completion { | |||
36 | # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) | 63 | # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) |
37 | #endif | 64 | #endif |
38 | 65 | ||
66 | /** | ||
67 | * init_completion: - Initialize a dynamically allocated completion | ||
68 | * @x: completion structure that is to be initialized | ||
69 | * | ||
70 | * This inline function will initialize a dynamically created completion | ||
71 | * structure. | ||
72 | */ | ||
39 | static inline void init_completion(struct completion *x) | 73 | static inline void init_completion(struct completion *x) |
40 | { | 74 | { |
41 | x->done = 0; | 75 | x->done = 0; |
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x); | |||
55 | extern void complete(struct completion *); | 89 | extern void complete(struct completion *); |
56 | extern void complete_all(struct completion *); | 90 | extern void complete_all(struct completion *); |
57 | 91 | ||
92 | /** | ||
93 | * INIT_COMPLETION: - reinitialize a completion structure | ||
94 | * @x: completion structure to be reinitialized | ||
95 | * | ||
96 | * This macro should be used to reinitialize a completion structure so it can | ||
97 | * be reused. This is especially important after complete_all() is used. | ||
98 | */ | ||
58 | #define INIT_COMPLETION(x) ((x).done = 0) | 99 | #define INIT_COMPLETION(x) ((x).done = 0) |
59 | 100 | ||
60 | 101 | ||
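Since the new kernel-doc above spells out the intended pairing of the declare/init helpers with complete() and wait_for_completion(), a minimal usage sketch may help (illustrative only; the worker function and names are invented, and kthread_run() is used merely as a convenient way to obtain a second context):

#include <linux/completion.h>
#include <linux/kthread.h>

static int example_worker(void *data)
{
	struct completion *done = data;

	/* ... perform the work the waiter is blocked on ... */
	complete(done);			/* wake up the waiter */
	return 0;
}

static void example_wait_for_worker(void)
{
	/* automatic variable, so use the _ONSTACK variant as documented */
	DECLARE_COMPLETION_ONSTACK(done);

	kthread_run(example_worker, &done, "example-worker");
	wait_for_completion(&done);	/* sleeps until complete(&done) runs */
}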
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d7faf8808497..c2747ac2ae43 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) | |||
69 | #endif | 69 | #endif |
70 | 70 | ||
71 | int cpu_up(unsigned int cpu); | 71 | int cpu_up(unsigned int cpu); |
72 | void notify_cpu_starting(unsigned int cpu); | ||
72 | extern void cpu_hotplug_init(void); | 73 | extern void cpu_hotplug_init(void); |
73 | extern void cpu_maps_update_begin(void); | 74 | extern void cpu_maps_update_begin(void); |
74 | extern void cpu_maps_update_done(void); | 75 | extern void cpu_maps_update_done(void); |
diff --git a/include/linux/ide.h b/include/linux/ide.h index 1524829f73f2..6514db8fd2e4 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -366,7 +366,9 @@ enum { | |||
366 | /* Currently on a filemark */ | 366 | /* Currently on a filemark */ |
367 | IDE_AFLAG_FILEMARK = (1 << 25), | 367 | IDE_AFLAG_FILEMARK = (1 << 25), |
368 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ | 368 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ |
369 | IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) | 369 | IDE_AFLAG_MEDIUM_PRESENT = (1 << 26), |
370 | |||
371 | IDE_AFLAG_NO_AUTOCLOSE = (1 << 27), | ||
370 | }; | 372 | }; |
371 | 373 | ||
372 | struct ide_drive_s { | 374 | struct ide_drive_s { |
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index da2698b0fdd1..b86fa2ffca0c 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret) | |||
213 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ | 213 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ |
214 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ | 214 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ |
215 | #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, | 215 | #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, |
216 | * not handling interrupts, soon dead */ | 216 | * not handling interrupts, soon dead. |
217 | * Called on the dying cpu, interrupts | ||
218 | * are already disabled. Must not | ||
219 | * sleep, must not fail */ | ||
217 | #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug | 220 | #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug |
218 | * lock is dropped */ | 221 | * lock is dropped */ |
222 | #define CPU_STARTING 0x000A /* CPU (unsigned)v soon running. | ||
223 | * Called on the new cpu, just before | ||
224 | * enabling interrupts. Must not sleep, | ||
225 | * must not fail */ | ||
219 | 226 | ||
220 | /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend | 227 |
221 | * operation in progress | 228 | * operation in progress |
@@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret) | |||
229 | #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) | 236 | #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) |
230 | #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) | 237 | #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) |
231 | #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) | 238 | #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) |
239 | #define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN) | ||
232 | 240 | ||
233 | /* Hibernation and suspend events */ | 241 | /* Hibernation and suspend events */ |
234 | #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ | 242 | #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ |
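The new CPU_STARTING/CPU_STARTING_FROZEN events, like CPU_DYING, run on the affected cpu with interrupts disabled, so a subscriber must neither sleep nor fail. A hedged sketch of a notifier callback reacting to them (the per-cpu setup/teardown helpers here are hypothetical):

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		/* runs on the incoming cpu, irqs off: no sleeping, no failing */
		example_init_percpu_state(cpu);		/* hypothetical helper */
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		example_free_percpu_state(cpu);		/* hypothetical helper */
		break;
	}
	return NOTIFY_OK;
}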
diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 5afc1b23346d..cf793bbbd05e 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h | |||
@@ -104,8 +104,8 @@ struct prop_local_single { | |||
104 | * snapshot of the last seen global state | 104 | * snapshot of the last seen global state |
105 | * and a lock protecting this state | 105 | * and a lock protecting this state |
106 | */ | 106 | */ |
107 | int shift; | ||
108 | unsigned long period; | 107 | unsigned long period; |
108 | int shift; | ||
109 | spinlock_t lock; /* protect the snapshot state */ | 109 | spinlock_t lock; /* protect the snapshot state */ |
110 | }; | 110 | }; |
111 | 111 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 3d9120c5ad15..d8e699b55858 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -451,8 +451,8 @@ struct signal_struct { | |||
451 | * - everyone except group_exit_task is stopped during signal delivery | 451 | * - everyone except group_exit_task is stopped during signal delivery |
452 | * of fatal signals, group_exit_task processes the signal. | 452 | * of fatal signals, group_exit_task processes the signal. |
453 | */ | 453 | */ |
454 | struct task_struct *group_exit_task; | ||
455 | int notify_count; | 454 | int notify_count; |
455 | struct task_struct *group_exit_task; | ||
456 | 456 | ||
457 | /* thread group stop support, overloads group_exit_code too */ | 457 | /* thread group stop support, overloads group_exit_code too */ |
458 | int group_stop_count; | 458 | int group_stop_count; |
@@ -897,7 +897,7 @@ struct sched_class { | |||
897 | void (*yield_task) (struct rq *rq); | 897 | void (*yield_task) (struct rq *rq); |
898 | int (*select_task_rq)(struct task_struct *p, int sync); | 898 | int (*select_task_rq)(struct task_struct *p, int sync); |
899 | 899 | ||
900 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); | 900 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); |
901 | 901 | ||
902 | struct task_struct * (*pick_next_task) (struct rq *rq); | 902 | struct task_struct * (*pick_next_task) (struct rq *rq); |
903 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 903 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
@@ -1010,8 +1010,8 @@ struct sched_entity { | |||
1010 | 1010 | ||
1011 | struct sched_rt_entity { | 1011 | struct sched_rt_entity { |
1012 | struct list_head run_list; | 1012 | struct list_head run_list; |
1013 | unsigned int time_slice; | ||
1014 | unsigned long timeout; | 1013 | unsigned long timeout; |
1014 | unsigned int time_slice; | ||
1015 | int nr_cpus_allowed; | 1015 | int nr_cpus_allowed; |
1016 | 1016 | ||
1017 | struct sched_rt_entity *back; | 1017 | struct sched_rt_entity *back; |
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d78..b106fd8e0d5c 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_STACKTRACE_H | 1 | #ifndef __LINUX_STACKTRACE_H |
2 | #define __LINUX_STACKTRACE_H | 2 | #define __LINUX_STACKTRACE_H |
3 | 3 | ||
4 | struct task_struct; | ||
5 | |||
4 | #ifdef CONFIG_STACKTRACE | 6 | #ifdef CONFIG_STACKTRACE |
5 | struct stack_trace { | 7 | struct stack_trace { |
6 | unsigned int nr_entries, max_entries; | 8 | unsigned int nr_entries, max_entries; |
diff --git a/init/main.c b/init/main.c index f6f7042331dc..3820323c4c84 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -708,7 +708,7 @@ int do_one_initcall(initcall_t fn) | |||
708 | int result; | 708 | int result; |
709 | 709 | ||
710 | if (initcall_debug) { | 710 | if (initcall_debug) { |
711 | print_fn_descriptor_symbol("calling %s\n", fn); | 711 | printk("calling %pF\n", fn); |
712 | t0 = ktime_get(); | 712 | t0 = ktime_get(); |
713 | } | 713 | } |
714 | 714 | ||
@@ -718,8 +718,8 @@ int do_one_initcall(initcall_t fn) | |||
718 | t1 = ktime_get(); | 718 | t1 = ktime_get(); |
719 | delta = ktime_sub(t1, t0); | 719 | delta = ktime_sub(t1, t0); |
720 | 720 | ||
721 | print_fn_descriptor_symbol("initcall %s", fn); | 721 | printk("initcall %pF returned %d after %Ld msecs\n", |
722 | printk(" returned %d after %Ld msecs\n", result, | 722 | fn, result, |
723 | (unsigned long long) delta.tv64 >> 20); | 723 | (unsigned long long) delta.tv64 >> 20); |
724 | } | 724 | } |
725 | 725 | ||
@@ -737,8 +737,7 @@ int do_one_initcall(initcall_t fn) | |||
737 | local_irq_enable(); | 737 | local_irq_enable(); |
738 | } | 738 | } |
739 | if (msgbuf[0]) { | 739 | if (msgbuf[0]) { |
740 | print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn); | 740 | printk("initcall %pF returned with %s\n", fn, msgbuf); |
741 | printk(" returned with %s\n", msgbuf); | ||
742 | } | 741 | } |
743 | 742 | ||
744 | return result; | 743 | return result; |
diff --git a/kernel/cpu.c b/kernel/cpu.c index f17e9854c246..86d49045daed 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -199,13 +199,14 @@ static int __ref take_cpu_down(void *_param) | |||
199 | struct take_cpu_down_param *param = _param; | 199 | struct take_cpu_down_param *param = _param; |
200 | int err; | 200 | int err; |
201 | 201 | ||
202 | raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, | ||
203 | param->hcpu); | ||
204 | /* Ensure this CPU doesn't handle any more interrupts. */ | 202 | /* Ensure this CPU doesn't handle any more interrupts. */ |
205 | err = __cpu_disable(); | 203 | err = __cpu_disable(); |
206 | if (err < 0) | 204 | if (err < 0) |
207 | return err; | 205 | return err; |
208 | 206 | ||
207 | raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, | ||
208 | param->hcpu); | ||
209 | |||
209 | /* Force idle task to run as soon as we yield: it should | 210 | /* Force idle task to run as soon as we yield: it should |
210 | immediately notice cpu is offline and die quickly. */ | 211 | immediately notice cpu is offline and die quickly. */ |
211 | sched_idle_next(); | 212 | sched_idle_next(); |
@@ -453,6 +454,25 @@ out: | |||
453 | } | 454 | } |
454 | #endif /* CONFIG_PM_SLEEP_SMP */ | 455 | #endif /* CONFIG_PM_SLEEP_SMP */ |
455 | 456 | ||
457 | /** | ||
458 | * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers | ||
459 | * @cpu: cpu that just started | ||
460 | * | ||
461 | * This function calls the cpu_chain notifiers with CPU_STARTING. | ||
462 | * It must be called by the arch code on the new cpu, before the new cpu | ||
463 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). | ||
464 | */ | ||
465 | void notify_cpu_starting(unsigned int cpu) | ||
466 | { | ||
467 | unsigned long val = CPU_STARTING; | ||
468 | |||
469 | #ifdef CONFIG_PM_SLEEP_SMP | ||
470 | if (cpu_isset(cpu, frozen_cpus)) | ||
471 | val = CPU_STARTING_FROZEN; | ||
472 | #endif /* CONFIG_PM_SLEEP_SMP */ | ||
473 | raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); | ||
474 | } | ||
475 | |||
456 | #endif /* CONFIG_SMP */ | 476 | #endif /* CONFIG_SMP */ |
457 | 477 | ||
458 | /* | 478 | /* |
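The kernel-doc for notify_cpu_starting() above pins down the call site: on the new cpu, before it enables interrupts and before __cpu_up() returns on the boot cpu. A rough sketch of where that lands in an architecture's secondary-CPU bringup path (the function name and surrounding steps are illustrative, not taken from this patch):

void __cpuinit example_start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	/* ... low-level init, per-cpu areas, calibration ... */

	notify_cpu_starting(cpu);	/* CPU_STARTING chain, irqs still off */

	cpu_set(cpu, cpu_online_map);	/* only now advertise the cpu */
	local_irq_enable();

	cpu_idle();			/* never returns */
}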
diff --git a/kernel/kgdb.c b/kernel/kgdb.c index 25d955dbb989..e4dcfb2272a4 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c | |||
@@ -590,6 +590,7 @@ static void kgdb_wait(struct pt_regs *regs) | |||
590 | 590 | ||
591 | /* Signal the primary CPU that we are done: */ | 591 | /* Signal the primary CPU that we are done: */ |
592 | atomic_set(&cpu_in_kgdb[cpu], 0); | 592 | atomic_set(&cpu_in_kgdb[cpu], 0); |
593 | touch_softlockup_watchdog(); | ||
593 | clocksource_touch_watchdog(); | 594 | clocksource_touch_watchdog(); |
594 | local_irq_restore(flags); | 595 | local_irq_restore(flags); |
595 | } | 596 | } |
@@ -1432,6 +1433,7 @@ acquirelock: | |||
1432 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { | 1433 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { |
1433 | 1434 | ||
1434 | atomic_set(&kgdb_active, -1); | 1435 | atomic_set(&kgdb_active, -1); |
1436 | touch_softlockup_watchdog(); | ||
1435 | clocksource_touch_watchdog(); | 1437 | clocksource_touch_watchdog(); |
1436 | local_irq_restore(flags); | 1438 | local_irq_restore(flags); |
1437 | 1439 | ||
@@ -1524,6 +1526,7 @@ acquirelock: | |||
1524 | kgdb_restore: | 1526 | kgdb_restore: |
1525 | /* Free kgdb_active */ | 1527 | /* Free kgdb_active */ |
1526 | atomic_set(&kgdb_active, -1); | 1528 | atomic_set(&kgdb_active, -1); |
1529 | touch_softlockup_watchdog(); | ||
1527 | clocksource_touch_watchdog(); | 1530 | clocksource_touch_watchdog(); |
1528 | local_irq_restore(flags); | 1531 | local_irq_restore(flags); |
1529 | 1532 | ||
diff --git a/kernel/sched.c b/kernel/sched.c index ad1962dc0aa2..9715f4ce6cfe 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -204,11 +204,16 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; | 204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; |
205 | } | 205 | } |
206 | 206 | ||
207 | static inline int rt_bandwidth_enabled(void) | ||
208 | { | ||
209 | return sysctl_sched_rt_runtime >= 0; | ||
210 | } | ||
211 | |||
207 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | 212 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) |
208 | { | 213 | { |
209 | ktime_t now; | 214 | ktime_t now; |
210 | 215 | ||
211 | if (rt_b->rt_runtime == RUNTIME_INF) | 216 | if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) |
212 | return; | 217 | return; |
213 | 218 | ||
214 | if (hrtimer_active(&rt_b->rt_period_timer)) | 219 | if (hrtimer_active(&rt_b->rt_period_timer)) |
@@ -298,9 +303,9 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; | |||
298 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 303 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
299 | static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; | 304 | static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; |
300 | #endif /* CONFIG_RT_GROUP_SCHED */ | 305 | #endif /* CONFIG_RT_GROUP_SCHED */ |
301 | #else /* !CONFIG_FAIR_GROUP_SCHED */ | 306 | #else /* !CONFIG_USER_SCHED */ |
302 | #define root_task_group init_task_group | 307 | #define root_task_group init_task_group |
303 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 308 | #endif /* CONFIG_USER_SCHED */ |
304 | 309 | ||
305 | /* task_group_lock serializes add/remove of task groups and also changes to | 310 | /* task_group_lock serializes add/remove of task groups and also changes to |
306 | * a task group's cpu shares. | 311 | * a task group's cpu shares. |
@@ -604,9 +609,9 @@ struct rq { | |||
604 | 609 | ||
605 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 610 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
606 | 611 | ||
607 | static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) | 612 | static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync) |
608 | { | 613 | { |
609 | rq->curr->sched_class->check_preempt_curr(rq, p); | 614 | rq->curr->sched_class->check_preempt_curr(rq, p, sync); |
610 | } | 615 | } |
611 | 616 | ||
612 | static inline int cpu_of(struct rq *rq) | 617 | static inline int cpu_of(struct rq *rq) |
@@ -1102,7 +1107,7 @@ static void hrtick_start(struct rq *rq, u64 delay) | |||
1102 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); | 1107 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); |
1103 | } | 1108 | } |
1104 | 1109 | ||
1105 | static void init_hrtick(void) | 1110 | static inline void init_hrtick(void) |
1106 | { | 1111 | { |
1107 | } | 1112 | } |
1108 | #endif /* CONFIG_SMP */ | 1113 | #endif /* CONFIG_SMP */ |
@@ -1121,7 +1126,7 @@ static void init_rq_hrtick(struct rq *rq) | |||
1121 | rq->hrtick_timer.function = hrtick; | 1126 | rq->hrtick_timer.function = hrtick; |
1122 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | 1127 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
1123 | } | 1128 | } |
1124 | #else | 1129 | #else /* CONFIG_SCHED_HRTICK */ |
1125 | static inline void hrtick_clear(struct rq *rq) | 1130 | static inline void hrtick_clear(struct rq *rq) |
1126 | { | 1131 | { |
1127 | } | 1132 | } |
@@ -1133,7 +1138,7 @@ static inline void init_rq_hrtick(struct rq *rq) | |||
1133 | static inline void init_hrtick(void) | 1138 | static inline void init_hrtick(void) |
1134 | { | 1139 | { |
1135 | } | 1140 | } |
1136 | #endif | 1141 | #endif /* CONFIG_SCHED_HRTICK */ |
1137 | 1142 | ||
1138 | /* | 1143 | /* |
1139 | * resched_task - mark a task 'to be rescheduled now'. | 1144 | * resched_task - mark a task 'to be rescheduled now'. |
@@ -1380,38 +1385,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load) | |||
1380 | update_load_sub(&rq->load, load); | 1385 | update_load_sub(&rq->load, load); |
1381 | } | 1386 | } |
1382 | 1387 | ||
1383 | #ifdef CONFIG_SMP | 1388 | #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED) |
1384 | static unsigned long source_load(int cpu, int type); | 1389 | typedef int (*tg_visitor)(struct task_group *, void *); |
1385 | static unsigned long target_load(int cpu, int type); | ||
1386 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | ||
1387 | |||
1388 | static unsigned long cpu_avg_load_per_task(int cpu) | ||
1389 | { | ||
1390 | struct rq *rq = cpu_rq(cpu); | ||
1391 | |||
1392 | if (rq->nr_running) | ||
1393 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | ||
1394 | |||
1395 | return rq->avg_load_per_task; | ||
1396 | } | ||
1397 | |||
1398 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1399 | |||
1400 | typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *); | ||
1401 | 1390 | ||
1402 | /* | 1391 | /* |
1403 | * Iterate the full tree, calling @down when first entering a node and @up when | 1392 | * Iterate the full tree, calling @down when first entering a node and @up when |
1404 | * leaving it for the final time. | 1393 | * leaving it for the final time. |
1405 | */ | 1394 | */ |
1406 | static void | 1395 | static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
1407 | walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd) | ||
1408 | { | 1396 | { |
1409 | struct task_group *parent, *child; | 1397 | struct task_group *parent, *child; |
1398 | int ret; | ||
1410 | 1399 | ||
1411 | rcu_read_lock(); | 1400 | rcu_read_lock(); |
1412 | parent = &root_task_group; | 1401 | parent = &root_task_group; |
1413 | down: | 1402 | down: |
1414 | (*down)(parent, cpu, sd); | 1403 | ret = (*down)(parent, data); |
1404 | if (ret) | ||
1405 | goto out_unlock; | ||
1415 | list_for_each_entry_rcu(child, &parent->children, siblings) { | 1406 | list_for_each_entry_rcu(child, &parent->children, siblings) { |
1416 | parent = child; | 1407 | parent = child; |
1417 | goto down; | 1408 | goto down; |
@@ -1419,15 +1410,43 @@ down: | |||
1419 | up: | 1410 | up: |
1420 | continue; | 1411 | continue; |
1421 | } | 1412 | } |
1422 | (*up)(parent, cpu, sd); | 1413 | ret = (*up)(parent, data); |
1414 | if (ret) | ||
1415 | goto out_unlock; | ||
1423 | 1416 | ||
1424 | child = parent; | 1417 | child = parent; |
1425 | parent = parent->parent; | 1418 | parent = parent->parent; |
1426 | if (parent) | 1419 | if (parent) |
1427 | goto up; | 1420 | goto up; |
1421 | out_unlock: | ||
1428 | rcu_read_unlock(); | 1422 | rcu_read_unlock(); |
1423 | |||
1424 | return ret; | ||
1429 | } | 1425 | } |
1430 | 1426 | ||
1427 | static int tg_nop(struct task_group *tg, void *data) | ||
1428 | { | ||
1429 | return 0; | ||
1430 | } | ||
1431 | #endif | ||
1432 | |||
1433 | #ifdef CONFIG_SMP | ||
1434 | static unsigned long source_load(int cpu, int type); | ||
1435 | static unsigned long target_load(int cpu, int type); | ||
1436 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | ||
1437 | |||
1438 | static unsigned long cpu_avg_load_per_task(int cpu) | ||
1439 | { | ||
1440 | struct rq *rq = cpu_rq(cpu); | ||
1441 | |||
1442 | if (rq->nr_running) | ||
1443 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | ||
1444 | |||
1445 | return rq->avg_load_per_task; | ||
1446 | } | ||
1447 | |||
1448 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1449 | |||
1431 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1450 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1432 | 1451 | ||
1433 | /* | 1452 | /* |
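The tg_visitor rework above makes both callbacks return int and take an opaque void *data, so a tree walk can be aborted as soon as a visitor reports an error; the old cpu/sched_domain arguments disappear. The toy program below sketches the same down/up contract on a hand-built group tree — it uses plain recursion and arrays where the kernel iterates RCU-protected lists with gotos, so treat it as an illustration of the calling convention only.

#include <stdio.h>

struct task_group {
	const char *name;
	struct task_group *children[4];
	int nr_children;
};

typedef int (*tg_visitor)(struct task_group *, void *);

/* @down runs when a node is first entered, @up when it is left for the
 * final time; a non-zero return from either aborts the whole walk. */
static int walk_tg_tree(struct task_group *tg, tg_visitor down, tg_visitor up, void *data)
{
	int i, ret;

	ret = down(tg, data);
	if (ret)
		return ret;
	for (i = 0; i < tg->nr_children; i++) {
		ret = walk_tg_tree(tg->children[i], down, up, data);
		if (ret)
			return ret;
	}
	return up(tg, data);
}

static int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}

static int print_down(struct task_group *tg, void *data)
{
	printf("entering %s\n", tg->name);
	return 0;	/* returning e.g. -EINVAL here would stop the walk */
}

int main(void)
{
	struct task_group a = { .name = "groupA" }, b = { .name = "groupB" };
	struct task_group root = { .name = "root", .children = { &a, &b }, .nr_children = 2 };

	return walk_tg_tree(&root, print_down, tg_nop, NULL);
}

This error propagation is what lets tg_schedulable() further down return -EINVAL or -EBUSY straight out of __rt_schedulable().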
@@ -1486,11 +1505,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1486 | * This needs to be done in a bottom-up fashion because the rq weight of a | 1505 | * This needs to be done in a bottom-up fashion because the rq weight of a |
1487 | * parent group depends on the shares of its child groups. | 1506 | * parent group depends on the shares of its child groups. |
1488 | */ | 1507 | */ |
1489 | static void | 1508 | static int tg_shares_up(struct task_group *tg, void *data) |
1490 | tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) | ||
1491 | { | 1509 | { |
1492 | unsigned long rq_weight = 0; | 1510 | unsigned long rq_weight = 0; |
1493 | unsigned long shares = 0; | 1511 | unsigned long shares = 0; |
1512 | struct sched_domain *sd = data; | ||
1494 | int i; | 1513 | int i; |
1495 | 1514 | ||
1496 | for_each_cpu_mask(i, sd->span) { | 1515 | for_each_cpu_mask(i, sd->span) { |
@@ -1515,6 +1534,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) | |||
1515 | __update_group_shares_cpu(tg, i, shares, rq_weight); | 1534 | __update_group_shares_cpu(tg, i, shares, rq_weight); |
1516 | spin_unlock_irqrestore(&rq->lock, flags); | 1535 | spin_unlock_irqrestore(&rq->lock, flags); |
1517 | } | 1536 | } |
1537 | |||
1538 | return 0; | ||
1518 | } | 1539 | } |
1519 | 1540 | ||
1520 | /* | 1541 | /* |
@@ -1522,10 +1543,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) | |||
1522 | * This needs to be done in a top-down fashion because the load of a child | 1543 | * This needs to be done in a top-down fashion because the load of a child |
1523 | * group is a fraction of its parents load. | 1544 | * group is a fraction of its parents load. |
1524 | */ | 1545 | */ |
1525 | static void | 1546 | static int tg_load_down(struct task_group *tg, void *data) |
1526 | tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd) | ||
1527 | { | 1547 | { |
1528 | unsigned long load; | 1548 | unsigned long load; |
1549 | long cpu = (long)data; | ||
1529 | 1550 | ||
1530 | if (!tg->parent) { | 1551 | if (!tg->parent) { |
1531 | load = cpu_rq(cpu)->load.weight; | 1552 | load = cpu_rq(cpu)->load.weight; |
@@ -1536,11 +1557,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd) | |||
1536 | } | 1557 | } |
1537 | 1558 | ||
1538 | tg->cfs_rq[cpu]->h_load = load; | 1559 | tg->cfs_rq[cpu]->h_load = load; |
1539 | } | ||
1540 | 1560 | ||
1541 | static void | 1561 | return 0; |
1542 | tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd) | ||
1543 | { | ||
1544 | } | 1562 | } |
1545 | 1563 | ||
1546 | static void update_shares(struct sched_domain *sd) | 1564 | static void update_shares(struct sched_domain *sd) |
@@ -1550,7 +1568,7 @@ static void update_shares(struct sched_domain *sd) | |||
1550 | 1568 | ||
1551 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { | 1569 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { |
1552 | sd->last_update = now; | 1570 | sd->last_update = now; |
1553 | walk_tg_tree(tg_nop, tg_shares_up, 0, sd); | 1571 | walk_tg_tree(tg_nop, tg_shares_up, sd); |
1554 | } | 1572 | } |
1555 | } | 1573 | } |
1556 | 1574 | ||
@@ -1561,9 +1579,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1561 | spin_lock(&rq->lock); | 1579 | spin_lock(&rq->lock); |
1562 | } | 1580 | } |
1563 | 1581 | ||
1564 | static void update_h_load(int cpu) | 1582 | static void update_h_load(long cpu) |
1565 | { | 1583 | { |
1566 | walk_tg_tree(tg_load_down, tg_nop, cpu, NULL); | 1584 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
1567 | } | 1585 | } |
1568 | 1586 | ||
1569 | #else | 1587 | #else |
@@ -1921,11 +1939,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) | |||
1921 | running = task_running(rq, p); | 1939 | running = task_running(rq, p); |
1922 | on_rq = p->se.on_rq; | 1940 | on_rq = p->se.on_rq; |
1923 | ncsw = 0; | 1941 | ncsw = 0; |
1924 | if (!match_state || p->state == match_state) { | 1942 | if (!match_state || p->state == match_state) |
1925 | ncsw = p->nivcsw + p->nvcsw; | 1943 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
1926 | if (unlikely(!ncsw)) | ||
1927 | ncsw = 1; | ||
1928 | } | ||
1929 | task_rq_unlock(rq, &flags); | 1944 | task_rq_unlock(rq, &flags); |
1930 | 1945 | ||
1931 | /* | 1946 | /* |
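wait_task_inactive() now folds its "did the state match" flag into the switch count itself: ORing in LONG_MIN sets the most significant bit, so any sampled count is guaranteed non-zero and a bare 0 can only mean the state check failed. A tiny userspace demonstration of the encoding, purely illustrative:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long nvcsw = 0;			/* even a count of zero ... */
	unsigned long ncsw = nvcsw | LONG_MIN;		/* ... is non-zero once tagged */

	printf("tagged sample: %#lx (non-zero: %d)\n", ncsw, ncsw != 0);
	printf("'state did not match' stays exactly 0\n");
	return 0;
}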
@@ -2285,7 +2300,7 @@ out_running: | |||
2285 | trace_mark(kernel_sched_wakeup, | 2300 | trace_mark(kernel_sched_wakeup, |
2286 | "pid %d state %ld ## rq %p task %p rq->curr %p", | 2301 | "pid %d state %ld ## rq %p task %p rq->curr %p", |
2287 | p->pid, p->state, rq, p, rq->curr); | 2302 | p->pid, p->state, rq, p, rq->curr); |
2288 | check_preempt_curr(rq, p); | 2303 | check_preempt_curr(rq, p, sync); |
2289 | 2304 | ||
2290 | p->state = TASK_RUNNING; | 2305 | p->state = TASK_RUNNING; |
2291 | #ifdef CONFIG_SMP | 2306 | #ifdef CONFIG_SMP |
@@ -2420,7 +2435,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2420 | trace_mark(kernel_sched_wakeup_new, | 2435 | trace_mark(kernel_sched_wakeup_new, |
2421 | "pid %d state %ld ## rq %p task %p rq->curr %p", | 2436 | "pid %d state %ld ## rq %p task %p rq->curr %p", |
2422 | p->pid, p->state, rq, p, rq->curr); | 2437 | p->pid, p->state, rq, p, rq->curr); |
2423 | check_preempt_curr(rq, p); | 2438 | check_preempt_curr(rq, p, 0); |
2424 | #ifdef CONFIG_SMP | 2439 | #ifdef CONFIG_SMP |
2425 | if (p->sched_class->task_wake_up) | 2440 | if (p->sched_class->task_wake_up) |
2426 | p->sched_class->task_wake_up(rq, p); | 2441 | p->sched_class->task_wake_up(rq, p); |
@@ -2880,7 +2895,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | |||
2880 | * Note that idle threads have a prio of MAX_PRIO, for this test | 2895 | * Note that idle threads have a prio of MAX_PRIO, for this test |
2881 | * to be always true for them. | 2896 | * to be always true for them. |
2882 | */ | 2897 | */ |
2883 | check_preempt_curr(this_rq, p); | 2898 | check_preempt_curr(this_rq, p, 0); |
2884 | } | 2899 | } |
2885 | 2900 | ||
2886 | /* | 2901 | /* |
@@ -4627,6 +4642,15 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | |||
4627 | } | 4642 | } |
4628 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ | 4643 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
4629 | 4644 | ||
4645 | /** | ||
4646 | * complete: - signals a single thread waiting on this completion | ||
4647 | * @x: holds the state of this particular completion | ||
4648 | * | ||
4649 | * This will wake up a single thread waiting on this completion. Threads will be | ||
4650 | * awakened in the same order in which they were queued. | ||
4651 | * | ||
4652 | * See also complete_all(), wait_for_completion() and related routines. | ||
4653 | */ | ||
4630 | void complete(struct completion *x) | 4654 | void complete(struct completion *x) |
4631 | { | 4655 | { |
4632 | unsigned long flags; | 4656 | unsigned long flags; |
@@ -4638,6 +4662,12 @@ void complete(struct completion *x) | |||
4638 | } | 4662 | } |
4639 | EXPORT_SYMBOL(complete); | 4663 | EXPORT_SYMBOL(complete); |
4640 | 4664 | ||
4665 | /** | ||
4666 | * complete_all: - signals all threads waiting on this completion | ||
4667 | * @x: holds the state of this particular completion | ||
4668 | * | ||
4669 | * This will wake up all threads waiting on this particular completion event. | ||
4670 | */ | ||
4641 | void complete_all(struct completion *x) | 4671 | void complete_all(struct completion *x) |
4642 | { | 4672 | { |
4643 | unsigned long flags; | 4673 | unsigned long flags; |
@@ -4658,10 +4688,7 @@ do_wait_for_common(struct completion *x, long timeout, int state) | |||
4658 | wait.flags |= WQ_FLAG_EXCLUSIVE; | 4688 | wait.flags |= WQ_FLAG_EXCLUSIVE; |
4659 | __add_wait_queue_tail(&x->wait, &wait); | 4689 | __add_wait_queue_tail(&x->wait, &wait); |
4660 | do { | 4690 | do { |
4661 | if ((state == TASK_INTERRUPTIBLE && | 4691 | if (signal_pending_state(state, current)) { |
4662 | signal_pending(current)) || | ||
4663 | (state == TASK_KILLABLE && | ||
4664 | fatal_signal_pending(current))) { | ||
4665 | timeout = -ERESTARTSYS; | 4692 | timeout = -ERESTARTSYS; |
4666 | break; | 4693 | break; |
4667 | } | 4694 | } |
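do_wait_for_common() collapses the two open-coded checks — any pending signal for TASK_INTERRUPTIBLE, only a fatal one for TASK_KILLABLE — into a single signal_pending_state() call. The sketch below restates the logic that was replaced; the flag values and the task struct are invented for the example, and the real helper also has to deal with the combined TASK_WAKEKILL|TASK_UNINTERRUPTIBLE encoding.

#include <stdbool.h>
#include <stdio.h>

#define TASK_INTERRUPTIBLE	0x01	/* illustrative values, not the kernel's */
#define TASK_KILLABLE		0x02

struct task { bool signal_pending; bool fatal_signal_pending; };

static bool signal_pending_state(int state, const struct task *t)
{
	if (state & TASK_INTERRUPTIBLE)
		return t->signal_pending;	/* any signal breaks the wait */
	if (state & TASK_KILLABLE)
		return t->fatal_signal_pending;	/* only SIGKILL-class signals do */
	return false;				/* uninterruptible waits never break */
}

int main(void)
{
	struct task t = { .signal_pending = true, .fatal_signal_pending = false };

	printf("interruptible wait aborts: %d\n", signal_pending_state(TASK_INTERRUPTIBLE, &t));
	printf("killable wait aborts:      %d\n", signal_pending_state(TASK_KILLABLE, &t));
	return 0;
}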
@@ -4689,12 +4716,31 @@ wait_for_common(struct completion *x, long timeout, int state) | |||
4689 | return timeout; | 4716 | return timeout; |
4690 | } | 4717 | } |
4691 | 4718 | ||
4719 | /** | ||
4720 | * wait_for_completion: - waits for completion of a task | ||
4721 | * @x: holds the state of this particular completion | ||
4722 | * | ||
4723 | * This waits to be signaled for completion of a specific task. It is NOT | ||
4724 | * interruptible and there is no timeout. | ||
4725 | * | ||
4726 | * See also similar routines (i.e. wait_for_completion_timeout()) with timeout | ||
4727 | * and interrupt capability. Also see complete(). | ||
4728 | */ | ||
4692 | void __sched wait_for_completion(struct completion *x) | 4729 | void __sched wait_for_completion(struct completion *x) |
4693 | { | 4730 | { |
4694 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); | 4731 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); |
4695 | } | 4732 | } |
4696 | EXPORT_SYMBOL(wait_for_completion); | 4733 | EXPORT_SYMBOL(wait_for_completion); |
4697 | 4734 | ||
4735 | /** | ||
4736 | * wait_for_completion_timeout: - waits for completion of a task (w/timeout) | ||
4737 | * @x: holds the state of this particular completion | ||
4738 | * @timeout: timeout value in jiffies | ||
4739 | * | ||
4740 | * This waits for either a completion of a specific task to be signaled or for a | ||
4741 | * specified timeout to expire. The timeout is in jiffies. It is not | ||
4742 | * interruptible. | ||
4743 | */ | ||
4698 | unsigned long __sched | 4744 | unsigned long __sched |
4699 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) | 4745 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
4700 | { | 4746 | { |
@@ -4702,6 +4748,13 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout) | |||
4702 | } | 4748 | } |
4703 | EXPORT_SYMBOL(wait_for_completion_timeout); | 4749 | EXPORT_SYMBOL(wait_for_completion_timeout); |
4704 | 4750 | ||
4751 | /** | ||
4752 | * wait_for_completion_interruptible: - waits for completion of a task (w/intr) | ||
4753 | * @x: holds the state of this particular completion | ||
4754 | * | ||
4755 | * This waits for completion of a specific task to be signaled. It is | ||
4756 | * interruptible. | ||
4757 | */ | ||
4705 | int __sched wait_for_completion_interruptible(struct completion *x) | 4758 | int __sched wait_for_completion_interruptible(struct completion *x) |
4706 | { | 4759 | { |
4707 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); | 4760 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); |
@@ -4711,6 +4764,14 @@ int __sched wait_for_completion_interruptible(struct completion *x) | |||
4711 | } | 4764 | } |
4712 | EXPORT_SYMBOL(wait_for_completion_interruptible); | 4765 | EXPORT_SYMBOL(wait_for_completion_interruptible); |
4713 | 4766 | ||
4767 | /** | ||
4768 | * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) | ||
4769 | * @x: holds the state of this particular completion | ||
4770 | * @timeout: timeout value in jiffies | ||
4771 | * | ||
4772 | * This waits for either a completion of a specific task to be signaled or for a | ||
4773 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. | ||
4774 | */ | ||
4714 | unsigned long __sched | 4775 | unsigned long __sched |
4715 | wait_for_completion_interruptible_timeout(struct completion *x, | 4776 | wait_for_completion_interruptible_timeout(struct completion *x, |
4716 | unsigned long timeout) | 4777 | unsigned long timeout) |
@@ -4719,6 +4780,13 @@ wait_for_completion_interruptible_timeout(struct completion *x, | |||
4719 | } | 4780 | } |
4720 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); | 4781 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); |
4721 | 4782 | ||
4783 | /** | ||
4784 | * wait_for_completion_killable: - waits for completion of a task (killable) | ||
4785 | * @x: holds the state of this particular completion | ||
4786 | * | ||
4787 | * This waits to be signaled for completion of a specific task. It can be | ||
4788 | * interrupted by a kill signal. | ||
4789 | */ | ||
4722 | int __sched wait_for_completion_killable(struct completion *x) | 4790 | int __sched wait_for_completion_killable(struct completion *x) |
4723 | { | 4791 | { |
4724 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); | 4792 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); |
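The kerneldoc added in these hunks documents the completion API as a whole: complete() wakes one waiter in FIFO order, complete_all() wakes everyone, and the wait_for_completion*() variants layer timeouts, interruptibility or killability on top of the same primitive. As a rough userspace analogue only — the kernel builds this on wait queues and the scheduler, not on POSIX threads — a completion behaves like a counted condition variable:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	unsigned int done;
};

static void complete(struct completion *x)		/* wake one waiter */
{
	pthread_mutex_lock(&x->lock);
	x->done++;
	pthread_cond_signal(&x->wait);
	pthread_mutex_unlock(&x->lock);
}

static void wait_for_completion(struct completion *x)	/* no timeout, not interruptible */
{
	pthread_mutex_lock(&x->lock);
	while (!x->done)
		pthread_cond_wait(&x->wait, &x->lock);
	x->done--;
	pthread_mutex_unlock(&x->lock);
}

static struct completion setup_done = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
};

static void *worker(void *arg)
{
	sleep(1);			/* stand-in for the actual work */
	complete(&setup_done);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, worker, NULL);
	wait_for_completion(&setup_done);	/* blocks until the worker signals */
	printf("setup complete\n");
	pthread_join(thr, NULL);
	return 0;
}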
@@ -5121,7 +5189,8 @@ recheck: | |||
5121 | * Do not allow realtime tasks into groups that have no runtime | 5189 | * Do not allow realtime tasks into groups that have no runtime |
5122 | * assigned. | 5190 | * assigned. |
5123 | */ | 5191 | */ |
5124 | if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) | 5192 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
5193 | task_group(p)->rt_bandwidth.rt_runtime == 0) | ||
5125 | return -EPERM; | 5194 | return -EPERM; |
5126 | #endif | 5195 | #endif |
5127 | 5196 | ||
@@ -5957,7 +6026,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
5957 | set_task_cpu(p, dest_cpu); | 6026 | set_task_cpu(p, dest_cpu); |
5958 | if (on_rq) { | 6027 | if (on_rq) { |
5959 | activate_task(rq_dest, p, 0); | 6028 | activate_task(rq_dest, p, 0); |
5960 | check_preempt_curr(rq_dest, p); | 6029 | check_preempt_curr(rq_dest, p, 0); |
5961 | } | 6030 | } |
5962 | done: | 6031 | done: |
5963 | ret = 1; | 6032 | ret = 1; |
@@ -8242,20 +8311,25 @@ void __might_sleep(char *file, int line) | |||
8242 | #ifdef in_atomic | 8311 | #ifdef in_atomic |
8243 | static unsigned long prev_jiffy; /* ratelimiting */ | 8312 | static unsigned long prev_jiffy; /* ratelimiting */ |
8244 | 8313 | ||
8245 | if ((in_atomic() || irqs_disabled()) && | 8314 | if ((!in_atomic() && !irqs_disabled()) || |
8246 | system_state == SYSTEM_RUNNING && !oops_in_progress) { | 8315 | system_state != SYSTEM_RUNNING || oops_in_progress) |
8247 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) | 8316 | return; |
8248 | return; | 8317 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
8249 | prev_jiffy = jiffies; | 8318 | return; |
8250 | printk(KERN_ERR "BUG: sleeping function called from invalid" | 8319 | prev_jiffy = jiffies; |
8251 | " context at %s:%d\n", file, line); | 8320 | |
8252 | printk("in_atomic():%d, irqs_disabled():%d\n", | 8321 | printk(KERN_ERR |
8253 | in_atomic(), irqs_disabled()); | 8322 | "BUG: sleeping function called from invalid context at %s:%d\n", |
8254 | debug_show_held_locks(current); | 8323 | file, line); |
8255 | if (irqs_disabled()) | 8324 | printk(KERN_ERR |
8256 | print_irqtrace_events(current); | 8325 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
8257 | dump_stack(); | 8326 | in_atomic(), irqs_disabled(), |
8258 | } | 8327 | current->pid, current->comm); |
8328 | |||
8329 | debug_show_held_locks(current); | ||
8330 | if (irqs_disabled()) | ||
8331 | print_irqtrace_events(current); | ||
8332 | dump_stack(); | ||
8259 | #endif | 8333 | #endif |
8260 | } | 8334 | } |
8261 | EXPORT_SYMBOL(__might_sleep); | 8335 | EXPORT_SYMBOL(__might_sleep); |
@@ -8753,73 +8827,95 @@ static DEFINE_MUTEX(rt_constraints_mutex); | |||
8753 | static unsigned long to_ratio(u64 period, u64 runtime) | 8827 | static unsigned long to_ratio(u64 period, u64 runtime) |
8754 | { | 8828 | { |
8755 | if (runtime == RUNTIME_INF) | 8829 | if (runtime == RUNTIME_INF) |
8756 | return 1ULL << 16; | 8830 | return 1ULL << 20; |
8757 | 8831 | ||
8758 | return div64_u64(runtime << 16, period); | 8832 | return div64_u64(runtime << 20, period); |
8759 | } | 8833 | } |
8760 | 8834 | ||
8761 | #ifdef CONFIG_CGROUP_SCHED | 8835 | /* Must be called with tasklist_lock held */ |
8762 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) | 8836 | static inline int tg_has_rt_tasks(struct task_group *tg) |
8763 | { | 8837 | { |
8764 | struct task_group *tgi, *parent = tg->parent; | 8838 | struct task_struct *g, *p; |
8765 | unsigned long total = 0; | ||
8766 | 8839 | ||
8767 | if (!parent) { | 8840 | do_each_thread(g, p) { |
8768 | if (global_rt_period() < period) | 8841 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) |
8769 | return 0; | 8842 | return 1; |
8843 | } while_each_thread(g, p); | ||
8770 | 8844 | ||
8771 | return to_ratio(period, runtime) < | 8845 | return 0; |
8772 | to_ratio(global_rt_period(), global_rt_runtime()); | 8846 | } |
8773 | } | ||
8774 | 8847 | ||
8775 | if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period) | 8848 | struct rt_schedulable_data { |
8776 | return 0; | 8849 | struct task_group *tg; |
8850 | u64 rt_period; | ||
8851 | u64 rt_runtime; | ||
8852 | }; | ||
8777 | 8853 | ||
8778 | rcu_read_lock(); | 8854 | static int tg_schedulable(struct task_group *tg, void *data) |
8779 | list_for_each_entry_rcu(tgi, &parent->children, siblings) { | 8855 | { |
8780 | if (tgi == tg) | 8856 | struct rt_schedulable_data *d = data; |
8781 | continue; | 8857 | struct task_group *child; |
8858 | unsigned long total, sum = 0; | ||
8859 | u64 period, runtime; | ||
8782 | 8860 | ||
8783 | total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), | 8861 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
8784 | tgi->rt_bandwidth.rt_runtime); | 8862 | runtime = tg->rt_bandwidth.rt_runtime; |
8863 | |||
8864 | if (tg == d->tg) { | ||
8865 | period = d->rt_period; | ||
8866 | runtime = d->rt_runtime; | ||
8785 | } | 8867 | } |
8786 | rcu_read_unlock(); | ||
8787 | 8868 | ||
8788 | return total + to_ratio(period, runtime) <= | 8869 | /* |
8789 | to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period), | 8870 | * Cannot have more runtime than the period. |
8790 | parent->rt_bandwidth.rt_runtime); | 8871 | */ |
8791 | } | 8872 | if (runtime > period && runtime != RUNTIME_INF) |
8792 | #elif defined CONFIG_USER_SCHED | 8873 | return -EINVAL; |
8793 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) | ||
8794 | { | ||
8795 | struct task_group *tgi; | ||
8796 | unsigned long total = 0; | ||
8797 | unsigned long global_ratio = | ||
8798 | to_ratio(global_rt_period(), global_rt_runtime()); | ||
8799 | 8874 | ||
8800 | rcu_read_lock(); | 8875 | /* |
8801 | list_for_each_entry_rcu(tgi, &task_groups, list) { | 8876 | * Ensure we don't starve existing RT tasks. |
8802 | if (tgi == tg) | 8877 | */ |
8803 | continue; | 8878 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
8879 | return -EBUSY; | ||
8880 | |||
8881 | total = to_ratio(period, runtime); | ||
8882 | |||
8883 | /* | ||
8884 | * Nobody can have more than the global setting allows. | ||
8885 | */ | ||
8886 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) | ||
8887 | return -EINVAL; | ||
8888 | |||
8889 | /* | ||
8890 | * The sum of our children's runtime should not exceed our own. | ||
8891 | */ | ||
8892 | list_for_each_entry_rcu(child, &tg->children, siblings) { | ||
8893 | period = ktime_to_ns(child->rt_bandwidth.rt_period); | ||
8894 | runtime = child->rt_bandwidth.rt_runtime; | ||
8895 | |||
8896 | if (child == d->tg) { | ||
8897 | period = d->rt_period; | ||
8898 | runtime = d->rt_runtime; | ||
8899 | } | ||
8804 | 8900 | ||
8805 | total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), | 8901 | sum += to_ratio(period, runtime); |
8806 | tgi->rt_bandwidth.rt_runtime); | ||
8807 | } | 8902 | } |
8808 | rcu_read_unlock(); | ||
8809 | 8903 | ||
8810 | return total + to_ratio(period, runtime) < global_ratio; | 8904 | if (sum > total) |
8905 | return -EINVAL; | ||
8906 | |||
8907 | return 0; | ||
8811 | } | 8908 | } |
8812 | #endif | ||
8813 | 8909 | ||
8814 | /* Must be called with tasklist_lock held */ | 8910 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
8815 | static inline int tg_has_rt_tasks(struct task_group *tg) | ||
8816 | { | 8911 | { |
8817 | struct task_struct *g, *p; | 8912 | struct rt_schedulable_data data = { |
8818 | do_each_thread(g, p) { | 8913 | .tg = tg, |
8819 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) | 8914 | .rt_period = period, |
8820 | return 1; | 8915 | .rt_runtime = runtime, |
8821 | } while_each_thread(g, p); | 8916 | }; |
8822 | return 0; | 8917 | |
8918 | return walk_tg_tree(tg_schedulable, tg_nop, &data); | ||
8823 | } | 8919 | } |
8824 | 8920 | ||
8825 | static int tg_set_bandwidth(struct task_group *tg, | 8921 | static int tg_set_bandwidth(struct task_group *tg, |
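to_ratio() now expresses a runtime/period pair in 1/2^20 units rather than 1/2^16, giving the hierarchical checks in tg_schedulable() more headroom and resolution. A quick standalone rendering of the arithmetic, with example bandwidth numbers chosen for illustration rather than quoted from the kernel:

#include <stdint.h>
#include <stdio.h>

static unsigned long to_ratio(uint64_t period, uint64_t runtime)
{
	return (unsigned long)((runtime << 20) / period);	/* fixed point, 2^20 = "100%" */
}

int main(void)
{
	uint64_t period  = 1000000;	/* e.g. a 1s period, in microseconds */
	uint64_t runtime =  950000;	/* 950ms of RT runtime inside it */

	unsigned long total = to_ratio(period, runtime);

	printf("group may use %lu / %lu of the CPU\n", total, 1UL << 20);

	/* tg_schedulable() rejects children whose ratios sum past this:
	 * e.g. 700ms + 300ms inside the same 1s period would exceed it. */
	printf("children asking 0.7 + 0.3 = %lu > %lu -> -EINVAL\n",
	       to_ratio(period, 700000) + to_ratio(period, 300000), total);
	return 0;
}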
@@ -8829,14 +8925,9 @@ static int tg_set_bandwidth(struct task_group *tg, | |||
8829 | 8925 | ||
8830 | mutex_lock(&rt_constraints_mutex); | 8926 | mutex_lock(&rt_constraints_mutex); |
8831 | read_lock(&tasklist_lock); | 8927 | read_lock(&tasklist_lock); |
8832 | if (rt_runtime == 0 && tg_has_rt_tasks(tg)) { | 8928 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
8833 | err = -EBUSY; | 8929 | if (err) |
8834 | goto unlock; | 8930 | goto unlock; |
8835 | } | ||
8836 | if (!__rt_schedulable(tg, rt_period, rt_runtime)) { | ||
8837 | err = -EINVAL; | ||
8838 | goto unlock; | ||
8839 | } | ||
8840 | 8931 | ||
8841 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 8932 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
8842 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); | 8933 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
@@ -8905,19 +8996,25 @@ long sched_group_rt_period(struct task_group *tg) | |||
8905 | 8996 | ||
8906 | static int sched_rt_global_constraints(void) | 8997 | static int sched_rt_global_constraints(void) |
8907 | { | 8998 | { |
8908 | struct task_group *tg = &root_task_group; | 8999 | u64 runtime, period; |
8909 | u64 rt_runtime, rt_period; | ||
8910 | int ret = 0; | 9000 | int ret = 0; |
8911 | 9001 | ||
8912 | if (sysctl_sched_rt_period <= 0) | 9002 | if (sysctl_sched_rt_period <= 0) |
8913 | return -EINVAL; | 9003 | return -EINVAL; |
8914 | 9004 | ||
8915 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); | 9005 | runtime = global_rt_runtime(); |
8916 | rt_runtime = tg->rt_bandwidth.rt_runtime; | 9006 | period = global_rt_period(); |
9007 | |||
9008 | /* | ||
9009 | * Sanity check on the sysctl variables. | ||
9010 | */ | ||
9011 | if (runtime > period && runtime != RUNTIME_INF) | ||
9012 | return -EINVAL; | ||
8917 | 9013 | ||
8918 | mutex_lock(&rt_constraints_mutex); | 9014 | mutex_lock(&rt_constraints_mutex); |
8919 | if (!__rt_schedulable(tg, rt_period, rt_runtime)) | 9015 | read_lock(&tasklist_lock); |
8920 | ret = -EINVAL; | 9016 | ret = __rt_schedulable(NULL, 0, 0); |
9017 | read_unlock(&tasklist_lock); | ||
8921 | mutex_unlock(&rt_constraints_mutex); | 9018 | mutex_unlock(&rt_constraints_mutex); |
8922 | 9019 | ||
8923 | return ret; | 9020 | return ret; |
@@ -8991,7 +9088,6 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
8991 | 9088 | ||
8992 | if (!cgrp->parent) { | 9089 | if (!cgrp->parent) { |
8993 | /* This is early initialization for the top cgroup */ | 9090 | /* This is early initialization for the top cgroup */ |
8994 | init_task_group.css.cgroup = cgrp; | ||
8995 | return &init_task_group.css; | 9091 | return &init_task_group.css; |
8996 | } | 9092 | } |
8997 | 9093 | ||
@@ -9000,9 +9096,6 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9000 | if (IS_ERR(tg)) | 9096 | if (IS_ERR(tg)) |
9001 | return ERR_PTR(-ENOMEM); | 9097 | return ERR_PTR(-ENOMEM); |
9002 | 9098 | ||
9003 | /* Bind the cgroup to task_group object we just created */ | ||
9004 | tg->css.cgroup = cgrp; | ||
9005 | |||
9006 | return &tg->css; | 9099 | return &tg->css; |
9007 | } | 9100 | } |
9008 | 9101 | ||
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index fb8994c6d4bb..fcbe850a5a90 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -409,64 +409,6 @@ static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
409 | } | 409 | } |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in | ||
413 | * that it favours >=0 over <0. | ||
414 | * | ||
415 | * -20 | | ||
416 | * | | ||
417 | * 0 --------+------- | ||
418 | * .' | ||
419 | * 19 .' | ||
420 | * | ||
421 | */ | ||
422 | static unsigned long | ||
423 | calc_delta_asym(unsigned long delta, struct sched_entity *se) | ||
424 | { | ||
425 | struct load_weight lw = { | ||
426 | .weight = NICE_0_LOAD, | ||
427 | .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT) | ||
428 | }; | ||
429 | |||
430 | for_each_sched_entity(se) { | ||
431 | struct load_weight *se_lw = &se->load; | ||
432 | unsigned long rw = cfs_rq_of(se)->load.weight; | ||
433 | |||
434 | #ifdef CONFIG_FAIR_SCHED_GROUP | ||
435 | struct cfs_rq *cfs_rq = se->my_q; | ||
436 | struct task_group *tg = NULL | ||
437 | |||
438 | if (cfs_rq) | ||
439 | tg = cfs_rq->tg; | ||
440 | |||
441 | if (tg && tg->shares < NICE_0_LOAD) { | ||
442 | /* | ||
443 | * scale shares to what it would have been had | ||
444 | * tg->weight been NICE_0_LOAD: | ||
445 | * | ||
446 | * weight = 1024 * shares / tg->weight | ||
447 | */ | ||
448 | lw.weight *= se->load.weight; | ||
449 | lw.weight /= tg->shares; | ||
450 | |||
451 | lw.inv_weight = 0; | ||
452 | |||
453 | se_lw = &lw; | ||
454 | rw += lw.weight - se->load.weight; | ||
455 | } else | ||
456 | #endif | ||
457 | |||
458 | if (se->load.weight < NICE_0_LOAD) { | ||
459 | se_lw = &lw; | ||
460 | rw += NICE_0_LOAD - se->load.weight; | ||
461 | } | ||
462 | |||
463 | delta = calc_delta_mine(delta, rw, se_lw); | ||
464 | } | ||
465 | |||
466 | return delta; | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * Update the current task's runtime statistics. Skip current tasks that | 412 | * Update the current task's runtime statistics. Skip current tasks that |
471 | * are not in our scheduling class. | 413 | * are not in our scheduling class. |
472 | */ | 414 | */ |
@@ -586,11 +528,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
586 | update_load_add(&cfs_rq->load, se->load.weight); | 528 | update_load_add(&cfs_rq->load, se->load.weight); |
587 | if (!parent_entity(se)) | 529 | if (!parent_entity(se)) |
588 | inc_cpu_load(rq_of(cfs_rq), se->load.weight); | 530 | inc_cpu_load(rq_of(cfs_rq), se->load.weight); |
589 | if (entity_is_task(se)) | 531 | if (entity_is_task(se)) { |
590 | add_cfs_task_weight(cfs_rq, se->load.weight); | 532 | add_cfs_task_weight(cfs_rq, se->load.weight); |
533 | list_add(&se->group_node, &cfs_rq->tasks); | ||
534 | } | ||
591 | cfs_rq->nr_running++; | 535 | cfs_rq->nr_running++; |
592 | se->on_rq = 1; | 536 | se->on_rq = 1; |
593 | list_add(&se->group_node, &cfs_rq->tasks); | ||
594 | } | 537 | } |
595 | 538 | ||
596 | static void | 539 | static void |
@@ -599,11 +542,12 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
599 | update_load_sub(&cfs_rq->load, se->load.weight); | 542 | update_load_sub(&cfs_rq->load, se->load.weight); |
600 | if (!parent_entity(se)) | 543 | if (!parent_entity(se)) |
601 | dec_cpu_load(rq_of(cfs_rq), se->load.weight); | 544 | dec_cpu_load(rq_of(cfs_rq), se->load.weight); |
602 | if (entity_is_task(se)) | 545 | if (entity_is_task(se)) { |
603 | add_cfs_task_weight(cfs_rq, -se->load.weight); | 546 | add_cfs_task_weight(cfs_rq, -se->load.weight); |
547 | list_del_init(&se->group_node); | ||
548 | } | ||
604 | cfs_rq->nr_running--; | 549 | cfs_rq->nr_running--; |
605 | se->on_rq = 0; | 550 | se->on_rq = 0; |
606 | list_del_init(&se->group_node); | ||
607 | } | 551 | } |
608 | 552 | ||
609 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 553 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
@@ -1085,7 +1029,6 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1085 | long wl, long wg) | 1029 | long wl, long wg) |
1086 | { | 1030 | { |
1087 | struct sched_entity *se = tg->se[cpu]; | 1031 | struct sched_entity *se = tg->se[cpu]; |
1088 | long more_w; | ||
1089 | 1032 | ||
1090 | if (!tg->parent) | 1033 | if (!tg->parent) |
1091 | return wl; | 1034 | return wl; |
@@ -1097,18 +1040,17 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1097 | if (!wl && sched_feat(ASYM_EFF_LOAD)) | 1040 | if (!wl && sched_feat(ASYM_EFF_LOAD)) |
1098 | return wl; | 1041 | return wl; |
1099 | 1042 | ||
1100 | /* | ||
1101 | * Instead of using this increment, also add the difference | ||
1102 | * between when the shares were last updated and now. | ||
1103 | */ | ||
1104 | more_w = se->my_q->load.weight - se->my_q->rq_weight; | ||
1105 | wl += more_w; | ||
1106 | wg += more_w; | ||
1107 | |||
1108 | for_each_sched_entity(se) { | 1043 | for_each_sched_entity(se) { |
1109 | #define D(n) (likely(n) ? (n) : 1) | ||
1110 | |||
1111 | long S, rw, s, a, b; | 1044 | long S, rw, s, a, b; |
1045 | long more_w; | ||
1046 | |||
1047 | /* | ||
1048 | * Instead of using this increment, also add the difference | ||
1049 | * between when the shares were last updated and now. | ||
1050 | */ | ||
1051 | more_w = se->my_q->load.weight - se->my_q->rq_weight; | ||
1052 | wl += more_w; | ||
1053 | wg += more_w; | ||
1112 | 1054 | ||
1113 | S = se->my_q->tg->shares; | 1055 | S = se->my_q->tg->shares; |
1114 | s = se->my_q->shares; | 1056 | s = se->my_q->shares; |
@@ -1117,7 +1059,11 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1117 | a = S*(rw + wl); | 1059 | a = S*(rw + wl); |
1118 | b = S*rw + s*wg; | 1060 | b = S*rw + s*wg; |
1119 | 1061 | ||
1120 | wl = s*(a-b)/D(b); | 1062 | wl = s*(a-b); |
1063 | |||
1064 | if (likely(b)) | ||
1065 | wl /= b; | ||
1066 | |||
1121 | /* | 1067 | /* |
1122 | * Assume the group is already running and will | 1068 | * Assume the group is already running and will |
1123 | * thus already be accounted for in the weight. | 1069 | * thus already be accounted for in the weight. |
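The D(b) macro is replaced by an explicit guard with the same meaning: compute s*(a-b) and divide by b only when b is non-zero (the old macro substituted 1, which amounts to skipping the division). A trivial check with made-up weights:

#include <stdio.h>

static long scaled_delta(long s, long a, long b)
{
	long wl = s * (a - b);

	if (b)		/* likely(b) in the kernel version */
		wl /= b;
	return wl;
}

int main(void)
{
	printf("normal group: %ld\n", scaled_delta(512, 3072, 2048));
	printf("empty group (b == 0): %ld\n", scaled_delta(512, 3072, 0));
	return 0;
}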
@@ -1126,7 +1072,6 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1126 | * alter the group weight. | 1072 | * alter the group weight. |
1127 | */ | 1073 | */ |
1128 | wg = 0; | 1074 | wg = 0; |
1129 | #undef D | ||
1130 | } | 1075 | } |
1131 | 1076 | ||
1132 | return wl; | 1077 | return wl; |
@@ -1143,7 +1088,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu, | |||
1143 | #endif | 1088 | #endif |
1144 | 1089 | ||
1145 | static int | 1090 | static int |
1146 | wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, | 1091 | wake_affine(struct sched_domain *this_sd, struct rq *this_rq, |
1147 | struct task_struct *p, int prev_cpu, int this_cpu, int sync, | 1092 | struct task_struct *p, int prev_cpu, int this_cpu, int sync, |
1148 | int idx, unsigned long load, unsigned long this_load, | 1093 | int idx, unsigned long load, unsigned long this_load, |
1149 | unsigned int imbalance) | 1094 | unsigned int imbalance) |
@@ -1191,8 +1136,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, | |||
1191 | schedstat_inc(p, se.nr_wakeups_affine_attempts); | 1136 | schedstat_inc(p, se.nr_wakeups_affine_attempts); |
1192 | tl_per_task = cpu_avg_load_per_task(this_cpu); | 1137 | tl_per_task = cpu_avg_load_per_task(this_cpu); |
1193 | 1138 | ||
1194 | if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) || | 1139 | if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <= |
1195 | balanced) { | 1140 | tl_per_task)) { |
1196 | /* | 1141 | /* |
1197 | * This domain has SD_WAKE_AFFINE and | 1142 | * This domain has SD_WAKE_AFFINE and |
1198 | * p is cache cold in this domain, and | 1143 | * p is cache cold in this domain, and |
@@ -1211,16 +1156,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1211 | struct sched_domain *sd, *this_sd = NULL; | 1156 | struct sched_domain *sd, *this_sd = NULL; |
1212 | int prev_cpu, this_cpu, new_cpu; | 1157 | int prev_cpu, this_cpu, new_cpu; |
1213 | unsigned long load, this_load; | 1158 | unsigned long load, this_load; |
1214 | struct rq *rq, *this_rq; | 1159 | struct rq *this_rq; |
1215 | unsigned int imbalance; | 1160 | unsigned int imbalance; |
1216 | int idx; | 1161 | int idx; |
1217 | 1162 | ||
1218 | prev_cpu = task_cpu(p); | 1163 | prev_cpu = task_cpu(p); |
1219 | rq = task_rq(p); | ||
1220 | this_cpu = smp_processor_id(); | 1164 | this_cpu = smp_processor_id(); |
1221 | this_rq = cpu_rq(this_cpu); | 1165 | this_rq = cpu_rq(this_cpu); |
1222 | new_cpu = prev_cpu; | 1166 | new_cpu = prev_cpu; |
1223 | 1167 | ||
1168 | if (prev_cpu == this_cpu) | ||
1169 | goto out; | ||
1224 | /* | 1170 | /* |
1225 | * 'this_sd' is the first domain that both | 1171 | * 'this_sd' is the first domain that both |
1226 | * this_cpu and prev_cpu are present in: | 1172 | * this_cpu and prev_cpu are present in: |
@@ -1248,13 +1194,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1248 | load = source_load(prev_cpu, idx); | 1194 | load = source_load(prev_cpu, idx); |
1249 | this_load = target_load(this_cpu, idx); | 1195 | this_load = target_load(this_cpu, idx); |
1250 | 1196 | ||
1251 | if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, | 1197 | if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, |
1252 | load, this_load, imbalance)) | 1198 | load, this_load, imbalance)) |
1253 | return this_cpu; | 1199 | return this_cpu; |
1254 | 1200 | ||
1255 | if (prev_cpu == this_cpu) | ||
1256 | goto out; | ||
1257 | |||
1258 | /* | 1201 | /* |
1259 | * Start passive balancing when half the imbalance_pct | 1202 | * Start passive balancing when half the imbalance_pct |
1260 | * limit is reached. | 1203 | * limit is reached. |
@@ -1281,62 +1224,20 @@ static unsigned long wakeup_gran(struct sched_entity *se) | |||
1281 | * + nice tasks. | 1224 | * + nice tasks. |
1282 | */ | 1225 | */ |
1283 | if (sched_feat(ASYM_GRAN)) | 1226 | if (sched_feat(ASYM_GRAN)) |
1284 | gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se); | 1227 | gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load); |
1285 | else | ||
1286 | gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); | ||
1287 | 1228 | ||
1288 | return gran; | 1229 | return gran; |
1289 | } | 1230 | } |
1290 | 1231 | ||
1291 | /* | 1232 | /* |
1292 | * Should 'se' preempt 'curr'. | ||
1293 | * | ||
1294 | * |s1 | ||
1295 | * |s2 | ||
1296 | * |s3 | ||
1297 | * g | ||
1298 | * |<--->|c | ||
1299 | * | ||
1300 | * w(c, s1) = -1 | ||
1301 | * w(c, s2) = 0 | ||
1302 | * w(c, s3) = 1 | ||
1303 | * | ||
1304 | */ | ||
1305 | static int | ||
1306 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | ||
1307 | { | ||
1308 | s64 gran, vdiff = curr->vruntime - se->vruntime; | ||
1309 | |||
1310 | if (vdiff < 0) | ||
1311 | return -1; | ||
1312 | |||
1313 | gran = wakeup_gran(curr); | ||
1314 | if (vdiff > gran) | ||
1315 | return 1; | ||
1316 | |||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | /* return depth at which a sched entity is present in the hierarchy */ | ||
1321 | static inline int depth_se(struct sched_entity *se) | ||
1322 | { | ||
1323 | int depth = 0; | ||
1324 | |||
1325 | for_each_sched_entity(se) | ||
1326 | depth++; | ||
1327 | |||
1328 | return depth; | ||
1329 | } | ||
1330 | |||
1331 | /* | ||
1332 | * Preempt the current task with a newly woken task if needed: | 1233 | * Preempt the current task with a newly woken task if needed: |
1333 | */ | 1234 | */ |
1334 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | 1235 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) |
1335 | { | 1236 | { |
1336 | struct task_struct *curr = rq->curr; | 1237 | struct task_struct *curr = rq->curr; |
1337 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1238 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1338 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1239 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1339 | int se_depth, pse_depth; | 1240 | s64 delta_exec; |
1340 | 1241 | ||
1341 | if (unlikely(rt_prio(p->prio))) { | 1242 | if (unlikely(rt_prio(p->prio))) { |
1342 | update_rq_clock(rq); | 1243 | update_rq_clock(rq); |
@@ -1351,6 +1252,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | |||
1351 | cfs_rq_of(pse)->next = pse; | 1252 | cfs_rq_of(pse)->next = pse; |
1352 | 1253 | ||
1353 | /* | 1254 | /* |
1255 | * We can come here with TIF_NEED_RESCHED already set from new task | ||
1256 | * wake up path. | ||
1257 | */ | ||
1258 | if (test_tsk_need_resched(curr)) | ||
1259 | return; | ||
1260 | |||
1261 | /* | ||
1354 | * Batch tasks do not preempt (their preemption is driven by | 1262 | * Batch tasks do not preempt (their preemption is driven by |
1355 | * the tick): | 1263 | * the tick): |
1356 | */ | 1264 | */ |
@@ -1360,33 +1268,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | |||
1360 | if (!sched_feat(WAKEUP_PREEMPT)) | 1268 | if (!sched_feat(WAKEUP_PREEMPT)) |
1361 | return; | 1269 | return; |
1362 | 1270 | ||
1363 | /* | 1271 | if (sched_feat(WAKEUP_OVERLAP) && sync && |
1364 | * preemption test can be made between sibling entities who are in the | 1272 | se->avg_overlap < sysctl_sched_migration_cost && |
1365 | * same cfs_rq i.e who have a common parent. Walk up the hierarchy of | 1273 | pse->avg_overlap < sysctl_sched_migration_cost) { |
1366 | * both tasks until we find their ancestors who are siblings of common | 1274 | resched_task(curr); |
1367 | * parent. | 1275 | return; |
1368 | */ | ||
1369 | |||
1370 | /* First walk up until both entities are at same depth */ | ||
1371 | se_depth = depth_se(se); | ||
1372 | pse_depth = depth_se(pse); | ||
1373 | |||
1374 | while (se_depth > pse_depth) { | ||
1375 | se_depth--; | ||
1376 | se = parent_entity(se); | ||
1377 | } | ||
1378 | |||
1379 | while (pse_depth > se_depth) { | ||
1380 | pse_depth--; | ||
1381 | pse = parent_entity(pse); | ||
1382 | } | ||
1383 | |||
1384 | while (!is_same_group(se, pse)) { | ||
1385 | se = parent_entity(se); | ||
1386 | pse = parent_entity(pse); | ||
1387 | } | 1276 | } |
1388 | 1277 | ||
1389 | if (wakeup_preempt_entity(se, pse) == 1) | 1278 | delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime; |
1279 | if (delta_exec > wakeup_gran(pse)) | ||
1390 | resched_task(curr); | 1280 | resched_task(curr); |
1391 | } | 1281 | } |
1392 | 1282 | ||
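check_preempt_wakeup() loses the hierarchy walk and the vruntime-based wakeup_preempt_entity() test. In their place: an early bail-out if TIF_NEED_RESCHED is already set, an immediate preemption for synchronous wakeups between tasks whose avg_overlap is below sysctl_sched_migration_cost (the new WAKEUP_OVERLAP feature, default off per the sched_features.h hunk below), and otherwise a comparison of how long the current task has run since it last started against the wakee's wakeup granularity. The standalone sketch below restates that decision with invented numbers; the real wakeup_gran() scales the granularity by task weight rather than returning a constant.

#include <stdbool.h>
#include <stdio.h>

struct se {
	long long sum_exec_runtime;
	long long prev_sum_exec_runtime;
	long long avg_overlap;
};

static long long wakeup_gran(const struct se *pse)
{
	return 10000000LL;	/* pretend: a flat 10ms granularity */
}

static bool should_preempt(const struct se *curr, const struct se *wakee,
			   int sync, long long migration_cost, bool wakeup_overlap)
{
	/* sync wakeups between tightly coupled tasks preempt right away */
	if (wakeup_overlap && sync &&
	    curr->avg_overlap < migration_cost &&
	    wakee->avg_overlap < migration_cost)
		return true;

	/* otherwise, only once the running task has used up a granularity */
	return curr->sum_exec_runtime - curr->prev_sum_exec_runtime > wakeup_gran(wakee);
}

int main(void)
{
	struct se curr  = { .sum_exec_runtime = 25000000,
			    .prev_sum_exec_runtime = 20000000,	/* ran 5ms so far */
			    .avg_overlap = 100000 };
	struct se wakee = { .avg_overlap = 120000 };

	printf("sync, pipe-like pair : %d\n", should_preempt(&curr, &wakee, 1, 500000, true));
	printf("plain wakeup         : %d\n", should_preempt(&curr, &wakee, 0, 500000, true));
	return 0;
}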
@@ -1445,19 +1335,9 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) | |||
1445 | if (next == &cfs_rq->tasks) | 1335 | if (next == &cfs_rq->tasks) |
1446 | return NULL; | 1336 | return NULL; |
1447 | 1337 | ||
1448 | /* Skip over entities that are not tasks */ | 1338 | se = list_entry(next, struct sched_entity, group_node); |
1449 | do { | 1339 | p = task_of(se); |
1450 | se = list_entry(next, struct sched_entity, group_node); | 1340 | cfs_rq->balance_iterator = next->next; |
1451 | next = next->next; | ||
1452 | } while (next != &cfs_rq->tasks && !entity_is_task(se)); | ||
1453 | |||
1454 | if (next == &cfs_rq->tasks) | ||
1455 | return NULL; | ||
1456 | |||
1457 | cfs_rq->balance_iterator = next; | ||
1458 | |||
1459 | if (entity_is_task(se)) | ||
1460 | p = task_of(se); | ||
1461 | 1341 | ||
1462 | return p; | 1342 | return p; |
1463 | } | 1343 | } |
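With the earlier account_entity_enqueue()/account_entity_dequeue() change, only task entities ever sit on cfs_rq->tasks, so the balance iterator no longer needs the skip loop: every list node converts straight back to a task. The snippet below only demonstrates that list_entry()/container_of() pattern with local toy structs; it is not the kernel's list implementation.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct sched_entity { struct list_head group_node; };
struct task_struct { int pid; struct sched_entity se; };

static struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

int main(void)
{
	struct task_struct t = { .pid = 42 };
	struct list_head *node = &t.se.group_node;

	/* what __load_balance_iterator() now does for every node, unconditionally */
	struct sched_entity *se = list_entry(node, struct sched_entity, group_node);

	printf("recovered pid %d\n", task_of(se)->pid);
	return 0;
}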
@@ -1507,7 +1387,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1507 | rcu_read_lock(); | 1387 | rcu_read_lock(); |
1508 | update_h_load(busiest_cpu); | 1388 | update_h_load(busiest_cpu); |
1509 | 1389 | ||
1510 | list_for_each_entry(tg, &task_groups, list) { | 1390 | list_for_each_entry_rcu(tg, &task_groups, list) { |
1511 | struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; | 1391 | struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; |
1512 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; | 1392 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; |
1513 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; | 1393 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; |
@@ -1620,10 +1500,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) | |||
1620 | * 'current' within the tree based on its new key value. | 1500 | * 'current' within the tree based on its new key value. |
1621 | */ | 1501 | */ |
1622 | swap(curr->vruntime, se->vruntime); | 1502 | swap(curr->vruntime, se->vruntime); |
1503 | resched_task(rq->curr); | ||
1623 | } | 1504 | } |
1624 | 1505 | ||
1625 | enqueue_task_fair(rq, p, 0); | 1506 | enqueue_task_fair(rq, p, 0); |
1626 | resched_task(rq->curr); | ||
1627 | } | 1507 | } |
1628 | 1508 | ||
1629 | /* | 1509 | /* |
@@ -1642,7 +1522,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p, | |||
1642 | if (p->prio > oldprio) | 1522 | if (p->prio > oldprio) |
1643 | resched_task(rq->curr); | 1523 | resched_task(rq->curr); |
1644 | } else | 1524 | } else |
1645 | check_preempt_curr(rq, p); | 1525 | check_preempt_curr(rq, p, 0); |
1646 | } | 1526 | } |
1647 | 1527 | ||
1648 | /* | 1528 | /* |
@@ -1659,7 +1539,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p, | |||
1659 | if (running) | 1539 | if (running) |
1660 | resched_task(rq->curr); | 1540 | resched_task(rq->curr); |
1661 | else | 1541 | else |
1662 | check_preempt_curr(rq, p); | 1542 | check_preempt_curr(rq, p, 0); |
1663 | } | 1543 | } |
1664 | 1544 | ||
1665 | /* Account for a task changing its policy or group. | 1545 | /* Account for a task changing its policy or group. |
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 9353ca78154e..7c9e8f4a049f 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -11,3 +11,4 @@ SCHED_FEAT(ASYM_GRAN, 1) | |||
11 | SCHED_FEAT(LB_BIAS, 1) | 11 | SCHED_FEAT(LB_BIAS, 1) |
12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) | 12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) |
13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) | 13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
14 | SCHED_FEAT(WAKEUP_OVERLAP, 0) | ||
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index 3a4f92dbbe66..dec4ccabe2f5 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c | |||
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sync) | |||
14 | /* | 14 | /* |
15 | * Idle tasks are unconditionally rescheduled: | 15 | * Idle tasks are unconditionally rescheduled: |
16 | */ | 16 | */ |
17 | static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p) | 17 | static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync) |
18 | { | 18 | { |
19 | resched_task(rq->idle); | 19 | resched_task(rq->idle); |
20 | } | 20 | } |
@@ -76,7 +76,7 @@ static void switched_to_idle(struct rq *rq, struct task_struct *p, | |||
76 | if (running) | 76 | if (running) |
77 | resched_task(rq->curr); | 77 | resched_task(rq->curr); |
78 | else | 78 | else |
79 | check_preempt_curr(rq, p); | 79 | check_preempt_curr(rq, p, 0); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void prio_changed_idle(struct rq *rq, struct task_struct *p, | 82 | static void prio_changed_idle(struct rq *rq, struct task_struct *p, |
@@ -93,7 +93,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p, | |||
93 | if (p->prio > oldprio) | 93 | if (p->prio > oldprio) |
94 | resched_task(rq->curr); | 94 | resched_task(rq->curr); |
95 | } else | 95 | } else |
96 | check_preempt_curr(rq, p); | 96 | check_preempt_curr(rq, p, 0); |
97 | } | 97 | } |
98 | 98 | ||
99 | /* | 99 | /* |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 1113157b2058..cdf5740ab03e 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -102,12 +102,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se); | |||
102 | 102 | ||
103 | static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | 103 | static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) |
104 | { | 104 | { |
105 | struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; | ||
105 | struct sched_rt_entity *rt_se = rt_rq->rt_se; | 106 | struct sched_rt_entity *rt_se = rt_rq->rt_se; |
106 | 107 | ||
107 | if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) { | 108 | if (rt_rq->rt_nr_running) { |
108 | struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; | 109 | if (rt_se && !on_rt_rq(rt_se)) |
109 | 110 | enqueue_rt_entity(rt_se); | |
110 | enqueue_rt_entity(rt_se); | ||
111 | if (rt_rq->highest_prio < curr->prio) | 111 | if (rt_rq->highest_prio < curr->prio) |
112 | resched_task(curr); | 112 | resched_task(curr); |
113 | } | 113 | } |
@@ -231,6 +231,9 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) | |||
231 | #endif /* CONFIG_RT_GROUP_SCHED */ | 231 | #endif /* CONFIG_RT_GROUP_SCHED */ |
232 | 232 | ||
233 | #ifdef CONFIG_SMP | 233 | #ifdef CONFIG_SMP |
234 | /* | ||
235 | * We ran out of runtime, see if we can borrow some from our neighbours. | ||
236 | */ | ||
234 | static int do_balance_runtime(struct rt_rq *rt_rq) | 237 | static int do_balance_runtime(struct rt_rq *rt_rq) |
235 | { | 238 | { |
236 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 239 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
@@ -250,9 +253,18 @@ static int do_balance_runtime(struct rt_rq *rt_rq) | |||
250 | continue; | 253 | continue; |
251 | 254 | ||
252 | spin_lock(&iter->rt_runtime_lock); | 255 | spin_lock(&iter->rt_runtime_lock); |
256 | /* | ||
257 | * Either all rqs have inf runtime and there's nothing to steal | ||
258 | * or __disable_runtime() below sets a specific rq to inf to | ||
259 | * indicate it's been disabled and disallow stealing. | ||
260 | */ | ||
253 | if (iter->rt_runtime == RUNTIME_INF) | 261 | if (iter->rt_runtime == RUNTIME_INF) |
254 | goto next; | 262 | goto next; |
255 | 263 | ||
264 | /* | ||
265 | * From runqueues with spare time, take 1/n part of their | ||
266 | * spare time, but no more than our period. | ||
267 | */ | ||
256 | diff = iter->rt_runtime - iter->rt_time; | 268 | diff = iter->rt_runtime - iter->rt_time; |
257 | if (diff > 0) { | 269 | if (diff > 0) { |
258 | diff = div_u64((u64)diff, weight); | 270 | diff = div_u64((u64)diff, weight); |
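The new comments spell out the borrowing rule: take 1/n of each neighbour's unused runtime (n being the number of runqueues in the span), and never let the local budget grow beyond its own period. A back-of-the-envelope rendering with made-up figures:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period     = 1000000000ULL;	/* 1s RT period, in ns */
	uint64_t my_runtime =  950000000ULL;	/* our current budget */
	uint64_t iter_spare =  400000000ULL;	/* neighbour: rt_runtime - rt_time */
	unsigned int weight = 4;		/* runqueues in the root-domain span */

	uint64_t diff = iter_spare / weight;	/* the 1/n share: 100ms */

	if (my_runtime + diff > period)		/* but never past our own period */
		diff = period - my_runtime;

	printf("borrow %llu ns, budget becomes %llu ns\n",
	       (unsigned long long)diff,
	       (unsigned long long)(my_runtime + diff));
	return 0;
}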
@@ -274,6 +286,9 @@ next: | |||
274 | return more; | 286 | return more; |
275 | } | 287 | } |
276 | 288 | ||
289 | /* | ||
290 | * Ensure this RQ takes back all the runtime it lent to its neighbours. | ||
291 | */ | ||
277 | static void __disable_runtime(struct rq *rq) | 292 | static void __disable_runtime(struct rq *rq) |
278 | { | 293 | { |
279 | struct root_domain *rd = rq->rd; | 294 | struct root_domain *rd = rq->rd; |
@@ -289,17 +304,33 @@ static void __disable_runtime(struct rq *rq) | |||
289 | 304 | ||
290 | spin_lock(&rt_b->rt_runtime_lock); | 305 | spin_lock(&rt_b->rt_runtime_lock); |
291 | spin_lock(&rt_rq->rt_runtime_lock); | 306 | spin_lock(&rt_rq->rt_runtime_lock); |
307 | /* | ||
308 | * Either we're all inf and nobody needs to borrow, or we're | ||
309 | * already disabled and thus have nothing to do, or we have | ||
310 | * exactly the right amount of runtime to take out. | ||
311 | */ | ||
292 | if (rt_rq->rt_runtime == RUNTIME_INF || | 312 | if (rt_rq->rt_runtime == RUNTIME_INF || |
293 | rt_rq->rt_runtime == rt_b->rt_runtime) | 313 | rt_rq->rt_runtime == rt_b->rt_runtime) |
294 | goto balanced; | 314 | goto balanced; |
295 | spin_unlock(&rt_rq->rt_runtime_lock); | 315 | spin_unlock(&rt_rq->rt_runtime_lock); |
296 | 316 | ||
317 | /* | ||
318 | * Calculate the difference between what we started out with | ||
319 | * and what we currently have, that's the amount of runtime | ||
320 | * we lent and now have to reclaim. | ||
321 | */ | ||
297 | want = rt_b->rt_runtime - rt_rq->rt_runtime; | 322 | want = rt_b->rt_runtime - rt_rq->rt_runtime; |
298 | 323 | ||
324 | /* | ||
325 | * Greedy reclaim, take back as much as we can. | ||
326 | */ | ||
299 | for_each_cpu_mask(i, rd->span) { | 327 | for_each_cpu_mask(i, rd->span) { |
300 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 328 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
301 | s64 diff; | 329 | s64 diff; |
302 | 330 | ||
331 | /* | ||
332 | * Can't reclaim from ourselves or disabled runqueues. | ||
333 | */ | ||
303 | if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) | 334 | if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) |
304 | continue; | 335 | continue; |
305 | 336 | ||
@@ -319,8 +350,16 @@ static void __disable_runtime(struct rq *rq) | |||
319 | } | 350 | } |
320 | 351 | ||
321 | spin_lock(&rt_rq->rt_runtime_lock); | 352 | spin_lock(&rt_rq->rt_runtime_lock); |
353 | /* | ||
354 | * We cannot be left wanting - that would mean some runtime | ||
355 | * leaked out of the system. | ||
356 | */ | ||
322 | BUG_ON(want); | 357 | BUG_ON(want); |
323 | balanced: | 358 | balanced: |
359 | /* | ||
360 | * Disable all the borrow logic by pretending we have inf | ||
361 | * runtime - in which case borrowing doesn't make sense. | ||
362 | */ | ||
324 | rt_rq->rt_runtime = RUNTIME_INF; | 363 | rt_rq->rt_runtime = RUNTIME_INF; |
325 | spin_unlock(&rt_rq->rt_runtime_lock); | 364 | spin_unlock(&rt_rq->rt_runtime_lock); |
326 | spin_unlock(&rt_b->rt_runtime_lock); | 365 | spin_unlock(&rt_b->rt_runtime_lock); |
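__disable_runtime()'s new comments describe the inverse operation: work out how much was lent out ("want"), greedily pull it back from the neighbours, and finally park the runqueue at RUNTIME_INF so nobody borrows from it again. The loop body that performs the actual take-back is outside this hunk, so the following is a reconstruction of the accounting with toy numbers, not the kernel code itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t base_runtime = 950000000LL;		/* rt_b->rt_runtime */
	int64_t my_runtime   = 700000000LL;		/* we lent 250ms away */
	int64_t neighbour[2] = { 1100000000LL, 1050000000LL };	/* they hold the surplus */

	int64_t want = base_runtime - my_runtime;	/* 250ms to reclaim */

	for (int i = 0; i < 2 && want > 0; i++) {
		int64_t diff = neighbour[i] - base_runtime;	/* their surplus over base */

		if (diff > want)
			diff = want;
		if (diff > 0) {
			neighbour[i] -= diff;
			want -= diff;
		}
	}

	/* the kernel asserts this with BUG_ON(want): nothing may leak */
	printf("still wanting: %lld ns\n", (long long)want);
	return 0;
}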
@@ -343,6 +382,9 @@ static void __enable_runtime(struct rq *rq) | |||
343 | if (unlikely(!scheduler_running)) | 382 | if (unlikely(!scheduler_running)) |
344 | return; | 383 | return; |
345 | 384 | ||
385 | /* | ||
386 | * Reset each runqueue's bandwidth settings | ||
387 | */ | ||
346 | for_each_leaf_rt_rq(rt_rq, rq) { | 388 | for_each_leaf_rt_rq(rt_rq, rq) { |
347 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 389 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
348 | 390 | ||
@@ -389,7 +431,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
389 | int i, idle = 1; | 431 | int i, idle = 1; |
390 | cpumask_t span; | 432 | cpumask_t span; |
391 | 433 | ||
392 | if (rt_b->rt_runtime == RUNTIME_INF) | 434 | if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) |
393 | return 1; | 435 | return 1; |
394 | 436 | ||
395 | span = sched_rt_period_mask(); | 437 | span = sched_rt_period_mask(); |
@@ -487,6 +529,9 @@ static void update_curr_rt(struct rq *rq) | |||
487 | curr->se.exec_start = rq->clock; | 529 | curr->se.exec_start = rq->clock; |
488 | cpuacct_charge(curr, delta_exec); | 530 | cpuacct_charge(curr, delta_exec); |
489 | 531 | ||
532 | if (!rt_bandwidth_enabled()) | ||
533 | return; | ||
534 | |||
490 | for_each_sched_rt_entity(rt_se) { | 535 | for_each_sched_rt_entity(rt_se) { |
491 | rt_rq = rt_rq_of_se(rt_se); | 536 | rt_rq = rt_rq_of_se(rt_se); |
492 | 537 | ||
@@ -784,7 +829,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | |||
784 | /* | 829 | /* |
785 | * Preempt the current task with a newly woken task if needed: | 830 | * Preempt the current task with a newly woken task if needed: |
786 | */ | 831 | */ |
787 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) | 832 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync) |
788 | { | 833 | { |
789 | if (p->prio < rq->curr->prio) { | 834 | if (p->prio < rq->curr->prio) { |
790 | resched_task(rq->curr); | 835 | resched_task(rq->curr); |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index bd7034542399..cb01cd8f919b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -235,7 +235,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { | 236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { |
237 | cpu_set(cpu, tick_broadcast_mask); | 237 | cpu_set(cpu, tick_broadcast_mask); |
238 | if (bc->mode == TICKDEV_MODE_PERIODIC) | 238 | if (tick_broadcast_device.mode == |
239 | TICKDEV_MODE_PERIODIC) | ||
239 | clockevents_shutdown(dev); | 240 | clockevents_shutdown(dev); |
240 | } | 241 | } |
241 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) | 242 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) |
@@ -245,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
245 | if (!tick_broadcast_force && | 246 | if (!tick_broadcast_force && |
246 | cpu_isset(cpu, tick_broadcast_mask)) { | 247 | cpu_isset(cpu, tick_broadcast_mask)) { |
247 | cpu_clear(cpu, tick_broadcast_mask); | 248 | cpu_clear(cpu, tick_broadcast_mask); |
248 | if (bc->mode == TICKDEV_MODE_PERIODIC) | 249 | if (tick_broadcast_device.mode == |
250 | TICKDEV_MODE_PERIODIC) | ||
249 | tick_setup_periodic(dev, 0); | 251 | tick_setup_periodic(dev, 0); |
250 | } | 252 | } |
251 | break; | 253 | break; |
diff --git a/kernel/user.c b/kernel/user.c index 865ecf57a096..39d6159fae43 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -169,7 +169,7 @@ static ssize_t cpu_rt_runtime_show(struct kobject *kobj, | |||
169 | { | 169 | { |
170 | struct user_struct *up = container_of(kobj, struct user_struct, kobj); | 170 | struct user_struct *up = container_of(kobj, struct user_struct, kobj); |
171 | 171 | ||
172 | return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg)); | 172 | return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg)); |
173 | } | 173 | } |
174 | 174 | ||
175 | static ssize_t cpu_rt_runtime_store(struct kobject *kobj, | 175 | static ssize_t cpu_rt_runtime_store(struct kobject *kobj, |
@@ -180,7 +180,7 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj, | |||
180 | unsigned long rt_runtime; | 180 | unsigned long rt_runtime; |
181 | int rc; | 181 | int rc; |
182 | 182 | ||
183 | sscanf(buf, "%lu", &rt_runtime); | 183 | sscanf(buf, "%ld", &rt_runtime); |
184 | 184 | ||
185 | rc = sched_group_set_rt_runtime(up->tg, rt_runtime); | 185 | rc = sched_group_set_rt_runtime(up->tg, rt_runtime); |
186 | 186 | ||
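The %lu to %ld switch above matters because the per-user rt_runtime value can legitimately be negative — a -1 style "no limit" encoding is the assumption behind this example — and with the unsigned conversion it would both print and parse as a huge positive number. Trivial demonstration:

#include <stdio.h>

int main(void)
{
	long rt_runtime = -1;	/* "unlimited", if that is how the API encodes it */

	printf("printed with %%lu: %lu\n", (unsigned long)rt_runtime);
	printf("printed with %%ld: %ld\n", rt_runtime);
	return 0;
}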
@@ -515,7 +515,7 @@ size_t ksize(const void *block) | |||
515 | 515 | ||
516 | sp = (struct slob_page *)virt_to_page(block); | 516 | sp = (struct slob_page *)virt_to_page(block); |
517 | if (slob_page(sp)) | 517 | if (slob_page(sp)) |
518 | return ((slob_t *)block - 1)->units + SLOB_UNIT; | 518 | return (((slob_t *)block - 1)->units - 1) * SLOB_UNIT; |
519 | else | 519 | else |
520 | return sp->page.private; | 520 | return sp->page.private; |
521 | } | 521 | } |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 418cd7dbbc93..8e0de6a5e18a 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -1986,11 +1986,13 @@ static void read_markers(const char *fname) | |||
1986 | 1986 | ||
1987 | mod = find_module(modname); | 1987 | mod = find_module(modname); |
1988 | if (!mod) { | 1988 | if (!mod) { |
1989 | if (is_vmlinux(modname)) | ||
1990 | have_vmlinux = 1; | ||
1991 | mod = new_module(NOFAIL(strdup(modname))); | 1989 | mod = new_module(NOFAIL(strdup(modname))); |
1992 | mod->skip = 1; | 1990 | mod->skip = 1; |
1993 | } | 1991 | } |
1992 | if (is_vmlinux(modname)) { | ||
1993 | have_vmlinux = 1; | ||
1994 | mod->skip = 0; | ||
1995 | } | ||
1994 | 1996 | ||
1995 | if (!mod->skip) | 1997 | if (!mod->skip) |
1996 | add_marker(mod, marker, fmt); | 1998 | add_marker(mod, marker, fmt); |
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index d11a8154500f..8551952ef329 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -2737,6 +2737,7 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, | |||
2737 | if (ctx == NULL) | 2737 | if (ctx == NULL) |
2738 | goto netlbl_secattr_to_sid_return; | 2738 | goto netlbl_secattr_to_sid_return; |
2739 | 2739 | ||
2740 | context_init(&ctx_new); | ||
2740 | ctx_new.user = ctx->user; | 2741 | ctx_new.user = ctx->user; |
2741 | ctx_new.role = ctx->role; | 2742 | ctx_new.role = ctx->role; |
2742 | ctx_new.type = ctx->type; | 2743 | ctx_new.type = ctx->type; |
@@ -2745,13 +2746,9 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, | |||
2745 | if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat, | 2746 | if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat, |
2746 | secattr->attr.mls.cat) != 0) | 2747 | secattr->attr.mls.cat) != 0) |
2747 | goto netlbl_secattr_to_sid_return; | 2748 | goto netlbl_secattr_to_sid_return; |
2748 | ctx_new.range.level[1].cat.highbit = | 2749 | memcpy(&ctx_new.range.level[1].cat, |
2749 | ctx_new.range.level[0].cat.highbit; | 2750 | &ctx_new.range.level[0].cat, |
2750 | ctx_new.range.level[1].cat.node = | 2751 | sizeof(ctx_new.range.level[0].cat)); |
2751 | ctx_new.range.level[0].cat.node; | ||
2752 | } else { | ||
2753 | ebitmap_init(&ctx_new.range.level[0].cat); | ||
2754 | ebitmap_init(&ctx_new.range.level[1].cat); | ||
2755 | } | 2752 | } |
2756 | if (mls_context_isvalid(&policydb, &ctx_new) != 1) | 2753 | if (mls_context_isvalid(&policydb, &ctx_new) != 1) |
2757 | goto netlbl_secattr_to_sid_return_cleanup; | 2754 | goto netlbl_secattr_to_sid_return_cleanup; |