733 files changed, 34006 insertions, 18105 deletions
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl index b0756d0fd579..8bca1d5cec09 100644 --- a/Documentation/DocBook/tracepoint.tmpl +++ b/Documentation/DocBook/tracepoint.tmpl | |||
@@ -86,4 +86,9 @@ | |||
86 | !Iinclude/trace/events/irq.h | 86 | !Iinclude/trace/events/irq.h |
87 | </chapter> | 87 | </chapter> |
88 | 88 | ||
89 | <chapter id="signal"> | ||
90 | <title>SIGNAL</title> | ||
91 | !Iinclude/trace/events/signal.h | ||
92 | </chapter> | ||
93 | |||
89 | </book> | 94 | </book> |
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt index 187bbf10c923..8608fd85e921 100644 --- a/Documentation/RCU/trace.txt +++ b/Documentation/RCU/trace.txt | |||
@@ -1,185 +1,10 @@ | |||
1 | CONFIG_RCU_TRACE debugfs Files and Formats | 1 | CONFIG_RCU_TRACE debugfs Files and Formats |
2 | 2 | ||
3 | 3 | ||
4 | The rcupreempt and rcutree implementations of RCU provide debugfs trace | 4 | The rcutree implementation of RCU provides debugfs trace output that |
5 | output that summarizes counters and state. This information is useful for | 5 | summarizes counters and state. This information is useful for debugging |
6 | debugging RCU itself, and can sometimes also help to debug abuses of RCU. | 6 | RCU itself, and can sometimes also help to debug abuses of RCU. |
7 | Note that the rcuclassic implementation of RCU does not provide debugfs | 7 | The following sections describe the debugfs files and formats. |
8 | trace output. | ||
9 | |||
10 | The following sections describe the debugfs files and formats for | ||
11 | preemptable RCU (rcupreempt) and hierarchical RCU (rcutree). | ||
12 | |||
13 | |||
14 | Preemptable RCU debugfs Files and Formats | ||
15 | |||
16 | This implementation of RCU provides three debugfs files under the | ||
17 | top-level directory RCU: rcu/rcuctrs (which displays the per-CPU | ||
18 | counters used by preemptable RCU) rcu/rcugp (which displays grace-period | ||
19 | counters), and rcu/rcustats (which internal counters for debugging RCU). | ||
20 | |||
21 | The output of "cat rcu/rcuctrs" looks as follows: | ||
22 | |||
23 | CPU last cur F M | ||
24 | 0 5 -5 0 0 | ||
25 | 1 -1 0 0 0 | ||
26 | 2 0 1 0 0 | ||
27 | 3 0 1 0 0 | ||
28 | 4 0 1 0 0 | ||
29 | 5 0 1 0 0 | ||
30 | 6 0 2 0 0 | ||
31 | 7 0 -1 0 0 | ||
32 | 8 0 1 0 0 | ||
33 | ggp = 26226, state = waitzero | ||
34 | |||
35 | The per-CPU fields are as follows: | ||
36 | |||
37 | o "CPU" gives the CPU number. Offline CPUs are not displayed. | ||
38 | |||
39 | o "last" gives the value of the counter that is being decremented | ||
40 | for the current grace period phase. In the example above, | ||
41 | the counters sum to 4, indicating that there are still four | ||
42 | RCU read-side critical sections still running that started | ||
43 | before the last counter flip. | ||
44 | |||
45 | o "cur" gives the value of the counter that is currently being | ||
46 | both incremented (by rcu_read_lock()) and decremented (by | ||
47 | rcu_read_unlock()). In the example above, the counters sum to | ||
48 | 1, indicating that there is only one RCU read-side critical section | ||
49 | still running that started after the last counter flip. | ||
50 | |||
51 | o "F" indicates whether RCU is waiting for this CPU to acknowledge | ||
52 | a counter flip. In the above example, RCU is not waiting on any, | ||
53 | which is consistent with the state being "waitzero" rather than | ||
54 | "waitack". | ||
55 | |||
56 | o "M" indicates whether RCU is waiting for this CPU to execute a | ||
57 | memory barrier. In the above example, RCU is not waiting on any, | ||
58 | which is consistent with the state being "waitzero" rather than | ||
59 | "waitmb". | ||
60 | |||
61 | o "ggp" is the global grace-period counter. | ||
62 | |||
63 | o "state" is the RCU state, which can be one of the following: | ||
64 | |||
65 | o "idle": there is no grace period in progress. | ||
66 | |||
67 | o "waitack": RCU just incremented the global grace-period | ||
68 | counter, which has the effect of reversing the roles of | ||
69 | the "last" and "cur" counters above, and is waiting for | ||
70 | all the CPUs to acknowledge the flip. Once the flip has | ||
71 | been acknowledged, CPUs will no longer be incrementing | ||
72 | what are now the "last" counters, so that their sum will | ||
73 | decrease monotonically down to zero. | ||
74 | |||
75 | o "waitzero": RCU is waiting for the sum of the "last" counters | ||
76 | to decrease to zero. | ||
77 | |||
78 | o "waitmb": RCU is waiting for each CPU to execute a memory | ||
79 | barrier, which ensures that instructions from a given CPU's | ||
80 | last RCU read-side critical section cannot be reordered | ||
81 | with instructions following the memory-barrier instruction. | ||
82 | |||
83 | The output of "cat rcu/rcugp" looks as follows: | ||
84 | |||
85 | oldggp=48870 newggp=48873 | ||
86 | |||
87 | Note that reading from this file provokes a synchronize_rcu(). The | ||
88 | "oldggp" value is that of "ggp" from rcu/rcuctrs above, taken before | ||
89 | executing the synchronize_rcu(), and the "newggp" value is also the | ||
90 | "ggp" value, but taken after the synchronize_rcu() command returns. | ||
91 | |||
92 | |||
93 | The output of "cat rcu/rcugp" looks as follows: | ||
94 | |||
95 | na=1337955 nl=40 wa=1337915 wl=44 da=1337871 dl=0 dr=1337871 di=1337871 | ||
96 | 1=50989 e1=6138 i1=49722 ie1=82 g1=49640 a1=315203 ae1=265563 a2=49640 | ||
97 | z1=1401244 ze1=1351605 z2=49639 m1=5661253 me1=5611614 m2=49639 | ||
98 | |||
99 | These are counters tracking internal preemptable-RCU events, however, | ||
100 | some of them may be useful for debugging algorithms using RCU. In | ||
101 | particular, the "nl", "wl", and "dl" values track the number of RCU | ||
102 | callbacks in various states. The fields are as follows: | ||
103 | |||
104 | o "na" is the total number of RCU callbacks that have been enqueued | ||
105 | since boot. | ||
106 | |||
107 | o "nl" is the number of RCU callbacks waiting for the previous | ||
108 | grace period to end so that they can start waiting on the next | ||
109 | grace period. | ||
110 | |||
111 | o "wa" is the total number of RCU callbacks that have started waiting | ||
112 | for a grace period since boot. "na" should be roughly equal to | ||
113 | "nl" plus "wa". | ||
114 | |||
115 | o "wl" is the number of RCU callbacks currently waiting for their | ||
116 | grace period to end. | ||
117 | |||
118 | o "da" is the total number of RCU callbacks whose grace periods | ||
119 | have completed since boot. "wa" should be roughly equal to | ||
120 | "wl" plus "da". | ||
121 | |||
122 | o "dr" is the total number of RCU callbacks that have been removed | ||
123 | from the list of callbacks ready to invoke. "dr" should be roughly | ||
124 | equal to "da". | ||
125 | |||
126 | o "di" is the total number of RCU callbacks that have been invoked | ||
127 | since boot. "di" should be roughly equal to "da", though some | ||
128 | early versions of preemptable RCU had a bug so that only the | ||
129 | last CPU's count of invocations was displayed, rather than the | ||
130 | sum of all CPU's counts. | ||
131 | |||
132 | o "1" is the number of calls to rcu_try_flip(). This should be | ||
133 | roughly equal to the sum of "e1", "i1", "a1", "z1", and "m1" | ||
134 | described below. In other words, the number of times that | ||
135 | the state machine is visited should be equal to the sum of the | ||
136 | number of times that each state is visited plus the number of | ||
137 | times that the state-machine lock acquisition failed. | ||
138 | |||
139 | o "e1" is the number of times that rcu_try_flip() was unable to | ||
140 | acquire the fliplock. | ||
141 | |||
142 | o "i1" is the number of calls to rcu_try_flip_idle(). | ||
143 | |||
144 | o "ie1" is the number of times rcu_try_flip_idle() exited early | ||
145 | due to the calling CPU having no work for RCU. | ||
146 | |||
147 | o "g1" is the number of times that rcu_try_flip_idle() decided | ||
148 | to start a new grace period. "i1" should be roughly equal to | ||
149 | "ie1" plus "g1". | ||
150 | |||
151 | o "a1" is the number of calls to rcu_try_flip_waitack(). | ||
152 | |||
153 | o "ae1" is the number of times that rcu_try_flip_waitack() found | ||
154 | that at least one CPU had not yet acknowledge the new grace period | ||
155 | (AKA "counter flip"). | ||
156 | |||
157 | o "a2" is the number of time rcu_try_flip_waitack() found that | ||
158 | all CPUs had acknowledged. "a1" should be roughly equal to | ||
159 | "ae1" plus "a2". (This particular output was collected on | ||
160 | a 128-CPU machine, hence the smaller-than-usual fraction of | ||
161 | calls to rcu_try_flip_waitack() finding all CPUs having already | ||
162 | acknowledged.) | ||
163 | |||
164 | o "z1" is the number of calls to rcu_try_flip_waitzero(). | ||
165 | |||
166 | o "ze1" is the number of times that rcu_try_flip_waitzero() found | ||
167 | that not all of the old RCU read-side critical sections had | ||
168 | completed. | ||
169 | |||
170 | o "z2" is the number of times that rcu_try_flip_waitzero() finds | ||
171 | the sum of the counters equal to zero, in other words, that | ||
172 | all of the old RCU read-side critical sections had completed. | ||
173 | The value of "z1" should be roughly equal to "ze1" plus | ||
174 | "z2". | ||
175 | |||
176 | o "m1" is the number of calls to rcu_try_flip_waitmb(). | ||
177 | |||
178 | o "me1" is the number of times that rcu_try_flip_waitmb() finds | ||
179 | that at least one CPU has not yet executed a memory barrier. | ||
180 | |||
181 | o "m2" is the number of times that rcu_try_flip_waitmb() finds that | ||
182 | all CPUs have executed a memory barrier. | ||
183 | 8 | ||
184 | 9 | ||
185 | Hierarchical RCU debugfs Files and Formats | 10 | Hierarchical RCU debugfs Files and Formats |
@@ -210,9 +35,10 @@ rcu_bh: | |||
210 | 6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 35 | 6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 |
211 | 7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 | 36 | 7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 |
212 | 37 | ||
213 | The first section lists the rcu_data structures for rcu, the second for | 38 | The first section lists the rcu_data structures for rcu_sched, the second |
214 | rcu_bh. Each section has one line per CPU, or eight for this 8-CPU system. | 39 | for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an |
215 | The fields are as follows: | 40 | additional section for rcu_preempt. Each section has one line per CPU, |
41 | or eight for this 8-CPU system. The fields are as follows: | ||
216 | 42 | ||
217 | o The number at the beginning of each line is the CPU number. | 43 | o The number at the beginning of each line is the CPU number. |
218 | CPU numbers followed by an exclamation mark are offline, | 44 | CPU numbers followed by an exclamation mark are offline, |
@@ -223,9 +49,9 @@ o The number at the beginning of each line is the CPU number. | |||
223 | 49 | ||
224 | o "c" is the count of grace periods that this CPU believes have | 50 | o "c" is the count of grace periods that this CPU believes have |
225 | completed. CPUs in dynticks idle mode may lag quite a ways | 51 | completed. CPUs in dynticks idle mode may lag quite a ways |
226 | behind, for example, CPU 4 under "rcu" above, which has slept | 52 | behind, for example, CPU 4 under "rcu_sched" above, which has |
227 | through the past 25 RCU grace periods. It is not unusual to | 53 | slept through the past 25 RCU grace periods. It is not unusual |
228 | see CPUs lagging by thousands of grace periods. | 54 | to see CPUs lagging by thousands of grace periods. |
229 | 55 | ||
230 | o "g" is the count of grace periods that this CPU believes have | 56 | o "g" is the count of grace periods that this CPU believes have |
231 | started. Again, CPUs in dynticks idle mode may lag behind. | 57 | started. Again, CPUs in dynticks idle mode may lag behind. |
@@ -308,8 +134,10 @@ The output of "cat rcu/rcugp" looks as follows: | |||
308 | rcu_sched: completed=33062 gpnum=33063 | 134 | rcu_sched: completed=33062 gpnum=33063 |
309 | rcu_bh: completed=464 gpnum=464 | 135 | rcu_bh: completed=464 gpnum=464 |
310 | 136 | ||
311 | Again, this output is for both "rcu" and "rcu_bh". The fields are | 137 | Again, this output is for both "rcu_sched" and "rcu_bh". Note that |
312 | taken from the rcu_state structure, and are as follows: | 138 | kernels built with CONFIG_TREE_PREEMPT_RCU will have an additional |
139 | "rcu_preempt" line. The fields are taken from the rcu_state structure, | ||
140 | and are as follows: | ||
313 | 141 | ||
314 | o "completed" is the number of grace periods that have completed. | 142 | o "completed" is the number of grace periods that have completed. |
315 | It is comparable to the "c" field from rcu/rcudata in that a | 143 | It is comparable to the "c" field from rcu/rcudata in that a |
@@ -324,23 +152,24 @@ o "gpnum" is the number of grace periods that have started. It is | |||
324 | If these two fields are equal (as they are for "rcu_bh" above), | 152 | If these two fields are equal (as they are for "rcu_bh" above), |
325 | then there is no grace period in progress, in other words, RCU | 153 | then there is no grace period in progress, in other words, RCU |
326 | is idle. On the other hand, if the two fields differ (as they | 154 | is idle. On the other hand, if the two fields differ (as they |
327 | do for "rcu" above), then an RCU grace period is in progress. | 155 | do for "rcu_sched" above), then an RCU grace period is in progress. |
328 | 156 | ||
329 | 157 | ||
330 | The output of "cat rcu/rcuhier" looks as follows, with very long lines: | 158 | The output of "cat rcu/rcuhier" looks as follows, with very long lines: |
331 | 159 | ||
332 | c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 | 160 | c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 oqlen=0 |
333 | 1/1 0:127 ^0 | 161 | 1/1 .>. 0:127 ^0 |
334 | 3/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3 | 162 | 3/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3 |
335 | 3/3f 0:5 ^0 2/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3 | 163 | 3/3f .>. 0:5 ^0 2/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3 |
336 | rcu_bh: | 164 | rcu_bh: |
337 | c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 | 165 | c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 oqlen=0 |
338 | 0/1 0:127 ^0 | 166 | 0/1 .>. 0:127 ^0 |
339 | 0/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3 | 167 | 0/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3 |
340 | 0/3f 0:5 ^0 0/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3 | 168 | 0/3f .>. 0:5 ^0 0/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3 |
341 | 169 | ||
342 | This is once again split into "rcu" and "rcu_bh" portions. The fields are | 170 | This is once again split into "rcu_sched" and "rcu_bh" portions, |
343 | as follows: | 171 | and CONFIG_TREE_PREEMPT_RCU kernels will again have an additional |
172 | "rcu_preempt" section. The fields are as follows: | ||
344 | 173 | ||
345 | o "c" is exactly the same as "completed" under rcu/rcugp. | 174 | o "c" is exactly the same as "completed" under rcu/rcugp. |
346 | 175 | ||
@@ -372,6 +201,11 @@ o "fqlh" is the number of calls to force_quiescent_state() that | |||
372 | exited immediately (without even being counted in nfqs above) | 201 | exited immediately (without even being counted in nfqs above) |
373 | due to contention on ->fqslock. | 202 | due to contention on ->fqslock. |
374 | 203 | ||
204 | o "oqlen" is the number of callbacks on the "orphan" callback | ||
205 | list. RCU callbacks are placed on this list by CPUs going | ||
206 | offline, and are "adopted" either by the CPU helping the outgoing | ||
207 | CPU or by the next rcu_barrier*() call, whichever comes first. | ||
208 | |||
375 | o Each element of the form "1/1 0:127 ^0" represents one struct | 209 | o Each element of the form "1/1 0:127 ^0" represents one struct |
376 | rcu_node. Each line represents one level of the hierarchy, from | 210 | rcu_node. Each line represents one level of the hierarchy, from |
377 | root to leaves. It is best to think of the rcu_data structures | 211 | root to leaves. It is best to think of the rcu_data structures |
@@ -379,7 +213,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct | |||
379 | might be either one, two, or three levels of rcu_node structures, | 213 | might be either one, two, or three levels of rcu_node structures, |
380 | depending on the relationship between CONFIG_RCU_FANOUT and | 214 | depending on the relationship between CONFIG_RCU_FANOUT and |
381 | CONFIG_NR_CPUS. | 215 | CONFIG_NR_CPUS. |
382 | 216 | ||
383 | o The numbers separated by the "/" are the qsmask followed | 217 | o The numbers separated by the "/" are the qsmask followed |
384 | by the qsmaskinit. The qsmask will have one bit | 218 | by the qsmaskinit. The qsmask will have one bit |
385 | set for each entity in the next lower level that | 219 | set for each entity in the next lower level that |
@@ -389,10 +223,19 @@ o Each element of the form "1/1 0:127 ^0" represents one struct | |||
389 | The value of qsmaskinit is assigned to that of qsmask | 223 | The value of qsmaskinit is assigned to that of qsmask |
390 | at the beginning of each grace period. | 224 | at the beginning of each grace period. |
391 | 225 | ||
392 | For example, for "rcu", the qsmask of the first entry | 226 | For example, for "rcu_sched", the qsmask of the first |
393 | of the lowest level is 0x14, meaning that we are still | 227 | entry of the lowest level is 0x14, meaning that we |
394 | waiting for CPUs 2 and 4 to check in for the current | 228 | are still waiting for CPUs 2 and 4 to check in for the |
395 | grace period. | 229 | current grace period. |
230 | |||
231 | o The characters separated by the ">" indicate the state | ||
232 | of the blocked-tasks lists. A "T" preceding the ">" | ||
233 | indicates that at least one task blocked in an RCU | ||
234 | read-side critical section blocks the current grace | ||
235 | period, while a "." preceding the ">" indicates otherwise. | ||
236 | The character following the ">" indicates similarly for | ||
237 | the next grace period. A "T" should appear in this | ||
238 | field only for rcu-preempt. | ||
396 | 239 | ||
397 | o The numbers separated by the ":" are the range of CPUs | 240 | o The numbers separated by the ":" are the range of CPUs |
398 | served by this struct rcu_node. This can be helpful | 241 | served by this struct rcu_node. This can be helpful |
@@ -431,8 +274,9 @@ rcu_bh: | |||
431 | 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 | 274 | 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 |
432 | 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 | 275 | 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 |
433 | 276 | ||
434 | As always, this is once again split into "rcu" and "rcu_bh" portions. | 277 | As always, this is once again split into "rcu_sched" and "rcu_bh" |
435 | The fields are as follows: | 278 | portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional |
279 | "rcu_preempt" section. The fields are as follows: | ||
436 | 280 | ||
437 | o "np" is the number of times that __rcu_pending() has been invoked | 281 | o "np" is the number of times that __rcu_pending() has been invoked |
438 | for the corresponding flavor of RCU. | 282 | for the corresponding flavor of RCU. |
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index e41a7fecf0d3..d542ca243b80 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt | |||
@@ -830,7 +830,7 @@ sched: Critical sections Grace period Barrier | |||
830 | SRCU: Critical sections Grace period Barrier | 830 | SRCU: Critical sections Grace period Barrier |
831 | 831 | ||
832 | srcu_read_lock synchronize_srcu N/A | 832 | srcu_read_lock synchronize_srcu N/A |
833 | srcu_read_unlock | 833 | srcu_read_unlock synchronize_srcu_expedited |
834 | 834 | ||
835 | SRCU: Initialization/cleanup | 835 | SRCU: Initialization/cleanup |
836 | init_srcu_struct | 836 | init_srcu_struct |
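For reference, a minimal usage sketch of the SRCU API listed above (an illustrative sketch only, not part of the patch; it assumes a statically allocated srcu_struct called "my_srcu", initialized with init_srcu_struct(), and omits error handling):

	#include <linux/srcu.h>

	static struct srcu_struct my_srcu;

	/* Reader side: enter and leave an SRCU read-side critical section. */
	int idx = srcu_read_lock(&my_srcu);
	/* ... dereference SRCU-protected pointers here ... */
	srcu_read_unlock(&my_srcu, idx);

	/* Updater side: wait for all pre-existing readers to finish. */
	synchronize_srcu(&my_srcu);		/* normal grace period */
	synchronize_srcu_expedited(&my_srcu);	/* expedited variant shown above */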
diff --git a/Documentation/dontdiff b/Documentation/dontdiff index e1efc400bed6..e151b2a36267 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff | |||
@@ -65,6 +65,7 @@ aicdb.h* | |||
65 | asm-offsets.h | 65 | asm-offsets.h |
66 | asm_offsets.h | 66 | asm_offsets.h |
67 | autoconf.h* | 67 | autoconf.h* |
68 | av_permissions.h | ||
68 | bbootsect | 69 | bbootsect |
69 | bin2c | 70 | bin2c |
70 | binkernel.spec | 71 | binkernel.spec |
@@ -95,12 +96,14 @@ docproc | |||
95 | elf2ecoff | 96 | elf2ecoff |
96 | elfconfig.h* | 97 | elfconfig.h* |
97 | fixdep | 98 | fixdep |
99 | flask.h | ||
98 | fore200e_mkfirm | 100 | fore200e_mkfirm |
99 | fore200e_pca_fw.c* | 101 | fore200e_pca_fw.c* |
100 | gconf | 102 | gconf |
101 | gen-devlist | 103 | gen-devlist |
102 | gen_crc32table | 104 | gen_crc32table |
103 | gen_init_cpio | 105 | gen_init_cpio |
106 | genheaders | ||
104 | genksyms | 107 | genksyms |
105 | *_gray256.c | 108 | *_gray256.c |
106 | ihex2fw | 109 | ihex2fw |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index bc693fffabe0..f613df8ec7bf 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -6,6 +6,21 @@ be removed from this file. | |||
6 | 6 | ||
7 | --------------------------- | 7 | --------------------------- |
8 | 8 | ||
9 | What: USER_SCHED | ||
10 | When: 2.6.34 | ||
11 | |||
12 | Why: USER_SCHED was implemented as a proof of concept for group scheduling. | ||
13 | The effect of USER_SCHED can already be achieved from userspace with | ||
14 | the help of libcgroup. The removal of USER_SCHED will also simplify | ||
15 | the scheduler code with the removal of one major ifdef. There are also | ||
16 | issues USER_SCHED has with USER_NS. A decision was taken not to fix | ||
17 | those and instead remove USER_SCHED. In addition, new group-scheduling | ||
18 | features will not be implemented for USER_SCHED. | ||
19 | |||
20 | Who: Dhaval Giani <dhaval@linux.vnet.ibm.com> | ||
21 | |||
22 | --------------------------- | ||
23 | |||
9 | What: PRISM54 | 24 | What: PRISM54 |
10 | When: 2.6.34 | 25 | When: 2.6.34 |
11 | 26 | ||
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 2c48f945546b..4af0018533f2 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -1072,7 +1072,8 @@ second). The meanings of the columns are as follows, from left to right: | |||
1072 | - irq: servicing interrupts | 1072 | - irq: servicing interrupts |
1073 | - softirq: servicing softirqs | 1073 | - softirq: servicing softirqs |
1074 | - steal: involuntary wait | 1074 | - steal: involuntary wait |
1075 | - guest: running a guest | 1075 | - guest: running a normal guest |
1076 | - guest_nice: running a niced guest (see the example below) | ||
1076 | 1077 | ||
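For illustration only (the counts below are made up), a cpu line ending with the new guest_nice column might look like:

	cpu  4705 356 584 3699176 23527 22 3 0 521 7

with the columns being user, nice, system, idle, iowait, irq, softirq, steal, guest and guest_nice.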
1077 | The "intr" line gives counts of interrupts serviced since boot time, for each | 1078 | The "intr" line gives counts of interrupts serviced since boot time, for each |
1078 | of the possible system interrupts. The first column is the total of all | 1079 | of the possible system interrupts. The first column is the total of all |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 9107b387e91f..fce5b5e516cc 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -85,7 +85,6 @@ parameter is applicable: | |||
85 | PPT Parallel port support is enabled. | 85 | PPT Parallel port support is enabled. |
86 | PS2 Appropriate PS/2 support is enabled. | 86 | PS2 Appropriate PS/2 support is enabled. |
87 | RAM RAM disk support is enabled. | 87 | RAM RAM disk support is enabled. |
88 | ROOTPLUG The example Root Plug LSM is enabled. | ||
89 | S390 S390 architecture is enabled. | 88 | S390 S390 architecture is enabled. |
90 | SCSI Appropriate SCSI support is enabled. | 89 | SCSI Appropriate SCSI support is enabled. |
91 | A lot of drivers has their options described inside of | 90 | A lot of drivers has their options described inside of |
@@ -345,6 +344,15 @@ and is between 256 and 4096 characters. It is defined in the file | |||
345 | Change the amount of debugging information output | 344 | Change the amount of debugging information output |
346 | when initialising the APIC and IO-APIC components. | 345 | when initialising the APIC and IO-APIC components. |
347 | 346 | ||
347 | show_lapic= [APIC,X86] Advanced Programmable Interrupt Controller | ||
348 | Limit APIC dumping. The parameter defines the maximum | ||
349 | number of local APICs to be dumped. It can also be set | ||
350 | to "all", meaning there is no limit. | ||
351 | Format: { 1 (default) | 2 | ... | all }. | ||
352 | The parameter is only valid if apic=debug or | ||
353 | apic=verbose is specified. | ||
354 | Example: apic=debug show_lapic=all | ||
355 | |||
348 | apm= [APM] Advanced Power Management | 356 | apm= [APM] Advanced Power Management |
349 | See header of arch/x86/kernel/apm_32.c. | 357 | See header of arch/x86/kernel/apm_32.c. |
350 | 358 | ||
@@ -779,6 +787,13 @@ and is between 256 and 4096 characters. It is defined in the file | |||
779 | by the set_ftrace_notrace file in the debugfs | 787 | by the set_ftrace_notrace file in the debugfs |
780 | tracing directory. | 788 | tracing directory. |
781 | 789 | ||
790 | ftrace_graph_filter=[function-list] | ||
791 | [FTRACE] Limit the top-level caller functions traced by | ||
792 | the function graph tracer at boot time. | ||
793 | function-list is a comma-separated list of functions | ||
794 | that can be changed at run time via the | ||
795 | set_graph_function file in the debugfs tracing directory. | ||
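An illustrative use (the function names here are only an example, not taken from this patch): booting with ftrace_graph_filter=do_sys_open,sys_nanosleep restricts boot-time function-graph tracing to those call trees.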
796 | |||
782 | gamecon.map[2|3]= | 797 | gamecon.map[2|3]= |
783 | [HW,JOY] Multisystem joystick and NES/SNES/PSX pad | 798 | [HW,JOY] Multisystem joystick and NES/SNES/PSX pad |
784 | support via parallel port (up to 5 devices per port) | 799 | support via parallel port (up to 5 devices per port) |
@@ -2032,8 +2047,15 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2032 | 2047 | ||
2033 | print-fatal-signals= | 2048 | print-fatal-signals= |
2034 | [KNL] debug: print fatal signals | 2049 | [KNL] debug: print fatal signals |
2035 | print-fatal-signals=1: print segfault info to | 2050 | |
2036 | the kernel console. | 2051 | If enabled, warn about various signal-handling-related
2052 | application anomalies: too many signals, too many | ||
2053 | POSIX.1 timers, fatal signals causing a core dump, | ||
2054 | and so on. | ||
2055 | |||
2056 | If you hit the warning due to signal overflow, | ||
2057 | you might want to try "ulimit -i unlimited". | ||
2058 | |||
2037 | default: off. | 2059 | default: off. |
2038 | 2060 | ||
2039 | printk.time= Show timing data prefixed to each printk message line | 2061 | printk.time= Show timing data prefixed to each printk message line |
@@ -2164,15 +2186,6 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2164 | Useful for devices that are detected asynchronously | 2186 | Useful for devices that are detected asynchronously |
2165 | (e.g. USB and MMC devices). | 2187 | (e.g. USB and MMC devices). |
2166 | 2188 | ||
2167 | root_plug.vendor_id= | ||
2168 | [ROOTPLUG] Override the default vendor ID | ||
2169 | |||
2170 | root_plug.product_id= | ||
2171 | [ROOTPLUG] Override the default product ID | ||
2172 | |||
2173 | root_plug.debug= | ||
2174 | [ROOTPLUG] Enable debugging output | ||
2175 | |||
2176 | rw [KNL] Mount root device read-write on boot | 2189 | rw [KNL] Mount root device read-write on boot |
2177 | 2190 | ||
2178 | S [KNL] Run init in single mode | 2191 | S [KNL] Run init in single mode |
@@ -2182,6 +2195,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2182 | 2195 | ||
2183 | sbni= [NET] Granch SBNI12 leased line adapter | 2196 | sbni= [NET] Granch SBNI12 leased line adapter |
2184 | 2197 | ||
2198 | sched_debug [KNL] Enables verbose scheduler debug messages. | ||
2199 | |||
2185 | sc1200wdt= [HW,WDT] SC1200 WDT (watchdog) driver | 2200 | sc1200wdt= [HW,WDT] SC1200 WDT (watchdog) driver |
2186 | Format: <io>[,<timeout>[,<isapnp>]] | 2201 | Format: <io>[,<timeout>[,<isapnp>]] |
2187 | 2202 | ||
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt index 059934363caf..446f43b309df 100644 --- a/Documentation/pcmcia/driver-changes.txt +++ b/Documentation/pcmcia/driver-changes.txt | |||
@@ -1,5 +1,17 @@ | |||
1 | This file details changes in 2.6 which affect PCMCIA card driver authors: | 1 | This file details changes in 2.6 which affect PCMCIA card driver authors: |
2 | 2 | ||
3 | * no cs_error / CS_CHECK / CONFIG_PCMCIA_DEBUG (as of 2.6.33) | ||
4 | Instead of the cs_error() callback or the CS_CHECK() macro, please use | ||
5 | Linux-style checking of return values, and -- if necessary -- debug | ||
6 | messages using "dev_dbg()" or "pr_debug()". | ||
7 | |||
8 | * New CIS tuple access (as of 2.6.33) | ||
9 | Instead of pcmcia_get_{first,next}_tuple(), pcmcia_get_tuple_data() and | ||
10 | pcmcia_parse_tuple(), a driver shall use "pcmcia_get_tuple()" if it is | ||
11 | only interested in one (raw) tuple, or "pcmcia_loop_tuple()" if it is | ||
12 | interested in all tuples of one type. To decode the MAC from CISTPL_FUNCE, | ||
13 | a new helper "pcmcia_get_mac_from_cis()" was added. | ||
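A minimal sketch of the single-tuple helper (illustrative only, not from this patch; it assumes pcmcia_get_tuple() allocates the buffer, returns its length in bytes or 0 on failure, and leaves the caller to kfree() the buffer):

	unsigned char *buf;
	size_t len;

	len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
	if (len) {
		/* parse the raw CISTPL_VERS_1 tuple in buf[0..len-1] */
		kfree(buf);
	}

Here p_dev is the driver's struct pcmcia_device pointer.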
14 | |||
3 | * New configuration loop helper (as of 2.6.28) | 15 | * New configuration loop helper (as of 2.6.28) |
4 | By calling pcmcia_loop_config(), a driver can iterate over all available | 16 | By calling pcmcia_loop_config(), a driver can iterate over all available |
5 | configuration options. During a driver's probe() phase, one doesn't need | 17 | configuration options. During a driver's probe() phase, one doesn't need |
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index 52bc31433723..9dbf4470c7e1 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt | |||
@@ -279,9 +279,9 @@ The slow-work thread pool has a number of configurables: | |||
279 | VIEWING EXECUTING AND QUEUED ITEMS | 279 | VIEWING EXECUTING AND QUEUED ITEMS |
280 | ================================== | 280 | ================================== |
281 | 281 | ||
282 | If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available: | 282 | If CONFIG_SLOW_WORK_DEBUG is enabled, a debugfs file is made available: |
283 | 283 | ||
284 | /proc/slow_work_rq | 284 | /sys/kernel/debug/slow_work/runqueue |
285 | 285 | ||
286 | through which the list of work items being executed and the queues of items to | 286 | through which the list of work items being executed and the queues of items to |
287 | be executed may be viewed. The owner of a work item is given the chance to | 287 | be executed may be viewed. The owner of a work item is given the chance to |
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt index 7003e10f10f5..641a1ef2a7ff 100644 --- a/Documentation/trace/ftrace-design.txt +++ b/Documentation/trace/ftrace-design.txt | |||
@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option. | |||
213 | <details to be filled> | 213 | <details to be filled> |
214 | 214 | ||
215 | 215 | ||
216 | HAVE_FTRACE_SYSCALLS | 216 | HAVE_SYSCALL_TRACEPOINTS |
217 | --------------------- | 217 | --------------------- |
218 | 218 | ||
219 | <details to be filled> | 219 | You need very few things to get syscall tracing working on an architecture:
220 | |||
221 | - Have a NR_syscalls variable in <asm/unistd.h> that provides the number | ||
222 | of syscalls supported by the arch. | ||
223 | - Implement arch_syscall_addr() that resolves a syscall address from a | ||
224 | syscall number (see the sketch after this list). | ||
225 | - Support the TIF_SYSCALL_TRACEPOINT thread flag. | ||
226 | - Put the trace_sys_enter() and trace_sys_exit() tracepoint calls from ptrace | ||
227 | in the ptrace syscall tracing path. | ||
228 | - Tag this arch as HAVE_SYSCALL_TRACEPOINTS. | ||
220 | 229 | ||
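As a rough illustration (not part of this patch), on an architecture that keeps its syscall entry points in a flat sys_call_table[] indexed by syscall number, arch_syscall_addr() can be as simple as:

	unsigned long __init arch_syscall_addr(int nr)
	{
		return (unsigned long)sys_call_table[nr];
	}

The exact type of sys_call_table is architecture-specific, so the lookup and cast may need adjusting.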
221 | 230 | ||
222 | HAVE_FTRACE_MCOUNT_RECORD | 231 | HAVE_FTRACE_MCOUNT_RECORD |
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt new file mode 100644 index 000000000000..47aabeebbdf6 --- /dev/null +++ b/Documentation/trace/kprobetrace.txt | |||
@@ -0,0 +1,149 @@ | |||
1 | Kprobe-based Event Tracing | ||
2 | ========================== | ||
3 | |||
4 | Documentation is written by Masami Hiramatsu | ||
5 | |||
6 | |||
7 | Overview | ||
8 | -------- | ||
9 | These events are similar to tracepoint-based events. Instead of tracepoints, | ||
10 | they are based on kprobes (kprobe and kretprobe), so they can probe wherever | ||
11 | kprobes can probe (that is, the body of any function except __kprobes | ||
12 | functions). Unlike tracepoint-based events, these can be added and removed | ||
13 | dynamically, on the fly. | ||
14 | |||
15 | To enable this feature, build your kernel with CONFIG_KPROBE_TRACING=y. | ||
16 | |||
17 | Similar to the event tracer, this doesn't need to be activated via | ||
18 | current_tracer. Instead, add probe points via | ||
19 | /sys/kernel/debug/tracing/kprobe_events, and enable them via | ||
20 | /sys/kernel/debug/tracing/events/kprobes/<EVENT>/enable. | ||
21 | |||
22 | |||
23 | Synopsis of kprobe_events | ||
24 | ------------------------- | ||
25 | p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe | ||
26 | r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe | ||
27 | |||
28 | GRP : Group name. If omitted, use "kprobes" for it. | ||
29 | EVENT : Event name. If omitted, the event name is generated | ||
30 | based on SYMBOL+offs or MEMADDR. | ||
31 | SYMBOL[+offs] : Symbol+offset where the probe is inserted. | ||
32 | MEMADDR : Address where the probe is inserted. | ||
33 | |||
34 | FETCHARGS : Arguments. Each probe can have up to 128 args. | ||
35 | %REG : Fetch register REG | ||
36 | @ADDR : Fetch memory at ADDR (ADDR should be in kernel) | ||
37 | @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) | ||
38 | $stackN : Fetch Nth entry of stack (N >= 0) | ||
39 | $stack : Fetch stack address. | ||
40 | $argN : Fetch function argument. (N >= 0)(*) | ||
41 | $retval : Fetch return value.(**) | ||
42 | +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***) | ||
43 | NAME=FETCHARG: Set NAME as the argument name of FETCHARG. | ||
44 | |||
45 | (*) $argN may not be correct for asmlinkage functions, or in the middle | ||
46 | of a function body. | ||
47 | (**) only for return probe. | ||
48 | (***) this is useful for fetching a field of data structures. | ||
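For example, a purely hypothetical probe that assumes the field of interest lives at offset 8 from the address held in the second argument:

	p:myprobe2 do_sys_open field=+8($arg1)

would record the memory word at $arg1 + 8 each time do_sys_open() is entered.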
49 | |||
50 | |||
51 | Per-Probe Event Filtering | ||
52 | ------------------------- | ||
53 | The per-probe event filtering feature allows you to set a different filter on | ||
54 | each probe and to control which arguments are shown in the trace buffer. If | ||
55 | an event name is specified right after 'p:' or 'r:' in kprobe_events, an event | ||
56 | is added under tracing/events/kprobes/<EVENT>; in that directory you can see | ||
57 | 'enable', 'id', 'format' and 'filter'. | ||
58 | |||
59 | enable: | ||
60 | You can enable/disable the probe by writing 1 or 0 to it. | ||
61 | |||
62 | format: | ||
63 | This shows the format of this probe event. | ||
64 | |||
65 | filter: | ||
66 | You can write filtering rules of this event. | ||
67 | |||
68 | id: | ||
69 | This shows the id of this probe event. | ||
70 | |||
71 | |||
72 | Event Profiling | ||
73 | --------------- | ||
74 | You can check the total number of probe hits and probe miss-hits via | ||
75 | /sys/kernel/debug/tracing/kprobe_profile. | ||
76 | The first column is event name, the second is the number of probe hits, | ||
77 | the third is the number of probe miss-hits. | ||
78 | |||
79 | |||
80 | Usage examples | ||
81 | -------------- | ||
82 | To add a probe as a new event, write a new definition to kprobe_events | ||
83 | as below. | ||
84 | |||
85 | echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events | ||
86 | |||
87 | This sets a kprobe at the top of the do_sys_open() function, recording the | ||
88 | 1st to 4th arguments as the "myprobe" event. As this example shows, users can | ||
89 | choose more familiar names for each argument. | ||
90 | |||
91 | echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events | ||
92 | |||
93 | This sets a kretprobe at the return point of the do_sys_open() function, | ||
94 | recording the return value as the "myretprobe" event. | ||
95 | You can see the format of these events via | ||
96 | /sys/kernel/debug/tracing/events/kprobes/<EVENT>/format. | ||
97 | |||
98 | cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format | ||
99 | name: myprobe | ||
100 | ID: 75 | ||
101 | format: | ||
102 | field:unsigned short common_type; offset:0; size:2; | ||
103 | field:unsigned char common_flags; offset:2; size:1; | ||
104 | field:unsigned char common_preempt_count; offset:3; size:1; | ||
105 | field:int common_pid; offset:4; size:4; | ||
106 | field:int common_tgid; offset:8; size:4; | ||
107 | |||
108 | field: unsigned long ip; offset:16;tsize:8; | ||
109 | field: int nargs; offset:24;tsize:4; | ||
110 | field: unsigned long dfd; offset:32;tsize:8; | ||
111 | field: unsigned long filename; offset:40;tsize:8; | ||
112 | field: unsigned long flags; offset:48;tsize:8; | ||
113 | field: unsigned long mode; offset:56;tsize:8; | ||
114 | |||
115 | print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode | ||
116 | |||
117 | |||
118 | You can see that the event has 4 arguments as in the expressions you specified. | ||
119 | |||
120 | echo > /sys/kernel/debug/tracing/kprobe_events | ||
121 | |||
122 | This clears all probe points. | ||
123 | |||
124 | Right after definition, each event is disabled by default. To trace these | ||
125 | events, you need to enable them. | ||
126 | |||
127 | echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable | ||
128 | echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable | ||
129 | |||
130 | And you can see the traced information via /sys/kernel/debug/tracing/trace. | ||
131 | |||
132 | cat /sys/kernel/debug/tracing/trace | ||
133 | # tracer: nop | ||
134 | # | ||
135 | # TASK-PID CPU# TIMESTAMP FUNCTION | ||
136 | # | | | | | | ||
137 | <...>-1447 [001] 1038282.286875: myprobe: (do_sys_open+0x0/0xd6) dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 | ||
138 | <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) $retval=fffffffffffffffe | ||
139 | <...>-1447 [001] 1038282.286885: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=40413c flags=8000 mode=1b6 | ||
140 | <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $retval=3 | ||
141 | <...>-1447 [001] 1038282.286969: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=4041c6 flags=98800 mode=10 | ||
142 | <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $retval=3 | ||
143 | |||
144 | |||
145 | Each line shows when the kernel hits an event, and <- SYMBOL means the kernel | ||
146 | returns from SYMBOL (e.g. "sys_open+0x1b/0x1d <- do_sys_open" means the kernel | ||
147 | returns from do_sys_open to sys_open+0x1b). | ||
148 | |||
149 | |||
diff --git a/MAINTAINERS b/MAINTAINERS index ad59f178586f..75f771c8579f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -3045,11 +3045,8 @@ S: Maintained | |||
3045 | F: fs/autofs4/ | 3045 | F: fs/autofs4/ |
3046 | 3046 | ||
3047 | KERNEL BUILD | 3047 | KERNEL BUILD |
3048 | M: Sam Ravnborg <sam@ravnborg.org> | ||
3049 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-next.git | ||
3050 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-fixes.git | ||
3051 | L: linux-kbuild@vger.kernel.org | 3048 | L: linux-kbuild@vger.kernel.org |
3052 | S: Maintained | 3049 | S: Orphan |
3053 | F: Documentation/kbuild/ | 3050 | F: Documentation/kbuild/ |
3054 | F: Makefile | 3051 | F: Makefile |
3055 | F: scripts/Makefile.* | 3052 | F: scripts/Makefile.* |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 32 | 3 | SUBLEVEL = 32 |
4 | EXTRAVERSION = -rc8 | 4 | EXTRAVERSION = |
5 | NAME = Man-Eating Seals of Antiquity | 5 | NAME = Man-Eating Seals of Antiquity |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -379,6 +379,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc | |||
379 | PHONY += scripts_basic | 379 | PHONY += scripts_basic |
380 | scripts_basic: | 380 | scripts_basic: |
381 | $(Q)$(MAKE) $(build)=scripts/basic | 381 | $(Q)$(MAKE) $(build)=scripts/basic |
382 | $(Q)rm -f .tmp_quiet_recordmcount | ||
382 | 383 | ||
383 | # To avoid any implicit rule to kick in, define an empty command. | 384 | # To avoid any implicit rule to kick in, define an empty command. |
384 | scripts/basic/%: scripts_basic ; | 385 | scripts/basic/%: scripts_basic ; |
diff --git a/arch/Kconfig b/arch/Kconfig index 7f418bbc261a..eef3bbb97075 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -126,4 +126,11 @@ config HAVE_DMA_API_DEBUG | |||
126 | config HAVE_DEFAULT_NO_SPIN_MUTEXES | 126 | config HAVE_DEFAULT_NO_SPIN_MUTEXES |
127 | bool | 127 | bool |
128 | 128 | ||
129 | config HAVE_HW_BREAKPOINT | ||
130 | bool | ||
131 | depends on HAVE_PERF_EVENTS | ||
132 | select ANON_INODES | ||
133 | select PERF_EVENTS | ||
134 | |||
135 | |||
129 | source "kernel/gcov/Kconfig" | 136 | source "kernel/gcov/Kconfig" |
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 815680b585ed..b3e888638bb7 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h | |||
@@ -61,21 +61,24 @@ register struct thread_info *__current_thread_info __asm__("$8"); | |||
61 | /* | 61 | /* |
62 | * Thread information flags: | 62 | * Thread information flags: |
63 | * - these are process state flags and used from assembly | 63 | * - these are process state flags and used from assembly |
64 | * - pending work-to-be-done flags come first to fit in and immediate operand. | 64 | * - pending work-to-be-done flags come first and must be assigned to be |
65 | * within bits 0 to 7 to fit in an immediate operand. | ||
66 | * - ALPHA_UAC_SHIFT below must be kept consistent with the unaligned | ||
67 | * control flags. | ||
65 | * | 68 | * |
66 | * TIF_SYSCALL_TRACE is known to be 0 via blbs. | 69 | * TIF_SYSCALL_TRACE is known to be 0 via blbs. |
67 | */ | 70 | */ |
68 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | 71 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
69 | #define TIF_SIGPENDING 1 /* signal pending */ | 72 | #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ |
70 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 73 | #define TIF_SIGPENDING 2 /* signal pending */ |
71 | #define TIF_POLLING_NRFLAG 3 /* poll_idle is polling NEED_RESCHED */ | 74 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
72 | #define TIF_DIE_IF_KERNEL 4 /* dik recursion lock */ | 75 | #define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */ |
73 | #define TIF_UAC_NOPRINT 5 /* see sysinfo.h */ | 76 | #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ |
74 | #define TIF_UAC_NOFIX 6 | 77 | #define TIF_UAC_NOPRINT 10 /* see sysinfo.h */ |
75 | #define TIF_UAC_SIGBUS 7 | 78 | #define TIF_UAC_NOFIX 11 |
76 | #define TIF_MEMDIE 8 | 79 | #define TIF_UAC_SIGBUS 12 |
77 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ | 80 | #define TIF_MEMDIE 13 |
78 | #define TIF_NOTIFY_RESUME 10 /* callback before returning to user */ | 81 | #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ |
79 | #define TIF_FREEZE 16 /* is freezing for suspend */ | 82 | #define TIF_FREEZE 16 /* is freezing for suspend */ |
80 | 83 | ||
81 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 84 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
@@ -94,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); | |||
94 | #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ | 97 | #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ |
95 | | _TIF_SYSCALL_TRACE) | 98 | | _TIF_SYSCALL_TRACE) |
96 | 99 | ||
97 | #define ALPHA_UAC_SHIFT 6 | 100 | #define ALPHA_UAC_SHIFT 10 |
98 | #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ | 101 | #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ |
99 | 1 << TIF_UAC_SIGBUS) | 102 | 1 << TIF_UAC_SIGBUS) |
100 | 103 | ||
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 8e059e58b0ac..53dd2f1a53aa 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c | |||
@@ -1103,6 +1103,8 @@ marvel_agp_info(void) | |||
1103 | * Allocate the info structure. | 1103 | * Allocate the info structure. |
1104 | */ | 1104 | */ |
1105 | agp = kmalloc(sizeof(*agp), GFP_KERNEL); | 1105 | agp = kmalloc(sizeof(*agp), GFP_KERNEL); |
1106 | if (!agp) | ||
1107 | return NULL; | ||
1106 | 1108 | ||
1107 | /* | 1109 | /* |
1108 | * Fill it in. | 1110 | * Fill it in. |
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c index 76686497b1e2..219bf271c0ba 100644 --- a/arch/alpha/kernel/core_titan.c +++ b/arch/alpha/kernel/core_titan.c | |||
@@ -757,6 +757,8 @@ titan_agp_info(void) | |||
757 | * Allocate the info structure. | 757 | * Allocate the info structure. |
758 | */ | 758 | */ |
759 | agp = kmalloc(sizeof(*agp), GFP_KERNEL); | 759 | agp = kmalloc(sizeof(*agp), GFP_KERNEL); |
760 | if (!agp) | ||
761 | return NULL; | ||
760 | 762 | ||
761 | /* | 763 | /* |
762 | * Fill it in. | 764 | * Fill it in. |
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index cc7834661427..c0de072b8305 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c | |||
@@ -92,7 +92,7 @@ show_interrupts(struct seq_file *p, void *v) | |||
92 | for_each_online_cpu(j) | 92 | for_each_online_cpu(j) |
93 | seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); | 93 | seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); |
94 | #endif | 94 | #endif |
95 | seq_printf(p, " %14s", irq_desc[irq].chip->typename); | 95 | seq_printf(p, " %14s", irq_desc[irq].chip->name); |
96 | seq_printf(p, " %c%s", | 96 | seq_printf(p, " %c%s", |
97 | (action->flags & IRQF_DISABLED)?'+':' ', | 97 | (action->flags & IRQF_DISABLED)?'+':' ', |
98 | action->name); | 98 | action->name); |
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 38c805dfc544..cfde865b78e0 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c | |||
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = { | |||
228 | }; | 228 | }; |
229 | 229 | ||
230 | static struct irq_chip rtc_irq_type = { | 230 | static struct irq_chip rtc_irq_type = { |
231 | .typename = "RTC", | 231 | .name = "RTC", |
232 | .startup = rtc_startup, | 232 | .startup = rtc_startup, |
233 | .shutdown = rtc_enable_disable, | 233 | .shutdown = rtc_enable_disable, |
234 | .enable = rtc_enable_disable, | 234 | .enable = rtc_enable_disable, |
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c index 50bfec9b588f..83a9ac280890 100644 --- a/arch/alpha/kernel/irq_i8259.c +++ b/arch/alpha/kernel/irq_i8259.c | |||
@@ -84,7 +84,7 @@ i8259a_end_irq(unsigned int irq) | |||
84 | } | 84 | } |
85 | 85 | ||
86 | struct irq_chip i8259a_irq_type = { | 86 | struct irq_chip i8259a_irq_type = { |
87 | .typename = "XT-PIC", | 87 | .name = "XT-PIC", |
88 | .startup = i8259a_startup_irq, | 88 | .startup = i8259a_startup_irq, |
89 | .shutdown = i8259a_disable_irq, | 89 | .shutdown = i8259a_disable_irq, |
90 | .enable = i8259a_enable_irq, | 90 | .enable = i8259a_enable_irq, |
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c index 69199a76ec4a..989ce46a0cf3 100644 --- a/arch/alpha/kernel/irq_pyxis.c +++ b/arch/alpha/kernel/irq_pyxis.c | |||
@@ -71,7 +71,7 @@ pyxis_mask_and_ack_irq(unsigned int irq) | |||
71 | } | 71 | } |
72 | 72 | ||
73 | static struct irq_chip pyxis_irq_type = { | 73 | static struct irq_chip pyxis_irq_type = { |
74 | .typename = "PYXIS", | 74 | .name = "PYXIS", |
75 | .startup = pyxis_startup_irq, | 75 | .startup = pyxis_startup_irq, |
76 | .shutdown = pyxis_disable_irq, | 76 | .shutdown = pyxis_disable_irq, |
77 | .enable = pyxis_enable_irq, | 77 | .enable = pyxis_enable_irq, |
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c index 85229369a1f8..d63e93e1e8bf 100644 --- a/arch/alpha/kernel/irq_srm.c +++ b/arch/alpha/kernel/irq_srm.c | |||
@@ -49,7 +49,7 @@ srm_end_irq(unsigned int irq) | |||
49 | 49 | ||
50 | /* Handle interrupts from the SRM, assuming no additional weirdness. */ | 50 | /* Handle interrupts from the SRM, assuming no additional weirdness. */ |
51 | static struct irq_chip srm_irq_type = { | 51 | static struct irq_chip srm_irq_type = { |
52 | .typename = "SRM", | 52 | .name = "SRM", |
53 | .startup = srm_startup_irq, | 53 | .startup = srm_startup_irq, |
54 | .shutdown = srm_disable_irq, | 54 | .shutdown = srm_disable_irq, |
55 | .enable = srm_enable_irq, | 55 | .enable = srm_enable_irq, |
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 382035ef7394..20a30b8b9655 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c | |||
@@ -90,7 +90,7 @@ alcor_end_irq(unsigned int irq) | |||
90 | } | 90 | } |
91 | 91 | ||
92 | static struct irq_chip alcor_irq_type = { | 92 | static struct irq_chip alcor_irq_type = { |
93 | .typename = "ALCOR", | 93 | .name = "ALCOR", |
94 | .startup = alcor_startup_irq, | 94 | .startup = alcor_startup_irq, |
95 | .shutdown = alcor_disable_irq, | 95 | .shutdown = alcor_disable_irq, |
96 | .enable = alcor_enable_irq, | 96 | .enable = alcor_enable_irq, |
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index ed349436732b..affd0f3f25df 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
@@ -72,7 +72,7 @@ cabriolet_end_irq(unsigned int irq) | |||
72 | } | 72 | } |
73 | 73 | ||
74 | static struct irq_chip cabriolet_irq_type = { | 74 | static struct irq_chip cabriolet_irq_type = { |
75 | .typename = "CABRIOLET", | 75 | .name = "CABRIOLET", |
76 | .startup = cabriolet_startup_irq, | 76 | .startup = cabriolet_startup_irq, |
77 | .shutdown = cabriolet_disable_irq, | 77 | .shutdown = cabriolet_disable_irq, |
78 | .enable = cabriolet_enable_irq, | 78 | .enable = cabriolet_enable_irq, |
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index 46e70ece5176..d64e1e497e76 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -199,7 +199,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) | |||
199 | } | 199 | } |
200 | 200 | ||
201 | static struct irq_chip dp264_irq_type = { | 201 | static struct irq_chip dp264_irq_type = { |
202 | .typename = "DP264", | 202 | .name = "DP264", |
203 | .startup = dp264_startup_irq, | 203 | .startup = dp264_startup_irq, |
204 | .shutdown = dp264_disable_irq, | 204 | .shutdown = dp264_disable_irq, |
205 | .enable = dp264_enable_irq, | 205 | .enable = dp264_enable_irq, |
@@ -210,7 +210,7 @@ static struct irq_chip dp264_irq_type = { | |||
210 | }; | 210 | }; |
211 | 211 | ||
212 | static struct irq_chip clipper_irq_type = { | 212 | static struct irq_chip clipper_irq_type = { |
213 | .typename = "CLIPPER", | 213 | .name = "CLIPPER", |
214 | .startup = clipper_startup_irq, | 214 | .startup = clipper_startup_irq, |
215 | .shutdown = clipper_disable_irq, | 215 | .shutdown = clipper_disable_irq, |
216 | .enable = clipper_enable_irq, | 216 | .enable = clipper_enable_irq, |
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index 660c23ef661f..df2090ce5e7f 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c | |||
@@ -70,7 +70,7 @@ eb64p_end_irq(unsigned int irq) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | static struct irq_chip eb64p_irq_type = { | 72 | static struct irq_chip eb64p_irq_type = { |
73 | .typename = "EB64P", | 73 | .name = "EB64P", |
74 | .startup = eb64p_startup_irq, | 74 | .startup = eb64p_startup_irq, |
75 | .shutdown = eb64p_disable_irq, | 75 | .shutdown = eb64p_disable_irq, |
76 | .enable = eb64p_enable_irq, | 76 | .enable = eb64p_enable_irq, |
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index b99ea488d844..3ca1dbcf4044 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c | |||
@@ -81,7 +81,7 @@ eiger_end_irq(unsigned int irq) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | static struct irq_chip eiger_irq_type = { | 83 | static struct irq_chip eiger_irq_type = { |
84 | .typename = "EIGER", | 84 | .name = "EIGER", |
85 | .startup = eiger_startup_irq, | 85 | .startup = eiger_startup_irq, |
86 | .shutdown = eiger_disable_irq, | 86 | .shutdown = eiger_disable_irq, |
87 | .enable = eiger_enable_irq, | 87 | .enable = eiger_enable_irq, |
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index ef0b83a070ac..7a7ae36fff91 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c | |||
@@ -119,7 +119,7 @@ jensen_local_end(unsigned int irq) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | static struct irq_chip jensen_local_irq_type = { | 121 | static struct irq_chip jensen_local_irq_type = { |
122 | .typename = "LOCAL", | 122 | .name = "LOCAL", |
123 | .startup = jensen_local_startup, | 123 | .startup = jensen_local_startup, |
124 | .shutdown = jensen_local_shutdown, | 124 | .shutdown = jensen_local_shutdown, |
125 | .enable = jensen_local_enable, | 125 | .enable = jensen_local_enable, |
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index bbfc4f20ca72..0bb3b5c4f693 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c | |||
@@ -170,7 +170,7 @@ marvel_irq_noop_return(unsigned int irq) | |||
170 | } | 170 | } |
171 | 171 | ||
172 | static struct irq_chip marvel_legacy_irq_type = { | 172 | static struct irq_chip marvel_legacy_irq_type = { |
173 | .typename = "LEGACY", | 173 | .name = "LEGACY", |
174 | .startup = marvel_irq_noop_return, | 174 | .startup = marvel_irq_noop_return, |
175 | .shutdown = marvel_irq_noop, | 175 | .shutdown = marvel_irq_noop, |
176 | .enable = marvel_irq_noop, | 176 | .enable = marvel_irq_noop, |
@@ -180,7 +180,7 @@ static struct irq_chip marvel_legacy_irq_type = { | |||
180 | }; | 180 | }; |
181 | 181 | ||
182 | static struct irq_chip io7_lsi_irq_type = { | 182 | static struct irq_chip io7_lsi_irq_type = { |
183 | .typename = "LSI", | 183 | .name = "LSI", |
184 | .startup = io7_startup_irq, | 184 | .startup = io7_startup_irq, |
185 | .shutdown = io7_disable_irq, | 185 | .shutdown = io7_disable_irq, |
186 | .enable = io7_enable_irq, | 186 | .enable = io7_enable_irq, |
@@ -190,7 +190,7 @@ static struct irq_chip io7_lsi_irq_type = { | |||
190 | }; | 190 | }; |
191 | 191 | ||
192 | static struct irq_chip io7_msi_irq_type = { | 192 | static struct irq_chip io7_msi_irq_type = { |
193 | .typename = "MSI", | 193 | .name = "MSI", |
194 | .startup = io7_startup_irq, | 194 | .startup = io7_startup_irq, |
195 | .shutdown = io7_disable_irq, | 195 | .shutdown = io7_disable_irq, |
196 | .enable = io7_enable_irq, | 196 | .enable = io7_enable_irq, |
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index 4e366641a08e..ee8865169811 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c | |||
@@ -69,7 +69,7 @@ mikasa_end_irq(unsigned int irq) | |||
69 | } | 69 | } |
70 | 70 | ||
71 | static struct irq_chip mikasa_irq_type = { | 71 | static struct irq_chip mikasa_irq_type = { |
72 | .typename = "MIKASA", | 72 | .name = "MIKASA", |
73 | .startup = mikasa_startup_irq, | 73 | .startup = mikasa_startup_irq, |
74 | .shutdown = mikasa_disable_irq, | 74 | .shutdown = mikasa_disable_irq, |
75 | .enable = mikasa_enable_irq, | 75 | .enable = mikasa_enable_irq, |
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index 35753a173bac..86503fe73a88 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c | |||
@@ -74,7 +74,7 @@ noritake_end_irq(unsigned int irq) | |||
74 | } | 74 | } |
75 | 75 | ||
76 | static struct irq_chip noritake_irq_type = { | 76 | static struct irq_chip noritake_irq_type = { |
77 | .typename = "NORITAKE", | 77 | .name = "NORITAKE", |
78 | .startup = noritake_startup_irq, | 78 | .startup = noritake_startup_irq, |
79 | .shutdown = noritake_disable_irq, | 79 | .shutdown = noritake_disable_irq, |
80 | .enable = noritake_enable_irq, | 80 | .enable = noritake_enable_irq, |
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index f3aec7e085c8..26c322bf89ee 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c | |||
@@ -136,7 +136,7 @@ rawhide_end_irq(unsigned int irq) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | static struct irq_chip rawhide_irq_type = { | 138 | static struct irq_chip rawhide_irq_type = { |
139 | .typename = "RAWHIDE", | 139 | .name = "RAWHIDE", |
140 | .startup = rawhide_startup_irq, | 140 | .startup = rawhide_startup_irq, |
141 | .shutdown = rawhide_disable_irq, | 141 | .shutdown = rawhide_disable_irq, |
142 | .enable = rawhide_enable_irq, | 142 | .enable = rawhide_enable_irq, |
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index d9f9cfeb9931..8de1046fe91e 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c | |||
@@ -66,7 +66,7 @@ ruffian_init_irq(void) | |||
66 | common_init_isa_dma(); | 66 | common_init_isa_dma(); |
67 | } | 67 | } |
68 | 68 | ||
69 | #define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) | 69 | #define RUFFIAN_LATCH DIV_ROUND_CLOSEST(PIT_TICK_RATE, HZ) |
70 | 70 | ||
71 | static void __init | 71 | static void __init |
72 | ruffian_init_rtc(void) | 72 | ruffian_init_rtc(void) |
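The RUFFIAN_LATCH hunk above swaps an open-coded round-to-nearest division, (PIT_TICK_RATE + HZ / 2) / HZ, for the DIV_ROUND_CLOSEST() helper; for positive operands the two expressions compute the same value, so the change is a readability cleanup rather than a behavioural one. A minimal user-space sketch of that equivalence (the macro below is a simplified positive-only stand-in for the kernel helper, and the PIT_TICK_RATE/HZ values are illustrative assumptions, not taken from this tree):

    #include <assert.h>
    #include <stdio.h>

    /* Simplified round-to-nearest division for positive operands,
     * mirroring what the kernel's DIV_ROUND_CLOSEST() does in this case. */
    #define DIV_ROUND_CLOSEST(x, divisor)  (((x) + (divisor) / 2) / (divisor))

    #define PIT_TICK_RATE 1193182UL   /* i8253 input clock, illustrative value */
    #define HZ 1024UL                 /* assumed tick rate, illustrative value */

    int main(void)
    {
            unsigned long old_latch = (PIT_TICK_RATE + HZ / 2) / HZ;        /* old form */
            unsigned long new_latch = DIV_ROUND_CLOSEST(PIT_TICK_RATE, HZ); /* new form */

            assert(old_latch == new_latch);   /* same rounding, clearer intent */
            printf("latch = %lu\n", new_latch);
            return 0;
    }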
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index fc9246373452..be161129eab9 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c | |||
@@ -73,7 +73,7 @@ rx164_end_irq(unsigned int irq) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | static struct irq_chip rx164_irq_type = { | 75 | static struct irq_chip rx164_irq_type = { |
76 | .typename = "RX164", | 76 | .name = "RX164", |
77 | .startup = rx164_startup_irq, | 77 | .startup = rx164_startup_irq, |
78 | .shutdown = rx164_disable_irq, | 78 | .shutdown = rx164_disable_irq, |
79 | .enable = rx164_enable_irq, | 79 | .enable = rx164_enable_irq, |
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index 426eb6906d01..b2abe27a23cf 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c | |||
@@ -502,7 +502,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq) | |||
502 | } | 502 | } |
503 | 503 | ||
504 | static struct irq_chip sable_lynx_irq_type = { | 504 | static struct irq_chip sable_lynx_irq_type = { |
505 | .typename = "SABLE/LYNX", | 505 | .name = "SABLE/LYNX", |
506 | .startup = sable_lynx_startup_irq, | 506 | .startup = sable_lynx_startup_irq, |
507 | .shutdown = sable_lynx_disable_irq, | 507 | .shutdown = sable_lynx_disable_irq, |
508 | .enable = sable_lynx_enable_irq, | 508 | .enable = sable_lynx_enable_irq, |
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 830318c21661..230464885b5c 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c | |||
@@ -75,7 +75,7 @@ takara_end_irq(unsigned int irq) | |||
75 | } | 75 | } |
76 | 76 | ||
77 | static struct irq_chip takara_irq_type = { | 77 | static struct irq_chip takara_irq_type = { |
78 | .typename = "TAKARA", | 78 | .name = "TAKARA", |
79 | .startup = takara_startup_irq, | 79 | .startup = takara_startup_irq, |
80 | .shutdown = takara_disable_irq, | 80 | .shutdown = takara_disable_irq, |
81 | .enable = takara_enable_irq, | 81 | .enable = takara_enable_irq, |
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 88978fc60f83..288053342c83 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -195,7 +195,7 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax) | |||
195 | } | 195 | } |
196 | 196 | ||
197 | static struct irq_chip titan_irq_type = { | 197 | static struct irq_chip titan_irq_type = { |
198 | .typename = "TITAN", | 198 | .name = "TITAN", |
199 | .startup = titan_startup_irq, | 199 | .startup = titan_startup_irq, |
200 | .shutdown = titan_disable_irq, | 200 | .shutdown = titan_disable_irq, |
201 | .enable = titan_enable_irq, | 201 | .enable = titan_enable_irq, |
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index e91b4c3838a8..62fd972e18ef 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
@@ -158,7 +158,7 @@ wildfire_end_irq(unsigned int irq) | |||
158 | } | 158 | } |
159 | 159 | ||
160 | static struct irq_chip wildfire_irq_type = { | 160 | static struct irq_chip wildfire_irq_type = { |
161 | .typename = "WILDFIRE", | 161 | .name = "WILDFIRE", |
162 | .startup = wildfire_startup_irq, | 162 | .startup = wildfire_startup_irq, |
163 | .shutdown = wildfire_disable_irq, | 163 | .shutdown = wildfire_disable_irq, |
164 | .enable = wildfire_enable_irq, | 164 | .enable = wildfire_enable_irq, |
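The Alpha sys_*.c hunks above are one mechanical change repeated per machine type: the descriptive string in each struct irq_chip moves from the deprecated .typename field to .name, the field the generic IRQ code reports (for example in /proc/interrupts). A sketch of the resulting initializer shape, using a hypothetical "EXAMPLE" chip and placeholder handlers rather than any of the real ones from the diff:

    #include <linux/irq.h>

    /* Placeholder handlers with the signatures these Alpha files use. */
    static void example_enable_irq(unsigned int irq)  { /* unmask the line */ }
    static void example_disable_irq(unsigned int irq) { /* mask the line */ }

    static unsigned int example_startup_irq(unsigned int irq)
    {
            example_enable_irq(irq);
            return 0;               /* the Alpha variants return 0 after enabling */
    }

    /* Before: static struct irq_chip example_irq_type = { .typename = "EXAMPLE", ... }; */
    /* After: the same string simply moves to .name.                                     */
    static struct irq_chip example_irq_type = {
            .name     = "EXAMPLE",
            .startup  = example_startup_irq,
            .shutdown = example_disable_irq,
            .enable   = example_enable_irq,
    };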
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig index f6aed7747d4d..efa78e144e5c 100644 --- a/arch/arm/configs/h3600_defconfig +++ b/arch/arm/configs/h3600_defconfig | |||
@@ -1,86 +1,189 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.12-rc4 | 3 | # Linux kernel version: 2.6.32-rc5 |
4 | # Thu Jun 9 01:59:03 2005 | 4 | # Sat Oct 24 00:09:30 2009 |
5 | # | 5 | # |
6 | CONFIG_ARM=y | 6 | CONFIG_ARM=y |
7 | CONFIG_MMU=y | 7 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y |
8 | CONFIG_UID16=y | 8 | CONFIG_GENERIC_GPIO=y |
9 | CONFIG_GENERIC_TIME=y | ||
10 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
11 | CONFIG_GENERIC_HARDIRQS=y | ||
12 | CONFIG_STACKTRACE_SUPPORT=y | ||
13 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | ||
14 | CONFIG_LOCKDEP_SUPPORT=y | ||
15 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
16 | CONFIG_HARDIRQS_SW_RESEND=y | ||
17 | CONFIG_GENERIC_IRQ_PROBE=y | ||
9 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 18 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
19 | CONFIG_ARCH_HAS_CPUFREQ=y | ||
20 | CONFIG_GENERIC_HWEIGHT=y | ||
10 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 21 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
11 | CONFIG_GENERIC_IOMAP=y | 22 | CONFIG_ARCH_MTD_XIP=y |
23 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
24 | CONFIG_VECTORS_BASE=0xffff0000 | ||
25 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
26 | CONFIG_CONSTRUCTORS=y | ||
12 | 27 | ||
13 | # | 28 | # |
14 | # Code maturity level options | 29 | # General setup |
15 | # | 30 | # |
16 | CONFIG_EXPERIMENTAL=y | 31 | CONFIG_EXPERIMENTAL=y |
17 | CONFIG_CLEAN_COMPILE=y | ||
18 | CONFIG_BROKEN_ON_SMP=y | 32 | CONFIG_BROKEN_ON_SMP=y |
19 | CONFIG_INIT_ENV_ARG_LIMIT=32 | 33 | CONFIG_INIT_ENV_ARG_LIMIT=32 |
20 | |||
21 | # | ||
22 | # General setup | ||
23 | # | ||
24 | CONFIG_LOCALVERSION="" | 34 | CONFIG_LOCALVERSION="" |
35 | CONFIG_LOCALVERSION_AUTO=y | ||
25 | CONFIG_SWAP=y | 36 | CONFIG_SWAP=y |
26 | CONFIG_SYSVIPC=y | 37 | CONFIG_SYSVIPC=y |
38 | CONFIG_SYSVIPC_SYSCTL=y | ||
27 | # CONFIG_POSIX_MQUEUE is not set | 39 | # CONFIG_POSIX_MQUEUE is not set |
28 | # CONFIG_BSD_PROCESS_ACCT is not set | 40 | # CONFIG_BSD_PROCESS_ACCT is not set |
29 | CONFIG_SYSCTL=y | 41 | # CONFIG_TASKSTATS is not set |
30 | # CONFIG_AUDIT is not set | 42 | # CONFIG_AUDIT is not set |
31 | CONFIG_HOTPLUG=y | 43 | |
32 | CONFIG_KOBJECT_UEVENT=y | 44 | # |
45 | # RCU Subsystem | ||
46 | # | ||
47 | CONFIG_TREE_RCU=y | ||
48 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
49 | # CONFIG_RCU_TRACE is not set | ||
50 | CONFIG_RCU_FANOUT=32 | ||
51 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
52 | # CONFIG_TREE_RCU_TRACE is not set | ||
33 | # CONFIG_IKCONFIG is not set | 53 | # CONFIG_IKCONFIG is not set |
54 | CONFIG_LOG_BUF_SHIFT=14 | ||
55 | # CONFIG_GROUP_SCHED is not set | ||
56 | # CONFIG_CGROUPS is not set | ||
57 | # CONFIG_SYSFS_DEPRECATED_V2 is not set | ||
58 | # CONFIG_RELAY is not set | ||
59 | CONFIG_NAMESPACES=y | ||
60 | # CONFIG_UTS_NS is not set | ||
61 | # CONFIG_IPC_NS is not set | ||
62 | # CONFIG_USER_NS is not set | ||
63 | # CONFIG_PID_NS is not set | ||
64 | # CONFIG_NET_NS is not set | ||
65 | CONFIG_BLK_DEV_INITRD=y | ||
66 | CONFIG_INITRAMFS_SOURCE="" | ||
67 | CONFIG_RD_GZIP=y | ||
68 | CONFIG_RD_BZIP2=y | ||
69 | CONFIG_RD_LZMA=y | ||
70 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | ||
71 | CONFIG_SYSCTL=y | ||
72 | CONFIG_ANON_INODES=y | ||
34 | # CONFIG_EMBEDDED is not set | 73 | # CONFIG_EMBEDDED is not set |
74 | CONFIG_UID16=y | ||
75 | CONFIG_SYSCTL_SYSCALL=y | ||
35 | CONFIG_KALLSYMS=y | 76 | CONFIG_KALLSYMS=y |
36 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 77 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
78 | CONFIG_HOTPLUG=y | ||
37 | CONFIG_PRINTK=y | 79 | CONFIG_PRINTK=y |
38 | CONFIG_BUG=y | 80 | CONFIG_BUG=y |
81 | CONFIG_ELF_CORE=y | ||
39 | CONFIG_BASE_FULL=y | 82 | CONFIG_BASE_FULL=y |
40 | CONFIG_FUTEX=y | 83 | CONFIG_FUTEX=y |
41 | CONFIG_EPOLL=y | 84 | CONFIG_EPOLL=y |
42 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 85 | CONFIG_SIGNALFD=y |
86 | CONFIG_TIMERFD=y | ||
87 | CONFIG_EVENTFD=y | ||
43 | CONFIG_SHMEM=y | 88 | CONFIG_SHMEM=y |
44 | CONFIG_CC_ALIGN_FUNCTIONS=0 | 89 | CONFIG_AIO=y |
45 | CONFIG_CC_ALIGN_LABELS=0 | 90 | |
46 | CONFIG_CC_ALIGN_LOOPS=0 | 91 | # |
47 | CONFIG_CC_ALIGN_JUMPS=0 | 92 | # Kernel Performance Events And Counters |
48 | # CONFIG_TINY_SHMEM is not set | 93 | # |
49 | CONFIG_BASE_SMALL=0 | 94 | CONFIG_VM_EVENT_COUNTERS=y |
95 | CONFIG_SLUB_DEBUG=y | ||
96 | CONFIG_COMPAT_BRK=y | ||
97 | # CONFIG_SLAB is not set | ||
98 | CONFIG_SLUB=y | ||
99 | # CONFIG_SLOB is not set | ||
100 | # CONFIG_PROFILING is not set | ||
101 | CONFIG_HAVE_OPROFILE=y | ||
102 | # CONFIG_KPROBES is not set | ||
103 | CONFIG_HAVE_KPROBES=y | ||
104 | CONFIG_HAVE_KRETPROBES=y | ||
105 | CONFIG_HAVE_CLK=y | ||
50 | 106 | ||
51 | # | 107 | # |
52 | # Loadable module support | 108 | # GCOV-based kernel profiling |
53 | # | 109 | # |
110 | # CONFIG_SLOW_WORK is not set | ||
111 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | ||
112 | CONFIG_SLABINFO=y | ||
113 | CONFIG_RT_MUTEXES=y | ||
114 | CONFIG_BASE_SMALL=0 | ||
54 | CONFIG_MODULES=y | 115 | CONFIG_MODULES=y |
116 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
55 | # CONFIG_MODULE_UNLOAD is not set | 117 | # CONFIG_MODULE_UNLOAD is not set |
56 | CONFIG_OBSOLETE_MODPARM=y | ||
57 | # CONFIG_MODVERSIONS is not set | 118 | # CONFIG_MODVERSIONS is not set |
58 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 119 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
59 | # CONFIG_KMOD is not set | 120 | CONFIG_BLOCK=y |
121 | # CONFIG_LBDAF is not set | ||
122 | # CONFIG_BLK_DEV_BSG is not set | ||
123 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
124 | |||
125 | # | ||
126 | # IO Schedulers | ||
127 | # | ||
128 | CONFIG_IOSCHED_NOOP=y | ||
129 | # CONFIG_IOSCHED_AS is not set | ||
130 | # CONFIG_IOSCHED_DEADLINE is not set | ||
131 | # CONFIG_IOSCHED_CFQ is not set | ||
132 | # CONFIG_DEFAULT_AS is not set | ||
133 | # CONFIG_DEFAULT_DEADLINE is not set | ||
134 | # CONFIG_DEFAULT_CFQ is not set | ||
135 | CONFIG_DEFAULT_NOOP=y | ||
136 | CONFIG_DEFAULT_IOSCHED="noop" | ||
137 | CONFIG_FREEZER=y | ||
60 | 138 | ||
61 | # | 139 | # |
62 | # System Type | 140 | # System Type |
63 | # | 141 | # |
64 | # CONFIG_ARCH_CLPS7500 is not set | 142 | CONFIG_MMU=y |
143 | # CONFIG_ARCH_AAEC2000 is not set | ||
144 | # CONFIG_ARCH_INTEGRATOR is not set | ||
145 | # CONFIG_ARCH_REALVIEW is not set | ||
146 | # CONFIG_ARCH_VERSATILE is not set | ||
147 | # CONFIG_ARCH_AT91 is not set | ||
65 | # CONFIG_ARCH_CLPS711X is not set | 148 | # CONFIG_ARCH_CLPS711X is not set |
66 | # CONFIG_ARCH_CO285 is not set | 149 | # CONFIG_ARCH_GEMINI is not set |
67 | # CONFIG_ARCH_EBSA110 is not set | 150 | # CONFIG_ARCH_EBSA110 is not set |
151 | # CONFIG_ARCH_EP93XX is not set | ||
68 | # CONFIG_ARCH_FOOTBRIDGE is not set | 152 | # CONFIG_ARCH_FOOTBRIDGE is not set |
69 | # CONFIG_ARCH_INTEGRATOR is not set | 153 | # CONFIG_ARCH_MXC is not set |
70 | # CONFIG_ARCH_IOP3XX is not set | 154 | # CONFIG_ARCH_STMP3XXX is not set |
71 | # CONFIG_ARCH_IXP4XX is not set | 155 | # CONFIG_ARCH_NETX is not set |
156 | # CONFIG_ARCH_H720X is not set | ||
157 | # CONFIG_ARCH_NOMADIK is not set | ||
158 | # CONFIG_ARCH_IOP13XX is not set | ||
159 | # CONFIG_ARCH_IOP32X is not set | ||
160 | # CONFIG_ARCH_IOP33X is not set | ||
161 | # CONFIG_ARCH_IXP23XX is not set | ||
72 | # CONFIG_ARCH_IXP2000 is not set | 162 | # CONFIG_ARCH_IXP2000 is not set |
163 | # CONFIG_ARCH_IXP4XX is not set | ||
73 | # CONFIG_ARCH_L7200 is not set | 164 | # CONFIG_ARCH_L7200 is not set |
165 | # CONFIG_ARCH_KIRKWOOD is not set | ||
166 | # CONFIG_ARCH_LOKI is not set | ||
167 | # CONFIG_ARCH_MV78XX0 is not set | ||
168 | # CONFIG_ARCH_ORION5X is not set | ||
169 | # CONFIG_ARCH_MMP is not set | ||
170 | # CONFIG_ARCH_KS8695 is not set | ||
171 | # CONFIG_ARCH_NS9XXX is not set | ||
172 | # CONFIG_ARCH_W90X900 is not set | ||
173 | # CONFIG_ARCH_PNX4008 is not set | ||
74 | # CONFIG_ARCH_PXA is not set | 174 | # CONFIG_ARCH_PXA is not set |
175 | # CONFIG_ARCH_MSM is not set | ||
75 | # CONFIG_ARCH_RPC is not set | 176 | # CONFIG_ARCH_RPC is not set |
76 | CONFIG_ARCH_SA1100=y | 177 | CONFIG_ARCH_SA1100=y |
77 | # CONFIG_ARCH_S3C2410 is not set | 178 | # CONFIG_ARCH_S3C2410 is not set |
179 | # CONFIG_ARCH_S3C64XX is not set | ||
180 | # CONFIG_ARCH_S5PC1XX is not set | ||
78 | # CONFIG_ARCH_SHARK is not set | 181 | # CONFIG_ARCH_SHARK is not set |
79 | # CONFIG_ARCH_LH7A40X is not set | 182 | # CONFIG_ARCH_LH7A40X is not set |
183 | # CONFIG_ARCH_U300 is not set | ||
184 | # CONFIG_ARCH_DAVINCI is not set | ||
80 | # CONFIG_ARCH_OMAP is not set | 185 | # CONFIG_ARCH_OMAP is not set |
81 | # CONFIG_ARCH_VERSATILE is not set | 186 | # CONFIG_ARCH_BCMRING is not set |
82 | # CONFIG_ARCH_IMX is not set | ||
83 | # CONFIG_ARCH_H720X is not set | ||
84 | 187 | ||
85 | # | 188 | # |
86 | # SA11x0 Implementations | 189 | # SA11x0 Implementations |
@@ -106,27 +209,31 @@ CONFIG_CPU_32=y | |||
106 | CONFIG_CPU_SA1100=y | 209 | CONFIG_CPU_SA1100=y |
107 | CONFIG_CPU_32v4=y | 210 | CONFIG_CPU_32v4=y |
108 | CONFIG_CPU_ABRT_EV4=y | 211 | CONFIG_CPU_ABRT_EV4=y |
212 | CONFIG_CPU_PABRT_LEGACY=y | ||
109 | CONFIG_CPU_CACHE_V4WB=y | 213 | CONFIG_CPU_CACHE_V4WB=y |
110 | CONFIG_CPU_CACHE_VIVT=y | 214 | CONFIG_CPU_CACHE_VIVT=y |
111 | CONFIG_CPU_TLB_V4WB=y | 215 | CONFIG_CPU_TLB_V4WB=y |
112 | CONFIG_CPU_MINICACHE=y | 216 | CONFIG_CPU_CP15=y |
217 | CONFIG_CPU_CP15_MMU=y | ||
113 | 218 | ||
114 | # | 219 | # |
115 | # Processor Features | 220 | # Processor Features |
116 | # | 221 | # |
222 | # CONFIG_CPU_ICACHE_DISABLE is not set | ||
223 | # CONFIG_CPU_DCACHE_DISABLE is not set | ||
224 | CONFIG_ARM_L1_CACHE_SHIFT=5 | ||
117 | 225 | ||
118 | # | 226 | # |
119 | # Bus support | 227 | # Bus support |
120 | # | 228 | # |
121 | CONFIG_ISA=y | 229 | CONFIG_ISA=y |
122 | CONFIG_ISA_DMA_API=y | 230 | # CONFIG_PCI_SYSCALL is not set |
123 | 231 | # CONFIG_ARCH_SUPPORTS_MSI is not set | |
124 | # | ||
125 | # PCCARD (PCMCIA/CardBus) support | ||
126 | # | ||
127 | CONFIG_PCCARD=y | 232 | CONFIG_PCCARD=y |
128 | # CONFIG_PCMCIA_DEBUG is not set | 233 | # CONFIG_PCMCIA_DEBUG is not set |
129 | CONFIG_PCMCIA=y | 234 | CONFIG_PCMCIA=y |
235 | CONFIG_PCMCIA_LOAD_CIS=y | ||
236 | CONFIG_PCMCIA_IOCTL=y | ||
130 | 237 | ||
131 | # | 238 | # |
132 | # PC-card bridges | 239 | # PC-card bridges |
@@ -138,11 +245,41 @@ CONFIG_PCMCIA_SA1100=y | |||
138 | # | 245 | # |
139 | # Kernel Features | 246 | # Kernel Features |
140 | # | 247 | # |
141 | # CONFIG_SMP is not set | 248 | CONFIG_TICK_ONESHOT=y |
249 | # CONFIG_NO_HZ is not set | ||
250 | # CONFIG_HIGH_RES_TIMERS is not set | ||
251 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
252 | CONFIG_VMSPLIT_3G=y | ||
253 | # CONFIG_VMSPLIT_2G is not set | ||
254 | # CONFIG_VMSPLIT_1G is not set | ||
255 | CONFIG_PAGE_OFFSET=0xC0000000 | ||
256 | CONFIG_PREEMPT_NONE=y | ||
257 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
142 | # CONFIG_PREEMPT is not set | 258 | # CONFIG_PREEMPT is not set |
143 | CONFIG_DISCONTIGMEM=y | 259 | CONFIG_HZ=100 |
260 | # CONFIG_AEABI is not set | ||
261 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
262 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y | ||
263 | # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set | ||
264 | # CONFIG_HIGHMEM is not set | ||
265 | CONFIG_SELECT_MEMORY_MODEL=y | ||
266 | # CONFIG_FLATMEM_MANUAL is not set | ||
267 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
268 | CONFIG_SPARSEMEM_MANUAL=y | ||
269 | CONFIG_SPARSEMEM=y | ||
270 | CONFIG_HAVE_MEMORY_PRESENT=y | ||
271 | CONFIG_SPARSEMEM_EXTREME=y | ||
272 | CONFIG_SPLIT_PTLOCK_CPUS=4096 | ||
273 | # CONFIG_PHYS_ADDR_T_64BIT is not set | ||
274 | CONFIG_ZONE_DMA_FLAG=0 | ||
275 | CONFIG_VIRT_TO_BUS=y | ||
276 | CONFIG_HAVE_MLOCK=y | ||
277 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
278 | # CONFIG_KSM is not set | ||
279 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
144 | # CONFIG_LEDS is not set | 280 | # CONFIG_LEDS is not set |
145 | CONFIG_ALIGNMENT_TRAP=y | 281 | CONFIG_ALIGNMENT_TRAP=y |
282 | # CONFIG_UACCESS_WITH_MEMCPY is not set | ||
146 | 283 | ||
147 | # | 284 | # |
148 | # Boot options | 285 | # Boot options |
@@ -151,22 +288,26 @@ CONFIG_ZBOOT_ROM_TEXT=0x0 | |||
151 | CONFIG_ZBOOT_ROM_BSS=0x0 | 288 | CONFIG_ZBOOT_ROM_BSS=0x0 |
152 | CONFIG_CMDLINE="" | 289 | CONFIG_CMDLINE="" |
153 | # CONFIG_XIP_KERNEL is not set | 290 | # CONFIG_XIP_KERNEL is not set |
291 | # CONFIG_KEXEC is not set | ||
154 | 292 | ||
155 | # | 293 | # |
156 | # CPU Frequency scaling | 294 | # CPU Power Management |
157 | # | 295 | # |
158 | CONFIG_CPU_FREQ=y | 296 | CONFIG_CPU_FREQ=y |
159 | CONFIG_CPU_FREQ_TABLE=y | ||
160 | # CONFIG_CPU_FREQ_DEBUG is not set | 297 | # CONFIG_CPU_FREQ_DEBUG is not set |
161 | CONFIG_CPU_FREQ_STAT=y | 298 | # CONFIG_CPU_FREQ_STAT is not set |
162 | # CONFIG_CPU_FREQ_STAT_DETAILS is not set | ||
163 | # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set | 299 | # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set |
300 | # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set | ||
164 | CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y | 301 | CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y |
302 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
303 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
165 | # CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set | 304 | # CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set |
166 | # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set | 305 | # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set |
167 | CONFIG_CPU_FREQ_GOV_USERSPACE=y | 306 | CONFIG_CPU_FREQ_GOV_USERSPACE=y |
168 | # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set | 307 | # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set |
308 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
169 | CONFIG_CPU_FREQ_SA1100=y | 309 | CONFIG_CPU_FREQ_SA1100=y |
310 | # CONFIG_CPU_IDLE is not set | ||
170 | 311 | ||
171 | # | 312 | # |
172 | # Floating point emulation | 313 | # Floating point emulation |
@@ -183,6 +324,8 @@ CONFIG_FPE_NWFPE=y | |||
183 | # Userspace binary formats | 324 | # Userspace binary formats |
184 | # | 325 | # |
185 | CONFIG_BINFMT_ELF=y | 326 | CONFIG_BINFMT_ELF=y |
327 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
328 | CONFIG_HAVE_AOUT=y | ||
186 | # CONFIG_BINFMT_AOUT is not set | 329 | # CONFIG_BINFMT_AOUT is not set |
187 | # CONFIG_BINFMT_MISC is not set | 330 | # CONFIG_BINFMT_MISC is not set |
188 | # CONFIG_ARTHUR is not set | 331 | # CONFIG_ARTHUR is not set |
@@ -191,8 +334,120 @@ CONFIG_BINFMT_ELF=y | |||
191 | # Power management options | 334 | # Power management options |
192 | # | 335 | # |
193 | CONFIG_PM=y | 336 | CONFIG_PM=y |
194 | # CONFIG_PM_LEGACY is not set | 337 | # CONFIG_PM_DEBUG is not set |
195 | # CONFIG_APM is not set | 338 | CONFIG_PM_SLEEP=y |
339 | CONFIG_SUSPEND=y | ||
340 | CONFIG_SUSPEND_FREEZER=y | ||
341 | # CONFIG_APM_EMULATION is not set | ||
342 | # CONFIG_PM_RUNTIME is not set | ||
343 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | ||
344 | CONFIG_NET=y | ||
345 | |||
346 | # | ||
347 | # Networking options | ||
348 | # | ||
349 | # CONFIG_PACKET is not set | ||
350 | CONFIG_UNIX=y | ||
351 | CONFIG_XFRM=y | ||
352 | # CONFIG_XFRM_USER is not set | ||
353 | # CONFIG_XFRM_SUB_POLICY is not set | ||
354 | # CONFIG_XFRM_MIGRATE is not set | ||
355 | # CONFIG_XFRM_STATISTICS is not set | ||
356 | # CONFIG_NET_KEY is not set | ||
357 | CONFIG_INET=y | ||
358 | # CONFIG_IP_MULTICAST is not set | ||
359 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
360 | CONFIG_IP_FIB_HASH=y | ||
361 | # CONFIG_IP_PNP is not set | ||
362 | # CONFIG_NET_IPIP is not set | ||
363 | # CONFIG_NET_IPGRE is not set | ||
364 | # CONFIG_ARPD is not set | ||
365 | # CONFIG_SYN_COOKIES is not set | ||
366 | # CONFIG_INET_AH is not set | ||
367 | # CONFIG_INET_ESP is not set | ||
368 | # CONFIG_INET_IPCOMP is not set | ||
369 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
370 | # CONFIG_INET_TUNNEL is not set | ||
371 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
372 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
373 | CONFIG_INET_XFRM_MODE_BEET=y | ||
374 | CONFIG_INET_LRO=y | ||
375 | CONFIG_INET_DIAG=y | ||
376 | CONFIG_INET_TCP_DIAG=y | ||
377 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
378 | CONFIG_TCP_CONG_CUBIC=y | ||
379 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
380 | # CONFIG_TCP_MD5SIG is not set | ||
381 | # CONFIG_IPV6 is not set | ||
382 | # CONFIG_NETWORK_SECMARK is not set | ||
383 | # CONFIG_NETFILTER is not set | ||
384 | # CONFIG_IP_DCCP is not set | ||
385 | # CONFIG_IP_SCTP is not set | ||
386 | # CONFIG_RDS is not set | ||
387 | # CONFIG_TIPC is not set | ||
388 | # CONFIG_ATM is not set | ||
389 | # CONFIG_BRIDGE is not set | ||
390 | # CONFIG_NET_DSA is not set | ||
391 | # CONFIG_VLAN_8021Q is not set | ||
392 | # CONFIG_DECNET is not set | ||
393 | # CONFIG_LLC2 is not set | ||
394 | # CONFIG_IPX is not set | ||
395 | # CONFIG_ATALK is not set | ||
396 | # CONFIG_X25 is not set | ||
397 | # CONFIG_LAPB is not set | ||
398 | # CONFIG_ECONET is not set | ||
399 | # CONFIG_WAN_ROUTER is not set | ||
400 | # CONFIG_PHONET is not set | ||
401 | # CONFIG_IEEE802154 is not set | ||
402 | # CONFIG_NET_SCHED is not set | ||
403 | # CONFIG_DCB is not set | ||
404 | |||
405 | # | ||
406 | # Network testing | ||
407 | # | ||
408 | # CONFIG_NET_PKTGEN is not set | ||
409 | # CONFIG_HAMRADIO is not set | ||
410 | # CONFIG_CAN is not set | ||
411 | CONFIG_IRDA=m | ||
412 | |||
413 | # | ||
414 | # IrDA protocols | ||
415 | # | ||
416 | CONFIG_IRLAN=m | ||
417 | CONFIG_IRNET=m | ||
418 | CONFIG_IRCOMM=m | ||
419 | # CONFIG_IRDA_ULTRA is not set | ||
420 | |||
421 | # | ||
422 | # IrDA options | ||
423 | # | ||
424 | # CONFIG_IRDA_CACHE_LAST_LSAP is not set | ||
425 | # CONFIG_IRDA_FAST_RR is not set | ||
426 | # CONFIG_IRDA_DEBUG is not set | ||
427 | |||
428 | # | ||
429 | # Infrared-port device drivers | ||
430 | # | ||
431 | |||
432 | # | ||
433 | # SIR device drivers | ||
434 | # | ||
435 | # CONFIG_IRTTY_SIR is not set | ||
436 | |||
437 | # | ||
438 | # Dongle support | ||
439 | # | ||
440 | |||
441 | # | ||
442 | # FIR device drivers | ||
443 | # | ||
444 | CONFIG_SA1100_FIR=m | ||
445 | # CONFIG_BT is not set | ||
446 | # CONFIG_AF_RXRPC is not set | ||
447 | # CONFIG_WIRELESS is not set | ||
448 | # CONFIG_WIMAX is not set | ||
449 | # CONFIG_RFKILL is not set | ||
450 | # CONFIG_NET_9P is not set | ||
196 | 451 | ||
197 | # | 452 | # |
198 | # Device Drivers | 453 | # Device Drivers |
@@ -201,15 +456,17 @@ CONFIG_PM=y | |||
201 | # | 456 | # |
202 | # Generic Driver Options | 457 | # Generic Driver Options |
203 | # | 458 | # |
459 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
204 | CONFIG_STANDALONE=y | 460 | CONFIG_STANDALONE=y |
205 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 461 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
206 | # CONFIG_FW_LOADER is not set | 462 | CONFIG_FW_LOADER=y |
207 | 463 | CONFIG_FIRMWARE_IN_KERNEL=y | |
208 | # | 464 | CONFIG_EXTRA_FIRMWARE="" |
209 | # Memory Technology Devices (MTD) | 465 | # CONFIG_SYS_HYPERVISOR is not set |
210 | # | 466 | # CONFIG_CONNECTOR is not set |
211 | CONFIG_MTD=y | 467 | CONFIG_MTD=y |
212 | # CONFIG_MTD_DEBUG is not set | 468 | # CONFIG_MTD_DEBUG is not set |
469 | # CONFIG_MTD_TESTS is not set | ||
213 | # CONFIG_MTD_CONCAT is not set | 470 | # CONFIG_MTD_CONCAT is not set |
214 | CONFIG_MTD_PARTITIONS=y | 471 | CONFIG_MTD_PARTITIONS=y |
215 | CONFIG_MTD_REDBOOT_PARTS=y | 472 | CONFIG_MTD_REDBOOT_PARTS=y |
@@ -218,15 +475,20 @@ CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 | |||
218 | # CONFIG_MTD_REDBOOT_PARTS_READONLY is not set | 475 | # CONFIG_MTD_REDBOOT_PARTS_READONLY is not set |
219 | # CONFIG_MTD_CMDLINE_PARTS is not set | 476 | # CONFIG_MTD_CMDLINE_PARTS is not set |
220 | # CONFIG_MTD_AFS_PARTS is not set | 477 | # CONFIG_MTD_AFS_PARTS is not set |
478 | # CONFIG_MTD_AR7_PARTS is not set | ||
221 | 479 | ||
222 | # | 480 | # |
223 | # User Modules And Translation Layers | 481 | # User Modules And Translation Layers |
224 | # | 482 | # |
225 | CONFIG_MTD_CHAR=y | 483 | CONFIG_MTD_CHAR=y |
484 | CONFIG_MTD_BLKDEVS=y | ||
226 | CONFIG_MTD_BLOCK=y | 485 | CONFIG_MTD_BLOCK=y |
227 | # CONFIG_FTL is not set | 486 | # CONFIG_FTL is not set |
228 | # CONFIG_NFTL is not set | 487 | # CONFIG_NFTL is not set |
229 | # CONFIG_INFTL is not set | 488 | # CONFIG_INFTL is not set |
489 | # CONFIG_RFD_FTL is not set | ||
490 | # CONFIG_SSFDC is not set | ||
491 | # CONFIG_MTD_OOPS is not set | ||
230 | 492 | ||
231 | # | 493 | # |
232 | # RAM/ROM/Flash chip drivers | 494 | # RAM/ROM/Flash chip drivers |
@@ -249,6 +511,7 @@ CONFIG_MTD_MAP_BANK_WIDTH_4=y | |||
249 | CONFIG_MTD_CFI_I2=y | 511 | CONFIG_MTD_CFI_I2=y |
250 | # CONFIG_MTD_CFI_I4 is not set | 512 | # CONFIG_MTD_CFI_I4 is not set |
251 | # CONFIG_MTD_CFI_I8 is not set | 513 | # CONFIG_MTD_CFI_I8 is not set |
514 | # CONFIG_MTD_OTP is not set | ||
252 | CONFIG_MTD_CFI_INTELEXT=y | 515 | CONFIG_MTD_CFI_INTELEXT=y |
253 | # CONFIG_MTD_CFI_AMDSTD is not set | 516 | # CONFIG_MTD_CFI_AMDSTD is not set |
254 | # CONFIG_MTD_CFI_STAA is not set | 517 | # CONFIG_MTD_CFI_STAA is not set |
@@ -265,7 +528,7 @@ CONFIG_MTD_CFI_UTIL=y | |||
265 | # CONFIG_MTD_PHYSMAP is not set | 528 | # CONFIG_MTD_PHYSMAP is not set |
266 | # CONFIG_MTD_ARM_INTEGRATOR is not set | 529 | # CONFIG_MTD_ARM_INTEGRATOR is not set |
267 | CONFIG_MTD_SA1100=y | 530 | CONFIG_MTD_SA1100=y |
268 | # CONFIG_MTD_EDB7312 is not set | 531 | # CONFIG_MTD_PLATRAM is not set |
269 | 532 | ||
270 | # | 533 | # |
271 | # Self-contained MTD device drivers | 534 | # Self-contained MTD device drivers |
@@ -273,7 +536,6 @@ CONFIG_MTD_SA1100=y | |||
273 | # CONFIG_MTD_SLRAM is not set | 536 | # CONFIG_MTD_SLRAM is not set |
274 | # CONFIG_MTD_PHRAM is not set | 537 | # CONFIG_MTD_PHRAM is not set |
275 | # CONFIG_MTD_MTDRAM is not set | 538 | # CONFIG_MTD_MTDRAM is not set |
276 | # CONFIG_MTD_BLKMTD is not set | ||
277 | # CONFIG_MTD_BLOCK2MTD is not set | 539 | # CONFIG_MTD_BLOCK2MTD is not set |
278 | 540 | ||
279 | # | 541 | # |
@@ -282,26 +544,21 @@ CONFIG_MTD_SA1100=y | |||
282 | # CONFIG_MTD_DOC2000 is not set | 544 | # CONFIG_MTD_DOC2000 is not set |
283 | # CONFIG_MTD_DOC2001 is not set | 545 | # CONFIG_MTD_DOC2001 is not set |
284 | # CONFIG_MTD_DOC2001PLUS is not set | 546 | # CONFIG_MTD_DOC2001PLUS is not set |
285 | |||
286 | # | ||
287 | # NAND Flash Device Drivers | ||
288 | # | ||
289 | # CONFIG_MTD_NAND is not set | 547 | # CONFIG_MTD_NAND is not set |
548 | # CONFIG_MTD_ONENAND is not set | ||
290 | 549 | ||
291 | # | 550 | # |
292 | # Parallel port support | 551 | # LPDDR flash memory drivers |
293 | # | 552 | # |
294 | # CONFIG_PARPORT is not set | 553 | # CONFIG_MTD_LPDDR is not set |
295 | 554 | ||
296 | # | 555 | # |
297 | # Plug and Play support | 556 | # UBI - Unsorted block images |
298 | # | 557 | # |
558 | # CONFIG_MTD_UBI is not set | ||
559 | # CONFIG_PARPORT is not set | ||
299 | # CONFIG_PNP is not set | 560 | # CONFIG_PNP is not set |
300 | 561 | CONFIG_BLK_DEV=y | |
301 | # | ||
302 | # Block devices | ||
303 | # | ||
304 | # CONFIG_BLK_DEV_XD is not set | ||
305 | # CONFIG_BLK_DEV_COW_COMMON is not set | 562 | # CONFIG_BLK_DEV_COW_COMMON is not set |
306 | CONFIG_BLK_DEV_LOOP=m | 563 | CONFIG_BLK_DEV_LOOP=m |
307 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | 564 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set |
@@ -309,212 +566,58 @@ CONFIG_BLK_DEV_LOOP=m | |||
309 | CONFIG_BLK_DEV_RAM=y | 566 | CONFIG_BLK_DEV_RAM=y |
310 | CONFIG_BLK_DEV_RAM_COUNT=16 | 567 | CONFIG_BLK_DEV_RAM_COUNT=16 |
311 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 568 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
312 | CONFIG_BLK_DEV_INITRD=y | 569 | # CONFIG_BLK_DEV_XIP is not set |
313 | CONFIG_INITRAMFS_SOURCE="" | ||
314 | # CONFIG_CDROM_PKTCDVD is not set | 570 | # CONFIG_CDROM_PKTCDVD is not set |
315 | |||
316 | # | ||
317 | # IO Schedulers | ||
318 | # | ||
319 | CONFIG_IOSCHED_NOOP=y | ||
320 | CONFIG_IOSCHED_AS=y | ||
321 | CONFIG_IOSCHED_DEADLINE=y | ||
322 | CONFIG_IOSCHED_CFQ=y | ||
323 | # CONFIG_ATA_OVER_ETH is not set | 571 | # CONFIG_ATA_OVER_ETH is not set |
572 | # CONFIG_MG_DISK is not set | ||
573 | # CONFIG_MISC_DEVICES is not set | ||
574 | CONFIG_HAVE_IDE=y | ||
575 | CONFIG_IDE=y | ||
324 | 576 | ||
325 | # | 577 | # |
326 | # ATA/ATAPI/MFM/RLL support | 578 | # Please see Documentation/ide/ide.txt for help/info on IDE drives |
327 | # | ||
328 | CONFIG_IDE=m | ||
329 | CONFIG_BLK_DEV_IDE=m | ||
330 | |||
331 | # | ||
332 | # Please see Documentation/ide.txt for help/info on IDE drives | ||
333 | # | 579 | # |
334 | # CONFIG_BLK_DEV_IDE_SATA is not set | 580 | # CONFIG_BLK_DEV_IDE_SATA is not set |
335 | CONFIG_BLK_DEV_IDEDISK=m | 581 | CONFIG_IDE_GD=y |
336 | # CONFIG_IDEDISK_MULTI_MODE is not set | 582 | CONFIG_IDE_GD_ATA=y |
337 | # CONFIG_BLK_DEV_IDECS is not set | 583 | # CONFIG_IDE_GD_ATAPI is not set |
338 | CONFIG_BLK_DEV_IDECD=m | 584 | CONFIG_BLK_DEV_IDECS=y |
585 | # CONFIG_BLK_DEV_IDECD is not set | ||
339 | # CONFIG_BLK_DEV_IDETAPE is not set | 586 | # CONFIG_BLK_DEV_IDETAPE is not set |
340 | # CONFIG_BLK_DEV_IDEFLOPPY is not set | ||
341 | # CONFIG_IDE_TASK_IOCTL is not set | 587 | # CONFIG_IDE_TASK_IOCTL is not set |
588 | CONFIG_IDE_PROC_FS=y | ||
342 | 589 | ||
343 | # | 590 | # |
344 | # IDE chipset support/bugfixes | 591 | # IDE chipset support/bugfixes |
345 | # | 592 | # |
346 | CONFIG_IDE_GENERIC=m | 593 | # CONFIG_BLK_DEV_PLATFORM is not set |
347 | # CONFIG_IDE_ARM is not set | ||
348 | # CONFIG_IDE_CHIPSETS is not set | ||
349 | # CONFIG_BLK_DEV_IDEDMA is not set | 594 | # CONFIG_BLK_DEV_IDEDMA is not set |
350 | # CONFIG_IDEDMA_AUTO is not set | ||
351 | # CONFIG_BLK_DEV_HD is not set | ||
352 | 595 | ||
353 | # | 596 | # |
354 | # SCSI device support | 597 | # SCSI device support |
355 | # | 598 | # |
599 | # CONFIG_RAID_ATTRS is not set | ||
356 | # CONFIG_SCSI is not set | 600 | # CONFIG_SCSI is not set |
357 | 601 | # CONFIG_SCSI_DMA is not set | |
358 | # | 602 | # CONFIG_SCSI_NETLINK is not set |
359 | # Multi-device support (RAID and LVM) | 603 | # CONFIG_ATA is not set |
360 | # | ||
361 | # CONFIG_MD is not set | 604 | # CONFIG_MD is not set |
362 | |||
363 | # | ||
364 | # Fusion MPT device support | ||
365 | # | ||
366 | |||
367 | # | ||
368 | # IEEE 1394 (FireWire) support | ||
369 | # | ||
370 | |||
371 | # | ||
372 | # I2O device support | ||
373 | # | ||
374 | |||
375 | # | ||
376 | # Networking support | ||
377 | # | ||
378 | CONFIG_NET=y | ||
379 | |||
380 | # | ||
381 | # Networking options | ||
382 | # | ||
383 | # CONFIG_PACKET is not set | ||
384 | CONFIG_UNIX=y | ||
385 | # CONFIG_NET_KEY is not set | ||
386 | CONFIG_INET=y | ||
387 | # CONFIG_IP_MULTICAST is not set | ||
388 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
389 | # CONFIG_IP_PNP is not set | ||
390 | # CONFIG_NET_IPIP is not set | ||
391 | # CONFIG_NET_IPGRE is not set | ||
392 | # CONFIG_ARPD is not set | ||
393 | # CONFIG_SYN_COOKIES is not set | ||
394 | # CONFIG_INET_AH is not set | ||
395 | # CONFIG_INET_ESP is not set | ||
396 | # CONFIG_INET_IPCOMP is not set | ||
397 | # CONFIG_INET_TUNNEL is not set | ||
398 | # CONFIG_IP_TCPDIAG is not set | ||
399 | # CONFIG_IP_TCPDIAG_IPV6 is not set | ||
400 | # CONFIG_IPV6 is not set | ||
401 | # CONFIG_NETFILTER is not set | ||
402 | |||
403 | # | ||
404 | # SCTP Configuration (EXPERIMENTAL) | ||
405 | # | ||
406 | # CONFIG_IP_SCTP is not set | ||
407 | # CONFIG_ATM is not set | ||
408 | # CONFIG_BRIDGE is not set | ||
409 | # CONFIG_VLAN_8021Q is not set | ||
410 | # CONFIG_DECNET is not set | ||
411 | # CONFIG_LLC2 is not set | ||
412 | # CONFIG_IPX is not set | ||
413 | # CONFIG_ATALK is not set | ||
414 | # CONFIG_X25 is not set | ||
415 | # CONFIG_LAPB is not set | ||
416 | # CONFIG_NET_DIVERT is not set | ||
417 | # CONFIG_ECONET is not set | ||
418 | # CONFIG_WAN_ROUTER is not set | ||
419 | |||
420 | # | ||
421 | # QoS and/or fair queueing | ||
422 | # | ||
423 | # CONFIG_NET_SCHED is not set | ||
424 | # CONFIG_NET_CLS_ROUTE is not set | ||
425 | |||
426 | # | ||
427 | # Network testing | ||
428 | # | ||
429 | # CONFIG_NET_PKTGEN is not set | ||
430 | # CONFIG_NETPOLL is not set | ||
431 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
432 | # CONFIG_HAMRADIO is not set | ||
433 | CONFIG_IRDA=m | ||
434 | |||
435 | # | ||
436 | # IrDA protocols | ||
437 | # | ||
438 | CONFIG_IRLAN=m | ||
439 | CONFIG_IRNET=m | ||
440 | CONFIG_IRCOMM=m | ||
441 | # CONFIG_IRDA_ULTRA is not set | ||
442 | |||
443 | # | ||
444 | # IrDA options | ||
445 | # | ||
446 | # CONFIG_IRDA_CACHE_LAST_LSAP is not set | ||
447 | # CONFIG_IRDA_FAST_RR is not set | ||
448 | # CONFIG_IRDA_DEBUG is not set | ||
449 | |||
450 | # | ||
451 | # Infrared-port device drivers | ||
452 | # | ||
453 | |||
454 | # | ||
455 | # SIR device drivers | ||
456 | # | ||
457 | # CONFIG_IRTTY_SIR is not set | ||
458 | |||
459 | # | ||
460 | # Dongle support | ||
461 | # | ||
462 | |||
463 | # | ||
464 | # Old SIR device drivers | ||
465 | # | ||
466 | # CONFIG_IRPORT_SIR is not set | ||
467 | |||
468 | # | ||
469 | # Old Serial dongle support | ||
470 | # | ||
471 | |||
472 | # | ||
473 | # FIR device drivers | ||
474 | # | ||
475 | # CONFIG_NSC_FIR is not set | ||
476 | # CONFIG_WINBOND_FIR is not set | ||
477 | # CONFIG_SMC_IRCC_FIR is not set | ||
478 | # CONFIG_ALI_FIR is not set | ||
479 | CONFIG_SA1100_FIR=m | ||
480 | # CONFIG_VIA_FIR is not set | ||
481 | # CONFIG_BT is not set | ||
482 | CONFIG_NETDEVICES=y | 605 | CONFIG_NETDEVICES=y |
483 | # CONFIG_DUMMY is not set | 606 | # CONFIG_DUMMY is not set |
484 | # CONFIG_BONDING is not set | 607 | # CONFIG_BONDING is not set |
608 | # CONFIG_MACVLAN is not set | ||
485 | # CONFIG_EQUALIZER is not set | 609 | # CONFIG_EQUALIZER is not set |
486 | # CONFIG_TUN is not set | 610 | # CONFIG_TUN is not set |
487 | 611 | # CONFIG_VETH is not set | |
488 | # | ||
489 | # ARCnet devices | ||
490 | # | ||
491 | # CONFIG_ARCNET is not set | 612 | # CONFIG_ARCNET is not set |
492 | |||
493 | # | ||
494 | # Ethernet (10 or 100Mbit) | ||
495 | # | ||
496 | # CONFIG_NET_ETHERNET is not set | 613 | # CONFIG_NET_ETHERNET is not set |
497 | 614 | # CONFIG_NETDEV_1000 is not set | |
498 | # | 615 | # CONFIG_NETDEV_10000 is not set |
499 | # Ethernet (1000 Mbit) | ||
500 | # | ||
501 | |||
502 | # | ||
503 | # Ethernet (10000 Mbit) | ||
504 | # | ||
505 | |||
506 | # | ||
507 | # Token Ring devices | ||
508 | # | ||
509 | # CONFIG_TR is not set | 616 | # CONFIG_TR is not set |
617 | # CONFIG_WLAN is not set | ||
510 | 618 | ||
511 | # | 619 | # |
512 | # Wireless LAN (non-hamradio) | 620 | # Enable WiMAX (Networking options) to see the WiMAX drivers |
513 | # | ||
514 | # CONFIG_NET_RADIO is not set | ||
515 | |||
516 | # | ||
517 | # PCMCIA network device support | ||
518 | # | 621 | # |
519 | CONFIG_NET_PCMCIA=y | 622 | CONFIG_NET_PCMCIA=y |
520 | # CONFIG_PCMCIA_3C589 is not set | 623 | # CONFIG_PCMCIA_3C589 is not set |
@@ -525,10 +628,6 @@ CONFIG_PCMCIA_PCNET=y | |||
525 | # CONFIG_PCMCIA_SMC91C92 is not set | 628 | # CONFIG_PCMCIA_SMC91C92 is not set |
526 | # CONFIG_PCMCIA_XIRC2PS is not set | 629 | # CONFIG_PCMCIA_XIRC2PS is not set |
527 | # CONFIG_PCMCIA_AXNET is not set | 630 | # CONFIG_PCMCIA_AXNET is not set |
528 | |||
529 | # | ||
530 | # Wan interfaces | ||
531 | # | ||
532 | # CONFIG_WAN is not set | 631 | # CONFIG_WAN is not set |
533 | CONFIG_PPP=m | 632 | CONFIG_PPP=m |
534 | # CONFIG_PPP_MULTILINK is not set | 633 | # CONFIG_PPP_MULTILINK is not set |
@@ -537,20 +636,23 @@ CONFIG_PPP_ASYNC=m | |||
537 | # CONFIG_PPP_SYNC_TTY is not set | 636 | # CONFIG_PPP_SYNC_TTY is not set |
538 | CONFIG_PPP_DEFLATE=m | 637 | CONFIG_PPP_DEFLATE=m |
539 | CONFIG_PPP_BSDCOMP=m | 638 | CONFIG_PPP_BSDCOMP=m |
639 | # CONFIG_PPP_MPPE is not set | ||
540 | # CONFIG_PPPOE is not set | 640 | # CONFIG_PPPOE is not set |
641 | # CONFIG_PPPOL2TP is not set | ||
541 | # CONFIG_SLIP is not set | 642 | # CONFIG_SLIP is not set |
542 | # CONFIG_SHAPER is not set | 643 | CONFIG_SLHC=m |
543 | # CONFIG_NETCONSOLE is not set | 644 | # CONFIG_NETCONSOLE is not set |
544 | 645 | # CONFIG_NETPOLL is not set | |
545 | # | 646 | # CONFIG_NET_POLL_CONTROLLER is not set |
546 | # ISDN subsystem | ||
547 | # | ||
548 | # CONFIG_ISDN is not set | 647 | # CONFIG_ISDN is not set |
648 | # CONFIG_PHONE is not set | ||
549 | 649 | ||
550 | # | 650 | # |
551 | # Input device support | 651 | # Input device support |
552 | # | 652 | # |
553 | CONFIG_INPUT=y | 653 | CONFIG_INPUT=y |
654 | # CONFIG_INPUT_FF_MEMLESS is not set | ||
655 | # CONFIG_INPUT_POLLDEV is not set | ||
554 | 656 | ||
555 | # | 657 | # |
556 | # Userland interfaces | 658 | # Userland interfaces |
@@ -560,7 +662,6 @@ CONFIG_INPUT_MOUSEDEV_PSAUX=y | |||
560 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | 662 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 |
561 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | 663 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 |
562 | # CONFIG_INPUT_JOYDEV is not set | 664 | # CONFIG_INPUT_JOYDEV is not set |
563 | # CONFIG_INPUT_TSDEV is not set | ||
564 | # CONFIG_INPUT_EVDEV is not set | 665 | # CONFIG_INPUT_EVDEV is not set |
565 | # CONFIG_INPUT_EVBUG is not set | 666 | # CONFIG_INPUT_EVBUG is not set |
566 | 667 | ||
@@ -568,47 +669,42 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | |||
568 | # Input Device Drivers | 669 | # Input Device Drivers |
569 | # | 670 | # |
570 | CONFIG_INPUT_KEYBOARD=y | 671 | CONFIG_INPUT_KEYBOARD=y |
571 | CONFIG_KEYBOARD_ATKBD=y | 672 | # CONFIG_KEYBOARD_ATKBD is not set |
572 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
573 | # CONFIG_KEYBOARD_LKKBD is not set | 673 | # CONFIG_KEYBOARD_LKKBD is not set |
574 | # CONFIG_KEYBOARD_XTKBD is not set | 674 | CONFIG_KEYBOARD_GPIO=y |
675 | # CONFIG_KEYBOARD_MATRIX is not set | ||
575 | # CONFIG_KEYBOARD_NEWTON is not set | 676 | # CONFIG_KEYBOARD_NEWTON is not set |
576 | CONFIG_INPUT_MOUSE=y | 677 | # CONFIG_KEYBOARD_OPENCORES is not set |
577 | CONFIG_MOUSE_PS2=y | 678 | # CONFIG_KEYBOARD_STOWAWAY is not set |
578 | # CONFIG_MOUSE_SERIAL is not set | 679 | # CONFIG_KEYBOARD_SUNKBD is not set |
579 | # CONFIG_MOUSE_INPORT is not set | 680 | # CONFIG_KEYBOARD_XTKBD is not set |
580 | # CONFIG_MOUSE_LOGIBM is not set | 681 | # CONFIG_INPUT_MOUSE is not set |
581 | # CONFIG_MOUSE_PC110PAD is not set | ||
582 | # CONFIG_MOUSE_VSXXXAA is not set | ||
583 | # CONFIG_INPUT_JOYSTICK is not set | 682 | # CONFIG_INPUT_JOYSTICK is not set |
683 | # CONFIG_INPUT_TABLET is not set | ||
584 | # CONFIG_INPUT_TOUCHSCREEN is not set | 684 | # CONFIG_INPUT_TOUCHSCREEN is not set |
585 | # CONFIG_INPUT_MISC is not set | 685 | # CONFIG_INPUT_MISC is not set |
586 | 686 | ||
587 | # | 687 | # |
588 | # Hardware I/O ports | 688 | # Hardware I/O ports |
589 | # | 689 | # |
590 | CONFIG_SERIO=y | 690 | # CONFIG_SERIO is not set |
591 | CONFIG_SERIO_SERPORT=y | ||
592 | CONFIG_SERIO_LIBPS2=y | ||
593 | # CONFIG_SERIO_RAW is not set | ||
594 | # CONFIG_GAMEPORT is not set | 691 | # CONFIG_GAMEPORT is not set |
595 | CONFIG_SOUND_GAMEPORT=y | ||
596 | 692 | ||
597 | # | 693 | # |
598 | # Character devices | 694 | # Character devices |
599 | # | 695 | # |
600 | CONFIG_VT=y | 696 | CONFIG_VT=y |
697 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
601 | CONFIG_VT_CONSOLE=y | 698 | CONFIG_VT_CONSOLE=y |
602 | CONFIG_HW_CONSOLE=y | 699 | CONFIG_HW_CONSOLE=y |
700 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | ||
701 | CONFIG_DEVKMEM=y | ||
603 | # CONFIG_SERIAL_NONSTANDARD is not set | 702 | # CONFIG_SERIAL_NONSTANDARD is not set |
604 | 703 | ||
605 | # | 704 | # |
606 | # Serial drivers | 705 | # Serial drivers |
607 | # | 706 | # |
608 | CONFIG_SERIAL_8250=m | 707 | # CONFIG_SERIAL_8250 is not set |
609 | # CONFIG_SERIAL_8250_CS is not set | ||
610 | CONFIG_SERIAL_8250_NR_UARTS=4 | ||
611 | # CONFIG_SERIAL_8250_EXTENDED is not set | ||
612 | 708 | ||
613 | # | 709 | # |
614 | # Non-8250 serial port support | 710 | # Non-8250 serial port support |
@@ -618,71 +714,125 @@ CONFIG_SERIAL_SA1100_CONSOLE=y | |||
618 | CONFIG_SERIAL_CORE=y | 714 | CONFIG_SERIAL_CORE=y |
619 | CONFIG_SERIAL_CORE_CONSOLE=y | 715 | CONFIG_SERIAL_CORE_CONSOLE=y |
620 | CONFIG_UNIX98_PTYS=y | 716 | CONFIG_UNIX98_PTYS=y |
717 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
621 | CONFIG_LEGACY_PTYS=y | 718 | CONFIG_LEGACY_PTYS=y |
622 | CONFIG_LEGACY_PTY_COUNT=256 | 719 | CONFIG_LEGACY_PTY_COUNT=256 |
720 | # CONFIG_IPMI_HANDLER is not set | ||
721 | # CONFIG_HW_RANDOM is not set | ||
722 | # CONFIG_DTLK is not set | ||
723 | # CONFIG_R3964 is not set | ||
623 | 724 | ||
624 | # | 725 | # |
625 | # IPMI | 726 | # PCMCIA character devices |
626 | # | 727 | # |
627 | # CONFIG_IPMI_HANDLER is not set | 728 | # CONFIG_SYNCLINK_CS is not set |
729 | # CONFIG_CARDMAN_4000 is not set | ||
730 | # CONFIG_CARDMAN_4040 is not set | ||
731 | # CONFIG_IPWIRELESS is not set | ||
732 | # CONFIG_RAW_DRIVER is not set | ||
733 | # CONFIG_TCG_TPM is not set | ||
734 | CONFIG_DEVPORT=y | ||
735 | # CONFIG_I2C is not set | ||
736 | # CONFIG_SPI is not set | ||
628 | 737 | ||
629 | # | 738 | # |
630 | # Watchdog Cards | 739 | # PPS support |
631 | # | 740 | # |
632 | # CONFIG_WATCHDOG is not set | 741 | # CONFIG_PPS is not set |
633 | # CONFIG_NVRAM is not set | 742 | CONFIG_ARCH_REQUIRE_GPIOLIB=y |
634 | # CONFIG_RTC is not set | 743 | CONFIG_GPIOLIB=y |
635 | # CONFIG_DTLK is not set | 744 | # CONFIG_GPIO_SYSFS is not set |
636 | # CONFIG_R3964 is not set | ||
637 | 745 | ||
638 | # | 746 | # |
639 | # Ftape, the floppy tape device driver | 747 | # Memory mapped GPIO expanders: |
640 | # | 748 | # |
641 | # CONFIG_DRM is not set | ||
642 | 749 | ||
643 | # | 750 | # |
644 | # PCMCIA character devices | 751 | # I2C GPIO expanders: |
645 | # | 752 | # |
646 | # CONFIG_SYNCLINK_CS is not set | ||
647 | # CONFIG_RAW_DRIVER is not set | ||
648 | 753 | ||
649 | # | 754 | # |
650 | # TPM devices | 755 | # PCI GPIO expanders: |
651 | # | 756 | # |
652 | 757 | ||
653 | # | 758 | # |
654 | # I2C support | 759 | # SPI GPIO expanders: |
655 | # | 760 | # |
656 | # CONFIG_I2C is not set | ||
657 | 761 | ||
658 | # | 762 | # |
659 | # Misc devices | 763 | # AC97 GPIO expanders: |
660 | # | 764 | # |
765 | # CONFIG_W1 is not set | ||
766 | # CONFIG_POWER_SUPPLY is not set | ||
767 | # CONFIG_HWMON is not set | ||
768 | # CONFIG_THERMAL is not set | ||
769 | # CONFIG_WATCHDOG is not set | ||
770 | CONFIG_SSB_POSSIBLE=y | ||
661 | 771 | ||
662 | # | 772 | # |
663 | # Multimedia devices | 773 | # Sonics Silicon Backplane |
664 | # | 774 | # |
665 | # CONFIG_VIDEO_DEV is not set | 775 | # CONFIG_SSB is not set |
666 | 776 | ||
667 | # | 777 | # |
668 | # Digital Video Broadcasting Devices | 778 | # Multifunction device drivers |
669 | # | 779 | # |
670 | # CONFIG_DVB is not set | 780 | # CONFIG_MFD_CORE is not set |
781 | # CONFIG_MFD_SM501 is not set | ||
782 | # CONFIG_MFD_ASIC3 is not set | ||
783 | CONFIG_HTC_EGPIO=y | ||
784 | # CONFIG_HTC_PASIC3 is not set | ||
785 | # CONFIG_MFD_TMIO is not set | ||
786 | # CONFIG_MFD_T7L66XB is not set | ||
787 | # CONFIG_MFD_TC6387XB is not set | ||
788 | # CONFIG_MFD_TC6393XB is not set | ||
789 | |||
790 | # | ||
791 | # Multimedia Capabilities Port drivers | ||
792 | # | ||
793 | # CONFIG_MCP_SA11X0 is not set | ||
794 | # CONFIG_REGULATOR is not set | ||
795 | # CONFIG_MEDIA_SUPPORT is not set | ||
671 | 796 | ||
672 | # | 797 | # |
673 | # Graphics support | 798 | # Graphics support |
674 | # | 799 | # |
800 | # CONFIG_VGASTATE is not set | ||
801 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
675 | CONFIG_FB=y | 802 | CONFIG_FB=y |
803 | # CONFIG_FIRMWARE_EDID is not set | ||
804 | # CONFIG_FB_DDC is not set | ||
805 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set | ||
676 | CONFIG_FB_CFB_FILLRECT=y | 806 | CONFIG_FB_CFB_FILLRECT=y |
677 | CONFIG_FB_CFB_COPYAREA=y | 807 | CONFIG_FB_CFB_COPYAREA=y |
678 | CONFIG_FB_CFB_IMAGEBLIT=y | 808 | CONFIG_FB_CFB_IMAGEBLIT=y |
679 | CONFIG_FB_SOFT_CURSOR=y | 809 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set |
810 | # CONFIG_FB_SYS_FILLRECT is not set | ||
811 | # CONFIG_FB_SYS_COPYAREA is not set | ||
812 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
813 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
814 | # CONFIG_FB_SYS_FOPS is not set | ||
815 | # CONFIG_FB_SVGALIB is not set | ||
680 | # CONFIG_FB_MACMODES is not set | 816 | # CONFIG_FB_MACMODES is not set |
817 | # CONFIG_FB_BACKLIGHT is not set | ||
681 | # CONFIG_FB_MODE_HELPERS is not set | 818 | # CONFIG_FB_MODE_HELPERS is not set |
682 | # CONFIG_FB_TILEBLITTING is not set | 819 | # CONFIG_FB_TILEBLITTING is not set |
820 | |||
821 | # | ||
822 | # Frame buffer hardware drivers | ||
823 | # | ||
683 | CONFIG_FB_SA1100=y | 824 | CONFIG_FB_SA1100=y |
684 | # CONFIG_FB_S1D13XXX is not set | 825 | # CONFIG_FB_S1D13XXX is not set |
685 | # CONFIG_FB_VIRTUAL is not set | 826 | # CONFIG_FB_VIRTUAL is not set |
827 | # CONFIG_FB_METRONOME is not set | ||
828 | # CONFIG_FB_MB862XX is not set | ||
829 | # CONFIG_FB_BROADSHEET is not set | ||
830 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
831 | |||
832 | # | ||
833 | # Display device support | ||
834 | # | ||
835 | # CONFIG_DISPLAY_SUPPORT is not set | ||
686 | 836 | ||
687 | # | 837 | # |
688 | # Console display driver support | 838 | # Console display driver support |
@@ -691,65 +841,54 @@ CONFIG_FB_SA1100=y | |||
691 | # CONFIG_MDA_CONSOLE is not set | 841 | # CONFIG_MDA_CONSOLE is not set |
692 | CONFIG_DUMMY_CONSOLE=y | 842 | CONFIG_DUMMY_CONSOLE=y |
693 | # CONFIG_FRAMEBUFFER_CONSOLE is not set | 843 | # CONFIG_FRAMEBUFFER_CONSOLE is not set |
694 | |||
695 | # | ||
696 | # Logo configuration | ||
697 | # | ||
698 | # CONFIG_LOGO is not set | 844 | # CONFIG_LOGO is not set |
699 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | 845 | # CONFIG_SOUND is not set |
700 | 846 | # CONFIG_HID_SUPPORT is not set | |
701 | # | 847 | # CONFIG_USB_SUPPORT is not set |
702 | # Sound | 848 | # CONFIG_MMC is not set |
703 | # | 849 | # CONFIG_MEMSTICK is not set |
704 | CONFIG_SOUND=y | 850 | # CONFIG_NEW_LEDS is not set |
705 | 851 | # CONFIG_ACCESSIBILITY is not set | |
706 | # | 852 | CONFIG_RTC_LIB=y |
707 | # Advanced Linux Sound Architecture | 853 | # CONFIG_RTC_CLASS is not set |
708 | # | 854 | # CONFIG_DMADEVICES is not set |
709 | # CONFIG_SND is not set | 855 | # CONFIG_AUXDISPLAY is not set |
710 | 856 | # CONFIG_UIO is not set | |
711 | # | ||
712 | # Open Sound System | ||
713 | # | ||
714 | # CONFIG_SOUND_PRIME is not set | ||
715 | |||
716 | # | ||
717 | # USB support | ||
718 | # | ||
719 | CONFIG_USB_ARCH_HAS_HCD=y | ||
720 | # CONFIG_USB_ARCH_HAS_OHCI is not set | ||
721 | # CONFIG_USB is not set | ||
722 | 857 | ||
723 | # | 858 | # |
724 | # USB Gadget Support | 859 | # TI VLYNQ |
725 | # | 860 | # |
726 | # CONFIG_USB_GADGET is not set | 861 | # CONFIG_STAGING is not set |
727 | |||
728 | # | ||
729 | # MMC/SD Card support | ||
730 | # | ||
731 | # CONFIG_MMC is not set | ||
732 | 862 | ||
733 | # | 863 | # |
734 | # File systems | 864 | # File systems |
735 | # | 865 | # |
736 | CONFIG_EXT2_FS=y | 866 | CONFIG_EXT2_FS=y |
737 | # CONFIG_EXT2_FS_XATTR is not set | 867 | # CONFIG_EXT2_FS_XATTR is not set |
868 | # CONFIG_EXT2_FS_XIP is not set | ||
738 | # CONFIG_EXT3_FS is not set | 869 | # CONFIG_EXT3_FS is not set |
739 | # CONFIG_JBD is not set | 870 | # CONFIG_EXT4_FS is not set |
740 | # CONFIG_REISERFS_FS is not set | 871 | # CONFIG_REISERFS_FS is not set |
741 | # CONFIG_JFS_FS is not set | 872 | # CONFIG_JFS_FS is not set |
742 | 873 | # CONFIG_FS_POSIX_ACL is not set | |
743 | # | ||
744 | # XFS support | ||
745 | # | ||
746 | # CONFIG_XFS_FS is not set | 874 | # CONFIG_XFS_FS is not set |
747 | # CONFIG_MINIX_FS is not set | 875 | # CONFIG_OCFS2_FS is not set |
748 | # CONFIG_ROMFS_FS is not set | 876 | # CONFIG_BTRFS_FS is not set |
749 | # CONFIG_QUOTA is not set | 877 | # CONFIG_NILFS2_FS is not set |
878 | CONFIG_FILE_LOCKING=y | ||
879 | CONFIG_FSNOTIFY=y | ||
750 | CONFIG_DNOTIFY=y | 880 | CONFIG_DNOTIFY=y |
881 | # CONFIG_INOTIFY is not set | ||
882 | CONFIG_INOTIFY_USER=y | ||
883 | # CONFIG_QUOTA is not set | ||
751 | # CONFIG_AUTOFS_FS is not set | 884 | # CONFIG_AUTOFS_FS is not set |
752 | # CONFIG_AUTOFS4_FS is not set | 885 | # CONFIG_AUTOFS4_FS is not set |
886 | # CONFIG_FUSE_FS is not set | ||
887 | |||
888 | # | ||
889 | # Caches | ||
890 | # | ||
891 | # CONFIG_FSCACHE is not set | ||
753 | 892 | ||
754 | # | 893 | # |
755 | # CD-ROM/DVD Filesystems | 894 | # CD-ROM/DVD Filesystems |
@@ -771,16 +910,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | |||
771 | # Pseudo filesystems | 910 | # Pseudo filesystems |
772 | # | 911 | # |
773 | CONFIG_PROC_FS=y | 912 | CONFIG_PROC_FS=y |
913 | CONFIG_PROC_SYSCTL=y | ||
914 | CONFIG_PROC_PAGE_MONITOR=y | ||
774 | CONFIG_SYSFS=y | 915 | CONFIG_SYSFS=y |
775 | # CONFIG_DEVFS_FS is not set | ||
776 | # CONFIG_DEVPTS_FS_XATTR is not set | ||
777 | # CONFIG_TMPFS is not set | 916 | # CONFIG_TMPFS is not set |
778 | # CONFIG_HUGETLB_PAGE is not set | 917 | # CONFIG_HUGETLB_PAGE is not set |
779 | CONFIG_RAMFS=y | 918 | # CONFIG_CONFIGFS_FS is not set |
780 | 919 | CONFIG_MISC_FILESYSTEMS=y | |
781 | # | ||
782 | # Miscellaneous filesystems | ||
783 | # | ||
784 | # CONFIG_ADFS_FS is not set | 920 | # CONFIG_ADFS_FS is not set |
785 | # CONFIG_AFFS_FS is not set | 921 | # CONFIG_AFFS_FS is not set |
786 | # CONFIG_HFS_FS is not set | 922 | # CONFIG_HFS_FS is not set |
@@ -788,34 +924,37 @@ CONFIG_RAMFS=y | |||
788 | # CONFIG_BEFS_FS is not set | 924 | # CONFIG_BEFS_FS is not set |
789 | # CONFIG_BFS_FS is not set | 925 | # CONFIG_BFS_FS is not set |
790 | # CONFIG_EFS_FS is not set | 926 | # CONFIG_EFS_FS is not set |
791 | # CONFIG_JFFS_FS is not set | ||
792 | CONFIG_JFFS2_FS=y | 927 | CONFIG_JFFS2_FS=y |
793 | CONFIG_JFFS2_FS_DEBUG=0 | 928 | CONFIG_JFFS2_FS_DEBUG=0 |
794 | # CONFIG_JFFS2_FS_NAND is not set | 929 | CONFIG_JFFS2_FS_WRITEBUFFER=y |
795 | # CONFIG_JFFS2_FS_NOR_ECC is not set | 930 | # CONFIG_JFFS2_FS_WBUF_VERIFY is not set |
931 | # CONFIG_JFFS2_SUMMARY is not set | ||
932 | # CONFIG_JFFS2_FS_XATTR is not set | ||
796 | # CONFIG_JFFS2_COMPRESSION_OPTIONS is not set | 933 | # CONFIG_JFFS2_COMPRESSION_OPTIONS is not set |
797 | CONFIG_JFFS2_ZLIB=y | 934 | CONFIG_JFFS2_ZLIB=y |
935 | # CONFIG_JFFS2_LZO is not set | ||
798 | CONFIG_JFFS2_RTIME=y | 936 | CONFIG_JFFS2_RTIME=y |
799 | # CONFIG_JFFS2_RUBIN is not set | 937 | # CONFIG_JFFS2_RUBIN is not set |
800 | CONFIG_CRAMFS=m | 938 | CONFIG_CRAMFS=m |
939 | # CONFIG_SQUASHFS is not set | ||
801 | # CONFIG_VXFS_FS is not set | 940 | # CONFIG_VXFS_FS is not set |
941 | # CONFIG_MINIX_FS is not set | ||
942 | # CONFIG_OMFS_FS is not set | ||
802 | # CONFIG_HPFS_FS is not set | 943 | # CONFIG_HPFS_FS is not set |
803 | # CONFIG_QNX4FS_FS is not set | 944 | # CONFIG_QNX4FS_FS is not set |
945 | # CONFIG_ROMFS_FS is not set | ||
804 | # CONFIG_SYSV_FS is not set | 946 | # CONFIG_SYSV_FS is not set |
805 | # CONFIG_UFS_FS is not set | 947 | # CONFIG_UFS_FS is not set |
806 | 948 | CONFIG_NETWORK_FILESYSTEMS=y | |
807 | # | ||
808 | # Network File Systems | ||
809 | # | ||
810 | CONFIG_NFS_FS=y | 949 | CONFIG_NFS_FS=y |
811 | # CONFIG_NFS_V3 is not set | 950 | # CONFIG_NFS_V3 is not set |
812 | # CONFIG_NFS_V4 is not set | 951 | # CONFIG_NFS_V4 is not set |
813 | # CONFIG_NFS_DIRECTIO is not set | ||
814 | CONFIG_NFSD=m | 952 | CONFIG_NFSD=m |
815 | # CONFIG_NFSD_V3 is not set | 953 | # CONFIG_NFSD_V3 is not set |
816 | CONFIG_NFSD_TCP=y | 954 | # CONFIG_NFSD_V4 is not set |
817 | CONFIG_LOCKD=y | 955 | CONFIG_LOCKD=y |
818 | CONFIG_EXPORTFS=m | 956 | CONFIG_EXPORTFS=m |
957 | CONFIG_NFS_COMMON=y | ||
819 | CONFIG_SUNRPC=y | 958 | CONFIG_SUNRPC=y |
820 | # CONFIG_RPCSEC_GSS_KRB5 is not set | 959 | # CONFIG_RPCSEC_GSS_KRB5 is not set |
821 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 960 | # CONFIG_RPCSEC_GSS_SPKM3 is not set |
@@ -831,10 +970,6 @@ CONFIG_SMB_FS=m | |||
831 | # | 970 | # |
832 | # CONFIG_PARTITION_ADVANCED is not set | 971 | # CONFIG_PARTITION_ADVANCED is not set |
833 | CONFIG_MSDOS_PARTITION=y | 972 | CONFIG_MSDOS_PARTITION=y |
834 | |||
835 | # | ||
836 | # Native Language Support | ||
837 | # | ||
838 | CONFIG_NLS=y | 973 | CONFIG_NLS=y |
839 | CONFIG_NLS_DEFAULT="iso8859-1" | 974 | CONFIG_NLS_DEFAULT="iso8859-1" |
840 | # CONFIG_NLS_CODEPAGE_437 is not set | 975 | # CONFIG_NLS_CODEPAGE_437 is not set |
@@ -875,20 +1010,34 @@ CONFIG_NLS_DEFAULT="iso8859-1" | |||
875 | # CONFIG_NLS_KOI8_R is not set | 1010 | # CONFIG_NLS_KOI8_R is not set |
876 | # CONFIG_NLS_KOI8_U is not set | 1011 | # CONFIG_NLS_KOI8_U is not set |
877 | # CONFIG_NLS_UTF8 is not set | 1012 | # CONFIG_NLS_UTF8 is not set |
878 | 1013 | # CONFIG_DLM is not set | |
879 | # | ||
880 | # Profiling support | ||
881 | # | ||
882 | # CONFIG_PROFILING is not set | ||
883 | 1014 | ||
884 | # | 1015 | # |
885 | # Kernel hacking | 1016 | # Kernel hacking |
886 | # | 1017 | # |
887 | # CONFIG_PRINTK_TIME is not set | 1018 | # CONFIG_PRINTK_TIME is not set |
1019 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
1020 | CONFIG_ENABLE_MUST_CHECK=y | ||
1021 | CONFIG_FRAME_WARN=1024 | ||
1022 | # CONFIG_MAGIC_SYSRQ is not set | ||
1023 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1024 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1025 | # CONFIG_DEBUG_FS is not set | ||
1026 | # CONFIG_HEADERS_CHECK is not set | ||
888 | # CONFIG_DEBUG_KERNEL is not set | 1027 | # CONFIG_DEBUG_KERNEL is not set |
889 | CONFIG_LOG_BUF_SHIFT=14 | 1028 | # CONFIG_SLUB_DEBUG_ON is not set |
1029 | # CONFIG_SLUB_STATS is not set | ||
890 | CONFIG_DEBUG_BUGVERBOSE=y | 1030 | CONFIG_DEBUG_BUGVERBOSE=y |
1031 | CONFIG_DEBUG_MEMORY_INIT=y | ||
891 | CONFIG_FRAME_POINTER=y | 1032 | CONFIG_FRAME_POINTER=y |
1033 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1034 | # CONFIG_LATENCYTOP is not set | ||
1035 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
1036 | CONFIG_HAVE_FUNCTION_TRACER=y | ||
1037 | CONFIG_TRACING_SUPPORT=y | ||
1038 | # CONFIG_FTRACE is not set | ||
1039 | # CONFIG_SAMPLES is not set | ||
1040 | CONFIG_HAVE_ARCH_KGDB=y | ||
892 | # CONFIG_DEBUG_USER is not set | 1041 | # CONFIG_DEBUG_USER is not set |
893 | 1042 | ||
894 | # | 1043 | # |
@@ -896,21 +1045,120 @@ CONFIG_FRAME_POINTER=y | |||
896 | # | 1045 | # |
897 | # CONFIG_KEYS is not set | 1046 | # CONFIG_KEYS is not set |
898 | # CONFIG_SECURITY is not set | 1047 | # CONFIG_SECURITY is not set |
1048 | # CONFIG_SECURITYFS is not set | ||
1049 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
1050 | CONFIG_CRYPTO=y | ||
1051 | |||
1052 | # | ||
1053 | # Crypto core or helper | ||
1054 | # | ||
1055 | # CONFIG_CRYPTO_FIPS is not set | ||
1056 | CONFIG_CRYPTO_ALGAPI=m | ||
1057 | CONFIG_CRYPTO_ALGAPI2=m | ||
1058 | CONFIG_CRYPTO_RNG=m | ||
1059 | CONFIG_CRYPTO_RNG2=m | ||
1060 | # CONFIG_CRYPTO_MANAGER is not set | ||
1061 | # CONFIG_CRYPTO_MANAGER2 is not set | ||
1062 | # CONFIG_CRYPTO_GF128MUL is not set | ||
1063 | # CONFIG_CRYPTO_NULL is not set | ||
1064 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1065 | # CONFIG_CRYPTO_AUTHENC is not set | ||
1066 | # CONFIG_CRYPTO_TEST is not set | ||
1067 | |||
1068 | # | ||
1069 | # Authenticated Encryption with Associated Data | ||
1070 | # | ||
1071 | # CONFIG_CRYPTO_CCM is not set | ||
1072 | # CONFIG_CRYPTO_GCM is not set | ||
1073 | # CONFIG_CRYPTO_SEQIV is not set | ||
1074 | |||
1075 | # | ||
1076 | # Block modes | ||
1077 | # | ||
1078 | # CONFIG_CRYPTO_CBC is not set | ||
1079 | # CONFIG_CRYPTO_CTR is not set | ||
1080 | # CONFIG_CRYPTO_CTS is not set | ||
1081 | # CONFIG_CRYPTO_ECB is not set | ||
1082 | # CONFIG_CRYPTO_LRW is not set | ||
1083 | # CONFIG_CRYPTO_PCBC is not set | ||
1084 | # CONFIG_CRYPTO_XTS is not set | ||
1085 | |||
1086 | # | ||
1087 | # Hash modes | ||
1088 | # | ||
1089 | # CONFIG_CRYPTO_HMAC is not set | ||
1090 | # CONFIG_CRYPTO_XCBC is not set | ||
1091 | # CONFIG_CRYPTO_VMAC is not set | ||
1092 | |||
1093 | # | ||
1094 | # Digest | ||
1095 | # | ||
1096 | # CONFIG_CRYPTO_CRC32C is not set | ||
1097 | # CONFIG_CRYPTO_GHASH is not set | ||
1098 | # CONFIG_CRYPTO_MD4 is not set | ||
1099 | # CONFIG_CRYPTO_MD5 is not set | ||
1100 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1101 | # CONFIG_CRYPTO_RMD128 is not set | ||
1102 | # CONFIG_CRYPTO_RMD160 is not set | ||
1103 | # CONFIG_CRYPTO_RMD256 is not set | ||
1104 | # CONFIG_CRYPTO_RMD320 is not set | ||
1105 | # CONFIG_CRYPTO_SHA1 is not set | ||
1106 | # CONFIG_CRYPTO_SHA256 is not set | ||
1107 | # CONFIG_CRYPTO_SHA512 is not set | ||
1108 | # CONFIG_CRYPTO_TGR192 is not set | ||
1109 | # CONFIG_CRYPTO_WP512 is not set | ||
1110 | |||
1111 | # | ||
1112 | # Ciphers | ||
1113 | # | ||
1114 | CONFIG_CRYPTO_AES=m | ||
1115 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1116 | # CONFIG_CRYPTO_ARC4 is not set | ||
1117 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1118 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1119 | # CONFIG_CRYPTO_CAST5 is not set | ||
1120 | # CONFIG_CRYPTO_CAST6 is not set | ||
1121 | # CONFIG_CRYPTO_DES is not set | ||
1122 | # CONFIG_CRYPTO_FCRYPT is not set | ||
1123 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1124 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1125 | # CONFIG_CRYPTO_SEED is not set | ||
1126 | # CONFIG_CRYPTO_SERPENT is not set | ||
1127 | # CONFIG_CRYPTO_TEA is not set | ||
1128 | # CONFIG_CRYPTO_TWOFISH is not set | ||
899 | 1129 | ||
900 | # | 1130 | # |
901 | # Cryptographic options | 1131 | # Compression |
902 | # | 1132 | # |
903 | # CONFIG_CRYPTO is not set | 1133 | # CONFIG_CRYPTO_DEFLATE is not set |
1134 | # CONFIG_CRYPTO_ZLIB is not set | ||
1135 | # CONFIG_CRYPTO_LZO is not set | ||
904 | 1136 | ||
905 | # | 1137 | # |
906 | # Hardware crypto devices | 1138 | # Random Number Generation |
907 | # | 1139 | # |
1140 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
1141 | CONFIG_CRYPTO_HW=y | ||
1142 | # CONFIG_BINARY_PRINTF is not set | ||
908 | 1143 | ||
909 | # | 1144 | # |
910 | # Library routines | 1145 | # Library routines |
911 | # | 1146 | # |
1147 | CONFIG_BITREVERSE=y | ||
1148 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
912 | CONFIG_CRC_CCITT=m | 1149 | CONFIG_CRC_CCITT=m |
1150 | # CONFIG_CRC16 is not set | ||
1151 | # CONFIG_CRC_T10DIF is not set | ||
1152 | # CONFIG_CRC_ITU_T is not set | ||
913 | CONFIG_CRC32=y | 1153 | CONFIG_CRC32=y |
1154 | # CONFIG_CRC7 is not set | ||
914 | # CONFIG_LIBCRC32C is not set | 1155 | # CONFIG_LIBCRC32C is not set |
915 | CONFIG_ZLIB_INFLATE=y | 1156 | CONFIG_ZLIB_INFLATE=y |
916 | CONFIG_ZLIB_DEFLATE=y | 1157 | CONFIG_ZLIB_DEFLATE=y |
1158 | CONFIG_DECOMPRESS_GZIP=y | ||
1159 | CONFIG_DECOMPRESS_BZIP2=y | ||
1160 | CONFIG_DECOMPRESS_LZMA=y | ||
1161 | CONFIG_HAS_IOMEM=y | ||
1162 | CONFIG_HAS_IOPORT=y | ||
1163 | CONFIG_HAS_DMA=y | ||
1164 | CONFIG_NLATTR=y | ||
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index 4e5c07f4e456..03a7f3857c5e 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig | |||
@@ -53,23 +53,23 @@ config SA1100_COLLIE | |||
53 | 53 | ||
54 | config SA1100_H3100 | 54 | config SA1100_H3100 |
55 | bool "Compaq iPAQ H3100" | 55 | bool "Compaq iPAQ H3100" |
56 | select HTC_EGPIO | ||
56 | help | 57 | help |
57 | Say Y here if you intend to run this kernel on the Compaq iPAQ | 58 | Say Y here if you intend to run this kernel on the Compaq iPAQ |
58 | H3100 handheld computer. Information about this machine and the | 59 | H3100 handheld computer. Information about this machine and the |
59 | Linux port to this machine can be found at: | 60 | Linux port to this machine can be found at: |
60 | 61 | ||
61 | <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100> | 62 | <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100> |
62 | <http://www.compaq.com/products/handhelds/pocketpc/> | ||
63 | 63 | ||
64 | config SA1100_H3600 | 64 | config SA1100_H3600 |
65 | bool "Compaq iPAQ H3600/H3700" | 65 | bool "Compaq iPAQ H3600/H3700" |
66 | select HTC_EGPIO | ||
66 | help | 67 | help |
67 | Say Y here if you intend to run this kernel on the Compaq iPAQ | 68 | Say Y here if you intend to run this kernel on the Compaq iPAQ |
68 | H3600 handheld computer. Information about this machine and the | 69 | H3600 handheld computer. Information about this machine and the |
69 | Linux port to this machine can be found at: | 70 | Linux port to this machine can be found at: |
70 | 71 | ||
71 | <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600> | 72 | <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600> |
72 | <http://www.compaq.com/products/handhelds/pocketpc/> | ||
73 | 73 | ||
74 | config SA1100_BADGE4 | 74 | config SA1100_BADGE4 |
75 | bool "HP Labs BadgePAD 4" | 75 | bool "HP Labs BadgePAD 4" |
diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile index bb7b8198d0c4..89349c1dd7a6 100644 --- a/arch/arm/mach-sa1100/Makefile +++ b/arch/arm/mach-sa1100/Makefile | |||
@@ -25,8 +25,8 @@ led-$(CONFIG_SA1100_CERF) += leds-cerf.o | |||
25 | 25 | ||
26 | obj-$(CONFIG_SA1100_COLLIE) += collie.o | 26 | obj-$(CONFIG_SA1100_COLLIE) += collie.o |
27 | 27 | ||
28 | obj-$(CONFIG_SA1100_H3100) += h3600.o | 28 | obj-$(CONFIG_SA1100_H3100) += h3100.o h3xxx.o |
29 | obj-$(CONFIG_SA1100_H3600) += h3600.o | 29 | obj-$(CONFIG_SA1100_H3600) += h3600.o h3xxx.o |
30 | 30 | ||
31 | obj-$(CONFIG_SA1100_HACKKIT) += hackkit.o | 31 | obj-$(CONFIG_SA1100_HACKKIT) += hackkit.o |
32 | led-$(CONFIG_SA1100_HACKKIT) += leds-hackkit.o | 32 | led-$(CONFIG_SA1100_HACKKIT) += leds-hackkit.o |
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 55e64477a876..169e5b87dbff 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c | |||
@@ -249,10 +249,10 @@ static void __init assabet_init(void) | |||
249 | #endif | 249 | #endif |
250 | } | 250 | } |
251 | 251 | ||
252 | sa11x0_set_flash_data(&assabet_flash_data, assabet_flash_resources, | 252 | sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources, |
253 | ARRAY_SIZE(assabet_flash_resources)); | 253 | ARRAY_SIZE(assabet_flash_resources)); |
254 | sa11x0_set_irda_data(&assabet_irda_data); | 254 | sa11x0_register_irda(&assabet_irda_data); |
255 | sa11x0_set_mcp_data(&assabet_mcp_data); | 255 | sa11x0_register_mcp(&assabet_mcp_data); |
256 | } | 256 | } |
257 | 257 | ||
258 | /* | 258 | /* |
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c index ab5883b39ddf..051ec0f0023c 100644 --- a/arch/arm/mach-sa1100/badge4.c +++ b/arch/arm/mach-sa1100/badge4.c | |||
@@ -212,7 +212,7 @@ static int __init badge4_init(void) | |||
212 | /* maybe turn on 5v0 from the start */ | 212 | /* maybe turn on 5v0 from the start */ |
213 | badge4_set_5V(BADGE4_5V_INITIALLY, five_v_on); | 213 | badge4_set_5V(BADGE4_5V_INITIALLY, five_v_on); |
214 | 214 | ||
215 | sa11x0_set_flash_data(&badge4_flash_data, &badge4_flash_resource, 1); | 215 | sa11x0_register_mtd(&badge4_flash_data, &badge4_flash_resource, 1); |
216 | 216 | ||
217 | return 0; | 217 | return 0; |
218 | } | 218 | } |
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c index fd3ad9cfc912..bc950ef418af 100644 --- a/arch/arm/mach-sa1100/cerf.c +++ b/arch/arm/mach-sa1100/cerf.c | |||
@@ -129,8 +129,8 @@ static struct mcp_plat_data cerf_mcp_data = { | |||
129 | static void __init cerf_init(void) | 129 | static void __init cerf_init(void) |
130 | { | 130 | { |
131 | platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices)); | 131 | platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices)); |
132 | sa11x0_set_flash_data(&cerf_flash_data, &cerf_flash_resource, 1); | 132 | sa11x0_register_mtd(&cerf_flash_data, &cerf_flash_resource, 1); |
133 | sa11x0_set_mcp_data(&cerf_mcp_data); | 133 | sa11x0_register_mcp(&cerf_mcp_data); |
134 | } | 134 | } |
135 | 135 | ||
136 | MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube") | 136 | MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube") |
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index bbf2ebcc3066..9982c5c28edf 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/mtd/partitions.h> | 26 | #include <linux/mtd/partitions.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/gpio.h> | 28 | #include <linux/gpio.h> |
29 | #include <linux/pda_power.h> | ||
29 | 30 | ||
30 | #include <mach/hardware.h> | 31 | #include <mach/hardware.h> |
31 | #include <asm/mach-types.h> | 32 | #include <asm/mach-types.h> |
@@ -56,6 +57,7 @@ static struct resource collie_scoop_resources[] = { | |||
56 | static struct scoop_config collie_scoop_setup = { | 57 | static struct scoop_config collie_scoop_setup = { |
57 | .io_dir = COLLIE_SCOOP_IO_DIR, | 58 | .io_dir = COLLIE_SCOOP_IO_DIR, |
58 | .io_out = COLLIE_SCOOP_IO_OUT, | 59 | .io_out = COLLIE_SCOOP_IO_OUT, |
60 | .gpio_base = COLLIE_SCOOP_GPIO_BASE, | ||
59 | }; | 61 | }; |
60 | 62 | ||
61 | struct platform_device colliescoop_device = { | 63 | struct platform_device colliescoop_device = { |
@@ -85,6 +87,70 @@ static struct scoop_pcmcia_config collie_pcmcia_config = { | |||
85 | static struct mcp_plat_data collie_mcp_data = { | 87 | static struct mcp_plat_data collie_mcp_data = { |
86 | .mccr0 = MCCR0_ADM | MCCR0_ExtClk, | 88 | .mccr0 = MCCR0_ADM | MCCR0_ExtClk, |
87 | .sclk_rate = 9216000, | 89 | .sclk_rate = 9216000, |
90 | .gpio_base = COLLIE_TC35143_GPIO_BASE, | ||
91 | }; | ||
92 | |||
93 | /* | ||
94 | * Collie AC IN | ||
95 | */ | ||
96 | static int collie_power_init(struct device *dev) | ||
97 | { | ||
98 | int ret = gpio_request(COLLIE_GPIO_AC_IN, "ac in"); | ||
99 | if (ret) | ||
100 | goto err_gpio_req; | ||
101 | |||
102 | ret = gpio_direction_input(COLLIE_GPIO_AC_IN); | ||
103 | if (ret) | ||
104 | goto err_gpio_in; | ||
105 | |||
106 | return 0; | ||
107 | |||
108 | err_gpio_in: | ||
109 | gpio_free(COLLIE_GPIO_AC_IN); | ||
110 | err_gpio_req: | ||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | static void collie_power_exit(struct device *dev) | ||
115 | { | ||
116 | gpio_free(COLLIE_GPIO_AC_IN); | ||
117 | } | ||
118 | |||
119 | static int collie_power_ac_online(void) | ||
120 | { | ||
121 | return gpio_get_value(COLLIE_GPIO_AC_IN) == 2; | ||
122 | } | ||
123 | |||
124 | static char *collie_ac_supplied_to[] = { | ||
125 | "main-battery", | ||
126 | "backup-battery", | ||
127 | }; | ||
128 | |||
129 | static struct pda_power_pdata collie_power_data = { | ||
130 | .init = collie_power_init, | ||
131 | .is_ac_online = collie_power_ac_online, | ||
132 | .exit = collie_power_exit, | ||
133 | .supplied_to = collie_ac_supplied_to, | ||
134 | .num_supplicants = ARRAY_SIZE(collie_ac_supplied_to), | ||
135 | }; | ||
136 | |||
137 | static struct resource collie_power_resource[] = { | ||
138 | { | ||
139 | .name = "ac", | ||
140 | .start = gpio_to_irq(COLLIE_GPIO_AC_IN), | ||
141 | .end = gpio_to_irq(COLLIE_GPIO_AC_IN), | ||
142 | .flags = IORESOURCE_IRQ | | ||
143 | IORESOURCE_IRQ_HIGHEDGE | | ||
144 | IORESOURCE_IRQ_LOWEDGE, | ||
145 | }, | ||
146 | }; | ||
147 | |||
148 | static struct platform_device collie_power_device = { | ||
149 | .name = "pda-power", | ||
150 | .id = -1, | ||
151 | .dev.platform_data = &collie_power_data, | ||
152 | .resource = collie_power_resource, | ||
153 | .num_resources = ARRAY_SIZE(collie_power_resource), | ||
88 | }; | 154 | }; |
89 | 155 | ||
90 | #ifdef CONFIG_SHARP_LOCOMO | 156 | #ifdef CONFIG_SHARP_LOCOMO |
@@ -178,6 +244,7 @@ struct platform_device collie_locomo_device = { | |||
178 | static struct platform_device *devices[] __initdata = { | 244 | static struct platform_device *devices[] __initdata = { |
179 | &collie_locomo_device, | 245 | &collie_locomo_device, |
180 | &colliescoop_device, | 246 | &colliescoop_device, |
247 | &collie_power_device, | ||
181 | }; | 248 | }; |
182 | 249 | ||
183 | static struct mtd_partition collie_partitions[] = { | 250 | static struct mtd_partition collie_partitions[] = { |
@@ -248,22 +315,24 @@ static void __init collie_init(void) | |||
248 | GPDR = GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 | | 315 | GPDR = GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 | |
249 | GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD | | 316 | GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD | |
250 | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK | | 317 | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK | |
251 | COLLIE_GPIO_UCB1x00_RESET | COLLIE_GPIO_nMIC_ON | | 318 | _COLLIE_GPIO_UCB1x00_RESET | _COLLIE_GPIO_nMIC_ON | |
252 | COLLIE_GPIO_nREMOCON_ON | GPIO_32_768kHz; | 319 | _COLLIE_GPIO_nREMOCON_ON | GPIO_32_768kHz; |
253 | 320 | ||
254 | PPDR = PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 | | 321 | PPDR = PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 | |
255 | PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | | 322 | PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | |
256 | PPC_TXD1 | PPC_TXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM; | 323 | PPC_TXD1 | PPC_TXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM; |
257 | 324 | ||
258 | PWER = COLLIE_GPIO_AC_IN | COLLIE_GPIO_CO | COLLIE_GPIO_ON_KEY | | 325 | PWER = _COLLIE_GPIO_AC_IN | _COLLIE_GPIO_CO | _COLLIE_GPIO_ON_KEY | |
259 | COLLIE_GPIO_WAKEUP | COLLIE_GPIO_nREMOCON_INT | PWER_RTC; | 326 | _COLLIE_GPIO_WAKEUP | _COLLIE_GPIO_nREMOCON_INT | PWER_RTC; |
260 | 327 | ||
261 | PGSR = COLLIE_GPIO_nREMOCON_ON; | 328 | PGSR = _COLLIE_GPIO_nREMOCON_ON; |
262 | 329 | ||
263 | PSDR = PPC_RXD1 | PPC_RXD2 | PPC_RXD3 | PPC_RXD4; | 330 | PSDR = PPC_RXD1 | PPC_RXD2 | PPC_RXD3 | PPC_RXD4; |
264 | 331 | ||
265 | PCFR = PCFR_OPDE; | 332 | PCFR = PCFR_OPDE; |
266 | 333 | ||
334 | GPSR |= _COLLIE_GPIO_UCB1x00_RESET; | ||
335 | |||
267 | 336 | ||
268 | platform_scoop_config = &collie_pcmcia_config; | 337 | platform_scoop_config = &collie_pcmcia_config; |
269 | 338 | ||
@@ -272,9 +341,9 @@ static void __init collie_init(void) | |||
272 | printk(KERN_WARNING "collie: Unable to register LoCoMo device\n"); | 341 | printk(KERN_WARNING "collie: Unable to register LoCoMo device\n"); |
273 | } | 342 | } |
274 | 343 | ||
275 | sa11x0_set_flash_data(&collie_flash_data, collie_flash_resources, | 344 | sa11x0_register_mtd(&collie_flash_data, collie_flash_resources, |
276 | ARRAY_SIZE(collie_flash_resources)); | 345 | ARRAY_SIZE(collie_flash_resources)); |
277 | sa11x0_set_mcp_data(&collie_mcp_data); | 346 | sa11x0_register_mcp(&collie_mcp_data); |
278 | 347 | ||
279 | sharpsl_save_param(); | 348 | sharpsl_save_param(); |
280 | } | 349 | } |
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 23cfdd593954..9faea1511c1f 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c | |||
@@ -162,6 +162,17 @@ static void sa1100_power_off(void) | |||
162 | PMCR = PMCR_SF; | 162 | PMCR = PMCR_SF; |
163 | } | 163 | } |
164 | 164 | ||
165 | static void sa11x0_register_device(struct platform_device *dev, void *data) | ||
166 | { | ||
167 | int err; | ||
168 | dev->dev.platform_data = data; | ||
169 | err = platform_device_register(dev); | ||
170 | if (err) | ||
171 | printk(KERN_ERR "Unable to register device %s: %d\n", | ||
172 | dev->name, err); | ||
173 | } | ||
174 | |||
175 | |||
165 | static struct resource sa11x0udc_resources[] = { | 176 | static struct resource sa11x0udc_resources[] = { |
166 | [0] = { | 177 | [0] = { |
167 | .start = 0x80000000, | 178 | .start = 0x80000000, |
@@ -234,9 +245,9 @@ static struct platform_device sa11x0mcp_device = { | |||
234 | .resource = sa11x0mcp_resources, | 245 | .resource = sa11x0mcp_resources, |
235 | }; | 246 | }; |
236 | 247 | ||
237 | void sa11x0_set_mcp_data(struct mcp_plat_data *data) | 248 | void sa11x0_register_mcp(struct mcp_plat_data *data) |
238 | { | 249 | { |
239 | sa11x0mcp_device.dev.platform_data = data; | 250 | sa11x0_register_device(&sa11x0mcp_device, data); |
240 | } | 251 | } |
241 | 252 | ||
242 | static struct resource sa11x0ssp_resources[] = { | 253 | static struct resource sa11x0ssp_resources[] = { |
@@ -293,13 +304,13 @@ static struct platform_device sa11x0mtd_device = { | |||
293 | .id = -1, | 304 | .id = -1, |
294 | }; | 305 | }; |
295 | 306 | ||
296 | void sa11x0_set_flash_data(struct flash_platform_data *flash, | 307 | void sa11x0_register_mtd(struct flash_platform_data *flash, |
297 | struct resource *res, int nr) | 308 | struct resource *res, int nr) |
298 | { | 309 | { |
299 | flash->name = "sa1100"; | 310 | flash->name = "sa1100"; |
300 | sa11x0mtd_device.dev.platform_data = flash; | ||
301 | sa11x0mtd_device.resource = res; | 311 | sa11x0mtd_device.resource = res; |
302 | sa11x0mtd_device.num_resources = nr; | 312 | sa11x0mtd_device.num_resources = nr; |
313 | sa11x0_register_device(&sa11x0mtd_device, flash); | ||
303 | } | 314 | } |
304 | 315 | ||
305 | static struct resource sa11x0ir_resources[] = { | 316 | static struct resource sa11x0ir_resources[] = { |
@@ -329,9 +340,9 @@ static struct platform_device sa11x0ir_device = { | |||
329 | .resource = sa11x0ir_resources, | 340 | .resource = sa11x0ir_resources, |
330 | }; | 341 | }; |
331 | 342 | ||
332 | void sa11x0_set_irda_data(struct irda_platform_data *irda) | 343 | void sa11x0_register_irda(struct irda_platform_data *irda) |
333 | { | 344 | { |
334 | sa11x0ir_device.dev.platform_data = irda; | 345 | sa11x0_register_device(&sa11x0ir_device, irda); |
335 | } | 346 | } |
336 | 347 | ||
337 | static struct platform_device sa11x0rtc_device = { | 348 | static struct platform_device sa11x0rtc_device = { |
@@ -343,21 +354,15 @@ static struct platform_device *sa11x0_devices[] __initdata = { | |||
343 | &sa11x0udc_device, | 354 | &sa11x0udc_device, |
344 | &sa11x0uart1_device, | 355 | &sa11x0uart1_device, |
345 | &sa11x0uart3_device, | 356 | &sa11x0uart3_device, |
346 | &sa11x0mcp_device, | ||
347 | &sa11x0ssp_device, | 357 | &sa11x0ssp_device, |
348 | &sa11x0pcmcia_device, | 358 | &sa11x0pcmcia_device, |
349 | &sa11x0fb_device, | 359 | &sa11x0fb_device, |
350 | &sa11x0mtd_device, | ||
351 | &sa11x0rtc_device, | 360 | &sa11x0rtc_device, |
352 | }; | 361 | }; |
353 | 362 | ||
354 | static int __init sa1100_init(void) | 363 | static int __init sa1100_init(void) |
355 | { | 364 | { |
356 | pm_power_off = sa1100_power_off; | 365 | pm_power_off = sa1100_power_off; |
357 | |||
358 | if (sa11x0ir_device.dev.platform_data) | ||
359 | platform_device_register(&sa11x0ir_device); | ||
360 | |||
361 | return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices)); | 366 | return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices)); |
362 | } | 367 | } |
363 | 368 | ||
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h index 793c2e6c991f..ec03f187c52b 100644 --- a/arch/arm/mach-sa1100/generic.h +++ b/arch/arm/mach-sa1100/generic.h | |||
@@ -32,14 +32,11 @@ extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx); | |||
32 | struct flash_platform_data; | 32 | struct flash_platform_data; |
33 | struct resource; | 33 | struct resource; |
34 | 34 | ||
35 | extern void sa11x0_set_flash_data(struct flash_platform_data *flash, | 35 | void sa11x0_register_mtd(struct flash_platform_data *flash, |
36 | struct resource *res, int nr); | 36 | struct resource *res, int nr); |
37 | |||
38 | struct sa11x0_ssp_plat_ops; | ||
39 | extern void sa11x0_set_ssp_data(struct sa11x0_ssp_plat_ops *ops); | ||
40 | 37 | ||
41 | struct irda_platform_data; | 38 | struct irda_platform_data; |
42 | void sa11x0_set_irda_data(struct irda_platform_data *irda); | 39 | void sa11x0_register_irda(struct irda_platform_data *irda); |
43 | 40 | ||
44 | struct mcp_plat_data; | 41 | struct mcp_plat_data; |
45 | void sa11x0_set_mcp_data(struct mcp_plat_data *data); | 42 | void sa11x0_register_mcp(struct mcp_plat_data *data); |
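For reference, a minimal board-file sketch of the renamed registration helpers declared above. This is an illustration only: the board name and data values are placeholders loosely copied from the assabet, cerf and collie hunks in this patch, not code from the patch itself.

	/* example_board.c -- hypothetical SA11x0 board file (illustration only) */
	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <asm/sizes.h>
	#include <mach/hardware.h>
	#include <mach/mcp.h>
	#include <asm/mach/flash.h>
	#include "generic.h"

	static struct flash_platform_data example_flash_data = {
		.map_name	= "cfi_probe",
	};

	static struct resource example_flash_resource = {
		.start	= SA1100_CS0_PHYS,
		.end	= SA1100_CS0_PHYS + SZ_32M - 1,
		.flags	= IORESOURCE_MEM,
	};

	static struct mcp_plat_data example_mcp_data = {
		.mccr0		= MCCR0_ADM,	/* placeholder value */
		.sclk_rate	= 9216000,	/* placeholder value */
	};

	static void __init example_init(void)
	{
		/*
		 * After this patch each helper attaches the platform data and
		 * registers the device itself (via sa11x0_register_device() in
		 * generic.c), so the device only appears on boards that call it.
		 */
		sa11x0_register_mtd(&example_flash_data, &example_flash_resource, 1);
		sa11x0_register_mcp(&example_mcp_data);
	}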
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c new file mode 100644 index 000000000000..0c7cea0dc013 --- /dev/null +++ b/arch/arm/mach-sa1100/h3100.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Support for Compaq iPAQ H3100 handheld computer | ||
3 | * | ||
4 | * Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks) | ||
5 | * Copyright (c) 2009 Dmitry Artamonow <mad_soft@inbox.ru> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/gpio.h> | ||
16 | |||
17 | #include <asm/mach-types.h> | ||
18 | #include <asm/mach/arch.h> | ||
19 | #include <asm/mach/irda.h> | ||
20 | |||
21 | #include <mach/h3xxx.h> | ||
22 | |||
23 | #include "generic.h" | ||
24 | |||
25 | /* | ||
26 | * helper for sa1100fb | ||
27 | */ | ||
28 | static void h3100_lcd_power(int enable) | ||
29 | { | ||
30 | if (!gpio_request(H3XXX_EGPIO_LCD_ON, "LCD ON")) { | ||
31 | gpio_set_value(H3100_GPIO_LCD_3V_ON, enable); | ||
32 | gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable); | ||
33 | gpio_free(H3XXX_EGPIO_LCD_ON); | ||
34 | } else { | ||
35 | pr_err("%s: can't request H3XXX_EGPIO_LCD_ON\n", __func__); | ||
36 | } | ||
37 | } | ||
38 | |||
39 | |||
40 | static void __init h3100_map_io(void) | ||
41 | { | ||
42 | h3xxx_map_io(); | ||
43 | |||
44 | sa1100fb_lcd_power = h3100_lcd_power; | ||
45 | |||
46 | /* Older bootloaders put GPIO2-9 in alternate mode on the |||
47 | assumption that they are used for video */ | ||
48 | GAFR &= ~0x000001fb; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * This turns the IRDA power on or off on the Compaq H3100 | ||
53 | */ | ||
54 | static int h3100_irda_set_power(struct device *dev, unsigned int state) | ||
55 | { | ||
56 | gpio_set_value(H3100_GPIO_IR_ON, state); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static void h3100_irda_set_speed(struct device *dev, unsigned int speed) | ||
61 | { | ||
62 | gpio_set_value(H3100_GPIO_IR_FSEL, !(speed < 4000000)); | ||
63 | } | ||
64 | |||
65 | static struct irda_platform_data h3100_irda_data = { | ||
66 | .set_power = h3100_irda_set_power, | ||
67 | .set_speed = h3100_irda_set_speed, | ||
68 | }; | ||
69 | |||
70 | static struct gpio_default_state h3100_default_gpio[] = { | ||
71 | { H3100_GPIO_IR_ON, GPIO_MODE_OUT0, "IrDA power" }, | ||
72 | { H3100_GPIO_IR_FSEL, GPIO_MODE_OUT0, "IrDA fsel" }, | ||
73 | { H3XXX_GPIO_COM_DCD, GPIO_MODE_IN, "COM DCD" }, | ||
74 | { H3XXX_GPIO_COM_CTS, GPIO_MODE_IN, "COM CTS" }, | ||
75 | { H3XXX_GPIO_COM_RTS, GPIO_MODE_OUT0, "COM RTS" }, | ||
76 | { H3100_GPIO_LCD_3V_ON, GPIO_MODE_OUT0, "LCD 3v" }, | ||
77 | }; | ||
78 | |||
79 | static void __init h3100_mach_init(void) | ||
80 | { | ||
81 | h3xxx_init_gpio(h3100_default_gpio, ARRAY_SIZE(h3100_default_gpio)); | ||
82 | h3xxx_mach_init(); | ||
83 | sa11x0_register_irda(&h3100_irda_data); | ||
84 | } | ||
85 | |||
86 | MACHINE_START(H3100, "Compaq iPAQ H3100") | ||
87 | .phys_io = 0x80000000, | ||
88 | .io_pg_offst = ((0xf8000000) >> 18) & 0xfffc, | ||
89 | .boot_params = 0xc0000100, | ||
90 | .map_io = h3100_map_io, | ||
91 | .init_irq = sa1100_init_irq, | ||
92 | .timer = &sa1100_timer, | ||
93 | .init_machine = h3100_mach_init, | ||
94 | MACHINE_END | ||
95 | |||
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c index 0eb2f159578b..af3b71459f8d 100644 --- a/arch/arm/mach-sa1100/h3600.c +++ b/arch/arm/mach-sa1100/h3600.c | |||
@@ -1,421 +1,127 @@ | |||
1 | /* | 1 | /* |
2 | * Hardware definitions for Compaq iPAQ H3xxx Handheld Computers | 2 | * Support for Compaq iPAQ H3600 handheld computer |
3 | * | 3 | * |
4 | * Copyright 2000,1 Compaq Computer Corporation. | 4 | * Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks) |
5 | * Copyright (c) 2009 Dmitry Artamonow <mad_soft@inbox.ru> | ||
5 | * | 6 | * |
6 | * Use consistent with the GNU GPL is permitted, | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * provided that this copyright notice is | 8 | * it under the terms of the GNU General Public License version 2 as |
8 | * preserved in its entirety in all copies and derived works. | 9 | * published by the Free Software Foundation. |
9 | * | ||
10 | * COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | ||
11 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | ||
12 | * FITNESS FOR ANY PARTICULAR PURPOSE. | ||
13 | * | ||
14 | * Author: Jamey Hicks. | ||
15 | * | ||
16 | * History: | ||
17 | * | ||
18 | * 2001-10-?? Andrew Christian Added support for iPAQ H3800 | ||
19 | * and abstracted EGPIO interface. | ||
20 | * | 10 | * |
21 | */ | 11 | */ |
22 | #include <linux/module.h> | 12 | |
23 | #include <linux/init.h> | 13 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
25 | #include <linux/tty.h> | 15 | #include <linux/gpio.h> |
26 | #include <linux/pm.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/mtd/mtd.h> | ||
29 | #include <linux/mtd/partitions.h> | ||
30 | #include <linux/serial_core.h> | ||
31 | 16 | ||
32 | #include <asm/irq.h> | ||
33 | #include <mach/hardware.h> | ||
34 | #include <asm/mach-types.h> | 17 | #include <asm/mach-types.h> |
35 | #include <asm/setup.h> | ||
36 | |||
37 | #include <asm/mach/irq.h> | ||
38 | #include <asm/mach/arch.h> | 18 | #include <asm/mach/arch.h> |
39 | #include <asm/mach/flash.h> | ||
40 | #include <asm/mach/irda.h> | 19 | #include <asm/mach/irda.h> |
41 | #include <asm/mach/map.h> | ||
42 | #include <asm/mach/serial_sa1100.h> | ||
43 | 20 | ||
44 | #include <mach/h3600.h> | 21 | #include <mach/h3xxx.h> |
45 | #include <mach/h3600_gpio.h> | ||
46 | 22 | ||
47 | #include "generic.h" | 23 | #include "generic.h" |
48 | 24 | ||
49 | void (*assign_h3600_egpio)(enum ipaq_egpio_type x, int level); | ||
50 | EXPORT_SYMBOL(assign_h3600_egpio); | ||
51 | |||
52 | static struct mtd_partition h3xxx_partitions[] = { | ||
53 | { | ||
54 | .name = "H3XXX boot firmware", | ||
55 | .size = 0x00040000, | ||
56 | .offset = 0, | ||
57 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
58 | }, { | ||
59 | .name = "H3XXX rootfs", | ||
60 | .size = MTDPART_SIZ_FULL, | ||
61 | .offset = 0x00040000, | ||
62 | } | ||
63 | }; | ||
64 | |||
65 | static void h3xxx_set_vpp(int vpp) | ||
66 | { | ||
67 | assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, vpp); | ||
68 | } | ||
69 | |||
70 | static struct flash_platform_data h3xxx_flash_data = { | ||
71 | .map_name = "cfi_probe", | ||
72 | .set_vpp = h3xxx_set_vpp, | ||
73 | .parts = h3xxx_partitions, | ||
74 | .nr_parts = ARRAY_SIZE(h3xxx_partitions), | ||
75 | }; | ||
76 | |||
77 | static struct resource h3xxx_flash_resource = { | ||
78 | .start = SA1100_CS0_PHYS, | ||
79 | .end = SA1100_CS0_PHYS + SZ_32M - 1, | ||
80 | .flags = IORESOURCE_MEM, | ||
81 | }; | ||
82 | |||
83 | /* | 25 | /* |
84 | * This turns the IRDA power on or off on the Compaq H3600 | 26 | * helper for sa1100fb |
85 | */ | ||
86 | static int h3600_irda_set_power(struct device *dev, unsigned int state) | ||
87 | { | ||
88 | assign_h3600_egpio( IPAQ_EGPIO_IR_ON, state ); | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static void h3600_irda_set_speed(struct device *dev, unsigned int speed) | ||
94 | { | ||
95 | assign_h3600_egpio(IPAQ_EGPIO_IR_FSEL, !(speed < 4000000)); | ||
96 | } | ||
97 | |||
98 | static struct irda_platform_data h3600_irda_data = { | ||
99 | .set_power = h3600_irda_set_power, | ||
100 | .set_speed = h3600_irda_set_speed, | ||
101 | }; | ||
102 | |||
103 | static void h3xxx_mach_init(void) | ||
104 | { | ||
105 | sa11x0_set_flash_data(&h3xxx_flash_data, &h3xxx_flash_resource, 1); | ||
106 | sa11x0_set_irda_data(&h3600_irda_data); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * low-level UART features | ||
111 | */ | 27 | */ |
112 | 28 | static void h3600_lcd_power(int enable) | |
113 | static void h3600_uart_set_mctrl(struct uart_port *port, u_int mctrl) | ||
114 | { | 29 | { |
115 | if (port->mapbase == _Ser3UTCR0) { | 30 | if (gpio_request(H3XXX_EGPIO_LCD_ON, "LCD power")) { |
116 | if (mctrl & TIOCM_RTS) | 31 | pr_err("%s: can't request H3XXX_EGPIO_LCD_ON\n", __func__); |
117 | GPCR = GPIO_H3600_COM_RTS; | 32 | goto err1; |
118 | else | ||
119 | GPSR = GPIO_H3600_COM_RTS; | ||
120 | } | 33 | } |
121 | } | 34 | if (gpio_request(H3600_EGPIO_LCD_PCI, "LCD control")) { |
122 | 35 | pr_err("%s: can't request H3XXX_EGPIO_LCD_PCI\n", __func__); | |
123 | static u_int h3600_uart_get_mctrl(struct uart_port *port) | 36 | goto err2; |
124 | { | 37 | } |
125 | u_int ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; | 38 | if (gpio_request(H3600_EGPIO_LCD_5V_ON, "LCD 5v")) { |
126 | 39 | pr_err("%s: can't request H3XXX_EGPIO_LCD_5V_ON\n", __func__); | |
127 | if (port->mapbase == _Ser3UTCR0) { | 40 | goto err3; |
128 | int gplr = GPLR; | 41 | } |
129 | /* DCD and CTS bits are inverted in GPLR by RS232 transceiver */ | 42 | if (gpio_request(H3600_EGPIO_LVDD_ON, "LCD 9v/-6.5v")) { |
130 | if (gplr & GPIO_H3600_COM_DCD) | 43 | pr_err("%s: can't request H3600_EGPIO_LVDD_ON\n", __func__); |
131 | ret &= ~TIOCM_CD; | 44 | goto err4; |
132 | if (gplr & GPIO_H3600_COM_CTS) | ||
133 | ret &= ~TIOCM_CTS; | ||
134 | } | 45 | } |
135 | 46 | ||
136 | return ret; | 47 | gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable); |
137 | } | 48 | gpio_direction_output(H3600_EGPIO_LCD_PCI, enable); |
49 | gpio_direction_output(H3600_EGPIO_LCD_5V_ON, enable); | ||
50 | gpio_direction_output(H3600_EGPIO_LVDD_ON, enable); | ||
138 | 51 | ||
139 | static void h3600_uart_pm(struct uart_port *port, u_int state, u_int oldstate) | 52 | gpio_free(H3600_EGPIO_LVDD_ON); |
140 | { | 53 | err4: gpio_free(H3600_EGPIO_LCD_5V_ON); |
141 | if (port->mapbase == _Ser2UTCR0) { /* TODO: REMOVE THIS */ | 54 | err3: gpio_free(H3600_EGPIO_LCD_PCI); |
142 | assign_h3600_egpio(IPAQ_EGPIO_IR_ON, !state); | 55 | err2: gpio_free(H3XXX_EGPIO_LCD_ON); |
143 | } else if (port->mapbase == _Ser3UTCR0) { | 56 | err1: return; |
144 | assign_h3600_egpio(IPAQ_EGPIO_RS232_ON, !state); | ||
145 | } | ||
146 | } | 57 | } |
147 | 58 | ||
148 | /* | 59 | static void __init h3600_map_io(void) |
149 | * Enable/Disable wake up events for this serial port. | ||
150 | * Obviously, we only support this on the normal COM port. | ||
151 | */ | ||
152 | static int h3600_uart_set_wake(struct uart_port *port, u_int enable) | ||
153 | { | 60 | { |
154 | int err = -EINVAL; | 61 | h3xxx_map_io(); |
155 | 62 | ||
156 | if (port->mapbase == _Ser3UTCR0) { | 63 | sa1100fb_lcd_power = h3600_lcd_power; |
157 | if (enable) | ||
158 | PWER |= PWER_GPIO23 | PWER_GPIO25; /* DCD and CTS */ | ||
159 | else | ||
160 | PWER &= ~(PWER_GPIO23 | PWER_GPIO25); /* DCD and CTS */ | ||
161 | err = 0; | ||
162 | } | ||
163 | return err; | ||
164 | } | 64 | } |
165 | 65 | ||
166 | static struct sa1100_port_fns h3600_port_fns __initdata = { | ||
167 | .set_mctrl = h3600_uart_set_mctrl, | ||
168 | .get_mctrl = h3600_uart_get_mctrl, | ||
169 | .pm = h3600_uart_pm, | ||
170 | .set_wake = h3600_uart_set_wake, | ||
171 | }; | ||
172 | |||
173 | /* | 66 | /* |
174 | * helper for sa1100fb | 67 | * This turns the IRDA power on or off on the Compaq H3600 |
175 | */ | 68 | */ |
176 | static void h3xxx_lcd_power(int enable) | 69 | static int h3600_irda_set_power(struct device *dev, unsigned int state) |
177 | { | 70 | { |
178 | assign_h3600_egpio(IPAQ_EGPIO_LCD_POWER, enable); | 71 | gpio_set_value(H3600_EGPIO_IR_ON, state); |
72 | return 0; | ||
179 | } | 73 | } |
180 | 74 | ||
181 | static struct map_desc h3600_io_desc[] __initdata = { | 75 | static void h3600_irda_set_speed(struct device *dev, unsigned int speed) |
182 | { /* static memory bank 2 CS#2 */ | ||
183 | .virtual = H3600_BANK_2_VIRT, | ||
184 | .pfn = __phys_to_pfn(SA1100_CS2_PHYS), | ||
185 | .length = 0x02800000, | ||
186 | .type = MT_DEVICE | ||
187 | }, { /* static memory bank 4 CS#4 */ | ||
188 | .virtual = H3600_BANK_4_VIRT, | ||
189 | .pfn = __phys_to_pfn(SA1100_CS4_PHYS), | ||
190 | .length = 0x00800000, | ||
191 | .type = MT_DEVICE | ||
192 | }, { /* EGPIO 0 CS#5 */ | ||
193 | .virtual = H3600_EGPIO_VIRT, | ||
194 | .pfn = __phys_to_pfn(H3600_EGPIO_PHYS), | ||
195 | .length = 0x01000000, | ||
196 | .type = MT_DEVICE | ||
197 | } | ||
198 | }; | ||
199 | |||
200 | /* | ||
201 | * Common map_io initialization | ||
202 | */ | ||
203 | |||
204 | static void __init h3xxx_map_io(void) | ||
205 | { | 76 | { |
206 | sa1100_map_io(); | 77 | gpio_set_value(H3600_EGPIO_IR_FSEL, !(speed < 4000000)); |
207 | iotable_init(h3600_io_desc, ARRAY_SIZE(h3600_io_desc)); | ||
208 | |||
209 | sa1100_register_uart_fns(&h3600_port_fns); | ||
210 | sa1100_register_uart(0, 3); /* Common serial port */ | ||
211 | // sa1100_register_uart(1, 1); /* Microcontroller on 3100/3600 */ | ||
212 | |||
213 | /* Ensure those pins are outputs and driving low */ | ||
214 | PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM; | ||
215 | PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM); | ||
216 | |||
217 | /* Configure suspend conditions */ | ||
218 | PGSR = 0; | ||
219 | PWER = PWER_GPIO0 | PWER_RTC; | ||
220 | PCFR = PCFR_OPDE; | ||
221 | PSDR = 0; | ||
222 | |||
223 | sa1100fb_lcd_power = h3xxx_lcd_power; | ||
224 | } | 78 | } |
225 | 79 | ||
226 | /************************* H3100 *************************/ | 80 | static int h3600_irda_startup(struct device *dev) |
227 | |||
228 | #ifdef CONFIG_SA1100_H3100 | ||
229 | |||
230 | #define H3100_EGPIO (*(volatile unsigned int *)H3600_EGPIO_VIRT) | ||
231 | static unsigned int h3100_egpio = 0; | ||
232 | |||
233 | static void h3100_control_egpio(enum ipaq_egpio_type x, int setp) | ||
234 | { | 81 | { |
235 | unsigned int egpio = 0; | 82 | int err = gpio_request(H3600_EGPIO_IR_ON, "IrDA power"); |
236 | long gpio = 0; | 83 | if (err) |
237 | unsigned long flags; | 84 | goto err1; |
238 | 85 | err = gpio_direction_output(H3600_EGPIO_IR_ON, 0); | |
239 | switch (x) { | 86 | if (err) |
240 | case IPAQ_EGPIO_LCD_POWER: | 87 | goto err2; |
241 | egpio |= EGPIO_H3600_LCD_ON; | 88 | err = gpio_request(H3600_EGPIO_IR_FSEL, "IrDA fsel"); |
242 | gpio |= GPIO_H3100_LCD_3V_ON; | 89 | if (err) |
243 | break; | 90 | goto err2; |
244 | case IPAQ_EGPIO_LCD_ENABLE: | 91 | err = gpio_direction_output(H3600_EGPIO_IR_FSEL, 0); |
245 | break; | 92 | if (err) |
246 | case IPAQ_EGPIO_CODEC_NRESET: | 93 | goto err3; |
247 | egpio |= EGPIO_H3600_CODEC_NRESET; | 94 | return 0; |
248 | break; | ||
249 | case IPAQ_EGPIO_AUDIO_ON: | ||
250 | gpio |= GPIO_H3100_AUD_PWR_ON | ||
251 | | GPIO_H3100_AUD_ON; | ||
252 | break; | ||
253 | case IPAQ_EGPIO_QMUTE: | ||
254 | gpio |= GPIO_H3100_QMUTE; | ||
255 | break; | ||
256 | case IPAQ_EGPIO_OPT_NVRAM_ON: | ||
257 | egpio |= EGPIO_H3600_OPT_NVRAM_ON; | ||
258 | break; | ||
259 | case IPAQ_EGPIO_OPT_ON: | ||
260 | egpio |= EGPIO_H3600_OPT_ON; | ||
261 | break; | ||
262 | case IPAQ_EGPIO_CARD_RESET: | ||
263 | egpio |= EGPIO_H3600_CARD_RESET; | ||
264 | break; | ||
265 | case IPAQ_EGPIO_OPT_RESET: | ||
266 | egpio |= EGPIO_H3600_OPT_RESET; | ||
267 | break; | ||
268 | case IPAQ_EGPIO_IR_ON: | ||
269 | gpio |= GPIO_H3100_IR_ON; | ||
270 | break; | ||
271 | case IPAQ_EGPIO_IR_FSEL: | ||
272 | gpio |= GPIO_H3100_IR_FSEL; | ||
273 | break; | ||
274 | case IPAQ_EGPIO_RS232_ON: | ||
275 | egpio |= EGPIO_H3600_RS232_ON; | ||
276 | break; | ||
277 | case IPAQ_EGPIO_VPP_ON: | ||
278 | egpio |= EGPIO_H3600_VPP_ON; | ||
279 | break; | ||
280 | } | ||
281 | 95 | ||
282 | if (egpio || gpio) { | 96 | err3: gpio_free(H3600_EGPIO_IR_FSEL); |
283 | local_irq_save(flags); | 97 | err2: gpio_free(H3600_EGPIO_IR_ON); |
284 | if (setp) { | 98 | err1: return err; |
285 | h3100_egpio |= egpio; | ||
286 | GPSR = gpio; | ||
287 | } else { | ||
288 | h3100_egpio &= ~egpio; | ||
289 | GPCR = gpio; | ||
290 | } | ||
291 | H3100_EGPIO = h3100_egpio; | ||
292 | local_irq_restore(flags); | ||
293 | } | ||
294 | } | 99 | } |
295 | 100 | ||
296 | #define H3100_DIRECT_EGPIO (GPIO_H3100_BT_ON \ | 101 | static void h3600_irda_shutdown(struct device *dev) |
297 | | GPIO_H3100_GPIO3 \ | ||
298 | | GPIO_H3100_QMUTE \ | ||
299 | | GPIO_H3100_LCD_3V_ON \ | ||
300 | | GPIO_H3100_AUD_ON \ | ||
301 | | GPIO_H3100_AUD_PWR_ON \ | ||
302 | | GPIO_H3100_IR_ON \ | ||
303 | | GPIO_H3100_IR_FSEL) | ||
304 | |||
305 | static void __init h3100_map_io(void) | ||
306 | { | 102 | { |
307 | h3xxx_map_io(); | 103 | gpio_free(H3600_EGPIO_IR_ON); |
308 | 104 | gpio_free(H3600_EGPIO_IR_FSEL); | |
309 | /* Initialize h3100-specific values here */ | ||
310 | GPCR = 0x0fffffff; /* All outputs are set low by default */ | ||
311 | GPDR = GPIO_H3600_COM_RTS | GPIO_H3600_L3_CLOCK | | ||
312 | GPIO_H3600_L3_MODE | GPIO_H3600_L3_DATA | | ||
313 | GPIO_H3600_CLK_SET1 | GPIO_H3600_CLK_SET0 | | ||
314 | H3100_DIRECT_EGPIO; | ||
315 | |||
316 | /* Older bootldrs put GPIO2-9 in alternate mode on the | ||
317 | assumption that they are used for video */ | ||
318 | GAFR &= ~H3100_DIRECT_EGPIO; | ||
319 | |||
320 | H3100_EGPIO = h3100_egpio; | ||
321 | assign_h3600_egpio = h3100_control_egpio; | ||
322 | } | 105 | } |
323 | 106 | ||
324 | MACHINE_START(H3100, "Compaq iPAQ H3100") | 107 | static struct irda_platform_data h3600_irda_data = { |
325 | .phys_io = 0x80000000, | 108 | .set_power = h3600_irda_set_power, |
326 | .io_pg_offst = ((0xf8000000) >> 18) & 0xfffc, | 109 | .set_speed = h3600_irda_set_speed, |
327 | .boot_params = 0xc0000100, | 110 | .startup = h3600_irda_startup, |
328 | .map_io = h3100_map_io, | 111 | .shutdown = h3600_irda_shutdown, |
329 | .init_irq = sa1100_init_irq, | 112 | }; |
330 | .timer = &sa1100_timer, | ||
331 | .init_machine = h3xxx_mach_init, | ||
332 | MACHINE_END | ||
333 | |||
334 | #endif /* CONFIG_SA1100_H3100 */ | ||
335 | |||
336 | /************************* H3600 *************************/ | ||
337 | |||
338 | #ifdef CONFIG_SA1100_H3600 | ||
339 | |||
340 | #define H3600_EGPIO (*(volatile unsigned int *)H3600_EGPIO_VIRT) | ||
341 | static unsigned int h3600_egpio = EGPIO_H3600_RS232_ON; | ||
342 | |||
343 | static void h3600_control_egpio(enum ipaq_egpio_type x, int setp) | ||
344 | { | ||
345 | unsigned int egpio = 0; | ||
346 | unsigned long flags; | ||
347 | |||
348 | switch (x) { | ||
349 | case IPAQ_EGPIO_LCD_POWER: | ||
350 | egpio |= EGPIO_H3600_LCD_ON | | ||
351 | EGPIO_H3600_LCD_PCI | | ||
352 | EGPIO_H3600_LCD_5V_ON | | ||
353 | EGPIO_H3600_LVDD_ON; | ||
354 | break; | ||
355 | case IPAQ_EGPIO_LCD_ENABLE: | ||
356 | break; | ||
357 | case IPAQ_EGPIO_CODEC_NRESET: | ||
358 | egpio |= EGPIO_H3600_CODEC_NRESET; | ||
359 | break; | ||
360 | case IPAQ_EGPIO_AUDIO_ON: | ||
361 | egpio |= EGPIO_H3600_AUD_AMP_ON | | ||
362 | EGPIO_H3600_AUD_PWR_ON; | ||
363 | break; | ||
364 | case IPAQ_EGPIO_QMUTE: | ||
365 | egpio |= EGPIO_H3600_QMUTE; | ||
366 | break; | ||
367 | case IPAQ_EGPIO_OPT_NVRAM_ON: | ||
368 | egpio |= EGPIO_H3600_OPT_NVRAM_ON; | ||
369 | break; | ||
370 | case IPAQ_EGPIO_OPT_ON: | ||
371 | egpio |= EGPIO_H3600_OPT_ON; | ||
372 | break; | ||
373 | case IPAQ_EGPIO_CARD_RESET: | ||
374 | egpio |= EGPIO_H3600_CARD_RESET; | ||
375 | break; | ||
376 | case IPAQ_EGPIO_OPT_RESET: | ||
377 | egpio |= EGPIO_H3600_OPT_RESET; | ||
378 | break; | ||
379 | case IPAQ_EGPIO_IR_ON: | ||
380 | egpio |= EGPIO_H3600_IR_ON; | ||
381 | break; | ||
382 | case IPAQ_EGPIO_IR_FSEL: | ||
383 | egpio |= EGPIO_H3600_IR_FSEL; | ||
384 | break; | ||
385 | case IPAQ_EGPIO_RS232_ON: | ||
386 | egpio |= EGPIO_H3600_RS232_ON; | ||
387 | break; | ||
388 | case IPAQ_EGPIO_VPP_ON: | ||
389 | egpio |= EGPIO_H3600_VPP_ON; | ||
390 | break; | ||
391 | } | ||
392 | 113 | ||
393 | if (egpio) { | 114 | static struct gpio_default_state h3600_default_gpio[] = { |
394 | local_irq_save(flags); | 115 | { H3XXX_GPIO_COM_DCD, GPIO_MODE_IN, "COM DCD" }, |
395 | if (setp) | 116 | { H3XXX_GPIO_COM_CTS, GPIO_MODE_IN, "COM CTS" }, |
396 | h3600_egpio |= egpio; | 117 | { H3XXX_GPIO_COM_RTS, GPIO_MODE_OUT0, "COM RTS" }, |
397 | else | 118 | }; |
398 | h3600_egpio &= ~egpio; | ||
399 | H3600_EGPIO = h3600_egpio; | ||
400 | local_irq_restore(flags); | ||
401 | } | ||
402 | } | ||
403 | 119 | ||
404 | static void __init h3600_map_io(void) | 120 | static void __init h3600_mach_init(void) |
405 | { | 121 | { |
406 | h3xxx_map_io(); | 122 | h3xxx_init_gpio(h3600_default_gpio, ARRAY_SIZE(h3600_default_gpio)); |
407 | 123 | h3xxx_mach_init(); | |
408 | /* Initialize h3600-specific values here */ | 124 | sa11x0_register_irda(&h3600_irda_data); |
409 | |||
410 | GPCR = 0x0fffffff; /* All outputs are set low by default */ | ||
411 | GPDR = GPIO_H3600_COM_RTS | GPIO_H3600_L3_CLOCK | | ||
412 | GPIO_H3600_L3_MODE | GPIO_H3600_L3_DATA | | ||
413 | GPIO_H3600_CLK_SET1 | GPIO_H3600_CLK_SET0 | | ||
414 | GPIO_LDD15 | GPIO_LDD14 | GPIO_LDD13 | GPIO_LDD12 | | ||
415 | GPIO_LDD11 | GPIO_LDD10 | GPIO_LDD9 | GPIO_LDD8; | ||
416 | |||
417 | H3600_EGPIO = h3600_egpio; /* Maintains across sleep? */ | ||
418 | assign_h3600_egpio = h3600_control_egpio; | ||
419 | } | 125 | } |
420 | 126 | ||
421 | MACHINE_START(H3600, "Compaq iPAQ H3600") | 127 | MACHINE_START(H3600, "Compaq iPAQ H3600") |
@@ -425,8 +131,6 @@ MACHINE_START(H3600, "Compaq iPAQ H3600") | |||
425 | .map_io = h3600_map_io, | 131 | .map_io = h3600_map_io, |
426 | .init_irq = sa1100_init_irq, | 132 | .init_irq = sa1100_init_irq, |
427 | .timer = &sa1100_timer, | 133 | .timer = &sa1100_timer, |
428 | .init_machine = h3xxx_mach_init, | 134 | .init_machine = h3600_mach_init, |
429 | MACHINE_END | 135 | MACHINE_END |
430 | 136 | ||
431 | #endif /* CONFIG_SA1100_H3600 */ | ||
432 | |||
diff --git a/arch/arm/mach-sa1100/h3xxx.c b/arch/arm/mach-sa1100/h3xxx.c new file mode 100644 index 000000000000..b0784c974c2d --- /dev/null +++ b/arch/arm/mach-sa1100/h3xxx.c | |||
@@ -0,0 +1,313 @@ | |||
1 | /* | ||
2 | * Support for Compaq iPAQ H3100 and H3600 handheld computers (common code) | ||
3 | * | ||
4 | * Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks) | ||
5 | * Copyright (c) 2009 Dmitry Artamonow <mad_soft@inbox.ru> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/gpio.h> | ||
15 | #include <linux/gpio_keys.h> | ||
16 | #include <linux/input.h> | ||
17 | #include <linux/mfd/htc-egpio.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include <linux/mtd/partitions.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/serial_core.h> | ||
22 | |||
23 | #include <asm/mach/flash.h> | ||
24 | #include <asm/mach/map.h> | ||
25 | #include <asm/mach/serial_sa1100.h> | ||
26 | |||
27 | #include <mach/h3xxx.h> | ||
28 | |||
29 | #include "generic.h" | ||
30 | |||
31 | void h3xxx_init_gpio(struct gpio_default_state *s, size_t n) | ||
32 | { | ||
33 | while (n--) { | ||
34 | const char *name = s->name; | ||
35 | int err; | ||
36 | |||
37 | if (!name) | ||
38 | name = "[init]"; | ||
39 | err = gpio_request(s->gpio, name); | ||
40 | if (err) { | ||
41 | printk(KERN_ERR "gpio%u: unable to request: %d\n", | ||
42 | s->gpio, err); | ||
43 | continue; | ||
44 | } | ||
45 | if (s->mode >= 0) { | ||
46 | err = gpio_direction_output(s->gpio, s->mode); | ||
47 | } else { | ||
48 | err = gpio_direction_input(s->gpio); | ||
49 | } | ||
50 | if (err) { | ||
51 | printk(KERN_ERR "gpio%u: unable to set direction: %d\n", | ||
52 | s->gpio, err); | ||
53 | continue; | ||
54 | } | ||
55 | if (!s->name) | ||
56 | gpio_free(s->gpio); | ||
57 | s++; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | |||
62 | /* | ||
63 | * H3xxx flash support | ||
64 | */ | ||
65 | static struct mtd_partition h3xxx_partitions[] = { | ||
66 | { | ||
67 | .name = "H3XXX boot firmware", | ||
68 | .size = 0x00040000, | ||
69 | .offset = 0, | ||
70 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
71 | }, { | ||
72 | .name = "H3XXX rootfs", | ||
73 | .size = MTDPART_SIZ_FULL, | ||
74 | .offset = 0x00040000, | ||
75 | } | ||
76 | }; | ||
77 | |||
78 | static void h3xxx_set_vpp(int vpp) | ||
79 | { | ||
80 | gpio_set_value(H3XXX_EGPIO_VPP_ON, vpp); | ||
81 | } | ||
82 | |||
83 | static int h3xxx_flash_init(void) | ||
84 | { | ||
85 | int err = gpio_request(H3XXX_EGPIO_VPP_ON, "Flash Vpp"); | ||
86 | if (err) { | ||
87 | pr_err("%s: can't request H3XXX_EGPIO_VPP_ON\n", __func__); | ||
88 | return err; | ||
89 | } | ||
90 | |||
91 | err = gpio_direction_output(H3XXX_EGPIO_VPP_ON, 0); | ||
92 | if (err) | ||
93 | gpio_free(H3XXX_EGPIO_VPP_ON); | ||
94 | |||
95 | return err; | ||
96 | } | ||
97 | |||
98 | static void h3xxx_flash_exit(void) | ||
99 | { | ||
100 | gpio_free(H3XXX_EGPIO_VPP_ON); | ||
101 | } | ||
102 | |||
103 | static struct flash_platform_data h3xxx_flash_data = { | ||
104 | .map_name = "cfi_probe", | ||
105 | .set_vpp = h3xxx_set_vpp, | ||
106 | .init = h3xxx_flash_init, | ||
107 | .exit = h3xxx_flash_exit, | ||
108 | .parts = h3xxx_partitions, | ||
109 | .nr_parts = ARRAY_SIZE(h3xxx_partitions), | ||
110 | }; | ||
111 | |||
112 | static struct resource h3xxx_flash_resource = { | ||
113 | .start = SA1100_CS0_PHYS, | ||
114 | .end = SA1100_CS0_PHYS + SZ_32M - 1, | ||
115 | .flags = IORESOURCE_MEM, | ||
116 | }; | ||
117 | |||
118 | |||
119 | /* | ||
120 | * H3xxx uart support | ||
121 | */ | ||
122 | static void h3xxx_uart_set_mctrl(struct uart_port *port, u_int mctrl) | ||
123 | { | ||
124 | if (port->mapbase == _Ser3UTCR0) { | ||
125 | gpio_set_value(H3XXX_GPIO_COM_RTS, !(mctrl & TIOCM_RTS)); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | static u_int h3xxx_uart_get_mctrl(struct uart_port *port) | ||
130 | { | ||
131 | u_int ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; | ||
132 | |||
133 | if (port->mapbase == _Ser3UTCR0) { | ||
134 | /* | ||
135 | * DCD and CTS bits are inverted in GPLR by RS232 transceiver | ||
136 | */ | ||
137 | if (gpio_get_value(H3XXX_GPIO_COM_DCD)) | ||
138 | ret &= ~TIOCM_CD; | ||
139 | if (gpio_get_value(H3XXX_GPIO_COM_CTS)) | ||
140 | ret &= ~TIOCM_CTS; | ||
141 | } | ||
142 | |||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | static void h3xxx_uart_pm(struct uart_port *port, u_int state, u_int oldstate) | ||
147 | { | ||
148 | if (port->mapbase == _Ser3UTCR0) { | ||
149 | if (!gpio_request(H3XXX_EGPIO_RS232_ON, "RS232 transceiver")) { | ||
150 | gpio_direction_output(H3XXX_EGPIO_RS232_ON, !state); | ||
151 | gpio_free(H3XXX_EGPIO_RS232_ON); | ||
152 | } else { | ||
153 | pr_err("%s: can't request H3XXX_EGPIO_RS232_ON\n", | ||
154 | __func__); | ||
155 | } | ||
156 | } | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * Enable/Disable wake up events for this serial port. | ||
161 | * Obviously, we only support this on the normal COM port. | ||
162 | */ | ||
163 | static int h3xxx_uart_set_wake(struct uart_port *port, u_int enable) | ||
164 | { | ||
165 | int err = -EINVAL; | ||
166 | |||
167 | if (port->mapbase == _Ser3UTCR0) { | ||
168 | if (enable) | ||
169 | PWER |= PWER_GPIO23 | PWER_GPIO25; /* DCD and CTS */ | ||
170 | else | ||
171 | PWER &= ~(PWER_GPIO23 | PWER_GPIO25); /* DCD and CTS */ | ||
172 | err = 0; | ||
173 | } | ||
174 | return err; | ||
175 | } | ||
176 | |||
177 | static struct sa1100_port_fns h3xxx_port_fns __initdata = { | ||
178 | .set_mctrl = h3xxx_uart_set_mctrl, | ||
179 | .get_mctrl = h3xxx_uart_get_mctrl, | ||
180 | .pm = h3xxx_uart_pm, | ||
181 | .set_wake = h3xxx_uart_set_wake, | ||
182 | }; | ||
183 | |||
184 | /* | ||
185 | * EGPIO | ||
186 | */ | ||
187 | |||
188 | static struct resource egpio_resources[] = { | ||
189 | [0] = { | ||
190 | .start = H3600_EGPIO_PHYS, | ||
191 | .end = H3600_EGPIO_PHYS + 0x4 - 1, | ||
192 | .flags = IORESOURCE_MEM, | ||
193 | }, | ||
194 | }; | ||
195 | |||
196 | static struct htc_egpio_chip egpio_chips[] = { | ||
197 | [0] = { | ||
198 | .reg_start = 0, | ||
199 | .gpio_base = H3XXX_EGPIO_BASE, | ||
200 | .num_gpios = 16, | ||
201 | .direction = HTC_EGPIO_OUTPUT, | ||
202 | .initial_values = 0x0080, /* H3XXX_EGPIO_RS232_ON */ | ||
203 | }, | ||
204 | }; | ||
205 | |||
206 | static struct htc_egpio_platform_data egpio_info = { | ||
207 | .reg_width = 16, | ||
208 | .bus_width = 16, | ||
209 | .chip = egpio_chips, | ||
210 | .num_chips = ARRAY_SIZE(egpio_chips), | ||
211 | }; | ||
212 | |||
213 | static struct platform_device h3xxx_egpio = { | ||
214 | .name = "htc-egpio", | ||
215 | .id = -1, | ||
216 | .resource = egpio_resources, | ||
217 | .num_resources = ARRAY_SIZE(egpio_resources), | ||
218 | .dev = { | ||
219 | .platform_data = &egpio_info, | ||
220 | }, | ||
221 | }; | ||
222 | |||
223 | /* | ||
224 | * GPIO keys | ||
225 | */ | ||
226 | |||
227 | static struct gpio_keys_button h3xxx_button_table[] = { | ||
228 | { | ||
229 | .code = KEY_POWER, | ||
230 | .gpio = H3XXX_GPIO_PWR_BUTTON, | ||
231 | .desc = "Power Button", | ||
232 | .active_low = 1, | ||
233 | .type = EV_KEY, | ||
234 | .wakeup = 1, | ||
235 | }, { | ||
236 | .code = KEY_ENTER, | ||
237 | .gpio = H3XXX_GPIO_ACTION_BUTTON, | ||
238 | .active_low = 1, | ||
239 | .desc = "Action button", | ||
240 | .type = EV_KEY, | ||
241 | .wakeup = 0, | ||
242 | }, | ||
243 | }; | ||
244 | |||
245 | static struct gpio_keys_platform_data h3xxx_keys_data = { | ||
246 | .buttons = h3xxx_button_table, | ||
247 | .nbuttons = ARRAY_SIZE(h3xxx_button_table), | ||
248 | }; | ||
249 | |||
250 | static struct platform_device h3xxx_keys = { | ||
251 | .name = "gpio-keys", | ||
252 | .id = -1, | ||
253 | .dev = { | ||
254 | .platform_data = &h3xxx_keys_data, | ||
255 | }, | ||
256 | }; | ||
257 | |||
258 | static struct platform_device *h3xxx_devices[] = { | ||
259 | &h3xxx_egpio, | ||
260 | &h3xxx_keys, | ||
261 | }; | ||
262 | |||
263 | void __init h3xxx_mach_init(void) | ||
264 | { | ||
265 | sa1100_register_uart_fns(&h3xxx_port_fns); | ||
266 | sa11x0_register_mtd(&h3xxx_flash_data, &h3xxx_flash_resource, 1); | ||
267 | platform_add_devices(h3xxx_devices, ARRAY_SIZE(h3xxx_devices)); | ||
268 | } | ||
269 | |||
270 | static struct map_desc h3600_io_desc[] __initdata = { | ||
271 | { /* static memory bank 2 CS#2 */ | ||
272 | .virtual = H3600_BANK_2_VIRT, | ||
273 | .pfn = __phys_to_pfn(SA1100_CS2_PHYS), | ||
274 | .length = 0x02800000, | ||
275 | .type = MT_DEVICE | ||
276 | }, { /* static memory bank 4 CS#4 */ | ||
277 | .virtual = H3600_BANK_4_VIRT, | ||
278 | .pfn = __phys_to_pfn(SA1100_CS4_PHYS), | ||
279 | .length = 0x00800000, | ||
280 | .type = MT_DEVICE | ||
281 | }, { /* EGPIO 0 CS#5 */ | ||
282 | .virtual = H3600_EGPIO_VIRT, | ||
283 | .pfn = __phys_to_pfn(H3600_EGPIO_PHYS), | ||
284 | .length = 0x01000000, | ||
285 | .type = MT_DEVICE | ||
286 | } | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * Common map_io initialization | ||
291 | */ | ||
292 | |||
293 | void __init h3xxx_map_io(void) | ||
294 | { | ||
295 | sa1100_map_io(); | ||
296 | iotable_init(h3600_io_desc, ARRAY_SIZE(h3600_io_desc)); | ||
297 | |||
298 | sa1100_register_uart(0, 3); /* Common serial port */ | ||
299 | // sa1100_register_uart(1, 1); /* Microcontroller on 3100/3600 */ | ||
300 | |||
301 | /* Ensure those pins are outputs and driving low */ | ||
302 | PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM; | ||
303 | PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM); | ||
304 | |||
305 | /* Configure suspend conditions */ | ||
306 | PGSR = 0; | ||
307 | PCFR = PCFR_OPDE; | ||
308 | PSDR = 0; | ||
309 | |||
310 | GPCR = 0x0fffffff; /* All outputs are set low by default */ | ||
311 | GPDR = 0; /* Configure all GPIOs as input */ | ||
312 | } | ||
313 | |||
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c index e7056c0b562c..51568dfc8e97 100644 --- a/arch/arm/mach-sa1100/hackkit.c +++ b/arch/arm/mach-sa1100/hackkit.c | |||
@@ -187,7 +187,7 @@ static struct resource hackkit_flash_resource = { | |||
187 | 187 | ||
188 | static void __init hackkit_init(void) | 188 | static void __init hackkit_init(void) |
189 | { | 189 | { |
190 | sa11x0_set_flash_data(&hackkit_flash_data, &hackkit_flash_resource, 1); | 190 | sa11x0_register_mtd(&hackkit_flash_data, &hackkit_flash_resource, 1); |
191 | } | 191 | } |
192 | 192 | ||
193 | /********************************************************************** | 193 | /********************************************************************** |
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h index 9efb569cdb60..71a0b3fdcc8c 100644 --- a/arch/arm/mach-sa1100/include/mach/collie.h +++ b/arch/arm/mach-sa1100/include/mach/collie.h | |||
@@ -25,29 +25,39 @@ | |||
25 | #define COLLIE_GPIO_VPEN (COLLIE_SCOOP_GPIO_BASE + 7) | 25 | #define COLLIE_GPIO_VPEN (COLLIE_SCOOP_GPIO_BASE + 7) |
26 | #define COLLIE_SCP_LB_VOL_CHG SCOOP_GPCR_PA19 | 26 | #define COLLIE_SCP_LB_VOL_CHG SCOOP_GPCR_PA19 |
27 | 27 | ||
28 | #define COLLIE_SCOOP_IO_DIR ( COLLIE_SCP_MUTE_L | COLLIE_SCP_MUTE_R | \ | 28 | #define COLLIE_SCOOP_IO_DIR (COLLIE_SCP_MUTE_L | COLLIE_SCP_MUTE_R | \ |
29 | COLLIE_SCP_5VON | COLLIE_SCP_AMP_ON | \ | 29 | COLLIE_SCP_5VON | COLLIE_SCP_AMP_ON | \ |
30 | COLLIE_SCP_LB_VOL_CHG ) | 30 | COLLIE_SCP_LB_VOL_CHG) |
31 | #define COLLIE_SCOOP_IO_OUT ( COLLIE_SCP_MUTE_L | COLLIE_SCP_MUTE_R ) | 31 | #define COLLIE_SCOOP_IO_OUT (COLLIE_SCP_MUTE_L | COLLIE_SCP_MUTE_R) |
32 | 32 | ||
33 | /* GPIOs for which the generic definition doesn't say much */ | 33 | /* GPIOs for gpiolib */ |
34 | 34 | ||
35 | #define COLLIE_GPIO_ON_KEY GPIO_GPIO (0) | 35 | #define COLLIE_GPIO_ON_KEY (0) |
36 | #define COLLIE_GPIO_AC_IN GPIO_GPIO (1) | 36 | #define COLLIE_GPIO_AC_IN (1) |
37 | #define COLLIE_GPIO_SDIO_INT GPIO_GPIO (11) | 37 | #define COLLIE_GPIO_SDIO_INT (11) |
38 | #define COLLIE_GPIO_CF_IRQ GPIO_GPIO (14) | 38 | #define COLLIE_GPIO_CF_IRQ (14) |
39 | #define COLLIE_GPIO_nREMOCON_INT GPIO_GPIO (15) | 39 | #define COLLIE_GPIO_nREMOCON_INT (15) |
40 | #define COLLIE_GPIO_UCB1x00_RESET GPIO_GPIO (16) | 40 | #define COLLIE_GPIO_UCB1x00_RESET (16) |
41 | #define COLLIE_GPIO_nMIC_ON GPIO_GPIO (17) | 41 | #define COLLIE_GPIO_nMIC_ON (17) |
42 | #define COLLIE_GPIO_nREMOCON_ON GPIO_GPIO (18) | 42 | #define COLLIE_GPIO_nREMOCON_ON (18) |
43 | #define COLLIE_GPIO_CO GPIO_GPIO (20) | 43 | #define COLLIE_GPIO_CO (20) |
44 | #define COLLIE_GPIO_MCP_CLK GPIO_GPIO (21) | 44 | #define COLLIE_GPIO_MCP_CLK (21) |
45 | #define COLLIE_GPIO_CF_CD GPIO_GPIO (22) | 45 | #define COLLIE_GPIO_CF_CD (22) |
46 | #define COLLIE_GPIO_UCB1x00_IRQ GPIO_GPIO (23) | 46 | #define COLLIE_GPIO_UCB1x00_IRQ (23) |
47 | #define COLLIE_GPIO_WAKEUP GPIO_GPIO (24) | 47 | #define COLLIE_GPIO_WAKEUP (24) |
48 | #define COLLIE_GPIO_GA_INT GPIO_GPIO (25) | 48 | #define COLLIE_GPIO_GA_INT (25) |
49 | #define COLLIE_GPIO_MAIN_BAT_LOW GPIO_GPIO (26) | 49 | #define COLLIE_GPIO_MAIN_BAT_LOW (26) |
50 | 50 | ||
51 | /* GPIO definitions for direct register access */ | ||
52 | |||
53 | #define _COLLIE_GPIO_ON_KEY GPIO_GPIO(0) | ||
54 | #define _COLLIE_GPIO_AC_IN GPIO_GPIO(1) | ||
55 | #define _COLLIE_GPIO_nREMOCON_INT GPIO_GPIO(15) | ||
56 | #define _COLLIE_GPIO_UCB1x00_RESET GPIO_GPIO(16) | ||
57 | #define _COLLIE_GPIO_nMIC_ON GPIO_GPIO(17) | ||
58 | #define _COLLIE_GPIO_nREMOCON_ON GPIO_GPIO(18) | ||
59 | #define _COLLIE_GPIO_CO GPIO_GPIO(20) | ||
60 | #define _COLLIE_GPIO_WAKEUP GPIO_GPIO(24) | ||
51 | /* Interrupts */ | 61 | /* Interrupts */ |
52 | 62 | ||
53 | #define COLLIE_IRQ_GPIO_ON_KEY IRQ_GPIO0 | 63 | #define COLLIE_IRQ_GPIO_ON_KEY IRQ_GPIO0 |
@@ -70,19 +80,20 @@ | |||
70 | #define COLLIE_LCM_IRQ_GPIO_nSD_WP IRQ_LOCOMO_GPIO14 | 80 | #define COLLIE_LCM_IRQ_GPIO_nSD_WP IRQ_LOCOMO_GPIO14 |
71 | 81 | ||
72 | /* GPIO's on the TC35143AF (Toshiba Analog Frontend) */ | 82 | /* GPIO's on the TC35143AF (Toshiba Analog Frontend) */ |
73 | #define COLLIE_TC35143_GPIO_VERSION0 UCB_IO_0 /* GPIO0=Version */ | 83 | #define COLLIE_TC35143_GPIO_BASE (GPIO_MAX + 13) |
74 | #define COLLIE_TC35143_GPIO_TBL_CHK UCB_IO_1 /* GPIO1=TBL_CHK */ | 84 | #define COLLIE_TC35143_GPIO_VERSION0 UCB_IO_0 |
75 | #define COLLIE_TC35143_GPIO_VPEN_ON UCB_IO_2 /* GPIO2=VPNE_ON */ | 85 | #define COLLIE_TC35143_GPIO_TBL_CHK UCB_IO_1 |
76 | #define COLLIE_TC35143_GPIO_IR_ON UCB_IO_3 /* GPIO3=IR_ON */ | 86 | #define COLLIE_TC35143_GPIO_VPEN_ON UCB_IO_2 |
77 | #define COLLIE_TC35143_GPIO_AMP_ON UCB_IO_4 /* GPIO4=AMP_ON */ | 87 | #define COLLIE_TC35143_GPIO_IR_ON UCB_IO_3 |
78 | #define COLLIE_TC35143_GPIO_VERSION1 UCB_IO_5 /* GPIO5=Version */ | 88 | #define COLLIE_TC35143_GPIO_AMP_ON UCB_IO_4 |
79 | #define COLLIE_TC35143_GPIO_FS8KLPF UCB_IO_5 /* GPIO5=fs 8k LPF */ | 89 | #define COLLIE_TC35143_GPIO_VERSION1 UCB_IO_5 |
80 | #define COLLIE_TC35143_GPIO_BUZZER_BIAS UCB_IO_6 /* GPIO6=BUZZER BIAS */ | 90 | #define COLLIE_TC35143_GPIO_FS8KLPF UCB_IO_5 |
81 | #define COLLIE_TC35143_GPIO_MBAT_ON UCB_IO_7 /* GPIO7=MBAT_ON */ | 91 | #define COLLIE_TC35143_GPIO_BUZZER_BIAS UCB_IO_6 |
82 | #define COLLIE_TC35143_GPIO_BBAT_ON UCB_IO_8 /* GPIO8=BBAT_ON */ | 92 | #define COLLIE_GPIO_MBAT_ON (COLLIE_TC35143_GPIO_BASE + 7) |
83 | #define COLLIE_TC35143_GPIO_TMP_ON UCB_IO_9 /* GPIO9=TMP_ON */ | 93 | #define COLLIE_GPIO_BBAT_ON (COLLIE_TC35143_GPIO_BASE + 8) |
84 | #define COLLIE_TC35143_GPIO_IN ( UCB_IO_0 | UCB_IO_2 | UCB_IO_5 ) | 94 | #define COLLIE_GPIO_TMP_ON (COLLIE_TC35143_GPIO_BASE + 9) |
85 | #define COLLIE_TC35143_GPIO_OUT ( UCB_IO_1 | UCB_IO_3 | UCB_IO_4 | UCB_IO_6 | \ | 95 | #define COLLIE_TC35143_GPIO_IN (UCB_IO_0 | UCB_IO_2 | UCB_IO_5) |
86 | UCB_IO_7 | UCB_IO_8 | UCB_IO_9 ) | 96 | #define COLLIE_TC35143_GPIO_OUT (UCB_IO_1 | UCB_IO_3 | UCB_IO_4 \ |
97 | | UCB_IO_6) | ||
87 | 98 | ||
88 | #endif | 99 | #endif |
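The collie.h hunks above replace the GPIO_GPIO(n) register-mask macros with plain gpiolib numbers and move the TC35143 pins behind COLLIE_TC35143_GPIO_BASE, which starts past the SoC's own range (GPIO_MAX + 13). A minimal sketch of how board or driver code consumes the new numbering; the helper name and labels are illustrative, only the gpiolib calls are standard kernel API:

#include <linux/gpio.h>

static int collie_setup_example(void)	/* hypothetical helper */
{
	int err;

	err = gpio_request(COLLIE_GPIO_CO, "charger full");	/* was GPIO_GPIO(20) */
	if (err)
		return err;
	gpio_direction_input(COLLIE_GPIO_CO);

	/* TC35143 pins now live past GPIO_MAX and are requested the same way */
	err = gpio_request(COLLIE_GPIO_MBAT_ON, "main battery");
	if (err) {
		gpio_free(COLLIE_GPIO_CO);
		return err;
	}
	gpio_direction_output(COLLIE_GPIO_MBAT_ON, 0);

	return 0;
}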
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h index 582a0c92da53..7befc104e9a9 100644 --- a/arch/arm/mach-sa1100/include/mach/gpio.h +++ b/arch/arm/mach-sa1100/include/mach/gpio.h | |||
@@ -49,20 +49,9 @@ static inline void gpio_set_value(unsigned gpio, int value) | |||
49 | 49 | ||
50 | #define gpio_cansleep __gpio_cansleep | 50 | #define gpio_cansleep __gpio_cansleep |
51 | 51 | ||
52 | static inline unsigned gpio_to_irq(unsigned gpio) | 52 | #define gpio_to_irq(gpio) ((gpio < 11) ? (IRQ_GPIO0 + gpio) : \ |
53 | { | 53 | (IRQ_GPIO11 - 11 + gpio)) |
54 | if (gpio < 11) | 54 | #define irq_to_gpio(irq) ((irq < IRQ_GPIO11_27) ? (irq - IRQ_GPIO0) : \ |
55 | return IRQ_GPIO0 + gpio; | 55 | (irq - IRQ_GPIO11 + 11)) |
56 | else | ||
57 | return IRQ_GPIO11 - 11 + gpio; | ||
58 | } | ||
59 | |||
60 | static inline unsigned irq_to_gpio(unsigned irq) | ||
61 | { | ||
62 | if (irq < IRQ_GPIO11_27) | ||
63 | return irq - IRQ_GPIO0; | ||
64 | else | ||
65 | return irq - IRQ_GPIO11 + 11; | ||
66 | } | ||
67 | 56 | ||
68 | #endif | 57 | #endif |
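gpio_to_irq() and irq_to_gpio() become macros with the same mapping the inline functions had: GPIOs 0-10 use the dedicated IRQ_GPIO0..IRQ_GPIO10 lines, GPIOs 11-27 sit behind IRQ_GPIO11. A hedged sketch of a typical consumer; the handler and names are placeholders:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t cf_detect_isr(int irq, void *dev_id)
{
	/* card-detect handling elided */
	return IRQ_HANDLED;
}

static int __init cf_detect_init(void)	/* hypothetical */
{
	unsigned int irq = gpio_to_irq(COLLIE_GPIO_CF_CD);	/* 22 -> IRQ_GPIO11 - 11 + 22 */

	return request_irq(irq, cf_detect_isr,
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			   "cf_cd", NULL);
}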
diff --git a/arch/arm/mach-sa1100/include/mach/h3600.h b/arch/arm/mach-sa1100/include/mach/h3600.h deleted file mode 100644 index 2827faa47421..000000000000 --- a/arch/arm/mach-sa1100/include/mach/h3600.h +++ /dev/null | |||
@@ -1,100 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Definitions for H3600 Handheld Computer | ||
4 | * | ||
5 | * Copyright 2000 Compaq Computer Corporation. | ||
6 | * | ||
7 | * Use consistent with the GNU GPL is permitted, | ||
8 | * provided that this copyright notice is | ||
9 | * preserved in its entirety in all copies and derived works. | ||
10 | * | ||
11 | * COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | ||
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | ||
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | ||
14 | * | ||
15 | * Author: Jamey Hicks. | ||
16 | * | ||
17 | * History: | ||
18 | * | ||
19 | * 2001-10-?? Andrew Christian Added support for iPAQ H3800 | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _INCLUDE_H3600_H_ | ||
24 | #define _INCLUDE_H3600_H_ | ||
25 | |||
26 | typedef int __bitwise pm_request_t; | ||
27 | |||
28 | #define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */ | ||
29 | #define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */ | ||
30 | |||
31 | /* generalized support for H3xxx series Compaq Pocket PC's */ | ||
32 | #define machine_is_h3xxx() (machine_is_h3100() || machine_is_h3600()) | ||
33 | |||
34 | /* Physical memory regions corresponding to chip selects */ | ||
35 | #define H3600_EGPIO_PHYS (SA1100_CS5_PHYS + 0x01000000) | ||
36 | #define H3600_BANK_2_PHYS SA1100_CS2_PHYS | ||
37 | #define H3600_BANK_4_PHYS SA1100_CS4_PHYS | ||
38 | |||
39 | /* Virtual memory regions corresponding to chip selects 2 & 4 (used on sleeves) */ | ||
40 | #define H3600_EGPIO_VIRT 0xf0000000 | ||
41 | #define H3600_BANK_2_VIRT 0xf1000000 | ||
42 | #define H3600_BANK_4_VIRT 0xf3800000 | ||
43 | |||
44 | /* | ||
45 | Machine-independent GPIO definitions | ||
46 | --- these are common across all current iPAQ platforms | ||
47 | */ | ||
48 | |||
49 | #define GPIO_H3600_NPOWER_BUTTON GPIO_GPIO (0) /* Also known as the "off button" */ | ||
50 | |||
51 | #define GPIO_H3600_PCMCIA_CD1 GPIO_GPIO (10) | ||
52 | #define GPIO_H3600_PCMCIA_IRQ1 GPIO_GPIO (11) | ||
53 | |||
54 | /* UDA1341 L3 Interface */ | ||
55 | #define GPIO_H3600_L3_DATA GPIO_GPIO (14) | ||
56 | #define GPIO_H3600_L3_MODE GPIO_GPIO (15) | ||
57 | #define GPIO_H3600_L3_CLOCK GPIO_GPIO (16) | ||
58 | |||
59 | #define GPIO_H3600_PCMCIA_CD0 GPIO_GPIO (17) | ||
60 | #define GPIO_H3600_SYS_CLK GPIO_GPIO (19) | ||
61 | #define GPIO_H3600_PCMCIA_IRQ0 GPIO_GPIO (21) | ||
62 | |||
63 | #define GPIO_H3600_COM_DCD GPIO_GPIO (23) | ||
64 | #define GPIO_H3600_OPT_IRQ GPIO_GPIO (24) | ||
65 | #define GPIO_H3600_COM_CTS GPIO_GPIO (25) | ||
66 | #define GPIO_H3600_COM_RTS GPIO_GPIO (26) | ||
67 | |||
68 | #define IRQ_GPIO_H3600_NPOWER_BUTTON IRQ_GPIO0 | ||
69 | #define IRQ_GPIO_H3600_PCMCIA_CD1 IRQ_GPIO10 | ||
70 | #define IRQ_GPIO_H3600_PCMCIA_IRQ1 IRQ_GPIO11 | ||
71 | #define IRQ_GPIO_H3600_PCMCIA_CD0 IRQ_GPIO17 | ||
72 | #define IRQ_GPIO_H3600_PCMCIA_IRQ0 IRQ_GPIO21 | ||
73 | #define IRQ_GPIO_H3600_COM_DCD IRQ_GPIO23 | ||
74 | #define IRQ_GPIO_H3600_OPT_IRQ IRQ_GPIO24 | ||
75 | #define IRQ_GPIO_H3600_COM_CTS IRQ_GPIO25 | ||
76 | |||
77 | |||
78 | #ifndef __ASSEMBLY__ | ||
79 | |||
80 | enum ipaq_egpio_type { | ||
81 | IPAQ_EGPIO_LCD_POWER, /* Power to the LCD panel */ | ||
82 | IPAQ_EGPIO_CODEC_NRESET, /* Clear to reset the audio codec (remember to return high) */ | ||
83 | IPAQ_EGPIO_AUDIO_ON, /* Audio power */ | ||
84 | IPAQ_EGPIO_QMUTE, /* Audio muting */ | ||
85 | IPAQ_EGPIO_OPT_NVRAM_ON, /* Non-volatile RAM on extension sleeves (SPI interface) */ | ||
86 | IPAQ_EGPIO_OPT_ON, /* Power to extension sleeves */ | ||
87 | IPAQ_EGPIO_CARD_RESET, /* Reset PCMCIA cards on extension sleeve (???) */ | ||
88 | IPAQ_EGPIO_OPT_RESET, /* Reset option pack (???) */ | ||
89 | IPAQ_EGPIO_IR_ON, /* IR sensor/emitter power */ | ||
90 | IPAQ_EGPIO_IR_FSEL, /* IR speed selection 1->fast, 0->slow */ | ||
91 | IPAQ_EGPIO_RS232_ON, /* Maxim RS232 chip power */ | ||
92 | IPAQ_EGPIO_VPP_ON, /* Turn on power to flash programming */ | ||
93 | IPAQ_EGPIO_LCD_ENABLE, /* Enable/disable LCD controller */ | ||
94 | }; | ||
95 | |||
96 | extern void (*assign_h3600_egpio)(enum ipaq_egpio_type x, int level); | ||
97 | |||
98 | #endif /* ASSEMBLY */ | ||
99 | |||
100 | #endif /* _INCLUDE_H3600_H_ */ | ||
diff --git a/arch/arm/mach-sa1100/include/mach/h3600_gpio.h b/arch/arm/mach-sa1100/include/mach/h3600_gpio.h deleted file mode 100644 index a36ca76d018b..000000000000 --- a/arch/arm/mach-sa1100/include/mach/h3600_gpio.h +++ /dev/null | |||
@@ -1,77 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Definitions for H3600 Handheld Computer | ||
4 | * | ||
5 | * Copyright 2000 Compaq Computer Corporation. | ||
6 | * | ||
7 | * Use consistent with the GNU GPL is permitted, | ||
8 | * provided that this copyright notice is | ||
9 | * preserved in its entirety in all copies and derived works. | ||
10 | * | ||
11 | * COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | ||
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | ||
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | ||
14 | * | ||
15 | * Author: Jamey Hicks. | ||
16 | * | ||
17 | * History: | ||
18 | * | ||
19 | * 2001-10-?? Andrew Christian Added support for iPAQ H3800 | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _INCLUDE_H3600_GPIO_H_ | ||
24 | #define _INCLUDE_H3600_GPIO_H_ | ||
25 | |||
26 | /* | ||
27 | * GPIO lines that are common across ALL iPAQ models are in "h3600.h" | ||
28 | * This file contains machine-specific definitions | ||
29 | */ | ||
30 | |||
31 | #define GPIO_H3600_SUSPEND GPIO_GPIO (0) | ||
32 | /* GPIO[2:9] used by LCD on H3600/3800, used as GPIO on H3100 */ | ||
33 | #define GPIO_H3100_BT_ON GPIO_GPIO (2) | ||
34 | #define GPIO_H3100_GPIO3 GPIO_GPIO (3) | ||
35 | #define GPIO_H3100_QMUTE GPIO_GPIO (4) | ||
36 | #define GPIO_H3100_LCD_3V_ON GPIO_GPIO (5) | ||
37 | #define GPIO_H3100_AUD_ON GPIO_GPIO (6) | ||
38 | #define GPIO_H3100_AUD_PWR_ON GPIO_GPIO (7) | ||
39 | #define GPIO_H3100_IR_ON GPIO_GPIO (8) | ||
40 | #define GPIO_H3100_IR_FSEL GPIO_GPIO (9) | ||
41 | |||
42 | /* for H3600, audio sample rate clock generator */ | ||
43 | #define GPIO_H3600_CLK_SET0 GPIO_GPIO (12) | ||
44 | #define GPIO_H3600_CLK_SET1 GPIO_GPIO (13) | ||
45 | |||
46 | #define GPIO_H3600_ACTION_BUTTON GPIO_GPIO (18) | ||
47 | #define GPIO_H3600_SOFT_RESET GPIO_GPIO (20) /* Also known as BATT_FAULT */ | ||
48 | #define GPIO_H3600_OPT_LOCK GPIO_GPIO (22) | ||
49 | #define GPIO_H3600_OPT_DET GPIO_GPIO (27) | ||
50 | |||
51 | /****************************************************/ | ||
52 | |||
53 | #define IRQ_GPIO_H3600_ACTION_BUTTON IRQ_GPIO18 | ||
54 | #define IRQ_GPIO_H3600_OPT_DET IRQ_GPIO27 | ||
55 | |||
56 | /* H3100 / 3600 EGPIO pins */ | ||
57 | #define EGPIO_H3600_VPP_ON (1 << 0) | ||
58 | #define EGPIO_H3600_CARD_RESET (1 << 1) /* reset the attached pcmcia/compactflash card. active high. */ | ||
59 | #define EGPIO_H3600_OPT_RESET (1 << 2) /* reset the attached option pack. active high. */ | ||
60 | #define EGPIO_H3600_CODEC_NRESET (1 << 3) /* reset the onboard UDA1341. active low. */ | ||
61 | #define EGPIO_H3600_OPT_NVRAM_ON (1 << 4) /* apply power to optionpack nvram, active high. */ | ||
62 | #define EGPIO_H3600_OPT_ON (1 << 5) /* full power to option pack. active high. */ | ||
63 | #define EGPIO_H3600_LCD_ON (1 << 6) /* enable 3.3V to LCD. active high. */ | ||
64 | #define EGPIO_H3600_RS232_ON (1 << 7) /* UART3 transceiver force on. Active high. */ | ||
65 | |||
66 | /* H3600 only EGPIO pins */ | ||
67 | #define EGPIO_H3600_LCD_PCI (1 << 8) /* LCD control IC enable. active high. */ | ||
68 | #define EGPIO_H3600_IR_ON (1 << 9) /* apply power to IR module. active high. */ | ||
69 | #define EGPIO_H3600_AUD_AMP_ON (1 << 10) /* apply power to audio power amp. active high. */ | ||
70 | #define EGPIO_H3600_AUD_PWR_ON (1 << 11) /* apply power to reset of audio circuit. active high. */ | ||
71 | #define EGPIO_H3600_QMUTE (1 << 12) /* mute control for onboard UDA1341. active high. */ | ||
72 | #define EGPIO_H3600_IR_FSEL (1 << 13) /* IR speed select: 1->fast, 0->slow */ | ||
73 | #define EGPIO_H3600_LCD_5V_ON (1 << 14) /* enable 5V to LCD. active high. */ | ||
74 | #define EGPIO_H3600_LVDD_ON (1 << 15) /* enable 9V and -6.5V to LCD. */ | ||
75 | |||
76 | |||
77 | #endif /* _INCLUDE_H3600_GPIO_H_ */ | ||
diff --git a/arch/arm/mach-sa1100/include/mach/h3xxx.h b/arch/arm/mach-sa1100/include/mach/h3xxx.h new file mode 100644 index 000000000000..7d9df16f04a2 --- /dev/null +++ b/arch/arm/mach-sa1100/include/mach/h3xxx.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Definitions for Compaq iPAQ H3100 and H3600 handheld computers | ||
3 | * | ||
4 | * (c) 2000 Compaq Computer Corporation. (Author: Jamey Hicks) | ||
5 | * (c) 2009 Dmitry Artamonow <mad_soft@inbox.ru> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #ifndef _INCLUDE_H3XXX_H_ | ||
14 | #define _INCLUDE_H3XXX_H_ | ||
15 | |||
16 | /* Physical memory regions corresponding to chip selects */ | ||
17 | #define H3600_EGPIO_PHYS (SA1100_CS5_PHYS + 0x01000000) | ||
18 | #define H3600_BANK_2_PHYS SA1100_CS2_PHYS | ||
19 | #define H3600_BANK_4_PHYS SA1100_CS4_PHYS | ||
20 | |||
21 | /* Virtual memory regions corresponding to chip selects 2 & 4 (used on sleeves) */ | ||
22 | #define H3600_EGPIO_VIRT 0xf0000000 | ||
23 | #define H3600_BANK_2_VIRT 0xf1000000 | ||
24 | #define H3600_BANK_4_VIRT 0xf3800000 | ||
25 | |||
26 | /* | ||
27 | * gpiolib numbers for all iPAQs | ||
28 | */ | ||
29 | #define H3XXX_GPIO_PWR_BUTTON 0 | ||
30 | #define H3XXX_GPIO_PCMCIA_CD1 10 | ||
31 | #define H3XXX_GPIO_PCMCIA_IRQ1 11 | ||
32 | #define H3XXX_GPIO_PCMCIA_CD0 17 | ||
33 | #define H3XXX_GPIO_ACTION_BUTTON 18 | ||
34 | #define H3XXX_GPIO_SYS_CLK 19 | ||
35 | #define H3XXX_GPIO_PCMCIA_IRQ0 21 | ||
36 | #define H3XXX_GPIO_COM_DCD 23 | ||
37 | #define H3XXX_GPIO_OPTION 24 | ||
38 | #define H3XXX_GPIO_COM_CTS 25 | ||
39 | #define H3XXX_GPIO_COM_RTS 26 | ||
40 | |||
41 | /* machine-specific gpios */ | ||
42 | |||
43 | #define H3100_GPIO_BT_ON 2 | ||
44 | #define H3100_GPIO_QMUTE 4 | ||
45 | #define H3100_GPIO_LCD_3V_ON 5 | ||
46 | #define H3100_GPIO_AUD_ON 6 | ||
47 | #define H3100_GPIO_AUD_PWR_ON 7 | ||
48 | #define H3100_GPIO_IR_ON 8 | ||
49 | #define H3100_GPIO_IR_FSEL 9 | ||
50 | |||
51 | #define H3600_GPIO_CLK_SET0 12 /* audio sample rate clock generator */ | ||
52 | #define H3600_GPIO_CLK_SET1 13 | ||
53 | #define H3600_GPIO_SOFT_RESET 20 /* also known as BATT_FAULT */ | ||
54 | #define H3600_GPIO_OPT_LOCK 22 | ||
55 | #define H3600_GPIO_OPT_DET 27 | ||
56 | |||
57 | |||
58 | /* H3100 / 3600 EGPIO pins */ | ||
59 | #define H3XXX_EGPIO_BASE (GPIO_MAX + 1) | ||
60 | |||
61 | #define H3XXX_EGPIO_VPP_ON (H3XXX_EGPIO_BASE + 0) | ||
62 | #define H3XXX_EGPIO_CARD_RESET (H3XXX_EGPIO_BASE + 1) /* reset the attached pcmcia/compactflash card. active high. */ | ||
63 | #define H3XXX_EGPIO_OPT_RESET (H3XXX_EGPIO_BASE + 2) /* reset the attached option pack. active high. */ | ||
64 | #define H3XXX_EGPIO_CODEC_NRESET (H3XXX_EGPIO_BASE + 3) /* reset the onboard UDA1341. active low. */ | ||
65 | #define H3XXX_EGPIO_OPT_NVRAM_ON (H3XXX_EGPIO_BASE + 4) /* apply power to optionpack nvram, active high. */ | ||
66 | #define H3XXX_EGPIO_OPT_ON (H3XXX_EGPIO_BASE + 5) /* full power to option pack. active high. */ | ||
67 | #define H3XXX_EGPIO_LCD_ON (H3XXX_EGPIO_BASE + 6) /* enable 3.3V to LCD. active high. */ | ||
68 | #define H3XXX_EGPIO_RS232_ON (H3XXX_EGPIO_BASE + 7) /* UART3 transceiver force on. Active high. */ | ||
69 | |||
70 | /* H3600 only EGPIO pins */ | ||
71 | #define H3600_EGPIO_LCD_PCI (H3XXX_EGPIO_BASE + 8) /* LCD control IC enable. active high. */ | ||
72 | #define H3600_EGPIO_IR_ON (H3XXX_EGPIO_BASE + 9) /* apply power to IR module. active high. */ | ||
73 | #define H3600_EGPIO_AUD_AMP_ON (H3XXX_EGPIO_BASE + 10) /* apply power to audio power amp. active high. */ | ||
74 | #define H3600_EGPIO_AUD_PWR_ON (H3XXX_EGPIO_BASE + 11) /* apply power to reset of audio circuit. active high. */ | ||
75 | #define H3600_EGPIO_QMUTE (H3XXX_EGPIO_BASE + 12) /* mute control for onboard UDA1341. active high. */ | ||
76 | #define H3600_EGPIO_IR_FSEL (H3XXX_EGPIO_BASE + 13) /* IR speed select: 1->fast, 0->slow */ | ||
77 | #define H3600_EGPIO_LCD_5V_ON (H3XXX_EGPIO_BASE + 14) /* enable 5V to LCD. active high. */ | ||
78 | #define H3600_EGPIO_LVDD_ON (H3XXX_EGPIO_BASE + 15) /* enable 9V and -6.5V to LCD. */ | ||
79 | |||
80 | struct gpio_default_state { | ||
81 | int gpio; | ||
82 | int mode; | ||
83 | const char *name; | ||
84 | }; | ||
85 | |||
86 | #define GPIO_MODE_IN -1 | ||
87 | #define GPIO_MODE_OUT0 0 | ||
88 | #define GPIO_MODE_OUT1 1 | ||
89 | |||
90 | void h3xxx_init_gpio(struct gpio_default_state *s, size_t n); | ||
91 | void __init h3xxx_map_io(void); | ||
92 | void __init h3xxx_mach_init(void); | ||
93 | |||
94 | #endif /* _INCLUDE_H3XXX_H_ */ | ||
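The new header centralizes the iPAQ gpiolib numbering and exposes h3xxx_init_gpio() so the board files can hand over a table of default pin states. An illustrative sketch of such a table; the entries are assumptions, and the real h3100/h3600 tables in the board files may differ:

static struct gpio_default_state h3600_default_gpio[] = {	/* hypothetical table */
	{ H3100_GPIO_IR_ON,	GPIO_MODE_OUT0,	"IR on" },
	{ H3100_GPIO_QMUTE,	GPIO_MODE_OUT0,	"QMUTE" },
	{ H3XXX_GPIO_COM_DCD,	GPIO_MODE_IN,	"COM DCD" },
};

static void __init h3600_mach_init_example(void)
{
	h3xxx_init_gpio(h3600_default_gpio, ARRAY_SIZE(h3600_default_gpio));
	h3xxx_mach_init();
}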
diff --git a/arch/arm/mach-sa1100/include/mach/mcp.h b/arch/arm/mach-sa1100/include/mach/mcp.h index fb8b09a57ad7..ed1a331508a7 100644 --- a/arch/arm/mach-sa1100/include/mach/mcp.h +++ b/arch/arm/mach-sa1100/include/mach/mcp.h | |||
@@ -16,6 +16,7 @@ struct mcp_plat_data { | |||
16 | u32 mccr0; | 16 | u32 mccr0; |
17 | u32 mccr1; | 17 | u32 mccr1; |
18 | unsigned int sclk_rate; | 18 | unsigned int sclk_rate; |
19 | int gpio_base; | ||
19 | }; | 20 | }; |
20 | 21 | ||
21 | #endif | 22 | #endif |
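The new gpio_base field lets a board tell the MCP/UCB1x00 code where its extra GPIOs should land in the gpiolib number space. A sketch of how collie would use the TC35143 base defined earlier; the mccr0 value is an assumption taken from typical SA-1100 board data:

static struct mcp_plat_data collie_mcp_data = {
	.mccr0		= MCCR0_ADM,			/* assumed; see the collie board file */
	.sclk_rate	= 9216000,
	.gpio_base	= COLLIE_TC35143_GPIO_BASE,	/* UCB1x00 pins appear after GPIO_MAX */
};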
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c index fd776bb666cd..13ebd2d99bfd 100644 --- a/arch/arm/mach-sa1100/jornada720.c +++ b/arch/arm/mach-sa1100/jornada720.c | |||
@@ -354,7 +354,7 @@ static struct resource jornada720_flash_resource = { | |||
354 | 354 | ||
355 | static void __init jornada720_mach_init(void) | 355 | static void __init jornada720_mach_init(void) |
356 | { | 356 | { |
357 | sa11x0_set_flash_data(&jornada720_flash_data, &jornada720_flash_resource, 1); | 357 | sa11x0_register_mtd(&jornada720_flash_data, &jornada720_flash_resource, 1); |
358 | } | 358 | } |
359 | 359 | ||
360 | MACHINE_START(JORNADA720, "HP Jornada 720") | 360 | MACHINE_START(JORNADA720, "HP Jornada 720") |
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c index 1f940df0e5af..68069d6dc07a 100644 --- a/arch/arm/mach-sa1100/lart.c +++ b/arch/arm/mach-sa1100/lart.c | |||
@@ -28,7 +28,7 @@ static struct mcp_plat_data lart_mcp_data = { | |||
28 | 28 | ||
29 | static void __init lart_init(void) | 29 | static void __init lart_init(void) |
30 | { | 30 | { |
31 | sa11x0_set_mcp_data(&lart_mcp_data); | 31 | sa11x0_register_mcp(&lart_mcp_data); |
32 | } | 32 | } |
33 | 33 | ||
34 | static struct map_desc lart_io_desc[] __initdata = { | 34 | static struct map_desc lart_io_desc[] __initdata = { |
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index e1458bc1868e..1ccd6018d3a3 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -109,7 +109,7 @@ static struct flash_platform_data pleb_flash_data = { | |||
109 | 109 | ||
110 | static void __init pleb_init(void) | 110 | static void __init pleb_init(void) |
111 | { | 111 | { |
112 | sa11x0_set_flash_data(&pleb_flash_data, pleb_flash_resources, | 112 | sa11x0_register_mtd(&pleb_flash_data, pleb_flash_resources, |
113 | ARRAY_SIZE(pleb_flash_resources)); | 113 | ARRAY_SIZE(pleb_flash_resources)); |
114 | 114 | ||
115 | 115 | ||
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c index ddd917d1083d..85e82bb73d7e 100644 --- a/arch/arm/mach-sa1100/shannon.c +++ b/arch/arm/mach-sa1100/shannon.c | |||
@@ -59,8 +59,8 @@ static struct mcp_plat_data shannon_mcp_data = { | |||
59 | 59 | ||
60 | static void __init shannon_init(void) | 60 | static void __init shannon_init(void) |
61 | { | 61 | { |
62 | sa11x0_set_flash_data(&shannon_flash_data, &shannon_flash_resource, 1); | 62 | sa11x0_register_mtd(&shannon_flash_data, &shannon_flash_resource, 1); |
63 | sa11x0_set_mcp_data(&shannon_mcp_data); | 63 | sa11x0_register_mcp(&shannon_mcp_data); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void __init shannon_map_io(void) | 66 | static void __init shannon_map_io(void) |
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index 3c74534f7fee..49cfd64663ac 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c | |||
@@ -166,9 +166,9 @@ static void __init simpad_map_io(void) | |||
166 | PCFR = 0; | 166 | PCFR = 0; |
167 | PSDR = 0; | 167 | PSDR = 0; |
168 | 168 | ||
169 | sa11x0_set_flash_data(&simpad_flash_data, simpad_flash_resources, | 169 | sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources, |
170 | ARRAY_SIZE(simpad_flash_resources)); | 170 | ARRAY_SIZE(simpad_flash_resources)); |
171 | sa11x0_set_mcp_data(&simpad_mcp_data); | 171 | sa11x0_register_mcp(&simpad_mcp_data); |
172 | } | 172 | } |
173 | 173 | ||
174 | static void simpad_power_off(void) | 174 | static void simpad_power_off(void) |
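The jornada720, lart, pleb, shannon and simpad hunks are the same mechanical rename: sa11x0_set_flash_data() becomes sa11x0_register_mtd() and sa11x0_set_mcp_data() becomes sa11x0_register_mcp(), with unchanged arguments. A sketch of the prototypes as the board files now see them; the authoritative declarations are in arch/arm/mach-sa1100/generic.h:

void sa11x0_register_mtd(struct flash_platform_data *flash,
			 struct resource *res, int nr);
void sa11x0_register_mcp(struct mcp_plat_data *data);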
diff --git a/arch/avr32/include/asm/bug.h b/arch/avr32/include/asm/bug.h index 331d45bab18f..2aa373cc61b5 100644 --- a/arch/avr32/include/asm/bug.h +++ b/arch/avr32/include/asm/bug.h | |||
@@ -52,7 +52,7 @@ | |||
52 | #define BUG() \ | 52 | #define BUG() \ |
53 | do { \ | 53 | do { \ |
54 | _BUG_OR_WARN(0); \ | 54 | _BUG_OR_WARN(0); \ |
55 | for (;;); \ | 55 | unreachable(); \ |
56 | } while (0) | 56 | } while (0) |
57 | 57 | ||
58 | #define WARN_ON(condition) \ | 58 | #define WARN_ON(condition) \ |
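BUG() now ends in unreachable() instead of an empty endless loop, so the compiler is told directly that control never gets past the trapping instruction. A rough sketch of what unreachable() resolves to; the real definition is split across include/linux/compiler.h and compiler-gcc4.h and keys off the GCC version:

#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define unreachable()	__builtin_unreachable()
#else
# define unreachable()	do { } while (1)	/* older compilers: keep the old loop */
#endif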
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h index dcbaea7ce128..f0acde68aaea 100644 --- a/arch/ia64/include/asm/swiotlb.h +++ b/arch/ia64/include/asm/swiotlb.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #include <linux/dma-mapping.h> | 4 | #include <linux/dma-mapping.h> |
5 | #include <linux/swiotlb.h> | 5 | #include <linux/swiotlb.h> |
6 | 6 | ||
7 | extern int swiotlb_force; | ||
8 | |||
9 | #ifdef CONFIG_SWIOTLB | 7 | #ifdef CONFIG_SWIOTLB |
10 | extern int swiotlb; | 8 | extern int swiotlb; |
11 | extern void pci_swiotlb_init(void); | 9 | extern void pci_swiotlb_init(void); |
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 285aae8431c6..53292abf846c 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = { | |||
41 | void __init swiotlb_dma_init(void) | 41 | void __init swiotlb_dma_init(void) |
42 | { | 42 | { |
43 | dma_ops = &swiotlb_dma_ops; | 43 | dma_ops = &swiotlb_dma_ops; |
44 | swiotlb_init(); | 44 | swiotlb_init(1); |
45 | } | 45 | } |
46 | 46 | ||
47 | void __init pci_swiotlb_init(void) | 47 | void __init pci_swiotlb_init(void) |
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void) | |||
51 | swiotlb = 1; | 51 | swiotlb = 1; |
52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); | 52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); |
53 | machvec_init("dig"); | 53 | machvec_init("dig"); |
54 | swiotlb_init(); | 54 | swiotlb_init(1); |
55 | dma_ops = &swiotlb_dma_ops; | 55 | dma_ops = &swiotlb_dma_ops; |
56 | #else | 56 | #else |
57 | panic("Unable to find Intel IOMMU"); | 57 | panic("Unable to find Intel IOMMU"); |
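swiotlb_init() gained an argument; passing 1 keeps the old behaviour of printing where the bounce buffer was placed during boot. A sketch of the changed interface as these callers use it; the wrapper function is hypothetical:

extern void swiotlb_init(int verbose);		/* see lib/swiotlb.c */

static void __init example_dma_init(void)
{
	swiotlb_init(1);	/* 1: print the software IO TLB placement */
}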
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 1aad0d9f5074..fd7620f025fa 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -358,7 +358,14 @@ config SGI_IP22 | |||
358 | select SWAP_IO_SPACE | 358 | select SWAP_IO_SPACE |
359 | select SYS_HAS_CPU_R4X00 | 359 | select SYS_HAS_CPU_R4X00 |
360 | select SYS_HAS_CPU_R5000 | 360 | select SYS_HAS_CPU_R5000 |
361 | select SYS_HAS_EARLY_PRINTK | 361 | # |
362 | # Disable EARLY_PRINTK for now since it leads to overwritten prom | ||
363 | # memory during early boot on some machines. | ||
364 | # | ||
365 | # See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com | ||
366 | # for a more detailed discussion | ||
367 | # | ||
368 | # select SYS_HAS_EARLY_PRINTK | ||
362 | select SYS_SUPPORTS_32BIT_KERNEL | 369 | select SYS_SUPPORTS_32BIT_KERNEL |
363 | select SYS_SUPPORTS_64BIT_KERNEL | 370 | select SYS_SUPPORTS_64BIT_KERNEL |
364 | select SYS_SUPPORTS_BIG_ENDIAN | 371 | select SYS_SUPPORTS_BIG_ENDIAN |
@@ -410,7 +417,14 @@ config SGI_IP28 | |||
410 | select SGI_HAS_ZILOG | 417 | select SGI_HAS_ZILOG |
411 | select SWAP_IO_SPACE | 418 | select SWAP_IO_SPACE |
412 | select SYS_HAS_CPU_R10000 | 419 | select SYS_HAS_CPU_R10000 |
413 | select SYS_HAS_EARLY_PRINTK | 420 | # |
421 | # Disable EARLY_PRINTK for now since it leads to overwritten prom | ||
422 | # memory during early boot on some machines. | ||
423 | # | ||
424 | # See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com | ||
425 | # for a more detailed discussion | ||
426 | # | ||
427 | # select SYS_HAS_EARLY_PRINTK | ||
414 | select SYS_SUPPORTS_64BIT_KERNEL | 428 | select SYS_SUPPORTS_64BIT_KERNEL |
415 | select SYS_SUPPORTS_BIG_ENDIAN | 429 | select SYS_SUPPORTS_BIG_ENDIAN |
416 | help | 430 | help |
@@ -1439,6 +1453,7 @@ choice | |||
1439 | 1453 | ||
1440 | config PAGE_SIZE_4KB | 1454 | config PAGE_SIZE_4KB |
1441 | bool "4kB" | 1455 | bool "4kB" |
1456 | depends on !CPU_LOONGSON2 | ||
1442 | help | 1457 | help |
1443 | This option selects the standard 4kB Linux page size. On some | 1458 | This option selects the standard 4kB Linux page size. On some |
1444 | R3000-family processors this is the only available page size. Using | 1459 | R3000-family processors this is the only available page size. Using |
@@ -1763,7 +1778,7 @@ config SYS_SUPPORTS_SMARTMIPS | |||
1763 | 1778 | ||
1764 | config ARCH_FLATMEM_ENABLE | 1779 | config ARCH_FLATMEM_ENABLE |
1765 | def_bool y | 1780 | def_bool y |
1766 | depends on !NUMA | 1781 | depends on !NUMA && !CPU_LOONGSON2 |
1767 | 1782 | ||
1768 | config ARCH_DISCONTIGMEM_ENABLE | 1783 | config ARCH_DISCONTIGMEM_ENABLE |
1769 | bool | 1784 | bool |
diff --git a/arch/mips/include/asm/bug.h b/arch/mips/include/asm/bug.h index 6cf29c26e873..540c98a810d1 100644 --- a/arch/mips/include/asm/bug.h +++ b/arch/mips/include/asm/bug.h | |||
@@ -11,9 +11,7 @@ | |||
11 | static inline void __noreturn BUG(void) | 11 | static inline void __noreturn BUG(void) |
12 | { | 12 | { |
13 | __asm__ __volatile__("break %0" : : "i" (BRK_BUG)); | 13 | __asm__ __volatile__("break %0" : : "i" (BRK_BUG)); |
14 | /* Fool GCC into thinking the function doesn't return. */ | 14 | unreachable(); |
15 | while (1) | ||
16 | ; | ||
17 | } | 15 | } |
18 | 16 | ||
19 | #define HAVE_ARCH_BUG | 17 | #define HAVE_ARCH_BUG |
diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h index a2250f390a29..c892bfb3e2c1 100644 --- a/arch/mips/include/asm/mman.h +++ b/arch/mips/include/asm/mman.h | |||
@@ -75,6 +75,7 @@ | |||
75 | 75 | ||
76 | #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ | 76 | #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ |
77 | #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ | 77 | #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ |
78 | #define MADV_HWPOISON 100 /* poison a page for testing */ | ||
78 | 79 | ||
79 | /* compatibility flags */ | 80 | /* compatibility flags */ |
80 | #define MAP_FILE 0 | 81 | #define MAP_FILE 0 |
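Defining MADV_HWPOISON for MIPS makes the hwpoison injection interface reachable from user space on this architecture too. A user-space sketch, assuming a kernel built with CONFIG_MEMORY_FAILURE and sufficient privilege; a later access to the poisoned page is expected to raise SIGBUS:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;				/* make sure the page is instantiated */
	if (madvise(p, psz, MADV_HWPOISON))	/* 100 after this patch */
		perror("madvise(MADV_HWPOISON)");
	return 0;
}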
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h index fcf5f98d90cc..83b5509e09e8 100644 --- a/arch/mips/include/asm/system.h +++ b/arch/mips/include/asm/system.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #ifndef _ASM_SYSTEM_H | 12 | #ifndef _ASM_SYSTEM_H |
13 | #define _ASM_SYSTEM_H | 13 | #define _ASM_SYSTEM_H |
14 | 14 | ||
15 | #include <linux/kernel.h> | ||
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
16 | #include <linux/irqflags.h> | 17 | #include <linux/irqflags.h> |
17 | 18 | ||
@@ -193,10 +194,6 @@ extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 v | |||
193 | #define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels | 194 | #define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels |
194 | #endif | 195 | #endif |
195 | 196 | ||
196 | /* This function doesn't exist, so you'll get a linker error | ||
197 | if something tries to do an invalid xchg(). */ | ||
198 | extern void __xchg_called_with_bad_pointer(void); | ||
199 | |||
200 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | 197 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) |
201 | { | 198 | { |
202 | switch (size) { | 199 | switch (size) { |
@@ -205,11 +202,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
205 | case 8: | 202 | case 8: |
206 | return __xchg_u64(ptr, x); | 203 | return __xchg_u64(ptr, x); |
207 | } | 204 | } |
208 | __xchg_called_with_bad_pointer(); | 205 | |
209 | return x; | 206 | return x; |
210 | } | 207 | } |
211 | 208 | ||
212 | #define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) | 209 | #define xchg(ptr, x) \ |
210 | ({ \ | ||
211 | BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \ | ||
212 | \ | ||
213 | ((__typeof__(*(ptr))) \ | ||
214 | __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ | ||
215 | }) | ||
213 | 216 | ||
214 | extern void set_handler(unsigned long offset, void *addr, unsigned long len); | 217 | extern void set_handler(unsigned long offset, void *addr, unsigned long len); |
215 | extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); | 218 | extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); |
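Instead of relying on a link-time failure from the removed __xchg_called_with_bad_pointer(), xchg() now rejects unsupported operand sizes when the file is compiled: sizeof(*(ptr)) & ~0xc is zero only for 4- and 8-byte objects, so anything else trips BUILD_BUG_ON(). An illustration; the variables are placeholders:

static void xchg_size_example(void)
{
	unsigned long word = 0;
	unsigned short half = 0;

	xchg(&word, 1UL);	/* sizeof is 4 or 8: compiles as before */
	(void)half;
	/* xchg(&half, 1);	   sizeof is 2: BUILD_BUG_ON() now fails the build */
}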
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 3fe1fcfa2e73..fe0d79805603 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -306,6 +306,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
306 | 306 | ||
307 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 307 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
308 | __asm__ __volatile__ ( | 308 | __asm__ __volatile__ ( |
309 | " .set mips3 \n" | ||
309 | " li %[err], 0 \n" | 310 | " li %[err], 0 \n" |
310 | "1: ll %[old], (%[addr]) \n" | 311 | "1: ll %[old], (%[addr]) \n" |
311 | " move %[tmp], %[new] \n" | 312 | " move %[tmp], %[new] \n" |
@@ -320,6 +321,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
320 | " "STR(PTR)" 1b, 4b \n" | 321 | " "STR(PTR)" 1b, 4b \n" |
321 | " "STR(PTR)" 2b, 4b \n" | 322 | " "STR(PTR)" 2b, 4b \n" |
322 | " .previous \n" | 323 | " .previous \n" |
324 | " .set mips0 \n" | ||
323 | : [old] "=&r" (old), | 325 | : [old] "=&r" (old), |
324 | [err] "=&r" (err), | 326 | [err] "=&r" (err), |
325 | [tmp] "=&r" (tmp) | 327 | [tmp] "=&r" (tmp) |
@@ -329,6 +331,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
329 | : "memory"); | 331 | : "memory"); |
330 | } else if (cpu_has_llsc) { | 332 | } else if (cpu_has_llsc) { |
331 | __asm__ __volatile__ ( | 333 | __asm__ __volatile__ ( |
334 | " .set mips3 \n" | ||
332 | " li %[err], 0 \n" | 335 | " li %[err], 0 \n" |
333 | "1: ll %[old], (%[addr]) \n" | 336 | "1: ll %[old], (%[addr]) \n" |
334 | " move %[tmp], %[new] \n" | 337 | " move %[tmp], %[new] \n" |
@@ -347,6 +350,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
347 | " "STR(PTR)" 1b, 5b \n" | 350 | " "STR(PTR)" 1b, 5b \n" |
348 | " "STR(PTR)" 2b, 5b \n" | 351 | " "STR(PTR)" 2b, 5b \n" |
349 | " .previous \n" | 352 | " .previous \n" |
353 | " .set mips0 \n" | ||
350 | : [old] "=&r" (old), | 354 | : [old] "=&r" (old), |
351 | [err] "=&r" (err), | 355 | [err] "=&r" (err), |
352 | [tmp] "=&r" (tmp) | 356 | [tmp] "=&r" (tmp) |
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 9f40e1ff9b4f..041fc1afc3f4 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c | |||
@@ -110,7 +110,6 @@ static struct korina_device korina_dev0_data = { | |||
110 | static struct platform_device korina_dev0 = { | 110 | static struct platform_device korina_dev0 = { |
111 | .id = -1, | 111 | .id = -1, |
112 | .name = "korina", | 112 | .name = "korina", |
113 | .dev.driver_data = &korina_dev0_data, | ||
114 | .resource = korina_dev0_res, | 113 | .resource = korina_dev0_res, |
115 | .num_resources = ARRAY_SIZE(korina_dev0_res), | 114 | .num_resources = ARRAY_SIZE(korina_dev0_res), |
116 | }; | 115 | }; |
@@ -332,6 +331,8 @@ static int __init plat_setup_devices(void) | |||
332 | /* set the uart clock to the current cpu frequency */ | 331 | /* set the uart clock to the current cpu frequency */ |
333 | rb532_uart_res[0].uartclk = idt_cpu_freq; | 332 | rb532_uart_res[0].uartclk = idt_cpu_freq; |
334 | 333 | ||
334 | dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data); | ||
335 | |||
335 | return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); | 336 | return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); |
336 | } | 337 | } |
337 | 338 | ||
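Writing .dev.driver_data directly in a static platform_device is replaced by a dev_set_drvdata() call during platform setup; the driver then reads the board data back the usual way. A sketch of the consuming side, not copied from the korina driver:

static int korina_probe_example(struct platform_device *pdev)	/* illustrative */
{
	struct korina_device *kd = dev_get_drvdata(&pdev->dev);

	if (!kd)
		return -ENODEV;
	/* use the board data in *kd ... */
	return 0;
}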
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 3b1005185390..bf3382f1904d 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug | |||
@@ -46,7 +46,7 @@ config DEBUG_STACK_USAGE | |||
46 | 46 | ||
47 | config HCALL_STATS | 47 | config HCALL_STATS |
48 | bool "Hypervisor call instrumentation" | 48 | bool "Hypervisor call instrumentation" |
49 | depends on PPC_PSERIES && DEBUG_FS | 49 | depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS |
50 | help | 50 | help |
51 | Adds code to keep track of the number of hypervisor calls made and | 51 | Adds code to keep track of the number of hypervisor calls made and |
52 | the amount of time spent in hypervisor calls. Wall time spent in | 52 | the amount of time spent in hypervisor calls. Wall time spent in |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index f1889abb89b1..c568329723b8 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -1683,7 +1683,7 @@ CONFIG_HAVE_ARCH_KGDB=y | |||
1683 | CONFIG_DEBUG_STACKOVERFLOW=y | 1683 | CONFIG_DEBUG_STACKOVERFLOW=y |
1684 | # CONFIG_DEBUG_STACK_USAGE is not set | 1684 | # CONFIG_DEBUG_STACK_USAGE is not set |
1685 | # CONFIG_DEBUG_PAGEALLOC is not set | 1685 | # CONFIG_DEBUG_PAGEALLOC is not set |
1686 | CONFIG_HCALL_STATS=y | 1686 | # CONFIG_HCALL_STATS is not set |
1687 | # CONFIG_CODE_PATCHING_SELFTEST is not set | 1687 | # CONFIG_CODE_PATCHING_SELFTEST is not set |
1688 | # CONFIG_FTR_FIXUP_SELFTEST is not set | 1688 | # CONFIG_FTR_FIXUP_SELFTEST is not set |
1689 | # CONFIG_MSI_BITMAP_SELFTEST is not set | 1689 | # CONFIG_MSI_BITMAP_SELFTEST is not set |
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 9154e8526732..f0fb4fc1f6e6 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #define _ASM_POWERPC_EMULATED_OPS_H | 19 | #define _ASM_POWERPC_EMULATED_OPS_H |
20 | 20 | ||
21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
22 | #include <linux/perf_event.h> | ||
22 | 23 | ||
23 | 24 | ||
24 | #ifdef CONFIG_PPC_EMULATED_STATS | 25 | #ifdef CONFIG_PPC_EMULATED_STATS |
@@ -57,7 +58,7 @@ extern u32 ppc_warn_emulated; | |||
57 | 58 | ||
58 | extern void ppc_warn_emulated_print(const char *type); | 59 | extern void ppc_warn_emulated_print(const char *type); |
59 | 60 | ||
60 | #define PPC_WARN_EMULATED(type) \ | 61 | #define __PPC_WARN_EMULATED(type) \ |
61 | do { \ | 62 | do { \ |
62 | atomic_inc(&ppc_emulated.type.val); \ | 63 | atomic_inc(&ppc_emulated.type.val); \ |
63 | if (ppc_warn_emulated) \ | 64 | if (ppc_warn_emulated) \ |
@@ -66,8 +67,22 @@ extern void ppc_warn_emulated_print(const char *type); | |||
66 | 67 | ||
67 | #else /* !CONFIG_PPC_EMULATED_STATS */ | 68 | #else /* !CONFIG_PPC_EMULATED_STATS */ |
68 | 69 | ||
69 | #define PPC_WARN_EMULATED(type) do { } while (0) | 70 | #define __PPC_WARN_EMULATED(type) do { } while (0) |
70 | 71 | ||
71 | #endif /* !CONFIG_PPC_EMULATED_STATS */ | 72 | #endif /* !CONFIG_PPC_EMULATED_STATS */ |
72 | 73 | ||
74 | #define PPC_WARN_EMULATED(type, regs) \ | ||
75 | do { \ | ||
76 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ | ||
77 | 1, 0, regs, 0); \ | ||
78 | __PPC_WARN_EMULATED(type); \ | ||
79 | } while (0) | ||
80 | |||
81 | #define PPC_WARN_ALIGNMENT(type, regs) \ | ||
82 | do { \ | ||
83 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ | ||
84 | 1, 0, regs, regs->dar); \ | ||
85 | __PPC_WARN_EMULATED(type); \ | ||
86 | } while (0) | ||
87 | |||
73 | #endif /* _ASM_POWERPC_EMULATED_OPS_H */ | 88 | #endif /* _ASM_POWERPC_EMULATED_OPS_H */ |
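With the split, a call such as PPC_WARN_ALIGNMENT(dcbz, regs) in fix_alignment() feeds the perf software counters as well as the old emulated-ops statistics. Roughly, the call now amounts to (sketch of the macro body with type = dcbz):

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, regs->dar);
	__PPC_WARN_EMULATED(dcbz);	/* atomic_inc plus the optional warning print, as before */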
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 6251a4b10be7..c27caac47ad1 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -274,6 +274,8 @@ struct hcall_stats { | |||
274 | unsigned long num_calls; /* number of calls (on this CPU) */ | 274 | unsigned long num_calls; /* number of calls (on this CPU) */ |
275 | unsigned long tb_total; /* total wall time (mftb) of calls. */ | 275 | unsigned long tb_total; /* total wall time (mftb) of calls. */ |
276 | unsigned long purr_total; /* total cpu time (PURR) of calls. */ | 276 | unsigned long purr_total; /* total cpu time (PURR) of calls. */ |
277 | unsigned long tb_start; | ||
278 | unsigned long purr_start; | ||
277 | }; | 279 | }; |
278 | #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) | 280 | #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) |
279 | 281 | ||
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 6315edc205d8..bc8dd53f718a 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -489,6 +489,8 @@ | |||
489 | #define SPRN_MMCR1 798 | 489 | #define SPRN_MMCR1 798 |
490 | #define SPRN_MMCRA 0x312 | 490 | #define SPRN_MMCRA 0x312 |
491 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ | 491 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ |
492 | #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL | ||
493 | #define MMCRA_SDAR_ERAT_MISS 0x20000000UL | ||
492 | #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ | 494 | #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ |
493 | #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ | 495 | #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ |
494 | #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ | 496 | #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ |
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h new file mode 100644 index 000000000000..cbe2297d68b6 --- /dev/null +++ b/arch/powerpc/include/asm/trace.h | |||
@@ -0,0 +1,133 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM powerpc | ||
3 | |||
4 | #if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_POWERPC_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | struct pt_regs; | ||
10 | |||
11 | TRACE_EVENT(irq_entry, | ||
12 | |||
13 | TP_PROTO(struct pt_regs *regs), | ||
14 | |||
15 | TP_ARGS(regs), | ||
16 | |||
17 | TP_STRUCT__entry( | ||
18 | __field(struct pt_regs *, regs) | ||
19 | ), | ||
20 | |||
21 | TP_fast_assign( | ||
22 | __entry->regs = regs; | ||
23 | ), | ||
24 | |||
25 | TP_printk("pt_regs=%p", __entry->regs) | ||
26 | ); | ||
27 | |||
28 | TRACE_EVENT(irq_exit, | ||
29 | |||
30 | TP_PROTO(struct pt_regs *regs), | ||
31 | |||
32 | TP_ARGS(regs), | ||
33 | |||
34 | TP_STRUCT__entry( | ||
35 | __field(struct pt_regs *, regs) | ||
36 | ), | ||
37 | |||
38 | TP_fast_assign( | ||
39 | __entry->regs = regs; | ||
40 | ), | ||
41 | |||
42 | TP_printk("pt_regs=%p", __entry->regs) | ||
43 | ); | ||
44 | |||
45 | TRACE_EVENT(timer_interrupt_entry, | ||
46 | |||
47 | TP_PROTO(struct pt_regs *regs), | ||
48 | |||
49 | TP_ARGS(regs), | ||
50 | |||
51 | TP_STRUCT__entry( | ||
52 | __field(struct pt_regs *, regs) | ||
53 | ), | ||
54 | |||
55 | TP_fast_assign( | ||
56 | __entry->regs = regs; | ||
57 | ), | ||
58 | |||
59 | TP_printk("pt_regs=%p", __entry->regs) | ||
60 | ); | ||
61 | |||
62 | TRACE_EVENT(timer_interrupt_exit, | ||
63 | |||
64 | TP_PROTO(struct pt_regs *regs), | ||
65 | |||
66 | TP_ARGS(regs), | ||
67 | |||
68 | TP_STRUCT__entry( | ||
69 | __field(struct pt_regs *, regs) | ||
70 | ), | ||
71 | |||
72 | TP_fast_assign( | ||
73 | __entry->regs = regs; | ||
74 | ), | ||
75 | |||
76 | TP_printk("pt_regs=%p", __entry->regs) | ||
77 | ); | ||
78 | |||
79 | #ifdef CONFIG_PPC_PSERIES | ||
80 | extern void hcall_tracepoint_regfunc(void); | ||
81 | extern void hcall_tracepoint_unregfunc(void); | ||
82 | |||
83 | TRACE_EVENT_FN(hcall_entry, | ||
84 | |||
85 | TP_PROTO(unsigned long opcode, unsigned long *args), | ||
86 | |||
87 | TP_ARGS(opcode, args), | ||
88 | |||
89 | TP_STRUCT__entry( | ||
90 | __field(unsigned long, opcode) | ||
91 | ), | ||
92 | |||
93 | TP_fast_assign( | ||
94 | __entry->opcode = opcode; | ||
95 | ), | ||
96 | |||
97 | TP_printk("opcode=%lu", __entry->opcode), | ||
98 | |||
99 | hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc | ||
100 | ); | ||
101 | |||
102 | TRACE_EVENT_FN(hcall_exit, | ||
103 | |||
104 | TP_PROTO(unsigned long opcode, unsigned long retval, | ||
105 | unsigned long *retbuf), | ||
106 | |||
107 | TP_ARGS(opcode, retval, retbuf), | ||
108 | |||
109 | TP_STRUCT__entry( | ||
110 | __field(unsigned long, opcode) | ||
111 | __field(unsigned long, retval) | ||
112 | ), | ||
113 | |||
114 | TP_fast_assign( | ||
115 | __entry->opcode = opcode; | ||
116 | __entry->retval = retval; | ||
117 | ), | ||
118 | |||
119 | TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval), | ||
120 | |||
121 | hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc | ||
122 | ); | ||
123 | #endif | ||
124 | |||
125 | #endif /* _TRACE_POWERPC_H */ | ||
126 | |||
127 | #undef TRACE_INCLUDE_PATH | ||
128 | #undef TRACE_INCLUDE_FILE | ||
129 | |||
130 | #define TRACE_INCLUDE_PATH asm | ||
131 | #define TRACE_INCLUDE_FILE trace | ||
132 | |||
133 | #include <trace/define_trace.h> | ||
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index a5b632e52fae..3839839f83c7 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c | |||
@@ -732,7 +732,7 @@ int fix_alignment(struct pt_regs *regs) | |||
732 | 732 | ||
733 | #ifdef CONFIG_SPE | 733 | #ifdef CONFIG_SPE |
734 | if ((instr >> 26) == 0x4) { | 734 | if ((instr >> 26) == 0x4) { |
735 | PPC_WARN_EMULATED(spe); | 735 | PPC_WARN_ALIGNMENT(spe, regs); |
736 | return emulate_spe(regs, reg, instr); | 736 | return emulate_spe(regs, reg, instr); |
737 | } | 737 | } |
738 | #endif | 738 | #endif |
@@ -786,7 +786,7 @@ int fix_alignment(struct pt_regs *regs) | |||
786 | flags |= SPLT; | 786 | flags |= SPLT; |
787 | nb = 8; | 787 | nb = 8; |
788 | } | 788 | } |
789 | PPC_WARN_EMULATED(vsx); | 789 | PPC_WARN_ALIGNMENT(vsx, regs); |
790 | return emulate_vsx(addr, reg, areg, regs, flags, nb); | 790 | return emulate_vsx(addr, reg, areg, regs, flags, nb); |
791 | } | 791 | } |
792 | #endif | 792 | #endif |
@@ -794,7 +794,7 @@ int fix_alignment(struct pt_regs *regs) | |||
794 | * the exception of DCBZ which is handled as a special case here | 794 | * the exception of DCBZ which is handled as a special case here |
795 | */ | 795 | */ |
796 | if (instr == DCBZ) { | 796 | if (instr == DCBZ) { |
797 | PPC_WARN_EMULATED(dcbz); | 797 | PPC_WARN_ALIGNMENT(dcbz, regs); |
798 | return emulate_dcbz(regs, addr); | 798 | return emulate_dcbz(regs, addr); |
799 | } | 799 | } |
800 | if (unlikely(nb == 0)) | 800 | if (unlikely(nb == 0)) |
@@ -804,7 +804,7 @@ int fix_alignment(struct pt_regs *regs) | |||
804 | * function | 804 | * function |
805 | */ | 805 | */ |
806 | if (flags & M) { | 806 | if (flags & M) { |
807 | PPC_WARN_EMULATED(multiple); | 807 | PPC_WARN_ALIGNMENT(multiple, regs); |
808 | return emulate_multiple(regs, addr, reg, nb, | 808 | return emulate_multiple(regs, addr, reg, nb, |
809 | flags, instr, swiz); | 809 | flags, instr, swiz); |
810 | } | 810 | } |
@@ -825,11 +825,11 @@ int fix_alignment(struct pt_regs *regs) | |||
825 | 825 | ||
826 | /* Special case for 16-byte FP loads and stores */ | 826 | /* Special case for 16-byte FP loads and stores */ |
827 | if (nb == 16) { | 827 | if (nb == 16) { |
828 | PPC_WARN_EMULATED(fp_pair); | 828 | PPC_WARN_ALIGNMENT(fp_pair, regs); |
829 | return emulate_fp_pair(addr, reg, flags); | 829 | return emulate_fp_pair(addr, reg, flags); |
830 | } | 830 | } |
831 | 831 | ||
832 | PPC_WARN_EMULATED(unaligned); | 832 | PPC_WARN_ALIGNMENT(unaligned, regs); |
833 | 833 | ||
834 | /* If we are loading, get the data from user space, else | 834 | /* If we are loading, get the data from user space, else |
835 | * get it from register values | 835 | * get it from register values |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 9763267e38b4..bdcb557d470a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -551,7 +551,7 @@ restore: | |||
551 | BEGIN_FW_FTR_SECTION | 551 | BEGIN_FW_FTR_SECTION |
552 | ld r5,SOFTE(r1) | 552 | ld r5,SOFTE(r1) |
553 | FW_FTR_SECTION_ELSE | 553 | FW_FTR_SECTION_ELSE |
554 | b iseries_check_pending_irqs | 554 | b .Liseries_check_pending_irqs |
555 | ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | 555 | ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) |
556 | 2: | 556 | 2: |
557 | TRACE_AND_RESTORE_IRQ(r5); | 557 | TRACE_AND_RESTORE_IRQ(r5); |
@@ -623,7 +623,7 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
623 | 623 | ||
624 | #endif /* CONFIG_PPC_BOOK3E */ | 624 | #endif /* CONFIG_PPC_BOOK3E */ |
625 | 625 | ||
626 | iseries_check_pending_irqs: | 626 | .Liseries_check_pending_irqs: |
627 | #ifdef CONFIG_PPC_ISERIES | 627 | #ifdef CONFIG_PPC_ISERIES |
628 | ld r5,SOFTE(r1) | 628 | ld r5,SOFTE(r1) |
629 | cmpdi 0,r5,0 | 629 | cmpdi 0,r5,0 |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1808876edcc9..c7eb4e0eb86c 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -185,12 +185,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
185 | * prolog code of the PerformanceMonitor one. A little | 185 | * prolog code of the PerformanceMonitor one. A little |
186 | * trickery is thus necessary | 186 | * trickery is thus necessary |
187 | */ | 187 | */ |
188 | performance_monitor_pSeries_1: | ||
188 | . = 0xf00 | 189 | . = 0xf00 |
189 | b performance_monitor_pSeries | 190 | b performance_monitor_pSeries |
190 | 191 | ||
192 | altivec_unavailable_pSeries_1: | ||
191 | . = 0xf20 | 193 | . = 0xf20 |
192 | b altivec_unavailable_pSeries | 194 | b altivec_unavailable_pSeries |
193 | 195 | ||
196 | vsx_unavailable_pSeries_1: | ||
194 | . = 0xf40 | 197 | . = 0xf40 |
195 | b vsx_unavailable_pSeries | 198 | b vsx_unavailable_pSeries |
196 | 199 | ||
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index e5d121177984..02a334662cc0 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -70,6 +70,8 @@ | |||
70 | #include <asm/firmware.h> | 70 | #include <asm/firmware.h> |
71 | #include <asm/lv1call.h> | 71 | #include <asm/lv1call.h> |
72 | #endif | 72 | #endif |
73 | #define CREATE_TRACE_POINTS | ||
74 | #include <asm/trace.h> | ||
73 | 75 | ||
74 | int __irq_offset_value; | 76 | int __irq_offset_value; |
75 | static int ppc_spurious_interrupts; | 77 | static int ppc_spurious_interrupts; |
@@ -325,6 +327,8 @@ void do_IRQ(struct pt_regs *regs) | |||
325 | struct pt_regs *old_regs = set_irq_regs(regs); | 327 | struct pt_regs *old_regs = set_irq_regs(regs); |
326 | unsigned int irq; | 328 | unsigned int irq; |
327 | 329 | ||
330 | trace_irq_entry(regs); | ||
331 | |||
328 | irq_enter(); | 332 | irq_enter(); |
329 | 333 | ||
330 | check_stack_overflow(); | 334 | check_stack_overflow(); |
@@ -348,6 +352,8 @@ void do_IRQ(struct pt_regs *regs) | |||
348 | timer_interrupt(regs); | 352 | timer_interrupt(regs); |
349 | } | 353 | } |
350 | #endif | 354 | #endif |
355 | |||
356 | trace_irq_exit(regs); | ||
351 | } | 357 | } |
352 | 358 | ||
353 | void __init init_IRQ(void) | 359 | void __init init_IRQ(void) |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 87f1663584b0..1eb85fbf53a5 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -1165,7 +1165,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1165 | */ | 1165 | */ |
1166 | if (record) { | 1166 | if (record) { |
1167 | struct perf_sample_data data = { | 1167 | struct perf_sample_data data = { |
1168 | .addr = 0, | 1168 | .addr = ~0ULL, |
1169 | .period = event->hw.last_period, | 1169 | .period = event->hw.last_period, |
1170 | }; | 1170 | }; |
1171 | 1171 | ||
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 0f4c1c73a6ad..199de527d411 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -73,10 +73,6 @@ | |||
73 | #define MMCR1_PMCSEL_MSK 0x7f | 73 | #define MMCR1_PMCSEL_MSK 0x7f |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Bits in MMCRA | ||
77 | */ | ||
78 | |||
79 | /* | ||
80 | * Layout of constraint bits: | 76 | * Layout of constraint bits: |
81 | * 6666555555555544444444443333333333222222222211111111110000000000 | 77 | * 6666555555555544444444443333333333222222222211111111110000000000 |
82 | * 3210987654321098765432109876543210987654321098765432109876543210 | 78 | * 3210987654321098765432109876543210987654321098765432109876543210 |
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index c351b3a57fbb..98b6a729a9dd 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -73,10 +73,6 @@ | |||
73 | #define MMCR1_PMCSEL_MSK 0x7f | 73 | #define MMCR1_PMCSEL_MSK 0x7f |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Bits in MMCRA | ||
77 | */ | ||
78 | |||
79 | /* | ||
80 | * Layout of constraint bits: | 76 | * Layout of constraint bits: |
81 | * 6666555555555544444444443333333333222222222211111111110000000000 | 77 | * 6666555555555544444444443333333333222222222211111111110000000000 |
82 | * 3210987654321098765432109876543210987654321098765432109876543210 | 78 | * 3210987654321098765432109876543210987654321098765432109876543210 |
@@ -390,7 +386,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev, | |||
390 | unsigned int hwc[], unsigned long mmcr[]) | 386 | unsigned int hwc[], unsigned long mmcr[]) |
391 | { | 387 | { |
392 | unsigned long mmcr1 = 0; | 388 | unsigned long mmcr1 = 0; |
393 | unsigned long mmcra = 0; | 389 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
394 | unsigned int pmc, unit, byte, psel; | 390 | unsigned int pmc, unit, byte, psel; |
395 | unsigned int ttm, grp; | 391 | unsigned int ttm, grp; |
396 | int i, isbus, bit, grsel; | 392 | int i, isbus, bit, grsel; |
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index ca399ba5034c..84a607bda8fb 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -178,7 +178,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev, | |||
178 | unsigned int hwc[], unsigned long mmcr[]) | 178 | unsigned int hwc[], unsigned long mmcr[]) |
179 | { | 179 | { |
180 | unsigned long mmcr1 = 0; | 180 | unsigned long mmcr1 = 0; |
181 | unsigned long mmcra = 0; | 181 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
182 | int i; | 182 | int i; |
183 | unsigned int pmc, ev, b, u, s, psel; | 183 | unsigned int pmc, ev, b, u, s, psel; |
184 | unsigned int ttmset = 0; | 184 | unsigned int ttmset = 0; |
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 28a4daacdc02..852f7b7f6b40 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -51,10 +51,6 @@ | |||
51 | #define MMCR1_PMCSEL_MSK 0xff | 51 | #define MMCR1_PMCSEL_MSK 0xff |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Bits in MMCRA | ||
55 | */ | ||
56 | |||
57 | /* | ||
58 | * Layout of constraint bits: | 54 | * Layout of constraint bits: |
59 | * 6666555555555544444444443333333333222222222211111111110000000000 | 55 | * 6666555555555544444444443333333333222222222211111111110000000000 |
60 | * 3210987654321098765432109876543210987654321098765432109876543210 | 56 | * 3210987654321098765432109876543210987654321098765432109876543210 |
@@ -230,7 +226,7 @@ static int power7_compute_mmcr(u64 event[], int n_ev, | |||
230 | unsigned int hwc[], unsigned long mmcr[]) | 226 | unsigned int hwc[], unsigned long mmcr[]) |
231 | { | 227 | { |
232 | unsigned long mmcr1 = 0; | 228 | unsigned long mmcr1 = 0; |
233 | unsigned long mmcra = 0; | 229 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
234 | unsigned int pmc, unit, combine, l2sel, psel; | 230 | unsigned int pmc, unit, combine, l2sel, psel; |
235 | unsigned int pmc_inuse = 0; | 231 | unsigned int pmc_inuse = 0; |
236 | int i; | 232 | int i; |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 479574413a93..8eff48e20dba 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -84,10 +84,6 @@ static short mmcr1_adder_bits[8] = { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * Bits in MMCRA | ||
88 | */ | ||
89 | |||
90 | /* | ||
91 | * Layout of constraint bits: | 87 | * Layout of constraint bits: |
92 | * 6666555555555544444444443333333333222222222211111111110000000000 | 88 | * 6666555555555544444444443333333333222222222211111111110000000000 |
93 | * 3210987654321098765432109876543210987654321098765432109876543210 | 89 | * 3210987654321098765432109876543210987654321098765432109876543210 |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 4271f7a655a3..845c72ab7357 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -660,6 +660,7 @@ late_initcall(check_cache_coherency); | |||
660 | 660 | ||
661 | #ifdef CONFIG_DEBUG_FS | 661 | #ifdef CONFIG_DEBUG_FS |
662 | struct dentry *powerpc_debugfs_root; | 662 | struct dentry *powerpc_debugfs_root; |
663 | EXPORT_SYMBOL(powerpc_debugfs_root); | ||
663 | 664 | ||
664 | static int powerpc_debugfs_init(void) | 665 | static int powerpc_debugfs_init(void) |
665 | { | 666 | { |
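Exporting powerpc_debugfs_root lets modular code create its entries under the same arch-level debugfs directory. A sketch of a module-side consumer; the file name and fops are placeholders:

#include <linux/debugfs.h>

extern struct dentry *powerpc_debugfs_root;		/* exported above */
static const struct file_operations example_fops;	/* placeholder */
static struct dentry *example_file;

static int __init example_debugfs_init(void)
{
	example_file = debugfs_create_file("example_stats", 0444,
					   powerpc_debugfs_root, NULL,
					   &example_fops);
	return example_file ? 0 : -ENOMEM;
}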
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 53bcf3d792db..b152de3e64d4 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -345,7 +345,7 @@ void __init setup_arch(char **cmdline_p) | |||
345 | 345 | ||
346 | #ifdef CONFIG_SWIOTLB | 346 | #ifdef CONFIG_SWIOTLB |
347 | if (ppc_swiotlb_enable) | 347 | if (ppc_swiotlb_enable) |
348 | swiotlb_init(); | 348 | swiotlb_init(1); |
349 | #endif | 349 | #endif |
350 | 350 | ||
351 | paging_init(); | 351 | paging_init(); |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 04f638d82fb3..df2c9e932b37 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -550,7 +550,7 @@ void __init setup_arch(char **cmdline_p) | |||
550 | 550 | ||
551 | #ifdef CONFIG_SWIOTLB | 551 | #ifdef CONFIG_SWIOTLB |
552 | if (ppc_swiotlb_enable) | 552 | if (ppc_swiotlb_enable) |
553 | swiotlb_init(); | 553 | swiotlb_init(1); |
554 | #endif | 554 | #endif |
555 | 555 | ||
556 | paging_init(); | 556 | paging_init(); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index a136a11c490d..36707dec94d7 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
55 | #include <linux/delay.h> | 55 | #include <linux/delay.h> |
56 | #include <linux/perf_event.h> | 56 | #include <linux/perf_event.h> |
57 | #include <asm/trace.h> | ||
57 | 58 | ||
58 | #include <asm/io.h> | 59 | #include <asm/io.h> |
59 | #include <asm/processor.h> | 60 | #include <asm/processor.h> |
@@ -571,6 +572,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
571 | struct clock_event_device *evt = &decrementer->event; | 572 | struct clock_event_device *evt = &decrementer->event; |
572 | u64 now; | 573 | u64 now; |
573 | 574 | ||
575 | trace_timer_interrupt_entry(regs); | ||
576 | |||
574 | /* Ensure a positive value is written to the decrementer, or else | 577 | /* Ensure a positive value is written to the decrementer, or else |
575 | * some CPUs will continue to take decrementer exceptions */ | 578 | * some CPUs will continue to take decrementer exceptions */ |
576 | set_dec(DECREMENTER_MAX); | 579 | set_dec(DECREMENTER_MAX); |
@@ -590,6 +593,7 @@ void timer_interrupt(struct pt_regs * regs) | |||
590 | now = decrementer->next_tb - now; | 593 | now = decrementer->next_tb - now; |
591 | if (now <= DECREMENTER_MAX) | 594 | if (now <= DECREMENTER_MAX) |
592 | set_dec((int)now); | 595 | set_dec((int)now); |
596 | trace_timer_interrupt_exit(regs); | ||
593 | return; | 597 | return; |
594 | } | 598 | } |
595 | old_regs = set_irq_regs(regs); | 599 | old_regs = set_irq_regs(regs); |
@@ -620,6 +624,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
620 | 624 | ||
621 | irq_exit(); | 625 | irq_exit(); |
622 | set_irq_regs(old_regs); | 626 | set_irq_regs(old_regs); |
627 | |||
628 | trace_timer_interrupt_exit(regs); | ||
623 | } | 629 | } |
624 | 630 | ||
625 | void wakeup_decrementer(void) | 631 | void wakeup_decrementer(void) |
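The hunk above only adds the trace_timer_interrupt_entry()/trace_timer_interrupt_exit() call sites; the event definitions themselves come from the new asm/trace.h include. As a rough sketch of what such a definition can look like with the standard TRACE_EVENT() macro (the recorded field and print format here are illustrative, not the actual header contents, and the usual TRACE_SYSTEM/TRACE_INCLUDE boilerplate is omitted):

TRACE_EVENT(timer_interrupt_entry,

	TP_PROTO(struct pt_regs *regs),

	TP_ARGS(regs),

	/* one illustrative field: the interrupted instruction pointer */
	TP_STRUCT__entry(
		__field(unsigned long, nip)
	),

	TP_fast_assign(
		__entry->nip = regs->nip;
	),

	TP_printk("pt_regs.nip=0x%lx", __entry->nip)
);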
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6f0ae1a9bfae..9d1f9354d6ca 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -759,7 +759,7 @@ static int emulate_instruction(struct pt_regs *regs) | |||
759 | 759 | ||
760 | /* Emulate the mfspr rD, PVR. */ | 760 | /* Emulate the mfspr rD, PVR. */ |
761 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { | 761 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { |
762 | PPC_WARN_EMULATED(mfpvr); | 762 | PPC_WARN_EMULATED(mfpvr, regs); |
763 | rd = (instword >> 21) & 0x1f; | 763 | rd = (instword >> 21) & 0x1f; |
764 | regs->gpr[rd] = mfspr(SPRN_PVR); | 764 | regs->gpr[rd] = mfspr(SPRN_PVR); |
765 | return 0; | 765 | return 0; |
@@ -767,7 +767,7 @@ static int emulate_instruction(struct pt_regs *regs) | |||
767 | 767 | ||
768 | /* Emulating the dcba insn is just a no-op. */ | 768 | /* Emulating the dcba insn is just a no-op. */ |
769 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { | 769 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { |
770 | PPC_WARN_EMULATED(dcba); | 770 | PPC_WARN_EMULATED(dcba, regs); |
771 | return 0; | 771 | return 0; |
772 | } | 772 | } |
773 | 773 | ||
@@ -776,7 +776,7 @@ static int emulate_instruction(struct pt_regs *regs) | |||
776 | int shift = (instword >> 21) & 0x1c; | 776 | int shift = (instword >> 21) & 0x1c; |
777 | unsigned long msk = 0xf0000000UL >> shift; | 777 | unsigned long msk = 0xf0000000UL >> shift; |
778 | 778 | ||
779 | PPC_WARN_EMULATED(mcrxr); | 779 | PPC_WARN_EMULATED(mcrxr, regs); |
780 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); | 780 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); |
781 | regs->xer &= ~0xf0000000UL; | 781 | regs->xer &= ~0xf0000000UL; |
782 | return 0; | 782 | return 0; |
@@ -784,19 +784,19 @@ static int emulate_instruction(struct pt_regs *regs) | |||
784 | 784 | ||
785 | /* Emulate load/store string insn. */ | 785 | /* Emulate load/store string insn. */ |
786 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { | 786 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { |
787 | PPC_WARN_EMULATED(string); | 787 | PPC_WARN_EMULATED(string, regs); |
788 | return emulate_string_inst(regs, instword); | 788 | return emulate_string_inst(regs, instword); |
789 | } | 789 | } |
790 | 790 | ||
791 | /* Emulate the popcntb (Population Count Bytes) instruction. */ | 791 | /* Emulate the popcntb (Population Count Bytes) instruction. */ |
792 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { | 792 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { |
793 | PPC_WARN_EMULATED(popcntb); | 793 | PPC_WARN_EMULATED(popcntb, regs); |
794 | return emulate_popcntb_inst(regs, instword); | 794 | return emulate_popcntb_inst(regs, instword); |
795 | } | 795 | } |
796 | 796 | ||
797 | /* Emulate isel (Integer Select) instruction */ | 797 | /* Emulate isel (Integer Select) instruction */ |
798 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { | 798 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { |
799 | PPC_WARN_EMULATED(isel); | 799 | PPC_WARN_EMULATED(isel, regs); |
800 | return emulate_isel(regs, instword); | 800 | return emulate_isel(regs, instword); |
801 | } | 801 | } |
802 | 802 | ||
@@ -995,7 +995,7 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
995 | #ifdef CONFIG_MATH_EMULATION | 995 | #ifdef CONFIG_MATH_EMULATION |
996 | errcode = do_mathemu(regs); | 996 | errcode = do_mathemu(regs); |
997 | if (errcode >= 0) | 997 | if (errcode >= 0) |
998 | PPC_WARN_EMULATED(math); | 998 | PPC_WARN_EMULATED(math, regs); |
999 | 999 | ||
1000 | switch (errcode) { | 1000 | switch (errcode) { |
1001 | case 0: | 1001 | case 0: |
@@ -1018,7 +1018,7 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
1018 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | 1018 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) |
1019 | errcode = Soft_emulate_8xx(regs); | 1019 | errcode = Soft_emulate_8xx(regs); |
1020 | if (errcode >= 0) | 1020 | if (errcode >= 0) |
1021 | PPC_WARN_EMULATED(8xx); | 1021 | PPC_WARN_EMULATED(8xx, regs); |
1022 | 1022 | ||
1023 | switch (errcode) { | 1023 | switch (errcode) { |
1024 | case 0: | 1024 | case 0: |
@@ -1129,7 +1129,7 @@ void altivec_assist_exception(struct pt_regs *regs) | |||
1129 | 1129 | ||
1130 | flush_altivec_to_thread(current); | 1130 | flush_altivec_to_thread(current); |
1131 | 1131 | ||
1132 | PPC_WARN_EMULATED(altivec); | 1132 | PPC_WARN_EMULATED(altivec, regs); |
1133 | err = emulate_altivec(regs); | 1133 | err = emulate_altivec(regs); |
1134 | if (err == 0) { | 1134 | if (err == 0) { |
1135 | regs->nip += 4; /* skip emulated instruction */ | 1135 | regs->nip += 4; /* skip emulated instruction */ |
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 75f3267fdc30..e68beac0a171 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S | |||
@@ -26,11 +26,11 @@ BEGIN_FTR_SECTION | |||
26 | srd r8,r5,r11 | 26 | srd r8,r5,r11 |
27 | 27 | ||
28 | mtctr r8 | 28 | mtctr r8 |
29 | setup: | 29 | .Lsetup: |
30 | dcbt r9,r4 | 30 | dcbt r9,r4 |
31 | dcbz r9,r3 | 31 | dcbz r9,r3 |
32 | add r9,r9,r12 | 32 | add r9,r9,r12 |
33 | bdnz setup | 33 | bdnz .Lsetup |
34 | END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) | 34 | END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) |
35 | addi r3,r3,-8 | 35 | addi r3,r3,-8 |
36 | srdi r8,r5,7 /* page is copied in 128 byte strides */ | 36 | srdi r8,r5,7 /* page is copied in 128 byte strides */ |
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index c1427b3634ec..383a5d0e9818 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S | |||
@@ -14,68 +14,94 @@ | |||
14 | 14 | ||
15 | #define STK_PARM(i) (48 + ((i)-3)*8) | 15 | #define STK_PARM(i) (48 + ((i)-3)*8) |
16 | 16 | ||
17 | #ifdef CONFIG_HCALL_STATS | 17 | #ifdef CONFIG_TRACEPOINTS |
18 | |||
19 | .section ".toc","aw" | ||
20 | |||
21 | .globl hcall_tracepoint_refcount | ||
22 | hcall_tracepoint_refcount: | ||
23 | .llong 0 | ||
24 | |||
25 | .section ".text" | ||
26 | |||
18 | /* | 27 | /* |
19 | * precall must preserve all registers. use unused STK_PARM() | 28 | * precall must preserve all registers. use unused STK_PARM() |
20 | * areas to save snapshots and opcode. | 29 | * areas to save snapshots and opcode. We branch around this |
30 | * in early init (eg when populating the MMU hashtable) by using an | ||
31 | * unconditional cpu feature. | ||
21 | */ | 32 | */ |
22 | #define HCALL_INST_PRECALL \ | 33 | #define HCALL_INST_PRECALL(FIRST_REG) \ |
23 | std r3,STK_PARM(r3)(r1); /* save opcode */ \ | ||
24 | mftb r0; /* get timebase and */ \ | ||
25 | std r0,STK_PARM(r5)(r1); /* save for later */ \ | ||
26 | BEGIN_FTR_SECTION; \ | 34 | BEGIN_FTR_SECTION; \ |
27 | mfspr r0,SPRN_PURR; /* get PURR and */ \ | 35 | b 1f; \ |
28 | std r0,STK_PARM(r6)(r1); /* save for later */ \ | 36 | END_FTR_SECTION(0, 1); \ |
29 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); | 37 | ld r12,hcall_tracepoint_refcount@toc(r2); \ |
30 | 38 | cmpdi r12,0; \ | |
39 | beq+ 1f; \ | ||
40 | mflr r0; \ | ||
41 | std r3,STK_PARM(r3)(r1); \ | ||
42 | std r4,STK_PARM(r4)(r1); \ | ||
43 | std r5,STK_PARM(r5)(r1); \ | ||
44 | std r6,STK_PARM(r6)(r1); \ | ||
45 | std r7,STK_PARM(r7)(r1); \ | ||
46 | std r8,STK_PARM(r8)(r1); \ | ||
47 | std r9,STK_PARM(r9)(r1); \ | ||
48 | std r10,STK_PARM(r10)(r1); \ | ||
49 | std r0,16(r1); \ | ||
50 | addi r4,r1,STK_PARM(FIRST_REG); \ | ||
51 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ | ||
52 | bl .__trace_hcall_entry; \ | ||
53 | addi r1,r1,STACK_FRAME_OVERHEAD; \ | ||
54 | ld r0,16(r1); \ | ||
55 | ld r3,STK_PARM(r3)(r1); \ | ||
56 | ld r4,STK_PARM(r4)(r1); \ | ||
57 | ld r5,STK_PARM(r5)(r1); \ | ||
58 | ld r6,STK_PARM(r6)(r1); \ | ||
59 | ld r7,STK_PARM(r7)(r1); \ | ||
60 | ld r8,STK_PARM(r8)(r1); \ | ||
61 | ld r9,STK_PARM(r9)(r1); \ | ||
62 | ld r10,STK_PARM(r10)(r1); \ | ||
63 | mtlr r0; \ | ||
64 | 1: | ||
65 | |||
31 | /* | 66 | /* |
32 | * postcall is performed immediately before function return which | 67 | * postcall is performed immediately before function return which |
33 | * allows liberal use of volatile registers. We branch around this | 68 | * allows liberal use of volatile registers. We branch around this |
34 | * in early init (eg when populating the MMU hashtable) by using an | 69 | * in early init (eg when populating the MMU hashtable) by using an |
35 | * unconditional cpu feature. | 70 | * unconditional cpu feature. |
36 | */ | 71 | */ |
37 | #define HCALL_INST_POSTCALL \ | 72 | #define __HCALL_INST_POSTCALL \ |
38 | BEGIN_FTR_SECTION; \ | 73 | BEGIN_FTR_SECTION; \ |
39 | b 1f; \ | 74 | b 1f; \ |
40 | END_FTR_SECTION(0, 1); \ | 75 | END_FTR_SECTION(0, 1); \ |
41 | ld r4,STK_PARM(r3)(r1); /* validate opcode */ \ | 76 | ld r12,hcall_tracepoint_refcount@toc(r2); \ |
42 | cmpldi cr7,r4,MAX_HCALL_OPCODE; \ | 77 | cmpdi r12,0; \ |
43 | bgt- cr7,1f; \ | 78 | beq+ 1f; \ |
44 | \ | 79 | mflr r0; \ |
45 | /* get time and PURR snapshots after hcall */ \ | 80 | ld r6,STK_PARM(r3)(r1); \ |
46 | mftb r7; /* timebase after */ \ | 81 | std r3,STK_PARM(r3)(r1); \ |
47 | BEGIN_FTR_SECTION; \ | 82 | mr r4,r3; \ |
48 | mfspr r8,SPRN_PURR; /* PURR after */ \ | 83 | mr r3,r6; \ |
49 | ld r6,STK_PARM(r6)(r1); /* PURR before */ \ | 84 | std r0,16(r1); \ |
50 | subf r6,r6,r8; /* delta */ \ | 85 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ |
51 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ | 86 | bl .__trace_hcall_exit; \ |
52 | ld r5,STK_PARM(r5)(r1); /* timebase before */ \ | 87 | addi r1,r1,STACK_FRAME_OVERHEAD; \ |
53 | subf r5,r5,r7; /* time delta */ \ | 88 | ld r0,16(r1); \ |
54 | \ | 89 | ld r3,STK_PARM(r3)(r1); \ |
55 | /* calculate address of stat structure r4 = opcode */ \ | 90 | mtlr r0; \ |
56 | srdi r4,r4,2; /* index into array */ \ | ||
57 | mulli r4,r4,HCALL_STAT_SIZE; \ | ||
58 | LOAD_REG_ADDR(r7, per_cpu__hcall_stats); \ | ||
59 | add r4,r4,r7; \ | ||
60 | ld r7,PACA_DATA_OFFSET(r13); /* per cpu offset */ \ | ||
61 | add r4,r4,r7; \ | ||
62 | \ | ||
63 | /* update stats */ \ | ||
64 | ld r7,HCALL_STAT_CALLS(r4); /* count */ \ | ||
65 | addi r7,r7,1; \ | ||
66 | std r7,HCALL_STAT_CALLS(r4); \ | ||
67 | ld r7,HCALL_STAT_TB(r4); /* timebase */ \ | ||
68 | add r7,r7,r5; \ | ||
69 | std r7,HCALL_STAT_TB(r4); \ | ||
70 | BEGIN_FTR_SECTION; \ | ||
71 | ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ | ||
72 | add r7,r7,r6; \ | ||
73 | std r7,HCALL_STAT_PURR(r4); \ | ||
74 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ | ||
75 | 1: | 91 | 1: |
92 | |||
93 | #define HCALL_INST_POSTCALL_NORETS \ | ||
94 | li r5,0; \ | ||
95 | __HCALL_INST_POSTCALL | ||
96 | |||
97 | #define HCALL_INST_POSTCALL(BUFREG) \ | ||
98 | mr r5,BUFREG; \ | ||
99 | __HCALL_INST_POSTCALL | ||
100 | |||
76 | #else | 101 | #else |
77 | #define HCALL_INST_PRECALL | 102 | #define HCALL_INST_PRECALL(FIRST_ARG) |
78 | #define HCALL_INST_POSTCALL | 103 | #define HCALL_INST_POSTCALL_NORETS |
104 | #define HCALL_INST_POSTCALL(BUFREG) | ||
79 | #endif | 105 | #endif |
80 | 106 | ||
81 | .text | 107 | .text |
@@ -86,11 +112,11 @@ _GLOBAL(plpar_hcall_norets) | |||
86 | mfcr r0 | 112 | mfcr r0 |
87 | stw r0,8(r1) | 113 | stw r0,8(r1) |
88 | 114 | ||
89 | HCALL_INST_PRECALL | 115 | HCALL_INST_PRECALL(r4) |
90 | 116 | ||
91 | HVSC /* invoke the hypervisor */ | 117 | HVSC /* invoke the hypervisor */ |
92 | 118 | ||
93 | HCALL_INST_POSTCALL | 119 | HCALL_INST_POSTCALL_NORETS |
94 | 120 | ||
95 | lwz r0,8(r1) | 121 | lwz r0,8(r1) |
96 | mtcrf 0xff,r0 | 122 | mtcrf 0xff,r0 |
@@ -102,7 +128,7 @@ _GLOBAL(plpar_hcall) | |||
102 | mfcr r0 | 128 | mfcr r0 |
103 | stw r0,8(r1) | 129 | stw r0,8(r1) |
104 | 130 | ||
105 | HCALL_INST_PRECALL | 131 | HCALL_INST_PRECALL(r5) |
106 | 132 | ||
107 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 133 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ |
108 | 134 | ||
@@ -121,7 +147,7 @@ _GLOBAL(plpar_hcall) | |||
121 | std r6, 16(r12) | 147 | std r6, 16(r12) |
122 | std r7, 24(r12) | 148 | std r7, 24(r12) |
123 | 149 | ||
124 | HCALL_INST_POSTCALL | 150 | HCALL_INST_POSTCALL(r12) |
125 | 151 | ||
126 | lwz r0,8(r1) | 152 | lwz r0,8(r1) |
127 | mtcrf 0xff,r0 | 153 | mtcrf 0xff,r0 |
@@ -168,7 +194,7 @@ _GLOBAL(plpar_hcall9) | |||
168 | mfcr r0 | 194 | mfcr r0 |
169 | stw r0,8(r1) | 195 | stw r0,8(r1) |
170 | 196 | ||
171 | HCALL_INST_PRECALL | 197 | HCALL_INST_PRECALL(r5) |
172 | 198 | ||
173 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 199 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ |
174 | 200 | ||
@@ -196,7 +222,7 @@ _GLOBAL(plpar_hcall9) | |||
196 | std r11,56(r12) | 222 | std r11,56(r12) |
197 | std r0, 64(r12) | 223 | std r0, 64(r12) |
198 | 224 | ||
199 | HCALL_INST_POSTCALL | 225 | HCALL_INST_POSTCALL(r12) |
200 | 226 | ||
201 | lwz r0,8(r1) | 227 | lwz r0,8(r1) |
202 | mtcrf 0xff,r0 | 228 | mtcrf 0xff,r0 |
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 3631a4f277eb..2f58c71b7259 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/hvcall.h> | 26 | #include <asm/hvcall.h> |
27 | #include <asm/firmware.h> | 27 | #include <asm/firmware.h> |
28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
29 | #include <asm/trace.h> | ||
29 | 30 | ||
30 | DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); | 31 | DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); |
31 | 32 | ||
@@ -100,6 +101,35 @@ static const struct file_operations hcall_inst_seq_fops = { | |||
100 | #define HCALL_ROOT_DIR "hcall_inst" | 101 | #define HCALL_ROOT_DIR "hcall_inst" |
101 | #define CPU_NAME_BUF_SIZE 32 | 102 | #define CPU_NAME_BUF_SIZE 32 |
102 | 103 | ||
104 | |||
105 | static void probe_hcall_entry(unsigned long opcode, unsigned long *args) | ||
106 | { | ||
107 | struct hcall_stats *h; | ||
108 | |||
109 | if (opcode > MAX_HCALL_OPCODE) | ||
110 | return; | ||
111 | |||
112 | h = &get_cpu_var(hcall_stats)[opcode / 4]; | ||
113 | h->tb_start = mftb(); | ||
114 | h->purr_start = mfspr(SPRN_PURR); | ||
115 | } | ||
116 | |||
117 | static void probe_hcall_exit(unsigned long opcode, unsigned long retval, | ||
118 | unsigned long *retbuf) | ||
119 | { | ||
120 | struct hcall_stats *h; | ||
121 | |||
122 | if (opcode > MAX_HCALL_OPCODE) | ||
123 | return; | ||
124 | |||
125 | h = &__get_cpu_var(hcall_stats)[opcode / 4]; | ||
126 | h->num_calls++; | ||
127 | h->tb_total = mftb() - h->tb_start; | ||
128 | h->purr_total = mfspr(SPRN_PURR) - h->purr_start; | ||
129 | |||
130 | put_cpu_var(hcall_stats); | ||
131 | } | ||
132 | |||
103 | static int __init hcall_inst_init(void) | 133 | static int __init hcall_inst_init(void) |
104 | { | 134 | { |
105 | struct dentry *hcall_root; | 135 | struct dentry *hcall_root; |
@@ -110,6 +140,14 @@ static int __init hcall_inst_init(void) | |||
110 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | 140 | if (!firmware_has_feature(FW_FEATURE_LPAR)) |
111 | return 0; | 141 | return 0; |
112 | 142 | ||
143 | if (register_trace_hcall_entry(probe_hcall_entry)) | ||
144 | return -EINVAL; | ||
145 | |||
146 | if (register_trace_hcall_exit(probe_hcall_exit)) { | ||
147 | unregister_trace_hcall_entry(probe_hcall_entry); | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | |||
113 | hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL); | 151 | hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL); |
114 | if (!hcall_root) | 152 | if (!hcall_root) |
115 | return -ENOMEM; | 153 | return -ENOMEM; |
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 903eb9eec687..0707653612ba 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/cputable.h> | 39 | #include <asm/cputable.h> |
40 | #include <asm/udbg.h> | 40 | #include <asm/udbg.h> |
41 | #include <asm/smp.h> | 41 | #include <asm/smp.h> |
42 | #include <asm/trace.h> | ||
42 | 43 | ||
43 | #include "plpar_wrappers.h" | 44 | #include "plpar_wrappers.h" |
44 | #include "pseries.h" | 45 | #include "pseries.h" |
@@ -661,3 +662,35 @@ void arch_free_page(struct page *page, int order) | |||
661 | EXPORT_SYMBOL(arch_free_page); | 662 | EXPORT_SYMBOL(arch_free_page); |
662 | 663 | ||
663 | #endif | 664 | #endif |
665 | |||
666 | #ifdef CONFIG_TRACEPOINTS | ||
667 | /* | ||
668 | * We optimise our hcall path by placing hcall_tracepoint_refcount | ||
669 | * directly in the TOC so we can check if the hcall tracepoints are | ||
670 | * enabled via a single load. | ||
671 | */ | ||
672 | |||
673 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | ||
674 | extern long hcall_tracepoint_refcount; | ||
675 | |||
676 | void hcall_tracepoint_regfunc(void) | ||
677 | { | ||
678 | hcall_tracepoint_refcount++; | ||
679 | } | ||
680 | |||
681 | void hcall_tracepoint_unregfunc(void) | ||
682 | { | ||
683 | hcall_tracepoint_refcount--; | ||
684 | } | ||
685 | |||
686 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | ||
687 | { | ||
688 | trace_hcall_entry(opcode, args); | ||
689 | } | ||
690 | |||
691 | void __trace_hcall_exit(long opcode, unsigned long retval, | ||
692 | unsigned long *retbuf) | ||
693 | { | ||
694 | trace_hcall_exit(opcode, retval, retbuf); | ||
695 | } | ||
696 | #endif | ||
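The comment above notes that the reg/unreg hooks run under the tracepoints_mutex; they are attached to the events in asm/trace.h. A hedged sketch of how an event with registration callbacks can be declared, assuming the TRACE_EVENT_FN() variant that takes register/unregister functions as its last two arguments (the recorded field is illustrative):

TRACE_EVENT_FN(hcall_entry,

	TP_PROTO(unsigned long opcode, unsigned long *args),

	TP_ARGS(opcode, args),

	TP_STRUCT__entry(
		__field(unsigned long, opcode)
	),

	TP_fast_assign(
		__entry->opcode = opcode;
	),

	TP_printk("opcode=%lu", __entry->opcode),

	hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
);

With this shape, the first call to register_trace_hcall_entry() bumps hcall_tracepoint_refcount via the regfunc, which is what the assembly fast path in hvCall.S tests with a single TOC load.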
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 43c0acad7160..16c673096a22 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -95,6 +95,34 @@ config S390 | |||
95 | select HAVE_ARCH_TRACEHOOK | 95 | select HAVE_ARCH_TRACEHOOK |
96 | select INIT_ALL_POSSIBLE | 96 | select INIT_ALL_POSSIBLE |
97 | select HAVE_PERF_EVENTS | 97 | select HAVE_PERF_EVENTS |
98 | select ARCH_INLINE_SPIN_TRYLOCK | ||
99 | select ARCH_INLINE_SPIN_TRYLOCK_BH | ||
100 | select ARCH_INLINE_SPIN_LOCK | ||
101 | select ARCH_INLINE_SPIN_LOCK_BH | ||
102 | select ARCH_INLINE_SPIN_LOCK_IRQ | ||
103 | select ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
104 | select ARCH_INLINE_SPIN_UNLOCK | ||
105 | select ARCH_INLINE_SPIN_UNLOCK_BH | ||
106 | select ARCH_INLINE_SPIN_UNLOCK_IRQ | ||
107 | select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
108 | select ARCH_INLINE_READ_TRYLOCK | ||
109 | select ARCH_INLINE_READ_LOCK | ||
110 | select ARCH_INLINE_READ_LOCK_BH | ||
111 | select ARCH_INLINE_READ_LOCK_IRQ | ||
112 | select ARCH_INLINE_READ_LOCK_IRQSAVE | ||
113 | select ARCH_INLINE_READ_UNLOCK | ||
114 | select ARCH_INLINE_READ_UNLOCK_BH | ||
115 | select ARCH_INLINE_READ_UNLOCK_IRQ | ||
116 | select ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
117 | select ARCH_INLINE_WRITE_TRYLOCK | ||
118 | select ARCH_INLINE_WRITE_LOCK | ||
119 | select ARCH_INLINE_WRITE_LOCK_BH | ||
120 | select ARCH_INLINE_WRITE_LOCK_IRQ | ||
121 | select ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
122 | select ARCH_INLINE_WRITE_UNLOCK | ||
123 | select ARCH_INLINE_WRITE_UNLOCK_BH | ||
124 | select ARCH_INLINE_WRITE_UNLOCK_IRQ | ||
125 | select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
98 | 126 | ||
99 | config SCHED_OMIT_FRAME_POINTER | 127 | config SCHED_OMIT_FRAME_POINTER |
100 | bool | 128 | bool |
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 7efd0abe8887..efb74fd5156e 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | #define BUG() do { \ | 50 | #define BUG() do { \ |
51 | __EMIT_BUG(0); \ | 51 | __EMIT_BUG(0); \ |
52 | for (;;); \ | 52 | unreachable(); \ |
53 | } while (0) | 53 | } while (0) |
54 | 54 | ||
55 | #define WARN_ON(x) ({ \ | 55 | #define WARN_ON(x) ({ \ |
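Replacing the endless loop after __EMIT_BUG() with unreachable() tells the compiler that control never continues past BUG(). For reference, a minimal sketch of what the helper amounts to, assuming the compiler-header split this change relies on (gcc 4.5 gains a builtin, older compilers keep the loop):

/* Sketch only -- the real definitions live in the linux/compiler*.h headers. */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define unreachable()	__builtin_unreachable()
#else
# define unreachable()	do { } while (1)
#endif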
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 41ce6861174e..c9af0d19c7ab 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define _raw_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define _raw_write_relax(lock) cpu_relax() |
193 | 193 | ||
194 | #define __always_inline__spin_lock | ||
195 | #define __always_inline__read_lock | ||
196 | #define __always_inline__write_lock | ||
197 | #define __always_inline__spin_lock_bh | ||
198 | #define __always_inline__read_lock_bh | ||
199 | #define __always_inline__write_lock_bh | ||
200 | #define __always_inline__spin_lock_irq | ||
201 | #define __always_inline__read_lock_irq | ||
202 | #define __always_inline__write_lock_irq | ||
203 | #define __always_inline__spin_lock_irqsave | ||
204 | #define __always_inline__read_lock_irqsave | ||
205 | #define __always_inline__write_lock_irqsave | ||
206 | #define __always_inline__spin_trylock | ||
207 | #define __always_inline__read_trylock | ||
208 | #define __always_inline__write_trylock | ||
209 | #define __always_inline__spin_trylock_bh | ||
210 | #define __always_inline__spin_unlock | ||
211 | #define __always_inline__read_unlock | ||
212 | #define __always_inline__write_unlock | ||
213 | #define __always_inline__spin_unlock_bh | ||
214 | #define __always_inline__read_unlock_bh | ||
215 | #define __always_inline__write_unlock_bh | ||
216 | #define __always_inline__spin_unlock_irq | ||
217 | #define __always_inline__read_unlock_irq | ||
218 | #define __always_inline__write_unlock_irq | ||
219 | #define __always_inline__spin_unlock_irqrestore | ||
220 | #define __always_inline__read_unlock_irqrestore | ||
221 | #define __always_inline__write_unlock_irqrestore | ||
222 | |||
223 | #endif /* __ASM_SPINLOCK_H */ | 194 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index f5fe34dd821b..5a82bc68193e 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -203,73 +203,10 @@ out: | |||
203 | 203 | ||
204 | #ifdef CONFIG_FTRACE_SYSCALLS | 204 | #ifdef CONFIG_FTRACE_SYSCALLS |
205 | 205 | ||
206 | extern unsigned long __start_syscalls_metadata[]; | ||
207 | extern unsigned long __stop_syscalls_metadata[]; | ||
208 | extern unsigned int sys_call_table[]; | 206 | extern unsigned int sys_call_table[]; |
209 | 207 | ||
210 | static struct syscall_metadata **syscalls_metadata; | 208 | unsigned long __init arch_syscall_addr(int nr) |
211 | |||
212 | struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
213 | { | ||
214 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) | ||
215 | return NULL; | ||
216 | |||
217 | return syscalls_metadata[nr]; | ||
218 | } | ||
219 | |||
220 | int syscall_name_to_nr(char *name) | ||
221 | { | ||
222 | int i; | ||
223 | |||
224 | if (!syscalls_metadata) | ||
225 | return -1; | ||
226 | for (i = 0; i < NR_syscalls; i++) | ||
227 | if (syscalls_metadata[i]) | ||
228 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
229 | return i; | ||
230 | return -1; | ||
231 | } | ||
232 | |||
233 | void set_syscall_enter_id(int num, int id) | ||
234 | { | ||
235 | syscalls_metadata[num]->enter_id = id; | ||
236 | } | ||
237 | |||
238 | void set_syscall_exit_id(int num, int id) | ||
239 | { | 209 | { |
240 | syscalls_metadata[num]->exit_id = id; | 210 | return (unsigned long)sys_call_table[nr]; |
241 | } | ||
242 | |||
243 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | ||
244 | { | ||
245 | struct syscall_metadata *start; | ||
246 | struct syscall_metadata *stop; | ||
247 | char str[KSYM_SYMBOL_LEN]; | ||
248 | |||
249 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
250 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
251 | kallsyms_lookup(syscall, NULL, NULL, NULL, str); | ||
252 | |||
253 | for ( ; start < stop; start++) { | ||
254 | if (start->name && !strcmp(start->name + 3, str + 3)) | ||
255 | return start; | ||
256 | } | ||
257 | return NULL; | ||
258 | } | ||
259 | |||
260 | static int __init arch_init_ftrace_syscalls(void) | ||
261 | { | ||
262 | struct syscall_metadata *meta; | ||
263 | int i; | ||
264 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, | ||
265 | GFP_KERNEL); | ||
266 | if (!syscalls_metadata) | ||
267 | return -ENOMEM; | ||
268 | for (i = 0; i < NR_syscalls; i++) { | ||
269 | meta = find_syscall_meta((unsigned long)sys_call_table[i]); | ||
270 | syscalls_metadata[i] = meta; | ||
271 | } | ||
272 | return 0; | ||
273 | } | 211 | } |
274 | arch_initcall(arch_init_ftrace_syscalls); | ||
275 | #endif | 212 | #endif |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 72ace9515a07..178084b4377c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -49,6 +49,7 @@ config X86 | |||
49 | select HAVE_KERNEL_GZIP | 49 | select HAVE_KERNEL_GZIP |
50 | select HAVE_KERNEL_BZIP2 | 50 | select HAVE_KERNEL_BZIP2 |
51 | select HAVE_KERNEL_LZMA | 51 | select HAVE_KERNEL_LZMA |
52 | select HAVE_HW_BREAKPOINT | ||
52 | select HAVE_ARCH_KMEMCHECK | 53 | select HAVE_ARCH_KMEMCHECK |
53 | 54 | ||
54 | config OUTPUT_FORMAT | 55 | config OUTPUT_FORMAT |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 2649840d888f..5e99762eb5c2 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -406,7 +406,7 @@ config X86_CMPXCHG64 | |||
406 | # generates cmov. | 406 | # generates cmov. |
407 | config X86_CMOV | 407 | config X86_CMOV |
408 | def_bool y | 408 | def_bool y |
409 | depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM) | 409 | depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) |
410 | 410 | ||
411 | config X86_MINIMUM_CPU_FAMILY | 411 | config X86_MINIMUM_CPU_FAMILY |
412 | int | 412 | int |
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index d105f29bb6bb..731318e5ac1d 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
@@ -186,6 +186,15 @@ config X86_DS_SELFTEST | |||
186 | config HAVE_MMIOTRACE_SUPPORT | 186 | config HAVE_MMIOTRACE_SUPPORT |
187 | def_bool y | 187 | def_bool y |
188 | 188 | ||
189 | config X86_DECODER_SELFTEST | ||
190 | bool "x86 instruction decoder selftest" | ||
191 | depends on DEBUG_KERNEL | ||
192 | ---help--- | ||
193 | Perform x86 instruction decoder selftests at build time. | ||
194 | This option is useful for checking the sanity of x86 instruction | ||
195 | decoder code. | ||
196 | If unsure, say "N". | ||
197 | |||
189 | # | 198 | # |
190 | # IO delay types: | 199 | # IO delay types: |
191 | # | 200 | # |
@@ -287,4 +296,18 @@ config OPTIMIZE_INLINING | |||
287 | 296 | ||
288 | If unsure, say N. | 297 | If unsure, say N. |
289 | 298 | ||
299 | config DEBUG_STRICT_USER_COPY_CHECKS | ||
300 | bool "Strict copy size checks" | ||
301 | depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING | ||
302 | ---help--- | ||
303 | Enabling this option turns a certain set of sanity checks for user | ||
304 | copy operations into compile time failures. | ||
305 | |||
306 | The copy_from_user() etc checks are there to help test if there | ||
307 | are sufficient security checks on the length argument of | ||
308 | the copy operation, by having gcc prove that the argument is | ||
309 | within bounds. | ||
310 | |||
311 | If unsure, or if you run an older (pre 4.4) gcc, say N. | ||
312 | |||
290 | endmenu | 313 | endmenu |
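The help text above describes turning copy_from_user() length checks that gcc cannot prove safe into build failures. A rough sketch of the pattern, under the assumption that it combines __builtin_object_size() with a helper carrying gcc's warning/error function attributes; the names copy_from_user_overflow and checked_copy_from_user are illustrative, not the real uaccess API:

/* Illustrative fragment only -- not the actual x86 uaccess implementation. */
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
	__attribute__((error("copy_from_user() buffer size is not provably correct")));
#else
	__attribute__((warning("copy_from_user() buffer size is not provably correct")));
#endif

static inline unsigned long
checked_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long sz = __builtin_object_size(to, 0);	/* (unsigned long)-1 if unknown */

	if (likely(sz == (unsigned long)-1 || sz >= n))
		return copy_from_user(to, from, n);

	/*
	 * If gcc cannot prove that n fits in the destination, this call
	 * survives dead-code elimination and the attribute above fires at
	 * compile time (an error with the strict option, else a warning).
	 */
	copy_from_user_overflow();
	return n;
}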
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index d2d24c9ee64d..78b32be55e9e 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -155,6 +155,9 @@ all: bzImage | |||
155 | KBUILD_IMAGE := $(boot)/bzImage | 155 | KBUILD_IMAGE := $(boot)/bzImage |
156 | 156 | ||
157 | bzImage: vmlinux | 157 | bzImage: vmlinux |
158 | ifeq ($(CONFIG_X86_DECODER_SELFTEST),y) | ||
159 | $(Q)$(MAKE) $(build)=arch/x86/tools posttest | ||
160 | endif | ||
158 | $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) | 161 | $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) |
159 | $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot | 162 | $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot |
160 | $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@ | 163 | $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@ |
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu index 30e9a264f69d..cbf0776dbec1 100644 --- a/arch/x86/Makefile_32.cpu +++ b/arch/x86/Makefile_32.cpu | |||
@@ -41,7 +41,7 @@ cflags-$(CONFIG_X86_ELAN) += -march=i486 | |||
41 | 41 | ||
42 | # Geode GX1 support | 42 | # Geode GX1 support |
43 | cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx | 43 | cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx |
44 | 44 | cflags-$(CONFIG_MGEODE_LX) += $(call cc-option,-march=geode,-march=pentium-mmx) | |
45 | # add at the end to overwrite eventual tuning options from earlier | 45 | # add at the end to overwrite eventual tuning options from earlier |
46 | # cpu entries | 46 | # cpu entries |
47 | cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) | 47 | cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) |
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 4a8e80cdcfa5..9f828f87ca35 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild | |||
@@ -10,6 +10,7 @@ header-y += ptrace-abi.h | |||
10 | header-y += sigcontext32.h | 10 | header-y += sigcontext32.h |
11 | header-y += ucontext.h | 11 | header-y += ucontext.h |
12 | header-y += processor-flags.h | 12 | header-y += processor-flags.h |
13 | header-y += hw_breakpoint.h | ||
13 | 14 | ||
14 | unifdef-y += e820.h | 15 | unifdef-y += e820.h |
15 | unifdef-y += ist.h | 16 | unifdef-y += ist.h |
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index bb70e397aa84..7a15588e45d4 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/user.h> | 18 | #include <linux/user.h> |
19 | #include <linux/elfcore.h> | 19 | #include <linux/elfcore.h> |
20 | #include <asm/debugreg.h> | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * fill in the user structure for an a.out core dump | 23 | * fill in the user structure for an a.out core dump |
@@ -32,14 +33,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
32 | >> PAGE_SHIFT; | 33 | >> PAGE_SHIFT; |
33 | dump->u_dsize -= dump->u_tsize; | 34 | dump->u_dsize -= dump->u_tsize; |
34 | dump->u_ssize = 0; | 35 | dump->u_ssize = 0; |
35 | dump->u_debugreg[0] = current->thread.debugreg0; | 36 | aout_dump_debugregs(dump); |
36 | dump->u_debugreg[1] = current->thread.debugreg1; | ||
37 | dump->u_debugreg[2] = current->thread.debugreg2; | ||
38 | dump->u_debugreg[3] = current->thread.debugreg3; | ||
39 | dump->u_debugreg[4] = 0; | ||
40 | dump->u_debugreg[5] = 0; | ||
41 | dump->u_debugreg[6] = current->thread.debugreg6; | ||
42 | dump->u_debugreg[7] = current->thread.debugreg7; | ||
43 | 37 | ||
44 | if (dump->start_stack < TASK_SIZE) | 38 | if (dump->start_stack < TASK_SIZE) |
45 | dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) | 39 | dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) |
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index e2077d343c33..b97f786a48d5 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h | |||
@@ -1,17 +1,13 @@ | |||
1 | #ifdef __ASSEMBLY__ | 1 | #ifdef __ASSEMBLY__ |
2 | 2 | ||
3 | #ifdef CONFIG_X86_32 | 3 | #include <asm/asm.h> |
4 | # define X86_ALIGN .long | ||
5 | #else | ||
6 | # define X86_ALIGN .quad | ||
7 | #endif | ||
8 | 4 | ||
9 | #ifdef CONFIG_SMP | 5 | #ifdef CONFIG_SMP |
10 | .macro LOCK_PREFIX | 6 | .macro LOCK_PREFIX |
11 | 1: lock | 7 | 1: lock |
12 | .section .smp_locks,"a" | 8 | .section .smp_locks,"a" |
13 | .align 4 | 9 | _ASM_ALIGN |
14 | X86_ALIGN 1b | 10 | _ASM_PTR 1b |
15 | .previous | 11 | .previous |
16 | .endm | 12 | .endm |
17 | #else | 13 | #else |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index c240efc74e00..69b74a7b877f 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -84,6 +84,7 @@ static inline void alternatives_smp_switch(int smp) {} | |||
84 | " .byte " __stringify(feature) "\n" /* feature bit */ \ | 84 | " .byte " __stringify(feature) "\n" /* feature bit */ \ |
85 | " .byte 662b-661b\n" /* sourcelen */ \ | 85 | " .byte 662b-661b\n" /* sourcelen */ \ |
86 | " .byte 664f-663f\n" /* replacementlen */ \ | 86 | " .byte 664f-663f\n" /* replacementlen */ \ |
87 | " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ | ||
87 | ".previous\n" \ | 88 | ".previous\n" \ |
88 | ".section .altinstr_replacement, \"ax\"\n" \ | 89 | ".section .altinstr_replacement, \"ax\"\n" \ |
89 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | 90 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ |
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h index 4b180897e6b5..5af2982133b5 100644 --- a/arch/x86/include/asm/amd_iommu.h +++ b/arch/x86/include/asm/amd_iommu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -23,19 +23,13 @@ | |||
23 | #include <linux/irqreturn.h> | 23 | #include <linux/irqreturn.h> |
24 | 24 | ||
25 | #ifdef CONFIG_AMD_IOMMU | 25 | #ifdef CONFIG_AMD_IOMMU |
26 | extern int amd_iommu_init(void); | 26 | |
27 | extern int amd_iommu_init_dma_ops(void); | ||
28 | extern int amd_iommu_init_passthrough(void); | ||
29 | extern void amd_iommu_detect(void); | 27 | extern void amd_iommu_detect(void); |
30 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | 28 | |
31 | extern void amd_iommu_flush_all_domains(void); | ||
32 | extern void amd_iommu_flush_all_devices(void); | ||
33 | extern void amd_iommu_shutdown(void); | ||
34 | extern void amd_iommu_apply_erratum_63(u16 devid); | ||
35 | #else | 29 | #else |
36 | static inline int amd_iommu_init(void) { return -ENODEV; } | 30 | |
37 | static inline void amd_iommu_detect(void) { } | 31 | static inline void amd_iommu_detect(void) { } |
38 | static inline void amd_iommu_shutdown(void) { } | 32 | |
39 | #endif | 33 | #endif |
40 | 34 | ||
41 | #endif /* _ASM_X86_AMD_IOMMU_H */ | 35 | #endif /* _ASM_X86_AMD_IOMMU_H */ |
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h new file mode 100644 index 000000000000..84786fb9a23b --- /dev/null +++ b/arch/x86/include/asm/amd_iommu_proto.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Advanced Micro Devices, Inc. | ||
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published | ||
7 | * by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_X86_AMD_IOMMU_PROTO_H | ||
20 | #define _ASM_X86_AMD_IOMMU_PROTO_H | ||
21 | |||
22 | struct amd_iommu; | ||
23 | |||
24 | extern int amd_iommu_init_dma_ops(void); | ||
25 | extern int amd_iommu_init_passthrough(void); | ||
26 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | ||
27 | extern void amd_iommu_flush_all_domains(void); | ||
28 | extern void amd_iommu_flush_all_devices(void); | ||
29 | extern void amd_iommu_apply_erratum_63(u16 devid); | ||
30 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | ||
31 | |||
32 | #ifndef CONFIG_AMD_IOMMU_STATS | ||
33 | |||
34 | static inline void amd_iommu_stats_init(void) { } | ||
35 | |||
36 | #endif /* !CONFIG_AMD_IOMMU_STATS */ | ||
37 | |||
38 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ | ||
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 2a2cc7a78a81..ba19ad4c47d0 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -25,6 +25,11 @@ | |||
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Maximum number of IOMMUs supported | ||
29 | */ | ||
30 | #define MAX_IOMMUS 32 | ||
31 | |||
32 | /* | ||
28 | * some size calculation constants | 33 | * some size calculation constants |
29 | */ | 34 | */ |
30 | #define DEV_TABLE_ENTRY_SIZE 32 | 35 | #define DEV_TABLE_ENTRY_SIZE 32 |
@@ -206,6 +211,9 @@ extern bool amd_iommu_dump; | |||
206 | printk(KERN_INFO "AMD-Vi: " format, ## arg); \ | 211 | printk(KERN_INFO "AMD-Vi: " format, ## arg); \ |
207 | } while(0); | 212 | } while(0); |
208 | 213 | ||
214 | /* global flag if IOMMUs cache non-present entries */ | ||
215 | extern bool amd_iommu_np_cache; | ||
216 | |||
209 | /* | 217 | /* |
210 | * Make iterating over all IOMMUs easier | 218 | * Make iterating over all IOMMUs easier |
211 | */ | 219 | */ |
@@ -226,6 +234,8 @@ extern bool amd_iommu_dump; | |||
226 | * independent of their use. | 234 | * independent of their use. |
227 | */ | 235 | */ |
228 | struct protection_domain { | 236 | struct protection_domain { |
237 | struct list_head list; /* for list of all protection domains */ | ||
238 | struct list_head dev_list; /* List of all devices in this domain */ | ||
229 | spinlock_t lock; /* mostly used to lock the page table*/ | 239 | spinlock_t lock; /* mostly used to lock the page table*/ |
230 | u16 id; /* the domain id written to the device table */ | 240 | u16 id; /* the domain id written to the device table */ |
231 | int mode; /* paging mode (0-6 levels) */ | 241 | int mode; /* paging mode (0-6 levels) */ |
@@ -233,7 +243,20 @@ struct protection_domain { | |||
233 | unsigned long flags; /* flags to find out type of domain */ | 243 | unsigned long flags; /* flags to find out type of domain */ |
234 | bool updated; /* complete domain flush required */ | 244 | bool updated; /* complete domain flush required */ |
235 | unsigned dev_cnt; /* devices assigned to this domain */ | 245 | unsigned dev_cnt; /* devices assigned to this domain */ |
246 | unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ | ||
236 | void *priv; /* private data */ | 247 | void *priv; /* private data */ |
248 | |||
249 | }; | ||
250 | |||
251 | /* | ||
252 | * This struct contains device specific data for the IOMMU | ||
253 | */ | ||
254 | struct iommu_dev_data { | ||
255 | struct list_head list; /* For domain->dev_list */ | ||
256 | struct device *dev; /* Device this data belongs to */ | ||
257 | struct device *alias; /* The Alias Device */ | ||
258 | struct protection_domain *domain; /* Domain the device is bound to */ | ||
259 | atomic_t bind; /* Domain attach reference count */ | ||
237 | }; | 260 | }; |
238 | 261 | ||
239 | /* | 262 | /* |
@@ -291,6 +314,9 @@ struct dma_ops_domain { | |||
291 | struct amd_iommu { | 314 | struct amd_iommu { |
292 | struct list_head list; | 315 | struct list_head list; |
293 | 316 | ||
317 | /* Index within the IOMMU array */ | ||
318 | int index; | ||
319 | |||
294 | /* locks the accesses to the hardware */ | 320 | /* locks the accesses to the hardware */ |
295 | spinlock_t lock; | 321 | spinlock_t lock; |
296 | 322 | ||
@@ -357,6 +383,21 @@ struct amd_iommu { | |||
357 | extern struct list_head amd_iommu_list; | 383 | extern struct list_head amd_iommu_list; |
358 | 384 | ||
359 | /* | 385 | /* |
386 | * Array with pointers to each IOMMU struct | ||
387 | * The indices are referenced in the protection domains | ||
388 | */ | ||
389 | extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; | ||
390 | |||
391 | /* Number of IOMMUs present in the system */ | ||
392 | extern int amd_iommus_present; | ||
393 | |||
394 | /* | ||
395 | * Declarations for the global list of all protection domains | ||
396 | */ | ||
397 | extern spinlock_t amd_iommu_pd_lock; | ||
398 | extern struct list_head amd_iommu_pd_list; | ||
399 | |||
400 | /* | ||
360 | * Structure defining one entry in the device table | 401 | * Structure defining one entry in the device table |
361 | */ | 402 | */ |
362 | struct dev_table_entry { | 403 | struct dev_table_entry { |
@@ -416,15 +457,9 @@ extern unsigned amd_iommu_aperture_order; | |||
416 | /* largest PCI device id we expect translation requests for */ | 457 | /* largest PCI device id we expect translation requests for */ |
417 | extern u16 amd_iommu_last_bdf; | 458 | extern u16 amd_iommu_last_bdf; |
418 | 459 | ||
419 | /* data structures for protection domain handling */ | ||
420 | extern struct protection_domain **amd_iommu_pd_table; | ||
421 | |||
422 | /* allocation bitmap for domain ids */ | 460 | /* allocation bitmap for domain ids */ |
423 | extern unsigned long *amd_iommu_pd_alloc_bitmap; | 461 | extern unsigned long *amd_iommu_pd_alloc_bitmap; |
424 | 462 | ||
425 | /* will be 1 if device isolation is enabled */ | ||
426 | extern bool amd_iommu_isolate; | ||
427 | |||
428 | /* | 463 | /* |
429 | * If true, the addresses will be flushed on unmap time, not when | 464 | * If true, the addresses will be flushed on unmap time, not when |
430 | * they are reused | 465 | * they are reused |
@@ -462,11 +497,6 @@ struct __iommu_counter { | |||
462 | #define ADD_STATS_COUNTER(name, x) | 497 | #define ADD_STATS_COUNTER(name, x) |
463 | #define SUB_STATS_COUNTER(name, x) | 498 | #define SUB_STATS_COUNTER(name, x) |
464 | 499 | ||
465 | static inline void amd_iommu_stats_init(void) { } | ||
466 | |||
467 | #endif /* CONFIG_AMD_IOMMU_STATS */ | 500 | #endif /* CONFIG_AMD_IOMMU_STATS */ |
468 | 501 | ||
469 | /* some function prototypes */ | ||
470 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | ||
471 | |||
472 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ | 502 | #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ |
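The new dev_list / iommu_dev_data plumbing above replaces the global protection-domain table with a per-domain list of attached devices. As a small sketch of how such a list is typically walked with the standard linux/list.h helpers (illustrative only; it assumes the caller already holds the domain lock):

#include <linux/list.h>

static void walk_domain_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	/* Visit every device currently attached to this protection domain. */
	list_for_each_entry(dev_data, &domain->dev_list, list)
		dev_info(dev_data->dev, "attached to domain id %d\n", domain->id);
}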
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 474d80d3e6cc..b4ac2cdcb64f 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -297,20 +297,20 @@ struct apic { | |||
297 | int disable_esr; | 297 | int disable_esr; |
298 | 298 | ||
299 | int dest_logical; | 299 | int dest_logical; |
300 | unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); | 300 | unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); |
301 | unsigned long (*check_apicid_present)(int apicid); | 301 | unsigned long (*check_apicid_present)(int apicid); |
302 | 302 | ||
303 | void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); | 303 | void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); |
304 | void (*init_apic_ldr)(void); | 304 | void (*init_apic_ldr)(void); |
305 | 305 | ||
306 | physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); | 306 | void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); |
307 | 307 | ||
308 | void (*setup_apic_routing)(void); | 308 | void (*setup_apic_routing)(void); |
309 | int (*multi_timer_check)(int apic, int irq); | 309 | int (*multi_timer_check)(int apic, int irq); |
310 | int (*apicid_to_node)(int logical_apicid); | 310 | int (*apicid_to_node)(int logical_apicid); |
311 | int (*cpu_to_logical_apicid)(int cpu); | 311 | int (*cpu_to_logical_apicid)(int cpu); |
312 | int (*cpu_present_to_apicid)(int mps_cpu); | 312 | int (*cpu_present_to_apicid)(int mps_cpu); |
313 | physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); | 313 | void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); |
314 | void (*setup_portio_remap)(void); | 314 | void (*setup_portio_remap)(void); |
315 | int (*check_phys_apicid_present)(int phys_apicid); | 315 | int (*check_phys_apicid_present)(int phys_apicid); |
316 | void (*enable_apic_mode)(void); | 316 | void (*enable_apic_mode)(void); |
@@ -488,6 +488,8 @@ static inline unsigned int read_apic_id(void) | |||
488 | 488 | ||
489 | extern void default_setup_apic_routing(void); | 489 | extern void default_setup_apic_routing(void); |
490 | 490 | ||
491 | extern struct apic apic_noop; | ||
492 | |||
491 | #ifdef CONFIG_X86_32 | 493 | #ifdef CONFIG_X86_32 |
492 | 494 | ||
493 | extern struct apic apic_default; | 495 | extern struct apic apic_default; |
@@ -532,9 +534,9 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | |||
532 | return (unsigned int)(mask1 & mask2 & mask3); | 534 | return (unsigned int)(mask1 & mask2 & mask3); |
533 | } | 535 | } |
534 | 536 | ||
535 | static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid) | 537 | static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) |
536 | { | 538 | { |
537 | return physid_isset(apicid, bitmap); | 539 | return physid_isset(apicid, *map); |
538 | } | 540 | } |
539 | 541 | ||
540 | static inline unsigned long default_check_apicid_present(int bit) | 542 | static inline unsigned long default_check_apicid_present(int bit) |
@@ -542,9 +544,9 @@ static inline unsigned long default_check_apicid_present(int bit) | |||
542 | return physid_isset(bit, phys_cpu_present_map); | 544 | return physid_isset(bit, phys_cpu_present_map); |
543 | } | 545 | } |
544 | 546 | ||
545 | static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map) | 547 | static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
546 | { | 548 | { |
547 | return phys_map; | 549 | *retmap = *phys_map; |
548 | } | 550 | } |
549 | 551 | ||
550 | /* Mapping from cpu number to logical apicid */ | 552 | /* Mapping from cpu number to logical apicid */ |
@@ -583,11 +585,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu); | |||
583 | extern int default_check_phys_apicid_present(int phys_apicid); | 585 | extern int default_check_phys_apicid_present(int phys_apicid); |
584 | #endif | 586 | #endif |
585 | 587 | ||
586 | static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid) | ||
587 | { | ||
588 | return physid_mask_of_physid(phys_apicid); | ||
589 | } | ||
590 | |||
591 | #endif /* CONFIG_X86_LOCAL_APIC */ | 588 | #endif /* CONFIG_X86_LOCAL_APIC */ |
592 | 589 | ||
593 | #ifdef CONFIG_X86_32 | 590 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 3b62da926de9..7fe3b3060f08 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -11,6 +11,12 @@ | |||
11 | #define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 | 11 | #define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 |
12 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | 12 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 |
13 | 13 | ||
14 | /* | ||
15 | * This is the IO-APIC register space as specified | ||
16 | * by Intel docs: | ||
17 | */ | ||
18 | #define IO_APIC_SLOT_SIZE 1024 | ||
19 | |||
14 | #define APIC_ID 0x20 | 20 | #define APIC_ID 0x20 |
15 | 21 | ||
16 | #define APIC_LVR 0x30 | 22 | #define APIC_LVR 0x30 |
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h deleted file mode 100644 index 82f613c607ce..000000000000 --- a/arch/x86/include/asm/apicnum.h +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | #ifndef _ASM_X86_APICNUM_H | ||
2 | #define _ASM_X86_APICNUM_H | ||
3 | |||
4 | /* define MAX_IO_APICS */ | ||
5 | #ifdef CONFIG_X86_32 | ||
6 | # define MAX_IO_APICS 64 | ||
7 | #else | ||
8 | # define MAX_IO_APICS 128 | ||
9 | # define MAX_LOCAL_APIC 32768 | ||
10 | #endif | ||
11 | |||
12 | #endif /* _ASM_X86_APICNUM_H */ | ||
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index d9cf1cd156d2..f654d1bb17fb 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h | |||
@@ -22,14 +22,14 @@ do { \ | |||
22 | ".popsection" \ | 22 | ".popsection" \ |
23 | : : "i" (__FILE__), "i" (__LINE__), \ | 23 | : : "i" (__FILE__), "i" (__LINE__), \ |
24 | "i" (sizeof(struct bug_entry))); \ | 24 | "i" (sizeof(struct bug_entry))); \ |
25 | for (;;) ; \ | 25 | unreachable(); \ |
26 | } while (0) | 26 | } while (0) |
27 | 27 | ||
28 | #else | 28 | #else |
29 | #define BUG() \ | 29 | #define BUG() \ |
30 | do { \ | 30 | do { \ |
31 | asm volatile("ud2"); \ | 31 | asm volatile("ud2"); \ |
32 | for (;;) ; \ | 32 | unreachable(); \ |
33 | } while (0) | 33 | } while (0) |
34 | #endif | 34 | #endif |
35 | 35 | ||
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h index b03bedb62aa7..0918654305af 100644 --- a/arch/x86/include/asm/calgary.h +++ b/arch/x86/include/asm/calgary.h | |||
@@ -62,10 +62,8 @@ struct cal_chipset_ops { | |||
62 | extern int use_calgary; | 62 | extern int use_calgary; |
63 | 63 | ||
64 | #ifdef CONFIG_CALGARY_IOMMU | 64 | #ifdef CONFIG_CALGARY_IOMMU |
65 | extern int calgary_iommu_init(void); | ||
66 | extern void detect_calgary(void); | 65 | extern void detect_calgary(void); |
67 | #else | 66 | #else |
68 | static inline int calgary_iommu_init(void) { return 1; } | ||
69 | static inline void detect_calgary(void) { return; } | 67 | static inline void detect_calgary(void) { return; } |
70 | #endif | 68 | #endif |
71 | 69 | ||
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index ee1931be6593..ffb9bb6b6c37 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h | |||
@@ -8,14 +8,50 @@ | |||
8 | * you need to test for the feature in boot_cpu_data. | 8 | * you need to test for the feature in boot_cpu_data. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define xchg(ptr, v) \ | 11 | extern void __xchg_wrong_size(void); |
12 | ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr)))) | 12 | |
13 | /* | ||
14 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | ||
15 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | ||
16 | * but generally the primitive is invalid, *ptr is output argument. --ANK | ||
17 | */ | ||
13 | 18 | ||
14 | struct __xchg_dummy { | 19 | struct __xchg_dummy { |
15 | unsigned long a[100]; | 20 | unsigned long a[100]; |
16 | }; | 21 | }; |
17 | #define __xg(x) ((struct __xchg_dummy *)(x)) | 22 | #define __xg(x) ((struct __xchg_dummy *)(x)) |
18 | 23 | ||
24 | #define __xchg(x, ptr, size) \ | ||
25 | ({ \ | ||
26 | __typeof(*(ptr)) __x = (x); \ | ||
27 | switch (size) { \ | ||
28 | case 1: \ | ||
29 | asm volatile("xchgb %b0,%1" \ | ||
30 | : "=q" (__x) \ | ||
31 | : "m" (*__xg(ptr)), "0" (__x) \ | ||
32 | : "memory"); \ | ||
33 | break; \ | ||
34 | case 2: \ | ||
35 | asm volatile("xchgw %w0,%1" \ | ||
36 | : "=r" (__x) \ | ||
37 | : "m" (*__xg(ptr)), "0" (__x) \ | ||
38 | : "memory"); \ | ||
39 | break; \ | ||
40 | case 4: \ | ||
41 | asm volatile("xchgl %0,%1" \ | ||
42 | : "=r" (__x) \ | ||
43 | : "m" (*__xg(ptr)), "0" (__x) \ | ||
44 | : "memory"); \ | ||
45 | break; \ | ||
46 | default: \ | ||
47 | __xchg_wrong_size(); \ | ||
48 | } \ | ||
49 | __x; \ | ||
50 | }) | ||
51 | |||
52 | #define xchg(ptr, v) \ | ||
53 | __xchg((v), (ptr), sizeof(*ptr)) | ||
54 | |||
19 | /* | 55 | /* |
20 | * The semantics of XCHGCMP8B are a bit strange, this is why | 56 | * The semantics of XCHGCMP8B are a bit strange, this is why |
21 | * there is a loop and the loading of %%eax and %%edx has to | 57 | * there is a loop and the loading of %%eax and %%edx has to |
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr, | |||
71 | (unsigned int)((value) >> 32)) \ | 107 | (unsigned int)((value) >> 32)) \ |
72 | : __set_64bit(ptr, ll_low((value)), ll_high((value)))) | 108 | : __set_64bit(ptr, ll_low((value)), ll_high((value)))) |
73 | 109 | ||
74 | /* | 110 | extern void __cmpxchg_wrong_size(void); |
75 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | ||
76 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | ||
77 | * but generally the primitive is invalid, *ptr is output argument. --ANK | ||
78 | */ | ||
79 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | ||
80 | int size) | ||
81 | { | ||
82 | switch (size) { | ||
83 | case 1: | ||
84 | asm volatile("xchgb %b0,%1" | ||
85 | : "=q" (x) | ||
86 | : "m" (*__xg(ptr)), "0" (x) | ||
87 | : "memory"); | ||
88 | break; | ||
89 | case 2: | ||
90 | asm volatile("xchgw %w0,%1" | ||
91 | : "=r" (x) | ||
92 | : "m" (*__xg(ptr)), "0" (x) | ||
93 | : "memory"); | ||
94 | break; | ||
95 | case 4: | ||
96 | asm volatile("xchgl %0,%1" | ||
97 | : "=r" (x) | ||
98 | : "m" (*__xg(ptr)), "0" (x) | ||
99 | : "memory"); | ||
100 | break; | ||
101 | } | ||
102 | return x; | ||
103 | } | ||
104 | 111 | ||
105 | /* | 112 | /* |
106 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | 113 | * Atomic compare and exchange. Compare OLD with MEM, if identical, |
107 | * store NEW in MEM. Return the initial value in MEM. Success is | 114 | * store NEW in MEM. Return the initial value in MEM. Success is |
108 | * indicated by comparing RETURN with OLD. | 115 | * indicated by comparing RETURN with OLD. |
109 | */ | 116 | */ |
117 | #define __raw_cmpxchg(ptr, old, new, size, lock) \ | ||
118 | ({ \ | ||
119 | __typeof__(*(ptr)) __ret; \ | ||
120 | __typeof__(*(ptr)) __old = (old); \ | ||
121 | __typeof__(*(ptr)) __new = (new); \ | ||
122 | switch (size) { \ | ||
123 | case 1: \ | ||
124 | asm volatile(lock "cmpxchgb %b1,%2" \ | ||
125 | : "=a"(__ret) \ | ||
126 | : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
127 | : "memory"); \ | ||
128 | break; \ | ||
129 | case 2: \ | ||
130 | asm volatile(lock "cmpxchgw %w1,%2" \ | ||
131 | : "=a"(__ret) \ | ||
132 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
133 | : "memory"); \ | ||
134 | break; \ | ||
135 | case 4: \ | ||
136 | asm volatile(lock "cmpxchgl %1,%2" \ | ||
137 | : "=a"(__ret) \ | ||
138 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
139 | : "memory"); \ | ||
140 | break; \ | ||
141 | default: \ | ||
142 | __cmpxchg_wrong_size(); \ | ||
143 | } \ | ||
144 | __ret; \ | ||
145 | }) | ||
146 | |||
147 | #define __cmpxchg(ptr, old, new, size) \ | ||
148 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) | ||
149 | |||
150 | #define __sync_cmpxchg(ptr, old, new, size) \ | ||
151 | __raw_cmpxchg((ptr), (old), (new), (size), "lock; ") | ||
152 | |||
153 | #define __cmpxchg_local(ptr, old, new, size) \ | ||
154 | __raw_cmpxchg((ptr), (old), (new), (size), "") | ||
110 | 155 | ||
111 | #ifdef CONFIG_X86_CMPXCHG | 156 | #ifdef CONFIG_X86_CMPXCHG |
112 | #define __HAVE_ARCH_CMPXCHG 1 | 157 | #define __HAVE_ARCH_CMPXCHG 1 |
113 | #define cmpxchg(ptr, o, n) \ | 158 | |
114 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | 159 | #define cmpxchg(ptr, old, new) \ |
115 | (unsigned long)(n), \ | 160 | __cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
116 | sizeof(*(ptr)))) | 161 | |
117 | #define sync_cmpxchg(ptr, o, n) \ | 162 | #define sync_cmpxchg(ptr, old, new) \ |
118 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ | 163 | __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
119 | (unsigned long)(n), \ | 164 | |
120 | sizeof(*(ptr)))) | 165 | #define cmpxchg_local(ptr, old, new) \ |
121 | #define cmpxchg_local(ptr, o, n) \ | 166 | __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) |
122 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
123 | (unsigned long)(n), \ | ||
124 | sizeof(*(ptr)))) | ||
125 | #endif | 167 | #endif |
126 | 168 | ||
127 | #ifdef CONFIG_X86_CMPXCHG64 | 169 | #ifdef CONFIG_X86_CMPXCHG64 |
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
133 | (unsigned long long)(n))) | 175 | (unsigned long long)(n))) |
134 | #endif | 176 | #endif |
135 | 177 | ||
136 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | ||
137 | unsigned long new, int size) | ||
138 | { | ||
139 | unsigned long prev; | ||
140 | switch (size) { | ||
141 | case 1: | ||
142 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" | ||
143 | : "=a"(prev) | ||
144 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
145 | : "memory"); | ||
146 | return prev; | ||
147 | case 2: | ||
148 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" | ||
149 | : "=a"(prev) | ||
150 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
151 | : "memory"); | ||
152 | return prev; | ||
153 | case 4: | ||
154 | asm volatile(LOCK_PREFIX "cmpxchgl %1,%2" | ||
155 | : "=a"(prev) | ||
156 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
157 | : "memory"); | ||
158 | return prev; | ||
159 | } | ||
160 | return old; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * Always use locked operations when touching memory shared with a | ||
165 | * hypervisor, since the system may be SMP even if the guest kernel | ||
166 | * isn't. | ||
167 | */ | ||
168 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | ||
169 | unsigned long old, | ||
170 | unsigned long new, int size) | ||
171 | { | ||
172 | unsigned long prev; | ||
173 | switch (size) { | ||
174 | case 1: | ||
175 | asm volatile("lock; cmpxchgb %b1,%2" | ||
176 | : "=a"(prev) | ||
177 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
178 | : "memory"); | ||
179 | return prev; | ||
180 | case 2: | ||
181 | asm volatile("lock; cmpxchgw %w1,%2" | ||
182 | : "=a"(prev) | ||
183 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
184 | : "memory"); | ||
185 | return prev; | ||
186 | case 4: | ||
187 | asm volatile("lock; cmpxchgl %1,%2" | ||
188 | : "=a"(prev) | ||
189 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
190 | : "memory"); | ||
191 | return prev; | ||
192 | } | ||
193 | return old; | ||
194 | } | ||
195 | |||
196 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | ||
197 | unsigned long old, | ||
198 | unsigned long new, int size) | ||
199 | { | ||
200 | unsigned long prev; | ||
201 | switch (size) { | ||
202 | case 1: | ||
203 | asm volatile("cmpxchgb %b1,%2" | ||
204 | : "=a"(prev) | ||
205 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
206 | : "memory"); | ||
207 | return prev; | ||
208 | case 2: | ||
209 | asm volatile("cmpxchgw %w1,%2" | ||
210 | : "=a"(prev) | ||
211 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
212 | : "memory"); | ||
213 | return prev; | ||
214 | case 4: | ||
215 | asm volatile("cmpxchgl %1,%2" | ||
216 | : "=a"(prev) | ||
217 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
218 | : "memory"); | ||
219 | return prev; | ||
220 | } | ||
221 | return old; | ||
222 | } | ||
223 | |||
224 | static inline unsigned long long __cmpxchg64(volatile void *ptr, | 178 | static inline unsigned long long __cmpxchg64(volatile void *ptr, |
225 | unsigned long long old, | 179 | unsigned long long old, |
226 | unsigned long long new) | 180 | unsigned long long new) |
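The 32-bit rework above folds the three inline cmpxchg variants into a single __raw_cmpxchg() macro parameterized by the lock prefix, so cmpxchg(), sync_cmpxchg() and cmpxchg_local() now differ only in that argument and return a value of the pointed-to type. A minimal caller-side sketch of the classic compare-and-swap retry loop, with an illustrative counter that is not part of the patch:

	/* Hedged sketch: lock-free increment built on the cmpxchg() defined above.
	 * counter_inc() and its argument are illustrative, not from the patch. */
	static inline void counter_inc(unsigned int *counter)
	{
		unsigned int old, new;

		do {
			old = *counter;				/* snapshot the current value */
			new = old + 1;
		} while (cmpxchg(counter, old, new) != old);	/* retry on a lost race */
	}
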
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 52de72e0de8c..485ae415faec 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h | |||
@@ -3,9 +3,6 @@ | |||
3 | 3 | ||
4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ | 4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
5 | 5 | ||
6 | #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ | ||
7 | (ptr), sizeof(*(ptr)))) | ||
8 | |||
9 | #define __xg(x) ((volatile long *)(x)) | 6 | #define __xg(x) ((volatile long *)(x)) |
10 | 7 | ||
11 | static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | 8 | static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) |
@@ -15,167 +12,118 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | |||
15 | 12 | ||
16 | #define _set_64bit set_64bit | 13 | #define _set_64bit set_64bit |
17 | 14 | ||
15 | extern void __xchg_wrong_size(void); | ||
16 | extern void __cmpxchg_wrong_size(void); | ||
17 | |||
18 | /* | 18 | /* |
19 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | 19 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway |
20 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | 20 | * Note 2: xchg has side effect, so that attribute volatile is necessary, |
21 | * but generally the primitive is invalid, *ptr is output argument. --ANK | 21 | * but generally the primitive is invalid, *ptr is output argument. --ANK |
22 | */ | 22 | */ |
23 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | 23 | #define __xchg(x, ptr, size) \ |
24 | int size) | 24 | ({ \ |
25 | { | 25 | __typeof(*(ptr)) __x = (x); \ |
26 | switch (size) { | 26 | switch (size) { \ |
27 | case 1: | 27 | case 1: \ |
28 | asm volatile("xchgb %b0,%1" | 28 | asm volatile("xchgb %b0,%1" \ |
29 | : "=q" (x) | 29 | : "=q" (__x) \ |
30 | : "m" (*__xg(ptr)), "0" (x) | 30 | : "m" (*__xg(ptr)), "0" (__x) \ |
31 | : "memory"); | 31 | : "memory"); \ |
32 | break; | 32 | break; \ |
33 | case 2: | 33 | case 2: \ |
34 | asm volatile("xchgw %w0,%1" | 34 | asm volatile("xchgw %w0,%1" \ |
35 | : "=r" (x) | 35 | : "=r" (__x) \ |
36 | : "m" (*__xg(ptr)), "0" (x) | 36 | : "m" (*__xg(ptr)), "0" (__x) \ |
37 | : "memory"); | 37 | : "memory"); \ |
38 | break; | 38 | break; \ |
39 | case 4: | 39 | case 4: \ |
40 | asm volatile("xchgl %k0,%1" | 40 | asm volatile("xchgl %k0,%1" \ |
41 | : "=r" (x) | 41 | : "=r" (__x) \ |
42 | : "m" (*__xg(ptr)), "0" (x) | 42 | : "m" (*__xg(ptr)), "0" (__x) \ |
43 | : "memory"); | 43 | : "memory"); \ |
44 | break; | 44 | break; \ |
45 | case 8: | 45 | case 8: \ |
46 | asm volatile("xchgq %0,%1" | 46 | asm volatile("xchgq %0,%1" \ |
47 | : "=r" (x) | 47 | : "=r" (__x) \ |
48 | : "m" (*__xg(ptr)), "0" (x) | 48 | : "m" (*__xg(ptr)), "0" (__x) \ |
49 | : "memory"); | 49 | : "memory"); \ |
50 | break; | 50 | break; \ |
51 | } | 51 | default: \ |
52 | return x; | 52 | __xchg_wrong_size(); \ |
53 | } | 53 | } \ |
54 | __x; \ | ||
55 | }) | ||
56 | |||
57 | #define xchg(ptr, v) \ | ||
58 | __xchg((v), (ptr), sizeof(*ptr)) | ||
59 | |||
60 | #define __HAVE_ARCH_CMPXCHG 1 | ||
54 | 61 | ||
55 | /* | 62 | /* |
56 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | 63 | * Atomic compare and exchange. Compare OLD with MEM, if identical, |
57 | * store NEW in MEM. Return the initial value in MEM. Success is | 64 | * store NEW in MEM. Return the initial value in MEM. Success is |
58 | * indicated by comparing RETURN with OLD. | 65 | * indicated by comparing RETURN with OLD. |
59 | */ | 66 | */ |
67 | #define __raw_cmpxchg(ptr, old, new, size, lock) \ | ||
68 | ({ \ | ||
69 | __typeof__(*(ptr)) __ret; \ | ||
70 | __typeof__(*(ptr)) __old = (old); \ | ||
71 | __typeof__(*(ptr)) __new = (new); \ | ||
72 | switch (size) { \ | ||
73 | case 1: \ | ||
74 | asm volatile(lock "cmpxchgb %b1,%2" \ | ||
75 | : "=a"(__ret) \ | ||
76 | : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
77 | : "memory"); \ | ||
78 | break; \ | ||
79 | case 2: \ | ||
80 | asm volatile(lock "cmpxchgw %w1,%2" \ | ||
81 | : "=a"(__ret) \ | ||
82 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
83 | : "memory"); \ | ||
84 | break; \ | ||
85 | case 4: \ | ||
86 | asm volatile(lock "cmpxchgl %k1,%2" \ | ||
87 | : "=a"(__ret) \ | ||
88 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
89 | : "memory"); \ | ||
90 | break; \ | ||
91 | case 8: \ | ||
92 | asm volatile(lock "cmpxchgq %1,%2" \ | ||
93 | : "=a"(__ret) \ | ||
94 | : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ | ||
95 | : "memory"); \ | ||
96 | break; \ | ||
97 | default: \ | ||
98 | __cmpxchg_wrong_size(); \ | ||
99 | } \ | ||
100 | __ret; \ | ||
101 | }) | ||
60 | 102 | ||
61 | #define __HAVE_ARCH_CMPXCHG 1 | 103 | #define __cmpxchg(ptr, old, new, size) \ |
104 | __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) | ||
62 | 105 | ||
63 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 106 | #define __sync_cmpxchg(ptr, old, new, size) \ |
64 | unsigned long new, int size) | 107 | __raw_cmpxchg((ptr), (old), (new), (size), "lock; ") |
65 | { | ||
66 | unsigned long prev; | ||
67 | switch (size) { | ||
68 | case 1: | ||
69 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" | ||
70 | : "=a"(prev) | ||
71 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
72 | : "memory"); | ||
73 | return prev; | ||
74 | case 2: | ||
75 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" | ||
76 | : "=a"(prev) | ||
77 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
78 | : "memory"); | ||
79 | return prev; | ||
80 | case 4: | ||
81 | asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" | ||
82 | : "=a"(prev) | ||
83 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
84 | : "memory"); | ||
85 | return prev; | ||
86 | case 8: | ||
87 | asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" | ||
88 | : "=a"(prev) | ||
89 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
90 | : "memory"); | ||
91 | return prev; | ||
92 | } | ||
93 | return old; | ||
94 | } | ||
95 | 108 | ||
96 | /* | 109 | #define __cmpxchg_local(ptr, old, new, size) \ |
97 | * Always use locked operations when touching memory shared with a | 110 | __raw_cmpxchg((ptr), (old), (new), (size), "") |
98 | * hypervisor, since the system may be SMP even if the guest kernel | ||
99 | * isn't. | ||
100 | */ | ||
101 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | ||
102 | unsigned long old, | ||
103 | unsigned long new, int size) | ||
104 | { | ||
105 | unsigned long prev; | ||
106 | switch (size) { | ||
107 | case 1: | ||
108 | asm volatile("lock; cmpxchgb %b1,%2" | ||
109 | : "=a"(prev) | ||
110 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
111 | : "memory"); | ||
112 | return prev; | ||
113 | case 2: | ||
114 | asm volatile("lock; cmpxchgw %w1,%2" | ||
115 | : "=a"(prev) | ||
116 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
117 | : "memory"); | ||
118 | return prev; | ||
119 | case 4: | ||
120 | asm volatile("lock; cmpxchgl %1,%2" | ||
121 | : "=a"(prev) | ||
122 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
123 | : "memory"); | ||
124 | return prev; | ||
125 | } | ||
126 | return old; | ||
127 | } | ||
128 | 111 | ||
129 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | 112 | #define cmpxchg(ptr, old, new) \ |
130 | unsigned long old, | 113 | __cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
131 | unsigned long new, int size) | 114 | |
132 | { | 115 | #define sync_cmpxchg(ptr, old, new) \ |
133 | unsigned long prev; | 116 | __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
134 | switch (size) { | 117 | |
135 | case 1: | 118 | #define cmpxchg_local(ptr, old, new) \ |
136 | asm volatile("cmpxchgb %b1,%2" | 119 | __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) |
137 | : "=a"(prev) | ||
138 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
139 | : "memory"); | ||
140 | return prev; | ||
141 | case 2: | ||
142 | asm volatile("cmpxchgw %w1,%2" | ||
143 | : "=a"(prev) | ||
144 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
145 | : "memory"); | ||
146 | return prev; | ||
147 | case 4: | ||
148 | asm volatile("cmpxchgl %k1,%2" | ||
149 | : "=a"(prev) | ||
150 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
151 | : "memory"); | ||
152 | return prev; | ||
153 | case 8: | ||
154 | asm volatile("cmpxchgq %1,%2" | ||
155 | : "=a"(prev) | ||
156 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | ||
157 | : "memory"); | ||
158 | return prev; | ||
159 | } | ||
160 | return old; | ||
161 | } | ||
162 | 120 | ||
163 | #define cmpxchg(ptr, o, n) \ | ||
164 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | ||
165 | (unsigned long)(n), sizeof(*(ptr)))) | ||
166 | #define cmpxchg64(ptr, o, n) \ | 121 | #define cmpxchg64(ptr, o, n) \ |
167 | ({ \ | 122 | ({ \ |
168 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 123 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
169 | cmpxchg((ptr), (o), (n)); \ | 124 | cmpxchg((ptr), (o), (n)); \ |
170 | }) | 125 | }) |
171 | #define cmpxchg_local(ptr, o, n) \ | 126 | |
172 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
173 | (unsigned long)(n), \ | ||
174 | sizeof(*(ptr)))) | ||
175 | #define sync_cmpxchg(ptr, o, n) \ | ||
176 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ | ||
177 | (unsigned long)(n), \ | ||
178 | sizeof(*(ptr)))) | ||
179 | #define cmpxchg64_local(ptr, o, n) \ | 127 | #define cmpxchg64_local(ptr, o, n) \ |
180 | ({ \ | 128 | ({ \ |
181 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 129 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
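On 64-bit the same macro rework applies, and cmpxchg64()/cmpxchg64_local() stay thin wrappers whose BUILD_BUG_ON() rejects any operand that is not exactly 8 bytes. A hedged sketch of what that buys at compile time; the sequence word and wrapper below are illustrative:

	/* Hedged sketch: cmpxchg64() only builds for 8-byte objects. */
	static u64 example_seq;				/* illustrative 64-bit word */

	static inline u64 seq_advance(u64 old, u64 new)
	{
		/* a 4-byte operand here would trip the BUILD_BUG_ON() above */
		return cmpxchg64(&example_seq, old, new);	/* sizeof == 8: OK */
	}
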
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 3ea6f37be9e2..8240f76b531e 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #define DR_TRAP1 (0x2) /* db1 */ | 18 | #define DR_TRAP1 (0x2) /* db1 */ |
19 | #define DR_TRAP2 (0x4) /* db2 */ | 19 | #define DR_TRAP2 (0x4) /* db2 */ |
20 | #define DR_TRAP3 (0x8) /* db3 */ | 20 | #define DR_TRAP3 (0x8) /* db3 */ |
21 | #define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3) | ||
21 | 22 | ||
22 | #define DR_STEP (0x4000) /* single-step */ | 23 | #define DR_STEP (0x4000) /* single-step */ |
23 | #define DR_SWITCH (0x8000) /* task switch */ | 24 | #define DR_SWITCH (0x8000) /* task switch */ |
@@ -49,6 +50,8 @@ | |||
49 | 50 | ||
50 | #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ | 51 | #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ |
51 | #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ | 52 | #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ |
53 | #define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */ | ||
54 | #define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */ | ||
52 | #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ | 55 | #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ |
53 | 56 | ||
54 | #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ | 57 | #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ |
@@ -67,4 +70,34 @@ | |||
67 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ | 70 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ |
68 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ | 71 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ |
69 | 72 | ||
73 | /* | ||
74 | * HW breakpoint additions | ||
75 | */ | ||
76 | #ifdef __KERNEL__ | ||
77 | |||
78 | DECLARE_PER_CPU(unsigned long, cpu_dr7); | ||
79 | |||
80 | static inline void hw_breakpoint_disable(void) | ||
81 | { | ||
82 | /* Zero the control register for HW Breakpoint */ | ||
83 | set_debugreg(0UL, 7); | ||
84 | |||
85 | /* Zero-out the individual HW breakpoint address registers */ | ||
86 | set_debugreg(0UL, 0); | ||
87 | set_debugreg(0UL, 1); | ||
88 | set_debugreg(0UL, 2); | ||
89 | set_debugreg(0UL, 3); | ||
90 | } | ||
91 | |||
92 | static inline int hw_breakpoint_active(void) | ||
93 | { | ||
94 | return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; | ||
95 | } | ||
96 | |||
97 | extern void aout_dump_debugregs(struct user *dump); | ||
98 | |||
99 | extern void hw_breakpoint_restore(void); | ||
100 | |||
101 | #endif /* __KERNEL__ */ | ||
102 | |||
70 | #endif /* _ASM_X86_DEBUGREG_H */ | 103 | #endif /* _ASM_X86_DEBUGREG_H */ |
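The new __KERNEL__ block gives callers a per-CPU shadow of DR7 (cpu_dr7) plus helpers to quiesce and query the hardware breakpoint registers. A hedged sketch of how a suspend-style path might pair them; the wrapper name is illustrative, and the matching resume side is assumed to go through hw_breakpoint_restore():

	/* Hedged sketch: quiesce debug registers only if any are armed. */
	static void quiesce_debug_regs_example(void)
	{
		if (hw_breakpoint_active())		/* any enable bit set in cpu_dr7? */
			hw_breakpoint_disable();	/* clears DR7 and DR0-DR3 */
	}
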
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index cee34e9ca45b..029f230ab637 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h | |||
@@ -8,7 +8,7 @@ struct dev_archdata { | |||
8 | #ifdef CONFIG_X86_64 | 8 | #ifdef CONFIG_X86_64 |
9 | struct dma_map_ops *dma_ops; | 9 | struct dma_map_ops *dma_ops; |
10 | #endif | 10 | #endif |
11 | #ifdef CONFIG_DMAR | 11 | #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU) |
12 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
13 | #endif | 13 | #endif |
14 | }; | 14 | }; |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 6a25d5d42836..0f6c02f3b7d4 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -20,7 +20,8 @@ | |||
20 | # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) | 20 | # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | extern dma_addr_t bad_dma_address; | 23 | #define DMA_ERROR_CODE 0 |
24 | |||
24 | extern int iommu_merge; | 25 | extern int iommu_merge; |
25 | extern struct device x86_dma_fallback_dev; | 26 | extern struct device x86_dma_fallback_dev; |
26 | extern int panic_on_overflow; | 27 | extern int panic_on_overflow; |
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
48 | if (ops->mapping_error) | 49 | if (ops->mapping_error) |
49 | return ops->mapping_error(dev, dma_addr); | 50 | return ops->mapping_error(dev, dma_addr); |
50 | 51 | ||
51 | return (dma_addr == bad_dma_address); | 52 | return (dma_addr == DMA_ERROR_CODE); |
52 | } | 53 | } |
53 | 54 | ||
54 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 55 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
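Replacing bad_dma_address with the constant DMA_ERROR_CODE (0) does not change the driver-facing contract: failed mappings are still detected through dma_mapping_error(). A hedged sketch of the usual pattern, with illustrative names:

	/* Hedged sketch: mapping-failure check; dev/buf/len are illustrative. */
	static int example_map(struct device *dev, void *buf, size_t len,
			       dma_addr_t *out)
	{
		dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))	/* handle == DMA_ERROR_CODE */
			return -ENOMEM;
		*out = handle;
		return 0;
	}
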
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 6cfdafa409d8..4ac5b0f33fc1 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h | |||
@@ -35,8 +35,7 @@ extern int gart_iommu_aperture_allowed; | |||
35 | extern int gart_iommu_aperture_disabled; | 35 | extern int gart_iommu_aperture_disabled; |
36 | 36 | ||
37 | extern void early_gart_iommu_check(void); | 37 | extern void early_gart_iommu_check(void); |
38 | extern void gart_iommu_init(void); | 38 | extern int gart_iommu_init(void); |
39 | extern void gart_iommu_shutdown(void); | ||
40 | extern void __init gart_parse_options(char *); | 39 | extern void __init gart_parse_options(char *); |
41 | extern void gart_iommu_hole_init(void); | 40 | extern void gart_iommu_hole_init(void); |
42 | 41 | ||
@@ -48,12 +47,6 @@ extern void gart_iommu_hole_init(void); | |||
48 | static inline void early_gart_iommu_check(void) | 47 | static inline void early_gart_iommu_check(void) |
49 | { | 48 | { |
50 | } | 49 | } |
51 | static inline void gart_iommu_init(void) | ||
52 | { | ||
53 | } | ||
54 | static inline void gart_iommu_shutdown(void) | ||
55 | { | ||
56 | } | ||
57 | static inline void gart_parse_options(char *options) | 50 | static inline void gart_parse_options(char *options) |
58 | { | 51 | { |
59 | } | 52 | } |
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 82e3e8f01043..108eb6fd1ae7 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h | |||
@@ -20,11 +20,11 @@ typedef struct { | |||
20 | unsigned int irq_call_count; | 20 | unsigned int irq_call_count; |
21 | unsigned int irq_tlb_count; | 21 | unsigned int irq_tlb_count; |
22 | #endif | 22 | #endif |
23 | #ifdef CONFIG_X86_MCE | 23 | #ifdef CONFIG_X86_THERMAL_VECTOR |
24 | unsigned int irq_thermal_count; | 24 | unsigned int irq_thermal_count; |
25 | # ifdef CONFIG_X86_MCE_THRESHOLD | 25 | #endif |
26 | #ifdef CONFIG_X86_MCE_THRESHOLD | ||
26 | unsigned int irq_threshold_count; | 27 | unsigned int irq_threshold_count; |
27 | # endif | ||
28 | #endif | 28 | #endif |
29 | } ____cacheline_aligned irq_cpustat_t; | 29 | } ____cacheline_aligned irq_cpustat_t; |
30 | 30 | ||
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h new file mode 100644 index 000000000000..0675a7c4c20e --- /dev/null +++ b/arch/x86/include/asm/hw_breakpoint.h | |||
@@ -0,0 +1,73 @@ | |||
1 | #ifndef _I386_HW_BREAKPOINT_H | ||
2 | #define _I386_HW_BREAKPOINT_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #define __ARCH_HW_BREAKPOINT_H | ||
6 | |||
7 | /* | ||
8 | * The name should probably be handled at a | ||
9 | * higher level, when dealing with the user | ||
10 | * (display/resolving) | ||
11 | */ | ||
12 | struct arch_hw_breakpoint { | ||
13 | char *name; /* Contains name of the symbol to set bkpt */ | ||
14 | unsigned long address; | ||
15 | u8 len; | ||
16 | u8 type; | ||
17 | }; | ||
18 | |||
19 | #include <linux/kdebug.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/list.h> | ||
22 | |||
23 | /* Available HW breakpoint length encodings */ | ||
24 | #define X86_BREAKPOINT_LEN_1 0x40 | ||
25 | #define X86_BREAKPOINT_LEN_2 0x44 | ||
26 | #define X86_BREAKPOINT_LEN_4 0x4c | ||
27 | #define X86_BREAKPOINT_LEN_EXECUTE 0x40 | ||
28 | |||
29 | #ifdef CONFIG_X86_64 | ||
30 | #define X86_BREAKPOINT_LEN_8 0x48 | ||
31 | #endif | ||
32 | |||
33 | /* Available HW breakpoint type encodings */ | ||
34 | |||
35 | /* trigger on instruction execute */ | ||
36 | #define X86_BREAKPOINT_EXECUTE 0x80 | ||
37 | /* trigger on memory write */ | ||
38 | #define X86_BREAKPOINT_WRITE 0x81 | ||
39 | /* trigger on memory read or write */ | ||
40 | #define X86_BREAKPOINT_RW 0x83 | ||
41 | |||
42 | /* Total number of available HW breakpoint registers */ | ||
43 | #define HBP_NUM 4 | ||
44 | |||
45 | struct perf_event; | ||
46 | struct pmu; | ||
47 | |||
48 | extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len); | ||
49 | extern int arch_validate_hwbkpt_settings(struct perf_event *bp, | ||
50 | struct task_struct *tsk); | ||
51 | extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, | ||
52 | unsigned long val, void *data); | ||
53 | |||
54 | |||
55 | int arch_install_hw_breakpoint(struct perf_event *bp); | ||
56 | void arch_uninstall_hw_breakpoint(struct perf_event *bp); | ||
57 | void hw_breakpoint_pmu_read(struct perf_event *bp); | ||
58 | void hw_breakpoint_pmu_unthrottle(struct perf_event *bp); | ||
59 | |||
60 | extern void | ||
61 | arch_fill_perf_breakpoint(struct perf_event *bp); | ||
62 | |||
63 | unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type); | ||
64 | int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type); | ||
65 | |||
66 | extern int arch_bp_generic_fields(int x86_len, int x86_type, | ||
67 | int *gen_len, int *gen_type); | ||
68 | |||
69 | extern struct pmu perf_ops_bp; | ||
70 | |||
71 | #endif /* __KERNEL__ */ | ||
72 | #endif /* _I386_HW_BREAKPOINT_H */ | ||
73 | |||
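The new header centres on the four debug-register slots (HBP_NUM) and the X86_BREAKPOINT_* length/type encodings, with encode_dr7()/decode_dr7() translating between a slot's settings and its DR7 bits. A hedged round-trip sketch, assuming those helpers take and return the encodings defined above; the slot number and values are illustrative:

	/* Hedged sketch: program slot 0 as a 4-byte write watchpoint, read it back. */
	static void dr7_roundtrip_example(void)
	{
		unsigned long dr7 = 0;
		unsigned int len, type;

		dr7 |= encode_dr7(0, X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE);

		/* a non-zero return means slot 0 has an enable bit set */
		if (decode_dr7(dr7, 0, &len, &type))
			pr_debug("slot 0: len=%#x type=%#x\n", len, type);
	}
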
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index ba180d93b08c..6e124269fd4b 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -79,14 +79,32 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
79 | int ioapic, int ioapic_pin, | 79 | int ioapic, int ioapic_pin, |
80 | int trigger, int polarity) | 80 | int trigger, int polarity) |
81 | { | 81 | { |
82 | irq_attr->ioapic = ioapic; | 82 | irq_attr->ioapic = ioapic; |
83 | irq_attr->ioapic_pin = ioapic_pin; | 83 | irq_attr->ioapic_pin = ioapic_pin; |
84 | irq_attr->trigger = trigger; | 84 | irq_attr->trigger = trigger; |
85 | irq_attr->polarity = polarity; | 85 | irq_attr->polarity = polarity; |
86 | } | 86 | } |
87 | 87 | ||
88 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, | 88 | /* |
89 | struct io_apic_irq_attr *irq_attr); | 89 | * This is performance-critical, we want to do it O(1) |
90 | * | ||
91 | * Most irqs are mapped 1:1 with pins. | ||
92 | */ | ||
93 | struct irq_cfg { | ||
94 | struct irq_pin_list *irq_2_pin; | ||
95 | cpumask_var_t domain; | ||
96 | cpumask_var_t old_domain; | ||
97 | u8 vector; | ||
98 | u8 move_in_progress : 1; | ||
99 | }; | ||
100 | |||
101 | extern struct irq_cfg *irq_cfg(unsigned int); | ||
102 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | ||
103 | extern void send_cleanup_vector(struct irq_cfg *); | ||
104 | |||
105 | struct irq_desc; | ||
106 | extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *); | ||
107 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | ||
90 | extern void setup_ioapic_dest(void); | 108 | extern void setup_ioapic_dest(void); |
91 | 109 | ||
92 | extern void enable_IO_APIC(void); | 110 | extern void enable_IO_APIC(void); |
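Moving struct irq_cfg and the vector-assignment helpers into this header exposes the per-IRQ routing state (pin list, domain masks, allocated vector) outside io_apic.c. A hedged sketch of the intended pairing of irq_cfg() and assign_irq_vector(); locking and error handling are omitted and the wrapper is illustrative:

	/* Hedged sketch: re-target an IRQ's vector toward a CPU mask. */
	static int route_irq_example(unsigned int irq, const struct cpumask *mask)
	{
		struct irq_cfg *cfg = irq_cfg(irq);

		if (!cfg)
			return -EINVAL;
		return assign_irq_vector(irq, cfg, mask);	/* 0 on success */
	}
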
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h new file mode 100644 index 000000000000..205b063e3e32 --- /dev/null +++ b/arch/x86/include/asm/inat.h | |||
@@ -0,0 +1,220 @@ | |||
1 | #ifndef _ASM_X86_INAT_H | ||
2 | #define _ASM_X86_INAT_H | ||
3 | /* | ||
4 | * x86 instruction attributes | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | #include <asm/inat_types.h> | ||
24 | |||
25 | /* | ||
26 | * Internal bits. Don't use bitmasks directly, because these bits are | ||
27 | * unstable. You should use checking functions. | ||
28 | */ | ||
29 | |||
30 | #define INAT_OPCODE_TABLE_SIZE 256 | ||
31 | #define INAT_GROUP_TABLE_SIZE 8 | ||
32 | |||
33 | /* Legacy last prefixes */ | ||
34 | #define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ | ||
35 | #define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */ | ||
36 | #define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */ | ||
37 | /* Other Legacy prefixes */ | ||
38 | #define INAT_PFX_LOCK 4 /* 0xF0 */ | ||
39 | #define INAT_PFX_CS 5 /* 0x2E */ | ||
40 | #define INAT_PFX_DS 6 /* 0x3E */ | ||
41 | #define INAT_PFX_ES 7 /* 0x26 */ | ||
42 | #define INAT_PFX_FS 8 /* 0x64 */ | ||
43 | #define INAT_PFX_GS 9 /* 0x65 */ | ||
44 | #define INAT_PFX_SS 10 /* 0x36 */ | ||
45 | #define INAT_PFX_ADDRSZ 11 /* 0x67 */ | ||
46 | /* x86-64 REX prefix */ | ||
47 | #define INAT_PFX_REX 12 /* 0x4X */ | ||
48 | /* AVX VEX prefixes */ | ||
49 | #define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */ | ||
50 | #define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */ | ||
51 | |||
52 | #define INAT_LSTPFX_MAX 3 | ||
53 | #define INAT_LGCPFX_MAX 11 | ||
54 | |||
55 | /* Immediate size */ | ||
56 | #define INAT_IMM_BYTE 1 | ||
57 | #define INAT_IMM_WORD 2 | ||
58 | #define INAT_IMM_DWORD 3 | ||
59 | #define INAT_IMM_QWORD 4 | ||
60 | #define INAT_IMM_PTR 5 | ||
61 | #define INAT_IMM_VWORD32 6 | ||
62 | #define INAT_IMM_VWORD 7 | ||
63 | |||
64 | /* Legacy prefix */ | ||
65 | #define INAT_PFX_OFFS 0 | ||
66 | #define INAT_PFX_BITS 4 | ||
67 | #define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1) | ||
68 | #define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS) | ||
69 | /* Escape opcodes */ | ||
70 | #define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS) | ||
71 | #define INAT_ESC_BITS 2 | ||
72 | #define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1) | ||
73 | #define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS) | ||
74 | /* Group opcodes (1-16) */ | ||
75 | #define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS) | ||
76 | #define INAT_GRP_BITS 5 | ||
77 | #define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1) | ||
78 | #define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS) | ||
79 | /* Immediates */ | ||
80 | #define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS) | ||
81 | #define INAT_IMM_BITS 3 | ||
82 | #define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) | ||
83 | /* Flags */ | ||
84 | #define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) | ||
85 | #define INAT_MODRM (1 << (INAT_FLAG_OFFS)) | ||
86 | #define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1)) | ||
87 | #define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2)) | ||
88 | #define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3)) | ||
89 | #define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4)) | ||
90 | #define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5)) | ||
91 | #define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6)) | ||
92 | /* Attribute making macros for attribute tables */ | ||
93 | #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) | ||
94 | #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) | ||
95 | #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) | ||
96 | #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) | ||
97 | |||
98 | /* Attribute search APIs */ | ||
99 | extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); | ||
100 | extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, | ||
101 | insn_byte_t last_pfx, | ||
102 | insn_attr_t esc_attr); | ||
103 | extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, | ||
104 | insn_byte_t last_pfx, | ||
105 | insn_attr_t esc_attr); | ||
106 | extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, | ||
107 | insn_byte_t vex_m, | ||
108 | insn_byte_t vex_pp); | ||
109 | |||
110 | /* Attribute checking functions */ | ||
111 | static inline int inat_is_legacy_prefix(insn_attr_t attr) | ||
112 | { | ||
113 | attr &= INAT_PFX_MASK; | ||
114 | return attr && attr <= INAT_LGCPFX_MAX; | ||
115 | } | ||
116 | |||
117 | static inline int inat_is_address_size_prefix(insn_attr_t attr) | ||
118 | { | ||
119 | return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ; | ||
120 | } | ||
121 | |||
122 | static inline int inat_is_operand_size_prefix(insn_attr_t attr) | ||
123 | { | ||
124 | return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; | ||
125 | } | ||
126 | |||
127 | static inline int inat_is_rex_prefix(insn_attr_t attr) | ||
128 | { | ||
129 | return (attr & INAT_PFX_MASK) == INAT_PFX_REX; | ||
130 | } | ||
131 | |||
132 | static inline int inat_last_prefix_id(insn_attr_t attr) | ||
133 | { | ||
134 | if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX) | ||
135 | return 0; | ||
136 | else | ||
137 | return attr & INAT_PFX_MASK; | ||
138 | } | ||
139 | |||
140 | static inline int inat_is_vex_prefix(insn_attr_t attr) | ||
141 | { | ||
142 | attr &= INAT_PFX_MASK; | ||
143 | return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3; | ||
144 | } | ||
145 | |||
146 | static inline int inat_is_vex3_prefix(insn_attr_t attr) | ||
147 | { | ||
148 | return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3; | ||
149 | } | ||
150 | |||
151 | static inline int inat_is_escape(insn_attr_t attr) | ||
152 | { | ||
153 | return attr & INAT_ESC_MASK; | ||
154 | } | ||
155 | |||
156 | static inline int inat_escape_id(insn_attr_t attr) | ||
157 | { | ||
158 | return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS; | ||
159 | } | ||
160 | |||
161 | static inline int inat_is_group(insn_attr_t attr) | ||
162 | { | ||
163 | return attr & INAT_GRP_MASK; | ||
164 | } | ||
165 | |||
166 | static inline int inat_group_id(insn_attr_t attr) | ||
167 | { | ||
168 | return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS; | ||
169 | } | ||
170 | |||
171 | static inline int inat_group_common_attribute(insn_attr_t attr) | ||
172 | { | ||
173 | return attr & ~INAT_GRP_MASK; | ||
174 | } | ||
175 | |||
176 | static inline int inat_has_immediate(insn_attr_t attr) | ||
177 | { | ||
178 | return attr & INAT_IMM_MASK; | ||
179 | } | ||
180 | |||
181 | static inline int inat_immediate_size(insn_attr_t attr) | ||
182 | { | ||
183 | return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; | ||
184 | } | ||
185 | |||
186 | static inline int inat_has_modrm(insn_attr_t attr) | ||
187 | { | ||
188 | return attr & INAT_MODRM; | ||
189 | } | ||
190 | |||
191 | static inline int inat_is_force64(insn_attr_t attr) | ||
192 | { | ||
193 | return attr & INAT_FORCE64; | ||
194 | } | ||
195 | |||
196 | static inline int inat_has_second_immediate(insn_attr_t attr) | ||
197 | { | ||
198 | return attr & INAT_SCNDIMM; | ||
199 | } | ||
200 | |||
201 | static inline int inat_has_moffset(insn_attr_t attr) | ||
202 | { | ||
203 | return attr & INAT_MOFFSET; | ||
204 | } | ||
205 | |||
206 | static inline int inat_has_variant(insn_attr_t attr) | ||
207 | { | ||
208 | return attr & INAT_VARIANT; | ||
209 | } | ||
210 | |||
211 | static inline int inat_accept_vex(insn_attr_t attr) | ||
212 | { | ||
213 | return attr & INAT_VEXOK; | ||
214 | } | ||
215 | |||
216 | static inline int inat_must_vex(insn_attr_t attr) | ||
217 | { | ||
218 | return attr & INAT_VEXONLY; | ||
219 | } | ||
220 | #endif | ||
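The attribute word packs prefix id, escape id, group id, immediate size and flag bits into one insn_attr_t, so the INAT_MAKE_*() macros used by the generated tables and the inat_*() checkers above are two views of the same layout. A hedged self-check sketch; the opcode-group number is illustrative:

	/* Hedged sketch: build an attribute, then query it with the helpers above. */
	static void inat_layout_example(void)
	{
		insn_attr_t attr = INAT_MAKE_GROUP(5) | INAT_MAKE_IMM(INAT_IMM_BYTE);

		BUG_ON(!inat_is_group(attr));
		BUG_ON(inat_group_id(attr) != 5);
		BUG_ON(!inat_has_modrm(attr));		/* INAT_MAKE_GROUP() implies ModRM */
		BUG_ON(inat_immediate_size(attr) != INAT_IMM_BYTE);
	}
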
diff --git a/arch/x86/include/asm/inat_types.h b/arch/x86/include/asm/inat_types.h new file mode 100644 index 000000000000..cb3c20ce39cf --- /dev/null +++ b/arch/x86/include/asm/inat_types.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _ASM_X86_INAT_TYPES_H | ||
2 | #define _ASM_X86_INAT_TYPES_H | ||
3 | /* | ||
4 | * x86 instruction attributes | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /* Instruction attributes */ | ||
25 | typedef unsigned int insn_attr_t; | ||
26 | typedef unsigned char insn_byte_t; | ||
27 | typedef signed int insn_value_t; | ||
28 | |||
29 | #endif | ||
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h new file mode 100644 index 000000000000..96c2e0ad04ca --- /dev/null +++ b/arch/x86/include/asm/insn.h | |||
@@ -0,0 +1,184 @@ | |||
1 | #ifndef _ASM_X86_INSN_H | ||
2 | #define _ASM_X86_INSN_H | ||
3 | /* | ||
4 | * x86 instruction analysis | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright (C) IBM Corporation, 2009 | ||
21 | */ | ||
22 | |||
23 | /* insn_attr_t is defined in inat.h */ | ||
24 | #include <asm/inat.h> | ||
25 | |||
26 | struct insn_field { | ||
27 | union { | ||
28 | insn_value_t value; | ||
29 | insn_byte_t bytes[4]; | ||
30 | }; | ||
31 | /* !0 if we've run insn_get_xxx() for this field */ | ||
32 | unsigned char got; | ||
33 | unsigned char nbytes; | ||
34 | }; | ||
35 | |||
36 | struct insn { | ||
37 | struct insn_field prefixes; /* | ||
38 | * Prefixes | ||
39 | * prefixes.bytes[3]: last prefix | ||
40 | */ | ||
41 | struct insn_field rex_prefix; /* REX prefix */ | ||
42 | struct insn_field vex_prefix; /* VEX prefix */ | ||
43 | struct insn_field opcode; /* | ||
44 | * opcode.bytes[0]: opcode1 | ||
45 | * opcode.bytes[1]: opcode2 | ||
46 | * opcode.bytes[2]: opcode3 | ||
47 | */ | ||
48 | struct insn_field modrm; | ||
49 | struct insn_field sib; | ||
50 | struct insn_field displacement; | ||
51 | union { | ||
52 | struct insn_field immediate; | ||
53 | struct insn_field moffset1; /* for 64bit MOV */ | ||
54 | struct insn_field immediate1; /* for 64bit imm or off16/32 */ | ||
55 | }; | ||
56 | union { | ||
57 | struct insn_field moffset2; /* for 64bit MOV */ | ||
58 | struct insn_field immediate2; /* for 64bit imm or seg16 */ | ||
59 | }; | ||
60 | |||
61 | insn_attr_t attr; | ||
62 | unsigned char opnd_bytes; | ||
63 | unsigned char addr_bytes; | ||
64 | unsigned char length; | ||
65 | unsigned char x86_64; | ||
66 | |||
67 | const insn_byte_t *kaddr; /* kernel address of insn to analyze */ | ||
68 | const insn_byte_t *next_byte; | ||
69 | }; | ||
70 | |||
71 | #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) | ||
72 | #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) | ||
73 | #define X86_MODRM_RM(modrm) ((modrm) & 0x07) | ||
74 | |||
75 | #define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6) | ||
76 | #define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3) | ||
77 | #define X86_SIB_BASE(sib) ((sib) & 0x07) | ||
78 | |||
79 | #define X86_REX_W(rex) ((rex) & 8) | ||
80 | #define X86_REX_R(rex) ((rex) & 4) | ||
81 | #define X86_REX_X(rex) ((rex) & 2) | ||
82 | #define X86_REX_B(rex) ((rex) & 1) | ||
83 | |||
84 | /* VEX bit flags */ | ||
85 | #define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */ | ||
86 | #define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */ | ||
87 | #define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */ | ||
88 | #define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */ | ||
89 | #define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */ | ||
90 | /* VEX bit fields */ | ||
91 | #define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */ | ||
92 | #define X86_VEX2_M 1 /* VEX2.M always 1 */ | ||
93 | #define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */ | ||
94 | #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ | ||
95 | #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ | ||
96 | |||
97 | /* The last prefix is needed for two-byte and three-byte opcodes */ | ||
98 | static inline insn_byte_t insn_last_prefix(struct insn *insn) | ||
99 | { | ||
100 | return insn->prefixes.bytes[3]; | ||
101 | } | ||
102 | |||
103 | extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); | ||
104 | extern void insn_get_prefixes(struct insn *insn); | ||
105 | extern void insn_get_opcode(struct insn *insn); | ||
106 | extern void insn_get_modrm(struct insn *insn); | ||
107 | extern void insn_get_sib(struct insn *insn); | ||
108 | extern void insn_get_displacement(struct insn *insn); | ||
109 | extern void insn_get_immediate(struct insn *insn); | ||
110 | extern void insn_get_length(struct insn *insn); | ||
111 | |||
112 | /* Attribute will be determined after getting ModRM (for opcode groups) */ | ||
113 | static inline void insn_get_attribute(struct insn *insn) | ||
114 | { | ||
115 | insn_get_modrm(insn); | ||
116 | } | ||
117 | |||
118 | /* Instruction uses RIP-relative addressing */ | ||
119 | extern int insn_rip_relative(struct insn *insn); | ||
120 | |||
121 | /* Init insn for kernel text */ | ||
122 | static inline void kernel_insn_init(struct insn *insn, const void *kaddr) | ||
123 | { | ||
124 | #ifdef CONFIG_X86_64 | ||
125 | insn_init(insn, kaddr, 1); | ||
126 | #else /* CONFIG_X86_32 */ | ||
127 | insn_init(insn, kaddr, 0); | ||
128 | #endif | ||
129 | } | ||
130 | |||
131 | static inline int insn_is_avx(struct insn *insn) | ||
132 | { | ||
133 | if (!insn->prefixes.got) | ||
134 | insn_get_prefixes(insn); | ||
135 | return (insn->vex_prefix.value != 0); | ||
136 | } | ||
137 | |||
138 | static inline insn_byte_t insn_vex_m_bits(struct insn *insn) | ||
139 | { | ||
140 | if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ | ||
141 | return X86_VEX2_M; | ||
142 | else | ||
143 | return X86_VEX3_M(insn->vex_prefix.bytes[1]); | ||
144 | } | ||
145 | |||
146 | static inline insn_byte_t insn_vex_p_bits(struct insn *insn) | ||
147 | { | ||
148 | if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ | ||
149 | return X86_VEX_P(insn->vex_prefix.bytes[1]); | ||
150 | else | ||
151 | return X86_VEX_P(insn->vex_prefix.bytes[2]); | ||
152 | } | ||
153 | |||
154 | /* Offset of each field from kaddr */ | ||
155 | static inline int insn_offset_rex_prefix(struct insn *insn) | ||
156 | { | ||
157 | return insn->prefixes.nbytes; | ||
158 | } | ||
159 | static inline int insn_offset_vex_prefix(struct insn *insn) | ||
160 | { | ||
161 | return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; | ||
162 | } | ||
163 | static inline int insn_offset_opcode(struct insn *insn) | ||
164 | { | ||
165 | return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes; | ||
166 | } | ||
167 | static inline int insn_offset_modrm(struct insn *insn) | ||
168 | { | ||
169 | return insn_offset_opcode(insn) + insn->opcode.nbytes; | ||
170 | } | ||
171 | static inline int insn_offset_sib(struct insn *insn) | ||
172 | { | ||
173 | return insn_offset_modrm(insn) + insn->modrm.nbytes; | ||
174 | } | ||
175 | static inline int insn_offset_displacement(struct insn *insn) | ||
176 | { | ||
177 | return insn_offset_sib(insn) + insn->sib.nbytes; | ||
178 | } | ||
179 | static inline int insn_offset_immediate(struct insn *insn) | ||
180 | { | ||
181 | return insn_offset_displacement(insn) + insn->displacement.nbytes; | ||
182 | } | ||
183 | |||
184 | #endif /* _ASM_X86_INSN_H */ | ||
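The decoder is pull-based: each insn_get_*() stage fills its field and implicitly runs the stages before it, so a caller that only needs the total length can jump straight to insn_get_length(). A hedged sketch against a kernel text address; the wrapper name is illustrative:

	/* Hedged sketch: decode one instruction and report its byte length. */
	static int insn_size_at_example(const void *kaddr)
	{
		struct insn insn;

		kernel_insn_init(&insn, kaddr);	/* 32/64-bit mode follows the build */
		insn_get_length(&insn);		/* runs prefixes..immediate as needed */

		return insn.length;
	}
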
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index fd6d21bbee6c..345c99cef152 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _ASM_X86_IOMMU_H | 1 | #ifndef _ASM_X86_IOMMU_H |
2 | #define _ASM_X86_IOMMU_H | 2 | #define _ASM_X86_IOMMU_H |
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | ||
5 | extern void no_iommu_init(void); | ||
6 | extern struct dma_map_ops nommu_dma_ops; | 4 | extern struct dma_map_ops nommu_dma_ops; |
7 | extern int force_iommu, no_iommu; | 5 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 6 | extern int iommu_detected; |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ddda6cbed6f4..ffd700ff5dcb 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -34,6 +34,7 @@ static inline int irq_canonicalize(int irq) | |||
34 | #ifdef CONFIG_HOTPLUG_CPU | 34 | #ifdef CONFIG_HOTPLUG_CPU |
35 | #include <linux/cpumask.h> | 35 | #include <linux/cpumask.h> |
36 | extern void fixup_irqs(void); | 36 | extern void fixup_irqs(void); |
37 | extern void irq_force_complete_move(int); | ||
37 | #endif | 38 | #endif |
38 | 39 | ||
39 | extern void (*generic_interrupt_extension)(void); | 40 | extern void (*generic_interrupt_extension)(void); |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index f1363b72364f..858baa061cfc 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -108,6 +108,8 @@ struct mce_log { | |||
108 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) | 108 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) |
109 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) | 109 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) |
110 | 110 | ||
111 | extern struct atomic_notifier_head x86_mce_decoder_chain; | ||
112 | |||
111 | #ifdef __KERNEL__ | 113 | #ifdef __KERNEL__ |
112 | 114 | ||
113 | #include <linux/percpu.h> | 115 | #include <linux/percpu.h> |
@@ -118,9 +120,11 @@ extern int mce_disabled; | |||
118 | extern int mce_p5_enabled; | 120 | extern int mce_p5_enabled; |
119 | 121 | ||
120 | #ifdef CONFIG_X86_MCE | 122 | #ifdef CONFIG_X86_MCE |
121 | void mcheck_init(struct cpuinfo_x86 *c); | 123 | int mcheck_init(void); |
124 | void mcheck_cpu_init(struct cpuinfo_x86 *c); | ||
122 | #else | 125 | #else |
123 | static inline void mcheck_init(struct cpuinfo_x86 *c) {} | 126 | static inline int mcheck_init(void) { return 0; } |
127 | static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} | ||
124 | #endif | 128 | #endif |
125 | 129 | ||
126 | #ifdef CONFIG_X86_ANCIENT_MCE | 130 | #ifdef CONFIG_X86_ANCIENT_MCE |
@@ -214,5 +218,11 @@ void intel_init_thermal(struct cpuinfo_x86 *c); | |||
214 | 218 | ||
215 | void mce_log_therm_throt_event(__u64 status); | 219 | void mce_log_therm_throt_event(__u64 status); |
216 | 220 | ||
221 | #ifdef CONFIG_X86_THERMAL_VECTOR | ||
222 | extern void mcheck_intel_therm_init(void); | ||
223 | #else | ||
224 | static inline void mcheck_intel_therm_init(void) { } | ||
225 | #endif | ||
226 | |||
217 | #endif /* __KERNEL__ */ | 227 | #endif /* __KERNEL__ */ |
218 | #endif /* _ASM_X86_MCE_H */ | 228 | #endif /* _ASM_X86_MCE_H */ |
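Exporting x86_mce_decoder_chain lets decoders outside the core MCE code subscribe to machine-check records through the standard atomic notifier API, while the old mcheck_init() is split into an early mcheck_init() and a per-CPU mcheck_cpu_init(). A hedged sketch of a decoder registering itself; the callback and symbol names are illustrative:

	/* Hedged sketch: hook the exported MCE decoder chain. */
	static int example_mce_decode(struct notifier_block *nb,
				      unsigned long val, void *data)
	{
		struct mce *m = data;

		pr_debug("mce: status %llx addr %llx\n", m->status, m->addr);
		return NOTIFY_DONE;
	}

	static struct notifier_block example_mce_nb = {
		.notifier_call	= example_mce_decode,
	};

	static int __init example_decoder_init(void)
	{
		atomic_notifier_chain_register(&x86_mce_decoder_chain, &example_mce_nb);
		return 0;
	}
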
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 79c94500c0bb..61d90b1331c3 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -163,14 +163,16 @@ typedef struct physid_mask physid_mask_t; | |||
163 | #define physids_shift_left(d, s, n) \ | 163 | #define physids_shift_left(d, s, n) \ |
164 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | 164 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) |
165 | 165 | ||
166 | #define physids_coerce(map) ((map).mask[0]) | 166 | static inline unsigned long physids_coerce(physid_mask_t *map) |
167 | { | ||
168 | return map->mask[0]; | ||
169 | } | ||
167 | 170 | ||
168 | #define physids_promote(physids) \ | 171 | static inline void physids_promote(unsigned long physids, physid_mask_t *map) |
169 | ({ \ | 172 | { |
170 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | 173 | physids_clear(*map); |
171 | __physid_mask.mask[0] = physids; \ | 174 | map->mask[0] = physids; |
172 | __physid_mask; \ | 175 | } |
173 | }) | ||
174 | 176 | ||
175 | /* Note: will create very large stack frames if physid_mask_t is big */ | 177 | /* Note: will create very large stack frames if physid_mask_t is big */ |
176 | #define physid_mask_of_physid(physid) \ | 178 | #define physid_mask_of_physid(physid) \ |
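Turning physids_coerce()/physids_promote() into inline functions changes the calling convention: the promoted map is now cleared and written through a pointer argument rather than returned by value from a statement expression. A hedged round-trip sketch with an illustrative raw mask:

	/* Hedged sketch: promote a raw APIC-id word into a mask and back. */
	static void physid_roundtrip_example(unsigned long raw)
	{
		physid_mask_t map;

		physids_promote(raw, &map);		/* was: map = physids_promote(raw) */
		WARN_ON(physids_coerce(&map) != raw);	/* low word round-trips */
	}
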
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 7e2b6ba962ff..5bef931f8b14 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h | |||
@@ -247,8 +247,8 @@ do { \ | |||
247 | #ifdef CONFIG_SMP | 247 | #ifdef CONFIG_SMP |
248 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 248 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
249 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 249 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
250 | void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 250 | void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
251 | void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | 251 | void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
252 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 252 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
253 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 253 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
254 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | 254 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); |
@@ -264,12 +264,12 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
264 | wrmsr(msr_no, l, h); | 264 | wrmsr(msr_no, l, h); |
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | static inline void rdmsr_on_cpus(const cpumask_t *m, u32 msr_no, | 267 | static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
268 | struct msr *msrs) | 268 | struct msr *msrs) |
269 | { | 269 | { |
270 | rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); | 270 | rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); |
271 | } | 271 | } |
272 | static inline void wrmsr_on_cpus(const cpumask_t *m, u32 msr_no, | 272 | static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
273 | struct msr *msrs) | 273 | struct msr *msrs) |
274 | { | 274 | { |
275 | wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); | 275 | wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ad7ce3fd5065..8d9f8548a870 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -28,9 +28,20 @@ | |||
28 | */ | 28 | */ |
29 | #define ARCH_PERFMON_EVENT_MASK 0xffff | 29 | #define ARCH_PERFMON_EVENT_MASK 0xffff |
30 | 30 | ||
31 | /* | ||
32 | * filter mask to validate fixed counter events. | ||
33 | * the following filters disqualify for fixed counters: | ||
34 | * - inv | ||
35 | * - edge | ||
36 | * - cnt-mask | ||
37 | * The other filters are supported by fixed counters. | ||
38 | * The any-thread option is supported starting with v3. | ||
39 | */ | ||
40 | #define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 | ||
41 | |||
31 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 42 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
32 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 43 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
33 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 | 44 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 |
34 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ | 45 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ |
35 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | 46 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) |
36 | 47 | ||
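ARCH_PERFMON_EVENT_FILTER_MASK collects the config bits (cmask, inv, edge) that the fixed-function counters cannot honour, so a raw event config can be screened with a single AND. A hedged sketch of that test; the helper name is illustrative:

	/* Hedged sketch: an event using any filtered bit cannot use a fixed counter. */
	static bool fits_fixed_counter_example(u64 config)
	{
		return (config & ARCH_PERFMON_EVENT_FILTER_MASK) == 0;
	}
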
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c9786480f0fe..6f8ec1c37e0a 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -30,6 +30,7 @@ struct mm_struct; | |||
30 | #include <linux/math64.h> | 30 | #include <linux/math64.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | 32 | ||
33 | #define HBP_NUM 4 | ||
33 | /* | 34 | /* |
34 | * Default implementation of macro that returns current | 35 | * Default implementation of macro that returns current |
35 | * instruction pointer ("program counter"). | 36 | * instruction pointer ("program counter"). |
@@ -422,6 +423,8 @@ extern unsigned int xstate_size; | |||
422 | extern void free_thread_xstate(struct task_struct *); | 423 | extern void free_thread_xstate(struct task_struct *); |
423 | extern struct kmem_cache *task_xstate_cachep; | 424 | extern struct kmem_cache *task_xstate_cachep; |
424 | 425 | ||
426 | struct perf_event; | ||
427 | |||
425 | struct thread_struct { | 428 | struct thread_struct { |
426 | /* Cached TLS descriptors: */ | 429 | /* Cached TLS descriptors: */ |
427 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 430 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
@@ -443,13 +446,10 @@ struct thread_struct { | |||
443 | unsigned long fs; | 446 | unsigned long fs; |
444 | #endif | 447 | #endif |
445 | unsigned long gs; | 448 | unsigned long gs; |
446 | /* Hardware debugging registers: */ | 449 | /* Save middle states of ptrace breakpoints */ |
447 | unsigned long debugreg0; | 450 | struct perf_event *ptrace_bps[HBP_NUM]; |
448 | unsigned long debugreg1; | 451 | /* Debug status used for traps, single steps, etc... */ |
449 | unsigned long debugreg2; | 452 | unsigned long debugreg6; |
450 | unsigned long debugreg3; | ||
451 | unsigned long debugreg6; | ||
452 | unsigned long debugreg7; | ||
453 | /* Fault info: */ | 453 | /* Fault info: */ |
454 | unsigned long cr2; | 454 | unsigned long cr2; |
455 | unsigned long trap_no; | 455 | unsigned long trap_no; |
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 0f0d908349aa..3d11fd0f44c5 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | #include <asm/segment.h> | 9 | #include <asm/segment.h> |
10 | #include <asm/page_types.h> | ||
10 | #endif | 11 | #endif |
11 | 12 | ||
12 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
@@ -216,6 +217,67 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) | |||
216 | return regs->sp; | 217 | return regs->sp; |
217 | } | 218 | } |
218 | 219 | ||
220 | /* Query offset/name of register from its name/offset */ | ||
221 | extern int regs_query_register_offset(const char *name); | ||
222 | extern const char *regs_query_register_name(unsigned int offset); | ||
223 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) | ||
224 | |||
225 | /** | ||
226 | * regs_get_register() - get register value from its offset | ||
227 | * @regs: pt_regs from which register value is gotten. | ||
228 | * @offset: offset number of the register. | ||
229 | * | ||
230 | * regs_get_register returns the value of a register. The @offset is the | ||
231 | * offset of the register in struct pt_regs address which specified by @regs. | ||
232 | * If @offset is bigger than MAX_REG_OFFSET, this returns 0. | ||
233 | */ | ||
234 | static inline unsigned long regs_get_register(struct pt_regs *regs, | ||
235 | unsigned int offset) | ||
236 | { | ||
237 | if (unlikely(offset > MAX_REG_OFFSET)) | ||
238 | return 0; | ||
239 | return *(unsigned long *)((unsigned long)regs + offset); | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * regs_within_kernel_stack() - check the address in the stack | ||
244 | * @regs: pt_regs which contains kernel stack pointer. | ||
245 | * @addr: address which is checked. | ||
246 | * | ||
247 | * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). | ||
248 | * If @addr is within the kernel stack, it returns true. If not, returns false. | ||
249 | */ | ||
250 | static inline int regs_within_kernel_stack(struct pt_regs *regs, | ||
251 | unsigned long addr) | ||
252 | { | ||
253 | return ((addr & ~(THREAD_SIZE - 1)) == | ||
254 | (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * regs_get_kernel_stack_nth() - get Nth entry of the stack | ||
259 | * @regs: pt_regs which contains kernel stack pointer. | ||
260 | * @n: stack entry number. | ||
261 | * | ||
262 | * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which | ||
263 | * is specified by @regs. If the @n th entry is NOT in the kernel stack, | ||
264 | * this returns 0. | ||
265 | */ | ||
266 | static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | ||
267 | unsigned int n) | ||
268 | { | ||
269 | unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); | ||
270 | addr += n; | ||
271 | if (regs_within_kernel_stack(regs, (unsigned long)addr)) | ||
272 | return *addr; | ||
273 | else | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | /* Get Nth argument at function call */ | ||
278 | extern unsigned long regs_get_argument_nth(struct pt_regs *regs, | ||
279 | unsigned int n); | ||
280 | |||
219 | /* | 281 | /* |
220 | * These are defined as per linux/ptrace.h, which see. | 282 | * These are defined as per linux/ptrace.h, which see. |
221 | */ | 283 | */ |
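The new regs_query_register_offset()/regs_get_register() pair turns a register name into an offset within struct pt_regs and then into its value, which is what name-based probes (e.g. kprobe event arguments) need. A hedged sketch of chaining them; the wrapper is illustrative, and a negative offset is assumed to mean an unknown name:

	/* Hedged sketch: read a register by name, e.g. "ax" or "sp". */
	static unsigned long regs_get_by_name_example(struct pt_regs *regs,
						      const char *name)
	{
		int offset = regs_query_register_offset(name);

		if (offset < 0)
			return 0;	/* unknown register name (assumed) */
		return regs_get_register(regs, offset);
	}
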
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index ae907e617181..3d3e8353ee5c 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h | |||
@@ -177,10 +177,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) | |||
177 | */ | 177 | */ |
178 | 178 | ||
179 | #ifndef CONFIG_KMEMCHECK | 179 | #ifndef CONFIG_KMEMCHECK |
180 | |||
181 | #if (__GNUC__ >= 4) | ||
182 | #define memcpy(t, f, n) __builtin_memcpy(t, f, n) | ||
183 | #else | ||
180 | #define memcpy(t, f, n) \ | 184 | #define memcpy(t, f, n) \ |
181 | (__builtin_constant_p((n)) \ | 185 | (__builtin_constant_p((n)) \ |
182 | ? __constant_memcpy((t), (f), (n)) \ | 186 | ? __constant_memcpy((t), (f), (n)) \ |
183 | : __memcpy((t), (f), (n))) | 187 | : __memcpy((t), (f), (n))) |
188 | #endif | ||
184 | #else | 189 | #else |
185 | /* | 190 | /* |
186 | * kmemcheck becomes very happy if we use the REP instructions unconditionally, | 191 | * kmemcheck becomes very happy if we use the REP instructions unconditionally, |
@@ -316,11 +321,15 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, | |||
316 | : __memset_generic((s), (c), (count))) | 321 | : __memset_generic((s), (c), (count))) |
317 | 322 | ||
318 | #define __HAVE_ARCH_MEMSET | 323 | #define __HAVE_ARCH_MEMSET |
324 | #if (__GNUC__ >= 4) | ||
325 | #define memset(s, c, count) __builtin_memset(s, c, count) | ||
326 | #else | ||
319 | #define memset(s, c, count) \ | 327 | #define memset(s, c, count) \ |
320 | (__builtin_constant_p(c) \ | 328 | (__builtin_constant_p(c) \ |
321 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ | 329 | ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
322 | (count)) \ | 330 | (count)) \ |
323 | : __memset((s), (c), (count))) | 331 | : __memset((s), (c), (count))) |
332 | #endif | ||
324 | 333 | ||
325 | /* | 334 | /* |
326 | * find the first occurrence of byte 'c', or 1 past the area if none | 335 | * find the first occurrence of byte 'c', or 1 past the area if none |
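With the change above, compilers from GCC 4 onward get memcpy()/memset() mapped straight to __builtin_memcpy()/__builtin_memset(), so the compiler itself chooses between inline expansion and a call to the out-of-line routine; older compilers keep the __builtin_constant_p() dispatch. A small illustrative caller (the struct and helper are made up) showing the case the builtin typically expands inline:

    /* Illustrative only: constant-size copy that GCC >= 4 can expand to
     * a couple of moves via __builtin_memcpy(). */
    struct pkt_hdr {
            unsigned int len;
            unsigned int id;
    };

    static inline void copy_hdr(struct pkt_hdr *dst, const struct pkt_hdr *src)
    {
            memcpy(dst, src, sizeof(*dst));     /* size known at compile time */
    }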
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index b9e4e20174fb..87ffcb12a1b8 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
@@ -3,17 +3,14 @@ | |||
3 | 3 | ||
4 | #include <linux/swiotlb.h> | 4 | #include <linux/swiotlb.h> |
5 | 5 | ||
6 | /* SWIOTLB interface */ | ||
7 | |||
8 | extern int swiotlb_force; | ||
9 | |||
10 | #ifdef CONFIG_SWIOTLB | 6 | #ifdef CONFIG_SWIOTLB |
11 | extern int swiotlb; | 7 | extern int swiotlb; |
12 | extern void pci_swiotlb_init(void); | 8 | extern int pci_swiotlb_init(void); |
13 | #else | 9 | #else |
14 | #define swiotlb 0 | 10 | #define swiotlb 0 |
15 | static inline void pci_swiotlb_init(void) | 11 | static inline int pci_swiotlb_init(void) |
16 | { | 12 | { |
13 | return 0; | ||
17 | } | 14 | } |
18 | #endif | 15 | #endif |
19 | 16 | ||
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index f08f97374892..022a84386de8 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -128,8 +128,6 @@ do { \ | |||
128 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | 128 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ |
129 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | 129 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ |
130 | "call __switch_to\n\t" \ | 130 | "call __switch_to\n\t" \ |
131 | ".globl thread_return\n" \ | ||
132 | "thread_return:\n\t" \ | ||
133 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ | 131 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ |
134 | __switch_canary \ | 132 | __switch_canary \ |
135 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | 133 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ |
@@ -157,19 +155,22 @@ extern void native_load_gs_index(unsigned); | |||
157 | * Load a segment. Fall back on loading the zero | 155 | * Load a segment. Fall back on loading the zero |
158 | * segment if something goes wrong. | 156 | * segment if something goes wrong. |
159 | */ | 157 | */ |
160 | #define loadsegment(seg, value) \ | 158 | #define loadsegment(seg, value) \ |
161 | asm volatile("\n" \ | 159 | do { \ |
162 | "1:\t" \ | 160 | unsigned short __val = (value); \ |
163 | "movl %k0,%%" #seg "\n" \ | 161 | \ |
164 | "2:\n" \ | 162 | asm volatile(" \n" \ |
165 | ".section .fixup,\"ax\"\n" \ | 163 | "1: movl %k0,%%" #seg " \n" \ |
166 | "3:\t" \ | 164 | \ |
167 | "movl %k1, %%" #seg "\n\t" \ | 165 | ".section .fixup,\"ax\" \n" \ |
168 | "jmp 2b\n" \ | 166 | "2: xorl %k0,%k0 \n" \ |
169 | ".previous\n" \ | 167 | " jmp 1b \n" \ |
170 | _ASM_EXTABLE(1b,3b) \ | 168 | ".previous \n" \ |
171 | : :"r" (value), "r" (0) : "memory") | 169 | \ |
172 | 170 | _ASM_EXTABLE(1b, 2b) \ | |
171 | \ | ||
172 | : "+r" (__val) : : "memory"); \ | ||
173 | } while (0) | ||
173 | 174 | ||
174 | /* | 175 | /* |
175 | * Save a segment register away | 176 | * Save a segment register away |
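The reworked loadsegment() above keeps a single "+r" operand: if the selector load faults, the fixup code zeroes that operand and jumps back, so the macro ends up loading the NULL selector instead of loading zero from a second, pre-initialized register as before. A rough C-level sketch of that control flow, purely illustrative; try_load_selector() is a made-up stand-in for the mov plus its exception-table entry:

    /* Conceptual sketch only -- the real code is inline assembly. */
    static void load_seg_or_null(int seg, unsigned short value)
    {
            unsigned short sel = value;

            if (!try_load_selector(seg, sel)) {     /* the mov faulted */
                    sel = 0;                        /* NULL selector cannot fault */
                    try_load_selector(seg, sel);    /* "jmp 1b": retry the load */
            }
    }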
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index d2c6c930b491..abd3e0ea762a 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -570,7 +570,6 @@ extern struct movsl_mask { | |||
570 | #ifdef CONFIG_X86_32 | 570 | #ifdef CONFIG_X86_32 |
571 | # include "uaccess_32.h" | 571 | # include "uaccess_32.h" |
572 | #else | 572 | #else |
573 | # define ARCH_HAS_SEARCH_EXTABLE | ||
574 | # include "uaccess_64.h" | 573 | # include "uaccess_64.h" |
575 | #endif | 574 | #endif |
576 | 575 | ||
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 632fb44b4cb5..0c9825e97f36 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h | |||
@@ -187,9 +187,34 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from, | |||
187 | 187 | ||
188 | unsigned long __must_check copy_to_user(void __user *to, | 188 | unsigned long __must_check copy_to_user(void __user *to, |
189 | const void *from, unsigned long n); | 189 | const void *from, unsigned long n); |
190 | unsigned long __must_check copy_from_user(void *to, | 190 | unsigned long __must_check _copy_from_user(void *to, |
191 | const void __user *from, | 191 | const void __user *from, |
192 | unsigned long n); | 192 | unsigned long n); |
193 | |||
194 | |||
195 | extern void copy_from_user_overflow(void) | ||
196 | #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS | ||
197 | __compiletime_error("copy_from_user() buffer size is not provably correct") | ||
198 | #else | ||
199 | __compiletime_warning("copy_from_user() buffer size is not provably correct") | ||
200 | #endif | ||
201 | ; | ||
202 | |||
203 | static inline unsigned long __must_check copy_from_user(void *to, | ||
204 | const void __user *from, | ||
205 | unsigned long n) | ||
206 | { | ||
207 | int sz = __compiletime_object_size(to); | ||
208 | int ret = -EFAULT; | ||
209 | |||
210 | if (likely(sz == -1 || sz >= n)) | ||
211 | ret = _copy_from_user(to, from, n); | ||
212 | else | ||
213 | copy_from_user_overflow(); | ||
214 | |||
215 | return ret; | ||
216 | } | ||
217 | |||
193 | long __must_check strncpy_from_user(char *dst, const char __user *src, | 218 | long __must_check strncpy_from_user(char *dst, const char __user *src, |
194 | long count); | 219 | long count); |
195 | long __must_check __strncpy_from_user(char *dst, | 220 | long __must_check __strncpy_from_user(char *dst, |
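The copy_from_user() wrapper introduced above pairs a run-time length check with __compiletime_object_size(): when GCC cannot prove the destination object is large enough, the surviving call to copy_from_user_overflow() turns into a build warning, or a hard error under CONFIG_DEBUG_STRICT_USER_COPY_CHECKS, and at run time the oversized copy is refused with -EFAULT. A minimal, hypothetical caller illustrating the pattern the check is aimed at:

    /* Illustrative only: "buf" is visibly 16 bytes, "len" is not provably
     * bounded, so the overflow branch survives and the new compile-time
     * diagnostics can fire. */
    static int fill_from_user(const void __user *uptr, unsigned long len)
    {
            char buf[16];

            if (copy_from_user(buf, uptr, len))
                    return -EFAULT;

            return 0;
    }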
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index db24b215fc50..46324c6a4f6e 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -19,12 +19,37 @@ __must_check unsigned long | |||
19 | copy_user_generic(void *to, const void *from, unsigned len); | 19 | copy_user_generic(void *to, const void *from, unsigned len); |
20 | 20 | ||
21 | __must_check unsigned long | 21 | __must_check unsigned long |
22 | copy_to_user(void __user *to, const void *from, unsigned len); | 22 | _copy_to_user(void __user *to, const void *from, unsigned len); |
23 | __must_check unsigned long | 23 | __must_check unsigned long |
24 | copy_from_user(void *to, const void __user *from, unsigned len); | 24 | _copy_from_user(void *to, const void __user *from, unsigned len); |
25 | __must_check unsigned long | 25 | __must_check unsigned long |
26 | copy_in_user(void __user *to, const void __user *from, unsigned len); | 26 | copy_in_user(void __user *to, const void __user *from, unsigned len); |
27 | 27 | ||
28 | static inline unsigned long __must_check copy_from_user(void *to, | ||
29 | const void __user *from, | ||
30 | unsigned long n) | ||
31 | { | ||
32 | int sz = __compiletime_object_size(to); | ||
33 | int ret = -EFAULT; | ||
34 | |||
35 | might_fault(); | ||
36 | if (likely(sz == -1 || sz >= n)) | ||
37 | ret = _copy_from_user(to, from, n); | ||
38 | #ifdef CONFIG_DEBUG_VM | ||
39 | else | ||
40 | WARN(1, "Buffer overflow detected!\n"); | ||
41 | #endif | ||
42 | return ret; | ||
43 | } | ||
44 | |||
45 | static __always_inline __must_check | ||
46 | int copy_to_user(void __user *dst, const void *src, unsigned size) | ||
47 | { | ||
48 | might_fault(); | ||
49 | |||
50 | return _copy_to_user(dst, src, size); | ||
51 | } | ||
52 | |||
28 | static __always_inline __must_check | 53 | static __always_inline __must_check |
29 | int __copy_from_user(void *dst, const void __user *src, unsigned size) | 54 | int __copy_from_user(void *dst, const void __user *src, unsigned size) |
30 | { | 55 | { |
@@ -176,8 +201,11 @@ __must_check long strlen_user(const char __user *str); | |||
176 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | 201 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); |
177 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | 202 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); |
178 | 203 | ||
179 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, | 204 | static __must_check __always_inline int |
180 | unsigned size); | 205 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
206 | { | ||
207 | return copy_user_generic(dst, (__force const void *)src, size); | ||
208 | } | ||
181 | 209 | ||
182 | static __must_check __always_inline int | 210 | static __must_check __always_inline int |
183 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 211 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h index 9613c8c0b647..d6b17c760622 100644 --- a/arch/x86/include/asm/uv/uv_irq.h +++ b/arch/x86/include/asm/uv/uv_irq.h | |||
@@ -25,12 +25,14 @@ struct uv_IO_APIC_route_entry { | |||
25 | dest : 32; | 25 | dest : 32; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | extern struct irq_chip uv_irq_chip; | 28 | enum { |
29 | 29 | UV_AFFINITY_ALL, | |
30 | extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long); | 30 | UV_AFFINITY_NODE, |
31 | extern void arch_disable_uv_irq(int, unsigned long); | 31 | UV_AFFINITY_CPU |
32 | }; | ||
32 | 33 | ||
33 | extern int uv_setup_irq(char *, int, int, unsigned long); | 34 | extern int uv_irq_2_mmr_info(int, unsigned long *, int *); |
34 | extern void uv_teardown_irq(unsigned int, int, unsigned long); | 35 | extern int uv_setup_irq(char *, int, int, unsigned long, int); |
36 | extern void uv_teardown_irq(unsigned int); | ||
35 | 37 | ||
36 | #endif /* _ASM_X86_UV_UV_IRQ_H */ | 38 | #endif /* _ASM_X86_UV_UV_IRQ_H */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 2c756fd4ab0e..d8e71459f025 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -91,6 +91,14 @@ struct x86_init_timers { | |||
91 | }; | 91 | }; |
92 | 92 | ||
93 | /** | 93 | /** |
94 | * struct x86_init_iommu - platform specific iommu setup | ||
95 | * @iommu_init: platform specific iommu setup | ||
96 | */ | ||
97 | struct x86_init_iommu { | ||
98 | int (*iommu_init)(void); | ||
99 | }; | ||
100 | |||
101 | /** | ||
94 | * struct x86_init_ops - functions for platform specific setup | 102 | * struct x86_init_ops - functions for platform specific setup |
95 | * | 103 | * |
96 | */ | 104 | */ |
@@ -101,6 +109,7 @@ struct x86_init_ops { | |||
101 | struct x86_init_oem oem; | 109 | struct x86_init_oem oem; |
102 | struct x86_init_paging paging; | 110 | struct x86_init_paging paging; |
103 | struct x86_init_timers timers; | 111 | struct x86_init_timers timers; |
112 | struct x86_init_iommu iommu; | ||
104 | }; | 113 | }; |
105 | 114 | ||
106 | /** | 115 | /** |
@@ -121,6 +130,7 @@ struct x86_platform_ops { | |||
121 | unsigned long (*calibrate_tsc)(void); | 130 | unsigned long (*calibrate_tsc)(void); |
122 | unsigned long (*get_wallclock)(void); | 131 | unsigned long (*get_wallclock)(void); |
123 | int (*set_wallclock)(unsigned long nowtime); | 132 | int (*set_wallclock)(unsigned long nowtime); |
133 | void (*iommu_shutdown)(void); | ||
124 | }; | 134 | }; |
125 | 135 | ||
126 | extern struct x86_init_ops x86_init; | 136 | extern struct x86_init_ops x86_init; |
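The new x86_init.iommu.iommu_init hook and the x86_platform.iommu_shutdown pointer give each IOMMU implementation a single place to plug its detection/setup and its shutdown path into the common initialization code. A hedged sketch of how a driver might install its callbacks; the my_iommu_* names, and the assumption that iommu_init returns 0 on success, are illustrative only:

    static int __init my_iommu_init(void)
    {
            /* detect and program the translation hardware */
            return 0;                       /* assumed success convention */
    }

    static void my_iommu_shutdown(void)
    {
            /* quiesce translation before a reboot/kexec */
    }

    static void __init my_iommu_register(void)
    {
            x86_init.iommu.iommu_init   = my_iommu_init;
            x86_platform.iommu_shutdown = my_iommu_shutdown;
    }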
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index d8e5d0cdd678..4f2e66e29ecc 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -40,7 +40,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | |||
40 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o | 40 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o |
41 | obj-y += bootflag.o e820.o | 41 | obj-y += bootflag.o e820.o |
42 | obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o | 42 | obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o |
43 | obj-y += alternative.o i8253.o pci-nommu.o | 43 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o |
44 | obj-y += tsc.o io_delay.o rtc.o | 44 | obj-y += tsc.o io_delay.o rtc.o |
45 | 45 | ||
46 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o | 46 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0285521e0a99..32fb09102a13 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/proto.h> | 28 | #include <asm/proto.h> |
29 | #include <asm/iommu.h> | 29 | #include <asm/iommu.h> |
30 | #include <asm/gart.h> | 30 | #include <asm/gart.h> |
31 | #include <asm/amd_iommu_proto.h> | ||
31 | #include <asm/amd_iommu_types.h> | 32 | #include <asm/amd_iommu_types.h> |
32 | #include <asm/amd_iommu.h> | 33 | #include <asm/amd_iommu.h> |
33 | 34 | ||
@@ -56,20 +57,115 @@ struct iommu_cmd { | |||
56 | u32 data[4]; | 57 | u32 data[4]; |
57 | }; | 58 | }; |
58 | 59 | ||
59 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | ||
60 | struct unity_map_entry *e); | ||
61 | static struct dma_ops_domain *find_protection_domain(u16 devid); | ||
62 | static u64 *alloc_pte(struct protection_domain *domain, | ||
63 | unsigned long address, int end_lvl, | ||
64 | u64 **pte_page, gfp_t gfp); | ||
65 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | ||
66 | unsigned long start_page, | ||
67 | unsigned int pages); | ||
68 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | 60 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); |
69 | static u64 *fetch_pte(struct protection_domain *domain, | ||
70 | unsigned long address, int map_size); | ||
71 | static void update_domain(struct protection_domain *domain); | 61 | static void update_domain(struct protection_domain *domain); |
72 | 62 | ||
63 | /**************************************************************************** | ||
64 | * | ||
65 | * Helper functions | ||
66 | * | ||
67 | ****************************************************************************/ | ||
68 | |||
69 | static inline u16 get_device_id(struct device *dev) | ||
70 | { | ||
71 | struct pci_dev *pdev = to_pci_dev(dev); | ||
72 | |||
73 | return calc_devid(pdev->bus->number, pdev->devfn); | ||
74 | } | ||
75 | |||
76 | static struct iommu_dev_data *get_dev_data(struct device *dev) | ||
77 | { | ||
78 | return dev->archdata.iommu; | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * In this function the list of preallocated protection domains is traversed to | ||
83 | * find the domain for a specific device | ||
84 | */ | ||
85 | static struct dma_ops_domain *find_protection_domain(u16 devid) | ||
86 | { | ||
87 | struct dma_ops_domain *entry, *ret = NULL; | ||
88 | unsigned long flags; | ||
89 | u16 alias = amd_iommu_alias_table[devid]; | ||
90 | |||
91 | if (list_empty(&iommu_pd_list)) | ||
92 | return NULL; | ||
93 | |||
94 | spin_lock_irqsave(&iommu_pd_list_lock, flags); | ||
95 | |||
96 | list_for_each_entry(entry, &iommu_pd_list, list) { | ||
97 | if (entry->target_dev == devid || | ||
98 | entry->target_dev == alias) { | ||
99 | ret = entry; | ||
100 | break; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | spin_unlock_irqrestore(&iommu_pd_list_lock, flags); | ||
105 | |||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * This function checks if the driver got a valid device from the caller to | ||
111 | * avoid dereferencing invalid pointers. | ||
112 | */ | ||
113 | static bool check_device(struct device *dev) | ||
114 | { | ||
115 | u16 devid; | ||
116 | |||
117 | if (!dev || !dev->dma_mask) | ||
118 | return false; | ||
119 | |||
120 | /* No device or no PCI device */ | ||
121 | if (!dev || dev->bus != &pci_bus_type) | ||
122 | return false; | ||
123 | |||
124 | devid = get_device_id(dev); | ||
125 | |||
126 | /* Out of our scope? */ | ||
127 | if (devid > amd_iommu_last_bdf) | ||
128 | return false; | ||
129 | |||
130 | if (amd_iommu_rlookup_table[devid] == NULL) | ||
131 | return false; | ||
132 | |||
133 | return true; | ||
134 | } | ||
135 | |||
136 | static int iommu_init_device(struct device *dev) | ||
137 | { | ||
138 | struct iommu_dev_data *dev_data; | ||
139 | struct pci_dev *pdev; | ||
140 | u16 devid, alias; | ||
141 | |||
142 | if (dev->archdata.iommu) | ||
143 | return 0; | ||
144 | |||
145 | dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); | ||
146 | if (!dev_data) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | dev_data->dev = dev; | ||
150 | |||
151 | devid = get_device_id(dev); | ||
152 | alias = amd_iommu_alias_table[devid]; | ||
153 | pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff); | ||
154 | if (pdev) | ||
155 | dev_data->alias = &pdev->dev; | ||
156 | |||
157 | atomic_set(&dev_data->bind, 0); | ||
158 | |||
159 | dev->archdata.iommu = dev_data; | ||
160 | |||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static void iommu_uninit_device(struct device *dev) | ||
166 | { | ||
167 | kfree(dev->archdata.iommu); | ||
168 | } | ||
73 | #ifdef CONFIG_AMD_IOMMU_STATS | 169 | #ifdef CONFIG_AMD_IOMMU_STATS |
74 | 170 | ||
75 | /* | 171 | /* |
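The helpers added above key all per-device state off a 16-bit device id and the iommu_dev_data hung off dev->archdata.iommu. A short sketch of how they are meant to be combined when handling a struct device; it assumes calc_devid() packs the id as bus << 8 | devfn, and the helper name is illustrative:

    /* Illustrative only: look up the IOMMU responsible for a device. */
    static struct amd_iommu *iommu_for_dev(struct device *dev)
    {
            u16 devid;

            if (!check_device(dev))         /* not a PCI device this driver manages */
                    return NULL;

            devid = get_device_id(dev);     /* bus << 8 | devfn (assumed) */

            return amd_iommu_rlookup_table[devid];
    }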
@@ -90,7 +186,6 @@ DECLARE_STATS_COUNTER(alloced_io_mem); | |||
90 | DECLARE_STATS_COUNTER(total_map_requests); | 186 | DECLARE_STATS_COUNTER(total_map_requests); |
91 | 187 | ||
92 | static struct dentry *stats_dir; | 188 | static struct dentry *stats_dir; |
93 | static struct dentry *de_isolate; | ||
94 | static struct dentry *de_fflush; | 189 | static struct dentry *de_fflush; |
95 | 190 | ||
96 | static void amd_iommu_stats_add(struct __iommu_counter *cnt) | 191 | static void amd_iommu_stats_add(struct __iommu_counter *cnt) |
@@ -108,9 +203,6 @@ static void amd_iommu_stats_init(void) | |||
108 | if (stats_dir == NULL) | 203 | if (stats_dir == NULL) |
109 | return; | 204 | return; |
110 | 205 | ||
111 | de_isolate = debugfs_create_bool("isolation", 0444, stats_dir, | ||
112 | (u32 *)&amd_iommu_isolate); | ||
113 | |||
114 | de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, | 206 | de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, |
115 | (u32 *)&amd_iommu_unmap_flush); | 207 | (u32 *)&amd_iommu_unmap_flush); |
116 | 208 | ||
@@ -130,12 +222,6 @@ static void amd_iommu_stats_init(void) | |||
130 | 222 | ||
131 | #endif | 223 | #endif |
132 | 224 | ||
133 | /* returns !0 if the IOMMU is caching non-present entries in its TLB */ | ||
134 | static int iommu_has_npcache(struct amd_iommu *iommu) | ||
135 | { | ||
136 | return iommu->cap & (1UL << IOMMU_CAP_NPCACHE); | ||
137 | } | ||
138 | |||
139 | /**************************************************************************** | 225 | /**************************************************************************** |
140 | * | 226 | * |
141 | * Interrupt handling functions | 227 | * Interrupt handling functions |
@@ -199,6 +285,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | |||
199 | break; | 285 | break; |
200 | case EVENT_TYPE_ILL_CMD: | 286 | case EVENT_TYPE_ILL_CMD: |
201 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 287 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
288 | iommu->reset_in_progress = true; | ||
202 | reset_iommu_command_buffer(iommu); | 289 | reset_iommu_command_buffer(iommu); |
203 | dump_command(address); | 290 | dump_command(address); |
204 | break; | 291 | break; |
@@ -321,11 +408,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) | |||
321 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 408 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; |
322 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 409 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); |
323 | 410 | ||
324 | if (unlikely(i == EXIT_LOOP_COUNT)) { | 411 | if (unlikely(i == EXIT_LOOP_COUNT)) |
325 | spin_unlock(&iommu->lock); | 412 | iommu->reset_in_progress = true; |
326 | reset_iommu_command_buffer(iommu); | ||
327 | spin_lock(&iommu->lock); | ||
328 | } | ||
329 | } | 413 | } |
330 | 414 | ||
331 | /* | 415 | /* |
@@ -372,26 +456,46 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
372 | out: | 456 | out: |
373 | spin_unlock_irqrestore(&iommu->lock, flags); | 457 | spin_unlock_irqrestore(&iommu->lock, flags); |
374 | 458 | ||
459 | if (iommu->reset_in_progress) | ||
460 | reset_iommu_command_buffer(iommu); | ||
461 | |||
375 | return 0; | 462 | return 0; |
376 | } | 463 | } |
377 | 464 | ||
465 | static void iommu_flush_complete(struct protection_domain *domain) | ||
466 | { | ||
467 | int i; | ||
468 | |||
469 | for (i = 0; i < amd_iommus_present; ++i) { | ||
470 | if (!domain->dev_iommu[i]) | ||
471 | continue; | ||
472 | |||
473 | /* | ||
474 | * Devices of this domain are behind this IOMMU | ||
475 | * We need to wait for completion of all commands. | ||
476 | */ | ||
477 | iommu_completion_wait(amd_iommus[i]); | ||
478 | } | ||
479 | } | ||
480 | |||
378 | /* | 481 | /* |
379 | * Command send function for invalidating a device table entry | 482 | * Command send function for invalidating a device table entry |
380 | */ | 483 | */ |
381 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | 484 | static int iommu_flush_device(struct device *dev) |
382 | { | 485 | { |
486 | struct amd_iommu *iommu; | ||
383 | struct iommu_cmd cmd; | 487 | struct iommu_cmd cmd; |
384 | int ret; | 488 | u16 devid; |
385 | 489 | ||
386 | BUG_ON(iommu == NULL); | 490 | devid = get_device_id(dev); |
491 | iommu = amd_iommu_rlookup_table[devid]; | ||
387 | 492 | ||
493 | /* Build command */ | ||
388 | memset(&cmd, 0, sizeof(cmd)); | 494 | memset(&cmd, 0, sizeof(cmd)); |
389 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | 495 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); |
390 | cmd.data[0] = devid; | 496 | cmd.data[0] = devid; |
391 | 497 | ||
392 | ret = iommu_queue_command(iommu, &cmd); | 498 | return iommu_queue_command(iommu, &cmd); |
393 | |||
394 | return ret; | ||
395 | } | 499 | } |
396 | 500 | ||
397 | static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | 501 | static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, |
@@ -430,11 +534,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
430 | * It invalidates a single PTE if the range to flush is within a single | 534 | * It invalidates a single PTE if the range to flush is within a single |
431 | * page. Otherwise it flushes the whole TLB of the IOMMU. | 535 | * page. Otherwise it flushes the whole TLB of the IOMMU. |
432 | */ | 536 | */ |
433 | static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, | 537 | static void __iommu_flush_pages(struct protection_domain *domain, |
434 | u64 address, size_t size) | 538 | u64 address, size_t size, int pde) |
435 | { | 539 | { |
436 | int s = 0; | 540 | int s = 0, i; |
437 | unsigned pages = iommu_num_pages(address, size, PAGE_SIZE); | 541 | unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); |
438 | 542 | ||
439 | address &= PAGE_MASK; | 543 | address &= PAGE_MASK; |
440 | 544 | ||
@@ -447,142 +551,212 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, | |||
447 | s = 1; | 551 | s = 1; |
448 | } | 552 | } |
449 | 553 | ||
450 | iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s); | ||
451 | 554 | ||
452 | return 0; | 555 | for (i = 0; i < amd_iommus_present; ++i) { |
556 | if (!domain->dev_iommu[i]) | ||
557 | continue; | ||
558 | |||
559 | /* | ||
560 | * Devices of this domain are behind this IOMMU | ||
561 | * We need a TLB flush | ||
562 | */ | ||
563 | iommu_queue_inv_iommu_pages(amd_iommus[i], address, | ||
564 | domain->id, pde, s); | ||
565 | } | ||
566 | |||
567 | return; | ||
453 | } | 568 | } |
454 | 569 | ||
455 | /* Flush the whole IO/TLB for a given protection domain */ | 570 | static void iommu_flush_pages(struct protection_domain *domain, |
456 | static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) | 571 | u64 address, size_t size) |
457 | { | 572 | { |
458 | u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | 573 | __iommu_flush_pages(domain, address, size, 0); |
459 | 574 | } | |
460 | INC_STATS_COUNTER(domain_flush_single); | ||
461 | 575 | ||
462 | iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); | 576 | /* Flush the whole IO/TLB for a given protection domain */ |
577 | static void iommu_flush_tlb(struct protection_domain *domain) | ||
578 | { | ||
579 | __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); | ||
463 | } | 580 | } |
464 | 581 | ||
465 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ | 582 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ |
466 | static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) | 583 | static void iommu_flush_tlb_pde(struct protection_domain *domain) |
467 | { | 584 | { |
468 | u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | 585 | __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); |
469 | |||
470 | INC_STATS_COUNTER(domain_flush_single); | ||
471 | |||
472 | iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1); | ||
473 | } | 586 | } |
474 | 587 | ||
588 | |||
475 | /* | 589 | /* |
476 | * This function flushes one domain on one IOMMU | 590 | * This function flushes the DTEs for all devices in domain |
477 | */ | 591 | */ |
478 | static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) | 592 | static void iommu_flush_domain_devices(struct protection_domain *domain) |
479 | { | 593 | { |
480 | struct iommu_cmd cmd; | 594 | struct iommu_dev_data *dev_data; |
481 | unsigned long flags; | 595 | unsigned long flags; |
482 | 596 | ||
483 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | 597 | spin_lock_irqsave(&domain->lock, flags); |
484 | domid, 1, 1); | ||
485 | 598 | ||
486 | spin_lock_irqsave(&iommu->lock, flags); | 599 | list_for_each_entry(dev_data, &domain->dev_list, list) |
487 | __iommu_queue_command(iommu, &cmd); | 600 | iommu_flush_device(dev_data->dev); |
488 | __iommu_completion_wait(iommu); | 601 | |
489 | __iommu_wait_for_completion(iommu); | 602 | spin_unlock_irqrestore(&domain->lock, flags); |
490 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
491 | } | 603 | } |
492 | 604 | ||
493 | static void flush_all_domains_on_iommu(struct amd_iommu *iommu) | 605 | static void iommu_flush_all_domain_devices(void) |
494 | { | 606 | { |
495 | int i; | 607 | struct protection_domain *domain; |
608 | unsigned long flags; | ||
496 | 609 | ||
497 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | 610 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); |
498 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | 611 | |
499 | continue; | 612 | list_for_each_entry(domain, &amd_iommu_pd_list, list) { |
500 | flush_domain_on_iommu(iommu, i); | 613 | iommu_flush_domain_devices(domain); |
614 | iommu_flush_complete(domain); | ||
501 | } | 615 | } |
502 | 616 | ||
617 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
618 | } | ||
619 | |||
620 | void amd_iommu_flush_all_devices(void) | ||
621 | { | ||
622 | iommu_flush_all_domain_devices(); | ||
503 | } | 623 | } |
504 | 624 | ||
505 | /* | 625 | /* |
506 | * This function is used to flush the IO/TLB for a given protection domain | 626 | * This function uses heavy locking and may disable irqs for some time. But |
507 | * on every IOMMU in the system | 627 | * this is no issue because it is only called during resume. |
508 | */ | 628 | */ |
509 | static void iommu_flush_domain(u16 domid) | 629 | void amd_iommu_flush_all_domains(void) |
510 | { | 630 | { |
511 | struct amd_iommu *iommu; | 631 | struct protection_domain *domain; |
632 | unsigned long flags; | ||
512 | 633 | ||
513 | INC_STATS_COUNTER(domain_flush_all); | 634 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); |
514 | 635 | ||
515 | for_each_iommu(iommu) | 636 | list_for_each_entry(domain, &amd_iommu_pd_list, list) { |
516 | flush_domain_on_iommu(iommu, domid); | 637 | spin_lock(&domain->lock); |
638 | iommu_flush_tlb_pde(domain); | ||
639 | iommu_flush_complete(domain); | ||
640 | spin_unlock(&domain->lock); | ||
641 | } | ||
642 | |||
643 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
517 | } | 644 | } |
518 | 645 | ||
519 | void amd_iommu_flush_all_domains(void) | 646 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) |
520 | { | 647 | { |
521 | struct amd_iommu *iommu; | 648 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); |
522 | 649 | ||
523 | for_each_iommu(iommu) | 650 | if (iommu->reset_in_progress) |
524 | flush_all_domains_on_iommu(iommu); | 651 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); |
652 | |||
653 | amd_iommu_reset_cmd_buffer(iommu); | ||
654 | amd_iommu_flush_all_devices(); | ||
655 | amd_iommu_flush_all_domains(); | ||
656 | |||
657 | iommu->reset_in_progress = false; | ||
525 | } | 658 | } |
526 | 659 | ||
527 | static void flush_all_devices_for_iommu(struct amd_iommu *iommu) | 660 | /**************************************************************************** |
661 | * | ||
662 | * The functions below are used to create the page table mappings for | ||
663 | * unity mapped regions. | ||
664 | * | ||
665 | ****************************************************************************/ | ||
666 | |||
667 | /* | ||
668 | * This function is used to add another level to an IO page table. Adding | ||
669 | * another level increases the size of the address space by 9 bits to a size up | ||
670 | * to 64 bits. | ||
671 | */ | ||
672 | static bool increase_address_space(struct protection_domain *domain, | ||
673 | gfp_t gfp) | ||
528 | { | 674 | { |
529 | int i; | 675 | u64 *pte; |
530 | 676 | ||
531 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 677 | if (domain->mode == PAGE_MODE_6_LEVEL) |
532 | if (iommu != amd_iommu_rlookup_table[i]) | 678 | /* address space already 64 bit large */ |
533 | continue; | 679 | return false; |
534 | 680 | ||
535 | iommu_queue_inv_dev_entry(iommu, i); | 681 | pte = (void *)get_zeroed_page(gfp); |
536 | iommu_completion_wait(iommu); | 682 | if (!pte) |
537 | } | 683 | return false; |
684 | |||
685 | *pte = PM_LEVEL_PDE(domain->mode, | ||
686 | virt_to_phys(domain->pt_root)); | ||
687 | domain->pt_root = pte; | ||
688 | domain->mode += 1; | ||
689 | domain->updated = true; | ||
690 | |||
691 | return true; | ||
538 | } | 692 | } |
539 | 693 | ||
540 | static void flush_devices_by_domain(struct protection_domain *domain) | 694 | static u64 *alloc_pte(struct protection_domain *domain, |
695 | unsigned long address, | ||
696 | int end_lvl, | ||
697 | u64 **pte_page, | ||
698 | gfp_t gfp) | ||
541 | { | 699 | { |
542 | struct amd_iommu *iommu; | 700 | u64 *pte, *page; |
543 | int i; | 701 | int level; |
544 | 702 | ||
545 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 703 | while (address > PM_LEVEL_SIZE(domain->mode)) |
546 | if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || | 704 | increase_address_space(domain, gfp); |
547 | (amd_iommu_pd_table[i] != domain)) | ||
548 | continue; | ||
549 | 705 | ||
550 | iommu = amd_iommu_rlookup_table[i]; | 706 | level = domain->mode - 1; |
551 | if (!iommu) | 707 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
552 | continue; | ||
553 | 708 | ||
554 | iommu_queue_inv_dev_entry(iommu, i); | 709 | while (level > end_lvl) { |
555 | iommu_completion_wait(iommu); | 710 | if (!IOMMU_PTE_PRESENT(*pte)) { |
711 | page = (u64 *)get_zeroed_page(gfp); | ||
712 | if (!page) | ||
713 | return NULL; | ||
714 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | ||
715 | } | ||
716 | |||
717 | level -= 1; | ||
718 | |||
719 | pte = IOMMU_PTE_PAGE(*pte); | ||
720 | |||
721 | if (pte_page && level == end_lvl) | ||
722 | *pte_page = pte; | ||
723 | |||
724 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | ||
556 | } | 725 | } |
726 | |||
727 | return pte; | ||
557 | } | 728 | } |
558 | 729 | ||
559 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | 730 | /* |
731 | * This function checks if there is a PTE for a given dma address. If | ||
732 | * there is one, it returns the pointer to it. | ||
733 | */ | ||
734 | static u64 *fetch_pte(struct protection_domain *domain, | ||
735 | unsigned long address, int map_size) | ||
560 | { | 736 | { |
561 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | 737 | int level; |
738 | u64 *pte; | ||
562 | 739 | ||
563 | if (iommu->reset_in_progress) | 740 | level = domain->mode - 1; |
564 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | 741 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
565 | 742 | ||
566 | iommu->reset_in_progress = true; | 743 | while (level > map_size) { |
744 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
745 | return NULL; | ||
567 | 746 | ||
568 | amd_iommu_reset_cmd_buffer(iommu); | 747 | level -= 1; |
569 | flush_all_devices_for_iommu(iommu); | ||
570 | flush_all_domains_on_iommu(iommu); | ||
571 | 748 | ||
572 | iommu->reset_in_progress = false; | 749 | pte = IOMMU_PTE_PAGE(*pte); |
573 | } | 750 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
574 | 751 | ||
575 | void amd_iommu_flush_all_devices(void) | 752 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { |
576 | { | 753 | pte = NULL; |
577 | flush_devices_by_domain(NULL); | 754 | break; |
578 | } | 755 | } |
756 | } | ||
579 | 757 | ||
580 | /**************************************************************************** | 758 | return pte; |
581 | * | 759 | } |
582 | * The functions below are used the create the page table mappings for | ||
583 | * unity mapped regions. | ||
584 | * | ||
585 | ****************************************************************************/ | ||
586 | 760 | ||
587 | /* | 761 | /* |
588 | * Generic mapping functions. It maps a physical address into a DMA | 762 | * Generic mapping functions. It maps a physical address into a DMA |
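alloc_pte() and fetch_pte() above walk an IO page table in which every level resolves 9 address bits on top of a 4 KiB page offset, and increase_address_space() grows the tree by pushing the current root one level down. A small, self-contained example of the index arithmetic that PM_LEVEL_INDEX() is assumed to implement (the 9-bits-per-level layout is an assumption stated here, not taken from this patch):

    #include <stdio.h>

    static unsigned long level_index(int level, unsigned long addr)
    {
            /* 12 bits of page offset, then 9 bits per level */
            return (addr >> (12 + 9 * level)) & 0x1ffUL;
    }

    int main(void)
    {
            unsigned long addr = 0x12345678000UL;
            int level;

            /* a 3-level table covers 12 + 3 * 9 = 39 bits of IO virtual address */
            for (level = 2; level >= 0; level--)
                    printf("level %d index %lu\n", level, level_index(level, addr));

            return 0;
    }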
@@ -654,28 +828,6 @@ static int iommu_for_unity_map(struct amd_iommu *iommu, | |||
654 | } | 828 | } |
655 | 829 | ||
656 | /* | 830 | /* |
657 | * Init the unity mappings for a specific IOMMU in the system | ||
658 | * | ||
659 | * Basically iterates over all unity mapping entries and applies them to | ||
660 | * the default domain DMA of that IOMMU if necessary. | ||
661 | */ | ||
662 | static int iommu_init_unity_mappings(struct amd_iommu *iommu) | ||
663 | { | ||
664 | struct unity_map_entry *entry; | ||
665 | int ret; | ||
666 | |||
667 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | ||
668 | if (!iommu_for_unity_map(iommu, entry)) | ||
669 | continue; | ||
670 | ret = dma_ops_unity_map(iommu->default_dom, entry); | ||
671 | if (ret) | ||
672 | return ret; | ||
673 | } | ||
674 | |||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * This function actually applies the mapping to the page table of the | 831 | * This function actually applies the mapping to the page table of the |
680 | * dma_ops domain. | 832 | * dma_ops domain. |
681 | */ | 833 | */ |
@@ -704,6 +856,28 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
704 | } | 856 | } |
705 | 857 | ||
706 | /* | 858 | /* |
859 | * Init the unity mappings for a specific IOMMU in the system | ||
860 | * | ||
861 | * Basically iterates over all unity mapping entries and applies them to | ||
862 | * the default DMA domain of that IOMMU if necessary. | ||
863 | */ | ||
864 | static int iommu_init_unity_mappings(struct amd_iommu *iommu) | ||
865 | { | ||
866 | struct unity_map_entry *entry; | ||
867 | int ret; | ||
868 | |||
869 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | ||
870 | if (!iommu_for_unity_map(iommu, entry)) | ||
871 | continue; | ||
872 | ret = dma_ops_unity_map(iommu->default_dom, entry); | ||
873 | if (ret) | ||
874 | return ret; | ||
875 | } | ||
876 | |||
877 | return 0; | ||
878 | } | ||
879 | |||
880 | /* | ||
707 | * Inits the unity mappings required for a specific device | 881 | * Inits the unity mappings required for a specific device |
708 | */ | 882 | */ |
709 | static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | 883 | static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, |
@@ -740,34 +914,23 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | |||
740 | */ | 914 | */ |
741 | 915 | ||
742 | /* | 916 | /* |
743 | * This function checks if there is a PTE for a given dma address. If | 917 | * Used to reserve address ranges in the aperture (e.g. for exclusion |
744 | * there is one, it returns the pointer to it. | 918 | * ranges). |
745 | */ | 919 | */ |
746 | static u64 *fetch_pte(struct protection_domain *domain, | 920 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, |
747 | unsigned long address, int map_size) | 921 | unsigned long start_page, |
922 | unsigned int pages) | ||
748 | { | 923 | { |
749 | int level; | 924 | unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT; |
750 | u64 *pte; | ||
751 | |||
752 | level = domain->mode - 1; | ||
753 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
754 | |||
755 | while (level > map_size) { | ||
756 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
757 | return NULL; | ||
758 | |||
759 | level -= 1; | ||
760 | 925 | ||
761 | pte = IOMMU_PTE_PAGE(*pte); | 926 | if (start_page + pages > last_page) |
762 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | 927 | pages = last_page - start_page; |
763 | 928 | ||
764 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { | 929 | for (i = start_page; i < start_page + pages; ++i) { |
765 | pte = NULL; | 930 | int index = i / APERTURE_RANGE_PAGES; |
766 | break; | 931 | int page = i % APERTURE_RANGE_PAGES; |
767 | } | 932 | __set_bit(page, dom->aperture[index]->bitmap); |
768 | } | 933 | } |
769 | |||
770 | return pte; | ||
771 | } | 934 | } |
772 | 935 | ||
773 | /* | 936 | /* |
@@ -775,11 +938,11 @@ static u64 *fetch_pte(struct protection_domain *domain, | |||
775 | * aperture in case of dma_ops domain allocation or address allocation | 938 | * aperture in case of dma_ops domain allocation or address allocation |
776 | * failure. | 939 | * failure. |
777 | */ | 940 | */ |
778 | static int alloc_new_range(struct amd_iommu *iommu, | 941 | static int alloc_new_range(struct dma_ops_domain *dma_dom, |
779 | struct dma_ops_domain *dma_dom, | ||
780 | bool populate, gfp_t gfp) | 942 | bool populate, gfp_t gfp) |
781 | { | 943 | { |
782 | int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; | 944 | int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; |
945 | struct amd_iommu *iommu; | ||
783 | int i; | 946 | int i; |
784 | 947 | ||
785 | #ifdef CONFIG_IOMMU_STRESS | 948 | #ifdef CONFIG_IOMMU_STRESS |
@@ -819,14 +982,17 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
819 | dma_dom->aperture_size += APERTURE_RANGE_SIZE; | 982 | dma_dom->aperture_size += APERTURE_RANGE_SIZE; |
820 | 983 | ||
821 | /* Initialize the exclusion range if necessary */ | 984 | /* Initialize the exclusion range if necessary */ |
822 | if (iommu->exclusion_start && | 985 | for_each_iommu(iommu) { |
823 | iommu->exclusion_start >= dma_dom->aperture[index]->offset && | 986 | if (iommu->exclusion_start && |
824 | iommu->exclusion_start < dma_dom->aperture_size) { | 987 | iommu->exclusion_start >= dma_dom->aperture[index]->offset |
825 | unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; | 988 | && iommu->exclusion_start < dma_dom->aperture_size) { |
826 | int pages = iommu_num_pages(iommu->exclusion_start, | 989 | unsigned long startpage; |
827 | iommu->exclusion_length, | 990 | int pages = iommu_num_pages(iommu->exclusion_start, |
828 | PAGE_SIZE); | 991 | iommu->exclusion_length, |
829 | dma_ops_reserve_addresses(dma_dom, startpage, pages); | 992 | PAGE_SIZE); |
993 | startpage = iommu->exclusion_start >> PAGE_SHIFT; | ||
994 | dma_ops_reserve_addresses(dma_dom, startpage, pages); | ||
995 | } | ||
830 | } | 996 | } |
831 | 997 | ||
832 | /* | 998 | /* |
@@ -928,7 +1094,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev, | |||
928 | } | 1094 | } |
929 | 1095 | ||
930 | if (unlikely(address == -1)) | 1096 | if (unlikely(address == -1)) |
931 | address = bad_dma_address; | 1097 | address = DMA_ERROR_CODE; |
932 | 1098 | ||
933 | WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); | 1099 | WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); |
934 | 1100 | ||
@@ -973,6 +1139,31 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, | |||
973 | * | 1139 | * |
974 | ****************************************************************************/ | 1140 | ****************************************************************************/ |
975 | 1141 | ||
1142 | /* | ||
1143 | * This function adds a protection domain to the global protection domain list | ||
1144 | */ | ||
1145 | static void add_domain_to_list(struct protection_domain *domain) | ||
1146 | { | ||
1147 | unsigned long flags; | ||
1148 | |||
1149 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); | ||
1150 | list_add(&domain->list, &amd_iommu_pd_list); | ||
1151 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
1152 | } | ||
1153 | |||
1154 | /* | ||
1155 | * This function removes a protection domain from the global | ||
1156 | * protection domain list | ||
1157 | */ | ||
1158 | static void del_domain_from_list(struct protection_domain *domain) | ||
1159 | { | ||
1160 | unsigned long flags; | ||
1161 | |||
1162 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); | ||
1163 | list_del(&domain->list); | ||
1164 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
1165 | } | ||
1166 | |||
976 | static u16 domain_id_alloc(void) | 1167 | static u16 domain_id_alloc(void) |
977 | { | 1168 | { |
978 | unsigned long flags; | 1169 | unsigned long flags; |
@@ -1000,26 +1191,6 @@ static void domain_id_free(int id) | |||
1000 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1191 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1001 | } | 1192 | } |
1002 | 1193 | ||
1003 | /* | ||
1004 | * Used to reserve address ranges in the aperture (e.g. for exclusion | ||
1005 | * ranges. | ||
1006 | */ | ||
1007 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | ||
1008 | unsigned long start_page, | ||
1009 | unsigned int pages) | ||
1010 | { | ||
1011 | unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT; | ||
1012 | |||
1013 | if (start_page + pages > last_page) | ||
1014 | pages = last_page - start_page; | ||
1015 | |||
1016 | for (i = start_page; i < start_page + pages; ++i) { | ||
1017 | int index = i / APERTURE_RANGE_PAGES; | ||
1018 | int page = i % APERTURE_RANGE_PAGES; | ||
1019 | __set_bit(page, dom->aperture[index]->bitmap); | ||
1020 | } | ||
1021 | } | ||
1022 | |||
1023 | static void free_pagetable(struct protection_domain *domain) | 1194 | static void free_pagetable(struct protection_domain *domain) |
1024 | { | 1195 | { |
1025 | int i, j; | 1196 | int i, j; |
@@ -1061,6 +1232,8 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) | |||
1061 | if (!dom) | 1232 | if (!dom) |
1062 | return; | 1233 | return; |
1063 | 1234 | ||
1235 | del_domain_from_list(&dom->domain); | ||
1236 | |||
1064 | free_pagetable(&dom->domain); | 1237 | free_pagetable(&dom->domain); |
1065 | 1238 | ||
1066 | for (i = 0; i < APERTURE_MAX_RANGES; ++i) { | 1239 | for (i = 0; i < APERTURE_MAX_RANGES; ++i) { |
@@ -1078,7 +1251,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) | |||
1079 | * It also initializes the page table and the address allocator data | 1251 | * It also initializes the page table and the address allocator data |
1079 | * structures required for the dma_ops interface | 1252 | * structures required for the dma_ops interface |
1080 | */ | 1253 | */ |
1081 | static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | 1254 | static struct dma_ops_domain *dma_ops_domain_alloc(void) |
1082 | { | 1255 | { |
1083 | struct dma_ops_domain *dma_dom; | 1256 | struct dma_ops_domain *dma_dom; |
1084 | 1257 | ||
@@ -1091,6 +1264,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | |||
1091 | dma_dom->domain.id = domain_id_alloc(); | 1264 | dma_dom->domain.id = domain_id_alloc(); |
1092 | if (dma_dom->domain.id == 0) | 1265 | if (dma_dom->domain.id == 0) |
1093 | goto free_dma_dom; | 1266 | goto free_dma_dom; |
1267 | INIT_LIST_HEAD(&dma_dom->domain.dev_list); | ||
1094 | dma_dom->domain.mode = PAGE_MODE_2_LEVEL; | 1268 | dma_dom->domain.mode = PAGE_MODE_2_LEVEL; |
1095 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 1269 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
1096 | dma_dom->domain.flags = PD_DMA_OPS_MASK; | 1270 | dma_dom->domain.flags = PD_DMA_OPS_MASK; |
@@ -1101,7 +1275,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | |||
1101 | dma_dom->need_flush = false; | 1275 | dma_dom->need_flush = false; |
1102 | dma_dom->target_dev = 0xffff; | 1276 | dma_dom->target_dev = 0xffff; |
1103 | 1277 | ||
1104 | if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL)) | 1278 | add_domain_to_list(&dma_dom->domain); |
1279 | |||
1280 | if (alloc_new_range(dma_dom, true, GFP_KERNEL)) | ||
1105 | goto free_dma_dom; | 1281 | goto free_dma_dom; |
1106 | 1282 | ||
1107 | /* | 1283 | /* |
@@ -1129,22 +1305,6 @@ static bool dma_ops_domain(struct protection_domain *domain) | |||
1129 | return domain->flags & PD_DMA_OPS_MASK; | 1305 | return domain->flags & PD_DMA_OPS_MASK; |
1130 | } | 1306 | } |
1131 | 1307 | ||
1132 | /* | ||
1133 | * Find out the protection domain structure for a given PCI device. This | ||
1134 | * will give us the pointer to the page table root for example. | ||
1135 | */ | ||
1136 | static struct protection_domain *domain_for_device(u16 devid) | ||
1137 | { | ||
1138 | struct protection_domain *dom; | ||
1139 | unsigned long flags; | ||
1140 | |||
1141 | read_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1142 | dom = amd_iommu_pd_table[devid]; | ||
1143 | read_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1144 | |||
1145 | return dom; | ||
1146 | } | ||
1147 | |||
1148 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | 1308 | static void set_dte_entry(u16 devid, struct protection_domain *domain) |
1149 | { | 1309 | { |
1150 | u64 pte_root = virt_to_phys(domain->pt_root); | 1310 | u64 pte_root = virt_to_phys(domain->pt_root); |
@@ -1156,42 +1316,123 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain) | |||
1156 | amd_iommu_dev_table[devid].data[2] = domain->id; | 1316 | amd_iommu_dev_table[devid].data[2] = domain->id; |
1157 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | 1317 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); |
1158 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1318 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); |
1319 | } | ||
1320 | |||
1321 | static void clear_dte_entry(u16 devid) | ||
1322 | { | ||
1323 | /* remove entry from the device table seen by the hardware */ | ||
1324 | amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1325 | amd_iommu_dev_table[devid].data[1] = 0; | ||
1326 | amd_iommu_dev_table[devid].data[2] = 0; | ||
1159 | 1327 | ||
1160 | amd_iommu_pd_table[devid] = domain; | 1328 | amd_iommu_apply_erratum_63(devid); |
1329 | } | ||
1330 | |||
1331 | static void do_attach(struct device *dev, struct protection_domain *domain) | ||
1332 | { | ||
1333 | struct iommu_dev_data *dev_data; | ||
1334 | struct amd_iommu *iommu; | ||
1335 | u16 devid; | ||
1336 | |||
1337 | devid = get_device_id(dev); | ||
1338 | iommu = amd_iommu_rlookup_table[devid]; | ||
1339 | dev_data = get_dev_data(dev); | ||
1340 | |||
1341 | /* Update data structures */ | ||
1342 | dev_data->domain = domain; | ||
1343 | list_add(&dev_data->list, &domain->dev_list); | ||
1344 | set_dte_entry(devid, domain); | ||
1345 | |||
1346 | /* Do reference counting */ | ||
1347 | domain->dev_iommu[iommu->index] += 1; | ||
1348 | domain->dev_cnt += 1; | ||
1349 | |||
1350 | /* Flush the DTE entry */ | ||
1351 | iommu_flush_device(dev); | ||
1352 | } | ||
1353 | |||
1354 | static void do_detach(struct device *dev) | ||
1355 | { | ||
1356 | struct iommu_dev_data *dev_data; | ||
1357 | struct amd_iommu *iommu; | ||
1358 | u16 devid; | ||
1359 | |||
1360 | devid = get_device_id(dev); | ||
1361 | iommu = amd_iommu_rlookup_table[devid]; | ||
1362 | dev_data = get_dev_data(dev); | ||
1363 | |||
1364 | /* decrease reference counters */ | ||
1365 | dev_data->domain->dev_iommu[iommu->index] -= 1; | ||
1366 | dev_data->domain->dev_cnt -= 1; | ||
1367 | |||
1368 | /* Update data structures */ | ||
1369 | dev_data->domain = NULL; | ||
1370 | list_del(&dev_data->list); | ||
1371 | clear_dte_entry(devid); | ||
1372 | |||
1373 | /* Flush the DTE entry */ | ||
1374 | iommu_flush_device(dev); | ||
1161 | } | 1375 | } |
1162 | 1376 | ||
1163 | /* | 1377 | /* |
1164 | * If a device is not yet associated with a domain, this function | 1378 | * If a device is not yet associated with a domain, this function |
1165 | * assigns it to one and makes it visible to the hardware | 1379 | * assigns it to one and makes it visible to the hardware |
1166 | */ | 1380 | */ |
1167 | static void __attach_device(struct amd_iommu *iommu, | 1381 | static int __attach_device(struct device *dev, |
1168 | struct protection_domain *domain, | 1382 | struct protection_domain *domain) |
1169 | u16 devid) | ||
1170 | { | 1383 | { |
1384 | struct iommu_dev_data *dev_data, *alias_data; | ||
1385 | |||
1386 | dev_data = get_dev_data(dev); | ||
1387 | alias_data = get_dev_data(dev_data->alias); | ||
1388 | |||
1389 | if (!alias_data) | ||
1390 | return -EINVAL; | ||
1391 | |||
1171 | /* lock domain */ | 1392 | /* lock domain */ |
1172 | spin_lock(&domain->lock); | 1393 | spin_lock(&domain->lock); |
1173 | 1394 | ||
1174 | /* update DTE entry */ | 1395 | /* Some sanity checks */ |
1175 | set_dte_entry(devid, domain); | 1396 | if (alias_data->domain != NULL && |
1397 | alias_data->domain != domain) | ||
1398 | return -EBUSY; | ||
1176 | 1399 | ||
1177 | domain->dev_cnt += 1; | 1400 | if (dev_data->domain != NULL && |
1401 | dev_data->domain != domain) | ||
1402 | return -EBUSY; | ||
1403 | |||
1404 | /* Do real assignment */ | ||
1405 | if (dev_data->alias != dev) { | ||
1406 | alias_data = get_dev_data(dev_data->alias); | ||
1407 | if (alias_data->domain == NULL) | ||
1408 | do_attach(dev_data->alias, domain); | ||
1409 | |||
1410 | atomic_inc(&alias_data->bind); | ||
1411 | } | ||
1412 | |||
1413 | if (dev_data->domain == NULL) | ||
1414 | do_attach(dev, domain); | ||
1415 | |||
1416 | atomic_inc(&dev_data->bind); | ||
1178 | 1417 | ||
1179 | /* ready */ | 1418 | /* ready */ |
1180 | spin_unlock(&domain->lock); | 1419 | spin_unlock(&domain->lock); |
1420 | |||
1421 | return 0; | ||
1181 | } | 1422 | } |
1182 | 1423 | ||
1183 | /* | 1424 | /* |
1184 | * If a device is not yet associated with a domain, this function | 1425 | * If a device is not yet associated with a domain, this function |
1185 | * assigns it to one and makes it visible to the hardware | 1426 | * assigns it to one and makes it visible to the hardware |
1186 | */ | 1427 | */ |
1187 | static void attach_device(struct amd_iommu *iommu, | 1428 | static int attach_device(struct device *dev, |
1188 | struct protection_domain *domain, | 1429 | struct protection_domain *domain) |
1189 | u16 devid) | ||
1190 | { | 1430 | { |
1191 | unsigned long flags; | 1431 | unsigned long flags; |
1432 | int ret; | ||
1192 | 1433 | ||
1193 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1434 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1194 | __attach_device(iommu, domain, devid); | 1435 | ret = __attach_device(dev, domain); |
1195 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1436 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1196 | 1437 | ||
1197 | /* | 1438 | /* |
@@ -1199,98 +1440,125 @@ static void attach_device(struct amd_iommu *iommu, | |||
1199 | * left the caches in the IOMMU dirty. So we have to flush | 1440 | * left the caches in the IOMMU dirty. So we have to flush |
1200 | * here to evict all dirty stuff. | 1441 | * here to evict all dirty stuff. |
1201 | */ | 1442 | */ |
1202 | iommu_queue_inv_dev_entry(iommu, devid); | 1443 | iommu_flush_tlb_pde(domain); |
1203 | iommu_flush_tlb_pde(iommu, domain->id); | 1444 | |
1445 | return ret; | ||
1204 | } | 1446 | } |
1205 | 1447 | ||
1206 | /* | 1448 | /* |
1207 | * Removes a device from a protection domain (unlocked) | 1449 | * Removes a device from a protection domain (unlocked) |
1208 | */ | 1450 | */ |
1209 | static void __detach_device(struct protection_domain *domain, u16 devid) | 1451 | static void __detach_device(struct device *dev) |
1210 | { | 1452 | { |
1453 | struct iommu_dev_data *dev_data = get_dev_data(dev); | ||
1454 | struct iommu_dev_data *alias_data; | ||
1455 | unsigned long flags; | ||
1211 | 1456 | ||
1212 | /* lock domain */ | 1457 | BUG_ON(!dev_data->domain); |
1213 | spin_lock(&domain->lock); | ||
1214 | |||
1215 | /* remove domain from the lookup table */ | ||
1216 | amd_iommu_pd_table[devid] = NULL; | ||
1217 | 1458 | ||
1218 | /* remove entry from the device table seen by the hardware */ | 1459 | spin_lock_irqsave(&dev_data->domain->lock, flags); |
1219 | amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1220 | amd_iommu_dev_table[devid].data[1] = 0; | ||
1221 | amd_iommu_dev_table[devid].data[2] = 0; | ||
1222 | 1460 | ||
1223 | amd_iommu_apply_erratum_63(devid); | 1461 | if (dev_data->alias != dev) { |
1462 | alias_data = get_dev_data(dev_data->alias); | ||
1463 | if (atomic_dec_and_test(&alias_data->bind)) | ||
1464 | do_detach(dev_data->alias); | ||
1465 | } | ||
1224 | 1466 | ||
1225 | /* decrease reference counter */ | 1467 | if (atomic_dec_and_test(&dev_data->bind)) |
1226 | domain->dev_cnt -= 1; | 1468 | do_detach(dev); |
1227 | 1469 | ||
1228 | /* ready */ | 1470 | spin_unlock_irqrestore(&dev_data->domain->lock, flags); |
1229 | spin_unlock(&domain->lock); | ||
1230 | 1471 | ||
1231 | /* | 1472 | /* |
1232 | * If we run in passthrough mode the device must be assigned to the | 1473 | * If we run in passthrough mode the device must be assigned to the |
1233 | * passthrough domain if it is detached from any other domain | 1474 | * passthrough domain if it is detached from any other domain |
1234 | */ | 1475 | */ |
1235 | if (iommu_pass_through) { | 1476 | if (iommu_pass_through && dev_data->domain == NULL) |
1236 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | 1477 | __attach_device(dev, pt_domain); |
1237 | __attach_device(iommu, pt_domain, devid); | ||
1238 | } | ||
1239 | } | 1478 | } |
1240 | 1479 | ||
1241 | /* | 1480 | /* |
1242 | * Removes a device from a protection domain (with devtable_lock held) | 1481 | * Removes a device from a protection domain (with devtable_lock held) |
1243 | */ | 1482 | */ |
1244 | static void detach_device(struct protection_domain *domain, u16 devid) | 1483 | static void detach_device(struct device *dev) |
1245 | { | 1484 | { |
1246 | unsigned long flags; | 1485 | unsigned long flags; |
1247 | 1486 | ||
1248 | /* lock device table */ | 1487 | /* lock device table */ |
1249 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1488 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1250 | __detach_device(domain, devid); | 1489 | __detach_device(dev); |
1251 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1490 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1252 | } | 1491 | } |
1253 | 1492 | ||
1493 | /* | ||
1494 | * Find out the protection domain structure for a given PCI device. This | ||
1495 | * will give us the pointer to the page table root for example. | ||
1496 | */ | ||
1497 | static struct protection_domain *domain_for_device(struct device *dev) | ||
1498 | { | ||
1499 | struct protection_domain *dom; | ||
1500 | struct iommu_dev_data *dev_data, *alias_data; | ||
1501 | unsigned long flags; | ||
1502 | u16 devid, alias; | ||
1503 | |||
1504 | devid = get_device_id(dev); | ||
1505 | alias = amd_iommu_alias_table[devid]; | ||
1506 | dev_data = get_dev_data(dev); | ||
1507 | alias_data = get_dev_data(dev_data->alias); | ||
1508 | if (!alias_data) | ||
1509 | return NULL; | ||
1510 | |||
1511 | read_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1512 | dom = dev_data->domain; | ||
1513 | if (dom == NULL && | ||
1514 | alias_data->domain != NULL) { | ||
1515 | __attach_device(dev, alias_data->domain); | ||
1516 | dom = alias_data->domain; | ||
1517 | } | ||
1518 | |||
1519 | read_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1520 | |||
1521 | return dom; | ||
1522 | } | ||
1523 | |||
1254 | static int device_change_notifier(struct notifier_block *nb, | 1524 | static int device_change_notifier(struct notifier_block *nb, |
1255 | unsigned long action, void *data) | 1525 | unsigned long action, void *data) |
1256 | { | 1526 | { |
1257 | struct device *dev = data; | 1527 | struct device *dev = data; |
1258 | struct pci_dev *pdev = to_pci_dev(dev); | 1528 | u16 devid; |
1259 | u16 devid = calc_devid(pdev->bus->number, pdev->devfn); | ||
1260 | struct protection_domain *domain; | 1529 | struct protection_domain *domain; |
1261 | struct dma_ops_domain *dma_domain; | 1530 | struct dma_ops_domain *dma_domain; |
1262 | struct amd_iommu *iommu; | 1531 | struct amd_iommu *iommu; |
1263 | unsigned long flags; | 1532 | unsigned long flags; |
1264 | 1533 | ||
1265 | if (devid > amd_iommu_last_bdf) | 1534 | if (!check_device(dev)) |
1266 | goto out; | 1535 | return 0; |
1267 | |||
1268 | devid = amd_iommu_alias_table[devid]; | ||
1269 | |||
1270 | iommu = amd_iommu_rlookup_table[devid]; | ||
1271 | if (iommu == NULL) | ||
1272 | goto out; | ||
1273 | |||
1274 | domain = domain_for_device(devid); | ||
1275 | 1536 | ||
1276 | if (domain && !dma_ops_domain(domain)) | 1537 | devid = get_device_id(dev); |
1277 | WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound " | 1538 | iommu = amd_iommu_rlookup_table[devid]; |
1278 | "to a non-dma-ops domain\n", dev_name(dev)); | ||
1279 | 1539 | ||
1280 | switch (action) { | 1540 | switch (action) { |
1281 | case BUS_NOTIFY_UNBOUND_DRIVER: | 1541 | case BUS_NOTIFY_UNBOUND_DRIVER: |
1542 | |||
1543 | domain = domain_for_device(dev); | ||
1544 | |||
1282 | if (!domain) | 1545 | if (!domain) |
1283 | goto out; | 1546 | goto out; |
1284 | if (iommu_pass_through) | 1547 | if (iommu_pass_through) |
1285 | break; | 1548 | break; |
1286 | detach_device(domain, devid); | 1549 | detach_device(dev); |
1287 | break; | 1550 | break; |
1288 | case BUS_NOTIFY_ADD_DEVICE: | 1551 | case BUS_NOTIFY_ADD_DEVICE: |
1552 | |||
1553 | iommu_init_device(dev); | ||
1554 | |||
1555 | domain = domain_for_device(dev); | ||
1556 | |||
1289 | /* allocate a protection domain if a device is added */ | 1557 | /* allocate a protection domain if a device is added */ |
1290 | dma_domain = find_protection_domain(devid); | 1558 | dma_domain = find_protection_domain(devid); |
1291 | if (dma_domain) | 1559 | if (dma_domain) |
1292 | goto out; | 1560 | goto out; |
1293 | dma_domain = dma_ops_domain_alloc(iommu); | 1561 | dma_domain = dma_ops_domain_alloc(); |
1294 | if (!dma_domain) | 1562 | if (!dma_domain) |
1295 | goto out; | 1563 | goto out; |
1296 | dma_domain->target_dev = devid; | 1564 | dma_domain->target_dev = devid; |
@@ -1300,11 +1568,15 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1300 | spin_unlock_irqrestore(&iommu_pd_list_lock, flags); | 1568 | spin_unlock_irqrestore(&iommu_pd_list_lock, flags); |
1301 | 1569 | ||
1302 | break; | 1570 | break; |
1571 | case BUS_NOTIFY_DEL_DEVICE: | ||
1572 | |||
1573 | iommu_uninit_device(dev); | ||
1574 | |||
1303 | default: | 1575 | default: |
1304 | goto out; | 1576 | goto out; |
1305 | } | 1577 | } |
1306 | 1578 | ||
1307 | iommu_queue_inv_dev_entry(iommu, devid); | 1579 | iommu_flush_device(dev); |
1308 | iommu_completion_wait(iommu); | 1580 | iommu_completion_wait(iommu); |
1309 | 1581 | ||
1310 | out: | 1582 | out: |
@@ -1322,106 +1594,46 @@ static struct notifier_block device_nb = { | |||
1322 | *****************************************************************************/ | 1594 | *****************************************************************************/ |
1323 | 1595 | ||
1324 | /* | 1596 | /* |
1325 | * This function checks if the driver got a valid device from the caller to | ||
1326 | * avoid dereferencing invalid pointers. | ||
1327 | */ | ||
1328 | static bool check_device(struct device *dev) | ||
1329 | { | ||
1330 | if (!dev || !dev->dma_mask) | ||
1331 | return false; | ||
1332 | |||
1333 | return true; | ||
1334 | } | ||
1335 | |||
1336 | /* | ||
1337 | * In this function the list of preallocated protection domains is traversed to | ||
1338 | * find the domain for a specific device | ||
1339 | */ | ||
1340 | static struct dma_ops_domain *find_protection_domain(u16 devid) | ||
1341 | { | ||
1342 | struct dma_ops_domain *entry, *ret = NULL; | ||
1343 | unsigned long flags; | ||
1344 | |||
1345 | if (list_empty(&iommu_pd_list)) | ||
1346 | return NULL; | ||
1347 | |||
1348 | spin_lock_irqsave(&iommu_pd_list_lock, flags); | ||
1349 | |||
1350 | list_for_each_entry(entry, &iommu_pd_list, list) { | ||
1351 | if (entry->target_dev == devid) { | ||
1352 | ret = entry; | ||
1353 | break; | ||
1354 | } | ||
1355 | } | ||
1356 | |||
1357 | spin_unlock_irqrestore(&iommu_pd_list_lock, flags); | ||
1358 | |||
1359 | return ret; | ||
1360 | } | ||
1361 | |||
1362 | /* | ||
1363 | * In the dma_ops path we only have the struct device. This function | 1597 | * In the dma_ops path we only have the struct device. This function |
1364 | * finds the corresponding IOMMU, the protection domain and the | 1598 | * finds the corresponding IOMMU, the protection domain and the |
1365 | * requestor id for a given device. | 1599 | * requestor id for a given device. |
1366 | * If the device is not yet associated with a domain this is also done | 1600 | * If the device is not yet associated with a domain this is also done |
1367 | * in this function. | 1601 | * in this function. |
1368 | */ | 1602 | */ |
1369 | static int get_device_resources(struct device *dev, | 1603 | static struct protection_domain *get_domain(struct device *dev) |
1370 | struct amd_iommu **iommu, | ||
1371 | struct protection_domain **domain, | ||
1372 | u16 *bdf) | ||
1373 | { | 1604 | { |
1605 | struct protection_domain *domain; | ||
1374 | struct dma_ops_domain *dma_dom; | 1606 | struct dma_ops_domain *dma_dom; |
1375 | struct pci_dev *pcidev; | 1607 | u16 devid = get_device_id(dev); |
1376 | u16 _bdf; | ||
1377 | |||
1378 | *iommu = NULL; | ||
1379 | *domain = NULL; | ||
1380 | *bdf = 0xffff; | ||
1381 | |||
1382 | if (dev->bus != &pci_bus_type) | ||
1383 | return 0; | ||
1384 | 1608 | ||
1385 | pcidev = to_pci_dev(dev); | 1609 | if (!check_device(dev)) |
1386 | _bdf = calc_devid(pcidev->bus->number, pcidev->devfn); | 1610 | return ERR_PTR(-EINVAL); |
1387 | 1611 | ||
1388 | /* device not translated by any IOMMU in the system? */ | 1612 | domain = domain_for_device(dev); |
1389 | if (_bdf > amd_iommu_last_bdf) | 1613 | if (domain != NULL && !dma_ops_domain(domain)) |
1390 | return 0; | 1614 | return ERR_PTR(-EBUSY); |
1391 | 1615 | ||
1392 | *bdf = amd_iommu_alias_table[_bdf]; | 1616 | if (domain != NULL) |
1617 | return domain; | ||
1393 | 1618 | ||
1394 | *iommu = amd_iommu_rlookup_table[*bdf]; | 1619 | /* Device not bound yet - bind it */ |
1395 | if (*iommu == NULL) | 1620 | dma_dom = find_protection_domain(devid); |
1396 | return 0; | 1621 | if (!dma_dom) |
1397 | *domain = domain_for_device(*bdf); | 1622 | dma_dom = amd_iommu_rlookup_table[devid]->default_dom; |
1398 | if (*domain == NULL) { | 1623 | attach_device(dev, &dma_dom->domain); |
1399 | dma_dom = find_protection_domain(*bdf); | 1624 | DUMP_printk("Using protection domain %d for device %s\n", |
1400 | if (!dma_dom) | 1625 | dma_dom->domain.id, dev_name(dev)); |
1401 | dma_dom = (*iommu)->default_dom; | ||
1402 | *domain = &dma_dom->domain; | ||
1403 | attach_device(*iommu, *domain, *bdf); | ||
1404 | DUMP_printk("Using protection domain %d for device %s\n", | ||
1405 | (*domain)->id, dev_name(dev)); | ||
1406 | } | ||
1407 | |||
1408 | if (domain_for_device(_bdf) == NULL) | ||
1409 | attach_device(*iommu, *domain, _bdf); | ||
1410 | 1626 | ||
1411 | return 1; | 1627 | return &dma_dom->domain; |
1412 | } | 1628 | } |
1413 | 1629 | ||
1414 | static void update_device_table(struct protection_domain *domain) | 1630 | static void update_device_table(struct protection_domain *domain) |
1415 | { | 1631 | { |
1416 | unsigned long flags; | 1632 | struct iommu_dev_data *dev_data; |
1417 | int i; | ||
1418 | 1633 | ||
1419 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 1634 | list_for_each_entry(dev_data, &domain->dev_list, list) { |
1420 | if (amd_iommu_pd_table[i] != domain) | 1635 | u16 devid = get_device_id(dev_data->dev); |
1421 | continue; | 1636 | set_dte_entry(devid, domain); |
1422 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1423 | set_dte_entry(i, domain); | ||
1424 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1425 | } | 1637 | } |
1426 | } | 1638 | } |
1427 | 1639 | ||
@@ -1431,76 +1643,13 @@ static void update_domain(struct protection_domain *domain) | |||
1431 | return; | 1643 | return; |
1432 | 1644 | ||
1433 | update_device_table(domain); | 1645 | update_device_table(domain); |
1434 | flush_devices_by_domain(domain); | 1646 | iommu_flush_domain_devices(domain); |
1435 | iommu_flush_domain(domain->id); | 1647 | iommu_flush_tlb_pde(domain); |
1436 | 1648 | ||
1437 | domain->updated = false; | 1649 | domain->updated = false; |
1438 | } | 1650 | } |
1439 | 1651 | ||
1440 | /* | 1652 | /* |
1441 | * This function is used to add another level to an IO page table. Adding | ||
1442 | * another level increases the size of the address space by 9 bits to a size up | ||
1443 | * to 64 bits. | ||
1444 | */ | ||
1445 | static bool increase_address_space(struct protection_domain *domain, | ||
1446 | gfp_t gfp) | ||
1447 | { | ||
1448 | u64 *pte; | ||
1449 | |||
1450 | if (domain->mode == PAGE_MODE_6_LEVEL) | ||
1451 | /* address space already 64 bit large */ | ||
1452 | return false; | ||
1453 | |||
1454 | pte = (void *)get_zeroed_page(gfp); | ||
1455 | if (!pte) | ||
1456 | return false; | ||
1457 | |||
1458 | *pte = PM_LEVEL_PDE(domain->mode, | ||
1459 | virt_to_phys(domain->pt_root)); | ||
1460 | domain->pt_root = pte; | ||
1461 | domain->mode += 1; | ||
1462 | domain->updated = true; | ||
1463 | |||
1464 | return true; | ||
1465 | } | ||
1466 | |||
1467 | static u64 *alloc_pte(struct protection_domain *domain, | ||
1468 | unsigned long address, | ||
1469 | int end_lvl, | ||
1470 | u64 **pte_page, | ||
1471 | gfp_t gfp) | ||
1472 | { | ||
1473 | u64 *pte, *page; | ||
1474 | int level; | ||
1475 | |||
1476 | while (address > PM_LEVEL_SIZE(domain->mode)) | ||
1477 | increase_address_space(domain, gfp); | ||
1478 | |||
1479 | level = domain->mode - 1; | ||
1480 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
1481 | |||
1482 | while (level > end_lvl) { | ||
1483 | if (!IOMMU_PTE_PRESENT(*pte)) { | ||
1484 | page = (u64 *)get_zeroed_page(gfp); | ||
1485 | if (!page) | ||
1486 | return NULL; | ||
1487 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | ||
1488 | } | ||
1489 | |||
1490 | level -= 1; | ||
1491 | |||
1492 | pte = IOMMU_PTE_PAGE(*pte); | ||
1493 | |||
1494 | if (pte_page && level == end_lvl) | ||
1495 | *pte_page = pte; | ||
1496 | |||
1497 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | ||
1498 | } | ||
1499 | |||
1500 | return pte; | ||
1501 | } | ||
1502 | |||
1503 | /* | ||
1504 | * This function fetches the PTE for a given address in the aperture | 1653 | * This function fetches the PTE for a given address in the aperture |
1505 | */ | 1654 | */ |
1506 | static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | 1655 | static u64* dma_ops_get_pte(struct dma_ops_domain *dom, |
@@ -1530,8 +1679,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1530 | * This is the generic map function. It maps one 4kb page at paddr to | 1679 | * This is the generic map function. It maps one 4kb page at paddr to |
1531 | * the given address in the DMA address space for the domain. | 1680 | * the given address in the DMA address space for the domain. |
1532 | */ | 1681 | */ |
1533 | static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu, | 1682 | static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom, |
1534 | struct dma_ops_domain *dom, | ||
1535 | unsigned long address, | 1683 | unsigned long address, |
1536 | phys_addr_t paddr, | 1684 | phys_addr_t paddr, |
1537 | int direction) | 1685 | int direction) |
@@ -1544,7 +1692,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu, | |||
1544 | 1692 | ||
1545 | pte = dma_ops_get_pte(dom, address); | 1693 | pte = dma_ops_get_pte(dom, address); |
1546 | if (!pte) | 1694 | if (!pte) |
1547 | return bad_dma_address; | 1695 | return DMA_ERROR_CODE; |
1548 | 1696 | ||
1549 | __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; | 1697 | __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; |
1550 | 1698 | ||
@@ -1565,8 +1713,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu, | |||
1565 | /* | 1713 | /* |
1566 | * The generic unmapping function for one page in the DMA address space. | 1714 | * The generic unmapping function for one page in the DMA address space. |
1567 | */ | 1715 | */ |
1568 | static void dma_ops_domain_unmap(struct amd_iommu *iommu, | 1716 | static void dma_ops_domain_unmap(struct dma_ops_domain *dom, |
1569 | struct dma_ops_domain *dom, | ||
1570 | unsigned long address) | 1717 | unsigned long address) |
1571 | { | 1718 | { |
1572 | struct aperture_range *aperture; | 1719 | struct aperture_range *aperture; |
@@ -1597,7 +1744,6 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu, | |||
1597 | * Must be called with the domain lock held. | 1744 | * Must be called with the domain lock held. |
1598 | */ | 1745 | */ |
1599 | static dma_addr_t __map_single(struct device *dev, | 1746 | static dma_addr_t __map_single(struct device *dev, |
1600 | struct amd_iommu *iommu, | ||
1601 | struct dma_ops_domain *dma_dom, | 1747 | struct dma_ops_domain *dma_dom, |
1602 | phys_addr_t paddr, | 1748 | phys_addr_t paddr, |
1603 | size_t size, | 1749 | size_t size, |
@@ -1625,7 +1771,7 @@ static dma_addr_t __map_single(struct device *dev, | |||
1625 | retry: | 1771 | retry: |
1626 | address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, | 1772 | address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, |
1627 | dma_mask); | 1773 | dma_mask); |
1628 | if (unlikely(address == bad_dma_address)) { | 1774 | if (unlikely(address == DMA_ERROR_CODE)) { |
1629 | /* | 1775 | /* |
1630 | * setting next_address here will let the address | 1776 | * setting next_address here will let the address |
1631 | * allocator only scan the new allocated range in the | 1777 | * allocator only scan the new allocated range in the |
@@ -1633,7 +1779,7 @@ retry: | |||
1633 | */ | 1779 | */ |
1634 | dma_dom->next_address = dma_dom->aperture_size; | 1780 | dma_dom->next_address = dma_dom->aperture_size; |
1635 | 1781 | ||
1636 | if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC)) | 1782 | if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) |
1637 | goto out; | 1783 | goto out; |
1638 | 1784 | ||
1639 | /* | 1785 | /* |
@@ -1645,8 +1791,8 @@ retry: | |||
1645 | 1791 | ||
1646 | start = address; | 1792 | start = address; |
1647 | for (i = 0; i < pages; ++i) { | 1793 | for (i = 0; i < pages; ++i) { |
1648 | ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir); | 1794 | ret = dma_ops_domain_map(dma_dom, start, paddr, dir); |
1649 | if (ret == bad_dma_address) | 1795 | if (ret == DMA_ERROR_CODE) |
1650 | goto out_unmap; | 1796 | goto out_unmap; |
1651 | 1797 | ||
1652 | paddr += PAGE_SIZE; | 1798 | paddr += PAGE_SIZE; |
@@ -1657,10 +1803,10 @@ retry: | |||
1657 | ADD_STATS_COUNTER(alloced_io_mem, size); | 1803 | ADD_STATS_COUNTER(alloced_io_mem, size); |
1658 | 1804 | ||
1659 | if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { | 1805 | if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { |
1660 | iommu_flush_tlb(iommu, dma_dom->domain.id); | 1806 | iommu_flush_tlb(&dma_dom->domain); |
1661 | dma_dom->need_flush = false; | 1807 | dma_dom->need_flush = false; |
1662 | } else if (unlikely(iommu_has_npcache(iommu))) | 1808 | } else if (unlikely(amd_iommu_np_cache)) |
1663 | iommu_flush_pages(iommu, dma_dom->domain.id, address, size); | 1809 | iommu_flush_pages(&dma_dom->domain, address, size); |
1664 | 1810 | ||
1665 | out: | 1811 | out: |
1666 | return address; | 1812 | return address; |
@@ -1669,20 +1815,19 @@ out_unmap: | |||
1669 | 1815 | ||
1670 | for (--i; i >= 0; --i) { | 1816 | for (--i; i >= 0; --i) { |
1671 | start -= PAGE_SIZE; | 1817 | start -= PAGE_SIZE; |
1672 | dma_ops_domain_unmap(iommu, dma_dom, start); | 1818 | dma_ops_domain_unmap(dma_dom, start); |
1673 | } | 1819 | } |
1674 | 1820 | ||
1675 | dma_ops_free_addresses(dma_dom, address, pages); | 1821 | dma_ops_free_addresses(dma_dom, address, pages); |
1676 | 1822 | ||
1677 | return bad_dma_address; | 1823 | return DMA_ERROR_CODE; |
1678 | } | 1824 | } |
1679 | 1825 | ||
1680 | /* | 1826 | /* |
1681 | * Does the reverse of the __map_single function. Must be called with | 1827 | * Does the reverse of the __map_single function. Must be called with |
1682 | * the domain lock held too | 1828 | * the domain lock held too |
1683 | */ | 1829 | */ |
1684 | static void __unmap_single(struct amd_iommu *iommu, | 1830 | static void __unmap_single(struct dma_ops_domain *dma_dom, |
1685 | struct dma_ops_domain *dma_dom, | ||
1686 | dma_addr_t dma_addr, | 1831 | dma_addr_t dma_addr, |
1687 | size_t size, | 1832 | size_t size, |
1688 | int dir) | 1833 | int dir) |
@@ -1690,7 +1835,7 @@ static void __unmap_single(struct amd_iommu *iommu, | |||
1690 | dma_addr_t i, start; | 1835 | dma_addr_t i, start; |
1691 | unsigned int pages; | 1836 | unsigned int pages; |
1692 | 1837 | ||
1693 | if ((dma_addr == bad_dma_address) || | 1838 | if ((dma_addr == DMA_ERROR_CODE) || |
1694 | (dma_addr + size > dma_dom->aperture_size)) | 1839 | (dma_addr + size > dma_dom->aperture_size)) |
1695 | return; | 1840 | return; |
1696 | 1841 | ||
@@ -1699,7 +1844,7 @@ static void __unmap_single(struct amd_iommu *iommu, | |||
1699 | start = dma_addr; | 1844 | start = dma_addr; |
1700 | 1845 | ||
1701 | for (i = 0; i < pages; ++i) { | 1846 | for (i = 0; i < pages; ++i) { |
1702 | dma_ops_domain_unmap(iommu, dma_dom, start); | 1847 | dma_ops_domain_unmap(dma_dom, start); |
1703 | start += PAGE_SIZE; | 1848 | start += PAGE_SIZE; |
1704 | } | 1849 | } |
1705 | 1850 | ||
@@ -1708,7 +1853,7 @@ static void __unmap_single(struct amd_iommu *iommu, | |||
1708 | dma_ops_free_addresses(dma_dom, dma_addr, pages); | 1853 | dma_ops_free_addresses(dma_dom, dma_addr, pages); |
1709 | 1854 | ||
1710 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { | 1855 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { |
1711 | iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size); | 1856 | iommu_flush_pages(&dma_dom->domain, dma_addr, size); |
1712 | dma_dom->need_flush = false; | 1857 | dma_dom->need_flush = false; |
1713 | } | 1858 | } |
1714 | } | 1859 | } |
@@ -1722,36 +1867,29 @@ static dma_addr_t map_page(struct device *dev, struct page *page, | |||
1722 | struct dma_attrs *attrs) | 1867 | struct dma_attrs *attrs) |
1723 | { | 1868 | { |
1724 | unsigned long flags; | 1869 | unsigned long flags; |
1725 | struct amd_iommu *iommu; | ||
1726 | struct protection_domain *domain; | 1870 | struct protection_domain *domain; |
1727 | u16 devid; | ||
1728 | dma_addr_t addr; | 1871 | dma_addr_t addr; |
1729 | u64 dma_mask; | 1872 | u64 dma_mask; |
1730 | phys_addr_t paddr = page_to_phys(page) + offset; | 1873 | phys_addr_t paddr = page_to_phys(page) + offset; |
1731 | 1874 | ||
1732 | INC_STATS_COUNTER(cnt_map_single); | 1875 | INC_STATS_COUNTER(cnt_map_single); |
1733 | 1876 | ||
1734 | if (!check_device(dev)) | 1877 | domain = get_domain(dev); |
1735 | return bad_dma_address; | 1878 | if (PTR_ERR(domain) == -EINVAL) |
1736 | |||
1737 | dma_mask = *dev->dma_mask; | ||
1738 | |||
1739 | get_device_resources(dev, &iommu, &domain, &devid); | ||
1740 | |||
1741 | if (iommu == NULL || domain == NULL) | ||
1742 | /* device not handled by any AMD IOMMU */ | ||
1743 | return (dma_addr_t)paddr; | 1879 | return (dma_addr_t)paddr; |
1880 | else if (IS_ERR(domain)) | ||
1881 | return DMA_ERROR_CODE; | ||
1744 | 1882 | ||
1745 | if (!dma_ops_domain(domain)) | 1883 | dma_mask = *dev->dma_mask; |
1746 | return bad_dma_address; | ||
1747 | 1884 | ||
1748 | spin_lock_irqsave(&domain->lock, flags); | 1885 | spin_lock_irqsave(&domain->lock, flags); |
1749 | addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false, | 1886 | |
1887 | addr = __map_single(dev, domain->priv, paddr, size, dir, false, | ||
1750 | dma_mask); | 1888 | dma_mask); |
1751 | if (addr == bad_dma_address) | 1889 | if (addr == DMA_ERROR_CODE) |
1752 | goto out; | 1890 | goto out; |
1753 | 1891 | ||
1754 | iommu_completion_wait(iommu); | 1892 | iommu_flush_complete(domain); |
1755 | 1893 | ||
1756 | out: | 1894 | out: |
1757 | spin_unlock_irqrestore(&domain->lock, flags); | 1895 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -1766,25 +1904,19 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
1766 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1904 | enum dma_data_direction dir, struct dma_attrs *attrs) |
1767 | { | 1905 | { |
1768 | unsigned long flags; | 1906 | unsigned long flags; |
1769 | struct amd_iommu *iommu; | ||
1770 | struct protection_domain *domain; | 1907 | struct protection_domain *domain; |
1771 | u16 devid; | ||
1772 | 1908 | ||
1773 | INC_STATS_COUNTER(cnt_unmap_single); | 1909 | INC_STATS_COUNTER(cnt_unmap_single); |
1774 | 1910 | ||
1775 | if (!check_device(dev) || | 1911 | domain = get_domain(dev); |
1776 | !get_device_resources(dev, &iommu, &domain, &devid)) | 1912 | if (IS_ERR(domain)) |
1777 | /* device not handled by any AMD IOMMU */ | ||
1778 | return; | ||
1779 | |||
1780 | if (!dma_ops_domain(domain)) | ||
1781 | return; | 1913 | return; |
1782 | 1914 | ||
1783 | spin_lock_irqsave(&domain->lock, flags); | 1915 | spin_lock_irqsave(&domain->lock, flags); |
1784 | 1916 | ||
1785 | __unmap_single(iommu, domain->priv, dma_addr, size, dir); | 1917 | __unmap_single(domain->priv, dma_addr, size, dir); |
1786 | 1918 | ||
1787 | iommu_completion_wait(iommu); | 1919 | iommu_flush_complete(domain); |
1788 | 1920 | ||
1789 | spin_unlock_irqrestore(&domain->lock, flags); | 1921 | spin_unlock_irqrestore(&domain->lock, flags); |
1790 | } | 1922 | } |
@@ -1816,9 +1948,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
1816 | struct dma_attrs *attrs) | 1948 | struct dma_attrs *attrs) |
1817 | { | 1949 | { |
1818 | unsigned long flags; | 1950 | unsigned long flags; |
1819 | struct amd_iommu *iommu; | ||
1820 | struct protection_domain *domain; | 1951 | struct protection_domain *domain; |
1821 | u16 devid; | ||
1822 | int i; | 1952 | int i; |
1823 | struct scatterlist *s; | 1953 | struct scatterlist *s; |
1824 | phys_addr_t paddr; | 1954 | phys_addr_t paddr; |
@@ -1827,25 +1957,20 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
1827 | 1957 | ||
1828 | INC_STATS_COUNTER(cnt_map_sg); | 1958 | INC_STATS_COUNTER(cnt_map_sg); |
1829 | 1959 | ||
1830 | if (!check_device(dev)) | 1960 | domain = get_domain(dev); |
1961 | if (PTR_ERR(domain) == -EINVAL) | ||
1962 | return map_sg_no_iommu(dev, sglist, nelems, dir); | ||
1963 | else if (IS_ERR(domain)) | ||
1831 | return 0; | 1964 | return 0; |
1832 | 1965 | ||
1833 | dma_mask = *dev->dma_mask; | 1966 | dma_mask = *dev->dma_mask; |
1834 | 1967 | ||
1835 | get_device_resources(dev, &iommu, &domain, &devid); | ||
1836 | |||
1837 | if (!iommu || !domain) | ||
1838 | return map_sg_no_iommu(dev, sglist, nelems, dir); | ||
1839 | |||
1840 | if (!dma_ops_domain(domain)) | ||
1841 | return 0; | ||
1842 | |||
1843 | spin_lock_irqsave(&domain->lock, flags); | 1968 | spin_lock_irqsave(&domain->lock, flags); |
1844 | 1969 | ||
1845 | for_each_sg(sglist, s, nelems, i) { | 1970 | for_each_sg(sglist, s, nelems, i) { |
1846 | paddr = sg_phys(s); | 1971 | paddr = sg_phys(s); |
1847 | 1972 | ||
1848 | s->dma_address = __map_single(dev, iommu, domain->priv, | 1973 | s->dma_address = __map_single(dev, domain->priv, |
1849 | paddr, s->length, dir, false, | 1974 | paddr, s->length, dir, false, |
1850 | dma_mask); | 1975 | dma_mask); |
1851 | 1976 | ||
@@ -1856,7 +1981,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
1856 | goto unmap; | 1981 | goto unmap; |
1857 | } | 1982 | } |
1858 | 1983 | ||
1859 | iommu_completion_wait(iommu); | 1984 | iommu_flush_complete(domain); |
1860 | 1985 | ||
1861 | out: | 1986 | out: |
1862 | spin_unlock_irqrestore(&domain->lock, flags); | 1987 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -1865,7 +1990,7 @@ out: | |||
1865 | unmap: | 1990 | unmap: |
1866 | for_each_sg(sglist, s, mapped_elems, i) { | 1991 | for_each_sg(sglist, s, mapped_elems, i) { |
1867 | if (s->dma_address) | 1992 | if (s->dma_address) |
1868 | __unmap_single(iommu, domain->priv, s->dma_address, | 1993 | __unmap_single(domain->priv, s->dma_address, |
1869 | s->dma_length, dir); | 1994 | s->dma_length, dir); |
1870 | s->dma_address = s->dma_length = 0; | 1995 | s->dma_address = s->dma_length = 0; |
1871 | } | 1996 | } |
@@ -1884,30 +2009,25 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
1884 | struct dma_attrs *attrs) | 2009 | struct dma_attrs *attrs) |
1885 | { | 2010 | { |
1886 | unsigned long flags; | 2011 | unsigned long flags; |
1887 | struct amd_iommu *iommu; | ||
1888 | struct protection_domain *domain; | 2012 | struct protection_domain *domain; |
1889 | struct scatterlist *s; | 2013 | struct scatterlist *s; |
1890 | u16 devid; | ||
1891 | int i; | 2014 | int i; |
1892 | 2015 | ||
1893 | INC_STATS_COUNTER(cnt_unmap_sg); | 2016 | INC_STATS_COUNTER(cnt_unmap_sg); |
1894 | 2017 | ||
1895 | if (!check_device(dev) || | 2018 | domain = get_domain(dev); |
1896 | !get_device_resources(dev, &iommu, &domain, &devid)) | 2019 | if (IS_ERR(domain)) |
1897 | return; | ||
1898 | |||
1899 | if (!dma_ops_domain(domain)) | ||
1900 | return; | 2020 | return; |
1901 | 2021 | ||
1902 | spin_lock_irqsave(&domain->lock, flags); | 2022 | spin_lock_irqsave(&domain->lock, flags); |
1903 | 2023 | ||
1904 | for_each_sg(sglist, s, nelems, i) { | 2024 | for_each_sg(sglist, s, nelems, i) { |
1905 | __unmap_single(iommu, domain->priv, s->dma_address, | 2025 | __unmap_single(domain->priv, s->dma_address, |
1906 | s->dma_length, dir); | 2026 | s->dma_length, dir); |
1907 | s->dma_address = s->dma_length = 0; | 2027 | s->dma_address = s->dma_length = 0; |
1908 | } | 2028 | } |
1909 | 2029 | ||
1910 | iommu_completion_wait(iommu); | 2030 | iommu_flush_complete(domain); |
1911 | 2031 | ||
1912 | spin_unlock_irqrestore(&domain->lock, flags); | 2032 | spin_unlock_irqrestore(&domain->lock, flags); |
1913 | } | 2033 | } |
@@ -1920,49 +2040,44 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
1920 | { | 2040 | { |
1921 | unsigned long flags; | 2041 | unsigned long flags; |
1922 | void *virt_addr; | 2042 | void *virt_addr; |
1923 | struct amd_iommu *iommu; | ||
1924 | struct protection_domain *domain; | 2043 | struct protection_domain *domain; |
1925 | u16 devid; | ||
1926 | phys_addr_t paddr; | 2044 | phys_addr_t paddr; |
1927 | u64 dma_mask = dev->coherent_dma_mask; | 2045 | u64 dma_mask = dev->coherent_dma_mask; |
1928 | 2046 | ||
1929 | INC_STATS_COUNTER(cnt_alloc_coherent); | 2047 | INC_STATS_COUNTER(cnt_alloc_coherent); |
1930 | 2048 | ||
1931 | if (!check_device(dev)) | 2049 | domain = get_domain(dev); |
2050 | if (PTR_ERR(domain) == -EINVAL) { | ||
2051 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | ||
2052 | *dma_addr = __pa(virt_addr); | ||
2053 | return virt_addr; | ||
2054 | } else if (IS_ERR(domain)) | ||
1932 | return NULL; | 2055 | return NULL; |
1933 | 2056 | ||
1934 | if (!get_device_resources(dev, &iommu, &domain, &devid)) | 2057 | dma_mask = dev->coherent_dma_mask; |
1935 | flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 2058 | flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
2059 | flag |= __GFP_ZERO; | ||
1936 | 2060 | ||
1937 | flag |= __GFP_ZERO; | ||
1938 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | 2061 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); |
1939 | if (!virt_addr) | 2062 | if (!virt_addr) |
1940 | return NULL; | 2063 | return NULL; |
1941 | 2064 | ||
1942 | paddr = virt_to_phys(virt_addr); | 2065 | paddr = virt_to_phys(virt_addr); |
1943 | 2066 | ||
1944 | if (!iommu || !domain) { | ||
1945 | *dma_addr = (dma_addr_t)paddr; | ||
1946 | return virt_addr; | ||
1947 | } | ||
1948 | |||
1949 | if (!dma_ops_domain(domain)) | ||
1950 | goto out_free; | ||
1951 | |||
1952 | if (!dma_mask) | 2067 | if (!dma_mask) |
1953 | dma_mask = *dev->dma_mask; | 2068 | dma_mask = *dev->dma_mask; |
1954 | 2069 | ||
1955 | spin_lock_irqsave(&domain->lock, flags); | 2070 | spin_lock_irqsave(&domain->lock, flags); |
1956 | 2071 | ||
1957 | *dma_addr = __map_single(dev, iommu, domain->priv, paddr, | 2072 | *dma_addr = __map_single(dev, domain->priv, paddr, |
1958 | size, DMA_BIDIRECTIONAL, true, dma_mask); | 2073 | size, DMA_BIDIRECTIONAL, true, dma_mask); |
1959 | 2074 | ||
1960 | if (*dma_addr == bad_dma_address) { | 2075 | if (*dma_addr == DMA_ERROR_CODE) { |
1961 | spin_unlock_irqrestore(&domain->lock, flags); | 2076 | spin_unlock_irqrestore(&domain->lock, flags); |
1962 | goto out_free; | 2077 | goto out_free; |
1963 | } | 2078 | } |
1964 | 2079 | ||
1965 | iommu_completion_wait(iommu); | 2080 | iommu_flush_complete(domain); |
1966 | 2081 | ||
1967 | spin_unlock_irqrestore(&domain->lock, flags); | 2082 | spin_unlock_irqrestore(&domain->lock, flags); |
1968 | 2083 | ||
@@ -1982,28 +2097,19 @@ static void free_coherent(struct device *dev, size_t size, | |||
1982 | void *virt_addr, dma_addr_t dma_addr) | 2097 | void *virt_addr, dma_addr_t dma_addr) |
1983 | { | 2098 | { |
1984 | unsigned long flags; | 2099 | unsigned long flags; |
1985 | struct amd_iommu *iommu; | ||
1986 | struct protection_domain *domain; | 2100 | struct protection_domain *domain; |
1987 | u16 devid; | ||
1988 | 2101 | ||
1989 | INC_STATS_COUNTER(cnt_free_coherent); | 2102 | INC_STATS_COUNTER(cnt_free_coherent); |
1990 | 2103 | ||
1991 | if (!check_device(dev)) | 2104 | domain = get_domain(dev); |
1992 | return; | 2105 | if (IS_ERR(domain)) |
1993 | |||
1994 | get_device_resources(dev, &iommu, &domain, &devid); | ||
1995 | |||
1996 | if (!iommu || !domain) | ||
1997 | goto free_mem; | ||
1998 | |||
1999 | if (!dma_ops_domain(domain)) | ||
2000 | goto free_mem; | 2106 | goto free_mem; |
2001 | 2107 | ||
2002 | spin_lock_irqsave(&domain->lock, flags); | 2108 | spin_lock_irqsave(&domain->lock, flags); |
2003 | 2109 | ||
2004 | __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); | 2110 | __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); |
2005 | 2111 | ||
2006 | iommu_completion_wait(iommu); | 2112 | iommu_flush_complete(domain); |
2007 | 2113 | ||
2008 | spin_unlock_irqrestore(&domain->lock, flags); | 2114 | spin_unlock_irqrestore(&domain->lock, flags); |
2009 | 2115 | ||
@@ -2017,22 +2123,7 @@ free_mem: | |||
2017 | */ | 2123 | */ |
2018 | static int amd_iommu_dma_supported(struct device *dev, u64 mask) | 2124 | static int amd_iommu_dma_supported(struct device *dev, u64 mask) |
2019 | { | 2125 | { |
2020 | u16 bdf; | 2126 | return check_device(dev); |
2021 | struct pci_dev *pcidev; | ||
2022 | |||
2023 | /* No device or no PCI device */ | ||
2024 | if (!dev || dev->bus != &pci_bus_type) | ||
2025 | return 0; | ||
2026 | |||
2027 | pcidev = to_pci_dev(dev); | ||
2028 | |||
2029 | bdf = calc_devid(pcidev->bus->number, pcidev->devfn); | ||
2030 | |||
2031 | /* Out of our scope? */ | ||
2032 | if (bdf > amd_iommu_last_bdf) | ||
2033 | return 0; | ||
2034 | |||
2035 | return 1; | ||
2036 | } | 2127 | } |
2037 | 2128 | ||
2038 | /* | 2129 | /* |
@@ -2046,25 +2137,30 @@ static void prealloc_protection_domains(void) | |||
2046 | { | 2137 | { |
2047 | struct pci_dev *dev = NULL; | 2138 | struct pci_dev *dev = NULL; |
2048 | struct dma_ops_domain *dma_dom; | 2139 | struct dma_ops_domain *dma_dom; |
2049 | struct amd_iommu *iommu; | ||
2050 | u16 devid; | 2140 | u16 devid; |
2051 | 2141 | ||
2052 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 2142 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
2053 | devid = calc_devid(dev->bus->number, dev->devfn); | 2143 | |
2054 | if (devid > amd_iommu_last_bdf) | 2144 | /* Do we handle this device? */ |
2055 | continue; | 2145 | if (!check_device(&dev->dev)) |
2056 | devid = amd_iommu_alias_table[devid]; | ||
2057 | if (domain_for_device(devid)) | ||
2058 | continue; | 2146 | continue; |
2059 | iommu = amd_iommu_rlookup_table[devid]; | 2147 | |
2060 | if (!iommu) | 2148 | iommu_init_device(&dev->dev); |
2149 | |||
2150 | /* Is there already any domain for it? */ | ||
2151 | if (domain_for_device(&dev->dev)) | ||
2061 | continue; | 2152 | continue; |
2062 | dma_dom = dma_ops_domain_alloc(iommu); | 2153 | |
2154 | devid = get_device_id(&dev->dev); | ||
2155 | |||
2156 | dma_dom = dma_ops_domain_alloc(); | ||
2063 | if (!dma_dom) | 2157 | if (!dma_dom) |
2064 | continue; | 2158 | continue; |
2065 | init_unity_mappings_for_device(dma_dom, devid); | 2159 | init_unity_mappings_for_device(dma_dom, devid); |
2066 | dma_dom->target_dev = devid; | 2160 | dma_dom->target_dev = devid; |
2067 | 2161 | ||
2162 | attach_device(&dev->dev, &dma_dom->domain); | ||
2163 | |||
2068 | list_add_tail(&dma_dom->list, &iommu_pd_list); | 2164 | list_add_tail(&dma_dom->list, &iommu_pd_list); |
2069 | } | 2165 | } |
2070 | } | 2166 | } |
@@ -2093,7 +2189,7 @@ int __init amd_iommu_init_dma_ops(void) | |||
2093 | * protection domain will be assigned to the default one. | 2189 | * protection domain will be assigned to the default one. |
2094 | */ | 2190 | */ |
2095 | for_each_iommu(iommu) { | 2191 | for_each_iommu(iommu) { |
2096 | iommu->default_dom = dma_ops_domain_alloc(iommu); | 2192 | iommu->default_dom = dma_ops_domain_alloc(); |
2097 | if (iommu->default_dom == NULL) | 2193 | if (iommu->default_dom == NULL) |
2098 | return -ENOMEM; | 2194 | return -ENOMEM; |
2099 | iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; | 2195 | iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; |
@@ -2103,15 +2199,12 @@ int __init amd_iommu_init_dma_ops(void) | |||
2103 | } | 2199 | } |
2104 | 2200 | ||
2105 | /* | 2201 | /* |
2106 | * If device isolation is enabled, pre-allocate the protection | 2202 | * Pre-allocate the protection domains for each device. |
2107 | * domains for each device. | ||
2108 | */ | 2203 | */ |
2109 | if (amd_iommu_isolate) | 2204 | prealloc_protection_domains(); |
2110 | prealloc_protection_domains(); | ||
2111 | 2205 | ||
2112 | iommu_detected = 1; | 2206 | iommu_detected = 1; |
2113 | force_iommu = 1; | 2207 | swiotlb = 0; |
2114 | bad_dma_address = 0; | ||
2115 | #ifdef CONFIG_GART_IOMMU | 2208 | #ifdef CONFIG_GART_IOMMU |
2116 | gart_iommu_aperture_disabled = 1; | 2209 | gart_iommu_aperture_disabled = 1; |
2117 | gart_iommu_aperture = 0; | 2210 | gart_iommu_aperture = 0; |
@@ -2150,14 +2243,17 @@ free_domains: | |||
2150 | 2243 | ||
2151 | static void cleanup_domain(struct protection_domain *domain) | 2244 | static void cleanup_domain(struct protection_domain *domain) |
2152 | { | 2245 | { |
2246 | struct iommu_dev_data *dev_data, *next; | ||
2153 | unsigned long flags; | 2247 | unsigned long flags; |
2154 | u16 devid; | ||
2155 | 2248 | ||
2156 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 2249 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
2157 | 2250 | ||
2158 | for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) | 2251 | list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { |
2159 | if (amd_iommu_pd_table[devid] == domain) | 2252 | struct device *dev = dev_data->dev; |
2160 | __detach_device(domain, devid); | 2253 | |
2254 | do_detach(dev); | ||
2255 | atomic_set(&dev_data->bind, 0); | ||
2256 | } | ||
2161 | 2257 | ||
2162 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 2258 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
2163 | } | 2259 | } |
@@ -2167,6 +2263,8 @@ static void protection_domain_free(struct protection_domain *domain) | |||
2167 | if (!domain) | 2263 | if (!domain) |
2168 | return; | 2264 | return; |
2169 | 2265 | ||
2266 | del_domain_from_list(domain); | ||
2267 | |||
2170 | if (domain->id) | 2268 | if (domain->id) |
2171 | domain_id_free(domain->id); | 2269 | domain_id_free(domain->id); |
2172 | 2270 | ||
@@ -2185,6 +2283,9 @@ static struct protection_domain *protection_domain_alloc(void) | |||
2185 | domain->id = domain_id_alloc(); | 2283 | domain->id = domain_id_alloc(); |
2186 | if (!domain->id) | 2284 | if (!domain->id) |
2187 | goto out_err; | 2285 | goto out_err; |
2286 | INIT_LIST_HEAD(&domain->dev_list); | ||
2287 | |||
2288 | add_domain_to_list(domain); | ||
2188 | 2289 | ||
2189 | return domain; | 2290 | return domain; |
2190 | 2291 | ||
@@ -2241,26 +2342,23 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) | |||
2241 | static void amd_iommu_detach_device(struct iommu_domain *dom, | 2342 | static void amd_iommu_detach_device(struct iommu_domain *dom, |
2242 | struct device *dev) | 2343 | struct device *dev) |
2243 | { | 2344 | { |
2244 | struct protection_domain *domain = dom->priv; | 2345 | struct iommu_dev_data *dev_data = dev->archdata.iommu; |
2245 | struct amd_iommu *iommu; | 2346 | struct amd_iommu *iommu; |
2246 | struct pci_dev *pdev; | ||
2247 | u16 devid; | 2347 | u16 devid; |
2248 | 2348 | ||
2249 | if (dev->bus != &pci_bus_type) | 2349 | if (!check_device(dev)) |
2250 | return; | 2350 | return; |
2251 | 2351 | ||
2252 | pdev = to_pci_dev(dev); | 2352 | devid = get_device_id(dev); |
2253 | |||
2254 | devid = calc_devid(pdev->bus->number, pdev->devfn); | ||
2255 | 2353 | ||
2256 | if (devid > 0) | 2354 | if (dev_data->domain != NULL) |
2257 | detach_device(domain, devid); | 2355 | detach_device(dev); |
2258 | 2356 | ||
2259 | iommu = amd_iommu_rlookup_table[devid]; | 2357 | iommu = amd_iommu_rlookup_table[devid]; |
2260 | if (!iommu) | 2358 | if (!iommu) |
2261 | return; | 2359 | return; |
2262 | 2360 | ||
2263 | iommu_queue_inv_dev_entry(iommu, devid); | 2361 | iommu_flush_device(dev); |
2264 | iommu_completion_wait(iommu); | 2362 | iommu_completion_wait(iommu); |
2265 | } | 2363 | } |
2266 | 2364 | ||
@@ -2268,35 +2366,30 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, | |||
2268 | struct device *dev) | 2366 | struct device *dev) |
2269 | { | 2367 | { |
2270 | struct protection_domain *domain = dom->priv; | 2368 | struct protection_domain *domain = dom->priv; |
2271 | struct protection_domain *old_domain; | 2369 | struct iommu_dev_data *dev_data; |
2272 | struct amd_iommu *iommu; | 2370 | struct amd_iommu *iommu; |
2273 | struct pci_dev *pdev; | 2371 | int ret; |
2274 | u16 devid; | 2372 | u16 devid; |
2275 | 2373 | ||
2276 | if (dev->bus != &pci_bus_type) | 2374 | if (!check_device(dev)) |
2277 | return -EINVAL; | 2375 | return -EINVAL; |
2278 | 2376 | ||
2279 | pdev = to_pci_dev(dev); | 2377 | dev_data = dev->archdata.iommu; |
2280 | 2378 | ||
2281 | devid = calc_devid(pdev->bus->number, pdev->devfn); | 2379 | devid = get_device_id(dev); |
2282 | |||
2283 | if (devid >= amd_iommu_last_bdf || | ||
2284 | devid != amd_iommu_alias_table[devid]) | ||
2285 | return -EINVAL; | ||
2286 | 2380 | ||
2287 | iommu = amd_iommu_rlookup_table[devid]; | 2381 | iommu = amd_iommu_rlookup_table[devid]; |
2288 | if (!iommu) | 2382 | if (!iommu) |
2289 | return -EINVAL; | 2383 | return -EINVAL; |
2290 | 2384 | ||
2291 | old_domain = domain_for_device(devid); | 2385 | if (dev_data->domain) |
2292 | if (old_domain) | 2386 | detach_device(dev); |
2293 | detach_device(old_domain, devid); | ||
2294 | 2387 | ||
2295 | attach_device(iommu, domain, devid); | 2388 | ret = attach_device(dev, domain); |
2296 | 2389 | ||
2297 | iommu_completion_wait(iommu); | 2390 | iommu_completion_wait(iommu); |
2298 | 2391 | ||
2299 | return 0; | 2392 | return ret; |
2300 | } | 2393 | } |
2301 | 2394 | ||
2302 | static int amd_iommu_map_range(struct iommu_domain *dom, | 2395 | static int amd_iommu_map_range(struct iommu_domain *dom, |
@@ -2342,7 +2435,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2342 | iova += PAGE_SIZE; | 2435 | iova += PAGE_SIZE; |
2343 | } | 2436 | } |
2344 | 2437 | ||
2345 | iommu_flush_domain(domain->id); | 2438 | iommu_flush_tlb_pde(domain); |
2346 | } | 2439 | } |
2347 | 2440 | ||
2348 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 2441 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
@@ -2393,8 +2486,9 @@ static struct iommu_ops amd_iommu_ops = { | |||
2393 | 2486 | ||
2394 | int __init amd_iommu_init_passthrough(void) | 2487 | int __init amd_iommu_init_passthrough(void) |
2395 | { | 2488 | { |
2489 | struct amd_iommu *iommu; | ||
2396 | struct pci_dev *dev = NULL; | 2490 | struct pci_dev *dev = NULL; |
2397 | u16 devid, devid2; | 2491 | u16 devid; |
2398 | 2492 | ||
2399 | /* allocate passthrough domain */ | 2493 | /* allocate passthrough domain */ |
2400 | pt_domain = protection_domain_alloc(); | 2494 | pt_domain = protection_domain_alloc(); |
@@ -2404,20 +2498,17 @@ int __init amd_iommu_init_passthrough(void) | |||
2404 | pt_domain->mode |= PAGE_MODE_NONE; | 2498 | pt_domain->mode |= PAGE_MODE_NONE; |
2405 | 2499 | ||
2406 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 2500 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
2407 | struct amd_iommu *iommu; | ||
2408 | 2501 | ||
2409 | devid = calc_devid(dev->bus->number, dev->devfn); | 2502 | if (!check_device(&dev->dev)) |
2410 | if (devid > amd_iommu_last_bdf) | ||
2411 | continue; | 2503 | continue; |
2412 | 2504 | ||
2413 | devid2 = amd_iommu_alias_table[devid]; | 2505 | devid = get_device_id(&dev->dev); |
2414 | 2506 | ||
2415 | iommu = amd_iommu_rlookup_table[devid2]; | 2507 | iommu = amd_iommu_rlookup_table[devid]; |
2416 | if (!iommu) | 2508 | if (!iommu) |
2417 | continue; | 2509 | continue; |
2418 | 2510 | ||
2419 | __attach_device(iommu, pt_domain, devid); | 2511 | attach_device(&dev->dev, pt_domain); |
2420 | __attach_device(iommu, pt_domain, devid2); | ||
2421 | } | 2512 | } |
2422 | 2513 | ||
2423 | pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); | 2514 | pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); |
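Taken together, the amd_iommu.c hunks above replace the old (iommu, domain, devid) triple with lookups keyed on struct device. The fragment below is only an illustrative condensation of the new map_page()/get_domain() fast path already shown in the diff; the function name example_map_page is made up, and it assumes the driver-internal declarations the hunks introduce (struct protection_domain, __map_single(), iommu_flush_complete(), DMA_ERROR_CODE).

/* Illustrative sketch - condensed from the new device-centric path above. */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	struct protection_domain *domain;
	unsigned long flags;
	dma_addr_t addr;

	domain = get_domain(dev);		/* device -> domain, no devid juggling */
	if (PTR_ERR(domain) == -EINVAL)
		return (dma_addr_t)paddr;	/* device not handled by any AMD IOMMU */
	else if (IS_ERR(domain))
		return DMA_ERROR_CODE;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
			    *dev->dma_mask);
	if (addr != DMA_ERROR_CODE)
		iommu_flush_complete(domain);	/* flushing is per-domain now, not per-IOMMU */
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

The same pattern repeats in unmap_page(), map_sg(), alloc_coherent() and free_coherent(): resolve the domain from the device once, then operate and flush on the domain.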
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c20001e4f556..7ffc39965233 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -25,10 +25,12 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | #include <asm/pci-direct.h> | 27 | #include <asm/pci-direct.h> |
28 | #include <asm/amd_iommu_proto.h> | ||
28 | #include <asm/amd_iommu_types.h> | 29 | #include <asm/amd_iommu_types.h> |
29 | #include <asm/amd_iommu.h> | 30 | #include <asm/amd_iommu.h> |
30 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
31 | #include <asm/gart.h> | 32 | #include <asm/gart.h> |
33 | #include <asm/x86_init.h> | ||
32 | 34 | ||
33 | /* | 35 | /* |
34 | * definitions for the ACPI scanning code | 36 | * definitions for the ACPI scanning code |
@@ -123,18 +125,24 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have | |||
123 | to handle */ | 125 | to handle */ |
124 | LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings | 126 | LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings |
125 | we find in ACPI */ | 127 | we find in ACPI */ |
126 | #ifdef CONFIG_IOMMU_STRESS | ||
127 | bool amd_iommu_isolate = false; | ||
128 | #else | ||
129 | bool amd_iommu_isolate = true; /* if true, device isolation is | ||
130 | enabled */ | ||
131 | #endif | ||
132 | |||
133 | bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ | 128 | bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ |
134 | 129 | ||
135 | LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the | 130 | LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the |
136 | system */ | 131 | system */ |
137 | 132 | ||
133 | /* Array to assign indices to IOMMUs */ ||
134 | struct amd_iommu *amd_iommus[MAX_IOMMUS]; | ||
135 | int amd_iommus_present; | ||
136 | |||
137 | /* IOMMUs have a non-present cache? */ | ||
138 | bool amd_iommu_np_cache __read_mostly; | ||
139 | |||
140 | /* | ||
141 | * List of protection domains - used during resume | ||
142 | */ | ||
143 | LIST_HEAD(amd_iommu_pd_list); | ||
144 | spinlock_t amd_iommu_pd_lock; | ||
145 | |||
138 | /* | 146 | /* |
139 | * Pointer to the device table which is shared by all AMD IOMMUs | 147 | * Pointer to the device table which is shared by all AMD IOMMUs |
140 | * it is indexed by the PCI device id or the HT unit id and contains | 148 | * it is indexed by the PCI device id or the HT unit id and contains |
@@ -157,12 +165,6 @@ u16 *amd_iommu_alias_table; | |||
157 | struct amd_iommu **amd_iommu_rlookup_table; | 165 | struct amd_iommu **amd_iommu_rlookup_table; |
158 | 166 | ||
159 | /* | 167 | /* |
160 | * The pd table (protection domain table) is used to find the protection domain | ||
161 | * data structure a device belongs to. Indexed with the PCI device id too. | ||
162 | */ | ||
163 | struct protection_domain **amd_iommu_pd_table; | ||
164 | |||
165 | /* | ||
166 | * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap | 168 | * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap |
167 | * to know which ones are already in use. | 169 | * to know which ones are already in use. |
168 | */ | 170 | */ |
@@ -838,7 +840,18 @@ static void __init free_iommu_all(void) | |||
838 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | 840 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) |
839 | { | 841 | { |
840 | spin_lock_init(&iommu->lock); | 842 | spin_lock_init(&iommu->lock); |
843 | |||
844 | /* Add IOMMU to internal data structures */ | ||
841 | list_add_tail(&iommu->list, &amd_iommu_list); | 845 | list_add_tail(&iommu->list, &amd_iommu_list); |
846 | iommu->index = amd_iommus_present++; | ||
847 | |||
848 | if (unlikely(iommu->index >= MAX_IOMMUS)) { | ||
849 | WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); | ||
850 | return -ENOSYS; | ||
851 | } | ||
852 | |||
853 | /* Index is fine - add IOMMU to the array */ | ||
854 | amd_iommus[iommu->index] = iommu; | ||
842 | 855 | ||
843 | /* | 856 | /* |
844 | * Copy data from ACPI table entry to the iommu struct | 857 | * Copy data from ACPI table entry to the iommu struct |
@@ -868,6 +881,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | |||
868 | init_iommu_from_acpi(iommu, h); | 881 | init_iommu_from_acpi(iommu, h); |
869 | init_iommu_devices(iommu); | 882 | init_iommu_devices(iommu); |
870 | 883 | ||
884 | if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) | ||
885 | amd_iommu_np_cache = true; | ||
886 | |||
871 | return pci_enable_device(iommu->dev); | 887 | return pci_enable_device(iommu->dev); |
872 | } | 888 | } |
873 | 889 | ||
@@ -925,7 +941,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
925 | * | 941 | * |
926 | ****************************************************************************/ | 942 | ****************************************************************************/ |
927 | 943 | ||
928 | static int __init iommu_setup_msi(struct amd_iommu *iommu) | 944 | static int iommu_setup_msi(struct amd_iommu *iommu) |
929 | { | 945 | { |
930 | int r; | 946 | int r; |
931 | 947 | ||
@@ -1176,19 +1192,10 @@ static struct sys_device device_amd_iommu = { | |||
1176 | * functions. Finally it prints some information about AMD IOMMUs and | 1192 | * functions. Finally it prints some information about AMD IOMMUs and |
1177 | * the driver state and enables the hardware. | 1193 | * the driver state and enables the hardware. |
1178 | */ | 1194 | */ |
1179 | int __init amd_iommu_init(void) | 1195 | static int __init amd_iommu_init(void) |
1180 | { | 1196 | { |
1181 | int i, ret = 0; | 1197 | int i, ret = 0; |
1182 | 1198 | ||
1183 | |||
1184 | if (no_iommu) { | ||
1185 | printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); | ||
1186 | return 0; | ||
1187 | } | ||
1188 | |||
1189 | if (!amd_iommu_detected) | ||
1190 | return -ENODEV; | ||
1191 | |||
1192 | /* | 1199 | /* |
1193 | * First parse ACPI tables to find the largest Bus/Dev/Func | 1200 | * First parse ACPI tables to find the largest Bus/Dev/Func |
1194 | * we need to handle. Upon this information the shared data | 1201 | * we need to handle. Upon this information the shared data |
@@ -1225,15 +1232,6 @@ int __init amd_iommu_init(void) | |||
1225 | if (amd_iommu_rlookup_table == NULL) | 1232 | if (amd_iommu_rlookup_table == NULL) |
1226 | goto free; | 1233 | goto free; |
1227 | 1234 | ||
1228 | /* | ||
1229 | * Protection Domain table - maps devices to protection domains | ||
1230 | * This table has the same size as the rlookup_table | ||
1231 | */ | ||
1232 | amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
1233 | get_order(rlookup_table_size)); | ||
1234 | if (amd_iommu_pd_table == NULL) | ||
1235 | goto free; | ||
1236 | |||
1237 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( | 1235 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( |
1238 | GFP_KERNEL | __GFP_ZERO, | 1236 | GFP_KERNEL | __GFP_ZERO, |
1239 | get_order(MAX_DOMAIN_ID/8)); | 1237 | get_order(MAX_DOMAIN_ID/8)); |
@@ -1255,6 +1253,8 @@ int __init amd_iommu_init(void) | |||
1255 | */ | 1253 | */ |
1256 | amd_iommu_pd_alloc_bitmap[0] = 1; | 1254 | amd_iommu_pd_alloc_bitmap[0] = 1; |
1257 | 1255 | ||
1256 | spin_lock_init(&amd_iommu_pd_lock); | ||
1257 | |||
1258 | /* | 1258 | /* |
1259 | * now the data structures are allocated and basically initialized | 1259 | * now the data structures are allocated and basically initialized |
1260 | * start the real acpi table scan | 1260 | * start the real acpi table scan |
@@ -1286,17 +1286,12 @@ int __init amd_iommu_init(void) | |||
1286 | if (iommu_pass_through) | 1286 | if (iommu_pass_through) |
1287 | goto out; | 1287 | goto out; |
1288 | 1288 | ||
1289 | printk(KERN_INFO "AMD-Vi: device isolation "); | ||
1290 | if (amd_iommu_isolate) | ||
1291 | printk("enabled\n"); | ||
1292 | else | ||
1293 | printk("disabled\n"); | ||
1294 | |||
1295 | if (amd_iommu_unmap_flush) | 1289 | if (amd_iommu_unmap_flush) |
1296 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); | 1290 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); |
1297 | else | 1291 | else |
1298 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); | 1292 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); |
1299 | 1293 | ||
1294 | x86_platform.iommu_shutdown = disable_iommus; | ||
1300 | out: | 1295 | out: |
1301 | return ret; | 1296 | return ret; |
1302 | 1297 | ||
@@ -1304,9 +1299,6 @@ free: | |||
1304 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, | 1299 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, |
1305 | get_order(MAX_DOMAIN_ID/8)); | 1300 | get_order(MAX_DOMAIN_ID/8)); |
1306 | 1301 | ||
1307 | free_pages((unsigned long)amd_iommu_pd_table, | ||
1308 | get_order(rlookup_table_size)); | ||
1309 | |||
1310 | free_pages((unsigned long)amd_iommu_rlookup_table, | 1302 | free_pages((unsigned long)amd_iommu_rlookup_table, |
1311 | get_order(rlookup_table_size)); | 1303 | get_order(rlookup_table_size)); |
1312 | 1304 | ||
@@ -1323,11 +1315,6 @@ free: | |||
1323 | goto out; | 1315 | goto out; |
1324 | } | 1316 | } |
1325 | 1317 | ||
1326 | void amd_iommu_shutdown(void) | ||
1327 | { | ||
1328 | disable_iommus(); | ||
1329 | } | ||
1330 | |||
1331 | /**************************************************************************** | 1318 | /**************************************************************************** |
1332 | * | 1319 | * |
1333 | * Early detect code. This code runs at IOMMU detection time in the DMA | 1320 | * Early detect code. This code runs at IOMMU detection time in the DMA |
@@ -1342,16 +1329,13 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table) | |||
1342 | 1329 | ||
1343 | void __init amd_iommu_detect(void) | 1330 | void __init amd_iommu_detect(void) |
1344 | { | 1331 | { |
1345 | if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture)) | 1332 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
1346 | return; | 1333 | return; |
1347 | 1334 | ||
1348 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { | 1335 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { |
1349 | iommu_detected = 1; | 1336 | iommu_detected = 1; |
1350 | amd_iommu_detected = 1; | 1337 | amd_iommu_detected = 1; |
1351 | #ifdef CONFIG_GART_IOMMU | 1338 | x86_init.iommu.iommu_init = amd_iommu_init; |
1352 | gart_iommu_aperture_disabled = 1; | ||
1353 | gart_iommu_aperture = 0; | ||
1354 | #endif | ||
1355 | } | 1339 | } |
1356 | } | 1340 | } |
1357 | 1341 | ||
@@ -1372,10 +1356,6 @@ static int __init parse_amd_iommu_dump(char *str) | |||
1372 | static int __init parse_amd_iommu_options(char *str) | 1356 | static int __init parse_amd_iommu_options(char *str) |
1373 | { | 1357 | { |
1374 | for (; *str; ++str) { | 1358 | for (; *str; ++str) { |
1375 | if (strncmp(str, "isolate", 7) == 0) | ||
1376 | amd_iommu_isolate = true; | ||
1377 | if (strncmp(str, "share", 5) == 0) | ||
1378 | amd_iommu_isolate = false; | ||
1379 | if (strncmp(str, "fullflush", 9) == 0) | 1359 | if (strncmp(str, "fullflush", 9) == 0) |
1380 | amd_iommu_unmap_flush = true; | 1360 | amd_iommu_unmap_flush = true; |
1381 | } | 1361 | } |
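On the init side the driver stops exporting its own entry points: amd_iommu_init() becomes static, amd_iommu_shutdown() is removed, and both are replaced by hooks registered at detection time (x86_init.iommu.iommu_init and x86_platform.iommu_shutdown in the hunks above). The consumer of that hook is not part of this diff, so the sketch below is an assumption about its shape, shown only to make the control flow explicit.

/* Illustrative, assumed consumer: generic x86 DMA setup invokes whichever
 * IOMMU driver registered itself during detection, instead of hard-coding
 * calls to amd_iommu_init() or gart_iommu_init().
 */
static int __init pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();
	return 0;
}
rootfs_initcall(pci_iommu_init);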
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 128111d8ffe0..e0dfb6856aa2 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/pci-direct.h> | 28 | #include <asm/pci-direct.h> |
29 | #include <asm/dma.h> | 29 | #include <asm/dma.h> |
30 | #include <asm/k8.h> | 30 | #include <asm/k8.h> |
31 | #include <asm/x86_init.h> | ||
31 | 32 | ||
32 | int gart_iommu_aperture; | 33 | int gart_iommu_aperture; |
33 | int gart_iommu_aperture_disabled __initdata; | 34 | int gart_iommu_aperture_disabled __initdata; |
@@ -400,6 +401,7 @@ void __init gart_iommu_hole_init(void) | |||
400 | 401 | ||
401 | iommu_detected = 1; | 402 | iommu_detected = 1; |
402 | gart_iommu_aperture = 1; | 403 | gart_iommu_aperture = 1; |
404 | x86_init.iommu.iommu_init = gart_iommu_init; | ||
403 | 405 | ||
404 | aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; | 406 | aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; |
405 | aper_size = (32 * 1024 * 1024) << aper_order; | 407 | aper_size = (32 * 1024 * 1024) << aper_order; |
@@ -456,7 +458,7 @@ out: | |||
456 | 458 | ||
457 | if (aper_alloc) { | 459 | if (aper_alloc) { |
458 | /* Got the aperture from the AGP bridge */ | 460 | /* Got the aperture from the AGP bridge */ |
459 | } else if (swiotlb && !valid_agp) { | 461 | } else if (!valid_agp) { |
460 | /* Do nothing */ | 462 | /* Do nothing */ |
461 | } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || | 463 | } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || |
462 | force_iommu || | 464 | force_iommu || |
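Both the amd_iommu_detect() and gart_iommu_hole_init() hunks above drop their ad-hoc swiotlb checks and instead record their initializer in x86_init.iommu.iommu_init. A minimal sketch of that indirection follows; the pci_iommu_init() call site named here is an assumption about the other end of the hook and is not shown in this diff:

        /* detection code only registers which IOMMU to bring up later */
        x86_init.iommu.iommu_init = amd_iommu_init;     /* or gart_iommu_init */

        /* ...and a single generic init path runs whatever was registered
         * (the function and initcall names below are hypothetical): */
        static int __init pci_iommu_init(void)
        {
                x86_init.iommu.iommu_init();
                return 0;
        }
        rootfs_initcall(pci_iommu_init);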
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile index da7b7b9f8bd8..565c1bfc507d 100644 --- a/arch/x86/kernel/apic/Makefile +++ b/arch/x86/kernel/apic/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for local APIC drivers and for the IO-APIC code | 2 | # Makefile for local APIC drivers and for the IO-APIC code |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o probe_$(BITS).o ipi.o nmi.o | 5 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o probe_$(BITS).o ipi.o nmi.o |
6 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o | 6 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o |
7 | obj-$(CONFIG_SMP) += ipi.o | 7 | obj-$(CONFIG_SMP) += ipi.o |
8 | 8 | ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 894aa97f0717..ad8c75b9e453 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -241,28 +241,13 @@ static int modern_apic(void) | |||
241 | } | 241 | } |
242 | 242 | ||
243 | /* | 243 | /* |
244 | * bare function to substitute write operation | 244 | * right after this call apic becomes NOOP driven |
245 | * and it's _that_ fast :) | 245 | * so apic->write/read doesn't do anything |
246 | */ | ||
247 | static void native_apic_write_dummy(u32 reg, u32 v) | ||
248 | { | ||
249 | WARN_ON_ONCE((cpu_has_apic || !disable_apic)); | ||
250 | } | ||
251 | |||
252 | static u32 native_apic_read_dummy(u32 reg) | ||
253 | { | ||
254 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /* | ||
259 | * right after this call apic->write/read doesn't do anything | ||
260 | * note that there is no restore operation it works one way | ||
261 | */ | 246 | */ |
262 | void apic_disable(void) | 247 | void apic_disable(void) |
263 | { | 248 | { |
264 | apic->read = native_apic_read_dummy; | 249 | pr_info("APIC: switched to apic NOOP\n"); |
265 | apic->write = native_apic_write_dummy; | 250 | apic = &apic_noop; |
266 | } | 251 | } |
267 | 252 | ||
268 | void native_apic_wait_icr_idle(void) | 253 | void native_apic_wait_icr_idle(void) |
@@ -459,7 +444,7 @@ static void lapic_timer_setup(enum clock_event_mode mode, | |||
459 | v = apic_read(APIC_LVTT); | 444 | v = apic_read(APIC_LVTT); |
460 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | 445 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); |
461 | apic_write(APIC_LVTT, v); | 446 | apic_write(APIC_LVTT, v); |
462 | apic_write(APIC_TMICT, 0xffffffff); | 447 | apic_write(APIC_TMICT, 0); |
463 | break; | 448 | break; |
464 | case CLOCK_EVT_MODE_RESUME: | 449 | case CLOCK_EVT_MODE_RESUME: |
465 | /* Nothing to do here */ | 450 | /* Nothing to do here */ |
@@ -1392,14 +1377,11 @@ void __init enable_IR_x2apic(void) | |||
1392 | unsigned long flags; | 1377 | unsigned long flags; |
1393 | struct IO_APIC_route_entry **ioapic_entries = NULL; | 1378 | struct IO_APIC_route_entry **ioapic_entries = NULL; |
1394 | int ret, x2apic_enabled = 0; | 1379 | int ret, x2apic_enabled = 0; |
1395 | int dmar_table_init_ret = 0; | 1380 | int dmar_table_init_ret; |
1396 | 1381 | ||
1397 | #ifdef CONFIG_INTR_REMAP | ||
1398 | dmar_table_init_ret = dmar_table_init(); | 1382 | dmar_table_init_ret = dmar_table_init(); |
1399 | if (dmar_table_init_ret) | 1383 | if (dmar_table_init_ret && !x2apic_supported()) |
1400 | pr_debug("dmar_table_init() failed with %d:\n", | 1384 | return; |
1401 | dmar_table_init_ret); | ||
1402 | #endif | ||
1403 | 1385 | ||
1404 | ioapic_entries = alloc_ioapic_entries(); | 1386 | ioapic_entries = alloc_ioapic_entries(); |
1405 | if (!ioapic_entries) { | 1387 | if (!ioapic_entries) { |
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c new file mode 100644 index 000000000000..d9acc3bee0f4 --- /dev/null +++ b/arch/x86/kernel/apic/apic_noop.c | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * NOOP APIC driver. | ||
3 | * | ||
4 | * Does almost nothing and should be substituted by a real apic driver via | ||
5 | * probe routine. | ||
6 | * | ||
7 | * Though if the apic is disabled (for some reason) we try | ||
8 | * not to uglify the caller's code and still allow calling (some) apic routines | ||
9 | * like self-ipi, etc... | ||
10 | */ | ||
11 | |||
12 | #include <linux/threads.h> | ||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/ctype.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <asm/fixmap.h> | ||
21 | #include <asm/mpspec.h> | ||
22 | #include <asm/apicdef.h> | ||
23 | #include <asm/apic.h> | ||
24 | #include <asm/setup.h> | ||
25 | |||
26 | #include <linux/smp.h> | ||
27 | #include <asm/ipi.h> | ||
28 | |||
29 | #include <linux/interrupt.h> | ||
30 | #include <asm/acpi.h> | ||
31 | #include <asm/e820.h> | ||
32 | |||
33 | static void noop_init_apic_ldr(void) { } | ||
34 | static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { } | ||
35 | static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { } | ||
36 | static void noop_send_IPI_allbutself(int vector) { } | ||
37 | static void noop_send_IPI_all(int vector) { } | ||
38 | static void noop_send_IPI_self(int vector) { } | ||
39 | static void noop_apic_wait_icr_idle(void) { } | ||
40 | static void noop_apic_icr_write(u32 low, u32 id) { } | ||
41 | |||
42 | static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip) | ||
43 | { | ||
44 | return -1; | ||
45 | } | ||
46 | |||
47 | static u32 noop_safe_apic_wait_icr_idle(void) | ||
48 | { | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static u64 noop_apic_icr_read(void) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static int noop_cpu_to_logical_apicid(int cpu) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static int noop_phys_pkg_id(int cpuid_apic, int index_msb) | ||
63 | { | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | static unsigned int noop_get_apic_id(unsigned long x) | ||
68 | { | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int noop_probe(void) | ||
73 | { | ||
74 | /* | ||
75 | * The NOOP apic should never be | ||
76 | * enabled via the probe routine | ||
77 | */ | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static int noop_apic_id_registered(void) | ||
82 | { | ||
83 | /* | ||
84 | * if we were really "pedantic" | ||
85 | * we would pass read_apic_id() here | ||
86 | * but since NOOP assumes APIC ID = 0 | ||
87 | * let's save a few cycles | ||
88 | */ | ||
89 | return physid_isset(0, phys_cpu_present_map); | ||
90 | } | ||
91 | |||
92 | static const struct cpumask *noop_target_cpus(void) | ||
93 | { | ||
94 | /* only BSP here */ | ||
95 | return cpumask_of(0); | ||
96 | } | ||
97 | |||
98 | static unsigned long noop_check_apicid_used(physid_mask_t *map, int apicid) | ||
99 | { | ||
100 | return physid_isset(apicid, *map); | ||
101 | } | ||
102 | |||
103 | static unsigned long noop_check_apicid_present(int bit) | ||
104 | { | ||
105 | return physid_isset(bit, phys_cpu_present_map); | ||
106 | } | ||
107 | |||
108 | static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) | ||
109 | { | ||
110 | if (cpu != 0) | ||
111 | pr_warning("APIC: Vector allocated for non-BSP cpu\n"); | ||
112 | cpumask_clear(retmask); | ||
113 | cpumask_set_cpu(cpu, retmask); | ||
114 | } | ||
115 | |||
116 | int noop_apicid_to_node(int logical_apicid) | ||
117 | { | ||
118 | /* we're always on node 0 */ | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static u32 noop_apic_read(u32 reg) | ||
123 | { | ||
124 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static void noop_apic_write(u32 reg, u32 v) | ||
129 | { | ||
130 | WARN_ON_ONCE((cpu_has_apic || !disable_apic)); | ||
131 | } | ||
132 | |||
133 | struct apic apic_noop = { | ||
134 | .name = "noop", | ||
135 | .probe = noop_probe, | ||
136 | .acpi_madt_oem_check = NULL, | ||
137 | |||
138 | .apic_id_registered = noop_apic_id_registered, | ||
139 | |||
140 | .irq_delivery_mode = dest_LowestPrio, | ||
141 | /* logical delivery broadcast to all CPUs: */ | ||
142 | .irq_dest_mode = 1, | ||
143 | |||
144 | .target_cpus = noop_target_cpus, | ||
145 | .disable_esr = 0, | ||
146 | .dest_logical = APIC_DEST_LOGICAL, | ||
147 | .check_apicid_used = noop_check_apicid_used, | ||
148 | .check_apicid_present = noop_check_apicid_present, | ||
149 | |||
150 | .vector_allocation_domain = noop_vector_allocation_domain, | ||
151 | .init_apic_ldr = noop_init_apic_ldr, | ||
152 | |||
153 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | ||
154 | .setup_apic_routing = NULL, | ||
155 | .multi_timer_check = NULL, | ||
156 | .apicid_to_node = noop_apicid_to_node, | ||
157 | |||
158 | .cpu_to_logical_apicid = noop_cpu_to_logical_apicid, | ||
159 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | ||
160 | .apicid_to_cpu_present = physid_set_mask_of_physid, | ||
161 | |||
162 | .setup_portio_remap = NULL, | ||
163 | .check_phys_apicid_present = default_check_phys_apicid_present, | ||
164 | .enable_apic_mode = NULL, | ||
165 | |||
166 | .phys_pkg_id = noop_phys_pkg_id, | ||
167 | |||
168 | .mps_oem_check = NULL, | ||
169 | |||
170 | .get_apic_id = noop_get_apic_id, | ||
171 | .set_apic_id = NULL, | ||
172 | .apic_id_mask = 0x0F << 24, | ||
173 | |||
174 | .cpu_mask_to_apicid = default_cpu_mask_to_apicid, | ||
175 | .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, | ||
176 | |||
177 | .send_IPI_mask = noop_send_IPI_mask, | ||
178 | .send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself, | ||
179 | .send_IPI_allbutself = noop_send_IPI_allbutself, | ||
180 | .send_IPI_all = noop_send_IPI_all, | ||
181 | .send_IPI_self = noop_send_IPI_self, | ||
182 | |||
183 | .wakeup_secondary_cpu = noop_wakeup_secondary_cpu, | ||
184 | |||
185 | /* should be safe */ | ||
186 | .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, | ||
187 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | ||
188 | |||
189 | .wait_for_init_deassert = NULL, | ||
190 | |||
191 | .smp_callin_clear_local_apic = NULL, | ||
192 | .inquire_remote_apic = NULL, | ||
193 | |||
194 | .read = noop_apic_read, | ||
195 | .write = noop_apic_write, | ||
196 | .icr_read = noop_apic_icr_read, | ||
197 | .icr_write = noop_apic_icr_write, | ||
198 | .wait_icr_idle = noop_apic_wait_icr_idle, | ||
199 | .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, | ||
200 | }; | ||
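The practical effect of this new driver shows up in the apic.c hunk earlier: apic_disable() now just points apic at apic_noop, so call sites can keep using the apic methods unconditionally instead of the old dummy read/write pair. A small sketch of what callers see afterwards, based only on the stubs defined in this file:

        unsigned int v;

        /* after apic_disable(): apic == &apic_noop */
        apic->write(APIC_SPIV, 0);              /* noop_apic_write(): no-op, warns once if an APIC is actually usable */
        v = apic->read(APIC_ID);                /* noop_apic_read(): always returns 0 */
        apic->send_IPI_self(NMI_VECTOR);        /* noop_send_IPI_self(): silently dropped */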
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index 77a06413b6b2..38dcecfa5818 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c | |||
@@ -35,7 +35,7 @@ static const struct cpumask *bigsmp_target_cpus(void) | |||
35 | #endif | 35 | #endif |
36 | } | 36 | } |
37 | 37 | ||
38 | static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid) | 38 | static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid) |
39 | { | 39 | { |
40 | return 0; | 40 | return 0; |
41 | } | 41 | } |
@@ -93,11 +93,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu) | |||
93 | return BAD_APICID; | 93 | return BAD_APICID; |
94 | } | 94 | } |
95 | 95 | ||
96 | static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) | ||
97 | { | ||
98 | return physid_mask_of_physid(phys_apicid); | ||
99 | } | ||
100 | |||
101 | /* Mapping from cpu number to logical apicid */ | 96 | /* Mapping from cpu number to logical apicid */ |
102 | static inline int bigsmp_cpu_to_logical_apicid(int cpu) | 97 | static inline int bigsmp_cpu_to_logical_apicid(int cpu) |
103 | { | 98 | { |
@@ -106,10 +101,10 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu) | |||
106 | return cpu_physical_id(cpu); | 101 | return cpu_physical_id(cpu); |
107 | } | 102 | } |
108 | 103 | ||
109 | static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) | 104 | static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
110 | { | 105 | { |
111 | /* For clustered we don't have a good way to do this yet - hack */ | 106 | /* For clustered we don't have a good way to do this yet - hack */ |
112 | return physids_promote(0xFFL); | 107 | physids_promote(0xFFL, retmap); |
113 | } | 108 | } |
114 | 109 | ||
115 | static int bigsmp_check_phys_apicid_present(int phys_apicid) | 110 | static int bigsmp_check_phys_apicid_present(int phys_apicid) |
@@ -230,7 +225,7 @@ struct apic apic_bigsmp = { | |||
230 | .apicid_to_node = bigsmp_apicid_to_node, | 225 | .apicid_to_node = bigsmp_apicid_to_node, |
231 | .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, | 226 | .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, |
232 | .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, | 227 | .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, |
233 | .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present, | 228 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
234 | .setup_portio_remap = NULL, | 229 | .setup_portio_remap = NULL, |
235 | .check_phys_apicid_present = bigsmp_check_phys_apicid_present, | 230 | .check_phys_apicid_present = bigsmp_check_phys_apicid_present, |
236 | .enable_apic_mode = NULL, | 231 | .enable_apic_mode = NULL, |
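The bigsmp changes above are the first instance of a conversion that repeats through es7000, numaq, summit, probe_32 and io_apic.c below: apic callbacks that used to return a physid_mask_t by value now fill in a caller-supplied mask, presumably to avoid passing large masks around by value when NR_CPUS is big. The calling convention, sketched from the io_apic.c hunks in this same diff:

        physid_mask_t tmp;
        unsigned long used;

        /* old: tmp = apic->apicid_to_cpu_present(apic_id); */
        apic->apicid_to_cpu_present(apic_id, &tmp);             /* new: callee fills in the caller's mask */
        physids_or(apic_id_map, apic_id_map, tmp);

        used = apic->check_apicid_used(&apic_id_map, apic_id);  /* lookups now take a pointer too */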
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 89174f847b49..e85f8fb7f8e7 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -466,11 +466,11 @@ static const struct cpumask *es7000_target_cpus(void) | |||
466 | return cpumask_of(smp_processor_id()); | 466 | return cpumask_of(smp_processor_id()); |
467 | } | 467 | } |
468 | 468 | ||
469 | static unsigned long | 469 | static unsigned long es7000_check_apicid_used(physid_mask_t *map, int apicid) |
470 | es7000_check_apicid_used(physid_mask_t bitmap, int apicid) | ||
471 | { | 470 | { |
472 | return 0; | 471 | return 0; |
473 | } | 472 | } |
473 | |||
474 | static unsigned long es7000_check_apicid_present(int bit) | 474 | static unsigned long es7000_check_apicid_present(int bit) |
475 | { | 475 | { |
476 | return physid_isset(bit, phys_cpu_present_map); | 476 | return physid_isset(bit, phys_cpu_present_map); |
@@ -539,14 +539,10 @@ static int es7000_cpu_present_to_apicid(int mps_cpu) | |||
539 | 539 | ||
540 | static int cpu_id; | 540 | static int cpu_id; |
541 | 541 | ||
542 | static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid) | 542 | static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap) |
543 | { | 543 | { |
544 | physid_mask_t mask; | 544 | physid_set_mask_of_physid(cpu_id, retmap); |
545 | |||
546 | mask = physid_mask_of_physid(cpu_id); | ||
547 | ++cpu_id; | 545 | ++cpu_id; |
548 | |||
549 | return mask; | ||
550 | } | 546 | } |
551 | 547 | ||
552 | /* Mapping from cpu number to logical apicid */ | 548 | /* Mapping from cpu number to logical apicid */ |
@@ -561,10 +557,10 @@ static int es7000_cpu_to_logical_apicid(int cpu) | |||
561 | #endif | 557 | #endif |
562 | } | 558 | } |
563 | 559 | ||
564 | static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map) | 560 | static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
565 | { | 561 | { |
566 | /* For clustered we don't have a good way to do this yet - hack */ | 562 | /* For clustered we don't have a good way to do this yet - hack */ |
567 | return physids_promote(0xff); | 563 | physids_promote(0xFFL, retmap); |
568 | } | 564 | } |
569 | 565 | ||
570 | static int es7000_check_phys_apicid_present(int cpu_physical_apicid) | 566 | static int es7000_check_phys_apicid_present(int cpu_physical_apicid) |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index dc69f28489f5..c0b4468683f9 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -60,8 +60,6 @@ | |||
60 | #include <asm/irq_remapping.h> | 60 | #include <asm/irq_remapping.h> |
61 | #include <asm/hpet.h> | 61 | #include <asm/hpet.h> |
62 | #include <asm/hw_irq.h> | 62 | #include <asm/hw_irq.h> |
63 | #include <asm/uv/uv_hub.h> | ||
64 | #include <asm/uv/uv_irq.h> | ||
65 | 63 | ||
66 | #include <asm/apic.h> | 64 | #include <asm/apic.h> |
67 | 65 | ||
@@ -140,20 +138,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node) | |||
140 | return pin; | 138 | return pin; |
141 | } | 139 | } |
142 | 140 | ||
143 | /* | ||
144 | * This is performance-critical, we want to do it O(1) | ||
145 | * | ||
146 | * Most irqs are mapped 1:1 with pins. | ||
147 | */ | ||
148 | struct irq_cfg { | ||
149 | struct irq_pin_list *irq_2_pin; | ||
150 | cpumask_var_t domain; | ||
151 | cpumask_var_t old_domain; | ||
152 | unsigned move_cleanup_count; | ||
153 | u8 vector; | ||
154 | u8 move_in_progress : 1; | ||
155 | }; | ||
156 | |||
157 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 141 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
158 | #ifdef CONFIG_SPARSE_IRQ | 142 | #ifdef CONFIG_SPARSE_IRQ |
159 | static struct irq_cfg irq_cfgx[] = { | 143 | static struct irq_cfg irq_cfgx[] = { |
@@ -209,7 +193,7 @@ int __init arch_early_irq_init(void) | |||
209 | } | 193 | } |
210 | 194 | ||
211 | #ifdef CONFIG_SPARSE_IRQ | 195 | #ifdef CONFIG_SPARSE_IRQ |
212 | static struct irq_cfg *irq_cfg(unsigned int irq) | 196 | struct irq_cfg *irq_cfg(unsigned int irq) |
213 | { | 197 | { |
214 | struct irq_cfg *cfg = NULL; | 198 | struct irq_cfg *cfg = NULL; |
215 | struct irq_desc *desc; | 199 | struct irq_desc *desc; |
@@ -361,7 +345,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | |||
361 | /* end for move_irq_desc */ | 345 | /* end for move_irq_desc */ |
362 | 346 | ||
363 | #else | 347 | #else |
364 | static struct irq_cfg *irq_cfg(unsigned int irq) | 348 | struct irq_cfg *irq_cfg(unsigned int irq) |
365 | { | 349 | { |
366 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | 350 | return irq < nr_irqs ? irq_cfgx + irq : NULL; |
367 | } | 351 | } |
@@ -555,23 +539,41 @@ static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, | |||
555 | add_pin_to_irq_node(cfg, node, newapic, newpin); | 539 | add_pin_to_irq_node(cfg, node, newapic, newpin); |
556 | } | 540 | } |
557 | 541 | ||
542 | static void __io_apic_modify_irq(struct irq_pin_list *entry, | ||
543 | int mask_and, int mask_or, | ||
544 | void (*final)(struct irq_pin_list *entry)) | ||
545 | { | ||
546 | unsigned int reg, pin; | ||
547 | |||
548 | pin = entry->pin; | ||
549 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); | ||
550 | reg &= mask_and; | ||
551 | reg |= mask_or; | ||
552 | io_apic_modify(entry->apic, 0x10 + pin * 2, reg); | ||
553 | if (final) | ||
554 | final(entry); | ||
555 | } | ||
556 | |||
558 | static void io_apic_modify_irq(struct irq_cfg *cfg, | 557 | static void io_apic_modify_irq(struct irq_cfg *cfg, |
559 | int mask_and, int mask_or, | 558 | int mask_and, int mask_or, |
560 | void (*final)(struct irq_pin_list *entry)) | 559 | void (*final)(struct irq_pin_list *entry)) |
561 | { | 560 | { |
562 | int pin; | ||
563 | struct irq_pin_list *entry; | 561 | struct irq_pin_list *entry; |
564 | 562 | ||
565 | for_each_irq_pin(entry, cfg->irq_2_pin) { | 563 | for_each_irq_pin(entry, cfg->irq_2_pin) |
566 | unsigned int reg; | 564 | __io_apic_modify_irq(entry, mask_and, mask_or, final); |
567 | pin = entry->pin; | 565 | } |
568 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); | 566 | |
569 | reg &= mask_and; | 567 | static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry) |
570 | reg |= mask_or; | 568 | { |
571 | io_apic_modify(entry->apic, 0x10 + pin * 2, reg); | 569 | __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER, |
572 | if (final) | 570 | IO_APIC_REDIR_MASKED, NULL); |
573 | final(entry); | 571 | } |
574 | } | 572 | |
573 | static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) | ||
574 | { | ||
575 | __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED, | ||
576 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | ||
575 | } | 577 | } |
576 | 578 | ||
577 | static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | 579 | static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) |
@@ -595,18 +597,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | |||
595 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 597 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
596 | } | 598 | } |
597 | 599 | ||
598 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) | ||
599 | { | ||
600 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER, | ||
601 | IO_APIC_REDIR_MASKED, NULL); | ||
602 | } | ||
603 | |||
604 | static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg) | ||
605 | { | ||
606 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, | ||
607 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | ||
608 | } | ||
609 | |||
610 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 600 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) |
611 | { | 601 | { |
612 | struct irq_cfg *cfg = desc->chip_data; | 602 | struct irq_cfg *cfg = desc->chip_data; |
@@ -1177,7 +1167,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | |||
1177 | int cpu, err; | 1167 | int cpu, err; |
1178 | cpumask_var_t tmp_mask; | 1168 | cpumask_var_t tmp_mask; |
1179 | 1169 | ||
1180 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) | 1170 | if (cfg->move_in_progress) |
1181 | return -EBUSY; | 1171 | return -EBUSY; |
1182 | 1172 | ||
1183 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) | 1173 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) |
@@ -1237,8 +1227,7 @@ next: | |||
1237 | return err; | 1227 | return err; |
1238 | } | 1228 | } |
1239 | 1229 | ||
1240 | static int | 1230 | int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) |
1241 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1242 | { | 1231 | { |
1243 | int err; | 1232 | int err; |
1244 | unsigned long flags; | 1233 | unsigned long flags; |
@@ -1599,9 +1588,6 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1599 | struct irq_desc *desc; | 1588 | struct irq_desc *desc; |
1600 | unsigned int irq; | 1589 | unsigned int irq; |
1601 | 1590 | ||
1602 | if (apic_verbosity == APIC_QUIET) | ||
1603 | return; | ||
1604 | |||
1605 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); | 1591 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); |
1606 | for (i = 0; i < nr_ioapics; i++) | 1592 | for (i = 0; i < nr_ioapics; i++) |
1607 | printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", | 1593 | printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", |
@@ -1708,9 +1694,6 @@ __apicdebuginit(void) print_APIC_field(int base) | |||
1708 | { | 1694 | { |
1709 | int i; | 1695 | int i; |
1710 | 1696 | ||
1711 | if (apic_verbosity == APIC_QUIET) | ||
1712 | return; | ||
1713 | |||
1714 | printk(KERN_DEBUG); | 1697 | printk(KERN_DEBUG); |
1715 | 1698 | ||
1716 | for (i = 0; i < 8; i++) | 1699 | for (i = 0; i < 8; i++) |
@@ -1724,9 +1707,6 @@ __apicdebuginit(void) print_local_APIC(void *dummy) | |||
1724 | unsigned int i, v, ver, maxlvt; | 1707 | unsigned int i, v, ver, maxlvt; |
1725 | u64 icr; | 1708 | u64 icr; |
1726 | 1709 | ||
1727 | if (apic_verbosity == APIC_QUIET) | ||
1728 | return; | ||
1729 | |||
1730 | printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | 1710 | printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", |
1731 | smp_processor_id(), hard_smp_processor_id()); | 1711 | smp_processor_id(), hard_smp_processor_id()); |
1732 | v = apic_read(APIC_ID); | 1712 | v = apic_read(APIC_ID); |
@@ -1824,13 +1804,19 @@ __apicdebuginit(void) print_local_APIC(void *dummy) | |||
1824 | printk("\n"); | 1804 | printk("\n"); |
1825 | } | 1805 | } |
1826 | 1806 | ||
1827 | __apicdebuginit(void) print_all_local_APICs(void) | 1807 | __apicdebuginit(void) print_local_APICs(int maxcpu) |
1828 | { | 1808 | { |
1829 | int cpu; | 1809 | int cpu; |
1830 | 1810 | ||
1811 | if (!maxcpu) | ||
1812 | return; | ||
1813 | |||
1831 | preempt_disable(); | 1814 | preempt_disable(); |
1832 | for_each_online_cpu(cpu) | 1815 | for_each_online_cpu(cpu) { |
1816 | if (cpu >= maxcpu) | ||
1817 | break; | ||
1833 | smp_call_function_single(cpu, print_local_APIC, NULL, 1); | 1818 | smp_call_function_single(cpu, print_local_APIC, NULL, 1); |
1819 | } | ||
1834 | preempt_enable(); | 1820 | preempt_enable(); |
1835 | } | 1821 | } |
1836 | 1822 | ||
@@ -1839,7 +1825,7 @@ __apicdebuginit(void) print_PIC(void) | |||
1839 | unsigned int v; | 1825 | unsigned int v; |
1840 | unsigned long flags; | 1826 | unsigned long flags; |
1841 | 1827 | ||
1842 | if (apic_verbosity == APIC_QUIET || !nr_legacy_irqs) | 1828 | if (!nr_legacy_irqs) |
1843 | return; | 1829 | return; |
1844 | 1830 | ||
1845 | printk(KERN_DEBUG "\nprinting PIC contents\n"); | 1831 | printk(KERN_DEBUG "\nprinting PIC contents\n"); |
@@ -1866,21 +1852,41 @@ __apicdebuginit(void) print_PIC(void) | |||
1866 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); | 1852 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); |
1867 | } | 1853 | } |
1868 | 1854 | ||
1869 | __apicdebuginit(int) print_all_ICs(void) | 1855 | static int __initdata show_lapic = 1; |
1856 | static __init int setup_show_lapic(char *arg) | ||
1870 | { | 1857 | { |
1858 | int num = -1; | ||
1859 | |||
1860 | if (strcmp(arg, "all") == 0) { | ||
1861 | show_lapic = CONFIG_NR_CPUS; | ||
1862 | } else { | ||
1863 | get_option(&arg, &num); | ||
1864 | if (num >= 0) | ||
1865 | show_lapic = num; | ||
1866 | } | ||
1867 | |||
1868 | return 1; | ||
1869 | } | ||
1870 | __setup("show_lapic=", setup_show_lapic); | ||
1871 | |||
1872 | __apicdebuginit(int) print_ICs(void) | ||
1873 | { | ||
1874 | if (apic_verbosity == APIC_QUIET) | ||
1875 | return 0; | ||
1876 | |||
1871 | print_PIC(); | 1877 | print_PIC(); |
1872 | 1878 | ||
1873 | /* don't print out if apic is not there */ | 1879 | /* don't print out if apic is not there */ |
1874 | if (!cpu_has_apic && !apic_from_smp_config()) | 1880 | if (!cpu_has_apic && !apic_from_smp_config()) |
1875 | return 0; | 1881 | return 0; |
1876 | 1882 | ||
1877 | print_all_local_APICs(); | 1883 | print_local_APICs(show_lapic); |
1878 | print_IO_APIC(); | 1884 | print_IO_APIC(); |
1879 | 1885 | ||
1880 | return 0; | 1886 | return 0; |
1881 | } | 1887 | } |
1882 | 1888 | ||
1883 | fs_initcall(print_all_ICs); | 1889 | fs_initcall(print_ICs); |
1884 | 1890 | ||
1885 | 1891 | ||
1886 | /* Where if anywhere is the i8259 connect in external int mode */ | 1892 | /* Where if anywhere is the i8259 connect in external int mode */ |
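The new show_lapic= early parameter above bounds how many CPUs print_local_APICs() dumps, and print_ICs() still bails out while apic_verbosity is APIC_QUIET. Going only by the parsing code in this hunk, kernel command line usage would look like:

        apic=debug show_lapic=2      (dump the local APICs of CPUs 0 and 1)
        apic=debug show_lapic=all    (dump up to CONFIG_NR_CPUS of them)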
@@ -2031,7 +2037,7 @@ void __init setup_ioapic_ids_from_mpc(void) | |||
2031 | * This is broken; anything with a real cpu count has to | 2037 | * This is broken; anything with a real cpu count has to |
2032 | * circumvent this idiocy regardless. | 2038 | * circumvent this idiocy regardless. |
2033 | */ | 2039 | */ |
2034 | phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map); | 2040 | apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map); |
2035 | 2041 | ||
2036 | /* | 2042 | /* |
2037 | * Set the IOAPIC ID to the value stored in the MPC table. | 2043 | * Set the IOAPIC ID to the value stored in the MPC table. |
@@ -2058,7 +2064,7 @@ void __init setup_ioapic_ids_from_mpc(void) | |||
2058 | * system must have a unique ID or we get lots of nice | 2064 | * system must have a unique ID or we get lots of nice |
2059 | * 'stuck on smp_invalidate_needed IPI wait' messages. | 2065 | * 'stuck on smp_invalidate_needed IPI wait' messages. |
2060 | */ | 2066 | */ |
2061 | if (apic->check_apicid_used(phys_id_present_map, | 2067 | if (apic->check_apicid_used(&phys_id_present_map, |
2062 | mp_ioapics[apic_id].apicid)) { | 2068 | mp_ioapics[apic_id].apicid)) { |
2063 | printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", | 2069 | printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", |
2064 | apic_id, mp_ioapics[apic_id].apicid); | 2070 | apic_id, mp_ioapics[apic_id].apicid); |
@@ -2073,7 +2079,7 @@ void __init setup_ioapic_ids_from_mpc(void) | |||
2073 | mp_ioapics[apic_id].apicid = i; | 2079 | mp_ioapics[apic_id].apicid = i; |
2074 | } else { | 2080 | } else { |
2075 | physid_mask_t tmp; | 2081 | physid_mask_t tmp; |
2076 | tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid); | 2082 | apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp); |
2077 | apic_printk(APIC_VERBOSE, "Setting %d in the " | 2083 | apic_printk(APIC_VERBOSE, "Setting %d in the " |
2078 | "phys_id_present_map\n", | 2084 | "phys_id_present_map\n", |
2079 | mp_ioapics[apic_id].apicid); | 2085 | mp_ioapics[apic_id].apicid); |
@@ -2228,20 +2234,16 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2228 | */ | 2234 | */ |
2229 | 2235 | ||
2230 | #ifdef CONFIG_SMP | 2236 | #ifdef CONFIG_SMP |
2231 | static void send_cleanup_vector(struct irq_cfg *cfg) | 2237 | void send_cleanup_vector(struct irq_cfg *cfg) |
2232 | { | 2238 | { |
2233 | cpumask_var_t cleanup_mask; | 2239 | cpumask_var_t cleanup_mask; |
2234 | 2240 | ||
2235 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | 2241 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { |
2236 | unsigned int i; | 2242 | unsigned int i; |
2237 | cfg->move_cleanup_count = 0; | ||
2238 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
2239 | cfg->move_cleanup_count++; | ||
2240 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | 2243 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) |
2241 | apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); | 2244 | apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); |
2242 | } else { | 2245 | } else { |
2243 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | 2246 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); |
2244 | cfg->move_cleanup_count = cpumask_weight(cleanup_mask); | ||
2245 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | 2247 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); |
2246 | free_cpumask_var(cleanup_mask); | 2248 | free_cpumask_var(cleanup_mask); |
2247 | } | 2249 | } |
@@ -2272,15 +2274,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2272 | } | 2274 | } |
2273 | } | 2275 | } |
2274 | 2276 | ||
2275 | static int | ||
2276 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); | ||
2277 | |||
2278 | /* | 2277 | /* |
2279 | * Either sets desc->affinity to a valid value, and returns | 2278 | * Either sets desc->affinity to a valid value, and returns |
2280 | * ->cpu_mask_to_apicid of that, or returns BAD_APICID and | 2279 | * ->cpu_mask_to_apicid of that, or returns BAD_APICID and |
2281 | * leaves desc->affinity untouched. | 2280 | * leaves desc->affinity untouched. |
2282 | */ | 2281 | */ |
2283 | static unsigned int | 2282 | unsigned int |
2284 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) | 2283 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) |
2285 | { | 2284 | { |
2286 | struct irq_cfg *cfg; | 2285 | struct irq_cfg *cfg; |
@@ -2433,8 +2432,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2433 | 2432 | ||
2434 | cfg = irq_cfg(irq); | 2433 | cfg = irq_cfg(irq); |
2435 | spin_lock(&desc->lock); | 2434 | spin_lock(&desc->lock); |
2436 | if (!cfg->move_cleanup_count) | ||
2437 | goto unlock; | ||
2438 | 2435 | ||
2439 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | 2436 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2440 | goto unlock; | 2437 | goto unlock; |
@@ -2452,7 +2449,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2452 | goto unlock; | 2449 | goto unlock; |
2453 | } | 2450 | } |
2454 | __get_cpu_var(vector_irq)[vector] = -1; | 2451 | __get_cpu_var(vector_irq)[vector] = -1; |
2455 | cfg->move_cleanup_count--; | ||
2456 | unlock: | 2452 | unlock: |
2457 | spin_unlock(&desc->lock); | 2453 | spin_unlock(&desc->lock); |
2458 | } | 2454 | } |
@@ -2460,21 +2456,33 @@ unlock: | |||
2460 | irq_exit(); | 2456 | irq_exit(); |
2461 | } | 2457 | } |
2462 | 2458 | ||
2463 | static void irq_complete_move(struct irq_desc **descp) | 2459 | static void __irq_complete_move(struct irq_desc **descp, unsigned vector) |
2464 | { | 2460 | { |
2465 | struct irq_desc *desc = *descp; | 2461 | struct irq_desc *desc = *descp; |
2466 | struct irq_cfg *cfg = desc->chip_data; | 2462 | struct irq_cfg *cfg = desc->chip_data; |
2467 | unsigned vector, me; | 2463 | unsigned me; |
2468 | 2464 | ||
2469 | if (likely(!cfg->move_in_progress)) | 2465 | if (likely(!cfg->move_in_progress)) |
2470 | return; | 2466 | return; |
2471 | 2467 | ||
2472 | vector = ~get_irq_regs()->orig_ax; | ||
2473 | me = smp_processor_id(); | 2468 | me = smp_processor_id(); |
2474 | 2469 | ||
2475 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | 2470 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2476 | send_cleanup_vector(cfg); | 2471 | send_cleanup_vector(cfg); |
2477 | } | 2472 | } |
2473 | |||
2474 | static void irq_complete_move(struct irq_desc **descp) | ||
2475 | { | ||
2476 | __irq_complete_move(descp, ~get_irq_regs()->orig_ax); | ||
2477 | } | ||
2478 | |||
2479 | void irq_force_complete_move(int irq) | ||
2480 | { | ||
2481 | struct irq_desc *desc = irq_to_desc(irq); | ||
2482 | struct irq_cfg *cfg = desc->chip_data; | ||
2483 | |||
2484 | __irq_complete_move(&desc, cfg->vector); | ||
2485 | } | ||
2478 | #else | 2486 | #else |
2479 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2487 | static inline void irq_complete_move(struct irq_desc **descp) {} |
2480 | #endif | 2488 | #endif |
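irq_force_complete_move() exposes the tail of the vector-move protocol to code outside this file: it replays __irq_complete_move() with cfg->vector so the cleanup IPIs are sent even though no interrupt has arrived on the new vector yet. The caller is not shown in this diff; a hypothetical use, e.g. while migrating irqs away from a CPU going offline, might be:

        /* hypothetical caller; the guard and context here are assumptions */
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc && desc->chip_data)
                irq_force_complete_move(irq);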
@@ -2490,6 +2498,59 @@ static void ack_apic_edge(unsigned int irq) | |||
2490 | 2498 | ||
2491 | atomic_t irq_mis_count; | 2499 | atomic_t irq_mis_count; |
2492 | 2500 | ||
2501 | /* | ||
2502 | * IO-APIC versions below 0x20 don't support EOI register. | ||
2503 | * For the record, here is the information about various versions: | ||
2504 | * 0Xh 82489DX | ||
2505 | * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant | ||
2506 | * 2Xh I/O(x)APIC which is PCI 2.2 Compliant | ||
2507 | * 30h-FFh Reserved | ||
2508 | * | ||
2509 | * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic | ||
2510 | * version as 0x2. This is an error in the documentation and these ICH chips | ||
2511 | * use io-apics of version 0x20. | ||
2512 | * | ||
2513 | * For IO-APIC's with EOI register, we use that to do an explicit EOI. | ||
2514 | * Otherwise, we simulate the EOI message manually by changing the trigger | ||
2515 | * mode to edge and then back to level, with RTE being masked during this. | ||
2516 | */ | ||
2517 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | ||
2518 | { | ||
2519 | struct irq_pin_list *entry; | ||
2520 | |||
2521 | for_each_irq_pin(entry, cfg->irq_2_pin) { | ||
2522 | if (mp_ioapics[entry->apic].apicver >= 0x20) { | ||
2523 | /* | ||
2524 | * Intr-remapping uses pin number as the virtual vector | ||
2525 | * in the RTE. Actual vector is programmed in | ||
2526 | * intr-remapping table entry. Hence for the io-apic | ||
2527 | * EOI we use the pin number. | ||
2528 | */ | ||
2529 | if (irq_remapped(irq)) | ||
2530 | io_apic_eoi(entry->apic, entry->pin); | ||
2531 | else | ||
2532 | io_apic_eoi(entry->apic, cfg->vector); | ||
2533 | } else { | ||
2534 | __mask_and_edge_IO_APIC_irq(entry); | ||
2535 | __unmask_and_level_IO_APIC_irq(entry); | ||
2536 | } | ||
2537 | } | ||
2538 | } | ||
2539 | |||
2540 | static void eoi_ioapic_irq(struct irq_desc *desc) | ||
2541 | { | ||
2542 | struct irq_cfg *cfg; | ||
2543 | unsigned long flags; | ||
2544 | unsigned int irq; | ||
2545 | |||
2546 | irq = desc->irq; | ||
2547 | cfg = desc->chip_data; | ||
2548 | |||
2549 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2550 | __eoi_ioapic_irq(irq, cfg); | ||
2551 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2552 | } | ||
2553 | |||
2493 | static void ack_apic_level(unsigned int irq) | 2554 | static void ack_apic_level(unsigned int irq) |
2494 | { | 2555 | { |
2495 | struct irq_desc *desc = irq_to_desc(irq); | 2556 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -2525,6 +2586,19 @@ static void ack_apic_level(unsigned int irq) | |||
2525 | * level-triggered interrupt. We mask the source for the time of the | 2586 | * level-triggered interrupt. We mask the source for the time of the |
2526 | * operation to prevent an edge-triggered interrupt escaping meanwhile. | 2587 | * operation to prevent an edge-triggered interrupt escaping meanwhile. |
2527 | * The idea is from Manfred Spraul. --macro | 2588 | * The idea is from Manfred Spraul. --macro |
2589 | * | ||
2590 | * Also in the case when cpu goes offline, fixup_irqs() will forward | ||
2591 | * any unhandled interrupt on the offlined cpu to the new cpu | ||
2592 | * destination that is handling the corresponding interrupt. This | ||
2593 | * interrupt forwarding is done via IPI's. Hence, in this case also | ||
2594 | * level-triggered io-apic interrupt will be seen as an edge | ||
2595 | * interrupt in the IRR. And we can't rely on the cpu's EOI | ||
2596 | * to be broadcasted to the IO-APIC's which will clear the remoteIRR | ||
2597 | * corresponding to the level-triggered interrupt. Hence on IO-APIC's | ||
2598 | * supporting EOI register, we do an explicit EOI to clear the | ||
2599 | * remote IRR and on IO-APIC's which don't have an EOI register, | ||
2600 | * we use the above logic (mask+edge followed by unmask+level) from | ||
2601 | * Manfred Spraul to clear the remote IRR. | ||
2528 | */ | 2602 | */ |
2529 | cfg = desc->chip_data; | 2603 | cfg = desc->chip_data; |
2530 | i = cfg->vector; | 2604 | i = cfg->vector; |
@@ -2536,6 +2610,19 @@ static void ack_apic_level(unsigned int irq) | |||
2536 | */ | 2610 | */ |
2537 | ack_APIC_irq(); | 2611 | ack_APIC_irq(); |
2538 | 2612 | ||
2613 | /* | ||
2614 | * Tail end of clearing remote IRR bit (either by delivering the EOI | ||
2615 | * message via io-apic EOI register write or simulating it using | ||
2616 | * mask+edge followed by unmask+level logic) manually when the | ||
2617 | * level triggered interrupt is seen as the edge triggered interrupt | ||
2618 | * at the cpu. | ||
2619 | */ | ||
2620 | if (!(v & (1 << (i & 0x1f)))) { | ||
2621 | atomic_inc(&irq_mis_count); | ||
2622 | |||
2623 | eoi_ioapic_irq(desc); | ||
2624 | } | ||
2625 | |||
2539 | /* Now we can move and re-enable the irq */ | 2626 | /* Now we can move and re-enable the irq */ |
2540 | if (unlikely(do_unmask_irq)) { | 2627 | if (unlikely(do_unmask_irq)) { |
2541 | /* Only migrate the irq if the ack has been received. | 2628 | /* Only migrate the irq if the ack has been received. |
@@ -2569,41 +2656,9 @@ static void ack_apic_level(unsigned int irq) | |||
2569 | move_masked_irq(irq); | 2656 | move_masked_irq(irq); |
2570 | unmask_IO_APIC_irq_desc(desc); | 2657 | unmask_IO_APIC_irq_desc(desc); |
2571 | } | 2658 | } |
2572 | |||
2573 | /* Tail end of version 0x11 I/O APIC bug workaround */ | ||
2574 | if (!(v & (1 << (i & 0x1f)))) { | ||
2575 | atomic_inc(&irq_mis_count); | ||
2576 | spin_lock(&ioapic_lock); | ||
2577 | __mask_and_edge_IO_APIC_irq(cfg); | ||
2578 | __unmask_and_level_IO_APIC_irq(cfg); | ||
2579 | spin_unlock(&ioapic_lock); | ||
2580 | } | ||
2581 | } | 2659 | } |
2582 | 2660 | ||
2583 | #ifdef CONFIG_INTR_REMAP | 2661 | #ifdef CONFIG_INTR_REMAP |
2584 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | ||
2585 | { | ||
2586 | struct irq_pin_list *entry; | ||
2587 | |||
2588 | for_each_irq_pin(entry, cfg->irq_2_pin) | ||
2589 | io_apic_eoi(entry->apic, entry->pin); | ||
2590 | } | ||
2591 | |||
2592 | static void | ||
2593 | eoi_ioapic_irq(struct irq_desc *desc) | ||
2594 | { | ||
2595 | struct irq_cfg *cfg; | ||
2596 | unsigned long flags; | ||
2597 | unsigned int irq; | ||
2598 | |||
2599 | irq = desc->irq; | ||
2600 | cfg = desc->chip_data; | ||
2601 | |||
2602 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2603 | __eoi_ioapic_irq(irq, cfg); | ||
2604 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2605 | } | ||
2606 | |||
2607 | static void ir_ack_apic_edge(unsigned int irq) | 2662 | static void ir_ack_apic_edge(unsigned int irq) |
2608 | { | 2663 | { |
2609 | ack_APIC_irq(); | 2664 | ack_APIC_irq(); |
@@ -3157,6 +3212,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) | |||
3157 | continue; | 3212 | continue; |
3158 | 3213 | ||
3159 | desc_new = move_irq_desc(desc_new, node); | 3214 | desc_new = move_irq_desc(desc_new, node); |
3215 | cfg_new = desc_new->chip_data; | ||
3160 | 3216 | ||
3161 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | 3217 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) |
3162 | irq = new; | 3218 | irq = new; |
@@ -3708,75 +3764,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3708 | } | 3764 | } |
3709 | #endif /* CONFIG_HT_IRQ */ | 3765 | #endif /* CONFIG_HT_IRQ */ |
3710 | 3766 | ||
3711 | #ifdef CONFIG_X86_UV | ||
3712 | /* | ||
3713 | * Re-target the irq to the specified CPU and enable the specified MMR located | ||
3714 | * on the specified blade to allow the sending of MSIs to the specified CPU. | ||
3715 | */ | ||
3716 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | ||
3717 | unsigned long mmr_offset) | ||
3718 | { | ||
3719 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | ||
3720 | struct irq_cfg *cfg; | ||
3721 | int mmr_pnode; | ||
3722 | unsigned long mmr_value; | ||
3723 | struct uv_IO_APIC_route_entry *entry; | ||
3724 | unsigned long flags; | ||
3725 | int err; | ||
3726 | |||
3727 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3728 | |||
3729 | cfg = irq_cfg(irq); | ||
3730 | |||
3731 | err = assign_irq_vector(irq, cfg, eligible_cpu); | ||
3732 | if (err != 0) | ||
3733 | return err; | ||
3734 | |||
3735 | spin_lock_irqsave(&vector_lock, flags); | ||
3736 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | ||
3737 | irq_name); | ||
3738 | spin_unlock_irqrestore(&vector_lock, flags); | ||
3739 | |||
3740 | mmr_value = 0; | ||
3741 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
3742 | entry->vector = cfg->vector; | ||
3743 | entry->delivery_mode = apic->irq_delivery_mode; | ||
3744 | entry->dest_mode = apic->irq_dest_mode; | ||
3745 | entry->polarity = 0; | ||
3746 | entry->trigger = 0; | ||
3747 | entry->mask = 0; | ||
3748 | entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); | ||
3749 | |||
3750 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
3751 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
3752 | |||
3753 | if (cfg->move_in_progress) | ||
3754 | send_cleanup_vector(cfg); | ||
3755 | |||
3756 | return irq; | ||
3757 | } | ||
3758 | |||
3759 | /* | ||
3760 | * Disable the specified MMR located on the specified blade so that MSIs are | ||
3761 | * no longer allowed to be sent. | ||
3762 | */ | ||
3763 | void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) | ||
3764 | { | ||
3765 | unsigned long mmr_value; | ||
3766 | struct uv_IO_APIC_route_entry *entry; | ||
3767 | int mmr_pnode; | ||
3768 | |||
3769 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3770 | |||
3771 | mmr_value = 0; | ||
3772 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
3773 | entry->mask = 1; | ||
3774 | |||
3775 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
3776 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
3777 | } | ||
3778 | #endif /* CONFIG_X86_64 */ | ||
3779 | |||
3780 | int __init io_apic_get_redir_entries (int ioapic) | 3767 | int __init io_apic_get_redir_entries (int ioapic) |
3781 | { | 3768 | { |
3782 | union IO_APIC_reg_01 reg_01; | 3769 | union IO_APIC_reg_01 reg_01; |
@@ -3944,7 +3931,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3944 | */ | 3931 | */ |
3945 | 3932 | ||
3946 | if (physids_empty(apic_id_map)) | 3933 | if (physids_empty(apic_id_map)) |
3947 | apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map); | 3934 | apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); |
3948 | 3935 | ||
3949 | spin_lock_irqsave(&ioapic_lock, flags); | 3936 | spin_lock_irqsave(&ioapic_lock, flags); |
3950 | reg_00.raw = io_apic_read(ioapic, 0); | 3937 | reg_00.raw = io_apic_read(ioapic, 0); |
@@ -3960,10 +3947,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3960 | * Every APIC in a system must have a unique ID or we get lots of nice | 3947 | * Every APIC in a system must have a unique ID or we get lots of nice |
3961 | * 'stuck on smp_invalidate_needed IPI wait' messages. | 3948 | * 'stuck on smp_invalidate_needed IPI wait' messages. |
3962 | */ | 3949 | */ |
3963 | if (apic->check_apicid_used(apic_id_map, apic_id)) { | 3950 | if (apic->check_apicid_used(&apic_id_map, apic_id)) { |
3964 | 3951 | ||
3965 | for (i = 0; i < get_physical_broadcast(); i++) { | 3952 | for (i = 0; i < get_physical_broadcast(); i++) { |
3966 | if (!apic->check_apicid_used(apic_id_map, i)) | 3953 | if (!apic->check_apicid_used(&apic_id_map, i)) |
3967 | break; | 3954 | break; |
3968 | } | 3955 | } |
3969 | 3956 | ||
@@ -3976,7 +3963,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3976 | apic_id = i; | 3963 | apic_id = i; |
3977 | } | 3964 | } |
3978 | 3965 | ||
3979 | tmp = apic->apicid_to_cpu_present(apic_id); | 3966 | apic->apicid_to_cpu_present(apic_id, &tmp); |
3980 | physids_or(apic_id_map, apic_id_map, tmp); | 3967 | physids_or(apic_id_map, apic_id_map, tmp); |
3981 | 3968 | ||
3982 | if (reg_00.bits.ID != apic_id) { | 3969 | if (reg_00.bits.ID != apic_id) { |
@@ -4106,7 +4093,7 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics) | |||
4106 | for (i = 0; i < nr_ioapics; i++) { | 4093 | for (i = 0; i < nr_ioapics; i++) { |
4107 | res[i].name = mem; | 4094 | res[i].name = mem; |
4108 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 4095 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
4109 | sprintf(mem, "IOAPIC %u", i); | 4096 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); |
4110 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 4097 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
4111 | } | 4098 | } |
4112 | 4099 | ||
@@ -4140,18 +4127,17 @@ void __init ioapic_init_mappings(void) | |||
4140 | #ifdef CONFIG_X86_32 | 4127 | #ifdef CONFIG_X86_32 |
4141 | fake_ioapic_page: | 4128 | fake_ioapic_page: |
4142 | #endif | 4129 | #endif |
4143 | ioapic_phys = (unsigned long) | 4130 | ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); |
4144 | alloc_bootmem_pages(PAGE_SIZE); | ||
4145 | ioapic_phys = __pa(ioapic_phys); | 4131 | ioapic_phys = __pa(ioapic_phys); |
4146 | } | 4132 | } |
4147 | set_fixmap_nocache(idx, ioapic_phys); | 4133 | set_fixmap_nocache(idx, ioapic_phys); |
4148 | apic_printk(APIC_VERBOSE, | 4134 | apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", |
4149 | "mapped IOAPIC to %08lx (%08lx)\n", | 4135 | __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), |
4150 | __fix_to_virt(idx), ioapic_phys); | 4136 | ioapic_phys); |
4151 | idx++; | 4137 | idx++; |
4152 | 4138 | ||
4153 | ioapic_res->start = ioapic_phys; | 4139 | ioapic_res->start = ioapic_phys; |
4154 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; | 4140 | ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; |
4155 | ioapic_res++; | 4141 | ioapic_res++; |
4156 | } | 4142 | } |
4157 | } | 4143 | } |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index 7ff61d6a188a..6389432a9dbf 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -39,7 +39,8 @@ | |||
39 | int unknown_nmi_panic; | 39 | int unknown_nmi_panic; |
40 | int nmi_watchdog_enabled; | 40 | int nmi_watchdog_enabled; |
41 | 41 | ||
42 | static cpumask_t backtrace_mask __read_mostly; | 42 | /* For reliability, we're prepared to waste bits here. */ |
43 | static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; | ||
43 | 44 | ||
44 | /* nmi_active: | 45 | /* nmi_active: |
45 | * >0: the lapic NMI watchdog is active, but can be disabled | 46 | * >0: the lapic NMI watchdog is active, but can be disabled |
@@ -414,7 +415,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
414 | } | 415 | } |
415 | 416 | ||
416 | /* We can be called before check_nmi_watchdog, hence NULL check. */ | 417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
417 | if (cpumask_test_cpu(cpu, &backtrace_mask)) { | 418 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
419 | 420 | ||
420 | spin_lock(&lock); | 421 | spin_lock(&lock); |
@@ -422,7 +423,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
422 | show_regs(regs); | 423 | show_regs(regs); |
423 | dump_stack(); | 424 | dump_stack(); |
424 | spin_unlock(&lock); | 425 | spin_unlock(&lock); |
425 | cpumask_clear_cpu(cpu, &backtrace_mask); | 426 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
426 | 427 | ||
427 | rc = 1; | 428 | rc = 1; |
428 | } | 429 | } |
@@ -558,14 +559,14 @@ void arch_trigger_all_cpu_backtrace(void) | |||
558 | { | 559 | { |
559 | int i; | 560 | int i; |
560 | 561 | ||
561 | cpumask_copy(&backtrace_mask, cpu_online_mask); | 562 | cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); |
562 | 563 | ||
563 | printk(KERN_INFO "sending NMI to all CPUs:\n"); | 564 | printk(KERN_INFO "sending NMI to all CPUs:\n"); |
564 | apic->send_IPI_all(NMI_VECTOR); | 565 | apic->send_IPI_all(NMI_VECTOR); |
565 | 566 | ||
566 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | 567 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ |
567 | for (i = 0; i < 10 * 1000; i++) { | 568 | for (i = 0; i < 10 * 1000; i++) { |
568 | if (cpumask_empty(&backtrace_mask)) | 569 | if (cpumask_empty(to_cpumask(backtrace_mask))) |
569 | break; | 570 | break; |
570 | mdelay(1); | 571 | mdelay(1); |
571 | } | 572 | } |
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index efa00e2b8505..07cdbdcd7a92 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -334,10 +334,9 @@ static inline const struct cpumask *numaq_target_cpus(void) | |||
334 | return cpu_all_mask; | 334 | return cpu_all_mask; |
335 | } | 335 | } |
336 | 336 | ||
337 | static inline unsigned long | 337 | static unsigned long numaq_check_apicid_used(physid_mask_t *map, int apicid) |
338 | numaq_check_apicid_used(physid_mask_t bitmap, int apicid) | ||
339 | { | 338 | { |
340 | return physid_isset(apicid, bitmap); | 339 | return physid_isset(apicid, *map); |
341 | } | 340 | } |
342 | 341 | ||
343 | static inline unsigned long numaq_check_apicid_present(int bit) | 342 | static inline unsigned long numaq_check_apicid_present(int bit) |
@@ -371,10 +370,10 @@ static inline int numaq_multi_timer_check(int apic, int irq) | |||
371 | return apic != 0 && irq == 0; | 370 | return apic != 0 && irq == 0; |
372 | } | 371 | } |
373 | 372 | ||
374 | static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) | 373 | static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
375 | { | 374 | { |
376 | /* We don't have a good way to do this yet - hack */ | 375 | /* We don't have a good way to do this yet - hack */ |
377 | return physids_promote(0xFUL); | 376 | return physids_promote(0xFUL, retmap); |
378 | } | 377 | } |
379 | 378 | ||
380 | static inline int numaq_cpu_to_logical_apicid(int cpu) | 379 | static inline int numaq_cpu_to_logical_apicid(int cpu) |
@@ -402,12 +401,12 @@ static inline int numaq_apicid_to_node(int logical_apicid) | |||
402 | return logical_apicid >> 4; | 401 | return logical_apicid >> 4; |
403 | } | 402 | } |
404 | 403 | ||
405 | static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) | 404 | static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) |
406 | { | 405 | { |
407 | int node = numaq_apicid_to_node(logical_apicid); | 406 | int node = numaq_apicid_to_node(logical_apicid); |
408 | int cpu = __ffs(logical_apicid & 0xf); | 407 | int cpu = __ffs(logical_apicid & 0xf); |
409 | 408 | ||
410 | return physid_mask_of_physid(cpu + 4*node); | 409 | physid_set_mask_of_physid(cpu + 4*node, retmap); |
411 | } | 410 | } |
412 | 411 | ||
413 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ | 412 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ |
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 0c0182cc947d..1a6559f6768c 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -108,7 +108,7 @@ struct apic apic_default = { | |||
108 | .apicid_to_node = default_apicid_to_node, | 108 | .apicid_to_node = default_apicid_to_node, |
109 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, | 109 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, |
110 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 110 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
111 | .apicid_to_cpu_present = default_apicid_to_cpu_present, | 111 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
112 | .setup_portio_remap = NULL, | 112 | .setup_portio_remap = NULL, |
113 | .check_phys_apicid_present = default_check_phys_apicid_present, | 113 | .check_phys_apicid_present = default_check_phys_apicid_present, |
114 | .enable_apic_mode = NULL, | 114 | .enable_apic_mode = NULL, |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 645ecc4ff0be..9b419263d90d 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -183,7 +183,7 @@ static const struct cpumask *summit_target_cpus(void) | |||
183 | return cpumask_of(0); | 183 | return cpumask_of(0); |
184 | } | 184 | } |
185 | 185 | ||
186 | static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid) | 186 | static unsigned long summit_check_apicid_used(physid_mask_t *map, int apicid) |
187 | { | 187 | { |
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
@@ -261,15 +261,15 @@ static int summit_cpu_present_to_apicid(int mps_cpu) | |||
261 | return BAD_APICID; | 261 | return BAD_APICID; |
262 | } | 262 | } |
263 | 263 | ||
264 | static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map) | 264 | static void summit_ioapic_phys_id_map(physid_mask_t *phys_id_map, physid_mask_t *retmap) |
265 | { | 265 | { |
266 | /* For clustered we don't have a good way to do this yet - hack */ | 266 | /* For clustered we don't have a good way to do this yet - hack */ |
267 | return physids_promote(0x0F); | 267 | physids_promote(0x0FL, retmap); |
268 | } | 268 | } |
269 | 269 | ||
270 | static physid_mask_t summit_apicid_to_cpu_present(int apicid) | 270 | static void summit_apicid_to_cpu_present(int apicid, physid_mask_t *retmap) |
271 | { | 271 | { |
272 | return physid_mask_of_physid(0); | 272 | physid_set_mask_of_physid(0, retmap); |
273 | } | 273 | } |
274 | 274 | ||
275 | static int summit_check_phys_apicid_present(int physical_apicid) | 275 | static int summit_check_phys_apicid_present(int physical_apicid) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 326c25477d3d..130c4b934877 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -409,6 +409,12 @@ static __init void map_mmioh_high(int max_pnode) | |||
409 | map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); | 409 | map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); |
410 | } | 410 | } |
411 | 411 | ||
412 | static __init void map_low_mmrs(void) | ||
413 | { | ||
414 | init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); | ||
415 | init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); | ||
416 | } | ||
417 | |||
412 | static __init void uv_rtc_init(void) | 418 | static __init void uv_rtc_init(void) |
413 | { | 419 | { |
414 | long status; | 420 | long status; |
@@ -550,6 +556,8 @@ void __init uv_system_init(void) | |||
550 | unsigned long mmr_base, present, paddr; | 556 | unsigned long mmr_base, present, paddr; |
551 | unsigned short pnode_mask; | 557 | unsigned short pnode_mask; |
552 | 558 | ||
559 | map_low_mmrs(); | ||
560 | |||
553 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | 561 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); |
554 | m_val = m_n_config.s.m_skt; | 562 | m_val = m_n_config.s.m_skt; |
555 | n_val = m_n_config.s.n_skt; | 563 | n_val = m_n_config.s.n_skt; |
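map_low_mmrs() is new and is called first thing in uv_system_init(), so the UV global MMR32 and local MMR windows get uncached mappings before uv_read_local_mmr() is used a few lines further down. A rough sketch of the resulting ordering, based only on the two hunks above:

	void __init uv_system_init(void)
	{
		map_low_mmrs();	/* init_extra_mapping_uc() for GLOBAL_MMR32 and LOCAL_MMR */

		/* only now is it safe to touch the local MMR space: */
		m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
		/* ... */
	}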
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 151ace69a5aa..b5b6b23bce53 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -204,7 +204,6 @@ | |||
204 | #include <linux/module.h> | 204 | #include <linux/module.h> |
205 | 205 | ||
206 | #include <linux/poll.h> | 206 | #include <linux/poll.h> |
207 | #include <linux/smp_lock.h> | ||
208 | #include <linux/types.h> | 207 | #include <linux/types.h> |
209 | #include <linux/stddef.h> | 208 | #include <linux/stddef.h> |
210 | #include <linux/timer.h> | 209 | #include <linux/timer.h> |
@@ -403,6 +402,7 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); | |||
403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); | 402 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); |
404 | static struct apm_user *user_list; | 403 | static struct apm_user *user_list; |
405 | static DEFINE_SPINLOCK(user_list_lock); | 404 | static DEFINE_SPINLOCK(user_list_lock); |
405 | static DEFINE_MUTEX(apm_mutex); | ||
406 | 406 | ||
407 | /* | 407 | /* |
408 | * Set up a segment that references the real mode segment 0x40 | 408 | * Set up a segment that references the real mode segment 0x40 |
@@ -1531,7 +1531,7 @@ static long do_ioctl(struct file *filp, u_int cmd, u_long arg) | |||
1531 | return -EPERM; | 1531 | return -EPERM; |
1532 | switch (cmd) { | 1532 | switch (cmd) { |
1533 | case APM_IOC_STANDBY: | 1533 | case APM_IOC_STANDBY: |
1534 | lock_kernel(); | 1534 | mutex_lock(&apm_mutex); |
1535 | if (as->standbys_read > 0) { | 1535 | if (as->standbys_read > 0) { |
1536 | as->standbys_read--; | 1536 | as->standbys_read--; |
1537 | as->standbys_pending--; | 1537 | as->standbys_pending--; |
@@ -1540,10 +1540,10 @@ static long do_ioctl(struct file *filp, u_int cmd, u_long arg) | |||
1540 | queue_event(APM_USER_STANDBY, as); | 1540 | queue_event(APM_USER_STANDBY, as); |
1541 | if (standbys_pending <= 0) | 1541 | if (standbys_pending <= 0) |
1542 | standby(); | 1542 | standby(); |
1543 | unlock_kernel(); | 1543 | mutex_unlock(&apm_mutex); |
1544 | break; | 1544 | break; |
1545 | case APM_IOC_SUSPEND: | 1545 | case APM_IOC_SUSPEND: |
1546 | lock_kernel(); | 1546 | mutex_lock(&apm_mutex); |
1547 | if (as->suspends_read > 0) { | 1547 | if (as->suspends_read > 0) { |
1548 | as->suspends_read--; | 1548 | as->suspends_read--; |
1549 | as->suspends_pending--; | 1549 | as->suspends_pending--; |
@@ -1552,13 +1552,14 @@ static long do_ioctl(struct file *filp, u_int cmd, u_long arg) | |||
1552 | queue_event(APM_USER_SUSPEND, as); | 1552 | queue_event(APM_USER_SUSPEND, as); |
1553 | if (suspends_pending <= 0) { | 1553 | if (suspends_pending <= 0) { |
1554 | ret = suspend(1); | 1554 | ret = suspend(1); |
1555 | mutex_unlock(&apm_mutex); | ||
1555 | } else { | 1556 | } else { |
1556 | as->suspend_wait = 1; | 1557 | as->suspend_wait = 1; |
1558 | mutex_unlock(&apm_mutex); | ||
1557 | wait_event_interruptible(apm_suspend_waitqueue, | 1559 | wait_event_interruptible(apm_suspend_waitqueue, |
1558 | as->suspend_wait == 0); | 1560 | as->suspend_wait == 0); |
1559 | ret = as->suspend_result; | 1561 | ret = as->suspend_result; |
1560 | } | 1562 | } |
1561 | unlock_kernel(); | ||
1562 | return ret; | 1563 | return ret; |
1563 | default: | 1564 | default: |
1564 | return -ENOTTY; | 1565 | return -ENOTTY; |
@@ -1608,12 +1609,10 @@ static int do_open(struct inode *inode, struct file *filp) | |||
1608 | { | 1609 | { |
1609 | struct apm_user *as; | 1610 | struct apm_user *as; |
1610 | 1611 | ||
1611 | lock_kernel(); | ||
1612 | as = kmalloc(sizeof(*as), GFP_KERNEL); | 1612 | as = kmalloc(sizeof(*as), GFP_KERNEL); |
1613 | if (as == NULL) { | 1613 | if (as == NULL) { |
1614 | printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", | 1614 | printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", |
1615 | sizeof(*as)); | 1615 | sizeof(*as)); |
1616 | unlock_kernel(); | ||
1617 | return -ENOMEM; | 1616 | return -ENOMEM; |
1618 | } | 1617 | } |
1619 | as->magic = APM_BIOS_MAGIC; | 1618 | as->magic = APM_BIOS_MAGIC; |
@@ -1635,7 +1634,6 @@ static int do_open(struct inode *inode, struct file *filp) | |||
1635 | user_list = as; | 1634 | user_list = as; |
1636 | spin_unlock(&user_list_lock); | 1635 | spin_unlock(&user_list_lock); |
1637 | filp->private_data = as; | 1636 | filp->private_data = as; |
1638 | unlock_kernel(); | ||
1639 | return 0; | 1637 | return 0; |
1640 | } | 1638 | } |
1641 | 1639 | ||
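The apm_32.c hunks are a straight big-kernel-lock removal: the lock_kernel()/unlock_kernel() pairs in do_ioctl() become a driver-local apm_mutex, and do_open() needs no lock at all since it only touches a freshly allocated apm_user plus the already spinlock-protected user_list. Note that the suspend path releases the mutex before sleeping; holding it across wait_event_interruptible() would block every other APM ioctl behind a suspended caller. A condensed sketch of the converted ioctl pattern, with the unrelated driver code elided:

	static DEFINE_MUTEX(apm_mutex);

	static long do_ioctl(struct file *filp, u_int cmd, u_long arg)
	{
		/* ... */
		case APM_IOC_SUSPEND:
			mutex_lock(&apm_mutex);
			if (suspends_pending <= 0) {
				ret = suspend(1);
				mutex_unlock(&apm_mutex);
			} else {
				as->suspend_wait = 1;
				mutex_unlock(&apm_mutex);	/* drop the lock before sleeping */
				wait_event_interruptible(apm_suspend_waitqueue,
							 as->suspend_wait == 0);
				ret = as->suspend_result;
			}
			return ret;
		/* ... */
	}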
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 68537e957a9b..1d2cb383410e 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | # Don't trace early stages of a secondary CPU boot | 5 | # Don't trace early stages of a secondary CPU boot |
6 | ifdef CONFIG_FUNCTION_TRACER | 6 | ifdef CONFIG_FUNCTION_TRACER |
7 | CFLAGS_REMOVE_common.o = -pg | 7 | CFLAGS_REMOVE_common.o = -pg |
8 | CFLAGS_REMOVE_perf_event.o = -pg | ||
8 | endif | 9 | endif |
9 | 10 | ||
10 | # Make sure load_percpu_segment has no stackprotector | 11 | # Make sure load_percpu_segment has no stackprotector |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c910a716a71c..7128b3799cec 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -535,7 +535,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
535 | } | 535 | } |
536 | } | 536 | } |
537 | 537 | ||
538 | display_cacheinfo(c); | 538 | cpu_detect_cache_sizes(c); |
539 | 539 | ||
540 | /* Multi core CPU? */ | 540 | /* Multi core CPU? */ |
541 | if (c->extended_cpuid_level >= 0x80000008) { | 541 | if (c->extended_cpuid_level >= 0x80000008) { |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index c95e831bb095..e58d978e0758 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -294,7 +294,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) | |||
294 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 294 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
295 | } | 295 | } |
296 | 296 | ||
297 | display_cacheinfo(c); | 297 | cpu_detect_cache_sizes(c); |
298 | } | 298 | } |
299 | 299 | ||
300 | enum { | 300 | enum { |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cc25c2b4a567..a4ec8b647544 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -61,7 +61,7 @@ void __init setup_cpu_local_masks(void) | |||
61 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 61 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
62 | { | 62 | { |
63 | #ifdef CONFIG_X86_64 | 63 | #ifdef CONFIG_X86_64 |
64 | display_cacheinfo(c); | 64 | cpu_detect_cache_sizes(c); |
65 | #else | 65 | #else |
66 | /* Not much we can do here... */ | 66 | /* Not much we can do here... */ |
67 | /* Check if at least it has cpuid */ | 67 | /* Check if at least it has cpuid */ |
@@ -383,7 +383,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
383 | } | 383 | } |
384 | } | 384 | } |
385 | 385 | ||
386 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | 386 | void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) |
387 | { | 387 | { |
388 | unsigned int n, dummy, ebx, ecx, edx, l2size; | 388 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
389 | 389 | ||
@@ -391,8 +391,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
391 | 391 | ||
392 | if (n >= 0x80000005) { | 392 | if (n >= 0x80000005) { |
393 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | 393 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
394 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", | ||
395 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | ||
396 | c->x86_cache_size = (ecx>>24) + (edx>>24); | 394 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
397 | #ifdef CONFIG_X86_64 | 395 | #ifdef CONFIG_X86_64 |
398 | /* On K8 L1 TLB is inclusive, so don't count it */ | 396 | /* On K8 L1 TLB is inclusive, so don't count it */ |
@@ -422,9 +420,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
422 | #endif | 420 | #endif |
423 | 421 | ||
424 | c->x86_cache_size = l2size; | 422 | c->x86_cache_size = l2size; |
425 | |||
426 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | ||
427 | l2size, ecx & 0xFF); | ||
428 | } | 423 | } |
429 | 424 | ||
430 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 425 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
@@ -659,24 +654,31 @@ void __init early_cpu_init(void) | |||
659 | const struct cpu_dev *const *cdev; | 654 | const struct cpu_dev *const *cdev; |
660 | int count = 0; | 655 | int count = 0; |
661 | 656 | ||
657 | #ifdef PROCESSOR_SELECT | ||
662 | printk(KERN_INFO "KERNEL supported cpus:\n"); | 658 | printk(KERN_INFO "KERNEL supported cpus:\n"); |
659 | #endif | ||
660 | |||
663 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | 661 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
664 | const struct cpu_dev *cpudev = *cdev; | 662 | const struct cpu_dev *cpudev = *cdev; |
665 | unsigned int j; | ||
666 | 663 | ||
667 | if (count >= X86_VENDOR_NUM) | 664 | if (count >= X86_VENDOR_NUM) |
668 | break; | 665 | break; |
669 | cpu_devs[count] = cpudev; | 666 | cpu_devs[count] = cpudev; |
670 | count++; | 667 | count++; |
671 | 668 | ||
672 | for (j = 0; j < 2; j++) { | 669 | #ifdef PROCESSOR_SELECT |
673 | if (!cpudev->c_ident[j]) | 670 | { |
674 | continue; | 671 | unsigned int j; |
675 | printk(KERN_INFO " %s %s\n", cpudev->c_vendor, | 672 | |
676 | cpudev->c_ident[j]); | 673 | for (j = 0; j < 2; j++) { |
674 | if (!cpudev->c_ident[j]) | ||
675 | continue; | ||
676 | printk(KERN_INFO " %s %s\n", cpudev->c_vendor, | ||
677 | cpudev->c_ident[j]); | ||
678 | } | ||
677 | } | 679 | } |
680 | #endif | ||
678 | } | 681 | } |
679 | |||
680 | early_identify_cpu(&boot_cpu_data); | 682 | early_identify_cpu(&boot_cpu_data); |
681 | } | 683 | } |
682 | 684 | ||
@@ -837,10 +839,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
837 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 839 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
838 | } | 840 | } |
839 | 841 | ||
840 | #ifdef CONFIG_X86_MCE | ||
841 | /* Init Machine Check Exception if available. */ | 842 | /* Init Machine Check Exception if available. */ |
842 | mcheck_init(c); | 843 | mcheck_cpu_init(c); |
843 | #endif | ||
844 | 844 | ||
845 | select_idle_routine(c); | 845 | select_idle_routine(c); |
846 | 846 | ||
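Three small cleanups meet in common.c. display_cacheinfo() becomes cpu_detect_cache_sizes() and loses its printk()s, since every caller in this series (AMD, Centaur, Cyrix, Transmeta and the 64-bit default_init() path) only wants c->x86_cache_size filled in; the matching cache printout in intel_cacheinfo.c is dropped for the same reason. The "KERNEL supported cpus:" banner and the per-vendor ident listing are now compiled only under PROCESSOR_SELECT, where the list actually carries information. And identify_cpu() calls the renamed mcheck_cpu_init() without the CONFIG_X86_MCE guard, presumably relying on an empty stub when MCE support is compiled out; the old mcheck_init name is reused further down for the new boot-time initializer in mce.c. A short sketch of what callers see after the renames, using only identifiers from these hunks:

	/* in a vendor init path such as init_amd(): */
	cpu_detect_cache_sizes(c);	/* was display_cacheinfo(); fills c->x86_cache_size, prints nothing */

	/* in identify_cpu(): */
	mcheck_cpu_init(c);		/* no #ifdef CONFIG_X86_MCE wrapper needed any more */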
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 6de9a908e400..3624e8a0f71b 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -32,6 +32,6 @@ struct cpu_dev { | |||
32 | extern const struct cpu_dev *const __x86_cpu_dev_start[], | 32 | extern const struct cpu_dev *const __x86_cpu_dev_start[], |
33 | *const __x86_cpu_dev_end[]; | 33 | *const __x86_cpu_dev_end[]; |
34 | 34 | ||
35 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
36 | 36 | ||
37 | #endif | 37 | #endif |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 19807b89f058..4fbd384fb645 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -373,7 +373,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | |||
373 | /* Handle the GX (Formally known as the GX2) */ | 373 | /* Handle the GX (Formally known as the GX2) */ |
374 | 374 | ||
375 | if (c->x86 == 5 && c->x86_model == 5) | 375 | if (c->x86 == 5 && c->x86_model == 5) |
376 | display_cacheinfo(c); | 376 | cpu_detect_cache_sizes(c); |
377 | else | 377 | else |
378 | init_cyrix(c); | 378 | init_cyrix(c); |
379 | } | 379 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 804c40e2bc3e..0df4c2b7107f 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -488,22 +488,6 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
488 | #endif | 488 | #endif |
489 | } | 489 | } |
490 | 490 | ||
491 | if (trace) | ||
492 | printk(KERN_INFO "CPU: Trace cache: %dK uops", trace); | ||
493 | else if (l1i) | ||
494 | printk(KERN_INFO "CPU: L1 I cache: %dK", l1i); | ||
495 | |||
496 | if (l1d) | ||
497 | printk(KERN_CONT ", L1 D cache: %dK\n", l1d); | ||
498 | else | ||
499 | printk(KERN_CONT "\n"); | ||
500 | |||
501 | if (l2) | ||
502 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); | ||
503 | |||
504 | if (l3) | ||
505 | printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); | ||
506 | |||
507 | c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); | 491 | c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); |
508 | 492 | ||
509 | return l2; | 493 | return l2; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 721a77ca8115..0bcaa3875863 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -46,6 +46,9 @@ | |||
46 | 46 | ||
47 | #include "mce-internal.h" | 47 | #include "mce-internal.h" |
48 | 48 | ||
49 | #define CREATE_TRACE_POINTS | ||
50 | #include <trace/events/mce.h> | ||
51 | |||
49 | int mce_disabled __read_mostly; | 52 | int mce_disabled __read_mostly; |
50 | 53 | ||
51 | #define MISC_MCELOG_MINOR 227 | 54 | #define MISC_MCELOG_MINOR 227 |
@@ -85,18 +88,26 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait); | |||
85 | static DEFINE_PER_CPU(struct mce, mces_seen); | 88 | static DEFINE_PER_CPU(struct mce, mces_seen); |
86 | static int cpu_missing; | 89 | static int cpu_missing; |
87 | 90 | ||
88 | static void default_decode_mce(struct mce *m) | 91 | /* |
92 | * CPU/chipset specific EDAC code can register a notifier call here to print | ||
93 | * MCE errors in a human-readable form. | ||
94 | */ | ||
95 | ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); | ||
96 | EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); | ||
97 | |||
98 | static int default_decode_mce(struct notifier_block *nb, unsigned long val, | ||
99 | void *data) | ||
89 | { | 100 | { |
90 | pr_emerg("No human readable MCE decoding support on this CPU type.\n"); | 101 | pr_emerg("No human readable MCE decoding support on this CPU type.\n"); |
91 | pr_emerg("Run the message through 'mcelog --ascii' to decode.\n"); | 102 | pr_emerg("Run the message through 'mcelog --ascii' to decode.\n"); |
103 | |||
104 | return NOTIFY_STOP; | ||
92 | } | 105 | } |
93 | 106 | ||
94 | /* | 107 | static struct notifier_block mce_dec_nb = { |
95 | * CPU/chipset specific EDAC code can register a callback here to print | 108 | .notifier_call = default_decode_mce, |
96 | * MCE errors in a human-readable form: | 109 | .priority = -1, |
97 | */ | 110 | }; |
98 | void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce; | ||
99 | EXPORT_SYMBOL(x86_mce_decode_callback); | ||
100 | 111 | ||
101 | /* MCA banks polled by the period polling timer for corrected events */ | 112 | /* MCA banks polled by the period polling timer for corrected events */ |
102 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | 113 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { |
@@ -141,6 +152,9 @@ void mce_log(struct mce *mce) | |||
141 | { | 152 | { |
142 | unsigned next, entry; | 153 | unsigned next, entry; |
143 | 154 | ||
155 | /* Emit the trace record: */ | ||
156 | trace_mce_record(mce); | ||
157 | |||
144 | mce->finished = 0; | 158 | mce->finished = 0; |
145 | wmb(); | 159 | wmb(); |
146 | for (;;) { | 160 | for (;;) { |
@@ -204,9 +218,9 @@ static void print_mce(struct mce *m) | |||
204 | 218 | ||
205 | /* | 219 | /* |
206 | * Print out human-readable details about the MCE error, | 220 | * Print out human-readable details about the MCE error, |
207 | * (if the CPU has an implementation for that): | 221 | * (if the CPU has an implementation for that) |
208 | */ | 222 | */ |
209 | x86_mce_decode_callback(m); | 223 | atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); |
210 | } | 224 | } |
211 | 225 | ||
212 | static void print_mce_head(void) | 226 | static void print_mce_head(void) |
@@ -1122,7 +1136,7 @@ static int check_interval = 5 * 60; /* 5 minutes */ | |||
1122 | static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ | 1136 | static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ |
1123 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | 1137 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
1124 | 1138 | ||
1125 | static void mcheck_timer(unsigned long data) | 1139 | static void mce_start_timer(unsigned long data) |
1126 | { | 1140 | { |
1127 | struct timer_list *t = &per_cpu(mce_timer, data); | 1141 | struct timer_list *t = &per_cpu(mce_timer, data); |
1128 | int *n; | 1142 | int *n; |
@@ -1187,7 +1201,7 @@ int mce_notify_irq(void) | |||
1187 | } | 1201 | } |
1188 | EXPORT_SYMBOL_GPL(mce_notify_irq); | 1202 | EXPORT_SYMBOL_GPL(mce_notify_irq); |
1189 | 1203 | ||
1190 | static int mce_banks_init(void) | 1204 | static int __cpuinit __mcheck_cpu_mce_banks_init(void) |
1191 | { | 1205 | { |
1192 | int i; | 1206 | int i; |
1193 | 1207 | ||
@@ -1206,7 +1220,7 @@ static int mce_banks_init(void) | |||
1206 | /* | 1220 | /* |
1207 | * Initialize Machine Checks for a CPU. | 1221 | * Initialize Machine Checks for a CPU. |
1208 | */ | 1222 | */ |
1209 | static int __cpuinit mce_cap_init(void) | 1223 | static int __cpuinit __mcheck_cpu_cap_init(void) |
1210 | { | 1224 | { |
1211 | unsigned b; | 1225 | unsigned b; |
1212 | u64 cap; | 1226 | u64 cap; |
@@ -1228,7 +1242,7 @@ static int __cpuinit mce_cap_init(void) | |||
1228 | WARN_ON(banks != 0 && b != banks); | 1242 | WARN_ON(banks != 0 && b != banks); |
1229 | banks = b; | 1243 | banks = b; |
1230 | if (!mce_banks) { | 1244 | if (!mce_banks) { |
1231 | int err = mce_banks_init(); | 1245 | int err = __mcheck_cpu_mce_banks_init(); |
1232 | 1246 | ||
1233 | if (err) | 1247 | if (err) |
1234 | return err; | 1248 | return err; |
@@ -1244,7 +1258,7 @@ static int __cpuinit mce_cap_init(void) | |||
1244 | return 0; | 1258 | return 0; |
1245 | } | 1259 | } |
1246 | 1260 | ||
1247 | static void mce_init(void) | 1261 | static void __mcheck_cpu_init_generic(void) |
1248 | { | 1262 | { |
1249 | mce_banks_t all_banks; | 1263 | mce_banks_t all_banks; |
1250 | u64 cap; | 1264 | u64 cap; |
@@ -1273,7 +1287,7 @@ static void mce_init(void) | |||
1273 | } | 1287 | } |
1274 | 1288 | ||
1275 | /* Add per CPU specific workarounds here */ | 1289 | /* Add per CPU specific workarounds here */ |
1276 | static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) | 1290 | static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) |
1277 | { | 1291 | { |
1278 | if (c->x86_vendor == X86_VENDOR_UNKNOWN) { | 1292 | if (c->x86_vendor == X86_VENDOR_UNKNOWN) { |
1279 | pr_info("MCE: unknown CPU type - not enabling MCE support.\n"); | 1293 | pr_info("MCE: unknown CPU type - not enabling MCE support.\n"); |
@@ -1341,7 +1355,7 @@ static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) | |||
1341 | return 0; | 1355 | return 0; |
1342 | } | 1356 | } |
1343 | 1357 | ||
1344 | static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c) | 1358 | static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) |
1345 | { | 1359 | { |
1346 | if (c->x86 != 5) | 1360 | if (c->x86 != 5) |
1347 | return; | 1361 | return; |
@@ -1355,7 +1369,7 @@ static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c) | |||
1355 | } | 1369 | } |
1356 | } | 1370 | } |
1357 | 1371 | ||
1358 | static void mce_cpu_features(struct cpuinfo_x86 *c) | 1372 | static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) |
1359 | { | 1373 | { |
1360 | switch (c->x86_vendor) { | 1374 | switch (c->x86_vendor) { |
1361 | case X86_VENDOR_INTEL: | 1375 | case X86_VENDOR_INTEL: |
@@ -1369,7 +1383,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) | |||
1369 | } | 1383 | } |
1370 | } | 1384 | } |
1371 | 1385 | ||
1372 | static void mce_init_timer(void) | 1386 | static void __mcheck_cpu_init_timer(void) |
1373 | { | 1387 | { |
1374 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1388 | struct timer_list *t = &__get_cpu_var(mce_timer); |
1375 | int *n = &__get_cpu_var(mce_next_interval); | 1389 | int *n = &__get_cpu_var(mce_next_interval); |
@@ -1380,7 +1394,7 @@ static void mce_init_timer(void) | |||
1380 | *n = check_interval * HZ; | 1394 | *n = check_interval * HZ; |
1381 | if (!*n) | 1395 | if (!*n) |
1382 | return; | 1396 | return; |
1383 | setup_timer(t, mcheck_timer, smp_processor_id()); | 1397 | setup_timer(t, mce_start_timer, smp_processor_id()); |
1384 | t->expires = round_jiffies(jiffies + *n); | 1398 | t->expires = round_jiffies(jiffies + *n); |
1385 | add_timer_on(t, smp_processor_id()); | 1399 | add_timer_on(t, smp_processor_id()); |
1386 | } | 1400 | } |
@@ -1400,27 +1414,28 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = | |||
1400 | * Called for each booted CPU to set up machine checks. | 1414 | * Called for each booted CPU to set up machine checks. |
1401 | * Must be called with preempt off: | 1415 | * Must be called with preempt off: |
1402 | */ | 1416 | */ |
1403 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) | 1417 | void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) |
1404 | { | 1418 | { |
1405 | if (mce_disabled) | 1419 | if (mce_disabled) |
1406 | return; | 1420 | return; |
1407 | 1421 | ||
1408 | mce_ancient_init(c); | 1422 | __mcheck_cpu_ancient_init(c); |
1409 | 1423 | ||
1410 | if (!mce_available(c)) | 1424 | if (!mce_available(c)) |
1411 | return; | 1425 | return; |
1412 | 1426 | ||
1413 | if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) { | 1427 | if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { |
1414 | mce_disabled = 1; | 1428 | mce_disabled = 1; |
1415 | return; | 1429 | return; |
1416 | } | 1430 | } |
1417 | 1431 | ||
1418 | machine_check_vector = do_machine_check; | 1432 | machine_check_vector = do_machine_check; |
1419 | 1433 | ||
1420 | mce_init(); | 1434 | __mcheck_cpu_init_generic(); |
1421 | mce_cpu_features(c); | 1435 | __mcheck_cpu_init_vendor(c); |
1422 | mce_init_timer(); | 1436 | __mcheck_cpu_init_timer(); |
1423 | INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); | 1437 | INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); |
1438 | |||
1424 | } | 1439 | } |
1425 | 1440 | ||
1426 | /* | 1441 | /* |
@@ -1640,6 +1655,15 @@ static int __init mcheck_enable(char *str) | |||
1640 | } | 1655 | } |
1641 | __setup("mce", mcheck_enable); | 1656 | __setup("mce", mcheck_enable); |
1642 | 1657 | ||
1658 | int __init mcheck_init(void) | ||
1659 | { | ||
1660 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); | ||
1661 | |||
1662 | mcheck_intel_therm_init(); | ||
1663 | |||
1664 | return 0; | ||
1665 | } | ||
1666 | |||
1643 | /* | 1667 | /* |
1644 | * Sysfs support | 1668 | * Sysfs support |
1645 | */ | 1669 | */ |
@@ -1648,7 +1672,7 @@ __setup("mce", mcheck_enable); | |||
1648 | * Disable machine checks on suspend and shutdown. We can't really handle | 1672 | * Disable machine checks on suspend and shutdown. We can't really handle |
1649 | * them later. | 1673 | * them later. |
1650 | */ | 1674 | */ |
1651 | static int mce_disable(void) | 1675 | static int mce_disable_error_reporting(void) |
1652 | { | 1676 | { |
1653 | int i; | 1677 | int i; |
1654 | 1678 | ||
@@ -1663,12 +1687,12 @@ static int mce_disable(void) | |||
1663 | 1687 | ||
1664 | static int mce_suspend(struct sys_device *dev, pm_message_t state) | 1688 | static int mce_suspend(struct sys_device *dev, pm_message_t state) |
1665 | { | 1689 | { |
1666 | return mce_disable(); | 1690 | return mce_disable_error_reporting(); |
1667 | } | 1691 | } |
1668 | 1692 | ||
1669 | static int mce_shutdown(struct sys_device *dev) | 1693 | static int mce_shutdown(struct sys_device *dev) |
1670 | { | 1694 | { |
1671 | return mce_disable(); | 1695 | return mce_disable_error_reporting(); |
1672 | } | 1696 | } |
1673 | 1697 | ||
1674 | /* | 1698 | /* |
@@ -1678,8 +1702,8 @@ static int mce_shutdown(struct sys_device *dev) | |||
1678 | */ | 1702 | */ |
1679 | static int mce_resume(struct sys_device *dev) | 1703 | static int mce_resume(struct sys_device *dev) |
1680 | { | 1704 | { |
1681 | mce_init(); | 1705 | __mcheck_cpu_init_generic(); |
1682 | mce_cpu_features(&current_cpu_data); | 1706 | __mcheck_cpu_init_vendor(&current_cpu_data); |
1683 | 1707 | ||
1684 | return 0; | 1708 | return 0; |
1685 | } | 1709 | } |
@@ -1689,8 +1713,8 @@ static void mce_cpu_restart(void *data) | |||
1689 | del_timer_sync(&__get_cpu_var(mce_timer)); | 1713 | del_timer_sync(&__get_cpu_var(mce_timer)); |
1690 | if (!mce_available(&current_cpu_data)) | 1714 | if (!mce_available(&current_cpu_data)) |
1691 | return; | 1715 | return; |
1692 | mce_init(); | 1716 | __mcheck_cpu_init_generic(); |
1693 | mce_init_timer(); | 1717 | __mcheck_cpu_init_timer(); |
1694 | } | 1718 | } |
1695 | 1719 | ||
1696 | /* Reinit MCEs after user configuration changes */ | 1720 | /* Reinit MCEs after user configuration changes */ |
@@ -1716,7 +1740,7 @@ static void mce_enable_ce(void *all) | |||
1716 | cmci_reenable(); | 1740 | cmci_reenable(); |
1717 | cmci_recheck(); | 1741 | cmci_recheck(); |
1718 | if (all) | 1742 | if (all) |
1719 | mce_init_timer(); | 1743 | __mcheck_cpu_init_timer(); |
1720 | } | 1744 | } |
1721 | 1745 | ||
1722 | static struct sysdev_class mce_sysclass = { | 1746 | static struct sysdev_class mce_sysclass = { |
@@ -1929,13 +1953,14 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
1929 | } | 1953 | } |
1930 | 1954 | ||
1931 | /* Make sure there are no machine checks on offlined CPUs. */ | 1955 | /* Make sure there are no machine checks on offlined CPUs. */ |
1932 | static void mce_disable_cpu(void *h) | 1956 | static void __cpuinit mce_disable_cpu(void *h) |
1933 | { | 1957 | { |
1934 | unsigned long action = *(unsigned long *)h; | 1958 | unsigned long action = *(unsigned long *)h; |
1935 | int i; | 1959 | int i; |
1936 | 1960 | ||
1937 | if (!mce_available(&current_cpu_data)) | 1961 | if (!mce_available(&current_cpu_data)) |
1938 | return; | 1962 | return; |
1963 | |||
1939 | if (!(action & CPU_TASKS_FROZEN)) | 1964 | if (!(action & CPU_TASKS_FROZEN)) |
1940 | cmci_clear(); | 1965 | cmci_clear(); |
1941 | for (i = 0; i < banks; i++) { | 1966 | for (i = 0; i < banks; i++) { |
@@ -1946,7 +1971,7 @@ static void mce_disable_cpu(void *h) | |||
1946 | } | 1971 | } |
1947 | } | 1972 | } |
1948 | 1973 | ||
1949 | static void mce_reenable_cpu(void *h) | 1974 | static void __cpuinit mce_reenable_cpu(void *h) |
1950 | { | 1975 | { |
1951 | unsigned long action = *(unsigned long *)h; | 1976 | unsigned long action = *(unsigned long *)h; |
1952 | int i; | 1977 | int i; |
@@ -2025,7 +2050,7 @@ static __init void mce_init_banks(void) | |||
2025 | } | 2050 | } |
2026 | } | 2051 | } |
2027 | 2052 | ||
2028 | static __init int mce_init_device(void) | 2053 | static __init int mcheck_init_device(void) |
2029 | { | 2054 | { |
2030 | int err; | 2055 | int err; |
2031 | int i = 0; | 2056 | int i = 0; |
@@ -2053,7 +2078,7 @@ static __init int mce_init_device(void) | |||
2053 | return err; | 2078 | return err; |
2054 | } | 2079 | } |
2055 | 2080 | ||
2056 | device_initcall(mce_init_device); | 2081 | device_initcall(mcheck_init_device); |
2057 | 2082 | ||
2058 | /* | 2083 | /* |
2059 | * Old style boot options parsing. Only for compatibility. | 2084 | * Old style boot options parsing. Only for compatibility. |
@@ -2101,7 +2126,7 @@ static int fake_panic_set(void *data, u64 val) | |||
2101 | DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, | 2126 | DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, |
2102 | fake_panic_set, "%llu\n"); | 2127 | fake_panic_set, "%llu\n"); |
2103 | 2128 | ||
2104 | static int __init mce_debugfs_init(void) | 2129 | static int __init mcheck_debugfs_init(void) |
2105 | { | 2130 | { |
2106 | struct dentry *dmce, *ffake_panic; | 2131 | struct dentry *dmce, *ffake_panic; |
2107 | 2132 | ||
@@ -2115,5 +2140,5 @@ static int __init mce_debugfs_init(void) | |||
2115 | 2140 | ||
2116 | return 0; | 2141 | return 0; |
2117 | } | 2142 | } |
2118 | late_initcall(mce_debugfs_init); | 2143 | late_initcall(mcheck_debugfs_init); |
2119 | #endif | 2144 | #endif |
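The central mce.c change replaces the single x86_mce_decode_callback function pointer with an atomic notifier chain, x86_mce_decoder_chain. The old "no decoding support" message is registered as a priority -1 fallback that returns NOTIFY_STOP, print_mce() simply runs the chain, and the chain is exported so CPU/chipset EDAC code can stack a real decoder in front of the fallback. The rest of the diff is mechanical: the per-CPU setup helpers gain a __mcheck_cpu_ prefix, mce_log() emits the new mce_record tracepoint, and the new boot-time mcheck_init() registers the fallback notifier and calls mcheck_intel_therm_init(). A hedged sketch of how an external decoder might hook the chain; only the chain, the calling convention and the priority scheme are taken from the hunks, the decoder itself is hypothetical:

	static int my_edac_decode_mce(struct notifier_block *nb,
				      unsigned long val, void *data)
	{
		struct mce *m = data;

		/* chipset-specific decoding of m->status, m->addr, m->misc here */

		return NOTIFY_STOP;	/* handled: later (lower-priority) notifiers are skipped */
	}

	static struct notifier_block my_edac_mce_dec = {
		.notifier_call	= my_edac_decode_mce,
		/* default priority 0 runs before the -1 fallback registered above */
	};

	/* in the EDAC driver's init path: */
	atomic_notifier_chain_register(&x86_mce_decoder_chain, &my_edac_mce_dec);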
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index b3a1dba75330..4fef985fc221 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -49,6 +49,8 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state); | |||
49 | 49 | ||
50 | static atomic_t therm_throt_en = ATOMIC_INIT(0); | 50 | static atomic_t therm_throt_en = ATOMIC_INIT(0); |
51 | 51 | ||
52 | static u32 lvtthmr_init __read_mostly; | ||
53 | |||
52 | #ifdef CONFIG_SYSFS | 54 | #ifdef CONFIG_SYSFS |
53 | #define define_therm_throt_sysdev_one_ro(_name) \ | 55 | #define define_therm_throt_sysdev_one_ro(_name) \ |
54 | static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) | 56 | static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) |
@@ -254,6 +256,18 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) | |||
254 | ack_APIC_irq(); | 256 | ack_APIC_irq(); |
255 | } | 257 | } |
256 | 258 | ||
259 | void __init mcheck_intel_therm_init(void) | ||
260 | { | ||
261 | /* | ||
262 | * This function is only called on boot CPU. Save the init thermal | ||
263 | * LVT value on BSP and use that value to restore APs' thermal LVT | ||
264 | * entry BIOS programmed later | ||
265 | */ | ||
266 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && | ||
267 | cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) | ||
268 | lvtthmr_init = apic_read(APIC_LVTTHMR); | ||
269 | } | ||
270 | |||
257 | void intel_init_thermal(struct cpuinfo_x86 *c) | 271 | void intel_init_thermal(struct cpuinfo_x86 *c) |
258 | { | 272 | { |
259 | unsigned int cpu = smp_processor_id(); | 273 | unsigned int cpu = smp_processor_id(); |
@@ -270,7 +284,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
270 | * since it might be delivered via SMI already: | 284 | * since it might be delivered via SMI already: |
271 | */ | 285 | */ |
272 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 286 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
273 | h = apic_read(APIC_LVTTHMR); | 287 | |
288 | /* | ||
289 | * The initial value of thermal LVT entries on all APs always reads | ||
290 | * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI | ||
291 | * sequence to them and LVT registers are reset to 0s except for | ||
292 | * the mask bits which are set to 1s when APs receive INIT IPI. | ||
293 | * Always restore the value that BIOS has programmed on AP based on | ||
294 | * BSP's info we saved since BIOS is always setting the same value | ||
295 | * for all threads/cores | ||
296 | */ | ||
297 | apic_write(APIC_LVTTHMR, lvtthmr_init); | ||
298 | |||
299 | h = lvtthmr_init; | ||
300 | |||
274 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | 301 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
275 | printk(KERN_DEBUG | 302 | printk(KERN_DEBUG |
276 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | 303 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); |
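The therm_throt.c change works around BIOSes that program the thermal LVT only on the boot CPU: mcheck_intel_therm_init(), called from the new mcheck_init() in mce.c above, snapshots APIC_LVTTHMR on the BSP, and intel_init_thermal() later writes that snapshot back on every CPU, because APs come out of INIT with the LVT entry masked (reading 0x10000) rather than with whatever the BIOS set up. In outline, and assuming intel_init_thermal() is still reached from the per-CPU thermal/MCE vendor init path:

	/* boot CPU, early (from mcheck_init()): */
	mcheck_intel_therm_init();	/* lvtthmr_init = apic_read(APIC_LVTTHMR) */

	/* every CPU, later, when thermal handling is set up: */
	intel_init_thermal(c);		/* apic_write(APIC_LVTTHMR, lvtthmr_init),
					 * then check MISC_ENABLE/APIC_DM_SMI to see
					 * whether SMI already owns thermal events */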
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b5801c311846..c1bbed1021d9 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -77,6 +77,18 @@ struct cpu_hw_events { | |||
77 | struct debug_store *ds; | 77 | struct debug_store *ds; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct event_constraint { | ||
81 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
82 | int code; | ||
83 | }; | ||
84 | |||
85 | #define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } | ||
86 | #define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } | ||
87 | |||
88 | #define for_each_event_constraint(e, c) \ | ||
89 | for ((e) = (c); (e)->idxmsk[0]; (e)++) | ||
90 | |||
91 | |||
80 | /* | 92 | /* |
81 | * struct x86_pmu - generic x86 pmu | 93 | * struct x86_pmu - generic x86 pmu |
82 | */ | 94 | */ |
@@ -102,6 +114,8 @@ struct x86_pmu { | |||
102 | u64 intel_ctrl; | 114 | u64 intel_ctrl; |
103 | void (*enable_bts)(u64 config); | 115 | void (*enable_bts)(u64 config); |
104 | void (*disable_bts)(void); | 116 | void (*disable_bts)(void); |
117 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | ||
118 | struct hw_perf_event *hwc); | ||
105 | }; | 119 | }; |
106 | 120 | ||
107 | static struct x86_pmu x86_pmu __read_mostly; | 121 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -110,6 +124,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | |||
110 | .enabled = 1, | 124 | .enabled = 1, |
111 | }; | 125 | }; |
112 | 126 | ||
127 | static const struct event_constraint *event_constraints; | ||
128 | |||
113 | /* | 129 | /* |
114 | * Not sure about some of these | 130 | * Not sure about some of these |
115 | */ | 131 | */ |
@@ -155,6 +171,16 @@ static u64 p6_pmu_raw_event(u64 hw_event) | |||
155 | return hw_event & P6_EVNTSEL_MASK; | 171 | return hw_event & P6_EVNTSEL_MASK; |
156 | } | 172 | } |
157 | 173 | ||
174 | static const struct event_constraint intel_p6_event_constraints[] = | ||
175 | { | ||
176 | EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ | ||
177 | EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | ||
178 | EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ | ||
179 | EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
180 | EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
181 | EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
182 | EVENT_CONSTRAINT_END | ||
183 | }; | ||
158 | 184 | ||
159 | /* | 185 | /* |
160 | * Intel PerfMon v3. Used on Core2 and later. | 186 | * Intel PerfMon v3. Used on Core2 and later. |
@@ -170,6 +196,35 @@ static const u64 intel_perfmon_event_map[] = | |||
170 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | 196 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, |
171 | }; | 197 | }; |
172 | 198 | ||
199 | static const struct event_constraint intel_core_event_constraints[] = | ||
200 | { | ||
201 | EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | ||
202 | EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | ||
203 | EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
204 | EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
205 | EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
206 | EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ | ||
207 | EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | ||
208 | EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ | ||
209 | EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ | ||
210 | EVENT_CONSTRAINT_END | ||
211 | }; | ||
212 | |||
213 | static const struct event_constraint intel_nehalem_event_constraints[] = | ||
214 | { | ||
215 | EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ | ||
216 | EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ | ||
217 | EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ | ||
218 | EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ | ||
219 | EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ | ||
220 | EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ | ||
221 | EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | ||
222 | EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ | ||
223 | EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ | ||
224 | EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ | ||
225 | EVENT_CONSTRAINT_END | ||
226 | }; | ||
227 | |||
173 | static u64 intel_pmu_event_map(int hw_event) | 228 | static u64 intel_pmu_event_map(int hw_event) |
174 | { | 229 | { |
175 | return intel_perfmon_event_map[hw_event]; | 230 | return intel_perfmon_event_map[hw_event]; |
@@ -190,7 +245,7 @@ static u64 __read_mostly hw_cache_event_ids | |||
190 | [PERF_COUNT_HW_CACHE_OP_MAX] | 245 | [PERF_COUNT_HW_CACHE_OP_MAX] |
191 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 246 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
192 | 247 | ||
193 | static const u64 nehalem_hw_cache_event_ids | 248 | static __initconst u64 nehalem_hw_cache_event_ids |
194 | [PERF_COUNT_HW_CACHE_MAX] | 249 | [PERF_COUNT_HW_CACHE_MAX] |
195 | [PERF_COUNT_HW_CACHE_OP_MAX] | 250 | [PERF_COUNT_HW_CACHE_OP_MAX] |
196 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 251 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = |
@@ -281,7 +336,7 @@ static const u64 nehalem_hw_cache_event_ids | |||
281 | }, | 336 | }, |
282 | }; | 337 | }; |
283 | 338 | ||
284 | static const u64 core2_hw_cache_event_ids | 339 | static __initconst u64 core2_hw_cache_event_ids |
285 | [PERF_COUNT_HW_CACHE_MAX] | 340 | [PERF_COUNT_HW_CACHE_MAX] |
286 | [PERF_COUNT_HW_CACHE_OP_MAX] | 341 | [PERF_COUNT_HW_CACHE_OP_MAX] |
287 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 342 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = |
@@ -372,7 +427,7 @@ static const u64 core2_hw_cache_event_ids | |||
372 | }, | 427 | }, |
373 | }; | 428 | }; |
374 | 429 | ||
375 | static const u64 atom_hw_cache_event_ids | 430 | static __initconst u64 atom_hw_cache_event_ids |
376 | [PERF_COUNT_HW_CACHE_MAX] | 431 | [PERF_COUNT_HW_CACHE_MAX] |
377 | [PERF_COUNT_HW_CACHE_OP_MAX] | 432 | [PERF_COUNT_HW_CACHE_OP_MAX] |
378 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 433 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = |
@@ -469,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event) | |||
469 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL | 524 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL |
470 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL | 525 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL |
471 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL | 526 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL |
472 | #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL | 527 | #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL |
473 | 528 | ||
474 | #define CORE_EVNTSEL_MASK \ | 529 | #define CORE_EVNTSEL_MASK \ |
475 | (CORE_EVNTSEL_EVENT_MASK | \ | 530 | (CORE_EVNTSEL_EVENT_MASK | \ |
@@ -481,7 +536,7 @@ static u64 intel_pmu_raw_event(u64 hw_event) | |||
481 | return hw_event & CORE_EVNTSEL_MASK; | 536 | return hw_event & CORE_EVNTSEL_MASK; |
482 | } | 537 | } |
483 | 538 | ||
484 | static const u64 amd_hw_cache_event_ids | 539 | static __initconst u64 amd_hw_cache_event_ids |
485 | [PERF_COUNT_HW_CACHE_MAX] | 540 | [PERF_COUNT_HW_CACHE_MAX] |
486 | [PERF_COUNT_HW_CACHE_OP_MAX] | 541 | [PERF_COUNT_HW_CACHE_OP_MAX] |
487 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | 542 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = |
@@ -932,6 +987,8 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
932 | */ | 987 | */ |
933 | hwc->config = ARCH_PERFMON_EVENTSEL_INT; | 988 | hwc->config = ARCH_PERFMON_EVENTSEL_INT; |
934 | 989 | ||
990 | hwc->idx = -1; | ||
991 | |||
935 | /* | 992 | /* |
936 | * Count user and OS events unless requested not to. | 993 | * Count user and OS events unless requested not to. |
937 | */ | 994 | */ |
@@ -1334,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1334 | x86_pmu_enable_event(hwc, idx); | 1391 | x86_pmu_enable_event(hwc, idx); |
1335 | } | 1392 | } |
1336 | 1393 | ||
1337 | static int | 1394 | static int fixed_mode_idx(struct hw_perf_event *hwc) |
1338 | fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) | ||
1339 | { | 1395 | { |
1340 | unsigned int hw_event; | 1396 | unsigned int hw_event; |
1341 | 1397 | ||
@@ -1349,6 +1405,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) | |||
1349 | if (!x86_pmu.num_events_fixed) | 1405 | if (!x86_pmu.num_events_fixed) |
1350 | return -1; | 1406 | return -1; |
1351 | 1407 | ||
1408 | /* | ||
1409 | * fixed counters do not take all possible filters | ||
1410 | */ | ||
1411 | if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) | ||
1412 | return -1; | ||
1413 | |||
1352 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 1414 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 1415 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
1354 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | 1416 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) |
@@ -1360,22 +1422,57 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) | |||
1360 | } | 1422 | } |
1361 | 1423 | ||
1362 | /* | 1424 | /* |
1363 | * Find a PMC slot for the freshly enabled / scheduled in event: | 1425 | * generic counter allocator: get next free counter |
1364 | */ | 1426 | */ |
1365 | static int x86_pmu_enable(struct perf_event *event) | 1427 | static int |
1428 | gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) | ||
1429 | { | ||
1430 | int idx; | ||
1431 | |||
1432 | idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); | ||
1433 | return idx == x86_pmu.num_events ? -1 : idx; | ||
1434 | } | ||
1435 | |||
1436 | /* | ||
1437 | * intel-specific counter allocator: check event constraints | ||
1438 | */ | ||
1439 | static int | ||
1440 | intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) | ||
1441 | { | ||
1442 | const struct event_constraint *event_constraint; | ||
1443 | int i, code; | ||
1444 | |||
1445 | if (!event_constraints) | ||
1446 | goto skip; | ||
1447 | |||
1448 | code = hwc->config & CORE_EVNTSEL_EVENT_MASK; | ||
1449 | |||
1450 | for_each_event_constraint(event_constraint, event_constraints) { | ||
1451 | if (code == event_constraint->code) { | ||
1452 | for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { | ||
1453 | if (!test_and_set_bit(i, cpuc->used_mask)) | ||
1454 | return i; | ||
1455 | } | ||
1456 | return -1; | ||
1457 | } | ||
1458 | } | ||
1459 | skip: | ||
1460 | return gen_get_event_idx(cpuc, hwc); | ||
1461 | } | ||
1462 | |||
1463 | static int | ||
1464 | x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) | ||
1366 | { | 1465 | { |
1367 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1368 | struct hw_perf_event *hwc = &event->hw; | ||
1369 | int idx; | 1466 | int idx; |
1370 | 1467 | ||
1371 | idx = fixed_mode_idx(event, hwc); | 1468 | idx = fixed_mode_idx(hwc); |
1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { | 1469 | if (idx == X86_PMC_IDX_FIXED_BTS) { |
1373 | /* BTS is already occupied. */ | 1470 | /* BTS is already occupied. */ |
1374 | if (test_and_set_bit(idx, cpuc->used_mask)) | 1471 | if (test_and_set_bit(idx, cpuc->used_mask)) |
1375 | return -EAGAIN; | 1472 | return -EAGAIN; |
1376 | 1473 | ||
1377 | hwc->config_base = 0; | 1474 | hwc->config_base = 0; |
1378 | hwc->event_base = 0; | 1475 | hwc->event_base = 0; |
1379 | hwc->idx = idx; | 1476 | hwc->idx = idx; |
1380 | } else if (idx >= 0) { | 1477 | } else if (idx >= 0) { |
1381 | /* | 1478 | /* |
@@ -1396,20 +1493,35 @@ static int x86_pmu_enable(struct perf_event *event) | |||
1396 | } else { | 1493 | } else { |
1397 | idx = hwc->idx; | 1494 | idx = hwc->idx; |
1398 | /* Try to get the previous generic event again */ | 1495 | /* Try to get the previous generic event again */ |
1399 | if (test_and_set_bit(idx, cpuc->used_mask)) { | 1496 | if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { |
1400 | try_generic: | 1497 | try_generic: |
1401 | idx = find_first_zero_bit(cpuc->used_mask, | 1498 | idx = x86_pmu.get_event_idx(cpuc, hwc); |
1402 | x86_pmu.num_events); | 1499 | if (idx == -1) |
1403 | if (idx == x86_pmu.num_events) | ||
1404 | return -EAGAIN; | 1500 | return -EAGAIN; |
1405 | 1501 | ||
1406 | set_bit(idx, cpuc->used_mask); | 1502 | set_bit(idx, cpuc->used_mask); |
1407 | hwc->idx = idx; | 1503 | hwc->idx = idx; |
1408 | } | 1504 | } |
1409 | hwc->config_base = x86_pmu.eventsel; | 1505 | hwc->config_base = x86_pmu.eventsel; |
1410 | hwc->event_base = x86_pmu.perfctr; | 1506 | hwc->event_base = x86_pmu.perfctr; |
1411 | } | 1507 | } |
1412 | 1508 | ||
1509 | return idx; | ||
1510 | } | ||
1511 | |||
1512 | /* | ||
1513 | * Find a PMC slot for the freshly enabled / scheduled in event: | ||
1514 | */ | ||
1515 | static int x86_pmu_enable(struct perf_event *event) | ||
1516 | { | ||
1517 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1518 | struct hw_perf_event *hwc = &event->hw; | ||
1519 | int idx; | ||
1520 | |||
1521 | idx = x86_schedule_event(cpuc, hwc); | ||
1522 | if (idx < 0) | ||
1523 | return idx; | ||
1524 | |||
1413 | perf_events_lapic_init(); | 1525 | perf_events_lapic_init(); |
1414 | 1526 | ||
1415 | x86_pmu.disable(hwc, idx); | 1527 | x86_pmu.disable(hwc, idx); |
@@ -1852,7 +1964,7 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { | |||
1852 | .priority = 1 | 1964 | .priority = 1 |
1853 | }; | 1965 | }; |
1854 | 1966 | ||
1855 | static struct x86_pmu p6_pmu = { | 1967 | static __initconst struct x86_pmu p6_pmu = { |
1856 | .name = "p6", | 1968 | .name = "p6", |
1857 | .handle_irq = p6_pmu_handle_irq, | 1969 | .handle_irq = p6_pmu_handle_irq, |
1858 | .disable_all = p6_pmu_disable_all, | 1970 | .disable_all = p6_pmu_disable_all, |
@@ -1877,9 +1989,10 @@ static struct x86_pmu p6_pmu = { | |||
1877 | */ | 1989 | */ |
1878 | .event_bits = 32, | 1990 | .event_bits = 32, |
1879 | .event_mask = (1ULL << 32) - 1, | 1991 | .event_mask = (1ULL << 32) - 1, |
1992 | .get_event_idx = intel_get_event_idx, | ||
1880 | }; | 1993 | }; |
1881 | 1994 | ||
1882 | static struct x86_pmu intel_pmu = { | 1995 | static __initconst struct x86_pmu intel_pmu = { |
1883 | .name = "Intel", | 1996 | .name = "Intel", |
1884 | .handle_irq = intel_pmu_handle_irq, | 1997 | .handle_irq = intel_pmu_handle_irq, |
1885 | .disable_all = intel_pmu_disable_all, | 1998 | .disable_all = intel_pmu_disable_all, |
@@ -1900,9 +2013,10 @@ static struct x86_pmu intel_pmu = { | |||
1900 | .max_period = (1ULL << 31) - 1, | 2013 | .max_period = (1ULL << 31) - 1, |
1901 | .enable_bts = intel_pmu_enable_bts, | 2014 | .enable_bts = intel_pmu_enable_bts, |
1902 | .disable_bts = intel_pmu_disable_bts, | 2015 | .disable_bts = intel_pmu_disable_bts, |
2016 | .get_event_idx = intel_get_event_idx, | ||
1903 | }; | 2017 | }; |
1904 | 2018 | ||
1905 | static struct x86_pmu amd_pmu = { | 2019 | static __initconst struct x86_pmu amd_pmu = { |
1906 | .name = "AMD", | 2020 | .name = "AMD", |
1907 | .handle_irq = amd_pmu_handle_irq, | 2021 | .handle_irq = amd_pmu_handle_irq, |
1908 | .disable_all = amd_pmu_disable_all, | 2022 | .disable_all = amd_pmu_disable_all, |
@@ -1920,9 +2034,10 @@ static struct x86_pmu amd_pmu = { | |||
1920 | .apic = 1, | 2034 | .apic = 1, |
1921 | /* use highest bit to detect overflow */ | 2035 | /* use highest bit to detect overflow */ |
1922 | .max_period = (1ULL << 47) - 1, | 2036 | .max_period = (1ULL << 47) - 1, |
2037 | .get_event_idx = gen_get_event_idx, | ||
1923 | }; | 2038 | }; |
1924 | 2039 | ||
1925 | static int p6_pmu_init(void) | 2040 | static __init int p6_pmu_init(void) |
1926 | { | 2041 | { |
1927 | switch (boot_cpu_data.x86_model) { | 2042 | switch (boot_cpu_data.x86_model) { |
1928 | case 1: | 2043 | case 1: |
@@ -1932,10 +2047,12 @@ static int p6_pmu_init(void) | |||
1932 | case 7: | 2047 | case 7: |
1933 | case 8: | 2048 | case 8: |
1934 | case 11: /* Pentium III */ | 2049 | case 11: /* Pentium III */ |
2050 | event_constraints = intel_p6_event_constraints; | ||
1935 | break; | 2051 | break; |
1936 | case 9: | 2052 | case 9: |
1937 | case 13: | 2053 | case 13: |
1938 | /* Pentium M */ | 2054 | /* Pentium M */ |
2055 | event_constraints = intel_p6_event_constraints; | ||
1939 | break; | 2056 | break; |
1940 | default: | 2057 | default: |
1941 | pr_cont("unsupported p6 CPU model %d ", | 2058 | pr_cont("unsupported p6 CPU model %d ", |
@@ -1954,7 +2071,7 @@ static int p6_pmu_init(void) | |||
1954 | return 0; | 2071 | return 0; |
1955 | } | 2072 | } |
1956 | 2073 | ||
1957 | static int intel_pmu_init(void) | 2074 | static __init int intel_pmu_init(void) |
1958 | { | 2075 | { |
1959 | union cpuid10_edx edx; | 2076 | union cpuid10_edx edx; |
1960 | union cpuid10_eax eax; | 2077 | union cpuid10_eax eax; |
@@ -2007,12 +2124,14 @@ static int intel_pmu_init(void) | |||
2007 | sizeof(hw_cache_event_ids)); | 2124 | sizeof(hw_cache_event_ids)); |
2008 | 2125 | ||
2009 | pr_cont("Core2 events, "); | 2126 | pr_cont("Core2 events, "); |
2127 | event_constraints = intel_core_event_constraints; | ||
2010 | break; | 2128 | break; |
2011 | default: | 2129 | default: |
2012 | case 26: | 2130 | case 26: |
2013 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | 2131 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, |
2014 | sizeof(hw_cache_event_ids)); | 2132 | sizeof(hw_cache_event_ids)); |
2015 | 2133 | ||
2134 | event_constraints = intel_nehalem_event_constraints; | ||
2016 | pr_cont("Nehalem/Corei7 events, "); | 2135 | pr_cont("Nehalem/Corei7 events, "); |
2017 | break; | 2136 | break; |
2018 | case 28: | 2137 | case 28: |
@@ -2025,7 +2144,7 @@ static int intel_pmu_init(void) | |||
2025 | return 0; | 2144 | return 0; |
2026 | } | 2145 | } |
2027 | 2146 | ||
2028 | static int amd_pmu_init(void) | 2147 | static __init int amd_pmu_init(void) |
2029 | { | 2148 | { |
2030 | /* Performance-monitoring supported from K7 and later: */ | 2149 | /* Performance-monitoring supported from K7 and later: */ |
2031 | if (boot_cpu_data.x86 < 6) | 2150 | if (boot_cpu_data.x86 < 6) |
@@ -2105,11 +2224,47 @@ static const struct pmu pmu = { | |||
2105 | .unthrottle = x86_pmu_unthrottle, | 2224 | .unthrottle = x86_pmu_unthrottle, |
2106 | }; | 2225 | }; |
2107 | 2226 | ||
2227 | static int | ||
2228 | validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
2229 | { | ||
2230 | struct hw_perf_event fake_event = event->hw; | ||
2231 | |||
2232 | if (event->pmu && event->pmu != &pmu) | ||
2233 | return 0; | ||
2234 | |||
2235 | return x86_schedule_event(cpuc, &fake_event) >= 0; | ||
2236 | } | ||
2237 | |||
2238 | static int validate_group(struct perf_event *event) | ||
2239 | { | ||
2240 | struct perf_event *sibling, *leader = event->group_leader; | ||
2241 | struct cpu_hw_events fake_pmu; | ||
2242 | |||
2243 | memset(&fake_pmu, 0, sizeof(fake_pmu)); | ||
2244 | |||
2245 | if (!validate_event(&fake_pmu, leader)) | ||
2246 | return -ENOSPC; | ||
2247 | |||
2248 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | ||
2249 | if (!validate_event(&fake_pmu, sibling)) | ||
2250 | return -ENOSPC; | ||
2251 | } | ||
2252 | |||
2253 | if (!validate_event(&fake_pmu, event)) | ||
2254 | return -ENOSPC; | ||
2255 | |||
2256 | return 0; | ||
2257 | } | ||
2258 | |||
2108 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 2259 | const struct pmu *hw_perf_event_init(struct perf_event *event) |
2109 | { | 2260 | { |
2110 | int err; | 2261 | int err; |
2111 | 2262 | ||
2112 | err = __hw_perf_event_init(event); | 2263 | err = __hw_perf_event_init(event); |
2264 | if (!err) { | ||
2265 | if (event->group_leader != event) | ||
2266 | err = validate_group(event); | ||
2267 | } | ||
2113 | if (err) { | 2268 | if (err) { |
2114 | if (event->destroy) | 2269 | if (event->destroy) |
2115 | event->destroy(event); | 2270 | event->destroy(event); |
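The perf_event.c diff introduces per-model event constraints and makes the counter allocator pluggable. struct event_constraint ties an event select code to a bitmask of PMC indices the event is allowed to use; p6_pmu_init()/intel_pmu_init() pick the matching table (P6/Pentium M, Core2, Nehalem), intel_get_event_idx() consults it before falling back to the generic first-free-counter search, and the index selection is split out of x86_pmu_enable() into x86_schedule_event() so that the new validate_group() can dry-run an entire event group against a throw-away cpu_hw_events at creation time, returning -ENOSPC for groups that can never be scheduled together. A commented restatement of the constraint-aware allocator, using only identifiers defined in the hunks above:

	static int
	intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
	{
		const struct event_constraint *c;
		int i, code;

		if (!event_constraints)			/* model without a table */
			return gen_get_event_idx(cpuc, hwc);

		code = hwc->config & CORE_EVNTSEL_EVENT_MASK;

		for_each_event_constraint(c, event_constraints) {
			if (code != c->code)
				continue;
			/* constrained event: only the counters in idxmsk are legal */
			for_each_bit(i, c->idxmsk, X86_PMC_IDX_MAX)
				if (!test_and_set_bit(i, cpuc->used_mask))
					return i;
			return -1;			/* all legal counters are busy */
		}

		/* no constraint for this event: any free generic counter will do */
		return gen_get_event_idx(cpuc, hwc);
	}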
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index fab786f60ed6..898df9719afb 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void) | |||
712 | switch (boot_cpu_data.x86_vendor) { | 712 | switch (boot_cpu_data.x86_vendor) { |
713 | case X86_VENDOR_AMD: | 713 | case X86_VENDOR_AMD: |
714 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | 714 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && |
715 | boot_cpu_data.x86 != 16) | 715 | boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17) |
716 | return; | 716 | return; |
717 | wd_ops = &k7_wd_ops; | 717 | wd_ops = &k7_wd_ops; |
718 | break; | 718 | break; |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index bb62b3e5caad..28000743bbb0 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -26,7 +26,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
26 | 26 | ||
27 | early_init_transmeta(c); | 27 | early_init_transmeta(c); |
28 | 28 | ||
29 | display_cacheinfo(c); | 29 | cpu_detect_cache_sizes(c); |
30 | 30 | ||
31 | /* Print CMS and CPU revision */ | 31 | /* Print CMS and CPU revision */ |
32 | max = cpuid_eax(0x80860000); | 32 | max = cpuid_eax(0x80860000); |
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 6a52d4b36a30..7ef24a796992 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -116,21 +116,16 @@ static int cpuid_open(struct inode *inode, struct file *file) | |||
116 | { | 116 | { |
117 | unsigned int cpu; | 117 | unsigned int cpu; |
118 | struct cpuinfo_x86 *c; | 118 | struct cpuinfo_x86 *c; |
119 | int ret = 0; | ||
120 | |||
121 | lock_kernel(); | ||
122 | 119 | ||
123 | cpu = iminor(file->f_path.dentry->d_inode); | 120 | cpu = iminor(file->f_path.dentry->d_inode); |
124 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { | 121 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) |
125 | ret = -ENXIO; /* No such CPU */ | 122 | return -ENXIO; /* No such CPU */ |
126 | goto out; | 123 | |
127 | } | ||
128 | c = &cpu_data(cpu); | 124 | c = &cpu_data(cpu); |
129 | if (c->cpuid_level < 0) | 125 | if (c->cpuid_level < 0) |
130 | ret = -EIO; /* CPUID not supported */ | 126 | return -EIO; /* CPUID not supported */ |
131 | out: | 127 | |
132 | unlock_kernel(); | 128 | return 0; |
133 | return ret; | ||
134 | } | 129 | } |
135 | 130 | ||
136 | /* | 131 | /* |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 5e409dc298a4..a4849c10a77e 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -27,8 +27,7 @@ | |||
27 | #include <asm/cpu.h> | 27 | #include <asm/cpu.h> |
28 | #include <asm/reboot.h> | 28 | #include <asm/reboot.h> |
29 | #include <asm/virtext.h> | 29 | #include <asm/virtext.h> |
30 | #include <asm/iommu.h> | 30 | #include <asm/x86_init.h> |
31 | |||
32 | 31 | ||
33 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 32 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
34 | 33 | ||
@@ -106,7 +105,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
106 | #endif | 105 | #endif |
107 | 106 | ||
108 | #ifdef CONFIG_X86_64 | 107 | #ifdef CONFIG_X86_64 |
109 | pci_iommu_shutdown(); | 108 | x86_platform.iommu_shutdown(); |
110 | #endif | 109 | #endif |
111 | 110 | ||
112 | crash_save_cpu(regs, safe_smp_processor_id()); | 111 | crash_save_cpu(regs, safe_smp_processor_id()); |
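crash.c no longer calls pci_iommu_shutdown() directly; the crash path now goes through the x86_platform.iommu_shutdown() hook declared in <asm/x86_init.h>, so whichever IOMMU driver is active can install its own shutdown handler and the crash code stays driver-agnostic. A minimal sketch of the indirection; the handler name is hypothetical and the default wiring of the hook is not part of this hunk:

	/* driver side, at init time (illustrative name): */
	x86_platform.iommu_shutdown = my_iommu_shutdown;

	/* crash path, as in the hunk above: */
	#ifdef CONFIG_X86_64
		x86_platform.iommu_shutdown();
	#endif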
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 2d8a371d4339..b8ce165dde5d 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -268,11 +268,12 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) | |||
268 | 268 | ||
269 | show_registers(regs); | 269 | show_registers(regs); |
270 | #ifdef CONFIG_X86_32 | 270 | #ifdef CONFIG_X86_32 |
271 | sp = (unsigned long) (&regs->sp); | 271 | if (user_mode_vm(regs)) { |
272 | savesegment(ss, ss); | ||
273 | if (user_mode(regs)) { | ||
274 | sp = regs->sp; | 272 | sp = regs->sp; |
275 | ss = regs->ss & 0xffff; | 273 | ss = regs->ss & 0xffff; |
274 | } else { | ||
275 | sp = kernel_stack_pointer(regs); | ||
276 | savesegment(ss, ss); | ||
276 | } | 277 | } |
277 | printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); | 278 | printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); |
278 | print_symbol("%s", regs->ip); | 279 | print_symbol("%s", regs->ip); |
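The __die() change tidies how ESP/SS are reported in 32-bit oopses: the user-space case is now detected with user_mode_vm() instead of user_mode(), so a fault taken in vm86 mode reports the user stack pointer and segment rather than kernel values, and the kernel-mode case uses the kernel_stack_pointer() helper (which on 32-bit works out to &regs->sp, the slot where the pre-exception ESP effectively lives) instead of open-coding it. In sketch form, mirroring the new hunk with the reasoning as comments:

	if (user_mode_vm(regs)) {		/* includes vm86, unlike user_mode() */
		sp = regs->sp;			/* CPL change: hardware pushed sp/ss */
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);	/* pre-fault ESP on the kernel stack */
		savesegment(ss, ss);			/* ss was not saved; read the live one */
	}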
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index f7dd2a7c3bf4..e0ed4c7abb62 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c | |||
@@ -10,9 +10,9 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/ptrace.h> | 11 | #include <linux/ptrace.h> |
12 | #include <linux/kexec.h> | 12 | #include <linux/kexec.h> |
13 | #include <linux/sysfs.h> | ||
13 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
14 | #include <linux/nmi.h> | 15 | #include <linux/nmi.h> |
15 | #include <linux/sysfs.h> | ||
16 | 16 | ||
17 | #include <asm/stacktrace.h> | 17 | #include <asm/stacktrace.h> |
18 | 18 | ||
@@ -35,6 +35,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
35 | 35 | ||
36 | if (!stack) { | 36 | if (!stack) { |
37 | unsigned long dummy; | 37 | unsigned long dummy; |
38 | |||
38 | stack = &dummy; | 39 | stack = &dummy; |
39 | if (task && task != current) | 40 | if (task && task != current) |
40 | stack = (unsigned long *)task->thread.sp; | 41 | stack = (unsigned long *)task->thread.sp; |
@@ -57,8 +58,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
57 | 58 | ||
58 | context = (struct thread_info *) | 59 | context = (struct thread_info *) |
59 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | 60 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); |
60 | bp = print_context_stack(context, stack, bp, ops, | 61 | bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph); |
61 | data, NULL, &graph); | ||
62 | 62 | ||
63 | stack = (unsigned long *)context->previous_esp; | 63 | stack = (unsigned long *)context->previous_esp; |
64 | if (!stack) | 64 | if (!stack) |
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(dump_trace); | |||
72 | 72 | ||
73 | void | 73 | void |
74 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | 74 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
75 | unsigned long *sp, unsigned long bp, char *log_lvl) | 75 | unsigned long *sp, unsigned long bp, char *log_lvl) |
76 | { | 76 | { |
77 | unsigned long *stack; | 77 | unsigned long *stack; |
78 | int i; | 78 | int i; |
@@ -156,4 +156,3 @@ int is_valid_bugaddr(unsigned long ip) | |||
156 | 156 | ||
157 | return ud2 == 0x0b0f; | 157 | return ud2 == 0x0b0f; |
158 | } | 158 | } |
159 | |||
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index a071e6be177e..8e740934bd1f 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -10,26 +10,28 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/ptrace.h> | 11 | #include <linux/ptrace.h> |
12 | #include <linux/kexec.h> | 12 | #include <linux/kexec.h> |
13 | #include <linux/sysfs.h> | ||
13 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
14 | #include <linux/nmi.h> | 15 | #include <linux/nmi.h> |
15 | #include <linux/sysfs.h> | ||
16 | 16 | ||
17 | #include <asm/stacktrace.h> | 17 | #include <asm/stacktrace.h> |
18 | 18 | ||
19 | #include "dumpstack.h" | 19 | #include "dumpstack.h" |
20 | 20 | ||
21 | #define N_EXCEPTION_STACKS_END \ | ||
22 | (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) | ||
21 | 23 | ||
22 | static char x86_stack_ids[][8] = { | 24 | static char x86_stack_ids[][8] = { |
23 | [DEBUG_STACK - 1] = "#DB", | 25 | [ DEBUG_STACK-1 ] = "#DB", |
24 | [NMI_STACK - 1] = "NMI", | 26 | [ NMI_STACK-1 ] = "NMI", |
25 | [DOUBLEFAULT_STACK - 1] = "#DF", | 27 | [ DOUBLEFAULT_STACK-1 ] = "#DF", |
26 | [STACKFAULT_STACK - 1] = "#SS", | 28 | [ STACKFAULT_STACK-1 ] = "#SS", |
27 | [MCE_STACK - 1] = "#MC", | 29 | [ MCE_STACK-1 ] = "#MC", |
28 | #if DEBUG_STKSZ > EXCEPTION_STKSZ | 30 | #if DEBUG_STKSZ > EXCEPTION_STKSZ |
29 | [N_EXCEPTION_STACKS ... | 31 | [ N_EXCEPTION_STACKS ... |
30 | N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | 32 | N_EXCEPTION_STACKS_END ] = "#DB[?]" |
31 | #endif | 33 | #endif |
32 | }; | 34 | }; |
33 | 35 | ||
34 | int x86_is_stack_id(int id, char *name) | 36 | int x86_is_stack_id(int id, char *name) |
35 | { | 37 | { |
@@ -37,7 +39,7 @@ int x86_is_stack_id(int id, char *name) | |||
37 | } | 39 | } |
38 | 40 | ||
39 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 41 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
40 | unsigned *usedp, char **idp) | 42 | unsigned *usedp, char **idp) |
41 | { | 43 | { |
42 | unsigned k; | 44 | unsigned k; |
43 | 45 | ||
@@ -202,21 +204,24 @@ EXPORT_SYMBOL(dump_trace); | |||
202 | 204 | ||
203 | void | 205 | void |
204 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | 206 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
205 | unsigned long *sp, unsigned long bp, char *log_lvl) | 207 | unsigned long *sp, unsigned long bp, char *log_lvl) |
206 | { | 208 | { |
209 | unsigned long *irq_stack_end; | ||
210 | unsigned long *irq_stack; | ||
207 | unsigned long *stack; | 211 | unsigned long *stack; |
212 | int cpu; | ||
208 | int i; | 213 | int i; |
209 | const int cpu = smp_processor_id(); | 214 | |
210 | unsigned long *irq_stack_end = | 215 | preempt_disable(); |
211 | (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); | 216 | cpu = smp_processor_id(); |
212 | unsigned long *irq_stack = | 217 | |
213 | (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); | 218 | irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); |
219 | irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); | ||
214 | 220 | ||
215 | /* | 221 | /* |
216 | * debugging aid: "show_stack(NULL, NULL);" prints the | 222 | * Debugging aid: "show_stack(NULL, NULL);" prints the |
217 | * back trace for this cpu. | 223 | * back trace for this cpu: |
218 | */ | 224 | */ |
219 | |||
220 | if (sp == NULL) { | 225 | if (sp == NULL) { |
221 | if (task) | 226 | if (task) |
222 | sp = (unsigned long *)task->thread.sp; | 227 | sp = (unsigned long *)task->thread.sp; |
@@ -240,6 +245,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
240 | printk(" %016lx", *stack++); | 245 | printk(" %016lx", *stack++); |
241 | touch_nmi_watchdog(); | 246 | touch_nmi_watchdog(); |
242 | } | 247 | } |
248 | preempt_enable(); | ||
249 | |||
243 | printk("\n"); | 250 | printk("\n"); |
244 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); | 251 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
245 | } | 252 | } |
@@ -303,4 +310,3 @@ int is_valid_bugaddr(unsigned long ip) | |||
303 | 310 | ||
304 | return ud2 == 0x0b0f; | 311 | return ud2 == 0x0b0f; |
305 | } | 312 | } |
306 | |||
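The 64-bit show_stack_log_lvl() now brackets smp_processor_id() and the per-CPU IRQ-stack lookups with preempt_disable()/preempt_enable(), so the task cannot migrate between reading the CPU number and dereferencing that CPU's data. A toy sketch of the two shapes; smp_processor_id(), the per-CPU array and the walker below are stubs, not kernel APIs:

#include <stdio.h>

/* Stubs so the sketch builds outside the kernel; none of these are real APIs. */
static int  smp_processor_id(void) { return 0; }
static void preempt_disable(void)  { }
static void preempt_enable(void)   { }
static unsigned long irq_stack_ptr[4];          /* pretend per-CPU variable */

static void walk_irq_stack(int cpu)
{
        printf("walking IRQ stack of cpu %d at %#lx\n", cpu, irq_stack_ptr[cpu]);
}

/*
 * Racy shape: between reading the CPU id and touching that CPU's data the
 * task could be preempted and migrated, so "cpu" may describe another CPU.
 */
static void dump_racy(void)
{
        int cpu = smp_processor_id();

        walk_irq_stack(cpu);
}

/* Shape used by the hunk: stay pinned to one CPU for the whole walk. */
static void dump_pinned(void)
{
        preempt_disable();
        walk_irq_stack(smp_processor_id());
        preempt_enable();
}

int main(void)
{
        dump_racy();
        dump_pinned();
        return 0;
}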
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c097e7d607c6..50b9c220e121 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -334,6 +334,10 @@ ENTRY(ret_from_fork) | |||
334 | END(ret_from_fork) | 334 | END(ret_from_fork) |
335 | 335 | ||
336 | /* | 336 | /* |
337 | * Interrupt exit functions should be protected against kprobes | ||
338 | */ | ||
339 | .pushsection .kprobes.text, "ax" | ||
340 | /* | ||
337 | * Return to user mode is not as complex as all this looks, | 341 | * Return to user mode is not as complex as all this looks, |
338 | * but we want the default path for a system call return to | 342 | * but we want the default path for a system call return to |
339 | * go as quickly as possible which is why some of this is | 343 | * go as quickly as possible which is why some of this is |
@@ -383,6 +387,10 @@ need_resched: | |||
383 | END(resume_kernel) | 387 | END(resume_kernel) |
384 | #endif | 388 | #endif |
385 | CFI_ENDPROC | 389 | CFI_ENDPROC |
390 | /* | ||
391 | * End of kprobes section | ||
392 | */ | ||
393 | .popsection | ||
386 | 394 | ||
387 | /* SYSENTER_RETURN points to after the "sysenter" instruction in | 395 | /* SYSENTER_RETURN points to after the "sysenter" instruction in |
388 | the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ | 396 | the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ |
@@ -513,6 +521,10 @@ sysexit_audit: | |||
513 | PTGS_TO_GS_EX | 521 | PTGS_TO_GS_EX |
514 | ENDPROC(ia32_sysenter_target) | 522 | ENDPROC(ia32_sysenter_target) |
515 | 523 | ||
524 | /* | ||
525 | * syscall stub including irq exit should be protected against kprobes | ||
526 | */ | ||
527 | .pushsection .kprobes.text, "ax" | ||
516 | # system call handler stub | 528 | # system call handler stub |
517 | ENTRY(system_call) | 529 | ENTRY(system_call) |
518 | RING0_INT_FRAME # can't unwind into user space anyway | 530 | RING0_INT_FRAME # can't unwind into user space anyway |
@@ -705,6 +717,10 @@ syscall_badsys: | |||
705 | jmp resume_userspace | 717 | jmp resume_userspace |
706 | END(syscall_badsys) | 718 | END(syscall_badsys) |
707 | CFI_ENDPROC | 719 | CFI_ENDPROC |
720 | /* | ||
721 | * End of kprobes section | ||
722 | */ | ||
723 | .popsection | ||
708 | 724 | ||
709 | /* | 725 | /* |
710 | * System calls that need a pt_regs pointer. | 726 | * System calls that need a pt_regs pointer. |
@@ -814,6 +830,10 @@ common_interrupt: | |||
814 | ENDPROC(common_interrupt) | 830 | ENDPROC(common_interrupt) |
815 | CFI_ENDPROC | 831 | CFI_ENDPROC |
816 | 832 | ||
833 | /* | ||
834 | * Irq entries should be protected against kprobes | ||
835 | */ | ||
836 | .pushsection .kprobes.text, "ax" | ||
817 | #define BUILD_INTERRUPT3(name, nr, fn) \ | 837 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
818 | ENTRY(name) \ | 838 | ENTRY(name) \ |
819 | RING0_INT_FRAME; \ | 839 | RING0_INT_FRAME; \ |
@@ -980,6 +1000,10 @@ ENTRY(spurious_interrupt_bug) | |||
980 | jmp error_code | 1000 | jmp error_code |
981 | CFI_ENDPROC | 1001 | CFI_ENDPROC |
982 | END(spurious_interrupt_bug) | 1002 | END(spurious_interrupt_bug) |
1003 | /* | ||
1004 | * End of kprobes section | ||
1005 | */ | ||
1006 | .popsection | ||
983 | 1007 | ||
984 | ENTRY(kernel_thread_helper) | 1008 | ENTRY(kernel_thread_helper) |
985 | pushl $0 # fake return address for unwinder | 1009 | pushl $0 # fake return address for unwinder |
@@ -1185,17 +1209,14 @@ END(ftrace_graph_caller) | |||
1185 | 1209 | ||
1186 | .globl return_to_handler | 1210 | .globl return_to_handler |
1187 | return_to_handler: | 1211 | return_to_handler: |
1188 | pushl $0 | ||
1189 | pushl %eax | 1212 | pushl %eax |
1190 | pushl %ecx | ||
1191 | pushl %edx | 1213 | pushl %edx |
1192 | movl %ebp, %eax | 1214 | movl %ebp, %eax |
1193 | call ftrace_return_to_handler | 1215 | call ftrace_return_to_handler |
1194 | movl %eax, 0xc(%esp) | 1216 | movl %eax, %ecx |
1195 | popl %edx | 1217 | popl %edx |
1196 | popl %ecx | ||
1197 | popl %eax | 1218 | popl %eax |
1198 | ret | 1219 | jmp *%ecx |
1199 | #endif | 1220 | #endif |
1200 | 1221 | ||
1201 | .section .rodata,"a" | 1222 | .section .rodata,"a" |
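The .pushsection/.popsection pairs place the interrupt entry/exit stubs in .kprobes.text, the section the kprobes core refuses to probe (C code opts in with the __kprobes attribute; assembly has to switch sections by hand). A hedged sketch of such a blacklist check; the buffer standing in for the section bounds is made up, and the real kernel uses linker-provided __kprobes_text_start/__kprobes_text_end symbols instead:

#include <stdio.h>

/* Stand-in for the section bounds; the kernel gets these from the linker. */
static char kprobes_text_start[4096];
#define kprobes_text_end (kprobes_text_start + sizeof(kprobes_text_start))

static int in_kprobes_section(const void *addr)
{
        return (const char *)addr >= kprobes_text_start &&
               (const char *)addr <  kprobes_text_end;
}

static int register_probe(const void *addr)
{
        if (in_kprobes_section(addr)) {
                fprintf(stderr, "refusing probe inside .kprobes.text\n");
                return -1;
        }
        printf("probe accepted at %p\n", addr);
        return 0;
}

int main(void)
{
        register_probe(kprobes_text_start + 8);                 /* rejected */
        register_probe((void *)(unsigned long)main);            /* accepted */
        return 0;
}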
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b5c061f8f358..4deb8fc849dd 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -155,11 +155,11 @@ GLOBAL(return_to_handler) | |||
155 | 155 | ||
156 | call ftrace_return_to_handler | 156 | call ftrace_return_to_handler |
157 | 157 | ||
158 | movq %rax, 16(%rsp) | 158 | movq %rax, %rdi |
159 | movq 8(%rsp), %rdx | 159 | movq 8(%rsp), %rdx |
160 | movq (%rsp), %rax | 160 | movq (%rsp), %rax |
161 | addq $16, %rsp | 161 | addq $24, %rsp |
162 | retq | 162 | jmp *%rdi |
163 | #endif | 163 | #endif |
164 | 164 | ||
165 | 165 | ||
@@ -803,6 +803,10 @@ END(interrupt) | |||
803 | call \func | 803 | call \func |
804 | .endm | 804 | .endm |
805 | 805 | ||
806 | /* | ||
807 | * Interrupt entry/exit should be protected against kprobes | ||
808 | */ | ||
809 | .pushsection .kprobes.text, "ax" | ||
806 | /* | 810 | /* |
807 | * The interrupt stubs push (~vector+0x80) onto the stack and | 811 | * The interrupt stubs push (~vector+0x80) onto the stack and |
808 | * then jump to common_interrupt. | 812 | * then jump to common_interrupt. |
@@ -941,6 +945,10 @@ ENTRY(retint_kernel) | |||
941 | 945 | ||
942 | CFI_ENDPROC | 946 | CFI_ENDPROC |
943 | END(common_interrupt) | 947 | END(common_interrupt) |
948 | /* | ||
949 | * End of kprobes section | ||
950 | */ | ||
951 | .popsection | ||
944 | 952 | ||
945 | /* | 953 | /* |
946 | * APIC interrupts. | 954 | * APIC interrupts. |
@@ -1491,12 +1499,17 @@ error_kernelspace: | |||
1491 | leaq irq_return(%rip),%rcx | 1499 | leaq irq_return(%rip),%rcx |
1492 | cmpq %rcx,RIP+8(%rsp) | 1500 | cmpq %rcx,RIP+8(%rsp) |
1493 | je error_swapgs | 1501 | je error_swapgs |
1494 | movl %ecx,%ecx /* zero extend */ | 1502 | movl %ecx,%eax /* zero extend */ |
1495 | cmpq %rcx,RIP+8(%rsp) | 1503 | cmpq %rax,RIP+8(%rsp) |
1496 | je error_swapgs | 1504 | je bstep_iret |
1497 | cmpq $gs_change,RIP+8(%rsp) | 1505 | cmpq $gs_change,RIP+8(%rsp) |
1498 | je error_swapgs | 1506 | je error_swapgs |
1499 | jmp error_sti | 1507 | jmp error_sti |
1508 | |||
1509 | bstep_iret: | ||
1510 | /* Fix truncated RIP */ | ||
1511 | movq %rcx,RIP+8(%rsp) | ||
1512 | jmp error_swapgs | ||
1500 | END(error_entry) | 1513 | END(error_entry) |
1501 | 1514 | ||
1502 | 1515 | ||
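Two separate fixes land in entry_64.S here: the function-graph return trampoline now takes the original return address back from ftrace_return_to_handler() in a register and jumps to it rather than building a fake frame for ret, and error_kernelspace stops clobbering %rcx while checking whether the saved RIP is a zero-extended (truncated) copy of irq_return, restoring the full value via the new bstep_iret label. A small user-space illustration of that truncation check, with a made-up address and deliberately simplified control flow:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t irq_return = 0xffffffff81234567ULL;    /* made-up kernel RIP */
        uint64_t frame_rip  = (uint32_t)irq_return;     /* what a single step
                                                           through iret left behind */

        if (frame_rip == irq_return) {
                puts("fault is on iret itself: just fix up gs");
        } else if (frame_rip == (uint32_t)irq_return) {
                /*
                 * bstep_iret case: the saved RIP lost its upper 32 bits, so
                 * write the full address back before resuming.
                 */
                printf("restoring truncated RIP %#llx -> %#llx\n",
                       (unsigned long long)frame_rip,
                       (unsigned long long)irq_return);
                frame_rip = irq_return;
        }
        return 0;
}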
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 9dbb527e1652..5a1b9758fd62 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * the dangers of modifying code on the run. | 9 | * the dangers of modifying code on the run. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
13 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
14 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data) | |||
336 | 338 | ||
337 | switch (faulted) { | 339 | switch (faulted) { |
338 | case 0: | 340 | case 0: |
339 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); | 341 | pr_info("converting mcount calls to 0f 1f 44 00 00\n"); |
340 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); | 342 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); |
341 | break; | 343 | break; |
342 | case 1: | 344 | case 1: |
343 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); | 345 | pr_info("converting mcount calls to 66 66 66 66 90\n"); |
344 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); | 346 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); |
345 | break; | 347 | break; |
346 | case 2: | 348 | case 2: |
347 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); | 349 | pr_info("converting mcount calls to jmp . + 5\n"); |
348 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); | 350 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); |
349 | break; | 351 | break; |
350 | } | 352 | } |
@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
468 | 470 | ||
469 | #ifdef CONFIG_FTRACE_SYSCALLS | 471 | #ifdef CONFIG_FTRACE_SYSCALLS |
470 | 472 | ||
471 | extern unsigned long __start_syscalls_metadata[]; | ||
472 | extern unsigned long __stop_syscalls_metadata[]; | ||
473 | extern unsigned long *sys_call_table; | 473 | extern unsigned long *sys_call_table; |
474 | 474 | ||
475 | static struct syscall_metadata **syscalls_metadata; | 475 | unsigned long __init arch_syscall_addr(int nr) |
476 | |||
477 | static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) | ||
478 | { | ||
479 | struct syscall_metadata *start; | ||
480 | struct syscall_metadata *stop; | ||
481 | char str[KSYM_SYMBOL_LEN]; | ||
482 | |||
483 | |||
484 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
485 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
486 | kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); | ||
487 | |||
488 | for ( ; start < stop; start++) { | ||
489 | if (start->name && !strcmp(start->name, str)) | ||
490 | return start; | ||
491 | } | ||
492 | return NULL; | ||
493 | } | ||
494 | |||
495 | struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
496 | { | ||
497 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) | ||
498 | return NULL; | ||
499 | |||
500 | return syscalls_metadata[nr]; | ||
501 | } | ||
502 | |||
503 | int syscall_name_to_nr(char *name) | ||
504 | { | 476 | { |
505 | int i; | 477 | return (unsigned long)(&sys_call_table)[nr]; |
506 | |||
507 | if (!syscalls_metadata) | ||
508 | return -1; | ||
509 | |||
510 | for (i = 0; i < NR_syscalls; i++) { | ||
511 | if (syscalls_metadata[i]) { | ||
512 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | return -1; | ||
517 | } | ||
518 | |||
519 | void set_syscall_enter_id(int num, int id) | ||
520 | { | ||
521 | syscalls_metadata[num]->enter_id = id; | ||
522 | } | ||
523 | |||
524 | void set_syscall_exit_id(int num, int id) | ||
525 | { | ||
526 | syscalls_metadata[num]->exit_id = id; | ||
527 | } | ||
528 | |||
529 | static int __init arch_init_ftrace_syscalls(void) | ||
530 | { | ||
531 | int i; | ||
532 | struct syscall_metadata *meta; | ||
533 | unsigned long **psys_syscall_table = &sys_call_table; | ||
534 | |||
535 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | ||
536 | NR_syscalls, GFP_KERNEL); | ||
537 | if (!syscalls_metadata) { | ||
538 | WARN_ON(1); | ||
539 | return -ENOMEM; | ||
540 | } | ||
541 | |||
542 | for (i = 0; i < NR_syscalls; i++) { | ||
543 | meta = find_syscall_meta(psys_syscall_table[i]); | ||
544 | syscalls_metadata[i] = meta; | ||
545 | } | ||
546 | return 0; | ||
547 | } | 478 | } |
548 | arch_initcall(arch_init_ftrace_syscalls); | ||
549 | #endif | 479 | #endif |
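With the syscall metadata handling moved to common code, the only architecture hook left is arch_syscall_addr(), which maps a syscall number to the handler address in sys_call_table; the generic tracer does the matching against its metadata. A hedged sketch of that split of responsibilities, using a two-entry toy table (every name besides arch_syscall_addr and sys_call_table is invented for the example):

#include <stdio.h>

static long sys_read(void)  { return 0; }
static long sys_write(void) { return 0; }

/* Toy stand-in for the arch's sys_call_table. */
static long (*sys_call_table[])(void) = { sys_read, sys_write };

/* The one remaining arch hook: syscall number -> handler address. */
static unsigned long arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr];
}

/* "Generic" side: match the address against known metadata. */
struct syscall_meta {
        const char *name;
        long (*handler)(void);
};

static struct syscall_meta metadata[] = {
        { "sys_read",  sys_read  },
        { "sys_write", sys_write },
};

static const char *syscall_nr_to_name(int nr)
{
        unsigned long addr = arch_syscall_addr(nr);
        size_t i;

        for (i = 0; i < sizeof(metadata) / sizeof(metadata[0]); i++)
                if ((unsigned long)metadata[i].handler == addr)
                        return metadata[i].name;
        return "?";
}

int main(void)
{
        printf("syscall 1 is %s\n", syscall_nr_to_name(1));
        return 0;
}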
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 780cd928fcd5..22db86a37643 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -212,8 +212,8 @@ ENTRY(secondary_startup_64) | |||
212 | */ | 212 | */ |
213 | lgdt early_gdt_descr(%rip) | 213 | lgdt early_gdt_descr(%rip) |
214 | 214 | ||
215 | /* set up data segments. actually 0 would do too */ | 215 | /* set up data segments */ |
216 | movl $__KERNEL_DS,%eax | 216 | xorl %eax,%eax |
217 | movl %eax,%ds | 217 | movl %eax,%ds |
218 | movl %eax,%ss | 218 | movl %eax,%ss |
219 | movl %eax,%es | 219 | movl %eax,%es |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..d42f65ac4927 --- /dev/null +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -0,0 +1,555 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2007 Alan Stern | ||
17 | * Copyright (C) 2009 IBM Corporation | ||
18 | * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com> | ||
19 | * | ||
20 | * Authors: Alan Stern <stern@rowland.harvard.edu> | ||
21 | * K.Prasad <prasad@linux.vnet.ibm.com> | ||
22 | * Frederic Weisbecker <fweisbec@gmail.com> | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, | ||
27 | * using the CPU's debug registers. | ||
28 | */ | ||
29 | |||
30 | #include <linux/perf_event.h> | ||
31 | #include <linux/hw_breakpoint.h> | ||
32 | #include <linux/irqflags.h> | ||
33 | #include <linux/notifier.h> | ||
34 | #include <linux/kallsyms.h> | ||
35 | #include <linux/kprobes.h> | ||
36 | #include <linux/percpu.h> | ||
37 | #include <linux/kdebug.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <linux/smp.h> | ||
43 | |||
44 | #include <asm/hw_breakpoint.h> | ||
45 | #include <asm/processor.h> | ||
46 | #include <asm/debugreg.h> | ||
47 | |||
48 | /* Per cpu debug control register value */ | ||
49 | DEFINE_PER_CPU(unsigned long, cpu_dr7); | ||
50 | EXPORT_PER_CPU_SYMBOL(cpu_dr7); | ||
51 | |||
52 | /* Per cpu debug address registers values */ | ||
53 | static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); | ||
54 | |||
55 | /* | ||
56 | * Stores the breakpoints currently in use on each breakpoint address | ||
57 |  * register for each CPU | ||
58 | */ | ||
59 | static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); | ||
60 | |||
61 | |||
62 | static inline unsigned long | ||
63 | __encode_dr7(int drnum, unsigned int len, unsigned int type) | ||
64 | { | ||
65 | unsigned long bp_info; | ||
66 | |||
67 | bp_info = (len | type) & 0xf; | ||
68 | bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); | ||
69 | bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)); | ||
70 | |||
71 | return bp_info; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * Encode the length, type, Exact, and Enable bits for a particular breakpoint | ||
76 | * as stored in debug register 7. | ||
77 | */ | ||
78 | unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) | ||
79 | { | ||
80 | return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Decode the length and type bits for a particular breakpoint as | ||
85 | * stored in debug register 7. Return the "enabled" status. | ||
86 | */ | ||
87 | int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type) | ||
88 | { | ||
89 | int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); | ||
90 | |||
91 | *len = (bp_info & 0xc) | 0x40; | ||
92 | *type = (bp_info & 0x3) | 0x80; | ||
93 | |||
94 | return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * Install a perf counter breakpoint. | ||
99 | * | ||
100 | * We seek a free debug address register and use it for this | ||
101 | * breakpoint. Eventually we enable it in the debug control register. | ||
102 | * | ||
103 | * Atomic: we hold the counter->ctx->lock and we only handle variables | ||
104 | * and registers local to this cpu. | ||
105 | */ | ||
106 | int arch_install_hw_breakpoint(struct perf_event *bp) | ||
107 | { | ||
108 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
109 | unsigned long *dr7; | ||
110 | int i; | ||
111 | |||
112 | for (i = 0; i < HBP_NUM; i++) { | ||
113 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | ||
114 | |||
115 | if (!*slot) { | ||
116 | *slot = bp; | ||
117 | break; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | ||
122 | return -EBUSY; | ||
123 | |||
124 | set_debugreg(info->address, i); | ||
125 | __get_cpu_var(cpu_debugreg[i]) = info->address; | ||
126 | |||
127 | dr7 = &__get_cpu_var(cpu_dr7); | ||
128 | *dr7 |= encode_dr7(i, info->len, info->type); | ||
129 | |||
130 | set_debugreg(*dr7, 7); | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Uninstall the breakpoint contained in the given counter. | ||
137 | * | ||
138 | * First we search the debug address register it uses and then we disable | ||
139 | * it. | ||
140 | * | ||
141 | * Atomic: we hold the counter->ctx->lock and we only handle variables | ||
142 | * and registers local to this cpu. | ||
143 | */ | ||
144 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | ||
145 | { | ||
146 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
147 | unsigned long *dr7; | ||
148 | int i; | ||
149 | |||
150 | for (i = 0; i < HBP_NUM; i++) { | ||
151 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | ||
152 | |||
153 | if (*slot == bp) { | ||
154 | *slot = NULL; | ||
155 | break; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | ||
160 | return; | ||
161 | |||
162 | dr7 = &__get_cpu_var(cpu_dr7); | ||
163 | *dr7 &= ~__encode_dr7(i, info->len, info->type); | ||
164 | |||
165 | set_debugreg(*dr7, 7); | ||
166 | } | ||
167 | |||
168 | static int get_hbp_len(u8 hbp_len) | ||
169 | { | ||
170 | unsigned int len_in_bytes = 0; | ||
171 | |||
172 | switch (hbp_len) { | ||
173 | case X86_BREAKPOINT_LEN_1: | ||
174 | len_in_bytes = 1; | ||
175 | break; | ||
176 | case X86_BREAKPOINT_LEN_2: | ||
177 | len_in_bytes = 2; | ||
178 | break; | ||
179 | case X86_BREAKPOINT_LEN_4: | ||
180 | len_in_bytes = 4; | ||
181 | break; | ||
182 | #ifdef CONFIG_X86_64 | ||
183 | case X86_BREAKPOINT_LEN_8: | ||
184 | len_in_bytes = 8; | ||
185 | break; | ||
186 | #endif | ||
187 | } | ||
188 | return len_in_bytes; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Check for virtual address in user space. | ||
193 | */ | ||
194 | int arch_check_va_in_userspace(unsigned long va, u8 hbp_len) | ||
195 | { | ||
196 | unsigned int len; | ||
197 | |||
198 | len = get_hbp_len(hbp_len); | ||
199 | |||
200 | return (va <= TASK_SIZE - len); | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Check for virtual address in kernel space. | ||
205 | */ | ||
206 | static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) | ||
207 | { | ||
208 | unsigned int len; | ||
209 | |||
210 | len = get_hbp_len(hbp_len); | ||
211 | |||
212 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Store a breakpoint's encoded address, length, and type. | ||
217 | */ | ||
218 | static int arch_store_info(struct perf_event *bp) | ||
219 | { | ||
220 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
221 | /* | ||
222 | * For kernel-addresses, either the address or symbol name can be | ||
223 | * specified. | ||
224 | */ | ||
225 | if (info->name) | ||
226 | info->address = (unsigned long) | ||
227 | kallsyms_lookup_name(info->name); | ||
228 | if (info->address) | ||
229 | return 0; | ||
230 | |||
231 | return -EINVAL; | ||
232 | } | ||
233 | |||
234 | int arch_bp_generic_fields(int x86_len, int x86_type, | ||
235 | int *gen_len, int *gen_type) | ||
236 | { | ||
237 | /* Len */ | ||
238 | switch (x86_len) { | ||
239 | case X86_BREAKPOINT_LEN_1: | ||
240 | *gen_len = HW_BREAKPOINT_LEN_1; | ||
241 | break; | ||
242 | case X86_BREAKPOINT_LEN_2: | ||
243 | *gen_len = HW_BREAKPOINT_LEN_2; | ||
244 | break; | ||
245 | case X86_BREAKPOINT_LEN_4: | ||
246 | *gen_len = HW_BREAKPOINT_LEN_4; | ||
247 | break; | ||
248 | #ifdef CONFIG_X86_64 | ||
249 | case X86_BREAKPOINT_LEN_8: | ||
250 | *gen_len = HW_BREAKPOINT_LEN_8; | ||
251 | break; | ||
252 | #endif | ||
253 | default: | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | |||
257 | /* Type */ | ||
258 | switch (x86_type) { | ||
259 | case X86_BREAKPOINT_EXECUTE: | ||
260 | *gen_type = HW_BREAKPOINT_X; | ||
261 | break; | ||
262 | case X86_BREAKPOINT_WRITE: | ||
263 | *gen_type = HW_BREAKPOINT_W; | ||
264 | break; | ||
265 | case X86_BREAKPOINT_RW: | ||
266 | *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | ||
267 | break; | ||
268 | default: | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | |||
276 | static int arch_build_bp_info(struct perf_event *bp) | ||
277 | { | ||
278 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
279 | |||
280 | info->address = bp->attr.bp_addr; | ||
281 | |||
282 | /* Len */ | ||
283 | switch (bp->attr.bp_len) { | ||
284 | case HW_BREAKPOINT_LEN_1: | ||
285 | info->len = X86_BREAKPOINT_LEN_1; | ||
286 | break; | ||
287 | case HW_BREAKPOINT_LEN_2: | ||
288 | info->len = X86_BREAKPOINT_LEN_2; | ||
289 | break; | ||
290 | case HW_BREAKPOINT_LEN_4: | ||
291 | info->len = X86_BREAKPOINT_LEN_4; | ||
292 | break; | ||
293 | #ifdef CONFIG_X86_64 | ||
294 | case HW_BREAKPOINT_LEN_8: | ||
295 | info->len = X86_BREAKPOINT_LEN_8; | ||
296 | break; | ||
297 | #endif | ||
298 | default: | ||
299 | return -EINVAL; | ||
300 | } | ||
301 | |||
302 | /* Type */ | ||
303 | switch (bp->attr.bp_type) { | ||
304 | case HW_BREAKPOINT_W: | ||
305 | info->type = X86_BREAKPOINT_WRITE; | ||
306 | break; | ||
307 | case HW_BREAKPOINT_W | HW_BREAKPOINT_R: | ||
308 | info->type = X86_BREAKPOINT_RW; | ||
309 | break; | ||
310 | case HW_BREAKPOINT_X: | ||
311 | info->type = X86_BREAKPOINT_EXECUTE; | ||
312 | break; | ||
313 | default: | ||
314 | return -EINVAL; | ||
315 | } | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | /* | ||
320 | * Validate the arch-specific HW Breakpoint register settings | ||
321 | */ | ||
322 | int arch_validate_hwbkpt_settings(struct perf_event *bp, | ||
323 | struct task_struct *tsk) | ||
324 | { | ||
325 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
326 | unsigned int align; | ||
327 | int ret; | ||
328 | |||
329 | |||
330 | ret = arch_build_bp_info(bp); | ||
331 | if (ret) | ||
332 | return ret; | ||
333 | |||
334 | ret = -EINVAL; | ||
335 | |||
336 | if (info->type == X86_BREAKPOINT_EXECUTE) | ||
337 | /* | ||
338 | * Ptrace-refactoring code | ||
339 | * For now, we'll allow instruction breakpoint only for user-space | ||
340 | * addresses | ||
341 | */ | ||
342 | if ((!arch_check_va_in_userspace(info->address, info->len)) && | ||
343 | info->len != X86_BREAKPOINT_EXECUTE) | ||
344 | return ret; | ||
345 | |||
346 | switch (info->len) { | ||
347 | case X86_BREAKPOINT_LEN_1: | ||
348 | align = 0; | ||
349 | break; | ||
350 | case X86_BREAKPOINT_LEN_2: | ||
351 | align = 1; | ||
352 | break; | ||
353 | case X86_BREAKPOINT_LEN_4: | ||
354 | align = 3; | ||
355 | break; | ||
356 | #ifdef CONFIG_X86_64 | ||
357 | case X86_BREAKPOINT_LEN_8: | ||
358 | align = 7; | ||
359 | break; | ||
360 | #endif | ||
361 | default: | ||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | if (bp->callback) | ||
366 | ret = arch_store_info(bp); | ||
367 | |||
368 | if (ret < 0) | ||
369 | return ret; | ||
370 | /* | ||
371 | * Check that the low-order bits of the address are appropriate | ||
372 | * for the alignment implied by len. | ||
373 | */ | ||
374 | if (info->address & align) | ||
375 | return -EINVAL; | ||
376 | |||
377 | /* Check that the virtual address is in the proper range */ | ||
378 | if (tsk) { | ||
379 | if (!arch_check_va_in_userspace(info->address, info->len)) | ||
380 | return -EFAULT; | ||
381 | } else { | ||
382 | if (!arch_check_va_in_kernelspace(info->address, info->len)) | ||
383 | return -EFAULT; | ||
384 | } | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Dump the debug register contents to the user. | ||
391 | * We can't dump our per cpu values because it | ||
392 |  * We can't dump our per-CPU values because they | ||
393 |  * may contain a CPU-wide breakpoint, something that | ||
394 | * | ||
395 | * TODO: include non-ptrace user breakpoints (perf) | ||
396 | */ | ||
397 | void aout_dump_debugregs(struct user *dump) | ||
398 | { | ||
399 | int i; | ||
400 | int dr7 = 0; | ||
401 | struct perf_event *bp; | ||
402 | struct arch_hw_breakpoint *info; | ||
403 | struct thread_struct *thread = ¤t->thread; | ||
404 | |||
405 | for (i = 0; i < HBP_NUM; i++) { | ||
406 | bp = thread->ptrace_bps[i]; | ||
407 | |||
408 | if (bp && !bp->attr.disabled) { | ||
409 | dump->u_debugreg[i] = bp->attr.bp_addr; | ||
410 | info = counter_arch_bp(bp); | ||
411 | dr7 |= encode_dr7(i, info->len, info->type); | ||
412 | } else { | ||
413 | dump->u_debugreg[i] = 0; | ||
414 | } | ||
415 | } | ||
416 | |||
417 | dump->u_debugreg[4] = 0; | ||
418 | dump->u_debugreg[5] = 0; | ||
419 | dump->u_debugreg[6] = current->thread.debugreg6; | ||
420 | |||
421 | dump->u_debugreg[7] = dr7; | ||
422 | } | ||
423 | EXPORT_SYMBOL_GPL(aout_dump_debugregs); | ||
424 | |||
425 | /* | ||
426 | * Release the user breakpoints used by ptrace | ||
427 | */ | ||
428 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | ||
429 | { | ||
430 | int i; | ||
431 | struct thread_struct *t = &tsk->thread; | ||
432 | |||
433 | for (i = 0; i < HBP_NUM; i++) { | ||
434 | unregister_hw_breakpoint(t->ptrace_bps[i]); | ||
435 | t->ptrace_bps[i] = NULL; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | void hw_breakpoint_restore(void) | ||
440 | { | ||
441 | set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0); | ||
442 | set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1); | ||
443 | set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2); | ||
444 | set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3); | ||
445 | set_debugreg(current->thread.debugreg6, 6); | ||
446 | set_debugreg(__get_cpu_var(cpu_dr7), 7); | ||
447 | } | ||
448 | EXPORT_SYMBOL_GPL(hw_breakpoint_restore); | ||
449 | |||
450 | /* | ||
451 | * Handle debug exception notifications. | ||
452 | * | ||
453 | * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below. | ||
454 | * | ||
455 | * NOTIFY_DONE returned if one of the following conditions is true. | ||
456 | * i) When the causative address is from user-space and the exception | ||
457 | * is a valid one, i.e. not triggered as a result of lazy debug register | ||
458 | * switching | ||
459 | * ii) When there are more bits than trap<n> set in DR6 register (such | ||
460 | * as BD, BS or BT) indicating that more than one debug condition is | ||
461 | * met and requires some more action in do_debug(). | ||
462 | * | ||
463 | * NOTIFY_STOP returned for all other cases | ||
464 | * | ||
465 | */ | ||
466 | static int __kprobes hw_breakpoint_handler(struct die_args *args) | ||
467 | { | ||
468 | int i, cpu, rc = NOTIFY_STOP; | ||
469 | struct perf_event *bp; | ||
470 | unsigned long dr7, dr6; | ||
471 | unsigned long *dr6_p; | ||
472 | |||
473 | /* The DR6 value is pointed by args->err */ | ||
474 | dr6_p = (unsigned long *)ERR_PTR(args->err); | ||
475 | dr6 = *dr6_p; | ||
476 | |||
477 | /* Do an early return if no trap bits are set in DR6 */ | ||
478 | if ((dr6 & DR_TRAP_BITS) == 0) | ||
479 | return NOTIFY_DONE; | ||
480 | |||
481 | get_debugreg(dr7, 7); | ||
482 | /* Disable breakpoints during exception handling */ | ||
483 | set_debugreg(0UL, 7); | ||
484 | /* | ||
485 | * Assert that local interrupts are disabled | ||
486 | * Reset the DRn bits in the virtualized register value. | ||
487 | * The ptrace trigger routine will add in whatever is needed. | ||
488 | */ | ||
489 | current->thread.debugreg6 &= ~DR_TRAP_BITS; | ||
490 | cpu = get_cpu(); | ||
491 | |||
492 | /* Handle all the breakpoints that were triggered */ | ||
493 | for (i = 0; i < HBP_NUM; ++i) { | ||
494 | if (likely(!(dr6 & (DR_TRAP0 << i)))) | ||
495 | continue; | ||
496 | |||
497 | /* | ||
498 | * The counter may be concurrently released but that can only | ||
499 | * occur from a call_rcu() path. We can then safely fetch | ||
500 | * the breakpoint, use its callback, touch its counter | ||
501 | * while we are in an rcu_read_lock() path. | ||
502 | */ | ||
503 | rcu_read_lock(); | ||
504 | |||
505 | bp = per_cpu(bp_per_reg[i], cpu); | ||
506 | if (bp) | ||
507 | rc = NOTIFY_DONE; | ||
508 | /* | ||
509 | * Reset the 'i'th TRAP bit in dr6 to denote completion of | ||
510 | * exception handling | ||
511 | */ | ||
512 | (*dr6_p) &= ~(DR_TRAP0 << i); | ||
513 | /* | ||
514 | * bp can be NULL due to lazy debug register switching | ||
515 | * or due to concurrent perf counter removing. | ||
516 | */ | ||
517 | if (!bp) { | ||
518 | rcu_read_unlock(); | ||
519 | break; | ||
520 | } | ||
521 | |||
522 | (bp->callback)(bp, args->regs); | ||
523 | |||
524 | rcu_read_unlock(); | ||
525 | } | ||
526 | if (dr6 & (~DR_TRAP_BITS)) | ||
527 | rc = NOTIFY_DONE; | ||
528 | |||
529 | set_debugreg(dr7, 7); | ||
530 | put_cpu(); | ||
531 | |||
532 | return rc; | ||
533 | } | ||
534 | |||
535 | /* | ||
536 | * Handle debug exception notifications. | ||
537 | */ | ||
538 | int __kprobes hw_breakpoint_exceptions_notify( | ||
539 | struct notifier_block *unused, unsigned long val, void *data) | ||
540 | { | ||
541 | if (val != DIE_DEBUG) | ||
542 | return NOTIFY_DONE; | ||
543 | |||
544 | return hw_breakpoint_handler(data); | ||
545 | } | ||
546 | |||
547 | void hw_breakpoint_pmu_read(struct perf_event *bp) | ||
548 | { | ||
549 | /* TODO */ | ||
550 | } | ||
551 | |||
552 | void hw_breakpoint_pmu_unthrottle(struct perf_event *bp) | ||
553 | { | ||
554 | /* TODO */ | ||
555 | } | ||
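The new hw_breakpoint.c drives everything from encode_dr7()/decode_dr7(), which pack a breakpoint's length/type nibble and its global-enable bit into DR7. The standalone copy below makes the bit arithmetic visible; the DR_* constants are assumed to match asm/debugreg.h, and the len/type values are the raw DR7 nibble encodings for a 4-byte write breakpoint:

#include <stdio.h>

/* Constants assumed to match asm/debugreg.h. */
#define DR_CONTROL_SHIFT        16      /* len/type fields start at bit 16 */
#define DR_CONTROL_SIZE          4      /* 4 control bits per debug register */
#define DR_ENABLE_SIZE           2      /* 2 enable bits per debug register */
#define DR_GLOBAL_ENABLE         2
#define DR_GLOBAL_SLOWDOWN   0x200      /* "exact breakpoint" slowdown bit */

static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
        unsigned long bp_info = (len | type) & 0xf;

        bp_info <<= DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE;
        bp_info |= (unsigned long)DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE);
        return bp_info | DR_GLOBAL_SLOWDOWN;
}

int main(void)
{
        /* 4-byte write breakpoint in debug address register 1:
         * raw DR7 nibble is len = 0xc (LEN_4), type = 0x1 (write). */
        printf("dr7 = %#lx\n", encode_dr7(1, 0xc, 0x1));        /* 0xd00208 */
        return 0;
}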
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 04bbd5278568..fee6cc2b2079 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -92,17 +92,17 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
92 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); | 92 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); |
93 | seq_printf(p, " TLB shootdowns\n"); | 93 | seq_printf(p, " TLB shootdowns\n"); |
94 | #endif | 94 | #endif |
95 | #ifdef CONFIG_X86_MCE | 95 | #ifdef CONFIG_X86_THERMAL_VECTOR |
96 | seq_printf(p, "%*s: ", prec, "TRM"); | 96 | seq_printf(p, "%*s: ", prec, "TRM"); |
97 | for_each_online_cpu(j) | 97 | for_each_online_cpu(j) |
98 | seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); | 98 | seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); |
99 | seq_printf(p, " Thermal event interrupts\n"); | 99 | seq_printf(p, " Thermal event interrupts\n"); |
100 | # ifdef CONFIG_X86_MCE_THRESHOLD | 100 | #endif |
101 | #ifdef CONFIG_X86_MCE_THRESHOLD | ||
101 | seq_printf(p, "%*s: ", prec, "THR"); | 102 | seq_printf(p, "%*s: ", prec, "THR"); |
102 | for_each_online_cpu(j) | 103 | for_each_online_cpu(j) |
103 | seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); | 104 | seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); |
104 | seq_printf(p, " Threshold APIC interrupts\n"); | 105 | seq_printf(p, " Threshold APIC interrupts\n"); |
105 | # endif | ||
106 | #endif | 106 | #endif |
107 | #ifdef CONFIG_X86_MCE | 107 | #ifdef CONFIG_X86_MCE |
108 | seq_printf(p, "%*s: ", prec, "MCE"); | 108 | seq_printf(p, "%*s: ", prec, "MCE"); |
@@ -194,11 +194,11 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
194 | sum += irq_stats(cpu)->irq_call_count; | 194 | sum += irq_stats(cpu)->irq_call_count; |
195 | sum += irq_stats(cpu)->irq_tlb_count; | 195 | sum += irq_stats(cpu)->irq_tlb_count; |
196 | #endif | 196 | #endif |
197 | #ifdef CONFIG_X86_MCE | 197 | #ifdef CONFIG_X86_THERMAL_VECTOR |
198 | sum += irq_stats(cpu)->irq_thermal_count; | 198 | sum += irq_stats(cpu)->irq_thermal_count; |
199 | # ifdef CONFIG_X86_MCE_THRESHOLD | 199 | #endif |
200 | #ifdef CONFIG_X86_MCE_THRESHOLD | ||
200 | sum += irq_stats(cpu)->irq_threshold_count; | 201 | sum += irq_stats(cpu)->irq_threshold_count; |
201 | # endif | ||
202 | #endif | 202 | #endif |
203 | #ifdef CONFIG_X86_MCE | 203 | #ifdef CONFIG_X86_MCE |
204 | sum += per_cpu(mce_exception_count, cpu); | 204 | sum += per_cpu(mce_exception_count, cpu); |
@@ -274,3 +274,93 @@ void smp_generic_interrupt(struct pt_regs *regs) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | 276 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); |
277 | |||
278 | #ifdef CONFIG_HOTPLUG_CPU | ||
279 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | ||
280 | void fixup_irqs(void) | ||
281 | { | ||
282 | unsigned int irq, vector; | ||
283 | static int warned; | ||
284 | struct irq_desc *desc; | ||
285 | |||
286 | for_each_irq_desc(irq, desc) { | ||
287 | int break_affinity = 0; | ||
288 | int set_affinity = 1; | ||
289 | const struct cpumask *affinity; | ||
290 | |||
291 | if (!desc) | ||
292 | continue; | ||
293 | if (irq == 2) | ||
294 | continue; | ||
295 | |||
296 | /* interrupts are disabled at this point */ | ||
297 | spin_lock(&desc->lock); | ||
298 | |||
299 | affinity = desc->affinity; | ||
300 | if (!irq_has_action(irq) || | ||
301 | cpumask_equal(affinity, cpu_online_mask)) { | ||
302 | spin_unlock(&desc->lock); | ||
303 | continue; | ||
304 | } | ||
305 | |||
306 | /* | ||
307 | * Complete the irq move. This cpu is going down and for | ||
308 | * non intr-remapping case, we can't wait till this interrupt | ||
309 | * arrives at this cpu before completing the irq move. | ||
310 | */ | ||
311 | irq_force_complete_move(irq); | ||
312 | |||
313 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
314 | break_affinity = 1; | ||
315 | affinity = cpu_all_mask; | ||
316 | } | ||
317 | |||
318 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) | ||
319 | desc->chip->mask(irq); | ||
320 | |||
321 | if (desc->chip->set_affinity) | ||
322 | desc->chip->set_affinity(irq, affinity); | ||
323 | else if (!(warned++)) | ||
324 | set_affinity = 0; | ||
325 | |||
326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) | ||
327 | desc->chip->unmask(irq); | ||
328 | |||
329 | spin_unlock(&desc->lock); | ||
330 | |||
331 | if (break_affinity && set_affinity) | ||
332 | printk("Broke affinity for irq %i\n", irq); | ||
333 | else if (!set_affinity) | ||
334 | printk("Cannot set affinity for irq %i\n", irq); | ||
335 | } | ||
336 | |||
337 | /* | ||
338 |  * We can remove mdelay() and then send spurious interrupts to | ||
339 | * new cpu targets for all the irqs that were handled previously by | ||
340 | * this cpu. While it works, I have seen spurious interrupt messages | ||
341 | * (nothing wrong but still...). | ||
342 | * | ||
343 | * So for now, retain mdelay(1) and check the IRR and then send those | ||
344 | * interrupts to new targets as this cpu is already offlined... | ||
345 | */ | ||
346 | mdelay(1); | ||
347 | |||
348 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | ||
349 | unsigned int irr; | ||
350 | |||
351 | if (__get_cpu_var(vector_irq)[vector] < 0) | ||
352 | continue; | ||
353 | |||
354 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | ||
355 | if (irr & (1 << (vector % 32))) { | ||
356 | irq = __get_cpu_var(vector_irq)[vector]; | ||
357 | |||
358 | desc = irq_to_desc(irq); | ||
359 | spin_lock(&desc->lock); | ||
360 | if (desc->chip->retrigger) | ||
361 | desc->chip->retrigger(irq); | ||
362 | spin_unlock(&desc->lock); | ||
363 | } | ||
364 | } | ||
365 | } | ||
366 | #endif | ||
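The consolidated fixup_irqs() finishes by scanning the local APIC IRR for vectors that were already latched for the dying CPU and retriggering them at their new destination. The IRR is 256 bits exposed as eight 32-bit words spaced 0x10 apart, which is what the (vector / 32 * 0x10) and (vector % 32) arithmetic indexes. A small sketch of that indexing against a fake register window (the APIC_IRR offset is assumed from apicdef.h):

#include <stdint.h>
#include <stdio.h>

#define APIC_IRR        0x200           /* assumed offset, as in asm/apicdef.h */

/* Fake register window: IRR words live at APIC_IRR, APIC_IRR + 0x10, ... */
static uint32_t fake_apic[0x400 / 4];

static uint32_t apic_read(unsigned int reg)
{
        return fake_apic[reg / 4];
}

static int vector_pending(unsigned int vector)
{
        uint32_t irr = apic_read(APIC_IRR + (vector / 32) * 0x10);

        return (irr >> (vector % 32)) & 1;
}

int main(void)
{
        unsigned int vector = 0x41;     /* 65: third IRR word, bit 1 */

        fake_apic[(APIC_IRR + (vector / 32) * 0x10) / 4] = 1u << (vector % 32);
        printf("vector %#x pending: %d\n", vector, vector_pending(vector));
        return 0;
}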
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 7d35d0fe2329..10709f29d166 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -211,48 +211,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
211 | 211 | ||
212 | return true; | 212 | return true; |
213 | } | 213 | } |
214 | |||
215 | #ifdef CONFIG_HOTPLUG_CPU | ||
216 | |||
217 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | ||
218 | void fixup_irqs(void) | ||
219 | { | ||
220 | unsigned int irq; | ||
221 | struct irq_desc *desc; | ||
222 | |||
223 | for_each_irq_desc(irq, desc) { | ||
224 | const struct cpumask *affinity; | ||
225 | |||
226 | if (!desc) | ||
227 | continue; | ||
228 | if (irq == 2) | ||
229 | continue; | ||
230 | |||
231 | affinity = desc->affinity; | ||
232 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
233 | printk("Breaking affinity for irq %i\n", irq); | ||
234 | affinity = cpu_all_mask; | ||
235 | } | ||
236 | if (desc->chip->set_affinity) | ||
237 | desc->chip->set_affinity(irq, affinity); | ||
238 | else if (desc->action) | ||
239 | printk_once("Cannot set affinity for irq %i\n", irq); | ||
240 | } | ||
241 | |||
242 | #if 0 | ||
243 | barrier(); | ||
244 | /* Ingo Molnar says: "after the IO-APIC masks have been redirected | ||
245 | [note the nop - the interrupt-enable boundary on x86 is two | ||
246 | instructions from sti] - to flush out pending hardirqs and | ||
247 | IPIs. After this point nothing is supposed to reach this CPU." */ | ||
248 | __asm__ __volatile__("sti; nop; cli"); | ||
249 | barrier(); | ||
250 | #else | ||
251 | /* That doesn't seem sufficient. Give it 1ms. */ | ||
252 | local_irq_enable(); | ||
253 | mdelay(1); | ||
254 | local_irq_disable(); | ||
255 | #endif | ||
256 | } | ||
257 | #endif | ||
258 | |||
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 977d8b43a0dd..acf8fbf8fbda 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -62,64 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
62 | return true; | 62 | return true; |
63 | } | 63 | } |
64 | 64 | ||
65 | #ifdef CONFIG_HOTPLUG_CPU | ||
66 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | ||
67 | void fixup_irqs(void) | ||
68 | { | ||
69 | unsigned int irq; | ||
70 | static int warned; | ||
71 | struct irq_desc *desc; | ||
72 | |||
73 | for_each_irq_desc(irq, desc) { | ||
74 | int break_affinity = 0; | ||
75 | int set_affinity = 1; | ||
76 | const struct cpumask *affinity; | ||
77 | |||
78 | if (!desc) | ||
79 | continue; | ||
80 | if (irq == 2) | ||
81 | continue; | ||
82 | |||
83 | /* interrupt's are disabled at this point */ | ||
84 | spin_lock(&desc->lock); | ||
85 | |||
86 | affinity = desc->affinity; | ||
87 | if (!irq_has_action(irq) || | ||
88 | cpumask_equal(affinity, cpu_online_mask)) { | ||
89 | spin_unlock(&desc->lock); | ||
90 | continue; | ||
91 | } | ||
92 | |||
93 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
94 | break_affinity = 1; | ||
95 | affinity = cpu_all_mask; | ||
96 | } | ||
97 | |||
98 | if (desc->chip->mask) | ||
99 | desc->chip->mask(irq); | ||
100 | |||
101 | if (desc->chip->set_affinity) | ||
102 | desc->chip->set_affinity(irq, affinity); | ||
103 | else if (!(warned++)) | ||
104 | set_affinity = 0; | ||
105 | |||
106 | if (desc->chip->unmask) | ||
107 | desc->chip->unmask(irq); | ||
108 | |||
109 | spin_unlock(&desc->lock); | ||
110 | |||
111 | if (break_affinity && set_affinity) | ||
112 | printk("Broke affinity for irq %i\n", irq); | ||
113 | else if (!set_affinity) | ||
114 | printk("Cannot set affinity for irq %i\n", irq); | ||
115 | } | ||
116 | |||
117 | /* That doesn't seem sufficient. Give it 1ms. */ | ||
118 | local_irq_enable(); | ||
119 | mdelay(1); | ||
120 | local_irq_disable(); | ||
121 | } | ||
122 | #endif | ||
123 | 65 | ||
124 | extern void call_softirq(void); | 66 | extern void call_softirq(void); |
125 | 67 | ||
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 8d82a77a3f3b..20a5b3689463 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/smp.h> | 43 | #include <linux/smp.h> |
44 | #include <linux/nmi.h> | 44 | #include <linux/nmi.h> |
45 | 45 | ||
46 | #include <asm/debugreg.h> | ||
46 | #include <asm/apicdef.h> | 47 | #include <asm/apicdef.h> |
47 | #include <asm/system.h> | 48 | #include <asm/system.h> |
48 | 49 | ||
@@ -88,7 +89,6 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
88 | gdb_regs[GDB_SS] = __KERNEL_DS; | 89 | gdb_regs[GDB_SS] = __KERNEL_DS; |
89 | gdb_regs[GDB_FS] = 0xFFFF; | 90 | gdb_regs[GDB_FS] = 0xFFFF; |
90 | gdb_regs[GDB_GS] = 0xFFFF; | 91 | gdb_regs[GDB_GS] = 0xFFFF; |
91 | gdb_regs[GDB_SP] = (int)®s->sp; | ||
92 | #else | 92 | #else |
93 | gdb_regs[GDB_R8] = regs->r8; | 93 | gdb_regs[GDB_R8] = regs->r8; |
94 | gdb_regs[GDB_R9] = regs->r9; | 94 | gdb_regs[GDB_R9] = regs->r9; |
@@ -101,8 +101,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
101 | gdb_regs32[GDB_PS] = regs->flags; | 101 | gdb_regs32[GDB_PS] = regs->flags; |
102 | gdb_regs32[GDB_CS] = regs->cs; | 102 | gdb_regs32[GDB_CS] = regs->cs; |
103 | gdb_regs32[GDB_SS] = regs->ss; | 103 | gdb_regs32[GDB_SS] = regs->ss; |
104 | gdb_regs[GDB_SP] = regs->sp; | ||
105 | #endif | 104 | #endif |
105 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); | ||
106 | } | 106 | } |
107 | 107 | ||
108 | /** | 108 | /** |
@@ -434,6 +434,11 @@ single_step_cont(struct pt_regs *regs, struct die_args *args) | |||
434 | "resuming...\n"); | 434 | "resuming...\n"); |
435 | kgdb_arch_handle_exception(args->trapnr, args->signr, | 435 | kgdb_arch_handle_exception(args->trapnr, args->signr, |
436 | args->err, "c", "", regs); | 436 | args->err, "c", "", regs); |
437 | /* | ||
438 | * Reset the BS bit in dr6 (pointed by args->err) to | ||
439 | * denote completion of processing | ||
440 | */ | ||
441 | (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; | ||
437 | 442 | ||
438 | return NOTIFY_STOP; | 443 | return NOTIFY_STOP; |
439 | } | 444 | } |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d2b000..1f3186ce213c 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -48,31 +48,22 @@ | |||
48 | #include <linux/preempt.h> | 48 | #include <linux/preempt.h> |
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/kdebug.h> | 50 | #include <linux/kdebug.h> |
51 | #include <linux/kallsyms.h> | ||
51 | 52 | ||
52 | #include <asm/cacheflush.h> | 53 | #include <asm/cacheflush.h> |
53 | #include <asm/desc.h> | 54 | #include <asm/desc.h> |
54 | #include <asm/pgtable.h> | 55 | #include <asm/pgtable.h> |
55 | #include <asm/uaccess.h> | 56 | #include <asm/uaccess.h> |
56 | #include <asm/alternative.h> | 57 | #include <asm/alternative.h> |
58 | #include <asm/insn.h> | ||
59 | #include <asm/debugreg.h> | ||
57 | 60 | ||
58 | void jprobe_return_end(void); | 61 | void jprobe_return_end(void); |
59 | 62 | ||
60 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 63 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
61 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 64 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
62 | 65 | ||
63 | #ifdef CONFIG_X86_64 | 66 | #define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs)) |
64 | #define stack_addr(regs) ((unsigned long *)regs->sp) | ||
65 | #else | ||
66 | /* | ||
67 | * "®s->sp" looks wrong, but it's correct for x86_32. x86_32 CPUs | ||
68 | * don't save the ss and esp registers if the CPU is already in kernel | ||
69 | * mode when it traps. So for kprobes, regs->sp and regs->ss are not | ||
70 | * the [nonexistent] saved stack pointer and ss register, but rather | ||
71 | * the top 8 bytes of the pre-int3 stack. So ®s->sp happens to | ||
72 | * point to the top of the pre-int3 stack. | ||
73 | */ | ||
74 | #define stack_addr(regs) ((unsigned long *)®s->sp) | ||
75 | #endif | ||
76 | 67 | ||
77 | #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ | 68 | #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ |
78 | (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ | 69 | (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ |
@@ -106,50 +97,6 @@ static const u32 twobyte_is_boostable[256 / 32] = { | |||
106 | /* ----------------------------------------------- */ | 97 | /* ----------------------------------------------- */ |
107 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | 98 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ |
108 | }; | 99 | }; |
109 | static const u32 onebyte_has_modrm[256 / 32] = { | ||
110 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | ||
111 | /* ----------------------------------------------- */ | ||
112 | W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */ | ||
113 | W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */ | ||
114 | W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */ | ||
115 | W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */ | ||
116 | W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */ | ||
117 | W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ | ||
118 | W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */ | ||
119 | W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */ | ||
120 | W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ | ||
121 | W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */ | ||
122 | W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */ | ||
123 | W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */ | ||
124 | W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */ | ||
125 | W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ | ||
126 | W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */ | ||
127 | W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* f0 */ | ||
128 | /* ----------------------------------------------- */ | ||
129 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | ||
130 | }; | ||
131 | static const u32 twobyte_has_modrm[256 / 32] = { | ||
132 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | ||
133 | /* ----------------------------------------------- */ | ||
134 | W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */ | ||
135 | W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */ | ||
136 | W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */ | ||
137 | W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */ | ||
138 | W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */ | ||
139 | W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */ | ||
140 | W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */ | ||
141 | W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */ | ||
142 | W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */ | ||
143 | W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */ | ||
144 | W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */ | ||
145 | W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */ | ||
146 | W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */ | ||
147 | W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */ | ||
148 | W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */ | ||
149 | W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* ff */ | ||
150 | /* ----------------------------------------------- */ | ||
151 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | ||
152 | }; | ||
153 | #undef W | 100 | #undef W |
154 | 101 | ||
155 | struct kretprobe_blackpoint kretprobe_blacklist[] = { | 102 | struct kretprobe_blackpoint kretprobe_blacklist[] = { |
@@ -244,6 +191,75 @@ retry: | |||
244 | } | 191 | } |
245 | } | 192 | } |
246 | 193 | ||
194 | /* Recover the probed instruction at addr for further analysis. */ | ||
195 | static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | ||
196 | { | ||
197 | struct kprobe *kp; | ||
198 | kp = get_kprobe((void *)addr); | ||
199 | if (!kp) | ||
200 | return -EINVAL; | ||
201 | |||
202 | /* | ||
203 | * Basically, kp->ainsn.insn has an original instruction. | ||
204 | * However, RIP-relative instruction can not do single-stepping | ||
205 | * at different place, fix_riprel() tweaks the displacement of | ||
206 | * that instruction. In that case, we can't recover the instruction | ||
207 | * from the kp->ainsn.insn. | ||
208 | * | ||
209 | * On the other hand, kp->opcode has a copy of the first byte of | ||
210 | * the probed instruction, which is overwritten by int3. And | ||
211 | * the instruction at kp->addr is not modified by kprobes except | ||
212 | * for the first byte, we can recover the original instruction | ||
213 | * from it and kp->opcode. | ||
214 | */ | ||
215 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | ||
216 | buf[0] = kp->opcode; | ||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | /* Dummy buffers for kallsyms_lookup */ | ||
221 | static char __dummy_buf[KSYM_NAME_LEN]; | ||
222 | |||
223 | /* Check if paddr is at an instruction boundary */ | ||
224 | static int __kprobes can_probe(unsigned long paddr) | ||
225 | { | ||
226 | int ret; | ||
227 | unsigned long addr, offset = 0; | ||
228 | struct insn insn; | ||
229 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | ||
230 | |||
231 | if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf)) | ||
232 | return 0; | ||
233 | |||
234 | /* Decode instructions */ | ||
235 | addr = paddr - offset; | ||
236 | while (addr < paddr) { | ||
237 | kernel_insn_init(&insn, (void *)addr); | ||
238 | insn_get_opcode(&insn); | ||
239 | |||
240 | /* | ||
241 | * Check if the instruction has been modified by another | ||
242 | * kprobe, in which case we replace the breakpoint by the | ||
243 | * original instruction in our buffer. | ||
244 | */ | ||
245 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | ||
246 | ret = recover_probed_instruction(buf, addr); | ||
247 | if (ret) | ||
248 | /* | ||
249 | * Another debugging subsystem might insert | ||
250 | * this breakpoint. In that case, we can't | ||
251 | * recover it. | ||
252 | */ | ||
253 | return 0; | ||
254 | kernel_insn_init(&insn, buf); | ||
255 | } | ||
256 | insn_get_length(&insn); | ||
257 | addr += insn.length; | ||
258 | } | ||
259 | |||
260 | return (addr == paddr); | ||
261 | } | ||
262 | |||
247 | /* | 263 | /* |
248 | * Returns non-zero if opcode modifies the interrupt flag. | 264 | * Returns non-zero if opcode modifies the interrupt flag. |
249 | */ | 265 | */ |
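can_probe() above decodes forward from the start of the containing symbol one instruction at a time and accepts the probe only if the walk lands exactly on the requested address; recover_probed_instruction() substitutes the original first byte whenever the walk crosses another kprobe's int3. The boundary idea, reduced to a fixed table of instruction lengths (real decoding is done by the in-kernel insn API, not a table like this):

#include <stdio.h>

/* Pretend lengths of consecutive instructions starting at the symbol. */
static const unsigned int insn_len[] = { 1, 5, 5, 2, 3 };

static int can_probe(unsigned long sym_start, unsigned long paddr)
{
        unsigned long addr = sym_start;
        size_t i;

        for (i = 0; addr < paddr && i < sizeof(insn_len) / sizeof(insn_len[0]); i++)
                addr += insn_len[i];    /* stands in for insn_get_length() */

        return addr == paddr;           /* did we land on a boundary? */
}

int main(void)
{
        unsigned long sym = 0x1000;     /* boundaries: +0, +1, +6, +11, +13, +16 */

        printf("probe at +6: %s\n", can_probe(sym, sym + 6) ? "ok" : "rejected");
        printf("probe at +7: %s\n", can_probe(sym, sym + 7) ? "ok" : "rejected");
        return 0;
}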
@@ -277,68 +293,30 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) | |||
277 | static void __kprobes fix_riprel(struct kprobe *p) | 293 | static void __kprobes fix_riprel(struct kprobe *p) |
278 | { | 294 | { |
279 | #ifdef CONFIG_X86_64 | 295 | #ifdef CONFIG_X86_64 |
280 | u8 *insn = p->ainsn.insn; | 296 | struct insn insn; |
281 | s64 disp; | 297 | kernel_insn_init(&insn, p->ainsn.insn); |
282 | int need_modrm; | ||
283 | |||
284 | /* Skip legacy instruction prefixes. */ | ||
285 | while (1) { | ||
286 | switch (*insn) { | ||
287 | case 0x66: | ||
288 | case 0x67: | ||
289 | case 0x2e: | ||
290 | case 0x3e: | ||
291 | case 0x26: | ||
292 | case 0x64: | ||
293 | case 0x65: | ||
294 | case 0x36: | ||
295 | case 0xf0: | ||
296 | case 0xf3: | ||
297 | case 0xf2: | ||
298 | ++insn; | ||
299 | continue; | ||
300 | } | ||
301 | break; | ||
302 | } | ||
303 | 298 | ||
304 | /* Skip REX instruction prefix. */ | 299 | if (insn_rip_relative(&insn)) { |
305 | if (is_REX_prefix(insn)) | 300 | s64 newdisp; |
306 | ++insn; | 301 | u8 *disp; |
307 | 302 | insn_get_displacement(&insn); | |
308 | if (*insn == 0x0f) { | 303 | /* |
309 | /* Two-byte opcode. */ | 304 | * The copied instruction uses the %rip-relative addressing |
310 | ++insn; | 305 | * mode. Adjust the displacement for the difference between |
311 | need_modrm = test_bit(*insn, | 306 | * the original location of this instruction and the location |
312 | (unsigned long *)twobyte_has_modrm); | 307 | * of the copy that will actually be run. The tricky bit here |
313 | } else | 308 | * is making sure that the sign extension happens correctly in |
314 | /* One-byte opcode. */ | 309 | * this calculation, since we need a signed 32-bit result to |
315 | need_modrm = test_bit(*insn, | 310 | * be sign-extended to 64 bits when it's added to the %rip |
316 | (unsigned long *)onebyte_has_modrm); | 311 | * value and yield the same 64-bit result that the sign- |
317 | 312 | * extension of the original signed 32-bit displacement would | |
318 | if (need_modrm) { | 313 | * have given. |
319 | u8 modrm = *++insn; | 314 | */ |
320 | if ((modrm & 0xc7) == 0x05) { | 315 | newdisp = (u8 *) p->addr + (s64) insn.displacement.value - |
321 | /* %rip+disp32 addressing mode */ | 316 | (u8 *) p->ainsn.insn; |
322 | /* Displacement follows ModRM byte. */ | 317 | BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ |
323 | ++insn; | 318 | disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn); |
324 | /* | 319 | *(s32 *) disp = (s32) newdisp; |
325 | * The copied instruction uses the %rip-relative | ||
326 | * addressing mode. Adjust the displacement for the | ||
327 | * difference between the original location of this | ||
328 | * instruction and the location of the copy that will | ||
329 | * actually be run. The tricky bit here is making sure | ||
330 | * that the sign extension happens correctly in this | ||
331 | * calculation, since we need a signed 32-bit result to | ||
332 | * be sign-extended to 64 bits when it's added to the | ||
333 | * %rip value and yield the same 64-bit result that the | ||
334 | * sign-extension of the original signed 32-bit | ||
335 | * displacement would have given. | ||
336 | */ | ||
337 | disp = (u8 *) p->addr + *((s32 *) insn) - | ||
338 | (u8 *) p->ainsn.insn; | ||
339 | BUG_ON((s64) (s32) disp != disp); /* Sanity check. */ | ||
340 | *(s32 *)insn = (s32) disp; | ||
341 | } | ||
342 | } | 320 | } |
343 | #endif | 321 | #endif |
344 | } | 322 | } |
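To make the sign-extension constraint in the comment concrete, here is a worked userspace example of the same arithmetic with made-up addresses. Because the copy has the same length as the original instruction, the %rip-relative target stays fixed as long as the displacement grows by (original address − copy address), and the result must still fit in a signed 32-bit field:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t orig_insn = 0xffffffff81000000ULL;	/* probed address (made up) */
		uint64_t copy_insn = 0xffffffff81200000ULL;	/* insn slot holding the copy */
		int32_t  old_disp  = 0x1000;			/* disp32 encoded in the insn */

		/* same formula as fix_riprel(): p->addr + displacement - p->ainsn.insn */
		int64_t newdisp = (int64_t)orig_insn + old_disp - (int64_t)copy_insn;

		assert((int64_t)(int32_t)newdisp == newdisp);	/* the BUG_ON() condition */
		printf("new disp32 = %lld\n", (long long)newdisp);
		return 0;
	}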
@@ -359,6 +337,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) | |||
359 | 337 | ||
360 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 338 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
361 | { | 339 | { |
340 | if (!can_probe((unsigned long)p->addr)) | ||
341 | return -EILSEQ; | ||
362 | /* insn: must be on special executable page on x86. */ | 342 | /* insn: must be on special executable page on x86. */ |
363 | p->ainsn.insn = get_insn_slot(); | 343 | p->ainsn.insn = get_insn_slot(); |
364 | if (!p->ainsn.insn) | 344 | if (!p->ainsn.insn) |
@@ -472,17 +452,6 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
472 | { | 452 | { |
473 | switch (kcb->kprobe_status) { | 453 | switch (kcb->kprobe_status) { |
474 | case KPROBE_HIT_SSDONE: | 454 | case KPROBE_HIT_SSDONE: |
475 | #ifdef CONFIG_X86_64 | ||
476 | /* TODO: Provide re-entrancy from post_kprobes_handler() and | ||
477 | * avoid exception stack corruption while single-stepping on | ||
478 | * the instruction of the new probe. | ||
479 | */ | ||
480 | arch_disarm_kprobe(p); | ||
481 | regs->ip = (unsigned long)p->addr; | ||
482 | reset_current_kprobe(); | ||
483 | preempt_enable_no_resched(); | ||
484 | break; | ||
485 | #endif | ||
486 | case KPROBE_HIT_ACTIVE: | 455 | case KPROBE_HIT_ACTIVE: |
487 | save_previous_kprobe(kcb); | 456 | save_previous_kprobe(kcb); |
488 | set_current_kprobe(p, regs, kcb); | 457 | set_current_kprobe(p, regs, kcb); |
@@ -491,18 +460,16 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
491 | kcb->kprobe_status = KPROBE_REENTER; | 460 | kcb->kprobe_status = KPROBE_REENTER; |
492 | break; | 461 | break; |
493 | case KPROBE_HIT_SS: | 462 | case KPROBE_HIT_SS: |
494 | if (p == kprobe_running()) { | 463 | /* A probe has been hit in the codepath leading up to, or just |
495 | regs->flags &= ~X86_EFLAGS_TF; | 464 | * after, single-stepping of a probed instruction. This entire |
496 | regs->flags |= kcb->kprobe_saved_flags; | 465 | * codepath should strictly reside in .kprobes.text section. |
497 | return 0; | 466 | * Raise a BUG or we'll continue in an endless reentering loop |
498 | } else { | 467 | * and eventually hit a stack overflow. |
499 | /* A probe has been hit in the codepath leading up | 468 | */ |
500 | * to, or just after, single-stepping of a probed | 469 | printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", |
501 | * instruction. This entire codepath should strictly | 470 | p->addr); |
502 | * reside in .kprobes.text section. Raise a warning | 471 | dump_kprobe(p); |
503 | * to highlight this peculiar case. | 472 | BUG(); |
504 | */ | ||
505 | } | ||
506 | default: | 473 | default: |
507 | /* impossible cases */ | 474 | /* impossible cases */ |
508 | WARN_ON(1); | 475 | WARN_ON(1); |
@@ -967,8 +934,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
967 | ret = NOTIFY_STOP; | 934 | ret = NOTIFY_STOP; |
968 | break; | 935 | break; |
969 | case DIE_DEBUG: | 936 | case DIE_DEBUG: |
970 | if (post_kprobe_handler(args->regs)) | 937 | if (post_kprobe_handler(args->regs)) { |
938 | /* | ||
939 | * Reset the BS bit in dr6 (pointed by args->err) to | ||
940 | * denote completion of processing | ||
941 | */ | ||
942 | (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; | ||
971 | ret = NOTIFY_STOP; | 943 | ret = NOTIFY_STOP; |
944 | } | ||
972 | break; | 945 | break; |
973 | case DIE_GPF: | 946 | case DIE_GPF: |
974 | /* | 947 | /* |
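The DIE_DEBUG change above leans on a small convention that the comment only hints at: for debug traps, args->err carries the address of the saved dr6 value rather than an error code, so a notifier that has consumed the single-step clears the BS bit in place and later handlers will not act on it again. A toy sketch of that hand-back, with illustrative names and values:

	#include <stdio.h>

	#define DR_STEP	0x4000UL	/* BS bit of dr6 */

	/* 'err' actually smuggles a pointer to the saved dr6 value */
	static void mark_step_handled(unsigned long err)
	{
		*(unsigned long *)err &= ~DR_STEP;
	}

	int main(void)
	{
		unsigned long dr6 = DR_STEP | 0x1;	/* BS set, plus a breakpoint bit */

		mark_step_handled((unsigned long)&dr6);
		printf("dr6 = %#lx\n", dr6);		/* BS cleared, 0x1 preserved */
		return 0;
	}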
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index c1c429d00130..c843f8406da2 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/desc.h> | 25 | #include <asm/desc.h> |
26 | #include <asm/system.h> | 26 | #include <asm/system.h> |
27 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
28 | #include <asm/debugreg.h> | ||
28 | 29 | ||
29 | static void set_idt(void *newidt, __u16 limit) | 30 | static void set_idt(void *newidt, __u16 limit) |
30 | { | 31 | { |
@@ -202,6 +203,7 @@ void machine_kexec(struct kimage *image) | |||
202 | 203 | ||
203 | /* Interrupts aren't acceptable while we reboot */ | 204 | /* Interrupts aren't acceptable while we reboot */ |
204 | local_irq_disable(); | 205 | local_irq_disable(); |
206 | hw_breakpoint_disable(); | ||
205 | 207 | ||
206 | if (image->preserve_context) { | 208 | if (image->preserve_context) { |
207 | #ifdef CONFIG_X86_IO_APIC | 209 | #ifdef CONFIG_X86_IO_APIC |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 84c3bf209e98..4a8bb82248ae 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
21 | #include <asm/debugreg.h> | ||
21 | 22 | ||
22 | static int init_one_level2_page(struct kimage *image, pgd_t *pgd, | 23 | static int init_one_level2_page(struct kimage *image, pgd_t *pgd, |
23 | unsigned long addr) | 24 | unsigned long addr) |
@@ -282,6 +283,7 @@ void machine_kexec(struct kimage *image) | |||
282 | 283 | ||
283 | /* Interrupts aren't acceptable while we reboot */ | 284 | /* Interrupts aren't acceptable while we reboot */ |
284 | local_irq_disable(); | 285 | local_irq_disable(); |
286 | hw_breakpoint_disable(); | ||
285 | 287 | ||
286 | if (image->preserve_context) { | 288 | if (image->preserve_context) { |
287 | #ifdef CONFIG_X86_IO_APIC | 289 | #ifdef CONFIG_X86_IO_APIC |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 378e9a8f1bf8..2bcad3926edb 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -73,7 +73,6 @@ | |||
73 | #include <linux/platform_device.h> | 73 | #include <linux/platform_device.h> |
74 | #include <linux/miscdevice.h> | 74 | #include <linux/miscdevice.h> |
75 | #include <linux/capability.h> | 75 | #include <linux/capability.h> |
76 | #include <linux/smp_lock.h> | ||
77 | #include <linux/kernel.h> | 76 | #include <linux/kernel.h> |
78 | #include <linux/module.h> | 77 | #include <linux/module.h> |
79 | #include <linux/mutex.h> | 78 | #include <linux/mutex.h> |
@@ -201,7 +200,6 @@ static int do_microcode_update(const void __user *buf, size_t size) | |||
201 | 200 | ||
202 | static int microcode_open(struct inode *unused1, struct file *unused2) | 201 | static int microcode_open(struct inode *unused1, struct file *unused2) |
203 | { | 202 | { |
204 | cycle_kernel_lock(); | ||
205 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | 203 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; |
206 | } | 204 | } |
207 | 205 | ||
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 6a3cefc7dda1..553449951b84 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -174,21 +174,17 @@ static int msr_open(struct inode *inode, struct file *file) | |||
174 | { | 174 | { |
175 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); | 175 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); |
176 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 176 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
177 | int ret = 0; | ||
178 | 177 | ||
179 | lock_kernel(); | ||
180 | cpu = iminor(file->f_path.dentry->d_inode); | 178 | cpu = iminor(file->f_path.dentry->d_inode); |
181 | 179 | ||
182 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { | 180 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) |
183 | ret = -ENXIO; /* No such CPU */ | 181 | return -ENXIO; /* No such CPU */ |
184 | goto out; | 182 | |
185 | } | ||
186 | c = &cpu_data(cpu); | 183 | c = &cpu_data(cpu); |
187 | if (!cpu_has(c, X86_FEATURE_MSR)) | 184 | if (!cpu_has(c, X86_FEATURE_MSR)) |
188 | ret = -EIO; /* MSR not supported */ | 185 | return -EIO; /* MSR not supported */ |
189 | out: | 186 | |
190 | unlock_kernel(); | 187 | return 0; |
191 | return ret; | ||
192 | } | 188 | } |
193 | 189 | ||
194 | /* | 190 | /* |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 971a3bec47a8..c563e4c8ff39 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/dma.h> | 46 | #include <asm/dma.h> |
47 | #include <asm/rio.h> | 47 | #include <asm/rio.h> |
48 | #include <asm/bios_ebda.h> | 48 | #include <asm/bios_ebda.h> |
49 | #include <asm/x86_init.h> | ||
49 | 50 | ||
50 | #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT | 51 | #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT |
51 | int use_calgary __read_mostly = 1; | 52 | int use_calgary __read_mostly = 1; |
@@ -244,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev, | |||
244 | if (panic_on_overflow) | 245 | if (panic_on_overflow) |
245 | panic("Calgary: fix the allocator.\n"); | 246 | panic("Calgary: fix the allocator.\n"); |
246 | else | 247 | else |
247 | return bad_dma_address; | 248 | return DMA_ERROR_CODE; |
248 | } | 249 | } |
249 | } | 250 | } |
250 | 251 | ||
@@ -260,12 +261,15 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, | |||
260 | void *vaddr, unsigned int npages, int direction) | 261 | void *vaddr, unsigned int npages, int direction) |
261 | { | 262 | { |
262 | unsigned long entry; | 263 | unsigned long entry; |
263 | dma_addr_t ret = bad_dma_address; | 264 | dma_addr_t ret; |
264 | 265 | ||
265 | entry = iommu_range_alloc(dev, tbl, npages); | 266 | entry = iommu_range_alloc(dev, tbl, npages); |
266 | 267 | ||
267 | if (unlikely(entry == bad_dma_address)) | 268 | if (unlikely(entry == DMA_ERROR_CODE)) { |
268 | goto error; | 269 | printk(KERN_WARNING "Calgary: failed to allocate %u pages in " |
270 | "iommu %p\n", npages, tbl); | ||
271 | return DMA_ERROR_CODE; | ||
272 | } | ||
269 | 273 | ||
270 | /* set the return dma address */ | 274 | /* set the return dma address */ |
271 | ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK); | 275 | ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK); |
@@ -273,13 +277,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, | |||
273 | /* put the TCEs in the HW table */ | 277 | /* put the TCEs in the HW table */ |
274 | tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, | 278 | tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, |
275 | direction); | 279 | direction); |
276 | |||
277 | return ret; | 280 | return ret; |
278 | |||
279 | error: | ||
280 | printk(KERN_WARNING "Calgary: failed to allocate %u pages in " | ||
281 | "iommu %p\n", npages, tbl); | ||
282 | return bad_dma_address; | ||
283 | } | 281 | } |
284 | 282 | ||
285 | static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, | 283 | static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, |
@@ -290,8 +288,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, | |||
290 | unsigned long flags; | 288 | unsigned long flags; |
291 | 289 | ||
292 | /* were we called with bad_dma_address? */ | 290 | /* were we called with bad_dma_address? */ |
293 | badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); | 291 | badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE); |
294 | if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { | 292 | if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) { |
295 | WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " | 293 | WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " |
296 | "address 0x%Lx\n", dma_addr); | 294 | "address 0x%Lx\n", dma_addr); |
297 | return; | 295 | return; |
@@ -318,13 +316,15 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) | |||
318 | 316 | ||
319 | pdev = to_pci_dev(dev); | 317 | pdev = to_pci_dev(dev); |
320 | 318 | ||
319 | /* search up the device tree for an iommu */ | ||
321 | pbus = pdev->bus; | 320 | pbus = pdev->bus; |
322 | 321 | do { | |
323 | /* is the device behind a bridge? Look for the root bus */ | 322 | tbl = pci_iommu(pbus); |
324 | while (pbus->parent) | 323 | if (tbl && tbl->it_busno == pbus->number) |
324 | break; | ||
325 | tbl = NULL; | ||
325 | pbus = pbus->parent; | 326 | pbus = pbus->parent; |
326 | 327 | } while (pbus); | |
327 | tbl = pci_iommu(pbus); | ||
328 | 328 | ||
329 | BUG_ON(tbl && (tbl->it_busno != pbus->number)); | 329 | BUG_ON(tbl && (tbl->it_busno != pbus->number)); |
330 | 330 | ||
@@ -373,7 +373,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
373 | npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); | 373 | npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); |
374 | 374 | ||
375 | entry = iommu_range_alloc(dev, tbl, npages); | 375 | entry = iommu_range_alloc(dev, tbl, npages); |
376 | if (entry == bad_dma_address) { | 376 | if (entry == DMA_ERROR_CODE) { |
377 | /* makes sure unmap knows to stop */ | 377 | /* makes sure unmap knows to stop */ |
378 | s->dma_length = 0; | 378 | s->dma_length = 0; |
379 | goto error; | 379 | goto error; |
@@ -391,7 +391,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
391 | error: | 391 | error: |
392 | calgary_unmap_sg(dev, sg, nelems, dir, NULL); | 392 | calgary_unmap_sg(dev, sg, nelems, dir, NULL); |
393 | for_each_sg(sg, s, nelems, i) { | 393 | for_each_sg(sg, s, nelems, i) { |
394 | sg->dma_address = bad_dma_address; | 394 | sg->dma_address = DMA_ERROR_CODE; |
395 | sg->dma_length = 0; | 395 | sg->dma_length = 0; |
396 | } | 396 | } |
397 | return 0; | 397 | return 0; |
@@ -446,7 +446,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, | |||
446 | 446 | ||
447 | /* set up tces to cover the allocated range */ | 447 | /* set up tces to cover the allocated range */ |
448 | mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); | 448 | mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); |
449 | if (mapping == bad_dma_address) | 449 | if (mapping == DMA_ERROR_CODE) |
450 | goto free; | 450 | goto free; |
451 | *dma_handle = mapping; | 451 | *dma_handle = mapping; |
452 | return ret; | 452 | return ret; |
@@ -727,7 +727,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev) | |||
727 | struct iommu_table *tbl = pci_iommu(dev->bus); | 727 | struct iommu_table *tbl = pci_iommu(dev->bus); |
728 | 728 | ||
729 | /* reserve EMERGENCY_PAGES from bad_dma_address and up */ | 729 | /* reserve EMERGENCY_PAGES from bad_dma_address and up */ |
730 | iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES); | 730 | iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES); |
731 | 731 | ||
732 | /* avoid the BIOS/VGA first 640KB-1MB region */ | 732 | /* avoid the BIOS/VGA first 640KB-1MB region */ |
733 | /* for CalIOC2 - avoid the entire first MB */ | 733 | /* for CalIOC2 - avoid the entire first MB */ |
@@ -1344,6 +1344,23 @@ static void __init get_tce_space_from_tar(void) | |||
1344 | return; | 1344 | return; |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | static int __init calgary_iommu_init(void) | ||
1348 | { | ||
1349 | int ret; | ||
1350 | |||
1351 | /* ok, we're trying to use Calgary - let's roll */ | ||
1352 | printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n"); | ||
1353 | |||
1354 | ret = calgary_init(); | ||
1355 | if (ret) { | ||
1356 | printk(KERN_ERR "PCI-DMA: Calgary init failed %d, " | ||
1357 | "falling back to no_iommu\n", ret); | ||
1358 | return ret; | ||
1359 | } | ||
1360 | |||
1361 | return 0; | ||
1362 | } | ||
1363 | |||
1347 | void __init detect_calgary(void) | 1364 | void __init detect_calgary(void) |
1348 | { | 1365 | { |
1349 | int bus; | 1366 | int bus; |
@@ -1357,7 +1374,7 @@ void __init detect_calgary(void) | |||
1357 | * if the user specified iommu=off or iommu=soft or we found | 1374 | * if the user specified iommu=off or iommu=soft or we found |
1358 | * another HW IOMMU already, bail out. | 1375 | * another HW IOMMU already, bail out. |
1359 | */ | 1376 | */ |
1360 | if (swiotlb || no_iommu || iommu_detected) | 1377 | if (no_iommu || iommu_detected) |
1361 | return; | 1378 | return; |
1362 | 1379 | ||
1363 | if (!use_calgary) | 1380 | if (!use_calgary) |
@@ -1442,9 +1459,7 @@ void __init detect_calgary(void) | |||
1442 | printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n", | 1459 | printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n", |
1443 | specified_table_size); | 1460 | specified_table_size); |
1444 | 1461 | ||
1445 | /* swiotlb for devices that aren't behind the Calgary. */ | 1462 | x86_init.iommu.iommu_init = calgary_iommu_init; |
1446 | if (max_pfn > MAX_DMA32_PFN) | ||
1447 | swiotlb = 1; | ||
1448 | } | 1463 | } |
1449 | return; | 1464 | return; |
1450 | 1465 | ||
@@ -1457,35 +1472,6 @@ cleanup: | |||
1457 | } | 1472 | } |
1458 | } | 1473 | } |
1459 | 1474 | ||
1460 | int __init calgary_iommu_init(void) | ||
1461 | { | ||
1462 | int ret; | ||
1463 | |||
1464 | if (no_iommu || (swiotlb && !calgary_detected)) | ||
1465 | return -ENODEV; | ||
1466 | |||
1467 | if (!calgary_detected) | ||
1468 | return -ENODEV; | ||
1469 | |||
1470 | /* ok, we're trying to use Calgary - let's roll */ | ||
1471 | printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n"); | ||
1472 | |||
1473 | ret = calgary_init(); | ||
1474 | if (ret) { | ||
1475 | printk(KERN_ERR "PCI-DMA: Calgary init failed %d, " | ||
1476 | "falling back to no_iommu\n", ret); | ||
1477 | return ret; | ||
1478 | } | ||
1479 | |||
1480 | force_iommu = 1; | ||
1481 | bad_dma_address = 0x0; | ||
1482 | /* dma_ops is set to swiotlb or nommu */ | ||
1483 | if (!dma_ops) | ||
1484 | dma_ops = &nommu_dma_ops; | ||
1485 | |||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | static int __init calgary_parse_options(char *p) | 1475 | static int __init calgary_parse_options(char *p) |
1490 | { | 1476 | { |
1491 | unsigned int bridge; | 1477 | unsigned int bridge; |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index a6e804d16c35..afcc58b69c7c 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -11,10 +11,11 @@ | |||
11 | #include <asm/gart.h> | 11 | #include <asm/gart.h> |
12 | #include <asm/calgary.h> | 12 | #include <asm/calgary.h> |
13 | #include <asm/amd_iommu.h> | 13 | #include <asm/amd_iommu.h> |
14 | #include <asm/x86_init.h> | ||
14 | 15 | ||
15 | static int forbid_dac __read_mostly; | 16 | static int forbid_dac __read_mostly; |
16 | 17 | ||
17 | struct dma_map_ops *dma_ops; | 18 | struct dma_map_ops *dma_ops = &nommu_dma_ops; |
18 | EXPORT_SYMBOL(dma_ops); | 19 | EXPORT_SYMBOL(dma_ops); |
19 | 20 | ||
20 | static int iommu_sac_force __read_mostly; | 21 | static int iommu_sac_force __read_mostly; |
@@ -42,9 +43,6 @@ int iommu_detected __read_mostly = 0; | |||
42 | */ | 43 | */ |
43 | int iommu_pass_through __read_mostly; | 44 | int iommu_pass_through __read_mostly; |
44 | 45 | ||
45 | dma_addr_t bad_dma_address __read_mostly = 0; | ||
46 | EXPORT_SYMBOL(bad_dma_address); | ||
47 | |||
48 | /* Dummy device used for NULL arguments (normally ISA). */ | 46 | /* Dummy device used for NULL arguments (normally ISA). */ |
49 | struct device x86_dma_fallback_dev = { | 47 | struct device x86_dma_fallback_dev = { |
50 | .init_name = "fallback device", | 48 | .init_name = "fallback device", |
@@ -126,20 +124,17 @@ void __init pci_iommu_alloc(void) | |||
126 | /* free the range so iommu could get some range less than 4G */ | 124 | /* free the range so iommu could get some range less than 4G */ |
127 | dma32_free_bootmem(); | 125 | dma32_free_bootmem(); |
128 | #endif | 126 | #endif |
127 | if (pci_swiotlb_init()) | ||
128 | return; | ||
129 | 129 | ||
130 | /* | ||
131 | * The order of these functions is important for | ||
132 | * fall-back/fail-over reasons | ||
133 | */ | ||
134 | gart_iommu_hole_init(); | 130 | gart_iommu_hole_init(); |
135 | 131 | ||
136 | detect_calgary(); | 132 | detect_calgary(); |
137 | 133 | ||
138 | detect_intel_iommu(); | 134 | detect_intel_iommu(); |
139 | 135 | ||
136 | /* needs to be called after gart_iommu_hole_init */ | ||
140 | amd_iommu_detect(); | 137 | amd_iommu_detect(); |
141 | |||
142 | pci_swiotlb_init(); | ||
143 | } | 138 | } |
144 | 139 | ||
145 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 140 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
@@ -214,7 +209,7 @@ static __init int iommu_setup(char *p) | |||
214 | if (!strncmp(p, "allowdac", 8)) | 209 | if (!strncmp(p, "allowdac", 8)) |
215 | forbid_dac = 0; | 210 | forbid_dac = 0; |
216 | if (!strncmp(p, "nodac", 5)) | 211 | if (!strncmp(p, "nodac", 5)) |
217 | forbid_dac = -1; | 212 | forbid_dac = 1; |
218 | if (!strncmp(p, "usedac", 6)) { | 213 | if (!strncmp(p, "usedac", 6)) { |
219 | forbid_dac = -1; | 214 | forbid_dac = -1; |
220 | return 1; | 215 | return 1; |
@@ -289,25 +284,17 @@ static int __init pci_iommu_init(void) | |||
289 | #ifdef CONFIG_PCI | 284 | #ifdef CONFIG_PCI |
290 | dma_debug_add_bus(&pci_bus_type); | 285 | dma_debug_add_bus(&pci_bus_type); |
291 | #endif | 286 | #endif |
287 | x86_init.iommu.iommu_init(); | ||
292 | 288 | ||
293 | calgary_iommu_init(); | 289 | if (swiotlb) { |
294 | 290 | printk(KERN_INFO "PCI-DMA: " | |
295 | intel_iommu_init(); | 291 | "Using software bounce buffering for IO (SWIOTLB)\n"); |
292 | swiotlb_print_info(); | ||
293 | } else | ||
294 | swiotlb_free(); | ||
296 | 295 | ||
297 | amd_iommu_init(); | ||
298 | |||
299 | gart_iommu_init(); | ||
300 | |||
301 | no_iommu_init(); | ||
302 | return 0; | 296 | return 0; |
303 | } | 297 | } |
304 | |||
305 | void pci_iommu_shutdown(void) | ||
306 | { | ||
307 | gart_iommu_shutdown(); | ||
308 | |||
309 | amd_iommu_shutdown(); | ||
310 | } | ||
311 | /* Must execute after PCI subsystem */ | 298 | /* Must execute after PCI subsystem */ |
312 | rootfs_initcall(pci_iommu_init); | 299 | rootfs_initcall(pci_iommu_init); |
313 | 300 | ||
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index a7f1b64f86e0..e6a0d402f171 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/swiotlb.h> | 39 | #include <asm/swiotlb.h> |
40 | #include <asm/dma.h> | 40 | #include <asm/dma.h> |
41 | #include <asm/k8.h> | 41 | #include <asm/k8.h> |
42 | #include <asm/x86_init.h> | ||
42 | 43 | ||
43 | static unsigned long iommu_bus_base; /* GART remapping area (physical) */ | 44 | static unsigned long iommu_bus_base; /* GART remapping area (physical) */ |
44 | static unsigned long iommu_size; /* size of remapping area bytes */ | 45 | static unsigned long iommu_size; /* size of remapping area bytes */ |
@@ -46,6 +47,8 @@ static unsigned long iommu_pages; /* .. and in pages */ | |||
46 | 47 | ||
47 | static u32 *iommu_gatt_base; /* Remapping table */ | 48 | static u32 *iommu_gatt_base; /* Remapping table */ |
48 | 49 | ||
50 | static dma_addr_t bad_dma_addr; | ||
51 | |||
49 | /* | 52 | /* |
50 | * If this is disabled the IOMMU will use an optimized flushing strategy | 53 | * If this is disabled the IOMMU will use an optimized flushing strategy |
51 | * of only flushing when an mapping is reused. With it true the GART is | 54 | * of only flushing when an mapping is reused. With it true the GART is |
@@ -92,7 +95,7 @@ static unsigned long alloc_iommu(struct device *dev, int size, | |||
92 | 95 | ||
93 | base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), | 96 | base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), |
94 | PAGE_SIZE) >> PAGE_SHIFT; | 97 | PAGE_SIZE) >> PAGE_SHIFT; |
95 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, | 98 | boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1, |
96 | PAGE_SIZE) >> PAGE_SHIFT; | 99 | PAGE_SIZE) >> PAGE_SHIFT; |
97 | 100 | ||
98 | spin_lock_irqsave(&iommu_bitmap_lock, flags); | 101 | spin_lock_irqsave(&iommu_bitmap_lock, flags); |
@@ -216,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | |||
216 | if (panic_on_overflow) | 219 | if (panic_on_overflow) |
217 | panic("dma_map_area overflow %lu bytes\n", size); | 220 | panic("dma_map_area overflow %lu bytes\n", size); |
218 | iommu_full(dev, size, dir); | 221 | iommu_full(dev, size, dir); |
219 | return bad_dma_address; | 222 | return bad_dma_addr; |
220 | } | 223 | } |
221 | 224 | ||
222 | for (i = 0; i < npages; i++) { | 225 | for (i = 0; i < npages; i++) { |
@@ -294,7 +297,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | |||
294 | int i; | 297 | int i; |
295 | 298 | ||
296 | #ifdef CONFIG_IOMMU_DEBUG | 299 | #ifdef CONFIG_IOMMU_DEBUG |
297 | printk(KERN_DEBUG "dma_map_sg overflow\n"); | 300 | pr_debug("dma_map_sg overflow\n"); |
298 | #endif | 301 | #endif |
299 | 302 | ||
300 | for_each_sg(sg, s, nents, i) { | 303 | for_each_sg(sg, s, nents, i) { |
@@ -302,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | |||
302 | 305 | ||
303 | if (nonforced_iommu(dev, addr, s->length)) { | 306 | if (nonforced_iommu(dev, addr, s->length)) { |
304 | addr = dma_map_area(dev, addr, s->length, dir, 0); | 307 | addr = dma_map_area(dev, addr, s->length, dir, 0); |
305 | if (addr == bad_dma_address) { | 308 | if (addr == bad_dma_addr) { |
306 | if (i > 0) | 309 | if (i > 0) |
307 | gart_unmap_sg(dev, sg, i, dir, NULL); | 310 | gart_unmap_sg(dev, sg, i, dir, NULL); |
308 | nents = 0; | 311 | nents = 0; |
@@ -389,12 +392,14 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
389 | if (!dev) | 392 | if (!dev) |
390 | dev = &x86_dma_fallback_dev; | 393 | dev = &x86_dma_fallback_dev; |
391 | 394 | ||
392 | out = 0; | 395 | out = 0; |
393 | start = 0; | 396 | start = 0; |
394 | start_sg = sgmap = sg; | 397 | start_sg = sg; |
395 | seg_size = 0; | 398 | sgmap = sg; |
396 | max_seg_size = dma_get_max_seg_size(dev); | 399 | seg_size = 0; |
397 | ps = NULL; /* shut up gcc */ | 400 | max_seg_size = dma_get_max_seg_size(dev); |
401 | ps = NULL; /* shut up gcc */ | ||
402 | |||
398 | for_each_sg(sg, s, nents, i) { | 403 | for_each_sg(sg, s, nents, i) { |
399 | dma_addr_t addr = sg_phys(s); | 404 | dma_addr_t addr = sg_phys(s); |
400 | 405 | ||
@@ -417,11 +422,12 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
417 | sgmap, pages, need) < 0) | 422 | sgmap, pages, need) < 0) |
418 | goto error; | 423 | goto error; |
419 | out++; | 424 | out++; |
420 | seg_size = 0; | 425 | |
421 | sgmap = sg_next(sgmap); | 426 | seg_size = 0; |
422 | pages = 0; | 427 | sgmap = sg_next(sgmap); |
423 | start = i; | 428 | pages = 0; |
424 | start_sg = s; | 429 | start = i; |
430 | start_sg = s; | ||
425 | } | 431 | } |
426 | } | 432 | } |
427 | 433 | ||
@@ -455,7 +461,7 @@ error: | |||
455 | 461 | ||
456 | iommu_full(dev, pages << PAGE_SHIFT, dir); | 462 | iommu_full(dev, pages << PAGE_SHIFT, dir); |
457 | for_each_sg(sg, s, nents, i) | 463 | for_each_sg(sg, s, nents, i) |
458 | s->dma_address = bad_dma_address; | 464 | s->dma_address = bad_dma_addr; |
459 | return 0; | 465 | return 0; |
460 | } | 466 | } |
461 | 467 | ||
@@ -479,7 +485,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, | |||
479 | DMA_BIDIRECTIONAL, align_mask); | 485 | DMA_BIDIRECTIONAL, align_mask); |
480 | 486 | ||
481 | flush_gart(); | 487 | flush_gart(); |
482 | if (paddr != bad_dma_address) { | 488 | if (paddr != bad_dma_addr) { |
483 | *dma_addr = paddr; | 489 | *dma_addr = paddr; |
484 | return page_address(page); | 490 | return page_address(page); |
485 | } | 491 | } |
@@ -499,6 +505,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
499 | free_pages((unsigned long)vaddr, get_order(size)); | 505 | free_pages((unsigned long)vaddr, get_order(size)); |
500 | } | 506 | } |
501 | 507 | ||
508 | static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
509 | { | ||
510 | return (dma_addr == bad_dma_addr); | ||
511 | } | ||
512 | |||
502 | static int no_agp; | 513 | static int no_agp; |
503 | 514 | ||
504 | static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) | 515 | static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) |
@@ -515,7 +526,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) | |||
515 | iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; | 526 | iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; |
516 | 527 | ||
517 | if (iommu_size < 64*1024*1024) { | 528 | if (iommu_size < 64*1024*1024) { |
518 | printk(KERN_WARNING | 529 | pr_warning( |
519 | "PCI-DMA: Warning: Small IOMMU %luMB." | 530 | "PCI-DMA: Warning: Small IOMMU %luMB." |
520 | " Consider increasing the AGP aperture in BIOS\n", | 531 | " Consider increasing the AGP aperture in BIOS\n", |
521 | iommu_size >> 20); | 532 | iommu_size >> 20); |
@@ -570,28 +581,32 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc) | |||
570 | aperture_alloc = aper_alloc; | 581 | aperture_alloc = aper_alloc; |
571 | } | 582 | } |
572 | 583 | ||
573 | static int gart_resume(struct sys_device *dev) | 584 | static void gart_fixup_northbridges(struct sys_device *dev) |
574 | { | 585 | { |
575 | printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n"); | 586 | int i; |
576 | 587 | ||
577 | if (fix_up_north_bridges) { | 588 | if (!fix_up_north_bridges) |
578 | int i; | 589 | return; |
579 | 590 | ||
580 | printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n"); | 591 | pr_info("PCI-DMA: Restoring GART aperture settings\n"); |
581 | 592 | ||
582 | for (i = 0; i < num_k8_northbridges; i++) { | 593 | for (i = 0; i < num_k8_northbridges; i++) { |
583 | struct pci_dev *dev = k8_northbridges[i]; | 594 | struct pci_dev *dev = k8_northbridges[i]; |
584 | 595 | ||
585 | /* | 596 | /* |
586 | * Don't enable translations just yet. That is the next | 597 | * Don't enable translations just yet. That is the next |
587 | * step. Restore the pre-suspend aperture settings. | 598 | * step. Restore the pre-suspend aperture settings. |
588 | */ | 599 | */ |
589 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, | 600 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1); |
590 | aperture_order << 1); | 601 | pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25); |
591 | pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, | ||
592 | aperture_alloc >> 25); | ||
593 | } | ||
594 | } | 602 | } |
603 | } | ||
604 | |||
605 | static int gart_resume(struct sys_device *dev) | ||
606 | { | ||
607 | pr_info("PCI-DMA: Resuming GART IOMMU\n"); | ||
608 | |||
609 | gart_fixup_northbridges(dev); | ||
595 | 610 | ||
596 | enable_gart_translations(); | 611 | enable_gart_translations(); |
597 | 612 | ||
@@ -604,15 +619,14 @@ static int gart_suspend(struct sys_device *dev, pm_message_t state) | |||
604 | } | 619 | } |
605 | 620 | ||
606 | static struct sysdev_class gart_sysdev_class = { | 621 | static struct sysdev_class gart_sysdev_class = { |
607 | .name = "gart", | 622 | .name = "gart", |
608 | .suspend = gart_suspend, | 623 | .suspend = gart_suspend, |
609 | .resume = gart_resume, | 624 | .resume = gart_resume, |
610 | 625 | ||
611 | }; | 626 | }; |
612 | 627 | ||
613 | static struct sys_device device_gart = { | 628 | static struct sys_device device_gart = { |
614 | .id = 0, | 629 | .cls = &gart_sysdev_class, |
615 | .cls = &gart_sysdev_class, | ||
616 | }; | 630 | }; |
617 | 631 | ||
618 | /* | 632 | /* |
@@ -627,7 +641,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
627 | void *gatt; | 641 | void *gatt; |
628 | int i, error; | 642 | int i, error; |
629 | 643 | ||
630 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); | 644 | pr_info("PCI-DMA: Disabling AGP.\n"); |
645 | |||
631 | aper_size = aper_base = info->aper_size = 0; | 646 | aper_size = aper_base = info->aper_size = 0; |
632 | dev = NULL; | 647 | dev = NULL; |
633 | for (i = 0; i < num_k8_northbridges; i++) { | 648 | for (i = 0; i < num_k8_northbridges; i++) { |
@@ -645,6 +660,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
645 | } | 660 | } |
646 | if (!aper_base) | 661 | if (!aper_base) |
647 | goto nommu; | 662 | goto nommu; |
663 | |||
648 | info->aper_base = aper_base; | 664 | info->aper_base = aper_base; |
649 | info->aper_size = aper_size >> 20; | 665 | info->aper_size = aper_size >> 20; |
650 | 666 | ||
@@ -667,14 +683,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
667 | 683 | ||
668 | flush_gart(); | 684 | flush_gart(); |
669 | 685 | ||
670 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", | 686 | pr_info("PCI-DMA: aperture base @ %x size %u KB\n", |
671 | aper_base, aper_size>>10); | 687 | aper_base, aper_size>>10); |
672 | 688 | ||
673 | return 0; | 689 | return 0; |
674 | 690 | ||
675 | nommu: | 691 | nommu: |
676 | /* Should not happen anymore */ | 692 | /* Should not happen anymore */ |
677 | printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n" | 693 | pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n" |
678 | "falling back to iommu=soft.\n"); | 694 | "falling back to iommu=soft.\n"); |
679 | return -1; | 695 | return -1; |
680 | } | 696 | } |
@@ -686,14 +702,15 @@ static struct dma_map_ops gart_dma_ops = { | |||
686 | .unmap_page = gart_unmap_page, | 702 | .unmap_page = gart_unmap_page, |
687 | .alloc_coherent = gart_alloc_coherent, | 703 | .alloc_coherent = gart_alloc_coherent, |
688 | .free_coherent = gart_free_coherent, | 704 | .free_coherent = gart_free_coherent, |
705 | .mapping_error = gart_mapping_error, | ||
689 | }; | 706 | }; |
690 | 707 | ||
691 | void gart_iommu_shutdown(void) | 708 | static void gart_iommu_shutdown(void) |
692 | { | 709 | { |
693 | struct pci_dev *dev; | 710 | struct pci_dev *dev; |
694 | int i; | 711 | int i; |
695 | 712 | ||
696 | if (no_agp && (dma_ops != &gart_dma_ops)) | 713 | if (no_agp) |
697 | return; | 714 | return; |
698 | 715 | ||
699 | for (i = 0; i < num_k8_northbridges; i++) { | 716 | for (i = 0; i < num_k8_northbridges; i++) { |
@@ -708,7 +725,7 @@ void gart_iommu_shutdown(void) | |||
708 | } | 725 | } |
709 | } | 726 | } |
710 | 727 | ||
711 | void __init gart_iommu_init(void) | 728 | int __init gart_iommu_init(void) |
712 | { | 729 | { |
713 | struct agp_kern_info info; | 730 | struct agp_kern_info info; |
714 | unsigned long iommu_start; | 731 | unsigned long iommu_start; |
@@ -718,7 +735,7 @@ void __init gart_iommu_init(void) | |||
718 | long i; | 735 | long i; |
719 | 736 | ||
720 | if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) | 737 | if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) |
721 | return; | 738 | return 0; |
722 | 739 | ||
723 | #ifndef CONFIG_AGP_AMD64 | 740 | #ifndef CONFIG_AGP_AMD64 |
724 | no_agp = 1; | 741 | no_agp = 1; |
@@ -730,35 +747,28 @@ void __init gart_iommu_init(void) | |||
730 | (agp_copy_info(agp_bridge, &info) < 0); | 747 | (agp_copy_info(agp_bridge, &info) < 0); |
731 | #endif | 748 | #endif |
732 | 749 | ||
733 | if (swiotlb) | ||
734 | return; | ||
735 | |||
736 | /* Did we detect a different HW IOMMU? */ | ||
737 | if (iommu_detected && !gart_iommu_aperture) | ||
738 | return; | ||
739 | |||
740 | if (no_iommu || | 750 | if (no_iommu || |
741 | (!force_iommu && max_pfn <= MAX_DMA32_PFN) || | 751 | (!force_iommu && max_pfn <= MAX_DMA32_PFN) || |
742 | !gart_iommu_aperture || | 752 | !gart_iommu_aperture || |
743 | (no_agp && init_k8_gatt(&info) < 0)) { | 753 | (no_agp && init_k8_gatt(&info) < 0)) { |
744 | if (max_pfn > MAX_DMA32_PFN) { | 754 | if (max_pfn > MAX_DMA32_PFN) { |
745 | printk(KERN_WARNING "More than 4GB of memory " | 755 | pr_warning("More than 4GB of memory but GART IOMMU not available.\n"); |
746 | "but GART IOMMU not available.\n"); | 756 | pr_warning("falling back to iommu=soft.\n"); |
747 | printk(KERN_WARNING "falling back to iommu=soft.\n"); | ||
748 | } | 757 | } |
749 | return; | 758 | return 0; |
750 | } | 759 | } |
751 | 760 | ||
752 | /* need to map that range */ | 761 | /* need to map that range */ |
753 | aper_size = info.aper_size << 20; | 762 | aper_size = info.aper_size << 20; |
754 | aper_base = info.aper_base; | 763 | aper_base = info.aper_base; |
755 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); | 764 | end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); |
765 | |||
756 | if (end_pfn > max_low_pfn_mapped) { | 766 | if (end_pfn > max_low_pfn_mapped) { |
757 | start_pfn = (aper_base>>PAGE_SHIFT); | 767 | start_pfn = (aper_base>>PAGE_SHIFT); |
758 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | 768 | init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); |
759 | } | 769 | } |
760 | 770 | ||
761 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); | 771 | pr_info("PCI-DMA: using GART IOMMU.\n"); |
762 | iommu_size = check_iommu_size(info.aper_base, aper_size); | 772 | iommu_size = check_iommu_size(info.aper_base, aper_size); |
763 | iommu_pages = iommu_size >> PAGE_SHIFT; | 773 | iommu_pages = iommu_size >> PAGE_SHIFT; |
764 | 774 | ||
@@ -773,8 +783,7 @@ void __init gart_iommu_init(void) | |||
773 | 783 | ||
774 | ret = dma_debug_resize_entries(iommu_pages); | 784 | ret = dma_debug_resize_entries(iommu_pages); |
775 | if (ret) | 785 | if (ret) |
776 | printk(KERN_DEBUG | 786 | pr_debug("PCI-DMA: Cannot trace all the entries\n"); |
777 | "PCI-DMA: Cannot trace all the entries\n"); | ||
778 | } | 787 | } |
779 | #endif | 788 | #endif |
780 | 789 | ||
@@ -784,15 +793,14 @@ void __init gart_iommu_init(void) | |||
784 | */ | 793 | */ |
785 | iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES); | 794 | iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES); |
786 | 795 | ||
787 | agp_memory_reserved = iommu_size; | 796 | pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", |
788 | printk(KERN_INFO | ||
789 | "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", | ||
790 | iommu_size >> 20); | 797 | iommu_size >> 20); |
791 | 798 | ||
792 | iommu_start = aper_size - iommu_size; | 799 | agp_memory_reserved = iommu_size; |
793 | iommu_bus_base = info.aper_base + iommu_start; | 800 | iommu_start = aper_size - iommu_size; |
794 | bad_dma_address = iommu_bus_base; | 801 | iommu_bus_base = info.aper_base + iommu_start; |
795 | iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); | 802 | bad_dma_addr = iommu_bus_base; |
803 | iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); | ||
796 | 804 | ||
797 | /* | 805 | /* |
798 | * Unmap the IOMMU part of the GART. The alias of the page is | 806 | * Unmap the IOMMU part of the GART. The alias of the page is |
@@ -814,7 +822,7 @@ void __init gart_iommu_init(void) | |||
814 | * the pages as Not-Present: | 822 | * the pages as Not-Present: |
815 | */ | 823 | */ |
816 | wbinvd(); | 824 | wbinvd(); |
817 | 825 | ||
818 | /* | 826 | /* |
819 | * Now all caches are flushed and we can safely enable | 827 | * Now all caches are flushed and we can safely enable |
820 | * GART hardware. Doing it early leaves the possibility | 828 | * GART hardware. Doing it early leaves the possibility |
@@ -838,6 +846,10 @@ void __init gart_iommu_init(void) | |||
838 | 846 | ||
839 | flush_gart(); | 847 | flush_gart(); |
840 | dma_ops = &gart_dma_ops; | 848 | dma_ops = &gart_dma_ops; |
849 | x86_platform.iommu_shutdown = gart_iommu_shutdown; | ||
850 | swiotlb = 0; | ||
851 | |||
852 | return 0; | ||
841 | } | 853 | } |
842 | 854 | ||
843 | void __init gart_parse_options(char *p) | 855 | void __init gart_parse_options(char *p) |
@@ -856,7 +868,7 @@ void __init gart_parse_options(char *p) | |||
856 | #endif | 868 | #endif |
857 | if (isdigit(*p) && get_option(&p, &arg)) | 869 | if (isdigit(*p) && get_option(&p, &arg)) |
858 | iommu_size = arg; | 870 | iommu_size = arg; |
859 | if (!strncmp(p, "fullflush", 8)) | 871 | if (!strncmp(p, "fullflush", 9)) |
860 | iommu_fullflush = 1; | 872 | iommu_fullflush = 1; |
861 | if (!strncmp(p, "nofullflush", 11)) | 873 | if (!strncmp(p, "nofullflush", 11)) |
862 | iommu_fullflush = 0; | 874 | iommu_fullflush = 0; |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index a3933d4330cd..22be12b60a8f 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | |||
33 | dma_addr_t bus = page_to_phys(page) + offset; | 33 | dma_addr_t bus = page_to_phys(page) + offset; |
34 | WARN_ON(size == 0); | 34 | WARN_ON(size == 0); |
35 | if (!check_addr("map_single", dev, bus, size)) | 35 | if (!check_addr("map_single", dev, bus, size)) |
36 | return bad_dma_address; | 36 | return DMA_ERROR_CODE; |
37 | flush_write_buffers(); | 37 | flush_write_buffers(); |
38 | return bus; | 38 | return bus; |
39 | } | 39 | } |
@@ -103,12 +103,3 @@ struct dma_map_ops nommu_dma_ops = { | |||
103 | .sync_sg_for_device = nommu_sync_sg_for_device, | 103 | .sync_sg_for_device = nommu_sync_sg_for_device, |
104 | .is_phys = 1, | 104 | .is_phys = 1, |
105 | }; | 105 | }; |
106 | |||
107 | void __init no_iommu_init(void) | ||
108 | { | ||
109 | if (dma_ops) | ||
110 | return; | ||
111 | |||
112 | force_iommu = 0; /* no HW IOMMU */ | ||
113 | dma_ops = &nommu_dma_ops; | ||
114 | } | ||
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index aaa6b7839f1e..e3c0a66b9e77 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -42,18 +42,28 @@ static struct dma_map_ops swiotlb_dma_ops = { | |||
42 | .dma_supported = NULL, | 42 | .dma_supported = NULL, |
43 | }; | 43 | }; |
44 | 44 | ||
45 | void __init pci_swiotlb_init(void) | 45 | /* |
46 | * pci_swiotlb_init - initialize swiotlb if necessary | ||
47 | * | ||
48 | * This returns non-zero if we are forced to use swiotlb (by the boot | ||
49 | * option). | ||
50 | */ | ||
51 | int __init pci_swiotlb_init(void) | ||
46 | { | 52 | { |
53 | int use_swiotlb = swiotlb | swiotlb_force; | ||
54 | |||
47 | /* don't initialize swiotlb if iommu=off (no_iommu=1) */ | 55 | /* don't initialize swiotlb if iommu=off (no_iommu=1) */ |
48 | #ifdef CONFIG_X86_64 | 56 | #ifdef CONFIG_X86_64 |
49 | if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)) | 57 | if (!no_iommu && max_pfn > MAX_DMA32_PFN) |
50 | swiotlb = 1; | 58 | swiotlb = 1; |
51 | #endif | 59 | #endif |
52 | if (swiotlb_force) | 60 | if (swiotlb_force) |
53 | swiotlb = 1; | 61 | swiotlb = 1; |
62 | |||
54 | if (swiotlb) { | 63 | if (swiotlb) { |
55 | printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); | 64 | swiotlb_init(0); |
56 | swiotlb_init(); | ||
57 | dma_ops = &swiotlb_dma_ops; | 65 | dma_ops = &swiotlb_dma_ops; |
58 | } | 66 | } |
67 | |||
68 | return use_swiotlb; | ||
59 | } | 69 | } |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 5284cd2b5776..744508e7cfdd 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/clockchips.h> | 10 | #include <linux/clockchips.h> |
11 | #include <linux/random.h> | 11 | #include <linux/random.h> |
12 | #include <trace/events/power.h> | 12 | #include <trace/events/power.h> |
13 | #include <linux/hw_breakpoint.h> | ||
13 | #include <asm/system.h> | 14 | #include <asm/system.h> |
14 | #include <asm/apic.h> | 15 | #include <asm/apic.h> |
15 | #include <asm/syscalls.h> | 16 | #include <asm/syscalls.h> |
@@ -17,6 +18,7 @@ | |||
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <asm/i387.h> | 19 | #include <asm/i387.h> |
19 | #include <asm/ds.h> | 20 | #include <asm/ds.h> |
21 | #include <asm/debugreg.h> | ||
20 | 22 | ||
21 | unsigned long idle_halt; | 23 | unsigned long idle_halt; |
22 | EXPORT_SYMBOL(idle_halt); | 24 | EXPORT_SYMBOL(idle_halt); |
@@ -103,14 +105,7 @@ void flush_thread(void) | |||
103 | } | 105 | } |
104 | #endif | 106 | #endif |
105 | 107 | ||
106 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | 108 | flush_ptrace_hw_breakpoint(tsk); |
107 | |||
108 | tsk->thread.debugreg0 = 0; | ||
109 | tsk->thread.debugreg1 = 0; | ||
110 | tsk->thread.debugreg2 = 0; | ||
111 | tsk->thread.debugreg3 = 0; | ||
112 | tsk->thread.debugreg6 = 0; | ||
113 | tsk->thread.debugreg7 = 0; | ||
114 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | 109 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); |
115 | /* | 110 | /* |
116 | * Forget coprocessor state.. | 111 | * Forget coprocessor state.. |
@@ -192,16 +187,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
192 | else if (next->debugctlmsr != prev->debugctlmsr) | 187 | else if (next->debugctlmsr != prev->debugctlmsr) |
193 | update_debugctlmsr(next->debugctlmsr); | 188 | update_debugctlmsr(next->debugctlmsr); |
194 | 189 | ||
195 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | ||
196 | set_debugreg(next->debugreg0, 0); | ||
197 | set_debugreg(next->debugreg1, 1); | ||
198 | set_debugreg(next->debugreg2, 2); | ||
199 | set_debugreg(next->debugreg3, 3); | ||
200 | /* no 4 and 5 */ | ||
201 | set_debugreg(next->debugreg6, 6); | ||
202 | set_debugreg(next->debugreg7, 7); | ||
203 | } | ||
204 | |||
205 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ | 190 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ |
206 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | 191 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { |
207 | /* prev and next are different */ | 192 | /* prev and next are different */ |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4cf79567cdab..075580b35682 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #include <asm/idle.h> | 58 | #include <asm/idle.h> |
59 | #include <asm/syscalls.h> | 59 | #include <asm/syscalls.h> |
60 | #include <asm/ds.h> | 60 | #include <asm/ds.h> |
61 | #include <asm/debugreg.h> | ||
61 | 62 | ||
62 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 63 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
63 | 64 | ||
@@ -134,7 +135,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
134 | ss = regs->ss & 0xffff; | 135 | ss = regs->ss & 0xffff; |
135 | gs = get_user_gs(regs); | 136 | gs = get_user_gs(regs); |
136 | } else { | 137 | } else { |
137 | sp = (unsigned long) (®s->sp); | 138 | sp = kernel_stack_pointer(regs); |
138 | savesegment(ss, ss); | 139 | savesegment(ss, ss); |
139 | savesegment(gs, gs); | 140 | savesegment(gs, gs); |
140 | } | 141 | } |
@@ -187,7 +188,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
187 | 188 | ||
188 | void show_regs(struct pt_regs *regs) | 189 | void show_regs(struct pt_regs *regs) |
189 | { | 190 | { |
190 | __show_regs(regs, 1); | 191 | show_registers(regs); |
191 | show_trace(NULL, regs, ®s->sp, regs->bp); | 192 | show_trace(NULL, regs, ®s->sp, regs->bp); |
192 | } | 193 | } |
193 | 194 | ||
@@ -259,7 +260,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
259 | 260 | ||
260 | task_user_gs(p) = get_user_gs(regs); | 261 | task_user_gs(p) = get_user_gs(regs); |
261 | 262 | ||
263 | p->thread.io_bitmap_ptr = NULL; | ||
262 | tsk = current; | 264 | tsk = current; |
265 | err = -ENOMEM; | ||
266 | |||
267 | memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); | ||
268 | |||
263 | if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { | 269 | if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { |
264 | p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, | 270 | p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, |
265 | IO_BITMAP_BYTES, GFP_KERNEL); | 271 | IO_BITMAP_BYTES, GFP_KERNEL); |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index eb62cbcaa490..a98fe88fab64 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <asm/idle.h> | 52 | #include <asm/idle.h> |
53 | #include <asm/syscalls.h> | 53 | #include <asm/syscalls.h> |
54 | #include <asm/ds.h> | 54 | #include <asm/ds.h> |
55 | #include <asm/debugreg.h> | ||
55 | 56 | ||
56 | asmlinkage extern void ret_from_fork(void); | 57 | asmlinkage extern void ret_from_fork(void); |
57 | 58 | ||
@@ -226,8 +227,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
226 | 227 | ||
227 | void show_regs(struct pt_regs *regs) | 228 | void show_regs(struct pt_regs *regs) |
228 | { | 229 | { |
229 | printk(KERN_INFO "CPU %d:", smp_processor_id()); | 230 | show_registers(regs); |
230 | __show_regs(regs, 1); | ||
231 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | 231 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); |
232 | } | 232 | } |
233 | 233 | ||
@@ -297,12 +297,16 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
297 | 297 | ||
298 | p->thread.fs = me->thread.fs; | 298 | p->thread.fs = me->thread.fs; |
299 | p->thread.gs = me->thread.gs; | 299 | p->thread.gs = me->thread.gs; |
300 | p->thread.io_bitmap_ptr = NULL; | ||
300 | 301 | ||
301 | savesegment(gs, p->thread.gsindex); | 302 | savesegment(gs, p->thread.gsindex); |
302 | savesegment(fs, p->thread.fsindex); | 303 | savesegment(fs, p->thread.fsindex); |
303 | savesegment(es, p->thread.es); | 304 | savesegment(es, p->thread.es); |
304 | savesegment(ds, p->thread.ds); | 305 | savesegment(ds, p->thread.ds); |
305 | 306 | ||
307 | err = -ENOMEM; | ||
308 | memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); | ||
309 | |||
306 | if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { | 310 | if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { |
307 | p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); | 311 | p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); |
308 | if (!p->thread.io_bitmap_ptr) { | 312 | if (!p->thread.io_bitmap_ptr) { |
@@ -341,6 +345,7 @@ out: | |||
341 | kfree(p->thread.io_bitmap_ptr); | 345 | kfree(p->thread.io_bitmap_ptr); |
342 | p->thread.io_bitmap_max = 0; | 346 | p->thread.io_bitmap_max = 0; |
343 | } | 347 | } |
348 | |||
344 | return err; | 349 | return err; |
345 | } | 350 | } |
346 | 351 | ||
@@ -495,6 +500,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
495 | */ | 500 | */ |
496 | if (preload_fpu) | 501 | if (preload_fpu) |
497 | __math_state_restore(); | 502 | __math_state_restore(); |
503 | |||
498 | return prev_p; | 504 | return prev_p; |
499 | } | 505 | } |
500 | 506 | ||
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 7b058a2dc66a..04d182a7cfdb 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/seccomp.h> | 22 | #include <linux/seccomp.h> |
23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | #include <linux/perf_event.h> | ||
26 | #include <linux/hw_breakpoint.h> | ||
25 | 27 | ||
26 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
27 | #include <asm/pgtable.h> | 29 | #include <asm/pgtable.h> |
@@ -34,6 +36,7 @@ | |||
34 | #include <asm/prctl.h> | 36 | #include <asm/prctl.h> |
35 | #include <asm/proto.h> | 37 | #include <asm/proto.h> |
36 | #include <asm/ds.h> | 38 | #include <asm/ds.h> |
39 | #include <asm/hw_breakpoint.h> | ||
37 | 40 | ||
38 | #include "tls.h" | 41 | #include "tls.h" |
39 | 42 | ||
@@ -49,6 +52,118 @@ enum x86_regset { | |||
49 | REGSET_IOPERM32, | 52 | REGSET_IOPERM32, |
50 | }; | 53 | }; |
51 | 54 | ||
55 | struct pt_regs_offset { | ||
56 | const char *name; | ||
57 | int offset; | ||
58 | }; | ||
59 | |||
60 | #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} | ||
61 | #define REG_OFFSET_END {.name = NULL, .offset = 0} | ||
62 | |||
63 | static const struct pt_regs_offset regoffset_table[] = { | ||
64 | #ifdef CONFIG_X86_64 | ||
65 | REG_OFFSET_NAME(r15), | ||
66 | REG_OFFSET_NAME(r14), | ||
67 | REG_OFFSET_NAME(r13), | ||
68 | REG_OFFSET_NAME(r12), | ||
69 | REG_OFFSET_NAME(r11), | ||
70 | REG_OFFSET_NAME(r10), | ||
71 | REG_OFFSET_NAME(r9), | ||
72 | REG_OFFSET_NAME(r8), | ||
73 | #endif | ||
74 | REG_OFFSET_NAME(bx), | ||
75 | REG_OFFSET_NAME(cx), | ||
76 | REG_OFFSET_NAME(dx), | ||
77 | REG_OFFSET_NAME(si), | ||
78 | REG_OFFSET_NAME(di), | ||
79 | REG_OFFSET_NAME(bp), | ||
80 | REG_OFFSET_NAME(ax), | ||
81 | #ifdef CONFIG_X86_32 | ||
82 | REG_OFFSET_NAME(ds), | ||
83 | REG_OFFSET_NAME(es), | ||
84 | REG_OFFSET_NAME(fs), | ||
85 | REG_OFFSET_NAME(gs), | ||
86 | #endif | ||
87 | REG_OFFSET_NAME(orig_ax), | ||
88 | REG_OFFSET_NAME(ip), | ||
89 | REG_OFFSET_NAME(cs), | ||
90 | REG_OFFSET_NAME(flags), | ||
91 | REG_OFFSET_NAME(sp), | ||
92 | REG_OFFSET_NAME(ss), | ||
93 | REG_OFFSET_END, | ||
94 | }; | ||
95 | |||
96 | /** | ||
97 | * regs_query_register_offset() - query register offset from its name | ||
98 | * @name: the name of a register | ||
99 | * | ||
100 | * regs_query_register_offset() returns the offset of a register in struct | ||
101 | * pt_regs from its name. If the name is invalid, this returns -EINVAL. | ||
102 | */ | ||
103 | int regs_query_register_offset(const char *name) | ||
104 | { | ||
105 | const struct pt_regs_offset *roff; | ||
106 | for (roff = regoffset_table; roff->name != NULL; roff++) | ||
107 | if (!strcmp(roff->name, name)) | ||
108 | return roff->offset; | ||
109 | return -EINVAL; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * regs_query_register_name() - query register name from its offset | ||
114 | * @offset: the offset of a register in struct pt_regs. | ||
115 | * | ||
116 | * regs_query_register_name() returns the name of a register from its | ||
117 | * offset in struct pt_regs. If the @offset is invalid, this returns NULL. | ||
118 | */ | ||
119 | const char *regs_query_register_name(unsigned int offset) | ||
120 | { | ||
121 | const struct pt_regs_offset *roff; | ||
122 | for (roff = regoffset_table; roff->name != NULL; roff++) | ||
123 | if (roff->offset == offset) | ||
124 | return roff->name; | ||
125 | return NULL; | ||
126 | } | ||
127 | |||
128 | static const int arg_offs_table[] = { | ||
129 | #ifdef CONFIG_X86_32 | ||
130 | [0] = offsetof(struct pt_regs, ax), | ||
131 | [1] = offsetof(struct pt_regs, dx), | ||
132 | [2] = offsetof(struct pt_regs, cx) | ||
133 | #else /* CONFIG_X86_64 */ | ||
134 | [0] = offsetof(struct pt_regs, di), | ||
135 | [1] = offsetof(struct pt_regs, si), | ||
136 | [2] = offsetof(struct pt_regs, dx), | ||
137 | [3] = offsetof(struct pt_regs, cx), | ||
138 | [4] = offsetof(struct pt_regs, r8), | ||
139 | [5] = offsetof(struct pt_regs, r9) | ||
140 | #endif | ||
141 | }; | ||
142 | |||
143 | /** | ||
144 | * regs_get_argument_nth() - get Nth argument at function call | ||
145 | * @regs: pt_regs which contains registers at function entry. | ||
146 | * @n: argument number. | ||
147 | * | ||
148 | * regs_get_argument_nth() returns the @n th argument of a function call. | ||
149 | * Since the kernel stack is usually changed right after function entry, | ||
150 | * this must be used at function entry. If the @n th argument is NOT on | ||
151 | * the kernel stack or in pt_regs, this returns 0. | ||
152 | */ | ||
153 | unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) | ||
154 | { | ||
155 | if (n < ARRAY_SIZE(arg_offs_table)) | ||
156 | return *(unsigned long *)((char *)regs + arg_offs_table[n]); | ||
157 | else { | ||
158 | /* | ||
159 | * The typical case: arg n is on the stack. | ||
160 | * (Note: stack[0] = return address, so skip it) | ||
161 | */ | ||
162 | n -= ARRAY_SIZE(arg_offs_table); | ||
163 | return regs_get_kernel_stack_nth(regs, 1 + n); | ||
164 | } | ||
165 | } | ||
166 | |||
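A hedged sketch of how a tracer might consume the helpers added above; the kprobe, the handler name and the printout are hypothetical, and only regs_query_register_offset() and regs_get_argument_nth() from this hunk are assumed:

	/* Illustrative only: runs at function entry, e.g. from a kprobes pre-handler. */
	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		int off = regs_query_register_offset("ip");	/* -EINVAL if the name is unknown */
		unsigned long arg0 = regs_get_argument_nth(regs, 0);	/* di on x86-64, ax on i386 */

		if (off >= 0)
			pr_info("ip=%lx first arg=%lx\n",
				*(unsigned long *)((char *)regs + off), arg0);
		return 0;
	}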
52 | /* | 167 | /* |
53 | * does not yet catch signals sent when the child dies. | 168 | * does not yet catch signals sent when the child dies. |
54 | * in exit.c or in signal.c. | 169 | * in exit.c or in signal.c. |
@@ -137,11 +252,6 @@ static int set_segment_reg(struct task_struct *task, | |||
137 | return 0; | 252 | return 0; |
138 | } | 253 | } |
139 | 254 | ||
140 | static unsigned long debugreg_addr_limit(struct task_struct *task) | ||
141 | { | ||
142 | return TASK_SIZE - 3; | ||
143 | } | ||
144 | |||
145 | #else /* CONFIG_X86_64 */ | 255 | #else /* CONFIG_X86_64 */ |
146 | 256 | ||
147 | #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) | 257 | #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) |
@@ -266,15 +376,6 @@ static int set_segment_reg(struct task_struct *task, | |||
266 | return 0; | 376 | return 0; |
267 | } | 377 | } |
268 | 378 | ||
269 | static unsigned long debugreg_addr_limit(struct task_struct *task) | ||
270 | { | ||
271 | #ifdef CONFIG_IA32_EMULATION | ||
272 | if (test_tsk_thread_flag(task, TIF_IA32)) | ||
273 | return IA32_PAGE_OFFSET - 3; | ||
274 | #endif | ||
275 | return TASK_SIZE_MAX - 7; | ||
276 | } | ||
277 | |||
278 | #endif /* CONFIG_X86_32 */ | 379 | #endif /* CONFIG_X86_32 */ |
279 | 380 | ||
280 | static unsigned long get_flags(struct task_struct *task) | 381 | static unsigned long get_flags(struct task_struct *task) |
@@ -454,99 +555,239 @@ static int genregs_set(struct task_struct *target, | |||
454 | return ret; | 555 | return ret; |
455 | } | 556 | } |
456 | 557 | ||
558 | static void ptrace_triggered(struct perf_event *bp, void *data) | ||
559 | { | ||
560 | int i; | ||
561 | struct thread_struct *thread = &(current->thread); | ||
562 | |||
563 | /* | ||
564 | * Store in the virtual DR6 register the fact that the breakpoint | ||
565 | * was hit so the thread's debugger will see it. | ||
566 | */ | ||
567 | for (i = 0; i < HBP_NUM; i++) { | ||
568 | if (thread->ptrace_bps[i] == bp) | ||
569 | break; | ||
570 | } | ||
571 | |||
572 | thread->debugreg6 |= (DR_TRAP0 << i); | ||
573 | } | ||
574 | |||
457 | /* | 575 | /* |
458 | * This function is trivial and will be inlined by the compiler. | 576 | * Walk through all ptrace breakpoints for this thread and |
459 | * Having it separates the implementation details of debug | 577 | * build the dr7 value on top of their attributes. |
460 | * registers from the interface details of ptrace. | 578 | * |
461 | */ | 579 | */ |
462 | static unsigned long ptrace_get_debugreg(struct task_struct *child, int n) | 580 | static unsigned long ptrace_get_dr7(struct perf_event *bp[]) |
463 | { | 581 | { |
464 | switch (n) { | 582 | int i; |
465 | case 0: return child->thread.debugreg0; | 583 | int dr7 = 0; |
466 | case 1: return child->thread.debugreg1; | 584 | struct arch_hw_breakpoint *info; |
467 | case 2: return child->thread.debugreg2; | 585 | |
468 | case 3: return child->thread.debugreg3; | 586 | for (i = 0; i < HBP_NUM; i++) { |
469 | case 6: return child->thread.debugreg6; | 587 | if (bp[i] && !bp[i]->attr.disabled) { |
470 | case 7: return child->thread.debugreg7; | 588 | info = counter_arch_bp(bp[i]); |
589 | dr7 |= encode_dr7(i, info->len, info->type); | ||
590 | } | ||
471 | } | 591 | } |
472 | return 0; | 592 | |
593 | return dr7; | ||
473 | } | 594 | } |
474 | 595 | ||
475 | static int ptrace_set_debugreg(struct task_struct *child, | 596 | static struct perf_event * |
476 | int n, unsigned long data) | 597 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, |
598 | struct task_struct *tsk, int disabled) | ||
477 | { | 599 | { |
478 | int i; | 600 | int err; |
601 | int gen_len, gen_type; | ||
602 | DEFINE_BREAKPOINT_ATTR(attr); | ||
479 | 603 | ||
480 | if (unlikely(n == 4 || n == 5)) | 604 | /* |
481 | return -EIO; | 605 | * We should have at least an inactive breakpoint at this |
606 | * slot. It means the user is writing dr7 without having | ||
607 | * written the address register first | ||
608 | */ | ||
609 | if (!bp) | ||
610 | return ERR_PTR(-EINVAL); | ||
482 | 611 | ||
483 | if (n < 4 && unlikely(data >= debugreg_addr_limit(child))) | 612 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); |
484 | return -EIO; | 613 | if (err) |
614 | return ERR_PTR(err); | ||
485 | 615 | ||
486 | switch (n) { | 616 | attr = bp->attr; |
487 | case 0: child->thread.debugreg0 = data; break; | 617 | attr.bp_len = gen_len; |
488 | case 1: child->thread.debugreg1 = data; break; | 618 | attr.bp_type = gen_type; |
489 | case 2: child->thread.debugreg2 = data; break; | 619 | attr.disabled = disabled; |
490 | case 3: child->thread.debugreg3 = data; break; | ||
491 | 620 | ||
492 | case 6: | 621 | return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); |
493 | if ((data & ~0xffffffffUL) != 0) | 622 | } |
494 | return -EIO; | 623 | |
495 | child->thread.debugreg6 = data; | 624 | /* |
496 | break; | 625 | * Handle ptrace writes to debug register 7. |
626 | */ | ||
627 | static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) | ||
628 | { | ||
629 | struct thread_struct *thread = &(tsk->thread); | ||
630 | unsigned long old_dr7; | ||
631 | int i, orig_ret = 0, rc = 0; | ||
632 | int enabled, second_pass = 0; | ||
633 | unsigned len, type; | ||
634 | struct perf_event *bp; | ||
635 | |||
636 | data &= ~DR_CONTROL_RESERVED; | ||
637 | old_dr7 = ptrace_get_dr7(thread->ptrace_bps); | ||
638 | restore: | ||
639 | /* | ||
640 | * Loop through all the hardware breakpoints, making the | ||
641 | * appropriate changes to each. | ||
642 | */ | ||
643 | for (i = 0; i < HBP_NUM; i++) { | ||
644 | enabled = decode_dr7(data, i, &len, &type); | ||
645 | bp = thread->ptrace_bps[i]; | ||
646 | |||
647 | if (!enabled) { | ||
648 | if (bp) { | ||
649 | /* | ||
650 | * Don't unregister the breakpoints right away, | ||
651 | * unless all register_user_hw_breakpoint() | ||
652 | * requests have succeeded. This prevents | ||
653 | * any window of opportunity for debug | ||
654 | * register grabbing by other users. | ||
655 | */ | ||
656 | if (!second_pass) | ||
657 | continue; | ||
658 | |||
659 | thread->ptrace_bps[i] = NULL; | ||
660 | bp = ptrace_modify_breakpoint(bp, len, type, | ||
661 | tsk, 1); | ||
662 | if (IS_ERR(bp)) { | ||
663 | rc = PTR_ERR(bp); | ||
664 | thread->ptrace_bps[i] = NULL; | ||
665 | break; | ||
666 | } | ||
667 | thread->ptrace_bps[i] = bp; | ||
668 | } | ||
669 | continue; | ||
670 | } | ||
671 | |||
672 | bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0); | ||
673 | |||
674 | /* Incorrect bp, or we have a bug in bp API */ | ||
675 | if (IS_ERR(bp)) { | ||
676 | rc = PTR_ERR(bp); | ||
677 | thread->ptrace_bps[i] = NULL; | ||
678 | break; | ||
679 | } | ||
680 | thread->ptrace_bps[i] = bp; | ||
681 | } | ||
682 | /* | ||
683 | * Make a second pass to free the remaining unused breakpoints | ||
684 | * or to restore the original breakpoints if an error occurred. | ||
685 | */ | ||
686 | if (!second_pass) { | ||
687 | second_pass = 1; | ||
688 | if (rc < 0) { | ||
689 | orig_ret = rc; | ||
690 | data = old_dr7; | ||
691 | } | ||
692 | goto restore; | ||
693 | } | ||
694 | return ((orig_ret < 0) ? orig_ret : rc); | ||
695 | } | ||
696 | |||
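ptrace_write_dr7() leans on the hw_breakpoint helpers decode_dr7()/encode_dr7() to pick apart the user-supplied control word. As a reminder of the DR7 layout those helpers operate on (architectural layout per the Intel SDM; this is an illustrative sketch, not the kernel's implementation), breakpoint slot n owns a local-enable bit at bit 2*n, a 2-bit R/Wn type field at bit 16 + 4*n and a 2-bit LENn field at bit 18 + 4*n:

	/* Illustrative only: the DR7 bits that enable slot n with a given type/len. */
	static unsigned long example_dr7_slot(int n, unsigned int type, unsigned int len)
	{
		return ((unsigned long)(type & 0x3) << (16 + 4 * n)) |	/* R/Wn */
		       ((unsigned long)(len  & 0x3) << (18 + 4 * n)) |	/* LENn */
		       (1UL << (2 * n));				/* Ln: local enable */
	}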
697 | /* | ||
698 | * Handle PTRACE_PEEKUSR calls for the debug register area. | ||
699 | */ | ||
700 | static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) | ||
701 | { | ||
702 | struct thread_struct *thread = &(tsk->thread); | ||
703 | unsigned long val = 0; | ||
497 | 704 | ||
498 | case 7: | 705 | if (n < HBP_NUM) { |
706 | struct perf_event *bp; | ||
707 | bp = thread->ptrace_bps[n]; | ||
708 | if (!bp) | ||
709 | return 0; | ||
710 | val = bp->hw.info.address; | ||
711 | } else if (n == 6) { | ||
712 | val = thread->debugreg6; | ||
713 | } else if (n == 7) { | ||
714 | val = ptrace_get_dr7(thread->ptrace_bps); | ||
715 | } | ||
716 | return val; | ||
717 | } | ||
718 | |||
719 | static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | ||
720 | unsigned long addr) | ||
721 | { | ||
722 | struct perf_event *bp; | ||
723 | struct thread_struct *t = &tsk->thread; | ||
724 | DEFINE_BREAKPOINT_ATTR(attr); | ||
725 | |||
726 | if (!t->ptrace_bps[nr]) { | ||
499 | /* | 727 | /* |
500 | * Sanity-check data. Take one half-byte at once with | 728 | * Use a stub len and type to register (reserve) an inactive but |
501 | * check = (val >> (16 + 4*i)) & 0xf. It contains the | 729 | * correct bp |
502 | * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits | ||
503 | * 2 and 3 are LENi. Given a list of invalid values, | ||
504 | * we do mask |= 1 << invalid_value, so that | ||
505 | * (mask >> check) & 1 is a correct test for invalid | ||
506 | * values. | ||
507 | * | ||
508 | * R/Wi contains the type of the breakpoint / | ||
509 | * watchpoint, LENi contains the length of the watched | ||
510 | * data in the watchpoint case. | ||
511 | * | ||
512 | * The invalid values are: | ||
513 | * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit] | ||
514 | * - R/Wi == 0x10 (break on I/O reads or writes), so | ||
515 | * mask |= 0x4444. | ||
516 | * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= | ||
517 | * 0x1110. | ||
518 | * | ||
519 | * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54. | ||
520 | * | ||
521 | * See the Intel Manual "System Programming Guide", | ||
522 | * 15.2.4 | ||
523 | * | ||
524 | * Note that LENi == 0x10 is defined on x86_64 in long | ||
525 | * mode (i.e. even for 32-bit userspace software, but | ||
526 | * 64-bit kernel), so the x86_64 mask value is 0x5454. | ||
527 | * See the AMD manual no. 24593 (AMD64 System Programming) | ||
528 | */ | 730 | */ |
529 | #ifdef CONFIG_X86_32 | 731 | attr.bp_addr = addr; |
530 | #define DR7_MASK 0x5f54 | 732 | attr.bp_len = HW_BREAKPOINT_LEN_1; |
531 | #else | 733 | attr.bp_type = HW_BREAKPOINT_W; |
532 | #define DR7_MASK 0x5554 | 734 | attr.disabled = 1; |
533 | #endif | 735 | |
534 | data &= ~DR_CONTROL_RESERVED; | 736 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); |
535 | for (i = 0; i < 4; i++) | 737 | } else { |
536 | if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1) | 738 | bp = t->ptrace_bps[nr]; |
537 | return -EIO; | 739 | t->ptrace_bps[nr] = NULL; |
538 | child->thread.debugreg7 = data; | 740 | |
539 | if (data) | 741 | attr = bp->attr; |
540 | set_tsk_thread_flag(child, TIF_DEBUG); | 742 | attr.bp_addr = addr; |
541 | else | 743 | bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); |
542 | clear_tsk_thread_flag(child, TIF_DEBUG); | ||
543 | break; | ||
544 | } | 744 | } |
745 | /* | ||
746 | * CHECKME: the previous code returned -EIO if the addr wasn't a | ||
747 | * valid task virtual addr. The new one will return -EINVAL in this | ||
748 | * case. | ||
749 | * -EINVAL may be what we want for in-kernel breakpoint users, but | ||
750 | * -EIO looks better for ptrace, since we are refusing a register write | ||
751 | * from the user. And anyway this matches the previous behaviour. | ||
752 | */ | ||
753 | if (IS_ERR(bp)) | ||
754 | return PTR_ERR(bp); | ||
755 | |||
756 | t->ptrace_bps[nr] = bp; | ||
545 | 757 | ||
546 | return 0; | 758 | return 0; |
547 | } | 759 | } |
548 | 760 | ||
549 | /* | 761 | /* |
762 | * Handle PTRACE_POKEUSR calls for the debug register area. | ||
763 | */ | ||
764 | int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) | ||
765 | { | ||
766 | struct thread_struct *thread = &(tsk->thread); | ||
767 | int rc = 0; | ||
768 | |||
769 | /* There are no DR4 or DR5 registers */ | ||
770 | if (n == 4 || n == 5) | ||
771 | return -EIO; | ||
772 | |||
773 | if (n == 6) { | ||
774 | thread->debugreg6 = val; | ||
775 | goto ret_path; | ||
776 | } | ||
777 | if (n < HBP_NUM) { | ||
778 | rc = ptrace_set_breakpoint_addr(tsk, n, val); | ||
779 | if (rc) | ||
780 | return rc; | ||
781 | } | ||
782 | /* All that's left is DR7 */ | ||
783 | if (n == 7) | ||
784 | rc = ptrace_write_dr7(tsk, val); | ||
785 | |||
786 | ret_path: | ||
787 | return rc; | ||
788 | } | ||
789 | |||
790 | /* | ||
550 | * These access the current or another (stopped) task's io permission | 791 | * These access the current or another (stopped) task's io permission |
551 | * bitmap for debugging or core dump. | 792 | * bitmap for debugging or core dump. |
552 | */ | 793 | */ |
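The user-space side of this rework is unchanged: a debugger still programs the slots through PTRACE_POKEUSER on the u_debugreg area of struct user, and the kernel now turns those writes into perf breakpoint events behind the scenes. A hedged sketch (error handling omitted; the watchpoint parameters are illustrative):

	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/user.h>
	#include <stddef.h>

	/* Arm a 4-byte write watchpoint in slot 0 of a stopped tracee. */
	static void set_watchpoint(pid_t pid, unsigned long addr)
	{
		/* DR0 = address to watch */
		ptrace(PTRACE_POKEUSER, pid,
		       offsetof(struct user, u_debugreg[0]), addr);
		/* DR7 = L0 enable | R/W0 = write (01b) | LEN0 = 4 bytes (11b) */
		ptrace(PTRACE_POKEUSER, pid,
		       offsetof(struct user, u_debugreg[7]),
		       0x1UL | (0x1UL << 16) | (0x3UL << 18));
	}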
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index f93078746e00..2b97fc5b124e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -23,7 +23,7 @@ | |||
23 | # include <linux/ctype.h> | 23 | # include <linux/ctype.h> |
24 | # include <linux/mc146818rtc.h> | 24 | # include <linux/mc146818rtc.h> |
25 | #else | 25 | #else |
26 | # include <asm/iommu.h> | 26 | # include <asm/x86_init.h> |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | /* | 29 | /* |
@@ -622,7 +622,7 @@ void native_machine_shutdown(void) | |||
622 | #endif | 622 | #endif |
623 | 623 | ||
624 | #ifdef CONFIG_X86_64 | 624 | #ifdef CONFIG_X86_64 |
625 | pci_iommu_shutdown(); | 625 | x86_platform.iommu_shutdown(); |
626 | #endif | 626 | #endif |
627 | } | 627 | } |
628 | 628 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 2a34f9c5be21..82e88cdda9bc 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -109,6 +109,7 @@ | |||
109 | #ifdef CONFIG_X86_64 | 109 | #ifdef CONFIG_X86_64 |
110 | #include <asm/numa_64.h> | 110 | #include <asm/numa_64.h> |
111 | #endif | 111 | #endif |
112 | #include <asm/mce.h> | ||
112 | 113 | ||
113 | /* | 114 | /* |
114 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. | 115 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. |
@@ -247,7 +248,7 @@ EXPORT_SYMBOL(edd); | |||
247 | * from boot_params into a safe place. | 248 | * from boot_params into a safe place. |
248 | * | 249 | * |
249 | */ | 250 | */ |
250 | static inline void copy_edd(void) | 251 | static inline void __init copy_edd(void) |
251 | { | 252 | { |
252 | memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, | 253 | memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, |
253 | sizeof(edd.mbr_signature)); | 254 | sizeof(edd.mbr_signature)); |
@@ -256,7 +257,7 @@ static inline void copy_edd(void) | |||
256 | edd.edd_info_nr = boot_params.eddbuf_entries; | 257 | edd.edd_info_nr = boot_params.eddbuf_entries; |
257 | } | 258 | } |
258 | #else | 259 | #else |
259 | static inline void copy_edd(void) | 260 | static inline void __init copy_edd(void) |
260 | { | 261 | { |
261 | } | 262 | } |
262 | #endif | 263 | #endif |
@@ -1031,6 +1032,8 @@ void __init setup_arch(char **cmdline_p) | |||
1031 | #endif | 1032 | #endif |
1032 | #endif | 1033 | #endif |
1033 | x86_init.oem.banner(); | 1034 | x86_init.oem.banner(); |
1035 | |||
1036 | mcheck_init(); | ||
1034 | } | 1037 | } |
1035 | 1038 | ||
1036 | #ifdef CONFIG_X86_32 | 1039 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 6a44a76055ad..fbf3b07c8567 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -799,15 +799,6 @@ static void do_signal(struct pt_regs *regs) | |||
799 | 799 | ||
800 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 800 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
801 | if (signr > 0) { | 801 | if (signr > 0) { |
802 | /* | ||
803 | * Re-enable any watchpoints before delivering the | ||
804 | * signal to user space. The processor register will | ||
805 | * have been cleared if the watchpoint triggered | ||
806 | * inside the kernel. | ||
807 | */ | ||
808 | if (current->thread.debugreg7) | ||
809 | set_debugreg(current->thread.debugreg7, 7); | ||
810 | |||
811 | /* Whee! Actually deliver the signal. */ | 802 | /* Whee! Actually deliver the signal. */ |
812 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { | 803 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { |
813 | /* | 804 | /* |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 565ebc65920e..324f2a44c221 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1250,16 +1250,7 @@ static void __ref remove_cpu_from_maps(int cpu) | |||
1250 | void cpu_disable_common(void) | 1250 | void cpu_disable_common(void) |
1251 | { | 1251 | { |
1252 | int cpu = smp_processor_id(); | 1252 | int cpu = smp_processor_id(); |
1253 | /* | ||
1254 | * HACK: | ||
1255 | * Allow any queued timer interrupts to get serviced | ||
1256 | * This is only a temporary solution until we cleanup | ||
1257 | * fixup_irqs as we do for IA64. | ||
1258 | */ | ||
1259 | local_irq_enable(); | ||
1260 | mdelay(1); | ||
1261 | 1253 | ||
1262 | local_irq_disable(); | ||
1263 | remove_siblinginfo(cpu); | 1254 | remove_siblinginfo(cpu); |
1264 | 1255 | ||
1265 | /* It's now safe to remove this processor from the online map */ | 1256 | /* It's now safe to remove this processor from the online map */ |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 7e37dcee0cc3..33399176512a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -529,77 +529,56 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
529 | dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) | 529 | dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) |
530 | { | 530 | { |
531 | struct task_struct *tsk = current; | 531 | struct task_struct *tsk = current; |
532 | unsigned long condition; | 532 | unsigned long dr6; |
533 | int si_code; | 533 | int si_code; |
534 | 534 | ||
535 | get_debugreg(condition, 6); | 535 | get_debugreg(dr6, 6); |
536 | 536 | ||
537 | /* Catch kmemcheck conditions first of all! */ | 537 | /* Catch kmemcheck conditions first of all! */ |
538 | if (condition & DR_STEP && kmemcheck_trap(regs)) | 538 | if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) |
539 | return; | 539 | return; |
540 | 540 | ||
541 | /* DR6 may or may not be cleared by the CPU */ | ||
542 | set_debugreg(0, 6); | ||
541 | /* | 543 | /* |
542 | * The processor cleared BTF, so don't mark that we need it set. | 544 | * The processor cleared BTF, so don't mark that we need it set. |
543 | */ | 545 | */ |
544 | clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR); | 546 | clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR); |
545 | tsk->thread.debugctlmsr = 0; | 547 | tsk->thread.debugctlmsr = 0; |
546 | 548 | ||
547 | if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, | 549 | /* Store the virtualized DR6 value */ |
548 | SIGTRAP) == NOTIFY_STOP) | 550 | tsk->thread.debugreg6 = dr6; |
551 | |||
552 | if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, | ||
553 | SIGTRAP) == NOTIFY_STOP) | ||
549 | return; | 554 | return; |
550 | 555 | ||
551 | /* It's safe to allow irq's after DR6 has been saved */ | 556 | /* It's safe to allow irq's after DR6 has been saved */ |
552 | preempt_conditional_sti(regs); | 557 | preempt_conditional_sti(regs); |
553 | 558 | ||
554 | /* Mask out spurious debug traps due to lazy DR7 setting */ | 559 | if (regs->flags & X86_VM_MASK) { |
555 | if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { | 560 | handle_vm86_trap((struct kernel_vm86_regs *) regs, |
556 | if (!tsk->thread.debugreg7) | 561 | error_code, 1); |
557 | goto clear_dr7; | 562 | return; |
558 | } | 563 | } |
559 | 564 | ||
560 | #ifdef CONFIG_X86_32 | ||
561 | if (regs->flags & X86_VM_MASK) | ||
562 | goto debug_vm86; | ||
563 | #endif | ||
564 | |||
565 | /* Save debug status register where ptrace can see it */ | ||
566 | tsk->thread.debugreg6 = condition; | ||
567 | |||
568 | /* | 565 | /* |
569 | * Single-stepping through TF: make sure we ignore any events in | 566 | * Single-stepping through system calls: ignore any exceptions in |
570 | * kernel space (but re-enable TF when returning to user mode). | 567 | * kernel space, but re-enable TF when returning to user mode. |
568 | * | ||
569 | * We already checked v86 mode above, so we can check for kernel mode | ||
570 | * by just checking the CPL of CS. | ||
571 | */ | 571 | */ |
572 | if (condition & DR_STEP) { | 572 | if ((dr6 & DR_STEP) && !user_mode(regs)) { |
573 | if (!user_mode(regs)) | 573 | tsk->thread.debugreg6 &= ~DR_STEP; |
574 | goto clear_TF_reenable; | 574 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); |
575 | regs->flags &= ~X86_EFLAGS_TF; | ||
575 | } | 576 | } |
576 | 577 | si_code = get_si_code(tsk->thread.debugreg6); | |
577 | si_code = get_si_code(condition); | 578 | if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) |
578 | /* Ok, finally something we can handle */ | 579 | send_sigtrap(tsk, regs, error_code, si_code); |
579 | send_sigtrap(tsk, regs, error_code, si_code); | ||
580 | |||
581 | /* | ||
582 | * Disable additional traps. They'll be re-enabled when | ||
583 | * the signal is delivered. | ||
584 | */ | ||
585 | clear_dr7: | ||
586 | set_debugreg(0, 7); | ||
587 | preempt_conditional_cli(regs); | 580 | preempt_conditional_cli(regs); |
588 | return; | ||
589 | 581 | ||
590 | #ifdef CONFIG_X86_32 | ||
591 | debug_vm86: | ||
592 | /* reenable preemption: handle_vm86_trap() might sleep */ | ||
593 | dec_preempt_count(); | ||
594 | handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); | ||
595 | conditional_cli(regs); | ||
596 | return; | ||
597 | #endif | ||
598 | |||
599 | clear_TF_reenable: | ||
600 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); | ||
601 | regs->flags &= ~X86_EFLAGS_TF; | ||
602 | preempt_conditional_cli(regs); | ||
603 | return; | 582 | return; |
604 | } | 583 | } |
605 | 584 | ||
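Because do_debug() now stores the raw DR6 bits in thread.debugreg6 instead of clearing them, a ptrace user can read the virtualized status back and see exactly what fired. A hedged user-space sketch (the report_* helpers are hypothetical; the bit values are the architectural DR6 layout):

	/* After waitpid() reports the SIGTRAP: */
	long dr6 = ptrace(PTRACE_PEEKUSER, pid,
			  offsetof(struct user, u_debugreg[6]), 0);
	if (dr6 & 0xf)			/* DR_TRAP0..DR_TRAP3: a breakpoint slot hit */
		report_breakpoint(dr6 & 0xf);
	if (dr6 & (1 << 14))		/* DR_STEP: single-step trap */
		report_singlestep();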
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index f37930954d15..eed156851f5d 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -114,13 +114,12 @@ void __cpuinit check_tsc_sync_source(int cpu) | |||
114 | return; | 114 | return; |
115 | 115 | ||
116 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { | 116 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { |
117 | printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n"); | 117 | if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) |
118 | pr_info( | ||
119 | "Skipped synchronization checks as TSC is reliable.\n"); | ||
118 | return; | 120 | return; |
119 | } | 121 | } |
120 | 122 | ||
121 | pr_info("checking TSC synchronization [CPU#%d -> CPU#%d]:", | ||
122 | smp_processor_id(), cpu); | ||
123 | |||
124 | /* | 123 | /* |
125 | * Reset it - in case this is a second bootup: | 124 | * Reset it - in case this is a second bootup: |
126 | */ | 125 | */ |
@@ -142,12 +141,14 @@ void __cpuinit check_tsc_sync_source(int cpu) | |||
142 | cpu_relax(); | 141 | cpu_relax(); |
143 | 142 | ||
144 | if (nr_warps) { | 143 | if (nr_warps) { |
145 | printk("\n"); | 144 | pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n", |
145 | smp_processor_id(), cpu); | ||
146 | pr_warning("Measured %Ld cycles TSC warp between CPUs, " | 146 | pr_warning("Measured %Ld cycles TSC warp between CPUs, " |
147 | "turning off TSC clock.\n", max_warp); | 147 | "turning off TSC clock.\n", max_warp); |
148 | mark_tsc_unstable("check_tsc_sync_source failed"); | 148 | mark_tsc_unstable("check_tsc_sync_source failed"); |
149 | } else { | 149 | } else { |
150 | printk(" passed.\n"); | 150 | pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n", |
151 | smp_processor_id(), cpu); | ||
151 | } | 152 | } |
152 | 153 | ||
153 | /* | 154 | /* |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index aeef529917e4..61d805df4c91 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
@@ -9,10 +9,25 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/rbtree.h> | ||
12 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
13 | 14 | ||
14 | #include <asm/apic.h> | 15 | #include <asm/apic.h> |
15 | #include <asm/uv/uv_irq.h> | 16 | #include <asm/uv/uv_irq.h> |
17 | #include <asm/uv/uv_hub.h> | ||
18 | |||
19 | /* MMR offset and pnode of hub sourcing interrupts for a given irq */ | ||
20 | struct uv_irq_2_mmr_pnode { | ||
21 | struct rb_node list; | ||
22 | unsigned long offset; | ||
23 | int pnode; | ||
24 | int irq; | ||
25 | }; | ||
26 | |||
27 | static spinlock_t uv_irq_lock; | ||
28 | static struct rb_root uv_irq_root; | ||
29 | |||
30 | static int uv_set_irq_affinity(unsigned int, const struct cpumask *); | ||
16 | 31 | ||
17 | static void uv_noop(unsigned int irq) | 32 | static void uv_noop(unsigned int irq) |
18 | { | 33 | { |
@@ -39,25 +54,214 @@ struct irq_chip uv_irq_chip = { | |||
39 | .unmask = uv_noop, | 54 | .unmask = uv_noop, |
40 | .eoi = uv_ack_apic, | 55 | .eoi = uv_ack_apic, |
41 | .end = uv_noop, | 56 | .end = uv_noop, |
57 | .set_affinity = uv_set_irq_affinity, | ||
42 | }; | 58 | }; |
43 | 59 | ||
44 | /* | 60 | /* |
61 | * Add offset and pnode information of the hub sourcing interrupts to the | ||
62 | * rb tree for a specific irq. | ||
63 | */ | ||
64 | static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade) | ||
65 | { | ||
66 | struct rb_node **link = &uv_irq_root.rb_node; | ||
67 | struct rb_node *parent = NULL; | ||
68 | struct uv_irq_2_mmr_pnode *n; | ||
69 | struct uv_irq_2_mmr_pnode *e; | ||
70 | unsigned long irqflags; | ||
71 | |||
72 | n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL, | ||
73 | uv_blade_to_memory_nid(blade)); | ||
74 | if (!n) | ||
75 | return -ENOMEM; | ||
76 | |||
77 | n->irq = irq; | ||
78 | n->offset = offset; | ||
79 | n->pnode = uv_blade_to_pnode(blade); | ||
80 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
81 | /* Find the right place in the rbtree: */ | ||
82 | while (*link) { | ||
83 | parent = *link; | ||
84 | e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list); | ||
85 | |||
86 | if (unlikely(irq == e->irq)) { | ||
87 | /* irq entry exists */ | ||
88 | e->pnode = uv_blade_to_pnode(blade); | ||
89 | e->offset = offset; | ||
90 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
91 | kfree(n); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | if (irq < e->irq) | ||
96 | link = &(*link)->rb_left; | ||
97 | else | ||
98 | link = &(*link)->rb_right; | ||
99 | } | ||
100 | |||
101 | /* Insert the node into the rbtree. */ | ||
102 | rb_link_node(&n->list, parent, link); | ||
103 | rb_insert_color(&n->list, &uv_irq_root); | ||
104 | |||
105 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | /* Retrieve offset and pnode information from the rb tree for a specific irq */ | ||
110 | int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) | ||
111 | { | ||
112 | struct uv_irq_2_mmr_pnode *e; | ||
113 | struct rb_node *n; | ||
114 | unsigned long irqflags; | ||
115 | |||
116 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
117 | n = uv_irq_root.rb_node; | ||
118 | while (n) { | ||
119 | e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); | ||
120 | |||
121 | if (e->irq == irq) { | ||
122 | *offset = e->offset; | ||
123 | *pnode = e->pnode; | ||
124 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | if (irq < e->irq) | ||
129 | n = n->rb_left; | ||
130 | else | ||
131 | n = n->rb_right; | ||
132 | } | ||
133 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
134 | return -1; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Re-target the irq to the specified CPU and enable the specified MMR located | ||
139 | * on the specified blade to allow the sending of MSIs to the specified CPU. | ||
140 | */ | ||
141 | static int | ||
142 | arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | ||
143 | unsigned long mmr_offset, int restrict) | ||
144 | { | ||
145 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | ||
146 | struct irq_desc *desc = irq_to_desc(irq); | ||
147 | struct irq_cfg *cfg; | ||
148 | int mmr_pnode; | ||
149 | unsigned long mmr_value; | ||
150 | struct uv_IO_APIC_route_entry *entry; | ||
151 | int err; | ||
152 | |||
153 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | ||
154 | sizeof(unsigned long)); | ||
155 | |||
156 | cfg = irq_cfg(irq); | ||
157 | |||
158 | err = assign_irq_vector(irq, cfg, eligible_cpu); | ||
159 | if (err != 0) | ||
160 | return err; | ||
161 | |||
162 | if (restrict == UV_AFFINITY_CPU) | ||
163 | desc->status |= IRQ_NO_BALANCING; | ||
164 | else | ||
165 | desc->status |= IRQ_MOVE_PCNTXT; | ||
166 | |||
167 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | ||
168 | irq_name); | ||
169 | |||
170 | mmr_value = 0; | ||
171 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
172 | entry->vector = cfg->vector; | ||
173 | entry->delivery_mode = apic->irq_delivery_mode; | ||
174 | entry->dest_mode = apic->irq_dest_mode; | ||
175 | entry->polarity = 0; | ||
176 | entry->trigger = 0; | ||
177 | entry->mask = 0; | ||
178 | entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); | ||
179 | |||
180 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
181 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
182 | |||
183 | if (cfg->move_in_progress) | ||
184 | send_cleanup_vector(cfg); | ||
185 | |||
186 | return irq; | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Disable the specified MMR located on the specified blade so that MSIs are | ||
191 | * longer allowed to be sent. | ||
192 | */ | ||
193 | static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) | ||
194 | { | ||
195 | unsigned long mmr_value; | ||
196 | struct uv_IO_APIC_route_entry *entry; | ||
197 | |||
198 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | ||
199 | sizeof(unsigned long)); | ||
200 | |||
201 | mmr_value = 0; | ||
202 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
203 | entry->mask = 1; | ||
204 | |||
205 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
206 | } | ||
207 | |||
208 | static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | ||
209 | { | ||
210 | struct irq_desc *desc = irq_to_desc(irq); | ||
211 | struct irq_cfg *cfg = desc->chip_data; | ||
212 | unsigned int dest; | ||
213 | unsigned long mmr_value; | ||
214 | struct uv_IO_APIC_route_entry *entry; | ||
215 | unsigned long mmr_offset; | ||
216 | unsigned mmr_pnode; | ||
217 | |||
218 | dest = set_desc_affinity(desc, mask); | ||
219 | if (dest == BAD_APICID) | ||
220 | return -1; | ||
221 | |||
222 | mmr_value = 0; | ||
223 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
224 | |||
225 | entry->vector = cfg->vector; | ||
226 | entry->delivery_mode = apic->irq_delivery_mode; | ||
227 | entry->dest_mode = apic->irq_dest_mode; | ||
228 | entry->polarity = 0; | ||
229 | entry->trigger = 0; | ||
230 | entry->mask = 0; | ||
231 | entry->dest = dest; | ||
232 | |||
233 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ | ||
234 | if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) | ||
235 | return -1; | ||
236 | |||
237 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
238 | |||
239 | if (cfg->move_in_progress) | ||
240 | send_cleanup_vector(cfg); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | /* | ||
45 | * Set up a mapping of an available irq and vector, and enable the specified | 246 | * Set up a mapping of an available irq and vector, and enable the specified |
46 | * MMR that defines the MSI that is to be sent to the specified CPU when an | 247 | * MMR that defines the MSI that is to be sent to the specified CPU when an |
47 | * interrupt is raised. | 248 | * interrupt is raised. |
48 | */ | 249 | */ |
49 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, | 250 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, |
50 | unsigned long mmr_offset) | 251 | unsigned long mmr_offset, int restrict) |
51 | { | 252 | { |
52 | int irq; | 253 | int irq, ret; |
53 | int ret; | 254 | |
255 | irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); | ||
54 | 256 | ||
55 | irq = create_irq(); | ||
56 | if (irq <= 0) | 257 | if (irq <= 0) |
57 | return -EBUSY; | 258 | return -EBUSY; |
58 | 259 | ||
59 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset); | 260 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, |
60 | if (ret != irq) | 261 | restrict); |
262 | if (ret == irq) | ||
263 | uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); | ||
264 | else | ||
61 | destroy_irq(irq); | 265 | destroy_irq(irq); |
62 | 266 | ||
63 | return ret; | 267 | return ret; |
@@ -71,9 +275,28 @@ EXPORT_SYMBOL_GPL(uv_setup_irq); | |||
71 | * | 275 | * |
72 | * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). | 276 | * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). |
73 | */ | 277 | */ |
74 | void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset) | 278 | void uv_teardown_irq(unsigned int irq) |
75 | { | 279 | { |
76 | arch_disable_uv_irq(mmr_blade, mmr_offset); | 280 | struct uv_irq_2_mmr_pnode *e; |
281 | struct rb_node *n; | ||
282 | unsigned long irqflags; | ||
283 | |||
284 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
285 | n = uv_irq_root.rb_node; | ||
286 | while (n) { | ||
287 | e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); | ||
288 | if (e->irq == irq) { | ||
289 | arch_disable_uv_irq(e->pnode, e->offset); | ||
290 | rb_erase(n, &uv_irq_root); | ||
291 | kfree(e); | ||
292 | break; | ||
293 | } | ||
294 | if (irq < e->irq) | ||
295 | n = n->rb_left; | ||
296 | else | ||
297 | n = n->rb_right; | ||
298 | } | ||
299 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
77 | destroy_irq(irq); | 300 | destroy_irq(irq); |
78 | } | 301 | } |
79 | EXPORT_SYMBOL_GPL(uv_teardown_irq); | 302 | EXPORT_SYMBOL_GPL(uv_teardown_irq); |
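Callers of this interface need a matching update: uv_setup_irq() gains the restrict argument, and uv_teardown_irq() drops the blade/offset arguments because both are now recovered from the rb tree. A hedged sketch of a hypothetical driver conversion (the device name and surrounding variables are illustrative; UV_AFFINITY_CPU is one of the restrict values used above):

	/* old: irq = uv_setup_irq(name, cpu, mmr_blade, mmr_offset);
	 *      uv_teardown_irq(irq, mmr_blade, mmr_offset);           */
	irq = uv_setup_irq("my_uv_dev", cpu, mmr_blade, mmr_offset, UV_AFFINITY_CPU);
	if (irq <= 0)
		return irq ? irq : -EBUSY;
	/* ... use the irq ... */
	uv_teardown_irq(irq);	/* pnode and offset now come from the rb tree */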
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index f068553a1b17..abda6f53e71e 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -183,7 +183,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) | |||
183 | return; | 183 | return; |
184 | } | 184 | } |
185 | 185 | ||
186 | apic_cpus = apic->apicid_to_cpu_present(m->apicid); | 186 | apic->apicid_to_cpu_present(m->apicid, &apic_cpus); |
187 | physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); | 187 | physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); |
188 | /* | 188 | /* |
189 | * Validate version | 189 | * Validate version |
@@ -486,7 +486,7 @@ static void end_cobalt_irq(unsigned int irq) | |||
486 | } | 486 | } |
487 | 487 | ||
488 | static struct irq_chip cobalt_irq_type = { | 488 | static struct irq_chip cobalt_irq_type = { |
489 | .typename = "Cobalt-APIC", | 489 | .name = "Cobalt-APIC", |
490 | .startup = startup_cobalt_irq, | 490 | .startup = startup_cobalt_irq, |
491 | .shutdown = disable_cobalt_irq, | 491 | .shutdown = disable_cobalt_irq, |
492 | .enable = enable_cobalt_irq, | 492 | .enable = enable_cobalt_irq, |
@@ -523,7 +523,7 @@ static void end_piix4_master_irq(unsigned int irq) | |||
523 | } | 523 | } |
524 | 524 | ||
525 | static struct irq_chip piix4_master_irq_type = { | 525 | static struct irq_chip piix4_master_irq_type = { |
526 | .typename = "PIIX4-master", | 526 | .name = "PIIX4-master", |
527 | .startup = startup_piix4_master_irq, | 527 | .startup = startup_piix4_master_irq, |
528 | .ack = ack_cobalt_irq, | 528 | .ack = ack_cobalt_irq, |
529 | .end = end_piix4_master_irq, | 529 | .end = end_piix4_master_irq, |
@@ -531,7 +531,7 @@ static struct irq_chip piix4_master_irq_type = { | |||
531 | 531 | ||
532 | 532 | ||
533 | static struct irq_chip piix4_virtual_irq_type = { | 533 | static struct irq_chip piix4_virtual_irq_type = { |
534 | .typename = "PIIX4-virtual", | 534 | .name = "PIIX4-virtual", |
535 | .shutdown = disable_8259A_irq, | 535 | .shutdown = disable_8259A_irq, |
536 | .enable = enable_8259A_irq, | 536 | .enable = enable_8259A_irq, |
537 | .disable = disable_8259A_irq, | 537 | .disable = disable_8259A_irq, |
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 3909e3ba5ce3..a1029769b6f2 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -30,9 +30,8 @@ EXPORT_SYMBOL(__put_user_8); | |||
30 | 30 | ||
31 | EXPORT_SYMBOL(copy_user_generic); | 31 | EXPORT_SYMBOL(copy_user_generic); |
32 | EXPORT_SYMBOL(__copy_user_nocache); | 32 | EXPORT_SYMBOL(__copy_user_nocache); |
33 | EXPORT_SYMBOL(copy_from_user); | 33 | EXPORT_SYMBOL(_copy_from_user); |
34 | EXPORT_SYMBOL(copy_to_user); | 34 | EXPORT_SYMBOL(_copy_to_user); |
35 | EXPORT_SYMBOL(__copy_from_user_inatomic); | ||
36 | 35 | ||
37 | EXPORT_SYMBOL(copy_page); | 36 | EXPORT_SYMBOL(copy_page); |
38 | EXPORT_SYMBOL(clear_page); | 37 | EXPORT_SYMBOL(clear_page); |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 4449a4a2c2ed..d11c5ff7c65e 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -14,10 +14,13 @@ | |||
14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
15 | #include <asm/irq.h> | 15 | #include <asm/irq.h> |
16 | #include <asm/tsc.h> | 16 | #include <asm/tsc.h> |
17 | #include <asm/iommu.h> | ||
17 | 18 | ||
18 | void __cpuinit x86_init_noop(void) { } | 19 | void __cpuinit x86_init_noop(void) { } |
19 | void __init x86_init_uint_noop(unsigned int unused) { } | 20 | void __init x86_init_uint_noop(unsigned int unused) { } |
20 | void __init x86_init_pgd_noop(pgd_t *unused) { } | 21 | void __init x86_init_pgd_noop(pgd_t *unused) { } |
22 | int __init iommu_init_noop(void) { return 0; } | ||
23 | void iommu_shutdown_noop(void) { } | ||
21 | 24 | ||
22 | /* | 25 | /* |
23 | * The platform setup functions are preset with the default functions | 26 | * The platform setup functions are preset with the default functions |
@@ -62,6 +65,10 @@ struct x86_init_ops x86_init __initdata = { | |||
62 | .tsc_pre_init = x86_init_noop, | 65 | .tsc_pre_init = x86_init_noop, |
63 | .timer_init = hpet_time_init, | 66 | .timer_init = hpet_time_init, |
64 | }, | 67 | }, |
68 | |||
69 | .iommu = { | ||
70 | .iommu_init = iommu_init_noop, | ||
71 | }, | ||
65 | }; | 72 | }; |
66 | 73 | ||
67 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { | 74 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { |
@@ -72,4 +79,5 @@ struct x86_platform_ops x86_platform = { | |||
72 | .calibrate_tsc = native_calibrate_tsc, | 79 | .calibrate_tsc = native_calibrate_tsc, |
73 | .get_wallclock = mach_get_cmos_time, | 80 | .get_wallclock = mach_get_cmos_time, |
74 | .set_wallclock = mach_set_rtc_mmss, | 81 | .set_wallclock = mach_set_rtc_mmss, |
82 | .iommu_shutdown = iommu_shutdown_noop, | ||
75 | }; | 83 | }; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ae07d261527c..4fc80174191c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #define CREATE_TRACE_POINTS | 42 | #define CREATE_TRACE_POINTS |
43 | #include "trace.h" | 43 | #include "trace.h" |
44 | 44 | ||
45 | #include <asm/debugreg.h> | ||
45 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
46 | #include <asm/msr.h> | 47 | #include <asm/msr.h> |
47 | #include <asm/desc.h> | 48 | #include <asm/desc.h> |
@@ -3643,14 +3644,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3643 | trace_kvm_entry(vcpu->vcpu_id); | 3644 | trace_kvm_entry(vcpu->vcpu_id); |
3644 | kvm_x86_ops->run(vcpu, kvm_run); | 3645 | kvm_x86_ops->run(vcpu, kvm_run); |
3645 | 3646 | ||
3646 | if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) { | 3647 | /* |
3647 | set_debugreg(current->thread.debugreg0, 0); | 3648 | * If the guest has used debug registers, at least dr7 |
3648 | set_debugreg(current->thread.debugreg1, 1); | 3649 | * will be disabled while returning to the host. |
3649 | set_debugreg(current->thread.debugreg2, 2); | 3650 | * If we don't have active breakpoints in the host, we don't |
3650 | set_debugreg(current->thread.debugreg3, 3); | 3651 | * care about the messed up debug address registers. But if |
3651 | set_debugreg(current->thread.debugreg6, 6); | 3652 | * we have some of them active, restore the old state. |
3652 | set_debugreg(current->thread.debugreg7, 7); | 3653 | */ |
3653 | } | 3654 | if (hw_breakpoint_active()) |
3655 | hw_breakpoint_restore(); | ||
3654 | 3656 | ||
3655 | set_bit(KVM_REQ_KICK, &vcpu->requests); | 3657 | set_bit(KVM_REQ_KICK, &vcpu->requests); |
3656 | local_irq_enable(); | 3658 | local_irq_enable(); |
diff --git a/arch/x86/lib/.gitignore b/arch/x86/lib/.gitignore new file mode 100644 index 000000000000..8df89f0a3fe6 --- /dev/null +++ b/arch/x86/lib/.gitignore | |||
@@ -0,0 +1 @@ | |||
inat-tables.c | |||
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 85f5db95c60f..a2d6472895fb 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -2,12 +2,25 @@ | |||
2 | # Makefile for x86 specific library files. | 2 | # Makefile for x86 specific library files. |
3 | # | 3 | # |
4 | 4 | ||
5 | inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk | ||
6 | inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt | ||
7 | quiet_cmd_inat_tables = GEN $@ | ||
8 | cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ | ||
9 | |||
10 | $(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) | ||
11 | $(call cmd,inat_tables) | ||
12 | |||
13 | $(obj)/inat.o: $(obj)/inat-tables.c | ||
14 | |||
15 | clean-files := inat-tables.c | ||
16 | |||
5 | obj-$(CONFIG_SMP) := msr.o | 17 | obj-$(CONFIG_SMP) := msr.o |
6 | 18 | ||
7 | lib-y := delay.o | 19 | lib-y := delay.o |
8 | lib-y += thunk_$(BITS).o | 20 | lib-y += thunk_$(BITS).o |
9 | lib-y += usercopy_$(BITS).o getuser.o putuser.o | 21 | lib-y += usercopy_$(BITS).o getuser.o putuser.o |
10 | lib-y += memcpy_$(BITS).o | 22 | lib-y += memcpy_$(BITS).o |
23 | lib-y += insn.o inat.o | ||
11 | 24 | ||
12 | obj-y += msr-reg.o msr-reg-export.o | 25 | obj-y += msr-reg.o msr-reg-export.o |
13 | 26 | ||
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 6ba0f7bb85ea..cf889d4e076a 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S | |||
@@ -65,7 +65,7 @@ | |||
65 | .endm | 65 | .endm |
66 | 66 | ||
67 | /* Standard copy_to_user with segment limit checking */ | 67 | /* Standard copy_to_user with segment limit checking */ |
68 | ENTRY(copy_to_user) | 68 | ENTRY(_copy_to_user) |
69 | CFI_STARTPROC | 69 | CFI_STARTPROC |
70 | GET_THREAD_INFO(%rax) | 70 | GET_THREAD_INFO(%rax) |
71 | movq %rdi,%rcx | 71 | movq %rdi,%rcx |
@@ -75,10 +75,10 @@ ENTRY(copy_to_user) | |||
75 | jae bad_to_user | 75 | jae bad_to_user |
76 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string | 76 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string |
77 | CFI_ENDPROC | 77 | CFI_ENDPROC |
78 | ENDPROC(copy_to_user) | 78 | ENDPROC(_copy_to_user) |
79 | 79 | ||
80 | /* Standard copy_from_user with segment limit checking */ | 80 | /* Standard copy_from_user with segment limit checking */ |
81 | ENTRY(copy_from_user) | 81 | ENTRY(_copy_from_user) |
82 | CFI_STARTPROC | 82 | CFI_STARTPROC |
83 | GET_THREAD_INFO(%rax) | 83 | GET_THREAD_INFO(%rax) |
84 | movq %rsi,%rcx | 84 | movq %rsi,%rcx |
@@ -88,7 +88,7 @@ ENTRY(copy_from_user) | |||
88 | jae bad_from_user | 88 | jae bad_from_user |
89 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string | 89 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string |
90 | CFI_ENDPROC | 90 | CFI_ENDPROC |
91 | ENDPROC(copy_from_user) | 91 | ENDPROC(_copy_from_user) |
92 | 92 | ||
93 | ENTRY(copy_user_generic) | 93 | ENTRY(copy_user_generic) |
94 | CFI_STARTPROC | 94 | CFI_STARTPROC |
@@ -96,12 +96,6 @@ ENTRY(copy_user_generic) | |||
96 | CFI_ENDPROC | 96 | CFI_ENDPROC |
97 | ENDPROC(copy_user_generic) | 97 | ENDPROC(copy_user_generic) |
98 | 98 | ||
99 | ENTRY(__copy_from_user_inatomic) | ||
100 | CFI_STARTPROC | ||
101 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string | ||
102 | CFI_ENDPROC | ||
103 | ENDPROC(__copy_from_user_inatomic) | ||
104 | |||
105 | .section .fixup,"ax" | 99 | .section .fixup,"ax" |
106 | /* must zero dest */ | 100 | /* must zero dest */ |
107 | ENTRY(bad_from_user) | 101 | ENTRY(bad_from_user) |
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c new file mode 100644 index 000000000000..46fc4ee09fc4 --- /dev/null +++ b/arch/x86/lib/inat.c | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * x86 instruction attribute tables | ||
3 | * | ||
4 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | */ | ||
21 | #include <asm/insn.h> | ||
22 | |||
23 | /* Attribute tables are generated from opcode map */ | ||
24 | #include "inat-tables.c" | ||
25 | |||
26 | /* Attribute search APIs */ | ||
27 | insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode) | ||
28 | { | ||
29 | return inat_primary_table[opcode]; | ||
30 | } | ||
31 | |||
32 | insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx, | ||
33 | insn_attr_t esc_attr) | ||
34 | { | ||
35 | const insn_attr_t *table; | ||
36 | insn_attr_t lpfx_attr; | ||
37 | int n, m = 0; | ||
38 | |||
39 | n = inat_escape_id(esc_attr); | ||
40 | if (last_pfx) { | ||
41 | lpfx_attr = inat_get_opcode_attribute(last_pfx); | ||
42 | m = inat_last_prefix_id(lpfx_attr); | ||
43 | } | ||
44 | table = inat_escape_tables[n][0]; | ||
45 | if (!table) | ||
46 | return 0; | ||
47 | if (inat_has_variant(table[opcode]) && m) { | ||
48 | table = inat_escape_tables[n][m]; | ||
49 | if (!table) | ||
50 | return 0; | ||
51 | } | ||
52 | return table[opcode]; | ||
53 | } | ||
54 | |||
55 | insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, | ||
56 | insn_attr_t grp_attr) | ||
57 | { | ||
58 | const insn_attr_t *table; | ||
59 | insn_attr_t lpfx_attr; | ||
60 | int n, m = 0; | ||
61 | |||
62 | n = inat_group_id(grp_attr); | ||
63 | if (last_pfx) { | ||
64 | lpfx_attr = inat_get_opcode_attribute(last_pfx); | ||
65 | m = inat_last_prefix_id(lpfx_attr); | ||
66 | } | ||
67 | table = inat_group_tables[n][0]; | ||
68 | if (!table) | ||
69 | return inat_group_common_attribute(grp_attr); | ||
70 | if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) { | ||
71 | table = inat_group_tables[n][m]; | ||
72 | if (!table) | ||
73 | return inat_group_common_attribute(grp_attr); | ||
74 | } | ||
75 | return table[X86_MODRM_REG(modrm)] | | ||
76 | inat_group_common_attribute(grp_attr); | ||
77 | } | ||
78 | |||
79 | insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, | ||
80 | insn_byte_t vex_p) | ||
81 | { | ||
82 | const insn_attr_t *table; | ||
83 | if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) | ||
84 | return 0; | ||
85 | table = inat_avx_tables[vex_m][vex_p]; | ||
86 | if (!table) | ||
87 | return 0; | ||
88 | return table[opcode]; | ||
89 | } | ||
90 | |||
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c new file mode 100644 index 000000000000..9f33b984d0ef --- /dev/null +++ b/arch/x86/lib/insn.c | |||
@@ -0,0 +1,516 @@ | |||
1 | /* | ||
2 | * x86 instruction analysis | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2002, 2004, 2009 | ||
19 | */ | ||
20 | |||
21 | #include <linux/string.h> | ||
22 | #include <asm/inat.h> | ||
23 | #include <asm/insn.h> | ||
24 | |||
25 | #define get_next(t, insn) \ | ||
26 | ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) | ||
27 | |||
28 | #define peek_next(t, insn) \ | ||
29 | ({t r; r = *(t*)insn->next_byte; r; }) | ||
30 | |||
31 | #define peek_nbyte_next(t, insn, n) \ | ||
32 | ({t r; r = *(t*)((insn)->next_byte + n); r; }) | ||
33 | |||
34 | /** | ||
35 | * insn_init() - initialize struct insn | ||
36 | * @insn: &struct insn to be initialized | ||
37 | * @kaddr: address (in kernel memory) of instruction (or copy thereof) | ||
38 | * @x86_64: !0 for 64-bit kernel or 64-bit app | ||
39 | */ | ||
40 | void insn_init(struct insn *insn, const void *kaddr, int x86_64) | ||
41 | { | ||
42 | memset(insn, 0, sizeof(*insn)); | ||
43 | insn->kaddr = kaddr; | ||
44 | insn->next_byte = kaddr; | ||
45 | insn->x86_64 = x86_64 ? 1 : 0; | ||
46 | insn->opnd_bytes = 4; | ||
47 | if (x86_64) | ||
48 | insn->addr_bytes = 8; | ||
49 | else | ||
50 | insn->addr_bytes = 4; | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * insn_get_prefixes - scan x86 instruction prefix bytes | ||
55 | * @insn: &struct insn containing instruction | ||
56 | * | ||
57 | * Populates the @insn->prefixes bitmap, and updates @insn->next_byte | ||
58 | * to point to the (first) opcode. No effect if @insn->prefixes.got | ||
59 | * is already set. | ||
60 | */ | ||
61 | void insn_get_prefixes(struct insn *insn) | ||
62 | { | ||
63 | struct insn_field *prefixes = &insn->prefixes; | ||
64 | insn_attr_t attr; | ||
65 | insn_byte_t b, lb; | ||
66 | int i, nb; | ||
67 | |||
68 | if (prefixes->got) | ||
69 | return; | ||
70 | |||
71 | nb = 0; | ||
72 | lb = 0; | ||
73 | b = peek_next(insn_byte_t, insn); | ||
74 | attr = inat_get_opcode_attribute(b); | ||
75 | while (inat_is_legacy_prefix(attr)) { | ||
76 | /* Skip if same prefix */ | ||
77 | for (i = 0; i < nb; i++) | ||
78 | if (prefixes->bytes[i] == b) | ||
79 | goto found; | ||
80 | if (nb == 4) | ||
81 | /* Invalid instruction */ | ||
82 | break; | ||
83 | prefixes->bytes[nb++] = b; | ||
84 | if (inat_is_address_size_prefix(attr)) { | ||
85 | /* address size switches 2/4 or 4/8 */ | ||
86 | if (insn->x86_64) | ||
87 | insn->addr_bytes ^= 12; | ||
88 | else | ||
89 | insn->addr_bytes ^= 6; | ||
90 | } else if (inat_is_operand_size_prefix(attr)) { | ||
91 | /* operand size switches 2/4 */ | ||
92 | insn->opnd_bytes ^= 6; | ||
93 | } | ||
94 | found: | ||
95 | prefixes->nbytes++; | ||
96 | insn->next_byte++; | ||
97 | lb = b; | ||
98 | b = peek_next(insn_byte_t, insn); | ||
99 | attr = inat_get_opcode_attribute(b); | ||
100 | } | ||
101 | /* Set the last prefix */ | ||
102 | if (lb && lb != insn->prefixes.bytes[3]) { | ||
103 | if (unlikely(insn->prefixes.bytes[3])) { | ||
104 | /* Swap the last prefix */ | ||
105 | b = insn->prefixes.bytes[3]; | ||
106 | for (i = 0; i < nb; i++) | ||
107 | if (prefixes->bytes[i] == lb) | ||
108 | prefixes->bytes[i] = b; | ||
109 | } | ||
110 | insn->prefixes.bytes[3] = lb; | ||
111 | } | ||
112 | |||
113 | /* Decode REX prefix */ | ||
114 | if (insn->x86_64) { | ||
115 | b = peek_next(insn_byte_t, insn); | ||
116 | attr = inat_get_opcode_attribute(b); | ||
117 | if (inat_is_rex_prefix(attr)) { | ||
118 | insn->rex_prefix.value = b; | ||
119 | insn->rex_prefix.nbytes = 1; | ||
120 | insn->next_byte++; | ||
121 | if (X86_REX_W(b)) | ||
122 | /* REX.W overrides opnd_size */ | ||
123 | insn->opnd_bytes = 8; | ||
124 | } | ||
125 | } | ||
126 | insn->rex_prefix.got = 1; | ||
127 | |||
128 | /* Decode VEX prefix */ | ||
129 | b = peek_next(insn_byte_t, insn); | ||
130 | attr = inat_get_opcode_attribute(b); | ||
131 | if (inat_is_vex_prefix(attr)) { | ||
132 | insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1); | ||
133 | if (!insn->x86_64) { | ||
134 | /* | ||
135 | * In 32-bit mode, if the [7:6] bits (mod bits of | ||
136 | * ModRM) on the second byte are not 11b, it is | ||
137 | * LDS or LES. | ||
138 | */ | ||
139 | if (X86_MODRM_MOD(b2) != 3) | ||
140 | goto vex_end; | ||
141 | } | ||
142 | insn->vex_prefix.bytes[0] = b; | ||
143 | insn->vex_prefix.bytes[1] = b2; | ||
144 | if (inat_is_vex3_prefix(attr)) { | ||
145 | b2 = peek_nbyte_next(insn_byte_t, insn, 2); | ||
146 | insn->vex_prefix.bytes[2] = b2; | ||
147 | insn->vex_prefix.nbytes = 3; | ||
148 | insn->next_byte += 3; | ||
149 | if (insn->x86_64 && X86_VEX_W(b2)) | ||
150 | /* VEX.W overrides opnd_size */ | ||
151 | insn->opnd_bytes = 8; | ||
152 | } else { | ||
153 | insn->vex_prefix.nbytes = 2; | ||
154 | insn->next_byte += 2; | ||
155 | } | ||
156 | } | ||
157 | vex_end: | ||
158 | insn->vex_prefix.got = 1; | ||
159 | |||
160 | prefixes->got = 1; | ||
161 | return; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * insn_get_opcode - collect opcode(s) | ||
166 | * @insn: &struct insn containing instruction | ||
167 | * | ||
168 | * Populates @insn->opcode, updates @insn->next_byte to point past the | ||
169 | * opcode byte(s), and sets @insn->attr (except for groups). | ||
170 | * If necessary, first collects any preceding (prefix) bytes. | ||
171 | * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got | ||
172 | * is already 1. | ||
173 | */ | ||
174 | void insn_get_opcode(struct insn *insn) | ||
175 | { | ||
176 | struct insn_field *opcode = &insn->opcode; | ||
177 | insn_byte_t op, pfx; | ||
178 | if (opcode->got) | ||
179 | return; | ||
180 | if (!insn->prefixes.got) | ||
181 | insn_get_prefixes(insn); | ||
182 | |||
183 | /* Get first opcode */ | ||
184 | op = get_next(insn_byte_t, insn); | ||
185 | opcode->bytes[0] = op; | ||
186 | opcode->nbytes = 1; | ||
187 | |||
188 | /* Check if there is VEX prefix or not */ | ||
189 | if (insn_is_avx(insn)) { | ||
190 | insn_byte_t m, p; | ||
191 | m = insn_vex_m_bits(insn); | ||
192 | p = insn_vex_p_bits(insn); | ||
193 | insn->attr = inat_get_avx_attribute(op, m, p); | ||
194 | if (!inat_accept_vex(insn->attr)) | ||
195 | insn->attr = 0; /* This instruction is bad */ | ||
196 | goto end; /* VEX has only 1 byte for opcode */ | ||
197 | } | ||
198 | |||
199 | insn->attr = inat_get_opcode_attribute(op); | ||
200 | while (inat_is_escape(insn->attr)) { | ||
201 | /* Get escaped opcode */ | ||
202 | op = get_next(insn_byte_t, insn); | ||
203 | opcode->bytes[opcode->nbytes++] = op; | ||
204 | pfx = insn_last_prefix(insn); | ||
205 | insn->attr = inat_get_escape_attribute(op, pfx, insn->attr); | ||
206 | } | ||
207 | if (inat_must_vex(insn->attr)) | ||
208 | insn->attr = 0; /* This instruction is bad */ | ||
209 | end: | ||
210 | opcode->got = 1; | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * insn_get_modrm - collect ModRM byte, if any | ||
215 | * @insn: &struct insn containing instruction | ||
216 | * | ||
217 | * Populates @insn->modrm and updates @insn->next_byte to point past the | ||
218 | * ModRM byte, if any. If necessary, first collects the preceding bytes | ||
219 | * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. | ||
220 | */ | ||
221 | void insn_get_modrm(struct insn *insn) | ||
222 | { | ||
223 | struct insn_field *modrm = &insn->modrm; | ||
224 | insn_byte_t pfx, mod; | ||
225 | if (modrm->got) | ||
226 | return; | ||
227 | if (!insn->opcode.got) | ||
228 | insn_get_opcode(insn); | ||
229 | |||
230 | if (inat_has_modrm(insn->attr)) { | ||
231 | mod = get_next(insn_byte_t, insn); | ||
232 | modrm->value = mod; | ||
233 | modrm->nbytes = 1; | ||
234 | if (inat_is_group(insn->attr)) { | ||
235 | pfx = insn_last_prefix(insn); | ||
236 | insn->attr = inat_get_group_attribute(mod, pfx, | ||
237 | insn->attr); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | if (insn->x86_64 && inat_is_force64(insn->attr)) | ||
242 | insn->opnd_bytes = 8; | ||
243 | modrm->got = 1; | ||
244 | } | ||
245 | |||
246 | |||
247 | /** | ||
248 | * insn_rip_relative() - Does instruction use RIP-relative addressing mode? | ||
249 | * @insn: &struct insn containing instruction | ||
250 | * | ||
251 | * If necessary, first collects the instruction up to and including the | ||
252 | * ModRM byte. No effect if @insn->x86_64 is 0. | ||
253 | */ | ||
254 | int insn_rip_relative(struct insn *insn) | ||
255 | { | ||
256 | struct insn_field *modrm = &insn->modrm; | ||
257 | |||
258 | if (!insn->x86_64) | ||
259 | return 0; | ||
260 | if (!modrm->got) | ||
261 | insn_get_modrm(insn); | ||
262 | /* | ||
263 | * For rip-relative instructions, the mod field (top 2 bits) | ||
264 | * is zero and the r/m field (bottom 3 bits) is 0x5. | ||
265 | */ | ||
266 | return (modrm->nbytes && (modrm->value & 0xc7) == 0x5); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * insn_get_sib() - Get the SIB byte of instruction | ||
271 | * @insn: &struct insn containing instruction | ||
272 | * | ||
273 | * If necessary, first collects the instruction up to and including the | ||
274 | * ModRM byte. | ||
275 | */ | ||
276 | void insn_get_sib(struct insn *insn) | ||
277 | { | ||
278 | insn_byte_t modrm; | ||
279 | |||
280 | if (insn->sib.got) | ||
281 | return; | ||
282 | if (!insn->modrm.got) | ||
283 | insn_get_modrm(insn); | ||
284 | if (insn->modrm.nbytes) { | ||
285 | modrm = (insn_byte_t)insn->modrm.value; | ||
286 | if (insn->addr_bytes != 2 && | ||
287 | X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) { | ||
288 | insn->sib.value = get_next(insn_byte_t, insn); | ||
289 | insn->sib.nbytes = 1; | ||
290 | } | ||
291 | } | ||
292 | insn->sib.got = 1; | ||
293 | } | ||
294 | |||
295 | |||
296 | /** | ||
297 | * insn_get_displacement() - Get the displacement of instruction | ||
298 | * @insn: &struct insn containing instruction | ||
299 | * | ||
300 | * If necessary, first collects the instruction up to and including the | ||
301 | * SIB byte. | ||
302 | * The displacement value is sign-extended. | ||
303 | */ | ||
304 | void insn_get_displacement(struct insn *insn) | ||
305 | { | ||
306 | insn_byte_t mod, rm, base; | ||
307 | |||
308 | if (insn->displacement.got) | ||
309 | return; | ||
310 | if (!insn->sib.got) | ||
311 | insn_get_sib(insn); | ||
312 | if (insn->modrm.nbytes) { | ||
313 | /* | ||
314 | * Interpreting the modrm byte: | ||
315 | * mod = 00 - no displacement fields (exceptions below) | ||
316 | * mod = 01 - 1-byte displacement field | ||
317 | * mod = 10 - displacement field is 4 bytes, or 2 bytes if | ||
318 | * address size = 2 (0x67 prefix in 32-bit mode) | ||
319 | * mod = 11 - no memory operand | ||
320 | * | ||
321 | * If address size = 2... | ||
322 | * mod = 00, r/m = 110 - displacement field is 2 bytes | ||
323 | * | ||
324 | * If address size != 2... | ||
325 | * mod != 11, r/m = 100 - SIB byte exists | ||
326 | * mod = 00, SIB base = 101 - displacement field is 4 bytes | ||
327 | * mod = 00, r/m = 101 - rip-relative addressing, displacement | ||
328 | * field is 4 bytes | ||
329 | */ | ||
330 | mod = X86_MODRM_MOD(insn->modrm.value); | ||
331 | rm = X86_MODRM_RM(insn->modrm.value); | ||
332 | base = X86_SIB_BASE(insn->sib.value); | ||
333 | if (mod == 3) | ||
334 | goto out; | ||
335 | if (mod == 1) { | ||
336 | insn->displacement.value = get_next(char, insn); | ||
337 | insn->displacement.nbytes = 1; | ||
338 | } else if (insn->addr_bytes == 2) { | ||
339 | if ((mod == 0 && rm == 6) || mod == 2) { | ||
340 | insn->displacement.value = | ||
341 | get_next(short, insn); | ||
342 | insn->displacement.nbytes = 2; | ||
343 | } | ||
344 | } else { | ||
345 | if ((mod == 0 && rm == 5) || mod == 2 || | ||
346 | (mod == 0 && base == 5)) { | ||
347 | insn->displacement.value = get_next(int, insn); | ||
348 | insn->displacement.nbytes = 4; | ||
349 | } | ||
350 | } | ||
351 | } | ||
352 | out: | ||
353 | insn->displacement.got = 1; | ||
354 | } | ||
355 | |||
356 | /* Decode moffset16/32/64 */ | ||
357 | static void __get_moffset(struct insn *insn) | ||
358 | { | ||
359 | switch (insn->addr_bytes) { | ||
360 | case 2: | ||
361 | insn->moffset1.value = get_next(short, insn); | ||
362 | insn->moffset1.nbytes = 2; | ||
363 | break; | ||
364 | case 4: | ||
365 | insn->moffset1.value = get_next(int, insn); | ||
366 | insn->moffset1.nbytes = 4; | ||
367 | break; | ||
368 | case 8: | ||
369 | insn->moffset1.value = get_next(int, insn); | ||
370 | insn->moffset1.nbytes = 4; | ||
371 | insn->moffset2.value = get_next(int, insn); | ||
372 | insn->moffset2.nbytes = 4; | ||
373 | break; | ||
374 | } | ||
375 | insn->moffset1.got = insn->moffset2.got = 1; | ||
376 | } | ||
377 | |||
378 | /* Decode imm v32(Iz) */ | ||
379 | static void __get_immv32(struct insn *insn) | ||
380 | { | ||
381 | switch (insn->opnd_bytes) { | ||
382 | case 2: | ||
383 | insn->immediate.value = get_next(short, insn); | ||
384 | insn->immediate.nbytes = 2; | ||
385 | break; | ||
386 | case 4: | ||
387 | case 8: | ||
388 | insn->immediate.value = get_next(int, insn); | ||
389 | insn->immediate.nbytes = 4; | ||
390 | break; | ||
391 | } | ||
392 | } | ||
393 | |||
394 | /* Decode imm v64(Iv/Ov) */ | ||
395 | static void __get_immv(struct insn *insn) | ||
396 | { | ||
397 | switch (insn->opnd_bytes) { | ||
398 | case 2: | ||
399 | insn->immediate1.value = get_next(short, insn); | ||
400 | insn->immediate1.nbytes = 2; | ||
401 | break; | ||
402 | case 4: | ||
403 | insn->immediate1.value = get_next(int, insn); | ||
404 | insn->immediate1.nbytes = 4; | ||
405 | break; | ||
406 | case 8: | ||
407 | insn->immediate1.value = get_next(int, insn); | ||
408 | insn->immediate1.nbytes = 4; | ||
409 | insn->immediate2.value = get_next(int, insn); | ||
410 | insn->immediate2.nbytes = 4; | ||
411 | break; | ||
412 | } | ||
413 | insn->immediate1.got = insn->immediate2.got = 1; | ||
414 | } | ||
415 | |||
416 | /* Decode ptr16:16/32(Ap) */ | ||
417 | static void __get_immptr(struct insn *insn) | ||
418 | { | ||
419 | switch (insn->opnd_bytes) { | ||
420 | case 2: | ||
421 | insn->immediate1.value = get_next(short, insn); | ||
422 | insn->immediate1.nbytes = 2; | ||
423 | break; | ||
424 | case 4: | ||
425 | insn->immediate1.value = get_next(int, insn); | ||
426 | insn->immediate1.nbytes = 4; | ||
427 | break; | ||
428 | case 8: | ||
429 | /* ptr16:64 does not exist (no segment) */ | ||
430 | return; | ||
431 | } | ||
432 | insn->immediate2.value = get_next(unsigned short, insn); | ||
433 | insn->immediate2.nbytes = 2; | ||
434 | insn->immediate1.got = insn->immediate2.got = 1; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * insn_get_immediate() - Get the immediates of instruction | ||
439 | * @insn: &struct insn containing instruction | ||
440 | * | ||
441 | * If necessary, first collects the instruction up to and including the | ||
442 | * displacement bytes. | ||
443 | * Basically, most immediates are sign-extended. The unsigned value can be | ||
444 | * obtained by bit-masking with ((1 << (nbytes * 8)) - 1). | ||
445 | */ | ||
446 | void insn_get_immediate(struct insn *insn) | ||
447 | { | ||
448 | if (insn->immediate.got) | ||
449 | return; | ||
450 | if (!insn->displacement.got) | ||
451 | insn_get_displacement(insn); | ||
452 | |||
453 | if (inat_has_moffset(insn->attr)) { | ||
454 | __get_moffset(insn); | ||
455 | goto done; | ||
456 | } | ||
457 | |||
458 | if (!inat_has_immediate(insn->attr)) | ||
459 | /* no immediates */ | ||
460 | goto done; | ||
461 | |||
462 | switch (inat_immediate_size(insn->attr)) { | ||
463 | case INAT_IMM_BYTE: | ||
464 | insn->immediate.value = get_next(char, insn); | ||
465 | insn->immediate.nbytes = 1; | ||
466 | break; | ||
467 | case INAT_IMM_WORD: | ||
468 | insn->immediate.value = get_next(short, insn); | ||
469 | insn->immediate.nbytes = 2; | ||
470 | break; | ||
471 | case INAT_IMM_DWORD: | ||
472 | insn->immediate.value = get_next(int, insn); | ||
473 | insn->immediate.nbytes = 4; | ||
474 | break; | ||
475 | case INAT_IMM_QWORD: | ||
476 | insn->immediate1.value = get_next(int, insn); | ||
477 | insn->immediate1.nbytes = 4; | ||
478 | insn->immediate2.value = get_next(int, insn); | ||
479 | insn->immediate2.nbytes = 4; | ||
480 | break; | ||
481 | case INAT_IMM_PTR: | ||
482 | __get_immptr(insn); | ||
483 | break; | ||
484 | case INAT_IMM_VWORD32: | ||
485 | __get_immv32(insn); | ||
486 | break; | ||
487 | case INAT_IMM_VWORD: | ||
488 | __get_immv(insn); | ||
489 | break; | ||
490 | default: | ||
491 | break; | ||
492 | } | ||
493 | if (inat_has_second_immediate(insn->attr)) { | ||
494 | insn->immediate2.value = get_next(char, insn); | ||
495 | insn->immediate2.nbytes = 1; | ||
496 | } | ||
497 | done: | ||
498 | insn->immediate.got = 1; | ||
499 | } | ||
500 | |||
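/*
 * Illustration only (not part of this file): following the note in the
 * insn_get_immediate() kernel-doc above, a caller that wants the raw
 * unsigned value of a decoded immediate can mask off the sign extension;
 * using a 64-bit constant avoids shift overflow for 4-byte immediates:
 *
 *	unsigned long long mask = (1ULL << (insn.immediate.nbytes * 8)) - 1;
 *	unsigned long long uimm = (unsigned long long)insn.immediate.value & mask;
 */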
501 | /** | ||
502 | * insn_get_length() - Get the length of instruction | ||
503 | * @insn: &struct insn containing instruction | ||
504 | * | ||
505 | * If necessary, first collects the instruction up to and including the | ||
506 | * immediate bytes. | ||
507 | */ | ||
508 | void insn_get_length(struct insn *insn) | ||
509 | { | ||
510 | if (insn->length) | ||
511 | return; | ||
512 | if (!insn->immediate.got) | ||
513 | insn_get_immediate(insn); | ||
514 | insn->length = (unsigned char)((unsigned long)insn->next_byte | ||
515 | - (unsigned long)insn->kaddr); | ||
516 | } | ||
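Taken together, the helpers above form a pull-style decoder: each insn_get_*() call fetches whatever earlier fields it still needs. As a minimal usage sketch -- assuming the insn_init() initializer and struct insn declared in arch/x86/include/asm/insn.h elsewhere in this series, so treat the exact signature as an assumption -- finding the length of an instruction at a kernel address might look like:

#include <asm/insn.h>

/* Sketch only: insn_init()'s signature is assumed, not shown in this hunk. */
static int example_decode_len(const void *kaddr, int x86_64)
{
	struct insn insn;

	insn_init(&insn, kaddr, x86_64);	/* clears all the ->got flags */
	insn_get_length(&insn);			/* pulls in prefixes..immediates */
	return insn.length;
}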
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index 33a1e3ca22d8..41628b104b9e 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c | |||
@@ -71,14 +71,9 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
71 | } | 71 | } |
72 | EXPORT_SYMBOL(wrmsr_on_cpu); | 72 | EXPORT_SYMBOL(wrmsr_on_cpu); |
73 | 73 | ||
74 | /* rdmsr on a bunch of CPUs | 74 | static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no, |
75 | * | 75 | struct msr *msrs, |
76 | * @mask: which CPUs | 76 | void (*msr_func) (void *info)) |
77 | * @msr_no: which MSR | ||
78 | * @msrs: array of MSR values | ||
79 | * | ||
80 | */ | ||
81 | void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | ||
82 | { | 77 | { |
83 | struct msr_info rv; | 78 | struct msr_info rv; |
84 | int this_cpu; | 79 | int this_cpu; |
@@ -92,11 +87,23 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | |||
92 | this_cpu = get_cpu(); | 87 | this_cpu = get_cpu(); |
93 | 88 | ||
94 | if (cpumask_test_cpu(this_cpu, mask)) | 89 | if (cpumask_test_cpu(this_cpu, mask)) |
95 | __rdmsr_on_cpu(&rv); | 90 | msr_func(&rv); |
96 | 91 | ||
97 | smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); | 92 | smp_call_function_many(mask, msr_func, &rv, 1); |
98 | put_cpu(); | 93 | put_cpu(); |
99 | } | 94 | } |
95 | |||
96 | /* rdmsr on a bunch of CPUs | ||
97 | * | ||
98 | * @mask: which CPUs | ||
99 | * @msr_no: which MSR | ||
100 | * @msrs: array of MSR values | ||
101 | * | ||
102 | */ | ||
103 | void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) | ||
104 | { | ||
105 | __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu); | ||
106 | } | ||
100 | EXPORT_SYMBOL(rdmsr_on_cpus); | 107 | EXPORT_SYMBOL(rdmsr_on_cpus); |
101 | 108 | ||
102 | /* | 109 | /* |
@@ -107,24 +114,9 @@ EXPORT_SYMBOL(rdmsr_on_cpus); | |||
107 | * @msrs: array of MSR values | 114 | * @msrs: array of MSR values |
108 | * | 115 | * |
109 | */ | 116 | */ |
110 | void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | 117 | void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) |
111 | { | 118 | { |
112 | struct msr_info rv; | 119 | __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu); |
113 | int this_cpu; | ||
114 | |||
115 | memset(&rv, 0, sizeof(rv)); | ||
116 | |||
117 | rv.off = cpumask_first(mask); | ||
118 | rv.msrs = msrs; | ||
119 | rv.msr_no = msr_no; | ||
120 | |||
121 | this_cpu = get_cpu(); | ||
122 | |||
123 | if (cpumask_test_cpu(this_cpu, mask)) | ||
124 | __wrmsr_on_cpu(&rv); | ||
125 | |||
126 | smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); | ||
127 | put_cpu(); | ||
128 | } | 120 | } |
129 | EXPORT_SYMBOL(wrmsr_on_cpus); | 121 | EXPORT_SYMBOL(wrmsr_on_cpus); |
130 | 122 | ||
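Folding the two loops into __rwmsr_on_cpus() leaves rdmsr_on_cpus()/wrmsr_on_cpus() as thin wrappers that differ only in the SMP callback they pass. A hedged usage sketch -- the struct msr layout and the MSR number below are assumptions, not part of this hunk:

/* Sketch only: assumes struct msr from <asm/msr.h> carries the read value,
 * and that msrs[] has one slot per CPU starting at cpumask_first(mask),
 * which is what rv.off in __rwmsr_on_cpus() suggests. */
static void example_read_msr_everywhere(const struct cpumask *mask,
					struct msr *msrs)
{
	rdmsr_on_cpus(mask, 0xc0000103 /* MSR_TSC_AUX, assumed */, msrs);
}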
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1f118d462acc..e218d5df85ff 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -874,7 +874,7 @@ EXPORT_SYMBOL(copy_to_user); | |||
874 | * data to the requested size using zero bytes. | 874 | * data to the requested size using zero bytes. |
875 | */ | 875 | */ |
876 | unsigned long | 876 | unsigned long |
877 | copy_from_user(void *to, const void __user *from, unsigned long n) | 877 | _copy_from_user(void *to, const void __user *from, unsigned long n) |
878 | { | 878 | { |
879 | if (access_ok(VERIFY_READ, from, n)) | 879 | if (access_ok(VERIFY_READ, from, n)) |
880 | n = __copy_from_user(to, from, n); | 880 | n = __copy_from_user(to, from, n); |
@@ -882,4 +882,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) | |||
882 | memset(to, 0, n); | 882 | memset(to, 0, n); |
883 | return n; | 883 | return n; |
884 | } | 884 | } |
885 | EXPORT_SYMBOL(copy_from_user); | 885 | EXPORT_SYMBOL(_copy_from_user); |
886 | |||
887 | void copy_from_user_overflow(void) | ||
888 | { | ||
889 | WARN(1, "Buffer overflow detected!\n"); | ||
890 | } | ||
891 | EXPORT_SYMBOL(copy_from_user_overflow); | ||
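With the out-of-line routine renamed to _copy_from_user(), the copy_from_user() name is freed up for an inline wrapper that can reject destination buffers the compiler already knows are too small, reporting them through the new copy_from_user_overflow() hook. That wrapper is not in this hunk; a rough sketch of the intent, with __builtin_object_size() as the assumed size probe, might be:

/* Sketch only -- names and the exact check are assumptions for illustration;
 * the real wrapper would live in asm/uaccess_32.h, not here. */
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long sz = __builtin_object_size(to, 0);		/* -1 when unknown */

	if (likely(sz == -1 || sz >= (long)n))
		return _copy_from_user(to, from, n);

	copy_from_user_overflow();			/* WARN()s about it */
	return n;
}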
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt new file mode 100644 index 000000000000..a793da5e560e --- /dev/null +++ b/arch/x86/lib/x86-opcode-map.txt | |||
@@ -0,0 +1,893 @@ | |||
1 | # x86 Opcode Maps | ||
2 | # | ||
3 | #<Opcode maps> | ||
4 | # Table: table-name | ||
5 | # Referrer: escaped-name | ||
6 | # AVXcode: avx-code | ||
7 | # opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] | ||
8 | # (or) | ||
9 | # opcode: escape # escaped-name | ||
10 | # EndTable | ||
11 | # | ||
12 | #<group maps> | ||
13 | # GrpTable: GrpXXX | ||
14 | # reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] | ||
15 | # EndTable | ||
16 | # | ||
17 | # AVX Superscripts | ||
18 | # (VEX): this opcode can accept VEX prefix. | ||
19 | # (oVEX): this opcode requires VEX prefix. | ||
20 | # (o128): this opcode only supports 128bit VEX. | ||
21 | # (o256): this opcode only supports 256bit VEX. | ||
22 | # | ||
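# For example, in the one-byte table below, "00: ADD Eb,Gb" decodes opcode
# 0x00 as ADD with a ModRM byte (byte-sized r/m destination Eb, byte
# register source Gb); "0f: escape # 2-byte escape" sends the decoder to
# the two-byte table; and "80: Grp1 Eb,Ib (1A)" defers the mnemonic to the
# ModRM reg field via the GrpTable: Grp1 block near the end of this file.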
23 | |||
24 | Table: one byte opcode | ||
25 | Referrer: | ||
26 | AVXcode: | ||
27 | # 0x00 - 0x0f | ||
28 | 00: ADD Eb,Gb | ||
29 | 01: ADD Ev,Gv | ||
30 | 02: ADD Gb,Eb | ||
31 | 03: ADD Gv,Ev | ||
32 | 04: ADD AL,Ib | ||
33 | 05: ADD rAX,Iz | ||
34 | 06: PUSH ES (i64) | ||
35 | 07: POP ES (i64) | ||
36 | 08: OR Eb,Gb | ||
37 | 09: OR Ev,Gv | ||
38 | 0a: OR Gb,Eb | ||
39 | 0b: OR Gv,Ev | ||
40 | 0c: OR AL,Ib | ||
41 | 0d: OR rAX,Iz | ||
42 | 0e: PUSH CS (i64) | ||
43 | 0f: escape # 2-byte escape | ||
44 | # 0x10 - 0x1f | ||
45 | 10: ADC Eb,Gb | ||
46 | 11: ADC Ev,Gv | ||
47 | 12: ADC Gb,Eb | ||
48 | 13: ADC Gv,Ev | ||
49 | 14: ADC AL,Ib | ||
50 | 15: ADC rAX,Iz | ||
51 | 16: PUSH SS (i64) | ||
52 | 17: POP SS (i64) | ||
53 | 18: SBB Eb,Gb | ||
54 | 19: SBB Ev,Gv | ||
55 | 1a: SBB Gb,Eb | ||
56 | 1b: SBB Gv,Ev | ||
57 | 1c: SBB AL,Ib | ||
58 | 1d: SBB rAX,Iz | ||
59 | 1e: PUSH DS (i64) | ||
60 | 1f: POP DS (i64) | ||
61 | # 0x20 - 0x2f | ||
62 | 20: AND Eb,Gb | ||
63 | 21: AND Ev,Gv | ||
64 | 22: AND Gb,Eb | ||
65 | 23: AND Gv,Ev | ||
66 | 24: AND AL,Ib | ||
67 | 25: AND rAX,Iz | ||
68 | 26: SEG=ES (Prefix) | ||
69 | 27: DAA (i64) | ||
70 | 28: SUB Eb,Gb | ||
71 | 29: SUB Ev,Gv | ||
72 | 2a: SUB Gb,Eb | ||
73 | 2b: SUB Gv,Ev | ||
74 | 2c: SUB AL,Ib | ||
75 | 2d: SUB rAX,Iz | ||
76 | 2e: SEG=CS (Prefix) | ||
77 | 2f: DAS (i64) | ||
78 | # 0x30 - 0x3f | ||
79 | 30: XOR Eb,Gb | ||
80 | 31: XOR Ev,Gv | ||
81 | 32: XOR Gb,Eb | ||
82 | 33: XOR Gv,Ev | ||
83 | 34: XOR AL,Ib | ||
84 | 35: XOR rAX,Iz | ||
85 | 36: SEG=SS (Prefix) | ||
86 | 37: AAA (i64) | ||
87 | 38: CMP Eb,Gb | ||
88 | 39: CMP Ev,Gv | ||
89 | 3a: CMP Gb,Eb | ||
90 | 3b: CMP Gv,Ev | ||
91 | 3c: CMP AL,Ib | ||
92 | 3d: CMP rAX,Iz | ||
93 | 3e: SEG=DS (Prefix) | ||
94 | 3f: AAS (i64) | ||
95 | # 0x40 - 0x4f | ||
96 | 40: INC eAX (i64) | REX (o64) | ||
97 | 41: INC eCX (i64) | REX.B (o64) | ||
98 | 42: INC eDX (i64) | REX.X (o64) | ||
99 | 43: INC eBX (i64) | REX.XB (o64) | ||
100 | 44: INC eSP (i64) | REX.R (o64) | ||
101 | 45: INC eBP (i64) | REX.RB (o64) | ||
102 | 46: INC eSI (i64) | REX.RX (o64) | ||
103 | 47: INC eDI (i64) | REX.RXB (o64) | ||
104 | 48: DEC eAX (i64) | REX.W (o64) | ||
105 | 49: DEC eCX (i64) | REX.WB (o64) | ||
106 | 4a: DEC eDX (i64) | REX.WX (o64) | ||
107 | 4b: DEC eBX (i64) | REX.WXB (o64) | ||
108 | 4c: DEC eSP (i64) | REX.WR (o64) | ||
109 | 4d: DEC eBP (i64) | REX.WRB (o64) | ||
110 | 4e: DEC eSI (i64) | REX.WRX (o64) | ||
111 | 4f: DEC eDI (i64) | REX.WRXB (o64) | ||
112 | # 0x50 - 0x5f | ||
113 | 50: PUSH rAX/r8 (d64) | ||
114 | 51: PUSH rCX/r9 (d64) | ||
115 | 52: PUSH rDX/r10 (d64) | ||
116 | 53: PUSH rBX/r11 (d64) | ||
117 | 54: PUSH rSP/r12 (d64) | ||
118 | 55: PUSH rBP/r13 (d64) | ||
119 | 56: PUSH rSI/r14 (d64) | ||
120 | 57: PUSH rDI/r15 (d64) | ||
121 | 58: POP rAX/r8 (d64) | ||
122 | 59: POP rCX/r9 (d64) | ||
123 | 5a: POP rDX/r10 (d64) | ||
124 | 5b: POP rBX/r11 (d64) | ||
125 | 5c: POP rSP/r12 (d64) | ||
126 | 5d: POP rBP/r13 (d64) | ||
127 | 5e: POP rSI/r14 (d64) | ||
128 | 5f: POP rDI/r15 (d64) | ||
129 | # 0x60 - 0x6f | ||
130 | 60: PUSHA/PUSHAD (i64) | ||
131 | 61: POPA/POPAD (i64) | ||
132 | 62: BOUND Gv,Ma (i64) | ||
133 | 63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64) | ||
134 | 64: SEG=FS (Prefix) | ||
135 | 65: SEG=GS (Prefix) | ||
136 | 66: Operand-Size (Prefix) | ||
137 | 67: Address-Size (Prefix) | ||
138 | 68: PUSH Iz (d64) | ||
139 | 69: IMUL Gv,Ev,Iz | ||
140 | 6a: PUSH Ib (d64) | ||
141 | 6b: IMUL Gv,Ev,Ib | ||
142 | 6c: INS/INSB Yb,DX | ||
143 | 6d: INS/INSW/INSD Yz,DX | ||
144 | 6e: OUTS/OUTSB DX,Xb | ||
145 | 6f: OUTS/OUTSW/OUTSD DX,Xz | ||
146 | # 0x70 - 0x7f | ||
147 | 70: JO Jb | ||
148 | 71: JNO Jb | ||
149 | 72: JB/JNAE/JC Jb | ||
150 | 73: JNB/JAE/JNC Jb | ||
151 | 74: JZ/JE Jb | ||
152 | 75: JNZ/JNE Jb | ||
153 | 76: JBE/JNA Jb | ||
154 | 77: JNBE/JA Jb | ||
155 | 78: JS Jb | ||
156 | 79: JNS Jb | ||
157 | 7a: JP/JPE Jb | ||
158 | 7b: JNP/JPO Jb | ||
159 | 7c: JL/JNGE Jb | ||
160 | 7d: JNL/JGE Jb | ||
161 | 7e: JLE/JNG Jb | ||
162 | 7f: JNLE/JG Jb | ||
163 | # 0x80 - 0x8f | ||
164 | 80: Grp1 Eb,Ib (1A) | ||
165 | 81: Grp1 Ev,Iz (1A) | ||
166 | 82: Grp1 Eb,Ib (1A),(i64) | ||
167 | 83: Grp1 Ev,Ib (1A) | ||
168 | 84: TEST Eb,Gb | ||
169 | 85: TEST Ev,Gv | ||
170 | 86: XCHG Eb,Gb | ||
171 | 87: XCHG Ev,Gv | ||
172 | 88: MOV Eb,Gb | ||
173 | 89: MOV Ev,Gv | ||
174 | 8a: MOV Gb,Eb | ||
175 | 8b: MOV Gv,Ev | ||
176 | 8c: MOV Ev,Sw | ||
177 | 8d: LEA Gv,M | ||
178 | 8e: MOV Sw,Ew | ||
179 | 8f: Grp1A (1A) | POP Ev (d64) | ||
180 | # 0x90 - 0x9f | ||
181 | 90: NOP | PAUSE (F3) | XCHG r8,rAX | ||
182 | 91: XCHG rCX/r9,rAX | ||
183 | 92: XCHG rDX/r10,rAX | ||
184 | 93: XCHG rBX/r11,rAX | ||
185 | 94: XCHG rSP/r12,rAX | ||
186 | 95: XCHG rBP/r13,rAX | ||
187 | 96: XCHG rSI/r14,rAX | ||
188 | 97: XCHG rDI/r15,rAX | ||
189 | 98: CBW/CWDE/CDQE | ||
190 | 99: CWD/CDQ/CQO | ||
191 | 9a: CALLF Ap (i64) | ||
192 | 9b: FWAIT/WAIT | ||
193 | 9c: PUSHF/D/Q Fv (d64) | ||
194 | 9d: POPF/D/Q Fv (d64) | ||
195 | 9e: SAHF | ||
196 | 9f: LAHF | ||
197 | # 0xa0 - 0xaf | ||
198 | a0: MOV AL,Ob | ||
199 | a1: MOV rAX,Ov | ||
200 | a2: MOV Ob,AL | ||
201 | a3: MOV Ov,rAX | ||
202 | a4: MOVS/B Xb,Yb | ||
203 | a5: MOVS/W/D/Q Xv,Yv | ||
204 | a6: CMPS/B Xb,Yb | ||
205 | a7: CMPS/W/D Xv,Yv | ||
206 | a8: TEST AL,Ib | ||
207 | a9: TEST rAX,Iz | ||
208 | aa: STOS/B Yb,AL | ||
209 | ab: STOS/W/D/Q Yv,rAX | ||
210 | ac: LODS/B AL,Xb | ||
211 | ad: LODS/W/D/Q rAX,Xv | ||
212 | ae: SCAS/B AL,Yb | ||
213 | af: SCAS/W/D/Q rAX,Xv | ||
214 | # 0xb0 - 0xbf | ||
215 | b0: MOV AL/R8L,Ib | ||
216 | b1: MOV CL/R9L,Ib | ||
217 | b2: MOV DL/R10L,Ib | ||
218 | b3: MOV BL/R11L,Ib | ||
219 | b4: MOV AH/R12L,Ib | ||
220 | b5: MOV CH/R13L,Ib | ||
221 | b6: MOV DH/R14L,Ib | ||
222 | b7: MOV BH/R15L,Ib | ||
223 | b8: MOV rAX/r8,Iv | ||
224 | b9: MOV rCX/r9,Iv | ||
225 | ba: MOV rDX/r10,Iv | ||
226 | bb: MOV rBX/r11,Iv | ||
227 | bc: MOV rSP/r12,Iv | ||
228 | bd: MOV rBP/r13,Iv | ||
229 | be: MOV rSI/r14,Iv | ||
230 | bf: MOV rDI/r15,Iv | ||
231 | # 0xc0 - 0xcf | ||
232 | c0: Grp2 Eb,Ib (1A) | ||
233 | c1: Grp2 Ev,Ib (1A) | ||
234 | c2: RETN Iw (f64) | ||
235 | c3: RETN | ||
236 | c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix) | ||
237 | c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix) | ||
238 | c6: Grp11 Eb,Ib (1A) | ||
239 | c7: Grp11 Ev,Iz (1A) | ||
240 | c8: ENTER Iw,Ib | ||
241 | c9: LEAVE (d64) | ||
242 | ca: RETF Iw | ||
243 | cb: RETF | ||
244 | cc: INT3 | ||
245 | cd: INT Ib | ||
246 | ce: INTO (i64) | ||
247 | cf: IRET/D/Q | ||
248 | # 0xd0 - 0xdf | ||
249 | d0: Grp2 Eb,1 (1A) | ||
250 | d1: Grp2 Ev,1 (1A) | ||
251 | d2: Grp2 Eb,CL (1A) | ||
252 | d3: Grp2 Ev,CL (1A) | ||
253 | d4: AAM Ib (i64) | ||
254 | d5: AAD Ib (i64) | ||
255 | d6: | ||
256 | d7: XLAT/XLATB | ||
257 | d8: ESC | ||
258 | d9: ESC | ||
259 | da: ESC | ||
260 | db: ESC | ||
261 | dc: ESC | ||
262 | dd: ESC | ||
263 | de: ESC | ||
264 | df: ESC | ||
265 | # 0xe0 - 0xef | ||
266 | e0: LOOPNE/LOOPNZ Jb (f64) | ||
267 | e1: LOOPE/LOOPZ Jb (f64) | ||
268 | e2: LOOP Jb (f64) | ||
269 | e3: JrCXZ Jb (f64) | ||
270 | e4: IN AL,Ib | ||
271 | e5: IN eAX,Ib | ||
272 | e6: OUT Ib,AL | ||
273 | e7: OUT Ib,eAX | ||
274 | e8: CALL Jz (f64) | ||
275 | e9: JMP-near Jz (f64) | ||
276 | ea: JMP-far Ap (i64) | ||
277 | eb: JMP-short Jb (f64) | ||
278 | ec: IN AL,DX | ||
279 | ed: IN eAX,DX | ||
280 | ee: OUT DX,AL | ||
281 | ef: OUT DX,eAX | ||
282 | # 0xf0 - 0xff | ||
283 | f0: LOCK (Prefix) | ||
284 | f1: | ||
285 | f2: REPNE (Prefix) | ||
286 | f3: REP/REPE (Prefix) | ||
287 | f4: HLT | ||
288 | f5: CMC | ||
289 | f6: Grp3_1 Eb (1A) | ||
290 | f7: Grp3_2 Ev (1A) | ||
291 | f8: CLC | ||
292 | f9: STC | ||
293 | fa: CLI | ||
294 | fb: STI | ||
295 | fc: CLD | ||
296 | fd: STD | ||
297 | fe: Grp4 (1A) | ||
298 | ff: Grp5 (1A) | ||
299 | EndTable | ||
300 | |||
301 | Table: 2-byte opcode (0x0f) | ||
302 | Referrer: 2-byte escape | ||
303 | AVXcode: 1 | ||
304 | # 0x0f 0x00-0x0f | ||
305 | 00: Grp6 (1A) | ||
306 | 01: Grp7 (1A) | ||
307 | 02: LAR Gv,Ew | ||
308 | 03: LSL Gv,Ew | ||
309 | 04: | ||
310 | 05: SYSCALL (o64) | ||
311 | 06: CLTS | ||
312 | 07: SYSRET (o64) | ||
313 | 08: INVD | ||
314 | 09: WBINVD | ||
315 | 0a: | ||
316 | 0b: UD2 (1B) | ||
317 | 0c: | ||
318 | 0d: NOP Ev | GrpP | ||
319 | 0e: FEMMS | ||
320 | # 3DNow! uses the last imm byte as opcode extension. | ||
321 | 0f: 3DNow! Pq,Qq,Ib | ||
322 | # 0x0f 0x10-0x1f | ||
323 | 10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128) | ||
324 | 11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128) | ||
325 | 12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX) | ||
326 | 13: movlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128) | ||
327 | 14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX) | ||
328 | 15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX) | ||
329 | 16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlhps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX) | ||
330 | 17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128) | ||
331 | 18: Grp16 (1A) | ||
332 | 19: | ||
333 | 1a: | ||
334 | 1b: | ||
335 | 1c: | ||
336 | 1d: | ||
337 | 1e: | ||
338 | 1f: NOP Ev | ||
339 | # 0x0f 0x20-0x2f | ||
340 | 20: MOV Rd,Cd | ||
341 | 21: MOV Rd,Dd | ||
342 | 22: MOV Cd,Rd | ||
343 | 23: MOV Dd,Rd | ||
344 | 24: | ||
345 | 25: | ||
346 | 26: | ||
347 | 27: | ||
348 | 28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX) | ||
349 | 29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX) | ||
350 | 2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128) | ||
351 | 2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX) | ||
352 | 2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128) | ||
353 | 2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128) | ||
354 | 2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128) | ||
355 | 2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128) | ||
356 | # 0x0f 0x30-0x3f | ||
357 | 30: WRMSR | ||
358 | 31: RDTSC | ||
359 | 32: RDMSR | ||
360 | 33: RDPMC | ||
361 | 34: SYSENTER | ||
362 | 35: SYSEXIT | ||
363 | 36: | ||
364 | 37: GETSEC | ||
365 | 38: escape # 3-byte escape 1 | ||
366 | 39: | ||
367 | 3a: escape # 3-byte escape 2 | ||
368 | 3b: | ||
369 | 3c: | ||
370 | 3d: | ||
371 | 3e: | ||
372 | 3f: | ||
373 | # 0x0f 0x40-0x4f | ||
374 | 40: CMOVO Gv,Ev | ||
375 | 41: CMOVNO Gv,Ev | ||
376 | 42: CMOVB/C/NAE Gv,Ev | ||
377 | 43: CMOVAE/NB/NC Gv,Ev | ||
378 | 44: CMOVE/Z Gv,Ev | ||
379 | 45: CMOVNE/NZ Gv,Ev | ||
380 | 46: CMOVBE/NA Gv,Ev | ||
381 | 47: CMOVA/NBE Gv,Ev | ||
382 | 48: CMOVS Gv,Ev | ||
383 | 49: CMOVNS Gv,Ev | ||
384 | 4a: CMOVP/PE Gv,Ev | ||
385 | 4b: CMOVNP/PO Gv,Ev | ||
386 | 4c: CMOVL/NGE Gv,Ev | ||
387 | 4d: CMOVNL/GE Gv,Ev | ||
388 | 4e: CMOVLE/NG Gv,Ev | ||
389 | 4f: CMOVNLE/G Gv,Ev | ||
390 | # 0x0f 0x50-0x5f | ||
391 | 50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX) | ||
392 | 51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128) | ||
393 | 52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128) | ||
394 | 53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128) | ||
395 | 54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX) | ||
396 | 55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX) | ||
397 | 56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX) | ||
398 | 57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX) | ||
399 | 58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128) | ||
400 | 59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128) | ||
401 | 5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128) | ||
402 | 5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX) | ||
403 | 5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128) | ||
404 | 5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128) | ||
405 | 5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128) | ||
406 | 5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128) | ||
407 | # 0x0f 0x60-0x6f | ||
408 | 60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128) | ||
409 | 61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128) | ||
410 | 62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128) | ||
411 | 63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128) | ||
412 | 64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128) | ||
413 | 65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128) | ||
414 | 66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128) | ||
415 | 67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128) | ||
416 | 68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128) | ||
417 | 69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128) | ||
418 | 6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128) | ||
419 | 6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128) | ||
420 | 6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128) | ||
421 | 6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128) | ||
422 | 6e: movd/q Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128) | ||
423 | 6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX) | ||
424 | # 0x0f 0x70-0x7f | ||
425 | 70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128) | ||
426 | 71: Grp12 (1A) | ||
427 | 72: Grp13 (1A) | ||
428 | 73: Grp14 (1A) | ||
429 | 74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128) | ||
430 | 75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128) | ||
431 | 76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128) | ||
432 | 77: emms/vzeroupper/vzeroall (VEX) | ||
433 | 78: VMREAD Ed/q,Gd/q | ||
434 | 79: VMWRITE Gd/q,Ed/q | ||
435 | 7a: | ||
436 | 7b: | ||
437 | 7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX) | ||
438 | 7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX) | ||
439 | 7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128) | ||
440 | 7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX) | ||
441 | # 0x0f 0x80-0x8f | ||
442 | 80: JO Jz (f64) | ||
443 | 81: JNO Jz (f64) | ||
444 | 82: JB/JNAE/JC Jz (f64) | ||
445 | 83: JNB/JAE/JNC Jz (f64) | ||
446 | 84: JZ/JE Jz (f64) | ||
447 | 85: JNZ/JNE Jz (f64) | ||
448 | 86: JBE/JNA Jz (f64) | ||
449 | 87: JNBE/JA Jz (f64) | ||
450 | 88: JS Jz (f64) | ||
451 | 89: JNS Jz (f64) | ||
452 | 8a: JP/JPE Jz (f64) | ||
453 | 8b: JNP/JPO Jz (f64) | ||
454 | 8c: JL/JNGE Jz (f64) | ||
455 | 8d: JNL/JGE Jz (f64) | ||
456 | 8e: JLE/JNG Jz (f64) | ||
457 | 8f: JNLE/JG Jz (f64) | ||
458 | # 0x0f 0x90-0x9f | ||
459 | 90: SETO Eb | ||
460 | 91: SETNO Eb | ||
461 | 92: SETB/C/NAE Eb | ||
462 | 93: SETAE/NB/NC Eb | ||
463 | 94: SETE/Z Eb | ||
464 | 95: SETNE/NZ Eb | ||
465 | 96: SETBE/NA Eb | ||
466 | 97: SETA/NBE Eb | ||
467 | 98: SETS Eb | ||
468 | 99: SETNS Eb | ||
469 | 9a: SETP/PE Eb | ||
470 | 9b: SETNP/PO Eb | ||
471 | 9c: SETL/NGE Eb | ||
472 | 9d: SETNL/GE Eb | ||
473 | 9e: SETLE/NG Eb | ||
474 | 9f: SETNLE/G Eb | ||
475 | # 0x0f 0xa0-0xaf | ||
476 | a0: PUSH FS (d64) | ||
477 | a1: POP FS (d64) | ||
478 | a2: CPUID | ||
479 | a3: BT Ev,Gv | ||
480 | a4: SHLD Ev,Gv,Ib | ||
481 | a5: SHLD Ev,Gv,CL | ||
482 | a6: GrpPDLK | ||
483 | a7: GrpRNG | ||
484 | a8: PUSH GS (d64) | ||
485 | a9: POP GS (d64) | ||
486 | aa: RSM | ||
487 | ab: BTS Ev,Gv | ||
488 | ac: SHRD Ev,Gv,Ib | ||
489 | ad: SHRD Ev,Gv,CL | ||
490 | ae: Grp15 (1A),(1C) | ||
491 | af: IMUL Gv,Ev | ||
492 | # 0x0f 0xb0-0xbf | ||
493 | b0: CMPXCHG Eb,Gb | ||
494 | b1: CMPXCHG Ev,Gv | ||
495 | b2: LSS Gv,Mp | ||
496 | b3: BTR Ev,Gv | ||
497 | b4: LFS Gv,Mp | ||
498 | b5: LGS Gv,Mp | ||
499 | b6: MOVZX Gv,Eb | ||
500 | b7: MOVZX Gv,Ew | ||
501 | b8: JMPE | POPCNT Gv,Ev (F3) | ||
502 | b9: Grp10 (1A) | ||
503 | ba: Grp8 Ev,Ib (1A) | ||
504 | bb: BTC Ev,Gv | ||
505 | bc: BSF Gv,Ev | ||
506 | bd: BSR Gv,Ev | ||
507 | be: MOVSX Gv,Eb | ||
508 | bf: MOVSX Gv,Ew | ||
509 | # 0x0f 0xc0-0xcf | ||
510 | c0: XADD Eb,Gb | ||
511 | c1: XADD Ev,Gv | ||
512 | c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX) | ||
513 | c3: movnti Md/q,Gd/q | ||
514 | c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128) | ||
515 | c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128) | ||
516 | c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX) | ||
517 | c7: Grp9 (1A) | ||
518 | c8: BSWAP RAX/EAX/R8/R8D | ||
519 | c9: BSWAP RCX/ECX/R9/R9D | ||
520 | ca: BSWAP RDX/EDX/R10/R10D | ||
521 | cb: BSWAP RBX/EBX/R11/R11D | ||
522 | cc: BSWAP RSP/ESP/R12/R12D | ||
523 | cd: BSWAP RBP/EBP/R13/R13D | ||
524 | ce: BSWAP RSI/ESI/R14/R14D | ||
525 | cf: BSWAP RDI/EDI/R15/R15D | ||
526 | # 0x0f 0xd0-0xdf | ||
527 | d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX) | ||
528 | d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128) | ||
529 | d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128) | ||
530 | d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128) | ||
531 | d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128) | ||
532 | d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128) | ||
533 | d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) | ||
534 | d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128) | ||
535 | d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128) | ||
536 | d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128) | ||
537 | da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128) | ||
538 | db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128) | ||
539 | dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128) | ||
540 | dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128) | ||
541 | de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128) | ||
542 | df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128) | ||
543 | # 0x0f 0xe0-0xef | ||
544 | e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128) | ||
545 | e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128) | ||
546 | e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128) | ||
547 | e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128) | ||
548 | e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128) | ||
549 | e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128) | ||
550 | e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX) | ||
551 | e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX) | ||
552 | e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128) | ||
553 | e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128) | ||
554 | ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128) | ||
555 | eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128) | ||
556 | ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128) | ||
557 | ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128) | ||
558 | ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128) | ||
559 | ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128) | ||
560 | # 0x0f 0xf0-0xff | ||
561 | f0: lddqu Vdq,Mdq (F2),(VEX) | ||
562 | f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128) | ||
563 | f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128) | ||
564 | f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128) | ||
565 | f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128) | ||
566 | f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128) | ||
567 | f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128) | ||
568 | f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128) | ||
569 | f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128) | ||
570 | f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128) | ||
571 | fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128) | ||
572 | fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128) | ||
573 | fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128) | ||
574 | fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128) | ||
575 | fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128) | ||
576 | ff: | ||
577 | EndTable | ||
578 | |||
579 | Table: 3-byte opcode 1 (0x0f 0x38) | ||
580 | Referrer: 3-byte escape 1 | ||
581 | AVXcode: 2 | ||
582 | # 0x0f 0x38 0x00-0x0f | ||
583 | 00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128) | ||
584 | 01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128) | ||
585 | 02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128) | ||
586 | 03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128) | ||
587 | 04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128) | ||
588 | 05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128) | ||
589 | 06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128) | ||
590 | 07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128) | ||
591 | 08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128) | ||
592 | 09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128) | ||
593 | 0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128) | ||
594 | 0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128) | ||
595 | 0c: Vpermilps /r (66),(oVEX) | ||
596 | 0d: Vpermilpd /r (66),(oVEX) | ||
597 | 0e: vtestps /r (66),(oVEX) | ||
598 | 0f: vtestpd /r (66),(oVEX) | ||
599 | # 0x0f 0x38 0x10-0x1f | ||
600 | 10: pblendvb Vdq,Wdq (66) | ||
601 | 11: | ||
602 | 12: | ||
603 | 13: | ||
604 | 14: blendvps Vdq,Wdq (66) | ||
605 | 15: blendvpd Vdq,Wdq (66) | ||
606 | 16: | ||
607 | 17: ptest Vdq,Wdq (66),(VEX) | ||
608 | 18: vbroadcastss /r (66),(oVEX) | ||
609 | 19: vbroadcastsd /r (66),(oVEX),(o256) | ||
610 | 1a: vbroadcastf128 /r (66),(oVEX),(o256) | ||
611 | 1b: | ||
612 | 1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128) | ||
613 | 1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128) | ||
614 | 1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128) | ||
615 | 1f: | ||
616 | # 0x0f 0x38 0x20-0x2f | ||
617 | 20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128) | ||
618 | 21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128) | ||
619 | 22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128) | ||
620 | 23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128) | ||
621 | 24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128) | ||
622 | 25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128) | ||
623 | 26: | ||
624 | 27: | ||
625 | 28: pmuldq Vdq,Wdq (66),(VEX),(o128) | ||
626 | 29: pcmpeqq Vdq,Wdq (66),(VEX),(o128) | ||
627 | 2a: movntdqa Vdq,Mdq (66),(VEX),(o128) | ||
628 | 2b: packusdw Vdq,Wdq (66),(VEX),(o128) | ||
629 | 2c: vmaskmovps(ld) /r (66),(oVEX) | ||
630 | 2d: vmaskmovpd(ld) /r (66),(oVEX) | ||
631 | 2e: vmaskmovps(st) /r (66),(oVEX) | ||
632 | 2f: vmaskmovpd(st) /r (66),(oVEX) | ||
633 | # 0x0f 0x38 0x30-0x3f | ||
634 | 30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128) | ||
635 | 31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128) | ||
636 | 32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128) | ||
637 | 33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128) | ||
638 | 34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128) | ||
639 | 35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128) | ||
640 | 36: | ||
641 | 37: pcmpgtq Vdq,Wdq (66),(VEX),(o128) | ||
642 | 38: pminsb Vdq,Wdq (66),(VEX),(o128) | ||
643 | 39: pminsd Vdq,Wdq (66),(VEX),(o128) | ||
644 | 3a: pminuw Vdq,Wdq (66),(VEX),(o128) | ||
645 | 3b: pminud Vdq,Wdq (66),(VEX),(o128) | ||
646 | 3c: pmaxsb Vdq,Wdq (66),(VEX),(o128) | ||
647 | 3d: pmaxsd Vdq,Wdq (66),(VEX),(o128) | ||
648 | 3e: pmaxuw Vdq,Wdq (66),(VEX),(o128) | ||
649 | 3f: pmaxud Vdq,Wdq (66),(VEX),(o128) | ||
650 | # 0x0f 0x38 0x40-0x8f | ||
651 | 40: pmulld Vdq,Wdq (66),(VEX),(o128) | ||
652 | 41: phminposuw Vdq,Wdq (66),(VEX),(o128) | ||
653 | 80: INVEPT Gd/q,Mdq (66) | ||
654 | 81: INVVPID Gd/q,Mdq (66) | ||
655 | # 0x0f 0x38 0x90-0xbf (FMA) | ||
656 | 96: vfmaddsub132pd/ps /r (66),(VEX) | ||
657 | 97: vfmsubadd132pd/ps /r (66),(VEX) | ||
658 | 98: vfmadd132pd/ps /r (66),(VEX) | ||
659 | 99: vfmadd132sd/ss /r (66),(VEX),(o128) | ||
660 | 9a: vfmsub132pd/ps /r (66),(VEX) | ||
661 | 9b: vfmsub132sd/ss /r (66),(VEX),(o128) | ||
662 | 9c: vfnmadd132pd/ps /r (66),(VEX) | ||
663 | 9d: vfnmadd132sd/ss /r (66),(VEX),(o128) | ||
664 | 9e: vfnmsub132pd/ps /r (66),(VEX) | ||
665 | 9f: vfnmsub132sd/ss /r (66),(VEX),(o128) | ||
666 | a6: vfmaddsub213pd/ps /r (66),(VEX) | ||
667 | a7: vfmsubadd213pd/ps /r (66),(VEX) | ||
668 | a8: vfmadd213pd/ps /r (66),(VEX) | ||
669 | a9: vfmadd213sd/ss /r (66),(VEX),(o128) | ||
670 | aa: vfmsub213pd/ps /r (66),(VEX) | ||
671 | ab: vfmsub213sd/ss /r (66),(VEX),(o128) | ||
672 | ac: vfnmadd213pd/ps /r (66),(VEX) | ||
673 | ad: vfnmadd213sd/ss /r (66),(VEX),(o128) | ||
674 | ae: vfnmsub213pd/ps /r (66),(VEX) | ||
675 | af: vfnmsub213sd/ss /r (66),(VEX),(o128) | ||
676 | b6: vfmaddsub231pd/ps /r (66),(VEX) | ||
677 | b7: vfmsubadd231pd/ps /r (66),(VEX) | ||
678 | b8: vfmadd231pd/ps /r (66),(VEX) | ||
679 | b9: vfmadd231sd/ss /r (66),(VEX),(o128) | ||
680 | ba: vfmsub231pd/ps /r (66),(VEX) | ||
681 | bb: vfmsub231sd/ss /r (66),(VEX),(o128) | ||
682 | bc: vfnmadd231pd/ps /r (66),(VEX) | ||
683 | bd: vfnmadd231sd/ss /r (66),(VEX),(o128) | ||
684 | be: vfnmsub231pd/ps /r (66),(VEX) | ||
685 | bf: vfnmsub231sd/ss /r (66),(VEX),(o128) | ||
686 | # 0x0f 0x38 0xc0-0xff | ||
687 | db: aesimc Vdq,Wdq (66),(VEX),(o128) | ||
688 | dc: aesenc Vdq,Wdq (66),(VEX),(o128) | ||
689 | dd: aesenclast Vdq,Wdq (66),(VEX),(o128) | ||
690 | de: aesdec Vdq,Wdq (66),(VEX),(o128) | ||
691 | df: aesdeclast Vdq,Wdq (66),(VEX),(o128) | ||
692 | f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) | ||
693 | f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) | ||
694 | EndTable | ||
695 | |||
696 | Table: 3-byte opcode 2 (0x0f 0x3a) | ||
697 | Referrer: 3-byte escape 2 | ||
698 | AVXcode: 3 | ||
699 | # 0x0f 0x3a 0x00-0xff | ||
700 | 04: vpermilps /r,Ib (66),(oVEX) | ||
701 | 05: vpermilpd /r,Ib (66),(oVEX) | ||
702 | 06: vperm2f128 /r,Ib (66),(oVEX),(o256) | ||
703 | 08: roundps Vdq,Wdq,Ib (66),(VEX) | ||
704 | 09: roundpd Vdq,Wdq,Ib (66),(VEX) | ||
705 | 0a: roundss Vss,Wss,Ib (66),(VEX),(o128) | ||
706 | 0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128) | ||
707 | 0c: blendps Vdq,Wdq,Ib (66),(VEX) | ||
708 | 0d: blendpd Vdq,Wdq,Ib (66),(VEX) | ||
709 | 0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128) | ||
710 | 0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128) | ||
711 | 14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128) | ||
712 | 15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128) | ||
713 | 16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128) | ||
714 | 17: extractps Ed,Vdq,Ib (66),(VEX),(o128) | ||
715 | 18: vinsertf128 /r,Ib (66),(oVEX),(o256) | ||
716 | 19: vextractf128 /r,Ib (66),(oVEX),(o256) | ||
717 | 20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128) | ||
718 | 21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128) | ||
719 | 22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128) | ||
720 | 40: dpps Vdq,Wdq,Ib (66),(VEX) | ||
721 | 41: dppd Vdq,Wdq,Ib (66),(VEX),(o128) | ||
722 | 42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128) | ||
723 | 44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128) | ||
724 | 4a: vblendvps /r,Ib (66),(oVEX) | ||
725 | 4b: vblendvpd /r,Ib (66),(oVEX) | ||
726 | 4c: vpblendvb /r,Ib (66),(oVEX),(o128) | ||
727 | 60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128) | ||
728 | 61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128) | ||
729 | 62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128) | ||
730 | 63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128) | ||
731 | df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128) | ||
732 | EndTable | ||
733 | |||
734 | GrpTable: Grp1 | ||
735 | 0: ADD | ||
736 | 1: OR | ||
737 | 2: ADC | ||
738 | 3: SBB | ||
739 | 4: AND | ||
740 | 5: SUB | ||
741 | 6: XOR | ||
742 | 7: CMP | ||
743 | EndTable | ||
744 | |||
745 | GrpTable: Grp1A | ||
746 | 0: POP | ||
747 | EndTable | ||
748 | |||
749 | GrpTable: Grp2 | ||
750 | 0: ROL | ||
751 | 1: ROR | ||
752 | 2: RCL | ||
753 | 3: RCR | ||
754 | 4: SHL/SAL | ||
755 | 5: SHR | ||
756 | 6: | ||
757 | 7: SAR | ||
758 | EndTable | ||
759 | |||
760 | GrpTable: Grp3_1 | ||
761 | 0: TEST Eb,Ib | ||
762 | 1: | ||
763 | 2: NOT Eb | ||
764 | 3: NEG Eb | ||
765 | 4: MUL AL,Eb | ||
766 | 5: IMUL AL,Eb | ||
767 | 6: DIV AL,Eb | ||
768 | 7: IDIV AL,Eb | ||
769 | EndTable | ||
770 | |||
771 | GrpTable: Grp3_2 | ||
772 | 0: TEST Ev,Iz | ||
773 | 1: | ||
774 | 2: NOT Ev | ||
775 | 3: NEG Ev | ||
776 | 4: MUL rAX,Ev | ||
777 | 5: IMUL rAX,Ev | ||
778 | 6: DIV rAX,Ev | ||
779 | 7: IDIV rAX,Ev | ||
780 | EndTable | ||
781 | |||
782 | GrpTable: Grp4 | ||
783 | 0: INC Eb | ||
784 | 1: DEC Eb | ||
785 | EndTable | ||
786 | |||
787 | GrpTable: Grp5 | ||
788 | 0: INC Ev | ||
789 | 1: DEC Ev | ||
790 | 2: CALLN Ev (f64) | ||
791 | 3: CALLF Ep | ||
792 | 4: JMPN Ev (f64) | ||
793 | 5: JMPF Ep | ||
794 | 6: PUSH Ev (d64) | ||
795 | 7: | ||
796 | EndTable | ||
797 | |||
798 | GrpTable: Grp6 | ||
799 | 0: SLDT Rv/Mw | ||
800 | 1: STR Rv/Mw | ||
801 | 2: LLDT Ew | ||
802 | 3: LTR Ew | ||
803 | 4: VERR Ew | ||
804 | 5: VERW Ew | ||
805 | EndTable | ||
806 | |||
807 | GrpTable: Grp7 | ||
808 | 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | ||
809 | 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) | ||
810 | 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | ||
811 | 3: LIDT Ms | ||
812 | 4: SMSW Mw/Rv | ||
813 | 5: | ||
814 | 6: LMSW Ew | ||
815 | 7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B) | ||
816 | EndTable | ||
817 | |||
818 | GrpTable: Grp8 | ||
819 | 4: BT | ||
820 | 5: BTS | ||
821 | 6: BTR | ||
822 | 7: BTC | ||
823 | EndTable | ||
824 | |||
825 | GrpTable: Grp9 | ||
826 | 1: CMPXCHG8B/16B Mq/Mdq | ||
827 | 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | ||
828 | 7: VMPTRST Mq | ||
829 | EndTable | ||
830 | |||
831 | GrpTable: Grp10 | ||
832 | EndTable | ||
833 | |||
834 | GrpTable: Grp11 | ||
835 | 0: MOV | ||
836 | EndTable | ||
837 | |||
838 | GrpTable: Grp12 | ||
839 | 2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128) | ||
840 | 4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128) | ||
841 | 6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128) | ||
842 | EndTable | ||
843 | |||
844 | GrpTable: Grp13 | ||
845 | 2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128) | ||
846 | 4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128) | ||
847 | 6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128) | ||
848 | EndTable | ||
849 | |||
850 | GrpTable: Grp14 | ||
851 | 2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128) | ||
852 | 3: psrldq Udq,Ib (66),(11B),(VEX),(o128) | ||
853 | 6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128) | ||
854 | 7: pslldq Udq,Ib (66),(11B),(VEX),(o128) | ||
855 | EndTable | ||
856 | |||
857 | GrpTable: Grp15 | ||
858 | 0: fxsave | ||
859 | 1: fxrstor | ||
860 | 2: ldmxcsr (VEX) | ||
861 | 3: stmxcsr (VEX) | ||
862 | 4: XSAVE | ||
863 | 5: XRSTOR | lfence (11B) | ||
864 | 6: mfence (11B) | ||
865 | 7: clflush | sfence (11B) | ||
866 | EndTable | ||
867 | |||
868 | GrpTable: Grp16 | ||
869 | 0: prefetch NTA | ||
870 | 1: prefetch T0 | ||
871 | 2: prefetch T1 | ||
872 | 3: prefetch T2 | ||
873 | EndTable | ||
874 | |||
875 | # AMD's Prefetch Group | ||
876 | GrpTable: GrpP | ||
877 | 0: PREFETCH | ||
878 | 1: PREFETCHW | ||
879 | EndTable | ||
880 | |||
881 | GrpTable: GrpPDLK | ||
882 | 0: MONTMUL | ||
883 | 1: XSHA1 | ||
884 | 2: XSHA2 | ||
885 | EndTable | ||
886 | |||
887 | GrpTable: GrpRNG | ||
888 | 0: xstore-rng | ||
889 | 1: xcrypt-ecb | ||
890 | 2: xcrypt-cbc | ||
891 | 4: xcrypt-cfb | ||
892 | 5: xcrypt-ofb | ||
893 | EndTable | ||
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 61b41ca3b5a2..d0474ad2a6e5 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -35,34 +35,3 @@ int fixup_exception(struct pt_regs *regs) | |||
35 | 35 | ||
36 | return 0; | 36 | return 0; |
37 | } | 37 | } |
38 | |||
39 | #ifdef CONFIG_X86_64 | ||
40 | /* | ||
41 | * Need to defined our own search_extable on X86_64 to work around | ||
42 | * a B stepping K8 bug. | ||
43 | */ | ||
44 | const struct exception_table_entry * | ||
45 | search_extable(const struct exception_table_entry *first, | ||
46 | const struct exception_table_entry *last, | ||
47 | unsigned long value) | ||
48 | { | ||
49 | /* B stepping K8 bug */ | ||
50 | if ((value >> 32) == 0) | ||
51 | value |= 0xffffffffUL << 32; | ||
52 | |||
53 | while (first <= last) { | ||
54 | const struct exception_table_entry *mid; | ||
55 | long diff; | ||
56 | |||
57 | mid = (last - first) / 2 + first; | ||
58 | diff = mid->insn - value; | ||
59 | if (diff == 0) | ||
60 | return mid; | ||
61 | else if (diff < 0) | ||
62 | first = mid+1; | ||
63 | else | ||
64 | last = mid-1; | ||
65 | } | ||
66 | return NULL; | ||
67 | } | ||
68 | #endif | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f4cee9028cf0..f62777940dfb 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -38,7 +38,8 @@ enum x86_pf_error_code { | |||
38 | * Returns 0 if mmiotrace is disabled, or if the fault is not | 38 | * Returns 0 if mmiotrace is disabled, or if the fault is not |
39 | * handled by mmiotrace: | 39 | * handled by mmiotrace: |
40 | */ | 40 | */ |
41 | static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) | 41 | static inline int __kprobes |
42 | kmmio_fault(struct pt_regs *regs, unsigned long addr) | ||
42 | { | 43 | { |
43 | if (unlikely(is_kmmio_active())) | 44 | if (unlikely(is_kmmio_active())) |
44 | if (kmmio_handler(regs, addr) == 1) | 45 | if (kmmio_handler(regs, addr) == 1) |
@@ -46,7 +47,7 @@ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) | |||
46 | return 0; | 47 | return 0; |
47 | } | 48 | } |
48 | 49 | ||
49 | static inline int notify_page_fault(struct pt_regs *regs) | 50 | static inline int __kprobes notify_page_fault(struct pt_regs *regs) |
50 | { | 51 | { |
51 | int ret = 0; | 52 | int ret = 0; |
52 | 53 | ||
@@ -240,7 +241,7 @@ void vmalloc_sync_all(void) | |||
240 | * | 241 | * |
241 | * Handle a fault on the vmalloc or module mapping area | 242 | * Handle a fault on the vmalloc or module mapping area |
242 | */ | 243 | */ |
243 | static noinline int vmalloc_fault(unsigned long address) | 244 | static noinline __kprobes int vmalloc_fault(unsigned long address) |
244 | { | 245 | { |
245 | unsigned long pgd_paddr; | 246 | unsigned long pgd_paddr; |
246 | pmd_t *pmd_k; | 247 | pmd_t *pmd_k; |
@@ -357,7 +358,7 @@ void vmalloc_sync_all(void) | |||
357 | * | 358 | * |
358 | * This assumes no large pages in there. | 359 | * This assumes no large pages in there. |
359 | */ | 360 | */ |
360 | static noinline int vmalloc_fault(unsigned long address) | 361 | static noinline __kprobes int vmalloc_fault(unsigned long address) |
361 | { | 362 | { |
362 | pgd_t *pgd, *pgd_ref; | 363 | pgd_t *pgd, *pgd_ref; |
363 | pud_t *pud, *pud_ref; | 364 | pud_t *pud, *pud_ref; |
@@ -658,7 +659,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
658 | show_fault_oops(regs, error_code, address); | 659 | show_fault_oops(regs, error_code, address); |
659 | 660 | ||
660 | stackend = end_of_stack(tsk); | 661 | stackend = end_of_stack(tsk); |
661 | if (*stackend != STACK_END_MAGIC) | 662 | if (tsk != &init_task && *stackend != STACK_END_MAGIC) |
662 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); | 663 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); |
663 | 664 | ||
664 | tsk->thread.cr2 = address; | 665 | tsk->thread.cr2 = address; |
@@ -860,7 +861,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) | |||
860 | * There are no security implications to leaving a stale TLB when | 861 | * There are no security implications to leaving a stale TLB when |
861 | * increasing the permissions on a page. | 862 | * increasing the permissions on a page. |
862 | */ | 863 | */ |
863 | static noinline int | 864 | static noinline __kprobes int |
864 | spurious_fault(unsigned long error_code, unsigned long address) | 865 | spurious_fault(unsigned long error_code, unsigned long address) |
865 | { | 866 | { |
866 | pgd_t *pgd; | 867 | pgd_t *pgd; |
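Marking the early fault-path helpers __kprobes moves them into the protected .kprobes.text section, so a probe can never land inside the code that has to run to service a probe's own page fault. As a reminder -- quoted from memory, so treat it as an assumption -- the annotation expands to roughly:

/* Assumed definition, approximately as in <linux/compiler.h>: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
#else
# define __kprobes
#endif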
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 16ccbd77917f..11a4ad4d6253 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c | |||
@@ -540,8 +540,14 @@ kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args) | |||
540 | struct die_args *arg = args; | 540 | struct die_args *arg = args; |
541 | 541 | ||
542 | if (val == DIE_DEBUG && (arg->err & DR_STEP)) | 542 | if (val == DIE_DEBUG && (arg->err & DR_STEP)) |
543 | if (post_kmmio_handler(arg->err, arg->regs) == 1) | 543 | if (post_kmmio_handler(arg->err, arg->regs) == 1) { |
544 | /* | ||
545 | * Reset the BS bit in dr6 (pointed by args->err) to | ||
546 | * denote completion of processing | ||
547 | */ | ||
548 | (*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP; | ||
544 | return NOTIFY_STOP; | 549 | return NOTIFY_STOP; |
550 | } | ||
545 | 551 | ||
546 | return NOTIFY_DONE; | 552 | return NOTIFY_DONE; |
547 | } | 553 | } |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index dbb5381f7b3b..9d7ce96e5a5c 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -136,7 +136,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) | |||
136 | apicid_to_node[apic_id] = node; | 136 | apicid_to_node[apic_id] = node; |
137 | node_set(node, cpu_nodes_parsed); | 137 | node_set(node, cpu_nodes_parsed); |
138 | acpi_numa = 1; | 138 | acpi_numa = 1; |
139 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", | 139 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", |
140 | pxm, apic_id, node); | 140 | pxm, apic_id, node); |
141 | } | 141 | } |
142 | 142 | ||
@@ -170,7 +170,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | |||
170 | apicid_to_node[apic_id] = node; | 170 | apicid_to_node[apic_id] = node; |
171 | node_set(node, cpu_nodes_parsed); | 171 | node_set(node, cpu_nodes_parsed); |
172 | acpi_numa = 1; | 172 | acpi_numa = 1; |
173 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", | 173 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", |
174 | pxm, apic_id, node); | 174 | pxm, apic_id, node); |
175 | } | 175 | } |
176 | 176 | ||
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index 427fd1b56df5..8565d944f7cf 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c | |||
@@ -1,12 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi> | 2 | * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi> |
3 | */ | 3 | */ |
4 | |||
5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
6 | |||
4 | #include <linux/module.h> | 7 | #include <linux/module.h> |
5 | #include <linux/io.h> | 8 | #include <linux/io.h> |
6 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
7 | 10 | ||
8 | #define MODULE_NAME "testmmiotrace" | ||
9 | |||
10 | static unsigned long mmio_address; | 11 | static unsigned long mmio_address; |
11 | module_param(mmio_address, ulong, 0); | 12 | module_param(mmio_address, ulong, 0); |
12 | MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " | 13 | MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " |
@@ -30,7 +31,7 @@ static unsigned v32(unsigned i) | |||
30 | static void do_write_test(void __iomem *p) | 31 | static void do_write_test(void __iomem *p) |
31 | { | 32 | { |
32 | unsigned int i; | 33 | unsigned int i; |
33 | pr_info(MODULE_NAME ": write test.\n"); | 34 | pr_info("write test.\n"); |
34 | mmiotrace_printk("Write test.\n"); | 35 | mmiotrace_printk("Write test.\n"); |
35 | 36 | ||
36 | for (i = 0; i < 256; i++) | 37 | for (i = 0; i < 256; i++) |
@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p) | |||
47 | { | 48 | { |
48 | unsigned int i; | 49 | unsigned int i; |
49 | unsigned errs[3] = { 0 }; | 50 | unsigned errs[3] = { 0 }; |
50 | pr_info(MODULE_NAME ": read test.\n"); | 51 | pr_info("read test.\n"); |
51 | mmiotrace_printk("Read test.\n"); | 52 | mmiotrace_printk("Read test.\n"); |
52 | 53 | ||
53 | for (i = 0; i < 256; i++) | 54 | for (i = 0; i < 256; i++) |
@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p) | |||
68 | 69 | ||
69 | static void do_read_far_test(void __iomem *p) | 70 | static void do_read_far_test(void __iomem *p) |
70 | { | 71 | { |
71 | pr_info(MODULE_NAME ": read far test.\n"); | 72 | pr_info("read far test.\n"); |
72 | mmiotrace_printk("Read far test.\n"); | 73 | mmiotrace_printk("Read far test.\n"); |
73 | 74 | ||
74 | ioread32(p + read_far); | 75 | ioread32(p + read_far); |
@@ -78,7 +79,7 @@ static void do_test(unsigned long size) | |||
78 | { | 79 | { |
79 | void __iomem *p = ioremap_nocache(mmio_address, size); | 80 | void __iomem *p = ioremap_nocache(mmio_address, size); |
80 | if (!p) { | 81 | if (!p) { |
81 | pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); | 82 | pr_err("could not ioremap, aborting.\n"); |
82 | return; | 83 | return; |
83 | } | 84 | } |
84 | mmiotrace_printk("ioremap returned %p.\n", p); | 85 | mmiotrace_printk("ioremap returned %p.\n", p); |
@@ -94,24 +95,22 @@ static int __init init(void) | |||
94 | unsigned long size = (read_far) ? (8 << 20) : (16 << 10); | 95 | unsigned long size = (read_far) ? (8 << 20) : (16 << 10); |
95 | 96 | ||
96 | if (mmio_address == 0) { | 97 | if (mmio_address == 0) { |
97 | pr_err(MODULE_NAME ": you have to use the module argument " | 98 | pr_err("you have to use the module argument mmio_address.\n"); |
98 | "mmio_address.\n"); | 99 | pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n"); |
99 | pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS" | ||
100 | " YOU REALLY KNOW WHAT YOU ARE DOING!\n"); | ||
101 | return -ENXIO; | 100 | return -ENXIO; |
102 | } | 101 | } |
103 | 102 | ||
104 | pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI " | 103 | pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, " |
105 | "address space, and writing 16 kB of rubbish in there.\n", | 104 | "and writing 16 kB of rubbish in there.\n", |
106 | size >> 10, mmio_address); | 105 | size >> 10, mmio_address); |
107 | do_test(size); | 106 | do_test(size); |
108 | pr_info(MODULE_NAME ": All done.\n"); | 107 | pr_info("All done.\n"); |
109 | return 0; | 108 | return 0; |
110 | } | 109 | } |
111 | 110 | ||
112 | static void __exit cleanup(void) | 111 | static void __exit cleanup(void) |
113 | { | 112 | { |
114 | pr_debug(MODULE_NAME ": unloaded.\n"); | 113 | pr_debug("unloaded.\n"); |
115 | } | 114 | } |
116 | 115 | ||
117 | module_init(init); | 116 | module_init(init); |
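The testmmiotrace.c change above swaps the hand-rolled MODULE_NAME prefix for the kernel's pr_fmt() mechanism: printk.h expands pr_info(fmt, ...) to printk(KERN_INFO pr_fmt(fmt), ...), so a pr_fmt() definition placed before the includes prefixes every pr_*() message automatically. A minimal sketch of the idea (the demo module below is illustrative, not part of the patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init pr_fmt_demo_init(void)
{
	pr_info("write test.\n");	/* logged as "<modname>: write test." */
	return 0;
}

static void __exit pr_fmt_demo_exit(void)
{
	pr_debug("unloaded.\n");
}

module_init(pr_fmt_demo_init);
module_exit(pr_fmt_demo_exit);
MODULE_LICENSE("GPL");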
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 8aa85f17667e..0a979f3e5b8a 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/mce.h> | 18 | #include <asm/mce.h> |
19 | #include <asm/xcr.h> | 19 | #include <asm/xcr.h> |
20 | #include <asm/suspend.h> | 20 | #include <asm/suspend.h> |
21 | #include <asm/debugreg.h> | ||
21 | 22 | ||
22 | #ifdef CONFIG_X86_32 | 23 | #ifdef CONFIG_X86_32 |
23 | static struct saved_context saved_context; | 24 | static struct saved_context saved_context; |
@@ -142,31 +143,6 @@ static void fix_processor_context(void) | |||
142 | #endif | 143 | #endif |
143 | load_TR_desc(); /* This does ltr */ | 144 | load_TR_desc(); /* This does ltr */ |
144 | load_LDT(¤t->active_mm->context); /* This does lldt */ | 145 | load_LDT(¤t->active_mm->context); /* This does lldt */ |
145 | |||
146 | /* | ||
147 | * Now maybe reload the debug registers | ||
148 | */ | ||
149 | if (current->thread.debugreg7) { | ||
150 | #ifdef CONFIG_X86_32 | ||
151 | set_debugreg(current->thread.debugreg0, 0); | ||
152 | set_debugreg(current->thread.debugreg1, 1); | ||
153 | set_debugreg(current->thread.debugreg2, 2); | ||
154 | set_debugreg(current->thread.debugreg3, 3); | ||
155 | /* no 4 and 5 */ | ||
156 | set_debugreg(current->thread.debugreg6, 6); | ||
157 | set_debugreg(current->thread.debugreg7, 7); | ||
158 | #else | ||
159 | /* CONFIG_X86_64 */ | ||
160 | loaddebug(¤t->thread, 0); | ||
161 | loaddebug(¤t->thread, 1); | ||
162 | loaddebug(¤t->thread, 2); | ||
163 | loaddebug(¤t->thread, 3); | ||
164 | /* no 4 and 5 */ | ||
165 | loaddebug(¤t->thread, 6); | ||
166 | loaddebug(¤t->thread, 7); | ||
167 | #endif | ||
168 | } | ||
169 | |||
170 | } | 146 | } |
171 | 147 | ||
172 | /** | 148 | /** |
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile new file mode 100644 index 000000000000..f82082677337 --- /dev/null +++ b/arch/x86/tools/Makefile | |||
@@ -0,0 +1,31 @@ | |||
1 | PHONY += posttest | ||
2 | |||
3 | ifeq ($(KBUILD_VERBOSE),1) | ||
4 | posttest_verbose = -v | ||
5 | else | ||
6 | posttest_verbose = | ||
7 | endif | ||
8 | |||
9 | ifeq ($(CONFIG_64BIT),y) | ||
10 | posttest_64bit = -y | ||
11 | else | ||
12 | posttest_64bit = -n | ||
13 | endif | ||
14 | |||
15 | distill_awk = $(srctree)/arch/x86/tools/distill.awk | ||
16 | chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk | ||
17 | |||
18 | quiet_cmd_posttest = TEST $@ | ||
19 | cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) | ||
20 | |||
21 | posttest: $(obj)/test_get_len vmlinux | ||
22 | $(call cmd,posttest) | ||
23 | |||
24 | hostprogs-y := test_get_len | ||
25 | |||
26 | # -I needed for generated C source and for C source which is in the kernel tree. | ||
27 | HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ | ||
28 | |||
29 | # Dependencies are also needed. | ||
30 | $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c | ||
31 | |||
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk new file mode 100644 index 000000000000..0d13cd9fdcff --- /dev/null +++ b/arch/x86/tools/chkobjdump.awk | |||
@@ -0,0 +1,23 @@ | |||
1 | # GNU objdump version checker | ||
2 | # | ||
3 | # Usage: | ||
4 | # objdump -v | awk -f chkobjdump.awk | ||
5 | BEGIN { | ||
6 | # objdump version 2.19 or later is OK for the test. | ||
7 | od_ver = 2; | ||
8 | od_sver = 19; | ||
9 | } | ||
10 | |||
11 | /^GNU/ { | ||
12 | split($4, ver, "."); | ||
13 | if (ver[1] > od_ver || | ||
14 | (ver[1] == od_ver && ver[2] >= od_sver)) { | ||
15 | exit 1; | ||
16 | } else { | ||
17 | printf("Warning: objdump version %s is older than %d.%d\n", | ||
18 | $4, od_ver, od_sver); | ||
19 | print("Warning: Skipping posttest."); | ||
20 | # Logic is inverted, because we just skip the test without error. | ||
21 | exit 0; | ||
22 | } | ||
23 | } | ||
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/distill.awk new file mode 100644 index 000000000000..c13c0ee48ab4 --- /dev/null +++ b/arch/x86/tools/distill.awk | |||
@@ -0,0 +1,47 @@ | |||
1 | #!/bin/awk -f | ||
2 | # Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len | ||
3 | # Distills the disassembly as follows: | ||
4 | # - Removes all lines except the disassembled instructions. | ||
5 | # - For instructions that exceed 1 line (7 bytes), crams all the hex bytes | ||
6 | # into a single line. | ||
7 | # - Removes bad (or prefix-only) instructions | ||
8 | |||
9 | BEGIN { | ||
10 | prev_addr = "" | ||
11 | prev_hex = "" | ||
12 | prev_mnemonic = "" | ||
13 | bad_expr = "(\\(bad\\)|^rex|^.byte|^rep(z|nz)$|^lock$|^es$|^cs$|^ss$|^ds$|^fs$|^gs$|^data(16|32)$|^addr(16|32|64))" | ||
14 | fwait_expr = "^9b " | ||
15 | fwait_str="9b\tfwait" | ||
16 | } | ||
17 | |||
18 | /^ *[0-9a-f]+ <[^>]*>:/ { | ||
19 | # Symbol entry | ||
20 | printf("%s%s\n", $2, $1) | ||
21 | } | ||
22 | |||
23 | /^ *[0-9a-f]+:/ { | ||
24 | if (split($0, field, "\t") < 3) { | ||
25 | # This is a continuation of the same insn. | ||
26 | prev_hex = prev_hex field[2] | ||
27 | } else { | ||
28 | # Skip bad instructions | ||
29 | if (match(prev_mnemonic, bad_expr)) | ||
30 | prev_addr = "" | ||
31 | # Split fwait from other f* instructions | ||
32 | if (match(prev_hex, fwait_expr) && prev_mnemonic != "fwait") { | ||
33 | printf "%s\t%s\n", prev_addr, fwait_str | ||
34 | sub(fwait_expr, "", prev_hex) | ||
35 | } | ||
36 | if (prev_addr != "") | ||
37 | printf "%s\t%s\t%s\n", prev_addr, prev_hex, prev_mnemonic | ||
38 | prev_addr = field[1] | ||
39 | prev_hex = field[2] | ||
40 | prev_mnemonic = field[3] | ||
41 | } | ||
42 | } | ||
43 | |||
44 | END { | ||
45 | if (prev_addr != "") | ||
46 | printf "%s\t%s\t%s\n", prev_addr, prev_hex, prev_mnemonic | ||
47 | } | ||
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk new file mode 100644 index 000000000000..e34e92a28eb6 --- /dev/null +++ b/arch/x86/tools/gen-insn-attr-x86.awk | |||
@@ -0,0 +1,380 @@ | |||
1 | #!/bin/awk -f | ||
2 | # gen-insn-attr-x86.awk: Instruction attribute table generator | ||
3 | # Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
4 | # | ||
5 | # Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c | ||
6 | |||
7 | # Awk implementation sanity check | ||
8 | function check_awk_implement() { | ||
9 | if (!match("abc", "[[:lower:]]+")) | ||
10 | return "Your awk doesn't support charactor-class." | ||
11 | if (sprintf("%x", 0) != "0") | ||
12 | return "Your awk has a printf-format problem." | ||
13 | return "" | ||
14 | } | ||
15 | |||
16 | # Clear working vars | ||
17 | function clear_vars() { | ||
18 | delete table | ||
19 | delete lptable2 | ||
20 | delete lptable1 | ||
21 | delete lptable3 | ||
22 | eid = -1 # escape id | ||
23 | gid = -1 # group id | ||
24 | aid = -1 # AVX id | ||
25 | tname = "" | ||
26 | } | ||
27 | |||
28 | BEGIN { | ||
29 | # Implementation error checking | ||
30 | awkchecked = check_awk_implement() | ||
31 | if (awkchecked != "") { | ||
32 | print "Error: " awkchecked > "/dev/stderr" | ||
33 | print "Please try to use gawk." > "/dev/stderr" | ||
34 | exit 1 | ||
35 | } | ||
36 | |||
37 | # Setup generating tables | ||
38 | print "/* x86 opcode map generated from x86-opcode-map.txt */" | ||
39 | print "/* Do not change this code. */\n" | ||
40 | ggid = 1 | ||
41 | geid = 1 | ||
42 | gaid = 0 | ||
43 | delete etable | ||
44 | delete gtable | ||
45 | delete atable | ||
46 | |||
47 | opnd_expr = "^[[:alpha:]/]" | ||
48 | ext_expr = "^\\(" | ||
49 | sep_expr = "^\\|$" | ||
50 | group_expr = "^Grp[[:alnum:]]+" | ||
51 | |||
52 | imm_expr = "^[IJAO][[:lower:]]" | ||
53 | imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" | ||
54 | imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" | ||
55 | imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" | ||
56 | imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)" | ||
57 | imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)" | ||
58 | imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)" | ||
59 | imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" | ||
60 | imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" | ||
61 | imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" | ||
62 | imm_flag["Ob"] = "INAT_MOFFSET" | ||
63 | imm_flag["Ov"] = "INAT_MOFFSET" | ||
64 | |||
65 | modrm_expr = "^([CDEGMNPQRSUVW/][[:lower:]]+|NTA|T[012])" | ||
66 | force64_expr = "\\([df]64\\)" | ||
67 | rex_expr = "^REX(\\.[XRWB]+)*" | ||
68 | fpu_expr = "^ESC" # TODO | ||
69 | |||
70 | lprefix1_expr = "\\(66\\)" | ||
71 | lprefix2_expr = "\\(F3\\)" | ||
72 | lprefix3_expr = "\\(F2\\)" | ||
73 | max_lprefix = 4 | ||
74 | |||
75 | vexok_expr = "\\(VEX\\)" | ||
76 | vexonly_expr = "\\(oVEX\\)" | ||
77 | |||
78 | prefix_expr = "\\(Prefix\\)" | ||
79 | prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" | ||
80 | prefix_num["REPNE"] = "INAT_PFX_REPNE" | ||
81 | prefix_num["REP/REPE"] = "INAT_PFX_REPE" | ||
82 | prefix_num["LOCK"] = "INAT_PFX_LOCK" | ||
83 | prefix_num["SEG=CS"] = "INAT_PFX_CS" | ||
84 | prefix_num["SEG=DS"] = "INAT_PFX_DS" | ||
85 | prefix_num["SEG=ES"] = "INAT_PFX_ES" | ||
86 | prefix_num["SEG=FS"] = "INAT_PFX_FS" | ||
87 | prefix_num["SEG=GS"] = "INAT_PFX_GS" | ||
88 | prefix_num["SEG=SS"] = "INAT_PFX_SS" | ||
89 | prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" | ||
90 | prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2" | ||
91 | prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3" | ||
92 | |||
93 | clear_vars() | ||
94 | } | ||
95 | |||
96 | function semantic_error(msg) { | ||
97 | print "Semantic error at " NR ": " msg > "/dev/stderr" | ||
98 | exit 1 | ||
99 | } | ||
100 | |||
101 | function debug(msg) { | ||
102 | print "DEBUG: " msg | ||
103 | } | ||
104 | |||
105 | function array_size(arr, i,c) { | ||
106 | c = 0 | ||
107 | for (i in arr) | ||
108 | c++ | ||
109 | return c | ||
110 | } | ||
111 | |||
112 | /^Table:/ { | ||
113 | print "/* " $0 " */" | ||
114 | if (tname != "") | ||
115 | semantic_error("Hit Table: before EndTable:."); | ||
116 | } | ||
117 | |||
118 | /^Referrer:/ { | ||
119 | if (NF != 1) { | ||
120 | # escape opcode table | ||
121 | ref = "" | ||
122 | for (i = 2; i <= NF; i++) | ||
123 | ref = ref $i | ||
124 | eid = escape[ref] | ||
125 | tname = sprintf("inat_escape_table_%d", eid) | ||
126 | } | ||
127 | } | ||
128 | |||
129 | /^AVXcode:/ { | ||
130 | if (NF != 1) { | ||
131 | # AVX/escape opcode table | ||
132 | aid = $2 | ||
133 | if (gaid <= aid) | ||
134 | gaid = aid + 1 | ||
135 | if (tname == "") # AVX only opcode table | ||
136 | tname = sprintf("inat_avx_table_%d", $2) | ||
137 | } | ||
138 | if (aid == -1 && eid == -1) # primary opcode table | ||
139 | tname = "inat_primary_table" | ||
140 | } | ||
141 | |||
142 | /^GrpTable:/ { | ||
143 | print "/* " $0 " */" | ||
144 | if (!($2 in group)) | ||
145 | semantic_error("No group: " $2 ) | ||
146 | gid = group[$2] | ||
147 | tname = "inat_group_table_" gid | ||
148 | } | ||
149 | |||
150 | function print_table(tbl,name,fmt,n) | ||
151 | { | ||
152 | print "const insn_attr_t " name " = {" | ||
153 | for (i = 0; i < n; i++) { | ||
154 | id = sprintf(fmt, i) | ||
155 | if (tbl[id]) | ||
156 | print " [" id "] = " tbl[id] "," | ||
157 | } | ||
158 | print "};" | ||
159 | } | ||
160 | |||
161 | /^EndTable/ { | ||
162 | if (gid != -1) { | ||
163 | # print group tables | ||
164 | if (array_size(table) != 0) { | ||
165 | print_table(table, tname "[INAT_GROUP_TABLE_SIZE]", | ||
166 | "0x%x", 8) | ||
167 | gtable[gid,0] = tname | ||
168 | } | ||
169 | if (array_size(lptable1) != 0) { | ||
170 | print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]", | ||
171 | "0x%x", 8) | ||
172 | gtable[gid,1] = tname "_1" | ||
173 | } | ||
174 | if (array_size(lptable2) != 0) { | ||
175 | print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]", | ||
176 | "0x%x", 8) | ||
177 | gtable[gid,2] = tname "_2" | ||
178 | } | ||
179 | if (array_size(lptable3) != 0) { | ||
180 | print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]", | ||
181 | "0x%x", 8) | ||
182 | gtable[gid,3] = tname "_3" | ||
183 | } | ||
184 | } else { | ||
185 | # print primary/escaped tables | ||
186 | if (array_size(table) != 0) { | ||
187 | print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]", | ||
188 | "0x%02x", 256) | ||
189 | etable[eid,0] = tname | ||
190 | if (aid >= 0) | ||
191 | atable[aid,0] = tname | ||
192 | } | ||
193 | if (array_size(lptable1) != 0) { | ||
194 | print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]", | ||
195 | "0x%02x", 256) | ||
196 | etable[eid,1] = tname "_1" | ||
197 | if (aid >= 0) | ||
198 | atable[aid,1] = tname "_1" | ||
199 | } | ||
200 | if (array_size(lptable2) != 0) { | ||
201 | print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]", | ||
202 | "0x%02x", 256) | ||
203 | etable[eid,2] = tname "_2" | ||
204 | if (aid >= 0) | ||
205 | atable[aid,2] = tname "_2" | ||
206 | } | ||
207 | if (array_size(lptable3) != 0) { | ||
208 | print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]", | ||
209 | "0x%02x", 256) | ||
210 | etable[eid,3] = tname "_3" | ||
211 | if (aid >= 0) | ||
212 | atable[aid,3] = tname "_3" | ||
213 | } | ||
214 | } | ||
215 | print "" | ||
216 | clear_vars() | ||
217 | } | ||
218 | |||
219 | function add_flags(old,new) { | ||
220 | if (old && new) | ||
221 | return old " | " new | ||
222 | else if (old) | ||
223 | return old | ||
224 | else | ||
225 | return new | ||
226 | } | ||
227 | |||
228 | # convert operands to flags. | ||
229 | function convert_operands(opnd, i,imm,mod) | ||
230 | { | ||
231 | imm = null | ||
232 | mod = null | ||
233 | for (i in opnd) { | ||
234 | i = opnd[i] | ||
235 | if (match(i, imm_expr) == 1) { | ||
236 | if (!imm_flag[i]) | ||
237 | semantic_error("Unknown imm opnd: " i) | ||
238 | if (imm) { | ||
239 | if (i != "Ib") | ||
240 | semantic_error("Second IMM error") | ||
241 | imm = add_flags(imm, "INAT_SCNDIMM") | ||
242 | } else | ||
243 | imm = imm_flag[i] | ||
244 | } else if (match(i, modrm_expr)) | ||
245 | mod = "INAT_MODRM" | ||
246 | } | ||
247 | return add_flags(imm, mod) | ||
248 | } | ||
249 | |||
250 | /^[0-9a-f]+\:/ { | ||
251 | if (NR == 1) | ||
252 | next | ||
253 | # get index | ||
254 | idx = "0x" substr($1, 1, index($1,":") - 1) | ||
255 | if (idx in table) | ||
256 | semantic_error("Redefine " idx " in " tname) | ||
257 | |||
258 | # check if escaped opcode | ||
259 | if ("escape" == $2) { | ||
260 | if ($3 != "#") | ||
261 | semantic_error("No escaped name") | ||
262 | ref = "" | ||
263 | for (i = 4; i <= NF; i++) | ||
264 | ref = ref $i | ||
265 | if (ref in escape) | ||
266 | semantic_error("Redefine escape (" ref ")") | ||
267 | escape[ref] = geid | ||
268 | geid++ | ||
269 | table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")" | ||
270 | next | ||
271 | } | ||
272 | |||
273 | variant = null | ||
274 | # converts | ||
275 | i = 2 | ||
276 | while (i <= NF) { | ||
277 | opcode = $(i++) | ||
278 | delete opnds | ||
279 | ext = null | ||
280 | flags = null | ||
281 | opnd = null | ||
282 | # parse one opcode | ||
283 | if (match($i, opnd_expr)) { | ||
284 | opnd = $i | ||
285 | split($(i++), opnds, ",") | ||
286 | flags = convert_operands(opnds) | ||
287 | } | ||
288 | if (match($i, ext_expr)) | ||
289 | ext = $(i++) | ||
290 | if (match($i, sep_expr)) | ||
291 | i++ | ||
292 | else if (i < NF) | ||
293 | semantic_error($i " is not a separator") | ||
294 | |||
295 | # check if group opcode | ||
296 | if (match(opcode, group_expr)) { | ||
297 | if (!(opcode in group)) { | ||
298 | group[opcode] = ggid | ||
299 | ggid++ | ||
300 | } | ||
301 | flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")") | ||
302 | } | ||
303 | # check force(or default) 64bit | ||
304 | if (match(ext, force64_expr)) | ||
305 | flags = add_flags(flags, "INAT_FORCE64") | ||
306 | |||
307 | # check REX prefix | ||
308 | if (match(opcode, rex_expr)) | ||
309 | flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)") | ||
310 | |||
311 | # check coprocessor escape : TODO | ||
312 | if (match(opcode, fpu_expr)) | ||
313 | flags = add_flags(flags, "INAT_MODRM") | ||
314 | |||
315 | # check VEX only code | ||
316 | if (match(ext, vexonly_expr)) | ||
317 | flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") | ||
318 | |||
319 | # check VEX-capable (VEX-ok) code | ||
320 | if (match(ext, vexok_expr)) | ||
321 | flags = add_flags(flags, "INAT_VEXOK") | ||
322 | |||
323 | # check prefixes | ||
324 | if (match(ext, prefix_expr)) { | ||
325 | if (!prefix_num[opcode]) | ||
326 | semantic_error("Unknown prefix: " opcode) | ||
327 | flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")") | ||
328 | } | ||
329 | if (length(flags) == 0) | ||
330 | continue | ||
331 | # check if last prefix | ||
332 | if (match(ext, lprefix1_expr)) { | ||
333 | lptable1[idx] = add_flags(lptable1[idx],flags) | ||
334 | variant = "INAT_VARIANT" | ||
335 | } else if (match(ext, lprefix2_expr)) { | ||
336 | lptable2[idx] = add_flags(lptable2[idx],flags) | ||
337 | variant = "INAT_VARIANT" | ||
338 | } else if (match(ext, lprefix3_expr)) { | ||
339 | lptable3[idx] = add_flags(lptable3[idx],flags) | ||
340 | variant = "INAT_VARIANT" | ||
341 | } else { | ||
342 | table[idx] = add_flags(table[idx],flags) | ||
343 | } | ||
344 | } | ||
345 | if (variant) | ||
346 | table[idx] = add_flags(table[idx],variant) | ||
347 | } | ||
348 | |||
349 | END { | ||
350 | if (awkchecked != "") | ||
351 | exit 1 | ||
352 | # print escape opcode map's array | ||
353 | print "/* Escape opcode map array */" | ||
354 | print "const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1]" \ | ||
355 | "[INAT_LSTPFX_MAX + 1] = {" | ||
356 | for (i = 0; i < geid; i++) | ||
357 | for (j = 0; j < max_lprefix; j++) | ||
358 | if (etable[i,j]) | ||
359 | print " ["i"]["j"] = "etable[i,j]"," | ||
360 | print "};\n" | ||
361 | # print group opcode map's array | ||
362 | print "/* Group opcode map array */" | ||
363 | print "const insn_attr_t const *inat_group_tables[INAT_GRP_MAX + 1]"\ | ||
364 | "[INAT_LSTPFX_MAX + 1] = {" | ||
365 | for (i = 0; i < ggid; i++) | ||
366 | for (j = 0; j < max_lprefix; j++) | ||
367 | if (gtable[i,j]) | ||
368 | print " ["i"]["j"] = "gtable[i,j]"," | ||
369 | print "};\n" | ||
370 | # print AVX opcode map's array | ||
371 | print "/* AVX opcode map array */" | ||
372 | print "const insn_attr_t const *inat_avx_tables[X86_VEX_M_MAX + 1]"\ | ||
373 | "[INAT_LSTPFX_MAX + 1] = {" | ||
374 | for (i = 0; i < gaid; i++) | ||
375 | for (j = 0; j < max_lprefix; j++) | ||
376 | if (atable[i,j]) | ||
377 | print " ["i"]["j"] = "atable[i,j]"," | ||
378 | print "};" | ||
379 | } | ||
380 | |||
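For orientation, the END block above simply prints C array initializers, so the generated inat-tables.c is ordinary code of roughly the following shape (the opcode indices and flags below are illustrative only; the real entries come from x86-opcode-map.txt):

/* x86 opcode map generated from x86-opcode-map.txt */
/* Do not change this code. */

const insn_attr_t inat_primary_table[INAT_OPCODE_TABLE_SIZE] = {
	[0x0f] = INAT_MAKE_ESCAPE(1),
	[0x6a] = INAT_MAKE_IMM(INAT_IMM_BYTE) | INAT_FORCE64,
};

/* Escape opcode map array */
const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1][INAT_LSTPFX_MAX + 1] = {
	[1][0] = inat_escape_table_1,
};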
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c new file mode 100644 index 000000000000..d8214dc03fa7 --- /dev/null +++ b/arch/x86/tools/test_get_len.c | |||
@@ -0,0 +1,173 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) IBM Corporation, 2009 | ||
17 | */ | ||
18 | |||
19 | #include <stdlib.h> | ||
20 | #include <stdio.h> | ||
21 | #include <string.h> | ||
22 | #include <assert.h> | ||
23 | #include <unistd.h> | ||
24 | |||
25 | #define unlikely(cond) (cond) | ||
26 | |||
27 | #include <asm/insn.h> | ||
28 | #include <inat.c> | ||
29 | #include <insn.c> | ||
30 | |||
31 | /* | ||
32 | * Test of instruction analysis in general and insn_get_length() in | ||
33 | * particular. See if insn_get_length() and the disassembler agree | ||
34 | * on the length of each instruction in an ELF disassembly. | ||
35 | * | ||
36 | * Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len | ||
37 | */ | ||
38 | |||
39 | const char *prog; | ||
40 | static int verbose; | ||
41 | static int x86_64; | ||
42 | |||
43 | static void usage(void) | ||
44 | { | ||
45 | fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |" | ||
46 | " %s [-y|-n] [-v] \n", prog); | ||
47 | fprintf(stderr, "\t-y 64bit mode\n"); | ||
48 | fprintf(stderr, "\t-n 32bit mode\n"); | ||
49 | fprintf(stderr, "\t-v verbose mode\n"); | ||
50 | exit(1); | ||
51 | } | ||
52 | |||
53 | static void malformed_line(const char *line, int line_nr) | ||
54 | { | ||
55 | fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line); | ||
56 | exit(3); | ||
57 | } | ||
58 | |||
59 | static void dump_field(FILE *fp, const char *name, const char *indent, | ||
60 | struct insn_field *field) | ||
61 | { | ||
62 | fprintf(fp, "%s.%s = {\n", indent, name); | ||
63 | fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n", | ||
64 | indent, field->value, field->bytes[0], field->bytes[1], | ||
65 | field->bytes[2], field->bytes[3]); | ||
66 | fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent, | ||
67 | field->got, field->nbytes); | ||
68 | } | ||
69 | |||
70 | static void dump_insn(FILE *fp, struct insn *insn) | ||
71 | { | ||
72 | fprintf(fp, "Instruction = { \n"); | ||
73 | dump_field(fp, "prefixes", "\t", &insn->prefixes); | ||
74 | dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix); | ||
75 | dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix); | ||
76 | dump_field(fp, "opcode", "\t", &insn->opcode); | ||
77 | dump_field(fp, "modrm", "\t", &insn->modrm); | ||
78 | dump_field(fp, "sib", "\t", &insn->sib); | ||
79 | dump_field(fp, "displacement", "\t", &insn->displacement); | ||
80 | dump_field(fp, "immediate1", "\t", &insn->immediate1); | ||
81 | dump_field(fp, "immediate2", "\t", &insn->immediate2); | ||
82 | fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n", | ||
83 | insn->attr, insn->opnd_bytes, insn->addr_bytes); | ||
84 | fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n", | ||
85 | insn->length, insn->x86_64, insn->kaddr); | ||
86 | } | ||
87 | |||
88 | static void parse_args(int argc, char **argv) | ||
89 | { | ||
90 | int c; | ||
91 | prog = argv[0]; | ||
92 | while ((c = getopt(argc, argv, "ynv")) != -1) { | ||
93 | switch (c) { | ||
94 | case 'y': | ||
95 | x86_64 = 1; | ||
96 | break; | ||
97 | case 'n': | ||
98 | x86_64 = 0; | ||
99 | break; | ||
100 | case 'v': | ||
101 | verbose = 1; | ||
102 | break; | ||
103 | default: | ||
104 | usage(); | ||
105 | } | ||
106 | } | ||
107 | } | ||
108 | |||
109 | #define BUFSIZE 256 | ||
110 | |||
111 | int main(int argc, char **argv) | ||
112 | { | ||
113 | char line[BUFSIZE], sym[BUFSIZE] = "<unknown>"; | ||
114 | unsigned char insn_buf[16]; | ||
115 | struct insn insn; | ||
116 | int insns = 0, c; | ||
117 | int warnings = 0; | ||
118 | |||
119 | parse_args(argc, argv); | ||
120 | |||
121 | while (fgets(line, BUFSIZE, stdin)) { | ||
122 | char copy[BUFSIZE], *s, *tab1, *tab2; | ||
123 | int nb = 0; | ||
124 | unsigned int b; | ||
125 | |||
126 | if (line[0] == '<') { | ||
127 | /* Symbol line */ | ||
128 | strcpy(sym, line); | ||
129 | continue; | ||
130 | } | ||
131 | |||
132 | insns++; | ||
133 | memset(insn_buf, 0, 16); | ||
134 | strcpy(copy, line); | ||
135 | tab1 = strchr(copy, '\t'); | ||
136 | if (!tab1) | ||
137 | malformed_line(line, insns); | ||
138 | s = tab1 + 1; | ||
139 | s += strspn(s, " "); | ||
140 | tab2 = strchr(s, '\t'); | ||
141 | if (!tab2) | ||
142 | malformed_line(line, insns); | ||
143 | *tab2 = '\0'; /* Characters beyond tab2 aren't examined */ | ||
144 | while (s < tab2) { | ||
145 | if (sscanf(s, "%x", &b) == 1) { | ||
146 | insn_buf[nb++] = (unsigned char) b; | ||
147 | s += 3; | ||
148 | } else | ||
149 | break; | ||
150 | } | ||
151 | /* Decode an instruction */ | ||
152 | insn_init(&insn, insn_buf, x86_64); | ||
153 | insn_get_length(&insn); | ||
154 | if (insn.length != nb) { | ||
155 | warnings++; | ||
156 | fprintf(stderr, "Warning: %s found difference at %s\n", | ||
157 | prog, sym); | ||
158 | fprintf(stderr, "Warning: %s", line); | ||
159 | fprintf(stderr, "Warning: objdump says %d bytes, but " | ||
160 | "insn_get_length() says %d\n", nb, | ||
161 | insn.length); | ||
162 | if (verbose) | ||
163 | dump_insn(stderr, &insn); | ||
164 | } | ||
165 | } | ||
166 | if (warnings) | ||
167 | fprintf(stderr, "Warning: decoded and checked %d" | ||
168 | " instructions with %d warnings\n", insns, warnings); | ||
169 | else | ||
170 | fprintf(stderr, "Succeed: decoded and checked %d" | ||
171 | " instructions\n", insns); | ||
172 | return 0; | ||
173 | } | ||
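Stripped of the option parsing and the distill.awk input format, the check that test_get_len.c applies to each line boils down to the sketch below (assuming the same <asm/insn.h>/inat.c/insn.c setup and -I paths used by the harness above; the byte sequence is an arbitrary example, not taken from the patch):

#include <stdio.h>

#define unlikely(cond) (cond)

#include <asm/insn.h>
#include <inat.c>
#include <insn.c>

int main(void)
{
	/* e8 xx xx xx xx = call rel32, 5 bytes in both 32- and 64-bit mode */
	unsigned char buf[16] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };
	struct insn insn;

	insn_init(&insn, buf, 1);	/* third argument: decode as x86-64 */
	insn_get_length(&insn);
	printf("decoded length = %d\n", insn.length);	/* expect 5 */
	return 0;
}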
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index dc99e26f8e5b..1b392c9e8531 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -177,9 +177,6 @@ static struct ata_port_operations pcmcia_8bit_port_ops = { | |||
177 | .drain_fifo = pcmcia_8bit_drain_fifo, | 177 | .drain_fifo = pcmcia_8bit_drain_fifo, |
178 | }; | 178 | }; |
179 | 179 | ||
180 | #define CS_CHECK(fn, ret) \ | ||
181 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
182 | |||
183 | 180 | ||
184 | struct pcmcia_config_check { | 181 | struct pcmcia_config_check { |
185 | unsigned long ctl_base; | 182 | unsigned long ctl_base; |
@@ -252,7 +249,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) | |||
252 | struct ata_port *ap; | 249 | struct ata_port *ap; |
253 | struct ata_pcmcia_info *info; | 250 | struct ata_pcmcia_info *info; |
254 | struct pcmcia_config_check *stk = NULL; | 251 | struct pcmcia_config_check *stk = NULL; |
255 | int last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p; | 252 | int is_kme = 0, ret = -ENOMEM, p; |
256 | unsigned long io_base, ctl_base; | 253 | unsigned long io_base, ctl_base; |
257 | void __iomem *io_addr, *ctl_addr; | 254 | void __iomem *io_addr, *ctl_addr; |
258 | int n_ports = 1; | 255 | int n_ports = 1; |
@@ -271,7 +268,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) | |||
271 | pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 268 | pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
272 | pdev->io.IOAddrLines = 3; | 269 | pdev->io.IOAddrLines = 3; |
273 | pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 270 | pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
274 | pdev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
275 | pdev->conf.Attributes = CONF_ENABLE_IRQ; | 271 | pdev->conf.Attributes = CONF_ENABLE_IRQ; |
276 | pdev->conf.IntType = INT_MEMORY_AND_IO; | 272 | pdev->conf.IntType = INT_MEMORY_AND_IO; |
277 | 273 | ||
@@ -296,8 +292,13 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) | |||
296 | } | 292 | } |
297 | io_base = pdev->io.BasePort1; | 293 | io_base = pdev->io.BasePort1; |
298 | ctl_base = stk->ctl_base; | 294 | ctl_base = stk->ctl_base; |
299 | CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq)); | 295 | ret = pcmcia_request_irq(pdev, &pdev->irq); |
300 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf)); | 296 | if (ret) |
297 | goto failed; | ||
298 | |||
299 | ret = pcmcia_request_configuration(pdev, &pdev->conf); | ||
300 | if (ret) | ||
301 | goto failed; | ||
301 | 302 | ||
302 | /* iomap */ | 303 | /* iomap */ |
303 | ret = -ENOMEM; | 304 | ret = -ENOMEM; |
@@ -351,8 +352,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) | |||
351 | kfree(stk); | 352 | kfree(stk); |
352 | return 0; | 353 | return 0; |
353 | 354 | ||
354 | cs_failed: | ||
355 | cs_error(pdev, last_fn, last_ret); | ||
356 | failed: | 355 | failed: |
357 | kfree(stk); | 356 | kfree(stk); |
358 | info->ndev = 0; | 357 | info->ndev = 0; |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 965ece2c7e4d..13bb69d2abb3 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -735,6 +735,21 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector | |||
735 | part_stat_unlock(); | 735 | part_stat_unlock(); |
736 | } | 736 | } |
737 | 737 | ||
738 | /* | ||
739 | * Ensure we don't create aliases in virtually indexed (VI) caches | ||
740 | */ | ||
741 | static inline void | ||
742 | killalias(struct bio *bio) | ||
743 | { | ||
744 | struct bio_vec *bv; | ||
745 | int i; | ||
746 | |||
747 | if (bio_data_dir(bio) == READ) | ||
748 | __bio_for_each_segment(bv, bio, i, 0) { | ||
749 | flush_dcache_page(bv->bv_page); | ||
750 | } | ||
751 | } | ||
752 | |||
738 | void | 753 | void |
739 | aoecmd_ata_rsp(struct sk_buff *skb) | 754 | aoecmd_ata_rsp(struct sk_buff *skb) |
740 | { | 755 | { |
@@ -853,8 +868,12 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
853 | 868 | ||
854 | if (buf && --buf->nframesout == 0 && buf->resid == 0) { | 869 | if (buf && --buf->nframesout == 0 && buf->resid == 0) { |
855 | diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); | 870 | diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); |
856 | n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; | 871 | if (buf->flags & BUFFL_FAIL) |
857 | bio_endio(buf->bio, n); | 872 | bio_endio(buf->bio, -EIO); |
873 | else { | ||
874 | killalias(buf->bio); | ||
875 | bio_endio(buf->bio, 0); | ||
876 | } | ||
858 | mempool_free(buf, d->bufpool); | 877 | mempool_free(buf, d->bufpool); |
859 | } | 878 | } |
860 | 879 | ||
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index b0e569ba730d..2acdc605cb4b 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c | |||
@@ -867,11 +867,9 @@ static int bluecard_probe(struct pcmcia_device *link) | |||
867 | 867 | ||
868 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 868 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
869 | link->io.NumPorts1 = 8; | 869 | link->io.NumPorts1 = 8; |
870 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 870 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
871 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
872 | 871 | ||
873 | link->irq.Handler = bluecard_interrupt; | 872 | link->irq.Handler = bluecard_interrupt; |
874 | link->irq.Instance = info; | ||
875 | 873 | ||
876 | link->conf.Attributes = CONF_ENABLE_IRQ; | 874 | link->conf.Attributes = CONF_ENABLE_IRQ; |
877 | link->conf.IntType = INT_MEMORY_AND_IO; | 875 | link->conf.IntType = INT_MEMORY_AND_IO; |
@@ -905,22 +903,16 @@ static int bluecard_config(struct pcmcia_device *link) | |||
905 | break; | 903 | break; |
906 | } | 904 | } |
907 | 905 | ||
908 | if (i != 0) { | 906 | if (i != 0) |
909 | cs_error(link, RequestIO, i); | ||
910 | goto failed; | 907 | goto failed; |
911 | } | ||
912 | 908 | ||
913 | i = pcmcia_request_irq(link, &link->irq); | 909 | i = pcmcia_request_irq(link, &link->irq); |
914 | if (i != 0) { | 910 | if (i != 0) |
915 | cs_error(link, RequestIRQ, i); | ||
916 | link->irq.AssignedIRQ = 0; | 911 | link->irq.AssignedIRQ = 0; |
917 | } | ||
918 | 912 | ||
919 | i = pcmcia_request_configuration(link, &link->conf); | 913 | i = pcmcia_request_configuration(link, &link->conf); |
920 | if (i != 0) { | 914 | if (i != 0) |
921 | cs_error(link, RequestConfiguration, i); | ||
922 | goto failed; | 915 | goto failed; |
923 | } | ||
924 | 916 | ||
925 | if (bluecard_open(info) != 0) | 917 | if (bluecard_open(info) != 0) |
926 | goto failed; | 918 | goto failed; |
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index d58e22b9f06a..d814a2755ccb 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c | |||
@@ -659,11 +659,9 @@ static int bt3c_probe(struct pcmcia_device *link) | |||
659 | 659 | ||
660 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 660 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
661 | link->io.NumPorts1 = 8; | 661 | link->io.NumPorts1 = 8; |
662 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 662 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
663 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
664 | 663 | ||
665 | link->irq.Handler = bt3c_interrupt; | 664 | link->irq.Handler = bt3c_interrupt; |
666 | link->irq.Instance = info; | ||
667 | 665 | ||
668 | link->conf.Attributes = CONF_ENABLE_IRQ; | 666 | link->conf.Attributes = CONF_ENABLE_IRQ; |
669 | link->conf.IntType = INT_MEMORY_AND_IO; | 667 | link->conf.IntType = INT_MEMORY_AND_IO; |
@@ -740,21 +738,16 @@ static int bt3c_config(struct pcmcia_device *link) | |||
740 | goto found_port; | 738 | goto found_port; |
741 | 739 | ||
742 | BT_ERR("No usable port range found"); | 740 | BT_ERR("No usable port range found"); |
743 | cs_error(link, RequestIO, -ENODEV); | ||
744 | goto failed; | 741 | goto failed; |
745 | 742 | ||
746 | found_port: | 743 | found_port: |
747 | i = pcmcia_request_irq(link, &link->irq); | 744 | i = pcmcia_request_irq(link, &link->irq); |
748 | if (i != 0) { | 745 | if (i != 0) |
749 | cs_error(link, RequestIRQ, i); | ||
750 | link->irq.AssignedIRQ = 0; | 746 | link->irq.AssignedIRQ = 0; |
751 | } | ||
752 | 747 | ||
753 | i = pcmcia_request_configuration(link, &link->conf); | 748 | i = pcmcia_request_configuration(link, &link->conf); |
754 | if (i != 0) { | 749 | if (i != 0) |
755 | cs_error(link, RequestConfiguration, i); | ||
756 | goto failed; | 750 | goto failed; |
757 | } | ||
758 | 751 | ||
759 | if (bt3c_open(info) != 0) | 752 | if (bt3c_open(info) != 0) |
760 | goto failed; | 753 | goto failed; |
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index efd689a062eb..d339464dc15e 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c | |||
@@ -588,11 +588,9 @@ static int btuart_probe(struct pcmcia_device *link) | |||
588 | 588 | ||
589 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 589 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
590 | link->io.NumPorts1 = 8; | 590 | link->io.NumPorts1 = 8; |
591 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 591 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
592 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
593 | 592 | ||
594 | link->irq.Handler = btuart_interrupt; | 593 | link->irq.Handler = btuart_interrupt; |
595 | link->irq.Instance = info; | ||
596 | 594 | ||
597 | link->conf.Attributes = CONF_ENABLE_IRQ; | 595 | link->conf.Attributes = CONF_ENABLE_IRQ; |
598 | link->conf.IntType = INT_MEMORY_AND_IO; | 596 | link->conf.IntType = INT_MEMORY_AND_IO; |
@@ -669,21 +667,16 @@ static int btuart_config(struct pcmcia_device *link) | |||
669 | goto found_port; | 667 | goto found_port; |
670 | 668 | ||
671 | BT_ERR("No usable port range found"); | 669 | BT_ERR("No usable port range found"); |
672 | cs_error(link, RequestIO, -ENODEV); | ||
673 | goto failed; | 670 | goto failed; |
674 | 671 | ||
675 | found_port: | 672 | found_port: |
676 | i = pcmcia_request_irq(link, &link->irq); | 673 | i = pcmcia_request_irq(link, &link->irq); |
677 | if (i != 0) { | 674 | if (i != 0) |
678 | cs_error(link, RequestIRQ, i); | ||
679 | link->irq.AssignedIRQ = 0; | 675 | link->irq.AssignedIRQ = 0; |
680 | } | ||
681 | 676 | ||
682 | i = pcmcia_request_configuration(link, &link->conf); | 677 | i = pcmcia_request_configuration(link, &link->conf); |
683 | if (i != 0) { | 678 | if (i != 0) |
684 | cs_error(link, RequestConfiguration, i); | ||
685 | goto failed; | 679 | goto failed; |
686 | } | ||
687 | 680 | ||
688 | if (btuart_open(info) != 0) | 681 | if (btuart_open(info) != 0) |
689 | goto failed; | 682 | goto failed; |
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index b881a9cd8741..4f02a6f3c980 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c | |||
@@ -573,11 +573,9 @@ static int dtl1_probe(struct pcmcia_device *link) | |||
573 | 573 | ||
574 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 574 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
575 | link->io.NumPorts1 = 8; | 575 | link->io.NumPorts1 = 8; |
576 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 576 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
577 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
578 | 577 | ||
579 | link->irq.Handler = dtl1_interrupt; | 578 | link->irq.Handler = dtl1_interrupt; |
580 | link->irq.Instance = info; | ||
581 | 579 | ||
582 | link->conf.Attributes = CONF_ENABLE_IRQ; | 580 | link->conf.Attributes = CONF_ENABLE_IRQ; |
583 | link->conf.IntType = INT_MEMORY_AND_IO; | 581 | link->conf.IntType = INT_MEMORY_AND_IO; |
@@ -622,16 +620,12 @@ static int dtl1_config(struct pcmcia_device *link) | |||
622 | goto failed; | 620 | goto failed; |
623 | 621 | ||
624 | i = pcmcia_request_irq(link, &link->irq); | 622 | i = pcmcia_request_irq(link, &link->irq); |
625 | if (i != 0) { | 623 | if (i != 0) |
626 | cs_error(link, RequestIRQ, i); | ||
627 | link->irq.AssignedIRQ = 0; | 624 | link->irq.AssignedIRQ = 0; |
628 | } | ||
629 | 625 | ||
630 | i = pcmcia_request_configuration(link, &link->conf); | 626 | i = pcmcia_request_configuration(link, &link->conf); |
631 | if (i != 0) { | 627 | if (i != 0) |
632 | cs_error(link, RequestConfiguration, i); | ||
633 | goto failed; | 628 | goto failed; |
634 | } | ||
635 | 629 | ||
636 | if (dtl1_open(info) != 0) | 630 | if (dtl1_open(info) != 0) |
637 | goto failed; | 631 | goto failed; |
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index ccb1fa89de29..2fb3a480f6b0 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig | |||
@@ -56,9 +56,8 @@ config AGP_AMD | |||
56 | X on AMD Irongate, 761, and 762 chipsets. | 56 | X on AMD Irongate, 761, and 762 chipsets. |
57 | 57 | ||
58 | config AGP_AMD64 | 58 | config AGP_AMD64 |
59 | tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU | 59 | tristate "AMD Opteron/Athlon64 on-CPU GART support" |
60 | depends on AGP && X86 | 60 | depends on AGP && X86 |
61 | default y if GART_IOMMU | ||
62 | help | 61 | help |
63 | This option gives you AGP support for the GLX component of | 62 | This option gives you AGP support for the GLX component of |
64 | X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. | 63 | X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. |
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index c250a31efa53..2db4c0a29b05 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -23,8 +23,6 @@ | |||
23 | * All rights reserved. Licensed under dual BSD/GPL license. | 23 | * All rights reserved. Licensed under dual BSD/GPL license. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | /* #define PCMCIA_DEBUG 6 */ | ||
27 | |||
28 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
29 | #include <linux/module.h> | 27 | #include <linux/module.h> |
30 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
@@ -47,18 +45,17 @@ | |||
47 | 45 | ||
48 | /* #define ATR_CSUM */ | 46 | /* #define ATR_CSUM */ |
49 | 47 | ||
50 | #ifdef PCMCIA_DEBUG | 48 | #define reader_to_dev(x) (&x->p_dev->dev) |
51 | #define reader_to_dev(x) (&handle_to_dev(x->p_dev)) | 49 | |
52 | static int pc_debug = PCMCIA_DEBUG; | 50 | /* n (debug level) is ignored */ |
53 | module_param(pc_debug, int, 0600); | 51 | /* additional debug output may be enabled by re-compiling with |
54 | #define DEBUGP(n, rdr, x, args...) do { \ | 52 | * CM4000_DEBUG set */ |
55 | if (pc_debug >= (n)) \ | 53 | /* #define CM4000_DEBUG */ |
56 | dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ | 54 | #define DEBUGP(n, rdr, x, args...) do { \ |
57 | __func__ , ## args); \ | 55 | dev_dbg(reader_to_dev(rdr), "%s:" x, \ |
56 | __func__ , ## args); \ | ||
58 | } while (0) | 57 | } while (0) |
59 | #else | 58 | |
60 | #define DEBUGP(n, rdr, x, args...) | ||
61 | #endif | ||
62 | static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte"; | 59 | static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte"; |
63 | 60 | ||
64 | #define T_1SEC (HZ) | 61 | #define T_1SEC (HZ) |
@@ -174,14 +171,13 @@ static unsigned char fi_di_table[10][14] = { | |||
174 | /* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} | 171 | /* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} |
175 | }; | 172 | }; |
176 | 173 | ||
177 | #ifndef PCMCIA_DEBUG | 174 | #ifndef CM4000_DEBUG |
178 | #define xoutb outb | 175 | #define xoutb outb |
179 | #define xinb inb | 176 | #define xinb inb |
180 | #else | 177 | #else |
181 | static inline void xoutb(unsigned char val, unsigned short port) | 178 | static inline void xoutb(unsigned char val, unsigned short port) |
182 | { | 179 | { |
183 | if (pc_debug >= 7) | 180 | pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); |
184 | printk(KERN_DEBUG "outb(val=%.2x,port=%.4x)\n", val, port); | ||
185 | outb(val, port); | 181 | outb(val, port); |
186 | } | 182 | } |
187 | static inline unsigned char xinb(unsigned short port) | 183 | static inline unsigned char xinb(unsigned short port) |
@@ -189,8 +185,7 @@ static inline unsigned char xinb(unsigned short port) | |||
189 | unsigned char val; | 185 | unsigned char val; |
190 | 186 | ||
191 | val = inb(port); | 187 | val = inb(port); |
192 | if (pc_debug >= 7) | 188 | pr_debug("%.2x=inb(%.4x)\n", val, port); |
193 | printk(KERN_DEBUG "%.2x=inb(%.4x)\n", val, port); | ||
194 | 189 | ||
195 | return val; | 190 | return val; |
196 | } | 191 | } |
@@ -514,12 +509,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) | |||
514 | for (i = 0; i < 4; i++) { | 509 | for (i = 0; i < 4; i++) { |
515 | xoutb(i, REG_BUF_ADDR(iobase)); | 510 | xoutb(i, REG_BUF_ADDR(iobase)); |
516 | xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ | 511 | xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ |
517 | #ifdef PCMCIA_DEBUG | 512 | #ifdef CM4000_DEBUG |
518 | if (pc_debug >= 5) | 513 | pr_debug("0x%.2x ", dev->pts[i]); |
519 | printk("0x%.2x ", dev->pts[i]); | ||
520 | } | 514 | } |
521 | if (pc_debug >= 5) | 515 | pr_debug("\n"); |
522 | printk("\n"); | ||
523 | #else | 516 | #else |
524 | } | 517 | } |
525 | #endif | 518 | #endif |
@@ -579,14 +572,13 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) | |||
579 | pts_reply[i] = inb(REG_BUF_DATA(iobase)); | 572 | pts_reply[i] = inb(REG_BUF_DATA(iobase)); |
580 | } | 573 | } |
581 | 574 | ||
582 | #ifdef PCMCIA_DEBUG | 575 | #ifdef CM4000_DEBUG |
583 | DEBUGP(2, dev, "PTSreply: "); | 576 | DEBUGP(2, dev, "PTSreply: "); |
584 | for (i = 0; i < num_bytes_read; i++) { | 577 | for (i = 0; i < num_bytes_read; i++) { |
585 | if (pc_debug >= 5) | 578 | pr_debug("0x%.2x ", pts_reply[i]); |
586 | printk("0x%.2x ", pts_reply[i]); | ||
587 | } | 579 | } |
588 | printk("\n"); | 580 | pr_debug("\n"); |
589 | #endif /* PCMCIA_DEBUG */ | 581 | #endif /* CM4000_DEBUG */ |
590 | 582 | ||
591 | DEBUGP(5, dev, "Clear Tactive in Flags1\n"); | 583 | DEBUGP(5, dev, "Clear Tactive in Flags1\n"); |
592 | xoutb(0x20, REG_FLAGS1(iobase)); | 584 | xoutb(0x20, REG_FLAGS1(iobase)); |
@@ -655,7 +647,7 @@ static void terminate_monitor(struct cm4000_dev *dev) | |||
655 | 647 | ||
656 | DEBUGP(5, dev, "Delete timer\n"); | 648 | DEBUGP(5, dev, "Delete timer\n"); |
657 | del_timer_sync(&dev->timer); | 649 | del_timer_sync(&dev->timer); |
658 | #ifdef PCMCIA_DEBUG | 650 | #ifdef CM4000_DEBUG |
659 | dev->monitor_running = 0; | 651 | dev->monitor_running = 0; |
660 | #endif | 652 | #endif |
661 | 653 | ||
@@ -898,7 +890,7 @@ static void monitor_card(unsigned long p) | |||
898 | DEBUGP(4, dev, "ATR checksum (0x%.2x, should " | 890 | DEBUGP(4, dev, "ATR checksum (0x%.2x, should " |
899 | "be zero) failed\n", dev->atr_csum); | 891 | "be zero) failed\n", dev->atr_csum); |
900 | } | 892 | } |
901 | #ifdef PCMCIA_DEBUG | 893 | #ifdef CM4000_DEBUG |
902 | else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { | 894 | else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { |
903 | DEBUGP(4, dev, "ATR length error\n"); | 895 | DEBUGP(4, dev, "ATR length error\n"); |
904 | } else { | 896 | } else { |
@@ -1415,7 +1407,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
1415 | int size; | 1407 | int size; |
1416 | int rc; | 1408 | int rc; |
1417 | void __user *argp = (void __user *)arg; | 1409 | void __user *argp = (void __user *)arg; |
1418 | #ifdef PCMCIA_DEBUG | 1410 | #ifdef CM4000_DEBUG |
1419 | char *ioctl_names[CM_IOC_MAXNR + 1] = { | 1411 | char *ioctl_names[CM_IOC_MAXNR + 1] = { |
1420 | [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", | 1412 | [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", |
1421 | [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", | 1413 | [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", |
@@ -1423,9 +1415,9 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
1423 | [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", | 1415 | [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", |
1424 | [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", | 1416 | [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", |
1425 | }; | 1417 | }; |
1426 | #endif | ||
1427 | DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), | 1418 | DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), |
1428 | iminor(inode), ioctl_names[_IOC_NR(cmd)]); | 1419 | iminor(inode), ioctl_names[_IOC_NR(cmd)]); |
1420 | #endif | ||
1429 | 1421 | ||
1430 | lock_kernel(); | 1422 | lock_kernel(); |
1431 | rc = -ENODEV; | 1423 | rc = -ENODEV; |
@@ -1523,7 +1515,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
1523 | } | 1515 | } |
1524 | case CM_IOCARDOFF: | 1516 | case CM_IOCARDOFF: |
1525 | 1517 | ||
1526 | #ifdef PCMCIA_DEBUG | 1518 | #ifdef CM4000_DEBUG |
1527 | DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); | 1519 | DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); |
1528 | if (dev->flags0 & 0x01) { | 1520 | if (dev->flags0 & 0x01) { |
1529 | DEBUGP(4, dev, " Card inserted\n"); | 1521 | DEBUGP(4, dev, " Card inserted\n"); |
@@ -1625,18 +1617,9 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
1625 | 1617 | ||
1626 | } | 1618 | } |
1627 | break; | 1619 | break; |
1628 | #ifdef PCMCIA_DEBUG | 1620 | #ifdef CM4000_DEBUG |
1629 | case CM_IOSDBGLVL: /* set debug log level */ | 1621 | case CM_IOSDBGLVL: |
1630 | { | 1622 | rc = -ENOTTY; |
1631 | int old_pc_debug = 0; | ||
1632 | |||
1633 | old_pc_debug = pc_debug; | ||
1634 | if (copy_from_user(&pc_debug, argp, sizeof(int))) | ||
1635 | rc = -EFAULT; | ||
1636 | else if (old_pc_debug != pc_debug) | ||
1637 | DEBUGP(0, dev, "Changed debug log level " | ||
1638 | "to %i\n", pc_debug); | ||
1639 | } | ||
1640 | break; | 1623 | break; |
1641 | #endif | 1624 | #endif |
1642 | default: | 1625 | default: |
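The DEBUGP() rewrite above leans on the stock dev_dbg()/pr_debug() behaviour: with CONFIG_DYNAMIC_DEBUG the messages can be enabled per call site at run time, and otherwise they compile away unless DEBUG is defined, which is why the pc_debug module parameter can be dropped. A minimal sketch of that convention (the function and messages are illustrative, not the driver's code):

/* Define DEBUG before the includes to keep pr_debug()/dev_dbg() active in
 * builds without CONFIG_DYNAMIC_DEBUG; otherwise both are no-ops. */
#define DEBUG
#include <linux/kernel.h>
#include <linux/device.h>

static void dump_io(struct device *dev, unsigned char val, unsigned short port)
{
	pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
	dev_dbg(dev, "%s: wrote 0x%02x to port 0x%04x\n", __func__, val, port);
}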
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 4f0723b07974..a6a70e476bea 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c | |||
@@ -17,8 +17,6 @@ | |||
17 | * All rights reserved, Dual BSD/GPL Licensed. | 17 | * All rights reserved, Dual BSD/GPL Licensed. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | /* #define PCMCIA_DEBUG 6 */ | ||
21 | |||
22 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
23 | #include <linux/module.h> | 21 | #include <linux/module.h> |
24 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
@@ -41,18 +39,16 @@ | |||
41 | #include "cm4040_cs.h" | 39 | #include "cm4040_cs.h" |
42 | 40 | ||
43 | 41 | ||
44 | #ifdef PCMCIA_DEBUG | 42 | #define reader_to_dev(x) (&x->p_dev->dev) |
45 | #define reader_to_dev(x) (&handle_to_dev(x->p_dev)) | 43 | |
46 | static int pc_debug = PCMCIA_DEBUG; | 44 | /* n (debug level) is ignored */ |
47 | module_param(pc_debug, int, 0600); | 45 | /* additional debug output may be enabled by re-compiling with |
48 | #define DEBUGP(n, rdr, x, args...) do { \ | 46 | * CM4040_DEBUG set */ |
49 | if (pc_debug >= (n)) \ | 47 | /* #define CM4040_DEBUG */ |
50 | dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ | 48 | #define DEBUGP(n, rdr, x, args...) do { \ |
51 | __func__ , ##args); \ | 49 | dev_dbg(reader_to_dev(rdr), "%s:" x, \ |
50 | __func__ , ## args); \ | ||
52 | } while (0) | 51 | } while (0) |
53 | #else | ||
54 | #define DEBUGP(n, rdr, x, args...) | ||
55 | #endif | ||
56 | 52 | ||
57 | static char *version = | 53 | static char *version = |
58 | "OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte"; | 54 | "OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte"; |
@@ -90,14 +86,13 @@ struct reader_dev { | |||
90 | 86 | ||
91 | static struct pcmcia_device *dev_table[CM_MAX_DEV]; | 87 | static struct pcmcia_device *dev_table[CM_MAX_DEV]; |
92 | 88 | ||
93 | #ifndef PCMCIA_DEBUG | 89 | #ifndef CM4040_DEBUG |
94 | #define xoutb outb | 90 | #define xoutb outb |
95 | #define xinb inb | 91 | #define xinb inb |
96 | #else | 92 | #else |
97 | static inline void xoutb(unsigned char val, unsigned short port) | 93 | static inline void xoutb(unsigned char val, unsigned short port) |
98 | { | 94 | { |
99 | if (pc_debug >= 7) | 95 | pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); |
100 | printk(KERN_DEBUG "outb(val=%.2x,port=%.4x)\n", val, port); | ||
101 | outb(val, port); | 96 | outb(val, port); |
102 | } | 97 | } |
103 | 98 | ||
@@ -106,8 +101,7 @@ static inline unsigned char xinb(unsigned short port) | |||
106 | unsigned char val; | 101 | unsigned char val; |
107 | 102 | ||
108 | val = inb(port); | 103 | val = inb(port); |
109 | if (pc_debug >= 7) | 104 | pr_debug("%.2x=inb(%.4x)\n", val, port); |
110 | printk(KERN_DEBUG "%.2x=inb(%.4x)\n", val, port); | ||
111 | return val; | 105 | return val; |
112 | } | 106 | } |
113 | #endif | 107 | #endif |
@@ -260,23 +254,22 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf, | |||
260 | return -EIO; | 254 | return -EIO; |
261 | } | 255 | } |
262 | dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN); | 256 | dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN); |
263 | #ifdef PCMCIA_DEBUG | 257 | #ifdef CM4040_DEBUG |
264 | if (pc_debug >= 6) | 258 | pr_debug("%lu:%2x ", i, dev->r_buf[i]); |
265 | printk(KERN_DEBUG "%lu:%2x ", i, dev->r_buf[i]); | ||
266 | } | 259 | } |
267 | printk("\n"); | 260 | pr_debug("\n"); |
268 | #else | 261 | #else |
269 | } | 262 | } |
270 | #endif | 263 | #endif |
271 | 264 | ||
272 | bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]); | 265 | bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]); |
273 | 266 | ||
274 | DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read); | 267 | DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read); |
275 | 268 | ||
276 | min_bytes_to_read = min(count, bytes_to_read + 5); | 269 | min_bytes_to_read = min(count, bytes_to_read + 5); |
277 | min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); | 270 | min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); |
278 | 271 | ||
279 | DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read); | 272 | DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read); |
280 | 273 | ||
281 | for (i = 0; i < (min_bytes_to_read-5); i++) { | 274 | for (i = 0; i < (min_bytes_to_read-5); i++) { |
282 | rc = wait_for_bulk_in_ready(dev); | 275 | rc = wait_for_bulk_in_ready(dev); |
@@ -288,11 +281,10 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf, | |||
288 | return -EIO; | 281 | return -EIO; |
289 | } | 282 | } |
290 | dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN); | 283 | dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN); |
291 | #ifdef PCMCIA_DEBUG | 284 | #ifdef CM4040_DEBUG |
292 | if (pc_debug >= 6) | 285 | pr_debug("%lu:%2x ", i, dev->r_buf[i]); |
293 | printk(KERN_DEBUG "%lu:%2x ", i, dev->r_buf[i]); | ||
294 | } | 286 | } |
295 | printk("\n"); | 287 | pr_debug("\n"); |
296 | #else | 288 | #else |
297 | } | 289 | } |
298 | #endif | 290 | #endif |
@@ -547,7 +539,7 @@ static int cm4040_config_check(struct pcmcia_device *p_dev, | |||
547 | p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK; | 539 | p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK; |
548 | 540 | ||
549 | rc = pcmcia_request_io(p_dev, &p_dev->io); | 541 | rc = pcmcia_request_io(p_dev, &p_dev->io); |
550 | dev_printk(KERN_INFO, &handle_to_dev(p_dev), | 542 | dev_printk(KERN_INFO, &p_dev->dev, |
551 | "pcmcia_request_io returned 0x%x\n", rc); | 543 | "pcmcia_request_io returned 0x%x\n", rc); |
552 | return rc; | 544 | return rc; |
553 | } | 545 | } |
@@ -569,7 +561,7 @@ static int reader_config(struct pcmcia_device *link, int devno) | |||
569 | 561 | ||
570 | fail_rc = pcmcia_request_configuration(link, &link->conf); | 562 | fail_rc = pcmcia_request_configuration(link, &link->conf); |
571 | if (fail_rc != 0) { | 563 | if (fail_rc != 0) { |
572 | dev_printk(KERN_INFO, &handle_to_dev(link), | 564 | dev_printk(KERN_INFO, &link->dev, |
573 | "pcmcia_request_configuration failed 0x%x\n", | 565 | "pcmcia_request_configuration failed 0x%x\n", |
574 | fail_rc); | 566 | fail_rc); |
575 | goto cs_release; | 567 | goto cs_release; |
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c index 4c1820cad712..99cffdab1056 100644 --- a/drivers/char/pcmcia/ipwireless/hardware.c +++ b/drivers/char/pcmcia/ipwireless/hardware.c | |||
@@ -1213,12 +1213,12 @@ static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq, | |||
1213 | 1213 | ||
1214 | irqreturn_t ipwireless_interrupt(int irq, void *dev_id) | 1214 | irqreturn_t ipwireless_interrupt(int irq, void *dev_id) |
1215 | { | 1215 | { |
1216 | struct ipw_hardware *hw = dev_id; | 1216 | struct ipw_dev *ipw = dev_id; |
1217 | 1217 | ||
1218 | if (hw->hw_version == HW_VERSION_1) | 1218 | if (ipw->hardware->hw_version == HW_VERSION_1) |
1219 | return ipwireless_handle_v1_interrupt(irq, hw); | 1219 | return ipwireless_handle_v1_interrupt(irq, ipw->hardware); |
1220 | else | 1220 | else |
1221 | return ipwireless_handle_v2_v3_interrupt(irq, hw); | 1221 | return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware); |
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | static void flush_packets_to_hw(struct ipw_hardware *hw) | 1224 | static void flush_packets_to_hw(struct ipw_hardware *hw) |
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index 5216fce0c62d..dff24dae1485 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c | |||
@@ -65,10 +65,7 @@ static void signalled_reboot_work(struct work_struct *work_reboot) | |||
65 | struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev, | 65 | struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev, |
66 | work_reboot); | 66 | work_reboot); |
67 | struct pcmcia_device *link = ipw->link; | 67 | struct pcmcia_device *link = ipw->link; |
68 | int ret = pcmcia_reset_card(link->socket); | 68 | pcmcia_reset_card(link->socket); |
69 | |||
70 | if (ret != 0) | ||
71 | cs_error(link, ResetCard, ret); | ||
72 | } | 69 | } |
73 | 70 | ||
74 | static void signalled_reboot_callback(void *callback_data) | 71 | static void signalled_reboot_callback(void *callback_data) |
@@ -79,208 +76,127 @@ static void signalled_reboot_callback(void *callback_data) | |||
79 | schedule_work(&ipw->work_reboot); | 76 | schedule_work(&ipw->work_reboot); |
80 | } | 77 | } |
81 | 78 | ||
82 | static int config_ipwireless(struct ipw_dev *ipw) | 79 | static int ipwireless_probe(struct pcmcia_device *p_dev, |
80 | cistpl_cftable_entry_t *cfg, | ||
81 | cistpl_cftable_entry_t *dflt, | ||
82 | unsigned int vcc, | ||
83 | void *priv_data) | ||
83 | { | 84 | { |
84 | struct pcmcia_device *link = ipw->link; | 85 | struct ipw_dev *ipw = priv_data; |
85 | int ret; | 86 | struct resource *io_resource; |
86 | tuple_t tuple; | ||
87 | unsigned short buf[64]; | ||
88 | cisparse_t parse; | ||
89 | unsigned short cor_value; | ||
90 | memreq_t memreq_attr_memory; | 87 | memreq_t memreq_attr_memory; |
91 | memreq_t memreq_common_memory; | 88 | memreq_t memreq_common_memory; |
89 | int ret; | ||
92 | 90 | ||
93 | ipw->is_v2_card = 0; | 91 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
94 | 92 | p_dev->io.BasePort1 = cfg->io.win[0].base; | |
95 | tuple.Attributes = 0; | 93 | p_dev->io.NumPorts1 = cfg->io.win[0].len; |
96 | tuple.TupleData = (cisdata_t *) buf; | 94 | p_dev->io.IOAddrLines = 16; |
97 | tuple.TupleDataMax = sizeof(buf); | ||
98 | tuple.TupleOffset = 0; | ||
99 | |||
100 | tuple.DesiredTuple = RETURN_FIRST_TUPLE; | ||
101 | |||
102 | ret = pcmcia_get_first_tuple(link, &tuple); | ||
103 | |||
104 | while (ret == 0) { | ||
105 | ret = pcmcia_get_tuple_data(link, &tuple); | ||
106 | |||
107 | if (ret != 0) { | ||
108 | cs_error(link, GetTupleData, ret); | ||
109 | goto exit0; | ||
110 | } | ||
111 | ret = pcmcia_get_next_tuple(link, &tuple); | ||
112 | } | ||
113 | |||
114 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
115 | |||
116 | ret = pcmcia_get_first_tuple(link, &tuple); | ||
117 | |||
118 | if (ret != 0) { | ||
119 | cs_error(link, GetFirstTuple, ret); | ||
120 | goto exit0; | ||
121 | } | ||
122 | |||
123 | ret = pcmcia_get_tuple_data(link, &tuple); | ||
124 | |||
125 | if (ret != 0) { | ||
126 | cs_error(link, GetTupleData, ret); | ||
127 | goto exit0; | ||
128 | } | ||
129 | |||
130 | ret = pcmcia_parse_tuple(&tuple, &parse); | ||
131 | |||
132 | if (ret != 0) { | ||
133 | cs_error(link, ParseTuple, ret); | ||
134 | goto exit0; | ||
135 | } | ||
136 | |||
137 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
138 | link->io.BasePort1 = parse.cftable_entry.io.win[0].base; | ||
139 | link->io.NumPorts1 = parse.cftable_entry.io.win[0].len; | ||
140 | link->io.IOAddrLines = 16; | ||
141 | |||
142 | link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1; | ||
143 | 95 | ||
144 | /* 0x40 causes it to generate level mode interrupts. */ | 96 | /* 0x40 causes it to generate level mode interrupts. */ |
145 | /* 0x04 enables IREQ pin. */ | 97 | /* 0x04 enables IREQ pin. */ |
146 | cor_value = parse.cftable_entry.index | 0x44; | 98 | p_dev->conf.ConfigIndex = cfg->index | 0x44; |
147 | link->conf.ConfigIndex = cor_value; | 99 | ret = pcmcia_request_io(p_dev, &p_dev->io); |
100 | if (ret) | ||
101 | return ret; | ||
148 | 102 | ||
149 | /* IRQ and I/O settings */ | 103 | io_resource = request_region(p_dev->io.BasePort1, p_dev->io.NumPorts1, |
150 | tuple.DesiredTuple = CISTPL_CONFIG; | 104 | IPWIRELESS_PCCARD_NAME); |
151 | 105 | ||
152 | ret = pcmcia_get_first_tuple(link, &tuple); | 106 | if (cfg->mem.nwin == 0) |
107 | return 0; | ||
153 | 108 | ||
154 | if (ret != 0) { | 109 | ipw->request_common_memory.Attributes = |
155 | cs_error(link, GetFirstTuple, ret); | 110 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; |
156 | goto exit0; | 111 | ipw->request_common_memory.Base = cfg->mem.win[0].host_addr; |
157 | } | 112 | ipw->request_common_memory.Size = cfg->mem.win[0].len; |
113 | if (ipw->request_common_memory.Size < 0x1000) | ||
114 | ipw->request_common_memory.Size = 0x1000; | ||
115 | ipw->request_common_memory.AccessSpeed = 0; | ||
158 | 116 | ||
159 | ret = pcmcia_get_tuple_data(link, &tuple); | 117 | ret = pcmcia_request_window(p_dev, &ipw->request_common_memory, |
160 | 118 | &ipw->handle_common_memory); | |
161 | if (ret != 0) { | ||
162 | cs_error(link, GetTupleData, ret); | ||
163 | goto exit0; | ||
164 | } | ||
165 | 119 | ||
166 | ret = pcmcia_parse_tuple(&tuple, &parse); | 120 | if (ret != 0) |
121 | goto exit1; | ||
167 | 122 | ||
168 | if (ret != 0) { | 123 | memreq_common_memory.CardOffset = cfg->mem.win[0].card_addr; |
169 | cs_error(link, GetTupleData, ret); | 124 | memreq_common_memory.Page = 0; |
170 | goto exit0; | ||
171 | } | ||
172 | link->conf.Attributes = CONF_ENABLE_IRQ; | ||
173 | link->conf.ConfigBase = parse.config.base; | ||
174 | link->conf.Present = parse.config.rmask[0]; | ||
175 | link->conf.IntType = INT_MEMORY_AND_IO; | ||
176 | 125 | ||
177 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 126 | ret = pcmcia_map_mem_page(p_dev, ipw->handle_common_memory, |
178 | link->irq.Handler = ipwireless_interrupt; | 127 | &memreq_common_memory); |
179 | link->irq.Instance = ipw->hardware; | ||
180 | 128 | ||
181 | ret = pcmcia_request_io(link, &link->io); | 129 | if (ret != 0) |
130 | goto exit2; | ||
182 | 131 | ||
183 | if (ret != 0) { | 132 | ipw->is_v2_card = cfg->mem.win[0].len == 0x100; |
184 | cs_error(link, RequestIO, ret); | ||
185 | goto exit0; | ||
186 | } | ||
187 | 133 | ||
188 | request_region(link->io.BasePort1, link->io.NumPorts1, | 134 | ipw->common_memory = ioremap(ipw->request_common_memory.Base, |
135 | ipw->request_common_memory.Size); | ||
136 | request_mem_region(ipw->request_common_memory.Base, | ||
137 | ipw->request_common_memory.Size, | ||
189 | IPWIRELESS_PCCARD_NAME); | 138 | IPWIRELESS_PCCARD_NAME); |
190 | 139 | ||
191 | /* memory settings */ | 140 | ipw->request_attr_memory.Attributes = |
192 | 141 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; | |
193 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | 142 | ipw->request_attr_memory.Base = 0; |
194 | 143 | ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */ | |
195 | ret = pcmcia_get_first_tuple(link, &tuple); | 144 | ipw->request_attr_memory.AccessSpeed = 0; |
196 | |||
197 | if (ret != 0) { | ||
198 | cs_error(link, GetFirstTuple, ret); | ||
199 | goto exit1; | ||
200 | } | ||
201 | |||
202 | ret = pcmcia_get_tuple_data(link, &tuple); | ||
203 | 145 | ||
204 | if (ret != 0) { | 146 | ret = pcmcia_request_window(p_dev, &ipw->request_attr_memory, |
205 | cs_error(link, GetTupleData, ret); | 147 | &ipw->handle_attr_memory); |
206 | goto exit1; | ||
207 | } | ||
208 | |||
209 | ret = pcmcia_parse_tuple(&tuple, &parse); | ||
210 | |||
211 | if (ret != 0) { | ||
212 | cs_error(link, ParseTuple, ret); | ||
213 | goto exit1; | ||
214 | } | ||
215 | 148 | ||
216 | if (parse.cftable_entry.mem.nwin > 0) { | 149 | if (ret != 0) |
217 | ipw->request_common_memory.Attributes = | 150 | goto exit2; |
218 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; | ||
219 | ipw->request_common_memory.Base = | ||
220 | parse.cftable_entry.mem.win[0].host_addr; | ||
221 | ipw->request_common_memory.Size = parse.cftable_entry.mem.win[0].len; | ||
222 | if (ipw->request_common_memory.Size < 0x1000) | ||
223 | ipw->request_common_memory.Size = 0x1000; | ||
224 | ipw->request_common_memory.AccessSpeed = 0; | ||
225 | |||
226 | ret = pcmcia_request_window(&link, &ipw->request_common_memory, | ||
227 | &ipw->handle_common_memory); | ||
228 | 151 | ||
229 | if (ret != 0) { | 152 | memreq_attr_memory.CardOffset = 0; |
230 | cs_error(link, RequestWindow, ret); | 153 | memreq_attr_memory.Page = 0; |
231 | goto exit1; | ||
232 | } | ||
233 | 154 | ||
234 | memreq_common_memory.CardOffset = | 155 | ret = pcmcia_map_mem_page(p_dev, ipw->handle_attr_memory, |
235 | parse.cftable_entry.mem.win[0].card_addr; | 156 | &memreq_attr_memory); |
236 | memreq_common_memory.Page = 0; | ||
237 | 157 | ||
238 | ret = pcmcia_map_mem_page(ipw->handle_common_memory, | 158 | if (ret != 0) |
239 | &memreq_common_memory); | 159 | goto exit3; |
240 | 160 | ||
241 | if (ret != 0) { | 161 | ipw->attr_memory = ioremap(ipw->request_attr_memory.Base, |
242 | cs_error(link, MapMemPage, ret); | 162 | ipw->request_attr_memory.Size); |
243 | goto exit1; | 163 | request_mem_region(ipw->request_attr_memory.Base, |
244 | } | 164 | ipw->request_attr_memory.Size, IPWIRELESS_PCCARD_NAME); |
245 | 165 | ||
246 | ipw->is_v2_card = | 166 | return 0; |
247 | parse.cftable_entry.mem.win[0].len == 0x100; | ||
248 | 167 | ||
249 | ipw->common_memory = ioremap(ipw->request_common_memory.Base, | 168 | exit3: |
169 | pcmcia_release_window(p_dev, ipw->handle_attr_memory); | ||
170 | exit2: | ||
171 | if (ipw->common_memory) { | ||
172 | release_mem_region(ipw->request_common_memory.Base, | ||
250 | ipw->request_common_memory.Size); | 173 | ipw->request_common_memory.Size); |
251 | request_mem_region(ipw->request_common_memory.Base, | 174 | iounmap(ipw->common_memory); |
252 | ipw->request_common_memory.Size, IPWIRELESS_PCCARD_NAME); | 175 | pcmcia_release_window(p_dev, ipw->handle_common_memory); |
253 | 176 | } else | |
254 | ipw->request_attr_memory.Attributes = | 177 | pcmcia_release_window(p_dev, ipw->handle_common_memory); |
255 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; | 178 | exit1: |
256 | ipw->request_attr_memory.Base = 0; | 179 | release_resource(io_resource); |
257 | ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */ | 180 | pcmcia_disable_device(p_dev); |
258 | ipw->request_attr_memory.AccessSpeed = 0; | 181 | return -1; |
259 | 182 | } | |
260 | ret = pcmcia_request_window(&link, &ipw->request_attr_memory, | ||
261 | &ipw->handle_attr_memory); | ||
262 | 183 | ||
263 | if (ret != 0) { | 184 | static int config_ipwireless(struct ipw_dev *ipw) |
264 | cs_error(link, RequestWindow, ret); | 185 | { |
265 | goto exit2; | 186 | struct pcmcia_device *link = ipw->link; |
266 | } | 187 | int ret = 0; |
267 | 188 | ||
268 | memreq_attr_memory.CardOffset = 0; | 189 | ipw->is_v2_card = 0; |
269 | memreq_attr_memory.Page = 0; | ||
270 | 190 | ||
271 | ret = pcmcia_map_mem_page(ipw->handle_attr_memory, | 191 | ret = pcmcia_loop_config(link, ipwireless_probe, ipw); |
272 | &memreq_attr_memory); | 192 | if (ret != 0) |
193 | return ret; | ||
273 | 194 | ||
274 | if (ret != 0) { | 195 | link->conf.Attributes = CONF_ENABLE_IRQ; |
275 | cs_error(link, MapMemPage, ret); | 196 | link->conf.IntType = INT_MEMORY_AND_IO; |
276 | goto exit2; | ||
277 | } | ||
278 | 197 | ||
279 | ipw->attr_memory = ioremap(ipw->request_attr_memory.Base, | 198 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
280 | ipw->request_attr_memory.Size); | 199 | link->irq.Handler = ipwireless_interrupt; |
281 | request_mem_region(ipw->request_attr_memory.Base, ipw->request_attr_memory.Size, | ||
282 | IPWIRELESS_PCCARD_NAME); | ||
283 | } | ||
284 | 200 | ||
285 | INIT_WORK(&ipw->work_reboot, signalled_reboot_work); | 201 | INIT_WORK(&ipw->work_reboot, signalled_reboot_work); |
286 | 202 | ||
@@ -291,10 +207,8 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
291 | 207 | ||
292 | ret = pcmcia_request_irq(link, &link->irq); | 208 | ret = pcmcia_request_irq(link, &link->irq); |
293 | 209 | ||
294 | if (ret != 0) { | 210 | if (ret != 0) |
295 | cs_error(link, RequestIRQ, ret); | 211 | goto exit; |
296 | goto exit3; | ||
297 | } | ||
298 | 212 | ||
299 | printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n", | 213 | printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n", |
300 | ipw->is_v2_card ? "V2/V3" : "V1"); | 214 | ipw->is_v2_card ? "V2/V3" : "V1"); |
@@ -316,12 +230,12 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
316 | 230 | ||
317 | ipw->network = ipwireless_network_create(ipw->hardware); | 231 | ipw->network = ipwireless_network_create(ipw->hardware); |
318 | if (!ipw->network) | 232 | if (!ipw->network) |
319 | goto exit3; | 233 | goto exit; |
320 | 234 | ||
321 | ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network, | 235 | ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network, |
322 | ipw->nodes); | 236 | ipw->nodes); |
323 | if (!ipw->tty) | 237 | if (!ipw->tty) |
324 | goto exit3; | 238 | goto exit; |
325 | 239 | ||
326 | ipwireless_init_hardware_v2_v3(ipw->hardware); | 240 | ipwireless_init_hardware_v2_v3(ipw->hardware); |
327 | 241 | ||
@@ -331,35 +245,27 @@ static int config_ipwireless(struct ipw_dev *ipw) | |||
331 | */ | 245 | */ |
332 | ret = pcmcia_request_configuration(link, &link->conf); | 246 | ret = pcmcia_request_configuration(link, &link->conf); |
333 | 247 | ||
334 | if (ret != 0) { | 248 | if (ret != 0) |
335 | cs_error(link, RequestConfiguration, ret); | 249 | goto exit; |
336 | goto exit4; | ||
337 | } | ||
338 | 250 | ||
339 | link->dev_node = &ipw->nodes[0]; | 251 | link->dev_node = &ipw->nodes[0]; |
340 | 252 | ||
341 | return 0; | 253 | return 0; |
342 | 254 | ||
343 | exit4: | 255 | exit: |
344 | pcmcia_disable_device(link); | ||
345 | exit3: | ||
346 | if (ipw->attr_memory) { | 256 | if (ipw->attr_memory) { |
347 | release_mem_region(ipw->request_attr_memory.Base, | 257 | release_mem_region(ipw->request_attr_memory.Base, |
348 | ipw->request_attr_memory.Size); | 258 | ipw->request_attr_memory.Size); |
349 | iounmap(ipw->attr_memory); | 259 | iounmap(ipw->attr_memory); |
350 | pcmcia_release_window(ipw->handle_attr_memory); | 260 | pcmcia_release_window(link, ipw->handle_attr_memory); |
351 | pcmcia_disable_device(link); | ||
352 | } | 261 | } |
353 | exit2: | ||
354 | if (ipw->common_memory) { | 262 | if (ipw->common_memory) { |
355 | release_mem_region(ipw->request_common_memory.Base, | 263 | release_mem_region(ipw->request_common_memory.Base, |
356 | ipw->request_common_memory.Size); | 264 | ipw->request_common_memory.Size); |
357 | iounmap(ipw->common_memory); | 265 | iounmap(ipw->common_memory); |
358 | pcmcia_release_window(ipw->handle_common_memory); | 266 | pcmcia_release_window(link, ipw->handle_common_memory); |
359 | } | 267 | } |
360 | exit1: | ||
361 | pcmcia_disable_device(link); | 268 | pcmcia_disable_device(link); |
362 | exit0: | ||
363 | return -1; | 269 | return -1; |
364 | } | 270 | } |
365 | 271 | ||
@@ -378,9 +284,9 @@ static void release_ipwireless(struct ipw_dev *ipw) | |||
378 | iounmap(ipw->attr_memory); | 284 | iounmap(ipw->attr_memory); |
379 | } | 285 | } |
380 | if (ipw->common_memory) | 286 | if (ipw->common_memory) |
381 | pcmcia_release_window(ipw->handle_common_memory); | 287 | pcmcia_release_window(ipw->link, ipw->handle_common_memory); |
382 | if (ipw->attr_memory) | 288 | if (ipw->attr_memory) |
383 | pcmcia_release_window(ipw->handle_attr_memory); | 289 | pcmcia_release_window(ipw->link, ipw->handle_attr_memory); |
384 | 290 | ||
385 | /* Break the link with Card Services */ | 291 | /* Break the link with Card Services */ |
386 | pcmcia_disable_device(ipw->link); | 292 | pcmcia_disable_device(ipw->link); |
@@ -406,7 +312,6 @@ static int ipwireless_attach(struct pcmcia_device *link) | |||
406 | 312 | ||
407 | ipw->link = link; | 313 | ipw->link = link; |
408 | link->priv = ipw; | 314 | link->priv = ipw; |
409 | link->irq.Instance = ipw; | ||
410 | 315 | ||
411 | /* Link this device into our device list. */ | 316 | /* Link this device into our device list. */ |
412 | link->dev_node = &ipw->nodes[0]; | 317 | link->dev_node = &ipw->nodes[0]; |
@@ -421,7 +326,6 @@ static int ipwireless_attach(struct pcmcia_device *link) | |||
421 | ret = config_ipwireless(ipw); | 326 | ret = config_ipwireless(ipw); |
422 | 327 | ||
423 | if (ret != 0) { | 328 | if (ret != 0) { |
424 | cs_error(link, RegisterClient, ret); | ||
425 | ipwireless_detach(link); | 329 | ipwireless_detach(link); |
426 | return ret; | 330 | return ret; |
427 | } | 331 | } |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index caf6e4d19469..c31a0d913d37 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -554,7 +554,6 @@ static int mgslpc_probe(struct pcmcia_device *link) | |||
554 | 554 | ||
555 | /* Interrupt setup */ | 555 | /* Interrupt setup */ |
556 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 556 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
557 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
558 | link->irq.Handler = NULL; | 557 | link->irq.Handler = NULL; |
559 | 558 | ||
560 | link->conf.Attributes = 0; | 559 | link->conf.Attributes = 0; |
@@ -572,69 +571,51 @@ static int mgslpc_probe(struct pcmcia_device *link) | |||
572 | /* Card has been inserted. | 571 | /* Card has been inserted. |
573 | */ | 572 | */ |
574 | 573 | ||
575 | #define CS_CHECK(fn, ret) \ | 574 | static int mgslpc_ioprobe(struct pcmcia_device *p_dev, |
576 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | 575 | cistpl_cftable_entry_t *cfg, |
576 | cistpl_cftable_entry_t *dflt, | ||
577 | unsigned int vcc, | ||
578 | void *priv_data) | ||
579 | { | ||
580 | if (cfg->io.nwin > 0) { | ||
581 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
582 | if (!(cfg->io.flags & CISTPL_IO_8BIT)) | ||
583 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
584 | if (!(cfg->io.flags & CISTPL_IO_16BIT)) | ||
585 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
586 | p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK; | ||
587 | p_dev->io.BasePort1 = cfg->io.win[0].base; | ||
588 | p_dev->io.NumPorts1 = cfg->io.win[0].len; | ||
589 | return pcmcia_request_io(p_dev, &p_dev->io); | ||
590 | } | ||
591 | return -ENODEV; | ||
592 | } | ||
577 | 593 | ||
578 | static int mgslpc_config(struct pcmcia_device *link) | 594 | static int mgslpc_config(struct pcmcia_device *link) |
579 | { | 595 | { |
580 | MGSLPC_INFO *info = link->priv; | 596 | MGSLPC_INFO *info = link->priv; |
581 | tuple_t tuple; | 597 | int ret; |
582 | cisparse_t parse; | ||
583 | int last_fn, last_ret; | ||
584 | u_char buf[64]; | ||
585 | cistpl_cftable_entry_t dflt = { 0 }; | ||
586 | cistpl_cftable_entry_t *cfg; | ||
587 | 598 | ||
588 | if (debug_level >= DEBUG_LEVEL_INFO) | 599 | if (debug_level >= DEBUG_LEVEL_INFO) |
589 | printk("mgslpc_config(0x%p)\n", link); | 600 | printk("mgslpc_config(0x%p)\n", link); |
590 | 601 | ||
591 | tuple.Attributes = 0; | 602 | ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL); |
592 | tuple.TupleData = buf; | 603 | if (ret != 0) |
593 | tuple.TupleDataMax = sizeof(buf); | 604 | goto failed; |
594 | tuple.TupleOffset = 0; | ||
595 | |||
596 | /* get CIS configuration entry */ | ||
597 | |||
598 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
599 | CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); | ||
600 | |||
601 | cfg = &(parse.cftable_entry); | ||
602 | CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); | ||
603 | CS_CHECK(ParseTuple, pcmcia_parse_tuple(&tuple, &parse)); | ||
604 | |||
605 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; | ||
606 | if (cfg->index == 0) | ||
607 | goto cs_failed; | ||
608 | |||
609 | link->conf.ConfigIndex = cfg->index; | ||
610 | link->conf.Attributes |= CONF_ENABLE_IRQ; | ||
611 | |||
612 | /* IO window settings */ | ||
613 | link->io.NumPorts1 = 0; | ||
614 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
615 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
616 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
617 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
618 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
619 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
620 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
621 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
622 | link->io.BasePort1 = io->win[0].base; | ||
623 | link->io.NumPorts1 = io->win[0].len; | ||
624 | CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); | ||
625 | } | ||
626 | 605 | ||
627 | link->conf.Attributes = CONF_ENABLE_IRQ; | 606 | link->conf.Attributes = CONF_ENABLE_IRQ; |
628 | link->conf.IntType = INT_MEMORY_AND_IO; | 607 | link->conf.IntType = INT_MEMORY_AND_IO; |
629 | link->conf.ConfigIndex = 8; | 608 | link->conf.ConfigIndex = 8; |
630 | link->conf.Present = PRESENT_OPTION; | 609 | link->conf.Present = PRESENT_OPTION; |
631 | 610 | ||
632 | link->irq.Attributes |= IRQ_HANDLE_PRESENT; | ||
633 | link->irq.Handler = mgslpc_isr; | 611 | link->irq.Handler = mgslpc_isr; |
634 | link->irq.Instance = info; | ||
635 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | ||
636 | 612 | ||
637 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 613 | ret = pcmcia_request_irq(link, &link->irq); |
614 | if (ret) | ||
615 | goto failed; | ||
616 | ret = pcmcia_request_configuration(link, &link->conf); | ||
617 | if (ret) | ||
618 | goto failed; | ||
638 | 619 | ||
639 | info->io_base = link->io.BasePort1; | 620 | info->io_base = link->io.BasePort1; |
640 | info->irq_level = link->irq.AssignedIRQ; | 621 | info->irq_level = link->irq.AssignedIRQ; |
@@ -654,8 +635,7 @@ static int mgslpc_config(struct pcmcia_device *link) | |||
654 | printk("\n"); | 635 | printk("\n"); |
655 | return 0; | 636 | return 0; |
656 | 637 | ||
657 | cs_failed: | 638 | failed: |
658 | cs_error(link, last_fn, last_ret); | ||
659 | mgslpc_release((u_long)link); | 639 | mgslpc_release((u_long)link); |
660 | return -ENODEV; | 640 | return -ENODEV; |
661 | } | 641 | } |
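Alongside the loop_config change, the synclink hunk drops the CS_CHECK() macro and cs_error() reporting in favour of plain return-code checks that funnel into a single cleanup label. A small sketch of that error-handling style, with illustrative names and assuming the same PCMCIA headers as above:

    #include <linux/errno.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    static int example_setup(struct pcmcia_device *link)
    {
            int ret;

            ret = pcmcia_request_irq(link, &link->irq);
            if (ret)
                    goto failed;

            ret = pcmcia_request_configuration(link, &link->conf);
            if (ret)
                    goto failed;

            return 0;

    failed:
            /* One cleanup path; the old cs_error() call is simply gone,
             * error reporting is left to the PCMCIA core. */
            pcmcia_disable_device(link);
            return -ENODEV;
    }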
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 47c2d2763456..f06bb37defb1 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | enum tpm_const { | 32 | enum tpm_const { |
33 | TPM_MINOR = 224, /* officially assigned */ | 33 | TPM_MINOR = 224, /* officially assigned */ |
34 | TPM_BUFSIZE = 2048, | 34 | TPM_BUFSIZE = 4096, |
35 | TPM_NUM_DEVICES = 256, | 35 | TPM_NUM_DEVICES = 256, |
36 | }; | 36 | }; |
37 | 37 | ||
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 0b73e4ec1add..2405f17b29dd 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -257,6 +257,10 @@ out: | |||
257 | return size; | 257 | return size; |
258 | } | 258 | } |
259 | 259 | ||
260 | static int itpm; | ||
261 | module_param(itpm, bool, 0444); | ||
262 | MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); | ||
263 | |||
260 | /* | 264 | /* |
261 | * If interrupts are used (signaled by an irq set in the vendor structure) | 265 | * If interrupts are used (signaled by an irq set in the vendor structure) |
262 | * tpm.c can skip polling for the data to be available as the interrupt is | 266 | * tpm.c can skip polling for the data to be available as the interrupt is |
@@ -293,7 +297,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
293 | wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, | 297 | wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, |
294 | &chip->vendor.int_queue); | 298 | &chip->vendor.int_queue); |
295 | status = tpm_tis_status(chip); | 299 | status = tpm_tis_status(chip); |
296 | if ((status & TPM_STS_DATA_EXPECT) == 0) { | 300 | if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { |
297 | rc = -EIO; | 301 | rc = -EIO; |
298 | goto out_err; | 302 | goto out_err; |
299 | } | 303 | } |
@@ -467,6 +471,10 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, | |||
467 | "1.2 TPM (device-id 0x%X, rev-id %d)\n", | 471 | "1.2 TPM (device-id 0x%X, rev-id %d)\n", |
468 | vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); | 472 | vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); |
469 | 473 | ||
474 | if (itpm) | ||
475 | dev_info(dev, "Intel iTPM workaround enabled\n"); | ||
476 | |||
477 | |||
470 | /* Figure out the capabilities */ | 478 | /* Figure out the capabilities */ |
471 | intfcaps = | 479 | intfcaps = |
472 | ioread32(chip->vendor.iobase + | 480 | ioread32(chip->vendor.iobase + |
@@ -629,6 +637,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = { | |||
629 | {"", 0}, /* User Specified */ | 637 | {"", 0}, /* User Specified */ |
630 | {"", 0} /* Terminator */ | 638 | {"", 0} /* Terminator */ |
631 | }; | 639 | }; |
640 | MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); | ||
632 | 641 | ||
633 | static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev) | 642 | static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev) |
634 | { | 643 | { |
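The tpm_tis change above adds a read-only bool module parameter (itpm) that relaxes a status check at transmit time. The sketch below shows that general pattern with illustrative names (force_quirk, example_send); it mirrors the int-backed bool parameter style the patch itself uses.

    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int force_quirk;                  /* int-backed bool, as in the patch */
    module_param(force_quirk, bool, 0444);   /* readable via sysfs, not writable */
    MODULE_PARM_DESC(force_quirk, "Force the hardware workaround");

    static int example_send(int status_ok)
    {
            /* With the quirk enabled, the strict status check is skipped. */
            if (!force_quirk && !status_ok)
                    return -EIO;
            return 0;
    }

Loading with force_quirk=1 then bypasses the check, which is exactly how the itpm flag is consulted in tpm_tis_send() above.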
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 2e8552dc5eda..c63f3d33914a 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c | |||
@@ -219,8 +219,11 @@ int tty_port_block_til_ready(struct tty_port *port, | |||
219 | 219 | ||
220 | /* if non-blocking mode is set we can pass directly to open unless | 220 | /* if non-blocking mode is set we can pass directly to open unless |
221 | the port has just hung up or is in another error state */ | 221 | the port has just hung up or is in another error state */ |
222 | if ((filp->f_flags & O_NONBLOCK) || | 222 | if (tty->flags & (1 << TTY_IO_ERROR)) { |
223 | (tty->flags & (1 << TTY_IO_ERROR))) { | 223 | port->flags |= ASYNC_NORMAL_ACTIVE; |
224 | return 0; | ||
225 | } | ||
226 | if (filp->f_flags & O_NONBLOCK) { | ||
224 | /* Indicate we are open */ | 227 | /* Indicate we are open */ |
225 | if (tty->termios->c_cflag & CBAUD) | 228 | if (tty->termios->c_cflag & CBAUD) |
226 | tty_port_raise_dtr_rts(port); | 229 | tty_port_raise_dtr_rts(port); |
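The tty_port hunk splits the error-state case from the non-blocking case, so a port flagged TTY_IO_ERROR is marked active and the open succeeds without touching the modem lines, while a non-blocking open still raises DTR/RTS. A rough self-contained sketch of the resulting fast paths (helper name illustrative; the tail of the non-blocking branch is inferred from the surrounding function, not shown in the hunk):

    #include <linux/fs.h>
    #include <linux/serial.h>
    #include <linux/tty.h>

    static int example_fast_open_paths(struct tty_port *port,
                                       struct tty_struct *tty,
                                       struct file *filp)
    {
            /* Error state: succeed immediately, leave the modem lines alone. */
            if (tty->flags & (1 << TTY_IO_ERROR)) {
                    port->flags |= ASYNC_NORMAL_ACTIVE;
                    return 0;
            }
            /* Non-blocking open: still raise DTR/RTS before returning. */
            if (filp->f_flags & O_NONBLOCK) {
                    if (tty->termios->c_cflag & CBAUD)
                            tty_port_raise_dtr_rts(port);
                    port->flags |= ASYNC_NORMAL_ACTIVE;
                    return 0;
            }
            return 1;       /* fall through to the blocking wait */
    }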
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index 713ed7d37247..689cc6a6214d 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | static bool report_gart_errors; | 4 | static bool report_gart_errors; |
5 | static void (*nb_bus_decoder)(int node_id, struct err_regs *regs); | 5 | static void (*nb_bus_decoder)(int node_id, struct err_regs *regs); |
6 | static void (*orig_mce_callback)(struct mce *m); | ||
7 | 6 | ||
8 | void amd_report_gart_errors(bool v) | 7 | void amd_report_gart_errors(bool v) |
9 | { | 8 | { |
@@ -363,8 +362,10 @@ static inline void amd_decode_err_code(unsigned int ec) | |||
363 | pr_warning("Huh? Unknown MCE error 0x%x\n", ec); | 362 | pr_warning("Huh? Unknown MCE error 0x%x\n", ec); |
364 | } | 363 | } |
365 | 364 | ||
366 | static void amd_decode_mce(struct mce *m) | 365 | static int amd_decode_mce(struct notifier_block *nb, unsigned long val, |
366 | void *data) | ||
367 | { | 367 | { |
368 | struct mce *m = (struct mce *)data; | ||
368 | struct err_regs regs; | 369 | struct err_regs regs; |
369 | int node, ecc; | 370 | int node, ecc; |
370 | 371 | ||
@@ -420,20 +421,22 @@ static void amd_decode_mce(struct mce *m) | |||
420 | } | 421 | } |
421 | 422 | ||
422 | amd_decode_err_code(m->status & 0xffff); | 423 | amd_decode_err_code(m->status & 0xffff); |
424 | |||
425 | return NOTIFY_STOP; | ||
423 | } | 426 | } |
424 | 427 | ||
428 | static struct notifier_block amd_mce_dec_nb = { | ||
429 | .notifier_call = amd_decode_mce, | ||
430 | }; | ||
431 | |||
425 | static int __init mce_amd_init(void) | 432 | static int __init mce_amd_init(void) |
426 | { | 433 | { |
427 | /* | 434 | /* |
428 | * We can decode MCEs for Opteron and later CPUs: | 435 | * We can decode MCEs for Opteron and later CPUs: |
429 | */ | 436 | */ |
430 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && | 437 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && |
431 | (boot_cpu_data.x86 >= 0xf)) { | 438 | (boot_cpu_data.x86 >= 0xf)) |
432 | /* safe the default decode mce callback */ | 439 | atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); |
433 | orig_mce_callback = x86_mce_decode_callback; | ||
434 | |||
435 | x86_mce_decode_callback = amd_decode_mce; | ||
436 | } | ||
437 | 440 | ||
438 | return 0; | 441 | return 0; |
439 | } | 442 | } |
@@ -442,7 +445,7 @@ early_initcall(mce_amd_init); | |||
442 | #ifdef MODULE | 445 | #ifdef MODULE |
443 | static void __exit mce_amd_exit(void) | 446 | static void __exit mce_amd_exit(void) |
444 | { | 447 | { |
445 | x86_mce_decode_callback = orig_mce_callback; | 448 | atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); |
446 | } | 449 | } |
447 | 450 | ||
448 | MODULE_DESCRIPTION("AMD MCE decoder"); | 451 | MODULE_DESCRIPTION("AMD MCE decoder"); |
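The edac_mce_amd change above switches from overwriting a single x86_mce_decode_callback pointer to registering on an atomic notifier chain, so several decoders can coexist and each returns NOTIFY_STOP once it has handled an event. A self-contained sketch of that pattern, using a local chain and illustrative names rather than the real x86_mce_decoder_chain:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/notifier.h>

    static ATOMIC_NOTIFIER_HEAD(example_decoder_chain);

    static int example_decode(struct notifier_block *nb, unsigned long val,
                              void *data)
    {
            /* decode the event pointed to by *data here ... */
            return NOTIFY_STOP;     /* stop further decoders for this event */
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_decode,
    };

    static int __init example_init(void)
    {
            atomic_notifier_chain_register(&example_decoder_chain, &example_nb);
            return 0;
    }

    static void __exit example_exit(void)
    {
            atomic_notifier_chain_unregister(&example_decoder_chain, &example_nb);
    }

    module_init(example_init);
    module_exit(example_exit);

    /* A producer would fire the chain for each event with:
     *   atomic_notifier_call_chain(&example_decoder_chain, 0, &event);
     */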
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c index 5711ce5353c6..4baf3d7d0f8e 100644 --- a/drivers/gpio/langwell_gpio.c +++ b/drivers/gpio/langwell_gpio.c | |||
@@ -144,13 +144,6 @@ static int lnw_irq_type(unsigned irq, unsigned type) | |||
144 | 144 | ||
145 | static void lnw_irq_unmask(unsigned irq) | 145 | static void lnw_irq_unmask(unsigned irq) |
146 | { | 146 | { |
147 | struct lnw_gpio *lnw = get_irq_chip_data(irq); | ||
148 | u32 gpio = irq - lnw->irq_base; | ||
149 | u8 reg = gpio / 32; | ||
150 | void __iomem *gedr; | ||
151 | |||
152 | gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); | ||
153 | writel(BIT(gpio % 32), gedr); | ||
154 | }; | 147 | }; |
155 | 148 | ||
156 | static void lnw_irq_mask(unsigned irq) | 149 | static void lnw_irq_mask(unsigned irq) |
@@ -183,13 +176,11 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) | |||
183 | gedr_v = readl(gedr); | 176 | gedr_v = readl(gedr); |
184 | if (!gedr_v) | 177 | if (!gedr_v) |
185 | continue; | 178 | continue; |
186 | for (gpio = reg*32; gpio < reg*32+32; gpio++) { | 179 | for (gpio = reg*32; gpio < reg*32+32; gpio++) |
187 | gedr_v = readl(gedr); | ||
188 | if (gedr_v & BIT(gpio % 32)) { | 180 | if (gedr_v & BIT(gpio % 32)) { |
189 | pr_debug("pin %d triggered\n", gpio); | 181 | pr_debug("pin %d triggered\n", gpio); |
190 | generic_handle_irq(lnw->irq_base + gpio); | 182 | generic_handle_irq(lnw->irq_base + gpio); |
191 | } | 183 | } |
192 | } | ||
193 | /* clear the edge detect status bit */ | 184 | /* clear the edge detect status bit */ |
194 | writel(gedr_v, gedr); | 185 | writel(gedr_v, gedr); |
195 | } | 186 | } |
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 063b933d864a..dd6396384c25 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
@@ -60,15 +60,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | |||
60 | MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver"); | 60 | MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver"); |
61 | MODULE_LICENSE("Dual MPL/GPL"); | 61 | MODULE_LICENSE("Dual MPL/GPL"); |
62 | 62 | ||
63 | #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) | ||
64 | |||
65 | #ifdef CONFIG_PCMCIA_DEBUG | ||
66 | INT_MODULE_PARM(pc_debug, 0); | ||
67 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
68 | #else | ||
69 | #define DEBUG(n, args...) | ||
70 | #endif | ||
71 | |||
72 | /*====================================================================*/ | 63 | /*====================================================================*/ |
73 | 64 | ||
74 | typedef struct ide_info_t { | 65 | typedef struct ide_info_t { |
@@ -98,7 +89,7 @@ static int ide_probe(struct pcmcia_device *link) | |||
98 | { | 89 | { |
99 | ide_info_t *info; | 90 | ide_info_t *info; |
100 | 91 | ||
101 | DEBUG(0, "ide_attach()\n"); | 92 | dev_dbg(&link->dev, "ide_attach()\n"); |
102 | 93 | ||
103 | /* Create new ide device */ | 94 | /* Create new ide device */ |
104 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 95 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -112,7 +103,6 @@ static int ide_probe(struct pcmcia_device *link) | |||
112 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 103 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
113 | link->io.IOAddrLines = 3; | 104 | link->io.IOAddrLines = 3; |
114 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 105 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
115 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
116 | link->conf.Attributes = CONF_ENABLE_IRQ; | 106 | link->conf.Attributes = CONF_ENABLE_IRQ; |
117 | link->conf.IntType = INT_MEMORY_AND_IO; | 107 | link->conf.IntType = INT_MEMORY_AND_IO; |
118 | 108 | ||
@@ -134,7 +124,7 @@ static void ide_detach(struct pcmcia_device *link) | |||
134 | ide_hwif_t *hwif = info->host->ports[0]; | 124 | ide_hwif_t *hwif = info->host->ports[0]; |
135 | unsigned long data_addr, ctl_addr; | 125 | unsigned long data_addr, ctl_addr; |
136 | 126 | ||
137 | DEBUG(0, "ide_detach(0x%p)\n", link); | 127 | dev_dbg(&link->dev, "ide_detach(0x%p)\n", link); |
138 | 128 | ||
139 | data_addr = hwif->io_ports.data_addr; | 129 | data_addr = hwif->io_ports.data_addr; |
140 | ctl_addr = hwif->io_ports.ctl_addr; | 130 | ctl_addr = hwif->io_ports.ctl_addr; |
@@ -217,9 +207,6 @@ out_release: | |||
217 | 207 | ||
218 | ======================================================================*/ | 208 | ======================================================================*/ |
219 | 209 | ||
220 | #define CS_CHECK(fn, ret) \ | ||
221 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
222 | |||
223 | struct pcmcia_config_check { | 210 | struct pcmcia_config_check { |
224 | unsigned long ctl_base; | 211 | unsigned long ctl_base; |
225 | int skip_vcc; | 212 | int skip_vcc; |
@@ -282,11 +269,11 @@ static int ide_config(struct pcmcia_device *link) | |||
282 | { | 269 | { |
283 | ide_info_t *info = link->priv; | 270 | ide_info_t *info = link->priv; |
284 | struct pcmcia_config_check *stk = NULL; | 271 | struct pcmcia_config_check *stk = NULL; |
285 | int last_ret = 0, last_fn = 0, is_kme = 0; | 272 | int ret = 0, is_kme = 0; |
286 | unsigned long io_base, ctl_base; | 273 | unsigned long io_base, ctl_base; |
287 | struct ide_host *host; | 274 | struct ide_host *host; |
288 | 275 | ||
289 | DEBUG(0, "ide_config(0x%p)\n", link); | 276 | dev_dbg(&link->dev, "ide_config(0x%p)\n", link); |
290 | 277 | ||
291 | is_kme = ((link->manf_id == MANFID_KME) && | 278 | is_kme = ((link->manf_id == MANFID_KME) && |
292 | ((link->card_id == PRODID_KME_KXLC005_A) || | 279 | ((link->card_id == PRODID_KME_KXLC005_A) || |
@@ -306,8 +293,12 @@ static int ide_config(struct pcmcia_device *link) | |||
306 | io_base = link->io.BasePort1; | 293 | io_base = link->io.BasePort1; |
307 | ctl_base = stk->ctl_base; | 294 | ctl_base = stk->ctl_base; |
308 | 295 | ||
309 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 296 | ret = pcmcia_request_irq(link, &link->irq); |
310 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 297 | if (ret) |
298 | goto failed; | ||
299 | ret = pcmcia_request_configuration(link, &link->conf); | ||
300 | if (ret) | ||
301 | goto failed; | ||
311 | 302 | ||
312 | /* disable drive interrupts during IDE probe */ | 303 | /* disable drive interrupts during IDE probe */ |
313 | outb(0x02, ctl_base); | 304 | outb(0x02, ctl_base); |
@@ -342,8 +333,6 @@ err_mem: | |||
342 | printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n"); | 333 | printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n"); |
343 | goto failed; | 334 | goto failed; |
344 | 335 | ||
345 | cs_failed: | ||
346 | cs_error(link, last_fn, last_ret); | ||
347 | failed: | 336 | failed: |
348 | kfree(stk); | 337 | kfree(stk); |
349 | ide_release(link); | 338 | ide_release(link); |
@@ -363,7 +352,7 @@ static void ide_release(struct pcmcia_device *link) | |||
363 | ide_info_t *info = link->priv; | 352 | ide_info_t *info = link->priv; |
364 | struct ide_host *host = info->host; | 353 | struct ide_host *host = info->host; |
365 | 354 | ||
366 | DEBUG(0, "ide_release(0x%p)\n", link); | 355 | dev_dbg(&link->dev, "ide_release(0x%p)\n", link); |
367 | 356 | ||
368 | if (info->ndev) | 357 | if (info->ndev) |
369 | /* FIXME: if this fails we need to queue the cleanup somehow | 358 | /* FIXME: if this fails we need to queue the cleanup somehow |
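The ide-cs hunks, like the other PCMCIA drivers in this series, drop the driver-local pc_debug/DEBUG() macros in favour of dev_dbg(), which ties the message to the device and is compiled out unless DEBUG or dynamic debug is enabled. A tiny sketch of the replacement call (names illustrative):

    #include <linux/device.h>

    static void example_probe_msg(struct device *dev)
    {
            /* Emitted only with DEBUG or dynamic debug enabled. */
            dev_dbg(dev, "probing %s\n", dev_name(dev));
    }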
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index a537925f7651..2bcf1ace27c0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -447,6 +447,27 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = { | |||
447 | DMI_MATCH(DMI_PRODUCT_NAME, "N10"), | 447 | DMI_MATCH(DMI_PRODUCT_NAME, "N10"), |
448 | }, | 448 | }, |
449 | }, | 449 | }, |
450 | { | ||
451 | .ident = "Dell Vostro 1320", | ||
452 | .matches = { | ||
453 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
454 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), | ||
455 | }, | ||
456 | }, | ||
457 | { | ||
458 | .ident = "Dell Vostro 1520", | ||
459 | .matches = { | ||
460 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
461 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), | ||
462 | }, | ||
463 | }, | ||
464 | { | ||
465 | .ident = "Dell Vostro 1720", | ||
466 | .matches = { | ||
467 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
468 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), | ||
469 | }, | ||
470 | }, | ||
450 | { } | 471 | { } |
451 | }; | 472 | }; |
452 | 473 | ||
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c index c72565520e41..5a6ae646a636 100644 --- a/drivers/isdn/hardware/avm/avm_cs.c +++ b/drivers/isdn/hardware/avm/avm_cs.c | |||
@@ -111,8 +111,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev) | |||
111 | p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 111 | p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
112 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 112 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
113 | 113 | ||
114 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
115 | |||
116 | /* General socket configuration */ | 114 | /* General socket configuration */ |
117 | p_dev->conf.Attributes = CONF_ENABLE_IRQ; | 115 | p_dev->conf.Attributes = CONF_ENABLE_IRQ; |
118 | p_dev->conf.IntType = INT_MEMORY_AND_IO; | 116 | p_dev->conf.IntType = INT_MEMORY_AND_IO; |
@@ -198,7 +196,6 @@ static int avmcs_config(struct pcmcia_device *link) | |||
198 | */ | 196 | */ |
199 | i = pcmcia_request_irq(link, &link->irq); | 197 | i = pcmcia_request_irq(link, &link->irq); |
200 | if (i != 0) { | 198 | if (i != 0) { |
201 | cs_error(link, RequestIRQ, i); | ||
202 | /* undo */ | 199 | /* undo */ |
203 | pcmcia_disable_device(link); | 200 | pcmcia_disable_device(link); |
204 | break; | 201 | break; |
@@ -209,7 +206,6 @@ static int avmcs_config(struct pcmcia_device *link) | |||
209 | */ | 206 | */ |
210 | i = pcmcia_request_configuration(link, &link->conf); | 207 | i = pcmcia_request_configuration(link, &link->conf); |
211 | if (i != 0) { | 208 | if (i != 0) { |
212 | cs_error(link, RequestConfiguration, i); | ||
213 | pcmcia_disable_device(link); | 209 | pcmcia_disable_device(link); |
214 | break; | 210 | break; |
215 | } | 211 | } |
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c index 23560c897ec3..f9bdff39cf4a 100644 --- a/drivers/isdn/hisax/avma1_cs.c +++ b/drivers/isdn/hisax/avma1_cs.c | |||
@@ -30,22 +30,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA car | |||
30 | MODULE_AUTHOR("Carsten Paeth"); | 30 | MODULE_AUTHOR("Carsten Paeth"); |
31 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
32 | 32 | ||
33 | /* | ||
34 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
35 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
36 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
37 | be present but disabled -- but it can then be enabled for specific | ||
38 | modules at load time with a 'pc_debug=#' option to insmod. | ||
39 | */ | ||
40 | #ifdef PCMCIA_DEBUG | ||
41 | static int pc_debug = PCMCIA_DEBUG; | ||
42 | module_param(pc_debug, int, 0); | ||
43 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); | ||
44 | static char *version = | ||
45 | "avma1_cs.c 1.00 1998/01/23 10:00:00 (Carsten Paeth)"; | ||
46 | #else | ||
47 | #define DEBUG(n, args...) | ||
48 | #endif | ||
49 | 33 | ||
50 | /*====================================================================*/ | 34 | /*====================================================================*/ |
51 | 35 | ||
@@ -119,7 +103,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev) | |||
119 | { | 103 | { |
120 | local_info_t *local; | 104 | local_info_t *local; |
121 | 105 | ||
122 | DEBUG(0, "avma1cs_attach()\n"); | 106 | dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); |
123 | 107 | ||
124 | /* Allocate space for private device-specific data */ | 108 | /* Allocate space for private device-specific data */ |
125 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); | 109 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); |
@@ -139,8 +123,6 @@ static int avma1cs_probe(struct pcmcia_device *p_dev) | |||
139 | p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 123 | p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
140 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 124 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
141 | 125 | ||
142 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
143 | |||
144 | /* General socket configuration */ | 126 | /* General socket configuration */ |
145 | p_dev->conf.Attributes = CONF_ENABLE_IRQ; | 127 | p_dev->conf.Attributes = CONF_ENABLE_IRQ; |
146 | p_dev->conf.IntType = INT_MEMORY_AND_IO; | 128 | p_dev->conf.IntType = INT_MEMORY_AND_IO; |
@@ -161,7 +143,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev) | |||
161 | 143 | ||
162 | static void avma1cs_detach(struct pcmcia_device *link) | 144 | static void avma1cs_detach(struct pcmcia_device *link) |
163 | { | 145 | { |
164 | DEBUG(0, "avma1cs_detach(0x%p)\n", link); | 146 | dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); |
165 | avma1cs_release(link); | 147 | avma1cs_release(link); |
166 | kfree(link->priv); | 148 | kfree(link->priv); |
167 | } /* avma1cs_detach */ | 149 | } /* avma1cs_detach */ |
@@ -203,7 +185,7 @@ static int avma1cs_config(struct pcmcia_device *link) | |||
203 | 185 | ||
204 | dev = link->priv; | 186 | dev = link->priv; |
205 | 187 | ||
206 | DEBUG(0, "avma1cs_config(0x%p)\n", link); | 188 | dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link); |
207 | 189 | ||
208 | devname[0] = 0; | 190 | devname[0] = 0; |
209 | if (link->prod_id[1]) | 191 | if (link->prod_id[1]) |
@@ -218,7 +200,6 @@ static int avma1cs_config(struct pcmcia_device *link) | |||
218 | */ | 200 | */ |
219 | i = pcmcia_request_irq(link, &link->irq); | 201 | i = pcmcia_request_irq(link, &link->irq); |
220 | if (i != 0) { | 202 | if (i != 0) { |
221 | cs_error(link, RequestIRQ, i); | ||
222 | /* undo */ | 203 | /* undo */ |
223 | pcmcia_disable_device(link); | 204 | pcmcia_disable_device(link); |
224 | break; | 205 | break; |
@@ -229,7 +210,6 @@ static int avma1cs_config(struct pcmcia_device *link) | |||
229 | */ | 210 | */ |
230 | i = pcmcia_request_configuration(link, &link->conf); | 211 | i = pcmcia_request_configuration(link, &link->conf); |
231 | if (i != 0) { | 212 | if (i != 0) { |
232 | cs_error(link, RequestConfiguration, i); | ||
233 | pcmcia_disable_device(link); | 213 | pcmcia_disable_device(link); |
234 | break; | 214 | break; |
235 | } | 215 | } |
@@ -281,7 +261,7 @@ static void avma1cs_release(struct pcmcia_device *link) | |||
281 | { | 261 | { |
282 | local_info_t *local = link->priv; | 262 | local_info_t *local = link->priv; |
283 | 263 | ||
284 | DEBUG(0, "avma1cs_release(0x%p)\n", link); | 264 | dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link); |
285 | 265 | ||
286 | /* now unregister function with hisax */ | 266 | /* now unregister function with hisax */ |
287 | HiSax_closecard(local->node.minor); | 267 | HiSax_closecard(local->node.minor); |
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c index f4d0fe29bcf8..a2f709f53974 100644 --- a/drivers/isdn/hisax/elsa_cs.c +++ b/drivers/isdn/hisax/elsa_cs.c | |||
@@ -57,23 +57,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Elsa PCM cards"); | |||
57 | MODULE_AUTHOR("Klaus Lichtenwalder"); | 57 | MODULE_AUTHOR("Klaus Lichtenwalder"); |
58 | MODULE_LICENSE("Dual MPL/GPL"); | 58 | MODULE_LICENSE("Dual MPL/GPL"); |
59 | 59 | ||
60 | /* | ||
61 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
62 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
63 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
64 | be present but disabled -- but it can then be enabled for specific | ||
65 | modules at load time with a 'pc_debug=#' option to insmod. | ||
66 | */ | ||
67 | |||
68 | #ifdef PCMCIA_DEBUG | ||
69 | static int pc_debug = PCMCIA_DEBUG; | ||
70 | module_param(pc_debug, int, 0); | ||
71 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); | ||
72 | static char *version = | ||
73 | "elsa_cs.c $Revision: 1.2.2.4 $ $Date: 2004/01/25 15:07:06 $ (K.Lichtenwalder)"; | ||
74 | #else | ||
75 | #define DEBUG(n, args...) | ||
76 | #endif | ||
77 | 60 | ||
78 | /*====================================================================*/ | 61 | /*====================================================================*/ |
79 | 62 | ||
@@ -142,7 +125,7 @@ static int elsa_cs_probe(struct pcmcia_device *link) | |||
142 | { | 125 | { |
143 | local_info_t *local; | 126 | local_info_t *local; |
144 | 127 | ||
145 | DEBUG(0, "elsa_cs_attach()\n"); | 128 | dev_dbg(&link->dev, "elsa_cs_attach()\n"); |
146 | 129 | ||
147 | /* Allocate space for private device-specific data */ | 130 | /* Allocate space for private device-specific data */ |
148 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); | 131 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); |
@@ -155,7 +138,6 @@ static int elsa_cs_probe(struct pcmcia_device *link) | |||
155 | 138 | ||
156 | /* Interrupt setup */ | 139 | /* Interrupt setup */ |
157 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 140 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
158 | link->irq.IRQInfo1 = IRQ_LEVEL_ID|IRQ_SHARE_ID; | ||
159 | link->irq.Handler = NULL; | 141 | link->irq.Handler = NULL; |
160 | 142 | ||
161 | /* | 143 | /* |
@@ -188,7 +170,7 @@ static void elsa_cs_detach(struct pcmcia_device *link) | |||
188 | { | 170 | { |
189 | local_info_t *info = link->priv; | 171 | local_info_t *info = link->priv; |
190 | 172 | ||
191 | DEBUG(0, "elsa_cs_detach(0x%p)\n", link); | 173 | dev_dbg(&link->dev, "elsa_cs_detach(0x%p)\n", link); |
192 | 174 | ||
193 | info->busy = 1; | 175 | info->busy = 1; |
194 | elsa_cs_release(link); | 176 | elsa_cs_release(link); |
@@ -231,30 +213,25 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev, | |||
231 | static int elsa_cs_config(struct pcmcia_device *link) | 213 | static int elsa_cs_config(struct pcmcia_device *link) |
232 | { | 214 | { |
233 | local_info_t *dev; | 215 | local_info_t *dev; |
234 | int i, last_fn; | 216 | int i; |
235 | IsdnCard_t icard; | 217 | IsdnCard_t icard; |
236 | 218 | ||
237 | DEBUG(0, "elsa_config(0x%p)\n", link); | 219 | dev_dbg(&link->dev, "elsa_config(0x%p)\n", link); |
238 | dev = link->priv; | 220 | dev = link->priv; |
239 | 221 | ||
240 | i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL); | 222 | i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL); |
241 | if (i != 0) { | 223 | if (i != 0) |
242 | last_fn = RequestIO; | 224 | goto failed; |
243 | goto cs_failed; | ||
244 | } | ||
245 | 225 | ||
246 | i = pcmcia_request_irq(link, &link->irq); | 226 | i = pcmcia_request_irq(link, &link->irq); |
247 | if (i != 0) { | 227 | if (i != 0) { |
248 | link->irq.AssignedIRQ = 0; | 228 | link->irq.AssignedIRQ = 0; |
249 | last_fn = RequestIRQ; | 229 | goto failed; |
250 | goto cs_failed; | ||
251 | } | 230 | } |
252 | 231 | ||
253 | i = pcmcia_request_configuration(link, &link->conf); | 232 | i = pcmcia_request_configuration(link, &link->conf); |
254 | if (i != 0) { | 233 | if (i != 0) |
255 | last_fn = RequestConfiguration; | 234 | goto failed; |
256 | goto cs_failed; | ||
257 | } | ||
258 | 235 | ||
259 | /* At this point, the dev_node_t structure(s) should be | 236 | /* At this point, the dev_node_t structure(s) should be |
260 | initialized and arranged in a linked list at link->dev. *//* */ | 237 | initialized and arranged in a linked list at link->dev. *//* */ |
@@ -290,8 +267,7 @@ static int elsa_cs_config(struct pcmcia_device *link) | |||
290 | ((local_info_t*)link->priv)->cardnr = i; | 267 | ((local_info_t*)link->priv)->cardnr = i; |
291 | 268 | ||
292 | return 0; | 269 | return 0; |
293 | cs_failed: | 270 | failed: |
294 | cs_error(link, last_fn, i); | ||
295 | elsa_cs_release(link); | 271 | elsa_cs_release(link); |
296 | return -ENODEV; | 272 | return -ENODEV; |
297 | } /* elsa_cs_config */ | 273 | } /* elsa_cs_config */ |
@@ -308,7 +284,7 @@ static void elsa_cs_release(struct pcmcia_device *link) | |||
308 | { | 284 | { |
309 | local_info_t *local = link->priv; | 285 | local_info_t *local = link->priv; |
310 | 286 | ||
311 | DEBUG(0, "elsa_cs_release(0x%p)\n", link); | 287 | dev_dbg(&link->dev, "elsa_cs_release(0x%p)\n", link); |
312 | 288 | ||
313 | if (local) { | 289 | if (local) { |
314 | if (local->cardnr >= 0) { | 290 | if (local->cardnr >= 0) { |
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c index 9a3c9f5e4fe8..af5d393cc2d0 100644 --- a/drivers/isdn/hisax/sedlbauer_cs.c +++ b/drivers/isdn/hisax/sedlbauer_cs.c | |||
@@ -57,24 +57,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Sedlbauer cards"); | |||
57 | MODULE_AUTHOR("Marcus Niemann"); | 57 | MODULE_AUTHOR("Marcus Niemann"); |
58 | MODULE_LICENSE("Dual MPL/GPL"); | 58 | MODULE_LICENSE("Dual MPL/GPL"); |
59 | 59 | ||
60 | /* | ||
61 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
62 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
63 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
64 | be present but disabled -- but it can then be enabled for specific | ||
65 | modules at load time with a 'pc_debug=#' option to insmod. | ||
66 | */ | ||
67 | |||
68 | #ifdef PCMCIA_DEBUG | ||
69 | static int pc_debug = PCMCIA_DEBUG; | ||
70 | module_param(pc_debug, int, 0); | ||
71 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); | ||
72 | static char *version = | ||
73 | "sedlbauer_cs.c 1.1a 2001/01/28 15:04:04 (M.Niemann)"; | ||
74 | #else | ||
75 | #define DEBUG(n, args...) | ||
76 | #endif | ||
77 | |||
78 | 60 | ||
79 | /*====================================================================*/ | 61 | /*====================================================================*/ |
80 | 62 | ||
@@ -151,7 +133,7 @@ static int sedlbauer_probe(struct pcmcia_device *link) | |||
151 | { | 133 | { |
152 | local_info_t *local; | 134 | local_info_t *local; |
153 | 135 | ||
154 | DEBUG(0, "sedlbauer_attach()\n"); | 136 | dev_dbg(&link->dev, "sedlbauer_attach()\n"); |
155 | 137 | ||
156 | /* Allocate space for private device-specific data */ | 138 | /* Allocate space for private device-specific data */ |
157 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); | 139 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); |
@@ -163,7 +145,6 @@ static int sedlbauer_probe(struct pcmcia_device *link) | |||
163 | 145 | ||
164 | /* Interrupt setup */ | 146 | /* Interrupt setup */ |
165 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 147 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
166 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
167 | link->irq.Handler = NULL; | 148 | link->irq.Handler = NULL; |
168 | 149 | ||
169 | /* | 150 | /* |
@@ -198,7 +179,7 @@ static int sedlbauer_probe(struct pcmcia_device *link) | |||
198 | 179 | ||
199 | static void sedlbauer_detach(struct pcmcia_device *link) | 180 | static void sedlbauer_detach(struct pcmcia_device *link) |
200 | { | 181 | { |
201 | DEBUG(0, "sedlbauer_detach(0x%p)\n", link); | 182 | dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link); |
202 | 183 | ||
203 | ((local_info_t *)link->priv)->stop = 1; | 184 | ((local_info_t *)link->priv)->stop = 1; |
204 | sedlbauer_release(link); | 185 | sedlbauer_release(link); |
@@ -214,9 +195,6 @@ static void sedlbauer_detach(struct pcmcia_device *link) | |||
214 | device available to the system. | 195 | device available to the system. |
215 | 196 | ||
216 | ======================================================================*/ | 197 | ======================================================================*/ |
217 | #define CS_CHECK(fn, ret) \ | ||
218 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
219 | |||
220 | static int sedlbauer_config_check(struct pcmcia_device *p_dev, | 198 | static int sedlbauer_config_check(struct pcmcia_device *p_dev, |
221 | cistpl_cftable_entry_t *cfg, | 199 | cistpl_cftable_entry_t *cfg, |
222 | cistpl_cftable_entry_t *dflt, | 200 | cistpl_cftable_entry_t *dflt, |
@@ -293,11 +271,11 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev, | |||
293 | req->Base = mem->win[0].host_addr; | 271 | req->Base = mem->win[0].host_addr; |
294 | req->Size = mem->win[0].len; | 272 | req->Size = mem->win[0].len; |
295 | req->AccessSpeed = 0; | 273 | req->AccessSpeed = 0; |
296 | if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0) | 274 | if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0) |
297 | return -ENODEV; | 275 | return -ENODEV; |
298 | map.Page = 0; | 276 | map.Page = 0; |
299 | map.CardOffset = mem->win[0].card_addr; | 277 | map.CardOffset = mem->win[0].card_addr; |
300 | if (pcmcia_map_mem_page(p_dev->win, &map) != 0) | 278 | if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0) |
301 | return -ENODEV; | 279 | return -ENODEV; |
302 | } | 280 | } |
303 | return 0; | 281 | return 0; |
@@ -309,10 +287,10 @@ static int sedlbauer_config(struct pcmcia_device *link) | |||
309 | { | 287 | { |
310 | local_info_t *dev = link->priv; | 288 | local_info_t *dev = link->priv; |
311 | win_req_t *req; | 289 | win_req_t *req; |
312 | int last_fn, last_ret; | 290 | int ret; |
313 | IsdnCard_t icard; | 291 | IsdnCard_t icard; |
314 | 292 | ||
315 | DEBUG(0, "sedlbauer_config(0x%p)\n", link); | 293 | dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link); |
316 | 294 | ||
317 | req = kzalloc(sizeof(win_req_t), GFP_KERNEL); | 295 | req = kzalloc(sizeof(win_req_t), GFP_KERNEL); |
318 | if (!req) | 296 | if (!req) |
@@ -330,8 +308,8 @@ static int sedlbauer_config(struct pcmcia_device *link) | |||
330 | these things without consulting the CIS, and most client drivers | 308 | these things without consulting the CIS, and most client drivers |
331 | will only use the CIS to fill in implementation-defined details. | 309 | will only use the CIS to fill in implementation-defined details. |
332 | */ | 310 | */ |
333 | last_ret = pcmcia_loop_config(link, sedlbauer_config_check, req); | 311 | ret = pcmcia_loop_config(link, sedlbauer_config_check, req); |
334 | if (last_ret) | 312 | if (ret) |
335 | goto failed; | 313 | goto failed; |
336 | 314 | ||
337 | /* | 315 | /* |
@@ -339,15 +317,20 @@ static int sedlbauer_config(struct pcmcia_device *link) | |||
339 | handler to the interrupt, unless the 'Handler' member of the | 317 | handler to the interrupt, unless the 'Handler' member of the |
340 | irq structure is initialized. | 318 | irq structure is initialized. |
341 | */ | 319 | */ |
342 | if (link->conf.Attributes & CONF_ENABLE_IRQ) | 320 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
343 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 321 | ret = pcmcia_request_irq(link, &link->irq); |
322 | if (ret) | ||
323 | goto failed; | ||
324 | } | ||
344 | 325 | ||
345 | /* | 326 | /* |
346 | This actually configures the PCMCIA socket -- setting up | 327 | This actually configures the PCMCIA socket -- setting up |
347 | the I/O windows and the interrupt mapping, and putting the | 328 | the I/O windows and the interrupt mapping, and putting the |
348 | card and host interface into "Memory and IO" mode. | 329 | card and host interface into "Memory and IO" mode. |
349 | */ | 330 | */ |
350 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 331 | ret = pcmcia_request_configuration(link, &link->conf); |
332 | if (ret) | ||
333 | goto failed; | ||
351 | 334 | ||
352 | /* | 335 | /* |
353 | At this point, the dev_node_t structure(s) need to be | 336 | At this point, the dev_node_t structure(s) need to be |
@@ -380,19 +363,18 @@ static int sedlbauer_config(struct pcmcia_device *link) | |||
380 | icard.protocol = protocol; | 363 | icard.protocol = protocol; |
381 | icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA; | 364 | icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA; |
382 | 365 | ||
383 | last_ret = hisax_init_pcmcia(link, &(((local_info_t*)link->priv)->stop), &icard); | 366 | ret = hisax_init_pcmcia(link, |
384 | if (last_ret < 0) { | 367 | &(((local_info_t *)link->priv)->stop), &icard); |
385 | printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n", | 368 | if (ret < 0) { |
386 | last_ret, link->io.BasePort1); | 369 | printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n", |
370 | ret, link->io.BasePort1); | ||
387 | sedlbauer_release(link); | 371 | sedlbauer_release(link); |
388 | return -ENODEV; | 372 | return -ENODEV; |
389 | } else | 373 | } else |
390 | ((local_info_t*)link->priv)->cardnr = last_ret; | 374 | ((local_info_t *)link->priv)->cardnr = ret; |
391 | 375 | ||
392 | return 0; | 376 | return 0; |
393 | 377 | ||
394 | cs_failed: | ||
395 | cs_error(link, last_fn, last_ret); | ||
396 | failed: | 378 | failed: |
397 | sedlbauer_release(link); | 379 | sedlbauer_release(link); |
398 | return -ENODEV; | 380 | return -ENODEV; |
@@ -410,7 +392,7 @@ failed: | |||
410 | static void sedlbauer_release(struct pcmcia_device *link) | 392 | static void sedlbauer_release(struct pcmcia_device *link) |
411 | { | 393 | { |
412 | local_info_t *local = link->priv; | 394 | local_info_t *local = link->priv; |
413 | DEBUG(0, "sedlbauer_release(0x%p)\n", link); | 395 | dev_dbg(&link->dev, "sedlbauer_release(0x%p)\n", link); |
414 | 396 | ||
415 | if (local) { | 397 | if (local) { |
416 | if (local->cardnr >= 0) { | 398 | if (local->cardnr >= 0) { |
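
The sedlbauer_cs hunks above are part of a tree-wide pcmcia cleanup: the CS_CHECK()/cs_error() macros and the last_fn/last_ret bookkeeping give way to plain return-code checks that branch to a single failed: label, and DEBUG() becomes dev_dbg(). A minimal sketch of the resulting config-path shape follows; apart from the pcmcia_* calls seen in the hunks, the example_* names are illustrative only.

static int example_config_check(struct pcmcia_device *p_dev,
				cistpl_cftable_entry_t *cfg,
				cistpl_cftable_entry_t *dflt,
				unsigned int vcc, void *priv_data);
static void example_cs_release(struct pcmcia_device *link);

static int example_cs_config(struct pcmcia_device *link)
{
	int ret;

	dev_dbg(&link->dev, "example_cs_config(0x%p)\n", link);

	/* every pcmcia_* call returns 0 on success; any failure takes the
	 * common cleanup path instead of reporting through cs_error() */
	ret = pcmcia_loop_config(link, example_config_check, NULL);
	if (ret)
		goto failed;

	if (link->conf.Attributes & CONF_ENABLE_IRQ) {
		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;
	}

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	return 0;

failed:
	example_cs_release(link);
	return -ENODEV;
}
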
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c index 623d111544d4..ea705394ce2b 100644 --- a/drivers/isdn/hisax/teles_cs.c +++ b/drivers/isdn/hisax/teles_cs.c | |||
@@ -38,23 +38,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Teles PCMCIA cards"); | |||
38 | MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de"); | 38 | MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de"); |
39 | MODULE_LICENSE("GPL"); | 39 | MODULE_LICENSE("GPL"); |
40 | 40 | ||
41 | /* | ||
42 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
43 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
44 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
45 | be present but disabled -- but it can then be enabled for specific | ||
46 | modules at load time with a 'pc_debug=#' option to insmod. | ||
47 | */ | ||
48 | |||
49 | #ifdef PCMCIA_DEBUG | ||
50 | static int pc_debug = PCMCIA_DEBUG; | ||
51 | module_param(pc_debug, int, 0); | ||
52 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); | ||
53 | static char *version = | ||
54 | "teles_cs.c 2.10 2002/07/30 22:23:34 kkeil"; | ||
55 | #else | ||
56 | #define DEBUG(n, args...) | ||
57 | #endif | ||
58 | 41 | ||
59 | /*====================================================================*/ | 42 | /*====================================================================*/ |
60 | 43 | ||
@@ -133,7 +116,7 @@ static int teles_probe(struct pcmcia_device *link) | |||
133 | { | 116 | { |
134 | local_info_t *local; | 117 | local_info_t *local; |
135 | 118 | ||
136 | DEBUG(0, "teles_attach()\n"); | 119 | dev_dbg(&link->dev, "teles_attach()\n"); |
137 | 120 | ||
138 | /* Allocate space for private device-specific data */ | 121 | /* Allocate space for private device-specific data */ |
139 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); | 122 | local = kzalloc(sizeof(local_info_t), GFP_KERNEL); |
@@ -145,7 +128,6 @@ static int teles_probe(struct pcmcia_device *link) | |||
145 | 128 | ||
146 | /* Interrupt setup */ | 129 | /* Interrupt setup */ |
147 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 130 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
148 | link->irq.IRQInfo1 = IRQ_LEVEL_ID|IRQ_SHARE_ID; | ||
149 | link->irq.Handler = NULL; | 131 | link->irq.Handler = NULL; |
150 | 132 | ||
151 | /* | 133 | /* |
@@ -178,7 +160,7 @@ static void teles_detach(struct pcmcia_device *link) | |||
178 | { | 160 | { |
179 | local_info_t *info = link->priv; | 161 | local_info_t *info = link->priv; |
180 | 162 | ||
181 | DEBUG(0, "teles_detach(0x%p)\n", link); | 163 | dev_dbg(&link->dev, "teles_detach(0x%p)\n", link); |
182 | 164 | ||
183 | info->busy = 1; | 165 | info->busy = 1; |
184 | teles_cs_release(link); | 166 | teles_cs_release(link); |
@@ -221,30 +203,25 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev, | |||
221 | static int teles_cs_config(struct pcmcia_device *link) | 203 | static int teles_cs_config(struct pcmcia_device *link) |
222 | { | 204 | { |
223 | local_info_t *dev; | 205 | local_info_t *dev; |
224 | int i, last_fn; | 206 | int i; |
225 | IsdnCard_t icard; | 207 | IsdnCard_t icard; |
226 | 208 | ||
227 | DEBUG(0, "teles_config(0x%p)\n", link); | 209 | dev_dbg(&link->dev, "teles_config(0x%p)\n", link); |
228 | dev = link->priv; | 210 | dev = link->priv; |
229 | 211 | ||
230 | i = pcmcia_loop_config(link, teles_cs_configcheck, NULL); | 212 | i = pcmcia_loop_config(link, teles_cs_configcheck, NULL); |
231 | if (i != 0) { | 213 | if (i != 0) |
232 | last_fn = RequestIO; | ||
233 | goto cs_failed; | 214 | goto cs_failed; |
234 | } | ||
235 | 215 | ||
236 | i = pcmcia_request_irq(link, &link->irq); | 216 | i = pcmcia_request_irq(link, &link->irq); |
237 | if (i != 0) { | 217 | if (i != 0) { |
238 | link->irq.AssignedIRQ = 0; | 218 | link->irq.AssignedIRQ = 0; |
239 | last_fn = RequestIRQ; | ||
240 | goto cs_failed; | 219 | goto cs_failed; |
241 | } | 220 | } |
242 | 221 | ||
243 | i = pcmcia_request_configuration(link, &link->conf); | 222 | i = pcmcia_request_configuration(link, &link->conf); |
244 | if (i != 0) { | 223 | if (i != 0) |
245 | last_fn = RequestConfiguration; | ||
246 | goto cs_failed; | 224 | goto cs_failed; |
247 | } | ||
248 | 225 | ||
249 | /* At this point, the dev_node_t structure(s) should be | 226 | /* At this point, the dev_node_t structure(s) should be |
250 | initialized and arranged in a linked list at link->dev. *//* */ | 227 | initialized and arranged in a linked list at link->dev. *//* */ |
@@ -283,7 +260,6 @@ static int teles_cs_config(struct pcmcia_device *link) | |||
283 | return 0; | 260 | return 0; |
284 | 261 | ||
285 | cs_failed: | 262 | cs_failed: |
286 | cs_error(link, last_fn, i); | ||
287 | teles_cs_release(link); | 263 | teles_cs_release(link); |
288 | return -ENODEV; | 264 | return -ENODEV; |
289 | } /* teles_cs_config */ | 265 | } /* teles_cs_config */ |
@@ -300,7 +276,7 @@ static void teles_cs_release(struct pcmcia_device *link) | |||
300 | { | 276 | { |
301 | local_info_t *local = link->priv; | 277 | local_info_t *local = link->priv; |
302 | 278 | ||
303 | DEBUG(0, "teles_cs_release(0x%p)\n", link); | 279 | dev_dbg(&link->dev, "teles_cs_release(0x%p)\n", link); |
304 | 280 | ||
305 | if (local) { | 281 | if (local) { |
306 | if (local->cardnr >= 0) { | 282 | if (local->cardnr >= 0) { |
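
teles_cs also loses its private PCMCIA_DEBUG/pc_debug machinery; messages now go through dev_dbg(), which is compiled out unless DEBUG is defined and, with CONFIG_DYNAMIC_DEBUG, can be switched on per call site at run time. A short before/after sketch (the dynamic-debug hint at the end is general background, not taken from this diff):

/* before: a per-module knob and a printf-style wrapper */
#ifdef PCMCIA_DEBUG
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0);
#define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args);
#else
#define DEBUG(n, args...)
#endif
	DEBUG(0, "teles_config(0x%p)\n", link);

/* after: the generic helper, tagged with the struct device */
	dev_dbg(&link->dev, "teles_config(0x%p)\n", link);
/* with CONFIG_DYNAMIC_DEBUG such sites can be enabled at run time, e.g.
 *   echo 'module teles_cs +p' > <debugfs>/dynamic_debug/control */
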
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c index 5d91362e3066..1f7c10f6b7f2 100644 --- a/drivers/leds/leds-locomo.c +++ b/drivers/leds/leds-locomo.c | |||
@@ -44,7 +44,7 @@ static void locomoled_brightness_set1(struct led_classdev *led_cdev, | |||
44 | 44 | ||
45 | static struct led_classdev locomo_led0 = { | 45 | static struct led_classdev locomo_led0 = { |
46 | .name = "locomo:amber:charge", | 46 | .name = "locomo:amber:charge", |
47 | .default_trigger = "sharpsl-charge", | 47 | .default_trigger = "main-battery-charging", |
48 | .brightness_set = locomoled_brightness_set0, | 48 | .brightness_set = locomoled_brightness_set0, |
49 | }; | 49 | }; |
50 | 50 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a053423785c9..e07ce2e033a9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1650,11 +1650,12 @@ static void raid1d(mddev_t *mddev) | |||
1650 | r1_bio->sector, | 1650 | r1_bio->sector, |
1651 | r1_bio->sectors); | 1651 | r1_bio->sectors); |
1652 | unfreeze_array(conf); | 1652 | unfreeze_array(conf); |
1653 | } | 1653 | } else |
1654 | md_error(mddev, | ||
1655 | conf->mirrors[r1_bio->read_disk].rdev); | ||
1654 | 1656 | ||
1655 | bio = r1_bio->bios[r1_bio->read_disk]; | 1657 | bio = r1_bio->bios[r1_bio->read_disk]; |
1656 | if ((disk=read_balance(conf, r1_bio)) == -1 || | 1658 | if ((disk=read_balance(conf, r1_bio)) == -1) { |
1657 | disk == r1_bio->read_disk) { | ||
1658 | printk(KERN_ALERT "raid1: %s: unrecoverable I/O" | 1659 | printk(KERN_ALERT "raid1: %s: unrecoverable I/O" |
1659 | " read error for block %llu\n", | 1660 | " read error for block %llu\n", |
1660 | bdevname(bio->bi_bdev,b), | 1661 | bdevname(bio->bi_bdev,b), |
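
The raid1 hunk changes what raid1d() does when a read error cannot be repaired in place: the failing member is now handed to md_error(), which marks it Faulty, so the subsequent read_balance() will never pick it again -- which is why the explicit "disk == r1_bio->read_disk" test can be dropped. A rough condensation of that flow, as if inside raid1.c; only md_error() and read_balance() are real calls from the hunk, the wrapper and the "repaired" flag are placeholders:

static void example_retry_read(conf_t *conf, mddev_t *mddev,
			       r1bio_t *r1_bio, int repaired)
{
	int disk;

	if (!repaired)
		/* could not rewrite the bad sectors: fail the member so
		 * read_balance() stops considering it */
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);

	disk = read_balance(conf, r1_bio);
	if (disk == -1) {
		/* no usable mirror left: the read really is unrecoverable */
		return;
	}
	/* otherwise the request is reissued against 'disk' */
}
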
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index ddf639ed2fd8..98082416aa52 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/wait.h> | 31 | #include <linux/wait.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/poll.h> | 33 | #include <linux/poll.h> |
34 | #include <linux/semaphore.h> | ||
34 | #include <linux/module.h> | 35 | #include <linux/module.h> |
35 | #include <linux/list.h> | 36 | #include <linux/list.h> |
36 | #include <linux/freezer.h> | 37 | #include <linux/freezer.h> |
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c index 57271cb3b316..84815f9ef636 100644 --- a/drivers/mfd/mcp-core.c +++ b/drivers/mfd/mcp-core.c | |||
@@ -17,11 +17,11 @@ | |||
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/mfd/mcp.h> | ||
20 | 21 | ||
21 | #include <mach/dma.h> | 22 | #include <mach/dma.h> |
22 | #include <asm/system.h> | 23 | #include <asm/system.h> |
23 | 24 | ||
24 | #include "mcp.h" | ||
25 | 25 | ||
26 | #define to_mcp(d) container_of(d, struct mcp, attached_device) | 26 | #define to_mcp(d) container_of(d, struct mcp, attached_device) |
27 | #define to_mcp_driver(d) container_of(d, struct mcp_driver, drv) | 27 | #define to_mcp_driver(d) container_of(d, struct mcp_driver, drv) |
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c index 62b32dabf629..258427232728 100644 --- a/drivers/mfd/mcp-sa11x0.c +++ b/drivers/mfd/mcp-sa11x0.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/mfd/mcp.h> | ||
22 | 23 | ||
23 | #include <mach/dma.h> | 24 | #include <mach/dma.h> |
24 | #include <mach/hardware.h> | 25 | #include <mach/hardware.h> |
@@ -28,7 +29,6 @@ | |||
28 | 29 | ||
29 | #include <mach/assabet.h> | 30 | #include <mach/assabet.h> |
30 | 31 | ||
31 | #include "mcp.h" | ||
32 | 32 | ||
33 | struct mcp_sa11x0 { | 33 | struct mcp_sa11x0 { |
34 | u32 mccr0; | 34 | u32 mccr0; |
@@ -163,6 +163,7 @@ static int mcp_sa11x0_probe(struct platform_device *pdev) | |||
163 | mcp->dma_audio_wr = DMA_Ser4MCP0Wr; | 163 | mcp->dma_audio_wr = DMA_Ser4MCP0Wr; |
164 | mcp->dma_telco_rd = DMA_Ser4MCP1Rd; | 164 | mcp->dma_telco_rd = DMA_Ser4MCP1Rd; |
165 | mcp->dma_telco_wr = DMA_Ser4MCP1Wr; | 165 | mcp->dma_telco_wr = DMA_Ser4MCP1Wr; |
166 | mcp->gpio_base = data->gpio_base; | ||
166 | 167 | ||
167 | platform_set_drvdata(pdev, mcp); | 168 | platform_set_drvdata(pdev, mcp); |
168 | 169 | ||
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c index 86fed4870f93..cea9da60850d 100644 --- a/drivers/mfd/ucb1x00-assabet.c +++ b/drivers/mfd/ucb1x00-assabet.c | |||
@@ -14,10 +14,10 @@ | |||
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/mfd/ucb1x00.h> | ||
17 | 18 | ||
18 | #include <mach/dma.h> | 19 | #include <mach/dma.h> |
19 | 20 | ||
20 | #include "ucb1x00.h" | ||
21 | 21 | ||
22 | #define UCB1X00_ATTR(name,input)\ | 22 | #define UCB1X00_ATTR(name,input)\ |
23 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \ | 23 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \ |
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 60c3988f3cf3..252b74188ec2 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c | |||
@@ -25,12 +25,12 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/mfd/ucb1x00.h> | ||
29 | #include <linux/gpio.h> | ||
28 | 30 | ||
29 | #include <mach/dma.h> | 31 | #include <mach/dma.h> |
30 | #include <mach/hardware.h> | 32 | #include <mach/hardware.h> |
31 | 33 | ||
32 | #include "ucb1x00.h" | ||
33 | |||
34 | static DEFINE_MUTEX(ucb1x00_mutex); | 34 | static DEFINE_MUTEX(ucb1x00_mutex); |
35 | static LIST_HEAD(ucb1x00_drivers); | 35 | static LIST_HEAD(ucb1x00_drivers); |
36 | static LIST_HEAD(ucb1x00_devices); | 36 | static LIST_HEAD(ucb1x00_devices); |
@@ -108,6 +108,60 @@ unsigned int ucb1x00_io_read(struct ucb1x00 *ucb) | |||
108 | return ucb1x00_reg_read(ucb, UCB_IO_DATA); | 108 | return ucb1x00_reg_read(ucb, UCB_IO_DATA); |
109 | } | 109 | } |
110 | 110 | ||
111 | static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | ||
112 | { | ||
113 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); | ||
114 | unsigned long flags; | ||
115 | |||
116 | spin_lock_irqsave(&ucb->io_lock, flags); | ||
117 | if (value) | ||
118 | ucb->io_out |= 1 << offset; | ||
119 | else | ||
120 | ucb->io_out &= ~(1 << offset); | ||
121 | |||
122 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); | ||
123 | spin_unlock_irqrestore(&ucb->io_lock, flags); | ||
124 | } | ||
125 | |||
126 | static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset) | ||
127 | { | ||
128 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); | ||
129 | return ucb1x00_reg_read(ucb, UCB_IO_DATA) & (1 << offset); | ||
130 | } | ||
131 | |||
132 | static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
133 | { | ||
134 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); | ||
135 | unsigned long flags; | ||
136 | |||
137 | spin_lock_irqsave(&ucb->io_lock, flags); | ||
138 | ucb->io_dir &= ~(1 << offset); | ||
139 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); | ||
140 | spin_unlock_irqrestore(&ucb->io_lock, flags); | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset | ||
146 | , int value) | ||
147 | { | ||
148 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); | ||
149 | unsigned long flags; | ||
150 | |||
151 | spin_lock_irqsave(&ucb->io_lock, flags); | ||
152 | ucb->io_dir |= (1 << offset); | ||
153 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); | ||
154 | |||
155 | if (value) | ||
156 | ucb->io_out |= 1 << offset; | ||
157 | else | ||
158 | ucb->io_out &= ~(1 << offset); | ||
159 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); | ||
160 | spin_unlock_irqrestore(&ucb->io_lock, flags); | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
111 | /* | 165 | /* |
112 | * UCB1300 data sheet says we must: | 166 | * UCB1300 data sheet says we must: |
113 | * 1. enable ADC => 5us (including reference startup time) | 167 | * 1. enable ADC => 5us (including reference startup time) |
@@ -476,6 +530,7 @@ static int ucb1x00_probe(struct mcp *mcp) | |||
476 | struct ucb1x00_driver *drv; | 530 | struct ucb1x00_driver *drv; |
477 | unsigned int id; | 531 | unsigned int id; |
478 | int ret = -ENODEV; | 532 | int ret = -ENODEV; |
533 | int temp; | ||
479 | 534 | ||
480 | mcp_enable(mcp); | 535 | mcp_enable(mcp); |
481 | id = mcp_reg_read(mcp, UCB_ID); | 536 | id = mcp_reg_read(mcp, UCB_ID); |
@@ -508,12 +563,27 @@ static int ucb1x00_probe(struct mcp *mcp) | |||
508 | goto err_free; | 563 | goto err_free; |
509 | } | 564 | } |
510 | 565 | ||
566 | ucb->gpio.base = -1; | ||
567 | if (mcp->gpio_base != 0) { | ||
568 | ucb->gpio.label = dev_name(&ucb->dev); | ||
569 | ucb->gpio.base = mcp->gpio_base; | ||
570 | ucb->gpio.ngpio = 10; | ||
571 | ucb->gpio.set = ucb1x00_gpio_set; | ||
572 | ucb->gpio.get = ucb1x00_gpio_get; | ||
573 | ucb->gpio.direction_input = ucb1x00_gpio_direction_input; | ||
574 | ucb->gpio.direction_output = ucb1x00_gpio_direction_output; | ||
575 | ret = gpiochip_add(&ucb->gpio); | ||
576 | if (ret) | ||
577 | goto err_free; | ||
578 | } else | ||
579 | dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); | ||
580 | |||
511 | ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING, | 581 | ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING, |
512 | "UCB1x00", ucb); | 582 | "UCB1x00", ucb); |
513 | if (ret) { | 583 | if (ret) { |
514 | printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n", | 584 | printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n", |
515 | ucb->irq, ret); | 585 | ucb->irq, ret); |
516 | goto err_free; | 586 | goto err_gpio; |
517 | } | 587 | } |
518 | 588 | ||
519 | mcp_set_drvdata(mcp, ucb); | 589 | mcp_set_drvdata(mcp, ucb); |
@@ -522,6 +592,7 @@ static int ucb1x00_probe(struct mcp *mcp) | |||
522 | if (ret) | 592 | if (ret) |
523 | goto err_irq; | 593 | goto err_irq; |
524 | 594 | ||
595 | |||
525 | INIT_LIST_HEAD(&ucb->devs); | 596 | INIT_LIST_HEAD(&ucb->devs); |
526 | mutex_lock(&ucb1x00_mutex); | 597 | mutex_lock(&ucb1x00_mutex); |
527 | list_add(&ucb->node, &ucb1x00_devices); | 598 | list_add(&ucb->node, &ucb1x00_devices); |
@@ -529,10 +600,14 @@ static int ucb1x00_probe(struct mcp *mcp) | |||
529 | ucb1x00_add_dev(ucb, drv); | 600 | ucb1x00_add_dev(ucb, drv); |
530 | } | 601 | } |
531 | mutex_unlock(&ucb1x00_mutex); | 602 | mutex_unlock(&ucb1x00_mutex); |
603 | |||
532 | goto out; | 604 | goto out; |
533 | 605 | ||
534 | err_irq: | 606 | err_irq: |
535 | free_irq(ucb->irq, ucb); | 607 | free_irq(ucb->irq, ucb); |
608 | err_gpio: | ||
609 | if (ucb->gpio.base != -1) | ||
610 | temp = gpiochip_remove(&ucb->gpio); | ||
536 | err_free: | 611 | err_free: |
537 | kfree(ucb); | 612 | kfree(ucb); |
538 | err_disable: | 613 | err_disable: |
@@ -545,6 +620,7 @@ static void ucb1x00_remove(struct mcp *mcp) | |||
545 | { | 620 | { |
546 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); | 621 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); |
547 | struct list_head *l, *n; | 622 | struct list_head *l, *n; |
623 | int ret; | ||
548 | 624 | ||
549 | mutex_lock(&ucb1x00_mutex); | 625 | mutex_lock(&ucb1x00_mutex); |
550 | list_del(&ucb->node); | 626 | list_del(&ucb->node); |
@@ -554,6 +630,12 @@ static void ucb1x00_remove(struct mcp *mcp) | |||
554 | } | 630 | } |
555 | mutex_unlock(&ucb1x00_mutex); | 631 | mutex_unlock(&ucb1x00_mutex); |
556 | 632 | ||
633 | if (ucb->gpio.base != -1) { | ||
634 | ret = gpiochip_remove(&ucb->gpio); | ||
635 | if (ret) | ||
636 | dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret); | ||
637 | } | ||
638 | |||
557 | free_irq(ucb->irq, ucb); | 639 | free_irq(ucb->irq, ucb); |
558 | device_unregister(&ucb->dev); | 640 | device_unregister(&ucb->dev); |
559 | } | 641 | } |
@@ -604,6 +686,7 @@ static int ucb1x00_resume(struct mcp *mcp) | |||
604 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); | 686 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); |
605 | struct ucb1x00_dev *dev; | 687 | struct ucb1x00_dev *dev; |
606 | 688 | ||
689 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); | ||
607 | mutex_lock(&ucb1x00_mutex); | 690 | mutex_lock(&ucb1x00_mutex); |
608 | list_for_each_entry(dev, &ucb->devs, dev_node) { | 691 | list_for_each_entry(dev, &ucb->devs, dev_node) { |
609 | if (dev->drv->resume) | 692 | if (dev->drv->resume) |
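
With the gpio_chip registered in ucb1x00_probe() above, the ten UCB1x00 I/O pins become ordinary gpiolib GPIOs, numbered from the gpio_base the platform supplies (see the mcp-sa11x0 hunk setting mcp->gpio_base). A hedged consumer-side sketch -- the base value and pin index below are invented:

#include <linux/gpio.h>

#define EXAMPLE_UCB_GPIO_BASE	200	/* whatever the board passed as gpio_base */

static int example_claim_ucb_pin(void)
{
	int ret;

	ret = gpio_request(EXAMPLE_UCB_GPIO_BASE + 3, "example-pin");
	if (ret)
		return ret;

	/* ends up in ucb1x00_gpio_direction_output()/_set() above,
	 * which poke UCB_IO_DIR and UCB_IO_DATA under io_lock */
	ret = gpio_direction_output(EXAMPLE_UCB_GPIO_BASE + 3, 1);
	if (ret)
		gpio_free(EXAMPLE_UCB_GPIO_BASE + 3);
	return ret;
}
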
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 61b7d3eb9a2f..000cb414a78a 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c | |||
@@ -30,12 +30,12 @@ | |||
30 | #include <linux/freezer.h> | 30 | #include <linux/freezer.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/kthread.h> | 32 | #include <linux/kthread.h> |
33 | #include <linux/mfd/ucb1x00.h> | ||
33 | 34 | ||
34 | #include <mach/dma.h> | 35 | #include <mach/dma.h> |
35 | #include <mach/collie.h> | 36 | #include <mach/collie.h> |
36 | #include <asm/mach-types.h> | 37 | #include <asm/mach-types.h> |
37 | 38 | ||
38 | #include "ucb1x00.h" | ||
39 | 39 | ||
40 | 40 | ||
41 | struct ucb1x00_ts { | 41 | struct ucb1x00_ts { |
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 49b7885c2702..7f27576ca046 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c | |||
@@ -29,7 +29,7 @@ | |||
29 | /* Current settings - values are 2*2^(reg_val/4) microamps. These are | 29 | /* Current settings - values are 2*2^(reg_val/4) microamps. These are |
30 | * exported since they are used by multiple drivers. | 30 | * exported since they are used by multiple drivers. |
31 | */ | 31 | */ |
32 | int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL] = { | 32 | int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = { |
33 | 2, | 33 | 2, |
34 | 2, | 34 | 2, |
35 | 3, | 35 | 3, |
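
The wm831x change is a plain off-by-one fix: the ISINK current table is indexed by register values 0..WM831X_ISINK_MAX_ISEL inclusive, so it needs MAX + 1 entries; sized with just MAX, a lookup at the top value read one element past the end. Tiny illustration (the numbers are made up, not the WM831x values):

#include <linux/errno.h>

enum { EXAMPLE_MAX_SEL = 3 };		/* highest valid register value */

static const int example_microamps[EXAMPLE_MAX_SEL + 1] = { 2, 2, 3, 3 };

static int example_lookup(unsigned int sel)
{
	/* in bounds for every legal selector only because of the "+ 1" */
	return sel <= EXAMPLE_MAX_SEL ? example_microamps[sel] : -EINVAL;
}
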
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index c76677afda1b..b5bbe59f9c57 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
@@ -106,7 +106,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) | |||
106 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | 106 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); |
107 | 107 | ||
108 | #if defined CONFIG_X86_64 | 108 | #if defined CONFIG_X86_64 |
109 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); | 109 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, |
110 | UV_AFFINITY_CPU); | ||
110 | if (mq->irq < 0) { | 111 | if (mq->irq < 0) { |
111 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", | 112 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", |
112 | -mq->irq); | 113 | -mq->irq); |
@@ -136,7 +137,7 @@ static void | |||
136 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) | 137 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) |
137 | { | 138 | { |
138 | #if defined CONFIG_X86_64 | 139 | #if defined CONFIG_X86_64 |
139 | uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); | 140 | uv_teardown_irq(mq->irq); |
140 | 141 | ||
141 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | 142 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
142 | int mmr_pnode; | 143 | int mmr_pnode; |
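
The xpc_uv hunks track an x86 UV irq API change: uv_setup_irq() now takes a fifth argument choosing how the interrupt is targeted (UV_AFFINITY_CPU here, apparently binding delivery to the given cpu), and uv_teardown_irq() needs only the Linux irq number because the blade/MMR-offset bookkeeping moved into the UV irq code. Minimal sketch of the new calling convention, with placeholder wrappers:

static int example_get_uv_irq(char *name, int cpu, int mmr_blade,
			      unsigned long mmr_offset)
{
	/* a negative return value signals failure, as the hunk's check shows */
	return uv_setup_irq(name, cpu, mmr_blade, mmr_offset, UV_AFFINITY_CPU);
}

static void example_put_uv_irq(int irq)
{
	uv_teardown_irq(irq);	/* blade/offset arguments are gone */
}
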
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index c85f6166056e..bb47ff465c04 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -762,6 +762,8 @@ static int pxamci_remove(struct platform_device *pdev) | |||
762 | if (mmc) { | 762 | if (mmc) { |
763 | struct pxamci_host *host = mmc_priv(mmc); | 763 | struct pxamci_host *host = mmc_priv(mmc); |
764 | 764 | ||
765 | mmc_remove_host(mmc); | ||
766 | |||
765 | if (host->pdata) { | 767 | if (host->pdata) { |
766 | gpio_cd = host->pdata->gpio_card_detect; | 768 | gpio_cd = host->pdata->gpio_card_detect; |
767 | gpio_ro = host->pdata->gpio_card_ro; | 769 | gpio_ro = host->pdata->gpio_card_ro; |
@@ -781,8 +783,6 @@ static int pxamci_remove(struct platform_device *pdev) | |||
781 | if (host->pdata && host->pdata->exit) | 783 | if (host->pdata && host->pdata->exit) |
782 | host->pdata->exit(&pdev->dev, mmc); | 784 | host->pdata->exit(&pdev->dev, mmc); |
783 | 785 | ||
784 | mmc_remove_host(mmc); | ||
785 | |||
786 | pxamci_stop_clock(host); | 786 | pxamci_stop_clock(host); |
787 | writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| | 787 | writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| |
788 | END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, | 788 | END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, |
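
The pxamci fix is purely about ordering in the remove path: mmc_remove_host() must run before the driver tears down its GPIOs, IRQ and clock, because until the host is detached the MMC core may still issue requests against those resources. Sketch of the intended shape (abbreviated; the freeing steps are summarized in a comment):

#include <linux/platform_device.h>
#include <linux/mmc/host.h>

static int example_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	/* detach from the MMC core first: after this no new requests can
	 * arrive, so releasing GPIOs, the IRQ and the clock is safe */
	mmc_remove_host(mmc);

	/* ... free card-detect/read-only/power GPIOs, stop the controller
	 * clock, free the IRQ and DMA resources, as in the driver ... */

	mmc_free_host(mmc);
	return 0;
}
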
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index d600c2deff73..689d6a79ffc0 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c | |||
@@ -118,11 +118,9 @@ static caddr_t remap_window(struct map_info *map, unsigned long to) | |||
118 | DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", | 118 | DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", |
119 | dev->offset, mrq.CardOffset); | 119 | dev->offset, mrq.CardOffset); |
120 | mrq.Page = 0; | 120 | mrq.Page = 0; |
121 | ret = pcmcia_map_mem_page(win, &mrq); | 121 | ret = pcmcia_map_mem_page(dev->p_dev, win, &mrq); |
122 | if (ret != 0) { | 122 | if (ret != 0) |
123 | cs_error(dev->p_dev, MapMemPage, ret); | ||
124 | return NULL; | 123 | return NULL; |
125 | } | ||
126 | dev->offset = mrq.CardOffset; | 124 | dev->offset = mrq.CardOffset; |
127 | } | 125 | } |
128 | return dev->win_base + (to & (dev->win_size-1)); | 126 | return dev->win_base + (to & (dev->win_size-1)); |
@@ -327,8 +325,6 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on) | |||
327 | 325 | ||
328 | DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); | 326 | DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); |
329 | ret = pcmcia_modify_configuration(link, &mod); | 327 | ret = pcmcia_modify_configuration(link, &mod); |
330 | if (ret != 0) | ||
331 | cs_error(link, ModifyConfiguration, ret); | ||
332 | } | 328 | } |
333 | 329 | ||
334 | 330 | ||
@@ -348,107 +344,116 @@ static void pcmciamtd_release(struct pcmcia_device *link) | |||
348 | iounmap(dev->win_base); | 344 | iounmap(dev->win_base); |
349 | dev->win_base = NULL; | 345 | dev->win_base = NULL; |
350 | } | 346 | } |
351 | pcmcia_release_window(link->win); | 347 | pcmcia_release_window(link, link->win); |
352 | } | 348 | } |
353 | pcmcia_disable_device(link); | 349 | pcmcia_disable_device(link); |
354 | } | 350 | } |
355 | 351 | ||
356 | 352 | ||
357 | static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name) | 353 | #ifdef CONFIG_MTD_DEBUG |
354 | static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, | ||
355 | tuple_t *tuple, | ||
356 | void *priv_data) | ||
358 | { | 357 | { |
359 | int rc; | ||
360 | tuple_t tuple; | ||
361 | cisparse_t parse; | 358 | cisparse_t parse; |
362 | u_char buf[64]; | ||
363 | |||
364 | tuple.Attributes = 0; | ||
365 | tuple.TupleData = (cisdata_t *)buf; | ||
366 | tuple.TupleDataMax = sizeof(buf); | ||
367 | tuple.TupleOffset = 0; | ||
368 | tuple.DesiredTuple = RETURN_FIRST_TUPLE; | ||
369 | |||
370 | rc = pcmcia_get_first_tuple(link, &tuple); | ||
371 | while (rc == 0) { | ||
372 | rc = pcmcia_get_tuple_data(link, &tuple); | ||
373 | if (rc != 0) { | ||
374 | cs_error(link, GetTupleData, rc); | ||
375 | break; | ||
376 | } | ||
377 | rc = pcmcia_parse_tuple(&tuple, &parse); | ||
378 | if (rc != 0) { | ||
379 | cs_error(link, ParseTuple, rc); | ||
380 | break; | ||
381 | } | ||
382 | 359 | ||
383 | switch(tuple.TupleCode) { | 360 | if (!pcmcia_parse_tuple(tuple, &parse)) { |
384 | case CISTPL_FORMAT: { | 361 | cistpl_format_t *t = &parse.format; |
385 | cistpl_format_t *t = &parse.format; | 362 | (void)t; /* Shut up, gcc */ |
386 | (void)t; /* Shut up, gcc */ | 363 | DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u", |
387 | DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u", | 364 | t->type, t->edc, t->offset, t->length); |
388 | t->type, t->edc, t->offset, t->length); | 365 | } |
389 | break; | 366 | return -ENOSPC; |
367 | } | ||
390 | 368 | ||
391 | } | 369 | static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev, |
370 | tuple_t *tuple, | ||
371 | void *priv_data) | ||
372 | { | ||
373 | cisparse_t parse; | ||
374 | int i; | ||
392 | 375 | ||
393 | case CISTPL_DEVICE: { | 376 | if (!pcmcia_parse_tuple(tuple, &parse)) { |
394 | cistpl_device_t *t = &parse.device; | 377 | cistpl_jedec_t *t = &parse.jedec; |
395 | int i; | 378 | for (i = 0; i < t->nid; i++) |
396 | DEBUG(2, "Common memory:"); | 379 | DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info); |
397 | dev->pcmcia_map.size = t->dev[0].size; | 380 | } |
398 | for(i = 0; i < t->ndev; i++) { | 381 | return -ENOSPC; |
399 | DEBUG(2, "Region %d, type = %u", i, t->dev[i].type); | 382 | } |
400 | DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp); | 383 | #endif |
401 | DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed); | ||
402 | DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size); | ||
403 | } | ||
404 | break; | ||
405 | } | ||
406 | 384 | ||
407 | case CISTPL_VERS_1: { | 385 | static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, |
408 | cistpl_vers_1_t *t = &parse.version_1; | 386 | tuple_t *tuple, |
409 | int i; | 387 | void *priv_data) |
410 | if(t->ns) { | 388 | { |
411 | dev->mtd_name[0] = '\0'; | 389 | struct pcmciamtd_dev *dev = priv_data; |
412 | for(i = 0; i < t->ns; i++) { | 390 | cisparse_t parse; |
413 | if(i) | 391 | cistpl_device_t *t = &parse.device; |
414 | strcat(dev->mtd_name, " "); | 392 | int i; |
415 | strcat(dev->mtd_name, t->str+t->ofs[i]); | ||
416 | } | ||
417 | } | ||
418 | DEBUG(2, "Found name: %s", dev->mtd_name); | ||
419 | break; | ||
420 | } | ||
421 | 393 | ||
422 | case CISTPL_JEDEC_C: { | 394 | if (pcmcia_parse_tuple(tuple, &parse)) |
423 | cistpl_jedec_t *t = &parse.jedec; | 395 | return -EINVAL; |
424 | int i; | 396 | |
425 | for(i = 0; i < t->nid; i++) { | 397 | DEBUG(2, "Common memory:"); |
426 | DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info); | 398 | dev->pcmcia_map.size = t->dev[0].size; |
427 | } | 399 | /* from here on: DEBUG only */ |
428 | break; | 400 | for (i = 0; i < t->ndev; i++) { |
429 | } | 401 | DEBUG(2, "Region %d, type = %u", i, t->dev[i].type); |
402 | DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp); | ||
403 | DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed); | ||
404 | DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size); | ||
405 | } | ||
406 | return 0; | ||
407 | } | ||
430 | 408 | ||
431 | case CISTPL_DEVICE_GEO: { | 409 | static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev, |
432 | cistpl_device_geo_t *t = &parse.device_geo; | 410 | tuple_t *tuple, |
433 | int i; | 411 | void *priv_data) |
434 | dev->pcmcia_map.bankwidth = t->geo[0].buswidth; | 412 | { |
435 | for(i = 0; i < t->ngeo; i++) { | 413 | struct pcmciamtd_dev *dev = priv_data; |
436 | DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth); | 414 | cisparse_t parse; |
437 | DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block); | 415 | cistpl_device_geo_t *t = &parse.device_geo; |
438 | DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block); | 416 | int i; |
439 | DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block); | ||
440 | DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition); | ||
441 | DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave); | ||
442 | } | ||
443 | break; | ||
444 | } | ||
445 | 417 | ||
446 | default: | 418 | if (pcmcia_parse_tuple(tuple, &parse)) |
447 | DEBUG(2, "Unknown tuple code %d", tuple.TupleCode); | 419 | return -EINVAL; |
448 | } | 420 | |
421 | dev->pcmcia_map.bankwidth = t->geo[0].buswidth; | ||
422 | /* from here on: DEBUG only */ | ||
423 | for (i = 0; i < t->ngeo; i++) { | ||
424 | DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth); | ||
425 | DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block); | ||
426 | DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block); | ||
427 | DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block); | ||
428 | DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition); | ||
429 | DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave); | ||
430 | } | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | |||
435 | static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name) | ||
436 | { | ||
437 | int i; | ||
449 | 438 | ||
450 | rc = pcmcia_get_next_tuple(link, &tuple); | 439 | if (p_dev->prod_id[0]) { |
440 | dev->mtd_name[0] = '\0'; | ||
441 | for (i = 0; i < 4; i++) { | ||
442 | if (i) | ||
443 | strcat(dev->mtd_name, " "); | ||
444 | if (p_dev->prod_id[i]) | ||
445 | strcat(dev->mtd_name, p_dev->prod_id[i]); | ||
446 | } | ||
447 | DEBUG(2, "Found name: %s", dev->mtd_name); | ||
451 | } | 448 | } |
449 | |||
450 | #ifdef CONFIG_MTD_DEBUG | ||
451 | pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL); | ||
452 | pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL); | ||
453 | #endif | ||
454 | pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev); | ||
455 | pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev); | ||
456 | |||
452 | if(!dev->pcmcia_map.size) | 457 | if(!dev->pcmcia_map.size) |
453 | dev->pcmcia_map.size = MAX_PCMCIA_ADDR; | 458 | dev->pcmcia_map.size = MAX_PCMCIA_ADDR; |
454 | 459 | ||
@@ -481,16 +486,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, | |||
481 | * MTD device available to the system. | 486 | * MTD device available to the system. |
482 | */ | 487 | */ |
483 | 488 | ||
484 | #define CS_CHECK(fn, ret) \ | ||
485 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
486 | |||
487 | static int pcmciamtd_config(struct pcmcia_device *link) | 489 | static int pcmciamtd_config(struct pcmcia_device *link) |
488 | { | 490 | { |
489 | struct pcmciamtd_dev *dev = link->priv; | 491 | struct pcmciamtd_dev *dev = link->priv; |
490 | struct mtd_info *mtd = NULL; | 492 | struct mtd_info *mtd = NULL; |
491 | cs_status_t status; | 493 | cs_status_t status; |
492 | win_req_t req; | 494 | win_req_t req; |
493 | int last_ret = 0, last_fn = 0; | ||
494 | int ret; | 495 | int ret; |
495 | int i; | 496 | int i; |
496 | static char *probes[] = { "jedec_probe", "cfi_probe" }; | 497 | static char *probes[] = { "jedec_probe", "cfi_probe" }; |
@@ -529,7 +530,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
529 | int ret; | 530 | int ret; |
530 | DEBUG(2, "requesting window with size = %dKiB memspeed = %d", | 531 | DEBUG(2, "requesting window with size = %dKiB memspeed = %d", |
531 | req.Size >> 10, req.AccessSpeed); | 532 | req.Size >> 10, req.AccessSpeed); |
532 | ret = pcmcia_request_window(&link, &req, &link->win); | 533 | ret = pcmcia_request_window(link, &req, &link->win); |
533 | DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); | 534 | DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); |
534 | if(ret) { | 535 | if(ret) { |
535 | req.Size >>= 1; | 536 | req.Size >>= 1; |
@@ -577,7 +578,6 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
577 | DEBUG(2, "Setting Configuration"); | 578 | DEBUG(2, "Setting Configuration"); |
578 | ret = pcmcia_request_configuration(link, &link->conf); | 579 | ret = pcmcia_request_configuration(link, &link->conf); |
579 | if (ret != 0) { | 580 | if (ret != 0) { |
580 | cs_error(link, RequestConfiguration, ret); | ||
581 | if (dev->win_base) { | 581 | if (dev->win_base) { |
582 | iounmap(dev->win_base); | 582 | iounmap(dev->win_base); |
583 | dev->win_base = NULL; | 583 | dev->win_base = NULL; |
@@ -652,8 +652,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
652 | link->dev_node = &dev->node; | 652 | link->dev_node = &dev->node; |
653 | return 0; | 653 | return 0; |
654 | 654 | ||
655 | cs_failed: | 655 | failed: |
656 | cs_error(link, last_fn, last_ret); | ||
657 | err("CS Error, exiting"); | 656 | err("CS Error, exiting"); |
658 | pcmciamtd_release(link); | 657 | pcmciamtd_release(link); |
659 | return -ENODEV; | 658 | return -ENODEV; |
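
The pcmciamtd rework replaces the old hand-rolled CIS walk (pcmcia_get_first_tuple/pcmcia_get_tuple_data/pcmcia_get_next_tuple plus one big switch) with a pcmcia_loop_tuple() callback per tuple type, and takes the product-ID strings straight from p_dev->prod_id[]. Judging from the callbacks above, returning 0 ends the iteration while a non-zero value (such as -ENOSPC in the DEBUG-only handlers) moves on to the next tuple of the same type. Hedged sketch of one such callback; the struct and field names are placeholders:

struct example_priv {
	u32 size;
};

static int example_cistpl_device(struct pcmcia_device *p_dev,
				 tuple_t *tuple, void *priv_data)
{
	struct example_priv *priv = priv_data;
	cisparse_t parse;

	if (pcmcia_parse_tuple(tuple, &parse))
		return -EINVAL;			/* skip, try the next tuple */

	priv->size = parse.device.dev[0].size;
	return 0;				/* done, stop iterating */
}

static void example_read_cis(struct pcmcia_device *p_dev,
			     struct example_priv *priv)
{
	/* a single call replaces the old first/next tuple loop */
	pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, example_cistpl_device, priv);
}
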
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index b58965a2b3ae..17a27225cc98 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -118,14 +118,6 @@ INT_MODULE_PARM(full_duplex, 0); | |||
118 | /* Autodetect link polarity reversal? */ | 118 | /* Autodetect link polarity reversal? */ |
119 | INT_MODULE_PARM(auto_polarity, 1); | 119 | INT_MODULE_PARM(auto_polarity, 1); |
120 | 120 | ||
121 | #ifdef PCMCIA_DEBUG | ||
122 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
123 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
124 | static char *version = | ||
125 | "3c574_cs.c 1.65ac1 2003/04/07 Donald Becker/David Hinds, becker@scyld.com.\n"; | ||
126 | #else | ||
127 | #define DEBUG(n, args...) | ||
128 | #endif | ||
129 | 121 | ||
130 | /*====================================================================*/ | 122 | /*====================================================================*/ |
131 | 123 | ||
@@ -278,7 +270,7 @@ static int tc574_probe(struct pcmcia_device *link) | |||
278 | struct el3_private *lp; | 270 | struct el3_private *lp; |
279 | struct net_device *dev; | 271 | struct net_device *dev; |
280 | 272 | ||
281 | DEBUG(0, "3c574_attach()\n"); | 273 | dev_dbg(&link->dev, "3c574_attach()\n"); |
282 | 274 | ||
283 | /* Create the PC card device object. */ | 275 | /* Create the PC card device object. */ |
284 | dev = alloc_etherdev(sizeof(struct el3_private)); | 276 | dev = alloc_etherdev(sizeof(struct el3_private)); |
@@ -291,10 +283,8 @@ static int tc574_probe(struct pcmcia_device *link) | |||
291 | spin_lock_init(&lp->window_lock); | 283 | spin_lock_init(&lp->window_lock); |
292 | link->io.NumPorts1 = 32; | 284 | link->io.NumPorts1 = 32; |
293 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 285 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
294 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; | 286 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
295 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
296 | link->irq.Handler = &el3_interrupt; | 287 | link->irq.Handler = &el3_interrupt; |
297 | link->irq.Instance = dev; | ||
298 | link->conf.Attributes = CONF_ENABLE_IRQ; | 288 | link->conf.Attributes = CONF_ENABLE_IRQ; |
299 | link->conf.IntType = INT_MEMORY_AND_IO; | 289 | link->conf.IntType = INT_MEMORY_AND_IO; |
300 | link->conf.ConfigIndex = 1; | 290 | link->conf.ConfigIndex = 1; |
@@ -319,7 +309,7 @@ static void tc574_detach(struct pcmcia_device *link) | |||
319 | { | 309 | { |
320 | struct net_device *dev = link->priv; | 310 | struct net_device *dev = link->priv; |
321 | 311 | ||
322 | DEBUG(0, "3c574_detach(0x%p)\n", link); | 312 | dev_dbg(&link->dev, "3c574_detach()\n"); |
323 | 313 | ||
324 | if (link->dev_node) | 314 | if (link->dev_node) |
325 | unregister_netdev(dev); | 315 | unregister_netdev(dev); |
@@ -335,26 +325,23 @@ static void tc574_detach(struct pcmcia_device *link) | |||
335 | ethernet device available to the system. | 325 | ethernet device available to the system. |
336 | */ | 326 | */ |
337 | 327 | ||
338 | #define CS_CHECK(fn, ret) \ | ||
339 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
340 | |||
341 | static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 328 | static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
342 | 329 | ||
343 | static int tc574_config(struct pcmcia_device *link) | 330 | static int tc574_config(struct pcmcia_device *link) |
344 | { | 331 | { |
345 | struct net_device *dev = link->priv; | 332 | struct net_device *dev = link->priv; |
346 | struct el3_private *lp = netdev_priv(dev); | 333 | struct el3_private *lp = netdev_priv(dev); |
347 | tuple_t tuple; | 334 | int ret, i, j; |
348 | __le16 buf[32]; | ||
349 | int last_fn, last_ret, i, j; | ||
350 | unsigned int ioaddr; | 335 | unsigned int ioaddr; |
351 | __be16 *phys_addr; | 336 | __be16 *phys_addr; |
352 | char *cardname; | 337 | char *cardname; |
353 | __u32 config; | 338 | __u32 config; |
339 | u8 *buf; | ||
340 | size_t len; | ||
354 | 341 | ||
355 | phys_addr = (__be16 *)dev->dev_addr; | 342 | phys_addr = (__be16 *)dev->dev_addr; |
356 | 343 | ||
357 | DEBUG(0, "3c574_config(0x%p)\n", link); | 344 | dev_dbg(&link->dev, "3c574_config()\n"); |
358 | 345 | ||
359 | link->io.IOAddrLines = 16; | 346 | link->io.IOAddrLines = 16; |
360 | for (i = j = 0; j < 0x400; j += 0x20) { | 347 | for (i = j = 0; j < 0x400; j += 0x20) { |
@@ -363,12 +350,16 @@ static int tc574_config(struct pcmcia_device *link) | |||
363 | if (i == 0) | 350 | if (i == 0) |
364 | break; | 351 | break; |
365 | } | 352 | } |
366 | if (i != 0) { | 353 | if (i != 0) |
367 | cs_error(link, RequestIO, i); | 354 | goto failed; |
355 | |||
356 | ret = pcmcia_request_irq(link, &link->irq); | ||
357 | if (ret) | ||
358 | goto failed; | ||
359 | |||
360 | ret = pcmcia_request_configuration(link, &link->conf); | ||
361 | if (ret) | ||
368 | goto failed; | 362 | goto failed; |
369 | } | ||
370 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | ||
371 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | ||
372 | 363 | ||
373 | dev->irq = link->irq.AssignedIRQ; | 364 | dev->irq = link->irq.AssignedIRQ; |
374 | dev->base_addr = link->io.BasePort1; | 365 | dev->base_addr = link->io.BasePort1; |
@@ -378,16 +369,14 @@ static int tc574_config(struct pcmcia_device *link) | |||
378 | /* The 3c574 normally uses an EEPROM for configuration info, including | 369 | /* The 3c574 normally uses an EEPROM for configuration info, including |
379 | the hardware address. The future products may include a modem chip | 370 | the hardware address. The future products may include a modem chip |
380 | and put the address in the CIS. */ | 371 | and put the address in the CIS. */ |
381 | tuple.Attributes = 0; | 372 | |
382 | tuple.TupleData = (cisdata_t *)buf; | 373 | len = pcmcia_get_tuple(link, 0x88, &buf); |
383 | tuple.TupleDataMax = 64; | 374 | if (buf && len >= 6) { |
384 | tuple.TupleOffset = 0; | ||
385 | tuple.DesiredTuple = 0x88; | ||
386 | if (pcmcia_get_first_tuple(link, &tuple) == 0) { | ||
387 | pcmcia_get_tuple_data(link, &tuple); | ||
388 | for (i = 0; i < 3; i++) | 375 | for (i = 0; i < 3; i++) |
389 | phys_addr[i] = htons(le16_to_cpu(buf[i])); | 376 | phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); |
377 | kfree(buf); | ||
390 | } else { | 378 | } else { |
379 | kfree(buf); /* 0 < len < 6 */ | ||
391 | EL3WINDOW(0); | 380 | EL3WINDOW(0); |
392 | for (i = 0; i < 3; i++) | 381 | for (i = 0; i < 3; i++) |
393 | phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); | 382 | phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); |
@@ -435,7 +424,8 @@ static int tc574_config(struct pcmcia_device *link) | |||
435 | mii_status = mdio_read(ioaddr, phy & 0x1f, 1); | 424 | mii_status = mdio_read(ioaddr, phy & 0x1f, 1); |
436 | if (mii_status != 0xffff) { | 425 | if (mii_status != 0xffff) { |
437 | lp->phys = phy & 0x1f; | 426 | lp->phys = phy & 0x1f; |
438 | DEBUG(0, " MII transceiver at index %d, status %x.\n", | 427 | dev_dbg(&link->dev, " MII transceiver at " |
428 | "index %d, status %x.\n", | ||
439 | phy, mii_status); | 429 | phy, mii_status); |
440 | if ((mii_status & 0x0040) == 0) | 430 | if ((mii_status & 0x0040) == 0) |
441 | mii_preamble_required = 1; | 431 | mii_preamble_required = 1; |
@@ -457,7 +447,7 @@ static int tc574_config(struct pcmcia_device *link) | |||
457 | } | 447 | } |
458 | 448 | ||
459 | link->dev_node = &lp->node; | 449 | link->dev_node = &lp->node; |
460 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 450 | SET_NETDEV_DEV(dev, &link->dev); |
461 | 451 | ||
462 | if (register_netdev(dev) != 0) { | 452 | if (register_netdev(dev) != 0) { |
463 | printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); | 453 | printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); |
@@ -478,8 +468,6 @@ static int tc574_config(struct pcmcia_device *link) | |||
478 | 468 | ||
479 | return 0; | 469 | return 0; |
480 | 470 | ||
481 | cs_failed: | ||
482 | cs_error(link, last_fn, last_ret); | ||
483 | failed: | 471 | failed: |
484 | tc574_release(link); | 472 | tc574_release(link); |
485 | return -ENODEV; | 473 | return -ENODEV; |
@@ -738,7 +726,7 @@ static int el3_open(struct net_device *dev) | |||
738 | lp->media.expires = jiffies + HZ; | 726 | lp->media.expires = jiffies + HZ; |
739 | add_timer(&lp->media); | 727 | add_timer(&lp->media); |
740 | 728 | ||
741 | DEBUG(2, "%s: opened, status %4.4x.\n", | 729 | dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", |
742 | dev->name, inw(dev->base_addr + EL3_STATUS)); | 730 | dev->name, inw(dev->base_addr + EL3_STATUS)); |
743 | 731 | ||
744 | return 0; | 732 | return 0; |
@@ -772,7 +760,7 @@ static void pop_tx_status(struct net_device *dev) | |||
772 | if (tx_status & 0x30) | 760 | if (tx_status & 0x30) |
773 | tc574_wait_for_completion(dev, TxReset); | 761 | tc574_wait_for_completion(dev, TxReset); |
774 | if (tx_status & 0x38) { | 762 | if (tx_status & 0x38) { |
775 | DEBUG(1, "%s: transmit error: status 0x%02x\n", | 763 | pr_debug("%s: transmit error: status 0x%02x\n", |
776 | dev->name, tx_status); | 764 | dev->name, tx_status); |
777 | outw(TxEnable, ioaddr + EL3_CMD); | 765 | outw(TxEnable, ioaddr + EL3_CMD); |
778 | dev->stats.tx_aborted_errors++; | 766 | dev->stats.tx_aborted_errors++; |
@@ -788,7 +776,7 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb, | |||
788 | struct el3_private *lp = netdev_priv(dev); | 776 | struct el3_private *lp = netdev_priv(dev); |
789 | unsigned long flags; | 777 | unsigned long flags; |
790 | 778 | ||
791 | DEBUG(3, "%s: el3_start_xmit(length = %ld) called, " | 779 | pr_debug("%s: el3_start_xmit(length = %ld) called, " |
792 | "status %4.4x.\n", dev->name, (long)skb->len, | 780 | "status %4.4x.\n", dev->name, (long)skb->len, |
793 | inw(ioaddr + EL3_STATUS)); | 781 | inw(ioaddr + EL3_STATUS)); |
794 | 782 | ||
@@ -827,7 +815,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
827 | return IRQ_NONE; | 815 | return IRQ_NONE; |
828 | ioaddr = dev->base_addr; | 816 | ioaddr = dev->base_addr; |
829 | 817 | ||
830 | DEBUG(3, "%s: interrupt, status %4.4x.\n", | 818 | pr_debug("%s: interrupt, status %4.4x.\n", |
831 | dev->name, inw(ioaddr + EL3_STATUS)); | 819 | dev->name, inw(ioaddr + EL3_STATUS)); |
832 | 820 | ||
833 | spin_lock(&lp->window_lock); | 821 | spin_lock(&lp->window_lock); |
@@ -836,7 +824,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
836 | (IntLatch | RxComplete | RxEarly | StatsFull)) { | 824 | (IntLatch | RxComplete | RxEarly | StatsFull)) { |
837 | if (!netif_device_present(dev) || | 825 | if (!netif_device_present(dev) || |
838 | ((status & 0xe000) != 0x2000)) { | 826 | ((status & 0xe000) != 0x2000)) { |
839 | DEBUG(1, "%s: Interrupt from dead card\n", dev->name); | 827 | pr_debug("%s: Interrupt from dead card\n", dev->name); |
840 | break; | 828 | break; |
841 | } | 829 | } |
842 | 830 | ||
@@ -846,7 +834,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
846 | work_budget = el3_rx(dev, work_budget); | 834 | work_budget = el3_rx(dev, work_budget); |
847 | 835 | ||
848 | if (status & TxAvailable) { | 836 | if (status & TxAvailable) { |
849 | DEBUG(3, " TX room bit was handled.\n"); | 837 | pr_debug(" TX room bit was handled.\n"); |
850 | /* There's room in the FIFO for a full-sized packet. */ | 838 | /* There's room in the FIFO for a full-sized packet. */ |
851 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); | 839 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); |
852 | netif_wake_queue(dev); | 840 | netif_wake_queue(dev); |
@@ -886,7 +874,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
886 | } | 874 | } |
887 | 875 | ||
888 | if (--work_budget < 0) { | 876 | if (--work_budget < 0) { |
889 | DEBUG(0, "%s: Too much work in interrupt, " | 877 | pr_debug("%s: Too much work in interrupt, " |
890 | "status %4.4x.\n", dev->name, status); | 878 | "status %4.4x.\n", dev->name, status); |
891 | /* Clear all interrupts */ | 879 | /* Clear all interrupts */ |
892 | outw(AckIntr | 0xFF, ioaddr + EL3_CMD); | 880 | outw(AckIntr | 0xFF, ioaddr + EL3_CMD); |
@@ -896,7 +884,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
896 | outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); | 884 | outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); |
897 | } | 885 | } |
898 | 886 | ||
899 | DEBUG(3, "%s: exiting interrupt, status %4.4x.\n", | 887 | pr_debug("%s: exiting interrupt, status %4.4x.\n", |
900 | dev->name, inw(ioaddr + EL3_STATUS)); | 888 | dev->name, inw(ioaddr + EL3_STATUS)); |
901 | 889 | ||
902 | spin_unlock(&lp->window_lock); | 890 | spin_unlock(&lp->window_lock); |
@@ -1003,7 +991,7 @@ static void update_stats(struct net_device *dev) | |||
1003 | unsigned int ioaddr = dev->base_addr; | 991 | unsigned int ioaddr = dev->base_addr; |
1004 | u8 rx, tx, up; | 992 | u8 rx, tx, up; |
1005 | 993 | ||
1006 | DEBUG(2, "%s: updating the statistics.\n", dev->name); | 994 | pr_debug("%s: updating the statistics.\n", dev->name); |
1007 | 995 | ||
1008 | if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ | 996 | if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ |
1009 | return; | 997 | return; |
@@ -1039,7 +1027,7 @@ static int el3_rx(struct net_device *dev, int worklimit) | |||
1039 | unsigned int ioaddr = dev->base_addr; | 1027 | unsigned int ioaddr = dev->base_addr; |
1040 | short rx_status; | 1028 | short rx_status; |
1041 | 1029 | ||
1042 | DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", | 1030 | pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", |
1043 | dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); | 1031 | dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); |
1044 | while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && | 1032 | while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && |
1045 | worklimit > 0) { | 1033 | worklimit > 0) { |
@@ -1061,7 +1049,7 @@ static int el3_rx(struct net_device *dev, int worklimit) | |||
1061 | 1049 | ||
1062 | skb = dev_alloc_skb(pkt_len+5); | 1050 | skb = dev_alloc_skb(pkt_len+5); |
1063 | 1051 | ||
1064 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", | 1052 | pr_debug(" Receiving packet size %d status %4.4x.\n", |
1065 | pkt_len, rx_status); | 1053 | pkt_len, rx_status); |
1066 | if (skb != NULL) { | 1054 | if (skb != NULL) { |
1067 | skb_reserve(skb, 2); | 1055 | skb_reserve(skb, 2); |
@@ -1072,7 +1060,7 @@ static int el3_rx(struct net_device *dev, int worklimit) | |||
1072 | dev->stats.rx_packets++; | 1060 | dev->stats.rx_packets++; |
1073 | dev->stats.rx_bytes += pkt_len; | 1061 | dev->stats.rx_bytes += pkt_len; |
1074 | } else { | 1062 | } else { |
1075 | DEBUG(1, "%s: couldn't allocate a sk_buff of" | 1063 | pr_debug("%s: couldn't allocate a sk_buff of" |
1076 | " size %d.\n", dev->name, pkt_len); | 1064 | " size %d.\n", dev->name, pkt_len); |
1077 | dev->stats.rx_dropped++; | 1065 | dev->stats.rx_dropped++; |
1078 | } | 1066 | } |
@@ -1101,7 +1089,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1101 | struct mii_ioctl_data *data = if_mii(rq); | 1089 | struct mii_ioctl_data *data = if_mii(rq); |
1102 | int phy = lp->phys & 0x1f; | 1090 | int phy = lp->phys & 0x1f; |
1103 | 1091 | ||
1104 | DEBUG(2, "%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", | 1092 | pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", |
1105 | dev->name, rq->ifr_ifrn.ifrn_name, cmd, | 1093 | dev->name, rq->ifr_ifrn.ifrn_name, cmd, |
1106 | data->phy_id, data->reg_num, data->val_in, data->val_out); | 1094 | data->phy_id, data->reg_num, data->val_in, data->val_out); |
1107 | 1095 | ||
@@ -1178,7 +1166,7 @@ static int el3_close(struct net_device *dev) | |||
1178 | struct el3_private *lp = netdev_priv(dev); | 1166 | struct el3_private *lp = netdev_priv(dev); |
1179 | struct pcmcia_device *link = lp->p_dev; | 1167 | struct pcmcia_device *link = lp->p_dev; |
1180 | 1168 | ||
1181 | DEBUG(2, "%s: shutting down ethercard.\n", dev->name); | 1169 | dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); |
1182 | 1170 | ||
1183 | if (pcmcia_dev_present(link)) { | 1171 | if (pcmcia_dev_present(link)) { |
1184 | unsigned long flags; | 1172 | unsigned long flags; |
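
Besides the same CS_CHECK and DEBUG conversions, 3c574_cs now reads the station address with pcmcia_get_tuple(): the helper copies the requested CIS tuple (vendor tuple 0x88 here) into a kmalloc'ed buffer, returns its length, and leaves freeing to the caller -- note that both branches above kfree() the buffer, since it can be non-NULL even when too short. Hedged sketch of the pattern (headers as in the driver file); the straight memcpy is a simplification of the per-word byte handling the driver actually does:

static int example_set_mac_from_cis(struct pcmcia_device *link,
				    struct net_device *dev)
{
	u8 *buf;
	size_t len;

	len = pcmcia_get_tuple(link, 0x88, &buf);
	if (buf && len >= ETH_ALEN) {
		memcpy(dev->dev_addr, buf, ETH_ALEN);
		kfree(buf);
		return 0;
	}
	kfree(buf);	/* kfree(NULL) is a no-op; short buffers still get freed */
	return -ENODEV;	/* caller falls back to the EEPROM path */
}
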
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 569fb06793cf..6f8d7e2e5922 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -130,14 +130,6 @@ MODULE_LICENSE("GPL"); | |||
130 | /* Special hook for setting if_port when module is loaded */ | 130 | /* Special hook for setting if_port when module is loaded */ |
131 | INT_MODULE_PARM(if_port, 0); | 131 | INT_MODULE_PARM(if_port, 0); |
132 | 132 | ||
133 | #ifdef PCMCIA_DEBUG | ||
134 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
135 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
136 | static char *version = | ||
137 | DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)"; | ||
138 | #else | ||
139 | #define DEBUG(n, args...) | ||
140 | #endif | ||
141 | 133 | ||
142 | /*====================================================================*/ | 134 | /*====================================================================*/ |
143 | 135 | ||
@@ -189,7 +181,7 @@ static int tc589_probe(struct pcmcia_device *link) | |||
189 | struct el3_private *lp; | 181 | struct el3_private *lp; |
190 | struct net_device *dev; | 182 | struct net_device *dev; |
191 | 183 | ||
192 | DEBUG(0, "3c589_attach()\n"); | 184 | dev_dbg(&link->dev, "3c589_attach()\n"); |
193 | 185 | ||
194 | /* Create new ethernet device */ | 186 | /* Create new ethernet device */ |
195 | dev = alloc_etherdev(sizeof(struct el3_private)); | 187 | dev = alloc_etherdev(sizeof(struct el3_private)); |
@@ -202,10 +194,8 @@ static int tc589_probe(struct pcmcia_device *link) | |||
202 | spin_lock_init(&lp->lock); | 194 | spin_lock_init(&lp->lock); |
203 | link->io.NumPorts1 = 16; | 195 | link->io.NumPorts1 = 16; |
204 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 196 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
205 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; | 197 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
206 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
207 | link->irq.Handler = &el3_interrupt; | 198 | link->irq.Handler = &el3_interrupt; |
208 | link->irq.Instance = dev; | ||
209 | link->conf.Attributes = CONF_ENABLE_IRQ; | 199 | link->conf.Attributes = CONF_ENABLE_IRQ; |
210 | link->conf.IntType = INT_MEMORY_AND_IO; | 200 | link->conf.IntType = INT_MEMORY_AND_IO; |
211 | link->conf.ConfigIndex = 1; | 201 | link->conf.ConfigIndex = 1; |
@@ -231,7 +221,7 @@ static void tc589_detach(struct pcmcia_device *link) | |||
231 | { | 221 | { |
232 | struct net_device *dev = link->priv; | 222 | struct net_device *dev = link->priv; |
233 | 223 | ||
234 | DEBUG(0, "3c589_detach(0x%p)\n", link); | 224 | dev_dbg(&link->dev, "3c589_detach\n"); |
235 | 225 | ||
236 | if (link->dev_node) | 226 | if (link->dev_node) |
237 | unregister_netdev(dev); | 227 | unregister_netdev(dev); |
@@ -249,29 +239,20 @@ static void tc589_detach(struct pcmcia_device *link) | |||
249 | 239 | ||
250 | ======================================================================*/ | 240 | ======================================================================*/ |
251 | 241 | ||
252 | #define CS_CHECK(fn, ret) \ | ||
253 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
254 | |||
255 | static int tc589_config(struct pcmcia_device *link) | 242 | static int tc589_config(struct pcmcia_device *link) |
256 | { | 243 | { |
257 | struct net_device *dev = link->priv; | 244 | struct net_device *dev = link->priv; |
258 | struct el3_private *lp = netdev_priv(dev); | 245 | struct el3_private *lp = netdev_priv(dev); |
259 | tuple_t tuple; | ||
260 | __le16 buf[32]; | ||
261 | __be16 *phys_addr; | 246 | __be16 *phys_addr; |
262 | int last_fn, last_ret, i, j, multi = 0, fifo; | 247 | int ret, i, j, multi = 0, fifo; |
263 | unsigned int ioaddr; | 248 | unsigned int ioaddr; |
264 | char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | 249 | char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; |
250 | u8 *buf; | ||
251 | size_t len; | ||
265 | 252 | ||
266 | DEBUG(0, "3c589_config(0x%p)\n", link); | 253 | dev_dbg(&link->dev, "3c589_config\n"); |
267 | 254 | ||
268 | phys_addr = (__be16 *)dev->dev_addr; | 255 | phys_addr = (__be16 *)dev->dev_addr; |
269 | tuple.Attributes = 0; | ||
270 | tuple.TupleData = (cisdata_t *)buf; | ||
271 | tuple.TupleDataMax = sizeof(buf); | ||
272 | tuple.TupleOffset = 0; | ||
273 | tuple.Attributes = TUPLE_RETURN_COMMON; | ||
274 | |||
275 | /* Is this a 3c562? */ | 256 | /* Is this a 3c562? */ |
276 | if (link->manf_id != MANFID_3COM) | 257 | if (link->manf_id != MANFID_3COM) |
277 | printk(KERN_INFO "3c589_cs: hmmm, is this really a " | 258 | printk(KERN_INFO "3c589_cs: hmmm, is this really a " |
@@ -287,12 +268,16 @@ static int tc589_config(struct pcmcia_device *link) | |||
287 | if (i == 0) | 268 | if (i == 0) |
288 | break; | 269 | break; |
289 | } | 270 | } |
290 | if (i != 0) { | 271 | if (i != 0) |
291 | cs_error(link, RequestIO, i); | ||
292 | goto failed; | 272 | goto failed; |
293 | } | 273 | |
294 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 274 | ret = pcmcia_request_irq(link, &link->irq); |
295 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 275 | if (ret) |
276 | goto failed; | ||
277 | |||
278 | ret = pcmcia_request_configuration(link, &link->conf); | ||
279 | if (ret) | ||
280 | goto failed; | ||
296 | 281 | ||
297 | dev->irq = link->irq.AssignedIRQ; | 282 | dev->irq = link->irq.AssignedIRQ; |
298 | dev->base_addr = link->io.BasePort1; | 283 | dev->base_addr = link->io.BasePort1; |
@@ -301,12 +286,13 @@ static int tc589_config(struct pcmcia_device *link) | |||
301 | 286 | ||
302 | /* The 3c589 has an extra EEPROM for configuration info, including | 287 | /* The 3c589 has an extra EEPROM for configuration info, including |
303 | the hardware address. The 3c562 puts the address in the CIS. */ | 288 | the hardware address. The 3c562 puts the address in the CIS. */ |
304 | tuple.DesiredTuple = 0x88; | 289 | len = pcmcia_get_tuple(link, 0x88, &buf); |
305 | if (pcmcia_get_first_tuple(link, &tuple) == 0) { | 290 | if (buf && len >= 6) { |
306 | pcmcia_get_tuple_data(link, &tuple); | 291 | for (i = 0; i < 3; i++) |
307 | for (i = 0; i < 3; i++) | 292 | phys_addr[i] = htons(le16_to_cpu(buf[i*2])); |
308 | phys_addr[i] = htons(le16_to_cpu(buf[i])); | 292 | phys_addr[i] = htons(le16_to_cpu(((__le16 *)buf)[i])); |
309 | } else { | 294 | } else { |
295 | kfree(buf); /* 0 < len < 6 */ | ||
310 | for (i = 0; i < 3; i++) | 296 | for (i = 0; i < 3; i++) |
311 | phys_addr[i] = htons(read_eeprom(ioaddr, i)); | 297 | phys_addr[i] = htons(read_eeprom(ioaddr, i)); |
312 | if (phys_addr[0] == htons(0x6060)) { | 298 | if (phys_addr[0] == htons(0x6060)) { |
@@ -328,7 +314,7 @@ static int tc589_config(struct pcmcia_device *link) | |||
328 | printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); | 314 | printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); |
329 | 315 | ||
330 | link->dev_node = &lp->node; | 316 | link->dev_node = &lp->node; |
331 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 317 | SET_NETDEV_DEV(dev, &link->dev); |
332 | 318 | ||
333 | if (register_netdev(dev) != 0) { | 319 | if (register_netdev(dev) != 0) { |
334 | printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); | 320 | printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); |
@@ -347,8 +333,6 @@ static int tc589_config(struct pcmcia_device *link) | |||
347 | if_names[dev->if_port]); | 333 | if_names[dev->if_port]); |
348 | return 0; | 334 | return 0; |
349 | 335 | ||
350 | cs_failed: | ||
351 | cs_error(link, last_fn, last_ret); | ||
352 | failed: | 336 | failed: |
353 | tc589_release(link); | 337 | tc589_release(link); |
354 | return -ENODEV; | 338 | return -ENODEV; |
@@ -511,24 +495,8 @@ static void netdev_get_drvinfo(struct net_device *dev, | |||
511 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); | 495 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); |
512 | } | 496 | } |
513 | 497 | ||
514 | #ifdef PCMCIA_DEBUG | ||
515 | static u32 netdev_get_msglevel(struct net_device *dev) | ||
516 | { | ||
517 | return pc_debug; | ||
518 | } | ||
519 | |||
520 | static void netdev_set_msglevel(struct net_device *dev, u32 level) | ||
521 | { | ||
522 | pc_debug = level; | ||
523 | } | ||
524 | #endif /* PCMCIA_DEBUG */ | ||
525 | |||
526 | static const struct ethtool_ops netdev_ethtool_ops = { | 498 | static const struct ethtool_ops netdev_ethtool_ops = { |
527 | .get_drvinfo = netdev_get_drvinfo, | 499 | .get_drvinfo = netdev_get_drvinfo, |
528 | #ifdef PCMCIA_DEBUG | ||
529 | .get_msglevel = netdev_get_msglevel, | ||
530 | .set_msglevel = netdev_set_msglevel, | ||
531 | #endif /* PCMCIA_DEBUG */ | ||
532 | }; | 500 | }; |
533 | 501 | ||
534 | static int el3_config(struct net_device *dev, struct ifmap *map) | 502 | static int el3_config(struct net_device *dev, struct ifmap *map) |
@@ -563,7 +531,7 @@ static int el3_open(struct net_device *dev) | |||
563 | lp->media.expires = jiffies + HZ; | 531 | lp->media.expires = jiffies + HZ; |
564 | add_timer(&lp->media); | 532 | add_timer(&lp->media); |
565 | 533 | ||
566 | DEBUG(1, "%s: opened, status %4.4x.\n", | 534 | dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", |
567 | dev->name, inw(dev->base_addr + EL3_STATUS)); | 535 | dev->name, inw(dev->base_addr + EL3_STATUS)); |
568 | 536 | ||
569 | return 0; | 537 | return 0; |
@@ -596,7 +564,7 @@ static void pop_tx_status(struct net_device *dev) | |||
596 | if (tx_status & 0x30) | 564 | if (tx_status & 0x30) |
597 | tc589_wait_for_completion(dev, TxReset); | 565 | tc589_wait_for_completion(dev, TxReset); |
598 | if (tx_status & 0x38) { | 566 | if (tx_status & 0x38) { |
599 | DEBUG(1, "%s: transmit error: status 0x%02x\n", | 567 | pr_debug("%s: transmit error: status 0x%02x\n", |
600 | dev->name, tx_status); | 568 | dev->name, tx_status); |
601 | outw(TxEnable, ioaddr + EL3_CMD); | 569 | outw(TxEnable, ioaddr + EL3_CMD); |
602 | dev->stats.tx_aborted_errors++; | 570 | dev->stats.tx_aborted_errors++; |
@@ -612,7 +580,7 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb, | |||
612 | struct el3_private *priv = netdev_priv(dev); | 580 | struct el3_private *priv = netdev_priv(dev); |
613 | unsigned long flags; | 581 | unsigned long flags; |
614 | 582 | ||
615 | DEBUG(3, "%s: el3_start_xmit(length = %ld) called, " | 583 | pr_debug("%s: el3_start_xmit(length = %ld) called, " |
616 | "status %4.4x.\n", dev->name, (long)skb->len, | 584 | "status %4.4x.\n", dev->name, (long)skb->len, |
617 | inw(ioaddr + EL3_STATUS)); | 585 | inw(ioaddr + EL3_STATUS)); |
618 | 586 | ||
@@ -654,14 +622,14 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
654 | 622 | ||
655 | ioaddr = dev->base_addr; | 623 | ioaddr = dev->base_addr; |
656 | 624 | ||
657 | DEBUG(3, "%s: interrupt, status %4.4x.\n", | 625 | pr_debug("%s: interrupt, status %4.4x.\n", |
658 | dev->name, inw(ioaddr + EL3_STATUS)); | 626 | dev->name, inw(ioaddr + EL3_STATUS)); |
659 | 627 | ||
660 | spin_lock(&lp->lock); | 628 | spin_lock(&lp->lock); |
661 | while ((status = inw(ioaddr + EL3_STATUS)) & | 629 | while ((status = inw(ioaddr + EL3_STATUS)) & |
662 | (IntLatch | RxComplete | StatsFull)) { | 630 | (IntLatch | RxComplete | StatsFull)) { |
663 | if ((status & 0xe000) != 0x2000) { | 631 | if ((status & 0xe000) != 0x2000) { |
664 | DEBUG(1, "%s: interrupt from dead card\n", dev->name); | 632 | pr_debug("%s: interrupt from dead card\n", dev->name); |
665 | handled = 0; | 633 | handled = 0; |
666 | break; | 634 | break; |
667 | } | 635 | } |
@@ -670,7 +638,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
670 | el3_rx(dev); | 638 | el3_rx(dev); |
671 | 639 | ||
672 | if (status & TxAvailable) { | 640 | if (status & TxAvailable) { |
673 | DEBUG(3, " TX room bit was handled.\n"); | 641 | pr_debug(" TX room bit was handled.\n"); |
674 | /* There's room in the FIFO for a full-sized packet. */ | 642 | /* There's room in the FIFO for a full-sized packet. */ |
675 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); | 643 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); |
676 | netif_wake_queue(dev); | 644 | netif_wake_queue(dev); |
@@ -722,7 +690,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) | |||
722 | 690 | ||
723 | lp->last_irq = jiffies; | 691 | lp->last_irq = jiffies; |
724 | spin_unlock(&lp->lock); | 692 | spin_unlock(&lp->lock); |
725 | DEBUG(3, "%s: exiting interrupt, status %4.4x.\n", | 693 | pr_debug("%s: exiting interrupt, status %4.4x.\n", |
726 | dev->name, inw(ioaddr + EL3_STATUS)); | 694 | dev->name, inw(ioaddr + EL3_STATUS)); |
727 | return IRQ_RETVAL(handled); | 695 | return IRQ_RETVAL(handled); |
728 | } | 696 | } |
@@ -833,7 +801,7 @@ static void update_stats(struct net_device *dev) | |||
833 | { | 801 | { |
834 | unsigned int ioaddr = dev->base_addr; | 802 | unsigned int ioaddr = dev->base_addr; |
835 | 803 | ||
836 | DEBUG(2, "%s: updating the statistics.\n", dev->name); | 804 | pr_debug("%s: updating the statistics.\n", dev->name); |
837 | /* Turn off statistics updates while reading. */ | 805 | /* Turn off statistics updates while reading. */ |
838 | outw(StatsDisable, ioaddr + EL3_CMD); | 806 | outw(StatsDisable, ioaddr + EL3_CMD); |
839 | /* Switch to the stats window, and read everything. */ | 807 | /* Switch to the stats window, and read everything. */ |
@@ -861,7 +829,7 @@ static int el3_rx(struct net_device *dev) | |||
861 | int worklimit = 32; | 829 | int worklimit = 32; |
862 | short rx_status; | 830 | short rx_status; |
863 | 831 | ||
864 | DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", | 832 | pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", |
865 | dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); | 833 | dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); |
866 | while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && | 834 | while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && |
867 | worklimit > 0) { | 835 | worklimit > 0) { |
@@ -883,7 +851,7 @@ static int el3_rx(struct net_device *dev) | |||
883 | 851 | ||
884 | skb = dev_alloc_skb(pkt_len+5); | 852 | skb = dev_alloc_skb(pkt_len+5); |
885 | 853 | ||
886 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", | 854 | pr_debug(" Receiving packet size %d status %4.4x.\n", |
887 | pkt_len, rx_status); | 855 | pkt_len, rx_status); |
888 | if (skb != NULL) { | 856 | if (skb != NULL) { |
889 | skb_reserve(skb, 2); | 857 | skb_reserve(skb, 2); |
@@ -894,7 +862,7 @@ static int el3_rx(struct net_device *dev) | |||
894 | dev->stats.rx_packets++; | 862 | dev->stats.rx_packets++; |
895 | dev->stats.rx_bytes += pkt_len; | 863 | dev->stats.rx_bytes += pkt_len; |
896 | } else { | 864 | } else { |
897 | DEBUG(1, "%s: couldn't allocate a sk_buff of" | 865 | pr_debug("%s: couldn't allocate a sk_buff of" |
898 | " size %d.\n", dev->name, pkt_len); | 866 | " size %d.\n", dev->name, pkt_len); |
899 | dev->stats.rx_dropped++; | 867 | dev->stats.rx_dropped++; |
900 | } | 868 | } |
@@ -935,7 +903,7 @@ static int el3_close(struct net_device *dev) | |||
935 | struct pcmcia_device *link = lp->p_dev; | 903 | struct pcmcia_device *link = lp->p_dev; |
936 | unsigned int ioaddr = dev->base_addr; | 904 | unsigned int ioaddr = dev->base_addr; |
937 | 905 | ||
938 | DEBUG(1, "%s: shutting down ethercard.\n", dev->name); | 906 | dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); |
939 | 907 | ||
940 | if (pcmcia_dev_present(link)) { | 908 | if (pcmcia_dev_present(link)) { |
941 | /* Turn off statistics ASAP. We update dev->stats below. */ | 909 | /* Turn off statistics ASAP. We update dev->stats below. */ |
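
The tc589_config() hunks above replace the open-coded tuple_t walk with pcmcia_get_tuple(), which allocates the tuple data and hands ownership to the caller. A minimal sketch of that pattern, with identifiers taken from tc589_config() above (tuple code 0x88 carries the 3c562 station address; the kfree() in both branches of the hunk implies the caller always frees the buffer):

	u8 *buf;
	size_t len;

	/* pcmcia_get_tuple() allocates *buf and returns the tuple length, 0 on failure */
	len = pcmcia_get_tuple(link, 0x88, &buf);
	if (buf && len >= 6) {
		/* first six bytes hold the station address as little-endian words */
		/* ... copy into phys_addr / dev->dev_addr as in the hunk ... */
	}
	kfree(buf);	/* caller owns the buffer; kfree(NULL) is a no-op */
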
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 3131a59a8d32..800597b82d18 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -75,16 +75,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | |||
75 | MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); | 75 | MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); |
76 | MODULE_LICENSE("GPL"); | 76 | MODULE_LICENSE("GPL"); |
77 | 77 | ||
78 | #ifdef PCMCIA_DEBUG | ||
79 | #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) | ||
80 | |||
81 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
82 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
83 | static char *version = | ||
84 | "axnet_cs.c 1.28 2002/06/29 06:27:37 (David Hinds)"; | ||
85 | #else | ||
86 | #define DEBUG(n, args...) | ||
87 | #endif | ||
88 | 78 | ||
89 | /*====================================================================*/ | 79 | /*====================================================================*/ |
90 | 80 | ||
@@ -167,7 +157,7 @@ static int axnet_probe(struct pcmcia_device *link) | |||
167 | struct net_device *dev; | 157 | struct net_device *dev; |
168 | struct ei_device *ei_local; | 158 | struct ei_device *ei_local; |
169 | 159 | ||
170 | DEBUG(0, "axnet_attach()\n"); | 160 | dev_dbg(&link->dev, "axnet_attach()\n"); |
171 | 161 | ||
172 | dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); | 162 | dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); |
173 | if (!dev) | 163 | if (!dev) |
@@ -180,7 +170,6 @@ static int axnet_probe(struct pcmcia_device *link) | |||
180 | info->p_dev = link; | 170 | info->p_dev = link; |
181 | link->priv = dev; | 171 | link->priv = dev; |
182 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 172 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
183 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
184 | link->conf.Attributes = CONF_ENABLE_IRQ; | 173 | link->conf.Attributes = CONF_ENABLE_IRQ; |
185 | link->conf.IntType = INT_MEMORY_AND_IO; | 174 | link->conf.IntType = INT_MEMORY_AND_IO; |
186 | 175 | ||
@@ -205,7 +194,7 @@ static void axnet_detach(struct pcmcia_device *link) | |||
205 | { | 194 | { |
206 | struct net_device *dev = link->priv; | 195 | struct net_device *dev = link->priv; |
207 | 196 | ||
208 | DEBUG(0, "axnet_detach(0x%p)\n", link); | 197 | dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link); |
209 | 198 | ||
210 | if (link->dev_node) | 199 | if (link->dev_node) |
211 | unregister_netdev(dev); | 200 | unregister_netdev(dev); |
@@ -272,9 +261,6 @@ static int get_prom(struct pcmcia_device *link) | |||
272 | 261 | ||
273 | ======================================================================*/ | 262 | ======================================================================*/ |
274 | 263 | ||
275 | #define CS_CHECK(fn, ret) \ | ||
276 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
277 | |||
278 | static int try_io_port(struct pcmcia_device *link) | 264 | static int try_io_port(struct pcmcia_device *link) |
279 | { | 265 | { |
280 | int j, ret; | 266 | int j, ret; |
@@ -341,26 +327,29 @@ static int axnet_config(struct pcmcia_device *link) | |||
341 | { | 327 | { |
342 | struct net_device *dev = link->priv; | 328 | struct net_device *dev = link->priv; |
343 | axnet_dev_t *info = PRIV(dev); | 329 | axnet_dev_t *info = PRIV(dev); |
344 | int i, j, j2, last_ret, last_fn; | 330 | int i, j, j2, ret; |
345 | 331 | ||
346 | DEBUG(0, "axnet_config(0x%p)\n", link); | 332 | dev_dbg(&link->dev, "axnet_config(0x%p)\n", link); |
347 | 333 | ||
348 | /* don't trust the CIS on this; Linksys got it wrong */ | 334 | /* don't trust the CIS on this; Linksys got it wrong */ |
349 | link->conf.Present = 0x63; | 335 | link->conf.Present = 0x63; |
350 | last_ret = pcmcia_loop_config(link, axnet_configcheck, NULL); | 336 | ret = pcmcia_loop_config(link, axnet_configcheck, NULL); |
351 | if (last_ret != 0) { | 337 | if (ret != 0) |
352 | cs_error(link, RequestIO, last_ret); | ||
353 | goto failed; | 338 | goto failed; |
354 | } | ||
355 | 339 | ||
356 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 340 | ret = pcmcia_request_irq(link, &link->irq); |
341 | if (ret) | ||
342 | goto failed; | ||
357 | 343 | ||
358 | if (link->io.NumPorts2 == 8) { | 344 | if (link->io.NumPorts2 == 8) { |
359 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 345 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
360 | link->conf.Status = CCSR_AUDIO_ENA; | 346 | link->conf.Status = CCSR_AUDIO_ENA; |
361 | } | 347 | } |
362 | 348 | ||
363 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 349 | ret = pcmcia_request_configuration(link, &link->conf); |
350 | if (ret) | ||
351 | goto failed; | ||
352 | |||
364 | dev->irq = link->irq.AssignedIRQ; | 353 | dev->irq = link->irq.AssignedIRQ; |
365 | dev->base_addr = link->io.BasePort1; | 354 | dev->base_addr = link->io.BasePort1; |
366 | 355 | ||
@@ -410,7 +399,7 @@ static int axnet_config(struct pcmcia_device *link) | |||
410 | 399 | ||
411 | info->phy_id = (i < 32) ? i : -1; | 400 | info->phy_id = (i < 32) ? i : -1; |
412 | link->dev_node = &info->node; | 401 | link->dev_node = &info->node; |
413 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 402 | SET_NETDEV_DEV(dev, &link->dev); |
414 | 403 | ||
415 | if (register_netdev(dev) != 0) { | 404 | if (register_netdev(dev) != 0) { |
416 | printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); | 405 | printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); |
@@ -426,14 +415,12 @@ static int axnet_config(struct pcmcia_device *link) | |||
426 | dev->base_addr, dev->irq, | 415 | dev->base_addr, dev->irq, |
427 | dev->dev_addr); | 416 | dev->dev_addr); |
428 | if (info->phy_id != -1) { | 417 | if (info->phy_id != -1) { |
429 | DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j); | 418 | dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j); |
430 | } else { | 419 | } else { |
431 | printk(KERN_NOTICE " No MII transceivers found!\n"); | 420 | printk(KERN_NOTICE " No MII transceivers found!\n"); |
432 | } | 421 | } |
433 | return 0; | 422 | return 0; |
434 | 423 | ||
435 | cs_failed: | ||
436 | cs_error(link, last_fn, last_ret); | ||
437 | failed: | 424 | failed: |
438 | axnet_release(link); | 425 | axnet_release(link); |
439 | return -ENODEV; | 426 | return -ENODEV; |
@@ -543,7 +530,7 @@ static int axnet_open(struct net_device *dev) | |||
543 | struct pcmcia_device *link = info->p_dev; | 530 | struct pcmcia_device *link = info->p_dev; |
544 | unsigned int nic_base = dev->base_addr; | 531 | unsigned int nic_base = dev->base_addr; |
545 | 532 | ||
546 | DEBUG(2, "axnet_open('%s')\n", dev->name); | 533 | dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name); |
547 | 534 | ||
548 | if (!pcmcia_dev_present(link)) | 535 | if (!pcmcia_dev_present(link)) |
549 | return -ENODEV; | 536 | return -ENODEV; |
@@ -572,7 +559,7 @@ static int axnet_close(struct net_device *dev) | |||
572 | axnet_dev_t *info = PRIV(dev); | 559 | axnet_dev_t *info = PRIV(dev); |
573 | struct pcmcia_device *link = info->p_dev; | 560 | struct pcmcia_device *link = info->p_dev; |
574 | 561 | ||
575 | DEBUG(2, "axnet_close('%s')\n", dev->name); | 562 | dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name); |
576 | 563 | ||
577 | ax_close(dev); | 564 | ax_close(dev); |
578 | free_irq(dev->irq, dev); | 565 | free_irq(dev->irq, dev); |
@@ -741,10 +728,8 @@ static void block_input(struct net_device *dev, int count, | |||
741 | int xfer_count = count; | 728 | int xfer_count = count; |
742 | char *buf = skb->data; | 729 | char *buf = skb->data; |
743 | 730 | ||
744 | #ifdef PCMCIA_DEBUG | ||
745 | if ((ei_debug > 4) && (count != 4)) | 731 | if ((ei_debug > 4) && (count != 4)) |
746 | printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4); | 732 | pr_debug("%s: [bi=%d]\n", dev->name, count+4); |
747 | #endif | ||
748 | outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); | 733 | outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); |
749 | outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); | 734 | outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); |
750 | outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); | 735 | outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); |
@@ -762,10 +747,7 @@ static void block_output(struct net_device *dev, int count, | |||
762 | { | 747 | { |
763 | unsigned int nic_base = dev->base_addr; | 748 | unsigned int nic_base = dev->base_addr; |
764 | 749 | ||
765 | #ifdef PCMCIA_DEBUG | 750 | pr_debug("%s: [bo=%d]\n", dev->name, count); |
766 | if (ei_debug > 4) | ||
767 | printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count); | ||
768 | #endif | ||
769 | 751 | ||
770 | /* Round the count up for word writes. Do we need to do this? | 752 | /* Round the count up for word writes. Do we need to do this? |
771 | What effect will an odd byte count have on the 8390? | 753 | What effect will an odd byte count have on the 8390? |
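
As in 3c589_cs, the CS_CHECK() macro and its cs_failed: exit path are gone from axnet_cs; every Card Services call is now checked explicitly and failures fall through to the common failed: label. A condensed before/after view of the pattern, using the calls from the axnet_config() hunk above:

	/* before: the macro hid both the error report and the goto */
	CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));

	/* after: plain return-code checks sharing one cleanup label */
	ret = pcmcia_request_irq(link, &link->irq);
	if (ret)
		goto failed;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;
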
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c index 7b5c77b7bd27..21d9c9d815d1 100644 --- a/drivers/net/pcmcia/com20020_cs.c +++ b/drivers/net/pcmcia/com20020_cs.c | |||
@@ -53,11 +53,7 @@ | |||
53 | 53 | ||
54 | #define VERSION "arcnet: COM20020 PCMCIA support loaded.\n" | 54 | #define VERSION "arcnet: COM20020 PCMCIA support loaded.\n" |
55 | 55 | ||
56 | #ifdef PCMCIA_DEBUG | 56 | #ifdef DEBUG |
57 | |||
58 | static int pc_debug = PCMCIA_DEBUG; | ||
59 | module_param(pc_debug, int, 0); | ||
60 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
61 | 57 | ||
62 | static void regdump(struct net_device *dev) | 58 | static void regdump(struct net_device *dev) |
63 | { | 59 | { |
@@ -92,7 +88,6 @@ static void regdump(struct net_device *dev) | |||
92 | 88 | ||
93 | #else | 89 | #else |
94 | 90 | ||
95 | #define DEBUG(n, args...) do { } while (0) | ||
96 | static inline void regdump(struct net_device *dev) { } | 91 | static inline void regdump(struct net_device *dev) { } |
97 | 92 | ||
98 | #endif | 93 | #endif |
@@ -144,7 +139,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) | |||
144 | struct net_device *dev; | 139 | struct net_device *dev; |
145 | struct arcnet_local *lp; | 140 | struct arcnet_local *lp; |
146 | 141 | ||
147 | DEBUG(0, "com20020_attach()\n"); | 142 | dev_dbg(&p_dev->dev, "com20020_attach()\n"); |
148 | 143 | ||
149 | /* Create new network device */ | 144 | /* Create new network device */ |
150 | info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); | 145 | info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); |
@@ -169,11 +164,10 @@ static int com20020_probe(struct pcmcia_device *p_dev) | |||
169 | p_dev->io.NumPorts1 = 16; | 164 | p_dev->io.NumPorts1 = 16; |
170 | p_dev->io.IOAddrLines = 16; | 165 | p_dev->io.IOAddrLines = 16; |
171 | p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 166 | p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
172 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
173 | p_dev->conf.Attributes = CONF_ENABLE_IRQ; | 167 | p_dev->conf.Attributes = CONF_ENABLE_IRQ; |
174 | p_dev->conf.IntType = INT_MEMORY_AND_IO; | 168 | p_dev->conf.IntType = INT_MEMORY_AND_IO; |
175 | 169 | ||
176 | p_dev->irq.Instance = info->dev = dev; | 170 | info->dev = dev; |
177 | p_dev->priv = info; | 171 | p_dev->priv = info; |
178 | 172 | ||
179 | return com20020_config(p_dev); | 173 | return com20020_config(p_dev); |
@@ -198,12 +192,12 @@ static void com20020_detach(struct pcmcia_device *link) | |||
198 | struct com20020_dev_t *info = link->priv; | 192 | struct com20020_dev_t *info = link->priv; |
199 | struct net_device *dev = info->dev; | 193 | struct net_device *dev = info->dev; |
200 | 194 | ||
201 | DEBUG(1,"detach...\n"); | 195 | dev_dbg(&link->dev, "detach...\n"); |
202 | 196 | ||
203 | DEBUG(0, "com20020_detach(0x%p)\n", link); | 197 | dev_dbg(&link->dev, "com20020_detach\n"); |
204 | 198 | ||
205 | if (link->dev_node) { | 199 | if (link->dev_node) { |
206 | DEBUG(1,"unregister...\n"); | 200 | dev_dbg(&link->dev, "unregister...\n"); |
207 | 201 | ||
208 | unregister_netdev(dev); | 202 | unregister_netdev(dev); |
209 | 203 | ||
@@ -218,16 +212,16 @@ static void com20020_detach(struct pcmcia_device *link) | |||
218 | com20020_release(link); | 212 | com20020_release(link); |
219 | 213 | ||
220 | /* Unlink device structure, free bits */ | 214 | /* Unlink device structure, free bits */ |
221 | DEBUG(1,"unlinking...\n"); | 215 | dev_dbg(&link->dev, "unlinking...\n"); |
222 | if (link->priv) | 216 | if (link->priv) |
223 | { | 217 | { |
224 | dev = info->dev; | 218 | dev = info->dev; |
225 | if (dev) | 219 | if (dev) |
226 | { | 220 | { |
227 | DEBUG(1,"kfree...\n"); | 221 | dev_dbg(&link->dev, "kfree...\n"); |
228 | free_netdev(dev); | 222 | free_netdev(dev); |
229 | } | 223 | } |
230 | DEBUG(1,"kfree2...\n"); | 224 | dev_dbg(&link->dev, "kfree2...\n"); |
231 | kfree(info); | 225 | kfree(info); |
232 | } | 226 | } |
233 | 227 | ||
@@ -241,25 +235,22 @@ static void com20020_detach(struct pcmcia_device *link) | |||
241 | 235 | ||
242 | ======================================================================*/ | 236 | ======================================================================*/ |
243 | 237 | ||
244 | #define CS_CHECK(fn, ret) \ | ||
245 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
246 | |||
247 | static int com20020_config(struct pcmcia_device *link) | 238 | static int com20020_config(struct pcmcia_device *link) |
248 | { | 239 | { |
249 | struct arcnet_local *lp; | 240 | struct arcnet_local *lp; |
250 | com20020_dev_t *info; | 241 | com20020_dev_t *info; |
251 | struct net_device *dev; | 242 | struct net_device *dev; |
252 | int i, last_ret, last_fn; | 243 | int i, ret; |
253 | int ioaddr; | 244 | int ioaddr; |
254 | 245 | ||
255 | info = link->priv; | 246 | info = link->priv; |
256 | dev = info->dev; | 247 | dev = info->dev; |
257 | 248 | ||
258 | DEBUG(1,"config...\n"); | 249 | dev_dbg(&link->dev, "config...\n"); |
259 | 250 | ||
260 | DEBUG(0, "com20020_config(0x%p)\n", link); | 251 | dev_dbg(&link->dev, "com20020_config\n"); |
261 | 252 | ||
262 | DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1); | 253 | dev_dbg(&link->dev, "baseport1 is %Xh\n", link->io.BasePort1); |
263 | i = -ENODEV; | 254 | i = -ENODEV; |
264 | if (!link->io.BasePort1) | 255 | if (!link->io.BasePort1) |
265 | { | 256 | { |
@@ -276,26 +267,27 @@ static int com20020_config(struct pcmcia_device *link) | |||
276 | 267 | ||
277 | if (i != 0) | 268 | if (i != 0) |
278 | { | 269 | { |
279 | DEBUG(1,"arcnet: requestIO failed totally!\n"); | 270 | dev_dbg(&link->dev, "requestIO failed totally!\n"); |
280 | goto failed; | 271 | goto failed; |
281 | } | 272 | } |
282 | 273 | ||
283 | ioaddr = dev->base_addr = link->io.BasePort1; | 274 | ioaddr = dev->base_addr = link->io.BasePort1; |
284 | DEBUG(1,"arcnet: got ioaddr %Xh\n", ioaddr); | 275 | dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr); |
285 | 276 | ||
286 | DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n", | 277 | dev_dbg(&link->dev, "request IRQ %d\n", |
287 | link->irq.AssignedIRQ, | 278 | link->irq.AssignedIRQ); |
288 | link->irq.IRQInfo1, link->irq.IRQInfo2); | ||
289 | i = pcmcia_request_irq(link, &link->irq); | 279 | i = pcmcia_request_irq(link, &link->irq); |
290 | if (i != 0) | 280 | if (i != 0) |
291 | { | 281 | { |
292 | DEBUG(1,"arcnet: requestIRQ failed totally!\n"); | 282 | dev_dbg(&link->dev, "requestIRQ failed totally!\n"); |
293 | goto failed; | 283 | goto failed; |
294 | } | 284 | } |
295 | 285 | ||
296 | dev->irq = link->irq.AssignedIRQ; | 286 | dev->irq = link->irq.AssignedIRQ; |
297 | 287 | ||
298 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 288 | ret = pcmcia_request_configuration(link, &link->conf); |
289 | if (ret) | ||
290 | goto failed; | ||
299 | 291 | ||
300 | if (com20020_check(dev)) | 292 | if (com20020_check(dev)) |
301 | { | 293 | { |
@@ -308,26 +300,25 @@ static int com20020_config(struct pcmcia_device *link) | |||
308 | lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ | 300 | lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ |
309 | 301 | ||
310 | link->dev_node = &info->node; | 302 | link->dev_node = &info->node; |
311 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 303 | SET_NETDEV_DEV(dev, &link->dev); |
312 | 304 | ||
313 | i = com20020_found(dev, 0); /* calls register_netdev */ | 305 | i = com20020_found(dev, 0); /* calls register_netdev */ |
314 | 306 | ||
315 | if (i != 0) { | 307 | if (i != 0) { |
316 | DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n"); | 308 | dev_printk(KERN_NOTICE, &link->dev, |
309 | "com20020_cs: com20020_found() failed\n"); | ||
317 | link->dev_node = NULL; | 310 | link->dev_node = NULL; |
318 | goto failed; | 311 | goto failed; |
319 | } | 312 | } |
320 | 313 | ||
321 | strcpy(info->node.dev_name, dev->name); | 314 | strcpy(info->node.dev_name, dev->name); |
322 | 315 | ||
323 | DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n", | 316 | dev_dbg(&link->dev, "%s: port %#3lx, irq %d\n", |
324 | dev->name, dev->base_addr, dev->irq); | 317 | dev->name, dev->base_addr, dev->irq); |
325 | return 0; | 318 | return 0; |
326 | 319 | ||
327 | cs_failed: | ||
328 | cs_error(link, last_fn, last_ret); | ||
329 | failed: | 320 | failed: |
330 | DEBUG(1,"com20020_config failed...\n"); | 321 | dev_dbg(&link->dev, "com20020_config failed...\n"); |
331 | com20020_release(link); | 322 | com20020_release(link); |
332 | return -ENODEV; | 323 | return -ENODEV; |
333 | } /* com20020_config */ | 324 | } /* com20020_config */ |
@@ -342,7 +333,7 @@ failed: | |||
342 | 333 | ||
343 | static void com20020_release(struct pcmcia_device *link) | 334 | static void com20020_release(struct pcmcia_device *link) |
344 | { | 335 | { |
345 | DEBUG(0, "com20020_release(0x%p)\n", link); | 336 | dev_dbg(&link->dev, "com20020_release\n"); |
346 | pcmcia_disable_device(link); | 337 | pcmcia_disable_device(link); |
347 | } | 338 | } |
348 | 339 | ||
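
Across all of these drivers the private DEBUG(n, ...) macro, gated on the pc_debug module parameter, gives way to the generic kernel helpers. Roughly, the mapping applied in the hunks above is the following; note that dev_dbg()/pr_debug() supply the debug log level themselves, so no KERN_* prefix belongs in the format string:

	/* old: only built with PCMCIA_DEBUG, runtime-gated on pc_debug */
	DEBUG(1, "detach...\n");

	/*
	 * new: dev_dbg() where a struct device is at hand, pr_debug() otherwise;
	 * both compile away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled
	 */
	dev_dbg(&link->dev, "detach...\n");
	pr_debug("%s: transmit error: status 0x%02x\n", dev->name, tx_status);
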
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 7e01fbdb87e0..6e3e1ced6db4 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -72,13 +72,6 @@ MODULE_LICENSE("GPL"); | |||
72 | /* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ | 72 | /* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ |
73 | INT_MODULE_PARM(sram_config, 0); | 73 | INT_MODULE_PARM(sram_config, 0); |
74 | 74 | ||
75 | #ifdef PCMCIA_DEBUG | ||
76 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
77 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
78 | static char *version = DRV_NAME ".c " DRV_VERSION " 2002/03/23"; | ||
79 | #else | ||
80 | #define DEBUG(n, args...) | ||
81 | #endif | ||
82 | 75 | ||
83 | /*====================================================================*/ | 76 | /*====================================================================*/ |
84 | /* | 77 | /* |
@@ -245,7 +238,7 @@ static int fmvj18x_probe(struct pcmcia_device *link) | |||
245 | local_info_t *lp; | 238 | local_info_t *lp; |
246 | struct net_device *dev; | 239 | struct net_device *dev; |
247 | 240 | ||
248 | DEBUG(0, "fmvj18x_attach()\n"); | 241 | dev_dbg(&link->dev, "fmvj18x_attach()\n"); |
249 | 242 | ||
250 | /* Make up a FMVJ18x specific data structure */ | 243 | /* Make up a FMVJ18x specific data structure */ |
251 | dev = alloc_etherdev(sizeof(local_info_t)); | 244 | dev = alloc_etherdev(sizeof(local_info_t)); |
@@ -262,10 +255,8 @@ static int fmvj18x_probe(struct pcmcia_device *link) | |||
262 | link->io.IOAddrLines = 5; | 255 | link->io.IOAddrLines = 5; |
263 | 256 | ||
264 | /* Interrupt setup */ | 257 | /* Interrupt setup */ |
265 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; | 258 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
266 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
267 | link->irq.Handler = &fjn_interrupt; | 259 | link->irq.Handler = &fjn_interrupt; |
268 | link->irq.Instance = dev; | ||
269 | 260 | ||
270 | /* General socket configuration */ | 261 | /* General socket configuration */ |
271 | link->conf.Attributes = CONF_ENABLE_IRQ; | 262 | link->conf.Attributes = CONF_ENABLE_IRQ; |
@@ -285,7 +276,7 @@ static void fmvj18x_detach(struct pcmcia_device *link) | |||
285 | { | 276 | { |
286 | struct net_device *dev = link->priv; | 277 | struct net_device *dev = link->priv; |
287 | 278 | ||
288 | DEBUG(0, "fmvj18x_detach(0x%p)\n", link); | 279 | dev_dbg(&link->dev, "fmvj18x_detach\n"); |
289 | 280 | ||
290 | if (link->dev_node) | 281 | if (link->dev_node) |
291 | unregister_netdev(dev); | 282 | unregister_netdev(dev); |
@@ -297,9 +288,6 @@ static void fmvj18x_detach(struct pcmcia_device *link) | |||
297 | 288 | ||
298 | /*====================================================================*/ | 289 | /*====================================================================*/ |
299 | 290 | ||
300 | #define CS_CHECK(fn, ret) \ | ||
301 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
302 | |||
303 | static int mfc_try_io_port(struct pcmcia_device *link) | 291 | static int mfc_try_io_port(struct pcmcia_device *link) |
304 | { | 292 | { |
305 | int i, ret; | 293 | int i, ret; |
@@ -341,33 +329,38 @@ static int ungermann_try_io_port(struct pcmcia_device *link) | |||
341 | return ret; /* RequestIO failed */ | 329 | return ret; /* RequestIO failed */ |
342 | } | 330 | } |
343 | 331 | ||
332 | static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, | ||
333 | cistpl_cftable_entry_t *cfg, | ||
334 | cistpl_cftable_entry_t *dflt, | ||
335 | unsigned int vcc, | ||
336 | void *priv_data) | ||
337 | { | ||
338 | return 0; /* strange, but that's what the code did already before... */ | ||
339 | } | ||
340 | |||
344 | static int fmvj18x_config(struct pcmcia_device *link) | 341 | static int fmvj18x_config(struct pcmcia_device *link) |
345 | { | 342 | { |
346 | struct net_device *dev = link->priv; | 343 | struct net_device *dev = link->priv; |
347 | local_info_t *lp = netdev_priv(dev); | 344 | local_info_t *lp = netdev_priv(dev); |
348 | tuple_t tuple; | 345 | int i, ret; |
349 | cisparse_t parse; | ||
350 | u_short buf[32]; | ||
351 | int i, last_fn = 0, last_ret = 0, ret; | ||
352 | unsigned int ioaddr; | 346 | unsigned int ioaddr; |
353 | cardtype_t cardtype; | 347 | cardtype_t cardtype; |
354 | char *card_name = "unknown"; | 348 | char *card_name = "unknown"; |
355 | u_char *node_id; | 349 | u8 *buf; |
350 | size_t len; | ||
351 | u_char buggybuf[32]; | ||
352 | |||
353 | dev_dbg(&link->dev, "fmvj18x_config\n"); | ||
356 | 354 | ||
357 | DEBUG(0, "fmvj18x_config(0x%p)\n", link); | 355 | len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); |
356 | kfree(buf); | ||
358 | 357 | ||
359 | tuple.TupleData = (u_char *)buf; | 358 | if (len) { |
360 | tuple.TupleDataMax = 64; | ||
361 | tuple.TupleOffset = 0; | ||
362 | tuple.DesiredTuple = CISTPL_FUNCE; | ||
363 | tuple.TupleOffset = 0; | ||
364 | if (pcmcia_get_first_tuple(link, &tuple) == 0) { | ||
365 | /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */ | 359 | /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */ |
366 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | 360 | ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL); |
367 | CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); | 361 | if (ret != 0) |
368 | CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); | 362 | goto failed; |
369 | CS_CHECK(ParseTuple, pcmcia_parse_tuple(&tuple, &parse)); | 363 | |
370 | link->conf.ConfigIndex = parse.cftable_entry.index; | ||
371 | switch (link->manf_id) { | 364 | switch (link->manf_id) { |
372 | case MANFID_TDK: | 365 | case MANFID_TDK: |
373 | cardtype = TDK; | 366 | cardtype = TDK; |
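
I/O configuration probing in fmvj18x_cs likewise moves to pcmcia_loop_config(): rather than walking CISTPL_CFTABLE_ENTRY tuples by hand, the driver supplies a callback that the core invokes once per configuration entry until one is accepted. A sketch of the shape introduced above (fmvj18x_ioprobe() accepts the first entry unconditionally, as the in-line comment in the hunk notes):

	static int fmvj18x_ioprobe(struct pcmcia_device *p_dev,
				   cistpl_cftable_entry_t *cfg,
				   cistpl_cftable_entry_t *dflt,
				   unsigned int vcc,
				   void *priv_data)
	{
		return 0;	/* 0 = accept this entry */
	}

	/* in fmvj18x_config(): bail out if no entry could be accepted */
	ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL);
	if (ret != 0)
		goto failed;
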
@@ -433,17 +426,24 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
433 | 426 | ||
434 | if (link->io.NumPorts2 != 0) { | 427 | if (link->io.NumPorts2 != 0) { |
435 | link->irq.Attributes = | 428 | link->irq.Attributes = |
436 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; | 429 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
437 | ret = mfc_try_io_port(link); | 430 | ret = mfc_try_io_port(link); |
438 | if (ret != 0) goto cs_failed; | 431 | if (ret != 0) goto failed; |
439 | } else if (cardtype == UNGERMANN) { | 432 | } else if (cardtype == UNGERMANN) { |
440 | ret = ungermann_try_io_port(link); | 433 | ret = ungermann_try_io_port(link); |
441 | if (ret != 0) goto cs_failed; | 434 | if (ret != 0) goto failed; |
442 | } else { | 435 | } else { |
443 | CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); | 436 | ret = pcmcia_request_io(link, &link->io); |
437 | if (ret) | ||
438 | goto failed; | ||
444 | } | 439 | } |
445 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 440 | ret = pcmcia_request_irq(link, &link->irq); |
446 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 441 | if (ret) |
442 | goto failed; | ||
443 | ret = pcmcia_request_configuration(link, &link->conf); | ||
444 | if (ret) | ||
445 | goto failed; | ||
446 | |||
447 | dev->irq = link->irq.AssignedIRQ; | 447 | dev->irq = link->irq.AssignedIRQ; |
448 | dev->base_addr = link->io.BasePort1; | 448 | dev->base_addr = link->io.BasePort1; |
449 | 449 | ||
@@ -474,21 +474,21 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
474 | case CONTEC: | 474 | case CONTEC: |
475 | case NEC: | 475 | case NEC: |
476 | case KME: | 476 | case KME: |
477 | tuple.DesiredTuple = CISTPL_FUNCE; | ||
478 | tuple.TupleOffset = 0; | ||
479 | CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); | ||
480 | tuple.TupleOffset = 0; | ||
481 | CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); | ||
482 | if (cardtype == MBH10304) { | 477 | if (cardtype == MBH10304) { |
483 | /* MBH10304's CIS_FUNCE is corrupted */ | ||
484 | node_id = &(tuple.TupleData[5]); | ||
485 | card_name = "FMV-J182"; | 478 | card_name = "FMV-J182"; |
486 | } else { | 479 | |
487 | while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) { | 480 | len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); |
488 | CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple)); | 481 | if (len < 11) { |
489 | CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); | 482 | kfree(buf); |
483 | goto failed; | ||
490 | } | 484 | } |
491 | node_id = &(tuple.TupleData[2]); | 485 | /* Read MACID from CIS */ |
486 | for (i = 0; i < 6; i++) | ||
487 | dev->dev_addr[i] = buf[i + 5]; | ||
488 | kfree(buf); | ||
489 | } else { | ||
490 | if (pcmcia_get_mac_from_cis(link, dev)) | ||
491 | goto failed; | ||
492 | if( cardtype == TDK ) { | 492 | if( cardtype == TDK ) { |
493 | card_name = "TDK LAK-CD021"; | 493 | card_name = "TDK LAK-CD021"; |
494 | } else if( cardtype == LA501 ) { | 494 | } else if( cardtype == LA501 ) { |
@@ -501,9 +501,6 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
501 | card_name = "C-NET(PC)C"; | 501 | card_name = "C-NET(PC)C"; |
502 | } | 502 | } |
503 | } | 503 | } |
504 | /* Read MACID from CIS */ | ||
505 | for (i = 0; i < 6; i++) | ||
506 | dev->dev_addr[i] = node_id[i]; | ||
507 | break; | 504 | break; |
508 | case UNGERMANN: | 505 | case UNGERMANN: |
509 | /* Read MACID from register */ | 506 | /* Read MACID from register */ |
@@ -513,12 +510,12 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
513 | break; | 510 | break; |
514 | case XXX10304: | 511 | case XXX10304: |
515 | /* Read MACID from Buggy CIS */ | 512 | /* Read MACID from Buggy CIS */ |
516 | if (fmvj18x_get_hwinfo(link, tuple.TupleData) == -1) { | 513 | if (fmvj18x_get_hwinfo(link, buggybuf) == -1) { |
517 | printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n"); | 514 | printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n"); |
518 | goto failed; | 515 | goto failed; |
519 | } | 516 | } |
520 | for (i = 0 ; i < 6; i++) { | 517 | for (i = 0 ; i < 6; i++) { |
521 | dev->dev_addr[i] = tuple.TupleData[i]; | 518 | dev->dev_addr[i] = buggybuf[i]; |
522 | } | 519 | } |
523 | card_name = "FMV-J182"; | 520 | card_name = "FMV-J182"; |
524 | break; | 521 | break; |
@@ -533,7 +530,7 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
533 | 530 | ||
534 | lp->cardtype = cardtype; | 531 | lp->cardtype = cardtype; |
535 | link->dev_node = &lp->node; | 532 | link->dev_node = &lp->node; |
536 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 533 | SET_NETDEV_DEV(dev, &link->dev); |
537 | 534 | ||
538 | if (register_netdev(dev) != 0) { | 535 | if (register_netdev(dev) != 0) { |
539 | printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); | 536 | printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); |
@@ -551,9 +548,6 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
551 | 548 | ||
552 | return 0; | 549 | return 0; |
553 | 550 | ||
554 | cs_failed: | ||
555 | /* All Card Services errors end up here */ | ||
556 | cs_error(link, last_fn, last_ret); | ||
557 | failed: | 551 | failed: |
558 | fmvj18x_release(link); | 552 | fmvj18x_release(link); |
559 | return -ENODEV; | 553 | return -ENODEV; |
@@ -571,16 +565,14 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) | |||
571 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 565 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
572 | req.Base = 0; req.Size = 0; | 566 | req.Base = 0; req.Size = 0; |
573 | req.AccessSpeed = 0; | 567 | req.AccessSpeed = 0; |
574 | i = pcmcia_request_window(&link, &req, &link->win); | 568 | i = pcmcia_request_window(link, &req, &link->win); |
575 | if (i != 0) { | 569 | if (i != 0) |
576 | cs_error(link, RequestWindow, i); | ||
577 | return -1; | 570 | return -1; |
578 | } | ||
579 | 571 | ||
580 | base = ioremap(req.Base, req.Size); | 572 | base = ioremap(req.Base, req.Size); |
581 | mem.Page = 0; | 573 | mem.Page = 0; |
582 | mem.CardOffset = 0; | 574 | mem.CardOffset = 0; |
583 | pcmcia_map_mem_page(link->win, &mem); | 575 | pcmcia_map_mem_page(link, link->win, &mem); |
584 | 576 | ||
585 | /* | 577 | /* |
586 | * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format | 578 | * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format |
@@ -605,9 +597,7 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) | |||
605 | } | 597 | } |
606 | 598 | ||
607 | iounmap(base); | 599 | iounmap(base); |
608 | j = pcmcia_release_window(link->win); | 600 | j = pcmcia_release_window(link, link->win); |
609 | if (j != 0) | ||
610 | cs_error(link, ReleaseWindow, j); | ||
611 | return (i != 0x200) ? 0 : -1; | 601 | return (i != 0x200) ? 0 : -1; |
612 | 602 | ||
613 | } /* fmvj18x_get_hwinfo */ | 603 | } /* fmvj18x_get_hwinfo */ |
@@ -626,11 +616,9 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link) | |||
626 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 616 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
627 | req.Base = 0; req.Size = 0; | 617 | req.Base = 0; req.Size = 0; |
628 | req.AccessSpeed = 0; | 618 | req.AccessSpeed = 0; |
629 | i = pcmcia_request_window(&link, &req, &link->win); | 619 | i = pcmcia_request_window(link, &req, &link->win); |
630 | if (i != 0) { | 620 | if (i != 0) |
631 | cs_error(link, RequestWindow, i); | ||
632 | return -1; | 621 | return -1; |
633 | } | ||
634 | 622 | ||
635 | lp->base = ioremap(req.Base, req.Size); | 623 | lp->base = ioremap(req.Base, req.Size); |
636 | if (lp->base == NULL) { | 624 | if (lp->base == NULL) { |
@@ -640,11 +628,10 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link) | |||
640 | 628 | ||
641 | mem.Page = 0; | 629 | mem.Page = 0; |
642 | mem.CardOffset = 0; | 630 | mem.CardOffset = 0; |
643 | i = pcmcia_map_mem_page(link->win, &mem); | 631 | i = pcmcia_map_mem_page(link, link->win, &mem); |
644 | if (i != 0) { | 632 | if (i != 0) { |
645 | iounmap(lp->base); | 633 | iounmap(lp->base); |
646 | lp->base = NULL; | 634 | lp->base = NULL; |
647 | cs_error(link, MapMemPage, i); | ||
648 | return -1; | 635 | return -1; |
649 | } | 636 | } |
650 | 637 | ||
@@ -671,15 +658,13 @@ static void fmvj18x_release(struct pcmcia_device *link) | |||
671 | u_char __iomem *tmp; | 658 | u_char __iomem *tmp; |
672 | int j; | 659 | int j; |
673 | 660 | ||
674 | DEBUG(0, "fmvj18x_release(0x%p)\n", link); | 661 | dev_dbg(&link->dev, "fmvj18x_release\n"); |
675 | 662 | ||
676 | if (lp->base != NULL) { | 663 | if (lp->base != NULL) { |
677 | tmp = lp->base; | 664 | tmp = lp->base; |
678 | lp->base = NULL; /* set NULL before iounmap */ | 665 | lp->base = NULL; /* set NULL before iounmap */ |
679 | iounmap(tmp); | 666 | iounmap(tmp); |
680 | j = pcmcia_release_window(link->win); | 667 | j = pcmcia_release_window(link, link->win); |
681 | if (j != 0) | ||
682 | cs_error(link, ReleaseWindow, j); | ||
683 | } | 668 | } |
684 | 669 | ||
685 | pcmcia_disable_device(link); | 670 | pcmcia_disable_device(link); |
@@ -788,8 +773,8 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) | |||
788 | outb(tx_stat, ioaddr + TX_STATUS); | 773 | outb(tx_stat, ioaddr + TX_STATUS); |
789 | outb(rx_stat, ioaddr + RX_STATUS); | 774 | outb(rx_stat, ioaddr + RX_STATUS); |
790 | 775 | ||
791 | DEBUG(4, "%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); | 776 | pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); |
792 | DEBUG(4, " tx_status %02x.\n", tx_stat); | 777 | pr_debug(" tx_status %02x.\n", tx_stat); |
793 | 778 | ||
794 | if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { | 779 | if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { |
795 | /* there is packet(s) in rx buffer */ | 780 | /* there is packet(s) in rx buffer */ |
@@ -809,8 +794,8 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) | |||
809 | } | 794 | } |
810 | netif_wake_queue(dev); | 795 | netif_wake_queue(dev); |
811 | } | 796 | } |
812 | DEBUG(4, "%s: exiting interrupt,\n", dev->name); | 797 | pr_debug("%s: exiting interrupt,\n", dev->name); |
813 | DEBUG(4, " tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat); | 798 | pr_debug(" tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat); |
814 | 799 | ||
815 | outb(D_TX_INTR, ioaddr + TX_INTR); | 800 | outb(D_TX_INTR, ioaddr + TX_INTR); |
816 | outb(D_RX_INTR, ioaddr + RX_INTR); | 801 | outb(D_RX_INTR, ioaddr + RX_INTR); |
@@ -882,7 +867,7 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, | |||
882 | return NETDEV_TX_BUSY; | 867 | return NETDEV_TX_BUSY; |
883 | } | 868 | } |
884 | 869 | ||
885 | DEBUG(4, "%s: Transmitting a packet of length %lu.\n", | 870 | pr_debug("%s: Transmitting a packet of length %lu.\n", |
886 | dev->name, (unsigned long)skb->len); | 871 | dev->name, (unsigned long)skb->len); |
887 | dev->stats.tx_bytes += skb->len; | 872 | dev->stats.tx_bytes += skb->len; |
888 | 873 | ||
@@ -937,7 +922,7 @@ static void fjn_reset(struct net_device *dev) | |||
937 | unsigned int ioaddr = dev->base_addr; | 922 | unsigned int ioaddr = dev->base_addr; |
938 | int i; | 923 | int i; |
939 | 924 | ||
940 | DEBUG(4, "fjn_reset(%s) called.\n",dev->name); | 925 | pr_debug("fjn_reset(%s) called.\n",dev->name); |
941 | 926 | ||
942 | /* Reset controller */ | 927 | /* Reset controller */ |
943 | if( sram_config == 0 ) | 928 | if( sram_config == 0 ) |
@@ -1015,13 +1000,13 @@ static void fjn_rx(struct net_device *dev) | |||
1015 | unsigned int ioaddr = dev->base_addr; | 1000 | unsigned int ioaddr = dev->base_addr; |
1016 | int boguscount = 10; /* 5 -> 10: by agy 19940922 */ | 1001 | int boguscount = 10; /* 5 -> 10: by agy 19940922 */ |
1017 | 1002 | ||
1018 | DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n", | 1003 | pr_debug("%s: in rx_packet(), rx_status %02x.\n", |
1019 | dev->name, inb(ioaddr + RX_STATUS)); | 1004 | dev->name, inb(ioaddr + RX_STATUS)); |
1020 | 1005 | ||
1021 | while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { | 1006 | while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { |
1022 | u_short status = inw(ioaddr + DATAPORT); | 1007 | u_short status = inw(ioaddr + DATAPORT); |
1023 | 1008 | ||
1024 | DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n", | 1009 | pr_debug("%s: Rxing packet mode %02x status %04x.\n", |
1025 | dev->name, inb(ioaddr + RX_MODE), status); | 1010 | dev->name, inb(ioaddr + RX_MODE), status); |
1026 | #ifndef final_version | 1011 | #ifndef final_version |
1027 | if (status == 0) { | 1012 | if (status == 0) { |
@@ -1061,16 +1046,14 @@ static void fjn_rx(struct net_device *dev) | |||
1061 | (pkt_len + 1) >> 1); | 1046 | (pkt_len + 1) >> 1); |
1062 | skb->protocol = eth_type_trans(skb, dev); | 1047 | skb->protocol = eth_type_trans(skb, dev); |
1063 | 1048 | ||
1064 | #ifdef PCMCIA_DEBUG | 1049 | { |
1065 | if (pc_debug > 5) { | ||
1066 | int i; | 1050 | int i; |
1067 | printk(KERN_DEBUG "%s: Rxed packet of length %d: ", | 1051 | pr_debug("%s: Rxed packet of length %d: ", |
1068 | dev->name, pkt_len); | 1052 | dev->name, pkt_len); |
1069 | for (i = 0; i < 14; i++) | 1053 | for (i = 0; i < 14; i++) |
1070 | printk(" %02x", skb->data[i]); | 1054 | pr_debug(" %02x", skb->data[i]); |
1071 | printk(".\n"); | 1055 | pr_debug(".\n"); |
1072 | } | 1056 | } |
1073 | #endif | ||
1074 | 1057 | ||
1075 | netif_rx(skb); | 1058 | netif_rx(skb); |
1076 | dev->stats.rx_packets++; | 1059 | dev->stats.rx_packets++; |
@@ -1094,7 +1077,7 @@ static void fjn_rx(struct net_device *dev) | |||
1094 | } | 1077 | } |
1095 | 1078 | ||
1096 | if (i > 0) | 1079 | if (i > 0) |
1097 | DEBUG(5, "%s: Exint Rx packet with mode %02x after " | 1080 | pr_debug("%s: Exint Rx packet with mode %02x after " |
1098 | "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); | 1081 | "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); |
1099 | } | 1082 | } |
1100 | */ | 1083 | */ |
@@ -1112,24 +1095,8 @@ static void netdev_get_drvinfo(struct net_device *dev, | |||
1112 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); | 1095 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); |
1113 | } | 1096 | } |
1114 | 1097 | ||
1115 | #ifdef PCMCIA_DEBUG | ||
1116 | static u32 netdev_get_msglevel(struct net_device *dev) | ||
1117 | { | ||
1118 | return pc_debug; | ||
1119 | } | ||
1120 | |||
1121 | static void netdev_set_msglevel(struct net_device *dev, u32 level) | ||
1122 | { | ||
1123 | pc_debug = level; | ||
1124 | } | ||
1125 | #endif /* PCMCIA_DEBUG */ | ||
1126 | |||
1127 | static const struct ethtool_ops netdev_ethtool_ops = { | 1098 | static const struct ethtool_ops netdev_ethtool_ops = { |
1128 | .get_drvinfo = netdev_get_drvinfo, | 1099 | .get_drvinfo = netdev_get_drvinfo, |
1129 | #ifdef PCMCIA_DEBUG | ||
1130 | .get_msglevel = netdev_get_msglevel, | ||
1131 | .set_msglevel = netdev_set_msglevel, | ||
1132 | #endif /* PCMCIA_DEBUG */ | ||
1133 | }; | 1100 | }; |
1134 | 1101 | ||
1135 | static int fjn_config(struct net_device *dev, struct ifmap *map){ | 1102 | static int fjn_config(struct net_device *dev, struct ifmap *map){ |
@@ -1141,7 +1108,7 @@ static int fjn_open(struct net_device *dev) | |||
1141 | struct local_info_t *lp = netdev_priv(dev); | 1108 | struct local_info_t *lp = netdev_priv(dev); |
1142 | struct pcmcia_device *link = lp->p_dev; | 1109 | struct pcmcia_device *link = lp->p_dev; |
1143 | 1110 | ||
1144 | DEBUG(4, "fjn_open('%s').\n", dev->name); | 1111 | pr_debug("fjn_open('%s').\n", dev->name); |
1145 | 1112 | ||
1146 | if (!pcmcia_dev_present(link)) | 1113 | if (!pcmcia_dev_present(link)) |
1147 | return -ENODEV; | 1114 | return -ENODEV; |
@@ -1167,7 +1134,7 @@ static int fjn_close(struct net_device *dev) | |||
1167 | struct pcmcia_device *link = lp->p_dev; | 1134 | struct pcmcia_device *link = lp->p_dev; |
1168 | unsigned int ioaddr = dev->base_addr; | 1135 | unsigned int ioaddr = dev->base_addr; |
1169 | 1136 | ||
1170 | DEBUG(4, "fjn_close('%s').\n", dev->name); | 1137 | pr_debug("fjn_close('%s').\n", dev->name); |
1171 | 1138 | ||
1172 | lp->open_time = 0; | 1139 | lp->open_time = 0; |
1173 | netif_stop_queue(dev); | 1140 | netif_stop_queue(dev); |
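
The fmvj18x hunks also track an interface change in the memory-window API: pcmcia_request_window(), pcmcia_map_mem_page() and pcmcia_release_window() now take the struct pcmcia_device itself instead of a pointer-to-pointer or a bare window handle. A condensed view of the rewritten calls, with the request fields as set in fmvj18x_get_hwinfo() above and error paths trimmed:

	win_req_t req;
	memreq_t mem;

	req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
	req.Base = 0; req.Size = 0;
	req.AccessSpeed = 0;

	/* old: pcmcia_request_window(&link, &req, &link->win) */
	if (pcmcia_request_window(link, &req, &link->win) != 0)
		return -1;

	mem.Page = 0;
	mem.CardOffset = 0;
	/* old: pcmcia_map_mem_page(link->win, &mem) */
	pcmcia_map_mem_page(link, link->win, &mem);

	/* ... ioremap(req.Base, req.Size) and use the window ... */

	/* old: pcmcia_release_window(link->win) */
	pcmcia_release_window(link, link->win);
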
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c index 06618af1a468..37f4a6fdc3ef 100644 --- a/drivers/net/pcmcia/ibmtr_cs.c +++ b/drivers/net/pcmcia/ibmtr_cs.c | |||
@@ -69,17 +69,6 @@ | |||
69 | #define PCMCIA | 69 | #define PCMCIA |
70 | #include "../tokenring/ibmtr.c" | 70 | #include "../tokenring/ibmtr.c" |
71 | 71 | ||
72 | #ifdef PCMCIA_DEBUG | ||
73 | static int pc_debug = PCMCIA_DEBUG; | ||
74 | module_param(pc_debug, int, 0); | ||
75 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
76 | static char *version = | ||
77 | "ibmtr_cs.c 1.10 1996/01/06 05:19:00 (Steve Kipisz)\n" | ||
78 | " 2.2.7 1999/05/03 12:00:00 (Mike Phillips)\n" | ||
79 | " 2.4.2 2001/30/28 Midnight (Burt Silverman)\n"; | ||
80 | #else | ||
81 | #define DEBUG(n, args...) | ||
82 | #endif | ||
83 | 72 | ||
84 | /*====================================================================*/ | 73 | /*====================================================================*/ |
85 | 74 | ||
@@ -130,6 +119,12 @@ static const struct ethtool_ops netdev_ethtool_ops = { | |||
130 | .get_drvinfo = netdev_get_drvinfo, | 119 | .get_drvinfo = netdev_get_drvinfo, |
131 | }; | 120 | }; |
132 | 121 | ||
122 | static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) { | ||
123 | ibmtr_dev_t *info = dev_id; | ||
124 | struct net_device *dev = info->dev; | ||
125 | return tok_interrupt(irq, dev); | ||
126 | } | ||
127 | |||
133 | /*====================================================================== | 128 | /*====================================================================== |
134 | 129 | ||
135 | ibmtr_attach() creates an "instance" of the driver, allocating | 130 | ibmtr_attach() creates an "instance" of the driver, allocating |
@@ -143,7 +138,7 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link) | |||
143 | ibmtr_dev_t *info; | 138 | ibmtr_dev_t *info; |
144 | struct net_device *dev; | 139 | struct net_device *dev; |
145 | 140 | ||
146 | DEBUG(0, "ibmtr_attach()\n"); | 141 | dev_dbg(&link->dev, "ibmtr_attach()\n"); |
147 | 142 | ||
148 | /* Create new token-ring device */ | 143 | /* Create new token-ring device */ |
149 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 144 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -161,14 +156,13 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link) | |||
161 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 156 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
162 | link->io.NumPorts1 = 4; | 157 | link->io.NumPorts1 = 4; |
163 | link->io.IOAddrLines = 16; | 158 | link->io.IOAddrLines = 16; |
164 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 159 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
165 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 160 | link->irq.Handler = ibmtr_interrupt; |
166 | link->irq.Handler = &tok_interrupt; | ||
167 | link->conf.Attributes = CONF_ENABLE_IRQ; | 161 | link->conf.Attributes = CONF_ENABLE_IRQ; |
168 | link->conf.IntType = INT_MEMORY_AND_IO; | 162 | link->conf.IntType = INT_MEMORY_AND_IO; |
169 | link->conf.Present = PRESENT_OPTION; | 163 | link->conf.Present = PRESENT_OPTION; |
170 | 164 | ||
171 | link->irq.Instance = info->dev = dev; | 165 | info->dev = dev; |
172 | 166 | ||
173 | SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); | 167 | SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); |
174 | 168 | ||
@@ -190,7 +184,7 @@ static void ibmtr_detach(struct pcmcia_device *link) | |||
190 | struct net_device *dev = info->dev; | 184 | struct net_device *dev = info->dev; |
191 | struct tok_info *ti = netdev_priv(dev); | 185 | struct tok_info *ti = netdev_priv(dev); |
192 | 186 | ||
193 | DEBUG(0, "ibmtr_detach(0x%p)\n", link); | 187 | dev_dbg(&link->dev, "ibmtr_detach\n"); |
194 | 188 | ||
195 | /* | 189 | /* |
196 | * When the card removal interrupt hits tok_interrupt(), | 190 | * When the card removal interrupt hits tok_interrupt(), |
@@ -217,9 +211,6 @@ static void ibmtr_detach(struct pcmcia_device *link) | |||
217 | 211 | ||
218 | ======================================================================*/ | 212 | ======================================================================*/ |
219 | 213 | ||
220 | #define CS_CHECK(fn, ret) \ | ||
221 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
222 | |||
223 | static int __devinit ibmtr_config(struct pcmcia_device *link) | 214 | static int __devinit ibmtr_config(struct pcmcia_device *link) |
224 | { | 215 | { |
225 | ibmtr_dev_t *info = link->priv; | 216 | ibmtr_dev_t *info = link->priv; |
@@ -227,9 +218,9 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) | |||
227 | struct tok_info *ti = netdev_priv(dev); | 218 | struct tok_info *ti = netdev_priv(dev); |
228 | win_req_t req; | 219 | win_req_t req; |
229 | memreq_t mem; | 220 | memreq_t mem; |
230 | int i, last_ret, last_fn; | 221 | int i, ret; |
231 | 222 | ||
232 | DEBUG(0, "ibmtr_config(0x%p)\n", link); | 223 | dev_dbg(&link->dev, "ibmtr_config\n"); |
233 | 224 | ||
234 | link->conf.ConfigIndex = 0x61; | 225 | link->conf.ConfigIndex = 0x61; |
235 | 226 | ||
@@ -241,11 +232,15 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) | |||
241 | if (i != 0) { | 232 | if (i != 0) { |
242 | /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ | 233 | /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ |
243 | link->io.BasePort1 = 0xA24; | 234 | link->io.BasePort1 = 0xA24; |
244 | CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); | 235 | ret = pcmcia_request_io(link, &link->io); |
236 | if (ret) | ||
237 | goto failed; | ||
245 | } | 238 | } |
246 | dev->base_addr = link->io.BasePort1; | 239 | dev->base_addr = link->io.BasePort1; |
247 | 240 | ||
248 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 241 | ret = pcmcia_request_irq(link, &link->irq); |
242 | if (ret) | ||
243 | goto failed; | ||
249 | dev->irq = link->irq.AssignedIRQ; | 244 | dev->irq = link->irq.AssignedIRQ; |
250 | ti->irq = link->irq.AssignedIRQ; | 245 | ti->irq = link->irq.AssignedIRQ; |
251 | ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); | 246 | ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); |
@@ -256,11 +251,15 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) | |||
256 | req.Base = 0; | 251 | req.Base = 0; |
257 | req.Size = 0x2000; | 252 | req.Size = 0x2000; |
258 | req.AccessSpeed = 250; | 253 | req.AccessSpeed = 250; |
259 | CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); | 254 | ret = pcmcia_request_window(link, &req, &link->win); |
255 | if (ret) | ||
256 | goto failed; | ||
260 | 257 | ||
261 | mem.CardOffset = mmiobase; | 258 | mem.CardOffset = mmiobase; |
262 | mem.Page = 0; | 259 | mem.Page = 0; |
263 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); | 260 | ret = pcmcia_map_mem_page(link, link->win, &mem); |
261 | if (ret) | ||
262 | goto failed; | ||
264 | ti->mmio = ioremap(req.Base, req.Size); | 263 | ti->mmio = ioremap(req.Base, req.Size); |
265 | 264 | ||
266 | /* Allocate the SRAM memory window */ | 265 | /* Allocate the SRAM memory window */ |
@@ -269,17 +268,23 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) | |||
269 | req.Base = 0; | 268 | req.Base = 0; |
270 | req.Size = sramsize * 1024; | 269 | req.Size = sramsize * 1024; |
271 | req.AccessSpeed = 250; | 270 | req.AccessSpeed = 250; |
272 | CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &info->sram_win_handle)); | 271 | ret = pcmcia_request_window(link, &req, &info->sram_win_handle); |
272 | if (ret) | ||
273 | goto failed; | ||
273 | 274 | ||
274 | mem.CardOffset = srambase; | 275 | mem.CardOffset = srambase; |
275 | mem.Page = 0; | 276 | mem.Page = 0; |
276 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(info->sram_win_handle, &mem)); | 277 | ret = pcmcia_map_mem_page(link, info->sram_win_handle, &mem); |
278 | if (ret) | ||
279 | goto failed; | ||
277 | 280 | ||
278 | ti->sram_base = mem.CardOffset >> 12; | 281 | ti->sram_base = mem.CardOffset >> 12; |
279 | ti->sram_virt = ioremap(req.Base, req.Size); | 282 | ti->sram_virt = ioremap(req.Base, req.Size); |
280 | ti->sram_phys = req.Base; | 283 | ti->sram_phys = req.Base; |
281 | 284 | ||
282 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 285 | ret = pcmcia_request_configuration(link, &link->conf); |
286 | if (ret) | ||
287 | goto failed; | ||
283 | 288 | ||
284 | /* Set up the Token-Ring Controller Configuration Register and | 289 | /* Set up the Token-Ring Controller Configuration Register and |
285 | turn on the card. Check the "Local Area Network Credit Card | 290 | turn on the card. Check the "Local Area Network Credit Card |
@@ -287,7 +292,7 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) | |||
287 | ibmtr_hw_setup(dev, mmiobase); | 292 | ibmtr_hw_setup(dev, mmiobase); |
288 | 293 | ||
289 | link->dev_node = &info->node; | 294 | link->dev_node = &info->node; |
290 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 295 | SET_NETDEV_DEV(dev, &link->dev); |
291 | 296 | ||
292 | i = ibmtr_probe_card(dev); | 297 | i = ibmtr_probe_card(dev); |
293 | if (i != 0) { | 298 | if (i != 0) { |
@@ -305,8 +310,6 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) | |||
305 | dev->dev_addr); | 310 | dev->dev_addr); |
306 | return 0; | 311 | return 0; |
307 | 312 | ||
308 | cs_failed: | ||
309 | cs_error(link, last_fn, last_ret); | ||
310 | failed: | 313 | failed: |
311 | ibmtr_release(link); | 314 | ibmtr_release(link); |
312 | return -ENODEV; | 315 | return -ENODEV; |
@@ -325,12 +328,12 @@ static void ibmtr_release(struct pcmcia_device *link) | |||
325 | ibmtr_dev_t *info = link->priv; | 328 | ibmtr_dev_t *info = link->priv; |
326 | struct net_device *dev = info->dev; | 329 | struct net_device *dev = info->dev; |
327 | 330 | ||
328 | DEBUG(0, "ibmtr_release(0x%p)\n", link); | 331 | dev_dbg(&link->dev, "ibmtr_release\n"); |
329 | 332 | ||
330 | if (link->win) { | 333 | if (link->win) { |
331 | struct tok_info *ti = netdev_priv(dev); | 334 | struct tok_info *ti = netdev_priv(dev); |
332 | iounmap(ti->mmio); | 335 | iounmap(ti->mmio); |
333 | pcmcia_release_window(info->sram_win_handle); | 336 | pcmcia_release_window(link, info->sram_win_handle); |
334 | } | 337 | } |
335 | pcmcia_disable_device(link); | 338 | pcmcia_disable_device(link); |
336 | } | 339 | } |
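
The ibmtr_cs hunks above drop the CS_CHECK()/cs_error() macro in favour of checking each Card Services call directly and branching to a single "failed" label. A minimal sketch of that pattern, using the same 2.6.33-era PCMCIA calls that appear in the hunks (the function name example_config and the bare pcmcia_disable_device() cleanup are illustrative, not part of the patch):

	static int example_config(struct pcmcia_device *link)
	{
		int ret;

		ret = pcmcia_request_io(link, &link->io);
		if (ret)
			goto failed;

		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;

		ret = pcmcia_request_configuration(link, &link->conf);
		if (ret)
			goto failed;

		return 0;

	failed:
		/* release whatever the successful requests acquired */
		pcmcia_disable_device(link);
		return -ENODEV;
	}

Each request is checked as it is made, so the cs_failed/cs_error() reporting path becomes unnecessary and the driver returns -ENODEV through its ordinary release path instead.
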
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 5ed6339c52bc..dae5ef6b2609 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
@@ -381,13 +381,6 @@ typedef struct _mace_private { | |||
381 | Private Global Variables | 381 | Private Global Variables |
382 | ---------------------------------------------------------------------------- */ | 382 | ---------------------------------------------------------------------------- */ |
383 | 383 | ||
384 | #ifdef PCMCIA_DEBUG | ||
385 | static char rcsid[] = | ||
386 | "nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao"; | ||
387 | static char *version = | ||
388 | DRV_NAME " " DRV_VERSION " (Roger C. Pao)"; | ||
389 | #endif | ||
390 | |||
391 | static const char *if_names[]={ | 384 | static const char *if_names[]={ |
392 | "Auto", "10baseT", "BNC", | 385 | "Auto", "10baseT", "BNC", |
393 | }; | 386 | }; |
@@ -406,12 +399,6 @@ MODULE_LICENSE("GPL"); | |||
406 | /* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ | 399 | /* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ |
407 | INT_MODULE_PARM(if_port, 0); | 400 | INT_MODULE_PARM(if_port, 0); |
408 | 401 | ||
409 | #ifdef PCMCIA_DEBUG | ||
410 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
411 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
412 | #else | ||
413 | #define DEBUG(n, args...) | ||
414 | #endif | ||
415 | 402 | ||
416 | /* ---------------------------------------------------------------------------- | 403 | /* ---------------------------------------------------------------------------- |
417 | Function Prototypes | 404 | Function Prototypes |
@@ -462,8 +449,7 @@ static int nmclan_probe(struct pcmcia_device *link) | |||
462 | mace_private *lp; | 449 | mace_private *lp; |
463 | struct net_device *dev; | 450 | struct net_device *dev; |
464 | 451 | ||
465 | DEBUG(0, "nmclan_attach()\n"); | 452 | dev_dbg(&link->dev, "nmclan_attach()\n"); |
466 | DEBUG(1, "%s\n", rcsid); | ||
467 | 453 | ||
468 | /* Create new ethernet device */ | 454 | /* Create new ethernet device */ |
469 | dev = alloc_etherdev(sizeof(mace_private)); | 455 | dev = alloc_etherdev(sizeof(mace_private)); |
@@ -477,10 +463,8 @@ static int nmclan_probe(struct pcmcia_device *link) | |||
477 | link->io.NumPorts1 = 32; | 463 | link->io.NumPorts1 = 32; |
478 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 464 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
479 | link->io.IOAddrLines = 5; | 465 | link->io.IOAddrLines = 5; |
480 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 466 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
481 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
482 | link->irq.Handler = &mace_interrupt; | 467 | link->irq.Handler = &mace_interrupt; |
483 | link->irq.Instance = dev; | ||
484 | link->conf.Attributes = CONF_ENABLE_IRQ; | 468 | link->conf.Attributes = CONF_ENABLE_IRQ; |
485 | link->conf.IntType = INT_MEMORY_AND_IO; | 469 | link->conf.IntType = INT_MEMORY_AND_IO; |
486 | link->conf.ConfigIndex = 1; | 470 | link->conf.ConfigIndex = 1; |
@@ -507,7 +491,7 @@ static void nmclan_detach(struct pcmcia_device *link) | |||
507 | { | 491 | { |
508 | struct net_device *dev = link->priv; | 492 | struct net_device *dev = link->priv; |
509 | 493 | ||
510 | DEBUG(0, "nmclan_detach(0x%p)\n", link); | 494 | dev_dbg(&link->dev, "nmclan_detach\n"); |
511 | 495 | ||
512 | if (link->dev_node) | 496 | if (link->dev_node) |
513 | unregister_netdev(dev); | 497 | unregister_netdev(dev); |
@@ -654,37 +638,40 @@ nmclan_config | |||
654 | ethernet device available to the system. | 638 | ethernet device available to the system. |
655 | ---------------------------------------------------------------------------- */ | 639 | ---------------------------------------------------------------------------- */ |
656 | 640 | ||
657 | #define CS_CHECK(fn, ret) \ | ||
658 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
659 | |||
660 | static int nmclan_config(struct pcmcia_device *link) | 641 | static int nmclan_config(struct pcmcia_device *link) |
661 | { | 642 | { |
662 | struct net_device *dev = link->priv; | 643 | struct net_device *dev = link->priv; |
663 | mace_private *lp = netdev_priv(dev); | 644 | mace_private *lp = netdev_priv(dev); |
664 | tuple_t tuple; | 645 | u8 *buf; |
665 | u_char buf[64]; | 646 | size_t len; |
666 | int i, last_ret, last_fn; | 647 | int i, ret; |
667 | unsigned int ioaddr; | 648 | unsigned int ioaddr; |
668 | 649 | ||
669 | DEBUG(0, "nmclan_config(0x%p)\n", link); | 650 | dev_dbg(&link->dev, "nmclan_config\n"); |
651 | |||
652 | ret = pcmcia_request_io(link, &link->io); | ||
653 | if (ret) | ||
654 | goto failed; | ||
655 | ret = pcmcia_request_irq(link, &link->irq); | ||
656 | if (ret) | ||
657 | goto failed; | ||
658 | ret = pcmcia_request_configuration(link, &link->conf); | ||
659 | if (ret) | ||
660 | goto failed; | ||
670 | 661 | ||
671 | CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); | ||
672 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | ||
673 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | ||
674 | dev->irq = link->irq.AssignedIRQ; | 662 | dev->irq = link->irq.AssignedIRQ; |
675 | dev->base_addr = link->io.BasePort1; | 663 | dev->base_addr = link->io.BasePort1; |
676 | 664 | ||
677 | ioaddr = dev->base_addr; | 665 | ioaddr = dev->base_addr; |
678 | 666 | ||
679 | /* Read the ethernet address from the CIS. */ | 667 | /* Read the ethernet address from the CIS. */ |
680 | tuple.DesiredTuple = 0x80 /* CISTPL_CFTABLE_ENTRY_MISC */; | 668 | len = pcmcia_get_tuple(link, 0x80, &buf); |
681 | tuple.TupleData = buf; | 669 | if (!buf || len < ETHER_ADDR_LEN) { |
682 | tuple.TupleDataMax = 64; | 670 | kfree(buf); |
683 | tuple.TupleOffset = 0; | 671 | goto failed; |
684 | tuple.Attributes = 0; | 672 | } |
685 | CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); | 673 | memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN); |
686 | CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); | 674 | kfree(buf); |
687 | memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN); | ||
688 | 675 | ||
689 | /* Verify configuration by reading the MACE ID. */ | 676 | /* Verify configuration by reading the MACE ID. */ |
690 | { | 677 | { |
@@ -693,7 +680,7 @@ static int nmclan_config(struct pcmcia_device *link) | |||
693 | sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); | 680 | sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); |
694 | sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); | 681 | sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); |
695 | if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { | 682 | if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { |
696 | DEBUG(0, "nmclan_cs configured: mace id=%x %x\n", | 683 | dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n", |
697 | sig[0], sig[1]); | 684 | sig[0], sig[1]); |
698 | } else { | 685 | } else { |
699 | printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should" | 686 | printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should" |
@@ -712,7 +699,7 @@ static int nmclan_config(struct pcmcia_device *link) | |||
712 | printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); | 699 | printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); |
713 | 700 | ||
714 | link->dev_node = &lp->node; | 701 | link->dev_node = &lp->node; |
715 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 702 | SET_NETDEV_DEV(dev, &link->dev); |
716 | 703 | ||
717 | i = register_netdev(dev); | 704 | i = register_netdev(dev); |
718 | if (i != 0) { | 705 | if (i != 0) { |
@@ -729,8 +716,6 @@ static int nmclan_config(struct pcmcia_device *link) | |||
729 | dev->dev_addr); | 716 | dev->dev_addr); |
730 | return 0; | 717 | return 0; |
731 | 718 | ||
732 | cs_failed: | ||
733 | cs_error(link, last_fn, last_ret); | ||
734 | failed: | 719 | failed: |
735 | nmclan_release(link); | 720 | nmclan_release(link); |
736 | return -ENODEV; | 721 | return -ENODEV; |
@@ -744,7 +729,7 @@ nmclan_release | |||
744 | ---------------------------------------------------------------------------- */ | 729 | ---------------------------------------------------------------------------- */ |
745 | static void nmclan_release(struct pcmcia_device *link) | 730 | static void nmclan_release(struct pcmcia_device *link) |
746 | { | 731 | { |
747 | DEBUG(0, "nmclan_release(0x%p)\n", link); | 732 | dev_dbg(&link->dev, "nmclan_release\n"); |
748 | pcmcia_disable_device(link); | 733 | pcmcia_disable_device(link); |
749 | } | 734 | } |
750 | 735 | ||
@@ -795,7 +780,7 @@ static void nmclan_reset(struct net_device *dev) | |||
795 | /* Reset Xilinx */ | 780 | /* Reset Xilinx */ |
796 | reg.Action = CS_WRITE; | 781 | reg.Action = CS_WRITE; |
797 | reg.Offset = CISREG_COR; | 782 | reg.Offset = CISREG_COR; |
798 | DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n", | 783 | dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n", |
799 | OrigCorValue); | 784 | OrigCorValue); |
800 | reg.Value = COR_SOFT_RESET; | 785 | reg.Value = COR_SOFT_RESET; |
801 | pcmcia_access_configuration_register(link, ®); | 786 | pcmcia_access_configuration_register(link, ®); |
@@ -872,7 +857,7 @@ static int mace_close(struct net_device *dev) | |||
872 | mace_private *lp = netdev_priv(dev); | 857 | mace_private *lp = netdev_priv(dev); |
873 | struct pcmcia_device *link = lp->p_dev; | 858 | struct pcmcia_device *link = lp->p_dev; |
874 | 859 | ||
875 | DEBUG(2, "%s: shutting down ethercard.\n", dev->name); | 860 | dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); |
876 | 861 | ||
877 | /* Mask off all interrupts from the MACE chip. */ | 862 | /* Mask off all interrupts from the MACE chip. */ |
878 | outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); | 863 | outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); |
@@ -891,24 +876,8 @@ static void netdev_get_drvinfo(struct net_device *dev, | |||
891 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); | 876 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); |
892 | } | 877 | } |
893 | 878 | ||
894 | #ifdef PCMCIA_DEBUG | ||
895 | static u32 netdev_get_msglevel(struct net_device *dev) | ||
896 | { | ||
897 | return pc_debug; | ||
898 | } | ||
899 | |||
900 | static void netdev_set_msglevel(struct net_device *dev, u32 level) | ||
901 | { | ||
902 | pc_debug = level; | ||
903 | } | ||
904 | #endif /* PCMCIA_DEBUG */ | ||
905 | |||
906 | static const struct ethtool_ops netdev_ethtool_ops = { | 879 | static const struct ethtool_ops netdev_ethtool_ops = { |
907 | .get_drvinfo = netdev_get_drvinfo, | 880 | .get_drvinfo = netdev_get_drvinfo, |
908 | #ifdef PCMCIA_DEBUG | ||
909 | .get_msglevel = netdev_get_msglevel, | ||
910 | .set_msglevel = netdev_set_msglevel, | ||
911 | #endif /* PCMCIA_DEBUG */ | ||
912 | }; | 881 | }; |
913 | 882 | ||
914 | /* ---------------------------------------------------------------------------- | 883 | /* ---------------------------------------------------------------------------- |
@@ -946,7 +915,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb, | |||
946 | 915 | ||
947 | netif_stop_queue(dev); | 916 | netif_stop_queue(dev); |
948 | 917 | ||
949 | DEBUG(3, "%s: mace_start_xmit(length = %ld) called.\n", | 918 | pr_debug("%s: mace_start_xmit(length = %ld) called.\n", |
950 | dev->name, (long)skb->len); | 919 | dev->name, (long)skb->len); |
951 | 920 | ||
952 | #if (!TX_INTERRUPTABLE) | 921 | #if (!TX_INTERRUPTABLE) |
@@ -1008,7 +977,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
1008 | int IntrCnt = MACE_MAX_IR_ITERATIONS; | 977 | int IntrCnt = MACE_MAX_IR_ITERATIONS; |
1009 | 978 | ||
1010 | if (dev == NULL) { | 979 | if (dev == NULL) { |
1011 | DEBUG(2, "mace_interrupt(): irq 0x%X for unknown device.\n", | 980 | pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n", |
1012 | irq); | 981 | irq); |
1013 | return IRQ_NONE; | 982 | return IRQ_NONE; |
1014 | } | 983 | } |
@@ -1031,7 +1000,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
1031 | } | 1000 | } |
1032 | 1001 | ||
1033 | if (!netif_device_present(dev)) { | 1002 | if (!netif_device_present(dev)) { |
1034 | DEBUG(2, "%s: interrupt from dead card\n", dev->name); | 1003 | pr_debug("%s: interrupt from dead card\n", dev->name); |
1035 | return IRQ_NONE; | 1004 | return IRQ_NONE; |
1036 | } | 1005 | } |
1037 | 1006 | ||
@@ -1039,7 +1008,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
1039 | /* WARNING: MACE_IR is a READ/CLEAR port! */ | 1008 | /* WARNING: MACE_IR is a READ/CLEAR port! */ |
1040 | status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); | 1009 | status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); |
1041 | 1010 | ||
1042 | DEBUG(3, "mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); | 1011 | pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); |
1043 | 1012 | ||
1044 | if (status & MACE_IR_RCVINT) { | 1013 | if (status & MACE_IR_RCVINT) { |
1045 | mace_rx(dev, MACE_MAX_RX_ITERATIONS); | 1014 | mace_rx(dev, MACE_MAX_RX_ITERATIONS); |
@@ -1158,7 +1127,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) | |||
1158 | ) { | 1127 | ) { |
1159 | rx_status = inw(ioaddr + AM2150_RCV); | 1128 | rx_status = inw(ioaddr + AM2150_RCV); |
1160 | 1129 | ||
1161 | DEBUG(3, "%s: in mace_rx(), framecnt 0x%X, rx_status" | 1130 | pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status" |
1162 | " 0x%X.\n", dev->name, rx_framecnt, rx_status); | 1131 | " 0x%X.\n", dev->name, rx_framecnt, rx_status); |
1163 | 1132 | ||
1164 | if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ | 1133 | if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ |
@@ -1185,7 +1154,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) | |||
1185 | lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); | 1154 | lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); |
1186 | /* rcv collision count */ | 1155 | /* rcv collision count */ |
1187 | 1156 | ||
1188 | DEBUG(3, " receiving packet size 0x%X rx_status" | 1157 | pr_debug(" receiving packet size 0x%X rx_status" |
1189 | " 0x%X.\n", pkt_len, rx_status); | 1158 | " 0x%X.\n", pkt_len, rx_status); |
1190 | 1159 | ||
1191 | skb = dev_alloc_skb(pkt_len+2); | 1160 | skb = dev_alloc_skb(pkt_len+2); |
@@ -1204,7 +1173,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) | |||
1204 | outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ | 1173 | outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ |
1205 | continue; | 1174 | continue; |
1206 | } else { | 1175 | } else { |
1207 | DEBUG(1, "%s: couldn't allocate a sk_buff of size" | 1176 | pr_debug("%s: couldn't allocate a sk_buff of size" |
1208 | " %d.\n", dev->name, pkt_len); | 1177 | " %d.\n", dev->name, pkt_len); |
1209 | lp->linux_stats.rx_dropped++; | 1178 | lp->linux_stats.rx_dropped++; |
1210 | } | 1179 | } |
@@ -1220,28 +1189,28 @@ pr_linux_stats | |||
1220 | ---------------------------------------------------------------------------- */ | 1189 | ---------------------------------------------------------------------------- */ |
1221 | static void pr_linux_stats(struct net_device_stats *pstats) | 1190 | static void pr_linux_stats(struct net_device_stats *pstats) |
1222 | { | 1191 | { |
1223 | DEBUG(2, "pr_linux_stats\n"); | 1192 | pr_debug("pr_linux_stats\n"); |
1224 | DEBUG(2, " rx_packets=%-7ld tx_packets=%ld\n", | 1193 | pr_debug(" rx_packets=%-7ld tx_packets=%ld\n", |
1225 | (long)pstats->rx_packets, (long)pstats->tx_packets); | 1194 | (long)pstats->rx_packets, (long)pstats->tx_packets); |
1226 | DEBUG(2, " rx_errors=%-7ld tx_errors=%ld\n", | 1195 | pr_debug(" rx_errors=%-7ld tx_errors=%ld\n", |
1227 | (long)pstats->rx_errors, (long)pstats->tx_errors); | 1196 | (long)pstats->rx_errors, (long)pstats->tx_errors); |
1228 | DEBUG(2, " rx_dropped=%-7ld tx_dropped=%ld\n", | 1197 | pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n", |
1229 | (long)pstats->rx_dropped, (long)pstats->tx_dropped); | 1198 | (long)pstats->rx_dropped, (long)pstats->tx_dropped); |
1230 | DEBUG(2, " multicast=%-7ld collisions=%ld\n", | 1199 | pr_debug(" multicast=%-7ld collisions=%ld\n", |
1231 | (long)pstats->multicast, (long)pstats->collisions); | 1200 | (long)pstats->multicast, (long)pstats->collisions); |
1232 | 1201 | ||
1233 | DEBUG(2, " rx_length_errors=%-7ld rx_over_errors=%ld\n", | 1202 | pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n", |
1234 | (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); | 1203 | (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); |
1235 | DEBUG(2, " rx_crc_errors=%-7ld rx_frame_errors=%ld\n", | 1204 | pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n", |
1236 | (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); | 1205 | (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); |
1237 | DEBUG(2, " rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", | 1206 | pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", |
1238 | (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); | 1207 | (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); |
1239 | 1208 | ||
1240 | DEBUG(2, " tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", | 1209 | pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", |
1241 | (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); | 1210 | (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); |
1242 | DEBUG(2, " tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", | 1211 | pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", |
1243 | (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); | 1212 | (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); |
1244 | DEBUG(2, " tx_window_errors=%ld\n", | 1213 | pr_debug(" tx_window_errors=%ld\n", |
1245 | (long)pstats->tx_window_errors); | 1214 | (long)pstats->tx_window_errors); |
1246 | } /* pr_linux_stats */ | 1215 | } /* pr_linux_stats */ |
1247 | 1216 | ||
@@ -1250,48 +1219,48 @@ pr_mace_stats | |||
1250 | ---------------------------------------------------------------------------- */ | 1219 | ---------------------------------------------------------------------------- */ |
1251 | static void pr_mace_stats(mace_statistics *pstats) | 1220 | static void pr_mace_stats(mace_statistics *pstats) |
1252 | { | 1221 | { |
1253 | DEBUG(2, "pr_mace_stats\n"); | 1222 | pr_debug("pr_mace_stats\n"); |
1254 | 1223 | ||
1255 | DEBUG(2, " xmtsv=%-7d uflo=%d\n", | 1224 | pr_debug(" xmtsv=%-7d uflo=%d\n", |
1256 | pstats->xmtsv, pstats->uflo); | 1225 | pstats->xmtsv, pstats->uflo); |
1257 | DEBUG(2, " lcol=%-7d more=%d\n", | 1226 | pr_debug(" lcol=%-7d more=%d\n", |
1258 | pstats->lcol, pstats->more); | 1227 | pstats->lcol, pstats->more); |
1259 | DEBUG(2, " one=%-7d defer=%d\n", | 1228 | pr_debug(" one=%-7d defer=%d\n", |
1260 | pstats->one, pstats->defer); | 1229 | pstats->one, pstats->defer); |
1261 | DEBUG(2, " lcar=%-7d rtry=%d\n", | 1230 | pr_debug(" lcar=%-7d rtry=%d\n", |
1262 | pstats->lcar, pstats->rtry); | 1231 | pstats->lcar, pstats->rtry); |
1263 | 1232 | ||
1264 | /* MACE_XMTRC */ | 1233 | /* MACE_XMTRC */ |
1265 | DEBUG(2, " exdef=%-7d xmtrc=%d\n", | 1234 | pr_debug(" exdef=%-7d xmtrc=%d\n", |
1266 | pstats->exdef, pstats->xmtrc); | 1235 | pstats->exdef, pstats->xmtrc); |
1267 | 1236 | ||
1268 | /* RFS1--Receive Status (RCVSTS) */ | 1237 | /* RFS1--Receive Status (RCVSTS) */ |
1269 | DEBUG(2, " oflo=%-7d clsn=%d\n", | 1238 | pr_debug(" oflo=%-7d clsn=%d\n", |
1270 | pstats->oflo, pstats->clsn); | 1239 | pstats->oflo, pstats->clsn); |
1271 | DEBUG(2, " fram=%-7d fcs=%d\n", | 1240 | pr_debug(" fram=%-7d fcs=%d\n", |
1272 | pstats->fram, pstats->fcs); | 1241 | pstats->fram, pstats->fcs); |
1273 | 1242 | ||
1274 | /* RFS2--Runt Packet Count (RNTPC) */ | 1243 | /* RFS2--Runt Packet Count (RNTPC) */ |
1275 | /* RFS3--Receive Collision Count (RCVCC) */ | 1244 | /* RFS3--Receive Collision Count (RCVCC) */ |
1276 | DEBUG(2, " rfs_rntpc=%-7d rfs_rcvcc=%d\n", | 1245 | pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n", |
1277 | pstats->rfs_rntpc, pstats->rfs_rcvcc); | 1246 | pstats->rfs_rntpc, pstats->rfs_rcvcc); |
1278 | 1247 | ||
1279 | /* MACE_IR */ | 1248 | /* MACE_IR */ |
1280 | DEBUG(2, " jab=%-7d babl=%d\n", | 1249 | pr_debug(" jab=%-7d babl=%d\n", |
1281 | pstats->jab, pstats->babl); | 1250 | pstats->jab, pstats->babl); |
1282 | DEBUG(2, " cerr=%-7d rcvcco=%d\n", | 1251 | pr_debug(" cerr=%-7d rcvcco=%d\n", |
1283 | pstats->cerr, pstats->rcvcco); | 1252 | pstats->cerr, pstats->rcvcco); |
1284 | DEBUG(2, " rntpco=%-7d mpco=%d\n", | 1253 | pr_debug(" rntpco=%-7d mpco=%d\n", |
1285 | pstats->rntpco, pstats->mpco); | 1254 | pstats->rntpco, pstats->mpco); |
1286 | 1255 | ||
1287 | /* MACE_MPC */ | 1256 | /* MACE_MPC */ |
1288 | DEBUG(2, " mpc=%d\n", pstats->mpc); | 1257 | pr_debug(" mpc=%d\n", pstats->mpc); |
1289 | 1258 | ||
1290 | /* MACE_RNTPC */ | 1259 | /* MACE_RNTPC */ |
1291 | DEBUG(2, " rntpc=%d\n", pstats->rntpc); | 1260 | pr_debug(" rntpc=%d\n", pstats->rntpc); |
1292 | 1261 | ||
1293 | /* MACE_RCVCC */ | 1262 | /* MACE_RCVCC */ |
1294 | DEBUG(2, " rcvcc=%d\n", pstats->rcvcc); | 1263 | pr_debug(" rcvcc=%d\n", pstats->rcvcc); |
1295 | 1264 | ||
1296 | } /* pr_mace_stats */ | 1265 | } /* pr_mace_stats */ |
1297 | 1266 | ||
@@ -1360,7 +1329,7 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev) | |||
1360 | 1329 | ||
1361 | update_stats(dev->base_addr, dev); | 1330 | update_stats(dev->base_addr, dev); |
1362 | 1331 | ||
1363 | DEBUG(1, "%s: updating the statistics.\n", dev->name); | 1332 | pr_debug("%s: updating the statistics.\n", dev->name); |
1364 | pr_linux_stats(&lp->linux_stats); | 1333 | pr_linux_stats(&lp->linux_stats); |
1365 | pr_mace_stats(&lp->mace_stats); | 1334 | pr_mace_stats(&lp->mace_stats); |
1366 | 1335 | ||
@@ -1427,7 +1396,7 @@ static void BuildLAF(int *ladrf, int *adr) | |||
1427 | ladrf[byte] |= (1 << (hashcode & 7)); | 1396 | ladrf[byte] |= (1 << (hashcode & 7)); |
1428 | 1397 | ||
1429 | #ifdef PCMCIA_DEBUG | 1398 | #ifdef PCMCIA_DEBUG |
1430 | if (pc_debug > 2) | 1399 | if (0) |
1431 | printk(KERN_DEBUG " adr =%pM\n", adr); | 1400 | printk(KERN_DEBUG " adr =%pM\n", adr); |
1432 | printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); | 1401 | printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); |
1433 | for (i = 0; i < 8; i++) | 1402 | for (i = 0; i < 8; i++) |
@@ -1454,12 +1423,12 @@ static void restore_multicast_list(struct net_device *dev) | |||
1454 | unsigned int ioaddr = dev->base_addr; | 1423 | unsigned int ioaddr = dev->base_addr; |
1455 | int i; | 1424 | int i; |
1456 | 1425 | ||
1457 | DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", | 1426 | pr_debug("%s: restoring Rx mode to %d addresses.\n", |
1458 | dev->name, num_addrs); | 1427 | dev->name, num_addrs); |
1459 | 1428 | ||
1460 | if (num_addrs > 0) { | 1429 | if (num_addrs > 0) { |
1461 | 1430 | ||
1462 | DEBUG(1, "Attempt to restore multicast list detected.\n"); | 1431 | pr_debug("Attempt to restore multicast list detected.\n"); |
1463 | 1432 | ||
1464 | mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); | 1433 | mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); |
1465 | /* Poll ADDRCHG bit */ | 1434 | /* Poll ADDRCHG bit */ |
@@ -1511,11 +1480,11 @@ static void set_multicast_list(struct net_device *dev) | |||
1511 | struct dev_mc_list *dmi = dev->mc_list; | 1480 | struct dev_mc_list *dmi = dev->mc_list; |
1512 | 1481 | ||
1513 | #ifdef PCMCIA_DEBUG | 1482 | #ifdef PCMCIA_DEBUG |
1514 | if (pc_debug > 1) { | 1483 | { |
1515 | static int old; | 1484 | static int old; |
1516 | if (dev->mc_count != old) { | 1485 | if (dev->mc_count != old) { |
1517 | old = dev->mc_count; | 1486 | old = dev->mc_count; |
1518 | DEBUG(0, "%s: setting Rx mode to %d addresses.\n", | 1487 | pr_debug("%s: setting Rx mode to %d addresses.\n", |
1519 | dev->name, old); | 1488 | dev->name, old); |
1520 | } | 1489 | } |
1521 | } | 1490 | } |
@@ -1546,7 +1515,7 @@ static void restore_multicast_list(struct net_device *dev) | |||
1546 | unsigned int ioaddr = dev->base_addr; | 1515 | unsigned int ioaddr = dev->base_addr; |
1547 | mace_private *lp = netdev_priv(dev); | 1516 | mace_private *lp = netdev_priv(dev); |
1548 | 1517 | ||
1549 | DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name, | 1518 | pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name, |
1550 | lp->multicast_num_addrs); | 1519 | lp->multicast_num_addrs); |
1551 | 1520 | ||
1552 | if (dev->flags & IFF_PROMISC) { | 1521 | if (dev->flags & IFF_PROMISC) { |
@@ -1567,11 +1536,11 @@ static void set_multicast_list(struct net_device *dev) | |||
1567 | mace_private *lp = netdev_priv(dev); | 1536 | mace_private *lp = netdev_priv(dev); |
1568 | 1537 | ||
1569 | #ifdef PCMCIA_DEBUG | 1538 | #ifdef PCMCIA_DEBUG |
1570 | if (pc_debug > 1) { | 1539 | { |
1571 | static int old; | 1540 | static int old; |
1572 | if (dev->mc_count != old) { | 1541 | if (dev->mc_count != old) { |
1573 | old = dev->mc_count; | 1542 | old = dev->mc_count; |
1574 | DEBUG(0, "%s: setting Rx mode to %d addresses.\n", | 1543 | pr_debug("%s: setting Rx mode to %d addresses.\n", |
1575 | dev->name, old); | 1544 | dev->name, old); |
1576 | } | 1545 | } |
1577 | } | 1546 | } |
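
The nmclan_cs conversion above likewise replaces the open-coded tuple walk (tuple_t plus pcmcia_get_first_tuple()/pcmcia_get_tuple_data()) with a single pcmcia_get_tuple() call when reading the station address from the CIS. A sketch of that pattern, reusing the tuple code 0x80 and ETHER_ADDR_LEN from the hunk (the wrapper example_read_mac is illustrative only):

	static int example_read_mac(struct pcmcia_device *link,
				    struct net_device *dev)
	{
		u8 *buf;
		size_t len;

		/* pcmcia_get_tuple() allocates buf; the caller frees it */
		len = pcmcia_get_tuple(link, 0x80, &buf);
		if (!buf || len < ETHER_ADDR_LEN) {
			kfree(buf);
			return -ENODEV;
		}
		memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
		kfree(buf);
		return 0;
	}

The kfree() on the error path mirrors the hunk: buf is freed unconditionally before bailing out (kfree(NULL) is a no-op), so a short tuple does not leak the returned buffer.
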
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 94c9ad2746bc..cbe462ed221f 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -71,15 +71,6 @@ | |||
71 | 71 | ||
72 | static const char *if_names[] = { "auto", "10baseT", "10base2"}; | 72 | static const char *if_names[] = { "auto", "10baseT", "10base2"}; |
73 | 73 | ||
74 | #ifdef PCMCIA_DEBUG | ||
75 | static int pc_debug = PCMCIA_DEBUG; | ||
76 | module_param(pc_debug, int, 0); | ||
77 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
78 | static char *version = | ||
79 | "pcnet_cs.c 1.153 2003/11/09 18:53:09 (David Hinds)"; | ||
80 | #else | ||
81 | #define DEBUG(n, args...) | ||
82 | #endif | ||
83 | 74 | ||
84 | /*====================================================================*/ | 75 | /*====================================================================*/ |
85 | 76 | ||
@@ -265,7 +256,7 @@ static int pcnet_probe(struct pcmcia_device *link) | |||
265 | pcnet_dev_t *info; | 256 | pcnet_dev_t *info; |
266 | struct net_device *dev; | 257 | struct net_device *dev; |
267 | 258 | ||
268 | DEBUG(0, "pcnet_attach()\n"); | 259 | dev_dbg(&link->dev, "pcnet_attach()\n"); |
269 | 260 | ||
270 | /* Create new ethernet device */ | 261 | /* Create new ethernet device */ |
271 | dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); | 262 | dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); |
@@ -275,7 +266,6 @@ static int pcnet_probe(struct pcmcia_device *link) | |||
275 | link->priv = dev; | 266 | link->priv = dev; |
276 | 267 | ||
277 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 268 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
278 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
279 | link->conf.Attributes = CONF_ENABLE_IRQ; | 269 | link->conf.Attributes = CONF_ENABLE_IRQ; |
280 | link->conf.IntType = INT_MEMORY_AND_IO; | 270 | link->conf.IntType = INT_MEMORY_AND_IO; |
281 | 271 | ||
@@ -297,7 +287,7 @@ static void pcnet_detach(struct pcmcia_device *link) | |||
297 | { | 287 | { |
298 | struct net_device *dev = link->priv; | 288 | struct net_device *dev = link->priv; |
299 | 289 | ||
300 | DEBUG(0, "pcnet_detach(0x%p)\n", link); | 290 | dev_dbg(&link->dev, "pcnet_detach\n"); |
301 | 291 | ||
302 | if (link->dev_node) | 292 | if (link->dev_node) |
303 | unregister_netdev(dev); | 293 | unregister_netdev(dev); |
@@ -326,17 +316,15 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link) | |||
326 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 316 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
327 | req.Base = 0; req.Size = 0; | 317 | req.Base = 0; req.Size = 0; |
328 | req.AccessSpeed = 0; | 318 | req.AccessSpeed = 0; |
329 | i = pcmcia_request_window(&link, &req, &link->win); | 319 | i = pcmcia_request_window(link, &req, &link->win); |
330 | if (i != 0) { | 320 | if (i != 0) |
331 | cs_error(link, RequestWindow, i); | ||
332 | return NULL; | 321 | return NULL; |
333 | } | ||
334 | 322 | ||
335 | virt = ioremap(req.Base, req.Size); | 323 | virt = ioremap(req.Base, req.Size); |
336 | mem.Page = 0; | 324 | mem.Page = 0; |
337 | for (i = 0; i < NR_INFO; i++) { | 325 | for (i = 0; i < NR_INFO; i++) { |
338 | mem.CardOffset = hw_info[i].offset & ~(req.Size-1); | 326 | mem.CardOffset = hw_info[i].offset & ~(req.Size-1); |
339 | pcmcia_map_mem_page(link->win, &mem); | 327 | pcmcia_map_mem_page(link, link->win, &mem); |
340 | base = &virt[hw_info[i].offset & (req.Size-1)]; | 328 | base = &virt[hw_info[i].offset & (req.Size-1)]; |
341 | if ((readb(base+0) == hw_info[i].a0) && | 329 | if ((readb(base+0) == hw_info[i].a0) && |
342 | (readb(base+2) == hw_info[i].a1) && | 330 | (readb(base+2) == hw_info[i].a1) && |
@@ -348,9 +336,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link) | |||
348 | } | 336 | } |
349 | 337 | ||
350 | iounmap(virt); | 338 | iounmap(virt); |
351 | j = pcmcia_release_window(link->win); | 339 | j = pcmcia_release_window(link, link->win); |
352 | if (j != 0) | ||
353 | cs_error(link, ReleaseWindow, j); | ||
354 | return (i < NR_INFO) ? hw_info+i : NULL; | 340 | return (i < NR_INFO) ? hw_info+i : NULL; |
355 | } /* get_hwinfo */ | 341 | } /* get_hwinfo */ |
356 | 342 | ||
@@ -495,9 +481,6 @@ static hw_info_t *get_hwired(struct pcmcia_device *link) | |||
495 | 481 | ||
496 | ======================================================================*/ | 482 | ======================================================================*/ |
497 | 483 | ||
498 | #define CS_CHECK(fn, ret) \ | ||
499 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
500 | |||
501 | static int try_io_port(struct pcmcia_device *link) | 484 | static int try_io_port(struct pcmcia_device *link) |
502 | { | 485 | { |
503 | int j, ret; | 486 | int j, ret; |
@@ -567,19 +550,19 @@ static int pcnet_config(struct pcmcia_device *link) | |||
567 | { | 550 | { |
568 | struct net_device *dev = link->priv; | 551 | struct net_device *dev = link->priv; |
569 | pcnet_dev_t *info = PRIV(dev); | 552 | pcnet_dev_t *info = PRIV(dev); |
570 | int last_ret, last_fn, start_pg, stop_pg, cm_offset; | 553 | int ret, start_pg, stop_pg, cm_offset; |
571 | int has_shmem = 0; | 554 | int has_shmem = 0; |
572 | hw_info_t *local_hw_info; | 555 | hw_info_t *local_hw_info; |
573 | 556 | ||
574 | DEBUG(0, "pcnet_config(0x%p)\n", link); | 557 | dev_dbg(&link->dev, "pcnet_config\n"); |
575 | 558 | ||
576 | last_ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); | 559 | ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); |
577 | if (last_ret) { | 560 | if (ret) |
578 | cs_error(link, RequestIO, last_ret); | ||
579 | goto failed; | 561 | goto failed; |
580 | } | ||
581 | 562 | ||
582 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 563 | ret = pcmcia_request_irq(link, &link->irq); |
564 | if (ret) | ||
565 | goto failed; | ||
583 | 566 | ||
584 | if (link->io.NumPorts2 == 8) { | 567 | if (link->io.NumPorts2 == 8) { |
585 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 568 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
@@ -589,7 +572,9 @@ static int pcnet_config(struct pcmcia_device *link) | |||
589 | (link->card_id == PRODID_IBM_HOME_AND_AWAY)) | 572 | (link->card_id == PRODID_IBM_HOME_AND_AWAY)) |
590 | link->conf.ConfigIndex |= 0x10; | 573 | link->conf.ConfigIndex |= 0x10; |
591 | 574 | ||
592 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 575 | ret = pcmcia_request_configuration(link, &link->conf); |
576 | if (ret) | ||
577 | goto failed; | ||
593 | dev->irq = link->irq.AssignedIRQ; | 578 | dev->irq = link->irq.AssignedIRQ; |
594 | dev->base_addr = link->io.BasePort1; | 579 | dev->base_addr = link->io.BasePort1; |
595 | if (info->flags & HAS_MISC_REG) { | 580 | if (info->flags & HAS_MISC_REG) { |
@@ -660,7 +645,7 @@ static int pcnet_config(struct pcmcia_device *link) | |||
660 | mii_phy_probe(dev); | 645 | mii_phy_probe(dev); |
661 | 646 | ||
662 | link->dev_node = &info->node; | 647 | link->dev_node = &info->node; |
663 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 648 | SET_NETDEV_DEV(dev, &link->dev); |
664 | 649 | ||
665 | if (register_netdev(dev) != 0) { | 650 | if (register_netdev(dev) != 0) { |
666 | printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); | 651 | printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); |
@@ -687,8 +672,6 @@ static int pcnet_config(struct pcmcia_device *link) | |||
687 | printk(" hw_addr %pM\n", dev->dev_addr); | 672 | printk(" hw_addr %pM\n", dev->dev_addr); |
688 | return 0; | 673 | return 0; |
689 | 674 | ||
690 | cs_failed: | ||
691 | cs_error(link, last_fn, last_ret); | ||
692 | failed: | 675 | failed: |
693 | pcnet_release(link); | 676 | pcnet_release(link); |
694 | return -ENODEV; | 677 | return -ENODEV; |
@@ -706,7 +689,7 @@ static void pcnet_release(struct pcmcia_device *link) | |||
706 | { | 689 | { |
707 | pcnet_dev_t *info = PRIV(link->priv); | 690 | pcnet_dev_t *info = PRIV(link->priv); |
708 | 691 | ||
709 | DEBUG(0, "pcnet_release(0x%p)\n", link); | 692 | dev_dbg(&link->dev, "pcnet_release\n"); |
710 | 693 | ||
711 | if (info->flags & USE_SHMEM) | 694 | if (info->flags & USE_SHMEM) |
712 | iounmap(info->base); | 695 | iounmap(info->base); |
@@ -960,7 +943,7 @@ static void mii_phy_probe(struct net_device *dev) | |||
960 | phyid = tmp << 16; | 943 | phyid = tmp << 16; |
961 | phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); | 944 | phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); |
962 | phyid &= MII_PHYID_REV_MASK; | 945 | phyid &= MII_PHYID_REV_MASK; |
963 | DEBUG(0, "%s: MII at %d is 0x%08x\n", dev->name, i, phyid); | 946 | pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid); |
964 | if (phyid == AM79C9XX_HOME_PHY) { | 947 | if (phyid == AM79C9XX_HOME_PHY) { |
965 | info->pna_phy = i; | 948 | info->pna_phy = i; |
966 | } else if (phyid != AM79C9XX_ETH_PHY) { | 949 | } else if (phyid != AM79C9XX_ETH_PHY) { |
@@ -976,7 +959,7 @@ static int pcnet_open(struct net_device *dev) | |||
976 | struct pcmcia_device *link = info->p_dev; | 959 | struct pcmcia_device *link = info->p_dev; |
977 | unsigned int nic_base = dev->base_addr; | 960 | unsigned int nic_base = dev->base_addr; |
978 | 961 | ||
979 | DEBUG(2, "pcnet_open('%s')\n", dev->name); | 962 | dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name); |
980 | 963 | ||
981 | if (!pcmcia_dev_present(link)) | 964 | if (!pcmcia_dev_present(link)) |
982 | return -ENODEV; | 965 | return -ENODEV; |
@@ -1008,7 +991,7 @@ static int pcnet_close(struct net_device *dev) | |||
1008 | pcnet_dev_t *info = PRIV(dev); | 991 | pcnet_dev_t *info = PRIV(dev); |
1009 | struct pcmcia_device *link = info->p_dev; | 992 | struct pcmcia_device *link = info->p_dev; |
1010 | 993 | ||
1011 | DEBUG(2, "pcnet_close('%s')\n", dev->name); | 994 | dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name); |
1012 | 995 | ||
1013 | ei_close(dev); | 996 | ei_close(dev); |
1014 | free_irq(dev->irq, dev); | 997 | free_irq(dev->irq, dev); |
@@ -1251,10 +1234,8 @@ static void dma_block_input(struct net_device *dev, int count, | |||
1251 | int xfer_count = count; | 1234 | int xfer_count = count; |
1252 | char *buf = skb->data; | 1235 | char *buf = skb->data; |
1253 | 1236 | ||
1254 | #ifdef PCMCIA_DEBUG | ||
1255 | if ((ei_debug > 4) && (count != 4)) | 1237 | if ((ei_debug > 4) && (count != 4)) |
1256 | printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4); | 1238 | pr_debug("%s: [bi=%d]\n", dev->name, count+4); |
1257 | #endif | ||
1258 | if (ei_status.dmaing) { | 1239 | if (ei_status.dmaing) { |
1259 | printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." | 1240 | printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." |
1260 | "[DMAstat:%1x][irqlock:%1x]\n", | 1241 | "[DMAstat:%1x][irqlock:%1x]\n", |
@@ -1495,7 +1476,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, | |||
1495 | pcnet_dev_t *info = PRIV(dev); | 1476 | pcnet_dev_t *info = PRIV(dev); |
1496 | win_req_t req; | 1477 | win_req_t req; |
1497 | memreq_t mem; | 1478 | memreq_t mem; |
1498 | int i, window_size, offset, last_ret, last_fn; | 1479 | int i, window_size, offset, ret; |
1499 | 1480 | ||
1500 | window_size = (stop_pg - start_pg) << 8; | 1481 | window_size = (stop_pg - start_pg) << 8; |
1501 | if (window_size > 32 * 1024) | 1482 | if (window_size > 32 * 1024) |
@@ -1509,13 +1490,17 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, | |||
1509 | req.Attributes |= WIN_USE_WAIT; | 1490 | req.Attributes |= WIN_USE_WAIT; |
1510 | req.Base = 0; req.Size = window_size; | 1491 | req.Base = 0; req.Size = window_size; |
1511 | req.AccessSpeed = mem_speed; | 1492 | req.AccessSpeed = mem_speed; |
1512 | CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); | 1493 | ret = pcmcia_request_window(link, &req, &link->win); |
1494 | if (ret) | ||
1495 | goto failed; | ||
1513 | 1496 | ||
1514 | mem.CardOffset = (start_pg << 8) + cm_offset; | 1497 | mem.CardOffset = (start_pg << 8) + cm_offset; |
1515 | offset = mem.CardOffset % window_size; | 1498 | offset = mem.CardOffset % window_size; |
1516 | mem.CardOffset -= offset; | 1499 | mem.CardOffset -= offset; |
1517 | mem.Page = 0; | 1500 | mem.Page = 0; |
1518 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); | 1501 | ret = pcmcia_map_mem_page(link, link->win, &mem); |
1502 | if (ret) | ||
1503 | goto failed; | ||
1519 | 1504 | ||
1520 | /* Try scribbling on the buffer */ | 1505 | /* Try scribbling on the buffer */ |
1521 | info->base = ioremap(req.Base, window_size); | 1506 | info->base = ioremap(req.Base, window_size); |
@@ -1527,8 +1512,8 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, | |||
1527 | pcnet_reset_8390(dev); | 1512 | pcnet_reset_8390(dev); |
1528 | if (i != (TX_PAGES<<8)) { | 1513 | if (i != (TX_PAGES<<8)) { |
1529 | iounmap(info->base); | 1514 | iounmap(info->base); |
1530 | pcmcia_release_window(link->win); | 1515 | pcmcia_release_window(link, link->win); |
1531 | info->base = NULL; link->win = NULL; | 1516 | info->base = NULL; link->win = 0; |
1532 | goto failed; | 1517 | goto failed; |
1533 | } | 1518 | } |
1534 | 1519 | ||
@@ -1549,8 +1534,6 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, | |||
1549 | info->flags |= USE_SHMEM; | 1534 | info->flags |= USE_SHMEM; |
1550 | return 0; | 1535 | return 0; |
1551 | 1536 | ||
1552 | cs_failed: | ||
1553 | cs_error(link, last_fn, last_ret); | ||
1554 | failed: | 1537 | failed: |
1555 | return 1; | 1538 | return 1; |
1556 | } | 1539 | } |
@@ -1788,7 +1771,6 @@ static int __init init_pcnet_cs(void) | |||
1788 | 1771 | ||
1789 | static void __exit exit_pcnet_cs(void) | 1772 | static void __exit exit_pcnet_cs(void) |
1790 | { | 1773 | { |
1791 | DEBUG(0, "pcnet_cs: unloading\n"); | ||
1792 | pcmcia_unregister_driver(&pcnet_driver); | 1774 | pcmcia_unregister_driver(&pcnet_driver); |
1793 | } | 1775 | } |
1794 | 1776 | ||
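
A further API change visible throughout the pcnet_cs hunks is that the memory-window calls now take the pcmcia_device itself: pcmcia_request_window(link, ...), pcmcia_map_mem_page(link, win, ...) and pcmcia_release_window(link, win) replace the older handle-based forms. A sketch of the request/map sequence as used above (example_map_window and its parameters are illustrative; the req/mem field names are those from the hunks):

	static int example_map_window(struct pcmcia_device *link,
				      unsigned int card_offset,
				      unsigned int size)
	{
		win_req_t req;
		memreq_t mem;
		int ret;

		req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
		req.Base = 0;
		req.Size = size;
		req.AccessSpeed = 0;

		/* first argument is now the device, not a pointer to it */
		ret = pcmcia_request_window(link, &req, &link->win);
		if (ret)
			return ret;

		mem.CardOffset = card_offset;
		mem.Page = 0;
		ret = pcmcia_map_mem_page(link, link->win, &mem);
		if (ret) {
			pcmcia_release_window(link, link->win);
			return ret;
		}

		/* req.Base/req.Size can then be handed to ioremap() */
		return 0;
	}
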
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 7bde2cd34c7e..9e0da370912e 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -79,14 +79,6 @@ MODULE_FIRMWARE(FIRMWARE_NAME); | |||
79 | */ | 79 | */ |
80 | INT_MODULE_PARM(if_port, 0); | 80 | INT_MODULE_PARM(if_port, 0); |
81 | 81 | ||
82 | #ifdef PCMCIA_DEBUG | ||
83 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
84 | static const char *version = | ||
85 | "smc91c92_cs.c 1.123 2006/11/09 Donald Becker, becker@scyld.com.\n"; | ||
86 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
87 | #else | ||
88 | #define DEBUG(n, args...) | ||
89 | #endif | ||
90 | 82 | ||
91 | #define DRV_NAME "smc91c92_cs" | 83 | #define DRV_NAME "smc91c92_cs" |
92 | #define DRV_VERSION "1.123" | 84 | #define DRV_VERSION "1.123" |
@@ -126,12 +118,6 @@ struct smc_private { | |||
126 | int rx_ovrn; | 118 | int rx_ovrn; |
127 | }; | 119 | }; |
128 | 120 | ||
129 | struct smc_cfg_mem { | ||
130 | tuple_t tuple; | ||
131 | cisparse_t parse; | ||
132 | u_char buf[255]; | ||
133 | }; | ||
134 | |||
135 | /* Special definitions for Megahertz multifunction cards */ | 121 | /* Special definitions for Megahertz multifunction cards */ |
136 | #define MEGAHERTZ_ISR 0x0380 | 122 | #define MEGAHERTZ_ISR 0x0380 |
137 | 123 | ||
@@ -329,7 +315,7 @@ static int smc91c92_probe(struct pcmcia_device *link) | |||
329 | struct smc_private *smc; | 315 | struct smc_private *smc; |
330 | struct net_device *dev; | 316 | struct net_device *dev; |
331 | 317 | ||
332 | DEBUG(0, "smc91c92_attach()\n"); | 318 | dev_dbg(&link->dev, "smc91c92_attach()\n"); |
333 | 319 | ||
334 | /* Create new ethernet device */ | 320 | /* Create new ethernet device */ |
335 | dev = alloc_etherdev(sizeof(struct smc_private)); | 321 | dev = alloc_etherdev(sizeof(struct smc_private)); |
@@ -343,10 +329,8 @@ static int smc91c92_probe(struct pcmcia_device *link) | |||
343 | link->io.NumPorts1 = 16; | 329 | link->io.NumPorts1 = 16; |
344 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 330 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
345 | link->io.IOAddrLines = 4; | 331 | link->io.IOAddrLines = 4; |
346 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; | 332 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
347 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
348 | link->irq.Handler = &smc_interrupt; | 333 | link->irq.Handler = &smc_interrupt; |
349 | link->irq.Instance = dev; | ||
350 | link->conf.Attributes = CONF_ENABLE_IRQ; | 334 | link->conf.Attributes = CONF_ENABLE_IRQ; |
351 | link->conf.IntType = INT_MEMORY_AND_IO; | 335 | link->conf.IntType = INT_MEMORY_AND_IO; |
352 | 336 | ||
@@ -377,7 +361,7 @@ static void smc91c92_detach(struct pcmcia_device *link) | |||
377 | { | 361 | { |
378 | struct net_device *dev = link->priv; | 362 | struct net_device *dev = link->priv; |
379 | 363 | ||
380 | DEBUG(0, "smc91c92_detach(0x%p)\n", link); | 364 | dev_dbg(&link->dev, "smc91c92_detach\n"); |
381 | 365 | ||
382 | if (link->dev_node) | 366 | if (link->dev_node) |
383 | unregister_netdev(dev); | 367 | unregister_netdev(dev); |
@@ -408,34 +392,7 @@ static int cvt_ascii_address(struct net_device *dev, char *s) | |||
408 | return 0; | 392 | return 0; |
409 | } | 393 | } |
410 | 394 | ||
411 | /*====================================================================*/ | 395 | /*==================================================================== |
412 | |||
413 | static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, | ||
414 | cisparse_t *parse) | ||
415 | { | ||
416 | int i; | ||
417 | |||
418 | i = pcmcia_get_first_tuple(handle, tuple); | ||
419 | if (i != 0) | ||
420 | return i; | ||
421 | i = pcmcia_get_tuple_data(handle, tuple); | ||
422 | if (i != 0) | ||
423 | return i; | ||
424 | return pcmcia_parse_tuple(tuple, parse); | ||
425 | } | ||
426 | |||
427 | static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, | ||
428 | cisparse_t *parse) | ||
429 | { | ||
430 | int i; | ||
431 | |||
432 | if ((i = pcmcia_get_next_tuple(handle, tuple)) != 0 || | ||
433 | (i = pcmcia_get_tuple_data(handle, tuple)) != 0) | ||
434 | return i; | ||
435 | return pcmcia_parse_tuple(tuple, parse); | ||
436 | } | ||
437 | |||
438 | /*====================================================================== | ||
439 | 396 | ||
440 | Configuration stuff for Megahertz cards | 397 | Configuration stuff for Megahertz cards |
441 | 398 | ||
@@ -490,19 +447,14 @@ static int mhz_mfc_config(struct pcmcia_device *link) | |||
490 | { | 447 | { |
491 | struct net_device *dev = link->priv; | 448 | struct net_device *dev = link->priv; |
492 | struct smc_private *smc = netdev_priv(dev); | 449 | struct smc_private *smc = netdev_priv(dev); |
493 | struct smc_cfg_mem *cfg_mem; | ||
494 | win_req_t req; | 450 | win_req_t req; |
495 | memreq_t mem; | 451 | memreq_t mem; |
496 | int i; | 452 | int i; |
497 | 453 | ||
498 | cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); | ||
499 | if (!cfg_mem) | ||
500 | return -ENOMEM; | ||
501 | |||
502 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 454 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
503 | link->conf.Status = CCSR_AUDIO_ENA; | 455 | link->conf.Status = CCSR_AUDIO_ENA; |
504 | link->irq.Attributes = | 456 | link->irq.Attributes = |
505 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; | 457 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
506 | link->io.IOAddrLines = 16; | 458 | link->io.IOAddrLines = 16; |
507 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 459 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
508 | link->io.NumPorts2 = 8; | 460 | link->io.NumPorts2 = 8; |
@@ -510,91 +462,80 @@ static int mhz_mfc_config(struct pcmcia_device *link) | |||
510 | /* The Megahertz combo cards have modem-like CIS entries, so | 462 | /* The Megahertz combo cards have modem-like CIS entries, so |
511 | we have to explicitly try a bunch of port combinations. */ | 463 | we have to explicitly try a bunch of port combinations. */ |
512 | if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) | 464 | if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) |
513 | goto free_cfg_mem; | 465 | return -ENODEV; |
466 | |||
514 | dev->base_addr = link->io.BasePort1; | 467 | dev->base_addr = link->io.BasePort1; |
515 | 468 | ||
516 | /* Allocate a memory window, for accessing the ISR */ | 469 | /* Allocate a memory window, for accessing the ISR */ |
517 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 470 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
518 | req.Base = req.Size = 0; | 471 | req.Base = req.Size = 0; |
519 | req.AccessSpeed = 0; | 472 | req.AccessSpeed = 0; |
520 | i = pcmcia_request_window(&link, &req, &link->win); | 473 | i = pcmcia_request_window(link, &req, &link->win); |
521 | if (i != 0) | 474 | if (i != 0) |
522 | goto free_cfg_mem; | 475 | return -ENODEV; |
476 | |||
523 | smc->base = ioremap(req.Base, req.Size); | 477 | smc->base = ioremap(req.Base, req.Size); |
524 | mem.CardOffset = mem.Page = 0; | 478 | mem.CardOffset = mem.Page = 0; |
525 | if (smc->manfid == MANFID_MOTOROLA) | 479 | if (smc->manfid == MANFID_MOTOROLA) |
526 | mem.CardOffset = link->conf.ConfigBase; | 480 | mem.CardOffset = link->conf.ConfigBase; |
527 | i = pcmcia_map_mem_page(link->win, &mem); | 481 | i = pcmcia_map_mem_page(link, link->win, &mem); |
528 | 482 | ||
529 | if ((i == 0) | 483 | if ((i == 0) |
530 | && (smc->manfid == MANFID_MEGAHERTZ) | 484 | && (smc->manfid == MANFID_MEGAHERTZ) |
531 | && (smc->cardid == PRODID_MEGAHERTZ_EM3288)) | 485 | && (smc->cardid == PRODID_MEGAHERTZ_EM3288)) |
532 | mhz_3288_power(link); | 486 | mhz_3288_power(link); |
533 | 487 | ||
534 | free_cfg_mem: | 488 | return 0; |
535 | kfree(cfg_mem); | ||
536 | return -ENODEV; | ||
537 | } | 489 | } |
538 | 490 | ||
539 | static int mhz_setup(struct pcmcia_device *link) | 491 | static int pcmcia_get_versmac(struct pcmcia_device *p_dev, |
492 | tuple_t *tuple, | ||
493 | void *priv) | ||
540 | { | 494 | { |
541 | struct net_device *dev = link->priv; | 495 | struct net_device *dev = priv; |
542 | struct smc_cfg_mem *cfg_mem; | 496 | cisparse_t parse; |
543 | tuple_t *tuple; | ||
544 | cisparse_t *parse; | ||
545 | u_char *buf, *station_addr; | ||
546 | int rc; | ||
547 | 497 | ||
548 | cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); | 498 | if (pcmcia_parse_tuple(tuple, &parse)) |
549 | if (!cfg_mem) | 499 | return -EINVAL; |
550 | return -1; | ||
551 | 500 | ||
552 | tuple = &cfg_mem->tuple; | 501 | if ((parse.version_1.ns > 3) && |
553 | parse = &cfg_mem->parse; | 502 | (cvt_ascii_address(dev, |
554 | buf = cfg_mem->buf; | 503 | (parse.version_1.str + parse.version_1.ofs[3])))) |
504 | return 0; | ||
555 | 505 | ||
556 | tuple->Attributes = tuple->TupleOffset = 0; | 506 | return -EINVAL; |
557 | tuple->TupleData = (cisdata_t *)buf; | 507 | }; |
558 | tuple->TupleDataMax = 255; | 508 | |
509 | static int mhz_setup(struct pcmcia_device *link) | ||
510 | { | ||
511 | struct net_device *dev = link->priv; | ||
512 | size_t len; | ||
513 | u8 *buf; | ||
514 | int rc; | ||
559 | 515 | ||
560 | /* Read the station address from the CIS. It is stored as the last | 516 | /* Read the station address from the CIS. It is stored as the last |
561 | (fourth) string in the Version 1 Version/ID tuple. */ | 517 | (fourth) string in the Version 1 Version/ID tuple. */ |
562 | tuple->DesiredTuple = CISTPL_VERS_1; | 518 | if ((link->prod_id[3]) && |
563 | if (first_tuple(link, tuple, parse) != 0) { | 519 | (cvt_ascii_address(dev, link->prod_id[3]) == 0)) |
564 | rc = -1; | 520 | return 0; |
565 | goto free_cfg_mem; | 521 | |
566 | } | 522 | /* Workarounds for broken cards start here. */ |
567 | /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ | 523 | /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ |
568 | if (next_tuple(link, tuple, parse) != 0) | 524 | if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev)) |
569 | first_tuple(link, tuple, parse); | 525 | return 0; |
570 | if (parse->version_1.ns > 3) { | ||
571 | station_addr = parse->version_1.str + parse->version_1.ofs[3]; | ||
572 | if (cvt_ascii_address(dev, station_addr) == 0) { | ||
573 | rc = 0; | ||
574 | goto free_cfg_mem; | ||
575 | } | ||
576 | } | ||
577 | 526 | ||
578 | /* Another possibility: for the EM3288, in a special tuple */ | 527 | /* Another possibility: for the EM3288, in a special tuple */ |
579 | tuple->DesiredTuple = 0x81; | ||
580 | if (pcmcia_get_first_tuple(link, tuple) != 0) { | ||
581 | rc = -1; | ||
582 | goto free_cfg_mem; | ||
583 | } | ||
584 | if (pcmcia_get_tuple_data(link, tuple) != 0) { | ||
585 | rc = -1; | ||
586 | goto free_cfg_mem; | ||
587 | } | ||
588 | buf[12] = '\0'; | ||
589 | if (cvt_ascii_address(dev, buf) == 0) { | ||
590 | rc = 0; | ||
591 | goto free_cfg_mem; | ||
592 | } | ||
593 | rc = -1; | 528 | rc = -1; |
594 | free_cfg_mem: | 529 | len = pcmcia_get_tuple(link, 0x81, &buf); |
595 | kfree(cfg_mem); | 530 | if (buf && len >= 13) { |
596 | return rc; | 531 | buf[12] = '\0'; |
597 | } | 532 | if (cvt_ascii_address(dev, buf)) |
533 | rc = 0; | ||
534 | } | ||
535 | kfree(buf); | ||
536 | |||
537 | return rc; | ||
538 | }; | ||
598 | 539 | ||
599 | /*====================================================================== | 540 | /*====================================================================== |
600 | 541 | ||
@@ -684,58 +625,21 @@ static int smc_config(struct pcmcia_device *link) | |||
684 | return i; | 625 | return i; |
685 | } | 626 | } |
686 | 627 | ||
628 | |||
687 | static int smc_setup(struct pcmcia_device *link) | 629 | static int smc_setup(struct pcmcia_device *link) |
688 | { | 630 | { |
689 | struct net_device *dev = link->priv; | 631 | struct net_device *dev = link->priv; |
690 | struct smc_cfg_mem *cfg_mem; | ||
691 | tuple_t *tuple; | ||
692 | cisparse_t *parse; | ||
693 | cistpl_lan_node_id_t *node_id; | ||
694 | u_char *buf, *station_addr; | ||
695 | int i, rc; | ||
696 | |||
697 | cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); | ||
698 | if (!cfg_mem) | ||
699 | return -ENOMEM; | ||
700 | |||
701 | tuple = &cfg_mem->tuple; | ||
702 | parse = &cfg_mem->parse; | ||
703 | buf = cfg_mem->buf; | ||
704 | |||
705 | tuple->Attributes = tuple->TupleOffset = 0; | ||
706 | tuple->TupleData = (cisdata_t *)buf; | ||
707 | tuple->TupleDataMax = 255; | ||
708 | 632 | ||
709 | /* Check for a LAN function extension tuple */ | 633 | /* Check for a LAN function extension tuple */ |
710 | tuple->DesiredTuple = CISTPL_FUNCE; | 634 | if (!pcmcia_get_mac_from_cis(link, dev)) |
711 | i = first_tuple(link, tuple, parse); | 635 | return 0; |
712 | while (i == 0) { | 636 | |
713 | if (parse->funce.type == CISTPL_FUNCE_LAN_NODE_ID) | ||
714 | break; | ||
715 | i = next_tuple(link, tuple, parse); | ||
716 | } | ||
717 | if (i == 0) { | ||
718 | node_id = (cistpl_lan_node_id_t *)parse->funce.data; | ||
719 | if (node_id->nb == 6) { | ||
720 | for (i = 0; i < 6; i++) | ||
721 | dev->dev_addr[i] = node_id->id[i]; | ||
722 | rc = 0; | ||
723 | goto free_cfg_mem; | ||
724 | } | ||
725 | } | ||
726 | /* Try the third string in the Version 1 Version/ID tuple. */ | 637 | /* Try the third string in the Version 1 Version/ID tuple. */ |
727 | if (link->prod_id[2]) { | 638 | if (link->prod_id[2]) { |
728 | station_addr = link->prod_id[2]; | 639 | if (cvt_ascii_address(dev, link->prod_id[2]) == 0) |
729 | if (cvt_ascii_address(dev, station_addr) == 0) { | 640 | return 0; |
730 | rc = 0; | ||
731 | goto free_cfg_mem; | ||
732 | } | ||
733 | } | 641 | } |
734 | 642 | return -1; | |
735 | rc = -1; | ||
736 | free_cfg_mem: | ||
737 | kfree(cfg_mem); | ||
738 | return rc; | ||
739 | } | 643 | } |
740 | 644 | ||
741 | /*====================================================================*/ | 645 | /*====================================================================*/ |
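smc_setup() now leaves the CISTPL_FUNCE walk to pcmcia_get_mac_from_cis(), a core helper that looks for a LAN_NODE_ID function extension carrying a six-byte address and, on success, copies it into dev->dev_addr and returns 0. Its internals are not part of this excerpt; the sketch below is an assumed equivalent built on the same pcmcia_loop_tuple() mechanism, for illustration only:

    /* Assumed shape of the LAN_NODE_ID lookup behind
     * pcmcia_get_mac_from_cis(); raw FUNCE layout taken to be
     * [0] subtuple type, [1] length, [2..7] station address. */
    static int example_mac_from_funce(struct pcmcia_device *p_dev,
                                      tuple_t *tuple, void *priv)
    {
            struct net_device *dev = priv;
            int i;

            if (tuple->TupleDataLen < 8)
                    return -EINVAL;
            if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
                    return -EINVAL;
            if (tuple->TupleData[1] != 6)
                    return -EINVAL;         /* not a 6-byte node id */
            for (i = 0; i < 6; i++)
                    dev->dev_addr[i] = tuple->TupleData[i + 2];
            return 0;
    }

    /* i.e. roughly: pcmcia_loop_tuple(link, CISTPL_FUNCE,
     *                                 example_mac_from_funce, dev); */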
@@ -749,7 +653,7 @@ static int osi_config(struct pcmcia_device *link) | |||
749 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 653 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
750 | link->conf.Status = CCSR_AUDIO_ENA; | 654 | link->conf.Status = CCSR_AUDIO_ENA; |
751 | link->irq.Attributes = | 655 | link->irq.Attributes = |
752 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; | 656 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
753 | link->io.NumPorts1 = 64; | 657 | link->io.NumPorts1 = 64; |
754 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 658 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
755 | link->io.NumPorts2 = 8; | 659 | link->io.NumPorts2 = 8; |
@@ -794,41 +698,31 @@ static int osi_load_firmware(struct pcmcia_device *link) | |||
794 | return err; | 698 | return err; |
795 | } | 699 | } |
796 | 700 | ||
797 | static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) | 701 | static int pcmcia_osi_mac(struct pcmcia_device *p_dev, |
702 | tuple_t *tuple, | ||
703 | void *priv) | ||
798 | { | 704 | { |
799 | struct net_device *dev = link->priv; | 705 | struct net_device *dev = priv; |
800 | struct smc_cfg_mem *cfg_mem; | 706 | int i; |
801 | tuple_t *tuple; | ||
802 | u_char *buf; | ||
803 | int i, rc; | ||
804 | 707 | ||
805 | cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); | 708 | if (tuple->TupleDataLen < 8) |
806 | if (!cfg_mem) | 709 | return -EINVAL; |
807 | return -1; | 710 | if (tuple->TupleData[0] != 0x04) |
711 | return -EINVAL; | ||
712 | for (i = 0; i < 6; i++) | ||
713 | dev->dev_addr[i] = tuple->TupleData[i+2]; | ||
714 | return 0; | ||
715 | }; | ||
808 | 716 | ||
809 | tuple = &cfg_mem->tuple; | ||
810 | buf = cfg_mem->buf; | ||
811 | 717 | ||
812 | tuple->Attributes = TUPLE_RETURN_COMMON; | 718 | static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) |
813 | tuple->TupleData = (cisdata_t *)buf; | 719 | { |
814 | tuple->TupleDataMax = 255; | 720 | struct net_device *dev = link->priv; |
815 | tuple->TupleOffset = 0; | 721 | int rc; |
816 | 722 | ||
817 | /* Read the station address from tuple 0x90, subtuple 0x04 */ | 723 | /* Read the station address from tuple 0x90, subtuple 0x04 */ |
818 | tuple->DesiredTuple = 0x90; | 724 | if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev)) |
819 | i = pcmcia_get_first_tuple(link, tuple); | 725 | return -1; |
820 | while (i == 0) { | ||
821 | i = pcmcia_get_tuple_data(link, tuple); | ||
822 | if ((i != 0) || (buf[0] == 0x04)) | ||
823 | break; | ||
824 | i = pcmcia_get_next_tuple(link, tuple); | ||
825 | } | ||
826 | if (i != 0) { | ||
827 | rc = -1; | ||
828 | goto free_cfg_mem; | ||
829 | } | ||
830 | for (i = 0; i < 6; i++) | ||
831 | dev->dev_addr[i] = buf[i+2]; | ||
832 | 726 | ||
833 | if (((manfid == MANFID_OSITECH) && | 727 | if (((manfid == MANFID_OSITECH) && |
834 | (cardid == PRODID_OSITECH_SEVEN)) || | 728 | (cardid == PRODID_OSITECH_SEVEN)) || |
@@ -836,20 +730,17 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) | |||
836 | (cardid == PRODID_PSION_NET100))) { | 730 | (cardid == PRODID_PSION_NET100))) { |
837 | rc = osi_load_firmware(link); | 731 | rc = osi_load_firmware(link); |
838 | if (rc) | 732 | if (rc) |
839 | goto free_cfg_mem; | 733 | return rc; |
840 | } else if (manfid == MANFID_OSITECH) { | 734 | } else if (manfid == MANFID_OSITECH) { |
841 | /* Make sure both functions are powered up */ | 735 | /* Make sure both functions are powered up */ |
842 | set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR); | 736 | set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR); |
843 | /* Now, turn on the interrupt for both card functions */ | 737 | /* Now, turn on the interrupt for both card functions */ |
844 | set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR); | 738 | set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR); |
845 | DEBUG(2, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", | 739 | dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", |
846 | inw(link->io.BasePort1 + OSITECH_AUI_PWR), | 740 | inw(link->io.BasePort1 + OSITECH_AUI_PWR), |
847 | inw(link->io.BasePort1 + OSITECH_RESET_ISR)); | 741 | inw(link->io.BasePort1 + OSITECH_RESET_ISR)); |
848 | } | 742 | } |
849 | rc = 0; | 743 | return 0; |
850 | free_cfg_mem: | ||
851 | kfree(cfg_mem); | ||
852 | return rc; | ||
853 | } | 744 | } |
854 | 745 | ||
855 | static int smc91c92_suspend(struct pcmcia_device *link) | 746 | static int smc91c92_suspend(struct pcmcia_device *link) |
@@ -959,12 +850,6 @@ static int check_sig(struct pcmcia_device *link) | |||
959 | 850 | ||
960 | ======================================================================*/ | 851 | ======================================================================*/ |
961 | 852 | ||
962 | #define CS_EXIT_TEST(ret, svc, label) \ | ||
963 | if (ret != 0) { \ | ||
964 | cs_error(link, svc, ret); \ | ||
965 | goto label; \ | ||
966 | } | ||
967 | |||
968 | static int smc91c92_config(struct pcmcia_device *link) | 853 | static int smc91c92_config(struct pcmcia_device *link) |
969 | { | 854 | { |
970 | struct net_device *dev = link->priv; | 855 | struct net_device *dev = link->priv; |
@@ -974,7 +859,7 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
974 | unsigned int ioaddr; | 859 | unsigned int ioaddr; |
975 | u_long mir; | 860 | u_long mir; |
976 | 861 | ||
977 | DEBUG(0, "smc91c92_config(0x%p)\n", link); | 862 | dev_dbg(&link->dev, "smc91c92_config\n"); |
978 | 863 | ||
979 | smc->manfid = link->manf_id; | 864 | smc->manfid = link->manf_id; |
980 | smc->cardid = link->card_id; | 865 | smc->cardid = link->card_id; |
@@ -990,12 +875,15 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
990 | } else { | 875 | } else { |
991 | i = smc_config(link); | 876 | i = smc_config(link); |
992 | } | 877 | } |
993 | CS_EXIT_TEST(i, RequestIO, config_failed); | 878 | if (i) |
879 | goto config_failed; | ||
994 | 880 | ||
995 | i = pcmcia_request_irq(link, &link->irq); | 881 | i = pcmcia_request_irq(link, &link->irq); |
996 | CS_EXIT_TEST(i, RequestIRQ, config_failed); | 882 | if (i) |
883 | goto config_failed; | ||
997 | i = pcmcia_request_configuration(link, &link->conf); | 884 | i = pcmcia_request_configuration(link, &link->conf); |
998 | CS_EXIT_TEST(i, RequestConfiguration, config_failed); | 885 | if (i) |
886 | goto config_failed; | ||
999 | 887 | ||
1000 | if (smc->manfid == MANFID_MOTOROLA) | 888 | if (smc->manfid == MANFID_MOTOROLA) |
1001 | mot_config(link); | 889 | mot_config(link); |
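The CS_EXIT_TEST() macro deleted above bundled a cs_error() report with a hidden goto; cs_error() is being retired in this series (the assumption behind this sketch is that the core's request functions now log failures themselves), so the call sites become ordinary checks. The same treatment is applied to CS_CHECK() in airo_cs.c further down. Side by side, using smc91c92_config()'s own labels:

    /* old: error reporting and control flow hidden in a macro */
    #define CS_EXIT_TEST(ret, svc, label) \
            if (ret != 0) { \
                    cs_error(link, svc, ret); \
                    goto label; \
            }

            i = pcmcia_request_irq(link, &link->irq);
            CS_EXIT_TEST(i, RequestIRQ, config_failed);

    /* new: the failure path is spelled out at the call site */
            i = pcmcia_request_irq(link, &link->irq);
            if (i)
                    goto config_failed;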
@@ -1074,7 +962,7 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
1074 | } | 962 | } |
1075 | 963 | ||
1076 | link->dev_node = &smc->node; | 964 | link->dev_node = &smc->node; |
1077 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 965 | SET_NETDEV_DEV(dev, &link->dev); |
1078 | 966 | ||
1079 | if (register_netdev(dev) != 0) { | 967 | if (register_netdev(dev) != 0) { |
1080 | printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); | 968 | printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); |
@@ -1100,7 +988,7 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
1100 | 988 | ||
1101 | if (smc->cfg & CFG_MII_SELECT) { | 989 | if (smc->cfg & CFG_MII_SELECT) { |
1102 | if (smc->mii_if.phy_id != -1) { | 990 | if (smc->mii_if.phy_id != -1) { |
1103 | DEBUG(0, " MII transceiver at index %d, status %x.\n", | 991 | dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", |
1104 | smc->mii_if.phy_id, j); | 992 | smc->mii_if.phy_id, j); |
1105 | } else { | 993 | } else { |
1106 | printk(KERN_NOTICE " No MII transceivers found!\n"); | 994 | printk(KERN_NOTICE " No MII transceivers found!\n"); |
@@ -1110,7 +998,7 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
1110 | 998 | ||
1111 | config_undo: | 999 | config_undo: |
1112 | unregister_netdev(dev); | 1000 | unregister_netdev(dev); |
1113 | config_failed: /* CS_EXIT_TEST() calls jump to here... */ | 1001 | config_failed: |
1114 | smc91c92_release(link); | 1002 | smc91c92_release(link); |
1115 | return -ENODEV; | 1003 | return -ENODEV; |
1116 | } /* smc91c92_config */ | 1004 | } /* smc91c92_config */ |
@@ -1125,7 +1013,7 @@ config_failed: /* CS_EXIT_TEST() calls jump to here... */ | |||
1125 | 1013 | ||
1126 | static void smc91c92_release(struct pcmcia_device *link) | 1014 | static void smc91c92_release(struct pcmcia_device *link) |
1127 | { | 1015 | { |
1128 | DEBUG(0, "smc91c92_release(0x%p)\n", link); | 1016 | dev_dbg(&link->dev, "smc91c92_release\n"); |
1129 | if (link->win) { | 1017 | if (link->win) { |
1130 | struct net_device *dev = link->priv; | 1018 | struct net_device *dev = link->priv; |
1131 | struct smc_private *smc = netdev_priv(dev); | 1019 | struct smc_private *smc = netdev_priv(dev); |
@@ -1222,10 +1110,10 @@ static int smc_open(struct net_device *dev) | |||
1222 | struct smc_private *smc = netdev_priv(dev); | 1110 | struct smc_private *smc = netdev_priv(dev); |
1223 | struct pcmcia_device *link = smc->p_dev; | 1111 | struct pcmcia_device *link = smc->p_dev; |
1224 | 1112 | ||
1225 | #ifdef PCMCIA_DEBUG | 1113 | dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n", |
1226 | DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n", | ||
1227 | dev->name, dev, inw(dev->base_addr + BANK_SELECT)); | 1114 | dev->name, dev, inw(dev->base_addr + BANK_SELECT)); |
1228 | if (pc_debug > 1) smc_dump(dev); | 1115 | #ifdef PCMCIA_DEBUG |
1116 | smc_dump(dev); | ||
1229 | #endif | 1117 | #endif |
1230 | 1118 | ||
1231 | /* Check that the PCMCIA card is still here. */ | 1119 | /* Check that the PCMCIA card is still here. */ |
@@ -1260,7 +1148,7 @@ static int smc_close(struct net_device *dev) | |||
1260 | struct pcmcia_device *link = smc->p_dev; | 1148 | struct pcmcia_device *link = smc->p_dev; |
1261 | unsigned int ioaddr = dev->base_addr; | 1149 | unsigned int ioaddr = dev->base_addr; |
1262 | 1150 | ||
1263 | DEBUG(0, "%s: smc_close(), status %4.4x.\n", | 1151 | dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n", |
1264 | dev->name, inw(ioaddr + BANK_SELECT)); | 1152 | dev->name, inw(ioaddr + BANK_SELECT)); |
1265 | 1153 | ||
1266 | netif_stop_queue(dev); | 1154 | netif_stop_queue(dev); |
@@ -1327,7 +1215,7 @@ static void smc_hardware_send_packet(struct net_device * dev) | |||
1327 | u_char *buf = skb->data; | 1215 | u_char *buf = skb->data; |
1328 | u_int length = skb->len; /* The chip will pad to ethernet min. */ | 1216 | u_int length = skb->len; /* The chip will pad to ethernet min. */ |
1329 | 1217 | ||
1330 | DEBUG(2, "%s: Trying to xmit packet of length %d.\n", | 1218 | pr_debug("%s: Trying to xmit packet of length %d.\n", |
1331 | dev->name, length); | 1219 | dev->name, length); |
1332 | 1220 | ||
1333 | /* send the packet length: +6 for status word, length, and ctl */ | 1221 | /* send the packet length: +6 for status word, length, and ctl */ |
@@ -1382,7 +1270,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb, | |||
1382 | 1270 | ||
1383 | netif_stop_queue(dev); | 1271 | netif_stop_queue(dev); |
1384 | 1272 | ||
1385 | DEBUG(2, "%s: smc_start_xmit(length = %d) called," | 1273 | pr_debug("%s: smc_start_xmit(length = %d) called," |
1386 | " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2)); | 1274 | " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2)); |
1387 | 1275 | ||
1388 | if (smc->saved_skb) { | 1276 | if (smc->saved_skb) { |
@@ -1429,7 +1317,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb, | |||
1429 | } | 1317 | } |
1430 | 1318 | ||
1431 | /* Otherwise defer until the Tx-space-allocated interrupt. */ | 1319 | /* Otherwise defer until the Tx-space-allocated interrupt. */ |
1432 | DEBUG(2, "%s: memory allocation deferred.\n", dev->name); | 1320 | pr_debug("%s: memory allocation deferred.\n", dev->name); |
1433 | outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); | 1321 | outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); |
1434 | spin_unlock_irqrestore(&smc->lock, flags); | 1322 | spin_unlock_irqrestore(&smc->lock, flags); |
1435 | 1323 | ||
@@ -1494,7 +1382,7 @@ static void smc_eph_irq(struct net_device *dev) | |||
1494 | 1382 | ||
1495 | SMC_SELECT_BANK(0); | 1383 | SMC_SELECT_BANK(0); |
1496 | ephs = inw(ioaddr + EPH); | 1384 | ephs = inw(ioaddr + EPH); |
1497 | DEBUG(2, "%s: Ethernet protocol handler interrupt, status" | 1385 | pr_debug("%s: Ethernet protocol handler interrupt, status" |
1498 | " %4.4x.\n", dev->name, ephs); | 1386 | " %4.4x.\n", dev->name, ephs); |
1499 | /* Could be a counter roll-over warning: update stats. */ | 1387 | /* Could be a counter roll-over warning: update stats. */ |
1500 | card_stats = inw(ioaddr + COUNTER); | 1388 | card_stats = inw(ioaddr + COUNTER); |
@@ -1534,7 +1422,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1534 | 1422 | ||
1535 | ioaddr = dev->base_addr; | 1423 | ioaddr = dev->base_addr; |
1536 | 1424 | ||
1537 | DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name, | 1425 | pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name, |
1538 | irq, ioaddr); | 1426 | irq, ioaddr); |
1539 | 1427 | ||
1540 | spin_lock(&smc->lock); | 1428 | spin_lock(&smc->lock); |
@@ -1543,7 +1431,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1543 | if ((saved_bank & 0xff00) != 0x3300) { | 1431 | if ((saved_bank & 0xff00) != 0x3300) { |
1544 | /* The device does not exist -- the card could be off-line, or | 1432 | /* The device does not exist -- the card could be off-line, or |
1545 | maybe it has been ejected. */ | 1433 | maybe it has been ejected. */ |
1546 | DEBUG(1, "%s: SMC91c92 interrupt %d for non-existent" | 1434 | pr_debug("%s: SMC91c92 interrupt %d for non-existent" |
1547 | "/ejected device.\n", dev->name, irq); | 1435 | "/ejected device.\n", dev->name, irq); |
1548 | handled = 0; | 1436 | handled = 0; |
1549 | goto irq_done; | 1437 | goto irq_done; |
@@ -1557,7 +1445,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1557 | 1445 | ||
1558 | do { /* read the status flag, and mask it */ | 1446 | do { /* read the status flag, and mask it */ |
1559 | status = inw(ioaddr + INTERRUPT) & 0xff; | 1447 | status = inw(ioaddr + INTERRUPT) & 0xff; |
1560 | DEBUG(3, "%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, | 1448 | pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, |
1561 | status, mask); | 1449 | status, mask); |
1562 | if ((status & mask) == 0) { | 1450 | if ((status & mask) == 0) { |
1563 | if (bogus_cnt == INTR_WORK) | 1451 | if (bogus_cnt == INTR_WORK) |
@@ -1602,7 +1490,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1602 | smc_eph_irq(dev); | 1490 | smc_eph_irq(dev); |
1603 | } while (--bogus_cnt); | 1491 | } while (--bogus_cnt); |
1604 | 1492 | ||
1605 | DEBUG(3, " Restoring saved registers mask %2.2x bank %4.4x" | 1493 | pr_debug(" Restoring saved registers mask %2.2x bank %4.4x" |
1606 | " pointer %4.4x.\n", mask, saved_bank, saved_pointer); | 1494 | " pointer %4.4x.\n", mask, saved_bank, saved_pointer); |
1607 | 1495 | ||
1608 | /* restore state register */ | 1496 | /* restore state register */ |
@@ -1610,7 +1498,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1610 | outw(saved_pointer, ioaddr + POINTER); | 1498 | outw(saved_pointer, ioaddr + POINTER); |
1611 | SMC_SELECT_BANK(saved_bank); | 1499 | SMC_SELECT_BANK(saved_bank); |
1612 | 1500 | ||
1613 | DEBUG(3, "%s: Exiting interrupt IRQ%d.\n", dev->name, irq); | 1501 | pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq); |
1614 | 1502 | ||
1615 | irq_done: | 1503 | irq_done: |
1616 | 1504 | ||
@@ -1661,7 +1549,7 @@ static void smc_rx(struct net_device *dev) | |||
1661 | rx_status = inw(ioaddr + DATA_1); | 1549 | rx_status = inw(ioaddr + DATA_1); |
1662 | packet_length = inw(ioaddr + DATA_1) & 0x07ff; | 1550 | packet_length = inw(ioaddr + DATA_1) & 0x07ff; |
1663 | 1551 | ||
1664 | DEBUG(2, "%s: Receive status %4.4x length %d.\n", | 1552 | pr_debug("%s: Receive status %4.4x length %d.\n", |
1665 | dev->name, rx_status, packet_length); | 1553 | dev->name, rx_status, packet_length); |
1666 | 1554 | ||
1667 | if (!(rx_status & RS_ERRORS)) { | 1555 | if (!(rx_status & RS_ERRORS)) { |
@@ -1672,7 +1560,7 @@ static void smc_rx(struct net_device *dev) | |||
1672 | skb = dev_alloc_skb(packet_length+2); | 1560 | skb = dev_alloc_skb(packet_length+2); |
1673 | 1561 | ||
1674 | if (skb == NULL) { | 1562 | if (skb == NULL) { |
1675 | DEBUG(1, "%s: Low memory, packet dropped.\n", dev->name); | 1563 | pr_debug("%s: Low memory, packet dropped.\n", dev->name); |
1676 | dev->stats.rx_dropped++; | 1564 | dev->stats.rx_dropped++; |
1677 | outw(MC_RELEASE, ioaddr + MMU_CMD); | 1565 | outw(MC_RELEASE, ioaddr + MMU_CMD); |
1678 | return; | 1566 | return; |
@@ -1832,7 +1720,7 @@ static void smc_reset(struct net_device *dev) | |||
1832 | struct smc_private *smc = netdev_priv(dev); | 1720 | struct smc_private *smc = netdev_priv(dev); |
1833 | int i; | 1721 | int i; |
1834 | 1722 | ||
1835 | DEBUG(0, "%s: smc91c92 reset called.\n", dev->name); | 1723 | pr_debug("%s: smc91c92 reset called.\n", dev->name); |
1836 | 1724 | ||
1837 | /* The first interaction must be a write to bring the chip out | 1725 | /* The first interaction must be a write to bring the chip out |
1838 | of sleep mode. */ | 1726 | of sleep mode. */ |
@@ -2149,18 +2037,6 @@ static u32 smc_get_link(struct net_device *dev) | |||
2149 | return ret; | 2037 | return ret; |
2150 | } | 2038 | } |
2151 | 2039 | ||
2152 | #ifdef PCMCIA_DEBUG | ||
2153 | static u32 smc_get_msglevel(struct net_device *dev) | ||
2154 | { | ||
2155 | return pc_debug; | ||
2156 | } | ||
2157 | |||
2158 | static void smc_set_msglevel(struct net_device *dev, u32 val) | ||
2159 | { | ||
2160 | pc_debug = val; | ||
2161 | } | ||
2162 | #endif | ||
2163 | |||
2164 | static int smc_nway_reset(struct net_device *dev) | 2040 | static int smc_nway_reset(struct net_device *dev) |
2165 | { | 2041 | { |
2166 | struct smc_private *smc = netdev_priv(dev); | 2042 | struct smc_private *smc = netdev_priv(dev); |
@@ -2184,10 +2060,6 @@ static const struct ethtool_ops ethtool_ops = { | |||
2184 | .get_settings = smc_get_settings, | 2060 | .get_settings = smc_get_settings, |
2185 | .set_settings = smc_set_settings, | 2061 | .set_settings = smc_set_settings, |
2186 | .get_link = smc_get_link, | 2062 | .get_link = smc_get_link, |
2187 | #ifdef PCMCIA_DEBUG | ||
2188 | .get_msglevel = smc_get_msglevel, | ||
2189 | .set_msglevel = smc_set_msglevel, | ||
2190 | #endif | ||
2191 | .nway_reset = smc_nway_reset, | 2063 | .nway_reset = smc_nway_reset, |
2192 | }; | 2064 | }; |
2193 | 2065 | ||
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index cf8423102538..fe504b7f369f 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -211,20 +211,6 @@ enum xirc_cmd { /* Commands */ | |||
211 | 211 | ||
212 | static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; | 212 | static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; |
213 | 213 | ||
214 | /**************** | ||
215 | * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
216 | * you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
217 | * left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
218 | * be present but disabled -- but it can then be enabled for specific | ||
219 | * modules at load time with a 'pc_debug=#' option to insmod. | ||
220 | */ | ||
221 | #ifdef PCMCIA_DEBUG | ||
222 | static int pc_debug = PCMCIA_DEBUG; | ||
223 | module_param(pc_debug, int, 0); | ||
224 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KDBG_XIRC args) | ||
225 | #else | ||
226 | #define DEBUG(n, args...) | ||
227 | #endif | ||
228 | 214 | ||
229 | #define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: " | 215 | #define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: " |
230 | #define KERR_XIRC KERN_ERR "xirc2ps_cs: " | 216 | #define KERR_XIRC KERN_ERR "xirc2ps_cs: " |
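The PCMCIA_DEBUG/pc_debug block removed above compiled driver printks in or out per module and filtered them at run time by level. The replacement calls throughout this patch use the kernel's stock helpers instead: dev_dbg() where a struct device is at hand, pr_debug() otherwise; both are no-ops unless the file is built with DEBUG or dynamic debug is enabled, and the per-call level argument is simply dropped in the conversion. A minimal contrast, with the message text invented for the example:

    /* old: per-driver macro, gated by PCMCIA_DEBUG and the pc_debug level */
    #ifdef PCMCIA_DEBUG
    static int pc_debug = PCMCIA_DEBUG;
    module_param(pc_debug, int, 0);
    #define DEBUG(n, args...) if (pc_debug > (n)) printk(KDBG_XIRC args)
    #else
    #define DEBUG(n, args...)
    #endif

            DEBUG(3, "%s: example status %#x\n", dev->name, status);

    /* new: standard helpers, enabled via -DDEBUG or CONFIG_DYNAMIC_DEBUG */
            pr_debug("%s: example status %#x\n", dev->name, status);
            dev_dbg(&link->dev, "%s: example status %#x\n", dev->name, status);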
@@ -359,7 +345,7 @@ static void xirc_tx_timeout(struct net_device *dev); | |||
359 | static void xirc2ps_tx_timeout_task(struct work_struct *work); | 345 | static void xirc2ps_tx_timeout_task(struct work_struct *work); |
360 | static void set_addresses(struct net_device *dev); | 346 | static void set_addresses(struct net_device *dev); |
361 | static void set_multicast_list(struct net_device *dev); | 347 | static void set_multicast_list(struct net_device *dev); |
362 | static int set_card_type(struct pcmcia_device *link, const void *s); | 348 | static int set_card_type(struct pcmcia_device *link); |
363 | static int do_config(struct net_device *dev, struct ifmap *map); | 349 | static int do_config(struct net_device *dev, struct ifmap *map); |
364 | static int do_open(struct net_device *dev); | 350 | static int do_open(struct net_device *dev); |
365 | static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 351 | static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
@@ -371,28 +357,6 @@ static void do_powerdown(struct net_device *dev); | |||
371 | static int do_stop(struct net_device *dev); | 357 | static int do_stop(struct net_device *dev); |
372 | 358 | ||
373 | /*=============== Helper functions =========================*/ | 359 | /*=============== Helper functions =========================*/ |
374 | static int | ||
375 | first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse) | ||
376 | { | ||
377 | int err; | ||
378 | |||
379 | if ((err = pcmcia_get_first_tuple(handle, tuple)) == 0 && | ||
380 | (err = pcmcia_get_tuple_data(handle, tuple)) == 0) | ||
381 | err = pcmcia_parse_tuple(tuple, parse); | ||
382 | return err; | ||
383 | } | ||
384 | |||
385 | static int | ||
386 | next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse) | ||
387 | { | ||
388 | int err; | ||
389 | |||
390 | if ((err = pcmcia_get_next_tuple(handle, tuple)) == 0 && | ||
391 | (err = pcmcia_get_tuple_data(handle, tuple)) == 0) | ||
392 | err = pcmcia_parse_tuple(tuple, parse); | ||
393 | return err; | ||
394 | } | ||
395 | |||
396 | #define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) | 360 | #define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) |
397 | #define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) | 361 | #define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) |
398 | #define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) | 362 | #define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) |
@@ -400,7 +364,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse) | |||
400 | #define PutWord(reg,value) outw((value), ioaddr+(reg)) | 364 | #define PutWord(reg,value) outw((value), ioaddr+(reg)) |
401 | 365 | ||
402 | /*====== Functions used for debugging =================================*/ | 366 | /*====== Functions used for debugging =================================*/ |
403 | #if defined(PCMCIA_DEBUG) && 0 /* reading regs may change system status */ | 367 | #if 0 /* reading regs may change system status */ |
404 | static void | 368 | static void |
405 | PrintRegisters(struct net_device *dev) | 369 | PrintRegisters(struct net_device *dev) |
406 | { | 370 | { |
@@ -432,7 +396,7 @@ PrintRegisters(struct net_device *dev) | |||
432 | } | 396 | } |
433 | } | 397 | } |
434 | } | 398 | } |
435 | #endif /* PCMCIA_DEBUG */ | 399 | #endif /* 0 */ |
436 | 400 | ||
437 | /*============== MII Management functions ===============*/ | 401 | /*============== MII Management functions ===============*/ |
438 | 402 | ||
@@ -576,7 +540,7 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
576 | struct net_device *dev; | 540 | struct net_device *dev; |
577 | local_info_t *local; | 541 | local_info_t *local; |
578 | 542 | ||
579 | DEBUG(0, "attach()\n"); | 543 | dev_dbg(&link->dev, "attach()\n"); |
580 | 544 | ||
581 | /* Allocate the device structure */ | 545 | /* Allocate the device structure */ |
582 | dev = alloc_etherdev(sizeof(local_info_t)); | 546 | dev = alloc_etherdev(sizeof(local_info_t)); |
@@ -592,7 +556,6 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
592 | link->conf.IntType = INT_MEMORY_AND_IO; | 556 | link->conf.IntType = INT_MEMORY_AND_IO; |
593 | link->conf.ConfigIndex = 1; | 557 | link->conf.ConfigIndex = 1; |
594 | link->irq.Handler = xirc2ps_interrupt; | 558 | link->irq.Handler = xirc2ps_interrupt; |
595 | link->irq.Instance = dev; | ||
596 | 559 | ||
597 | /* Fill in card specific entries */ | 560 | /* Fill in card specific entries */ |
598 | dev->netdev_ops = &netdev_ops; | 561 | dev->netdev_ops = &netdev_ops; |
@@ -615,7 +578,7 @@ xirc2ps_detach(struct pcmcia_device *link) | |||
615 | { | 578 | { |
616 | struct net_device *dev = link->priv; | 579 | struct net_device *dev = link->priv; |
617 | 580 | ||
618 | DEBUG(0, "detach(0x%p)\n", link); | 581 | dev_dbg(&link->dev, "detach\n"); |
619 | 582 | ||
620 | if (link->dev_node) | 583 | if (link->dev_node) |
621 | unregister_netdev(dev); | 584 | unregister_netdev(dev); |
@@ -644,17 +607,25 @@ xirc2ps_detach(struct pcmcia_device *link) | |||
644 | * | 607 | * |
645 | */ | 608 | */ |
646 | static int | 609 | static int |
647 | set_card_type(struct pcmcia_device *link, const void *s) | 610 | set_card_type(struct pcmcia_device *link) |
648 | { | 611 | { |
649 | struct net_device *dev = link->priv; | 612 | struct net_device *dev = link->priv; |
650 | local_info_t *local = netdev_priv(dev); | 613 | local_info_t *local = netdev_priv(dev); |
651 | #ifdef PCMCIA_DEBUG | 614 | u8 *buf; |
652 | unsigned cisrev = ((const unsigned char *)s)[2]; | 615 | unsigned int cisrev, mediaid, prodid; |
653 | #endif | 616 | size_t len; |
654 | unsigned mediaid= ((const unsigned char *)s)[3]; | 617 | |
655 | unsigned prodid = ((const unsigned char *)s)[4]; | 618 | len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf); |
619 | if (len < 5) { | ||
620 | dev_err(&link->dev, "invalid CIS -- sorry\n"); | ||
621 | return 0; | ||
622 | } | ||
656 | 623 | ||
657 | DEBUG(0, "cisrev=%02x mediaid=%02x prodid=%02x\n", | 624 | cisrev = buf[2]; |
625 | mediaid = buf[3]; | ||
626 | prodid = buf[4]; | ||
627 | |||
628 | dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n", | ||
658 | cisrev, mediaid, prodid); | 629 | cisrev, mediaid, prodid); |
659 | 630 | ||
660 | local->mohawk = 0; | 631 | local->mohawk = 0; |
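set_card_type() no longer takes a pre-read buffer; it fetches the raw CISTPL_MANFID tuple itself with pcmcia_get_tuple(), which allocates a copy of the tuple data, stores the pointer through its third argument and returns the length (0 on failure). The buffer then belongs to the caller, which is why the new mhz_setup() above ends with kfree(buf). A hedged sketch of the calling convention as used here:

    u8 *buf = NULL;
    size_t len;

    /* pcmcia_get_tuple() kmallocs 'buf'; a short length means the
     * tuple was absent or truncated. */
    len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf);
    if (!buf || len < 5) {
            kfree(buf);                     /* kfree(NULL) is harmless */
            dev_err(&link->dev, "invalid CIS -- sorry\n");
            return 0;                       /* set_card_type() failure */
    }

    /* for MANFID as read by this driver: buf[2]=cisrev, buf[3]=mediaid,
     * buf[4]=prodid */
    dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n",
            buf[2], buf[3], buf[4]);
    kfree(buf);
    /* ...then go on to classify the card from mediaid/prodid... */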
@@ -761,6 +732,26 @@ xirc2ps_config_check(struct pcmcia_device *p_dev, | |||
761 | 732 | ||
762 | } | 733 | } |
763 | 734 | ||
735 | |||
736 | static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, | ||
737 | tuple_t *tuple, | ||
738 | void *priv) | ||
739 | { | ||
740 | struct net_device *dev = priv; | ||
741 | int i; | ||
742 | |||
743 | if (tuple->TupleDataLen != 13) | ||
744 | return -EINVAL; | ||
745 | if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) || | ||
746 | (tuple->TupleData[2] != 6)) | ||
747 | return -EINVAL; | ||
748 | /* another try (James Lehmer's CE2 version 4.1)*/ | ||
749 | for (i = 2; i < 6; i++) | ||
750 | dev->dev_addr[i] = tuple->TupleData[i+2]; | ||
751 | return 0; | ||
752 | }; | ||
753 | |||
754 | |||
764 | /**************** | 755 | /**************** |
765 | * xirc2ps_config() is scheduled to run after a CARD_INSERTION event | 756 | * xirc2ps_config() is scheduled to run after a CARD_INSERTION event |
766 | * is received, to configure the PCMCIA socket, and to make the | 757 | * is received, to configure the PCMCIA socket, and to make the |
@@ -772,33 +763,21 @@ xirc2ps_config(struct pcmcia_device * link) | |||
772 | struct net_device *dev = link->priv; | 763 | struct net_device *dev = link->priv; |
773 | local_info_t *local = netdev_priv(dev); | 764 | local_info_t *local = netdev_priv(dev); |
774 | unsigned int ioaddr; | 765 | unsigned int ioaddr; |
775 | tuple_t tuple; | 766 | int err; |
776 | cisparse_t parse; | 767 | u8 *buf; |
777 | int err, i; | 768 | size_t len; |
778 | u_char buf[64]; | ||
779 | cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data; | ||
780 | 769 | ||
781 | local->dingo_ccr = NULL; | 770 | local->dingo_ccr = NULL; |
782 | 771 | ||
783 | DEBUG(0, "config(0x%p)\n", link); | 772 | dev_dbg(&link->dev, "config\n"); |
784 | |||
785 | /* | ||
786 | * This reads the card's CONFIG tuple to find its configuration | ||
787 | * registers. | ||
788 | */ | ||
789 | tuple.Attributes = 0; | ||
790 | tuple.TupleData = buf; | ||
791 | tuple.TupleDataMax = 64; | ||
792 | tuple.TupleOffset = 0; | ||
793 | 773 | ||
794 | /* Is this a valid card */ | 774 | /* Is this a valid card */ |
795 | tuple.DesiredTuple = CISTPL_MANFID; | 775 | if (link->has_manf_id == 0) { |
796 | if ((err=first_tuple(link, &tuple, &parse))) { | ||
797 | printk(KNOT_XIRC "manfid not found in CIS\n"); | 776 | printk(KNOT_XIRC "manfid not found in CIS\n"); |
798 | goto failure; | 777 | goto failure; |
799 | } | 778 | } |
800 | 779 | ||
801 | switch(parse.manfid.manf) { | 780 | switch (link->manf_id) { |
802 | case MANFID_XIRCOM: | 781 | case MANFID_XIRCOM: |
803 | local->manf_str = "Xircom"; | 782 | local->manf_str = "Xircom"; |
804 | break; | 783 | break; |
@@ -817,65 +796,44 @@ xirc2ps_config(struct pcmcia_device * link) | |||
817 | break; | 796 | break; |
818 | default: | 797 | default: |
819 | printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n", | 798 | printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n", |
820 | (unsigned)parse.manfid.manf); | 799 | (unsigned)link->manf_id); |
821 | goto failure; | 800 | goto failure; |
822 | } | 801 | } |
823 | DEBUG(0, "found %s card\n", local->manf_str); | 802 | dev_dbg(&link->dev, "found %s card\n", local->manf_str); |
824 | 803 | ||
825 | if (!set_card_type(link, buf)) { | 804 | if (!set_card_type(link)) { |
826 | printk(KNOT_XIRC "this card is not supported\n"); | 805 | printk(KNOT_XIRC "this card is not supported\n"); |
827 | goto failure; | 806 | goto failure; |
828 | } | 807 | } |
829 | 808 | ||
830 | /* get the ethernet address from the CIS */ | 809 | /* get the ethernet address from the CIS */ |
831 | tuple.DesiredTuple = CISTPL_FUNCE; | 810 | err = pcmcia_get_mac_from_cis(link, dev); |
832 | for (err = first_tuple(link, &tuple, &parse); !err; | 811 | |
833 | err = next_tuple(link, &tuple, &parse)) { | 812 | /* not found: try to get the node-id from tuple 0x89 */ |
834 | /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries: | 813 | if (err) { |
835 | * the first one with a length of zero the second correct - | 814 | len = pcmcia_get_tuple(link, 0x89, &buf); |
836 | * so I skip all entries with length 0 */ | 815 | /* data layout looks like tuple 0x22 */ |
837 | if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID | 816 | if (buf && len == 8) { |
838 | && ((cistpl_lan_node_id_t *)parse.funce.data)->nb) | 817 | if (*buf == CISTPL_FUNCE_LAN_NODE_ID) { |
839 | break; | 818 | int i; |
840 | } | 819 | for (i = 2; i < 6; i++) |
841 | if (err) { /* not found: try to get the node-id from tuple 0x89 */ | 820 | dev->dev_addr[i] = buf[i+2]; |
842 | tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */ | 821 | } else |
843 | if ((err = pcmcia_get_first_tuple(link, &tuple)) == 0 && | 822 | err = -1; |
844 | (err = pcmcia_get_tuple_data(link, &tuple)) == 0) { | ||
845 | if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID) | ||
846 | memcpy(&parse, buf, 8); | ||
847 | else | ||
848 | err = -1; | ||
849 | } | ||
850 | } | ||
851 | if (err) { /* another try (James Lehmer's CE2 version 4.1)*/ | ||
852 | tuple.DesiredTuple = CISTPL_FUNCE; | ||
853 | for (err = first_tuple(link, &tuple, &parse); !err; | ||
854 | err = next_tuple(link, &tuple, &parse)) { | ||
855 | if (parse.funce.type == 0x02 && parse.funce.data[0] == 1 | ||
856 | && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) { | ||
857 | buf[1] = 4; | ||
858 | memcpy(&parse, buf+1, 8); | ||
859 | break; | ||
860 | } | 823 | } |
861 | } | 824 | kfree(buf); |
862 | } | 825 | } |
826 | |||
827 | if (err) | ||
828 | err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev); | ||
829 | |||
863 | if (err) { | 830 | if (err) { |
864 | printk(KNOT_XIRC "node-id not found in CIS\n"); | 831 | printk(KNOT_XIRC "node-id not found in CIS\n"); |
865 | goto failure; | 832 | goto failure; |
866 | } | 833 | } |
867 | node_id = (cistpl_lan_node_id_t *)parse.funce.data; | ||
868 | if (node_id->nb != 6) { | ||
869 | printk(KNOT_XIRC "malformed node-id in CIS\n"); | ||
870 | goto failure; | ||
871 | } | ||
872 | for (i=0; i < 6; i++) | ||
873 | dev->dev_addr[i] = node_id->id[i]; | ||
874 | 834 | ||
875 | link->io.IOAddrLines =10; | 835 | link->io.IOAddrLines =10; |
876 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 836 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
877 | link->irq.Attributes = IRQ_HANDLE_PRESENT; | ||
878 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
879 | if (local->modem) { | 837 | if (local->modem) { |
880 | int pass; | 838 | int pass; |
881 | 839 | ||
@@ -916,10 +874,8 @@ xirc2ps_config(struct pcmcia_device * link) | |||
916 | goto port_found; | 874 | goto port_found; |
917 | } | 875 | } |
918 | link->io.BasePort1 = 0; /* let CS decide */ | 876 | link->io.BasePort1 = 0; /* let CS decide */ |
919 | if ((err=pcmcia_request_io(link, &link->io))) { | 877 | if ((err=pcmcia_request_io(link, &link->io))) |
920 | cs_error(link, RequestIO, err); | ||
921 | goto config_error; | 878 | goto config_error; |
922 | } | ||
923 | } | 879 | } |
924 | port_found: | 880 | port_found: |
925 | if (err) | 881 | if (err) |
@@ -929,19 +885,15 @@ xirc2ps_config(struct pcmcia_device * link) | |||
929 | * Now allocate an interrupt line. Note that this does not | 885 | * Now allocate an interrupt line. Note that this does not |
930 | * actually assign a handler to the interrupt. | 886 | * actually assign a handler to the interrupt. |
931 | */ | 887 | */ |
932 | if ((err=pcmcia_request_irq(link, &link->irq))) { | 888 | if ((err=pcmcia_request_irq(link, &link->irq))) |
933 | cs_error(link, RequestIRQ, err); | ||
934 | goto config_error; | 889 | goto config_error; |
935 | } | ||
936 | 890 | ||
937 | /**************** | 891 | /**************** |
938 | * This actually configures the PCMCIA socket -- setting up | 892 | * This actually configures the PCMCIA socket -- setting up |
939 | * the I/O windows and the interrupt mapping. | 893 | * the I/O windows and the interrupt mapping. |
940 | */ | 894 | */ |
941 | if ((err=pcmcia_request_configuration(link, &link->conf))) { | 895 | if ((err=pcmcia_request_configuration(link, &link->conf))) |
942 | cs_error(link, RequestConfiguration, err); | ||
943 | goto config_error; | 896 | goto config_error; |
944 | } | ||
945 | 897 | ||
946 | if (local->dingo) { | 898 | if (local->dingo) { |
947 | conf_reg_t reg; | 899 | conf_reg_t reg; |
@@ -956,17 +908,13 @@ xirc2ps_config(struct pcmcia_device * link) | |||
956 | reg.Action = CS_WRITE; | 908 | reg.Action = CS_WRITE; |
957 | reg.Offset = CISREG_IOBASE_0; | 909 | reg.Offset = CISREG_IOBASE_0; |
958 | reg.Value = link->io.BasePort2 & 0xff; | 910 | reg.Value = link->io.BasePort2 & 0xff; |
959 | if ((err = pcmcia_access_configuration_register(link, ®))) { | 911 | if ((err = pcmcia_access_configuration_register(link, ®))) |
960 | cs_error(link, AccessConfigurationRegister, err); | ||
961 | goto config_error; | 912 | goto config_error; |
962 | } | ||
963 | reg.Action = CS_WRITE; | 913 | reg.Action = CS_WRITE; |
964 | reg.Offset = CISREG_IOBASE_1; | 914 | reg.Offset = CISREG_IOBASE_1; |
965 | reg.Value = (link->io.BasePort2 >> 8) & 0xff; | 915 | reg.Value = (link->io.BasePort2 >> 8) & 0xff; |
966 | if ((err = pcmcia_access_configuration_register(link, ®))) { | 916 | if ((err = pcmcia_access_configuration_register(link, ®))) |
967 | cs_error(link, AccessConfigurationRegister, err); | ||
968 | goto config_error; | 917 | goto config_error; |
969 | } | ||
970 | 918 | ||
971 | /* There is no config entry for the Ethernet part which | 919 | /* There is no config entry for the Ethernet part which |
972 | * is at 0x0800. So we allocate a window into the attribute | 920 | * is at 0x0800. So we allocate a window into the attribute |
@@ -975,17 +923,14 @@ xirc2ps_config(struct pcmcia_device * link) | |||
975 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 923 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
976 | req.Base = req.Size = 0; | 924 | req.Base = req.Size = 0; |
977 | req.AccessSpeed = 0; | 925 | req.AccessSpeed = 0; |
978 | if ((err = pcmcia_request_window(&link, &req, &link->win))) { | 926 | if ((err = pcmcia_request_window(link, &req, &link->win))) |
979 | cs_error(link, RequestWindow, err); | ||
980 | goto config_error; | 927 | goto config_error; |
981 | } | 928 | |
982 | local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800; | 929 | local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800; |
983 | mem.CardOffset = 0x0; | 930 | mem.CardOffset = 0x0; |
984 | mem.Page = 0; | 931 | mem.Page = 0; |
985 | if ((err = pcmcia_map_mem_page(link->win, &mem))) { | 932 | if ((err = pcmcia_map_mem_page(link, link->win, &mem))) |
986 | cs_error(link, MapMemPage, err); | ||
987 | goto config_error; | 933 | goto config_error; |
988 | } | ||
989 | 934 | ||
990 | /* Setup the CCRs; there are no infos in the CIS about the Ethernet | 935 | /* Setup the CCRs; there are no infos in the CIS about the Ethernet |
991 | * part. | 936 | * part. |
@@ -1044,7 +989,7 @@ xirc2ps_config(struct pcmcia_device * link) | |||
1044 | do_reset(dev, 1); /* a kludge to make the cem56 work */ | 989 | do_reset(dev, 1); /* a kludge to make the cem56 work */ |
1045 | 990 | ||
1046 | link->dev_node = &local->node; | 991 | link->dev_node = &local->node; |
1047 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 992 | SET_NETDEV_DEV(dev, &link->dev); |
1048 | 993 | ||
1049 | if ((err=register_netdev(dev))) { | 994 | if ((err=register_netdev(dev))) { |
1050 | printk(KNOT_XIRC "register_netdev() failed\n"); | 995 | printk(KNOT_XIRC "register_netdev() failed\n"); |
@@ -1077,7 +1022,7 @@ xirc2ps_config(struct pcmcia_device * link) | |||
1077 | static void | 1022 | static void |
1078 | xirc2ps_release(struct pcmcia_device *link) | 1023 | xirc2ps_release(struct pcmcia_device *link) |
1079 | { | 1024 | { |
1080 | DEBUG(0, "release(0x%p)\n", link); | 1025 | dev_dbg(&link->dev, "release\n"); |
1081 | 1026 | ||
1082 | if (link->win) { | 1027 | if (link->win) { |
1083 | struct net_device *dev = link->priv; | 1028 | struct net_device *dev = link->priv; |
@@ -1144,7 +1089,7 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1144 | PutByte(XIRCREG_CR, 0); | 1089 | PutByte(XIRCREG_CR, 0); |
1145 | } | 1090 | } |
1146 | 1091 | ||
1147 | DEBUG(6, "%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); | 1092 | pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); |
1148 | 1093 | ||
1149 | saved_page = GetByte(XIRCREG_PR); | 1094 | saved_page = GetByte(XIRCREG_PR); |
1150 | /* Read the ISR to see whats the cause for the interrupt. | 1095 | /* Read the ISR to see whats the cause for the interrupt. |
@@ -1154,7 +1099,7 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1154 | bytes_rcvd = 0; | 1099 | bytes_rcvd = 0; |
1155 | loop_entry: | 1100 | loop_entry: |
1156 | if (int_status == 0xff) { /* card may be ejected */ | 1101 | if (int_status == 0xff) { /* card may be ejected */ |
1157 | DEBUG(3, "%s: interrupt %d for dead card\n", dev->name, irq); | 1102 | pr_debug("%s: interrupt %d for dead card\n", dev->name, irq); |
1158 | goto leave; | 1103 | goto leave; |
1159 | } | 1104 | } |
1160 | eth_status = GetByte(XIRCREG_ESR); | 1105 | eth_status = GetByte(XIRCREG_ESR); |
@@ -1167,7 +1112,7 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1167 | PutByte(XIRCREG40_TXST0, 0); | 1112 | PutByte(XIRCREG40_TXST0, 0); |
1168 | PutByte(XIRCREG40_TXST1, 0); | 1113 | PutByte(XIRCREG40_TXST1, 0); |
1169 | 1114 | ||
1170 | DEBUG(3, "%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", | 1115 | pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", |
1171 | dev->name, int_status, eth_status, rx_status, tx_status); | 1116 | dev->name, int_status, eth_status, rx_status, tx_status); |
1172 | 1117 | ||
1173 | /***** receive section ******/ | 1118 | /***** receive section ******/ |
@@ -1178,14 +1123,14 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1178 | /* too many bytes received during this int, drop the rest of the | 1123 | /* too many bytes received during this int, drop the rest of the |
1179 | * packets */ | 1124 | * packets */ |
1180 | dev->stats.rx_dropped++; | 1125 | dev->stats.rx_dropped++; |
1181 | DEBUG(2, "%s: RX drop, too much done\n", dev->name); | 1126 | pr_debug("%s: RX drop, too much done\n", dev->name); |
1182 | } else if (rsr & PktRxOk) { | 1127 | } else if (rsr & PktRxOk) { |
1183 | struct sk_buff *skb; | 1128 | struct sk_buff *skb; |
1184 | 1129 | ||
1185 | pktlen = GetWord(XIRCREG0_RBC); | 1130 | pktlen = GetWord(XIRCREG0_RBC); |
1186 | bytes_rcvd += pktlen; | 1131 | bytes_rcvd += pktlen; |
1187 | 1132 | ||
1188 | DEBUG(5, "rsr=%#02x packet_length=%u\n", rsr, pktlen); | 1133 | pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen); |
1189 | 1134 | ||
1190 | skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */ | 1135 | skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */ |
1191 | if (!skb) { | 1136 | if (!skb) { |
@@ -1253,19 +1198,19 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1253 | dev->stats.multicast++; | 1198 | dev->stats.multicast++; |
1254 | } | 1199 | } |
1255 | } else { /* bad packet */ | 1200 | } else { /* bad packet */ |
1256 | DEBUG(5, "rsr=%#02x\n", rsr); | 1201 | pr_debug("rsr=%#02x\n", rsr); |
1257 | } | 1202 | } |
1258 | if (rsr & PktTooLong) { | 1203 | if (rsr & PktTooLong) { |
1259 | dev->stats.rx_frame_errors++; | 1204 | dev->stats.rx_frame_errors++; |
1260 | DEBUG(3, "%s: Packet too long\n", dev->name); | 1205 | pr_debug("%s: Packet too long\n", dev->name); |
1261 | } | 1206 | } |
1262 | if (rsr & CRCErr) { | 1207 | if (rsr & CRCErr) { |
1263 | dev->stats.rx_crc_errors++; | 1208 | dev->stats.rx_crc_errors++; |
1264 | DEBUG(3, "%s: CRC error\n", dev->name); | 1209 | pr_debug("%s: CRC error\n", dev->name); |
1265 | } | 1210 | } |
1266 | if (rsr & AlignErr) { | 1211 | if (rsr & AlignErr) { |
1267 | dev->stats.rx_fifo_errors++; /* okay ? */ | 1212 | dev->stats.rx_fifo_errors++; /* okay ? */ |
1268 | DEBUG(3, "%s: Alignment error\n", dev->name); | 1213 | pr_debug("%s: Alignment error\n", dev->name); |
1269 | } | 1214 | } |
1270 | 1215 | ||
1271 | /* clear the received/dropped/error packet */ | 1216 | /* clear the received/dropped/error packet */ |
@@ -1277,7 +1222,7 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1277 | if (rx_status & 0x10) { /* Receive overrun */ | 1222 | if (rx_status & 0x10) { /* Receive overrun */ |
1278 | dev->stats.rx_over_errors++; | 1223 | dev->stats.rx_over_errors++; |
1279 | PutByte(XIRCREG_CR, ClearRxOvrun); | 1224 | PutByte(XIRCREG_CR, ClearRxOvrun); |
1280 | DEBUG(3, "receive overrun cleared\n"); | 1225 | pr_debug("receive overrun cleared\n"); |
1281 | } | 1226 | } |
1282 | 1227 | ||
1283 | /***** transmit section ******/ | 1228 | /***** transmit section ******/ |
@@ -1290,13 +1235,13 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1290 | if (nn < n) /* rollover */ | 1235 | if (nn < n) /* rollover */ |
1291 | dev->stats.tx_packets += 256 - n; | 1236 | dev->stats.tx_packets += 256 - n; |
1292 | else if (n == nn) { /* happens sometimes - don't know why */ | 1237 | else if (n == nn) { /* happens sometimes - don't know why */ |
1293 | DEBUG(0, "PTR not changed?\n"); | 1238 | pr_debug("PTR not changed?\n"); |
1294 | } else | 1239 | } else |
1295 | dev->stats.tx_packets += lp->last_ptr_value - n; | 1240 | dev->stats.tx_packets += lp->last_ptr_value - n; |
1296 | netif_wake_queue(dev); | 1241 | netif_wake_queue(dev); |
1297 | } | 1242 | } |
1298 | if (tx_status & 0x0002) { /* Execessive collissions */ | 1243 | if (tx_status & 0x0002) { /* Execessive collissions */ |
1299 | DEBUG(0, "tx restarted due to execssive collissions\n"); | 1244 | pr_debug("tx restarted due to execssive collissions\n"); |
1300 | PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ | 1245 | PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ |
1301 | } | 1246 | } |
1302 | if (tx_status & 0x0040) | 1247 | if (tx_status & 0x0040) |
@@ -1315,14 +1260,14 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1315 | maxrx_bytes = 2000; | 1260 | maxrx_bytes = 2000; |
1316 | else if (maxrx_bytes > 22000) | 1261 | else if (maxrx_bytes > 22000) |
1317 | maxrx_bytes = 22000; | 1262 | maxrx_bytes = 22000; |
1318 | DEBUG(1, "set maxrx=%u (rcvd=%u ticks=%lu)\n", | 1263 | pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n", |
1319 | maxrx_bytes, bytes_rcvd, duration); | 1264 | maxrx_bytes, bytes_rcvd, duration); |
1320 | } else if (!duration && maxrx_bytes < 22000) { | 1265 | } else if (!duration && maxrx_bytes < 22000) { |
1321 | /* now much faster */ | 1266 | /* now much faster */ |
1322 | maxrx_bytes += 2000; | 1267 | maxrx_bytes += 2000; |
1323 | if (maxrx_bytes > 22000) | 1268 | if (maxrx_bytes > 22000) |
1324 | maxrx_bytes = 22000; | 1269 | maxrx_bytes = 22000; |
1325 | DEBUG(1, "set maxrx=%u\n", maxrx_bytes); | 1270 | pr_debug("set maxrx=%u\n", maxrx_bytes); |
1326 | } | 1271 | } |
1327 | } | 1272 | } |
1328 | 1273 | ||
@@ -1372,7 +1317,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1372 | unsigned freespace; | 1317 | unsigned freespace; |
1373 | unsigned pktlen = skb->len; | 1318 | unsigned pktlen = skb->len; |
1374 | 1319 | ||
1375 | DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n", | 1320 | pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n", |
1376 | skb, dev, pktlen); | 1321 | skb, dev, pktlen); |
1377 | 1322 | ||
1378 | 1323 | ||
@@ -1398,7 +1343,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1398 | freespace &= 0x7fff; | 1343 | freespace &= 0x7fff; |
1399 | /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ | 1344 | /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ |
1400 | okay = pktlen +2 < freespace; | 1345 | okay = pktlen +2 < freespace; |
1401 | DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n", | 1346 | pr_debug("%s: avail. tx space=%u%s\n", |
1402 | dev->name, freespace, okay ? " (okay)":" (not enough)"); | 1347 | dev->name, freespace, okay ? " (okay)":" (not enough)"); |
1403 | if (!okay) { /* not enough space */ | 1348 | if (!okay) { /* not enough space */ |
1404 | return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ | 1349 | return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ |
@@ -1500,7 +1445,7 @@ do_config(struct net_device *dev, struct ifmap *map) | |||
1500 | { | 1445 | { |
1501 | local_info_t *local = netdev_priv(dev); | 1446 | local_info_t *local = netdev_priv(dev); |
1502 | 1447 | ||
1503 | DEBUG(0, "do_config(%p)\n", dev); | 1448 | pr_debug("do_config(%p)\n", dev); |
1504 | if (map->port != 255 && map->port != dev->if_port) { | 1449 | if (map->port != 255 && map->port != dev->if_port) { |
1505 | if (map->port > 4) | 1450 | if (map->port > 4) |
1506 | return -EINVAL; | 1451 | return -EINVAL; |
@@ -1527,7 +1472,7 @@ do_open(struct net_device *dev) | |||
1527 | local_info_t *lp = netdev_priv(dev); | 1472 | local_info_t *lp = netdev_priv(dev); |
1528 | struct pcmcia_device *link = lp->p_dev; | 1473 | struct pcmcia_device *link = lp->p_dev; |
1529 | 1474 | ||
1530 | DEBUG(0, "do_open(%p)\n", dev); | 1475 | dev_dbg(&link->dev, "do_open(%p)\n", dev); |
1531 | 1476 | ||
1532 | /* Check that the PCMCIA card is still here. */ | 1477 | /* Check that the PCMCIA card is still here. */ |
1533 | /* Physical device present signature. */ | 1478 | /* Physical device present signature. */ |
@@ -1561,7 +1506,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1561 | unsigned int ioaddr = dev->base_addr; | 1506 | unsigned int ioaddr = dev->base_addr; |
1562 | struct mii_ioctl_data *data = if_mii(rq); | 1507 | struct mii_ioctl_data *data = if_mii(rq); |
1563 | 1508 | ||
1564 | DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", | 1509 | pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", |
1565 | dev->name, rq->ifr_ifrn.ifrn_name, cmd, | 1510 | dev->name, rq->ifr_ifrn.ifrn_name, cmd, |
1566 | data->phy_id, data->reg_num, data->val_in, data->val_out); | 1511 | data->phy_id, data->reg_num, data->val_in, data->val_out); |
1567 | 1512 | ||
@@ -1610,7 +1555,7 @@ do_reset(struct net_device *dev, int full) | |||
1610 | unsigned int ioaddr = dev->base_addr; | 1555 | unsigned int ioaddr = dev->base_addr; |
1611 | unsigned value; | 1556 | unsigned value; |
1612 | 1557 | ||
1613 | DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); | 1558 | pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); |
1614 | 1559 | ||
1615 | hardreset(dev); | 1560 | hardreset(dev); |
1616 | PutByte(XIRCREG_CR, SoftReset); /* set */ | 1561 | PutByte(XIRCREG_CR, SoftReset); /* set */ |
@@ -1648,8 +1593,8 @@ do_reset(struct net_device *dev, int full) | |||
1648 | } | 1593 | } |
1649 | msleep(40); /* wait 40 msec to let it complete */ | 1594 | msleep(40); /* wait 40 msec to let it complete */ |
1650 | 1595 | ||
1651 | #ifdef PCMCIA_DEBUG | 1596 | #if 0 |
1652 | if (pc_debug) { | 1597 | { |
1653 | SelectPage(0); | 1598 | SelectPage(0); |
1654 | value = GetByte(XIRCREG_ESR); /* read the ESR */ | 1599 | value = GetByte(XIRCREG_ESR); /* read the ESR */ |
1655 | printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value); | 1600 | printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value); |
@@ -1666,7 +1611,7 @@ do_reset(struct net_device *dev, int full) | |||
1666 | value |= DisableLinkPulse; | 1611 | value |= DisableLinkPulse; |
1667 | PutByte(XIRCREG1_ECR, value); | 1612 | PutByte(XIRCREG1_ECR, value); |
1668 | #endif | 1613 | #endif |
1669 | DEBUG(0, "%s: ECR is: %#02x\n", dev->name, value); | 1614 | pr_debug("%s: ECR is: %#02x\n", dev->name, value); |
1670 | 1615 | ||
1671 | SelectPage(0x42); | 1616 | SelectPage(0x42); |
1672 | PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ | 1617 | PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ |
@@ -1844,7 +1789,7 @@ do_powerdown(struct net_device *dev) | |||
1844 | 1789 | ||
1845 | unsigned int ioaddr = dev->base_addr; | 1790 | unsigned int ioaddr = dev->base_addr; |
1846 | 1791 | ||
1847 | DEBUG(0, "do_powerdown(%p)\n", dev); | 1792 | pr_debug("do_powerdown(%p)\n", dev); |
1848 | 1793 | ||
1849 | SelectPage(4); | 1794 | SelectPage(4); |
1850 | PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ | 1795 | PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ |
@@ -1858,7 +1803,7 @@ do_stop(struct net_device *dev) | |||
1858 | local_info_t *lp = netdev_priv(dev); | 1803 | local_info_t *lp = netdev_priv(dev); |
1859 | struct pcmcia_device *link = lp->p_dev; | 1804 | struct pcmcia_device *link = lp->p_dev; |
1860 | 1805 | ||
1861 | DEBUG(0, "do_stop(%p)\n", dev); | 1806 | dev_dbg(&link->dev, "do_stop(%p)\n", dev); |
1862 | 1807 | ||
1863 | if (!link) | 1808 | if (!link) |
1864 | return -ENODEV; | 1809 | return -ENODEV; |
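Two window calls change signature in this series: pcmcia_request_window() now takes the pcmcia_device itself instead of a pointer-to-pointer, and pcmcia_map_mem_page() gains the device as a new first argument. The Dingo attribute-memory mapping above then reads roughly as below (same attributes and offsets as the hunk, error handling shortened):

    win_req_t req;
    memreq_t mem;

    req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
    req.Base = req.Size = 0;                /* let the core choose */
    req.AccessSpeed = 0;
    if (pcmcia_request_window(link, &req, &link->win))  /* was (&link, ...) */
            goto config_error;

    local->dingo_ccr = ioremap(req.Base, 0x1000) + 0x0800;

    mem.CardOffset = 0x0;
    mem.Page = 0;
    if (pcmcia_map_mem_page(link, link->win, &mem))     /* device added */
            goto config_error;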
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c index d0593ed9170e..f6036fb42319 100644 --- a/drivers/net/wireless/airo_cs.c +++ b/drivers/net/wireless/airo_cs.c | |||
@@ -43,21 +43,6 @@ | |||
43 | 43 | ||
44 | #include "airo.h" | 44 | #include "airo.h" |
45 | 45 | ||
46 | /* | ||
47 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
48 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
49 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
50 | be present but disabled -- but it can then be enabled for specific | ||
51 | modules at load time with a 'pc_debug=#' option to insmod. | ||
52 | */ | ||
53 | #ifdef PCMCIA_DEBUG | ||
54 | static int pc_debug = PCMCIA_DEBUG; | ||
55 | module_param(pc_debug, int, 0); | ||
56 | static char *version = "$Revision: 1.2 $"; | ||
57 | #define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args); | ||
58 | #else | ||
59 | #define DEBUG(n, args...) | ||
60 | #endif | ||
61 | 46 | ||
62 | /*====================================================================*/ | 47 | /*====================================================================*/ |
63 | 48 | ||
@@ -145,11 +130,10 @@ static int airo_probe(struct pcmcia_device *p_dev) | |||
145 | { | 130 | { |
146 | local_info_t *local; | 131 | local_info_t *local; |
147 | 132 | ||
148 | DEBUG(0, "airo_attach()\n"); | 133 | dev_dbg(&p_dev->dev, "airo_attach()\n"); |
149 | 134 | ||
150 | /* Interrupt setup */ | 135 | /* Interrupt setup */ |
151 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 136 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
152 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
153 | p_dev->irq.Handler = NULL; | 137 | p_dev->irq.Handler = NULL; |
154 | 138 | ||
155 | /* | 139 | /* |
@@ -184,7 +168,7 @@ static int airo_probe(struct pcmcia_device *p_dev) | |||
184 | 168 | ||
185 | static void airo_detach(struct pcmcia_device *link) | 169 | static void airo_detach(struct pcmcia_device *link) |
186 | { | 170 | { |
187 | DEBUG(0, "airo_detach(0x%p)\n", link); | 171 | dev_dbg(&link->dev, "airo_detach\n"); |
188 | 172 | ||
189 | airo_release(link); | 173 | airo_release(link); |
190 | 174 | ||
@@ -204,9 +188,6 @@ static void airo_detach(struct pcmcia_device *link) | |||
204 | 188 | ||
205 | ======================================================================*/ | 189 | ======================================================================*/ |
206 | 190 | ||
207 | #define CS_CHECK(fn, ret) \ | ||
208 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
209 | |||
210 | static int airo_cs_config_check(struct pcmcia_device *p_dev, | 191 | static int airo_cs_config_check(struct pcmcia_device *p_dev, |
211 | cistpl_cftable_entry_t *cfg, | 192 | cistpl_cftable_entry_t *cfg, |
212 | cistpl_cftable_entry_t *dflt, | 193 | cistpl_cftable_entry_t *dflt, |
@@ -275,11 +256,11 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, | |||
275 | req->Base = mem->win[0].host_addr; | 256 | req->Base = mem->win[0].host_addr; |
276 | req->Size = mem->win[0].len; | 257 | req->Size = mem->win[0].len; |
277 | req->AccessSpeed = 0; | 258 | req->AccessSpeed = 0; |
278 | if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0) | 259 | if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0) |
279 | return -ENODEV; | 260 | return -ENODEV; |
280 | map.Page = 0; | 261 | map.Page = 0; |
281 | map.CardOffset = mem->win[0].card_addr; | 262 | map.CardOffset = mem->win[0].card_addr; |
282 | if (pcmcia_map_mem_page(p_dev->win, &map) != 0) | 263 | if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0) |
283 | return -ENODEV; | 264 | return -ENODEV; |
284 | } | 265 | } |
285 | /* If we got this far, we're cool! */ | 266 | /* If we got this far, we're cool! */ |
@@ -291,11 +272,11 @@ static int airo_config(struct pcmcia_device *link) | |||
291 | { | 272 | { |
292 | local_info_t *dev; | 273 | local_info_t *dev; |
293 | win_req_t *req; | 274 | win_req_t *req; |
294 | int last_fn, last_ret; | 275 | int ret; |
295 | 276 | ||
296 | dev = link->priv; | 277 | dev = link->priv; |
297 | 278 | ||
298 | DEBUG(0, "airo_config(0x%p)\n", link); | 279 | dev_dbg(&link->dev, "airo_config\n"); |
299 | 280 | ||
300 | req = kzalloc(sizeof(win_req_t), GFP_KERNEL); | 281 | req = kzalloc(sizeof(win_req_t), GFP_KERNEL); |
301 | if (!req) | 282 | if (!req) |
@@ -315,8 +296,8 @@ static int airo_config(struct pcmcia_device *link) | |||
315 | * and most client drivers will only use the CIS to fill in | 296 | * and most client drivers will only use the CIS to fill in |
316 | * implementation-defined details. | 297 | * implementation-defined details. |
317 | */ | 298 | */ |
318 | last_ret = pcmcia_loop_config(link, airo_cs_config_check, req); | 299 | ret = pcmcia_loop_config(link, airo_cs_config_check, req); |
319 | if (last_ret) | 300 | if (ret) |
320 | goto failed; | 301 | goto failed; |
321 | 302 | ||
322 | /* | 303 | /* |
@@ -324,21 +305,25 @@ static int airo_config(struct pcmcia_device *link) | |||
324 | handler to the interrupt, unless the 'Handler' member of the | 305 | handler to the interrupt, unless the 'Handler' member of the |
325 | irq structure is initialized. | 306 | irq structure is initialized. |
326 | */ | 307 | */ |
327 | if (link->conf.Attributes & CONF_ENABLE_IRQ) | 308 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
328 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 309 | ret = pcmcia_request_irq(link, &link->irq); |
310 | if (ret) | ||
311 | goto failed; | ||
312 | } | ||
329 | 313 | ||
330 | /* | 314 | /* |
331 | This actually configures the PCMCIA socket -- setting up | 315 | This actually configures the PCMCIA socket -- setting up |
332 | the I/O windows and the interrupt mapping, and putting the | 316 | the I/O windows and the interrupt mapping, and putting the |
333 | card and host interface into "Memory and IO" mode. | 317 | card and host interface into "Memory and IO" mode. |
334 | */ | 318 | */ |
335 | CS_CHECK(RequestConfiguration, | 319 | ret = pcmcia_request_configuration(link, &link->conf); |
336 | pcmcia_request_configuration(link, &link->conf)); | 320 | if (ret) |
321 | goto failed; | ||
337 | ((local_info_t *)link->priv)->eth_dev = | 322 | ((local_info_t *)link->priv)->eth_dev = |
338 | init_airo_card(link->irq.AssignedIRQ, | 323 | init_airo_card(link->irq.AssignedIRQ, |
339 | link->io.BasePort1, 1, &handle_to_dev(link)); | 324 | link->io.BasePort1, 1, &link->dev); |
340 | if (!((local_info_t *)link->priv)->eth_dev) | 325 | if (!((local_info_t *)link->priv)->eth_dev) |
341 | goto cs_failed; | 326 | goto failed; |
342 | 327 | ||
343 | /* | 328 | /* |
344 | At this point, the dev_node_t structure(s) need to be | 329 | At this point, the dev_node_t structure(s) need to be |
@@ -368,8 +353,6 @@ static int airo_config(struct pcmcia_device *link) | |||
368 | kfree(req); | 353 | kfree(req); |
369 | return 0; | 354 | return 0; |
370 | 355 | ||
371 | cs_failed: | ||
372 | cs_error(link, last_fn, last_ret); | ||
373 | failed: | 356 | failed: |
374 | airo_release(link); | 357 | airo_release(link); |
375 | kfree(req); | 358 | kfree(req); |
@@ -386,7 +369,7 @@ static int airo_config(struct pcmcia_device *link) | |||
386 | 369 | ||
387 | static void airo_release(struct pcmcia_device *link) | 370 | static void airo_release(struct pcmcia_device *link) |
388 | { | 371 | { |
389 | DEBUG(0, "airo_release(0x%p)\n", link); | 372 | dev_dbg(&link->dev, "airo_release\n"); |
390 | pcmcia_disable_device(link); | 373 | pcmcia_disable_device(link); |
391 | } | 374 | } |
392 | 375 | ||
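The airo_cs hunks above set the pattern repeated in every driver of this series: the CS_CHECK() macro, which recorded a function ID and jumped to cs_failed so cs_error() could print it, is dropped in favour of checking each pcmcia_* return value directly and branching to the driver's existing failed label. A minimal sketch of the new shape, using only calls that appear in the hunks above; the wrapper function and its name are illustrative, not part of the driver:

    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    /* Sketch: request the IRQ and configure the socket, unwinding on error.
     * Every pcmcia_* helper returns 0 on success, so no CS_CHECK()/cs_error()
     * indirection is needed any more.
     */
    static int example_enable(struct pcmcia_device *link)
    {
            int ret;

            ret = pcmcia_request_irq(link, &link->irq);
            if (ret)
                    goto failed;

            ret = pcmcia_request_configuration(link, &link->conf);
            if (ret)
                    goto failed;

            return 0;

    failed:
            /* same cleanup the drivers do in their *_release() helpers */
            pcmcia_disable_device(link);
            return -ENODEV;
    }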
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c index ddaa859c3491..32407911842f 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel_cs.c | |||
@@ -55,22 +55,6 @@ | |||
55 | 55 | ||
56 | #include "atmel.h" | 56 | #include "atmel.h" |
57 | 57 | ||
58 | /* | ||
59 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
60 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
61 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
62 | be present but disabled -- but it can then be enabled for specific | ||
63 | modules at load time with a 'pc_debug=#' option to insmod. | ||
64 | */ | ||
65 | |||
66 | #ifdef PCMCIA_DEBUG | ||
67 | static int pc_debug = PCMCIA_DEBUG; | ||
68 | module_param(pc_debug, int, 0); | ||
69 | static char *version = "$Revision: 1.2 $"; | ||
70 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); | ||
71 | #else | ||
72 | #define DEBUG(n, args...) | ||
73 | #endif | ||
74 | 58 | ||
75 | /*====================================================================*/ | 59 | /*====================================================================*/ |
76 | 60 | ||
@@ -155,11 +139,10 @@ static int atmel_probe(struct pcmcia_device *p_dev) | |||
155 | { | 139 | { |
156 | local_info_t *local; | 140 | local_info_t *local; |
157 | 141 | ||
158 | DEBUG(0, "atmel_attach()\n"); | 142 | dev_dbg(&p_dev->dev, "atmel_attach()\n"); |
159 | 143 | ||
160 | /* Interrupt setup */ | 144 | /* Interrupt setup */ |
161 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 145 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
162 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
163 | p_dev->irq.Handler = NULL; | 146 | p_dev->irq.Handler = NULL; |
164 | 147 | ||
165 | /* | 148 | /* |
@@ -194,7 +177,7 @@ static int atmel_probe(struct pcmcia_device *p_dev) | |||
194 | 177 | ||
195 | static void atmel_detach(struct pcmcia_device *link) | 178 | static void atmel_detach(struct pcmcia_device *link) |
196 | { | 179 | { |
197 | DEBUG(0, "atmel_detach(0x%p)\n", link); | 180 | dev_dbg(&link->dev, "atmel_detach\n"); |
198 | 181 | ||
199 | atmel_release(link); | 182 | atmel_release(link); |
200 | 183 | ||
@@ -209,9 +192,6 @@ static void atmel_detach(struct pcmcia_device *link) | |||
209 | 192 | ||
210 | ======================================================================*/ | 193 | ======================================================================*/ |
211 | 194 | ||
212 | #define CS_CHECK(fn, ret) \ | ||
213 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
214 | |||
215 | /* Call-back function to interrogate PCMCIA-specific information | 195 | /* Call-back function to interrogate PCMCIA-specific information |
216 | about the current existance of the card */ | 196 | about the current existance of the card */ |
216 | about the current existence of the card */ | 196 | about the current existence of the card */ |
217 | static int card_present(void *arg) | 197 | static int card_present(void *arg) |
@@ -275,13 +255,13 @@ static int atmel_config_check(struct pcmcia_device *p_dev, | |||
275 | static int atmel_config(struct pcmcia_device *link) | 255 | static int atmel_config(struct pcmcia_device *link) |
276 | { | 256 | { |
277 | local_info_t *dev; | 257 | local_info_t *dev; |
278 | int last_fn, last_ret; | 258 | int ret; |
279 | struct pcmcia_device_id *did; | 259 | struct pcmcia_device_id *did; |
280 | 260 | ||
281 | dev = link->priv; | 261 | dev = link->priv; |
282 | did = dev_get_drvdata(&handle_to_dev(link)); | 262 | did = dev_get_drvdata(&link->dev); |
283 | 263 | ||
284 | DEBUG(0, "atmel_config(0x%p)\n", link); | 264 | dev_dbg(&link->dev, "atmel_config\n"); |
285 | 265 | ||
286 | /* | 266 | /* |
287 | In this loop, we scan the CIS for configuration table entries, | 267 | In this loop, we scan the CIS for configuration table entries, |
@@ -303,31 +283,36 @@ static int atmel_config(struct pcmcia_device *link) | |||
303 | handler to the interrupt, unless the 'Handler' member of the | 283 | handler to the interrupt, unless the 'Handler' member of the |
304 | irq structure is initialized. | 284 | irq structure is initialized. |
305 | */ | 285 | */ |
306 | if (link->conf.Attributes & CONF_ENABLE_IRQ) | 286 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
307 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 287 | ret = pcmcia_request_irq(link, &link->irq); |
288 | if (ret) | ||
289 | goto failed; | ||
290 | } | ||
308 | 291 | ||
309 | /* | 292 | /* |
310 | This actually configures the PCMCIA socket -- setting up | 293 | This actually configures the PCMCIA socket -- setting up |
311 | the I/O windows and the interrupt mapping, and putting the | 294 | the I/O windows and the interrupt mapping, and putting the |
312 | card and host interface into "Memory and IO" mode. | 295 | card and host interface into "Memory and IO" mode. |
313 | */ | 296 | */ |
314 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 297 | ret = pcmcia_request_configuration(link, &link->conf); |
298 | if (ret) | ||
299 | goto failed; | ||
315 | 300 | ||
316 | if (link->irq.AssignedIRQ == 0) { | 301 | if (link->irq.AssignedIRQ == 0) { |
317 | printk(KERN_ALERT | 302 | printk(KERN_ALERT |
318 | "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); | 303 | "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); |
319 | goto cs_failed; | 304 | goto failed; |
320 | } | 305 | } |
321 | 306 | ||
322 | ((local_info_t*)link->priv)->eth_dev = | 307 | ((local_info_t*)link->priv)->eth_dev = |
323 | init_atmel_card(link->irq.AssignedIRQ, | 308 | init_atmel_card(link->irq.AssignedIRQ, |
324 | link->io.BasePort1, | 309 | link->io.BasePort1, |
325 | did ? did->driver_info : ATMEL_FW_TYPE_NONE, | 310 | did ? did->driver_info : ATMEL_FW_TYPE_NONE, |
326 | &handle_to_dev(link), | 311 | &link->dev, |
327 | card_present, | 312 | card_present, |
328 | link); | 313 | link); |
329 | if (!((local_info_t*)link->priv)->eth_dev) | 314 | if (!((local_info_t*)link->priv)->eth_dev) |
330 | goto cs_failed; | 315 | goto failed; |
331 | 316 | ||
332 | 317 | ||
333 | /* | 318 | /* |
@@ -340,8 +325,6 @@ static int atmel_config(struct pcmcia_device *link) | |||
340 | 325 | ||
341 | return 0; | 326 | return 0; |
342 | 327 | ||
343 | cs_failed: | ||
344 | cs_error(link, last_fn, last_ret); | ||
345 | failed: | 328 | failed: |
346 | atmel_release(link); | 329 | atmel_release(link); |
347 | return -ENODEV; | 330 | return -ENODEV; |
@@ -359,7 +342,7 @@ static void atmel_release(struct pcmcia_device *link) | |||
359 | { | 342 | { |
360 | struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; | 343 | struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; |
361 | 344 | ||
362 | DEBUG(0, "atmel_release(0x%p)\n", link); | 345 | dev_dbg(&link->dev, "atmel_release\n"); |
363 | 346 | ||
364 | if (dev) | 347 | if (dev) |
365 | stop_atmel_card(dev); | 348 | stop_atmel_card(dev); |
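Both airo_cs and atmel_cs also drop the module-local PCMCIA_DEBUG / pc_debug machinery; messages now go through the kernel's generic dev_dbg(), which is compiled away unless DEBUG or dynamic debug is enabled and automatically tags the output with the device name. A rough before/after sketch; the probe function here is invented for illustration:

    #include <linux/device.h>
    #include <pcmcia/ds.h>

    /*
     * Removed scheme (per-module macro gated on a load-time pc_debug level):
     *
     *     #define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args);
     *     DEBUG(0, "atmel_attach()\n");
     *
     * Replacement: dev_dbg() against the struct device embedded in the
     * pcmcia_device.
     */
    static int example_probe(struct pcmcia_device *p_dev)
    {
            dev_dbg(&p_dev->dev, "example_attach()\n");
            return 0;
    }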
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c index 6c3a74964ab8..984174bc7b0f 100644 --- a/drivers/net/wireless/b43/pcmcia.c +++ b/drivers/net/wireless/b43/pcmcia.c | |||
@@ -65,35 +65,15 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) | |||
65 | struct ssb_bus *ssb; | 65 | struct ssb_bus *ssb; |
66 | win_req_t win; | 66 | win_req_t win; |
67 | memreq_t mem; | 67 | memreq_t mem; |
68 | tuple_t tuple; | ||
69 | cisparse_t parse; | ||
70 | int err = -ENOMEM; | 68 | int err = -ENOMEM; |
71 | int res = 0; | 69 | int res = 0; |
72 | unsigned char buf[64]; | ||
73 | 70 | ||
74 | ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); | 71 | ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); |
75 | if (!ssb) | 72 | if (!ssb) |
76 | goto out_error; | 73 | goto out_error; |
77 | 74 | ||
78 | err = -ENODEV; | 75 | err = -ENODEV; |
79 | tuple.DesiredTuple = CISTPL_CONFIG; | ||
80 | tuple.Attributes = 0; | ||
81 | tuple.TupleData = buf; | ||
82 | tuple.TupleDataMax = sizeof(buf); | ||
83 | tuple.TupleOffset = 0; | ||
84 | 76 | ||
85 | res = pcmcia_get_first_tuple(dev, &tuple); | ||
86 | if (res != 0) | ||
87 | goto err_kfree_ssb; | ||
88 | res = pcmcia_get_tuple_data(dev, &tuple); | ||
89 | if (res != 0) | ||
90 | goto err_kfree_ssb; | ||
91 | res = pcmcia_parse_tuple(&tuple, &parse); | ||
92 | if (res != 0) | ||
93 | goto err_kfree_ssb; | ||
94 | |||
95 | dev->conf.ConfigBase = parse.config.base; | ||
96 | dev->conf.Present = parse.config.rmask[0]; | ||
97 | dev->conf.Attributes = CONF_ENABLE_IRQ; | 77 | dev->conf.Attributes = CONF_ENABLE_IRQ; |
98 | dev->conf.IntType = INT_MEMORY_AND_IO; | 78 | dev->conf.IntType = INT_MEMORY_AND_IO; |
99 | 79 | ||
@@ -107,20 +87,18 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) | |||
107 | win.Base = 0; | 87 | win.Base = 0; |
108 | win.Size = SSB_CORE_SIZE; | 88 | win.Size = SSB_CORE_SIZE; |
109 | win.AccessSpeed = 250; | 89 | win.AccessSpeed = 250; |
110 | res = pcmcia_request_window(&dev, &win, &dev->win); | 90 | res = pcmcia_request_window(dev, &win, &dev->win); |
111 | if (res != 0) | 91 | if (res != 0) |
112 | goto err_kfree_ssb; | 92 | goto err_kfree_ssb; |
113 | 93 | ||
114 | mem.CardOffset = 0; | 94 | mem.CardOffset = 0; |
115 | mem.Page = 0; | 95 | mem.Page = 0; |
116 | res = pcmcia_map_mem_page(dev->win, &mem); | 96 | res = pcmcia_map_mem_page(dev, dev->win, &mem); |
117 | if (res != 0) | 97 | if (res != 0) |
118 | goto err_disable; | 98 | goto err_disable; |
119 | 99 | ||
120 | dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 100 | dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
121 | dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
122 | dev->irq.Handler = NULL; /* The handler is registered later. */ | 101 | dev->irq.Handler = NULL; /* The handler is registered later. */ |
123 | dev->irq.Instance = NULL; | ||
124 | res = pcmcia_request_irq(dev, &dev->irq); | 102 | res = pcmcia_request_irq(dev, &dev->irq); |
125 | if (res != 0) | 103 | if (res != 0) |
126 | goto err_disable; | 104 | goto err_disable; |
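The b43 hunk also shows two PCMCIA API signature changes that recur later in this diff: pcmcia_request_window() now takes the pcmcia_device pointer itself instead of a pointer to it, and pcmcia_map_mem_page() gains the device as a new first argument. A condensed sketch of the new calling sequence; the window attributes and the SSB header location are assumptions (they are not visible in the hunk), while the size and access speed mirror the values above:

    #include <linux/ssb/ssb_regs.h>   /* for SSB_CORE_SIZE (assumed location) */
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    static int example_map_window(struct pcmcia_device *dev)
    {
            win_req_t win;
            memreq_t mem;
            int res;

            win.Attributes = WIN_ENABLE | WIN_DATA_WIDTH_16;  /* assumed flags */
            win.Base = 0;
            win.Size = SSB_CORE_SIZE;
            win.AccessSpeed = 250;

            /* was: pcmcia_request_window(&dev, &win, &dev->win) */
            res = pcmcia_request_window(dev, &win, &dev->win);
            if (res)
                    return res;

            mem.CardOffset = 0;
            mem.Page = 0;
            /* was: pcmcia_map_mem_page(dev->win, &mem) */
            return pcmcia_map_mem_page(dev, dev->win, &mem);
    }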
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index ad8eab4a639b..c9640a3e02c9 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c | |||
@@ -274,9 +274,6 @@ static int sandisk_enable_wireless(struct net_device *dev) | |||
274 | conf_reg_t reg; | 274 | conf_reg_t reg; |
275 | struct hostap_interface *iface = netdev_priv(dev); | 275 | struct hostap_interface *iface = netdev_priv(dev); |
276 | local_info_t *local = iface->local; | 276 | local_info_t *local = iface->local; |
277 | tuple_t tuple; | ||
278 | cisparse_t *parse = NULL; | ||
279 | u_char buf[64]; | ||
280 | struct hostap_cs_priv *hw_priv = local->hw_priv; | 277 | struct hostap_cs_priv *hw_priv = local->hw_priv; |
281 | 278 | ||
282 | if (hw_priv->link->io.NumPorts1 < 0x42) { | 279 | if (hw_priv->link->io.NumPorts1 < 0x42) { |
@@ -285,28 +282,13 @@ static int sandisk_enable_wireless(struct net_device *dev) | |||
285 | goto done; | 282 | goto done; |
286 | } | 283 | } |
287 | 284 | ||
288 | parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL); | ||
289 | if (parse == NULL) { | ||
290 | ret = -ENOMEM; | ||
291 | goto done; | ||
292 | } | ||
293 | |||
294 | tuple.Attributes = TUPLE_RETURN_COMMON; | ||
295 | tuple.TupleData = buf; | ||
296 | tuple.TupleDataMax = sizeof(buf); | ||
297 | tuple.TupleOffset = 0; | ||
298 | |||
299 | if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) { | 285 | if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) { |
300 | /* No SanDisk manfid found */ | 286 | /* No SanDisk manfid found */ |
301 | ret = -ENODEV; | 287 | ret = -ENODEV; |
302 | goto done; | 288 | goto done; |
303 | } | 289 | } |
304 | 290 | ||
305 | tuple.DesiredTuple = CISTPL_LONGLINK_MFC; | 291 | if (hw_priv->link->socket->functions < 2) { |
306 | if (pcmcia_get_first_tuple(hw_priv->link, &tuple) || | ||
307 | pcmcia_get_tuple_data(hw_priv->link, &tuple) || | ||
308 | pcmcia_parse_tuple(&tuple, parse) || | ||
309 | parse->longlink_mfc.nfn < 2) { | ||
310 | /* No multi-function links found */ | 292 | /* No multi-function links found */ |
311 | ret = -ENODEV; | 293 | ret = -ENODEV; |
312 | goto done; | 294 | goto done; |
@@ -354,7 +336,6 @@ static int sandisk_enable_wireless(struct net_device *dev) | |||
354 | udelay(10); | 336 | udelay(10); |
355 | 337 | ||
356 | done: | 338 | done: |
357 | kfree(parse); | ||
358 | return ret; | 339 | return ret; |
359 | } | 340 | } |
360 | 341 | ||
@@ -529,10 +510,6 @@ static void prism2_detach(struct pcmcia_device *link) | |||
529 | } | 510 | } |
530 | 511 | ||
531 | 512 | ||
532 | #define CS_CHECK(fn, ret) \ | ||
533 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
534 | |||
535 | |||
536 | /* run after a CARD_INSERTION event is received to configure the PCMCIA | 513 | /* run after a CARD_INSERTION event is received to configure the PCMCIA |
537 | * socket and make the device available to the system */ | 514 | * socket and make the device available to the system */ |
538 | 515 | ||
@@ -624,7 +601,6 @@ static int prism2_config(struct pcmcia_device *link) | |||
624 | struct hostap_interface *iface; | 601 | struct hostap_interface *iface; |
625 | local_info_t *local; | 602 | local_info_t *local; |
626 | int ret = 1; | 603 | int ret = 1; |
627 | int last_fn, last_ret; | ||
628 | struct hostap_cs_priv *hw_priv; | 604 | struct hostap_cs_priv *hw_priv; |
629 | 605 | ||
630 | PDEBUG(DEBUG_FLOW, "prism2_config()\n"); | 606 | PDEBUG(DEBUG_FLOW, "prism2_config()\n"); |
@@ -636,19 +612,18 @@ static int prism2_config(struct pcmcia_device *link) | |||
636 | } | 612 | } |
637 | 613 | ||
638 | /* Look for an appropriate configuration table entry in the CIS */ | 614 | /* Look for an appropriate configuration table entry in the CIS */ |
639 | last_ret = pcmcia_loop_config(link, prism2_config_check, NULL); | 615 | ret = pcmcia_loop_config(link, prism2_config_check, NULL); |
640 | if (last_ret) { | 616 | if (ret) { |
641 | if (!ignore_cis_vcc) | 617 | if (!ignore_cis_vcc) |
642 | printk(KERN_ERR "GetNextTuple(): No matching " | 618 | printk(KERN_ERR "GetNextTuple(): No matching " |
643 | "CIS configuration. Maybe you need the " | 619 | "CIS configuration. Maybe you need the " |
644 | "ignore_cis_vcc=1 parameter.\n"); | 620 | "ignore_cis_vcc=1 parameter.\n"); |
645 | cs_error(link, RequestIO, last_ret); | ||
646 | goto failed; | 621 | goto failed; |
647 | } | 622 | } |
648 | 623 | ||
649 | /* Need to allocate net_device before requesting IRQ handler */ | 624 | /* Need to allocate net_device before requesting IRQ handler */ |
650 | dev = prism2_init_local_data(&prism2_pccard_funcs, 0, | 625 | dev = prism2_init_local_data(&prism2_pccard_funcs, 0, |
651 | &handle_to_dev(link)); | 626 | &link->dev); |
652 | if (dev == NULL) | 627 | if (dev == NULL) |
653 | goto failed; | 628 | goto failed; |
654 | link->priv = dev; | 629 | link->priv = dev; |
@@ -666,13 +641,11 @@ static int prism2_config(struct pcmcia_device *link) | |||
666 | * irq structure is initialized. | 641 | * irq structure is initialized. |
667 | */ | 642 | */ |
668 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 643 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
669 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | | 644 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
670 | IRQ_HANDLE_PRESENT; | ||
671 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
672 | link->irq.Handler = prism2_interrupt; | 645 | link->irq.Handler = prism2_interrupt; |
673 | link->irq.Instance = dev; | 646 | ret = pcmcia_request_irq(link, &link->irq); |
674 | CS_CHECK(RequestIRQ, | 647 | if (ret) |
675 | pcmcia_request_irq(link, &link->irq)); | 648 | goto failed; |
676 | } | 649 | } |
677 | 650 | ||
678 | /* | 651 | /* |
@@ -680,8 +653,9 @@ static int prism2_config(struct pcmcia_device *link) | |||
680 | * the I/O windows and the interrupt mapping, and putting the | 653 | * the I/O windows and the interrupt mapping, and putting the |
681 | * card and host interface into "Memory and IO" mode. | 654 | * card and host interface into "Memory and IO" mode. |
682 | */ | 655 | */ |
683 | CS_CHECK(RequestConfiguration, | 656 | ret = pcmcia_request_configuration(link, &link->conf); |
684 | pcmcia_request_configuration(link, &link->conf)); | 657 | if (ret) |
658 | goto failed; | ||
685 | 659 | ||
686 | dev->irq = link->irq.AssignedIRQ; | 660 | dev->irq = link->irq.AssignedIRQ; |
687 | dev->base_addr = link->io.BasePort1; | 661 | dev->base_addr = link->io.BasePort1; |
@@ -714,9 +688,6 @@ static int prism2_config(struct pcmcia_device *link) | |||
714 | } | 688 | } |
715 | return ret; | 689 | return ret; |
716 | 690 | ||
717 | cs_failed: | ||
718 | cs_error(link, last_fn, last_ret); | ||
719 | |||
720 | failed: | 691 | failed: |
721 | kfree(hw_priv); | 692 | kfree(hw_priv); |
722 | prism2_release((u_long)link); | 693 | prism2_release((u_long)link); |
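One hostap-specific simplification: sandisk_enable_wireless() no longer fetches and parses a CISTPL_LONGLINK_MFC tuple to find out whether the card is multifunction; the PCMCIA core already counts the functions on the socket, so a field test is enough. A tiny sketch of the test; the helper name is invented, link->socket->functions is taken from the hunk above:

    #include <linux/types.h>
    #include <pcmcia/ds.h>
    #include <pcmcia/ss.h>

    /* True when the card exposes at least two functions, replacing the old
     * "walk the CIS for CISTPL_LONGLINK_MFC and check nfn" dance. */
    static bool example_is_multifunction(struct pcmcia_device *link)
    {
            return link->socket->functions >= 2;
    }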
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index 62381768f2d5..b1d84592b959 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c | |||
@@ -590,7 +590,7 @@ static int if_cs_prog_helper(struct if_cs_card *card) | |||
590 | 590 | ||
591 | /* TODO: make firmware file configurable */ | 591 | /* TODO: make firmware file configurable */ |
592 | ret = request_firmware(&fw, "libertas_cs_helper.fw", | 592 | ret = request_firmware(&fw, "libertas_cs_helper.fw", |
593 | &handle_to_dev(card->p_dev)); | 593 | &card->p_dev->dev); |
594 | if (ret) { | 594 | if (ret) { |
595 | lbs_pr_err("can't load helper firmware\n"); | 595 | lbs_pr_err("can't load helper firmware\n"); |
596 | ret = -ENODEV; | 596 | ret = -ENODEV; |
@@ -663,7 +663,7 @@ static int if_cs_prog_real(struct if_cs_card *card) | |||
663 | 663 | ||
664 | /* TODO: make firmware file configurable */ | 664 | /* TODO: make firmware file configurable */ |
665 | ret = request_firmware(&fw, "libertas_cs.fw", | 665 | ret = request_firmware(&fw, "libertas_cs.fw", |
666 | &handle_to_dev(card->p_dev)); | 666 | &card->p_dev->dev); |
667 | if (ret) { | 667 | if (ret) { |
668 | lbs_pr_err("can't load firmware\n"); | 668 | lbs_pr_err("can't load firmware\n"); |
669 | ret = -ENODEV; | 669 | ret = -ENODEV; |
@@ -793,18 +793,37 @@ static void if_cs_release(struct pcmcia_device *p_dev) | |||
793 | * configure the card at this point -- we wait until we receive a card | 793 | * configure the card at this point -- we wait until we receive a card |
794 | * insertion event. | 794 | * insertion event. |
795 | */ | 795 | */ |
796 | |||
797 | static int if_cs_ioprobe(struct pcmcia_device *p_dev, | ||
798 | cistpl_cftable_entry_t *cfg, | ||
799 | cistpl_cftable_entry_t *dflt, | ||
800 | unsigned int vcc, | ||
801 | void *priv_data) | ||
802 | { | ||
803 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
804 | p_dev->io.BasePort1 = cfg->io.win[0].base; | ||
805 | p_dev->io.NumPorts1 = cfg->io.win[0].len; | ||
806 | |||
807 | /* Do we need to allocate an interrupt? */ | ||
808 | if (cfg->irq.IRQInfo1) | ||
809 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; | ||
810 | |||
811 | /* IO window settings */ | ||
812 | if (cfg->io.nwin != 1) { | ||
813 | lbs_pr_err("wrong CIS (check number of IO windows)\n"); | ||
814 | return -ENODEV; | ||
815 | } | ||
816 | |||
817 | /* This reserves IO space but doesn't actually enable it */ | ||
818 | return pcmcia_request_io(p_dev, &p_dev->io); | ||
819 | } | ||
820 | |||
796 | static int if_cs_probe(struct pcmcia_device *p_dev) | 821 | static int if_cs_probe(struct pcmcia_device *p_dev) |
797 | { | 822 | { |
798 | int ret = -ENOMEM; | 823 | int ret = -ENOMEM; |
799 | unsigned int prod_id; | 824 | unsigned int prod_id; |
800 | struct lbs_private *priv; | 825 | struct lbs_private *priv; |
801 | struct if_cs_card *card; | 826 | struct if_cs_card *card; |
802 | /* CIS parsing */ | ||
803 | tuple_t tuple; | ||
804 | cisparse_t parse; | ||
805 | cistpl_cftable_entry_t *cfg = &parse.cftable_entry; | ||
806 | cistpl_io_t *io = &cfg->io; | ||
807 | u_char buf[64]; | ||
808 | 827 | ||
809 | lbs_deb_enter(LBS_DEB_CS); | 828 | lbs_deb_enter(LBS_DEB_CS); |
810 | 829 | ||
@@ -818,48 +837,15 @@ static int if_cs_probe(struct pcmcia_device *p_dev) | |||
818 | 837 | ||
819 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 838 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
820 | p_dev->irq.Handler = NULL; | 839 | p_dev->irq.Handler = NULL; |
821 | p_dev->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID; | ||
822 | 840 | ||
823 | p_dev->conf.Attributes = 0; | 841 | p_dev->conf.Attributes = 0; |
824 | p_dev->conf.IntType = INT_MEMORY_AND_IO; | 842 | p_dev->conf.IntType = INT_MEMORY_AND_IO; |
825 | 843 | ||
826 | tuple.Attributes = 0; | 844 | if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) { |
827 | tuple.TupleData = buf; | 845 | lbs_pr_err("error in pcmcia_loop_config\n"); |
828 | tuple.TupleDataMax = sizeof(buf); | ||
829 | tuple.TupleOffset = 0; | ||
830 | |||
831 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
832 | if ((ret = pcmcia_get_first_tuple(p_dev, &tuple)) != 0 || | ||
833 | (ret = pcmcia_get_tuple_data(p_dev, &tuple)) != 0 || | ||
834 | (ret = pcmcia_parse_tuple(&tuple, &parse)) != 0) | ||
835 | { | ||
836 | lbs_pr_err("error in pcmcia_get_first_tuple etc\n"); | ||
837 | goto out1; | ||
838 | } | ||
839 | |||
840 | p_dev->conf.ConfigIndex = cfg->index; | ||
841 | |||
842 | /* Do we need to allocate an interrupt? */ | ||
843 | if (cfg->irq.IRQInfo1) { | ||
844 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; | ||
845 | } | ||
846 | |||
847 | /* IO window settings */ | ||
848 | if (cfg->io.nwin != 1) { | ||
849 | lbs_pr_err("wrong CIS (check number of IO windows)\n"); | ||
850 | ret = -ENODEV; | ||
851 | goto out1; | 846 | goto out1; |
852 | } | 847 | } |
853 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
854 | p_dev->io.BasePort1 = io->win[0].base; | ||
855 | p_dev->io.NumPorts1 = io->win[0].len; | ||
856 | 848 | ||
857 | /* This reserves IO space but doesn't actually enable it */ | ||
858 | ret = pcmcia_request_io(p_dev, &p_dev->io); | ||
859 | if (ret) { | ||
860 | lbs_pr_err("error in pcmcia_request_io\n"); | ||
861 | goto out1; | ||
862 | } | ||
863 | 849 | ||
864 | /* | 850 | /* |
865 | * Allocate an interrupt line. Note that this does not assign | 851 | * Allocate an interrupt line. Note that this does not assign |
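The libertas change is the most structural one in this group: instead of fetching the first CISTPL_CFTABLE_ENTRY tuple and parsing it by hand, if_cs_probe() now passes a callback to pcmcia_loop_config(), which walks the configuration table and lets the callback claim resources for each candidate entry. A reduced sketch of that callback shape, modelled on the if_cs_ioprobe() added above; field names and the single-I/O-window check come from the diff, the function names here are placeholders:

    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    static int example_ioprobe(struct pcmcia_device *p_dev,
                               cistpl_cftable_entry_t *cfg,
                               cistpl_cftable_entry_t *dflt,
                               unsigned int vcc,
                               void *priv_data)
    {
            /* Claim the I/O window advertised by this configuration entry. */
            p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
            p_dev->io.BasePort1 = cfg->io.win[0].base;
            p_dev->io.NumPorts1 = cfg->io.win[0].len;

            /* Ask for an interrupt if the entry describes one. */
            if (cfg->irq.IRQInfo1)
                    p_dev->conf.Attributes |= CONF_ENABLE_IRQ;

            if (cfg->io.nwin != 1)
                    return -ENODEV;         /* only single-window entries handled */

            /* Reserves the I/O range; it is enabled later by
             * pcmcia_request_configuration(). */
            return pcmcia_request_io(p_dev, &p_dev->io);
    }

    static int example_probe(struct pcmcia_device *p_dev)
    {
            /* Returns non-zero if no CIS entry could be configured. */
            if (pcmcia_loop_config(p_dev, example_ioprobe, NULL))
                    return -ENODEV;
            return 0;
    }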
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index 9498b46c99a4..e61e6b9440ab 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c | |||
@@ -145,23 +145,6 @@ static const unsigned int txConfEUD = 0x10; /* Enable Uni-Data packets */ | |||
145 | static const unsigned int txConfKey = 0x02; /* Scramble data packets */ | 145 | static const unsigned int txConfKey = 0x02; /* Scramble data packets */ |
146 | static const unsigned int txConfLoop = 0x01; /* Loopback mode */ | 146 | static const unsigned int txConfLoop = 0x01; /* Loopback mode */ |
147 | 147 | ||
148 | /* | ||
149 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
150 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
151 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
152 | be present but disabled -- but it can then be enabled for specific | ||
153 | modules at load time with a 'pc_debug=#' option to insmod. | ||
154 | */ | ||
155 | |||
156 | #ifdef PCMCIA_DEBUG | ||
157 | static int pc_debug = PCMCIA_DEBUG; | ||
158 | module_param(pc_debug, int, 0); | ||
159 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
160 | static char *version = | ||
161 | "netwave_cs.c 0.3.0 Thu Jul 17 14:36:02 1997 (John Markus Bjørndalen)\n"; | ||
162 | #else | ||
163 | #define DEBUG(n, args...) | ||
164 | #endif | ||
165 | 148 | ||
166 | /*====================================================================*/ | 149 | /*====================================================================*/ |
167 | 150 | ||
@@ -383,7 +366,7 @@ static int netwave_probe(struct pcmcia_device *link) | |||
383 | struct net_device *dev; | 366 | struct net_device *dev; |
384 | netwave_private *priv; | 367 | netwave_private *priv; |
385 | 368 | ||
386 | DEBUG(0, "netwave_attach()\n"); | 369 | dev_dbg(&link->dev, "netwave_attach()\n"); |
387 | 370 | ||
388 | /* Initialize the struct pcmcia_device structure */ | 371 | /* Initialize the struct pcmcia_device structure */ |
389 | dev = alloc_etherdev(sizeof(netwave_private)); | 372 | dev = alloc_etherdev(sizeof(netwave_private)); |
@@ -401,8 +384,7 @@ static int netwave_probe(struct pcmcia_device *link) | |||
401 | link->io.IOAddrLines = 5; | 384 | link->io.IOAddrLines = 5; |
402 | 385 | ||
403 | /* Interrupt setup */ | 386 | /* Interrupt setup */ |
404 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 387 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
405 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
406 | link->irq.Handler = &netwave_interrupt; | 388 | link->irq.Handler = &netwave_interrupt; |
407 | 389 | ||
408 | /* General socket configuration */ | 390 | /* General socket configuration */ |
@@ -421,8 +403,6 @@ static int netwave_probe(struct pcmcia_device *link) | |||
421 | 403 | ||
422 | dev->watchdog_timeo = TX_TIMEOUT; | 404 | dev->watchdog_timeo = TX_TIMEOUT; |
423 | 405 | ||
424 | link->irq.Instance = dev; | ||
425 | |||
426 | return netwave_pcmcia_config( link); | 406 | return netwave_pcmcia_config( link); |
427 | } /* netwave_attach */ | 407 | } /* netwave_attach */ |
428 | 408 | ||
@@ -438,7 +418,7 @@ static void netwave_detach(struct pcmcia_device *link) | |||
438 | { | 418 | { |
439 | struct net_device *dev = link->priv; | 419 | struct net_device *dev = link->priv; |
440 | 420 | ||
441 | DEBUG(0, "netwave_detach(0x%p)\n", link); | 421 | dev_dbg(&link->dev, "netwave_detach\n"); |
442 | 422 | ||
443 | netwave_release(link); | 423 | netwave_release(link); |
444 | 424 | ||
@@ -725,18 +705,15 @@ static const struct iw_handler_def netwave_handler_def = | |||
725 | * | 705 | * |
726 | */ | 706 | */ |
727 | 707 | ||
728 | #define CS_CHECK(fn, ret) \ | ||
729 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
730 | |||
731 | static int netwave_pcmcia_config(struct pcmcia_device *link) { | 708 | static int netwave_pcmcia_config(struct pcmcia_device *link) { |
732 | struct net_device *dev = link->priv; | 709 | struct net_device *dev = link->priv; |
733 | netwave_private *priv = netdev_priv(dev); | 710 | netwave_private *priv = netdev_priv(dev); |
734 | int i, j, last_ret, last_fn; | 711 | int i, j, ret; |
735 | win_req_t req; | 712 | win_req_t req; |
736 | memreq_t mem; | 713 | memreq_t mem; |
737 | u_char __iomem *ramBase = NULL; | 714 | u_char __iomem *ramBase = NULL; |
738 | 715 | ||
739 | DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link); | 716 | dev_dbg(&link->dev, "netwave_pcmcia_config\n"); |
740 | 717 | ||
741 | /* | 718 | /* |
742 | * Try allocating IO ports. This tries a few fixed addresses. | 719 | * Try allocating IO ports. This tries a few fixed addresses. |
@@ -749,22 +726,24 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { | |||
749 | if (i == 0) | 726 | if (i == 0) |
750 | break; | 727 | break; |
751 | } | 728 | } |
752 | if (i != 0) { | 729 | if (i != 0) |
753 | cs_error(link, RequestIO, i); | ||
754 | goto failed; | 730 | goto failed; |
755 | } | ||
756 | 731 | ||
757 | /* | 732 | /* |
758 | * Now allocate an interrupt line. Note that this does not | 733 | * Now allocate an interrupt line. Note that this does not |
759 | * actually assign a handler to the interrupt. | 734 | * actually assign a handler to the interrupt. |
760 | */ | 735 | */ |
761 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 736 | ret = pcmcia_request_irq(link, &link->irq); |
737 | if (ret) | ||
738 | goto failed; | ||
762 | 739 | ||
763 | /* | 740 | /* |
764 | * This actually configures the PCMCIA socket -- setting up | 741 | * This actually configures the PCMCIA socket -- setting up |
765 | * the I/O windows and the interrupt mapping. | 742 | * the I/O windows and the interrupt mapping. |
766 | */ | 743 | */ |
767 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 744 | ret = pcmcia_request_configuration(link, &link->conf); |
745 | if (ret) | ||
746 | goto failed; | ||
768 | 747 | ||
769 | /* | 748 | /* |
770 | * Allocate a 32K memory window. Note that the struct pcmcia_device | 749 | * Allocate a 32K memory window. Note that the struct pcmcia_device |
@@ -772,14 +751,18 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { | |||
772 | * device needs several windows, you'll need to keep track of | 751 | * device needs several windows, you'll need to keep track of |
773 | * the handles in your private data structure, dev->priv. | 752 | * the handles in your private data structure, dev->priv. |
774 | */ | 753 | */ |
775 | DEBUG(1, "Setting mem speed of %d\n", mem_speed); | 754 | dev_dbg(&link->dev, "Setting mem speed of %d\n", mem_speed); |
776 | 755 | ||
777 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE; | 756 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE; |
778 | req.Base = 0; req.Size = 0x8000; | 757 | req.Base = 0; req.Size = 0x8000; |
779 | req.AccessSpeed = mem_speed; | 758 | req.AccessSpeed = mem_speed; |
780 | CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); | 759 | ret = pcmcia_request_window(link, &req, &link->win); |
760 | if (ret) | ||
761 | goto failed; | ||
781 | mem.CardOffset = 0x20000; mem.Page = 0; | 762 | mem.CardOffset = 0x20000; mem.Page = 0; |
782 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); | 763 | ret = pcmcia_map_mem_page(link, link->win, &mem); |
764 | if (ret) | ||
765 | goto failed; | ||
783 | 766 | ||
784 | /* Store base address of the common window frame */ | 767 | /* Store base address of the common window frame */ |
785 | ramBase = ioremap(req.Base, 0x8000); | 768 | ramBase = ioremap(req.Base, 0x8000); |
@@ -787,7 +770,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { | |||
787 | 770 | ||
788 | dev->irq = link->irq.AssignedIRQ; | 771 | dev->irq = link->irq.AssignedIRQ; |
789 | dev->base_addr = link->io.BasePort1; | 772 | dev->base_addr = link->io.BasePort1; |
790 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 773 | SET_NETDEV_DEV(dev, &link->dev); |
791 | 774 | ||
792 | if (register_netdev(dev) != 0) { | 775 | if (register_netdev(dev) != 0) { |
793 | printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n"); | 776 | printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n"); |
@@ -818,8 +801,6 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { | |||
818 | get_uint16(ramBase + NETWAVE_EREG_ARW+2)); | 801 | get_uint16(ramBase + NETWAVE_EREG_ARW+2)); |
819 | return 0; | 802 | return 0; |
820 | 803 | ||
821 | cs_failed: | ||
822 | cs_error(link, last_fn, last_ret); | ||
823 | failed: | 804 | failed: |
824 | netwave_release(link); | 805 | netwave_release(link); |
825 | return -ENODEV; | 806 | return -ENODEV; |
@@ -837,7 +818,7 @@ static void netwave_release(struct pcmcia_device *link) | |||
837 | struct net_device *dev = link->priv; | 818 | struct net_device *dev = link->priv; |
838 | netwave_private *priv = netdev_priv(dev); | 819 | netwave_private *priv = netdev_priv(dev); |
839 | 820 | ||
840 | DEBUG(0, "netwave_release(0x%p)\n", link); | 821 | dev_dbg(&link->dev, "netwave_release\n"); |
841 | 822 | ||
842 | pcmcia_disable_device(link); | 823 | pcmcia_disable_device(link); |
843 | if (link->win) | 824 | if (link->win) |
@@ -892,7 +873,7 @@ static void netwave_reset(struct net_device *dev) { | |||
892 | u_char __iomem *ramBase = priv->ramBase; | 873 | u_char __iomem *ramBase = priv->ramBase; |
893 | unsigned int iobase = dev->base_addr; | 874 | unsigned int iobase = dev->base_addr; |
894 | 875 | ||
895 | DEBUG(0, "netwave_reset: Done with hardware reset\n"); | 876 | pr_debug("netwave_reset: Done with hardware reset\n"); |
896 | 877 | ||
897 | priv->timeoutCounter = 0; | 878 | priv->timeoutCounter = 0; |
898 | 879 | ||
@@ -988,7 +969,7 @@ static int netwave_hw_xmit(unsigned char* data, int len, | |||
988 | 969 | ||
989 | dev->stats.tx_bytes += len; | 970 | dev->stats.tx_bytes += len; |
990 | 971 | ||
991 | DEBUG(3, "Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n", | 972 | pr_debug("Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n", |
992 | readb(ramBase + NETWAVE_EREG_SPCQ), | 973 | readb(ramBase + NETWAVE_EREG_SPCQ), |
993 | readb(ramBase + NETWAVE_EREG_SPU), | 974 | readb(ramBase + NETWAVE_EREG_SPU), |
994 | readb(ramBase + NETWAVE_EREG_LIF), | 975 | readb(ramBase + NETWAVE_EREG_LIF), |
@@ -1000,7 +981,7 @@ static int netwave_hw_xmit(unsigned char* data, int len, | |||
1000 | MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2); | 981 | MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2); |
1001 | DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4); | 982 | DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4); |
1002 | 983 | ||
1003 | DEBUG(3, "TxFreeList %x, MaxData %x, DataOffset %x\n", | 984 | pr_debug("TxFreeList %x, MaxData %x, DataOffset %x\n", |
1004 | TxFreeList, MaxData, DataOffset); | 985 | TxFreeList, MaxData, DataOffset); |
1005 | 986 | ||
1006 | /* Copy packet to the adapter fragment buffers */ | 987 | /* Copy packet to the adapter fragment buffers */ |
@@ -1088,7 +1069,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id) | |||
1088 | status = inb(iobase + NETWAVE_REG_ASR); | 1069 | status = inb(iobase + NETWAVE_REG_ASR); |
1089 | 1070 | ||
1090 | if (!pcmcia_dev_present(link)) { | 1071 | if (!pcmcia_dev_present(link)) { |
1091 | DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x " | 1072 | pr_debug("netwave_interrupt: Interrupt with status 0x%x " |
1092 | "from removed or suspended card!\n", status); | 1073 | "from removed or suspended card!\n", status); |
1093 | break; | 1074 | break; |
1094 | } | 1075 | } |
@@ -1132,7 +1113,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id) | |||
1132 | int txStatus; | 1113 | int txStatus; |
1133 | 1114 | ||
1134 | txStatus = readb(ramBase + NETWAVE_EREG_TSER); | 1115 | txStatus = readb(ramBase + NETWAVE_EREG_TSER); |
1135 | DEBUG(3, "Transmit done. TSER = %x id %x\n", | 1116 | pr_debug("Transmit done. TSER = %x id %x\n", |
1136 | txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1)); | 1117 | txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1)); |
1137 | 1118 | ||
1138 | if (txStatus & 0x20) { | 1119 | if (txStatus & 0x20) { |
@@ -1156,7 +1137,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id) | |||
1156 | * TxGU and TxNOAP is set. (Those are the only ones | 1137 | * TxGU and TxNOAP is set. (Those are the only ones |
1157 | * to set TxErr). | 1138 | * to set TxErr). |
1158 | */ | 1139 | */ |
1159 | DEBUG(3, "netwave_interrupt: TxDN with error status %x\n", | 1140 | pr_debug("netwave_interrupt: TxDN with error status %x\n", |
1160 | txStatus); | 1141 | txStatus); |
1161 | 1142 | ||
1162 | /* Clear out TxGU, TxNOAP, TxErr and TxTrys */ | 1143 | /* Clear out TxGU, TxNOAP, TxErr and TxTrys */ |
@@ -1164,7 +1145,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id) | |||
1164 | writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4); | 1145 | writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4); |
1165 | ++dev->stats.tx_errors; | 1146 | ++dev->stats.tx_errors; |
1166 | } | 1147 | } |
1167 | DEBUG(3, "New status is TSER %x ASR %x\n", | 1148 | pr_debug("New status is TSER %x ASR %x\n", |
1168 | readb(ramBase + NETWAVE_EREG_TSER), | 1149 | readb(ramBase + NETWAVE_EREG_TSER), |
1169 | inb(iobase + NETWAVE_REG_ASR)); | 1150 | inb(iobase + NETWAVE_REG_ASR)); |
1170 | 1151 | ||
@@ -1172,7 +1153,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id) | |||
1172 | } | 1153 | } |
1173 | /* TxBA, this would trigger on all error packets received */ | 1154 | /* TxBA, this would trigger on all error packets received */ |
1174 | /* if (status & 0x01) { | 1155 | /* if (status & 0x01) { |
1175 | DEBUG(4, "Transmit buffers available, %x\n", status); | 1156 | pr_debug("Transmit buffers available, %x\n", status); |
1176 | } | 1157 | } |
1177 | */ | 1158 | */ |
1178 | } | 1159 | } |
@@ -1190,7 +1171,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id) | |||
1190 | */ | 1171 | */ |
1191 | static void netwave_watchdog(struct net_device *dev) { | 1172 | static void netwave_watchdog(struct net_device *dev) { |
1192 | 1173 | ||
1193 | DEBUG(1, "%s: netwave_watchdog: watchdog timer expired\n", dev->name); | 1174 | pr_debug("%s: netwave_watchdog: watchdog timer expired\n", dev->name); |
1194 | netwave_reset(dev); | 1175 | netwave_reset(dev); |
1195 | dev->trans_start = jiffies; | 1176 | dev->trans_start = jiffies; |
1196 | netif_wake_queue(dev); | 1177 | netif_wake_queue(dev); |
@@ -1211,7 +1192,7 @@ static int netwave_rx(struct net_device *dev) | |||
1211 | int i; | 1192 | int i; |
1212 | u_char *ptr; | 1193 | u_char *ptr; |
1213 | 1194 | ||
1214 | DEBUG(3, "xinw_rx: Receiving ... \n"); | 1195 | pr_debug("xinw_rx: Receiving ... \n"); |
1215 | 1196 | ||
1216 | /* Receive max 10 packets for now. */ | 1197 | /* Receive max 10 packets for now. */ |
1217 | for (i = 0; i < 10; i++) { | 1198 | for (i = 0; i < 10; i++) { |
@@ -1237,7 +1218,7 @@ static int netwave_rx(struct net_device *dev) | |||
1237 | 1218 | ||
1238 | skb = dev_alloc_skb(rcvLen+5); | 1219 | skb = dev_alloc_skb(rcvLen+5); |
1239 | if (skb == NULL) { | 1220 | if (skb == NULL) { |
1240 | DEBUG(1, "netwave_rx: Could not allocate an sk_buff of " | 1221 | pr_debug("netwave_rx: Could not allocate an sk_buff of " |
1241 | "length %d\n", rcvLen); | 1222 | "length %d\n", rcvLen); |
1242 | ++dev->stats.rx_dropped; | 1223 | ++dev->stats.rx_dropped; |
1243 | /* Tell the adapter to skip the packet */ | 1224 | /* Tell the adapter to skip the packet */ |
@@ -1279,7 +1260,7 @@ static int netwave_rx(struct net_device *dev) | |||
1279 | wait_WOC(iobase); | 1260 | wait_WOC(iobase); |
1280 | writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0); | 1261 | writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0); |
1281 | writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1); | 1262 | writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1); |
1282 | DEBUG(3, "Packet reception ok\n"); | 1263 | pr_debug("Packet reception ok\n"); |
1283 | } | 1264 | } |
1284 | return 0; | 1265 | return 0; |
1285 | } | 1266 | } |
@@ -1288,7 +1269,7 @@ static int netwave_open(struct net_device *dev) { | |||
1288 | netwave_private *priv = netdev_priv(dev); | 1269 | netwave_private *priv = netdev_priv(dev); |
1289 | struct pcmcia_device *link = priv->p_dev; | 1270 | struct pcmcia_device *link = priv->p_dev; |
1290 | 1271 | ||
1291 | DEBUG(1, "netwave_open: starting.\n"); | 1272 | dev_dbg(&link->dev, "netwave_open: starting.\n"); |
1292 | 1273 | ||
1293 | if (!pcmcia_dev_present(link)) | 1274 | if (!pcmcia_dev_present(link)) |
1294 | return -ENODEV; | 1275 | return -ENODEV; |
@@ -1305,7 +1286,7 @@ static int netwave_close(struct net_device *dev) { | |||
1305 | netwave_private *priv = netdev_priv(dev); | 1286 | netwave_private *priv = netdev_priv(dev); |
1306 | struct pcmcia_device *link = priv->p_dev; | 1287 | struct pcmcia_device *link = priv->p_dev; |
1307 | 1288 | ||
1308 | DEBUG(1, "netwave_close: finishing.\n"); | 1289 | dev_dbg(&link->dev, "netwave_close: finishing.\n"); |
1309 | 1290 | ||
1310 | link->open--; | 1291 | link->open--; |
1311 | netif_stop_queue(dev); | 1292 | netif_stop_queue(dev); |
@@ -1358,11 +1339,11 @@ static void set_multicast_list(struct net_device *dev) | |||
1358 | u_char rcvMode = 0; | 1339 | u_char rcvMode = 0; |
1359 | 1340 | ||
1360 | #ifdef PCMCIA_DEBUG | 1341 | #ifdef PCMCIA_DEBUG |
1361 | if (pc_debug > 2) { | 1342 | { |
1362 | static int old; | 1343 | static int old; |
1363 | if (old != dev->mc_count) { | 1344 | if (old != dev->mc_count) { |
1364 | old = dev->mc_count; | 1345 | old = dev->mc_count; |
1365 | DEBUG(0, "%s: setting Rx mode to %d addresses.\n", | 1346 | pr_debug("%s: setting Rx mode to %d addresses.\n", |
1366 | dev->name, dev->mc_count); | 1347 | dev->name, dev->mc_count); |
1367 | } | 1348 | } |
1368 | } | 1349 | } |
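Two further simplifications recur in the netwave hunks. First, the IRQ setup loses IRQ_HANDLE_PRESENT, irq.IRQInfo1 and irq.Instance; only the sharing attribute and the handler pointer remain. Second, DEBUG(n, ...) calls in paths without a struct device conveniently in scope (interrupt, receive, transmit) become pr_debug(), while paths holding a pcmcia_device use dev_dbg(). A rough sketch combining both; the handler and its messages are invented:

    #include <linux/interrupt.h>
    #include <linux/printk.h>
    #include <pcmcia/ds.h>

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
            /* pr_debug(): compiled out unless debugging is enabled, and
             * usable where no struct device pointer is at hand. */
            pr_debug("example_interrupt: irq %d\n", irq);
            return IRQ_HANDLED;
    }

    static void example_irq_setup(struct pcmcia_device *link)
    {
            /* was: IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT, plus
             *      link->irq.IRQInfo1 = IRQ_LEVEL_ID and link->irq.Instance */
            link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
            link->irq.Handler = example_interrupt;
    }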
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c index 38c1c9d2abb8..f27bb8367c98 100644 --- a/drivers/net/wireless/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/orinoco/orinoco_cs.c | |||
@@ -109,7 +109,7 @@ orinoco_cs_probe(struct pcmcia_device *link) | |||
109 | struct orinoco_private *priv; | 109 | struct orinoco_private *priv; |
110 | struct orinoco_pccard *card; | 110 | struct orinoco_pccard *card; |
111 | 111 | ||
112 | priv = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), | 112 | priv = alloc_orinocodev(sizeof(*card), &link->dev, |
113 | orinoco_cs_hard_reset, NULL); | 113 | orinoco_cs_hard_reset, NULL); |
114 | if (!priv) | 114 | if (!priv) |
115 | return -ENOMEM; | 115 | return -ENOMEM; |
@@ -120,10 +120,8 @@ orinoco_cs_probe(struct pcmcia_device *link) | |||
120 | link->priv = priv; | 120 | link->priv = priv; |
121 | 121 | ||
122 | /* Interrupt setup */ | 122 | /* Interrupt setup */ |
123 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 123 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
124 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
125 | link->irq.Handler = orinoco_interrupt; | 124 | link->irq.Handler = orinoco_interrupt; |
126 | link->irq.Instance = priv; | ||
127 | 125 | ||
128 | /* General socket configuration defaults can go here. In this | 126 | /* General socket configuration defaults can go here. In this |
129 | * client, we assume very little, and rely on the CIS for | 127 | * client, we assume very little, and rely on the CIS for |
@@ -160,12 +158,6 @@ static void orinoco_cs_detach(struct pcmcia_device *link) | |||
160 | * device available to the system. | 158 | * device available to the system. |
161 | */ | 159 | */ |
162 | 160 | ||
163 | #define CS_CHECK(fn, ret) do { \ | ||
164 | last_fn = (fn); \ | ||
165 | if ((last_ret = (ret)) != 0) \ | ||
166 | goto cs_failed; \ | ||
167 | } while (0) | ||
168 | |||
169 | static int orinoco_cs_config_check(struct pcmcia_device *p_dev, | 161 | static int orinoco_cs_config_check(struct pcmcia_device *p_dev, |
170 | cistpl_cftable_entry_t *cfg, | 162 | cistpl_cftable_entry_t *cfg, |
171 | cistpl_cftable_entry_t *dflt, | 163 | cistpl_cftable_entry_t *dflt, |
@@ -240,7 +232,7 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
240 | struct orinoco_private *priv = link->priv; | 232 | struct orinoco_private *priv = link->priv; |
241 | struct orinoco_pccard *card = priv->card; | 233 | struct orinoco_pccard *card = priv->card; |
242 | hermes_t *hw = &priv->hw; | 234 | hermes_t *hw = &priv->hw; |
243 | int last_fn, last_ret; | 235 | int ret; |
244 | void __iomem *mem; | 236 | void __iomem *mem; |
245 | 237 | ||
246 | /* | 238 | /* |
@@ -257,13 +249,12 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
257 | * and most client drivers will only use the CIS to fill in | 249 | * and most client drivers will only use the CIS to fill in |
258 | * implementation-defined details. | 250 | * implementation-defined details. |
259 | */ | 251 | */ |
260 | last_ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL); | 252 | ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL); |
261 | if (last_ret) { | 253 | if (ret) { |
262 | if (!ignore_cis_vcc) | 254 | if (!ignore_cis_vcc) |
263 | printk(KERN_ERR PFX "GetNextTuple(): No matching " | 255 | printk(KERN_ERR PFX "GetNextTuple(): No matching " |
264 | "CIS configuration. Maybe you need the " | 256 | "CIS configuration. Maybe you need the " |
265 | "ignore_cis_vcc=1 parameter.\n"); | 257 | "ignore_cis_vcc=1 parameter.\n"); |
266 | cs_error(link, RequestIO, last_ret); | ||
267 | goto failed; | 258 | goto failed; |
268 | } | 259 | } |
269 | 260 | ||
@@ -272,14 +263,16 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
272 | * a handler to the interrupt, unless the 'Handler' member of | 263 | * a handler to the interrupt, unless the 'Handler' member of |
273 | * the irq structure is initialized. | 264 | * the irq structure is initialized. |
274 | */ | 265 | */ |
275 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 266 | ret = pcmcia_request_irq(link, &link->irq); |
267 | if (ret) | ||
268 | goto failed; | ||
276 | 269 | ||
277 | /* We initialize the hermes structure before completing PCMCIA | 270 | /* We initialize the hermes structure before completing PCMCIA |
278 | * configuration just in case the interrupt handler gets | 271 | * configuration just in case the interrupt handler gets |
279 | * called. */ | 272 | * called. */ |
280 | mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); | 273 | mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); |
281 | if (!mem) | 274 | if (!mem) |
282 | goto cs_failed; | 275 | goto failed; |
283 | 276 | ||
284 | hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); | 277 | hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); |
285 | 278 | ||
@@ -288,8 +281,9 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
288 | * the I/O windows and the interrupt mapping, and putting the | 281 | * the I/O windows and the interrupt mapping, and putting the |
289 | * card and host interface into "Memory and IO" mode. | 282 | * card and host interface into "Memory and IO" mode. |
290 | */ | 283 | */ |
291 | CS_CHECK(RequestConfiguration, | 284 | ret = pcmcia_request_configuration(link, &link->conf); |
292 | pcmcia_request_configuration(link, &link->conf)); | 285 | if (ret) |
286 | goto failed; | ||
293 | 287 | ||
294 | /* Ok, we have the configuration, prepare to register the netdev */ | 288 | /* Ok, we have the configuration, prepare to register the netdev */ |
295 | card->node.major = card->node.minor = 0; | 289 | card->node.major = card->node.minor = 0; |
@@ -315,9 +309,6 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
315 | * net_device has been registered */ | 309 | * net_device has been registered */ |
316 | return 0; | 310 | return 0; |
317 | 311 | ||
318 | cs_failed: | ||
319 | cs_error(link, last_fn, last_ret); | ||
320 | |||
321 | failed: | 312 | failed: |
322 | orinoco_cs_release(link); | 313 | orinoco_cs_release(link); |
323 | return -ENODEV; | 314 | return -ENODEV; |
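Another substitution visible throughout this series (orinoco, libertas, hostap, netwave): the handle_to_dev(link) macro is gone, and drivers take the struct device embedded in struct pcmcia_device directly. A one-function sketch of the common uses; the wrapper function is illustrative only:

    #include <linux/netdevice.h>
    #include <pcmcia/ds.h>

    static void example_wire_up_device(struct pcmcia_device *link,
                                       struct net_device *netdev)
    {
            /* was: SET_NETDEV_DEV(netdev, &handle_to_dev(link)) */
            SET_NETDEV_DEV(netdev, &link->dev);

            /* debug output is tied to the same struct device */
            dev_dbg(&link->dev, "configured\n");
    }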
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c index c361310b885d..59bda240fdc2 100644 --- a/drivers/net/wireless/orinoco/spectrum_cs.c +++ b/drivers/net/wireless/orinoco/spectrum_cs.c | |||
@@ -73,9 +73,6 @@ static void spectrum_cs_release(struct pcmcia_device *link); | |||
73 | #define HCR_MEM16 0x10 /* memory width bit, should be preserved */ | 73 | #define HCR_MEM16 0x10 /* memory width bit, should be preserved */ |
74 | 74 | ||
75 | 75 | ||
76 | #define CS_CHECK(fn, ret) \ | ||
77 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
78 | |||
79 | /* | 76 | /* |
80 | * Reset the card using configuration registers COR and CCSR. | 77 | * Reset the card using configuration registers COR and CCSR. |
81 | * If IDLE is 1, stop the firmware, so that it can be safely rewritten. | 78 | * If IDLE is 1, stop the firmware, so that it can be safely rewritten. |
@@ -83,7 +80,7 @@ static void spectrum_cs_release(struct pcmcia_device *link); | |||
83 | static int | 80 | static int |
84 | spectrum_reset(struct pcmcia_device *link, int idle) | 81 | spectrum_reset(struct pcmcia_device *link, int idle) |
85 | { | 82 | { |
86 | int last_ret, last_fn; | 83 | int ret; |
87 | conf_reg_t reg; | 84 | conf_reg_t reg; |
88 | u_int save_cor; | 85 | u_int save_cor; |
89 | 86 | ||
@@ -95,23 +92,26 @@ spectrum_reset(struct pcmcia_device *link, int idle) | |||
95 | reg.Function = 0; | 92 | reg.Function = 0; |
96 | reg.Action = CS_READ; | 93 | reg.Action = CS_READ; |
97 | reg.Offset = CISREG_COR; | 94 | reg.Offset = CISREG_COR; |
98 | CS_CHECK(AccessConfigurationRegister, | 95 | ret = pcmcia_access_configuration_register(link, ®); |
99 | pcmcia_access_configuration_register(link, ®)); | 96 | if (ret) |
97 | goto failed; | ||
100 | save_cor = reg.Value; | 98 | save_cor = reg.Value; |
101 | 99 | ||
102 | /* Soft-Reset card */ | 100 | /* Soft-Reset card */ |
103 | reg.Action = CS_WRITE; | 101 | reg.Action = CS_WRITE; |
104 | reg.Offset = CISREG_COR; | 102 | reg.Offset = CISREG_COR; |
105 | reg.Value = (save_cor | COR_SOFT_RESET); | 103 | reg.Value = (save_cor | COR_SOFT_RESET); |
106 | CS_CHECK(AccessConfigurationRegister, | 104 | ret = pcmcia_access_configuration_register(link, ®); |
107 | pcmcia_access_configuration_register(link, ®)); | 105 | if (ret) |
106 | goto failed; | ||
108 | udelay(1000); | 107 | udelay(1000); |
109 | 108 | ||
110 | /* Read CCSR */ | 109 | /* Read CCSR */ |
111 | reg.Action = CS_READ; | 110 | reg.Action = CS_READ; |
112 | reg.Offset = CISREG_CCSR; | 111 | reg.Offset = CISREG_CCSR; |
113 | CS_CHECK(AccessConfigurationRegister, | 112 | ret = pcmcia_access_configuration_register(link, ®); |
114 | pcmcia_access_configuration_register(link, ®)); | 113 | if (ret) |
114 | goto failed; | ||
115 | 115 | ||
116 | /* | 116 | /* |
117 | * Start or stop the firmware. Memory width bit should be | 117 | * Start or stop the firmware. Memory width bit should be |
@@ -120,21 +120,22 @@ spectrum_reset(struct pcmcia_device *link, int idle) | |||
120 | reg.Action = CS_WRITE; | 120 | reg.Action = CS_WRITE; |
121 | reg.Offset = CISREG_CCSR; | 121 | reg.Offset = CISREG_CCSR; |
122 | reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16); | 122 | reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16); |
123 | CS_CHECK(AccessConfigurationRegister, | 123 | ret = pcmcia_access_configuration_register(link, ®); |
124 | pcmcia_access_configuration_register(link, ®)); | 124 | if (ret) |
125 | goto failed; | ||
125 | udelay(1000); | 126 | udelay(1000); |
126 | 127 | ||
127 | /* Restore original COR configuration index */ | 128 | /* Restore original COR configuration index */ |
128 | reg.Action = CS_WRITE; | 129 | reg.Action = CS_WRITE; |
129 | reg.Offset = CISREG_COR; | 130 | reg.Offset = CISREG_COR; |
130 | reg.Value = (save_cor & ~COR_SOFT_RESET); | 131 | reg.Value = (save_cor & ~COR_SOFT_RESET); |
131 | CS_CHECK(AccessConfigurationRegister, | 132 | ret = pcmcia_access_configuration_register(link, ®); |
132 | pcmcia_access_configuration_register(link, ®)); | 133 | if (ret) |
134 | goto failed; | ||
133 | udelay(1000); | 135 | udelay(1000); |
134 | return 0; | 136 | return 0; |
135 | 137 | ||
136 | cs_failed: | 138 | failed: |
137 | cs_error(link, last_fn, last_ret); | ||
138 | return -ENODEV; | 139 | return -ENODEV; |
139 | } | 140 | } |
140 | 141 | ||
@@ -181,7 +182,7 @@ spectrum_cs_probe(struct pcmcia_device *link) | |||
181 | struct orinoco_private *priv; | 182 | struct orinoco_private *priv; |
182 | struct orinoco_pccard *card; | 183 | struct orinoco_pccard *card; |
183 | 184 | ||
184 | priv = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), | 185 | priv = alloc_orinocodev(sizeof(*card), &link->dev, |
185 | spectrum_cs_hard_reset, | 186 | spectrum_cs_hard_reset, |
186 | spectrum_cs_stop_firmware); | 187 | spectrum_cs_stop_firmware); |
187 | if (!priv) | 188 | if (!priv) |
@@ -193,10 +194,8 @@ spectrum_cs_probe(struct pcmcia_device *link) | |||
193 | link->priv = priv; | 194 | link->priv = priv; |
194 | 195 | ||
195 | /* Interrupt setup */ | 196 | /* Interrupt setup */ |
196 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 197 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
197 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
198 | link->irq.Handler = orinoco_interrupt; | 198 | link->irq.Handler = orinoco_interrupt; |
199 | link->irq.Instance = priv; | ||
200 | 199 | ||
201 | /* General socket configuration defaults can go here. In this | 200 | /* General socket configuration defaults can go here. In this |
202 | * client, we assume very little, and rely on the CIS for | 201 | * client, we assume very little, and rely on the CIS for |
@@ -307,7 +306,7 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
307 | struct orinoco_private *priv = link->priv; | 306 | struct orinoco_private *priv = link->priv; |
308 | struct orinoco_pccard *card = priv->card; | 307 | struct orinoco_pccard *card = priv->card; |
309 | hermes_t *hw = &priv->hw; | 308 | hermes_t *hw = &priv->hw; |
310 | int last_fn, last_ret; | 309 | int ret; |
311 | void __iomem *mem; | 310 | void __iomem *mem; |
312 | 311 | ||
313 | /* | 312 | /* |
@@ -324,13 +323,12 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
324 | * and most client drivers will only use the CIS to fill in | 323 | * and most client drivers will only use the CIS to fill in |
325 | * implementation-defined details. | 324 | * implementation-defined details. |
326 | */ | 325 | */ |
327 | last_ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); | 326 | ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); |
328 | if (last_ret) { | 327 | if (ret) { |
329 | if (!ignore_cis_vcc) | 328 | if (!ignore_cis_vcc) |
330 | printk(KERN_ERR PFX "GetNextTuple(): No matching " | 329 | printk(KERN_ERR PFX "GetNextTuple(): No matching " |
331 | "CIS configuration. Maybe you need the " | 330 | "CIS configuration. Maybe you need the " |
332 | "ignore_cis_vcc=1 parameter.\n"); | 331 | "ignore_cis_vcc=1 parameter.\n"); |
333 | cs_error(link, RequestIO, last_ret); | ||
334 | goto failed; | 332 | goto failed; |
335 | } | 333 | } |
336 | 334 | ||
@@ -339,14 +337,16 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
339 | * a handler to the interrupt, unless the 'Handler' member of | 337 | * a handler to the interrupt, unless the 'Handler' member of |
340 | * the irq structure is initialized. | 338 | * the irq structure is initialized. |
341 | */ | 339 | */ |
342 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 340 | ret = pcmcia_request_irq(link, &link->irq); |
341 | if (ret) | ||
342 | goto failed; | ||
343 | 343 | ||
344 | /* We initialize the hermes structure before completing PCMCIA | 344 | /* We initialize the hermes structure before completing PCMCIA |
345 | * configuration just in case the interrupt handler gets | 345 | * configuration just in case the interrupt handler gets |
346 | * called. */ | 346 | * called. */ |
347 | mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); | 347 | mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); |
348 | if (!mem) | 348 | if (!mem) |
349 | goto cs_failed; | 349 | goto failed; |
350 | 350 | ||
351 | hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); | 351 | hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); |
352 | 352 | ||
@@ -355,8 +355,9 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
355 | * the I/O windows and the interrupt mapping, and putting the | 355 | * the I/O windows and the interrupt mapping, and putting the |
356 | * card and host interface into "Memory and IO" mode. | 356 | * card and host interface into "Memory and IO" mode. |
357 | */ | 357 | */ |
358 | CS_CHECK(RequestConfiguration, | 358 | ret = pcmcia_request_configuration(link, &link->conf); |
359 | pcmcia_request_configuration(link, &link->conf)); | 359 | if (ret) |
360 | goto failed; | ||
360 | 361 | ||
361 | /* Ok, we have the configuration, prepare to register the netdev */ | 362 | /* Ok, we have the configuration, prepare to register the netdev */ |
362 | card->node.major = card->node.minor = 0; | 363 | card->node.major = card->node.minor = 0; |
@@ -386,9 +387,6 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
386 | * net_device has been registered */ | 387 | * net_device has been registered */ |
387 | return 0; | 388 | return 0; |
388 | 389 | ||
389 | cs_failed: | ||
390 | cs_error(link, last_fn, last_ret); | ||
391 | |||
392 | failed: | 390 | failed: |
393 | spectrum_cs_release(link); | 391 | spectrum_cs_release(link); |
394 | return -ENODEV; | 392 | return -ENODEV; |
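
Editor's note: the spectrum_cs hunks above all follow one conversion pattern, replacing the CS_CHECK() macro (which hid a goto to cs_failed plus a cs_error() report) with explicit return-value checks and a single "failed" label. The following is a minimal, compilable user-space sketch of that pattern only; the example_* functions are hypothetical stand-ins for the pcmcia_* calls, not real kernel APIs.

#include <stdio.h>

struct example_link { int configured; };

/* pretend resource requests: return 0 on success, nonzero on failure */
static int example_request_irq(struct example_link *link)
{
	(void)link;
	return 0;
}
static int example_request_configuration(struct example_link *link)
{
	link->configured = 1;
	return 0;
}
static void example_release(struct example_link *link)
{
	link->configured = 0;
}

static int example_config(struct example_link *link)
{
	int ret;

	ret = example_request_irq(link);           /* was CS_CHECK(RequestIRQ, ...) */
	if (ret)
		goto failed;

	ret = example_request_configuration(link); /* was CS_CHECK(RequestConfiguration, ...) */
	if (ret)
		goto failed;

	return 0;

failed:	/* one cleanup path; the old cs_error() report is gone */
	example_release(link);
	return -1;
}

int main(void)
{
	struct example_link link = { 0 };
	printf("config: %d\n", example_config(&link));
	return 0;
}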
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 1c88c2ea59aa..5b8e3e4cdd9f 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c | |||
@@ -71,25 +71,7 @@ typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */ | |||
71 | #include "rayctl.h" | 71 | #include "rayctl.h" |
72 | #include "ray_cs.h" | 72 | #include "ray_cs.h" |
73 | 73 | ||
74 | /* All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
75 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
76 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
77 | be present but disabled -- but it can then be enabled for specific | ||
78 | modules at load time with a 'pc_debug=#' option to insmod. | ||
79 | */ | ||
80 | 74 | ||
81 | #ifdef RAYLINK_DEBUG | ||
82 | #define PCMCIA_DEBUG RAYLINK_DEBUG | ||
83 | #endif | ||
84 | #ifdef PCMCIA_DEBUG | ||
85 | static int ray_debug; | ||
86 | static int pc_debug = PCMCIA_DEBUG; | ||
87 | module_param(pc_debug, int, 0); | ||
88 | /* #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); */ | ||
89 | #define DEBUG(n, args...) if (pc_debug > (n)) printk(args); | ||
90 | #else | ||
91 | #define DEBUG(n, args...) | ||
92 | #endif | ||
93 | /** Prototypes based on PCMCIA skeleton driver *******************************/ | 75 | /** Prototypes based on PCMCIA skeleton driver *******************************/ |
94 | static int ray_config(struct pcmcia_device *link); | 76 | static int ray_config(struct pcmcia_device *link); |
95 | static void ray_release(struct pcmcia_device *link); | 77 | static void ray_release(struct pcmcia_device *link); |
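
Editor's note: the hunk above removes the driver-private PCMCIA_DEBUG / pc_debug machinery; the remaining ray_cs hunks then replace DEBUG(level, ...) with dev_dbg() where a struct device is at hand and pr_debug() where it is not. A small, hedged user-space sketch of what that buys is below; DEBUG and pr_debug_sketch here are simplified stand-ins, not the kernel macros.

#include <stdio.h>

/* old ray_cs style: per-driver verbosity level checked at every call site */
static int pc_debug = 2;
#define DEBUG(n, ...) do { if (pc_debug > (n)) printf(__VA_ARGS__); } while (0)

/* stand-in for the generic helper: compiled in or compiled out centrally */
#ifdef WANT_DEBUG
#define pr_debug_sketch(...) printf(__VA_ARGS__)
#else
#define pr_debug_sketch(...) do { } while (0)
#endif

int main(void)
{
	DEBUG(1, "old style: shown only when pc_debug > 1\n");
	pr_debug_sketch("new style: controlled by the build / dynamic debug, no pc_debug\n");
	return 0;
}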
@@ -325,7 +307,7 @@ static int ray_probe(struct pcmcia_device *p_dev) | |||
325 | ray_dev_t *local; | 307 | ray_dev_t *local; |
326 | struct net_device *dev; | 308 | struct net_device *dev; |
327 | 309 | ||
328 | DEBUG(1, "ray_attach()\n"); | 310 | dev_dbg(&p_dev->dev, "ray_attach()\n"); |
329 | 311 | ||
330 | /* Allocate space for private device-specific data */ | 312 | /* Allocate space for private device-specific data */ |
331 | dev = alloc_etherdev(sizeof(ray_dev_t)); | 313 | dev = alloc_etherdev(sizeof(ray_dev_t)); |
@@ -341,8 +323,7 @@ static int ray_probe(struct pcmcia_device *p_dev) | |||
341 | p_dev->io.IOAddrLines = 5; | 323 | p_dev->io.IOAddrLines = 5; |
342 | 324 | ||
343 | /* Interrupt setup. For PCMCIA, driver takes what's given */ | 325 | /* Interrupt setup. For PCMCIA, driver takes what's given */ |
344 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 326 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
345 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
346 | p_dev->irq.Handler = &ray_interrupt; | 327 | p_dev->irq.Handler = &ray_interrupt; |
347 | 328 | ||
348 | /* General socket configuration */ | 329 | /* General socket configuration */ |
@@ -351,13 +332,12 @@ static int ray_probe(struct pcmcia_device *p_dev) | |||
351 | p_dev->conf.ConfigIndex = 1; | 332 | p_dev->conf.ConfigIndex = 1; |
352 | 333 | ||
353 | p_dev->priv = dev; | 334 | p_dev->priv = dev; |
354 | p_dev->irq.Instance = dev; | ||
355 | 335 | ||
356 | local->finder = p_dev; | 336 | local->finder = p_dev; |
357 | local->card_status = CARD_INSERTED; | 337 | local->card_status = CARD_INSERTED; |
358 | local->authentication_state = UNAUTHENTICATED; | 338 | local->authentication_state = UNAUTHENTICATED; |
359 | local->num_multi = 0; | 339 | local->num_multi = 0; |
360 | DEBUG(2, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n", | 340 | dev_dbg(&p_dev->dev, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n", |
361 | p_dev, dev, local, &ray_interrupt); | 341 | p_dev, dev, local, &ray_interrupt); |
362 | 342 | ||
363 | /* Raylink entries in the device structure */ | 343 | /* Raylink entries in the device structure */ |
@@ -370,7 +350,7 @@ static int ray_probe(struct pcmcia_device *p_dev) | |||
370 | #endif /* WIRELESS_SPY */ | 350 | #endif /* WIRELESS_SPY */ |
371 | 351 | ||
372 | 352 | ||
373 | DEBUG(2, "ray_cs ray_attach calling ether_setup.)\n"); | 353 | dev_dbg(&p_dev->dev, "ray_cs ray_attach calling ether_setup.)\n"); |
374 | netif_stop_queue(dev); | 354 | netif_stop_queue(dev); |
375 | 355 | ||
376 | init_timer(&local->timer); | 356 | init_timer(&local->timer); |
@@ -393,7 +373,7 @@ static void ray_detach(struct pcmcia_device *link) | |||
393 | struct net_device *dev; | 373 | struct net_device *dev; |
394 | ray_dev_t *local; | 374 | ray_dev_t *local; |
395 | 375 | ||
396 | DEBUG(1, "ray_detach(0x%p)\n", link); | 376 | dev_dbg(&link->dev, "ray_detach\n"); |
397 | 377 | ||
398 | this_device = NULL; | 378 | this_device = NULL; |
399 | dev = link->priv; | 379 | dev = link->priv; |
@@ -408,7 +388,7 @@ static void ray_detach(struct pcmcia_device *link) | |||
408 | unregister_netdev(dev); | 388 | unregister_netdev(dev); |
409 | free_netdev(dev); | 389 | free_netdev(dev); |
410 | } | 390 | } |
411 | DEBUG(2, "ray_cs ray_detach ending\n"); | 391 | dev_dbg(&link->dev, "ray_cs ray_detach ending\n"); |
412 | } /* ray_detach */ | 392 | } /* ray_detach */ |
413 | 393 | ||
414 | /*============================================================================= | 394 | /*============================================================================= |
@@ -416,19 +396,17 @@ static void ray_detach(struct pcmcia_device *link) | |||
416 | is received, to configure the PCMCIA socket, and to make the | 396 | is received, to configure the PCMCIA socket, and to make the |
417 | ethernet device available to the system. | 397 | ethernet device available to the system. |
418 | =============================================================================*/ | 398 | =============================================================================*/ |
419 | #define CS_CHECK(fn, ret) \ | ||
420 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
421 | #define MAX_TUPLE_SIZE 128 | 399 | #define MAX_TUPLE_SIZE 128 |
422 | static int ray_config(struct pcmcia_device *link) | 400 | static int ray_config(struct pcmcia_device *link) |
423 | { | 401 | { |
424 | int last_fn = 0, last_ret = 0; | 402 | int ret = 0; |
425 | int i; | 403 | int i; |
426 | win_req_t req; | 404 | win_req_t req; |
427 | memreq_t mem; | 405 | memreq_t mem; |
428 | struct net_device *dev = (struct net_device *)link->priv; | 406 | struct net_device *dev = (struct net_device *)link->priv; |
429 | ray_dev_t *local = netdev_priv(dev); | 407 | ray_dev_t *local = netdev_priv(dev); |
430 | 408 | ||
431 | DEBUG(1, "ray_config(0x%p)\n", link); | 409 | dev_dbg(&link->dev, "ray_config\n"); |
432 | 410 | ||
433 | /* Determine card type and firmware version */ | 411 | /* Determine card type and firmware version */ |
434 | printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n", | 412 | printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n", |
@@ -440,14 +418,17 @@ static int ray_config(struct pcmcia_device *link) | |||
440 | /* Now allocate an interrupt line. Note that this does not | 418 | /* Now allocate an interrupt line. Note that this does not |
441 | actually assign a handler to the interrupt. | 419 | actually assign a handler to the interrupt. |
442 | */ | 420 | */ |
443 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 421 | ret = pcmcia_request_irq(link, &link->irq); |
422 | if (ret) | ||
423 | goto failed; | ||
444 | dev->irq = link->irq.AssignedIRQ; | 424 | dev->irq = link->irq.AssignedIRQ; |
445 | 425 | ||
446 | /* This actually configures the PCMCIA socket -- setting up | 426 | /* This actually configures the PCMCIA socket -- setting up |
447 | the I/O windows and the interrupt mapping. | 427 | the I/O windows and the interrupt mapping. |
448 | */ | 428 | */ |
449 | CS_CHECK(RequestConfiguration, | 429 | ret = pcmcia_request_configuration(link, &link->conf); |
450 | pcmcia_request_configuration(link, &link->conf)); | 430 | if (ret) |
431 | goto failed; | ||
451 | 432 | ||
452 | /*** Set up 32k window for shared memory (transmit and control) ************/ | 433 | /*** Set up 32k window for shared memory (transmit and control) ************/ |
453 | req.Attributes = | 434 | req.Attributes = |
@@ -455,10 +436,14 @@ static int ray_config(struct pcmcia_device *link) | |||
455 | req.Base = 0; | 436 | req.Base = 0; |
456 | req.Size = 0x8000; | 437 | req.Size = 0x8000; |
457 | req.AccessSpeed = ray_mem_speed; | 438 | req.AccessSpeed = ray_mem_speed; |
458 | CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); | 439 | ret = pcmcia_request_window(link, &req, &link->win); |
440 | if (ret) | ||
441 | goto failed; | ||
459 | mem.CardOffset = 0x0000; | 442 | mem.CardOffset = 0x0000; |
460 | mem.Page = 0; | 443 | mem.Page = 0; |
461 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); | 444 | ret = pcmcia_map_mem_page(link, link->win, &mem); |
445 | if (ret) | ||
446 | goto failed; | ||
462 | local->sram = ioremap(req.Base, req.Size); | 447 | local->sram = ioremap(req.Base, req.Size); |
463 | 448 | ||
464 | /*** Set up 16k window for shared memory (receive buffer) ***************/ | 449 | /*** Set up 16k window for shared memory (receive buffer) ***************/ |
@@ -467,11 +452,14 @@ static int ray_config(struct pcmcia_device *link) | |||
467 | req.Base = 0; | 452 | req.Base = 0; |
468 | req.Size = 0x4000; | 453 | req.Size = 0x4000; |
469 | req.AccessSpeed = ray_mem_speed; | 454 | req.AccessSpeed = ray_mem_speed; |
470 | CS_CHECK(RequestWindow, | 455 | ret = pcmcia_request_window(link, &req, &local->rmem_handle); |
471 | pcmcia_request_window(&link, &req, &local->rmem_handle)); | 456 | if (ret) |
457 | goto failed; | ||
472 | mem.CardOffset = 0x8000; | 458 | mem.CardOffset = 0x8000; |
473 | mem.Page = 0; | 459 | mem.Page = 0; |
474 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem)); | 460 | ret = pcmcia_map_mem_page(link, local->rmem_handle, &mem); |
461 | if (ret) | ||
462 | goto failed; | ||
475 | local->rmem = ioremap(req.Base, req.Size); | 463 | local->rmem = ioremap(req.Base, req.Size); |
476 | 464 | ||
477 | /*** Set up window for attribute memory ***********************************/ | 465 | /*** Set up window for attribute memory ***********************************/ |
@@ -480,22 +468,25 @@ static int ray_config(struct pcmcia_device *link) | |||
480 | req.Base = 0; | 468 | req.Base = 0; |
481 | req.Size = 0x1000; | 469 | req.Size = 0x1000; |
482 | req.AccessSpeed = ray_mem_speed; | 470 | req.AccessSpeed = ray_mem_speed; |
483 | CS_CHECK(RequestWindow, | 471 | ret = pcmcia_request_window(link, &req, &local->amem_handle); |
484 | pcmcia_request_window(&link, &req, &local->amem_handle)); | 472 | if (ret) |
473 | goto failed; | ||
485 | mem.CardOffset = 0x0000; | 474 | mem.CardOffset = 0x0000; |
486 | mem.Page = 0; | 475 | mem.Page = 0; |
487 | CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem)); | 476 | ret = pcmcia_map_mem_page(link, local->amem_handle, &mem); |
477 | if (ret) | ||
478 | goto failed; | ||
488 | local->amem = ioremap(req.Base, req.Size); | 479 | local->amem = ioremap(req.Base, req.Size); |
489 | 480 | ||
490 | DEBUG(3, "ray_config sram=%p\n", local->sram); | 481 | dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram); |
491 | DEBUG(3, "ray_config rmem=%p\n", local->rmem); | 482 | dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem); |
492 | DEBUG(3, "ray_config amem=%p\n", local->amem); | 483 | dev_dbg(&link->dev, "ray_config amem=%p\n", local->amem); |
493 | if (ray_init(dev) < 0) { | 484 | if (ray_init(dev) < 0) { |
494 | ray_release(link); | 485 | ray_release(link); |
495 | return -ENODEV; | 486 | return -ENODEV; |
496 | } | 487 | } |
497 | 488 | ||
498 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 489 | SET_NETDEV_DEV(dev, &link->dev); |
499 | i = register_netdev(dev); | 490 | i = register_netdev(dev); |
500 | if (i != 0) { | 491 | if (i != 0) { |
501 | printk("ray_config register_netdev() failed\n"); | 492 | printk("ray_config register_netdev() failed\n"); |
@@ -511,9 +502,7 @@ static int ray_config(struct pcmcia_device *link) | |||
511 | 502 | ||
512 | return 0; | 503 | return 0; |
513 | 504 | ||
514 | cs_failed: | 505 | failed: |
515 | cs_error(link, last_fn, last_ret); | ||
516 | |||
517 | ray_release(link); | 506 | ray_release(link); |
518 | return -ENODEV; | 507 | return -ENODEV; |
519 | } /* ray_config */ | 508 | } /* ray_config */ |
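
Editor's note: ray_config() above repeats one sequence three times (32k shared memory, 16k rx buffer, attribute memory): request a window, map a card-memory page into it, then ioremap the host address, each step now checked explicitly instead of going through CS_CHECK(). A hedged user-space sketch of that sequence follows; the sketch_* names and fake_card_memory are hypothetical stand-ins for the pcmcia window API and the card's shared RAM.

#include <stdio.h>

struct sketch_window { unsigned long card_offset; unsigned long size; };

static char fake_card_memory[0x10000];	/* stands in for the card's shared RAM */

static int sketch_request_window(struct sketch_window *w, unsigned long size)
{
	w->size = size;			/* was pcmcia_request_window(link, &req, &win) */
	return 0;
}

static int sketch_map_mem_page(struct sketch_window *w, unsigned long card_offset)
{
	w->card_offset = card_offset;	/* was pcmcia_map_mem_page(link, win, &mem) */
	return 0;
}

static void *sketch_ioremap(struct sketch_window *w)
{
	/* was local->sram = ioremap(req.Base, req.Size) */
	return fake_card_memory + w->card_offset;
}

static void *sketch_setup_window(unsigned long size, unsigned long card_offset)
{
	struct sketch_window w;

	if (sketch_request_window(&w, size))
		return NULL;		/* new style: check and bail, no CS_CHECK() */
	if (sketch_map_mem_page(&w, card_offset))
		return NULL;
	return sketch_ioremap(&w);
}

int main(void)
{
	void *sram = sketch_setup_window(0x8000, 0x0000);	/* 32k tx/control window */
	void *rmem = sketch_setup_window(0x4000, 0x8000);	/* 16k rx buffer window */
	printf("sram=%p rmem=%p\n", sram, rmem);
	return 0;
}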
@@ -543,9 +532,9 @@ static int ray_init(struct net_device *dev) | |||
543 | struct ccs __iomem *pccs; | 532 | struct ccs __iomem *pccs; |
544 | ray_dev_t *local = netdev_priv(dev); | 533 | ray_dev_t *local = netdev_priv(dev); |
545 | struct pcmcia_device *link = local->finder; | 534 | struct pcmcia_device *link = local->finder; |
546 | DEBUG(1, "ray_init(0x%p)\n", dev); | 535 | dev_dbg(&link->dev, "ray_init(0x%p)\n", dev); |
547 | if (!(pcmcia_dev_present(link))) { | 536 | if (!(pcmcia_dev_present(link))) { |
548 | DEBUG(0, "ray_init - device not present\n"); | 537 | dev_dbg(&link->dev, "ray_init - device not present\n"); |
549 | return -1; | 538 | return -1; |
550 | } | 539 | } |
551 | 540 | ||
@@ -567,13 +556,13 @@ static int ray_init(struct net_device *dev) | |||
567 | local->fw_ver = local->startup_res.firmware_version[0]; | 556 | local->fw_ver = local->startup_res.firmware_version[0]; |
568 | local->fw_bld = local->startup_res.firmware_version[1]; | 557 | local->fw_bld = local->startup_res.firmware_version[1]; |
569 | local->fw_var = local->startup_res.firmware_version[2]; | 558 | local->fw_var = local->startup_res.firmware_version[2]; |
570 | DEBUG(1, "ray_init firmware version %d.%d \n", local->fw_ver, | 559 | dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver, |
571 | local->fw_bld); | 560 | local->fw_bld); |
572 | 561 | ||
573 | local->tib_length = 0x20; | 562 | local->tib_length = 0x20; |
574 | if ((local->fw_ver == 5) && (local->fw_bld >= 30)) | 563 | if ((local->fw_ver == 5) && (local->fw_bld >= 30)) |
575 | local->tib_length = local->startup_res.tib_length; | 564 | local->tib_length = local->startup_res.tib_length; |
576 | DEBUG(2, "ray_init tib_length = 0x%02x\n", local->tib_length); | 565 | dev_dbg(&link->dev, "ray_init tib_length = 0x%02x\n", local->tib_length); |
577 | /* Initialize CCS's to buffer free state */ | 566 | /* Initialize CCS's to buffer free state */ |
578 | pccs = ccs_base(local); | 567 | pccs = ccs_base(local); |
579 | for (i = 0; i < NUMBER_OF_CCS; i++) { | 568 | for (i = 0; i < NUMBER_OF_CCS; i++) { |
@@ -592,7 +581,7 @@ static int ray_init(struct net_device *dev) | |||
592 | 581 | ||
593 | clear_interrupt(local); /* Clear any interrupt from the card */ | 582 | clear_interrupt(local); /* Clear any interrupt from the card */ |
594 | local->card_status = CARD_AWAITING_PARAM; | 583 | local->card_status = CARD_AWAITING_PARAM; |
595 | DEBUG(2, "ray_init ending\n"); | 584 | dev_dbg(&link->dev, "ray_init ending\n"); |
596 | return 0; | 585 | return 0; |
597 | } /* ray_init */ | 586 | } /* ray_init */ |
598 | 587 | ||
@@ -605,9 +594,9 @@ static int dl_startup_params(struct net_device *dev) | |||
605 | struct ccs __iomem *pccs; | 594 | struct ccs __iomem *pccs; |
606 | struct pcmcia_device *link = local->finder; | 595 | struct pcmcia_device *link = local->finder; |
607 | 596 | ||
608 | DEBUG(1, "dl_startup_params entered\n"); | 597 | dev_dbg(&link->dev, "dl_startup_params entered\n"); |
609 | if (!(pcmcia_dev_present(link))) { | 598 | if (!(pcmcia_dev_present(link))) { |
610 | DEBUG(2, "ray_cs dl_startup_params - device not present\n"); | 599 | dev_dbg(&link->dev, "ray_cs dl_startup_params - device not present\n"); |
611 | return -1; | 600 | return -1; |
612 | } | 601 | } |
613 | 602 | ||
@@ -625,7 +614,7 @@ static int dl_startup_params(struct net_device *dev) | |||
625 | local->dl_param_ccs = ccsindex; | 614 | local->dl_param_ccs = ccsindex; |
626 | pccs = ccs_base(local) + ccsindex; | 615 | pccs = ccs_base(local) + ccsindex; |
627 | writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd); | 616 | writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd); |
628 | DEBUG(2, "dl_startup_params start ccsindex = %d\n", | 617 | dev_dbg(&link->dev, "dl_startup_params start ccsindex = %d\n", |
629 | local->dl_param_ccs); | 618 | local->dl_param_ccs); |
630 | /* Interrupt the firmware to process the command */ | 619 | /* Interrupt the firmware to process the command */ |
631 | if (interrupt_ecf(local, ccsindex)) { | 620 | if (interrupt_ecf(local, ccsindex)) { |
@@ -641,7 +630,7 @@ static int dl_startup_params(struct net_device *dev) | |||
641 | local->timer.data = (long)local; | 630 | local->timer.data = (long)local; |
642 | local->timer.function = &verify_dl_startup; | 631 | local->timer.function = &verify_dl_startup; |
643 | add_timer(&local->timer); | 632 | add_timer(&local->timer); |
644 | DEBUG(2, | 633 | dev_dbg(&link->dev, |
645 | "ray_cs dl_startup_params started timer for verify_dl_startup\n"); | 634 | "ray_cs dl_startup_params started timer for verify_dl_startup\n"); |
646 | return 0; | 635 | return 0; |
647 | } /* dl_startup_params */ | 636 | } /* dl_startup_params */ |
@@ -717,11 +706,11 @@ static void verify_dl_startup(u_long data) | |||
717 | struct pcmcia_device *link = local->finder; | 706 | struct pcmcia_device *link = local->finder; |
718 | 707 | ||
719 | if (!(pcmcia_dev_present(link))) { | 708 | if (!(pcmcia_dev_present(link))) { |
720 | DEBUG(2, "ray_cs verify_dl_startup - device not present\n"); | 709 | dev_dbg(&link->dev, "ray_cs verify_dl_startup - device not present\n"); |
721 | return; | 710 | return; |
722 | } | 711 | } |
723 | #ifdef PCMCIA_DEBUG | 712 | #if 0 |
724 | if (pc_debug > 2) { | 713 | { |
725 | int i; | 714 | int i; |
726 | printk(KERN_DEBUG | 715 | printk(KERN_DEBUG |
727 | "verify_dl_startup parameters sent via ccs %d:\n", | 716 | "verify_dl_startup parameters sent via ccs %d:\n", |
@@ -760,7 +749,7 @@ static void start_net(u_long data) | |||
760 | int ccsindex; | 749 | int ccsindex; |
761 | struct pcmcia_device *link = local->finder; | 750 | struct pcmcia_device *link = local->finder; |
762 | if (!(pcmcia_dev_present(link))) { | 751 | if (!(pcmcia_dev_present(link))) { |
763 | DEBUG(2, "ray_cs start_net - device not present\n"); | 752 | dev_dbg(&link->dev, "ray_cs start_net - device not present\n"); |
764 | return; | 753 | return; |
765 | } | 754 | } |
766 | /* Fill in the CCS fields for the ECF */ | 755 | /* Fill in the CCS fields for the ECF */ |
@@ -771,7 +760,7 @@ static void start_net(u_long data) | |||
771 | writeb(0, &pccs->var.start_network.update_param); | 760 | writeb(0, &pccs->var.start_network.update_param); |
772 | /* Interrupt the firmware to process the command */ | 761 | /* Interrupt the firmware to process the command */ |
773 | if (interrupt_ecf(local, ccsindex)) { | 762 | if (interrupt_ecf(local, ccsindex)) { |
774 | DEBUG(1, "ray start net failed - card not ready for intr\n"); | 763 | dev_dbg(&link->dev, "ray start net failed - card not ready for intr\n"); |
775 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); | 764 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); |
776 | return; | 765 | return; |
777 | } | 766 | } |
@@ -790,7 +779,7 @@ static void join_net(u_long data) | |||
790 | struct pcmcia_device *link = local->finder; | 779 | struct pcmcia_device *link = local->finder; |
791 | 780 | ||
792 | if (!(pcmcia_dev_present(link))) { | 781 | if (!(pcmcia_dev_present(link))) { |
793 | DEBUG(2, "ray_cs join_net - device not present\n"); | 782 | dev_dbg(&link->dev, "ray_cs join_net - device not present\n"); |
794 | return; | 783 | return; |
795 | } | 784 | } |
796 | /* Fill in the CCS fields for the ECF */ | 785 | /* Fill in the CCS fields for the ECF */ |
@@ -802,7 +791,7 @@ static void join_net(u_long data) | |||
802 | writeb(0, &pccs->var.join_network.net_initiated); | 791 | writeb(0, &pccs->var.join_network.net_initiated); |
803 | /* Interrupt the firmware to process the command */ | 792 | /* Interrupt the firmware to process the command */ |
804 | if (interrupt_ecf(local, ccsindex)) { | 793 | if (interrupt_ecf(local, ccsindex)) { |
805 | DEBUG(1, "ray join net failed - card not ready for intr\n"); | 794 | dev_dbg(&link->dev, "ray join net failed - card not ready for intr\n"); |
806 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); | 795 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); |
807 | return; | 796 | return; |
808 | } | 797 | } |
@@ -821,7 +810,7 @@ static void ray_release(struct pcmcia_device *link) | |||
821 | ray_dev_t *local = netdev_priv(dev); | 810 | ray_dev_t *local = netdev_priv(dev); |
822 | int i; | 811 | int i; |
823 | 812 | ||
824 | DEBUG(1, "ray_release(0x%p)\n", link); | 813 | dev_dbg(&link->dev, "ray_release\n"); |
825 | 814 | ||
826 | del_timer(&local->timer); | 815 | del_timer(&local->timer); |
827 | 816 | ||
@@ -829,15 +818,15 @@ static void ray_release(struct pcmcia_device *link) | |||
829 | iounmap(local->rmem); | 818 | iounmap(local->rmem); |
830 | iounmap(local->amem); | 819 | iounmap(local->amem); |
831 | /* Do bother checking to see if these succeed or not */ | 820 | /* Do bother checking to see if these succeed or not */ |
832 | i = pcmcia_release_window(local->amem_handle); | 821 | i = pcmcia_release_window(link, local->amem_handle); |
833 | if (i != 0) | 822 | if (i != 0) |
834 | DEBUG(0, "ReleaseWindow(local->amem) ret = %x\n", i); | 823 | dev_dbg(&link->dev, "ReleaseWindow(local->amem) ret = %x\n", i); |
835 | i = pcmcia_release_window(local->rmem_handle); | 824 | i = pcmcia_release_window(link, local->rmem_handle); |
836 | if (i != 0) | 825 | if (i != 0) |
837 | DEBUG(0, "ReleaseWindow(local->rmem) ret = %x\n", i); | 826 | dev_dbg(&link->dev, "ReleaseWindow(local->rmem) ret = %x\n", i); |
838 | pcmcia_disable_device(link); | 827 | pcmcia_disable_device(link); |
839 | 828 | ||
840 | DEBUG(2, "ray_release ending\n"); | 829 | dev_dbg(&link->dev, "ray_release ending\n"); |
841 | } | 830 | } |
842 | 831 | ||
843 | static int ray_suspend(struct pcmcia_device *link) | 832 | static int ray_suspend(struct pcmcia_device *link) |
@@ -871,9 +860,9 @@ static int ray_dev_init(struct net_device *dev) | |||
871 | ray_dev_t *local = netdev_priv(dev); | 860 | ray_dev_t *local = netdev_priv(dev); |
872 | struct pcmcia_device *link = local->finder; | 861 | struct pcmcia_device *link = local->finder; |
873 | 862 | ||
874 | DEBUG(1, "ray_dev_init(dev=%p)\n", dev); | 863 | dev_dbg(&link->dev, "ray_dev_init(dev=%p)\n", dev); |
875 | if (!(pcmcia_dev_present(link))) { | 864 | if (!(pcmcia_dev_present(link))) { |
876 | DEBUG(2, "ray_dev_init - device not present\n"); | 865 | dev_dbg(&link->dev, "ray_dev_init - device not present\n"); |
877 | return -1; | 866 | return -1; |
878 | } | 867 | } |
879 | #ifdef RAY_IMMEDIATE_INIT | 868 | #ifdef RAY_IMMEDIATE_INIT |
@@ -887,7 +876,7 @@ static int ray_dev_init(struct net_device *dev) | |||
887 | /* Postpone the card init so that we can still configure the card, | 876 | /* Postpone the card init so that we can still configure the card, |
888 | * for example using the Wireless Extensions. The init will happen | 877 | * for example using the Wireless Extensions. The init will happen |
889 | * in ray_open() - Jean II */ | 878 | * in ray_open() - Jean II */ |
890 | DEBUG(1, | 879 | dev_dbg(&link->dev, |
891 | "ray_dev_init: postponing card init to ray_open() ; Status = %d\n", | 880 | "ray_dev_init: postponing card init to ray_open() ; Status = %d\n", |
892 | local->card_status); | 881 | local->card_status); |
893 | #endif /* RAY_IMMEDIATE_INIT */ | 882 | #endif /* RAY_IMMEDIATE_INIT */ |
@@ -896,7 +885,7 @@ static int ray_dev_init(struct net_device *dev) | |||
896 | memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); | 885 | memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); |
897 | memset(dev->broadcast, 0xff, ETH_ALEN); | 886 | memset(dev->broadcast, 0xff, ETH_ALEN); |
898 | 887 | ||
899 | DEBUG(2, "ray_dev_init ending\n"); | 888 | dev_dbg(&link->dev, "ray_dev_init ending\n"); |
900 | return 0; | 889 | return 0; |
901 | } | 890 | } |
902 | 891 | ||
@@ -906,9 +895,9 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map) | |||
906 | ray_dev_t *local = netdev_priv(dev); | 895 | ray_dev_t *local = netdev_priv(dev); |
907 | struct pcmcia_device *link = local->finder; | 896 | struct pcmcia_device *link = local->finder; |
908 | /* Dummy routine to satisfy device structure */ | 897 | /* Dummy routine to satisfy device structure */ |
909 | DEBUG(1, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map); | 898 | dev_dbg(&link->dev, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map); |
910 | if (!(pcmcia_dev_present(link))) { | 899 | if (!(pcmcia_dev_present(link))) { |
911 | DEBUG(2, "ray_dev_config - device not present\n"); | 900 | dev_dbg(&link->dev, "ray_dev_config - device not present\n"); |
912 | return -1; | 901 | return -1; |
913 | } | 902 | } |
914 | 903 | ||
@@ -924,14 +913,14 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb, | |||
924 | short length = skb->len; | 913 | short length = skb->len; |
925 | 914 | ||
926 | if (!pcmcia_dev_present(link)) { | 915 | if (!pcmcia_dev_present(link)) { |
927 | DEBUG(2, "ray_dev_start_xmit - device not present\n"); | 916 | dev_dbg(&link->dev, "ray_dev_start_xmit - device not present\n"); |
928 | dev_kfree_skb(skb); | 917 | dev_kfree_skb(skb); |
929 | return NETDEV_TX_OK; | 918 | return NETDEV_TX_OK; |
930 | } | 919 | } |
931 | 920 | ||
932 | DEBUG(3, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev); | 921 | dev_dbg(&link->dev, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev); |
933 | if (local->authentication_state == NEED_TO_AUTH) { | 922 | if (local->authentication_state == NEED_TO_AUTH) { |
934 | DEBUG(0, "ray_cs Sending authentication request.\n"); | 923 | dev_dbg(&link->dev, "ray_cs Sending authentication request.\n"); |
935 | if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) { | 924 | if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) { |
936 | local->authentication_state = AUTHENTICATED; | 925 | local->authentication_state = AUTHENTICATED; |
937 | netif_stop_queue(dev); | 926 | netif_stop_queue(dev); |
@@ -971,7 +960,7 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, | |||
971 | struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */ | 960 | struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */ |
972 | short int addr; /* Address of xmit buffer in card space */ | 961 | short int addr; /* Address of xmit buffer in card space */ |
973 | 962 | ||
974 | DEBUG(3, "ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev); | 963 | pr_debug("ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev); |
975 | if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) { | 964 | if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) { |
976 | printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n", | 965 | printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n", |
977 | len); | 966 | len); |
@@ -979,9 +968,9 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, | |||
979 | } | 968 | } |
980 | switch (ccsindex = get_free_tx_ccs(local)) { | 969 | switch (ccsindex = get_free_tx_ccs(local)) { |
981 | case ECCSBUSY: | 970 | case ECCSBUSY: |
982 | DEBUG(2, "ray_hw_xmit tx_ccs table busy\n"); | 971 | pr_debug("ray_hw_xmit tx_ccs table busy\n"); |
983 | case ECCSFULL: | 972 | case ECCSFULL: |
984 | DEBUG(2, "ray_hw_xmit No free tx ccs\n"); | 973 | pr_debug("ray_hw_xmit No free tx ccs\n"); |
985 | case ECARDGONE: | 974 | case ECARDGONE: |
986 | netif_stop_queue(dev); | 975 | netif_stop_queue(dev); |
987 | return XMIT_NO_CCS; | 976 | return XMIT_NO_CCS; |
@@ -1018,12 +1007,12 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, | |||
1018 | writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode); | 1007 | writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode); |
1019 | writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate); | 1008 | writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate); |
1020 | writeb(0, &pccs->var.tx_request.antenna); | 1009 | writeb(0, &pccs->var.tx_request.antenna); |
1021 | DEBUG(3, "ray_hw_xmit default_tx_rate = 0x%x\n", | 1010 | pr_debug("ray_hw_xmit default_tx_rate = 0x%x\n", |
1022 | local->net_default_tx_rate); | 1011 | local->net_default_tx_rate); |
1023 | 1012 | ||
1024 | /* Interrupt the firmware to process the command */ | 1013 | /* Interrupt the firmware to process the command */ |
1025 | if (interrupt_ecf(local, ccsindex)) { | 1014 | if (interrupt_ecf(local, ccsindex)) { |
1026 | DEBUG(2, "ray_hw_xmit failed - ECF not ready for intr\n"); | 1015 | pr_debug("ray_hw_xmit failed - ECF not ready for intr\n"); |
1027 | /* TBD very inefficient to copy packet to buffer, and then not | 1016 | /* TBD very inefficient to copy packet to buffer, and then not |
1028 | send it, but the alternative is to queue the messages and that | 1017 | send it, but the alternative is to queue the messages and that |
1029 | won't be done for a while. Maybe set tbusy until a CCS is free? | 1018 | won't be done for a while. Maybe set tbusy until a CCS is free? |
@@ -1040,7 +1029,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, | |||
1040 | { | 1029 | { |
1041 | __be16 proto = ((struct ethhdr *)data)->h_proto; | 1030 | __be16 proto = ((struct ethhdr *)data)->h_proto; |
1042 | if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ | 1031 | if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ |
1043 | DEBUG(3, "ray_cs translate_frame DIX II\n"); | 1032 | pr_debug("ray_cs translate_frame DIX II\n"); |
1044 | /* Copy LLC header to card buffer */ | 1033 | /* Copy LLC header to card buffer */ |
1045 | memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); | 1034 | memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); |
1046 | memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc), | 1035 | memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc), |
@@ -1056,9 +1045,9 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, | |||
1056 | len - ETH_HLEN); | 1045 | len - ETH_HLEN); |
1057 | return (int)sizeof(struct snaphdr_t) - ETH_HLEN; | 1046 | return (int)sizeof(struct snaphdr_t) - ETH_HLEN; |
1058 | } else { /* already 802 type, and proto is length */ | 1047 | } else { /* already 802 type, and proto is length */ |
1059 | DEBUG(3, "ray_cs translate_frame 802\n"); | 1048 | pr_debug("ray_cs translate_frame 802\n"); |
1060 | if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */ | 1049 | if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */ |
1061 | DEBUG(3, "ray_cs translate_frame evil IPX\n"); | 1050 | pr_debug("ray_cs translate_frame evil IPX\n"); |
1062 | memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN); | 1051 | memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN); |
1063 | return 0 - ETH_HLEN; | 1052 | return 0 - ETH_HLEN; |
1064 | } | 1053 | } |
@@ -1603,7 +1592,7 @@ static int ray_open(struct net_device *dev) | |||
1603 | struct pcmcia_device *link; | 1592 | struct pcmcia_device *link; |
1604 | link = local->finder; | 1593 | link = local->finder; |
1605 | 1594 | ||
1606 | DEBUG(1, "ray_open('%s')\n", dev->name); | 1595 | dev_dbg(&link->dev, "ray_open('%s')\n", dev->name); |
1607 | 1596 | ||
1608 | if (link->open == 0) | 1597 | if (link->open == 0) |
1609 | local->num_multi = 0; | 1598 | local->num_multi = 0; |
@@ -1613,7 +1602,7 @@ static int ray_open(struct net_device *dev) | |||
1613 | if (local->card_status == CARD_AWAITING_PARAM) { | 1602 | if (local->card_status == CARD_AWAITING_PARAM) { |
1614 | int i; | 1603 | int i; |
1615 | 1604 | ||
1616 | DEBUG(1, "ray_open: doing init now !\n"); | 1605 | dev_dbg(&link->dev, "ray_open: doing init now !\n"); |
1617 | 1606 | ||
1618 | /* Download startup parameters */ | 1607 | /* Download startup parameters */ |
1619 | if ((i = dl_startup_params(dev)) < 0) { | 1608 | if ((i = dl_startup_params(dev)) < 0) { |
@@ -1629,7 +1618,7 @@ static int ray_open(struct net_device *dev) | |||
1629 | else | 1618 | else |
1630 | netif_start_queue(dev); | 1619 | netif_start_queue(dev); |
1631 | 1620 | ||
1632 | DEBUG(2, "ray_open ending\n"); | 1621 | dev_dbg(&link->dev, "ray_open ending\n"); |
1633 | return 0; | 1622 | return 0; |
1634 | } /* end ray_open */ | 1623 | } /* end ray_open */ |
1635 | 1624 | ||
@@ -1640,7 +1629,7 @@ static int ray_dev_close(struct net_device *dev) | |||
1640 | struct pcmcia_device *link; | 1629 | struct pcmcia_device *link; |
1641 | link = local->finder; | 1630 | link = local->finder; |
1642 | 1631 | ||
1643 | DEBUG(1, "ray_dev_close('%s')\n", dev->name); | 1632 | dev_dbg(&link->dev, "ray_dev_close('%s')\n", dev->name); |
1644 | 1633 | ||
1645 | link->open--; | 1634 | link->open--; |
1646 | netif_stop_queue(dev); | 1635 | netif_stop_queue(dev); |
@@ -1656,7 +1645,7 @@ static int ray_dev_close(struct net_device *dev) | |||
1656 | /*===========================================================================*/ | 1645 | /*===========================================================================*/ |
1657 | static void ray_reset(struct net_device *dev) | 1646 | static void ray_reset(struct net_device *dev) |
1658 | { | 1647 | { |
1659 | DEBUG(1, "ray_reset entered\n"); | 1648 | pr_debug("ray_reset entered\n"); |
1660 | return; | 1649 | return; |
1661 | } | 1650 | } |
1662 | 1651 | ||
@@ -1669,17 +1658,17 @@ static int interrupt_ecf(ray_dev_t *local, int ccs) | |||
1669 | struct pcmcia_device *link = local->finder; | 1658 | struct pcmcia_device *link = local->finder; |
1670 | 1659 | ||
1671 | if (!(pcmcia_dev_present(link))) { | 1660 | if (!(pcmcia_dev_present(link))) { |
1672 | DEBUG(2, "ray_cs interrupt_ecf - device not present\n"); | 1661 | dev_dbg(&link->dev, "ray_cs interrupt_ecf - device not present\n"); |
1673 | return -1; | 1662 | return -1; |
1674 | } | 1663 | } |
1675 | DEBUG(2, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs); | 1664 | dev_dbg(&link->dev, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs); |
1676 | 1665 | ||
1677 | while (i && | 1666 | while (i && |
1678 | (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) & | 1667 | (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) & |
1679 | ECF_INTR_SET)) | 1668 | ECF_INTR_SET)) |
1680 | i--; | 1669 | i--; |
1681 | if (i == 0) { | 1670 | if (i == 0) { |
1682 | DEBUG(2, "ray_cs interrupt_ecf card not ready for interrupt\n"); | 1671 | dev_dbg(&link->dev, "ray_cs interrupt_ecf card not ready for interrupt\n"); |
1683 | return -1; | 1672 | return -1; |
1684 | } | 1673 | } |
1685 | /* Fill the mailbox, then kick the card */ | 1674 | /* Fill the mailbox, then kick the card */ |
@@ -1698,12 +1687,12 @@ static int get_free_tx_ccs(ray_dev_t *local) | |||
1698 | struct pcmcia_device *link = local->finder; | 1687 | struct pcmcia_device *link = local->finder; |
1699 | 1688 | ||
1700 | if (!(pcmcia_dev_present(link))) { | 1689 | if (!(pcmcia_dev_present(link))) { |
1701 | DEBUG(2, "ray_cs get_free_tx_ccs - device not present\n"); | 1690 | dev_dbg(&link->dev, "ray_cs get_free_tx_ccs - device not present\n"); |
1702 | return ECARDGONE; | 1691 | return ECARDGONE; |
1703 | } | 1692 | } |
1704 | 1693 | ||
1705 | if (test_and_set_bit(0, &local->tx_ccs_lock)) { | 1694 | if (test_and_set_bit(0, &local->tx_ccs_lock)) { |
1706 | DEBUG(1, "ray_cs tx_ccs_lock busy\n"); | 1695 | dev_dbg(&link->dev, "ray_cs tx_ccs_lock busy\n"); |
1707 | return ECCSBUSY; | 1696 | return ECCSBUSY; |
1708 | } | 1697 | } |
1709 | 1698 | ||
@@ -1716,7 +1705,7 @@ static int get_free_tx_ccs(ray_dev_t *local) | |||
1716 | } | 1705 | } |
1717 | } | 1706 | } |
1718 | local->tx_ccs_lock = 0; | 1707 | local->tx_ccs_lock = 0; |
1719 | DEBUG(2, "ray_cs ERROR no free tx CCS for raylink card\n"); | 1708 | dev_dbg(&link->dev, "ray_cs ERROR no free tx CCS for raylink card\n"); |
1720 | return ECCSFULL; | 1709 | return ECCSFULL; |
1721 | } /* get_free_tx_ccs */ | 1710 | } /* get_free_tx_ccs */ |
1722 | 1711 | ||
@@ -1730,11 +1719,11 @@ static int get_free_ccs(ray_dev_t *local) | |||
1730 | struct pcmcia_device *link = local->finder; | 1719 | struct pcmcia_device *link = local->finder; |
1731 | 1720 | ||
1732 | if (!(pcmcia_dev_present(link))) { | 1721 | if (!(pcmcia_dev_present(link))) { |
1733 | DEBUG(2, "ray_cs get_free_ccs - device not present\n"); | 1722 | dev_dbg(&link->dev, "ray_cs get_free_ccs - device not present\n"); |
1734 | return ECARDGONE; | 1723 | return ECARDGONE; |
1735 | } | 1724 | } |
1736 | if (test_and_set_bit(0, &local->ccs_lock)) { | 1725 | if (test_and_set_bit(0, &local->ccs_lock)) { |
1737 | DEBUG(1, "ray_cs ccs_lock busy\n"); | 1726 | dev_dbg(&link->dev, "ray_cs ccs_lock busy\n"); |
1738 | return ECCSBUSY; | 1727 | return ECCSBUSY; |
1739 | } | 1728 | } |
1740 | 1729 | ||
@@ -1747,7 +1736,7 @@ static int get_free_ccs(ray_dev_t *local) | |||
1747 | } | 1736 | } |
1748 | } | 1737 | } |
1749 | local->ccs_lock = 0; | 1738 | local->ccs_lock = 0; |
1750 | DEBUG(1, "ray_cs ERROR no free CCS for raylink card\n"); | 1739 | dev_dbg(&link->dev, "ray_cs ERROR no free CCS for raylink card\n"); |
1751 | return ECCSFULL; | 1740 | return ECCSFULL; |
1752 | } /* get_free_ccs */ | 1741 | } /* get_free_ccs */ |
1753 | 1742 | ||
@@ -1823,7 +1812,7 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev) | |||
1823 | struct pcmcia_device *link = local->finder; | 1812 | struct pcmcia_device *link = local->finder; |
1824 | struct status __iomem *p = local->sram + STATUS_BASE; | 1813 | struct status __iomem *p = local->sram + STATUS_BASE; |
1825 | if (!(pcmcia_dev_present(link))) { | 1814 | if (!(pcmcia_dev_present(link))) { |
1826 | DEBUG(2, "ray_cs net_device_stats - device not present\n"); | 1815 | dev_dbg(&link->dev, "ray_cs net_device_stats - device not present\n"); |
1827 | return &local->stats; | 1816 | return &local->stats; |
1828 | } | 1817 | } |
1829 | if (readb(&p->mrx_overflow_for_host)) { | 1818 | if (readb(&p->mrx_overflow_for_host)) { |
@@ -1856,12 +1845,12 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, | |||
1856 | struct ccs __iomem *pccs; | 1845 | struct ccs __iomem *pccs; |
1857 | 1846 | ||
1858 | if (!(pcmcia_dev_present(link))) { | 1847 | if (!(pcmcia_dev_present(link))) { |
1859 | DEBUG(2, "ray_update_parm - device not present\n"); | 1848 | dev_dbg(&link->dev, "ray_update_parm - device not present\n"); |
1860 | return; | 1849 | return; |
1861 | } | 1850 | } |
1862 | 1851 | ||
1863 | if ((ccsindex = get_free_ccs(local)) < 0) { | 1852 | if ((ccsindex = get_free_ccs(local)) < 0) { |
1864 | DEBUG(0, "ray_update_parm - No free ccs\n"); | 1853 | dev_dbg(&link->dev, "ray_update_parm - No free ccs\n"); |
1865 | return; | 1854 | return; |
1866 | } | 1855 | } |
1867 | pccs = ccs_base(local) + ccsindex; | 1856 | pccs = ccs_base(local) + ccsindex; |
@@ -1874,7 +1863,7 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, | |||
1874 | } | 1863 | } |
1875 | /* Interrupt the firmware to process the command */ | 1864 | /* Interrupt the firmware to process the command */ |
1876 | if (interrupt_ecf(local, ccsindex)) { | 1865 | if (interrupt_ecf(local, ccsindex)) { |
1877 | DEBUG(0, "ray_cs associate failed - ECF not ready for intr\n"); | 1866 | dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n"); |
1878 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); | 1867 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); |
1879 | } | 1868 | } |
1880 | } | 1869 | } |
@@ -1891,12 +1880,12 @@ static void ray_update_multi_list(struct net_device *dev, int all) | |||
1891 | void __iomem *p = local->sram + HOST_TO_ECF_BASE; | 1880 | void __iomem *p = local->sram + HOST_TO_ECF_BASE; |
1892 | 1881 | ||
1893 | if (!(pcmcia_dev_present(link))) { | 1882 | if (!(pcmcia_dev_present(link))) { |
1894 | DEBUG(2, "ray_update_multi_list - device not present\n"); | 1883 | dev_dbg(&link->dev, "ray_update_multi_list - device not present\n"); |
1895 | return; | 1884 | return; |
1896 | } else | 1885 | } else |
1897 | DEBUG(2, "ray_update_multi_list(%p)\n", dev); | 1886 | dev_dbg(&link->dev, "ray_update_multi_list(%p)\n", dev); |
1898 | if ((ccsindex = get_free_ccs(local)) < 0) { | 1887 | if ((ccsindex = get_free_ccs(local)) < 0) { |
1899 | DEBUG(1, "ray_update_multi - No free ccs\n"); | 1888 | dev_dbg(&link->dev, "ray_update_multi - No free ccs\n"); |
1900 | return; | 1889 | return; |
1901 | } | 1890 | } |
1902 | pccs = ccs_base(local) + ccsindex; | 1891 | pccs = ccs_base(local) + ccsindex; |
@@ -1910,7 +1899,7 @@ static void ray_update_multi_list(struct net_device *dev, int all) | |||
1910 | for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; | 1899 | for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; |
1911 | dmip = &dmi->next) { | 1900 | dmip = &dmi->next) { |
1912 | memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); | 1901 | memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); |
1913 | DEBUG(1, | 1902 | dev_dbg(&link->dev, |
1914 | "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", | 1903 | "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", |
1915 | dmi->dmi_addr[0], dmi->dmi_addr[1], | 1904 | dmi->dmi_addr[0], dmi->dmi_addr[1], |
1916 | dmi->dmi_addr[2], dmi->dmi_addr[3], | 1905 | dmi->dmi_addr[2], dmi->dmi_addr[3], |
@@ -1921,12 +1910,12 @@ static void ray_update_multi_list(struct net_device *dev, int all) | |||
1921 | if (i > 256 / ADDRLEN) | 1910 | if (i > 256 / ADDRLEN) |
1922 | i = 256 / ADDRLEN; | 1911 | i = 256 / ADDRLEN; |
1923 | writeb((UCHAR) i, &pccs->var); | 1912 | writeb((UCHAR) i, &pccs->var); |
1924 | DEBUG(1, "ray_cs update_multi %d addresses in list\n", i); | 1913 | dev_dbg(&link->dev, "ray_cs update_multi %d addresses in list\n", i); |
1925 | /* Interrupt the firmware to process the command */ | 1914 | /* Interrupt the firmware to process the command */ |
1926 | local->num_multi = i; | 1915 | local->num_multi = i; |
1927 | } | 1916 | } |
1928 | if (interrupt_ecf(local, ccsindex)) { | 1917 | if (interrupt_ecf(local, ccsindex)) { |
1929 | DEBUG(1, | 1918 | dev_dbg(&link->dev, |
1930 | "ray_cs update_multi failed - ECF not ready for intr\n"); | 1919 | "ray_cs update_multi failed - ECF not ready for intr\n"); |
1931 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); | 1920 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); |
1932 | } | 1921 | } |
@@ -1938,11 +1927,11 @@ static void set_multicast_list(struct net_device *dev) | |||
1938 | ray_dev_t *local = netdev_priv(dev); | 1927 | ray_dev_t *local = netdev_priv(dev); |
1939 | UCHAR promisc; | 1928 | UCHAR promisc; |
1940 | 1929 | ||
1941 | DEBUG(2, "ray_cs set_multicast_list(%p)\n", dev); | 1930 | pr_debug("ray_cs set_multicast_list(%p)\n", dev); |
1942 | 1931 | ||
1943 | if (dev->flags & IFF_PROMISC) { | 1932 | if (dev->flags & IFF_PROMISC) { |
1944 | if (local->sparm.b5.a_promiscuous_mode == 0) { | 1933 | if (local->sparm.b5.a_promiscuous_mode == 0) { |
1945 | DEBUG(1, "ray_cs set_multicast_list promisc on\n"); | 1934 | pr_debug("ray_cs set_multicast_list promisc on\n"); |
1946 | local->sparm.b5.a_promiscuous_mode = 1; | 1935 | local->sparm.b5.a_promiscuous_mode = 1; |
1947 | promisc = 1; | 1936 | promisc = 1; |
1948 | ray_update_parm(dev, OBJID_promiscuous_mode, | 1937 | ray_update_parm(dev, OBJID_promiscuous_mode, |
@@ -1950,7 +1939,7 @@ static void set_multicast_list(struct net_device *dev) | |||
1950 | } | 1939 | } |
1951 | } else { | 1940 | } else { |
1952 | if (local->sparm.b5.a_promiscuous_mode == 1) { | 1941 | if (local->sparm.b5.a_promiscuous_mode == 1) { |
1953 | DEBUG(1, "ray_cs set_multicast_list promisc off\n"); | 1942 | pr_debug("ray_cs set_multicast_list promisc off\n"); |
1954 | local->sparm.b5.a_promiscuous_mode = 0; | 1943 | local->sparm.b5.a_promiscuous_mode = 0; |
1955 | promisc = 0; | 1944 | promisc = 0; |
1956 | ray_update_parm(dev, OBJID_promiscuous_mode, | 1945 | ray_update_parm(dev, OBJID_promiscuous_mode, |
@@ -1984,19 +1973,19 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
1984 | if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */ | 1973 | if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */ |
1985 | return IRQ_NONE; | 1974 | return IRQ_NONE; |
1986 | 1975 | ||
1987 | DEBUG(4, "ray_cs: interrupt for *dev=%p\n", dev); | 1976 | pr_debug("ray_cs: interrupt for *dev=%p\n", dev); |
1988 | 1977 | ||
1989 | local = netdev_priv(dev); | 1978 | local = netdev_priv(dev); |
1990 | link = (struct pcmcia_device *)local->finder; | 1979 | link = (struct pcmcia_device *)local->finder; |
1991 | if (!pcmcia_dev_present(link)) { | 1980 | if (!pcmcia_dev_present(link)) { |
1992 | DEBUG(2, | 1981 | pr_debug( |
1993 | "ray_cs interrupt from device not present or suspended.\n"); | 1982 | "ray_cs interrupt from device not present or suspended.\n"); |
1994 | return IRQ_NONE; | 1983 | return IRQ_NONE; |
1995 | } | 1984 | } |
1996 | rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index); | 1985 | rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index); |
1997 | 1986 | ||
1998 | if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { | 1987 | if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { |
1999 | DEBUG(1, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex); | 1988 | dev_dbg(&link->dev, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex); |
2000 | clear_interrupt(local); | 1989 | clear_interrupt(local); |
2001 | return IRQ_HANDLED; | 1990 | return IRQ_HANDLED; |
2002 | } | 1991 | } |
@@ -2008,33 +1997,33 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2008 | case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */ | 1997 | case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */ |
2009 | del_timer(&local->timer); | 1998 | del_timer(&local->timer); |
2010 | if (status == CCS_COMMAND_COMPLETE) { | 1999 | if (status == CCS_COMMAND_COMPLETE) { |
2011 | DEBUG(1, | 2000 | dev_dbg(&link->dev, |
2012 | "ray_cs interrupt download_startup_parameters OK\n"); | 2001 | "ray_cs interrupt download_startup_parameters OK\n"); |
2013 | } else { | 2002 | } else { |
2014 | DEBUG(1, | 2003 | dev_dbg(&link->dev, |
2015 | "ray_cs interrupt download_startup_parameters fail\n"); | 2004 | "ray_cs interrupt download_startup_parameters fail\n"); |
2016 | } | 2005 | } |
2017 | break; | 2006 | break; |
2018 | case CCS_UPDATE_PARAMS: | 2007 | case CCS_UPDATE_PARAMS: |
2019 | DEBUG(1, "ray_cs interrupt update params done\n"); | 2008 | dev_dbg(&link->dev, "ray_cs interrupt update params done\n"); |
2020 | if (status != CCS_COMMAND_COMPLETE) { | 2009 | if (status != CCS_COMMAND_COMPLETE) { |
2021 | tmp = | 2010 | tmp = |
2022 | readb(&pccs->var.update_param. | 2011 | readb(&pccs->var.update_param. |
2023 | failure_cause); | 2012 | failure_cause); |
2024 | DEBUG(0, | 2013 | dev_dbg(&link->dev, |
2025 | "ray_cs interrupt update params failed - reason %d\n", | 2014 | "ray_cs interrupt update params failed - reason %d\n", |
2026 | tmp); | 2015 | tmp); |
2027 | } | 2016 | } |
2028 | break; | 2017 | break; |
2029 | case CCS_REPORT_PARAMS: | 2018 | case CCS_REPORT_PARAMS: |
2030 | DEBUG(1, "ray_cs interrupt report params done\n"); | 2019 | dev_dbg(&link->dev, "ray_cs interrupt report params done\n"); |
2031 | break; | 2020 | break; |
2032 | case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */ | 2021 | case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */ |
2033 | DEBUG(1, | 2022 | dev_dbg(&link->dev, |
2034 | "ray_cs interrupt CCS Update Multicast List done\n"); | 2023 | "ray_cs interrupt CCS Update Multicast List done\n"); |
2035 | break; | 2024 | break; |
2036 | case CCS_UPDATE_POWER_SAVINGS_MODE: | 2025 | case CCS_UPDATE_POWER_SAVINGS_MODE: |
2037 | DEBUG(1, | 2026 | dev_dbg(&link->dev, |
2038 | "ray_cs interrupt update power save mode done\n"); | 2027 | "ray_cs interrupt update power save mode done\n"); |
2039 | break; | 2028 | break; |
2040 | case CCS_START_NETWORK: | 2029 | case CCS_START_NETWORK: |
@@ -2043,11 +2032,11 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2043 | if (readb | 2032 | if (readb |
2044 | (&pccs->var.start_network.net_initiated) == | 2033 | (&pccs->var.start_network.net_initiated) == |
2045 | 1) { | 2034 | 1) { |
2046 | DEBUG(0, | 2035 | dev_dbg(&link->dev, |
2047 | "ray_cs interrupt network \"%s\" started\n", | 2036 | "ray_cs interrupt network \"%s\" started\n", |
2048 | local->sparm.b4.a_current_ess_id); | 2037 | local->sparm.b4.a_current_ess_id); |
2049 | } else { | 2038 | } else { |
2050 | DEBUG(0, | 2039 | dev_dbg(&link->dev, |
2051 | "ray_cs interrupt network \"%s\" joined\n", | 2040 | "ray_cs interrupt network \"%s\" joined\n", |
2052 | local->sparm.b4.a_current_ess_id); | 2041 | local->sparm.b4.a_current_ess_id); |
2053 | } | 2042 | } |
@@ -2075,12 +2064,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2075 | local->timer.expires = jiffies + HZ * 5; | 2064 | local->timer.expires = jiffies + HZ * 5; |
2076 | local->timer.data = (long)local; | 2065 | local->timer.data = (long)local; |
2077 | if (status == CCS_START_NETWORK) { | 2066 | if (status == CCS_START_NETWORK) { |
2078 | DEBUG(0, | 2067 | dev_dbg(&link->dev, |
2079 | "ray_cs interrupt network \"%s\" start failed\n", | 2068 | "ray_cs interrupt network \"%s\" start failed\n", |
2080 | local->sparm.b4.a_current_ess_id); | 2069 | local->sparm.b4.a_current_ess_id); |
2081 | local->timer.function = &start_net; | 2070 | local->timer.function = &start_net; |
2082 | } else { | 2071 | } else { |
2083 | DEBUG(0, | 2072 | dev_dbg(&link->dev, |
2084 | "ray_cs interrupt network \"%s\" join failed\n", | 2073 | "ray_cs interrupt network \"%s\" join failed\n", |
2085 | local->sparm.b4.a_current_ess_id); | 2074 | local->sparm.b4.a_current_ess_id); |
2086 | local->timer.function = &join_net; | 2075 | local->timer.function = &join_net; |
@@ -2091,19 +2080,19 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2091 | case CCS_START_ASSOCIATION: | 2080 | case CCS_START_ASSOCIATION: |
2092 | if (status == CCS_COMMAND_COMPLETE) { | 2081 | if (status == CCS_COMMAND_COMPLETE) { |
2093 | local->card_status = CARD_ASSOC_COMPLETE; | 2082 | local->card_status = CARD_ASSOC_COMPLETE; |
2094 | DEBUG(0, "ray_cs association successful\n"); | 2083 | dev_dbg(&link->dev, "ray_cs association successful\n"); |
2095 | } else { | 2084 | } else { |
2096 | DEBUG(0, "ray_cs association failed,\n"); | 2085 | dev_dbg(&link->dev, "ray_cs association failed,\n"); |
2097 | local->card_status = CARD_ASSOC_FAILED; | 2086 | local->card_status = CARD_ASSOC_FAILED; |
2098 | join_net((u_long) local); | 2087 | join_net((u_long) local); |
2099 | } | 2088 | } |
2100 | break; | 2089 | break; |
2101 | case CCS_TX_REQUEST: | 2090 | case CCS_TX_REQUEST: |
2102 | if (status == CCS_COMMAND_COMPLETE) { | 2091 | if (status == CCS_COMMAND_COMPLETE) { |
2103 | DEBUG(3, | 2092 | dev_dbg(&link->dev, |
2104 | "ray_cs interrupt tx request complete\n"); | 2093 | "ray_cs interrupt tx request complete\n"); |
2105 | } else { | 2094 | } else { |
2106 | DEBUG(1, | 2095 | dev_dbg(&link->dev, |
2107 | "ray_cs interrupt tx request failed\n"); | 2096 | "ray_cs interrupt tx request failed\n"); |
2108 | } | 2097 | } |
2109 | if (!sniffer) | 2098 | if (!sniffer) |
@@ -2111,21 +2100,21 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2111 | netif_wake_queue(dev); | 2100 | netif_wake_queue(dev); |
2112 | break; | 2101 | break; |
2113 | case CCS_TEST_MEMORY: | 2102 | case CCS_TEST_MEMORY: |
2114 | DEBUG(1, "ray_cs interrupt mem test done\n"); | 2103 | dev_dbg(&link->dev, "ray_cs interrupt mem test done\n"); |
2115 | break; | 2104 | break; |
2116 | case CCS_SHUTDOWN: | 2105 | case CCS_SHUTDOWN: |
2117 | DEBUG(1, | 2106 | dev_dbg(&link->dev, |
2118 | "ray_cs interrupt Unexpected CCS returned - Shutdown\n"); | 2107 | "ray_cs interrupt Unexpected CCS returned - Shutdown\n"); |
2119 | break; | 2108 | break; |
2120 | case CCS_DUMP_MEMORY: | 2109 | case CCS_DUMP_MEMORY: |
2121 | DEBUG(1, "ray_cs interrupt dump memory done\n"); | 2110 | dev_dbg(&link->dev, "ray_cs interrupt dump memory done\n"); |
2122 | break; | 2111 | break; |
2123 | case CCS_START_TIMER: | 2112 | case CCS_START_TIMER: |
2124 | DEBUG(2, | 2113 | dev_dbg(&link->dev, |
2125 | "ray_cs interrupt DING - raylink timer expired\n"); | 2114 | "ray_cs interrupt DING - raylink timer expired\n"); |
2126 | break; | 2115 | break; |
2127 | default: | 2116 | default: |
2128 | DEBUG(1, | 2117 | dev_dbg(&link->dev, |
2129 | "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n", | 2118 | "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n", |
2130 | rcsindex, cmd); | 2119 | rcsindex, cmd); |
2131 | } | 2120 | } |
@@ -2139,7 +2128,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2139 | ray_rx(dev, local, prcs); | 2128 | ray_rx(dev, local, prcs); |
2140 | break; | 2129 | break; |
2141 | case REJOIN_NET_COMPLETE: | 2130 | case REJOIN_NET_COMPLETE: |
2142 | DEBUG(1, "ray_cs interrupt rejoin net complete\n"); | 2131 | dev_dbg(&link->dev, "ray_cs interrupt rejoin net complete\n"); |
2143 | local->card_status = CARD_ACQ_COMPLETE; | 2132 | local->card_status = CARD_ACQ_COMPLETE; |
2144 | /* do we need to clear tx buffers CCS's? */ | 2133 | /* do we need to clear tx buffers CCS's? */ |
2145 | if (local->sparm.b4.a_network_type == ADHOC) { | 2134 | if (local->sparm.b4.a_network_type == ADHOC) { |
@@ -2149,7 +2138,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2149 | memcpy_fromio(&local->bss_id, | 2138 | memcpy_fromio(&local->bss_id, |
2150 | prcs->var.rejoin_net_complete. | 2139 | prcs->var.rejoin_net_complete. |
2151 | bssid, ADDRLEN); | 2140 | bssid, ADDRLEN); |
2152 | DEBUG(1, | 2141 | dev_dbg(&link->dev, |
2153 | "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n", | 2142 | "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n", |
2154 | local->bss_id[0], local->bss_id[1], | 2143 | local->bss_id[0], local->bss_id[1], |
2155 | local->bss_id[2], local->bss_id[3], | 2144 | local->bss_id[2], local->bss_id[3], |
@@ -2159,15 +2148,15 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) | |||
2159 | } | 2148 | } |
2160 | break; | 2149 | break; |
2161 | case ROAMING_INITIATED: | 2150 | case ROAMING_INITIATED: |
2162 | DEBUG(1, "ray_cs interrupt roaming initiated\n"); | 2151 | dev_dbg(&link->dev, "ray_cs interrupt roaming initiated\n"); |
2163 | netif_stop_queue(dev); | 2152 | netif_stop_queue(dev); |
2164 | local->card_status = CARD_DOING_ACQ; | 2153 | local->card_status = CARD_DOING_ACQ; |
2165 | break; | 2154 | break; |
2166 | case JAPAN_CALL_SIGN_RXD: | 2155 | case JAPAN_CALL_SIGN_RXD: |
2167 | DEBUG(1, "ray_cs interrupt japan call sign rx\n"); | 2156 | dev_dbg(&link->dev, "ray_cs interrupt japan call sign rx\n"); |
2168 | break; | 2157 | break; |
2169 | default: | 2158 | default: |
2170 | DEBUG(1, | 2159 | dev_dbg(&link->dev, |
2171 | "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n", | 2160 | "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n", |
2172 | rcsindex, | 2161 | rcsindex, |
2173 | (unsigned int)readb(&prcs->interrupt_id)); | 2162 | (unsigned int)readb(&prcs->interrupt_id)); |
@@ -2186,7 +2175,7 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local, | |||
2186 | int rx_len; | 2175 | int rx_len; |
2187 | unsigned int pkt_addr; | 2176 | unsigned int pkt_addr; |
2188 | void __iomem *pmsg; | 2177 | void __iomem *pmsg; |
2189 | DEBUG(4, "ray_rx process rx packet\n"); | 2178 | pr_debug("ray_rx process rx packet\n"); |
2190 | 2179 | ||
2191 | /* Calculate address of packet within Rx buffer */ | 2180 | /* Calculate address of packet within Rx buffer */ |
2192 | pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8) | 2181 | pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8) |
@@ -2199,28 +2188,28 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local, | |||
2199 | pmsg = local->rmem + pkt_addr; | 2188 | pmsg = local->rmem + pkt_addr; |
2200 | switch (readb(pmsg)) { | 2189 | switch (readb(pmsg)) { |
2201 | case DATA_TYPE: | 2190 | case DATA_TYPE: |
2202 | DEBUG(4, "ray_rx data type\n"); | 2191 | pr_debug("ray_rx data type\n"); |
2203 | rx_data(dev, prcs, pkt_addr, rx_len); | 2192 | rx_data(dev, prcs, pkt_addr, rx_len); |
2204 | break; | 2193 | break; |
2205 | case AUTHENTIC_TYPE: | 2194 | case AUTHENTIC_TYPE: |
2206 | DEBUG(4, "ray_rx authentic type\n"); | 2195 | pr_debug("ray_rx authentic type\n"); |
2207 | if (sniffer) | 2196 | if (sniffer) |
2208 | rx_data(dev, prcs, pkt_addr, rx_len); | 2197 | rx_data(dev, prcs, pkt_addr, rx_len); |
2209 | else | 2198 | else |
2210 | rx_authenticate(local, prcs, pkt_addr, rx_len); | 2199 | rx_authenticate(local, prcs, pkt_addr, rx_len); |
2211 | break; | 2200 | break; |
2212 | case DEAUTHENTIC_TYPE: | 2201 | case DEAUTHENTIC_TYPE: |
2213 | DEBUG(4, "ray_rx deauth type\n"); | 2202 | pr_debug("ray_rx deauth type\n"); |
2214 | if (sniffer) | 2203 | if (sniffer) |
2215 | rx_data(dev, prcs, pkt_addr, rx_len); | 2204 | rx_data(dev, prcs, pkt_addr, rx_len); |
2216 | else | 2205 | else |
2217 | rx_deauthenticate(local, prcs, pkt_addr, rx_len); | 2206 | rx_deauthenticate(local, prcs, pkt_addr, rx_len); |
2218 | break; | 2207 | break; |
2219 | case NULL_MSG_TYPE: | 2208 | case NULL_MSG_TYPE: |
2220 | DEBUG(3, "ray_cs rx NULL msg\n"); | 2209 | pr_debug("ray_cs rx NULL msg\n"); |
2221 | break; | 2210 | break; |
2222 | case BEACON_TYPE: | 2211 | case BEACON_TYPE: |
2223 | DEBUG(4, "ray_rx beacon type\n"); | 2212 | pr_debug("ray_rx beacon type\n"); |
2224 | if (sniffer) | 2213 | if (sniffer) |
2225 | rx_data(dev, prcs, pkt_addr, rx_len); | 2214 | rx_data(dev, prcs, pkt_addr, rx_len); |
2226 | 2215 | ||
@@ -2233,7 +2222,7 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local, | |||
2233 | ray_get_stats(dev); | 2222 | ray_get_stats(dev); |
2234 | break; | 2223 | break; |
2235 | default: | 2224 | default: |
2236 | DEBUG(0, "ray_cs unknown pkt type %2x\n", | 2225 | pr_debug("ray_cs unknown pkt type %2x\n", |
2237 | (unsigned int)readb(pmsg)); | 2226 | (unsigned int)readb(pmsg)); |
2238 | break; | 2227 | break; |
2239 | } | 2228 | } |
@@ -2262,7 +2251,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, | |||
2262 | rx_len > | 2251 | rx_len > |
2263 | (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + | 2252 | (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + |
2264 | FCS_LEN)) { | 2253 | FCS_LEN)) { |
2265 | DEBUG(0, | 2254 | pr_debug( |
2266 | "ray_cs invalid packet length %d received \n", | 2255 | "ray_cs invalid packet length %d received \n", |
2267 | rx_len); | 2256 | rx_len); |
2268 | return; | 2257 | return; |
@@ -2273,17 +2262,17 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, | |||
2273 | rx_len > | 2262 | rx_len > |
2274 | (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + | 2263 | (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + |
2275 | FCS_LEN)) { | 2264 | FCS_LEN)) { |
2276 | DEBUG(0, | 2265 | pr_debug( |
2277 | "ray_cs invalid packet length %d received \n", | 2266 | "ray_cs invalid packet length %d received \n", |
2278 | rx_len); | 2267 | rx_len); |
2279 | return; | 2268 | return; |
2280 | } | 2269 | } |
2281 | } | 2270 | } |
2282 | } | 2271 | } |
2283 | DEBUG(4, "ray_cs rx_data packet\n"); | 2272 | pr_debug("ray_cs rx_data packet\n"); |
2284 | /* If fragmented packet, verify sizes of fragments add up */ | 2273 | /* If fragmented packet, verify sizes of fragments add up */ |
2285 | if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { | 2274 | if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { |
2286 | DEBUG(1, "ray_cs rx'ed fragment\n"); | 2275 | pr_debug("ray_cs rx'ed fragment\n"); |
2287 | tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8) | 2276 | tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8) |
2288 | + readb(&prcs->var.rx_packet.totalpacketlength[1]); | 2277 | + readb(&prcs->var.rx_packet.totalpacketlength[1]); |
2289 | total_len = tmp; | 2278 | total_len = tmp; |
@@ -2301,7 +2290,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, | |||
2301 | } while (1); | 2290 | } while (1); |
2302 | 2291 | ||
2303 | if (tmp < 0) { | 2292 | if (tmp < 0) { |
2304 | DEBUG(0, | 2293 | pr_debug( |
2305 | "ray_cs rx_data fragment lengths don't add up\n"); | 2294 | "ray_cs rx_data fragment lengths don't add up\n"); |
2306 | local->stats.rx_dropped++; | 2295 | local->stats.rx_dropped++; |
2307 | release_frag_chain(local, prcs); | 2296 | release_frag_chain(local, prcs); |
@@ -2313,7 +2302,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, | |||
2313 | 2302 | ||
2314 | skb = dev_alloc_skb(total_len + 5); | 2303 | skb = dev_alloc_skb(total_len + 5); |
2315 | if (skb == NULL) { | 2304 | if (skb == NULL) { |
2316 | DEBUG(0, "ray_cs rx_data could not allocate skb\n"); | 2305 | pr_debug("ray_cs rx_data could not allocate skb\n"); |
2317 | local->stats.rx_dropped++; | 2306 | local->stats.rx_dropped++; |
2318 | if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) | 2307 | if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) |
2319 | release_frag_chain(local, prcs); | 2308 | release_frag_chain(local, prcs); |
@@ -2321,7 +2310,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, | |||
2321 | } | 2310 | } |
2322 | skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */ | 2311 | skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */ |
2323 | 2312 | ||
2324 | DEBUG(4, "ray_cs rx_data total_len = %x, rx_len = %x\n", total_len, | 2313 | pr_debug("ray_cs rx_data total_len = %x, rx_len = %x\n", total_len, |
2325 | rx_len); | 2314 | rx_len); |
2326 | 2315 | ||
2327 | /************************/ | 2316 | /************************/ |
@@ -2354,7 +2343,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, | |||
2354 | tmp = 17; | 2343 | tmp = 17; |
2355 | if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { | 2344 | if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { |
2356 | prcslink = prcs; | 2345 | prcslink = prcs; |
2357 | DEBUG(1, "ray_cs rx_data in fragment loop\n"); | 2346 | pr_debug("ray_cs rx_data in fragment loop\n"); |
2358 | do { | 2347 | do { |
2359 | prcslink = rcs_base(local) | 2348 | prcslink = rcs_base(local) |
2360 | + | 2349 | + |
@@ -2426,8 +2415,8 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len) | |||
2426 | memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN); | 2415 | memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN); |
2427 | memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN); | 2416 | memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN); |
2428 | 2417 | ||
2429 | #ifdef PCMCIA_DEBUG | 2418 | #if 0 |
2430 | if (pc_debug > 3) { | 2419 | if { |
2431 | print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ", | 2420 | print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ", |
2432 | DUMP_PREFIX_NONE, 16, 1, | 2421 | DUMP_PREFIX_NONE, 16, 1, |
2433 | skb->data, 64, true); | 2422 | skb->data, 64, true); |
@@ -2441,7 +2430,7 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len) | |||
2441 | 2430 | ||
2442 | if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) { | 2431 | if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) { |
2443 | /* not a snap type so leave it alone */ | 2432 | /* not a snap type so leave it alone */ |
2444 | DEBUG(3, "ray_cs untranslate NOT SNAP %02x %02x %02x\n", | 2433 | pr_debug("ray_cs untranslate NOT SNAP %02x %02x %02x\n", |
2445 | psnap->dsap, psnap->ssap, psnap->ctrl); | 2434 | psnap->dsap, psnap->ssap, psnap->ctrl); |
2446 | 2435 | ||
2447 | delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; | 2436 | delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; |
@@ -2450,7 +2439,7 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len) | |||
2450 | } else { /* Its a SNAP */ | 2439 | } else { /* Its a SNAP */ |
2451 | if (memcmp(psnap->org, org_bridge, 3) == 0) { | 2440 | if (memcmp(psnap->org, org_bridge, 3) == 0) { |
2452 | /* EtherII and nuke the LLC */ | 2441 | /* EtherII and nuke the LLC */ |
2453 | DEBUG(3, "ray_cs untranslate Bridge encap\n"); | 2442 | pr_debug("ray_cs untranslate Bridge encap\n"); |
2454 | delta = RX_MAC_HEADER_LENGTH | 2443 | delta = RX_MAC_HEADER_LENGTH |
2455 | + sizeof(struct snaphdr_t) - ETH_HLEN; | 2444 | + sizeof(struct snaphdr_t) - ETH_HLEN; |
2456 | peth = (struct ethhdr *)(skb->data + delta); | 2445 | peth = (struct ethhdr *)(skb->data + delta); |
@@ -2459,14 +2448,14 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len) | |||
2459 | switch (ntohs(type)) { | 2448 | switch (ntohs(type)) { |
2460 | case ETH_P_IPX: | 2449 | case ETH_P_IPX: |
2461 | case ETH_P_AARP: | 2450 | case ETH_P_AARP: |
2462 | DEBUG(3, "ray_cs untranslate RFC IPX/AARP\n"); | 2451 | pr_debug("ray_cs untranslate RFC IPX/AARP\n"); |
2463 | delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; | 2452 | delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; |
2464 | peth = (struct ethhdr *)(skb->data + delta); | 2453 | peth = (struct ethhdr *)(skb->data + delta); |
2465 | peth->h_proto = | 2454 | peth->h_proto = |
2466 | htons(len - RX_MAC_HEADER_LENGTH); | 2455 | htons(len - RX_MAC_HEADER_LENGTH); |
2467 | break; | 2456 | break; |
2468 | default: | 2457 | default: |
2469 | DEBUG(3, "ray_cs untranslate RFC default\n"); | 2458 | pr_debug("ray_cs untranslate RFC default\n"); |
2470 | delta = RX_MAC_HEADER_LENGTH + | 2459 | delta = RX_MAC_HEADER_LENGTH + |
2471 | sizeof(struct snaphdr_t) - ETH_HLEN; | 2460 | sizeof(struct snaphdr_t) - ETH_HLEN; |
2472 | peth = (struct ethhdr *)(skb->data + delta); | 2461 | peth = (struct ethhdr *)(skb->data + delta); |
@@ -2482,12 +2471,12 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len) | |||
2482 | } | 2471 | } |
2483 | /* TBD reserve skb_reserve(skb, delta); */ | 2472 | /* TBD reserve skb_reserve(skb, delta); */ |
2484 | skb_pull(skb, delta); | 2473 | skb_pull(skb, delta); |
2485 | DEBUG(3, "untranslate after skb_pull(%d), skb->data = %p\n", delta, | 2474 | pr_debug("untranslate after skb_pull(%d), skb->data = %p\n", delta, |
2486 | skb->data); | 2475 | skb->data); |
2487 | memcpy(peth->h_dest, destaddr, ADDRLEN); | 2476 | memcpy(peth->h_dest, destaddr, ADDRLEN); |
2488 | memcpy(peth->h_source, srcaddr, ADDRLEN); | 2477 | memcpy(peth->h_source, srcaddr, ADDRLEN); |
2489 | #ifdef PCMCIA_DEBUG | 2478 | #if 0 |
2490 | if (pc_debug > 3) { | 2479 | { |
2491 | int i; | 2480 | int i; |
2492 | printk(KERN_DEBUG "skb->data after untranslate:"); | 2481 | printk(KERN_DEBUG "skb->data after untranslate:"); |
2493 | for (i = 0; i < 64; i++) | 2482 | for (i = 0; i < 64; i++) |
@@ -2529,7 +2518,7 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs) | |||
2529 | while (tmp--) { | 2518 | while (tmp--) { |
2530 | writeb(CCS_BUFFER_FREE, &prcslink->buffer_status); | 2519 | writeb(CCS_BUFFER_FREE, &prcslink->buffer_status); |
2531 | if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { | 2520 | if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { |
2532 | DEBUG(1, "ray_cs interrupt bad rcsindex = 0x%x\n", | 2521 | pr_debug("ray_cs interrupt bad rcsindex = 0x%x\n", |
2533 | rcsindex); | 2522 | rcsindex); |
2534 | break; | 2523 | break; |
2535 | } | 2524 | } |
@@ -2543,9 +2532,9 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs) | |||
2543 | static void authenticate(ray_dev_t *local) | 2532 | static void authenticate(ray_dev_t *local) |
2544 | { | 2533 | { |
2545 | struct pcmcia_device *link = local->finder; | 2534 | struct pcmcia_device *link = local->finder; |
2546 | DEBUG(0, "ray_cs Starting authentication.\n"); | 2535 | dev_dbg(&link->dev, "ray_cs Starting authentication.\n"); |
2547 | if (!(pcmcia_dev_present(link))) { | 2536 | if (!(pcmcia_dev_present(link))) { |
2548 | DEBUG(2, "ray_cs authenticate - device not present\n"); | 2537 | dev_dbg(&link->dev, "ray_cs authenticate - device not present\n"); |
2549 | return; | 2538 | return; |
2550 | } | 2539 | } |
2551 | 2540 | ||
@@ -2573,11 +2562,11 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs, | |||
2573 | copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); | 2562 | copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); |
2574 | /* if we are trying to get authenticated */ | 2563 | /* if we are trying to get authenticated */ |
2575 | if (local->sparm.b4.a_network_type == ADHOC) { | 2564 | if (local->sparm.b4.a_network_type == ADHOC) { |
2576 | DEBUG(1, "ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n", | 2565 | pr_debug("ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n", |
2577 | msg->var[0], msg->var[1], msg->var[2], msg->var[3], | 2566 | msg->var[0], msg->var[1], msg->var[2], msg->var[3], |
2578 | msg->var[4], msg->var[5]); | 2567 | msg->var[4], msg->var[5]); |
2579 | if (msg->var[2] == 1) { | 2568 | if (msg->var[2] == 1) { |
2580 | DEBUG(0, "ray_cs Sending authentication response.\n"); | 2569 | pr_debug("ray_cs Sending authentication response.\n"); |
2581 | if (!build_auth_frame | 2570 | if (!build_auth_frame |
2582 | (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) { | 2571 | (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) { |
2583 | local->authentication_state = NEED_TO_AUTH; | 2572 | local->authentication_state = NEED_TO_AUTH; |
@@ -2591,13 +2580,13 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs, | |||
2591 | /* Verify authentication sequence #2 and success */ | 2580 | /* Verify authentication sequence #2 and success */ |
2592 | if (msg->var[2] == 2) { | 2581 | if (msg->var[2] == 2) { |
2593 | if ((msg->var[3] | msg->var[4]) == 0) { | 2582 | if ((msg->var[3] | msg->var[4]) == 0) { |
2594 | DEBUG(1, "Authentication successful\n"); | 2583 | pr_debug("Authentication successful\n"); |
2595 | local->card_status = CARD_AUTH_COMPLETE; | 2584 | local->card_status = CARD_AUTH_COMPLETE; |
2596 | associate(local); | 2585 | associate(local); |
2597 | local->authentication_state = | 2586 | local->authentication_state = |
2598 | AUTHENTICATED; | 2587 | AUTHENTICATED; |
2599 | } else { | 2588 | } else { |
2600 | DEBUG(0, "Authentication refused\n"); | 2589 | pr_debug("Authentication refused\n"); |
2601 | local->card_status = CARD_AUTH_REFUSED; | 2590 | local->card_status = CARD_AUTH_REFUSED; |
2602 | join_net((u_long) local); | 2591 | join_net((u_long) local); |
2603 | local->authentication_state = | 2592 | local->authentication_state = |
@@ -2617,22 +2606,22 @@ static void associate(ray_dev_t *local) | |||
2617 | struct net_device *dev = link->priv; | 2606 | struct net_device *dev = link->priv; |
2618 | int ccsindex; | 2607 | int ccsindex; |
2619 | if (!(pcmcia_dev_present(link))) { | 2608 | if (!(pcmcia_dev_present(link))) { |
2620 | DEBUG(2, "ray_cs associate - device not present\n"); | 2609 | dev_dbg(&link->dev, "ray_cs associate - device not present\n"); |
2621 | return; | 2610 | return; |
2622 | } | 2611 | } |
2623 | /* If no tx buffers available, return */ | 2612 | /* If no tx buffers available, return */ |
2624 | if ((ccsindex = get_free_ccs(local)) < 0) { | 2613 | if ((ccsindex = get_free_ccs(local)) < 0) { |
2625 | /* TBD should never be here but... what if we are? */ | 2614 | /* TBD should never be here but... what if we are? */ |
2626 | DEBUG(1, "ray_cs associate - No free ccs\n"); | 2615 | dev_dbg(&link->dev, "ray_cs associate - No free ccs\n"); |
2627 | return; | 2616 | return; |
2628 | } | 2617 | } |
2629 | DEBUG(1, "ray_cs Starting association with access point\n"); | 2618 | dev_dbg(&link->dev, "ray_cs Starting association with access point\n"); |
2630 | pccs = ccs_base(local) + ccsindex; | 2619 | pccs = ccs_base(local) + ccsindex; |
2631 | /* fill in the CCS */ | 2620 | /* fill in the CCS */ |
2632 | writeb(CCS_START_ASSOCIATION, &pccs->cmd); | 2621 | writeb(CCS_START_ASSOCIATION, &pccs->cmd); |
2633 | /* Interrupt the firmware to process the command */ | 2622 | /* Interrupt the firmware to process the command */ |
2634 | if (interrupt_ecf(local, ccsindex)) { | 2623 | if (interrupt_ecf(local, ccsindex)) { |
2635 | DEBUG(1, "ray_cs associate failed - ECF not ready for intr\n"); | 2624 | dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n"); |
2636 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); | 2625 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); |
2637 | 2626 | ||
2638 | del_timer(&local->timer); | 2627 | del_timer(&local->timer); |
@@ -2655,7 +2644,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs, | |||
2655 | /* UCHAR buff[256]; | 2644 | /* UCHAR buff[256]; |
2656 | struct rx_msg *msg = (struct rx_msg *)buff; | 2645 | struct rx_msg *msg = (struct rx_msg *)buff; |
2657 | */ | 2646 | */ |
2658 | DEBUG(0, "Deauthentication frame received\n"); | 2647 | pr_debug("Deauthentication frame received\n"); |
2659 | local->authentication_state = UNAUTHENTICATED; | 2648 | local->authentication_state = UNAUTHENTICATED; |
2660 | /* Need to reauthenticate or rejoin depending on reason code */ | 2649 | /* Need to reauthenticate or rejoin depending on reason code */ |
2661 | /* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); | 2650 | /* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); |
@@ -2823,7 +2812,7 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type) | |||
2823 | 2812 | ||
2824 | /* If no tx buffers available, return */ | 2813 | /* If no tx buffers available, return */ |
2825 | if ((ccsindex = get_free_tx_ccs(local)) < 0) { | 2814 | if ((ccsindex = get_free_tx_ccs(local)) < 0) { |
2826 | DEBUG(1, "ray_cs send authenticate - No free tx ccs\n"); | 2815 | pr_debug("ray_cs send authenticate - No free tx ccs\n"); |
2827 | return -1; | 2816 | return -1; |
2828 | } | 2817 | } |
2829 | 2818 | ||
@@ -2855,7 +2844,7 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type) | |||
2855 | 2844 | ||
2856 | /* Interrupt the firmware to process the command */ | 2845 | /* Interrupt the firmware to process the command */ |
2857 | if (interrupt_ecf(local, ccsindex)) { | 2846 | if (interrupt_ecf(local, ccsindex)) { |
2858 | DEBUG(1, | 2847 | pr_debug( |
2859 | "ray_cs send authentication request failed - ECF not ready for intr\n"); | 2848 | "ray_cs send authentication request failed - ECF not ready for intr\n"); |
2860 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); | 2849 | writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); |
2861 | return -1; | 2850 | return -1; |
@@ -2942,9 +2931,9 @@ static int __init init_ray_cs(void) | |||
2942 | { | 2931 | { |
2943 | int rc; | 2932 | int rc; |
2944 | 2933 | ||
2945 | DEBUG(1, "%s\n", rcsid); | 2934 | pr_debug("%s\n", rcsid); |
2946 | rc = pcmcia_register_driver(&ray_driver); | 2935 | rc = pcmcia_register_driver(&ray_driver); |
2947 | DEBUG(1, "raylink init_module register_pcmcia_driver returns 0x%x\n", | 2936 | pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n", |
2948 | rc); | 2937 | rc); |
2949 | 2938 | ||
2950 | #ifdef CONFIG_PROC_FS | 2939 | #ifdef CONFIG_PROC_FS |
@@ -2964,7 +2953,7 @@ static int __init init_ray_cs(void) | |||
2964 | 2953 | ||
2965 | static void __exit exit_ray_cs(void) | 2954 | static void __exit exit_ray_cs(void) |
2966 | { | 2955 | { |
2967 | DEBUG(0, "ray_cs: cleanup_module\n"); | 2956 | pr_debug("ray_cs: cleanup_module\n"); |
2968 | 2957 | ||
2969 | #ifdef CONFIG_PROC_FS | 2958 | #ifdef CONFIG_PROC_FS |
2970 | remove_proc_entry("driver/ray_cs/ray_cs", NULL); | 2959 | remove_proc_entry("driver/ray_cs/ray_cs", NULL); |
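The ray_cs.c hunks above are a mechanical conversion from the driver's private DEBUG() macro to the kernel's standard dev_dbg()/pr_debug() helpers. As a rough before/after sketch of the pattern (assuming a pc_debug-gated macro like the one this same patch deletes from parport_cs.c and wl3501_cs.c further down; the ray_cs definition itself lies outside this excerpt):

	/* Before (sketch): private macro, only compiled in with PCMCIA_DEBUG and
	 * filtered at runtime by a pc_debug module parameter. */
	#ifdef PCMCIA_DEBUG
	static int pc_debug = PCMCIA_DEBUG;
	module_param(pc_debug, int, 0);
	#define DEBUG(n, args...) \
		do { if (pc_debug > (n)) printk(KERN_DEBUG args); } while (0)
	#else
	#define DEBUG(n, args...) do { } while (0)
	#endif

		DEBUG(1, "ray_cs interrupt rejoin net complete\n");

	/* After: generic helpers. dev_dbg() tags the message with the PCMCIA
	 * device; pr_debug() is used where no struct device is handy. */
		dev_dbg(&link->dev, "ray_cs interrupt rejoin net complete\n");
		pr_debug("ray_rx process rx packet\n");

The numeric verbosity level disappears; with CONFIG_DYNAMIC_DEBUG the individual call sites can be switched on through debugfs instead of a per-driver parameter.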
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 431a20ec6db6..33918fd5b231 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c | |||
@@ -3656,10 +3656,7 @@ wv_pcmcia_reset(struct net_device * dev) | |||
3656 | 3656 | ||
3657 | i = pcmcia_access_configuration_register(link, ®); | 3657 | i = pcmcia_access_configuration_register(link, ®); |
3658 | if (i != 0) | 3658 | if (i != 0) |
3659 | { | ||
3660 | cs_error(link, AccessConfigurationRegister, i); | ||
3661 | return FALSE; | 3659 | return FALSE; |
3662 | } | ||
3663 | 3660 | ||
3664 | #ifdef DEBUG_CONFIG_INFO | 3661 | #ifdef DEBUG_CONFIG_INFO |
3665 | printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n", | 3662 | printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n", |
@@ -3670,19 +3667,13 @@ wv_pcmcia_reset(struct net_device * dev) | |||
3670 | reg.Value = reg.Value | COR_SW_RESET; | 3667 | reg.Value = reg.Value | COR_SW_RESET; |
3671 | i = pcmcia_access_configuration_register(link, ®); | 3668 | i = pcmcia_access_configuration_register(link, ®); |
3672 | if (i != 0) | 3669 | if (i != 0) |
3673 | { | ||
3674 | cs_error(link, AccessConfigurationRegister, i); | ||
3675 | return FALSE; | 3670 | return FALSE; |
3676 | } | ||
3677 | 3671 | ||
3678 | reg.Action = CS_WRITE; | 3672 | reg.Action = CS_WRITE; |
3679 | reg.Value = COR_LEVEL_IRQ | COR_CONFIG; | 3673 | reg.Value = COR_LEVEL_IRQ | COR_CONFIG; |
3680 | i = pcmcia_access_configuration_register(link, ®); | 3674 | i = pcmcia_access_configuration_register(link, ®); |
3681 | if (i != 0) | 3675 | if (i != 0) |
3682 | { | ||
3683 | cs_error(link, AccessConfigurationRegister, i); | ||
3684 | return FALSE; | 3676 | return FALSE; |
3685 | } | ||
3686 | 3677 | ||
3687 | #ifdef DEBUG_CONFIG_TRACE | 3678 | #ifdef DEBUG_CONFIG_TRACE |
3688 | printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name); | 3679 | printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name); |
@@ -3857,10 +3848,7 @@ wv_pcmcia_config(struct pcmcia_device * link) | |||
3857 | { | 3848 | { |
3858 | i = pcmcia_request_io(link, &link->io); | 3849 | i = pcmcia_request_io(link, &link->io); |
3859 | if (i != 0) | 3850 | if (i != 0) |
3860 | { | ||
3861 | cs_error(link, RequestIO, i); | ||
3862 | break; | 3851 | break; |
3863 | } | ||
3864 | 3852 | ||
3865 | /* | 3853 | /* |
3866 | * Now allocate an interrupt line. Note that this does not | 3854 | * Now allocate an interrupt line. Note that this does not |
@@ -3868,10 +3856,7 @@ wv_pcmcia_config(struct pcmcia_device * link) | |||
3868 | */ | 3856 | */ |
3869 | i = pcmcia_request_irq(link, &link->irq); | 3857 | i = pcmcia_request_irq(link, &link->irq); |
3870 | if (i != 0) | 3858 | if (i != 0) |
3871 | { | ||
3872 | cs_error(link, RequestIRQ, i); | ||
3873 | break; | 3859 | break; |
3874 | } | ||
3875 | 3860 | ||
3876 | /* | 3861 | /* |
3877 | * This actually configures the PCMCIA socket -- setting up | 3862 | * This actually configures the PCMCIA socket -- setting up |
@@ -3880,10 +3865,7 @@ wv_pcmcia_config(struct pcmcia_device * link) | |||
3880 | link->conf.ConfigIndex = 1; | 3865 | link->conf.ConfigIndex = 1; |
3881 | i = pcmcia_request_configuration(link, &link->conf); | 3866 | i = pcmcia_request_configuration(link, &link->conf); |
3882 | if (i != 0) | 3867 | if (i != 0) |
3883 | { | ||
3884 | cs_error(link, RequestConfiguration, i); | ||
3885 | break; | 3868 | break; |
3886 | } | ||
3887 | 3869 | ||
3888 | /* | 3870 | /* |
3889 | * Allocate a small memory window. Note that the struct pcmcia_device | 3871 | * Allocate a small memory window. Note that the struct pcmcia_device |
@@ -3894,24 +3876,18 @@ wv_pcmcia_config(struct pcmcia_device * link) | |||
3894 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; | 3876 | req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; |
3895 | req.Base = req.Size = 0; | 3877 | req.Base = req.Size = 0; |
3896 | req.AccessSpeed = mem_speed; | 3878 | req.AccessSpeed = mem_speed; |
3897 | i = pcmcia_request_window(&link, &req, &link->win); | 3879 | i = pcmcia_request_window(link, &req, &link->win); |
3898 | if (i != 0) | 3880 | if (i != 0) |
3899 | { | ||
3900 | cs_error(link, RequestWindow, i); | ||
3901 | break; | 3881 | break; |
3902 | } | ||
3903 | 3882 | ||
3904 | lp->mem = ioremap(req.Base, req.Size); | 3883 | lp->mem = ioremap(req.Base, req.Size); |
3905 | dev->mem_start = (u_long)lp->mem; | 3884 | dev->mem_start = (u_long)lp->mem; |
3906 | dev->mem_end = dev->mem_start + req.Size; | 3885 | dev->mem_end = dev->mem_start + req.Size; |
3907 | 3886 | ||
3908 | mem.CardOffset = 0; mem.Page = 0; | 3887 | mem.CardOffset = 0; mem.Page = 0; |
3909 | i = pcmcia_map_mem_page(link->win, &mem); | 3888 | i = pcmcia_map_mem_page(link, link->win, &mem); |
3910 | if (i != 0) | 3889 | if (i != 0) |
3911 | { | ||
3912 | cs_error(link, MapMemPage, i); | ||
3913 | break; | 3890 | break; |
3914 | } | ||
3915 | 3891 | ||
3916 | /* Feed device with this info... */ | 3892 | /* Feed device with this info... */ |
3917 | dev->irq = link->irq.AssignedIRQ; | 3893 | dev->irq = link->irq.AssignedIRQ; |
@@ -3923,7 +3899,7 @@ wv_pcmcia_config(struct pcmcia_device * link) | |||
3923 | lp->mem, dev->irq, (u_int) dev->base_addr); | 3899 | lp->mem, dev->irq, (u_int) dev->base_addr); |
3924 | #endif | 3900 | #endif |
3925 | 3901 | ||
3926 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 3902 | SET_NETDEV_DEV(dev, &link->dev); |
3927 | i = register_netdev(dev); | 3903 | i = register_netdev(dev); |
3928 | if(i != 0) | 3904 | if(i != 0) |
3929 | { | 3905 | { |
@@ -4462,8 +4438,7 @@ wavelan_probe(struct pcmcia_device *p_dev) | |||
4462 | p_dev->io.IOAddrLines = 3; | 4438 | p_dev->io.IOAddrLines = 3; |
4463 | 4439 | ||
4464 | /* Interrupt setup */ | 4440 | /* Interrupt setup */ |
4465 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 4441 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
4466 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
4467 | p_dev->irq.Handler = wavelan_interrupt; | 4442 | p_dev->irq.Handler = wavelan_interrupt; |
4468 | 4443 | ||
4469 | /* General socket configuration */ | 4444 | /* General socket configuration */ |
@@ -4475,7 +4450,7 @@ wavelan_probe(struct pcmcia_device *p_dev) | |||
4475 | if (!dev) | 4450 | if (!dev) |
4476 | return -ENOMEM; | 4451 | return -ENOMEM; |
4477 | 4452 | ||
4478 | p_dev->priv = p_dev->irq.Instance = dev; | 4453 | p_dev->priv = dev; |
4479 | 4454 | ||
4480 | lp = netdev_priv(dev); | 4455 | lp = netdev_priv(dev); |
4481 | 4456 | ||
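Two recurring simplifications run through the wavelan_cs.c hunks: cs_error() reporting is dropped (failure paths now simply return or break, with the message left to the PCMCIA core), and the IRQ setup loses IRQ_HANDLE_PRESENT, IRQInfo1 and the irq.Instance back-pointer. A condensed sketch of both; the note that the interrupt handler now receives p_dev->priv as its context is an assumption inferred from the combined priv/Instance assignment being reduced to priv alone:

	/* Before (sketch): every resource request reported its own failure. */
	i = pcmcia_request_irq(link, &link->irq);
	if (i != 0) {
		cs_error(link, RequestIRQ, i);
		break;
	}

	/* After: the return code is enough, no per-driver error text. */
	i = pcmcia_request_irq(link, &link->irq);
	if (i != 0)
		break;

	/* IRQ setup: level/handle flags and the Instance pointer are gone;
	 * the handler context is assumed to come from p_dev->priv. */
	p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
	p_dev->irq.Handler = wavelan_interrupt;
	p_dev->priv = dev;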
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 4f1e0cfe609b..5f0401a52cff 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c | |||
@@ -67,23 +67,7 @@ | |||
67 | /* For rough constant delay */ | 67 | /* For rough constant delay */ |
68 | #define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); } | 68 | #define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); } |
69 | 69 | ||
70 | /* | 70 | |
71 | * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If you do not | ||
72 | * define PCMCIA_DEBUG at all, all the debug code will be left out. If you | ||
73 | * compile with PCMCIA_DEBUG=0, the debug code will be present but disabled -- | ||
74 | * but it can then be enabled for specific modules at load time with a | ||
75 | * 'pc_debug=#' option to insmod. | ||
76 | */ | ||
77 | #define PCMCIA_DEBUG 0 | ||
78 | #ifdef PCMCIA_DEBUG | ||
79 | static int pc_debug = PCMCIA_DEBUG; | ||
80 | module_param(pc_debug, int, 0); | ||
81 | #define dprintk(n, format, args...) \ | ||
82 | { if (pc_debug > (n)) \ | ||
83 | printk(KERN_INFO "%s: " format "\n", __func__ , ##args); } | ||
84 | #else | ||
85 | #define dprintk(n, format, args...) | ||
86 | #endif | ||
87 | 71 | ||
88 | #define wl3501_outb(a, b) { outb(a, b); slow_down_io(); } | 72 | #define wl3501_outb(a, b) { outb(a, b); slow_down_io(); } |
89 | #define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); } | 73 | #define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); } |
@@ -684,10 +668,10 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr) | |||
684 | int matchflag = 0; | 668 | int matchflag = 0; |
685 | struct wl3501_scan_confirm sig; | 669 | struct wl3501_scan_confirm sig; |
686 | 670 | ||
687 | dprintk(3, "entry"); | 671 | pr_debug("entry"); |
688 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); | 672 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); |
689 | if (sig.status == WL3501_STATUS_SUCCESS) { | 673 | if (sig.status == WL3501_STATUS_SUCCESS) { |
690 | dprintk(3, "success"); | 674 | pr_debug("success"); |
691 | if ((this->net_type == IW_MODE_INFRA && | 675 | if ((this->net_type == IW_MODE_INFRA && |
692 | (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || | 676 | (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || |
693 | (this->net_type == IW_MODE_ADHOC && | 677 | (this->net_type == IW_MODE_ADHOC && |
@@ -722,7 +706,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr) | |||
722 | } | 706 | } |
723 | } | 707 | } |
724 | } else if (sig.status == WL3501_STATUS_TIMEOUT) { | 708 | } else if (sig.status == WL3501_STATUS_TIMEOUT) { |
725 | dprintk(3, "timeout"); | 709 | pr_debug("timeout"); |
726 | this->join_sta_bss = 0; | 710 | this->join_sta_bss = 0; |
727 | for (i = this->join_sta_bss; i < this->bss_cnt; i++) | 711 | for (i = this->join_sta_bss; i < this->bss_cnt; i++) |
728 | if (!wl3501_mgmt_join(this, i)) | 712 | if (!wl3501_mgmt_join(this, i)) |
@@ -879,7 +863,7 @@ static int wl3501_mgmt_auth(struct wl3501_card *this) | |||
879 | .timeout = 1000, | 863 | .timeout = 1000, |
880 | }; | 864 | }; |
881 | 865 | ||
882 | dprintk(3, "entry"); | 866 | pr_debug("entry"); |
883 | memcpy(sig.mac_addr, this->bssid, ETH_ALEN); | 867 | memcpy(sig.mac_addr, this->bssid, ETH_ALEN); |
884 | return wl3501_esbq_exec(this, &sig, sizeof(sig)); | 868 | return wl3501_esbq_exec(this, &sig, sizeof(sig)); |
885 | } | 869 | } |
@@ -893,7 +877,7 @@ static int wl3501_mgmt_association(struct wl3501_card *this) | |||
893 | .cap_info = this->cap_info, | 877 | .cap_info = this->cap_info, |
894 | }; | 878 | }; |
895 | 879 | ||
896 | dprintk(3, "entry"); | 880 | pr_debug("entry"); |
897 | memcpy(sig.mac_addr, this->bssid, ETH_ALEN); | 881 | memcpy(sig.mac_addr, this->bssid, ETH_ALEN); |
898 | return wl3501_esbq_exec(this, &sig, sizeof(sig)); | 882 | return wl3501_esbq_exec(this, &sig, sizeof(sig)); |
899 | } | 883 | } |
@@ -903,7 +887,7 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) | |||
903 | struct wl3501_card *this = netdev_priv(dev); | 887 | struct wl3501_card *this = netdev_priv(dev); |
904 | struct wl3501_join_confirm sig; | 888 | struct wl3501_join_confirm sig; |
905 | 889 | ||
906 | dprintk(3, "entry"); | 890 | pr_debug("entry"); |
907 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); | 891 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); |
908 | if (sig.status == WL3501_STATUS_SUCCESS) { | 892 | if (sig.status == WL3501_STATUS_SUCCESS) { |
909 | if (this->net_type == IW_MODE_INFRA) { | 893 | if (this->net_type == IW_MODE_INFRA) { |
@@ -962,7 +946,7 @@ static inline void wl3501_md_confirm_interrupt(struct net_device *dev, | |||
962 | { | 946 | { |
963 | struct wl3501_md_confirm sig; | 947 | struct wl3501_md_confirm sig; |
964 | 948 | ||
965 | dprintk(3, "entry"); | 949 | pr_debug("entry"); |
966 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); | 950 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); |
967 | wl3501_free_tx_buffer(this, sig.data); | 951 | wl3501_free_tx_buffer(this, sig.data); |
968 | if (netif_queue_stopped(dev)) | 952 | if (netif_queue_stopped(dev)) |
@@ -1017,7 +1001,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, | |||
1017 | static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this, | 1001 | static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this, |
1018 | u16 addr, void *sig, int size) | 1002 | u16 addr, void *sig, int size) |
1019 | { | 1003 | { |
1020 | dprintk(3, "entry"); | 1004 | pr_debug("entry"); |
1021 | wl3501_get_from_wla(this, addr, &this->sig_get_confirm, | 1005 | wl3501_get_from_wla(this, addr, &this->sig_get_confirm, |
1022 | sizeof(this->sig_get_confirm)); | 1006 | sizeof(this->sig_get_confirm)); |
1023 | wake_up(&this->wait); | 1007 | wake_up(&this->wait); |
@@ -1029,7 +1013,7 @@ static inline void wl3501_start_confirm_interrupt(struct net_device *dev, | |||
1029 | { | 1013 | { |
1030 | struct wl3501_start_confirm sig; | 1014 | struct wl3501_start_confirm sig; |
1031 | 1015 | ||
1032 | dprintk(3, "entry"); | 1016 | pr_debug("entry"); |
1033 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); | 1017 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); |
1034 | if (sig.status == WL3501_STATUS_SUCCESS) | 1018 | if (sig.status == WL3501_STATUS_SUCCESS) |
1035 | netif_wake_queue(dev); | 1019 | netif_wake_queue(dev); |
@@ -1041,7 +1025,7 @@ static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev, | |||
1041 | struct wl3501_card *this = netdev_priv(dev); | 1025 | struct wl3501_card *this = netdev_priv(dev); |
1042 | struct wl3501_assoc_confirm sig; | 1026 | struct wl3501_assoc_confirm sig; |
1043 | 1027 | ||
1044 | dprintk(3, "entry"); | 1028 | pr_debug("entry"); |
1045 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); | 1029 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); |
1046 | 1030 | ||
1047 | if (sig.status == WL3501_STATUS_SUCCESS) | 1031 | if (sig.status == WL3501_STATUS_SUCCESS) |
@@ -1053,7 +1037,7 @@ static inline void wl3501_auth_confirm_interrupt(struct wl3501_card *this, | |||
1053 | { | 1037 | { |
1054 | struct wl3501_auth_confirm sig; | 1038 | struct wl3501_auth_confirm sig; |
1055 | 1039 | ||
1056 | dprintk(3, "entry"); | 1040 | pr_debug("entry"); |
1057 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); | 1041 | wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); |
1058 | 1042 | ||
1059 | if (sig.status == WL3501_STATUS_SUCCESS) | 1043 | if (sig.status == WL3501_STATUS_SUCCESS) |
@@ -1069,7 +1053,7 @@ static inline void wl3501_rx_interrupt(struct net_device *dev) | |||
1069 | u8 sig_id; | 1053 | u8 sig_id; |
1070 | struct wl3501_card *this = netdev_priv(dev); | 1054 | struct wl3501_card *this = netdev_priv(dev); |
1071 | 1055 | ||
1072 | dprintk(3, "entry"); | 1056 | pr_debug("entry"); |
1073 | loop: | 1057 | loop: |
1074 | morepkts = 0; | 1058 | morepkts = 0; |
1075 | if (!wl3501_esbq_confirm(this)) | 1059 | if (!wl3501_esbq_confirm(this)) |
@@ -1302,7 +1286,7 @@ static int wl3501_reset(struct net_device *dev) | |||
1302 | wl3501_ack_interrupt(this); | 1286 | wl3501_ack_interrupt(this); |
1303 | wl3501_unblock_interrupt(this); | 1287 | wl3501_unblock_interrupt(this); |
1304 | wl3501_mgmt_scan(this, 100); | 1288 | wl3501_mgmt_scan(this, 100); |
1305 | dprintk(1, "%s: device reset", dev->name); | 1289 | pr_debug("%s: device reset", dev->name); |
1306 | rc = 0; | 1290 | rc = 0; |
1307 | out: | 1291 | out: |
1308 | return rc; | 1292 | return rc; |
@@ -1376,7 +1360,7 @@ static int wl3501_open(struct net_device *dev) | |||
1376 | link->open++; | 1360 | link->open++; |
1377 | 1361 | ||
1378 | /* Initial WL3501 firmware */ | 1362 | /* Initial WL3501 firmware */ |
1379 | dprintk(1, "%s: Initialize WL3501 firmware...", dev->name); | 1363 | pr_debug("%s: Initialize WL3501 firmware...", dev->name); |
1380 | if (wl3501_init_firmware(this)) | 1364 | if (wl3501_init_firmware(this)) |
1381 | goto fail; | 1365 | goto fail; |
1382 | /* Initial device variables */ | 1366 | /* Initial device variables */ |
@@ -1388,7 +1372,7 @@ static int wl3501_open(struct net_device *dev) | |||
1388 | wl3501_unblock_interrupt(this); | 1372 | wl3501_unblock_interrupt(this); |
1389 | wl3501_mgmt_scan(this, 100); | 1373 | wl3501_mgmt_scan(this, 100); |
1390 | rc = 0; | 1374 | rc = 0; |
1391 | dprintk(1, "%s: WL3501 opened", dev->name); | 1375 | pr_debug("%s: WL3501 opened", dev->name); |
1392 | printk(KERN_INFO "%s: Card Name: %s\n" | 1376 | printk(KERN_INFO "%s: Card Name: %s\n" |
1393 | "%s: Firmware Date: %s\n", | 1377 | "%s: Firmware Date: %s\n", |
1394 | dev->name, this->card_name, | 1378 | dev->name, this->card_name, |
@@ -1914,8 +1898,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev) | |||
1914 | p_dev->io.IOAddrLines = 5; | 1898 | p_dev->io.IOAddrLines = 5; |
1915 | 1899 | ||
1916 | /* Interrupt setup */ | 1900 | /* Interrupt setup */ |
1917 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 1901 | p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
1918 | p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
1919 | p_dev->irq.Handler = wl3501_interrupt; | 1902 | p_dev->irq.Handler = wl3501_interrupt; |
1920 | 1903 | ||
1921 | /* General socket configuration */ | 1904 | /* General socket configuration */ |
@@ -1938,16 +1921,13 @@ static int wl3501_probe(struct pcmcia_device *p_dev) | |||
1938 | dev->wireless_handlers = &wl3501_handler_def; | 1921 | dev->wireless_handlers = &wl3501_handler_def; |
1939 | SET_ETHTOOL_OPS(dev, &ops); | 1922 | SET_ETHTOOL_OPS(dev, &ops); |
1940 | netif_stop_queue(dev); | 1923 | netif_stop_queue(dev); |
1941 | p_dev->priv = p_dev->irq.Instance = dev; | 1924 | p_dev->priv = dev; |
1942 | 1925 | ||
1943 | return wl3501_config(p_dev); | 1926 | return wl3501_config(p_dev); |
1944 | out_link: | 1927 | out_link: |
1945 | return -ENOMEM; | 1928 | return -ENOMEM; |
1946 | } | 1929 | } |
1947 | 1930 | ||
1948 | #define CS_CHECK(fn, ret) \ | ||
1949 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
1950 | |||
1951 | /** | 1931 | /** |
1952 | * wl3501_config - configure the PCMCIA socket and make eth device available | 1932 | * wl3501_config - configure the PCMCIA socket and make eth device available |
1953 | * @link - FILL_IN | 1933 | * @link - FILL_IN |
@@ -1959,7 +1939,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | |||
1959 | static int wl3501_config(struct pcmcia_device *link) | 1939 | static int wl3501_config(struct pcmcia_device *link) |
1960 | { | 1940 | { |
1961 | struct net_device *dev = link->priv; | 1941 | struct net_device *dev = link->priv; |
1962 | int i = 0, j, last_fn, last_ret; | 1942 | int i = 0, j, ret; |
1963 | struct wl3501_card *this; | 1943 | struct wl3501_card *this; |
1964 | 1944 | ||
1965 | /* Try allocating IO ports. This tries a few fixed addresses. If you | 1945 | /* Try allocating IO ports. This tries a few fixed addresses. If you |
@@ -1975,24 +1955,26 @@ static int wl3501_config(struct pcmcia_device *link) | |||
1975 | if (i == 0) | 1955 | if (i == 0) |
1976 | break; | 1956 | break; |
1977 | } | 1957 | } |
1978 | if (i != 0) { | 1958 | if (i != 0) |
1979 | cs_error(link, RequestIO, i); | ||
1980 | goto failed; | 1959 | goto failed; |
1981 | } | ||
1982 | 1960 | ||
1983 | /* Now allocate an interrupt line. Note that this does not actually | 1961 | /* Now allocate an interrupt line. Note that this does not actually |
1984 | * assign a handler to the interrupt. */ | 1962 | * assign a handler to the interrupt. */ |
1985 | 1963 | ||
1986 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 1964 | ret = pcmcia_request_irq(link, &link->irq); |
1965 | if (ret) | ||
1966 | goto failed; | ||
1987 | 1967 | ||
1988 | /* This actually configures the PCMCIA socket -- setting up the I/O | 1968 | /* This actually configures the PCMCIA socket -- setting up the I/O |
1989 | * windows and the interrupt mapping. */ | 1969 | * windows and the interrupt mapping. */ |
1990 | 1970 | ||
1991 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 1971 | ret = pcmcia_request_configuration(link, &link->conf); |
1972 | if (ret) | ||
1973 | goto failed; | ||
1992 | 1974 | ||
1993 | dev->irq = link->irq.AssignedIRQ; | 1975 | dev->irq = link->irq.AssignedIRQ; |
1994 | dev->base_addr = link->io.BasePort1; | 1976 | dev->base_addr = link->io.BasePort1; |
1995 | SET_NETDEV_DEV(dev, &handle_to_dev(link)); | 1977 | SET_NETDEV_DEV(dev, &link->dev); |
1996 | if (register_netdev(dev)) { | 1978 | if (register_netdev(dev)) { |
1997 | printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n"); | 1979 | printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n"); |
1998 | goto failed; | 1980 | goto failed; |
@@ -2041,8 +2023,6 @@ static int wl3501_config(struct pcmcia_device *link) | |||
2041 | netif_start_queue(dev); | 2023 | netif_start_queue(dev); |
2042 | return 0; | 2024 | return 0; |
2043 | 2025 | ||
2044 | cs_failed: | ||
2045 | cs_error(link, last_fn, last_ret); | ||
2046 | failed: | 2026 | failed: |
2047 | wl3501_release(link); | 2027 | wl3501_release(link); |
2048 | return -ENODEV; | 2028 | return -ENODEV; |
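The wl3501_cs.c conversion also retires the CS_CHECK() wrapper, which bundled the call, its error code and a goto into one macro, together with the cs_failed label that existed only to feed cs_error(). The replacement is plain, greppable error handling, as in this sketch taken from the config path above:

	/* Before (sketch): the macro hides the error path and records which
	 * call failed for cs_error(). */
	#define CS_CHECK(fn, ret) \
	do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

		CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
		CS_CHECK(RequestConfiguration,
			 pcmcia_request_configuration(link, &link->conf));

	/* After: explicit return-code checks jumping to one cleanup label. */
		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;

		ret = pcmcia_request_configuration(link, &link->conf);
		if (ret)
			goto failed;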
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c index 8fdfa4f537a6..7dd370fa3439 100644 --- a/drivers/parport/parport_cs.c +++ b/drivers/parport/parport_cs.c | |||
@@ -67,14 +67,6 @@ MODULE_LICENSE("Dual MPL/GPL"); | |||
67 | 67 | ||
68 | INT_MODULE_PARM(epp_mode, 1); | 68 | INT_MODULE_PARM(epp_mode, 1); |
69 | 69 | ||
70 | #ifdef PCMCIA_DEBUG | ||
71 | INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); | ||
72 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
73 | static char *version = | ||
74 | "parport_cs.c 1.29 2002/10/11 06:57:41 (David Hinds)"; | ||
75 | #else | ||
76 | #define DEBUG(n, args...) | ||
77 | #endif | ||
78 | 70 | ||
79 | /*====================================================================*/ | 71 | /*====================================================================*/ |
80 | 72 | ||
@@ -103,7 +95,7 @@ static int parport_probe(struct pcmcia_device *link) | |||
103 | { | 95 | { |
104 | parport_info_t *info; | 96 | parport_info_t *info; |
105 | 97 | ||
106 | DEBUG(0, "parport_attach()\n"); | 98 | dev_dbg(&link->dev, "parport_attach()\n"); |
107 | 99 | ||
108 | /* Create new parport device */ | 100 | /* Create new parport device */ |
109 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 101 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -114,7 +106,6 @@ static int parport_probe(struct pcmcia_device *link) | |||
114 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 106 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
115 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 107 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
116 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 108 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
117 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
118 | link->conf.Attributes = CONF_ENABLE_IRQ; | 109 | link->conf.Attributes = CONF_ENABLE_IRQ; |
119 | link->conf.IntType = INT_MEMORY_AND_IO; | 110 | link->conf.IntType = INT_MEMORY_AND_IO; |
120 | 111 | ||
@@ -132,7 +123,7 @@ static int parport_probe(struct pcmcia_device *link) | |||
132 | 123 | ||
133 | static void parport_detach(struct pcmcia_device *link) | 124 | static void parport_detach(struct pcmcia_device *link) |
134 | { | 125 | { |
135 | DEBUG(0, "parport_detach(0x%p)\n", link); | 126 | dev_dbg(&link->dev, "parport_detach\n"); |
136 | 127 | ||
137 | parport_cs_release(link); | 128 | parport_cs_release(link); |
138 | 129 | ||
@@ -147,9 +138,6 @@ static void parport_detach(struct pcmcia_device *link) | |||
147 | 138 | ||
148 | ======================================================================*/ | 139 | ======================================================================*/ |
149 | 140 | ||
150 | #define CS_CHECK(fn, ret) \ | ||
151 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
152 | |||
153 | static int parport_config_check(struct pcmcia_device *p_dev, | 141 | static int parport_config_check(struct pcmcia_device *p_dev, |
154 | cistpl_cftable_entry_t *cfg, | 142 | cistpl_cftable_entry_t *cfg, |
155 | cistpl_cftable_entry_t *dflt, | 143 | cistpl_cftable_entry_t *dflt, |
@@ -178,18 +166,20 @@ static int parport_config(struct pcmcia_device *link) | |||
178 | { | 166 | { |
179 | parport_info_t *info = link->priv; | 167 | parport_info_t *info = link->priv; |
180 | struct parport *p; | 168 | struct parport *p; |
181 | int last_ret, last_fn; | 169 | int ret; |
182 | 170 | ||
183 | DEBUG(0, "parport_config(0x%p)\n", link); | 171 | dev_dbg(&link->dev, "parport_config\n"); |
184 | 172 | ||
185 | last_ret = pcmcia_loop_config(link, parport_config_check, NULL); | 173 | ret = pcmcia_loop_config(link, parport_config_check, NULL); |
186 | if (last_ret) { | 174 | if (ret) |
187 | cs_error(link, RequestIO, last_ret); | ||
188 | goto failed; | 175 | goto failed; |
189 | } | ||
190 | 176 | ||
191 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 177 | ret = pcmcia_request_irq(link, &link->irq); |
192 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 178 | if (ret) |
179 | goto failed; | ||
180 | ret = pcmcia_request_configuration(link, &link->conf); | ||
181 | if (ret) | ||
182 | goto failed; | ||
193 | 183 | ||
194 | p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, | 184 | p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, |
195 | link->irq.AssignedIRQ, PARPORT_DMA_NONE, | 185 | link->irq.AssignedIRQ, PARPORT_DMA_NONE, |
@@ -213,8 +203,6 @@ static int parport_config(struct pcmcia_device *link) | |||
213 | 203 | ||
214 | return 0; | 204 | return 0; |
215 | 205 | ||
216 | cs_failed: | ||
217 | cs_error(link, last_fn, last_ret); | ||
218 | failed: | 206 | failed: |
219 | parport_cs_release(link); | 207 | parport_cs_release(link); |
220 | return -ENODEV; | 208 | return -ENODEV; |
@@ -232,7 +220,7 @@ static void parport_cs_release(struct pcmcia_device *link) | |||
232 | { | 220 | { |
233 | parport_info_t *info = link->priv; | 221 | parport_info_t *info = link->priv; |
234 | 222 | ||
235 | DEBUG(0, "parport_release(0x%p)\n", link); | 223 | dev_dbg(&link->dev, "parport_release\n"); |
236 | 224 | ||
237 | if (info->ndev) { | 225 | if (info->ndev) { |
238 | struct parport *p = info->port; | 226 | struct parport *p = info->port; |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index b952ebc7a78b..416f6ac65b76 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -645,10 +645,13 @@ void __init detect_intel_iommu(void) | |||
645 | "x2apic and Intr-remapping.\n"); | 645 | "x2apic and Intr-remapping.\n"); |
646 | #endif | 646 | #endif |
647 | #ifdef CONFIG_DMAR | 647 | #ifdef CONFIG_DMAR |
648 | if (ret && !no_iommu && !iommu_detected && !swiotlb && | 648 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) |
649 | !dmar_disabled) | ||
650 | iommu_detected = 1; | 649 | iommu_detected = 1; |
651 | #endif | 650 | #endif |
651 | #ifdef CONFIG_X86 | ||
652 | if (ret) | ||
653 | x86_init.iommu.iommu_init = intel_iommu_init; | ||
654 | #endif | ||
652 | } | 655 | } |
653 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); | 656 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); |
654 | dmar_tbl = NULL; | 657 | dmar_tbl = NULL; |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 1840a0578a42..9261327b49f3 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -3266,7 +3266,7 @@ int __init intel_iommu_init(void) | |||
3266 | * Check the need for DMA-remapping initialization now. | 3266 | * Check the need for DMA-remapping initialization now. |
3267 | * Above initialization will also be used by Interrupt-remapping. | 3267 | * Above initialization will also be used by Interrupt-remapping. |
3268 | */ | 3268 | */ |
3269 | if (no_iommu || swiotlb || dmar_disabled) | 3269 | if (no_iommu || dmar_disabled) |
3270 | return -ENODEV; | 3270 | return -ENODEV; |
3271 | 3271 | ||
3272 | iommu_init_mempool(); | 3272 | iommu_init_mempool(); |
@@ -3287,7 +3287,9 @@ int __init intel_iommu_init(void) | |||
3287 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); | 3287 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); |
3288 | 3288 | ||
3289 | init_timer(&unmap_timer); | 3289 | init_timer(&unmap_timer); |
3290 | force_iommu = 1; | 3290 | #ifdef CONFIG_SWIOTLB |
3291 | swiotlb = 0; | ||
3292 | #endif | ||
3291 | dma_ops = &intel_dma_ops; | 3293 | dma_ops = &intel_dma_ops; |
3292 | 3294 | ||
3293 | init_iommu_sysfs(); | 3295 | init_iommu_sysfs(); |
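Taken together, the dmar.c and intel-iommu.c hunks change how VT-d coexists with swiotlb: detection no longer backs off merely because swiotlb is compiled in, the x86 platform hook x86_init.iommu.iommu_init is pointed at intel_iommu_init(), and once DMA remapping actually comes up the swiotlb flag is cleared (rather than setting force_iommu) before intel_dma_ops is installed. A condensed sketch of the resulting init-time logic, with the function body heavily abbreviated and the call path into the hook assumed to come from the x86 DMA setup code:

	int __init intel_iommu_init(void)
	{
		/* ... DMAR parsing and hardware bring-up elided ... */

		if (no_iommu || dmar_disabled)	/* swiotlb no longer vetoes init */
			return -ENODEV;

		/* ... */

		init_timer(&unmap_timer);
	#ifdef CONFIG_SWIOTLB
		swiotlb = 0;	/* hardware remapping replaces the bounce buffers */
	#endif
		dma_ops = &intel_dma_ops;

		init_iommu_sysfs();
		/* ... */
		return 0;
	}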
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 17f38a781d47..cd5082d3ca19 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig | |||
@@ -17,24 +17,6 @@ menuconfig PCCARD | |||
17 | 17 | ||
18 | if PCCARD | 18 | if PCCARD |
19 | 19 | ||
20 | config PCMCIA_DEBUG | ||
21 | bool "Enable PCCARD debugging" | ||
22 | help | ||
23 | Say Y here to enable PCMCIA subsystem debugging. You | ||
24 | will need to choose the debugging level either via the | ||
25 | kernel command line, or module options depending whether | ||
26 | you build the PCMCIA as modules. | ||
27 | |||
28 | The kernel command line options are: | ||
29 | pcmcia_core.pc_debug=N | ||
30 | pcmcia.pc_debug=N | ||
31 | sa11xx_core.pc_debug=N | ||
32 | |||
33 | The module option is called pc_debug=N | ||
34 | |||
35 | In all the above examples, N is the debugging verbosity | ||
36 | level. | ||
37 | |||
38 | config PCMCIA | 20 | config PCMCIA |
39 | tristate "16-bit PCMCIA support" | 21 | tristate "16-bit PCMCIA support" |
40 | select CRC32 | 22 | select CRC32 |
@@ -196,9 +178,13 @@ config PCMCIA_BCM63XX | |||
196 | tristate "bcm63xx pcmcia support" | 178 | tristate "bcm63xx pcmcia support" |
197 | depends on BCM63XX && PCMCIA | 179 | depends on BCM63XX && PCMCIA |
198 | 180 | ||
181 | config PCMCIA_SOC_COMMON | ||
182 | tristate | ||
183 | |||
199 | config PCMCIA_SA1100 | 184 | config PCMCIA_SA1100 |
200 | tristate "SA1100 support" | 185 | tristate "SA1100 support" |
201 | depends on ARM && ARCH_SA1100 && PCMCIA | 186 | depends on ARM && ARCH_SA1100 && PCMCIA |
187 | select PCMCIA_SOC_COMMON | ||
202 | help | 188 | help |
203 | Say Y here to include support for SA11x0-based PCMCIA or CF | 189 | Say Y here to include support for SA11x0-based PCMCIA or CF |
204 | sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/ | 190 | sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/ |
@@ -209,6 +195,7 @@ config PCMCIA_SA1100 | |||
209 | config PCMCIA_SA1111 | 195 | config PCMCIA_SA1111 |
210 | tristate "SA1111 support" | 196 | tristate "SA1111 support" |
211 | depends on ARM && ARCH_SA1100 && SA1111 && PCMCIA | 197 | depends on ARM && ARCH_SA1100 && SA1111 && PCMCIA |
198 | select PCMCIA_SOC_COMMON | ||
212 | help | 199 | help |
213 | Say Y here to include support for SA1111-based PCMCIA or CF | 200 | Say Y here to include support for SA1111-based PCMCIA or CF |
214 | sockets, found on the Jornada 720, Graphicsmaster and other | 201 | sockets, found on the Jornada 720, Graphicsmaster and other |
@@ -222,9 +209,28 @@ config PCMCIA_PXA2XX | |||
222 | depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ | 209 | depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ |
223 | || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ | 210 | || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ |
224 | || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2) | 211 | || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2) |
212 | select PCMCIA_SOC_COMMON | ||
225 | help | 213 | help |
226 | Say Y here to include support for the PXA2xx PCMCIA controller | 214 | Say Y here to include support for the PXA2xx PCMCIA controller |
227 | 215 | ||
216 | config PCMCIA_DEBUG | ||
217 | bool "Enable debugging" | ||
218 | depends on (PCMCIA_SA1111 || PCMCIA_SA1100 || PCMCIA_PXA2XX) | ||
219 | help | ||
220 | Say Y here to enable debugging for the SoC PCMCIA layer. | ||
221 | You will need to choose the debugging level either via the | ||
222 | kernel command line, or module options depending whether | ||
223 | you build the drivers as modules. | ||
224 | |||
225 | The kernel command line options are: | ||
226 | sa11xx_core.pc_debug=N | ||
227 | pxa2xx_core.pc_debug=N | ||
228 | |||
229 | The module option is called pc_debug=N | ||
230 | |||
231 | In all the above examples, N is the debugging verbosity | ||
232 | level. | ||
233 | |||
228 | config PCMCIA_PROBE | 234 | config PCMCIA_PROBE |
229 | bool | 235 | bool |
230 | default y if ISA && !ARCH_SA1100 && !ARCH_CLPS711X && !PARISC | 236 | default y if ISA && !ARCH_SA1100 && !ARCH_CLPS711X && !PARISC |
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index a03a38acd77d..382938313991 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -22,8 +22,9 @@ obj-$(CONFIG_I82365) += i82365.o | |||
22 | obj-$(CONFIG_I82092) += i82092.o | 22 | obj-$(CONFIG_I82092) += i82092.o |
23 | obj-$(CONFIG_TCIC) += tcic.o | 23 | obj-$(CONFIG_TCIC) += tcic.o |
24 | obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o | 24 | obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o |
25 | obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o | 25 | obj-$(CONFIG_PCMCIA_SOC_COMMON) += soc_common.o |
26 | obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o | 26 | obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_base.o sa1100_cs.o |
27 | obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_base.o sa1111_cs.o | ||
27 | obj-$(CONFIG_M32R_PCC) += m32r_pcc.o | 28 | obj-$(CONFIG_M32R_PCC) += m32r_pcc.o |
28 | obj-$(CONFIG_M32R_CFC) += m32r_cfc.o | 29 | obj-$(CONFIG_M32R_CFC) += m32r_cfc.o |
29 | obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o | 30 | obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o |
@@ -35,9 +36,6 @@ obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o | |||
35 | obj-$(CONFIG_AT91_CF) += at91_cf.o | 36 | obj-$(CONFIG_AT91_CF) += at91_cf.o |
36 | obj-$(CONFIG_ELECTRA_CF) += electra_cf.o | 37 | obj-$(CONFIG_ELECTRA_CF) += electra_cf.o |
37 | 38 | ||
38 | sa11xx_core-y += soc_common.o sa11xx_base.o | ||
39 | pxa2xx_core-y += soc_common.o pxa2xx_base.o | ||
40 | |||
41 | au1x00_ss-y += au1000_generic.o | 39 | au1x00_ss-y += au1000_generic.o |
42 | au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o | 40 | au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o |
43 | au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o | 41 | au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o |
@@ -77,4 +75,4 @@ pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o | |||
77 | pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o | 75 | pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o |
78 | pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o | 76 | pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o |
79 | 77 | ||
80 | obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y) | 78 | obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y) |
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index db77e1f3309a..4cd70d056810 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c | |||
@@ -91,7 +91,7 @@ static u_int xlate_rom_addr(void __iomem *b, u_int addr) | |||
91 | static void cb_release_cis_mem(struct pcmcia_socket * s) | 91 | static void cb_release_cis_mem(struct pcmcia_socket * s) |
92 | { | 92 | { |
93 | if (s->cb_cis_virt) { | 93 | if (s->cb_cis_virt) { |
94 | cs_dbg(s, 1, "cb_release_cis_mem()\n"); | 94 | dev_dbg(&s->dev, "cb_release_cis_mem()\n"); |
95 | iounmap(s->cb_cis_virt); | 95 | iounmap(s->cb_cis_virt); |
96 | s->cb_cis_virt = NULL; | 96 | s->cb_cis_virt = NULL; |
97 | s->cb_cis_res = NULL; | 97 | s->cb_cis_res = NULL; |
@@ -132,7 +132,7 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void | |||
132 | struct pci_dev *dev; | 132 | struct pci_dev *dev; |
133 | struct resource *res; | 133 | struct resource *res; |
134 | 134 | ||
135 | cs_dbg(s, 3, "read_cb_mem(%d, %#x, %u)\n", space, addr, len); | 135 | dev_dbg(&s->dev, "read_cb_mem(%d, %#x, %u)\n", space, addr, len); |
136 | 136 | ||
137 | dev = pci_get_slot(s->cb_dev->subordinate, 0); | 137 | dev = pci_get_slot(s->cb_dev->subordinate, 0); |
138 | if (!dev) | 138 | if (!dev) |
diff --git a/drivers/pcmcia/cirrus.h b/drivers/pcmcia/cirrus.h index ecd4fc7f666f..446a4576e73e 100644 --- a/drivers/pcmcia/cirrus.h +++ b/drivers/pcmcia/cirrus.h | |||
@@ -30,16 +30,6 @@ | |||
30 | #ifndef _LINUX_CIRRUS_H | 30 | #ifndef _LINUX_CIRRUS_H |
31 | #define _LINUX_CIRRUS_H | 31 | #define _LINUX_CIRRUS_H |
32 | 32 | ||
33 | #ifndef PCI_VENDOR_ID_CIRRUS | ||
34 | #define PCI_VENDOR_ID_CIRRUS 0x1013 | ||
35 | #endif | ||
36 | #ifndef PCI_DEVICE_ID_CIRRUS_6729 | ||
37 | #define PCI_DEVICE_ID_CIRRUS_6729 0x1100 | ||
38 | #endif | ||
39 | #ifndef PCI_DEVICE_ID_CIRRUS_6832 | ||
40 | #define PCI_DEVICE_ID_CIRRUS_6832 0x1110 | ||
41 | #endif | ||
42 | |||
43 | #define PD67_MISC_CTL_1 0x16 /* Misc control 1 */ | 33 | #define PD67_MISC_CTL_1 0x16 /* Misc control 1 */ |
44 | #define PD67_FIFO_CTL 0x17 /* FIFO control */ | 34 | #define PD67_FIFO_CTL 0x17 /* FIFO control */ |
45 | #define PD67_MISC_CTL_2 0x1E /* Misc control 2 */ | 35 | #define PD67_MISC_CTL_2 0x1E /* Misc control 2 */ |
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 6c4a4fc83630..8c1b73cf021b 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c | |||
@@ -138,7 +138,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, | |||
138 | void __iomem *sys, *end; | 138 | void __iomem *sys, *end; |
139 | unsigned char *buf = ptr; | 139 | unsigned char *buf = ptr; |
140 | 140 | ||
141 | cs_dbg(s, 3, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); | 141 | dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); |
142 | 142 | ||
143 | if (attr & IS_INDIRECT) { | 143 | if (attr & IS_INDIRECT) { |
144 | /* Indirect accesses use a bunch of special registers at fixed | 144 | /* Indirect accesses use a bunch of special registers at fixed |
@@ -190,7 +190,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, | |||
190 | addr = 0; | 190 | addr = 0; |
191 | } | 191 | } |
192 | } | 192 | } |
193 | cs_dbg(s, 3, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", | 193 | dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", |
194 | *(u_char *)(ptr+0), *(u_char *)(ptr+1), | 194 | *(u_char *)(ptr+0), *(u_char *)(ptr+1), |
195 | *(u_char *)(ptr+2), *(u_char *)(ptr+3)); | 195 | *(u_char *)(ptr+2), *(u_char *)(ptr+3)); |
196 | return 0; | 196 | return 0; |
@@ -204,7 +204,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, | |||
204 | void __iomem *sys, *end; | 204 | void __iomem *sys, *end; |
205 | unsigned char *buf = ptr; | 205 | unsigned char *buf = ptr; |
206 | 206 | ||
207 | cs_dbg(s, 3, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); | 207 | dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); |
208 | 208 | ||
209 | if (attr & IS_INDIRECT) { | 209 | if (attr & IS_INDIRECT) { |
210 | /* Indirect accesses use a bunch of special registers at fixed | 210 | /* Indirect accesses use a bunch of special registers at fixed |
@@ -584,7 +584,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_ | |||
584 | ofs += link[1] + 2; | 584 | ofs += link[1] + 2; |
585 | } | 585 | } |
586 | if (i == MAX_TUPLES) { | 586 | if (i == MAX_TUPLES) { |
587 | cs_dbg(s, 1, "cs: overrun in pcmcia_get_next_tuple\n"); | 587 | dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n"); |
588 | return -ENOSPC; | 588 | return -ENOSPC; |
589 | } | 589 | } |
590 | 590 | ||
@@ -1440,7 +1440,7 @@ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse) | |||
1440 | break; | 1440 | break; |
1441 | } | 1441 | } |
1442 | if (ret) | 1442 | if (ret) |
1443 | __cs_dbg(0, "parse_tuple failed %d\n", ret); | 1443 | pr_debug("parse_tuple failed %d\n", ret); |
1444 | return ret; | 1444 | return ret; |
1445 | } | 1445 | } |
1446 | EXPORT_SYMBOL(pcmcia_parse_tuple); | 1446 | EXPORT_SYMBOL(pcmcia_parse_tuple); |
@@ -1482,6 +1482,67 @@ done: | |||
1482 | } | 1482 | } |
1483 | EXPORT_SYMBOL(pccard_read_tuple); | 1483 | EXPORT_SYMBOL(pccard_read_tuple); |
1484 | 1484 | ||
1485 | |||
1486 | /** | ||
1487 | * pccard_loop_tuple() - loop over tuples in the CIS | ||
1488 | * @s: the struct pcmcia_socket where the card is inserted | ||
1489 | * @function: the device function we loop for | ||
1490 | * @code: which CIS code shall we look for? | ||
1491 | * @parse: buffer where the tuple shall be parsed (or NULL, if no parse) | ||
1492 | * @priv_data: private data to be passed to the loop_tuple function. | ||
1493 | * @loop_tuple: function to call for each CIS entry of type @code. It | ||
1494 | * gets passed the raw tuple, the parsed tuple (if @parse is | ||
1495 | * set) and @priv_data. | ||
1496 | * | ||
1497 | * pccard_loop_tuple() loops over all CIS entries of type @code, and | ||
1498 | * calls the @loop_tuple function for each entry. If the call to @loop_tuple | ||
1499 | * returns 0, the loop exits. Returns 0 on success or an error code otherwise. | ||
1500 | */ | ||
1501 | int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function, | ||
1502 | cisdata_t code, cisparse_t *parse, void *priv_data, | ||
1503 | int (*loop_tuple) (tuple_t *tuple, | ||
1504 | cisparse_t *parse, | ||
1505 | void *priv_data)) | ||
1506 | { | ||
1507 | tuple_t tuple; | ||
1508 | cisdata_t *buf; | ||
1509 | int ret; | ||
1510 | |||
1511 | buf = kzalloc(256, GFP_KERNEL); | ||
1512 | if (buf == NULL) { | ||
1513 | dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n"); | ||
1514 | return -ENOMEM; | ||
1515 | } | ||
1516 | |||
1517 | tuple.TupleData = buf; | ||
1518 | tuple.TupleDataMax = 255; | ||
1519 | tuple.TupleOffset = 0; | ||
1520 | tuple.DesiredTuple = code; | ||
1521 | tuple.Attributes = 0; | ||
1522 | |||
1523 | ret = pccard_get_first_tuple(s, function, &tuple); | ||
1524 | while (!ret) { | ||
1525 | if (pccard_get_tuple_data(s, &tuple)) | ||
1526 | goto next_entry; | ||
1527 | |||
1528 | if (parse) | ||
1529 | if (pcmcia_parse_tuple(&tuple, parse)) | ||
1530 | goto next_entry; | ||
1531 | |||
1532 | ret = loop_tuple(&tuple, parse, priv_data); | ||
1533 | if (!ret) | ||
1534 | break; | ||
1535 | |||
1536 | next_entry: | ||
1537 | ret = pccard_get_next_tuple(s, function, &tuple); | ||
1538 | } | ||
1539 | |||
1540 | kfree(buf); | ||
1541 | return ret; | ||
1542 | } | ||
1543 | EXPORT_SYMBOL(pccard_loop_tuple); | ||
1544 | |||
1545 | |||
1485 | /*====================================================================== | 1546 | /*====================================================================== |
1486 | 1547 | ||
1487 | This tries to determine if a card has a sensible CIS. It returns | 1548 | This tries to determine if a card has a sensible CIS. It returns |
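The pccard_loop_tuple() helper added in the cistpl.c hunk above factors out the get-first/get-next/parse boilerplate that callers previously open-coded. As a rough illustration only -- the callback name, the counter, and the choice of CISTPL_CFTABLE_ENTRY below are hypothetical and not part of this patch -- a caller could walk every configuration-table entry of one card function like this:

static int count_one_cftable_entry(tuple_t *tuple, cisparse_t *parse,
				   void *priv_data)
{
	unsigned int *count = priv_data;

	/* parse->cftable_entry is valid here because a non-NULL parse
	 * buffer is handed to pccard_loop_tuple() below. */
	(*count)++;

	return -ENODEV;	/* non-zero return: keep iterating */
}

static unsigned int count_cftable_entries(struct pcmcia_socket *s,
					  unsigned int function)
{
	cisparse_t parse;
	unsigned int count = 0;

	/* Visit every CISTPL_CFTABLE_ENTRY tuple of this function; the
	 * walk only terminates early if the callback returns 0. */
	pccard_loop_tuple(s, function, CISTPL_CFTABLE_ENTRY, &parse,
			  &count, count_one_cftable_entry);

	return count;
}

Returning 0 from the callback stops the walk at the current entry, which is the intended pattern for "find the first matching entry" users; any non-zero return makes pccard_loop_tuple() advance to the next tuple of the requested code.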
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 698d75cda084..790af87a922f 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c | |||
@@ -61,17 +61,6 @@ INT_MODULE_PARM(unreset_limit, 30); /* unreset_check's */ | |||
61 | /* Access speed for attribute memory windows */ | 61 | /* Access speed for attribute memory windows */ |
62 | INT_MODULE_PARM(cis_speed, 300); /* ns */ | 62 | INT_MODULE_PARM(cis_speed, 300); /* ns */ |
63 | 63 | ||
64 | #ifdef CONFIG_PCMCIA_DEBUG | ||
65 | static int pc_debug; | ||
66 | |||
67 | module_param(pc_debug, int, 0644); | ||
68 | |||
69 | int cs_debug_level(int level) | ||
70 | { | ||
71 | return pc_debug > level; | ||
72 | } | ||
73 | #endif | ||
74 | |||
75 | 64 | ||
76 | socket_state_t dead_socket = { | 65 | socket_state_t dead_socket = { |
77 | .csc_mask = SS_DETECT, | 66 | .csc_mask = SS_DETECT, |
@@ -190,7 +179,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket) | |||
190 | if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops) | 179 | if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops) |
191 | return -EINVAL; | 180 | return -EINVAL; |
192 | 181 | ||
193 | cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops); | 182 | dev_dbg(&socket->dev, "pcmcia_register_socket(0x%p)\n", socket->ops); |
194 | 183 | ||
195 | spin_lock_init(&socket->lock); | 184 | spin_lock_init(&socket->lock); |
196 | 185 | ||
@@ -262,6 +251,13 @@ int pcmcia_register_socket(struct pcmcia_socket *socket) | |||
262 | 251 | ||
263 | pcmcia_parse_events(socket, SS_DETECT); | 252 | pcmcia_parse_events(socket, SS_DETECT); |
264 | 253 | ||
254 | /* | ||
255 | * Let's try to get the PCMCIA module for 16-bit PCMCIA support. | ||
256 | * If it fails, it doesn't matter -- we still have 32-bit CardBus | ||
257 | * support to offer, so this is not a failure mode. | ||
258 | */ | ||
259 | request_module_nowait("pcmcia"); | ||
260 | |||
265 | return 0; | 261 | return 0; |
266 | 262 | ||
267 | err: | 263 | err: |
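The request_module_nowait() call added above is deliberately fire-and-forget: socket registration must not block on modprobe, and a missing pcmcia module only means 16-bit card support is unavailable while CardBus keeps working. For contrast, a purely hypothetical blocking variant (handle_missing_pcmcia() is an illustrative name, not an existing function) would have to pick a failure policy:

	/* Hypothetical blocking form: waits for modprobe to finish and
	 * must decide what a failure means. */
	if (request_module("pcmcia") != 0)
		handle_missing_pcmcia();

	/* Form used by pcmcia_register_socket(): modprobe runs
	 * asynchronously and its result is intentionally ignored. */
	request_module_nowait("pcmcia");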
@@ -282,7 +278,7 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket) | |||
282 | if (!socket) | 278 | if (!socket) |
283 | return; | 279 | return; |
284 | 280 | ||
285 | cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops); | 281 | dev_dbg(&socket->dev, "pcmcia_unregister_socket(0x%p)\n", socket->ops); |
286 | 282 | ||
287 | if (socket->thread) | 283 | if (socket->thread) |
288 | kthread_stop(socket->thread); | 284 | kthread_stop(socket->thread); |
@@ -335,7 +331,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority) | |||
335 | if (s->state & SOCKET_CARDBUS) | 331 | if (s->state & SOCKET_CARDBUS) |
336 | return 0; | 332 | return 0; |
337 | 333 | ||
338 | cs_dbg(s, 1, "send_event(event %d, pri %d, callback 0x%p)\n", | 334 | dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n", |
339 | event, priority, s->callback); | 335 | event, priority, s->callback); |
340 | 336 | ||
341 | if (!s->callback) | 337 | if (!s->callback) |
@@ -352,7 +348,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority) | |||
352 | 348 | ||
353 | static void socket_remove_drivers(struct pcmcia_socket *skt) | 349 | static void socket_remove_drivers(struct pcmcia_socket *skt) |
354 | { | 350 | { |
355 | cs_dbg(skt, 4, "remove_drivers\n"); | 351 | dev_dbg(&skt->dev, "remove_drivers\n"); |
356 | 352 | ||
357 | send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH); | 353 | send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH); |
358 | } | 354 | } |
@@ -361,7 +357,7 @@ static int socket_reset(struct pcmcia_socket *skt) | |||
361 | { | 357 | { |
362 | int status, i; | 358 | int status, i; |
363 | 359 | ||
364 | cs_dbg(skt, 4, "reset\n"); | 360 | dev_dbg(&skt->dev, "reset\n"); |
365 | 361 | ||
366 | skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET; | 362 | skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET; |
367 | skt->ops->set_socket(skt, &skt->socket); | 363 | skt->ops->set_socket(skt, &skt->socket); |
@@ -383,7 +379,7 @@ static int socket_reset(struct pcmcia_socket *skt) | |||
383 | msleep(unreset_check * 10); | 379 | msleep(unreset_check * 10); |
384 | } | 380 | } |
385 | 381 | ||
386 | cs_err(skt, "time out after reset.\n"); | 382 | dev_printk(KERN_ERR, &skt->dev, "time out after reset.\n"); |
387 | return -ETIMEDOUT; | 383 | return -ETIMEDOUT; |
388 | } | 384 | } |
389 | 385 | ||
@@ -397,7 +393,7 @@ static void socket_shutdown(struct pcmcia_socket *s) | |||
397 | { | 393 | { |
398 | int status; | 394 | int status; |
399 | 395 | ||
400 | cs_dbg(s, 4, "shutdown\n"); | 396 | dev_dbg(&s->dev, "shutdown\n"); |
401 | 397 | ||
402 | socket_remove_drivers(s); | 398 | socket_remove_drivers(s); |
403 | s->state &= SOCKET_INUSE | SOCKET_PRESENT; | 399 | s->state &= SOCKET_INUSE | SOCKET_PRESENT; |
@@ -432,7 +428,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) | |||
432 | { | 428 | { |
433 | int status, i; | 429 | int status, i; |
434 | 430 | ||
435 | cs_dbg(skt, 4, "setup\n"); | 431 | dev_dbg(&skt->dev, "setup\n"); |
436 | 432 | ||
437 | skt->ops->get_status(skt, &status); | 433 | skt->ops->get_status(skt, &status); |
438 | if (!(status & SS_DETECT)) | 434 | if (!(status & SS_DETECT)) |
@@ -452,13 +448,15 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) | |||
452 | } | 448 | } |
453 | 449 | ||
454 | if (status & SS_PENDING) { | 450 | if (status & SS_PENDING) { |
455 | cs_err(skt, "voltage interrogation timed out.\n"); | 451 | dev_printk(KERN_ERR, &skt->dev, |
452 | "voltage interrogation timed out.\n"); | ||
456 | return -ETIMEDOUT; | 453 | return -ETIMEDOUT; |
457 | } | 454 | } |
458 | 455 | ||
459 | if (status & SS_CARDBUS) { | 456 | if (status & SS_CARDBUS) { |
460 | if (!(skt->features & SS_CAP_CARDBUS)) { | 457 | if (!(skt->features & SS_CAP_CARDBUS)) { |
461 | cs_err(skt, "cardbus cards are not supported.\n"); | 458 | dev_printk(KERN_ERR, &skt->dev, |
459 | "cardbus cards are not supported.\n"); | ||
462 | return -EINVAL; | 460 | return -EINVAL; |
463 | } | 461 | } |
464 | skt->state |= SOCKET_CARDBUS; | 462 | skt->state |= SOCKET_CARDBUS; |
@@ -472,7 +470,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) | |||
472 | else if (!(status & SS_XVCARD)) | 470 | else if (!(status & SS_XVCARD)) |
473 | skt->socket.Vcc = skt->socket.Vpp = 50; | 471 | skt->socket.Vcc = skt->socket.Vpp = 50; |
474 | else { | 472 | else { |
475 | cs_err(skt, "unsupported voltage key.\n"); | 473 | dev_printk(KERN_ERR, &skt->dev, "unsupported voltage key.\n"); |
476 | return -EIO; | 474 | return -EIO; |
477 | } | 475 | } |
478 | 476 | ||
@@ -489,7 +487,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) | |||
489 | 487 | ||
490 | skt->ops->get_status(skt, &status); | 488 | skt->ops->get_status(skt, &status); |
491 | if (!(status & SS_POWERON)) { | 489 | if (!(status & SS_POWERON)) { |
492 | cs_err(skt, "unable to apply power.\n"); | 490 | dev_printk(KERN_ERR, &skt->dev, "unable to apply power.\n"); |
493 | return -EIO; | 491 | return -EIO; |
494 | } | 492 | } |
495 | 493 | ||
@@ -509,7 +507,7 @@ static int socket_insert(struct pcmcia_socket *skt) | |||
509 | { | 507 | { |
510 | int ret; | 508 | int ret; |
511 | 509 | ||
512 | cs_dbg(skt, 4, "insert\n"); | 510 | dev_dbg(&skt->dev, "insert\n"); |
513 | 511 | ||
514 | if (!cs_socket_get(skt)) | 512 | if (!cs_socket_get(skt)) |
515 | return -ENODEV; | 513 | return -ENODEV; |
@@ -529,7 +527,7 @@ static int socket_insert(struct pcmcia_socket *skt) | |||
529 | skt->state |= SOCKET_CARDBUS_CONFIG; | 527 | skt->state |= SOCKET_CARDBUS_CONFIG; |
530 | } | 528 | } |
531 | #endif | 529 | #endif |
532 | cs_dbg(skt, 4, "insert done\n"); | 530 | dev_dbg(&skt->dev, "insert done\n"); |
533 | 531 | ||
534 | send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); | 532 | send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); |
535 | } else { | 533 | } else { |
@@ -576,7 +574,7 @@ static int socket_late_resume(struct pcmcia_socket *skt) | |||
576 | * FIXME: need a better check here for cardbus cards. | 574 | * FIXME: need a better check here for cardbus cards. |
577 | */ | 575 | */ |
578 | if (verify_cis_cache(skt) != 0) { | 576 | if (verify_cis_cache(skt) != 0) { |
579 | cs_dbg(skt, 4, "cis mismatch - different card\n"); | 577 | dev_dbg(&skt->dev, "cis mismatch - different card\n"); |
580 | socket_remove_drivers(skt); | 578 | socket_remove_drivers(skt); |
581 | destroy_cis_cache(skt); | 579 | destroy_cis_cache(skt); |
582 | /* | 580 | /* |
@@ -587,7 +585,7 @@ static int socket_late_resume(struct pcmcia_socket *skt) | |||
587 | msleep(200); | 585 | msleep(200); |
588 | send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); | 586 | send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); |
589 | } else { | 587 | } else { |
590 | cs_dbg(skt, 4, "cis matches cache\n"); | 588 | dev_dbg(&skt->dev, "cis matches cache\n"); |
591 | send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW); | 589 | send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW); |
592 | } | 590 | } |
593 | } else { | 591 | } else { |
@@ -723,7 +721,7 @@ static int pccardd(void *__skt) | |||
723 | void pcmcia_parse_events(struct pcmcia_socket *s, u_int events) | 721 | void pcmcia_parse_events(struct pcmcia_socket *s, u_int events) |
724 | { | 722 | { |
725 | unsigned long flags; | 723 | unsigned long flags; |
726 | cs_dbg(s, 4, "parse_events: events %08x\n", events); | 724 | dev_dbg(&s->dev, "parse_events: events %08x\n", events); |
727 | if (s->thread) { | 725 | if (s->thread) { |
728 | spin_lock_irqsave(&s->thread_lock, flags); | 726 | spin_lock_irqsave(&s->thread_lock, flags); |
729 | s->thread_events |= events; | 727 | s->thread_events |= events; |
@@ -773,19 +771,22 @@ int pcmcia_reset_card(struct pcmcia_socket *skt) | |||
773 | { | 771 | { |
774 | int ret; | 772 | int ret; |
775 | 773 | ||
776 | cs_dbg(skt, 1, "resetting socket\n"); | 774 | dev_dbg(&skt->dev, "resetting socket\n"); |
777 | 775 | ||
778 | mutex_lock(&skt->skt_mutex); | 776 | mutex_lock(&skt->skt_mutex); |
779 | do { | 777 | do { |
780 | if (!(skt->state & SOCKET_PRESENT)) { | 778 | if (!(skt->state & SOCKET_PRESENT)) { |
779 | dev_dbg(&skt->dev, "can't reset, not present\n"); | ||
781 | ret = -ENODEV; | 780 | ret = -ENODEV; |
782 | break; | 781 | break; |
783 | } | 782 | } |
784 | if (skt->state & SOCKET_SUSPEND) { | 783 | if (skt->state & SOCKET_SUSPEND) { |
784 | dev_dbg(&skt->dev, "can't reset, suspended\n"); | ||
785 | ret = -EBUSY; | 785 | ret = -EBUSY; |
786 | break; | 786 | break; |
787 | } | 787 | } |
788 | if (skt->state & SOCKET_CARDBUS) { | 788 | if (skt->state & SOCKET_CARDBUS) { |
789 | dev_dbg(&skt->dev, "can't reset, is cardbus\n"); | ||
789 | ret = -EPERM; | 790 | ret = -EPERM; |
790 | break; | 791 | break; |
791 | } | 792 | } |
@@ -818,7 +819,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt) | |||
818 | { | 819 | { |
819 | int ret; | 820 | int ret; |
820 | 821 | ||
821 | cs_dbg(skt, 1, "suspending socket\n"); | 822 | dev_dbg(&skt->dev, "suspending socket\n"); |
822 | 823 | ||
823 | mutex_lock(&skt->skt_mutex); | 824 | mutex_lock(&skt->skt_mutex); |
824 | do { | 825 | do { |
@@ -848,7 +849,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt) | |||
848 | { | 849 | { |
849 | int ret; | 850 | int ret; |
850 | 851 | ||
851 | cs_dbg(skt, 1, "waking up socket\n"); | 852 | dev_dbg(&skt->dev, "waking up socket\n"); |
852 | 853 | ||
853 | mutex_lock(&skt->skt_mutex); | 854 | mutex_lock(&skt->skt_mutex); |
854 | do { | 855 | do { |
@@ -876,7 +877,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt) | |||
876 | { | 877 | { |
877 | int ret; | 878 | int ret; |
878 | 879 | ||
879 | cs_dbg(skt, 1, "user eject request\n"); | 880 | dev_dbg(&skt->dev, "user eject request\n"); |
880 | 881 | ||
881 | mutex_lock(&skt->skt_mutex); | 882 | mutex_lock(&skt->skt_mutex); |
882 | do { | 883 | do { |
@@ -905,7 +906,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt) | |||
905 | { | 906 | { |
906 | int ret; | 907 | int ret; |
907 | 908 | ||
908 | cs_dbg(skt, 1, "user insert request\n"); | 909 | dev_dbg(&skt->dev, "user insert request\n"); |
909 | 910 | ||
910 | mutex_lock(&skt->skt_mutex); | 911 | mutex_lock(&skt->skt_mutex); |
911 | do { | 912 | do { |
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h index 1f4098f1354d..3bc02d53a3a3 100644 --- a/drivers/pcmcia/cs_internal.h +++ b/drivers/pcmcia/cs_internal.h | |||
@@ -107,28 +107,6 @@ static inline void cs_socket_put(struct pcmcia_socket *skt) | |||
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
110 | #ifdef CONFIG_PCMCIA_DEBUG | ||
111 | extern int cs_debug_level(int); | ||
112 | |||
113 | #define cs_dbg(skt, lvl, fmt, arg...) do { \ | ||
114 | if (cs_debug_level(lvl)) \ | ||
115 | dev_printk(KERN_DEBUG, &skt->dev, \ | ||
116 | "cs: " fmt, ## arg); \ | ||
117 | } while (0) | ||
118 | #define __cs_dbg(lvl, fmt, arg...) do { \ | ||
119 | if (cs_debug_level(lvl)) \ | ||
120 | printk(KERN_DEBUG \ | ||
121 | "cs: " fmt, ## arg); \ | ||
122 | } while (0) | ||
123 | |||
124 | #else | ||
125 | #define cs_dbg(skt, lvl, fmt, arg...) do { } while (0) | ||
126 | #define __cs_dbg(lvl, fmt, arg...) do { } while (0) | ||
127 | #endif | ||
128 | |||
129 | #define cs_err(skt, fmt, arg...) \ | ||
130 | dev_printk(KERN_ERR, &skt->dev, "cs: " fmt, ## arg) | ||
131 | |||
132 | 110 | ||
133 | /* | 111 | /* |
134 | * Stuff internal to module "pcmcia_core": | 112 | * Stuff internal to module "pcmcia_core": |
@@ -170,10 +148,6 @@ extern struct rw_semaphore pcmcia_socket_list_rwsem; | |||
170 | extern struct list_head pcmcia_socket_list; | 148 | extern struct list_head pcmcia_socket_list; |
171 | extern struct class pcmcia_socket_class; | 149 | extern struct class pcmcia_socket_class; |
172 | 150 | ||
173 | int pcmcia_get_window(struct pcmcia_socket *s, | ||
174 | window_handle_t *handle, | ||
175 | int idx, | ||
176 | win_req_t *req); | ||
177 | int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); | 151 | int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); |
178 | struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr); | 152 | struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr); |
179 | 153 | ||
@@ -199,6 +173,22 @@ int pcmcia_replace_cis(struct pcmcia_socket *s, | |||
199 | const u8 *data, const size_t len); | 173 | const u8 *data, const size_t len); |
200 | int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count); | 174 | int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count); |
201 | 175 | ||
176 | /* loop over CIS entries */ | ||
177 | int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function, | ||
178 | cisdata_t code, cisparse_t *parse, void *priv_data, | ||
179 | int (*loop_tuple) (tuple_t *tuple, | ||
180 | cisparse_t *parse, | ||
181 | void *priv_data)); | ||
182 | |||
183 | int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, | ||
184 | tuple_t *tuple); | ||
185 | |||
186 | int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, | ||
187 | tuple_t *tuple); | ||
188 | |||
189 | int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple); | ||
190 | |||
191 | |||
202 | /* rsrc_mgr.c */ | 192 | /* rsrc_mgr.c */ |
203 | int pcmcia_validate_mem(struct pcmcia_socket *s); | 193 | int pcmcia_validate_mem(struct pcmcia_socket *s); |
204 | struct resource *pcmcia_find_io_region(unsigned long base, | 194 | struct resource *pcmcia_find_io_region(unsigned long base, |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index f5b7079f13d3..05893d41dd41 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -41,129 +41,11 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | |||
41 | MODULE_DESCRIPTION("PCMCIA Driver Services"); | 41 | MODULE_DESCRIPTION("PCMCIA Driver Services"); |
42 | MODULE_LICENSE("GPL"); | 42 | MODULE_LICENSE("GPL"); |
43 | 43 | ||
44 | #ifdef CONFIG_PCMCIA_DEBUG | ||
45 | int ds_pc_debug; | ||
46 | |||
47 | module_param_named(pc_debug, ds_pc_debug, int, 0644); | ||
48 | |||
49 | #define ds_dbg(lvl, fmt, arg...) do { \ | ||
50 | if (ds_pc_debug > (lvl)) \ | ||
51 | printk(KERN_DEBUG "ds: " fmt , ## arg); \ | ||
52 | } while (0) | ||
53 | #define ds_dev_dbg(lvl, dev, fmt, arg...) do { \ | ||
54 | if (ds_pc_debug > (lvl)) \ | ||
55 | dev_printk(KERN_DEBUG, dev, "ds: " fmt , ## arg); \ | ||
56 | } while (0) | ||
57 | #else | ||
58 | #define ds_dbg(lvl, fmt, arg...) do { } while (0) | ||
59 | #define ds_dev_dbg(lvl, dev, fmt, arg...) do { } while (0) | ||
60 | #endif | ||
61 | 44 | ||
62 | spinlock_t pcmcia_dev_list_lock; | 45 | spinlock_t pcmcia_dev_list_lock; |
63 | 46 | ||
64 | /*====================================================================*/ | 47 | /*====================================================================*/ |
65 | 48 | ||
66 | /* code which was in cs.c before */ | ||
67 | |||
68 | /* String tables for error messages */ | ||
69 | |||
70 | typedef struct lookup_t { | ||
71 | const int key; | ||
72 | const char *msg; | ||
73 | } lookup_t; | ||
74 | |||
75 | static const lookup_t error_table[] = { | ||
76 | { 0, "Operation succeeded" }, | ||
77 | { -EIO, "Input/Output error" }, | ||
78 | { -ENODEV, "No card present" }, | ||
79 | { -EINVAL, "Bad parameter" }, | ||
80 | { -EACCES, "Configuration locked" }, | ||
81 | { -EBUSY, "Resource in use" }, | ||
82 | { -ENOSPC, "No more items" }, | ||
83 | { -ENOMEM, "Out of resource" }, | ||
84 | }; | ||
85 | |||
86 | |||
87 | static const lookup_t service_table[] = { | ||
88 | { AccessConfigurationRegister, "AccessConfigurationRegister" }, | ||
89 | { AddSocketServices, "AddSocketServices" }, | ||
90 | { AdjustResourceInfo, "AdjustResourceInfo" }, | ||
91 | { CheckEraseQueue, "CheckEraseQueue" }, | ||
92 | { CloseMemory, "CloseMemory" }, | ||
93 | { DeregisterClient, "DeregisterClient" }, | ||
94 | { DeregisterEraseQueue, "DeregisterEraseQueue" }, | ||
95 | { GetCardServicesInfo, "GetCardServicesInfo" }, | ||
96 | { GetClientInfo, "GetClientInfo" }, | ||
97 | { GetConfigurationInfo, "GetConfigurationInfo" }, | ||
98 | { GetEventMask, "GetEventMask" }, | ||
99 | { GetFirstClient, "GetFirstClient" }, | ||
100 | { GetFirstRegion, "GetFirstRegion" }, | ||
101 | { GetFirstTuple, "GetFirstTuple" }, | ||
102 | { GetNextClient, "GetNextClient" }, | ||
103 | { GetNextRegion, "GetNextRegion" }, | ||
104 | { GetNextTuple, "GetNextTuple" }, | ||
105 | { GetStatus, "GetStatus" }, | ||
106 | { GetTupleData, "GetTupleData" }, | ||
107 | { MapMemPage, "MapMemPage" }, | ||
108 | { ModifyConfiguration, "ModifyConfiguration" }, | ||
109 | { ModifyWindow, "ModifyWindow" }, | ||
110 | { OpenMemory, "OpenMemory" }, | ||
111 | { ParseTuple, "ParseTuple" }, | ||
112 | { ReadMemory, "ReadMemory" }, | ||
113 | { RegisterClient, "RegisterClient" }, | ||
114 | { RegisterEraseQueue, "RegisterEraseQueue" }, | ||
115 | { RegisterMTD, "RegisterMTD" }, | ||
116 | { ReleaseConfiguration, "ReleaseConfiguration" }, | ||
117 | { ReleaseIO, "ReleaseIO" }, | ||
118 | { ReleaseIRQ, "ReleaseIRQ" }, | ||
119 | { ReleaseWindow, "ReleaseWindow" }, | ||
120 | { RequestConfiguration, "RequestConfiguration" }, | ||
121 | { RequestIO, "RequestIO" }, | ||
122 | { RequestIRQ, "RequestIRQ" }, | ||
123 | { RequestSocketMask, "RequestSocketMask" }, | ||
124 | { RequestWindow, "RequestWindow" }, | ||
125 | { ResetCard, "ResetCard" }, | ||
126 | { SetEventMask, "SetEventMask" }, | ||
127 | { ValidateCIS, "ValidateCIS" }, | ||
128 | { WriteMemory, "WriteMemory" }, | ||
129 | { BindDevice, "BindDevice" }, | ||
130 | { BindMTD, "BindMTD" }, | ||
131 | { ReportError, "ReportError" }, | ||
132 | { SuspendCard, "SuspendCard" }, | ||
133 | { ResumeCard, "ResumeCard" }, | ||
134 | { EjectCard, "EjectCard" }, | ||
135 | { InsertCard, "InsertCard" }, | ||
136 | { ReplaceCIS, "ReplaceCIS" } | ||
137 | }; | ||
138 | |||
139 | const char *pcmcia_error_func(int func) | ||
140 | { | ||
141 | int i; | ||
142 | |||
143 | for (i = 0; i < ARRAY_SIZE(service_table); i++) | ||
144 | if (service_table[i].key == func) | ||
145 | return service_table[i].msg; | ||
146 | |||
147 | return "Unknown service number"; | ||
148 | } | ||
149 | EXPORT_SYMBOL(pcmcia_error_func); | ||
150 | |||
151 | const char *pcmcia_error_ret(int ret) | ||
152 | { | ||
153 | int i; | ||
154 | |||
155 | for (i = 0; i < ARRAY_SIZE(error_table); i++) | ||
156 | if (error_table[i].key == ret) | ||
157 | return error_table[i].msg; | ||
158 | |||
159 | return "unknown"; | ||
160 | } | ||
161 | EXPORT_SYMBOL(pcmcia_error_ret); | ||
162 | |||
163 | /*======================================================================*/ | ||
164 | |||
165 | |||
166 | |||
167 | static void pcmcia_check_driver(struct pcmcia_driver *p_drv) | 49 | static void pcmcia_check_driver(struct pcmcia_driver *p_drv) |
168 | { | 50 | { |
169 | struct pcmcia_device_id *did = p_drv->id_table; | 51 | struct pcmcia_device_id *did = p_drv->id_table; |
@@ -303,7 +185,7 @@ int pcmcia_register_driver(struct pcmcia_driver *driver) | |||
303 | spin_lock_init(&driver->dynids.lock); | 185 | spin_lock_init(&driver->dynids.lock); |
304 | INIT_LIST_HEAD(&driver->dynids.list); | 186 | INIT_LIST_HEAD(&driver->dynids.list); |
305 | 187 | ||
306 | ds_dbg(3, "registering driver %s\n", driver->drv.name); | 188 | pr_debug("registering driver %s\n", driver->drv.name); |
307 | 189 | ||
308 | error = driver_register(&driver->drv); | 190 | error = driver_register(&driver->drv); |
309 | if (error < 0) | 191 | if (error < 0) |
@@ -323,7 +205,7 @@ EXPORT_SYMBOL(pcmcia_register_driver); | |||
323 | */ | 205 | */ |
324 | void pcmcia_unregister_driver(struct pcmcia_driver *driver) | 206 | void pcmcia_unregister_driver(struct pcmcia_driver *driver) |
325 | { | 207 | { |
326 | ds_dbg(3, "unregistering driver %s\n", driver->drv.name); | 208 | pr_debug("unregistering driver %s\n", driver->drv.name); |
327 | driver_unregister(&driver->drv); | 209 | driver_unregister(&driver->drv); |
328 | pcmcia_free_dynids(driver); | 210 | pcmcia_free_dynids(driver); |
329 | } | 211 | } |
@@ -350,14 +232,14 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev) | |||
350 | static void pcmcia_release_function(struct kref *ref) | 232 | static void pcmcia_release_function(struct kref *ref) |
351 | { | 233 | { |
352 | struct config_t *c = container_of(ref, struct config_t, ref); | 234 | struct config_t *c = container_of(ref, struct config_t, ref); |
353 | ds_dbg(1, "releasing config_t\n"); | 235 | pr_debug("releasing config_t\n"); |
354 | kfree(c); | 236 | kfree(c); |
355 | } | 237 | } |
356 | 238 | ||
357 | static void pcmcia_release_dev(struct device *dev) | 239 | static void pcmcia_release_dev(struct device *dev) |
358 | { | 240 | { |
359 | struct pcmcia_device *p_dev = to_pcmcia_dev(dev); | 241 | struct pcmcia_device *p_dev = to_pcmcia_dev(dev); |
360 | ds_dev_dbg(1, dev, "releasing device\n"); | 242 | dev_dbg(dev, "releasing device\n"); |
361 | pcmcia_put_socket(p_dev->socket); | 243 | pcmcia_put_socket(p_dev->socket); |
362 | kfree(p_dev->devname); | 244 | kfree(p_dev->devname); |
363 | kref_put(&p_dev->function_config->ref, pcmcia_release_function); | 245 | kref_put(&p_dev->function_config->ref, pcmcia_release_function); |
@@ -367,7 +249,7 @@ static void pcmcia_release_dev(struct device *dev) | |||
367 | static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc) | 249 | static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc) |
368 | { | 250 | { |
369 | if (!s->pcmcia_state.device_add_pending) { | 251 | if (!s->pcmcia_state.device_add_pending) { |
370 | ds_dev_dbg(1, &s->dev, "scheduling to add %s secondary" | 252 | dev_dbg(&s->dev, "scheduling to add %s secondary" |
371 | " device to %d\n", mfc ? "mfc" : "pfc", s->sock); | 253 | " device to %d\n", mfc ? "mfc" : "pfc", s->sock); |
372 | s->pcmcia_state.device_add_pending = 1; | 254 | s->pcmcia_state.device_add_pending = 1; |
373 | s->pcmcia_state.mfc_pfc = mfc; | 255 | s->pcmcia_state.mfc_pfc = mfc; |
@@ -405,7 +287,7 @@ static int pcmcia_device_probe(struct device * dev) | |||
405 | */ | 287 | */ |
406 | did = dev_get_drvdata(&p_dev->dev); | 288 | did = dev_get_drvdata(&p_dev->dev); |
407 | 289 | ||
408 | ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name); | 290 | dev_dbg(dev, "trying to bind to %s\n", p_drv->drv.name); |
409 | 291 | ||
410 | if ((!p_drv->probe) || (!p_dev->function_config) || | 292 | if ((!p_drv->probe) || (!p_dev->function_config) || |
411 | (!try_module_get(p_drv->owner))) { | 293 | (!try_module_get(p_drv->owner))) { |
@@ -428,7 +310,7 @@ static int pcmcia_device_probe(struct device * dev) | |||
428 | 310 | ||
429 | ret = p_drv->probe(p_dev); | 311 | ret = p_drv->probe(p_dev); |
430 | if (ret) { | 312 | if (ret) { |
431 | ds_dev_dbg(1, dev, "binding to %s failed with %d\n", | 313 | dev_dbg(dev, "binding to %s failed with %d\n", |
432 | p_drv->drv.name, ret); | 314 | p_drv->drv.name, ret); |
433 | goto put_module; | 315 | goto put_module; |
434 | } | 316 | } |
@@ -456,7 +338,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le | |||
456 | struct pcmcia_device *tmp; | 338 | struct pcmcia_device *tmp; |
457 | unsigned long flags; | 339 | unsigned long flags; |
458 | 340 | ||
459 | ds_dev_dbg(2, leftover ? &leftover->dev : &s->dev, | 341 | dev_dbg(leftover ? &leftover->dev : &s->dev, |
460 | "pcmcia_card_remove(%d) %s\n", s->sock, | 342 | "pcmcia_card_remove(%d) %s\n", s->sock, |
461 | leftover ? leftover->devname : ""); | 343 | leftover ? leftover->devname : ""); |
462 | 344 | ||
@@ -475,7 +357,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le | |||
475 | p_dev->_removed=1; | 357 | p_dev->_removed=1; |
476 | spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); | 358 | spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); |
477 | 359 | ||
478 | ds_dev_dbg(2, &p_dev->dev, "unregistering device\n"); | 360 | dev_dbg(&p_dev->dev, "unregistering device\n"); |
479 | device_unregister(&p_dev->dev); | 361 | device_unregister(&p_dev->dev); |
480 | } | 362 | } |
481 | 363 | ||
@@ -492,7 +374,7 @@ static int pcmcia_device_remove(struct device * dev) | |||
492 | p_dev = to_pcmcia_dev(dev); | 374 | p_dev = to_pcmcia_dev(dev); |
493 | p_drv = to_pcmcia_drv(dev->driver); | 375 | p_drv = to_pcmcia_drv(dev->driver); |
494 | 376 | ||
495 | ds_dev_dbg(1, dev, "removing device\n"); | 377 | dev_dbg(dev, "removing device\n"); |
496 | 378 | ||
497 | /* If we're removing the primary module driving a | 379 | /* If we're removing the primary module driving a |
498 | * pseudo multi-function card, we need to unbind | 380 | * pseudo multi-function card, we need to unbind |
@@ -572,7 +454,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev) | |||
572 | } | 454 | } |
573 | if (!pccard_read_tuple(p_dev->socket, p_dev->func, | 455 | if (!pccard_read_tuple(p_dev->socket, p_dev->func, |
574 | CISTPL_DEVICE_GEO, devgeo)) { | 456 | CISTPL_DEVICE_GEO, devgeo)) { |
575 | ds_dev_dbg(0, &p_dev->dev, | 457 | dev_dbg(&p_dev->dev, |
576 | "mem device geometry probably means " | 458 | "mem device geometry probably means " |
577 | "FUNCID_MEMORY\n"); | 459 | "FUNCID_MEMORY\n"); |
578 | p_dev->func_id = CISTPL_FUNCID_MEMORY; | 460 | p_dev->func_id = CISTPL_FUNCID_MEMORY; |
@@ -628,7 +510,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f | |||
628 | 510 | ||
629 | mutex_lock(&device_add_lock); | 511 | mutex_lock(&device_add_lock); |
630 | 512 | ||
631 | ds_dbg(3, "adding device to %d, function %d\n", s->sock, function); | 513 | pr_debug("adding device to %d, function %d\n", s->sock, function); |
632 | 514 | ||
633 | /* max of 4 devices per card */ | 515 | /* max of 4 devices per card */ |
634 | if (s->device_count == 4) | 516 | if (s->device_count == 4) |
@@ -654,7 +536,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f | |||
654 | p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); | 536 | p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); |
655 | if (!p_dev->devname) | 537 | if (!p_dev->devname) |
656 | goto err_free; | 538 | goto err_free; |
657 | ds_dev_dbg(3, &p_dev->dev, "devname is %s\n", p_dev->devname); | 539 | dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname); |
658 | 540 | ||
659 | spin_lock_irqsave(&pcmcia_dev_list_lock, flags); | 541 | spin_lock_irqsave(&pcmcia_dev_list_lock, flags); |
660 | 542 | ||
@@ -677,7 +559,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f | |||
677 | spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); | 559 | spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); |
678 | 560 | ||
679 | if (!p_dev->function_config) { | 561 | if (!p_dev->function_config) { |
680 | ds_dev_dbg(3, &p_dev->dev, "creating config_t\n"); | 562 | dev_dbg(&p_dev->dev, "creating config_t\n"); |
681 | p_dev->function_config = kzalloc(sizeof(struct config_t), | 563 | p_dev->function_config = kzalloc(sizeof(struct config_t), |
682 | GFP_KERNEL); | 564 | GFP_KERNEL); |
683 | if (!p_dev->function_config) | 565 | if (!p_dev->function_config) |
@@ -722,20 +604,20 @@ static int pcmcia_card_add(struct pcmcia_socket *s) | |||
722 | int ret = 0; | 604 | int ret = 0; |
723 | 605 | ||
724 | if (!(s->resource_setup_done)) { | 606 | if (!(s->resource_setup_done)) { |
725 | ds_dev_dbg(3, &s->dev, | 607 | dev_dbg(&s->dev, |
726 | "no resources available, delaying card_add\n"); | 608 | "no resources available, delaying card_add\n"); |
727 | return -EAGAIN; /* try again, but later... */ | 609 | return -EAGAIN; /* try again, but later... */ |
728 | } | 610 | } |
729 | 611 | ||
730 | if (pcmcia_validate_mem(s)) { | 612 | if (pcmcia_validate_mem(s)) { |
731 | ds_dev_dbg(3, &s->dev, "validating mem resources failed, " | 613 | dev_dbg(&s->dev, "validating mem resources failed, " |
732 | "delaying card_add\n"); | 614 | "delaying card_add\n"); |
733 | return -EAGAIN; /* try again, but later... */ | 615 | return -EAGAIN; /* try again, but later... */ |
734 | } | 616 | } |
735 | 617 | ||
736 | ret = pccard_validate_cis(s, &no_chains); | 618 | ret = pccard_validate_cis(s, &no_chains); |
737 | if (ret || !no_chains) { | 619 | if (ret || !no_chains) { |
738 | ds_dev_dbg(0, &s->dev, "invalid CIS or invalid resources\n"); | 620 | dev_dbg(&s->dev, "invalid CIS or invalid resources\n"); |
739 | return -ENODEV; | 621 | return -ENODEV; |
740 | } | 622 | } |
741 | 623 | ||
@@ -756,7 +638,7 @@ static void pcmcia_delayed_add_device(struct work_struct *work) | |||
756 | { | 638 | { |
757 | struct pcmcia_socket *s = | 639 | struct pcmcia_socket *s = |
758 | container_of(work, struct pcmcia_socket, device_add); | 640 | container_of(work, struct pcmcia_socket, device_add); |
759 | ds_dev_dbg(1, &s->dev, "adding additional device to %d\n", s->sock); | 641 | dev_dbg(&s->dev, "adding additional device to %d\n", s->sock); |
760 | pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); | 642 | pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); |
761 | s->pcmcia_state.device_add_pending = 0; | 643 | s->pcmcia_state.device_add_pending = 0; |
762 | s->pcmcia_state.mfc_pfc = 0; | 644 | s->pcmcia_state.mfc_pfc = 0; |
@@ -766,7 +648,7 @@ static int pcmcia_requery(struct device *dev, void * _data) | |||
766 | { | 648 | { |
767 | struct pcmcia_device *p_dev = to_pcmcia_dev(dev); | 649 | struct pcmcia_device *p_dev = to_pcmcia_dev(dev); |
768 | if (!p_dev->dev.driver) { | 650 | if (!p_dev->dev.driver) { |
769 | ds_dev_dbg(1, dev, "update device information\n"); | 651 | dev_dbg(dev, "update device information\n"); |
770 | pcmcia_device_query(p_dev); | 652 | pcmcia_device_query(p_dev); |
771 | } | 653 | } |
772 | 654 | ||
@@ -780,7 +662,7 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis) | |||
780 | unsigned long flags; | 662 | unsigned long flags; |
781 | 663 | ||
782 | /* must be called with skt_mutex held */ | 664 | /* must be called with skt_mutex held */ |
783 | ds_dev_dbg(0, &skt->dev, "re-scanning socket %d\n", skt->sock); | 665 | dev_dbg(&skt->dev, "re-scanning socket %d\n", skt->sock); |
784 | 666 | ||
785 | spin_lock_irqsave(&pcmcia_dev_list_lock, flags); | 667 | spin_lock_irqsave(&pcmcia_dev_list_lock, flags); |
786 | if (list_empty(&skt->devices_list)) | 668 | if (list_empty(&skt->devices_list)) |
@@ -835,7 +717,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) | |||
835 | if (!filename) | 717 | if (!filename) |
836 | return -EINVAL; | 718 | return -EINVAL; |
837 | 719 | ||
838 | ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename); | 720 | dev_dbg(&dev->dev, "trying to load CIS file %s\n", filename); |
839 | 721 | ||
840 | if (request_firmware(&fw, filename, &dev->dev) == 0) { | 722 | if (request_firmware(&fw, filename, &dev->dev) == 0) { |
841 | if (fw->size >= CISTPL_MAX_CIS_SIZE) { | 723 | if (fw->size >= CISTPL_MAX_CIS_SIZE) { |
@@ -953,14 +835,14 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
953 | * after it has re-checked that there is no possible module | 835 | * after it has re-checked that there is no possible module |
954 | * with a prod_id/manf_id/card_id match. | 836 | * with a prod_id/manf_id/card_id match. |
955 | */ | 837 | */ |
956 | ds_dev_dbg(0, &dev->dev, | 838 | dev_dbg(&dev->dev, |
957 | "skipping FUNC_ID match until userspace interaction\n"); | 839 | "skipping FUNC_ID match until userspace interaction\n"); |
958 | if (!dev->allow_func_id_match) | 840 | if (!dev->allow_func_id_match) |
959 | return 0; | 841 | return 0; |
960 | } | 842 | } |
961 | 843 | ||
962 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { | 844 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { |
963 | ds_dev_dbg(0, &dev->dev, "device needs a fake CIS\n"); | 845 | dev_dbg(&dev->dev, "device needs a fake CIS\n"); |
964 | if (!dev->socket->fake_cis) | 846 | if (!dev->socket->fake_cis) |
965 | pcmcia_load_firmware(dev, did->cisfile); | 847 | pcmcia_load_firmware(dev, did->cisfile); |
966 | 848 | ||
@@ -992,9 +874,9 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) { | |||
992 | /* match dynamic devices first */ | 874 | /* match dynamic devices first */ |
993 | spin_lock(&p_drv->dynids.lock); | 875 | spin_lock(&p_drv->dynids.lock); |
994 | list_for_each_entry(dynid, &p_drv->dynids.list, node) { | 876 | list_for_each_entry(dynid, &p_drv->dynids.list, node) { |
995 | ds_dev_dbg(3, dev, "trying to match to %s\n", drv->name); | 877 | dev_dbg(dev, "trying to match to %s\n", drv->name); |
996 | if (pcmcia_devmatch(p_dev, &dynid->id)) { | 878 | if (pcmcia_devmatch(p_dev, &dynid->id)) { |
997 | ds_dev_dbg(0, dev, "matched to %s\n", drv->name); | 879 | dev_dbg(dev, "matched to %s\n", drv->name); |
998 | spin_unlock(&p_drv->dynids.lock); | 880 | spin_unlock(&p_drv->dynids.lock); |
999 | return 1; | 881 | return 1; |
1000 | } | 882 | } |
@@ -1004,15 +886,15 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) { | |||
1004 | #ifdef CONFIG_PCMCIA_IOCTL | 886 | #ifdef CONFIG_PCMCIA_IOCTL |
1005 | /* matching by cardmgr */ | 887 | /* matching by cardmgr */ |
1006 | if (p_dev->cardmgr == p_drv) { | 888 | if (p_dev->cardmgr == p_drv) { |
1007 | ds_dev_dbg(0, dev, "cardmgr matched to %s\n", drv->name); | 889 | dev_dbg(dev, "cardmgr matched to %s\n", drv->name); |
1008 | return 1; | 890 | return 1; |
1009 | } | 891 | } |
1010 | #endif | 892 | #endif |
1011 | 893 | ||
1012 | while (did && did->match_flags) { | 894 | while (did && did->match_flags) { |
1013 | ds_dev_dbg(3, dev, "trying to match to %s\n", drv->name); | 895 | dev_dbg(dev, "trying to match to %s\n", drv->name); |
1014 | if (pcmcia_devmatch(p_dev, did)) { | 896 | if (pcmcia_devmatch(p_dev, did)) { |
1015 | ds_dev_dbg(0, dev, "matched to %s\n", drv->name); | 897 | dev_dbg(dev, "matched to %s\n", drv->name); |
1016 | return 1; | 898 | return 1; |
1017 | } | 899 | } |
1018 | did++; | 900 | did++; |
@@ -1218,7 +1100,7 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state) | |||
1218 | if (p_dev->suspended) | 1100 | if (p_dev->suspended) |
1219 | return 0; | 1101 | return 0; |
1220 | 1102 | ||
1221 | ds_dev_dbg(2, dev, "suspending\n"); | 1103 | dev_dbg(dev, "suspending\n"); |
1222 | 1104 | ||
1223 | if (dev->driver) | 1105 | if (dev->driver) |
1224 | p_drv = to_pcmcia_drv(dev->driver); | 1106 | p_drv = to_pcmcia_drv(dev->driver); |
@@ -1238,7 +1120,7 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state) | |||
1238 | } | 1120 | } |
1239 | 1121 | ||
1240 | if (p_dev->device_no == p_dev->func) { | 1122 | if (p_dev->device_no == p_dev->func) { |
1241 | ds_dev_dbg(2, dev, "releasing configuration\n"); | 1123 | dev_dbg(dev, "releasing configuration\n"); |
1242 | pcmcia_release_configuration(p_dev); | 1124 | pcmcia_release_configuration(p_dev); |
1243 | } | 1125 | } |
1244 | 1126 | ||
@@ -1258,7 +1140,7 @@ static int pcmcia_dev_resume(struct device * dev) | |||
1258 | if (!p_dev->suspended) | 1140 | if (!p_dev->suspended) |
1259 | return 0; | 1141 | return 0; |
1260 | 1142 | ||
1261 | ds_dev_dbg(2, dev, "resuming\n"); | 1143 | dev_dbg(dev, "resuming\n"); |
1262 | 1144 | ||
1263 | if (dev->driver) | 1145 | if (dev->driver) |
1264 | p_drv = to_pcmcia_drv(dev->driver); | 1146 | p_drv = to_pcmcia_drv(dev->driver); |
@@ -1267,7 +1149,7 @@ static int pcmcia_dev_resume(struct device * dev) | |||
1267 | goto out; | 1149 | goto out; |
1268 | 1150 | ||
1269 | if (p_dev->device_no == p_dev->func) { | 1151 | if (p_dev->device_no == p_dev->func) { |
1270 | ds_dev_dbg(2, dev, "requesting configuration\n"); | 1152 | dev_dbg(dev, "requesting configuration\n"); |
1271 | ret = pcmcia_request_configuration(p_dev, &p_dev->conf); | 1153 | ret = pcmcia_request_configuration(p_dev, &p_dev->conf); |
1272 | if (ret) | 1154 | if (ret) |
1273 | goto out; | 1155 | goto out; |
@@ -1309,14 +1191,14 @@ static int pcmcia_bus_resume_callback(struct device *dev, void * _data) | |||
1309 | 1191 | ||
1310 | static int pcmcia_bus_resume(struct pcmcia_socket *skt) | 1192 | static int pcmcia_bus_resume(struct pcmcia_socket *skt) |
1311 | { | 1193 | { |
1312 | ds_dev_dbg(2, &skt->dev, "resuming socket %d\n", skt->sock); | 1194 | dev_dbg(&skt->dev, "resuming socket %d\n", skt->sock); |
1313 | bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); | 1195 | bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); |
1314 | return 0; | 1196 | return 0; |
1315 | } | 1197 | } |
1316 | 1198 | ||
1317 | static int pcmcia_bus_suspend(struct pcmcia_socket *skt) | 1199 | static int pcmcia_bus_suspend(struct pcmcia_socket *skt) |
1318 | { | 1200 | { |
1319 | ds_dev_dbg(2, &skt->dev, "suspending socket %d\n", skt->sock); | 1201 | dev_dbg(&skt->dev, "suspending socket %d\n", skt->sock); |
1320 | if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, | 1202 | if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, |
1321 | pcmcia_bus_suspend_callback)) { | 1203 | pcmcia_bus_suspend_callback)) { |
1322 | pcmcia_bus_resume(skt); | 1204 | pcmcia_bus_resume(skt); |
@@ -1348,7 +1230,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1348 | return -ENODEV; | 1230 | return -ENODEV; |
1349 | } | 1231 | } |
1350 | 1232 | ||
1351 | ds_dev_dbg(1, &skt->dev, "ds_event(0x%06x, %d, 0x%p)\n", | 1233 | dev_dbg(&skt->dev, "ds_event(0x%06x, %d, 0x%p)\n", |
1352 | event, priority, skt); | 1234 | event, priority, skt); |
1353 | 1235 | ||
1354 | switch (event) { | 1236 | switch (event) { |
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index a4aacb830b80..c13fd9360511 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c | |||
@@ -63,21 +63,6 @@ | |||
63 | #include "vg468.h" | 63 | #include "vg468.h" |
64 | #include "ricoh.h" | 64 | #include "ricoh.h" |
65 | 65 | ||
66 | #ifdef CONFIG_PCMCIA_DEBUG | ||
67 | static const char version[] = | ||
68 | "i82365.c 1.265 1999/11/10 18:36:21 (David Hinds)"; | ||
69 | |||
70 | static int pc_debug; | ||
71 | |||
72 | module_param(pc_debug, int, 0644); | ||
73 | |||
74 | #define debug(lvl, fmt, arg...) do { \ | ||
75 | if (pc_debug > (lvl)) \ | ||
76 | printk(KERN_DEBUG "i82365: " fmt , ## arg); \ | ||
77 | } while (0) | ||
78 | #else | ||
79 | #define debug(lvl, fmt, arg...) do { } while (0) | ||
80 | #endif | ||
81 | 66 | ||
82 | static irqreturn_t i365_count_irq(int, void *); | 67 | static irqreturn_t i365_count_irq(int, void *); |
83 | static inline int _check_irq(int irq, int flags) | 68 | static inline int _check_irq(int irq, int flags) |
@@ -501,13 +486,13 @@ static irqreturn_t i365_count_irq(int irq, void *dev) | |||
501 | { | 486 | { |
502 | i365_get(irq_sock, I365_CSC); | 487 | i365_get(irq_sock, I365_CSC); |
503 | irq_hits++; | 488 | irq_hits++; |
504 | debug(2, "-> hit on irq %d\n", irq); | 489 | pr_debug("i82365: -> hit on irq %d\n", irq); |
505 | return IRQ_HANDLED; | 490 | return IRQ_HANDLED; |
506 | } | 491 | } |
507 | 492 | ||
508 | static u_int __init test_irq(u_short sock, int irq) | 493 | static u_int __init test_irq(u_short sock, int irq) |
509 | { | 494 | { |
510 | debug(2, " testing ISA irq %d\n", irq); | 495 | pr_debug("i82365: testing ISA irq %d\n", irq); |
511 | if (request_irq(irq, i365_count_irq, IRQF_PROBE_SHARED, "scan", | 496 | if (request_irq(irq, i365_count_irq, IRQF_PROBE_SHARED, "scan", |
512 | i365_count_irq) != 0) | 497 | i365_count_irq) != 0) |
513 | return 1; | 498 | return 1; |
@@ -515,7 +500,7 @@ static u_int __init test_irq(u_short sock, int irq) | |||
515 | msleep(10); | 500 | msleep(10); |
516 | if (irq_hits) { | 501 | if (irq_hits) { |
517 | free_irq(irq, i365_count_irq); | 502 | free_irq(irq, i365_count_irq); |
518 | debug(2, " spurious hit!\n"); | 503 | pr_debug("i82365: spurious hit!\n"); |
519 | return 1; | 504 | return 1; |
520 | } | 505 | } |
521 | 506 | ||
@@ -528,7 +513,7 @@ static u_int __init test_irq(u_short sock, int irq) | |||
528 | 513 | ||
529 | /* mask all interrupts */ | 514 | /* mask all interrupts */ |
530 | i365_set(sock, I365_CSCINT, 0); | 515 | i365_set(sock, I365_CSCINT, 0); |
531 | debug(2, " hits = %d\n", irq_hits); | 516 | pr_debug("i82365: hits = %d\n", irq_hits); |
532 | 517 | ||
533 | return (irq_hits != 1); | 518 | return (irq_hits != 1); |
534 | } | 519 | } |
@@ -854,7 +839,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev) | |||
854 | u_long flags = 0; | 839 | u_long flags = 0; |
855 | int handled = 0; | 840 | int handled = 0; |
856 | 841 | ||
857 | debug(4, "pcic_interrupt(%d)\n", irq); | 842 | pr_debug("pcic_interrupt(%d)\n", irq); |
858 | 843 | ||
859 | for (j = 0; j < 20; j++) { | 844 | for (j = 0; j < 20; j++) { |
860 | active = 0; | 845 | active = 0; |
@@ -878,7 +863,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev) | |||
878 | events |= (csc & I365_CSC_READY) ? SS_READY : 0; | 863 | events |= (csc & I365_CSC_READY) ? SS_READY : 0; |
879 | } | 864 | } |
880 | ISA_UNLOCK(i, flags); | 865 | ISA_UNLOCK(i, flags); |
881 | debug(2, "socket %d event 0x%02x\n", i, events); | 866 | pr_debug("socket %d event 0x%02x\n", i, events); |
882 | 867 | ||
883 | if (events) | 868 | if (events) |
884 | pcmcia_parse_events(&socket[i].socket, events); | 869 | pcmcia_parse_events(&socket[i].socket, events); |
@@ -890,7 +875,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev) | |||
890 | if (j == 20) | 875 | if (j == 20) |
891 | printk(KERN_NOTICE "i82365: infinite loop in interrupt handler\n"); | 876 | printk(KERN_NOTICE "i82365: infinite loop in interrupt handler\n"); |
892 | 877 | ||
893 | debug(4, "interrupt done\n"); | 878 | pr_debug("pcic_interrupt done\n"); |
894 | return IRQ_RETVAL(handled); | 879 | return IRQ_RETVAL(handled); |
895 | } /* pcic_interrupt */ | 880 | } /* pcic_interrupt */ |
896 | 881 | ||
@@ -932,7 +917,7 @@ static int i365_get_status(u_short sock, u_int *value) | |||
932 | } | 917 | } |
933 | } | 918 | } |
934 | 919 | ||
935 | debug(1, "GetStatus(%d) = %#4.4x\n", sock, *value); | 920 | pr_debug("GetStatus(%d) = %#4.4x\n", sock, *value); |
936 | return 0; | 921 | return 0; |
937 | } /* i365_get_status */ | 922 | } /* i365_get_status */ |
938 | 923 | ||
@@ -943,7 +928,7 @@ static int i365_set_socket(u_short sock, socket_state_t *state) | |||
943 | struct i82365_socket *t = &socket[sock]; | 928 | struct i82365_socket *t = &socket[sock]; |
944 | u_char reg; | 929 | u_char reg; |
945 | 930 | ||
946 | debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " | 931 | pr_debug("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " |
947 | "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, | 932 | "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, |
948 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); | 933 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); |
949 | 934 | ||
@@ -1052,7 +1037,7 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io) | |||
1052 | { | 1037 | { |
1053 | u_char map, ioctl; | 1038 | u_char map, ioctl; |
1054 | 1039 | ||
1055 | debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " | 1040 | pr_debug("SetIOMap(%d, %d, %#2.2x, %d ns, " |
1056 | "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, | 1041 | "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, |
1057 | (unsigned long long)io->start, (unsigned long long)io->stop); | 1042 | (unsigned long long)io->start, (unsigned long long)io->stop); |
1058 | map = io->map; | 1043 | map = io->map; |
@@ -1082,7 +1067,7 @@ static int i365_set_mem_map(u_short sock, struct pccard_mem_map *mem) | |||
1082 | u_short base, i; | 1067 | u_short base, i; |
1083 | u_char map; | 1068 | u_char map; |
1084 | 1069 | ||
1085 | debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, " | 1070 | pr_debug("SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, " |
1086 | "%#x)\n", sock, mem->map, mem->flags, mem->speed, | 1071 | "%#x)\n", sock, mem->map, mem->flags, mem->speed, |
1087 | (unsigned long long)mem->res->start, | 1072 | (unsigned long long)mem->res->start, |
1088 | (unsigned long long)mem->res->end, mem->card_start); | 1073 | (unsigned long long)mem->res->end, mem->card_start); |
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c index 7dfbee1dcd76..26a621c9e2fc 100644 --- a/drivers/pcmcia/m32r_cfc.c +++ b/drivers/pcmcia/m32r_cfc.c | |||
@@ -38,17 +38,6 @@ | |||
38 | 38 | ||
39 | #include "m32r_cfc.h" | 39 | #include "m32r_cfc.h" |
40 | 40 | ||
41 | #ifdef CONFIG_PCMCIA_DEBUG | ||
42 | static int m32r_cfc_debug; | ||
43 | module_param(m32r_cfc_debug, int, 0644); | ||
44 | #define debug(lvl, fmt, arg...) do { \ | ||
45 | if (m32r_cfc_debug > (lvl)) \ | ||
46 | printk(KERN_DEBUG "m32r_cfc: " fmt , ## arg); \ | ||
47 | } while (0) | ||
48 | #else | ||
49 | #define debug(n, args...) do { } while (0) | ||
50 | #endif | ||
51 | |||
52 | /* Poll status interval -- 0 means default to interrupt */ | 41 | /* Poll status interval -- 0 means default to interrupt */ |
53 | static int poll_interval = 0; | 42 | static int poll_interval = 0; |
54 | 43 | ||
@@ -123,7 +112,7 @@ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size, | |||
123 | unsigned char *bp = (unsigned char *)buf; | 112 | unsigned char *bp = (unsigned char *)buf; |
124 | unsigned long flags; | 113 | unsigned long flags; |
125 | 114 | ||
126 | debug(3, "m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, " | 115 | pr_debug("m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, " |
127 | "size=%u, nmemb=%d, flag=%d\n", | 116 | "size=%u, nmemb=%d, flag=%d\n", |
128 | sock, port, buf, size, nmemb, flag); | 117 | sock, port, buf, size, nmemb, flag); |
129 | 118 | ||
@@ -132,7 +121,7 @@ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size, | |||
132 | printk("m32r_cfc:ioread_byte null port :%#lx\n",port); | 121 | printk("m32r_cfc:ioread_byte null port :%#lx\n",port); |
133 | return; | 122 | return; |
134 | } | 123 | } |
135 | debug(3, "m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr); | 124 | pr_debug("m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr); |
136 | 125 | ||
137 | spin_lock_irqsave(&pcc_lock, flags); | 126 | spin_lock_irqsave(&pcc_lock, flags); |
138 | /* read Byte */ | 127 | /* read Byte */ |
@@ -148,7 +137,7 @@ void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size, | |||
148 | unsigned short *bp = (unsigned short *)buf; | 137 | unsigned short *bp = (unsigned short *)buf; |
149 | unsigned long flags; | 138 | unsigned long flags; |
150 | 139 | ||
151 | debug(3, "m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, " | 140 | pr_debug("m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, " |
152 | "buf=%p, size=%u, nmemb=%d, flag=%d\n", | 141 | "buf=%p, size=%u, nmemb=%d, flag=%d\n", |
153 | sock, port, buf, size, nmemb, flag); | 142 | sock, port, buf, size, nmemb, flag); |
154 | 143 | ||
@@ -163,7 +152,7 @@ void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size, | |||
163 | printk("m32r_cfc:ioread_word null port :%#lx\n",port); | 152 | printk("m32r_cfc:ioread_word null port :%#lx\n",port); |
164 | return; | 153 | return; |
165 | } | 154 | } |
166 | debug(3, "m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr); | 155 | pr_debug("m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr); |
167 | 156 | ||
168 | spin_lock_irqsave(&pcc_lock, flags); | 157 | spin_lock_irqsave(&pcc_lock, flags); |
169 | /* read Word */ | 158 | /* read Word */ |
@@ -179,7 +168,7 @@ void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size, | |||
179 | unsigned char *bp = (unsigned char *)buf; | 168 | unsigned char *bp = (unsigned char *)buf; |
180 | unsigned long flags; | 169 | unsigned long flags; |
181 | 170 | ||
182 | debug(3, "m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, " | 171 | pr_debug("m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, " |
183 | "buf=%p, size=%u, nmemb=%d, flag=%d\n", | 172 | "buf=%p, size=%u, nmemb=%d, flag=%d\n", |
184 | sock, port, buf, size, nmemb, flag); | 173 | sock, port, buf, size, nmemb, flag); |
185 | 174 | ||
@@ -189,7 +178,7 @@ void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size, | |||
189 | printk("m32r_cfc:iowrite_byte null port:%#lx\n",port); | 178 | printk("m32r_cfc:iowrite_byte null port:%#lx\n",port); |
190 | return; | 179 | return; |
191 | } | 180 | } |
192 | debug(3, "m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr); | 181 | pr_debug("m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr); |
193 | 182 | ||
194 | spin_lock_irqsave(&pcc_lock, flags); | 183 | spin_lock_irqsave(&pcc_lock, flags); |
195 | while (nmemb--) | 184 | while (nmemb--) |
@@ -204,7 +193,7 @@ void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size, | |||
204 | unsigned short *bp = (unsigned short *)buf; | 193 | unsigned short *bp = (unsigned short *)buf; |
205 | unsigned long flags; | 194 | unsigned long flags; |
206 | 195 | ||
207 | debug(3, "m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, " | 196 | pr_debug("m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, " |
208 | "buf=%p, size=%u, nmemb=%d, flag=%d\n", | 197 | "buf=%p, size=%u, nmemb=%d, flag=%d\n", |
209 | sock, port, buf, size, nmemb, flag); | 198 | sock, port, buf, size, nmemb, flag); |
210 | 199 | ||
@@ -226,7 +215,7 @@ void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size, | |||
226 | return; | 215 | return; |
227 | } | 216 | } |
228 | #endif | 217 | #endif |
229 | debug(3, "m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr); | 218 | pr_debug("m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr); |
230 | 219 | ||
231 | spin_lock_irqsave(&pcc_lock, flags); | 220 | spin_lock_irqsave(&pcc_lock, flags); |
232 | while (nmemb--) | 221 | while (nmemb--) |
@@ -262,7 +251,7 @@ static struct timer_list poll_timer; | |||
262 | static unsigned int pcc_get(u_short sock, unsigned int reg) | 251 | static unsigned int pcc_get(u_short sock, unsigned int reg) |
263 | { | 252 | { |
264 | unsigned int val = inw(reg); | 253 | unsigned int val = inw(reg); |
265 | debug(3, "m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val); | 254 | pr_debug("m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val); |
266 | return val; | 255 | return val; |
267 | } | 256 | } |
268 | 257 | ||
@@ -270,7 +259,7 @@ static unsigned int pcc_get(u_short sock, unsigned int reg) | |||
270 | static void pcc_set(u_short sock, unsigned int reg, unsigned int data) | 259 | static void pcc_set(u_short sock, unsigned int reg, unsigned int data) |
271 | { | 260 | { |
272 | outw(data, reg); | 261 | outw(data, reg); |
273 | debug(3, "m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data); | 262 | pr_debug("m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data); |
274 | } | 263 | } |
275 | 264 | ||
276 | /*====================================================================== | 265 | /*====================================================================== |
@@ -286,14 +275,14 @@ static int __init is_alive(u_short sock) | |||
286 | { | 275 | { |
287 | unsigned int stat; | 276 | unsigned int stat; |
288 | 277 | ||
289 | debug(3, "m32r_cfc: is_alive:\n"); | 278 | pr_debug("m32r_cfc: is_alive:\n"); |
290 | 279 | ||
291 | printk("CF: "); | 280 | printk("CF: "); |
292 | stat = pcc_get(sock, (unsigned int)PLD_CFSTS); | 281 | stat = pcc_get(sock, (unsigned int)PLD_CFSTS); |
293 | if (!stat) | 282 | if (!stat) |
294 | printk("No "); | 283 | printk("No "); |
295 | printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat); | 284 | printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat); |
296 | debug(3, "m32r_cfc: is_alive: sock stat is 0x%04x\n", stat); | 285 | pr_debug("m32r_cfc: is_alive: sock stat is 0x%04x\n", stat); |
297 | 286 | ||
298 | return 0; | 287 | return 0; |
299 | } | 288 | } |
@@ -303,7 +292,7 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr, | |||
303 | { | 292 | { |
304 | pcc_socket_t *t = &socket[pcc_sockets]; | 293 | pcc_socket_t *t = &socket[pcc_sockets]; |
305 | 294 | ||
306 | debug(3, "m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, " | 295 | pr_debug("m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, " |
307 | "mapaddr=%#lx, ioaddr=%08x\n", | 296 | "mapaddr=%#lx, ioaddr=%08x\n", |
308 | base, irq, mapaddr, ioaddr); | 297 | base, irq, mapaddr, ioaddr); |
309 | 298 | ||
@@ -358,7 +347,7 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr, | |||
358 | /* eject interrupt */ | 347 | /* eject interrupt */ |
359 | request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt); | 348 | request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt); |
360 | #endif | 349 | #endif |
361 | debug(3, "m32r_cfc: enable CFMSK, RDYSEL\n"); | 350 | pr_debug("m32r_cfc: enable CFMSK, RDYSEL\n"); |
362 | pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01); | 351 | pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01); |
363 | #endif /* CONFIG_PLAT_USRV */ | 352 | #endif /* CONFIG_PLAT_USRV */ |
364 | #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) | 353 | #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) |
@@ -378,26 +367,26 @@ static irqreturn_t pcc_interrupt(int irq, void *dev) | |||
378 | u_int events = 0; | 367 | u_int events = 0; |
379 | int handled = 0; | 368 | int handled = 0; |
380 | 369 | ||
381 | debug(3, "m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev); | 370 | pr_debug("m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev); |
382 | for (i = 0; i < pcc_sockets; i++) { | 371 | for (i = 0; i < pcc_sockets; i++) { |
383 | if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq) | 372 | if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq) |
384 | continue; | 373 | continue; |
385 | 374 | ||
386 | handled = 1; | 375 | handled = 1; |
387 | debug(3, "m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ", | 376 | pr_debug("m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ", |
388 | i, irq); | 377 | i, irq); |
389 | events |= SS_DETECT; /* insert or eject */ | 378 | events |= SS_DETECT; /* insert or eject */ |
390 | if (events) | 379 | if (events) |
391 | pcmcia_parse_events(&socket[i].socket, events); | 380 | pcmcia_parse_events(&socket[i].socket, events); |
392 | } | 381 | } |
393 | debug(3, "m32r_cfc: pcc_interrupt: done\n"); | 382 | pr_debug("m32r_cfc: pcc_interrupt: done\n"); |
394 | 383 | ||
395 | return IRQ_RETVAL(handled); | 384 | return IRQ_RETVAL(handled); |
396 | } /* pcc_interrupt */ | 385 | } /* pcc_interrupt */ |
397 | 386 | ||
398 | static void pcc_interrupt_wrapper(u_long data) | 387 | static void pcc_interrupt_wrapper(u_long data) |
399 | { | 388 | { |
400 | debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n"); | 389 | pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n"); |
401 | pcc_interrupt(0, NULL); | 390 | pcc_interrupt(0, NULL); |
402 | init_timer(&poll_timer); | 391 | init_timer(&poll_timer); |
403 | poll_timer.expires = jiffies + poll_interval; | 392 | poll_timer.expires = jiffies + poll_interval; |
@@ -410,17 +399,17 @@ static int _pcc_get_status(u_short sock, u_int *value) | |||
410 | { | 399 | { |
411 | u_int status; | 400 | u_int status; |
412 | 401 | ||
413 | debug(3, "m32r_cfc: _pcc_get_status:\n"); | 402 | pr_debug("m32r_cfc: _pcc_get_status:\n"); |
414 | status = pcc_get(sock, (unsigned int)PLD_CFSTS); | 403 | status = pcc_get(sock, (unsigned int)PLD_CFSTS); |
415 | *value = (status) ? SS_DETECT : 0; | 404 | *value = (status) ? SS_DETECT : 0; |
416 | debug(3, "m32r_cfc: _pcc_get_status: status=0x%08x\n", status); | 405 | pr_debug("m32r_cfc: _pcc_get_status: status=0x%08x\n", status); |
417 | 406 | ||
418 | #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) | 407 | #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) |
419 | if ( status ) { | 408 | if ( status ) { |
420 | /* enable CF power */ | 409 | /* enable CF power */ |
421 | status = inw((unsigned int)PLD_CPCR); | 410 | status = inw((unsigned int)PLD_CPCR); |
422 | if (!(status & PLD_CPCR_CF)) { | 411 | if (!(status & PLD_CPCR_CF)) { |
423 | debug(3, "m32r_cfc: _pcc_get_status: " | 412 | pr_debug("m32r_cfc: _pcc_get_status: " |
424 | "power on (CPCR=0x%08x)\n", status); | 413 | "power on (CPCR=0x%08x)\n", status); |
425 | status |= PLD_CPCR_CF; | 414 | status |= PLD_CPCR_CF; |
426 | outw(status, (unsigned int)PLD_CPCR); | 415 | outw(status, (unsigned int)PLD_CPCR); |
@@ -439,7 +428,7 @@ static int _pcc_get_status(u_short sock, u_int *value) | |||
439 | status &= ~PLD_CPCR_CF; | 428 | status &= ~PLD_CPCR_CF; |
440 | outw(status, (unsigned int)PLD_CPCR); | 429 | outw(status, (unsigned int)PLD_CPCR); |
441 | udelay(100); | 430 | udelay(100); |
442 | debug(3, "m32r_cfc: _pcc_get_status: " | 431 | pr_debug("m32r_cfc: _pcc_get_status: " |
443 | "power off (CPCR=0x%08x)\n", status); | 432 | "power off (CPCR=0x%08x)\n", status); |
444 | } | 433 | } |
445 | #elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) | 434 | #elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) |
@@ -465,13 +454,13 @@ static int _pcc_get_status(u_short sock, u_int *value) | |||
465 | /* disable CF power */ | 454 | /* disable CF power */ |
466 | pcc_set(sock, (unsigned int)PLD_CPCR, 0); | 455 | pcc_set(sock, (unsigned int)PLD_CPCR, 0); |
467 | udelay(100); | 456 | udelay(100); |
468 | debug(3, "m32r_cfc: _pcc_get_status: " | 457 | pr_debug("m32r_cfc: _pcc_get_status: " |
469 | "power off (CPCR=0x%08x)\n", status); | 458 | "power off (CPCR=0x%08x)\n", status); |
470 | } | 459 | } |
471 | #else | 460 | #else |
472 | #error no platform configuration | 461 | #error no platform configuration |
473 | #endif | 462 | #endif |
474 | debug(3, "m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n", | 463 | pr_debug("m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n", |
475 | sock, *value); | 464 | sock, *value); |
476 | return 0; | 465 | return 0; |
477 | } /* _get_status */ | 466 | } /* _get_status */ |
@@ -480,7 +469,7 @@ static int _pcc_get_status(u_short sock, u_int *value) | |||
480 | 469 | ||
481 | static int _pcc_set_socket(u_short sock, socket_state_t *state) | 470 | static int _pcc_set_socket(u_short sock, socket_state_t *state) |
482 | { | 471 | { |
483 | debug(3, "m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " | 472 | pr_debug("m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " |
484 | "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, | 473 | "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, |
485 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); | 474 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); |
486 | 475 | ||
@@ -492,41 +481,39 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state) | |||
492 | } | 481 | } |
493 | #endif | 482 | #endif |
494 | if (state->flags & SS_RESET) { | 483 | if (state->flags & SS_RESET) { |
495 | debug(3, ":RESET\n"); | 484 | pr_debug(":RESET\n"); |
496 | pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101); | 485 | pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101); |
497 | }else{ | 486 | }else{ |
498 | pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100); | 487 | pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100); |
499 | } | 488 | } |
500 | if (state->flags & SS_OUTPUT_ENA){ | 489 | if (state->flags & SS_OUTPUT_ENA){ |
501 | debug(3, ":OUTPUT_ENA\n"); | 490 | pr_debug(":OUTPUT_ENA\n"); |
502 | /* bit clear */ | 491 | /* bit clear */ |
503 | pcc_set(sock,(unsigned int)PLD_CFBUFCR,0); | 492 | pcc_set(sock,(unsigned int)PLD_CFBUFCR,0); |
504 | } else { | 493 | } else { |
505 | pcc_set(sock,(unsigned int)PLD_CFBUFCR,1); | 494 | pcc_set(sock,(unsigned int)PLD_CFBUFCR,1); |
506 | } | 495 | } |
507 | 496 | ||
508 | #ifdef CONFIG_PCMCIA_DEBUG | ||
509 | if(state->flags & SS_IOCARD){ | 497 | if(state->flags & SS_IOCARD){ |
510 | debug(3, ":IOCARD"); | 498 | pr_debug(":IOCARD"); |
511 | } | 499 | } |
512 | if (state->flags & SS_PWR_AUTO) { | 500 | if (state->flags & SS_PWR_AUTO) { |
513 | debug(3, ":PWR_AUTO"); | 501 | pr_debug(":PWR_AUTO"); |
514 | } | 502 | } |
515 | if (state->csc_mask & SS_DETECT) | 503 | if (state->csc_mask & SS_DETECT) |
516 | debug(3, ":csc-SS_DETECT"); | 504 | pr_debug(":csc-SS_DETECT"); |
517 | if (state->flags & SS_IOCARD) { | 505 | if (state->flags & SS_IOCARD) { |
518 | if (state->csc_mask & SS_STSCHG) | 506 | if (state->csc_mask & SS_STSCHG) |
519 | debug(3, ":STSCHG"); | 507 | pr_debug(":STSCHG"); |
520 | } else { | 508 | } else { |
521 | if (state->csc_mask & SS_BATDEAD) | 509 | if (state->csc_mask & SS_BATDEAD) |
522 | debug(3, ":BATDEAD"); | 510 | pr_debug(":BATDEAD"); |
523 | if (state->csc_mask & SS_BATWARN) | 511 | if (state->csc_mask & SS_BATWARN) |
524 | debug(3, ":BATWARN"); | 512 | pr_debug(":BATWARN"); |
525 | if (state->csc_mask & SS_READY) | 513 | if (state->csc_mask & SS_READY) |
526 | debug(3, ":READY"); | 514 | pr_debug(":READY"); |
527 | } | 515 | } |
528 | debug(3, "\n"); | 516 | pr_debug("\n"); |
529 | #endif | ||
530 | return 0; | 517 | return 0; |
531 | } /* _set_socket */ | 518 | } /* _set_socket */ |
532 | 519 | ||
@@ -536,7 +523,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) | |||
536 | { | 523 | { |
537 | u_char map; | 524 | u_char map; |
538 | 525 | ||
539 | debug(3, "m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " | 526 | pr_debug("m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " |
540 | "%#llx-%#llx)\n", sock, io->map, io->flags, | 527 | "%#llx-%#llx)\n", sock, io->map, io->flags, |
541 | io->speed, (unsigned long long)io->start, | 528 | io->speed, (unsigned long long)io->start, |
542 | (unsigned long long)io->stop); | 529 | (unsigned long long)io->stop); |
@@ -554,7 +541,7 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem) | |||
554 | u_long addr; | 541 | u_long addr; |
555 | pcc_socket_t *t = &socket[sock]; | 542 | pcc_socket_t *t = &socket[sock]; |
556 | 543 | ||
557 | debug(3, "m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " | 544 | pr_debug("m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " |
558 | "%#llx, %#x)\n", sock, map, mem->flags, | 545 | "%#llx, %#x)\n", sock, map, mem->flags, |
559 | mem->speed, (unsigned long long)mem->static_start, | 546 | mem->speed, (unsigned long long)mem->static_start, |
560 | mem->card_start); | 547 | mem->card_start); |
@@ -640,11 +627,11 @@ static int pcc_get_status(struct pcmcia_socket *s, u_int *value) | |||
640 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; | 627 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; |
641 | 628 | ||
642 | if (socket[sock].flags & IS_ALIVE) { | 629 | if (socket[sock].flags & IS_ALIVE) { |
643 | debug(3, "m32r_cfc: pcc_get_status: sock(%d) -EINVAL\n", sock); | 630 | dev_dbg(&s->dev, "pcc_get_status: sock(%d) -EINVAL\n", sock); |
644 | *value = 0; | 631 | *value = 0; |
645 | return -EINVAL; | 632 | return -EINVAL; |
646 | } | 633 | } |
647 | debug(3, "m32r_cfc: pcc_get_status: sock(%d)\n", sock); | 634 | dev_dbg(&s->dev, "pcc_get_status: sock(%d)\n", sock); |
648 | LOCKED(_pcc_get_status(sock, value)); | 635 | LOCKED(_pcc_get_status(sock, value)); |
649 | } | 636 | } |
650 | 637 | ||
@@ -653,10 +640,10 @@ static int pcc_set_socket(struct pcmcia_socket *s, socket_state_t *state) | |||
653 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; | 640 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; |
654 | 641 | ||
655 | if (socket[sock].flags & IS_ALIVE) { | 642 | if (socket[sock].flags & IS_ALIVE) { |
656 | debug(3, "m32r_cfc: pcc_set_socket: sock(%d) -EINVAL\n", sock); | 643 | dev_dbg(&s->dev, "pcc_set_socket: sock(%d) -EINVAL\n", sock); |
657 | return -EINVAL; | 644 | return -EINVAL; |
658 | } | 645 | } |
659 | debug(3, "m32r_cfc: pcc_set_socket: sock(%d)\n", sock); | 646 | dev_dbg(&s->dev, "pcc_set_socket: sock(%d)\n", sock); |
660 | LOCKED(_pcc_set_socket(sock, state)); | 647 | LOCKED(_pcc_set_socket(sock, state)); |
661 | } | 648 | } |
662 | 649 | ||
@@ -665,10 +652,10 @@ static int pcc_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io) | |||
665 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; | 652 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; |
666 | 653 | ||
667 | if (socket[sock].flags & IS_ALIVE) { | 654 | if (socket[sock].flags & IS_ALIVE) { |
668 | debug(3, "m32r_cfc: pcc_set_io_map: sock(%d) -EINVAL\n", sock); | 655 | dev_dbg(&s->dev, "pcc_set_io_map: sock(%d) -EINVAL\n", sock); |
669 | return -EINVAL; | 656 | return -EINVAL; |
670 | } | 657 | } |
671 | debug(3, "m32r_cfc: pcc_set_io_map: sock(%d)\n", sock); | 658 | dev_dbg(&s->dev, "pcc_set_io_map: sock(%d)\n", sock); |
672 | LOCKED(_pcc_set_io_map(sock, io)); | 659 | LOCKED(_pcc_set_io_map(sock, io)); |
673 | } | 660 | } |
674 | 661 | ||
@@ -677,16 +664,16 @@ static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem) | |||
677 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; | 664 | unsigned int sock = container_of(s, struct pcc_socket, socket)->number; |
678 | 665 | ||
679 | if (socket[sock].flags & IS_ALIVE) { | 666 | if (socket[sock].flags & IS_ALIVE) { |
680 | debug(3, "m32r_cfc: pcc_set_mem_map: sock(%d) -EINVAL\n", sock); | 667 | dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d) -EINVAL\n", sock); |
681 | return -EINVAL; | 668 | return -EINVAL; |
682 | } | 669 | } |
683 | debug(3, "m32r_cfc: pcc_set_mem_map: sock(%d)\n", sock); | 670 | dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d)\n", sock); |
684 | LOCKED(_pcc_set_mem_map(sock, mem)); | 671 | LOCKED(_pcc_set_mem_map(sock, mem)); |
685 | } | 672 | } |
686 | 673 | ||
687 | static int pcc_init(struct pcmcia_socket *s) | 674 | static int pcc_init(struct pcmcia_socket *s) |
688 | { | 675 | { |
689 | debug(3, "m32r_cfc: pcc_init()\n"); | 676 | dev_dbg(&s->dev, "pcc_init()\n"); |
690 | return 0; | 677 | return 0; |
691 | } | 678 | } |
692 | 679 | ||
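
The m32r_cfc.c hunks above all follow one pattern: the driver-private, level-gated debug() macro (removed at the top of the file) gives way to pr_debug() at plain call sites and dev_dbg() where a struct pcmcia_socket, and hence a struct device, is at hand. Below is a minimal sketch of that conversion; the pr_fmt() prefix trick is an assumption added for illustration only, since this patch instead keeps the literal "m32r_cfc: " prefix in each format string.

/* Sketch only -- not code from this patch.  Defining pr_fmt() before the
 * first include makes pr_debug() prepend a module prefix automatically;
 * the patch keeps explicit "m32r_cfc: " prefixes instead.
 */
#define pr_fmt(fmt) "m32r_cfc: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

/* Old style (removed above): compiled in only with CONFIG_PCMCIA_DEBUG
 * and gated on a module parameter at run time:
 *
 *	#define debug(lvl, fmt, arg...) do { \
 *		if (m32r_cfc_debug > (lvl)) \
 *			printk(KERN_DEBUG "m32r_cfc: " fmt, ##arg); \
 *	} while (0)
 *
 * New style: pr_debug() compiles to nothing unless DEBUG or
 * CONFIG_DYNAMIC_DEBUG is set, and with dynamic debug each call site can
 * be switched on individually at run time.
 */
static void example_trace(int sock)
{
	pr_debug("pcc_get_status: sock(%d)\n", sock);	/* prefix added by pr_fmt() */
}
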
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index c6524f99ccc3..72844c5a6d05 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c | |||
@@ -45,16 +45,6 @@ | |||
45 | 45 | ||
46 | #define PCC_DEBUG_DBEX | 46 | #define PCC_DEBUG_DBEX |
47 | 47 | ||
48 | #ifdef CONFIG_PCMCIA_DEBUG | ||
49 | static int m32r_pcc_debug; | ||
50 | module_param(m32r_pcc_debug, int, 0644); | ||
51 | #define debug(lvl, fmt, arg...) do { \ | ||
52 | if (m32r_pcc_debug > (lvl)) \ | ||
53 | printk(KERN_DEBUG "m32r_pcc: " fmt , ## arg); \ | ||
54 | } while (0) | ||
55 | #else | ||
56 | #define debug(n, args...) do { } while (0) | ||
57 | #endif | ||
58 | 48 | ||
59 | /* Poll status interval -- 0 means default to interrupt */ | 49 | /* Poll status interval -- 0 means default to interrupt */ |
60 | static int poll_interval = 0; | 50 | static int poll_interval = 0; |
@@ -358,7 +348,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev) | |||
358 | u_int events, active; | 348 | u_int events, active; |
359 | int handled = 0; | 349 | int handled = 0; |
360 | 350 | ||
361 | debug(4, "m32r: pcc_interrupt(%d)\n", irq); | 351 | pr_debug("m32r_pcc: pcc_interrupt(%d)\n", irq); |
362 | 352 | ||
363 | for (j = 0; j < 20; j++) { | 353 | for (j = 0; j < 20; j++) { |
364 | active = 0; | 354 | active = 0; |
@@ -369,13 +359,14 @@ static irqreturn_t pcc_interrupt(int irq, void *dev) | |||
369 | handled = 1; | 359 | handled = 1; |
370 | irc = pcc_get(i, PCIRC); | 360 | irc = pcc_get(i, PCIRC); |
371 | irc >>=16; | 361 | irc >>=16; |
372 | debug(2, "m32r-pcc:interrupt: socket %d pcirc 0x%02x ", i, irc); | 362 | pr_debug("m32r_pcc: interrupt: socket %d pcirc 0x%02x ", |
363 | i, irc); | ||
373 | if (!irc) | 364 | if (!irc) |
374 | continue; | 365 | continue; |
375 | 366 | ||
376 | events = (irc) ? SS_DETECT : 0; | 367 | events = (irc) ? SS_DETECT : 0; |
377 | events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? SS_READY : 0; | 368 | events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? SS_READY : 0; |
378 | debug(2, " event 0x%02x\n", events); | 369 | pr_debug("m32r_pcc: event 0x%02x\n", events); |
379 | 370 | ||
380 | if (events) | 371 | if (events) |
381 | pcmcia_parse_events(&socket[i].socket, events); | 372 | pcmcia_parse_events(&socket[i].socket, events); |
@@ -388,7 +379,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev) | |||
388 | if (j == 20) | 379 | if (j == 20) |
389 | printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n"); | 380 | printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n"); |
390 | 381 | ||
391 | debug(4, "m32r-pcc: interrupt done\n"); | 382 | pr_debug("m32r_pcc: interrupt done\n"); |
392 | 383 | ||
393 | return IRQ_RETVAL(handled); | 384 | return IRQ_RETVAL(handled); |
394 | } /* pcc_interrupt */ | 385 | } /* pcc_interrupt */ |
@@ -422,7 +413,7 @@ static int _pcc_get_status(u_short sock, u_int *value) | |||
422 | status = pcc_get(sock,PCCSIGCR); | 413 | status = pcc_get(sock,PCCSIGCR); |
423 | *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0; | 414 | *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0; |
424 | 415 | ||
425 | debug(3, "m32r-pcc: GetStatus(%d) = %#4.4x\n", sock, *value); | 416 | pr_debug("m32r_pcc: GetStatus(%d) = %#4.4x\n", sock, *value); |
426 | return 0; | 417 | return 0; |
427 | } /* _get_status */ | 418 | } /* _get_status */ |
428 | 419 | ||
@@ -432,7 +423,7 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state) | |||
432 | { | 423 | { |
433 | u_long reg = 0; | 424 | u_long reg = 0; |
434 | 425 | ||
435 | debug(3, "m32r-pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " | 426 | pr_debug("m32r_pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " |
436 | "io_irq %d, csc_mask %#2.2x)", sock, state->flags, | 427 | "io_irq %d, csc_mask %#2.2x)", sock, state->flags, |
437 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); | 428 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); |
438 | 429 | ||
@@ -448,11 +439,11 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state) | |||
448 | } | 439 | } |
449 | 440 | ||
450 | if (state->flags & SS_RESET) { | 441 | if (state->flags & SS_RESET) { |
451 | debug(3, ":RESET\n"); | 442 | pr_debug("m32r_pcc: :RESET\n"); |
452 | reg |= PCCSIGCR_CRST; | 443 | reg |= PCCSIGCR_CRST; |
453 | } | 444 | } |
454 | if (state->flags & SS_OUTPUT_ENA){ | 445 | if (state->flags & SS_OUTPUT_ENA){ |
455 | debug(3, ":OUTPUT_ENA\n"); | 446 | pr_debug("m32r_pcc: :OUTPUT_ENA\n"); |
456 | /* bit clear */ | 447 | /* bit clear */ |
457 | } else { | 448 | } else { |
458 | reg |= PCCSIGCR_SEN; | 449 | reg |= PCCSIGCR_SEN; |
@@ -460,28 +451,26 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state) | |||
460 | 451 | ||
461 | pcc_set(sock,PCCSIGCR,reg); | 452 | pcc_set(sock,PCCSIGCR,reg); |
462 | 453 | ||
463 | #ifdef CONFIG_PCMCIA_DEBUG | ||
464 | if(state->flags & SS_IOCARD){ | 454 | if(state->flags & SS_IOCARD){ |
465 | debug(3, ":IOCARD"); | 455 | pr_debug("m32r_pcc: :IOCARD"); |
466 | } | 456 | } |
467 | if (state->flags & SS_PWR_AUTO) { | 457 | if (state->flags & SS_PWR_AUTO) { |
468 | debug(3, ":PWR_AUTO"); | 458 | pr_debug("m32r_pcc: :PWR_AUTO"); |
469 | } | 459 | } |
470 | if (state->csc_mask & SS_DETECT) | 460 | if (state->csc_mask & SS_DETECT) |
471 | debug(3, ":csc-SS_DETECT"); | 461 | pr_debug("m32r_pcc: :csc-SS_DETECT"); |
472 | if (state->flags & SS_IOCARD) { | 462 | if (state->flags & SS_IOCARD) { |
473 | if (state->csc_mask & SS_STSCHG) | 463 | if (state->csc_mask & SS_STSCHG) |
474 | debug(3, ":STSCHG"); | 464 | pr_debug("m32r_pcc: :STSCHG"); |
475 | } else { | 465 | } else { |
476 | if (state->csc_mask & SS_BATDEAD) | 466 | if (state->csc_mask & SS_BATDEAD) |
477 | debug(3, ":BATDEAD"); | 467 | pr_debug("m32r_pcc: :BATDEAD"); |
478 | if (state->csc_mask & SS_BATWARN) | 468 | if (state->csc_mask & SS_BATWARN) |
479 | debug(3, ":BATWARN"); | 469 | pr_debug("m32r_pcc: :BATWARN"); |
480 | if (state->csc_mask & SS_READY) | 470 | if (state->csc_mask & SS_READY) |
481 | debug(3, ":READY"); | 471 | pr_debug("m32r_pcc: :READY"); |
482 | } | 472 | } |
483 | debug(3, "\n"); | 473 | pr_debug("m32r_pcc: \n"); |
484 | #endif | ||
485 | return 0; | 474 | return 0; |
486 | } /* _set_socket */ | 475 | } /* _set_socket */ |
487 | 476 | ||
@@ -491,7 +480,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) | |||
491 | { | 480 | { |
492 | u_char map; | 481 | u_char map; |
493 | 482 | ||
494 | debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " | 483 | pr_debug("m32r_pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " |
495 | "%#llx-%#llx)\n", sock, io->map, io->flags, | 484 | "%#llx-%#llx)\n", sock, io->map, io->flags, |
496 | io->speed, (unsigned long long)io->start, | 485 | io->speed, (unsigned long long)io->start, |
497 | (unsigned long long)io->stop); | 486 | (unsigned long long)io->stop); |
@@ -515,7 +504,7 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem) | |||
515 | #endif | 504 | #endif |
516 | #endif | 505 | #endif |
517 | 506 | ||
518 | debug(3, "m32r-pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " | 507 | pr_debug("m32r_pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " |
519 | "%#llx, %#x)\n", sock, map, mem->flags, | 508 | "%#llx, %#x)\n", sock, map, mem->flags, |
520 | mem->speed, (unsigned long long)mem->static_start, | 509 | mem->speed, (unsigned long long)mem->static_start, |
521 | mem->card_start); | 510 | mem->card_start); |
@@ -662,7 +651,7 @@ static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem) | |||
662 | 651 | ||
663 | static int pcc_init(struct pcmcia_socket *s) | 652 | static int pcc_init(struct pcmcia_socket *s) |
664 | { | 653 | { |
665 | debug(4, "m32r-pcc: init call\n"); | 654 | pr_debug("m32r_pcc: init call\n"); |
666 | return 0; | 655 | return 0; |
667 | } | 656 | } |
668 | 657 | ||
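
The m32r_pcc.c conversion is the same. One distinction between the two helpers used throughout these patches is worth spelling out: dev_dbg() takes a struct device and lets the driver core prefix the message with identifying device information, whereas pr_debug() emits only the raw format string. A hedged sketch follows, assuming the embedded struct device member named dev that the hunks above dereference as &s->dev; the exact prefix text in the log is bus-dependent and not guaranteed here.

/* Sketch only.  dev_dbg() is preferred when a device pointer is handy,
 * because the core adds a device-identifying prefix to the message.
 */
#include <linux/device.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>

static int example_sock_init(struct pcmcia_socket *s)
{
	dev_dbg(&s->dev, "init call\n");
	/* With CONFIG_DYNAMIC_DEBUG, this (and the pr_debug() sites above)
	 * can be enabled at run time through <debugfs>/dynamic_debug/control,
	 * e.g. "module m32r_pcc +p".
	 */
	return 0;
}
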
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 403559ba49dd..7f79c4e169ae 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c | |||
@@ -64,14 +64,6 @@ | |||
64 | #include <pcmcia/cs.h> | 64 | #include <pcmcia/cs.h> |
65 | #include <pcmcia/ss.h> | 65 | #include <pcmcia/ss.h> |
66 | 66 | ||
67 | #ifdef CONFIG_PCMCIA_DEBUG | ||
68 | static int pc_debug; | ||
69 | module_param(pc_debug, int, 0); | ||
70 | #define dprintk(args...) printk(KERN_DEBUG "m8xx_pcmcia: " args); | ||
71 | #else | ||
72 | #define dprintk(args...) | ||
73 | #endif | ||
74 | |||
75 | #define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args) | 67 | #define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args) |
76 | #define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args) | 68 | #define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args) |
77 | 69 | ||
@@ -565,7 +557,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev) | |||
565 | unsigned int i, events, pscr, pipr, per; | 557 | unsigned int i, events, pscr, pipr, per; |
566 | pcmconf8xx_t *pcmcia = socket[0].pcmcia; | 558 | pcmconf8xx_t *pcmcia = socket[0].pcmcia; |
567 | 559 | ||
568 | dprintk("Interrupt!\n"); | 560 | pr_debug("m8xx_pcmcia: Interrupt!\n"); |
569 | /* get interrupt sources */ | 561 | /* get interrupt sources */ |
570 | 562 | ||
571 | pscr = in_be32(&pcmcia->pcmc_pscr); | 563 | pscr = in_be32(&pcmcia->pcmc_pscr); |
@@ -614,7 +606,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev) | |||
614 | 606 | ||
615 | /* call the handler */ | 607 | /* call the handler */ |
616 | 608 | ||
617 | dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, " | 609 | pr_debug("m8xx_pcmcia: slot %u: events = 0x%02x, pscr = 0x%08x, " |
618 | "pipr = 0x%08x\n", i, events, pscr, pipr); | 610 | "pipr = 0x%08x\n", i, events, pscr, pipr); |
619 | 611 | ||
620 | if (events) { | 612 | if (events) { |
@@ -641,7 +633,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev) | |||
641 | /* clear the interrupt sources */ | 633 | /* clear the interrupt sources */ |
642 | out_be32(&pcmcia->pcmc_pscr, pscr); | 634 | out_be32(&pcmcia->pcmc_pscr, pscr); |
643 | 635 | ||
644 | dprintk("Interrupt done.\n"); | 636 | pr_debug("m8xx_pcmcia: Interrupt done.\n"); |
645 | 637 | ||
646 | return IRQ_HANDLED; | 638 | return IRQ_HANDLED; |
647 | } | 639 | } |
@@ -815,7 +807,7 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value) | |||
815 | }; | 807 | }; |
816 | } | 808 | } |
817 | 809 | ||
818 | dprintk("GetStatus(%d) = %#2.2x\n", lsock, *value); | 810 | pr_debug("m8xx_pcmcia: GetStatus(%d) = %#2.2x\n", lsock, *value); |
819 | return 0; | 811 | return 0; |
820 | } | 812 | } |
821 | 813 | ||
@@ -828,7 +820,7 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state) | |||
828 | unsigned long flags; | 820 | unsigned long flags; |
829 | pcmconf8xx_t *pcmcia = socket[0].pcmcia; | 821 | pcmconf8xx_t *pcmcia = socket[0].pcmcia; |
830 | 822 | ||
831 | dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " | 823 | pr_debug("m8xx_pcmcia: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " |
832 | "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, | 824 | "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, |
833 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); | 825 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); |
834 | 826 | ||
@@ -974,7 +966,7 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) | |||
974 | #define M8XX_SIZE (io->stop - io->start + 1) | 966 | #define M8XX_SIZE (io->stop - io->start + 1) |
975 | #define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) | 967 | #define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) |
976 | 968 | ||
977 | dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, " | 969 | pr_debug("m8xx_pcmcia: SetIOMap(%d, %d, %#2.2x, %d ns, " |
978 | "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, | 970 | "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, |
979 | io->speed, (unsigned long long)io->start, | 971 | io->speed, (unsigned long long)io->start, |
980 | (unsigned long long)io->stop); | 972 | (unsigned long long)io->stop); |
@@ -988,7 +980,7 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) | |||
988 | 980 | ||
989 | if (io->flags & MAP_ACTIVE) { | 981 | if (io->flags & MAP_ACTIVE) { |
990 | 982 | ||
991 | dprintk("io->flags & MAP_ACTIVE\n"); | 983 | pr_debug("m8xx_pcmcia: io->flags & MAP_ACTIVE\n"); |
992 | 984 | ||
993 | winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) | 985 | winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) |
994 | + (lsock * PCMCIA_IO_WIN_NO) + io->map; | 986 | + (lsock * PCMCIA_IO_WIN_NO) + io->map; |
@@ -1018,8 +1010,8 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) | |||
1018 | 1010 | ||
1019 | out_be32(&w->or, reg); | 1011 | out_be32(&w->or, reg); |
1020 | 1012 | ||
1021 | dprintk("Socket %u: Mapped io window %u at %#8.8x, " | 1013 | pr_debug("m8xx_pcmcia: Socket %u: Mapped io window %u at " |
1022 | "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); | 1014 | "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or); |
1023 | } else { | 1015 | } else { |
1024 | /* shutdown IO window */ | 1016 | /* shutdown IO window */ |
1025 | winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) | 1017 | winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) |
@@ -1033,14 +1025,14 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) | |||
1033 | out_be32(&w->or, 0); /* turn off window */ | 1025 | out_be32(&w->or, 0); /* turn off window */ |
1034 | out_be32(&w->br, 0); /* turn off base address */ | 1026 | out_be32(&w->br, 0); /* turn off base address */ |
1035 | 1027 | ||
1036 | dprintk("Socket %u: Unmapped io window %u at %#8.8x, " | 1028 | pr_debug("m8xx_pcmcia: Socket %u: Unmapped io window %u at " |
1037 | "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); | 1029 | "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or); |
1038 | } | 1030 | } |
1039 | 1031 | ||
1040 | /* copy the struct and modify the copy */ | 1032 | /* copy the struct and modify the copy */ |
1041 | s->io_win[io->map] = *io; | 1033 | s->io_win[io->map] = *io; |
1042 | s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); | 1034 | s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); |
1043 | dprintk("SetIOMap exit\n"); | 1035 | pr_debug("m8xx_pcmcia: SetIOMap exit\n"); |
1044 | 1036 | ||
1045 | return 0; | 1037 | return 0; |
1046 | } | 1038 | } |
@@ -1055,7 +1047,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, | |||
1055 | unsigned int reg, winnr; | 1047 | unsigned int reg, winnr; |
1056 | pcmconf8xx_t *pcmcia = s->pcmcia; | 1048 | pcmconf8xx_t *pcmcia = s->pcmcia; |
1057 | 1049 | ||
1058 | dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " | 1050 | pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, " |
1059 | "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, | 1051 | "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, |
1060 | mem->speed, (unsigned long long)mem->static_start, | 1052 | mem->speed, (unsigned long long)mem->static_start, |
1061 | mem->card_start); | 1053 | mem->card_start); |
@@ -1098,7 +1090,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, | |||
1098 | 1090 | ||
1099 | out_be32(&w->or, reg); | 1091 | out_be32(&w->or, reg); |
1100 | 1092 | ||
1101 | dprintk("Socket %u: Mapped memory window %u at %#8.8x, " | 1093 | pr_debug("m8xx_pcmcia: Socket %u: Mapped memory window %u at %#8.8x, " |
1102 | "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); | 1094 | "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); |
1103 | 1095 | ||
1104 | if (mem->flags & MAP_ACTIVE) { | 1096 | if (mem->flags & MAP_ACTIVE) { |
@@ -1108,7 +1100,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, | |||
1108 | + mem->card_start; | 1100 | + mem->card_start; |
1109 | } | 1101 | } |
1110 | 1102 | ||
1111 | dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " | 1103 | pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, " |
1112 | "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, | 1104 | "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, |
1113 | mem->speed, (unsigned long long)mem->static_start, | 1105 | mem->speed, (unsigned long long)mem->static_start, |
1114 | mem->card_start); | 1106 | mem->card_start); |
@@ -1129,7 +1121,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock) | |||
1129 | pccard_io_map io = { 0, 0, 0, 0, 1 }; | 1121 | pccard_io_map io = { 0, 0, 0, 0, 1 }; |
1130 | pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; | 1122 | pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; |
1131 | 1123 | ||
1132 | dprintk("sock_init(%d)\n", s); | 1124 | pr_debug("m8xx_pcmcia: sock_init(%d)\n", s); |
1133 | 1125 | ||
1134 | m8xx_set_socket(sock, &dead_socket); | 1126 | m8xx_set_socket(sock, &dead_socket); |
1135 | for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { | 1127 | for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { |
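
m8xx_pcmcia.c drops a dprintk() wrapper rather than a level-gated debug(). The removed macro expanded to a printk() statement carrying its own trailing semicolon, and to nothing when CONFIG_PCMCIA_DEBUG was off, which is the classic statement-macro trap; pr_debug() behaves like an ordinary expression, so the trap disappears. A sketch of the hazard, with a hypothetical call site and helper that are not part of the patch:

#include <linux/printk.h>

static void retry(void) { }		/* hypothetical stand-in, illustration only */

static void example(int err)
{
	/* With the removed macro
	 *
	 *	#define dprintk(args...) printk(KERN_DEBUG "m8xx_pcmcia: " args);
	 *
	 * the semicolon inside the macro plus the one at the call site made
	 * two statements, so this shape would not compile:
	 *
	 *	if (err)
	 *		dprintk("setup failed: %d\n", err);
	 *	else
	 *		retry();
	 *
	 * pr_debug() is a single expression (or a no-op expression when
	 * debugging is compiled out), so the same shape is safe:
	 */
	if (err)
		pr_debug("m8xx_pcmcia: setup failed: %d\n", err);
	else
		retry();
}
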
diff --git a/drivers/pcmcia/o2micro.h b/drivers/pcmcia/o2micro.h index 72188c462c9c..624442fc0d35 100644 --- a/drivers/pcmcia/o2micro.h +++ b/drivers/pcmcia/o2micro.h | |||
@@ -30,28 +30,6 @@ | |||
30 | #ifndef _LINUX_O2MICRO_H | 30 | #ifndef _LINUX_O2MICRO_H |
31 | #define _LINUX_O2MICRO_H | 31 | #define _LINUX_O2MICRO_H |
32 | 32 | ||
33 | #ifndef PCI_VENDOR_ID_O2 | ||
34 | #define PCI_VENDOR_ID_O2 0x1217 | ||
35 | #endif | ||
36 | #ifndef PCI_DEVICE_ID_O2_6729 | ||
37 | #define PCI_DEVICE_ID_O2_6729 0x6729 | ||
38 | #endif | ||
39 | #ifndef PCI_DEVICE_ID_O2_6730 | ||
40 | #define PCI_DEVICE_ID_O2_6730 0x673a | ||
41 | #endif | ||
42 | #ifndef PCI_DEVICE_ID_O2_6832 | ||
43 | #define PCI_DEVICE_ID_O2_6832 0x6832 | ||
44 | #endif | ||
45 | #ifndef PCI_DEVICE_ID_O2_6836 | ||
46 | #define PCI_DEVICE_ID_O2_6836 0x6836 | ||
47 | #endif | ||
48 | #ifndef PCI_DEVICE_ID_O2_6812 | ||
49 | #define PCI_DEVICE_ID_O2_6812 0x6872 | ||
50 | #endif | ||
51 | #ifndef PCI_DEVICE_ID_O2_6933 | ||
52 | #define PCI_DEVICE_ID_O2_6933 0x6933 | ||
53 | #endif | ||
54 | |||
55 | /* Additional PCI configuration registers */ | 33 | /* Additional PCI configuration registers */ |
56 | 34 | ||
57 | #define O2_MUX_CONTROL 0x90 /* 32 bit */ | 35 | #define O2_MUX_CONTROL 0x90 /* 32 bit */ |
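
The o2micro.h hunk removes the local #ifndef fallbacks for the O2 Micro PCI IDs. Assuming these constants are all provided by <linux/pci_ids.h>, which is what makes the fallbacks redundant and avoids keeping a second copy that could drift, users of the header simply rely on the canonical definitions. A hypothetical ID table for illustration only:

/* Sketch, not from this patch: the canonical definitions come from
 * <linux/pci_ids.h>, pulled in via linux/pci.h.
 */
#include <linux/pci.h>

static const struct pci_device_id o2_example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_O2, PCI_DEVICE_ID_O2_6729) },
	{ PCI_DEVICE(PCI_VENDOR_ID_O2, PCI_DEVICE_ID_O2_6933) },
	{ 0, }
};
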
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index 30cf71d2ee23..c4d7908fa37f 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c | |||
@@ -58,17 +58,6 @@ typedef struct user_info_t { | |||
58 | } user_info_t; | 58 | } user_info_t; |
59 | 59 | ||
60 | 60 | ||
61 | #ifdef CONFIG_PCMCIA_DEBUG | ||
62 | extern int ds_pc_debug; | ||
63 | |||
64 | #define ds_dbg(lvl, fmt, arg...) do { \ | ||
65 | if (ds_pc_debug >= lvl) \ | ||
66 | printk(KERN_DEBUG "ds: " fmt , ## arg); \ | ||
67 | } while (0) | ||
68 | #else | ||
69 | #define ds_dbg(lvl, fmt, arg...) do { } while (0) | ||
70 | #endif | ||
71 | |||
72 | static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s, | 61 | static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s, |
73 | unsigned int function) | 62 | unsigned int function) |
74 | { | 63 | { |
@@ -229,6 +218,61 @@ static int pcmcia_adjust_resource_info(adjust_t *adj) | |||
229 | return (ret); | 218 | return (ret); |
230 | } | 219 | } |
231 | 220 | ||
221 | |||
222 | /** pcmcia_get_window | ||
223 | */ | ||
224 | static int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *wh_out, | ||
225 | window_handle_t wh, win_req_t *req) | ||
226 | { | ||
227 | pccard_mem_map *win; | ||
228 | window_handle_t w; | ||
229 | |||
230 | wh--; | ||
231 | if (!s || !(s->state & SOCKET_PRESENT)) | ||
232 | return -ENODEV; | ||
233 | if (wh >= MAX_WIN) | ||
234 | return -EINVAL; | ||
235 | for (w = wh; w < MAX_WIN; w++) | ||
236 | if (s->state & SOCKET_WIN_REQ(w)) | ||
237 | break; | ||
238 | if (w == MAX_WIN) | ||
239 | return -EINVAL; | ||
240 | win = &s->win[w]; | ||
241 | req->Base = win->res->start; | ||
242 | req->Size = win->res->end - win->res->start + 1; | ||
243 | req->AccessSpeed = win->speed; | ||
244 | req->Attributes = 0; | ||
245 | if (win->flags & MAP_ATTRIB) | ||
246 | req->Attributes |= WIN_MEMORY_TYPE_AM; | ||
247 | if (win->flags & MAP_ACTIVE) | ||
248 | req->Attributes |= WIN_ENABLE; | ||
249 | if (win->flags & MAP_16BIT) | ||
250 | req->Attributes |= WIN_DATA_WIDTH_16; | ||
251 | if (win->flags & MAP_USE_WAIT) | ||
252 | req->Attributes |= WIN_USE_WAIT; | ||
253 | |||
254 | *wh_out = w + 1; | ||
255 | return 0; | ||
256 | } /* pcmcia_get_window */ | ||
257 | |||
258 | |||
259 | /** pcmcia_get_mem_page | ||
260 | * | ||
261 | * Change the card address of an already open memory window. | ||
262 | */ | ||
263 | static int pcmcia_get_mem_page(struct pcmcia_socket *skt, window_handle_t wh, | ||
264 | memreq_t *req) | ||
265 | { | ||
266 | wh--; | ||
267 | if (wh >= MAX_WIN) | ||
268 | return -EINVAL; | ||
269 | |||
270 | req->Page = 0; | ||
271 | req->CardOffset = skt->win[wh].card_start; | ||
272 | return 0; | ||
273 | } /* pcmcia_get_mem_page */ | ||
274 | |||
275 | |||
232 | /** pccard_get_status | 276 | /** pccard_get_status |
233 | * | 277 | * |
234 | * Get the current socket state bits. We don't support the latched | 278 | * Get the current socket state bits. We don't support the latched |
@@ -431,7 +475,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info) | |||
431 | if (!s) | 475 | if (!s) |
432 | return -EINVAL; | 476 | return -EINVAL; |
433 | 477 | ||
434 | ds_dbg(2, "bind_request(%d, '%s')\n", s->sock, | 478 | pr_debug("bind_request(%d, '%s')\n", s->sock, |
435 | (char *)bind_info->dev_info); | 479 | (char *)bind_info->dev_info); |
436 | 480 | ||
437 | p_drv = get_pcmcia_driver(&bind_info->dev_info); | 481 | p_drv = get_pcmcia_driver(&bind_info->dev_info); |
@@ -623,7 +667,7 @@ static int ds_open(struct inode *inode, struct file *file) | |||
623 | static int warning_printed = 0; | 667 | static int warning_printed = 0; |
624 | int ret = 0; | 668 | int ret = 0; |
625 | 669 | ||
626 | ds_dbg(0, "ds_open(socket %d)\n", i); | 670 | pr_debug("ds_open(socket %d)\n", i); |
627 | 671 | ||
628 | lock_kernel(); | 672 | lock_kernel(); |
629 | s = pcmcia_get_socket_by_nr(i); | 673 | s = pcmcia_get_socket_by_nr(i); |
@@ -685,7 +729,7 @@ static int ds_release(struct inode *inode, struct file *file) | |||
685 | struct pcmcia_socket *s; | 729 | struct pcmcia_socket *s; |
686 | user_info_t *user, **link; | 730 | user_info_t *user, **link; |
687 | 731 | ||
688 | ds_dbg(0, "ds_release(socket %d)\n", iminor(inode)); | 732 | pr_debug("ds_release(socket %d)\n", iminor(inode)); |
689 | 733 | ||
690 | user = file->private_data; | 734 | user = file->private_data; |
691 | if (CHECK_USER(user)) | 735 | if (CHECK_USER(user)) |
@@ -719,7 +763,7 @@ static ssize_t ds_read(struct file *file, char __user *buf, | |||
719 | user_info_t *user; | 763 | user_info_t *user; |
720 | int ret; | 764 | int ret; |
721 | 765 | ||
722 | ds_dbg(2, "ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode)); | 766 | pr_debug("ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode)); |
723 | 767 | ||
724 | if (count < 4) | 768 | if (count < 4) |
725 | return -EINVAL; | 769 | return -EINVAL; |
@@ -744,7 +788,7 @@ static ssize_t ds_read(struct file *file, char __user *buf, | |||
744 | static ssize_t ds_write(struct file *file, const char __user *buf, | 788 | static ssize_t ds_write(struct file *file, const char __user *buf, |
745 | size_t count, loff_t *ppos) | 789 | size_t count, loff_t *ppos) |
746 | { | 790 | { |
747 | ds_dbg(2, "ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode)); | 791 | pr_debug("ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode)); |
748 | 792 | ||
749 | if (count != 4) | 793 | if (count != 4) |
750 | return -EINVAL; | 794 | return -EINVAL; |
@@ -762,7 +806,7 @@ static u_int ds_poll(struct file *file, poll_table *wait) | |||
762 | struct pcmcia_socket *s; | 806 | struct pcmcia_socket *s; |
763 | user_info_t *user; | 807 | user_info_t *user; |
764 | 808 | ||
765 | ds_dbg(2, "ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode)); | 809 | pr_debug("ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode)); |
766 | 810 | ||
767 | user = file->private_data; | 811 | user = file->private_data; |
768 | if (CHECK_USER(user)) | 812 | if (CHECK_USER(user)) |
@@ -790,7 +834,7 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
790 | ds_ioctl_arg_t *buf; | 834 | ds_ioctl_arg_t *buf; |
791 | user_info_t *user; | 835 | user_info_t *user; |
792 | 836 | ||
793 | ds_dbg(2, "ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg); | 837 | pr_debug("ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg); |
794 | 838 | ||
795 | user = file->private_data; | 839 | user = file->private_data; |
796 | if (CHECK_USER(user)) | 840 | if (CHECK_USER(user)) |
@@ -809,13 +853,13 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
809 | 853 | ||
810 | if (cmd & IOC_IN) { | 854 | if (cmd & IOC_IN) { |
811 | if (!access_ok(VERIFY_READ, uarg, size)) { | 855 | if (!access_ok(VERIFY_READ, uarg, size)) { |
812 | ds_dbg(3, "ds_ioctl(): verify_read = %d\n", -EFAULT); | 856 | pr_debug("ds_ioctl(): verify_read = %d\n", -EFAULT); |
813 | return -EFAULT; | 857 | return -EFAULT; |
814 | } | 858 | } |
815 | } | 859 | } |
816 | if (cmd & IOC_OUT) { | 860 | if (cmd & IOC_OUT) { |
817 | if (!access_ok(VERIFY_WRITE, uarg, size)) { | 861 | if (!access_ok(VERIFY_WRITE, uarg, size)) { |
818 | ds_dbg(3, "ds_ioctl(): verify_write = %d\n", -EFAULT); | 862 | pr_debug("ds_ioctl(): verify_write = %d\n", -EFAULT); |
819 | return -EFAULT; | 863 | return -EFAULT; |
820 | } | 864 | } |
821 | } | 865 | } |
@@ -927,15 +971,15 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
927 | goto free_out; | 971 | goto free_out; |
928 | break; | 972 | break; |
929 | case DS_GET_FIRST_WINDOW: | 973 | case DS_GET_FIRST_WINDOW: |
930 | ret = pcmcia_get_window(s, &buf->win_info.handle, 0, | 974 | ret = pcmcia_get_window(s, &buf->win_info.handle, 1, |
931 | &buf->win_info.window); | 975 | &buf->win_info.window); |
932 | break; | 976 | break; |
933 | case DS_GET_NEXT_WINDOW: | 977 | case DS_GET_NEXT_WINDOW: |
934 | ret = pcmcia_get_window(s, &buf->win_info.handle, | 978 | ret = pcmcia_get_window(s, &buf->win_info.handle, |
935 | buf->win_info.handle->index + 1, &buf->win_info.window); | 979 | buf->win_info.handle + 1, &buf->win_info.window); |
936 | break; | 980 | break; |
937 | case DS_GET_MEM_PAGE: | 981 | case DS_GET_MEM_PAGE: |
938 | ret = pcmcia_get_mem_page(buf->win_info.handle, | 982 | ret = pcmcia_get_mem_page(s, buf->win_info.handle, |
939 | &buf->win_info.map); | 983 | &buf->win_info.map); |
940 | break; | 984 | break; |
941 | case DS_REPLACE_CIS: | 985 | case DS_REPLACE_CIS: |
@@ -962,7 +1006,7 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
962 | } | 1006 | } |
963 | 1007 | ||
964 | if ((err == 0) && (ret != 0)) { | 1008 | if ((err == 0) && (ret != 0)) { |
965 | ds_dbg(2, "ds_ioctl: ret = %d\n", ret); | 1009 | pr_debug("ds_ioctl: ret = %d\n", ret); |
966 | switch (ret) { | 1010 | switch (ret) { |
967 | case -ENODEV: | 1011 | case -ENODEV: |
968 | case -EINVAL: | 1012 | case -EINVAL: |
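
The pcmcia_ioctl.c part of the patch copies pcmcia_get_window() and pcmcia_get_mem_page() into the ioctl layer, their only remaining user, and switches window handles from pointers to small integers: handles are 1-based indices into s->win[], with 0 left invalid, hence the wh-- on entry, the *wh_out = w + 1 on success, and the + 1 in the DS_GET_NEXT_WINDOW case. The sketch below shows how a caller inside this file could walk all requested windows under that convention; walk_windows() is hypothetical and not part of the patch.

/* Sketch only, assuming window_handle_t is now a plain integer index,
 * as the wh-- arithmetic above implies.
 */
static void walk_windows(struct pcmcia_socket *s)
{
	window_handle_t wh = 0;
	win_req_t req;

	/* DS_GET_FIRST_WINDOW starts the search at 1; each following
	 * DS_GET_NEXT_WINDOW passes the previous handle + 1.
	 */
	if (pcmcia_get_window(s, &wh, 1, &req) != 0)
		return;
	do {
		pr_debug("window %lu: base %#lx, size %#lx\n",
			 (unsigned long)wh, (unsigned long)req.Base,
			 (unsigned long)req.Size);
	} while (pcmcia_get_window(s, &wh, wh + 1, &req) == 0);
}
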
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index d919e96c0afd..a8bf8c1b45ed 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/netdevice.h> | ||
23 | 24 | ||
24 | #include <pcmcia/cs_types.h> | 25 | #include <pcmcia/cs_types.h> |
25 | #include <pcmcia/ss.h> | 26 | #include <pcmcia/ss.h> |
@@ -43,21 +44,6 @@ static u8 pcmcia_used_irq[NR_IRQS]; | |||
43 | #endif | 44 | #endif |
44 | 45 | ||
45 | 46 | ||
46 | #ifdef CONFIG_PCMCIA_DEBUG | ||
47 | extern int ds_pc_debug; | ||
48 | |||
49 | #define ds_dbg(skt, lvl, fmt, arg...) do { \ | ||
50 | if (ds_pc_debug >= lvl) \ | ||
51 | dev_printk(KERN_DEBUG, &skt->dev, \ | ||
52 | "pcmcia_resource: " fmt, \ | ||
53 | ## arg); \ | ||
54 | } while (0) | ||
55 | #else | ||
56 | #define ds_dbg(skt, lvl, fmt, arg...) do { } while (0) | ||
57 | #endif | ||
58 | |||
59 | |||
60 | |||
61 | /** alloc_io_space | 47 | /** alloc_io_space |
62 | * | 48 | * |
63 | * Special stuff for managing IO windows, because they are scarce | 49 | * Special stuff for managing IO windows, because they are scarce |
@@ -72,14 +58,14 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, | |||
72 | align = (*base) ? (lines ? 1<<lines : 0) : 1; | 58 | align = (*base) ? (lines ? 1<<lines : 0) : 1; |
73 | if (align && (align < num)) { | 59 | if (align && (align < num)) { |
74 | if (*base) { | 60 | if (*base) { |
75 | ds_dbg(s, 0, "odd IO request: num %#x align %#x\n", | 61 | dev_dbg(&s->dev, "odd IO request: num %#x align %#x\n", |
76 | num, align); | 62 | num, align); |
77 | align = 0; | 63 | align = 0; |
78 | } else | 64 | } else |
79 | while (align && (align < num)) align <<= 1; | 65 | while (align && (align < num)) align <<= 1; |
80 | } | 66 | } |
81 | if (*base & ~(align-1)) { | 67 | if (*base & ~(align-1)) { |
82 | ds_dbg(s, 0, "odd IO request: base %#x align %#x\n", | 68 | dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n", |
83 | *base, align); | 69 | *base, align); |
84 | align = 0; | 70 | align = 0; |
85 | } | 71 | } |
@@ -173,8 +159,10 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev, | |||
173 | s = p_dev->socket; | 159 | s = p_dev->socket; |
174 | c = p_dev->function_config; | 160 | c = p_dev->function_config; |
175 | 161 | ||
176 | if (!(c->state & CONFIG_LOCKED)) | 162 | if (!(c->state & CONFIG_LOCKED)) { |
163 | dev_dbg(&s->dev, "Configuration isn't locked\n"); | ||
177 | return -EACCES; | 164 | return -EACCES; |
165 | } | ||
178 | 166 | ||
179 | addr = (c->ConfigBase + reg->Offset) >> 1; | 167 | addr = (c->ConfigBase + reg->Offset) >> 1; |
180 | 168 | ||
@@ -188,6 +176,7 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev, | |||
188 | pcmcia_write_cis_mem(s, 1, addr, 1, &val); | 176 | pcmcia_write_cis_mem(s, 1, addr, 1, &val); |
189 | break; | 177 | break; |
190 | default: | 178 | default: |
179 | dev_dbg(&s->dev, "Invalid conf register request\n"); | ||
191 | return -EINVAL; | 180 | return -EINVAL; |
192 | break; | 181 | break; |
193 | } | 182 | } |
@@ -196,68 +185,21 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev, | |||
196 | EXPORT_SYMBOL(pcmcia_access_configuration_register); | 185 | EXPORT_SYMBOL(pcmcia_access_configuration_register); |
197 | 186 | ||
198 | 187 | ||
199 | /** pcmcia_get_window | 188 | int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh, |
200 | */ | 189 | memreq_t *req) |
201 | int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, | ||
202 | int idx, win_req_t *req) | ||
203 | { | ||
204 | window_t *win; | ||
205 | int w; | ||
206 | |||
207 | if (!s || !(s->state & SOCKET_PRESENT)) | ||
208 | return -ENODEV; | ||
209 | for (w = idx; w < MAX_WIN; w++) | ||
210 | if (s->state & SOCKET_WIN_REQ(w)) | ||
211 | break; | ||
212 | if (w == MAX_WIN) | ||
213 | return -EINVAL; | ||
214 | win = &s->win[w]; | ||
215 | req->Base = win->ctl.res->start; | ||
216 | req->Size = win->ctl.res->end - win->ctl.res->start + 1; | ||
217 | req->AccessSpeed = win->ctl.speed; | ||
218 | req->Attributes = 0; | ||
219 | if (win->ctl.flags & MAP_ATTRIB) | ||
220 | req->Attributes |= WIN_MEMORY_TYPE_AM; | ||
221 | if (win->ctl.flags & MAP_ACTIVE) | ||
222 | req->Attributes |= WIN_ENABLE; | ||
223 | if (win->ctl.flags & MAP_16BIT) | ||
224 | req->Attributes |= WIN_DATA_WIDTH_16; | ||
225 | if (win->ctl.flags & MAP_USE_WAIT) | ||
226 | req->Attributes |= WIN_USE_WAIT; | ||
227 | *handle = win; | ||
228 | return 0; | ||
229 | } /* pcmcia_get_window */ | ||
230 | EXPORT_SYMBOL(pcmcia_get_window); | ||
231 | |||
232 | |||
233 | /** pcmcia_get_mem_page | ||
234 | * | ||
235 | * Change the card address of an already open memory window. | ||
236 | */ | ||
237 | int pcmcia_get_mem_page(window_handle_t win, memreq_t *req) | ||
238 | { | 190 | { |
239 | if ((win == NULL) || (win->magic != WINDOW_MAGIC)) | 191 | struct pcmcia_socket *s = p_dev->socket; |
240 | return -EINVAL; | ||
241 | req->Page = 0; | ||
242 | req->CardOffset = win->ctl.card_start; | ||
243 | return 0; | ||
244 | } /* pcmcia_get_mem_page */ | ||
245 | EXPORT_SYMBOL(pcmcia_get_mem_page); | ||
246 | |||
247 | 192 | ||
248 | int pcmcia_map_mem_page(window_handle_t win, memreq_t *req) | 193 | wh--; |
249 | { | 194 | if (wh >= MAX_WIN) |
250 | struct pcmcia_socket *s; | ||
251 | if ((win == NULL) || (win->magic != WINDOW_MAGIC)) | ||
252 | return -EINVAL; | 195 | return -EINVAL; |
253 | s = win->sock; | ||
254 | if (req->Page != 0) { | 196 | if (req->Page != 0) { |
255 | ds_dbg(s, 0, "failure: requested page is zero\n"); | 197 | dev_dbg(&s->dev, "failure: requested page is zero\n"); |
256 | return -EINVAL; | 198 | return -EINVAL; |
257 | } | 199 | } |
258 | win->ctl.card_start = req->CardOffset; | 200 | s->win[wh].card_start = req->CardOffset; |
259 | if (s->ops->set_mem_map(s, &win->ctl) != 0) { | 201 | if (s->ops->set_mem_map(s, &s->win[wh]) != 0) { |
260 | ds_dbg(s, 0, "failed to set_mem_map\n"); | 202 | dev_dbg(&s->dev, "failed to set_mem_map\n"); |
261 | return -EIO; | 203 | return -EIO; |
262 | } | 204 | } |
263 | return 0; | 205 | return 0; |
@@ -278,10 +220,14 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, | |||
278 | s = p_dev->socket; | 220 | s = p_dev->socket; |
279 | c = p_dev->function_config; | 221 | c = p_dev->function_config; |
280 | 222 | ||
281 | if (!(s->state & SOCKET_PRESENT)) | 223 | if (!(s->state & SOCKET_PRESENT)) { |
224 | dev_dbg(&s->dev, "No card present\n"); | ||
282 | return -ENODEV; | 225 | return -ENODEV; |
283 | if (!(c->state & CONFIG_LOCKED)) | 226 | } |
227 | if (!(c->state & CONFIG_LOCKED)) { | ||
228 | dev_dbg(&s->dev, "Configuration isn't locked\n"); | ||
284 | return -EACCES; | 229 | return -EACCES; |
230 | } | ||
285 | 231 | ||
286 | if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { | 232 | if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { |
287 | if (mod->Attributes & CONF_ENABLE_IRQ) { | 233 | if (mod->Attributes & CONF_ENABLE_IRQ) { |
@@ -295,7 +241,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, | |||
295 | } | 241 | } |
296 | 242 | ||
297 | if (mod->Attributes & CONF_VCC_CHANGE_VALID) { | 243 | if (mod->Attributes & CONF_VCC_CHANGE_VALID) { |
298 | ds_dbg(s, 0, "changing Vcc is not allowed at this time\n"); | 244 | dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); |
299 | return -EINVAL; | 245 | return -EINVAL; |
300 | } | 246 | } |
301 | 247 | ||
@@ -303,7 +249,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, | |||
303 | if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && | 249 | if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && |
304 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { | 250 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { |
305 | if (mod->Vpp1 != mod->Vpp2) { | 251 | if (mod->Vpp1 != mod->Vpp2) { |
306 | ds_dbg(s, 0, "Vpp1 and Vpp2 must be the same\n"); | 252 | dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); |
307 | return -EINVAL; | 253 | return -EINVAL; |
308 | } | 254 | } |
309 | s->socket.Vpp = mod->Vpp1; | 255 | s->socket.Vpp = mod->Vpp1; |
@@ -314,7 +260,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, | |||
314 | } | 260 | } |
315 | } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || | 261 | } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || |
316 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { | 262 | (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { |
317 | ds_dbg(s, 0, "changing Vcc is not allowed at this time\n"); | 263 | dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); |
318 | return -EINVAL; | 264 | return -EINVAL; |
319 | } | 265 | } |
320 | 266 | ||
@@ -425,11 +371,11 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
425 | if (c->state & CONFIG_LOCKED) | 371 | if (c->state & CONFIG_LOCKED) |
426 | return -EACCES; | 372 | return -EACCES; |
427 | if (c->irq.Attributes != req->Attributes) { | 373 | if (c->irq.Attributes != req->Attributes) { |
428 | ds_dbg(s, 0, "IRQ attributes must match assigned ones\n"); | 374 | dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n"); |
429 | return -EINVAL; | 375 | return -EINVAL; |
430 | } | 376 | } |
431 | if (s->irq.AssignedIRQ != req->AssignedIRQ) { | 377 | if (s->irq.AssignedIRQ != req->AssignedIRQ) { |
432 | ds_dbg(s, 0, "IRQ must match assigned one\n"); | 378 | dev_dbg(&s->dev, "IRQ must match assigned one\n"); |
433 | return -EINVAL; | 379 | return -EINVAL; |
434 | } | 380 | } |
435 | if (--s->irq.Config == 0) { | 381 | if (--s->irq.Config == 0) { |
@@ -437,8 +383,8 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
437 | s->irq.AssignedIRQ = 0; | 383 | s->irq.AssignedIRQ = 0; |
438 | } | 384 | } |
439 | 385 | ||
440 | if (req->Attributes & IRQ_HANDLE_PRESENT) { | 386 | if (req->Handler) { |
441 | free_irq(req->AssignedIRQ, req->Instance); | 387 | free_irq(req->AssignedIRQ, p_dev->priv); |
442 | } | 388 | } |
443 | 389 | ||
444 | #ifdef CONFIG_PCMCIA_PROBE | 390 | #ifdef CONFIG_PCMCIA_PROBE |
@@ -449,30 +395,34 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
449 | } /* pcmcia_release_irq */ | 395 | } /* pcmcia_release_irq */ |
450 | 396 | ||
451 | 397 | ||
452 | int pcmcia_release_window(window_handle_t win) | 398 | int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh) |
453 | { | 399 | { |
454 | struct pcmcia_socket *s; | 400 | struct pcmcia_socket *s = p_dev->socket; |
401 | pccard_mem_map *win; | ||
455 | 402 | ||
456 | if ((win == NULL) || (win->magic != WINDOW_MAGIC)) | 403 | wh--; |
404 | if (wh >= MAX_WIN) | ||
457 | return -EINVAL; | 405 | return -EINVAL; |
458 | s = win->sock; | 406 | |
459 | if (!(win->handle->_win & CLIENT_WIN_REQ(win->index))) | 407 | win = &s->win[wh]; |
408 | |||
409 | if (!(p_dev->_win & CLIENT_WIN_REQ(wh))) { | ||
410 | dev_dbg(&s->dev, "not releasing unknown window\n"); | ||
460 | return -EINVAL; | 411 | return -EINVAL; |
412 | } | ||
461 | 413 | ||
462 | /* Shut down memory window */ | 414 | /* Shut down memory window */ |
463 | win->ctl.flags &= ~MAP_ACTIVE; | 415 | win->flags &= ~MAP_ACTIVE; |
464 | s->ops->set_mem_map(s, &win->ctl); | 416 | s->ops->set_mem_map(s, win); |
465 | s->state &= ~SOCKET_WIN_REQ(win->index); | 417 | s->state &= ~SOCKET_WIN_REQ(wh); |
466 | 418 | ||
467 | /* Release system memory */ | 419 | /* Release system memory */ |
468 | if (win->ctl.res) { | 420 | if (win->res) { |
469 | release_resource(win->ctl.res); | 421 | release_resource(win->res); |
470 | kfree(win->ctl.res); | 422 | kfree(win->res); |
471 | win->ctl.res = NULL; | 423 | win->res = NULL; |
472 | } | 424 | } |
473 | win->handle->_win &= ~CLIENT_WIN_REQ(win->index); | 425 | p_dev->_win &= ~CLIENT_WIN_REQ(wh); |
474 | |||
475 | win->magic = 0; | ||
476 | 426 | ||
477 | return 0; | 427 | return 0; |
478 | } /* pcmcia_release_window */ | 428 | } /* pcmcia_release_window */ |
@@ -492,12 +442,14 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, | |||
492 | return -ENODEV; | 442 | return -ENODEV; |
493 | 443 | ||
494 | if (req->IntType & INT_CARDBUS) { | 444 | if (req->IntType & INT_CARDBUS) { |
495 | ds_dbg(p_dev->socket, 0, "IntType may not be INT_CARDBUS\n"); | 445 | dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); |
496 | return -EINVAL; | 446 | return -EINVAL; |
497 | } | 447 | } |
498 | c = p_dev->function_config; | 448 | c = p_dev->function_config; |
499 | if (c->state & CONFIG_LOCKED) | 449 | if (c->state & CONFIG_LOCKED) { |
450 | dev_dbg(&s->dev, "Configuration is locked\n"); | ||
500 | return -EACCES; | 451 | return -EACCES; |
452 | } | ||
501 | 453 | ||
502 | /* Do power control. We don't allow changes in Vcc. */ | 454 | /* Do power control. We don't allow changes in Vcc. */ |
503 | s->socket.Vpp = req->Vpp; | 455 | s->socket.Vpp = req->Vpp; |
@@ -609,40 +561,44 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req) | |||
609 | struct pcmcia_socket *s = p_dev->socket; | 561 | struct pcmcia_socket *s = p_dev->socket; |
610 | config_t *c; | 562 | config_t *c; |
611 | 563 | ||
612 | if (!(s->state & SOCKET_PRESENT)) | 564 | if (!(s->state & SOCKET_PRESENT)) { |
565 | dev_dbg(&s->dev, "No card present\n"); | ||
613 | return -ENODEV; | 566 | return -ENODEV; |
567 | } | ||
614 | 568 | ||
615 | if (!req) | 569 | if (!req) |
616 | return -EINVAL; | 570 | return -EINVAL; |
617 | c = p_dev->function_config; | 571 | c = p_dev->function_config; |
618 | if (c->state & CONFIG_LOCKED) | 572 | if (c->state & CONFIG_LOCKED) { |
573 | dev_dbg(&s->dev, "Configuration is locked\n"); | ||
619 | return -EACCES; | 574 | return -EACCES; |
575 | } | ||
620 | if (c->state & CONFIG_IO_REQ) { | 576 | if (c->state & CONFIG_IO_REQ) { |
621 | ds_dbg(s, 0, "IO already configured\n"); | 577 | dev_dbg(&s->dev, "IO already configured\n"); |
622 | return -EBUSY; | 578 | return -EBUSY; |
623 | } | 579 | } |
624 | if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) { | 580 | if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) { |
625 | ds_dbg(s, 0, "bad attribute setting for IO region 1\n"); | 581 | dev_dbg(&s->dev, "bad attribute setting for IO region 1\n"); |
626 | return -EINVAL; | 582 | return -EINVAL; |
627 | } | 583 | } |
628 | if ((req->NumPorts2 > 0) && | 584 | if ((req->NumPorts2 > 0) && |
629 | (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) { | 585 | (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) { |
630 | ds_dbg(s, 0, "bad attribute setting for IO region 2\n"); | 586 | dev_dbg(&s->dev, "bad attribute setting for IO region 2\n"); |
631 | return -EINVAL; | 587 | return -EINVAL; |
632 | } | 588 | } |
633 | 589 | ||
634 | ds_dbg(s, 1, "trying to allocate resource 1\n"); | 590 | dev_dbg(&s->dev, "trying to allocate resource 1\n"); |
635 | if (alloc_io_space(s, req->Attributes1, &req->BasePort1, | 591 | if (alloc_io_space(s, req->Attributes1, &req->BasePort1, |
636 | req->NumPorts1, req->IOAddrLines)) { | 592 | req->NumPorts1, req->IOAddrLines)) { |
637 | ds_dbg(s, 0, "allocation of resource 1 failed\n"); | 593 | dev_dbg(&s->dev, "allocation of resource 1 failed\n"); |
638 | return -EBUSY; | 594 | return -EBUSY; |
639 | } | 595 | } |
640 | 596 | ||
641 | if (req->NumPorts2) { | 597 | if (req->NumPorts2) { |
642 | ds_dbg(s, 1, "trying to allocate resource 2\n"); | 598 | dev_dbg(&s->dev, "trying to allocate resource 2\n"); |
643 | if (alloc_io_space(s, req->Attributes2, &req->BasePort2, | 599 | if (alloc_io_space(s, req->Attributes2, &req->BasePort2, |
644 | req->NumPorts2, req->IOAddrLines)) { | 600 | req->NumPorts2, req->IOAddrLines)) { |
645 | ds_dbg(s, 0, "allocation of resource 2 failed\n"); | 601 | dev_dbg(&s->dev, "allocation of resource 2 failed\n"); |
646 | release_io_space(s, req->BasePort1, req->NumPorts1); | 602 | release_io_space(s, req->BasePort1, req->NumPorts1); |
647 | return -EBUSY; | 603 | return -EBUSY; |
648 | } | 604 | } |
@@ -680,13 +636,17 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
680 | int ret = -EINVAL, irq = 0; | 636 | int ret = -EINVAL, irq = 0; |
681 | int type; | 637 | int type; |
682 | 638 | ||
683 | if (!(s->state & SOCKET_PRESENT)) | 639 | if (!(s->state & SOCKET_PRESENT)) { |
640 | dev_dbg(&s->dev, "No card present\n"); | ||
684 | return -ENODEV; | 641 | return -ENODEV; |
642 | } | ||
685 | c = p_dev->function_config; | 643 | c = p_dev->function_config; |
686 | if (c->state & CONFIG_LOCKED) | 644 | if (c->state & CONFIG_LOCKED) { |
645 | dev_dbg(&s->dev, "Configuration is locked\n"); | ||
687 | return -EACCES; | 646 | return -EACCES; |
647 | } | ||
688 | if (c->state & CONFIG_IRQ_REQ) { | 648 | if (c->state & CONFIG_IRQ_REQ) { |
689 | ds_dbg(s, 0, "IRQ already configured\n"); | 649 | dev_dbg(&s->dev, "IRQ already configured\n"); |
690 | return -EBUSY; | 650 | return -EBUSY; |
691 | } | 651 | } |
692 | 652 | ||
@@ -704,7 +664,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
704 | /* if the underlying IRQ infrastructure allows for it, only allocate | 664 | /* if the underlying IRQ infrastructure allows for it, only allocate |
705 | * the IRQ, but do not enable it | 665 | * the IRQ, but do not enable it |
706 | */ | 666 | */ |
707 | if (!(req->Attributes & IRQ_HANDLE_PRESENT)) | 667 | if (!(req->Handler)) |
708 | type |= IRQ_NOAUTOEN; | 668 | type |= IRQ_NOAUTOEN; |
709 | #endif /* IRQ_NOAUTOEN */ | 669 | #endif /* IRQ_NOAUTOEN */ |
710 | 670 | ||
@@ -714,7 +674,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
714 | } else { | 674 | } else { |
715 | int try; | 675 | int try; |
716 | u32 mask = s->irq_mask; | 676 | u32 mask = s->irq_mask; |
717 | void *data = &p_dev->dev.driver; /* something unique to this device */ | 677 | void *data = p_dev; /* something unique to this device */ |
718 | 678 | ||
719 | for (try = 0; try < 64; try++) { | 679 | for (try = 0; try < 64; try++) { |
720 | irq = try % 32; | 680 | irq = try % 32; |
@@ -731,12 +691,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
731 | * registering a dummy handler works, i.e. if the IRQ isn't | 691 | * registering a dummy handler works, i.e. if the IRQ isn't |
732 | * marked as used by the kernel resource management core */ | 692 | * marked as used by the kernel resource management core */ |
733 | ret = request_irq(irq, | 693 | ret = request_irq(irq, |
734 | (req->Attributes & IRQ_HANDLE_PRESENT) ? req->Handler : test_action, | 694 | (req->Handler) ? req->Handler : test_action, |
735 | type, | 695 | type, |
736 | p_dev->devname, | 696 | p_dev->devname, |
737 | (req->Attributes & IRQ_HANDLE_PRESENT) ? req->Instance : data); | 697 | (req->Handler) ? p_dev->priv : data); |
738 | if (!ret) { | 698 | if (!ret) { |
739 | if (!(req->Attributes & IRQ_HANDLE_PRESENT)) | 699 | if (!req->Handler) |
740 | free_irq(irq, data); | 700 | free_irq(irq, data); |
741 | break; | 701 | break; |
742 | } | 702 | } |
@@ -745,17 +705,22 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
745 | #endif | 705 | #endif |
746 | /* only assign PCI irq if no IRQ already assigned */ | 706 | /* only assign PCI irq if no IRQ already assigned */ |
747 | if (ret && !s->irq.AssignedIRQ) { | 707 | if (ret && !s->irq.AssignedIRQ) { |
748 | if (!s->pci_irq) | 708 | if (!s->pci_irq) { |
709 | dev_printk(KERN_INFO, &s->dev, "no IRQ found\n"); | ||
749 | return ret; | 710 | return ret; |
711 | } | ||
750 | type = IRQF_SHARED; | 712 | type = IRQF_SHARED; |
751 | irq = s->pci_irq; | 713 | irq = s->pci_irq; |
752 | } | 714 | } |
753 | 715 | ||
754 | if (ret && (req->Attributes & IRQ_HANDLE_PRESENT)) { | 716 | if (ret && req->Handler) { |
755 | ret = request_irq(irq, req->Handler, type, | 717 | ret = request_irq(irq, req->Handler, type, |
756 | p_dev->devname, req->Instance); | 718 | p_dev->devname, p_dev->priv); |
757 | if (ret) | 719 | if (ret) { |
720 | dev_printk(KERN_INFO, &s->dev, | ||
721 | "request_irq() failed\n"); | ||
758 | return ret; | 722 | return ret; |
723 | } | ||
759 | } | 724 | } |
760 | 725 | ||
761 | /* Make sure the fact the request type was overridden is passed back */ | 726 | /* Make sure the fact the request type was overridden is passed back */ |
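With IRQ_HANDLE_PRESENT gone, the hunks above make the core decide whether to install a handler simply by looking at req->Handler, and the dev_id handed to request_irq()/free_irq() is now p_dev->priv instead of the old req->Instance. The following is a minimal sketch of a client driver under the new convention; struct my_card, my_card_isr() and my_card_request_irq() are hypothetical names used only for illustration and are not part of the PCMCIA API.

#include <linux/interrupt.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

struct my_card {				/* hypothetical per-device state */
	int irq;
};

static irqreturn_t my_card_isr(int irq, void *dev_id)
{
	struct my_card *card = dev_id;		/* == p_dev->priv, set below */

	/* ... acknowledge and handle the card interrupt ... */
	(void)card;
	return IRQ_HANDLED;
}

static int my_card_request_irq(struct pcmcia_device *p_dev, struct my_card *card)
{
	int ret;

	p_dev->priv = card;			/* passed to my_card_isr() as dev_id */
	p_dev->irq.Handler = my_card_isr;	/* non-NULL Handler: core installs it */

	ret = pcmcia_request_irq(p_dev, &p_dev->irq);
	if (ret)
		return ret;

	card->irq = p_dev->irq.AssignedIRQ;	/* the line the core actually picked */
	return 0;
}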
@@ -787,17 +752,19 @@ EXPORT_SYMBOL(pcmcia_request_irq); | |||
787 | * Request_window() establishes a mapping between card memory space | 752 | * Request_window() establishes a mapping between card memory space |
788 | * and system memory space. | 753 | * and system memory space. |
789 | */ | 754 | */ |
790 | int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh) | 755 | int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_handle_t *wh) |
791 | { | 756 | { |
792 | struct pcmcia_socket *s = (*p_dev)->socket; | 757 | struct pcmcia_socket *s = p_dev->socket; |
793 | window_t *win; | 758 | pccard_mem_map *win; |
794 | u_long align; | 759 | u_long align; |
795 | int w; | 760 | int w; |
796 | 761 | ||
797 | if (!(s->state & SOCKET_PRESENT)) | 762 | if (!(s->state & SOCKET_PRESENT)) { |
763 | dev_dbg(&s->dev, "No card present\n"); | ||
798 | return -ENODEV; | 764 | return -ENODEV; |
765 | } | ||
799 | if (req->Attributes & (WIN_PAGED | WIN_SHARED)) { | 766 | if (req->Attributes & (WIN_PAGED | WIN_SHARED)) { |
800 | ds_dbg(s, 0, "bad attribute setting for iomem region\n"); | 767 | dev_dbg(&s->dev, "bad attribute setting for iomem region\n"); |
801 | return -EINVAL; | 768 | return -EINVAL; |
802 | } | 769 | } |
803 | 770 | ||
@@ -808,12 +775,12 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h | |||
808 | (req->Attributes & WIN_STRICT_ALIGN)) ? | 775 | (req->Attributes & WIN_STRICT_ALIGN)) ? |
809 | req->Size : s->map_size); | 776 | req->Size : s->map_size); |
810 | if (req->Size & (s->map_size-1)) { | 777 | if (req->Size & (s->map_size-1)) { |
811 | ds_dbg(s, 0, "invalid map size\n"); | 778 | dev_dbg(&s->dev, "invalid map size\n"); |
812 | return -EINVAL; | 779 | return -EINVAL; |
813 | } | 780 | } |
814 | if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || | 781 | if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || |
815 | (req->Base & (align-1))) { | 782 | (req->Base & (align-1))) { |
816 | ds_dbg(s, 0, "invalid base address\n"); | 783 | dev_dbg(&s->dev, "invalid base address\n"); |
817 | return -EINVAL; | 784 | return -EINVAL; |
818 | } | 785 | } |
819 | if (req->Base) | 786 | if (req->Base) |
@@ -823,52 +790,48 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h | |||
823 | for (w = 0; w < MAX_WIN; w++) | 790 | for (w = 0; w < MAX_WIN; w++) |
824 | if (!(s->state & SOCKET_WIN_REQ(w))) break; | 791 | if (!(s->state & SOCKET_WIN_REQ(w))) break; |
825 | if (w == MAX_WIN) { | 792 | if (w == MAX_WIN) { |
826 | ds_dbg(s, 0, "all windows are used already\n"); | 793 | dev_dbg(&s->dev, "all windows are used already\n"); |
827 | return -EINVAL; | 794 | return -EINVAL; |
828 | } | 795 | } |
829 | 796 | ||
830 | win = &s->win[w]; | 797 | win = &s->win[w]; |
831 | win->magic = WINDOW_MAGIC; | ||
832 | win->index = w; | ||
833 | win->handle = *p_dev; | ||
834 | win->sock = s; | ||
835 | 798 | ||
836 | if (!(s->features & SS_CAP_STATIC_MAP)) { | 799 | if (!(s->features & SS_CAP_STATIC_MAP)) { |
837 | win->ctl.res = pcmcia_find_mem_region(req->Base, req->Size, align, | 800 | win->res = pcmcia_find_mem_region(req->Base, req->Size, align, |
838 | (req->Attributes & WIN_MAP_BELOW_1MB), s); | 801 | (req->Attributes & WIN_MAP_BELOW_1MB), s); |
839 | if (!win->ctl.res) { | 802 | if (!win->res) { |
840 | ds_dbg(s, 0, "allocating mem region failed\n"); | 803 | dev_dbg(&s->dev, "allocating mem region failed\n"); |
841 | return -EINVAL; | 804 | return -EINVAL; |
842 | } | 805 | } |
843 | } | 806 | } |
844 | (*p_dev)->_win |= CLIENT_WIN_REQ(w); | 807 | p_dev->_win |= CLIENT_WIN_REQ(w); |
845 | 808 | ||
846 | /* Configure the socket controller */ | 809 | /* Configure the socket controller */ |
847 | win->ctl.map = w+1; | 810 | win->map = w+1; |
848 | win->ctl.flags = 0; | 811 | win->flags = 0; |
849 | win->ctl.speed = req->AccessSpeed; | 812 | win->speed = req->AccessSpeed; |
850 | if (req->Attributes & WIN_MEMORY_TYPE) | 813 | if (req->Attributes & WIN_MEMORY_TYPE) |
851 | win->ctl.flags |= MAP_ATTRIB; | 814 | win->flags |= MAP_ATTRIB; |
852 | if (req->Attributes & WIN_ENABLE) | 815 | if (req->Attributes & WIN_ENABLE) |
853 | win->ctl.flags |= MAP_ACTIVE; | 816 | win->flags |= MAP_ACTIVE; |
854 | if (req->Attributes & WIN_DATA_WIDTH_16) | 817 | if (req->Attributes & WIN_DATA_WIDTH_16) |
855 | win->ctl.flags |= MAP_16BIT; | 818 | win->flags |= MAP_16BIT; |
856 | if (req->Attributes & WIN_USE_WAIT) | 819 | if (req->Attributes & WIN_USE_WAIT) |
857 | win->ctl.flags |= MAP_USE_WAIT; | 820 | win->flags |= MAP_USE_WAIT; |
858 | win->ctl.card_start = 0; | 821 | win->card_start = 0; |
859 | if (s->ops->set_mem_map(s, &win->ctl) != 0) { | 822 | if (s->ops->set_mem_map(s, win) != 0) { |
860 | ds_dbg(s, 0, "failed to set memory mapping\n"); | 823 | dev_dbg(&s->dev, "failed to set memory mapping\n"); |
861 | return -EIO; | 824 | return -EIO; |
862 | } | 825 | } |
863 | s->state |= SOCKET_WIN_REQ(w); | 826 | s->state |= SOCKET_WIN_REQ(w); |
864 | 827 | ||
865 | /* Return window handle */ | 828 | /* Return window handle */ |
866 | if (s->features & SS_CAP_STATIC_MAP) { | 829 | if (s->features & SS_CAP_STATIC_MAP) { |
867 | req->Base = win->ctl.static_start; | 830 | req->Base = win->static_start; |
868 | } else { | 831 | } else { |
869 | req->Base = win->ctl.res->start; | 832 | req->Base = win->res->start; |
870 | } | 833 | } |
871 | *wh = win; | 834 | *wh = w + 1; |
872 | 835 | ||
873 | return 0; | 836 | return 0; |
874 | } /* pcmcia_request_window */ | 837 | } /* pcmcia_request_window */ |
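pcmcia_request_window() and pcmcia_release_window() now identify a window by the owning pcmcia_device plus a small integer handle (window index + 1) instead of a pointer into the socket's window array. Below is a rough sketch of the calling sequence under the new interface; the helper name and the chosen Attributes/Size/AccessSpeed values are made up for illustration, and a real driver would normally also map a card offset into the window before using it.

#include <linux/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

/* hypothetical helper: set up a small 16-bit memory window on the card */
static void __iomem *my_card_map_window(struct pcmcia_device *p_dev,
					win_req_t *req, window_handle_t *wh)
{
	void __iomem *base;

	req->Attributes = WIN_DATA_WIDTH_16 | WIN_ENABLE;
	req->Base = 0;			/* 0: let the core pick a region */
	req->Size = 0x1000;		/* must be a multiple of s->map_size */
	req->AccessSpeed = 250;		/* in ns; device dependent */

	if (pcmcia_request_window(p_dev, req, wh))
		return NULL;		/* *wh is only valid on success */

	base = ioremap(req->Base, req->Size);	/* req->Base was filled in by the core */
	if (!base)
		pcmcia_release_window(p_dev, *wh);	/* wh is an index now, not a pointer */

	return base;
}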
@@ -879,19 +842,46 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev) { | |||
879 | pcmcia_release_io(p_dev, &p_dev->io); | 842 | pcmcia_release_io(p_dev, &p_dev->io); |
880 | pcmcia_release_irq(p_dev, &p_dev->irq); | 843 | pcmcia_release_irq(p_dev, &p_dev->irq); |
881 | if (p_dev->win) | 844 | if (p_dev->win) |
882 | pcmcia_release_window(p_dev->win); | 845 | pcmcia_release_window(p_dev, p_dev->win); |
883 | } | 846 | } |
884 | EXPORT_SYMBOL(pcmcia_disable_device); | 847 | EXPORT_SYMBOL(pcmcia_disable_device); |
885 | 848 | ||
886 | 849 | ||
887 | struct pcmcia_cfg_mem { | 850 | struct pcmcia_cfg_mem { |
888 | tuple_t tuple; | 851 | struct pcmcia_device *p_dev; |
852 | void *priv_data; | ||
853 | int (*conf_check) (struct pcmcia_device *p_dev, | ||
854 | cistpl_cftable_entry_t *cfg, | ||
855 | cistpl_cftable_entry_t *dflt, | ||
856 | unsigned int vcc, | ||
857 | void *priv_data); | ||
889 | cisparse_t parse; | 858 | cisparse_t parse; |
890 | u8 buf[256]; | ||
891 | cistpl_cftable_entry_t dflt; | 859 | cistpl_cftable_entry_t dflt; |
892 | }; | 860 | }; |
893 | 861 | ||
894 | /** | 862 | /** |
863 | * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config() | ||
864 | * | ||
865 | * pcmcia_do_loop_config() is the internal callback for the call from | ||
866 | * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred | ||
867 | * by a struct pcmcia_cfg_mem. | ||
868 | */ | ||
869 | static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv) | ||
870 | { | ||
871 | cistpl_cftable_entry_t *cfg = &parse->cftable_entry; | ||
872 | struct pcmcia_cfg_mem *cfg_mem = priv; | ||
873 | |||
874 | /* default values */ | ||
875 | cfg_mem->p_dev->conf.ConfigIndex = cfg->index; | ||
876 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
877 | cfg_mem->dflt = *cfg; | ||
878 | |||
879 | return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt, | ||
880 | cfg_mem->p_dev->socket->socket.Vcc, | ||
881 | cfg_mem->priv_data); | ||
882 | } | ||
883 | |||
884 | /** | ||
895 | * pcmcia_loop_config() - loop over configuration options | 885 | * pcmcia_loop_config() - loop over configuration options |
896 | * @p_dev: the struct pcmcia_device which we need to loop for. | 886 | * @p_dev: the struct pcmcia_device which we need to loop for. |
897 | * @conf_check: function to call for each configuration option. | 887 | * @conf_check: function to call for each configuration option. |
@@ -913,48 +903,174 @@ int pcmcia_loop_config(struct pcmcia_device *p_dev, | |||
913 | void *priv_data) | 903 | void *priv_data) |
914 | { | 904 | { |
915 | struct pcmcia_cfg_mem *cfg_mem; | 905 | struct pcmcia_cfg_mem *cfg_mem; |
916 | |||
917 | tuple_t *tuple; | ||
918 | int ret; | 906 | int ret; |
919 | unsigned int vcc; | ||
920 | 907 | ||
921 | cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL); | 908 | cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL); |
922 | if (cfg_mem == NULL) | 909 | if (cfg_mem == NULL) |
923 | return -ENOMEM; | 910 | return -ENOMEM; |
924 | 911 | ||
925 | /* get the current Vcc setting */ | 912 | cfg_mem->p_dev = p_dev; |
926 | vcc = p_dev->socket->socket.Vcc; | 913 | cfg_mem->conf_check = conf_check; |
914 | cfg_mem->priv_data = priv_data; | ||
927 | 915 | ||
928 | tuple = &cfg_mem->tuple; | 916 | ret = pccard_loop_tuple(p_dev->socket, p_dev->func, |
929 | tuple->TupleData = cfg_mem->buf; | 917 | CISTPL_CFTABLE_ENTRY, &cfg_mem->parse, |
930 | tuple->TupleDataMax = 255; | 918 | cfg_mem, pcmcia_do_loop_config); |
931 | tuple->TupleOffset = 0; | ||
932 | tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
933 | tuple->Attributes = 0; | ||
934 | 919 | ||
935 | ret = pcmcia_get_first_tuple(p_dev, tuple); | 920 | kfree(cfg_mem); |
936 | while (!ret) { | 921 | return ret; |
937 | cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry; | 922 | } |
923 | EXPORT_SYMBOL(pcmcia_loop_config); | ||
924 | |||
925 | |||
926 | struct pcmcia_loop_mem { | ||
927 | struct pcmcia_device *p_dev; | ||
928 | void *priv_data; | ||
929 | int (*loop_tuple) (struct pcmcia_device *p_dev, | ||
930 | tuple_t *tuple, | ||
931 | void *priv_data); | ||
932 | }; | ||
933 | |||
934 | /** | ||
935 | * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple() | ||
936 | * | ||
937 | * pcmcia_do_loop_tuple() is the internal callback for the call from | ||
938 | * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred | ||
939 | * by a struct pcmcia_loop_mem. | ||
940 | */ | ||
941 | static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv) | ||
942 | { | ||
943 | struct pcmcia_loop_mem *loop = priv; | ||
944 | |||
945 | return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data); | ||
946 | }; | ||
947 | |||
948 | /** | ||
949 | * pcmcia_loop_tuple() - loop over tuples in the CIS | ||
950 | * @p_dev: the struct pcmcia_device which we need to loop for. | ||
951 | * @code: which CIS code shall we look for? | ||
952 | * @priv_data: private data to be passed to the loop_tuple function. | ||
953 | * @loop_tuple: function to call for each CIS entry of type @code. It | ||
954 | * gets passed the raw tuple and @priv_data. | ||
955 | * | ||
956 | * pcmcia_loop_tuple() loops over all CIS entries of type @code, and | ||
957 | * calls the @loop_tuple function for each entry. If the call to @loop_tuple | ||
958 | * returns 0, the loop exits. Returns 0 on success or an error code otherwise. | ||
959 | */ | ||
960 | int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code, | ||
961 | int (*loop_tuple) (struct pcmcia_device *p_dev, | ||
962 | tuple_t *tuple, | ||
963 | void *priv_data), | ||
964 | void *priv_data) | ||
965 | { | ||
966 | struct pcmcia_loop_mem loop = { | ||
967 | .p_dev = p_dev, | ||
968 | .loop_tuple = loop_tuple, | ||
969 | .priv_data = priv_data}; | ||
938 | 970 | ||
939 | if (pcmcia_get_tuple_data(p_dev, tuple)) | 971 | return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL, |
940 | goto next_entry; | 972 | &loop, pcmcia_do_loop_tuple); |
973 | }; | ||
974 | EXPORT_SYMBOL(pcmcia_loop_tuple); | ||
941 | 975 | ||
942 | if (pcmcia_parse_tuple(tuple, &cfg_mem->parse)) | ||
943 | goto next_entry; | ||
944 | 976 | ||
945 | /* default values */ | 977 | struct pcmcia_loop_get { |
946 | p_dev->conf.ConfigIndex = cfg->index; | 978 | size_t len; |
947 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | 979 | cisdata_t **buf; |
948 | cfg_mem->dflt = *cfg; | 980 | }; |
949 | 981 | ||
950 | ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data); | 982 | /** |
951 | if (!ret) | 983 | * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple() |
952 | break; | 984 | * |
985 | * pcmcia_do_get_tuple() is the internal callback for the call from | ||
986 | * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in | ||
987 | * the first tuple, return 0 unconditionally. Create a memory buffer large | ||
988 | * enough to hold the content of the tuple, and fill it with the tuple data. | ||
989 | * The caller is responsible for freeing the buffer. | ||
990 | */ | ||
991 | static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple, | ||
992 | void *priv) | ||
993 | { | ||
994 | struct pcmcia_loop_get *get = priv; | ||
995 | |||
996 | *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL); | ||
997 | if (*get->buf) { | ||
998 | get->len = tuple->TupleDataLen; | ||
999 | memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen); | ||
1000 | } else | ||
1001 | dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n"); | ||
1002 | return 0; | ||
1003 | }; | ||
953 | 1004 | ||
954 | next_entry: | 1005 | /** |
955 | ret = pcmcia_get_next_tuple(p_dev, tuple); | 1006 | * pcmcia_get_tuple() - get first tuple from CIS |
1007 | * @p_dev: the struct pcmcia_device which we need to loop for. | ||
1008 | * @code: which CIS code shall we look for? | ||
1009 | * @buf: pointer to store the buffer to. | ||
1010 | * | ||
1011 | * pcmcia_get_tuple() gets the content of the first CIS entry of type @code. | ||
1012 | * It returns the buffer length (or zero). The caller is responsible for freeing | ||
1013 | * the buffer passed in @buf. | ||
1014 | */ | ||
1015 | size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code, | ||
1016 | unsigned char **buf) | ||
1017 | { | ||
1018 | struct pcmcia_loop_get get = { | ||
1019 | .len = 0, | ||
1020 | .buf = buf, | ||
1021 | }; | ||
1022 | |||
1023 | *get.buf = NULL; | ||
1024 | pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get); | ||
1025 | |||
1026 | return get.len; | ||
1027 | }; | ||
1028 | EXPORT_SYMBOL(pcmcia_get_tuple); | ||
1029 | |||
1030 | |||
1031 | /** | ||
1032 | * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis() | ||
1033 | * | ||
1034 | * pcmcia_do_get_mac() is the internal callback for the call from | ||
1035 | * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the | ||
1036 | * tuple contains a proper LAN_NODE_ID of length 6, and copy the data | ||
1037 | * to struct net_device->dev_addr[i]. | ||
1038 | */ | ||
1039 | static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple, | ||
1040 | void *priv) | ||
1041 | { | ||
1042 | struct net_device *dev = priv; | ||
1043 | int i; | ||
1044 | |||
1045 | if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID) | ||
1046 | return -EINVAL; | ||
1047 | if (tuple->TupleDataLen < ETH_ALEN + 2) { | ||
1048 | dev_warn(&p_dev->dev, "Invalid CIS tuple length for " | ||
1049 | "LAN_NODE_ID\n"); | ||
1050 | return -EINVAL; | ||
956 | } | 1051 | } |
957 | 1052 | ||
958 | return ret; | 1053 | if (tuple->TupleData[1] != ETH_ALEN) { |
959 | } | 1054 | dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n"); |
960 | EXPORT_SYMBOL(pcmcia_loop_config); | 1055 | return -EINVAL; |
1056 | } | ||
1057 | for (i = 0; i < 6; i++) | ||
1058 | dev->dev_addr[i] = tuple->TupleData[i+2]; | ||
1059 | return 0; | ||
1060 | }; | ||
1061 | |||
1062 | /** | ||
1063 | * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE | ||
1064 | * @p_dev: the struct pcmcia_device for which we want the address. | ||
1065 | * @dev: a properly prepared struct net_device in which to store the info. | ||
1066 | * | ||
1067 | * pcmcia_get_mac_from_cis() reads out the hardware MAC address from | ||
1068 | * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which | ||
1069 | * must be set up properly by the driver (see examples!). | ||
1070 | */ | ||
1071 | int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev) | ||
1072 | { | ||
1073 | return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev); | ||
1074 | }; | ||
1075 | EXPORT_SYMBOL(pcmcia_get_mac_from_cis); | ||
1076 | |||
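The new pcmcia_loop_tuple()/pcmcia_get_tuple()/pcmcia_get_mac_from_cis() helpers, together with the reworked pcmcia_loop_config(), let client drivers walk the CIS without keeping their own tuple_t buffers. The fragment below is a hedged sketch of how a network driver might use them; my_conf_check(), my_card_setup() and the handling of cfg->io are illustrative only, and the cistpl_io_t field names are as recalled from the CIS parser headers of this era and should be verified.

#include <linux/netdevice.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int my_conf_check(struct pcmcia_device *p_dev,
			 cistpl_cftable_entry_t *cfg,
			 cistpl_cftable_entry_t *dflt,
			 unsigned int vcc, void *priv_data)
{
	/* prefer the entry's own I/O description, fall back to the default entry */
	cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;

	if (!cfg->index || !io->nwin)
		return -ENODEV;		/* non-zero: try the next entry */

	p_dev->io.BasePort1 = io->win[0].base;
	p_dev->io.NumPorts1 = io->win[0].len;

	/* returning 0 stops the loop with this entry selected */
	return pcmcia_request_io(p_dev, &p_dev->io);
}

static int my_card_setup(struct pcmcia_device *p_dev, struct net_device *netdev)
{
	int ret;

	ret = pcmcia_loop_config(p_dev, my_conf_check, NULL);
	if (ret)
		return ret;

	/* fills netdev->dev_addr from the CISTPL_FUNCE/LAN_NODE_ID tuple */
	return pcmcia_get_mac_from_cis(p_dev, netdev);
}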
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 70a33468bcd0..e1741cd875aa 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c | |||
@@ -213,7 +213,8 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev) | |||
213 | 213 | ||
214 | if (csc & I365_CSC_DETECT) { | 214 | if (csc & I365_CSC_DETECT) { |
215 | events |= SS_DETECT; | 215 | events |= SS_DETECT; |
216 | dprintk("Card detected in socket %i!\n", i); | 216 | dev_vdbg(&socket[i].socket.dev, |
217 | "Card detected in socket %i!\n", i); | ||
217 | } | 218 | } |
218 | 219 | ||
219 | if (indirect_read(&socket[i], I365_INTCTL) | 220 | if (indirect_read(&socket[i], I365_INTCTL) |
@@ -331,11 +332,11 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) | |||
331 | reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */ | 332 | reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */ |
332 | 333 | ||
333 | if (state->flags & SS_PWR_AUTO) { | 334 | if (state->flags & SS_PWR_AUTO) { |
334 | dprintk("Auto power\n"); | 335 | dev_dbg(&sock->dev, "Auto power\n"); |
335 | reg |= I365_PWR_AUTO; /* automatic power mngmnt */ | 336 | reg |= I365_PWR_AUTO; /* automatic power mngmnt */ |
336 | } | 337 | } |
337 | if (state->flags & SS_OUTPUT_ENA) { | 338 | if (state->flags & SS_OUTPUT_ENA) { |
338 | dprintk("Power Enabled\n"); | 339 | dev_dbg(&sock->dev, "Power Enabled\n"); |
339 | reg |= I365_PWR_OUT; /* enable power */ | 340 | reg |= I365_PWR_OUT; /* enable power */ |
340 | } | 341 | } |
341 | 342 | ||
@@ -343,40 +344,44 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) | |||
343 | case 0: | 344 | case 0: |
344 | break; | 345 | break; |
345 | case 33: | 346 | case 33: |
346 | dprintk("setting voltage to Vcc to 3.3V on socket %i\n", | 347 | dev_dbg(&sock->dev, |
348 | "setting voltage to Vcc to 3.3V on socket %i\n", | ||
347 | socket->number); | 349 | socket->number); |
348 | reg |= I365_VCC_5V; | 350 | reg |= I365_VCC_5V; |
349 | indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); | 351 | indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); |
350 | break; | 352 | break; |
351 | case 50: | 353 | case 50: |
352 | dprintk("setting voltage to Vcc to 5V on socket %i\n", | 354 | dev_dbg(&sock->dev, |
355 | "setting voltage to Vcc to 5V on socket %i\n", | ||
353 | socket->number); | 356 | socket->number); |
354 | reg |= I365_VCC_5V; | 357 | reg |= I365_VCC_5V; |
355 | indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); | 358 | indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); |
356 | break; | 359 | break; |
357 | default: | 360 | default: |
358 | dprintk("pd6729: pd6729_set_socket called with " | 361 | dev_dbg(&sock->dev, |
359 | "invalid VCC power value: %i\n", | 362 | "pd6729_set_socket called with invalid VCC power " |
360 | state->Vcc); | 363 | "value: %i\n", state->Vcc); |
361 | return -EINVAL; | 364 | return -EINVAL; |
362 | } | 365 | } |
363 | 366 | ||
364 | switch (state->Vpp) { | 367 | switch (state->Vpp) { |
365 | case 0: | 368 | case 0: |
366 | dprintk("not setting Vpp on socket %i\n", socket->number); | 369 | dev_dbg(&sock->dev, "not setting Vpp on socket %i\n", |
370 | socket->number); | ||
367 | break; | 371 | break; |
368 | case 33: | 372 | case 33: |
369 | case 50: | 373 | case 50: |
370 | dprintk("setting Vpp to Vcc for socket %i\n", socket->number); | 374 | dev_dbg(&sock->dev, "setting Vpp to Vcc for socket %i\n", |
375 | socket->number); | ||
371 | reg |= I365_VPP1_5V; | 376 | reg |= I365_VPP1_5V; |
372 | break; | 377 | break; |
373 | case 120: | 378 | case 120: |
374 | dprintk("setting Vpp to 12.0\n"); | 379 | dev_dbg(&sock->dev, "setting Vpp to 12.0\n"); |
375 | reg |= I365_VPP1_12V; | 380 | reg |= I365_VPP1_12V; |
376 | break; | 381 | break; |
377 | default: | 382 | default: |
378 | dprintk("pd6729: pd6729_set_socket called with invalid VPP power value: %i\n", | 383 | dev_dbg(&sock->dev, "pd6729: pd6729_set_socket called with " |
379 | state->Vpp); | 384 | "invalid VPP power value: %i\n", state->Vpp); |
380 | return -EINVAL; | 385 | return -EINVAL; |
381 | } | 386 | } |
382 | 387 | ||
@@ -438,7 +443,7 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock, | |||
438 | 443 | ||
439 | /* Check error conditions */ | 444 | /* Check error conditions */ |
440 | if (map > 1) { | 445 | if (map > 1) { |
441 | dprintk("pd6729_set_io_map with invalid map"); | 446 | dev_dbg(&sock->dev, "pd6729_set_io_map with invalid map\n"); |
442 | return -EINVAL; | 447 | return -EINVAL; |
443 | } | 448 | } |
444 | 449 | ||
@@ -446,7 +451,7 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock, | |||
446 | if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) | 451 | if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) |
447 | indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); | 452 | indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); |
448 | 453 | ||
449 | /* dprintk("set_io_map: Setting range to %x - %x\n", | 454 | /* dev_dbg(&sock->dev, "set_io_map: Setting range to %x - %x\n", |
450 | io->start, io->stop);*/ | 455 | io->start, io->stop);*/ |
451 | 456 | ||
452 | /* write the new values */ | 457 | /* write the new values */ |
@@ -478,12 +483,12 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock, | |||
478 | 483 | ||
479 | map = mem->map; | 484 | map = mem->map; |
480 | if (map > 4) { | 485 | if (map > 4) { |
481 | printk("pd6729_set_mem_map: invalid map"); | 486 | dev_warn(&sock->dev, "invalid map requested\n"); |
482 | return -EINVAL; | 487 | return -EINVAL; |
483 | } | 488 | } |
484 | 489 | ||
485 | if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) { | 490 | if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) { |
486 | printk("pd6729_set_mem_map: invalid address / speed"); | 491 | dev_warn(&sock->dev, "invalid address / speed\n"); |
487 | return -EINVAL; | 492 | return -EINVAL; |
488 | } | 493 | } |
489 | 494 | ||
@@ -529,12 +534,12 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock, | |||
529 | if (mem->flags & MAP_WRPROT) | 534 | if (mem->flags & MAP_WRPROT) |
530 | i |= I365_MEM_WRPROT; | 535 | i |= I365_MEM_WRPROT; |
531 | if (mem->flags & MAP_ATTRIB) { | 536 | if (mem->flags & MAP_ATTRIB) { |
532 | /* dprintk("requesting attribute memory for socket %i\n", | 537 | /* dev_dbg(&sock->dev, "requesting attribute memory for " |
533 | socket->number);*/ | 538 | "socket %i\n", socket->number);*/ |
534 | i |= I365_MEM_REG; | 539 | i |= I365_MEM_REG; |
535 | } else { | 540 | } else { |
536 | /* dprintk("requesting normal memory for socket %i\n", | 541 | /* dev_dbg(&sock->dev, "requesting normal memory for " |
537 | socket->number);*/ | 542 | "socket %i\n", socket->number);*/ |
538 | } | 543 | } |
539 | indirect_write16(socket, base + I365_W_OFF, i); | 544 | indirect_write16(socket, base + I365_W_OFF, i); |
540 | 545 | ||
@@ -577,7 +582,7 @@ static struct pccard_operations pd6729_operations = { | |||
577 | 582 | ||
578 | static irqreturn_t pd6729_test(int irq, void *dev) | 583 | static irqreturn_t pd6729_test(int irq, void *dev) |
579 | { | 584 | { |
580 | dprintk("-> hit on irq %d\n", irq); | 585 | pr_devel("-> hit on irq %d\n", irq); |
581 | return IRQ_HANDLED; | 586 | return IRQ_HANDLED; |
582 | } | 587 | } |
583 | 588 | ||
@@ -642,13 +647,13 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
642 | goto err_out_free_mem; | 647 | goto err_out_free_mem; |
643 | 648 | ||
644 | if (!pci_resource_start(dev, 0)) { | 649 | if (!pci_resource_start(dev, 0)) { |
645 | printk(KERN_INFO "pd6729: refusing to load the driver " | 650 | dev_warn(&dev->dev, "refusing to load the driver as the " |
646 | "as the io_base is 0.\n"); | 651 | "io_base is NULL.\n"); |
647 | goto err_out_free_mem; | 652 | goto err_out_free_mem; |
648 | } | 653 | } |
649 | 654 | ||
650 | printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge " | 655 | dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " |
651 | "at 0x%llx on irq %d\n", | 656 | "on irq %d\n", |
652 | (unsigned long long)pci_resource_start(dev, 0), dev->irq); | 657 | (unsigned long long)pci_resource_start(dev, 0), dev->irq); |
653 | /* | 658 | /* |
654 | * Since we have no memory BARs some firmware may not | 659 | * Since we have no memory BARs some firmware may not |
@@ -656,14 +661,14 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
656 | */ | 661 | */ |
657 | pci_read_config_byte(dev, PCI_COMMAND, &configbyte); | 662 | pci_read_config_byte(dev, PCI_COMMAND, &configbyte); |
658 | if (!(configbyte & PCI_COMMAND_MEMORY)) { | 663 | if (!(configbyte & PCI_COMMAND_MEMORY)) { |
659 | printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n"); | 664 | dev_dbg(&dev->dev, "pd6729: Enabling PCI_COMMAND_MEMORY.\n"); |
660 | configbyte |= PCI_COMMAND_MEMORY; | 665 | configbyte |= PCI_COMMAND_MEMORY; |
661 | pci_write_config_byte(dev, PCI_COMMAND, configbyte); | 666 | pci_write_config_byte(dev, PCI_COMMAND, configbyte); |
662 | } | 667 | } |
663 | 668 | ||
664 | ret = pci_request_regions(dev, "pd6729"); | 669 | ret = pci_request_regions(dev, "pd6729"); |
665 | if (ret) { | 670 | if (ret) { |
666 | printk(KERN_INFO "pd6729: pci request region failed.\n"); | 671 | dev_warn(&dev->dev, "pci request region failed.\n"); |
667 | goto err_out_disable; | 672 | goto err_out_disable; |
668 | } | 673 | } |
669 | 674 | ||
@@ -672,7 +677,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
672 | 677 | ||
673 | mask = pd6729_isa_scan(); | 678 | mask = pd6729_isa_scan(); |
674 | if (irq_mode == 0 && mask == 0) { | 679 | if (irq_mode == 0 && mask == 0) { |
675 | printk(KERN_INFO "pd6729: no ISA interrupt is available.\n"); | 680 | dev_warn(&dev->dev, "no ISA interrupt is available.\n"); |
676 | goto err_out_free_res; | 681 | goto err_out_free_res; |
677 | } | 682 | } |
678 | 683 | ||
@@ -697,8 +702,8 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
697 | /* Register the interrupt handler */ | 702 | /* Register the interrupt handler */ |
698 | if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED, | 703 | if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED, |
699 | "pd6729", socket))) { | 704 | "pd6729", socket))) { |
700 | printk(KERN_ERR "pd6729: Failed to register irq %d, " | 705 | dev_err(&dev->dev, "Failed to register irq %d\n", |
701 | "aborting\n", dev->irq); | 706 | dev->irq); |
702 | goto err_out_free_res; | 707 | goto err_out_free_res; |
703 | } | 708 | } |
704 | } else { | 709 | } else { |
@@ -713,8 +718,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, | |||
713 | for (i = 0; i < MAX_SOCKETS; i++) { | 718 | for (i = 0; i < MAX_SOCKETS; i++) { |
714 | ret = pcmcia_register_socket(&socket[i].socket); | 719 | ret = pcmcia_register_socket(&socket[i].socket); |
715 | if (ret) { | 720 | if (ret) { |
716 | printk(KERN_INFO "pd6729: pcmcia_register_socket " | 721 | dev_warn(&dev->dev, "pcmcia_register_socket failed.\n"); |
717 | "failed.\n"); | ||
718 | for (j = 0; j < i ; j++) | 722 | for (j = 0; j < i ; j++) |
719 | pcmcia_unregister_socket(&socket[j].socket); | 723 | pcmcia_unregister_socket(&socket[j].socket); |
720 | goto err_out_free_res2; | 724 | goto err_out_free_res2; |
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h index f392e458cdfd..41418d394c55 100644 --- a/drivers/pcmcia/pd6729.h +++ b/drivers/pcmcia/pd6729.h | |||
@@ -1,13 +1,6 @@ | |||
1 | #ifndef _INCLUDE_GUARD_PD6729_H_ | 1 | #ifndef _INCLUDE_GUARD_PD6729_H_ |
2 | #define _INCLUDE_GUARD_PD6729_H_ | 2 | #define _INCLUDE_GUARD_PD6729_H_ |
3 | 3 | ||
4 | /* Debuging defines */ | ||
5 | #ifdef NOTRACE | ||
6 | #define dprintk(fmt, args...) printk(fmt , ## args) | ||
7 | #else | ||
8 | #define dprintk(fmt, args...) do {} while (0) | ||
9 | #endif | ||
10 | |||
11 | /* Flags for I365_GENCTL */ | 4 | /* Flags for I365_GENCTL */ |
12 | #define I365_DF_VS1 0x40 /* DF-step Voltage Sense */ | 5 | #define I365_DF_VS1 0x40 /* DF-step Voltage Sense */ |
13 | #define I365_DF_VS2 0x80 | 6 | #define I365_DF_VS2 0x80 |
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 0e35acb1366b..84dde7768ad5 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -228,9 +228,43 @@ static const char *skt_names[] = { | |||
228 | #define SKT_DEV_INFO_SIZE(n) \ | 228 | #define SKT_DEV_INFO_SIZE(n) \ |
229 | (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) | 229 | (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) |
230 | 230 | ||
231 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt) | ||
232 | { | ||
233 | skt->res_skt.start = _PCMCIA(skt->nr); | ||
234 | skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; | ||
235 | skt->res_skt.name = skt_names[skt->nr]; | ||
236 | skt->res_skt.flags = IORESOURCE_MEM; | ||
237 | |||
238 | skt->res_io.start = _PCMCIAIO(skt->nr); | ||
239 | skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1; | ||
240 | skt->res_io.name = "io"; | ||
241 | skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
242 | |||
243 | skt->res_mem.start = _PCMCIAMem(skt->nr); | ||
244 | skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; | ||
245 | skt->res_mem.name = "memory"; | ||
246 | skt->res_mem.flags = IORESOURCE_MEM; | ||
247 | |||
248 | skt->res_attr.start = _PCMCIAAttr(skt->nr); | ||
249 | skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1; | ||
250 | skt->res_attr.name = "attribute"; | ||
251 | skt->res_attr.flags = IORESOURCE_MEM; | ||
252 | |||
253 | return soc_pcmcia_add_one(skt); | ||
254 | } | ||
255 | |||
256 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops) | ||
257 | { | ||
258 | /* Provide our PXA2xx specific timing routines. */ | ||
259 | ops->set_timing = pxa2xx_pcmcia_set_timing; | ||
260 | #ifdef CONFIG_CPU_FREQ | ||
261 | ops->frequency_change = pxa2xx_pcmcia_frequency_change; | ||
262 | #endif | ||
263 | } | ||
264 | |||
231 | int __pxa2xx_drv_pcmcia_probe(struct device *dev) | 265 | int __pxa2xx_drv_pcmcia_probe(struct device *dev) |
232 | { | 266 | { |
233 | int i, ret; | 267 | int i, ret = 0; |
234 | struct pcmcia_low_level *ops; | 268 | struct pcmcia_low_level *ops; |
235 | struct skt_dev_info *sinfo; | 269 | struct skt_dev_info *sinfo; |
236 | struct soc_pcmcia_socket *skt; | 270 | struct soc_pcmcia_socket *skt; |
@@ -240,6 +274,8 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev) | |||
240 | 274 | ||
241 | ops = (struct pcmcia_low_level *)dev->platform_data; | 275 | ops = (struct pcmcia_low_level *)dev->platform_data; |
242 | 276 | ||
277 | pxa2xx_drv_pcmcia_ops(ops); | ||
278 | |||
243 | sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL); | 279 | sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL); |
244 | if (!sinfo) | 280 | if (!sinfo) |
245 | return -ENOMEM; | 281 | return -ENOMEM; |
@@ -250,40 +286,25 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev) | |||
250 | for (i = 0; i < ops->nr; i++) { | 286 | for (i = 0; i < ops->nr; i++) { |
251 | skt = &sinfo->skt[i]; | 287 | skt = &sinfo->skt[i]; |
252 | 288 | ||
253 | skt->nr = ops->first + i; | 289 | skt->nr = ops->first + i; |
254 | skt->irq = NO_IRQ; | 290 | skt->ops = ops; |
255 | 291 | skt->socket.owner = ops->owner; | |
256 | skt->res_skt.start = _PCMCIA(skt->nr); | 292 | skt->socket.dev.parent = dev; |
257 | skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; | 293 | skt->socket.pci_irq = NO_IRQ; |
258 | skt->res_skt.name = skt_names[skt->nr]; | ||
259 | skt->res_skt.flags = IORESOURCE_MEM; | ||
260 | |||
261 | skt->res_io.start = _PCMCIAIO(skt->nr); | ||
262 | skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1; | ||
263 | skt->res_io.name = "io"; | ||
264 | skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
265 | 294 | ||
266 | skt->res_mem.start = _PCMCIAMem(skt->nr); | 295 | ret = pxa2xx_drv_pcmcia_add_one(skt); |
267 | skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; | 296 | if (ret) |
268 | skt->res_mem.name = "memory"; | 297 | break; |
269 | skt->res_mem.flags = IORESOURCE_MEM; | ||
270 | |||
271 | skt->res_attr.start = _PCMCIAAttr(skt->nr); | ||
272 | skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1; | ||
273 | skt->res_attr.name = "attribute"; | ||
274 | skt->res_attr.flags = IORESOURCE_MEM; | ||
275 | } | 298 | } |
276 | 299 | ||
277 | /* Provide our PXA2xx specific timing routines. */ | 300 | if (ret) { |
278 | ops->set_timing = pxa2xx_pcmcia_set_timing; | 301 | while (--i >= 0) |
279 | #ifdef CONFIG_CPU_FREQ | 302 | soc_pcmcia_remove_one(&sinfo->skt[i]); |
280 | ops->frequency_change = pxa2xx_pcmcia_frequency_change; | 303 | kfree(sinfo); |
281 | #endif | 304 | } else { |
282 | |||
283 | ret = soc_common_drv_pcmcia_probe(dev, ops, sinfo); | ||
284 | |||
285 | if (!ret) | ||
286 | pxa2xx_configure_sockets(dev); | 305 | pxa2xx_configure_sockets(dev); |
306 | dev_set_drvdata(dev, sinfo); | ||
307 | } | ||
287 | 308 | ||
288 | return ret; | 309 | return ret; |
289 | } | 310 | } |
@@ -297,7 +318,16 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev) | |||
297 | 318 | ||
298 | static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) | 319 | static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) |
299 | { | 320 | { |
300 | return soc_common_drv_pcmcia_remove(&dev->dev); | 321 | struct skt_dev_info *sinfo = platform_get_drvdata(dev); |
322 | int i; | ||
323 | |||
324 | platform_set_drvdata(dev, NULL); | ||
325 | |||
326 | for (i = 0; i < sinfo->nskt; i++) | ||
327 | soc_pcmcia_remove_one(&sinfo->skt[i]); | ||
328 | |||
329 | kfree(sinfo); | ||
330 | return 0; | ||
301 | } | 331 | } |
302 | 332 | ||
303 | static int pxa2xx_drv_pcmcia_suspend(struct device *dev) | 333 | static int pxa2xx_drv_pcmcia_suspend(struct device *dev) |
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h index 235d681652c3..cb5efaec886f 100644 --- a/drivers/pcmcia/pxa2xx_base.h +++ b/drivers/pcmcia/pxa2xx_base.h | |||
@@ -1,3 +1,6 @@ | |||
1 | /* temporary measure */ | 1 | /* temporary measure */ |
2 | extern int __pxa2xx_drv_pcmcia_probe(struct device *); | 2 | extern int __pxa2xx_drv_pcmcia_probe(struct device *); |
3 | 3 | ||
4 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); | ||
5 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); | ||
6 | |||
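These two helpers split what __pxa2xx_drv_pcmcia_probe() used to do inline: pxa2xx_drv_pcmcia_ops() fills in the PXA2xx timing (and, with CONFIG_CPU_FREQ, frequency-change) hooks, and pxa2xx_drv_pcmcia_add_one() sets up a socket's resources and registers it. SA-1111-based boards such as Lubbock (see its change below) now combine them with sa1111_pcmcia_add(). A sketch of that pattern for a hypothetical board follows; my_board_configure_socket() and pcmcia_my_board_init() are invented names, and the sa1111_pcmcia_add() signature is taken from the Lubbock hunk in this series.

#include <linux/module.h>
#include <pcmcia/ss.h>
#include <asm/hardware/sa1111.h>

#include "soc_common.h"
#include "pxa2xx_base.h"
#include "sa1111_generic.h"

/* board-specific power/voltage control would go here */
static int my_board_configure_socket(struct soc_pcmcia_socket *skt,
				     const socket_state_t *state)
{
	return 0;
}

static struct pcmcia_low_level my_board_pcmcia_ops = {
	.owner			= THIS_MODULE,
	.configure_socket	= my_board_configure_socket,
	.socket_init		= sa1111_pcmcia_socket_init,
	.first			= 0,
	.nr			= 2,
};

int pcmcia_my_board_init(struct sa1111_dev *sadev)
{
	/* add the PXA2xx timing/cpufreq hooks to the board's low-level ops */
	pxa2xx_drv_pcmcia_ops(&my_board_pcmcia_ops);

	/* register both sockets through the PXA2xx per-socket helper */
	return sa1111_pcmcia_add(sadev, &my_board_pcmcia_ops,
				 pxa2xx_drv_pcmcia_add_one);
}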
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c index 5143a760153b..05913d0bbdbe 100644 --- a/drivers/pcmcia/pxa2xx_cm_x255.c +++ b/drivers/pcmcia/pxa2xx_cm_x255.c | |||
@@ -44,7 +44,7 @@ static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
44 | return ret; | 44 | return ret; |
45 | gpio_direction_output(GPIO_PCMCIA_RESET, 0); | 45 | gpio_direction_output(GPIO_PCMCIA_RESET, 0); |
46 | 46 | ||
47 | skt->irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT; | 47 | skt->socket.pci_irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT; |
48 | ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 48 | ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
49 | if (!ret) | 49 | if (!ret) |
50 | gpio_free(GPIO_PCMCIA_RESET); | 50 | gpio_free(GPIO_PCMCIA_RESET); |
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c index a7b943d01e34..5662646b84da 100644 --- a/drivers/pcmcia/pxa2xx_cm_x270.c +++ b/drivers/pcmcia/pxa2xx_cm_x270.c | |||
@@ -38,7 +38,7 @@ static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
38 | return ret; | 38 | return ret; |
39 | gpio_direction_output(GPIO_PCMCIA_RESET, 0); | 39 | gpio_direction_output(GPIO_PCMCIA_RESET, 0); |
40 | 40 | ||
41 | skt->irq = PCMCIA_S0_RDYINT; | 41 | skt->socket.pci_irq = PCMCIA_S0_RDYINT; |
42 | ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 42 | ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
43 | if (!ret) | 43 | if (!ret) |
44 | gpio_free(GPIO_PCMCIA_RESET); | 44 | gpio_free(GPIO_PCMCIA_RESET); |
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c index d09c0dc4a31a..8bfbd4dca131 100644 --- a/drivers/pcmcia/pxa2xx_e740.c +++ b/drivers/pcmcia/pxa2xx_e740.c | |||
@@ -38,7 +38,7 @@ static struct pcmcia_irqs cd_irqs[] = { | |||
38 | 38 | ||
39 | static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 39 | static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
40 | { | 40 | { |
41 | skt->irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) : | 41 | skt->socket.pci_irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) : |
42 | IRQ_GPIO(GPIO_E740_PCMCIA_RDY1); | 42 | IRQ_GPIO(GPIO_E740_PCMCIA_RDY1); |
43 | 43 | ||
44 | return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1); | 44 | return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1); |
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c index 6cbb1b1f7cfd..b9f8c8fb42bd 100644 --- a/drivers/pcmcia/pxa2xx_lubbock.c +++ b/drivers/pcmcia/pxa2xx_lubbock.c | |||
@@ -32,6 +32,7 @@ static int | |||
32 | lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | 32 | lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, |
33 | const socket_state_t *state) | 33 | const socket_state_t *state) |
34 | { | 34 | { |
35 | struct sa1111_pcmcia_socket *s = to_skt(skt); | ||
35 | unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set; | 36 | unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set; |
36 | int ret = 0; | 37 | int ret = 0; |
37 | 38 | ||
@@ -149,7 +150,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | |||
149 | 150 | ||
150 | if (ret == 0) { | 151 | if (ret == 0) { |
151 | lubbock_set_misc_wr(misc_mask, misc_set); | 152 | lubbock_set_misc_wr(misc_mask, misc_set); |
152 | sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); | 153 | sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); |
153 | } | 154 | } |
154 | 155 | ||
155 | #if 1 | 156 | #if 1 |
@@ -175,7 +176,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | |||
175 | * Switch to 5V, Configure socket with 5V voltage | 176 | * Switch to 5V, Configure socket with 5V voltage |
176 | */ | 177 | */ |
177 | lubbock_set_misc_wr(misc_mask, 0); | 178 | lubbock_set_misc_wr(misc_mask, 0); |
178 | sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, 0); | 179 | sa1111_set_io(s->dev, pa_dwr_mask, 0); |
179 | 180 | ||
180 | /* | 181 | /* |
181 | * It takes about 100ms to turn off Vcc. | 182 | * It takes about 100ms to turn off Vcc. |
@@ -200,12 +201,8 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | |||
200 | 201 | ||
201 | static struct pcmcia_low_level lubbock_pcmcia_ops = { | 202 | static struct pcmcia_low_level lubbock_pcmcia_ops = { |
202 | .owner = THIS_MODULE, | 203 | .owner = THIS_MODULE, |
203 | .hw_init = sa1111_pcmcia_hw_init, | ||
204 | .hw_shutdown = sa1111_pcmcia_hw_shutdown, | ||
205 | .socket_state = sa1111_pcmcia_socket_state, | ||
206 | .configure_socket = lubbock_pcmcia_configure_socket, | 204 | .configure_socket = lubbock_pcmcia_configure_socket, |
207 | .socket_init = sa1111_pcmcia_socket_init, | 205 | .socket_init = sa1111_pcmcia_socket_init, |
208 | .socket_suspend = sa1111_pcmcia_socket_suspend, | ||
209 | .first = 0, | 206 | .first = 0, |
210 | .nr = 2, | 207 | .nr = 2, |
211 | }; | 208 | }; |
@@ -228,8 +225,9 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev) | |||
228 | /* Set CF Socket 1 power to standby mode. */ | 225 | /* Set CF Socket 1 power to standby mode. */ |
229 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); | 226 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); |
230 | 227 | ||
231 | sadev->dev.platform_data = &lubbock_pcmcia_ops; | 228 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); |
232 | ret = __pxa2xx_drv_pcmcia_probe(&sadev->dev); | 229 | ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, |
230 | pxa2xx_drv_pcmcia_add_one); | ||
233 | } | 231 | } |
234 | 232 | ||
235 | return ret; | 233 | return ret; |
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c index 1138551ba8f6..92016fe932b4 100644 --- a/drivers/pcmcia/pxa2xx_mainstone.c +++ b/drivers/pcmcia/pxa2xx_mainstone.c | |||
@@ -44,7 +44,7 @@ static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
44 | * before we enable them as outputs. | 44 | * before we enable them as outputs. |
45 | */ | 45 | */ |
46 | 46 | ||
47 | skt->irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ; | 47 | skt->socket.pci_irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ; |
48 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 48 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
49 | } | 49 | } |
50 | 50 | ||
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c index 5ba9b3664a00..6fb6f7f0672e 100644 --- a/drivers/pcmcia/pxa2xx_palmld.c +++ b/drivers/pcmcia/pxa2xx_palmld.c | |||
@@ -45,7 +45,7 @@ static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
45 | if (ret) | 45 | if (ret) |
46 | goto err4; | 46 | goto err4; |
47 | 47 | ||
48 | skt->irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); | 48 | skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); |
49 | return 0; | 49 | return 0; |
50 | 50 | ||
51 | err4: | 51 | err4: |
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c index e07b5c51ec5b..b07b247a399f 100644 --- a/drivers/pcmcia/pxa2xx_palmtx.c +++ b/drivers/pcmcia/pxa2xx_palmtx.c | |||
@@ -53,7 +53,7 @@ static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
53 | if (ret) | 53 | if (ret) |
54 | goto err5; | 54 | goto err5; |
55 | 55 | ||
56 | skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); | 56 | skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); |
57 | return 0; | 57 | return 0; |
58 | 58 | ||
59 | err5: | 59 | err5: |
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index bc43f78f6f0b..0ea3b29440e6 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c | |||
@@ -66,7 +66,7 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
66 | } | 66 | } |
67 | } | 67 | } |
68 | 68 | ||
69 | skt->irq = SCOOP_DEV[skt->nr].irq; | 69 | skt->socket.pci_irq = SCOOP_DEV[skt->nr].irq; |
70 | 70 | ||
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c index e0e5cb339b4a..b7e596620db1 100644 --- a/drivers/pcmcia/pxa2xx_trizeps4.c +++ b/drivers/pcmcia/pxa2xx_trizeps4.c | |||
@@ -53,7 +53,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
53 | gpio_free(GPIO_PRDY); | 53 | gpio_free(GPIO_PRDY); |
54 | return -EINVAL; | 54 | return -EINVAL; |
55 | } | 55 | } |
56 | skt->irq = IRQ_GPIO(GPIO_PRDY); | 56 | skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY); |
57 | break; | 57 | break; |
58 | 58 | ||
59 | #ifndef CONFIG_MACH_TRIZEPS_CONXS | 59 | #ifndef CONFIG_MACH_TRIZEPS_CONXS |
@@ -63,7 +63,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
63 | break; | 63 | break; |
64 | } | 64 | } |
65 | /* release the reset of this card */ | 65 | /* release the reset of this card */ |
66 | pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->irq); | 66 | pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->socket.pci_irq); |
67 | 67 | ||
68 | /* supplementary irqs for the socket */ | 68 | /* supplementary irqs for the socket */ |
69 | for (i = 0; i < ARRAY_SIZE(irqs); i++) { | 69 | for (i = 0; i < ARRAY_SIZE(irqs); i++) { |
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c index 17871360fe99..27be2e154df2 100644 --- a/drivers/pcmcia/pxa2xx_viper.c +++ b/drivers/pcmcia/pxa2xx_viper.c | |||
@@ -40,7 +40,7 @@ static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
40 | { | 40 | { |
41 | unsigned long flags; | 41 | unsigned long flags; |
42 | 42 | ||
43 | skt->irq = gpio_to_irq(VIPER_CF_RDY_GPIO); | 43 | skt->socket.pci_irq = gpio_to_irq(VIPER_CF_RDY_GPIO); |
44 | 44 | ||
45 | if (gpio_request(VIPER_CF_CD_GPIO, "CF detect")) | 45 | if (gpio_request(VIPER_CF_CD_GPIO, "CF detect")) |
46 | goto err_request_cd; | 46 | goto err_request_cd; |
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c index e592e0e0d7ed..de0e770ce6a3 100644 --- a/drivers/pcmcia/rsrc_mgr.c +++ b/drivers/pcmcia/rsrc_mgr.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <pcmcia/cs_types.h> | 18 | #include <pcmcia/cs_types.h> |
19 | #include <pcmcia/ss.h> | 19 | #include <pcmcia/ss.h> |
20 | #include <pcmcia/cs.h> | 20 | #include <pcmcia/cs.h> |
21 | #include <pcmcia/cistpl.h> | ||
21 | #include "cs_internal.h" | 22 | #include "cs_internal.h" |
22 | 23 | ||
23 | 24 | ||
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c index ac8aa09ba0da..fd013a1ef47a 100644 --- a/drivers/pcmcia/sa1100_assabet.c +++ b/drivers/pcmcia/sa1100_assabet.c | |||
@@ -27,7 +27,7 @@ static struct pcmcia_irqs irqs[] = { | |||
27 | 27 | ||
28 | static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 28 | static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
29 | { | 29 | { |
30 | skt->irq = ASSABET_IRQ_GPIO_CF_IRQ; | 30 | skt->socket.pci_irq = ASSABET_IRQ_GPIO_CF_IRQ; |
31 | 31 | ||
32 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 32 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
33 | } | 33 | } |
diff --git a/drivers/pcmcia/sa1100_badge4.c b/drivers/pcmcia/sa1100_badge4.c index 1ca9737ea79e..1ce53f493bef 100644 --- a/drivers/pcmcia/sa1100_badge4.c +++ b/drivers/pcmcia/sa1100_badge4.c | |||
@@ -127,13 +127,10 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state | |||
127 | 127 | ||
128 | static struct pcmcia_low_level badge4_pcmcia_ops = { | 128 | static struct pcmcia_low_level badge4_pcmcia_ops = { |
129 | .owner = THIS_MODULE, | 129 | .owner = THIS_MODULE, |
130 | .hw_init = sa1111_pcmcia_hw_init, | ||
131 | .hw_shutdown = sa1111_pcmcia_hw_shutdown, | ||
132 | .socket_state = sa1111_pcmcia_socket_state, | ||
133 | .configure_socket = badge4_pcmcia_configure_socket, | 130 | .configure_socket = badge4_pcmcia_configure_socket, |
134 | |||
135 | .socket_init = sa1111_pcmcia_socket_init, | 131 | .socket_init = sa1111_pcmcia_socket_init, |
136 | .socket_suspend = sa1111_pcmcia_socket_suspend, | 132 | .first = 0, |
133 | .nr = 2, | ||
137 | }; | 134 | }; |
138 | 135 | ||
139 | int pcmcia_badge4_init(struct device *dev) | 136 | int pcmcia_badge4_init(struct device *dev) |
@@ -146,7 +143,9 @@ int pcmcia_badge4_init(struct device *dev) | |||
146 | __func__, | 143 | __func__, |
147 | badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); | 144 | badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); |
148 | 145 | ||
149 | ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2); | 146 | sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops); |
147 | ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops, | ||
148 | sa11xx_drv_pcmcia_add_one); | ||
150 | } | 149 | } |
151 | 150 | ||
152 | return ret; | 151 | return ret; |
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c index 63e6bc431a0d..9bf088b17275 100644 --- a/drivers/pcmcia/sa1100_cerf.c +++ b/drivers/pcmcia/sa1100_cerf.c | |||
@@ -27,7 +27,7 @@ static struct pcmcia_irqs irqs[] = { | |||
27 | 27 | ||
28 | static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 28 | static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
29 | { | 29 | { |
30 | skt->irq = CERF_IRQ_GPIO_CF_IRQ; | 30 | skt->socket.pci_irq = CERF_IRQ_GPIO_CF_IRQ; |
31 | 31 | ||
32 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 32 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
33 | } | 33 | } |
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index 2d0e99751530..8db86b90c200 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c | |||
@@ -51,7 +51,7 @@ static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = { | |||
51 | #ifdef CONFIG_SA1100_CERF | 51 | #ifdef CONFIG_SA1100_CERF |
52 | pcmcia_cerf_init, | 52 | pcmcia_cerf_init, |
53 | #endif | 53 | #endif |
54 | #ifdef CONFIG_SA1100_H3600 | 54 | #if defined(CONFIG_SA1100_H3100) || defined(CONFIG_SA1100_H3600) |
55 | pcmcia_h3600_init, | 55 | pcmcia_h3600_init, |
56 | #endif | 56 | #endif |
57 | #ifdef CONFIG_SA1100_SHANNON | 57 | #ifdef CONFIG_SA1100_SHANNON |
@@ -83,7 +83,16 @@ static int sa11x0_drv_pcmcia_probe(struct platform_device *dev) | |||
83 | 83 | ||
84 | static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) | 84 | static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) |
85 | { | 85 | { |
86 | return soc_common_drv_pcmcia_remove(&dev->dev); | 86 | struct skt_dev_info *sinfo = platform_get_drvdata(dev); |
87 | int i; | ||
88 | |||
89 | platform_set_drvdata(dev, NULL); | ||
90 | |||
91 | for (i = 0; i < sinfo->nskt; i++) | ||
92 | soc_pcmcia_remove_one(&sinfo->skt[i]); | ||
93 | |||
94 | kfree(sinfo); | ||
95 | return 0; | ||
87 | } | 96 | } |
88 | 97 | ||
89 | static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, | 98 | static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, |
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c index 0cc3748f3758..56329ad575a9 100644 --- a/drivers/pcmcia/sa1100_h3600.c +++ b/drivers/pcmcia/sa1100_h3600.c | |||
@@ -10,47 +10,139 @@ | |||
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/gpio.h> | ||
13 | 14 | ||
14 | #include <mach/hardware.h> | 15 | #include <mach/hardware.h> |
15 | #include <asm/irq.h> | 16 | #include <asm/irq.h> |
16 | #include <asm/mach-types.h> | 17 | #include <asm/mach-types.h> |
17 | #include <mach/h3600.h> | 18 | #include <mach/h3xxx.h> |
18 | 19 | ||
19 | #include "sa1100_generic.h" | 20 | #include "sa1100_generic.h" |
20 | 21 | ||
21 | static struct pcmcia_irqs irqs[] = { | 22 | static struct pcmcia_irqs irqs[] = { |
22 | { 0, IRQ_GPIO_H3600_PCMCIA_CD0, "PCMCIA CD0" }, | 23 | { .sock = 0, .str = "PCMCIA CD0" }, /* .irq will be filled later */ |
23 | { 1, IRQ_GPIO_H3600_PCMCIA_CD1, "PCMCIA CD1" } | 24 | { .sock = 1, .str = "PCMCIA CD1" } |
24 | }; | 25 | }; |
25 | 26 | ||
26 | static int h3600_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 27 | static int h3600_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
27 | { | 28 | { |
28 | skt->irq = skt->nr ? IRQ_GPIO_H3600_PCMCIA_IRQ1 | 29 | int err; |
29 | : IRQ_GPIO_H3600_PCMCIA_IRQ0; | ||
30 | 30 | ||
31 | switch (skt->nr) { | ||
32 | case 0: | ||
33 | err = gpio_request(H3XXX_GPIO_PCMCIA_IRQ0, "PCMCIA IRQ0"); | ||
34 | if (err) | ||
35 | goto err00; | ||
36 | err = gpio_direction_input(H3XXX_GPIO_PCMCIA_IRQ0); | ||
37 | if (err) | ||
38 | goto err01; | ||
39 | skt->socket.pci_irq = gpio_to_irq(H3XXX_GPIO_PCMCIA_IRQ0); | ||
40 | |||
41 | err = gpio_request(H3XXX_GPIO_PCMCIA_CD0, "PCMCIA CD0"); | ||
42 | if (err) | ||
43 | goto err01; | ||
44 | err = gpio_direction_input(H3XXX_GPIO_PCMCIA_CD0); | ||
45 | if (err) | ||
46 | goto err02; | ||
47 | irqs[0].irq = gpio_to_irq(H3XXX_GPIO_PCMCIA_CD0); | ||
48 | |||
49 | err = gpio_request(H3XXX_EGPIO_OPT_NVRAM_ON, "OPT NVRAM ON"); | ||
50 | if (err) | ||
51 | goto err02; | ||
52 | err = gpio_direction_output(H3XXX_EGPIO_OPT_NVRAM_ON, 0); | ||
53 | if (err) | ||
54 | goto err03; | ||
55 | err = gpio_request(H3XXX_EGPIO_OPT_ON, "OPT ON"); | ||
56 | if (err) | ||
57 | goto err03; | ||
58 | err = gpio_direction_output(H3XXX_EGPIO_OPT_ON, 0); | ||
59 | if (err) | ||
60 | goto err04; | ||
61 | err = gpio_request(H3XXX_EGPIO_OPT_RESET, "OPT RESET"); | ||
62 | if (err) | ||
63 | goto err04; | ||
64 | err = gpio_direction_output(H3XXX_EGPIO_OPT_RESET, 0); | ||
65 | if (err) | ||
66 | goto err05; | ||
67 | err = gpio_request(H3XXX_EGPIO_CARD_RESET, "PCMCIA CARD RESET"); | ||
68 | if (err) | ||
69 | goto err05; | ||
70 | err = gpio_direction_output(H3XXX_EGPIO_CARD_RESET, 0); | ||
71 | if (err) | ||
72 | goto err06; | ||
73 | err = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | ||
74 | if (err) | ||
75 | goto err06; | ||
76 | break; | ||
77 | case 1: | ||
78 | err = gpio_request(H3XXX_GPIO_PCMCIA_IRQ1, "PCMCIA IRQ1"); | ||
79 | if (err) | ||
80 | goto err10; | ||
81 | err = gpio_direction_input(H3XXX_GPIO_PCMCIA_IRQ1); | ||
82 | if (err) | ||
83 | goto err11; | ||
84 | skt->socket.pci_irq = gpio_to_irq(H3XXX_GPIO_PCMCIA_IRQ1); | ||
85 | |||
86 | err = gpio_request(H3XXX_GPIO_PCMCIA_CD1, "PCMCIA CD1"); | ||
87 | if (err) | ||
88 | goto err11; | ||
89 | err = gpio_direction_input(H3XXX_GPIO_PCMCIA_CD1); | ||
90 | if (err) | ||
91 | goto err12; | ||
92 | irqs[1].irq = gpio_to_irq(H3XXX_GPIO_PCMCIA_CD1); | ||
93 | |||
94 | err = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | ||
95 | if (err) | ||
96 | goto err12; | ||
97 | break; | ||
98 | } | ||
99 | return 0; | ||
31 | 100 | ||
32 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 101 | err06: gpio_free(H3XXX_EGPIO_CARD_RESET); |
102 | err05: gpio_free(H3XXX_EGPIO_OPT_RESET); | ||
103 | err04: gpio_free(H3XXX_EGPIO_OPT_ON); | ||
104 | err03: gpio_free(H3XXX_EGPIO_OPT_NVRAM_ON); | ||
105 | err02: gpio_free(H3XXX_GPIO_PCMCIA_CD0); | ||
106 | err01: gpio_free(H3XXX_GPIO_PCMCIA_IRQ0); | ||
107 | err00: return err; | ||
108 | |||
109 | err12: gpio_free(H3XXX_GPIO_PCMCIA_CD0); | ||
110 | err11: gpio_free(H3XXX_GPIO_PCMCIA_IRQ0); | ||
111 | err10: return err; | ||
33 | } | 112 | } |
34 | 113 | ||
35 | static void h3600_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 114 | static void h3600_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
36 | { | 115 | { |
37 | soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 116 | soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
38 | 117 | ||
39 | /* Disable CF bus: */ | 118 | switch (skt->nr) { |
40 | assign_h3600_egpio(IPAQ_EGPIO_OPT_NVRAM_ON, 0); | 119 | case 0: |
41 | assign_h3600_egpio(IPAQ_EGPIO_OPT_ON, 0); | 120 | /* Disable CF bus: */ |
42 | assign_h3600_egpio(IPAQ_EGPIO_OPT_RESET, 1); | 121 | gpio_set_value(H3XXX_EGPIO_OPT_NVRAM_ON, 0); |
122 | gpio_set_value(H3XXX_EGPIO_OPT_ON, 0); | ||
123 | gpio_set_value(H3XXX_EGPIO_OPT_RESET, 1); | ||
124 | |||
125 | gpio_free(H3XXX_EGPIO_CARD_RESET); | ||
126 | gpio_free(H3XXX_EGPIO_OPT_RESET); | ||
127 | gpio_free(H3XXX_EGPIO_OPT_ON); | ||
128 | gpio_free(H3XXX_EGPIO_OPT_NVRAM_ON); | ||
129 | gpio_free(H3XXX_GPIO_PCMCIA_CD0); | ||
130 | gpio_free(H3XXX_GPIO_PCMCIA_IRQ0); | ||
131 | break; | ||
132 | case 1: | ||
133 | gpio_free(H3XXX_GPIO_PCMCIA_CD1); | ||
134 | gpio_free(H3XXX_GPIO_PCMCIA_IRQ1); | ||
135 | break; | ||
136 | } | ||
43 | } | 137 | } |
44 | 138 | ||
45 | static void | 139 | static void |
46 | h3600_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) | 140 | h3600_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) |
47 | { | 141 | { |
48 | unsigned long levels = GPLR; | ||
49 | |||
50 | switch (skt->nr) { | 142 | switch (skt->nr) { |
51 | case 0: | 143 | case 0: |
52 | state->detect = levels & GPIO_H3600_PCMCIA_CD0 ? 0 : 1; | 144 | state->detect = !gpio_get_value(H3XXX_GPIO_PCMCIA_CD0); |
53 | state->ready = levels & GPIO_H3600_PCMCIA_IRQ0 ? 1 : 0; | 145 | state->ready = !!gpio_get_value(H3XXX_GPIO_PCMCIA_IRQ0); |
54 | state->bvd1 = 0; | 146 | state->bvd1 = 0; |
55 | state->bvd2 = 0; | 147 | state->bvd2 = 0; |
56 | state->wrprot = 0; /* Not available on H3600. */ | 148 | state->wrprot = 0; /* Not available on H3600. */ |
@@ -59,8 +151,8 @@ h3600_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *st | |||
59 | break; | 151 | break; |
60 | 152 | ||
61 | case 1: | 153 | case 1: |
62 | state->detect = levels & GPIO_H3600_PCMCIA_CD1 ? 0 : 1; | 154 | state->detect = !gpio_get_value(H3XXX_GPIO_PCMCIA_CD1); |
63 | state->ready = levels & GPIO_H3600_PCMCIA_IRQ1 ? 1 : 0; | 155 | state->ready = !!gpio_get_value(H3XXX_GPIO_PCMCIA_IRQ1); |
64 | state->bvd1 = 0; | 156 | state->bvd1 = 0; |
65 | state->bvd2 = 0; | 157 | state->bvd2 = 0; |
66 | state->wrprot = 0; /* Not available on H3600. */ | 158 | state->wrprot = 0; /* Not available on H3600. */ |
@@ -79,7 +171,7 @@ h3600_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_ | |||
79 | return -1; | 171 | return -1; |
80 | } | 172 | } |
81 | 173 | ||
82 | assign_h3600_egpio(IPAQ_EGPIO_CARD_RESET, !!(state->flags & SS_RESET)); | 174 | gpio_set_value(H3XXX_EGPIO_CARD_RESET, !!(state->flags & SS_RESET)); |
83 | 175 | ||
84 | /* Silently ignore Vpp, output enable, speaker enable. */ | 176 | /* Silently ignore Vpp, output enable, speaker enable. */ |
85 | 177 | ||
@@ -89,9 +181,9 @@ h3600_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_ | |||
89 | static void h3600_pcmcia_socket_init(struct soc_pcmcia_socket *skt) | 181 | static void h3600_pcmcia_socket_init(struct soc_pcmcia_socket *skt) |
90 | { | 182 | { |
91 | /* Enable CF bus: */ | 183 | /* Enable CF bus: */ |
92 | assign_h3600_egpio(IPAQ_EGPIO_OPT_NVRAM_ON, 1); | 184 | gpio_set_value(H3XXX_EGPIO_OPT_NVRAM_ON, 1); |
93 | assign_h3600_egpio(IPAQ_EGPIO_OPT_ON, 1); | 185 | gpio_set_value(H3XXX_EGPIO_OPT_ON, 1); |
94 | assign_h3600_egpio(IPAQ_EGPIO_OPT_RESET, 0); | 186 | gpio_set_value(H3XXX_EGPIO_OPT_RESET, 0); |
95 | 187 | ||
96 | msleep(10); | 188 | msleep(10); |
97 | 189 | ||
@@ -109,10 +201,10 @@ static void h3600_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) | |||
109 | * socket 0 then socket 1. | 201 | * socket 0 then socket 1. |
110 | */ | 202 | */ |
111 | if (skt->nr == 1) { | 203 | if (skt->nr == 1) { |
112 | assign_h3600_egpio(IPAQ_EGPIO_OPT_ON, 0); | 204 | gpio_set_value(H3XXX_EGPIO_OPT_ON, 0); |
113 | assign_h3600_egpio(IPAQ_EGPIO_OPT_NVRAM_ON, 0); | 205 | gpio_set_value(H3XXX_EGPIO_OPT_NVRAM_ON, 0); |
114 | /* hmm, does this suck power? */ | 206 | /* hmm, does this suck power? */ |
115 | assign_h3600_egpio(IPAQ_EGPIO_OPT_RESET, 1); | 207 | gpio_set_value(H3XXX_EGPIO_OPT_RESET, 1); |
116 | } | 208 | } |
117 | } | 209 | } |
118 | 210 | ||
@@ -131,7 +223,7 @@ int __init pcmcia_h3600_init(struct device *dev) | |||
131 | { | 223 | { |
132 | int ret = -ENODEV; | 224 | int ret = -ENODEV; |
133 | 225 | ||
134 | if (machine_is_h3600()) | 226 | if (machine_is_h3600() || machine_is_h3100()) |
135 | ret = sa11xx_drv_pcmcia_probe(dev, &h3600_pcmcia_ops, 0, 2); | 227 | ret = sa11xx_drv_pcmcia_probe(dev, &h3600_pcmcia_ops, 0, 2); |
136 | 228 | ||
137 | return ret; | 229 | return ret; |
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c index 7eedb42f800c..6bcabee6bde4 100644 --- a/drivers/pcmcia/sa1100_jornada720.c +++ b/drivers/pcmcia/sa1100_jornada720.c | |||
@@ -22,25 +22,10 @@ | |||
22 | #define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3) | 22 | #define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3) |
23 | #define SOCKET1_3V GPIO_GPIO3 | 23 | #define SOCKET1_3V GPIO_GPIO3 |
24 | 24 | ||
25 | static int jornada720_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | ||
26 | { | ||
27 | unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; | ||
28 | |||
29 | /* | ||
30 | * What is all this crap for? | ||
31 | */ | ||
32 | GRER |= 0x00000002; | ||
33 | /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ | ||
34 | sa1111_set_io_dir(SA1111_DEV(skt->dev), pin, 0, 0); | ||
35 | sa1111_set_io(SA1111_DEV(skt->dev), pin, 0); | ||
36 | sa1111_set_sleep_io(SA1111_DEV(skt->dev), pin, 0); | ||
37 | |||
38 | return sa1111_pcmcia_hw_init(skt); | ||
39 | } | ||
40 | |||
41 | static int | 25 | static int |
42 | jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) | 26 | jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) |
43 | { | 27 | { |
28 | struct sa1111_pcmcia_socket *s = to_skt(skt); | ||
44 | unsigned int pa_dwr_mask, pa_dwr_set; | 29 | unsigned int pa_dwr_mask, pa_dwr_set; |
45 | int ret; | 30 | int ret; |
46 | 31 | ||
@@ -97,7 +82,7 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s | |||
97 | unsigned long flags; | 82 | unsigned long flags; |
98 | 83 | ||
99 | local_irq_save(flags); | 84 | local_irq_save(flags); |
100 | sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); | 85 | sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); |
101 | local_irq_restore(flags); | 86 | local_irq_restore(flags); |
102 | } | 87 | } |
103 | 88 | ||
@@ -106,21 +91,30 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s | |||
106 | 91 | ||
107 | static struct pcmcia_low_level jornada720_pcmcia_ops = { | 92 | static struct pcmcia_low_level jornada720_pcmcia_ops = { |
108 | .owner = THIS_MODULE, | 93 | .owner = THIS_MODULE, |
109 | .hw_init = jornada720_pcmcia_hw_init, | ||
110 | .hw_shutdown = sa1111_pcmcia_hw_shutdown, | ||
111 | .socket_state = sa1111_pcmcia_socket_state, | ||
112 | .configure_socket = jornada720_pcmcia_configure_socket, | 94 | .configure_socket = jornada720_pcmcia_configure_socket, |
113 | |||
114 | .socket_init = sa1111_pcmcia_socket_init, | 95 | .socket_init = sa1111_pcmcia_socket_init, |
115 | .socket_suspend = sa1111_pcmcia_socket_suspend, | 96 | .first = 0, |
97 | .nr = 2, | ||
116 | }; | 98 | }; |
117 | 99 | ||
118 | int __devinit pcmcia_jornada720_init(struct device *dev) | 100 | int __devinit pcmcia_jornada720_init(struct device *dev) |
119 | { | 101 | { |
120 | int ret = -ENODEV; | 102 | int ret = -ENODEV; |
121 | 103 | ||
122 | if (machine_is_jornada720()) | 104 | if (machine_is_jornada720()) { |
123 | ret = sa11xx_drv_pcmcia_probe(dev, &jornada720_pcmcia_ops, 0, 2); | 105 | unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; |
106 | |||
107 | GRER |= 0x00000002; | ||
108 | |||
109 | /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ | ||
110 | sa1111_set_io_dir(dev, pin, 0, 0); | ||
111 | sa1111_set_io(dev, pin, 0); | ||
112 | sa1111_set_sleep_io(dev, pin, 0); | ||
113 | |||
114 | sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops); | ||
115 | ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops, | ||
116 | sa11xx_drv_pcmcia_add_one); | ||
117 | } | ||
124 | 118 | ||
125 | return ret; | 119 | return ret; |
126 | } | 120 | } |
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c index 0c76d337815b..c95639b5f2a0 100644 --- a/drivers/pcmcia/sa1100_neponset.c +++ b/drivers/pcmcia/sa1100_neponset.c | |||
@@ -43,6 +43,7 @@ | |||
43 | static int | 43 | static int |
44 | neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) | 44 | neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) |
45 | { | 45 | { |
46 | struct sa1111_pcmcia_socket *s = to_skt(skt); | ||
46 | unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set; | 47 | unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set; |
47 | int ret; | 48 | int ret; |
48 | 49 | ||
@@ -99,7 +100,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta | |||
99 | NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set; | 100 | NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set; |
100 | 101 | ||
101 | local_irq_restore(flags); | 102 | local_irq_restore(flags); |
102 | sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); | 103 | sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); |
103 | } | 104 | } |
104 | 105 | ||
105 | return 0; | 106 | return 0; |
@@ -115,12 +116,10 @@ static void neponset_pcmcia_socket_init(struct soc_pcmcia_socket *skt) | |||
115 | 116 | ||
116 | static struct pcmcia_low_level neponset_pcmcia_ops = { | 117 | static struct pcmcia_low_level neponset_pcmcia_ops = { |
117 | .owner = THIS_MODULE, | 118 | .owner = THIS_MODULE, |
118 | .hw_init = sa1111_pcmcia_hw_init, | ||
119 | .hw_shutdown = sa1111_pcmcia_hw_shutdown, | ||
120 | .socket_state = sa1111_pcmcia_socket_state, | ||
121 | .configure_socket = neponset_pcmcia_configure_socket, | 119 | .configure_socket = neponset_pcmcia_configure_socket, |
122 | .socket_init = neponset_pcmcia_socket_init, | 120 | .socket_init = neponset_pcmcia_socket_init, |
123 | .socket_suspend = sa1111_pcmcia_socket_suspend, | 121 | .first = 0, |
122 | .nr = 2, | ||
124 | }; | 123 | }; |
125 | 124 | ||
126 | int pcmcia_neponset_init(struct sa1111_dev *sadev) | 125 | int pcmcia_neponset_init(struct sa1111_dev *sadev) |
@@ -135,7 +134,9 @@ int pcmcia_neponset_init(struct sa1111_dev *sadev) | |||
135 | sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); | 134 | sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); |
136 | sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); | 135 | sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); |
137 | sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); | 136 | sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); |
138 | ret = sa11xx_drv_pcmcia_probe(&sadev->dev, &neponset_pcmcia_ops, 0, 2); | 137 | sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops); |
138 | ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops, | ||
139 | sa11xx_drv_pcmcia_add_one); | ||
139 | } | 140 | } |
140 | 141 | ||
141 | return ret; | 142 | return ret; |
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c index 46d8c1977c2a..c4d51867a050 100644 --- a/drivers/pcmcia/sa1100_shannon.c +++ b/drivers/pcmcia/sa1100_shannon.c | |||
@@ -28,7 +28,7 @@ static int shannon_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
28 | GAFR &= ~(SHANNON_GPIO_EJECT_0 | SHANNON_GPIO_EJECT_1 | | 28 | GAFR &= ~(SHANNON_GPIO_EJECT_0 | SHANNON_GPIO_EJECT_1 | |
29 | SHANNON_GPIO_RDY_0 | SHANNON_GPIO_RDY_1); | 29 | SHANNON_GPIO_RDY_0 | SHANNON_GPIO_RDY_1); |
30 | 30 | ||
31 | skt->irq = skt->nr ? SHANNON_IRQ_GPIO_RDY_1 : SHANNON_IRQ_GPIO_RDY_0; | 31 | skt->socket.pci_irq = skt->nr ? SHANNON_IRQ_GPIO_RDY_1 : SHANNON_IRQ_GPIO_RDY_0; |
32 | 32 | ||
33 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 33 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
34 | } | 34 | } |
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c index 33a08ae09fdf..05bd504e6f18 100644 --- a/drivers/pcmcia/sa1100_simpad.c +++ b/drivers/pcmcia/sa1100_simpad.c | |||
@@ -28,7 +28,7 @@ static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
28 | 28 | ||
29 | clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); | 29 | clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); |
30 | 30 | ||
31 | skt->irq = IRQ_GPIO_CF_IRQ; | 31 | skt->socket.pci_irq = IRQ_GPIO_CF_IRQ; |
32 | 32 | ||
33 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 33 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
34 | } | 34 | } |
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c index 4be4e172ffa1..de6bc333d299 100644 --- a/drivers/pcmcia/sa1111_generic.c +++ b/drivers/pcmcia/sa1111_generic.c | |||
@@ -28,23 +28,20 @@ static struct pcmcia_irqs irqs[] = { | |||
28 | { 1, IRQ_S1_BVD1_STSCHG, "SA1111 CF BVD1" }, | 28 | { 1, IRQ_S1_BVD1_STSCHG, "SA1111 CF BVD1" }, |
29 | }; | 29 | }; |
30 | 30 | ||
31 | int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 31 | static int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
32 | { | 32 | { |
33 | if (skt->irq == NO_IRQ) | ||
34 | skt->irq = skt->nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT; | ||
35 | |||
36 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 33 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
37 | } | 34 | } |
38 | 35 | ||
39 | void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 36 | static void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
40 | { | 37 | { |
41 | soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 38 | soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
42 | } | 39 | } |
43 | 40 | ||
44 | void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) | 41 | void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) |
45 | { | 42 | { |
46 | struct sa1111_dev *sadev = SA1111_DEV(skt->dev); | 43 | struct sa1111_pcmcia_socket *s = to_skt(skt); |
47 | unsigned long status = sa1111_readl(sadev->mapbase + SA1111_PCSR); | 44 | unsigned long status = sa1111_readl(s->dev->mapbase + SA1111_PCSR); |
48 | 45 | ||
49 | switch (skt->nr) { | 46 | switch (skt->nr) { |
50 | case 0: | 47 | case 0: |
@@ -71,7 +68,7 @@ void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_sta | |||
71 | 68 | ||
72 | int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) | 69 | int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) |
73 | { | 70 | { |
74 | struct sa1111_dev *sadev = SA1111_DEV(skt->dev); | 71 | struct sa1111_pcmcia_socket *s = to_skt(skt); |
75 | unsigned int pccr_skt_mask, pccr_set_mask, val; | 72 | unsigned int pccr_skt_mask, pccr_set_mask, val; |
76 | unsigned long flags; | 73 | unsigned long flags; |
77 | 74 | ||
@@ -100,10 +97,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s | |||
100 | pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT; | 97 | pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT; |
101 | 98 | ||
102 | local_irq_save(flags); | 99 | local_irq_save(flags); |
103 | val = sa1111_readl(sadev->mapbase + SA1111_PCCR); | 100 | val = sa1111_readl(s->dev->mapbase + SA1111_PCCR); |
104 | val &= ~pccr_skt_mask; | 101 | val &= ~pccr_skt_mask; |
105 | val |= pccr_set_mask & pccr_skt_mask; | 102 | val |= pccr_set_mask & pccr_skt_mask; |
106 | sa1111_writel(val, sadev->mapbase + SA1111_PCCR); | 103 | sa1111_writel(val, s->dev->mapbase + SA1111_PCCR); |
107 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
108 | 105 | ||
109 | return 0; | 106 | return 0; |
@@ -114,15 +111,51 @@ void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *skt) | |||
114 | soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 111 | soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
115 | } | 112 | } |
116 | 113 | ||
117 | void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) | 114 | static void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) |
118 | { | 115 | { |
119 | soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 116 | soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
120 | } | 117 | } |
121 | 118 | ||
119 | int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops, | ||
120 | int (*add)(struct soc_pcmcia_socket *)) | ||
121 | { | ||
122 | struct sa1111_pcmcia_socket *s; | ||
123 | int i, ret = 0; | ||
124 | |||
125 | ops->hw_init = sa1111_pcmcia_hw_init; | ||
126 | ops->hw_shutdown = sa1111_pcmcia_hw_shutdown; | ||
127 | ops->socket_state = sa1111_pcmcia_socket_state; | ||
128 | ops->socket_suspend = sa1111_pcmcia_socket_suspend; | ||
129 | |||
130 | for (i = 0; i < ops->nr; i++) { | ||
131 | s = kzalloc(sizeof(*s), GFP_KERNEL); | ||
132 | if (!s) | ||
133 | return -ENOMEM; | ||
134 | |||
135 | s->soc.nr = ops->first + i; | ||
136 | s->soc.ops = ops; | ||
137 | s->soc.socket.owner = ops->owner; | ||
138 | s->soc.socket.dev.parent = &dev->dev; | ||
139 | s->soc.socket.pci_irq = s->soc.nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT; | ||
140 | s->dev = dev; | ||
141 | |||
142 | ret = add(&s->soc); | ||
143 | if (ret == 0) { | ||
144 | s->next = dev_get_drvdata(&dev->dev); | ||
145 | dev_set_drvdata(&dev->dev, s); | ||
146 | } else | ||
147 | kfree(s); | ||
148 | } | ||
149 | |||
150 | return ret; | ||
151 | } | ||
152 | |||
122 | static int pcmcia_probe(struct sa1111_dev *dev) | 153 | static int pcmcia_probe(struct sa1111_dev *dev) |
123 | { | 154 | { |
124 | void __iomem *base; | 155 | void __iomem *base; |
125 | 156 | ||
157 | dev_set_drvdata(&dev->dev, NULL); | ||
158 | |||
126 | if (!request_mem_region(dev->res.start, 512, | 159 | if (!request_mem_region(dev->res.start, 512, |
127 | SA1111_DRIVER_NAME(dev))) | 160 | SA1111_DRIVER_NAME(dev))) |
128 | return -EBUSY; | 161 | return -EBUSY; |
@@ -152,7 +185,15 @@ static int pcmcia_probe(struct sa1111_dev *dev) | |||
152 | 185 | ||
153 | static int __devexit pcmcia_remove(struct sa1111_dev *dev) | 186 | static int __devexit pcmcia_remove(struct sa1111_dev *dev) |
154 | { | 187 | { |
155 | soc_common_drv_pcmcia_remove(&dev->dev); | 188 | struct sa1111_pcmcia_socket *next, *s = dev_get_drvdata(&dev->dev); |
189 | |||
190 | dev_set_drvdata(&dev->dev, NULL); | ||
191 | |||
192 | for (; next = s->next, s; s = next) { | ||
193 | soc_pcmcia_remove_one(&s->soc); | ||
194 | kfree(s); | ||
195 | } | ||
196 | |||
156 | release_mem_region(dev->res.start, 512); | 197 | release_mem_region(dev->res.start, 512); |
157 | return 0; | 198 | return 0; |
158 | } | 199 | } |
diff --git a/drivers/pcmcia/sa1111_generic.h b/drivers/pcmcia/sa1111_generic.h index 10ced4a210d7..02dc8577cdaf 100644 --- a/drivers/pcmcia/sa1111_generic.h +++ b/drivers/pcmcia/sa1111_generic.h | |||
@@ -1,12 +1,23 @@ | |||
1 | #include "soc_common.h" | 1 | #include "soc_common.h" |
2 | #include "sa11xx_base.h" | 2 | #include "sa11xx_base.h" |
3 | 3 | ||
4 | extern int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *); | 4 | struct sa1111_pcmcia_socket { |
5 | extern void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *); | 5 | struct soc_pcmcia_socket soc; |
6 | struct sa1111_dev *dev; | ||
7 | struct sa1111_pcmcia_socket *next; | ||
8 | }; | ||
9 | |||
10 | static inline struct sa1111_pcmcia_socket *to_skt(struct soc_pcmcia_socket *s) | ||
11 | { | ||
12 | return container_of(s, struct sa1111_pcmcia_socket, soc); | ||
13 | } | ||
14 | |||
15 | int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops, | ||
16 | int (*add)(struct soc_pcmcia_socket *)); | ||
17 | |||
6 | extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *); | 18 | extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *); |
7 | extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *); | 19 | extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *); |
8 | extern void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *); | 20 | extern void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *); |
9 | extern void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *); | ||
10 | 21 | ||
11 | extern int pcmcia_badge4_init(struct device *); | 22 | extern int pcmcia_badge4_init(struct device *); |
12 | extern int pcmcia_jornada720_init(struct device *); | 23 | extern int pcmcia_jornada720_init(struct device *); |
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c index e15d59f2d8a9..fc9a6527019b 100644 --- a/drivers/pcmcia/sa11xx_base.c +++ b/drivers/pcmcia/sa11xx_base.c | |||
@@ -171,12 +171,58 @@ static const char *skt_names[] = { | |||
171 | #define SKT_DEV_INFO_SIZE(n) \ | 171 | #define SKT_DEV_INFO_SIZE(n) \ |
172 | (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) | 172 | (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) |
173 | 173 | ||
174 | int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt) | ||
175 | { | ||
176 | skt->res_skt.start = _PCMCIA(skt->nr); | ||
177 | skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; | ||
178 | skt->res_skt.name = skt_names[skt->nr]; | ||
179 | skt->res_skt.flags = IORESOURCE_MEM; | ||
180 | |||
181 | skt->res_io.start = _PCMCIAIO(skt->nr); | ||
182 | skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1; | ||
183 | skt->res_io.name = "io"; | ||
184 | skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
185 | |||
186 | skt->res_mem.start = _PCMCIAMem(skt->nr); | ||
187 | skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; | ||
188 | skt->res_mem.name = "memory"; | ||
189 | skt->res_mem.flags = IORESOURCE_MEM; | ||
190 | |||
191 | skt->res_attr.start = _PCMCIAAttr(skt->nr); | ||
192 | skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1; | ||
193 | skt->res_attr.name = "attribute"; | ||
194 | skt->res_attr.flags = IORESOURCE_MEM; | ||
195 | |||
196 | return soc_pcmcia_add_one(skt); | ||
197 | } | ||
198 | EXPORT_SYMBOL(sa11xx_drv_pcmcia_add_one); | ||
199 | |||
200 | void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops) | ||
201 | { | ||
202 | /* | ||
203 | * set default MECR calculation if the board specific | ||
204 | * code did not specify one... | ||
205 | */ | ||
206 | if (!ops->get_timing) | ||
207 | ops->get_timing = sa1100_pcmcia_default_mecr_timing; | ||
208 | |||
209 | /* Provide our SA11x0 specific timing routines. */ | ||
210 | ops->set_timing = sa1100_pcmcia_set_timing; | ||
211 | ops->show_timing = sa1100_pcmcia_show_timing; | ||
212 | #ifdef CONFIG_CPU_FREQ | ||
213 | ops->frequency_change = sa1100_pcmcia_frequency_change; | ||
214 | #endif | ||
215 | } | ||
216 | EXPORT_SYMBOL(sa11xx_drv_pcmcia_ops); | ||
217 | |||
174 | int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, | 218 | int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, |
175 | int first, int nr) | 219 | int first, int nr) |
176 | { | 220 | { |
177 | struct skt_dev_info *sinfo; | 221 | struct skt_dev_info *sinfo; |
178 | struct soc_pcmcia_socket *skt; | 222 | struct soc_pcmcia_socket *skt; |
179 | int i; | 223 | int i, ret = 0; |
224 | |||
225 | sa11xx_drv_pcmcia_ops(ops); | ||
180 | 226 | ||
181 | sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL); | 227 | sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL); |
182 | if (!sinfo) | 228 | if (!sinfo) |
@@ -188,45 +234,26 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, | |||
188 | for (i = 0; i < nr; i++) { | 234 | for (i = 0; i < nr; i++) { |
189 | skt = &sinfo->skt[i]; | 235 | skt = &sinfo->skt[i]; |
190 | 236 | ||
191 | skt->nr = first + i; | 237 | skt->nr = first + i; |
192 | skt->irq = NO_IRQ; | 238 | skt->ops = ops; |
193 | 239 | skt->socket.owner = ops->owner; | |
194 | skt->res_skt.start = _PCMCIA(skt->nr); | 240 | skt->socket.dev.parent = dev; |
195 | skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; | 241 | skt->socket.pci_irq = NO_IRQ; |
196 | skt->res_skt.name = skt_names[skt->nr]; | ||
197 | skt->res_skt.flags = IORESOURCE_MEM; | ||
198 | |||
199 | skt->res_io.start = _PCMCIAIO(skt->nr); | ||
200 | skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1; | ||
201 | skt->res_io.name = "io"; | ||
202 | skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
203 | 242 | ||
204 | skt->res_mem.start = _PCMCIAMem(skt->nr); | 243 | ret = sa11xx_drv_pcmcia_add_one(skt); |
205 | skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; | 244 | if (ret) |
206 | skt->res_mem.name = "memory"; | 245 | break; |
207 | skt->res_mem.flags = IORESOURCE_MEM; | ||
208 | |||
209 | skt->res_attr.start = _PCMCIAAttr(skt->nr); | ||
210 | skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1; | ||
211 | skt->res_attr.name = "attribute"; | ||
212 | skt->res_attr.flags = IORESOURCE_MEM; | ||
213 | } | 246 | } |
214 | 247 | ||
215 | /* | 248 | if (ret) { |
216 | * set default MECR calculation if the board specific | 249 | while (--i >= 0) |
217 | * code did not specify one... | 250 | soc_pcmcia_remove_one(&sinfo->skt[i]); |
218 | */ | 251 | kfree(sinfo); |
219 | if (!ops->get_timing) | 252 | } else { |
220 | ops->get_timing = sa1100_pcmcia_default_mecr_timing; | 253 | dev_set_drvdata(dev, sinfo); |
221 | 254 | } | |
222 | /* Provide our SA11x0 specific timing routines. */ | ||
223 | ops->set_timing = sa1100_pcmcia_set_timing; | ||
224 | ops->show_timing = sa1100_pcmcia_show_timing; | ||
225 | #ifdef CONFIG_CPU_FREQ | ||
226 | ops->frequency_change = sa1100_pcmcia_frequency_change; | ||
227 | #endif | ||
228 | 255 | ||
229 | return soc_common_drv_pcmcia_probe(dev, ops, sinfo); | 256 | return ret; |
230 | } | 257 | } |
231 | EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe); | 258 | EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe); |
232 | 259 | ||
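The hunks above split the old all-in-one sa11xx_drv_pcmcia_probe() into reusable pieces: sa11xx_drv_pcmcia_ops() installs the common SA11x0 timing hooks, sa11xx_drv_pcmcia_add_one() claims the per-socket resources, and SA1111-attached boards register through sa1111_pcmcia_add(), which supplies hw_init/hw_shutdown/socket_state/socket_suspend itself. As a rough, non-authoritative sketch of what a board file looks like under the new scheme (the "example" names, the trivial configure_socket handler and the two-socket assumption are illustrative, not taken from any hunk above):

#include <linux/module.h>
#include "sa1111_generic.h"	/* pulls in soc_common.h and sa11xx_base.h */

/* Hypothetical handler: a real board would drive its own power/reset
 * lines before falling through to the SA1111 register update. */
static int example_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
					   const socket_state_t *state)
{
	return sa1111_pcmcia_configure_socket(skt, state);
}

static struct pcmcia_low_level example_pcmcia_ops = {
	.owner			= THIS_MODULE,
	.configure_socket	= example_pcmcia_configure_socket,
	.socket_init		= sa1111_pcmcia_socket_init,
	.first			= 0,
	.nr			= 2,	/* assumed two-socket board */
};

int pcmcia_example_init(struct sa1111_dev *sadev)
{
	/* Install the generic SA11x0 timing ops, then create one
	 * soc_pcmcia_socket per socket via the add callback. */
	sa11xx_drv_pcmcia_ops(&example_pcmcia_ops);
	return sa1111_pcmcia_add(sadev, &example_pcmcia_ops,
				 sa11xx_drv_pcmcia_add_one);
}

Compared with the old flow, the board no longer passes a socket count to a monolithic probe; sa1111_pcmcia_add() allocates one sa1111_pcmcia_socket per socket and chains them on the device's drvdata, which is exactly what the reworked pcmcia_remove() above walks to tear them down.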
diff --git a/drivers/pcmcia/sa11xx_base.h b/drivers/pcmcia/sa11xx_base.h index 7bc208280527..3d76d720f463 100644 --- a/drivers/pcmcia/sa11xx_base.h +++ b/drivers/pcmcia/sa11xx_base.h | |||
@@ -118,6 +118,8 @@ static inline unsigned int sa1100_pcmcia_cmd_time(unsigned int cpu_clock_khz, | |||
118 | } | 118 | } |
119 | 119 | ||
120 | 120 | ||
121 | int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); | ||
122 | void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); | ||
121 | extern int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr); | 123 | extern int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr); |
122 | 124 | ||
123 | #endif /* !defined(_PCMCIA_SA1100_H) */ | 125 | #endif /* !defined(_PCMCIA_SA1100_H) */ |
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index ef7e9e58782b..6f1a86b43c60 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -144,10 +144,10 @@ soc_common_pcmcia_config_skt(struct soc_pcmcia_socket *skt, socket_state_t *stat | |||
144 | */ | 144 | */ |
145 | if (skt->irq_state != 1 && state->io_irq) { | 145 | if (skt->irq_state != 1 && state->io_irq) { |
146 | skt->irq_state = 1; | 146 | skt->irq_state = 1; |
147 | set_irq_type(skt->irq, IRQ_TYPE_EDGE_FALLING); | 147 | set_irq_type(skt->socket.pci_irq, IRQ_TYPE_EDGE_FALLING); |
148 | } else if (skt->irq_state == 1 && state->io_irq == 0) { | 148 | } else if (skt->irq_state == 1 && state->io_irq == 0) { |
149 | skt->irq_state = 0; | 149 | skt->irq_state = 0; |
150 | set_irq_type(skt->irq, IRQ_TYPE_NONE); | 150 | set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); |
151 | } | 151 | } |
152 | 152 | ||
153 | skt->cs_state = *state; | 153 | skt->cs_state = *state; |
@@ -492,7 +492,8 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr, ch | |||
492 | 492 | ||
493 | p+=sprintf(p, "Vcc : %d\n", skt->cs_state.Vcc); | 493 | p+=sprintf(p, "Vcc : %d\n", skt->cs_state.Vcc); |
494 | p+=sprintf(p, "Vpp : %d\n", skt->cs_state.Vpp); | 494 | p+=sprintf(p, "Vpp : %d\n", skt->cs_state.Vpp); |
495 | p+=sprintf(p, "IRQ : %d (%d)\n", skt->cs_state.io_irq, skt->irq); | 495 | p+=sprintf(p, "IRQ : %d (%d)\n", skt->cs_state.io_irq, |
496 | skt->socket.pci_irq); | ||
496 | if (skt->ops->show_timing) | 497 | if (skt->ops->show_timing) |
497 | p+=skt->ops->show_timing(skt, p); | 498 | p+=skt->ops->show_timing(skt, p); |
498 | 499 | ||
@@ -574,7 +575,7 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, | |||
574 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); | 575 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); |
575 | 576 | ||
576 | 577 | ||
577 | LIST_HEAD(soc_pcmcia_sockets); | 578 | static LIST_HEAD(soc_pcmcia_sockets); |
578 | static DEFINE_MUTEX(soc_pcmcia_sockets_lock); | 579 | static DEFINE_MUTEX(soc_pcmcia_sockets_lock); |
579 | 580 | ||
580 | #ifdef CONFIG_CPU_FREQ | 581 | #ifdef CONFIG_CPU_FREQ |
@@ -609,177 +610,137 @@ static int soc_pcmcia_cpufreq_register(void) | |||
609 | "notifier for PCMCIA (%d)\n", ret); | 610 | "notifier for PCMCIA (%d)\n", ret); |
610 | return ret; | 611 | return ret; |
611 | } | 612 | } |
613 | fs_initcall(soc_pcmcia_cpufreq_register); | ||
612 | 614 | ||
613 | static void soc_pcmcia_cpufreq_unregister(void) | 615 | static void soc_pcmcia_cpufreq_unregister(void) |
614 | { | 616 | { |
615 | cpufreq_unregister_notifier(&soc_pcmcia_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); | 617 | cpufreq_unregister_notifier(&soc_pcmcia_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); |
616 | } | 618 | } |
619 | module_exit(soc_pcmcia_cpufreq_unregister); | ||
617 | 620 | ||
618 | #else | ||
619 | static int soc_pcmcia_cpufreq_register(void) { return 0; } | ||
620 | static void soc_pcmcia_cpufreq_unregister(void) {} | ||
621 | #endif | 621 | #endif |
622 | 622 | ||
623 | int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, | 623 | void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt) |
624 | struct skt_dev_info *sinfo) | ||
625 | { | 624 | { |
626 | struct soc_pcmcia_socket *skt; | ||
627 | int ret, i; | ||
628 | |||
629 | mutex_lock(&soc_pcmcia_sockets_lock); | 625 | mutex_lock(&soc_pcmcia_sockets_lock); |
626 | del_timer_sync(&skt->poll_timer); | ||
630 | 627 | ||
631 | /* | 628 | pcmcia_unregister_socket(&skt->socket); |
632 | * Initialise the per-socket structure. | ||
633 | */ | ||
634 | for (i = 0; i < sinfo->nskt; i++) { | ||
635 | skt = &sinfo->skt[i]; | ||
636 | 629 | ||
637 | skt->socket.ops = &soc_common_pcmcia_operations; | 630 | flush_scheduled_work(); |
638 | skt->socket.owner = ops->owner; | ||
639 | skt->socket.dev.parent = dev; | ||
640 | 631 | ||
641 | init_timer(&skt->poll_timer); | 632 | skt->ops->hw_shutdown(skt); |
642 | skt->poll_timer.function = soc_common_pcmcia_poll_event; | ||
643 | skt->poll_timer.data = (unsigned long)skt; | ||
644 | skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD; | ||
645 | 633 | ||
646 | skt->dev = dev; | 634 | soc_common_pcmcia_config_skt(skt, &dead_socket); |
647 | skt->ops = ops; | ||
648 | 635 | ||
649 | ret = request_resource(&iomem_resource, &skt->res_skt); | 636 | list_del(&skt->node); |
650 | if (ret) | 637 | mutex_unlock(&soc_pcmcia_sockets_lock); |
651 | goto out_err_1; | ||
652 | 638 | ||
653 | ret = request_resource(&skt->res_skt, &skt->res_io); | 639 | iounmap(skt->virt_io); |
654 | if (ret) | 640 | skt->virt_io = NULL; |
655 | goto out_err_2; | 641 | release_resource(&skt->res_attr); |
642 | release_resource(&skt->res_mem); | ||
643 | release_resource(&skt->res_io); | ||
644 | release_resource(&skt->res_skt); | ||
645 | } | ||
646 | EXPORT_SYMBOL(soc_pcmcia_remove_one); | ||
656 | 647 | ||
657 | ret = request_resource(&skt->res_skt, &skt->res_mem); | 648 | int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt) |
658 | if (ret) | 649 | { |
659 | goto out_err_3; | 650 | int ret; |
660 | 651 | ||
661 | ret = request_resource(&skt->res_skt, &skt->res_attr); | 652 | init_timer(&skt->poll_timer); |
662 | if (ret) | 653 | skt->poll_timer.function = soc_common_pcmcia_poll_event; |
663 | goto out_err_4; | 654 | skt->poll_timer.data = (unsigned long)skt; |
655 | skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD; | ||
664 | 656 | ||
665 | skt->virt_io = ioremap(skt->res_io.start, 0x10000); | 657 | ret = request_resource(&iomem_resource, &skt->res_skt); |
666 | if (skt->virt_io == NULL) { | 658 | if (ret) |
667 | ret = -ENOMEM; | 659 | goto out_err_1; |
668 | goto out_err_5; | ||
669 | } | ||
670 | 660 | ||
671 | if (list_empty(&soc_pcmcia_sockets)) | 661 | ret = request_resource(&skt->res_skt, &skt->res_io); |
672 | soc_pcmcia_cpufreq_register(); | 662 | if (ret) |
663 | goto out_err_2; | ||
673 | 664 | ||
674 | list_add(&skt->node, &soc_pcmcia_sockets); | 665 | ret = request_resource(&skt->res_skt, &skt->res_mem); |
666 | if (ret) | ||
667 | goto out_err_3; | ||
675 | 668 | ||
676 | /* | 669 | ret = request_resource(&skt->res_skt, &skt->res_attr); |
677 | * We initialize default socket timing here, because | 670 | if (ret) |
678 | * we are not guaranteed to see a SetIOMap operation at | 671 | goto out_err_4; |
679 | * runtime. | ||
680 | */ | ||
681 | ops->set_timing(skt); | ||
682 | 672 | ||
683 | ret = ops->hw_init(skt); | 673 | skt->virt_io = ioremap(skt->res_io.start, 0x10000); |
684 | if (ret) | 674 | if (skt->virt_io == NULL) { |
685 | goto out_err_6; | 675 | ret = -ENOMEM; |
676 | goto out_err_5; | ||
677 | } | ||
686 | 678 | ||
687 | skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD; | 679 | mutex_lock(&soc_pcmcia_sockets_lock); |
688 | skt->socket.resource_ops = &pccard_static_ops; | ||
689 | skt->socket.irq_mask = 0; | ||
690 | skt->socket.map_size = PAGE_SIZE; | ||
691 | skt->socket.pci_irq = skt->irq; | ||
692 | skt->socket.io_offset = (unsigned long)skt->virt_io; | ||
693 | 680 | ||
694 | skt->status = soc_common_pcmcia_skt_state(skt); | 681 | list_add(&skt->node, &soc_pcmcia_sockets); |
695 | 682 | ||
696 | ret = pcmcia_register_socket(&skt->socket); | 683 | /* |
697 | if (ret) | 684 | * We initialize default socket timing here, because |
698 | goto out_err_7; | 685 | * we are not guaranteed to see a SetIOMap operation at |
686 | * runtime. | ||
687 | */ | ||
688 | skt->ops->set_timing(skt); | ||
699 | 689 | ||
700 | WARN_ON(skt->socket.sock != i); | 690 | ret = skt->ops->hw_init(skt); |
691 | if (ret) | ||
692 | goto out_err_6; | ||
701 | 693 | ||
702 | add_timer(&skt->poll_timer); | 694 | skt->socket.ops = &soc_common_pcmcia_operations; |
695 | skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD; | ||
696 | skt->socket.resource_ops = &pccard_static_ops; | ||
697 | skt->socket.irq_mask = 0; | ||
698 | skt->socket.map_size = PAGE_SIZE; | ||
699 | skt->socket.io_offset = (unsigned long)skt->virt_io; | ||
703 | 700 | ||
704 | ret = device_create_file(&skt->socket.dev, &dev_attr_status); | 701 | skt->status = soc_common_pcmcia_skt_state(skt); |
705 | if (ret) | ||
706 | goto out_err_8; | ||
707 | } | ||
708 | 702 | ||
709 | dev_set_drvdata(dev, sinfo); | 703 | ret = pcmcia_register_socket(&skt->socket); |
710 | ret = 0; | 704 | if (ret) |
711 | goto out; | 705 | goto out_err_7; |
712 | 706 | ||
713 | do { | 707 | add_timer(&skt->poll_timer); |
714 | skt = &sinfo->skt[i]; | 708 | |
709 | mutex_unlock(&soc_pcmcia_sockets_lock); | ||
710 | |||
711 | ret = device_create_file(&skt->socket.dev, &dev_attr_status); | ||
712 | if (ret) | ||
713 | goto out_err_8; | ||
714 | |||
715 | return ret; | ||
715 | 716 | ||
716 | device_remove_file(&skt->socket.dev, &dev_attr_status); | ||
717 | out_err_8: | 717 | out_err_8: |
718 | del_timer_sync(&skt->poll_timer); | 718 | mutex_lock(&soc_pcmcia_sockets_lock); |
719 | pcmcia_unregister_socket(&skt->socket); | 719 | del_timer_sync(&skt->poll_timer); |
720 | pcmcia_unregister_socket(&skt->socket); | ||
720 | 721 | ||
721 | out_err_7: | 722 | out_err_7: |
722 | flush_scheduled_work(); | 723 | flush_scheduled_work(); |
723 | 724 | ||
724 | ops->hw_shutdown(skt); | 725 | skt->ops->hw_shutdown(skt); |
725 | out_err_6: | 726 | out_err_6: |
726 | list_del(&skt->node); | 727 | list_del(&skt->node); |
727 | iounmap(skt->virt_io); | 728 | mutex_unlock(&soc_pcmcia_sockets_lock); |
729 | iounmap(skt->virt_io); | ||
728 | out_err_5: | 730 | out_err_5: |
729 | release_resource(&skt->res_attr); | 731 | release_resource(&skt->res_attr); |
730 | out_err_4: | 732 | out_err_4: |
731 | release_resource(&skt->res_mem); | 733 | release_resource(&skt->res_mem); |
732 | out_err_3: | 734 | out_err_3: |
733 | release_resource(&skt->res_io); | 735 | release_resource(&skt->res_io); |
734 | out_err_2: | 736 | out_err_2: |
735 | release_resource(&skt->res_skt); | 737 | release_resource(&skt->res_skt); |
736 | out_err_1: | 738 | out_err_1: |
737 | i--; | ||
738 | } while (i > 0); | ||
739 | 739 | ||
740 | kfree(sinfo); | ||
741 | |||
742 | out: | ||
743 | mutex_unlock(&soc_pcmcia_sockets_lock); | ||
744 | return ret; | 740 | return ret; |
745 | } | 741 | } |
742 | EXPORT_SYMBOL(soc_pcmcia_add_one); | ||
746 | 743 | ||
747 | int soc_common_drv_pcmcia_remove(struct device *dev) | 744 | MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>"); |
748 | { | 745 | MODULE_DESCRIPTION("Linux PCMCIA Card Services: Common SoC support"); |
749 | struct skt_dev_info *sinfo = dev_get_drvdata(dev); | 746 | MODULE_LICENSE("Dual MPL/GPL"); |
750 | int i; | ||
751 | |||
752 | dev_set_drvdata(dev, NULL); | ||
753 | |||
754 | mutex_lock(&soc_pcmcia_sockets_lock); | ||
755 | for (i = 0; i < sinfo->nskt; i++) { | ||
756 | struct soc_pcmcia_socket *skt = &sinfo->skt[i]; | ||
757 | |||
758 | del_timer_sync(&skt->poll_timer); | ||
759 | |||
760 | pcmcia_unregister_socket(&skt->socket); | ||
761 | |||
762 | flush_scheduled_work(); | ||
763 | |||
764 | skt->ops->hw_shutdown(skt); | ||
765 | |||
766 | soc_common_pcmcia_config_skt(skt, &dead_socket); | ||
767 | |||
768 | list_del(&skt->node); | ||
769 | iounmap(skt->virt_io); | ||
770 | skt->virt_io = NULL; | ||
771 | release_resource(&skt->res_attr); | ||
772 | release_resource(&skt->res_mem); | ||
773 | release_resource(&skt->res_io); | ||
774 | release_resource(&skt->res_skt); | ||
775 | } | ||
776 | if (list_empty(&soc_pcmcia_sockets)) | ||
777 | soc_pcmcia_cpufreq_unregister(); | ||
778 | |||
779 | mutex_unlock(&soc_pcmcia_sockets_lock); | ||
780 | |||
781 | kfree(sinfo); | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | EXPORT_SYMBOL(soc_common_drv_pcmcia_remove); | ||
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h index 290e143839ee..e40824ce6b0b 100644 --- a/drivers/pcmcia/soc_common.h +++ b/drivers/pcmcia/soc_common.h | |||
@@ -30,14 +30,12 @@ struct soc_pcmcia_socket { | |||
30 | /* | 30 | /* |
31 | * Info from low level handler | 31 | * Info from low level handler |
32 | */ | 32 | */ |
33 | struct device *dev; | ||
34 | unsigned int nr; | 33 | unsigned int nr; |
35 | unsigned int irq; | ||
36 | 34 | ||
37 | /* | 35 | /* |
38 | * Core PCMCIA state | 36 | * Core PCMCIA state |
39 | */ | 37 | */ |
40 | struct pcmcia_low_level *ops; | 38 | const struct pcmcia_low_level *ops; |
41 | 39 | ||
42 | unsigned int status; | 40 | unsigned int status; |
43 | socket_state_t cs_state; | 41 | socket_state_t cs_state; |
@@ -135,10 +133,8 @@ extern void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, struct pcmcia_ | |||
135 | extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *); | 133 | extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *); |
136 | 134 | ||
137 | 135 | ||
138 | extern struct list_head soc_pcmcia_sockets; | 136 | void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt); |
139 | 137 | int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt); | |
140 | extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, struct skt_dev_info *sinfo); | ||
141 | extern int soc_common_drv_pcmcia_remove(struct device *dev); | ||
142 | 138 | ||
143 | 139 | ||
144 | #ifdef CONFIG_PCMCIA_DEBUG | 140 | #ifdef CONFIG_PCMCIA_DEBUG |
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index 6918849d511e..12c49ee135e1 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c | |||
@@ -55,21 +55,6 @@ | |||
55 | #include <pcmcia/ss.h> | 55 | #include <pcmcia/ss.h> |
56 | #include "tcic.h" | 56 | #include "tcic.h" |
57 | 57 | ||
58 | #ifdef CONFIG_PCMCIA_DEBUG | ||
59 | static int pc_debug; | ||
60 | |||
61 | module_param(pc_debug, int, 0644); | ||
62 | static const char version[] = | ||
63 | "tcic.c 1.111 2000/02/15 04:13:12 (David Hinds)"; | ||
64 | |||
65 | #define debug(lvl, fmt, arg...) do { \ | ||
66 | if (pc_debug > (lvl)) \ | ||
67 | printk(KERN_DEBUG "tcic: " fmt , ## arg); \ | ||
68 | } while (0) | ||
69 | #else | ||
70 | #define debug(lvl, fmt, arg...) do { } while (0) | ||
71 | #endif | ||
72 | |||
73 | MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | 58 | MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); |
74 | MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); | 59 | MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); |
75 | MODULE_LICENSE("Dual MPL/GPL"); | 60 | MODULE_LICENSE("Dual MPL/GPL"); |
@@ -574,7 +559,7 @@ static irqreturn_t tcic_interrupt(int irq, void *dev) | |||
574 | } else | 559 | } else |
575 | active = 1; | 560 | active = 1; |
576 | 561 | ||
577 | debug(2, "tcic_interrupt()\n"); | 562 | pr_debug("tcic_interrupt()\n"); |
578 | 563 | ||
579 | for (i = 0; i < sockets; i++) { | 564 | for (i = 0; i < sockets; i++) { |
580 | psock = socket_table[i].psock; | 565 | psock = socket_table[i].psock; |
@@ -611,13 +596,13 @@ static irqreturn_t tcic_interrupt(int irq, void *dev) | |||
611 | } | 596 | } |
612 | active = 0; | 597 | active = 0; |
613 | 598 | ||
614 | debug(2, "interrupt done\n"); | 599 | pr_debug("interrupt done\n"); |
615 | return IRQ_HANDLED; | 600 | return IRQ_HANDLED; |
616 | } /* tcic_interrupt */ | 601 | } /* tcic_interrupt */ |
617 | 602 | ||
618 | static void tcic_timer(u_long data) | 603 | static void tcic_timer(u_long data) |
619 | { | 604 | { |
620 | debug(2, "tcic_timer()\n"); | 605 | pr_debug("tcic_timer()\n"); |
621 | tcic_timer_pending = 0; | 606 | tcic_timer_pending = 0; |
622 | tcic_interrupt(0, NULL); | 607 | tcic_interrupt(0, NULL); |
623 | } /* tcic_timer */ | 608 | } /* tcic_timer */ |
@@ -644,7 +629,7 @@ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value) | |||
644 | reg = tcic_getb(TCIC_PWR); | 629 | reg = tcic_getb(TCIC_PWR); |
645 | if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) | 630 | if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) |
646 | *value |= SS_POWERON; | 631 | *value |= SS_POWERON; |
647 | debug(1, "GetStatus(%d) = %#2.2x\n", psock, *value); | 632 | dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value); |
648 | return 0; | 633 | return 0; |
649 | } /* tcic_get_status */ | 634 | } /* tcic_get_status */ |
650 | 635 | ||
@@ -656,7 +641,7 @@ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state) | |||
656 | u_char reg; | 641 | u_char reg; |
657 | u_short scf1, scf2; | 642 | u_short scf1, scf2; |
658 | 643 | ||
659 | debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " | 644 | dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " |
660 | "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, | 645 | "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, |
661 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); | 646 | state->Vcc, state->Vpp, state->io_irq, state->csc_mask); |
662 | tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); | 647 | tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); |
@@ -731,7 +716,7 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) | |||
731 | u_int addr; | 716 | u_int addr; |
732 | u_short base, len, ioctl; | 717 | u_short base, len, ioctl; |
733 | 718 | ||
734 | debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " | 719 | dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, " |
735 | "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, | 720 | "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, |
736 | (unsigned long long)io->start, (unsigned long long)io->stop); | 721 | (unsigned long long)io->start, (unsigned long long)io->stop); |
737 | if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || | 722 | if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || |
@@ -768,7 +753,7 @@ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m | |||
768 | u_short addr, ctl; | 753 | u_short addr, ctl; |
769 | u_long base, len, mmap; | 754 | u_long base, len, mmap; |
770 | 755 | ||
771 | debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, " | 756 | dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, " |
772 | "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, | 757 | "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, |
773 | mem->speed, (unsigned long long)mem->res->start, | 758 | mem->speed, (unsigned long long)mem->res->start, |
774 | (unsigned long long)mem->res->end, mem->card_start); | 759 | (unsigned long long)mem->res->end, mem->card_start); |
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h index edccfa5bb400..615a45a8fe86 100644 --- a/drivers/pcmcia/topic.h +++ b/drivers/pcmcia/topic.h | |||
@@ -114,22 +114,17 @@ static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff) | |||
114 | reg_zv |= TOPIC97_ZV_CONTROL_ENABLE; | 114 | reg_zv |= TOPIC97_ZV_CONTROL_ENABLE; |
115 | config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv); | 115 | config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv); |
116 | 116 | ||
117 | reg = config_readb(socket, TOPIC97_MISC2); | ||
118 | reg |= TOPIC97_MISC2_ZV_ENABLE; | ||
119 | config_writeb(socket, TOPIC97_MISC2, reg); | ||
120 | |||
121 | /* not sure this is needed, doc is unclear */ | ||
122 | #if 0 | ||
123 | reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH); | 117 | reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH); |
124 | reg |= TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL; | 118 | reg |= TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL; |
125 | config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg); | 119 | config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg); |
126 | #endif | 120 | } else { |
127 | } | ||
128 | else { | ||
129 | reg_zv &= ~TOPIC97_ZV_CONTROL_ENABLE; | 121 | reg_zv &= ~TOPIC97_ZV_CONTROL_ENABLE; |
130 | config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv); | 122 | config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv); |
131 | } | ||
132 | 123 | ||
124 | reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH); | ||
125 | reg &= ~(TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL); | ||
126 | config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg); | ||
127 | } | ||
133 | } | 128 | } |
134 | 129 | ||
135 | static int topic97_override(struct yenta_socket *socket) | 130 | static int topic97_override(struct yenta_socket *socket) |
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index cea6cef27e89..118674925516 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
@@ -77,6 +77,13 @@ config BATTERY_TOSA | |||
77 | Say Y to enable support for the battery on the Sharp Zaurus | 77 | Say Y to enable support for the battery on the Sharp Zaurus |
78 | SL-6000 (tosa) models. | 78 | SL-6000 (tosa) models. |
79 | 79 | ||
80 | config BATTERY_COLLIE | ||
81 | tristate "Sharp SL-5500 (collie) battery" | ||
82 | depends on SA1100_COLLIE && MCP_UCB1200 | ||
83 | help | ||
84 | Say Y to enable support for the battery on the Sharp Zaurus | ||
85 | SL-5500 (collie) models. | ||
86 | |||
80 | config BATTERY_WM97XX | 87 | config BATTERY_WM97XX |
81 | bool "WM97xx generic battery driver" | 88 | bool "WM97xx generic battery driver" |
82 | depends on TOUCHSCREEN_WM97XX=y | 89 | depends on TOUCHSCREEN_WM97XX=y |
diff --git a/drivers/power/Makefile b/drivers/power/Makefile index b96f29d91c28..356cdfd3c8b2 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile | |||
@@ -24,6 +24,7 @@ obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o | |||
24 | obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o | 24 | obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o |
25 | obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o | 25 | obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o |
26 | obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o | 26 | obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o |
27 | obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o | ||
27 | obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o | 28 | obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o |
28 | obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o | 29 | obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o |
29 | obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o | 30 | obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o |
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c new file mode 100644 index 000000000000..039f41ae217d --- /dev/null +++ b/drivers/power/collie_battery.c | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * Battery and Power Management code for the Sharp SL-5x00 | ||
3 | * | ||
4 | * Copyright (C) 2009 Thomas Kunze | ||
5 | * | ||
6 | * based on tosa_battery.c | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/power_supply.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/gpio.h> | ||
20 | #include <linux/mfd/ucb1x00.h> | ||
21 | |||
22 | #include <asm/mach/sharpsl_param.h> | ||
23 | #include <asm/mach-types.h> | ||
24 | #include <mach/collie.h> | ||
25 | |||
26 | static DEFINE_MUTEX(bat_lock); /* protects gpio pins */ | ||
27 | static struct work_struct bat_work; | ||
28 | static struct ucb1x00 *ucb; | ||
29 | |||
30 | struct collie_bat { | ||
31 | int status; | ||
32 | struct power_supply psy; | ||
33 | int full_chrg; | ||
34 | |||
35 | struct mutex work_lock; /* protects data */ | ||
36 | |||
37 | bool (*is_present)(struct collie_bat *bat); | ||
38 | int gpio_full; | ||
39 | int gpio_charge_on; | ||
40 | |||
41 | int technology; | ||
42 | |||
43 | int gpio_bat; | ||
44 | int adc_bat; | ||
45 | int adc_bat_divider; | ||
46 | int bat_max; | ||
47 | int bat_min; | ||
48 | |||
49 | int gpio_temp; | ||
50 | int adc_temp; | ||
51 | int adc_temp_divider; | ||
52 | }; | ||
53 | |||
54 | static struct collie_bat collie_bat_main; | ||
55 | |||
56 | static unsigned long collie_read_bat(struct collie_bat *bat) | ||
57 | { | ||
58 | unsigned long value = 0; | ||
59 | |||
60 | if (bat->gpio_bat < 0 || bat->adc_bat < 0) | ||
61 | return 0; | ||
62 | mutex_lock(&bat_lock); | ||
63 | gpio_set_value(bat->gpio_bat, 1); | ||
64 | msleep(5); | ||
65 | ucb1x00_adc_enable(ucb); | ||
66 | value = ucb1x00_adc_read(ucb, bat->adc_bat, UCB_SYNC); | ||
67 | ucb1x00_adc_disable(ucb); | ||
68 | gpio_set_value(bat->gpio_bat, 0); | ||
69 | mutex_unlock(&bat_lock); | ||
70 | value = value * 1000000 / bat->adc_bat_divider; | ||
71 | |||
72 | return value; | ||
73 | } | ||
74 | |||
75 | static unsigned long collie_read_temp(struct collie_bat *bat) | ||
76 | { | ||
77 | unsigned long value = 0; | ||
78 | if (bat->gpio_temp < 0 || bat->adc_temp < 0) | ||
79 | return 0; | ||
80 | |||
81 | mutex_lock(&bat_lock); | ||
82 | gpio_set_value(bat->gpio_temp, 1); | ||
83 | msleep(5); | ||
84 | ucb1x00_adc_enable(ucb); | ||
85 | value = ucb1x00_adc_read(ucb, bat->adc_temp, UCB_SYNC); | ||
86 | ucb1x00_adc_disable(ucb); | ||
87 | gpio_set_value(bat->gpio_temp, 0); | ||
88 | mutex_unlock(&bat_lock); | ||
89 | |||
90 | value = value * 10000 / bat->adc_temp_divider; | ||
91 | |||
92 | return value; | ||
93 | } | ||
94 | |||
95 | static int collie_bat_get_property(struct power_supply *psy, | ||
96 | enum power_supply_property psp, | ||
97 | union power_supply_propval *val) | ||
98 | { | ||
99 | int ret = 0; | ||
100 | struct collie_bat *bat = container_of(psy, struct collie_bat, psy); | ||
101 | |||
102 | if (bat->is_present && !bat->is_present(bat) | ||
103 | && psp != POWER_SUPPLY_PROP_PRESENT) { | ||
104 | return -ENODEV; | ||
105 | } | ||
106 | |||
107 | switch (psp) { | ||
108 | case POWER_SUPPLY_PROP_STATUS: | ||
109 | val->intval = bat->status; | ||
110 | break; | ||
111 | case POWER_SUPPLY_PROP_TECHNOLOGY: | ||
112 | val->intval = bat->technology; | ||
113 | break; | ||
114 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | ||
115 | val->intval = collie_read_bat(bat); | ||
116 | break; | ||
117 | case POWER_SUPPLY_PROP_VOLTAGE_MAX: | ||
118 | if (bat->full_chrg == -1) | ||
119 | val->intval = bat->bat_max; | ||
120 | else | ||
121 | val->intval = bat->full_chrg; | ||
122 | break; | ||
123 | case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: | ||
124 | val->intval = bat->bat_max; | ||
125 | break; | ||
126 | case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: | ||
127 | val->intval = bat->bat_min; | ||
128 | break; | ||
129 | case POWER_SUPPLY_PROP_TEMP: | ||
130 | val->intval = collie_read_temp(bat); | ||
131 | break; | ||
132 | case POWER_SUPPLY_PROP_PRESENT: | ||
133 | val->intval = bat->is_present ? bat->is_present(bat) : 1; | ||
134 | break; | ||
135 | default: | ||
136 | ret = -EINVAL; | ||
137 | break; | ||
138 | } | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | static void collie_bat_external_power_changed(struct power_supply *psy) | ||
143 | { | ||
144 | schedule_work(&bat_work); | ||
145 | } | ||
146 | |||
147 | static irqreturn_t collie_bat_gpio_isr(int irq, void *data) | ||
148 | { | ||
149 | pr_info("collie_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq))); | ||
150 | schedule_work(&bat_work); | ||
151 | return IRQ_HANDLED; | ||
152 | } | ||
153 | |||
154 | static void collie_bat_update(struct collie_bat *bat) | ||
155 | { | ||
156 | int old; | ||
157 | struct power_supply *psy = &bat->psy; | ||
158 | |||
159 | mutex_lock(&bat->work_lock); | ||
160 | |||
161 | old = bat->status; | ||
162 | |||
163 | if (bat->is_present && !bat->is_present(bat)) { | ||
164 | printk(KERN_NOTICE "%s not present\n", psy->name); | ||
165 | bat->status = POWER_SUPPLY_STATUS_UNKNOWN; | ||
166 | bat->full_chrg = -1; | ||
167 | } else if (power_supply_am_i_supplied(psy)) { | ||
168 | if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) { | ||
169 | gpio_set_value(bat->gpio_charge_on, 1); | ||
170 | mdelay(15); | ||
171 | } | ||
172 | |||
173 | if (gpio_get_value(bat->gpio_full)) { | ||
174 | if (old == POWER_SUPPLY_STATUS_CHARGING || | ||
175 | bat->full_chrg == -1) | ||
176 | bat->full_chrg = collie_read_bat(bat); | ||
177 | |||
178 | gpio_set_value(bat->gpio_charge_on, 0); | ||
179 | bat->status = POWER_SUPPLY_STATUS_FULL; | ||
180 | } else { | ||
181 | gpio_set_value(bat->gpio_charge_on, 1); | ||
182 | bat->status = POWER_SUPPLY_STATUS_CHARGING; | ||
183 | } | ||
184 | } else { | ||
185 | gpio_set_value(bat->gpio_charge_on, 0); | ||
186 | bat->status = POWER_SUPPLY_STATUS_DISCHARGING; | ||
187 | } | ||
188 | |||
189 | if (old != bat->status) | ||
190 | power_supply_changed(psy); | ||
191 | |||
192 | mutex_unlock(&bat->work_lock); | ||
193 | } | ||
194 | |||
195 | static void collie_bat_work(struct work_struct *work) | ||
196 | { | ||
197 | collie_bat_update(&collie_bat_main); | ||
198 | } | ||
199 | |||
200 | |||
201 | static enum power_supply_property collie_bat_main_props[] = { | ||
202 | POWER_SUPPLY_PROP_STATUS, | ||
203 | POWER_SUPPLY_PROP_TECHNOLOGY, | ||
204 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, | ||
205 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | ||
206 | POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, | ||
207 | POWER_SUPPLY_PROP_VOLTAGE_MAX, | ||
208 | POWER_SUPPLY_PROP_PRESENT, | ||
209 | POWER_SUPPLY_PROP_TEMP, | ||
210 | }; | ||
211 | |||
212 | static enum power_supply_property collie_bat_bu_props[] = { | ||
213 | POWER_SUPPLY_PROP_STATUS, | ||
214 | POWER_SUPPLY_PROP_TECHNOLOGY, | ||
215 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, | ||
216 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | ||
217 | POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, | ||
218 | POWER_SUPPLY_PROP_VOLTAGE_MAX, | ||
219 | POWER_SUPPLY_PROP_PRESENT, | ||
220 | }; | ||
221 | |||
222 | static struct collie_bat collie_bat_main = { | ||
223 | .status = POWER_SUPPLY_STATUS_DISCHARGING, | ||
224 | .full_chrg = -1, | ||
225 | .psy = { | ||
226 | .name = "main-battery", | ||
227 | .type = POWER_SUPPLY_TYPE_BATTERY, | ||
228 | .properties = collie_bat_main_props, | ||
229 | .num_properties = ARRAY_SIZE(collie_bat_main_props), | ||
230 | .get_property = collie_bat_get_property, | ||
231 | .external_power_changed = collie_bat_external_power_changed, | ||
232 | .use_for_apm = 1, | ||
233 | }, | ||
234 | |||
235 | .gpio_full = COLLIE_GPIO_CO, | ||
236 | .gpio_charge_on = COLLIE_GPIO_CHARGE_ON, | ||
237 | |||
238 | .technology = POWER_SUPPLY_TECHNOLOGY_LIPO, | ||
239 | |||
240 | .gpio_bat = COLLIE_GPIO_MBAT_ON, | ||
241 | .adc_bat = UCB_ADC_INP_AD1, | ||
242 | .adc_bat_divider = 155, | ||
243 | .bat_max = 4310000, | ||
244 | .bat_min = 1551 * 1000000 / 414, | ||
245 | |||
246 | .gpio_temp = COLLIE_GPIO_TMP_ON, | ||
247 | .adc_temp = UCB_ADC_INP_AD0, | ||
248 | .adc_temp_divider = 10000, | ||
249 | }; | ||
250 | |||
251 | static struct collie_bat collie_bat_bu = { | ||
252 | .status = POWER_SUPPLY_STATUS_UNKNOWN, | ||
253 | .full_chrg = -1, | ||
254 | |||
255 | .psy = { | ||
256 | .name = "backup-battery", | ||
257 | .type = POWER_SUPPLY_TYPE_BATTERY, | ||
258 | .properties = collie_bat_bu_props, | ||
259 | .num_properties = ARRAY_SIZE(collie_bat_bu_props), | ||
260 | .get_property = collie_bat_get_property, | ||
261 | .external_power_changed = collie_bat_external_power_changed, | ||
262 | }, | ||
263 | |||
264 | .gpio_full = -1, | ||
265 | .gpio_charge_on = -1, | ||
266 | |||
267 | .technology = POWER_SUPPLY_TECHNOLOGY_LiMn, | ||
268 | |||
269 | .gpio_bat = COLLIE_GPIO_BBAT_ON, | ||
270 | .adc_bat = UCB_ADC_INP_AD1, | ||
271 | .adc_bat_divider = 155, | ||
272 | .bat_max = 3000000, | ||
273 | .bat_min = 1900000, | ||
274 | |||
275 | .gpio_temp = -1, | ||
276 | .adc_temp = -1, | ||
277 | .adc_temp_divider = -1, | ||
278 | }; | ||
279 | |||
280 | static struct { | ||
281 | int gpio; | ||
282 | char *name; | ||
283 | bool output; | ||
284 | int value; | ||
285 | } gpios[] = { | ||
286 | { COLLIE_GPIO_CO, "main battery full", 0, 0 }, | ||
287 | { COLLIE_GPIO_MAIN_BAT_LOW, "main battery low", 0, 0 }, | ||
288 | { COLLIE_GPIO_CHARGE_ON, "main charge on", 1, 0 }, | ||
289 | { COLLIE_GPIO_MBAT_ON, "main battery", 1, 0 }, | ||
290 | { COLLIE_GPIO_TMP_ON, "main battery temp", 1, 0 }, | ||
291 | { COLLIE_GPIO_BBAT_ON, "backup battery", 1, 0 }, | ||
292 | }; | ||
293 | |||
294 | #ifdef CONFIG_PM | ||
295 | static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state) | ||
296 | { | ||
297 | /* flush all pending status updates */ | ||
298 | flush_scheduled_work(); | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | static int collie_bat_resume(struct ucb1x00_dev *dev) | ||
303 | { | ||
304 | /* things may have changed while we were away */ | ||
305 | schedule_work(&bat_work); | ||
306 | return 0; | ||
307 | } | ||
308 | #else | ||
309 | #define collie_bat_suspend NULL | ||
310 | #define collie_bat_resume NULL | ||
311 | #endif | ||
312 | |||
313 | static int __devinit collie_bat_probe(struct ucb1x00_dev *dev) | ||
314 | { | ||
315 | int ret; | ||
316 | int i; | ||
317 | |||
318 | if (!machine_is_collie()) | ||
319 | return -ENODEV; | ||
320 | |||
321 | ucb = dev->ucb; | ||
322 | |||
323 | for (i = 0; i < ARRAY_SIZE(gpios); i++) { | ||
324 | ret = gpio_request(gpios[i].gpio, gpios[i].name); | ||
325 | if (ret) { | ||
326 | i--; | ||
327 | goto err_gpio; | ||
328 | } | ||
329 | |||
330 | if (gpios[i].output) | ||
331 | ret = gpio_direction_output(gpios[i].gpio, | ||
332 | gpios[i].value); | ||
333 | else | ||
334 | ret = gpio_direction_input(gpios[i].gpio); | ||
335 | |||
336 | if (ret) | ||
337 | goto err_gpio; | ||
338 | } | ||
339 | |||
340 | mutex_init(&collie_bat_main.work_lock); | ||
341 | |||
342 | INIT_WORK(&bat_work, collie_bat_work); | ||
343 | |||
344 | ret = power_supply_register(&dev->ucb->dev, &collie_bat_main.psy); | ||
345 | if (ret) | ||
346 | goto err_psy_reg_main; | ||
347 | ret = power_supply_register(&dev->ucb->dev, &collie_bat_bu.psy); | ||
348 | if (ret) | ||
349 | goto err_psy_reg_bu; | ||
350 | |||
351 | ret = request_irq(gpio_to_irq(COLLIE_GPIO_CO), | ||
352 | collie_bat_gpio_isr, | ||
353 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
354 | "main full", &collie_bat_main); | ||
355 | if (!ret) { | ||
356 | schedule_work(&bat_work); | ||
357 | return 0; | ||
358 | } | ||
359 | power_supply_unregister(&collie_bat_bu.psy); | ||
360 | err_psy_reg_bu: | ||
361 | power_supply_unregister(&collie_bat_main.psy); | ||
362 | err_psy_reg_main: | ||
363 | |||
364 | /* see comment in collie_bat_remove */ | ||
365 | flush_scheduled_work(); | ||
366 | |||
367 | i--; | ||
368 | err_gpio: | ||
369 | for (; i >= 0; i--) | ||
370 | gpio_free(gpios[i].gpio); | ||
371 | |||
372 | return ret; | ||
373 | } | ||
374 | |||
375 | static void __devexit collie_bat_remove(struct ucb1x00_dev *dev) | ||
376 | { | ||
377 | int i; | ||
378 | |||
379 | free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main); | ||
380 | |||
381 | power_supply_unregister(&collie_bat_bu.psy); | ||
382 | power_supply_unregister(&collie_bat_main.psy); | ||
383 | |||
384 | /* | ||
385 | * now flush all pending work. | ||
386 | * we won't get any more schedules, since all | ||
387 | * sources (isr and external_power_changed) | ||
388 | * are unregistered now. | ||
389 | */ | ||
390 | flush_scheduled_work(); | ||
391 | |||
392 | for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--) | ||
393 | gpio_free(gpios[i].gpio); | ||
394 | } | ||
395 | |||
396 | static struct ucb1x00_driver collie_bat_driver = { | ||
397 | .add = collie_bat_probe, | ||
398 | .remove = __devexit_p(collie_bat_remove), | ||
399 | .suspend = collie_bat_suspend, | ||
400 | .resume = collie_bat_resume, | ||
401 | }; | ||
402 | |||
403 | static int __init collie_bat_init(void) | ||
404 | { | ||
405 | return ucb1x00_register_driver(&collie_bat_driver); | ||
406 | } | ||
407 | |||
408 | static void __exit collie_bat_exit(void) | ||
409 | { | ||
410 | ucb1x00_unregister_driver(&collie_bat_driver); | ||
411 | } | ||
412 | |||
413 | module_init(collie_bat_init); | ||
414 | module_exit(collie_bat_exit); | ||
415 | |||
416 | MODULE_LICENSE("GPL"); | ||
417 | MODULE_AUTHOR("Thomas Kunze"); | ||
418 | MODULE_DESCRIPTION("Collie battery driver"); | ||
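The voltage path in the new collie driver is a straight linear scaling: collie_read_bat() switches the sense GPIO on, reads the UCB1x00 ADC, and multiplies the raw value by 1,000,000 before dividing by adc_bat_divider (155 for the main cell). As a rough, self-contained illustration of that arithmetic — the raw reading of 668 below is an assumed example value, not a measurement; only the divider and the bat_max figure come from the driver above — a reading near the top of the scale lands just under the declared 4,310,000 µV maximum:

#include <stdio.h>

static unsigned long adc_to_uv(unsigned long raw, unsigned long divider)
{
	return raw * 1000000UL / divider;	/* same formula as collie_read_bat() */
}

int main(void)
{
	unsigned long uv = adc_to_uv(668, 155);	/* 668 is an assumed raw ADC reading */

	/* 668 * 1000000 / 155 = 4309677 uV, just below bat_max (4310000 uV) */
	printf("%lu uV\n", uv);
	return 0;
}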
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c index 1d8d9879d3a1..48857008758c 100644 --- a/drivers/regulator/wm831x-isink.c +++ b/drivers/regulator/wm831x-isink.c | |||
@@ -167,6 +167,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev) | |||
167 | return -ENOMEM; | 167 | return -ENOMEM; |
168 | } | 168 | } |
169 | 169 | ||
170 | isink->wm831x = wm831x; | ||
171 | |||
170 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | 172 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
171 | if (res == NULL) { | 173 | if (res == NULL) { |
172 | dev_err(&pdev->dev, "No I/O resource\n"); | 174 | dev_err(&pdev->dev, "No I/O resource\n"); |
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c index 33a10c47260e..4c5d5d0c4cfc 100644 --- a/drivers/rtc/rtc-pcf50633.c +++ b/drivers/rtc/rtc-pcf50633.c | |||
@@ -292,8 +292,9 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) | |||
292 | &pcf50633_rtc_ops, THIS_MODULE); | 292 | &pcf50633_rtc_ops, THIS_MODULE); |
293 | 293 | ||
294 | if (IS_ERR(rtc->rtc_dev)) { | 294 | if (IS_ERR(rtc->rtc_dev)) { |
295 | int ret = PTR_ERR(rtc->rtc_dev); | ||
295 | kfree(rtc); | 296 | kfree(rtc); |
296 | return PTR_ERR(rtc->rtc_dev); | 297 | return ret; |
297 | } | 298 | } |
298 | 299 | ||
299 | pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, | 300 | pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, |
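The pcf50633 hunk is a use-after-free fix on the error path: PTR_ERR(rtc->rtc_dev) was previously evaluated after kfree(rtc) had released the structure holding rtc_dev, so the error code is now captured first. A minimal stand-alone sketch of the pattern — struct fake_rtc and fake_probe() are invented for illustration and only mimic the IS_ERR()/PTR_ERR() shape of the kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct fake_rtc {
	void *rtc_dev;			/* would hold an ERR_PTR() value in the kernel */
};

static int fake_probe(void)
{
	struct fake_rtc *rtc = malloc(sizeof(*rtc));

	if (!rtc)
		return -ENOMEM;

	rtc->rtc_dev = NULL;		/* pretend device registration failed */

	if (!rtc->rtc_dev) {		/* stands in for IS_ERR(rtc->rtc_dev) */
		int ret = -EIO;		/* capture the code *before* freeing rtc */

		free(rtc);		/* rtc->rtc_dev must not be touched after this */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("probe returned %d\n", fake_probe());
	return 0;
}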
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index 310c10795e9a..6583c1a8b070 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c | |||
@@ -195,7 +195,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, | |||
195 | /* year, since the rtc epoch*/ | 195 | /* year, since the rtc epoch*/ |
196 | buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100); | 196 | buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100); |
197 | buf[CCR_WDAY] = tm->tm_wday & 0x07; | 197 | buf[CCR_WDAY] = tm->tm_wday & 0x07; |
198 | buf[CCR_Y2K] = bin2bcd(tm->tm_year / 100); | 198 | buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100); |
199 | } | 199 | } |
200 | 200 | ||
201 | /* If writing alarm registers, set compare bits on registers 0-4 */ | 201 | /* If writing alarm registers, set compare bits on registers 0-4 */ |
@@ -280,9 +280,9 @@ static int x1205_fix_osc(struct i2c_client *client) | |||
280 | int err; | 280 | int err; |
281 | struct rtc_time tm; | 281 | struct rtc_time tm; |
282 | 282 | ||
283 | tm.tm_hour = tm.tm_min = tm.tm_sec = 0; | 283 | memset(&tm, 0, sizeof(tm)); |
284 | 284 | ||
285 | err = x1205_set_datetime(client, &tm, 0, X1205_CCR_BASE, 0); | 285 | err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0); |
286 | if (err < 0) | 286 | if (err < 0) |
287 | dev_err(&client->dev, "unable to restart the oscillator\n"); | 287 | dev_err(&client->dev, "unable to restart the oscillator\n"); |
288 | 288 | ||
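Both rtc-x1205 hunks are correctness fixes. struct rtc_time carries tm_year as years since 1900, so the century (Y2K) register has to be derived from the full Gregorian year rather than from tm_year directly; and x1205_fix_osc() now clears the whole rtc_time and passes datetoo=1 so the date registers are written along with the zeroed time. A small worked example of the century byte, using 2009 (tm_year = 109):

#include <stdio.h>

/* Same conversion the RTC core uses: binary value to packed BCD. */
static unsigned char bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	int tm_year = 109;	/* struct rtc_time counts years since 1900, so 109 == 2009 */

	printf("old Y2K byte: 0x%02x\n", bin2bcd(tm_year / 100));		/* 0x01 - wrong century */
	printf("new Y2K byte: 0x%02x\n", bin2bcd((tm_year + 1900) / 100));	/* 0x20 - correct */
	return 0;
}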
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c index 67cde0138061..528733b4a392 100644 --- a/drivers/scsi/pcmcia/aha152x_stub.c +++ b/drivers/scsi/pcmcia/aha152x_stub.c | |||
@@ -54,15 +54,6 @@ | |||
54 | #include <pcmcia/cistpl.h> | 54 | #include <pcmcia/cistpl.h> |
55 | #include <pcmcia/ds.h> | 55 | #include <pcmcia/ds.h> |
56 | 56 | ||
57 | #ifdef PCMCIA_DEBUG | ||
58 | static int pc_debug = PCMCIA_DEBUG; | ||
59 | module_param(pc_debug, int, 0644); | ||
60 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
61 | static char *version = | ||
62 | "aha152x_cs.c 1.54 2000/06/12 21:27:25 (David Hinds)"; | ||
63 | #else | ||
64 | #define DEBUG(n, args...) | ||
65 | #endif | ||
66 | 57 | ||
67 | /*====================================================================*/ | 58 | /*====================================================================*/ |
68 | 59 | ||
@@ -103,7 +94,7 @@ static int aha152x_probe(struct pcmcia_device *link) | |||
103 | { | 94 | { |
104 | scsi_info_t *info; | 95 | scsi_info_t *info; |
105 | 96 | ||
106 | DEBUG(0, "aha152x_attach()\n"); | 97 | dev_dbg(&link->dev, "aha152x_attach()\n"); |
107 | 98 | ||
108 | /* Create new SCSI device */ | 99 | /* Create new SCSI device */ |
109 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 100 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -115,7 +106,6 @@ static int aha152x_probe(struct pcmcia_device *link) | |||
115 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 106 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
116 | link->io.IOAddrLines = 10; | 107 | link->io.IOAddrLines = 10; |
117 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 108 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
118 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
119 | link->conf.Attributes = CONF_ENABLE_IRQ; | 109 | link->conf.Attributes = CONF_ENABLE_IRQ; |
120 | link->conf.IntType = INT_MEMORY_AND_IO; | 110 | link->conf.IntType = INT_MEMORY_AND_IO; |
121 | link->conf.Present = PRESENT_OPTION; | 111 | link->conf.Present = PRESENT_OPTION; |
@@ -127,7 +117,7 @@ static int aha152x_probe(struct pcmcia_device *link) | |||
127 | 117 | ||
128 | static void aha152x_detach(struct pcmcia_device *link) | 118 | static void aha152x_detach(struct pcmcia_device *link) |
129 | { | 119 | { |
130 | DEBUG(0, "aha152x_detach(0x%p)\n", link); | 120 | dev_dbg(&link->dev, "aha152x_detach\n"); |
131 | 121 | ||
132 | aha152x_release_cs(link); | 122 | aha152x_release_cs(link); |
133 | 123 | ||
@@ -137,9 +127,6 @@ static void aha152x_detach(struct pcmcia_device *link) | |||
137 | 127 | ||
138 | /*====================================================================*/ | 128 | /*====================================================================*/ |
139 | 129 | ||
140 | #define CS_CHECK(fn, ret) \ | ||
141 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
142 | |||
143 | static int aha152x_config_check(struct pcmcia_device *p_dev, | 130 | static int aha152x_config_check(struct pcmcia_device *p_dev, |
144 | cistpl_cftable_entry_t *cfg, | 131 | cistpl_cftable_entry_t *cfg, |
145 | cistpl_cftable_entry_t *dflt, | 132 | cistpl_cftable_entry_t *dflt, |
@@ -164,19 +151,22 @@ static int aha152x_config_cs(struct pcmcia_device *link) | |||
164 | { | 151 | { |
165 | scsi_info_t *info = link->priv; | 152 | scsi_info_t *info = link->priv; |
166 | struct aha152x_setup s; | 153 | struct aha152x_setup s; |
167 | int last_ret, last_fn; | 154 | int ret; |
168 | struct Scsi_Host *host; | 155 | struct Scsi_Host *host; |
169 | 156 | ||
170 | DEBUG(0, "aha152x_config(0x%p)\n", link); | 157 | dev_dbg(&link->dev, "aha152x_config\n"); |
171 | 158 | ||
172 | last_ret = pcmcia_loop_config(link, aha152x_config_check, NULL); | 159 | ret = pcmcia_loop_config(link, aha152x_config_check, NULL); |
173 | if (last_ret) { | 160 | if (ret) |
174 | cs_error(link, RequestIO, last_ret); | 161 | goto failed; |
175 | goto failed; | ||
176 | } | ||
177 | 162 | ||
178 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 163 | ret = pcmcia_request_irq(link, &link->irq); |
179 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 164 | if (ret) |
165 | goto failed; | ||
166 | |||
167 | ret = pcmcia_request_configuration(link, &link->conf); | ||
168 | if (ret) | ||
169 | goto failed; | ||
180 | 170 | ||
181 | /* Set configuration options for the aha152x driver */ | 171 | /* Set configuration options for the aha152x driver */ |
182 | memset(&s, 0, sizeof(s)); | 172 | memset(&s, 0, sizeof(s)); |
@@ -194,7 +184,7 @@ static int aha152x_config_cs(struct pcmcia_device *link) | |||
194 | host = aha152x_probe_one(&s); | 184 | host = aha152x_probe_one(&s); |
195 | if (host == NULL) { | 185 | if (host == NULL) { |
196 | printk(KERN_INFO "aha152x_cs: no SCSI devices found\n"); | 186 | printk(KERN_INFO "aha152x_cs: no SCSI devices found\n"); |
197 | goto cs_failed; | 187 | goto failed; |
198 | } | 188 | } |
199 | 189 | ||
200 | sprintf(info->node.dev_name, "scsi%d", host->host_no); | 190 | sprintf(info->node.dev_name, "scsi%d", host->host_no); |
@@ -203,8 +193,6 @@ static int aha152x_config_cs(struct pcmcia_device *link) | |||
203 | 193 | ||
204 | return 0; | 194 | return 0; |
205 | 195 | ||
206 | cs_failed: | ||
207 | cs_error(link, last_fn, last_ret); | ||
208 | failed: | 196 | failed: |
209 | aha152x_release_cs(link); | 197 | aha152x_release_cs(link); |
210 | return -ENODEV; | 198 | return -ENODEV; |
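The aha152x_stub changes set the pattern repeated in the fdomain, qlogic, sym53c500 and serial_cs hunks below: the compile-time PCMCIA_DEBUG/DEBUG() macros give way to dev_dbg(), and the CS_CHECK()/cs_error() error handling is replaced by plain return-code checks that unwind through a single "failed" label. Reduced to a stand-alone sketch — step_a() and step_b() are placeholders for calls such as pcmcia_request_irq() and pcmcia_request_configuration():

#include <stdio.h>

static int step_a(void) { return 0; }		/* e.g. pcmcia_request_irq() */
static int step_b(void) { return -1; }		/* e.g. pcmcia_request_configuration(), failing here */

static int config(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto failed;

	ret = step_b();
	if (ret)
		goto failed;

	return 0;

failed:
	printf("config failed, releasing device\n");	/* stands in for aha152x_release_cs() */
	return -1;
}

int main(void)
{
	return config() ? 1 : 0;
}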
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c index 06254f46a0dd..914040684079 100644 --- a/drivers/scsi/pcmcia/fdomain_stub.c +++ b/drivers/scsi/pcmcia/fdomain_stub.c | |||
@@ -59,16 +59,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | |||
59 | MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); | 59 | MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); |
60 | MODULE_LICENSE("Dual MPL/GPL"); | 60 | MODULE_LICENSE("Dual MPL/GPL"); |
61 | 61 | ||
62 | #ifdef PCMCIA_DEBUG | ||
63 | static int pc_debug = PCMCIA_DEBUG; | ||
64 | module_param(pc_debug, int, 0); | ||
65 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
66 | static char *version = | ||
67 | "fdomain_cs.c 1.47 2001/10/13 00:08:52 (David Hinds)"; | ||
68 | #else | ||
69 | #define DEBUG(n, args...) | ||
70 | #endif | ||
71 | |||
72 | /*====================================================================*/ | 62 | /*====================================================================*/ |
73 | 63 | ||
74 | typedef struct scsi_info_t { | 64 | typedef struct scsi_info_t { |
@@ -86,7 +76,7 @@ static int fdomain_probe(struct pcmcia_device *link) | |||
86 | { | 76 | { |
87 | scsi_info_t *info; | 77 | scsi_info_t *info; |
88 | 78 | ||
89 | DEBUG(0, "fdomain_attach()\n"); | 79 | dev_dbg(&link->dev, "fdomain_attach()\n"); |
90 | 80 | ||
91 | /* Create new SCSI device */ | 81 | /* Create new SCSI device */ |
92 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 82 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -99,7 +89,6 @@ static int fdomain_probe(struct pcmcia_device *link) | |||
99 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 89 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
100 | link->io.IOAddrLines = 10; | 90 | link->io.IOAddrLines = 10; |
101 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 91 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
102 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
103 | link->conf.Attributes = CONF_ENABLE_IRQ; | 92 | link->conf.Attributes = CONF_ENABLE_IRQ; |
104 | link->conf.IntType = INT_MEMORY_AND_IO; | 93 | link->conf.IntType = INT_MEMORY_AND_IO; |
105 | link->conf.Present = PRESENT_OPTION; | 94 | link->conf.Present = PRESENT_OPTION; |
@@ -111,7 +100,7 @@ static int fdomain_probe(struct pcmcia_device *link) | |||
111 | 100 | ||
112 | static void fdomain_detach(struct pcmcia_device *link) | 101 | static void fdomain_detach(struct pcmcia_device *link) |
113 | { | 102 | { |
114 | DEBUG(0, "fdomain_detach(0x%p)\n", link); | 103 | dev_dbg(&link->dev, "fdomain_detach\n"); |
115 | 104 | ||
116 | fdomain_release(link); | 105 | fdomain_release(link); |
117 | 106 | ||
@@ -120,9 +109,6 @@ static void fdomain_detach(struct pcmcia_device *link) | |||
120 | 109 | ||
121 | /*====================================================================*/ | 110 | /*====================================================================*/ |
122 | 111 | ||
123 | #define CS_CHECK(fn, ret) \ | ||
124 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
125 | |||
126 | static int fdomain_config_check(struct pcmcia_device *p_dev, | 112 | static int fdomain_config_check(struct pcmcia_device *p_dev, |
127 | cistpl_cftable_entry_t *cfg, | 113 | cistpl_cftable_entry_t *cfg, |
128 | cistpl_cftable_entry_t *dflt, | 114 | cistpl_cftable_entry_t *dflt, |
@@ -137,20 +123,22 @@ static int fdomain_config_check(struct pcmcia_device *p_dev, | |||
137 | static int fdomain_config(struct pcmcia_device *link) | 123 | static int fdomain_config(struct pcmcia_device *link) |
138 | { | 124 | { |
139 | scsi_info_t *info = link->priv; | 125 | scsi_info_t *info = link->priv; |
140 | int last_ret, last_fn; | 126 | int ret; |
141 | char str[22]; | 127 | char str[22]; |
142 | struct Scsi_Host *host; | 128 | struct Scsi_Host *host; |
143 | 129 | ||
144 | DEBUG(0, "fdomain_config(0x%p)\n", link); | 130 | dev_dbg(&link->dev, "fdomain_config\n"); |
145 | 131 | ||
146 | last_ret = pcmcia_loop_config(link, fdomain_config_check, NULL); | 132 | ret = pcmcia_loop_config(link, fdomain_config_check, NULL); |
147 | if (last_ret) { | 133 | if (ret) |
148 | cs_error(link, RequestIO, last_ret); | ||
149 | goto failed; | 134 | goto failed; |
150 | } | ||
151 | 135 | ||
152 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 136 | ret = pcmcia_request_irq(link, &link->irq); |
153 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 137 | if (ret) |
138 | goto failed; | ||
139 | ret = pcmcia_request_configuration(link, &link->conf); | ||
140 | if (ret) | ||
141 | goto failed; | ||
154 | 142 | ||
155 | /* A bad hack... */ | 143 | /* A bad hack... */ |
156 | release_region(link->io.BasePort1, link->io.NumPorts1); | 144 | release_region(link->io.BasePort1, link->io.NumPorts1); |
@@ -162,11 +150,11 @@ static int fdomain_config(struct pcmcia_device *link) | |||
162 | host = __fdomain_16x0_detect(&fdomain_driver_template); | 150 | host = __fdomain_16x0_detect(&fdomain_driver_template); |
163 | if (!host) { | 151 | if (!host) { |
164 | printk(KERN_INFO "fdomain_cs: no SCSI devices found\n"); | 152 | printk(KERN_INFO "fdomain_cs: no SCSI devices found\n"); |
165 | goto cs_failed; | 153 | goto failed; |
166 | } | 154 | } |
167 | 155 | ||
168 | if (scsi_add_host(host, NULL)) | 156 | if (scsi_add_host(host, NULL)) |
169 | goto cs_failed; | 157 | goto failed; |
170 | scsi_scan_host(host); | 158 | scsi_scan_host(host); |
171 | 159 | ||
172 | sprintf(info->node.dev_name, "scsi%d", host->host_no); | 160 | sprintf(info->node.dev_name, "scsi%d", host->host_no); |
@@ -175,8 +163,6 @@ static int fdomain_config(struct pcmcia_device *link) | |||
175 | 163 | ||
176 | return 0; | 164 | return 0; |
177 | 165 | ||
178 | cs_failed: | ||
179 | cs_error(link, last_fn, last_ret); | ||
180 | failed: | 166 | failed: |
181 | fdomain_release(link); | 167 | fdomain_release(link); |
182 | return -ENODEV; | 168 | return -ENODEV; |
@@ -188,7 +174,7 @@ static void fdomain_release(struct pcmcia_device *link) | |||
188 | { | 174 | { |
189 | scsi_info_t *info = link->priv; | 175 | scsi_info_t *info = link->priv; |
190 | 176 | ||
191 | DEBUG(0, "fdomain_release(0x%p)\n", link); | 177 | dev_dbg(&link->dev, "fdomain_release\n"); |
192 | 178 | ||
193 | scsi_remove_host(info->host); | 179 | scsi_remove_host(info->host); |
194 | pcmcia_disable_device(link); | 180 | pcmcia_disable_device(link); |
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index e32c344d7ad8..c2341af587a3 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -1564,12 +1564,10 @@ static int nsp_cs_probe(struct pcmcia_device *link) | |||
1564 | link->io.IOAddrLines = 10; /* not used */ | 1564 | link->io.IOAddrLines = 10; /* not used */ |
1565 | 1565 | ||
1566 | /* Interrupt setup */ | 1566 | /* Interrupt setup */ |
1567 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 1567 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
1568 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
1569 | 1568 | ||
1570 | /* Interrupt handler */ | 1569 | /* Interrupt handler */ |
1571 | link->irq.Handler = &nspintr; | 1570 | link->irq.Handler = &nspintr; |
1572 | link->irq.Instance = info; | ||
1573 | link->irq.Attributes |= IRQF_SHARED; | 1571 | link->irq.Attributes |= IRQF_SHARED; |
1574 | 1572 | ||
1575 | /* General socket configuration */ | 1573 | /* General socket configuration */ |
@@ -1684,10 +1682,10 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, | |||
1684 | if (cfg_mem->req.Size < 0x1000) | 1682 | if (cfg_mem->req.Size < 0x1000) |
1685 | cfg_mem->req.Size = 0x1000; | 1683 | cfg_mem->req.Size = 0x1000; |
1686 | cfg_mem->req.AccessSpeed = 0; | 1684 | cfg_mem->req.AccessSpeed = 0; |
1687 | if (pcmcia_request_window(&p_dev, &cfg_mem->req, &p_dev->win) != 0) | 1685 | if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0) |
1688 | goto next_entry; | 1686 | goto next_entry; |
1689 | map.Page = 0; map.CardOffset = mem->win[0].card_addr; | 1687 | map.Page = 0; map.CardOffset = mem->win[0].card_addr; |
1690 | if (pcmcia_map_mem_page(p_dev->win, &map) != 0) | 1688 | if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0) |
1691 | goto next_entry; | 1689 | goto next_entry; |
1692 | 1690 | ||
1693 | cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); | 1691 | cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); |
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c index 20c3e5e6d88a..f85f094870b4 100644 --- a/drivers/scsi/pcmcia/qlogic_stub.c +++ b/drivers/scsi/pcmcia/qlogic_stub.c | |||
@@ -62,15 +62,6 @@ | |||
62 | 62 | ||
63 | static char qlogic_name[] = "qlogic_cs"; | 63 | static char qlogic_name[] = "qlogic_cs"; |
64 | 64 | ||
65 | #ifdef PCMCIA_DEBUG | ||
66 | static int pc_debug = PCMCIA_DEBUG; | ||
67 | module_param(pc_debug, int, 0644); | ||
68 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
69 | static char *version = "qlogic_cs.c 1.79-ac 2002/10/26 (David Hinds)"; | ||
70 | #else | ||
71 | #define DEBUG(n, args...) | ||
72 | #endif | ||
73 | |||
74 | static struct scsi_host_template qlogicfas_driver_template = { | 65 | static struct scsi_host_template qlogicfas_driver_template = { |
75 | .module = THIS_MODULE, | 66 | .module = THIS_MODULE, |
76 | .name = qlogic_name, | 67 | .name = qlogic_name, |
@@ -159,7 +150,7 @@ static int qlogic_probe(struct pcmcia_device *link) | |||
159 | { | 150 | { |
160 | scsi_info_t *info; | 151 | scsi_info_t *info; |
161 | 152 | ||
162 | DEBUG(0, "qlogic_attach()\n"); | 153 | dev_dbg(&link->dev, "qlogic_attach()\n"); |
163 | 154 | ||
164 | /* Create new SCSI device */ | 155 | /* Create new SCSI device */ |
165 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 156 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -171,7 +162,6 @@ static int qlogic_probe(struct pcmcia_device *link) | |||
171 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 162 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
172 | link->io.IOAddrLines = 10; | 163 | link->io.IOAddrLines = 10; |
173 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 164 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
174 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
175 | link->conf.Attributes = CONF_ENABLE_IRQ; | 165 | link->conf.Attributes = CONF_ENABLE_IRQ; |
176 | link->conf.IntType = INT_MEMORY_AND_IO; | 166 | link->conf.IntType = INT_MEMORY_AND_IO; |
177 | link->conf.Present = PRESENT_OPTION; | 167 | link->conf.Present = PRESENT_OPTION; |
@@ -183,7 +173,7 @@ static int qlogic_probe(struct pcmcia_device *link) | |||
183 | 173 | ||
184 | static void qlogic_detach(struct pcmcia_device *link) | 174 | static void qlogic_detach(struct pcmcia_device *link) |
185 | { | 175 | { |
186 | DEBUG(0, "qlogic_detach(0x%p)\n", link); | 176 | dev_dbg(&link->dev, "qlogic_detach\n"); |
187 | 177 | ||
188 | qlogic_release(link); | 178 | qlogic_release(link); |
189 | kfree(link->priv); | 179 | kfree(link->priv); |
@@ -192,9 +182,6 @@ static void qlogic_detach(struct pcmcia_device *link) | |||
192 | 182 | ||
193 | /*====================================================================*/ | 183 | /*====================================================================*/ |
194 | 184 | ||
195 | #define CS_CHECK(fn, ret) \ | ||
196 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
197 | |||
198 | static int qlogic_config_check(struct pcmcia_device *p_dev, | 185 | static int qlogic_config_check(struct pcmcia_device *p_dev, |
199 | cistpl_cftable_entry_t *cfg, | 186 | cistpl_cftable_entry_t *cfg, |
200 | cistpl_cftable_entry_t *dflt, | 187 | cistpl_cftable_entry_t *dflt, |
@@ -213,19 +200,22 @@ static int qlogic_config_check(struct pcmcia_device *p_dev, | |||
213 | static int qlogic_config(struct pcmcia_device * link) | 200 | static int qlogic_config(struct pcmcia_device * link) |
214 | { | 201 | { |
215 | scsi_info_t *info = link->priv; | 202 | scsi_info_t *info = link->priv; |
216 | int last_ret, last_fn; | 203 | int ret; |
217 | struct Scsi_Host *host; | 204 | struct Scsi_Host *host; |
218 | 205 | ||
219 | DEBUG(0, "qlogic_config(0x%p)\n", link); | 206 | dev_dbg(&link->dev, "qlogic_config\n"); |
220 | 207 | ||
221 | last_ret = pcmcia_loop_config(link, qlogic_config_check, NULL); | 208 | ret = pcmcia_loop_config(link, qlogic_config_check, NULL); |
222 | if (last_ret) { | 209 | if (ret) |
223 | cs_error(link, RequestIO, last_ret); | 210 | goto failed; |
211 | |||
212 | ret = pcmcia_request_irq(link, &link->irq); | ||
213 | if (ret) | ||
224 | goto failed; | 214 | goto failed; |
225 | } | ||
226 | 215 | ||
227 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 216 | ret = pcmcia_request_configuration(link, &link->conf); |
228 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 217 | if (ret) |
218 | goto failed; | ||
229 | 219 | ||
230 | if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { | 220 | if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { |
231 | /* set ATAcmd */ | 221 | /* set ATAcmd */ |
@@ -244,7 +234,7 @@ static int qlogic_config(struct pcmcia_device * link) | |||
244 | 234 | ||
245 | if (!host) { | 235 | if (!host) { |
246 | printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); | 236 | printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); |
247 | goto cs_failed; | 237 | goto failed; |
248 | } | 238 | } |
249 | 239 | ||
250 | sprintf(info->node.dev_name, "scsi%d", host->host_no); | 240 | sprintf(info->node.dev_name, "scsi%d", host->host_no); |
@@ -253,12 +243,9 @@ static int qlogic_config(struct pcmcia_device * link) | |||
253 | 243 | ||
254 | return 0; | 244 | return 0; |
255 | 245 | ||
256 | cs_failed: | ||
257 | cs_error(link, last_fn, last_ret); | ||
258 | pcmcia_disable_device(link); | ||
259 | failed: | 246 | failed: |
247 | pcmcia_disable_device(link); | ||
260 | return -ENODEV; | 248 | return -ENODEV; |
261 | |||
262 | } /* qlogic_config */ | 249 | } /* qlogic_config */ |
263 | 250 | ||
264 | /*====================================================================*/ | 251 | /*====================================================================*/ |
@@ -267,7 +254,7 @@ static void qlogic_release(struct pcmcia_device *link) | |||
267 | { | 254 | { |
268 | scsi_info_t *info = link->priv; | 255 | scsi_info_t *info = link->priv; |
269 | 256 | ||
270 | DEBUG(0, "qlogic_release(0x%p)\n", link); | 257 | dev_dbg(&link->dev, "qlogic_release\n"); |
271 | 258 | ||
272 | scsi_remove_host(info->host); | 259 | scsi_remove_host(info->host); |
273 | 260 | ||
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index b330c11a1752..e7564d8f0cbf 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c | |||
@@ -77,17 +77,6 @@ | |||
77 | #include <pcmcia/ds.h> | 77 | #include <pcmcia/ds.h> |
78 | #include <pcmcia/ciscode.h> | 78 | #include <pcmcia/ciscode.h> |
79 | 79 | ||
80 | /* ================================================================== */ | ||
81 | |||
82 | #ifdef PCMCIA_DEBUG | ||
83 | static int pc_debug = PCMCIA_DEBUG; | ||
84 | module_param(pc_debug, int, 0); | ||
85 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
86 | static char *version = | ||
87 | "sym53c500_cs.c 0.9c 2004/10/27 (Bob Tracy)"; | ||
88 | #else | ||
89 | #define DEBUG(n, args...) | ||
90 | #endif | ||
91 | 80 | ||
92 | /* ================================================================== */ | 81 | /* ================================================================== */ |
93 | 82 | ||
@@ -525,7 +514,7 @@ SYM53C500_release(struct pcmcia_device *link) | |||
525 | struct scsi_info_t *info = link->priv; | 514 | struct scsi_info_t *info = link->priv; |
526 | struct Scsi_Host *shost = info->host; | 515 | struct Scsi_Host *shost = info->host; |
527 | 516 | ||
528 | DEBUG(0, "SYM53C500_release(0x%p)\n", link); | 517 | dev_dbg(&link->dev, "SYM53C500_release\n"); |
529 | 518 | ||
530 | /* | 519 | /* |
531 | * Do this before releasing/freeing resources. | 520 | * Do this before releasing/freeing resources. |
@@ -697,9 +686,6 @@ static struct scsi_host_template sym53c500_driver_template = { | |||
697 | .shost_attrs = SYM53C500_shost_attrs | 686 | .shost_attrs = SYM53C500_shost_attrs |
698 | }; | 687 | }; |
699 | 688 | ||
700 | #define CS_CHECK(fn, ret) \ | ||
701 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
702 | |||
703 | static int SYM53C500_config_check(struct pcmcia_device *p_dev, | 689 | static int SYM53C500_config_check(struct pcmcia_device *p_dev, |
704 | cistpl_cftable_entry_t *cfg, | 690 | cistpl_cftable_entry_t *cfg, |
705 | cistpl_cftable_entry_t *dflt, | 691 | cistpl_cftable_entry_t *dflt, |
@@ -719,24 +705,27 @@ static int | |||
719 | SYM53C500_config(struct pcmcia_device *link) | 705 | SYM53C500_config(struct pcmcia_device *link) |
720 | { | 706 | { |
721 | struct scsi_info_t *info = link->priv; | 707 | struct scsi_info_t *info = link->priv; |
722 | int last_ret, last_fn; | 708 | int ret; |
723 | int irq_level, port_base; | 709 | int irq_level, port_base; |
724 | struct Scsi_Host *host; | 710 | struct Scsi_Host *host; |
725 | struct scsi_host_template *tpnt = &sym53c500_driver_template; | 711 | struct scsi_host_template *tpnt = &sym53c500_driver_template; |
726 | struct sym53c500_data *data; | 712 | struct sym53c500_data *data; |
727 | 713 | ||
728 | DEBUG(0, "SYM53C500_config(0x%p)\n", link); | 714 | dev_dbg(&link->dev, "SYM53C500_config\n"); |
729 | 715 | ||
730 | info->manf_id = link->manf_id; | 716 | info->manf_id = link->manf_id; |
731 | 717 | ||
732 | last_ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); | 718 | ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); |
733 | if (last_ret) { | 719 | if (ret) |
734 | cs_error(link, RequestIO, last_ret); | 720 | goto failed; |
721 | |||
722 | ret = pcmcia_request_irq(link, &link->irq); | ||
723 | if (ret) | ||
735 | goto failed; | 724 | goto failed; |
736 | } | ||
737 | 725 | ||
738 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 726 | ret = pcmcia_request_configuration(link, &link->conf); |
739 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 727 | if (ret) |
728 | goto failed; | ||
740 | 729 | ||
741 | /* | 730 | /* |
742 | * That's the trouble with copying liberally from another driver. | 731 | * That's the trouble with copying liberally from another driver. |
@@ -824,8 +813,6 @@ err_release: | |||
824 | printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); | 813 | printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); |
825 | return -ENODEV; | 814 | return -ENODEV; |
826 | 815 | ||
827 | cs_failed: | ||
828 | cs_error(link, last_fn, last_ret); | ||
829 | failed: | 816 | failed: |
830 | SYM53C500_release(link); | 817 | SYM53C500_release(link); |
831 | return -ENODEV; | 818 | return -ENODEV; |
@@ -855,7 +842,7 @@ static int sym53c500_resume(struct pcmcia_device *link) | |||
855 | static void | 842 | static void |
856 | SYM53C500_detach(struct pcmcia_device *link) | 843 | SYM53C500_detach(struct pcmcia_device *link) |
857 | { | 844 | { |
858 | DEBUG(0, "SYM53C500_detach(0x%p)\n", link); | 845 | dev_dbg(&link->dev, "SYM53C500_detach\n"); |
859 | 846 | ||
860 | SYM53C500_release(link); | 847 | SYM53C500_release(link); |
861 | 848 | ||
@@ -868,7 +855,7 @@ SYM53C500_probe(struct pcmcia_device *link) | |||
868 | { | 855 | { |
869 | struct scsi_info_t *info; | 856 | struct scsi_info_t *info; |
870 | 857 | ||
871 | DEBUG(0, "SYM53C500_attach()\n"); | 858 | dev_dbg(&link->dev, "SYM53C500_attach()\n"); |
872 | 859 | ||
873 | /* Create new SCSI device */ | 860 | /* Create new SCSI device */ |
874 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 861 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
@@ -880,7 +867,6 @@ SYM53C500_probe(struct pcmcia_device *link) | |||
880 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 867 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
881 | link->io.IOAddrLines = 10; | 868 | link->io.IOAddrLines = 10; |
882 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 869 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
883 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
884 | link->conf.Attributes = CONF_ENABLE_IRQ; | 870 | link->conf.Attributes = CONF_ENABLE_IRQ; |
885 | link->conf.IntType = INT_MEMORY_AND_IO; | 871 | link->conf.IntType = INT_MEMORY_AND_IO; |
886 | 872 | ||
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c index beddaa6e9069..37ad0c449937 100644 --- a/drivers/serial/bcm63xx_uart.c +++ b/drivers/serial/bcm63xx_uart.c | |||
@@ -242,7 +242,7 @@ static void bcm_uart_do_rx(struct uart_port *port) | |||
242 | * higher than fifo size anyway since we're much faster than | 242 | * higher than fifo size anyway since we're much faster than |
243 | * serial port */ | 243 | * serial port */ |
244 | max_count = 32; | 244 | max_count = 32; |
245 | tty = port->info->port.tty; | 245 | tty = port->state->port.tty; |
246 | do { | 246 | do { |
247 | unsigned int iestat, c, cstat; | 247 | unsigned int iestat, c, cstat; |
248 | char flag; | 248 | char flag; |
@@ -318,7 +318,7 @@ static void bcm_uart_do_tx(struct uart_port *port) | |||
318 | return; | 318 | return; |
319 | } | 319 | } |
320 | 320 | ||
321 | xmit = &port->info->xmit; | 321 | xmit = &port->state->xmit; |
322 | if (uart_circ_empty(xmit)) | 322 | if (uart_circ_empty(xmit)) |
323 | goto txq_empty; | 323 | goto txq_empty; |
324 | 324 | ||
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 02406ba6da1c..cdf172eda2e3 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c | |||
@@ -161,6 +161,7 @@ static int of_platform_serial_remove(struct of_device *ofdev) | |||
161 | static struct of_device_id __devinitdata of_platform_serial_table[] = { | 161 | static struct of_device_id __devinitdata of_platform_serial_table[] = { |
162 | { .type = "serial", .compatible = "ns8250", .data = (void *)PORT_8250, }, | 162 | { .type = "serial", .compatible = "ns8250", .data = (void *)PORT_8250, }, |
163 | { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, | 163 | { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, |
164 | { .type = "serial", .compatible = "ns16550a", .data = (void *)PORT_16550A, }, | ||
164 | { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, | 165 | { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, |
165 | { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, | 166 | { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, |
166 | { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, }, | 167 | { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, }, |
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 7c7914f5fa02..fc413f0f8dd2 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -54,14 +54,6 @@ | |||
54 | 54 | ||
55 | #include "8250.h" | 55 | #include "8250.h" |
56 | 56 | ||
57 | #ifdef PCMCIA_DEBUG | ||
58 | static int pc_debug = PCMCIA_DEBUG; | ||
59 | module_param(pc_debug, int, 0644); | ||
60 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
61 | static char *version = "serial_cs.c 1.134 2002/05/04 05:48:53 (David Hinds)"; | ||
62 | #else | ||
63 | #define DEBUG(n, args...) | ||
64 | #endif | ||
65 | 57 | ||
66 | /*====================================================================*/ | 58 | /*====================================================================*/ |
67 | 59 | ||
@@ -121,24 +113,20 @@ static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_ | |||
121 | static int quirk_post_ibm(struct pcmcia_device *link) | 113 | static int quirk_post_ibm(struct pcmcia_device *link) |
122 | { | 114 | { |
123 | conf_reg_t reg = { 0, CS_READ, 0x800, 0 }; | 115 | conf_reg_t reg = { 0, CS_READ, 0x800, 0 }; |
124 | int last_ret, last_fn; | 116 | int ret; |
117 | |||
118 | ret = pcmcia_access_configuration_register(link, ®); | ||
119 | if (ret) | ||
120 | goto failed; | ||
125 | 121 | ||
126 | last_ret = pcmcia_access_configuration_register(link, ®); | ||
127 | if (last_ret) { | ||
128 | last_fn = AccessConfigurationRegister; | ||
129 | goto cs_failed; | ||
130 | } | ||
131 | reg.Action = CS_WRITE; | 122 | reg.Action = CS_WRITE; |
132 | reg.Value = reg.Value | 1; | 123 | reg.Value = reg.Value | 1; |
133 | last_ret = pcmcia_access_configuration_register(link, ®); | 124 | ret = pcmcia_access_configuration_register(link, ®); |
134 | if (last_ret) { | 125 | if (ret) |
135 | last_fn = AccessConfigurationRegister; | 126 | goto failed; |
136 | goto cs_failed; | ||
137 | } | ||
138 | return 0; | 127 | return 0; |
139 | 128 | ||
140 | cs_failed: | 129 | failed: |
141 | cs_error(link, last_fn, last_ret); | ||
142 | return -ENODEV; | 130 | return -ENODEV; |
143 | } | 131 | } |
144 | 132 | ||
@@ -283,7 +271,7 @@ static void serial_remove(struct pcmcia_device *link) | |||
283 | struct serial_info *info = link->priv; | 271 | struct serial_info *info = link->priv; |
284 | int i; | 272 | int i; |
285 | 273 | ||
286 | DEBUG(0, "serial_release(0x%p)\n", link); | 274 | dev_dbg(&link->dev, "serial_release\n"); |
287 | 275 | ||
288 | /* | 276 | /* |
289 | * Recheck to see if the device is still configured. | 277 | * Recheck to see if the device is still configured. |
@@ -334,7 +322,7 @@ static int serial_probe(struct pcmcia_device *link) | |||
334 | { | 322 | { |
335 | struct serial_info *info; | 323 | struct serial_info *info; |
336 | 324 | ||
337 | DEBUG(0, "serial_attach()\n"); | 325 | dev_dbg(&link->dev, "serial_attach()\n"); |
338 | 326 | ||
339 | /* Create new serial device */ | 327 | /* Create new serial device */ |
340 | info = kzalloc(sizeof (*info), GFP_KERNEL); | 328 | info = kzalloc(sizeof (*info), GFP_KERNEL); |
@@ -346,7 +334,6 @@ static int serial_probe(struct pcmcia_device *link) | |||
346 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 334 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
347 | link->io.NumPorts1 = 8; | 335 | link->io.NumPorts1 = 8; |
348 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 336 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
349 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
350 | link->conf.Attributes = CONF_ENABLE_IRQ; | 337 | link->conf.Attributes = CONF_ENABLE_IRQ; |
351 | if (do_sound) { | 338 | if (do_sound) { |
352 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 339 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
@@ -370,7 +357,7 @@ static void serial_detach(struct pcmcia_device *link) | |||
370 | { | 357 | { |
371 | struct serial_info *info = link->priv; | 358 | struct serial_info *info = link->priv; |
372 | 359 | ||
373 | DEBUG(0, "serial_detach(0x%p)\n", link); | 360 | dev_dbg(&link->dev, "serial_detach\n"); |
374 | 361 | ||
375 | /* | 362 | /* |
376 | * Ensure any outstanding scheduled tasks are completed. | 363 | * Ensure any outstanding scheduled tasks are completed. |
@@ -399,7 +386,7 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, | |||
399 | port.irq = irq; | 386 | port.irq = irq; |
400 | port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; | 387 | port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; |
401 | port.uartclk = 1843200; | 388 | port.uartclk = 1843200; |
402 | port.dev = &handle_to_dev(handle); | 389 | port.dev = &handle->dev; |
403 | if (buggy_uart) | 390 | if (buggy_uart) |
404 | port.flags |= UPF_BUGGY_UART; | 391 | port.flags |= UPF_BUGGY_UART; |
405 | 392 | ||
@@ -426,21 +413,6 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info, | |||
426 | 413 | ||
427 | /*====================================================================*/ | 414 | /*====================================================================*/ |
428 | 415 | ||
429 | static int | ||
430 | first_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse) | ||
431 | { | ||
432 | int i; | ||
433 | i = pcmcia_get_first_tuple(handle, tuple); | ||
434 | if (i != 0) | ||
435 | return i; | ||
436 | i = pcmcia_get_tuple_data(handle, tuple); | ||
437 | if (i != 0) | ||
438 | return i; | ||
439 | return pcmcia_parse_tuple(tuple, parse); | ||
440 | } | ||
441 | |||
442 | /*====================================================================*/ | ||
443 | |||
444 | static int simple_config_check(struct pcmcia_device *p_dev, | 416 | static int simple_config_check(struct pcmcia_device *p_dev, |
445 | cistpl_cftable_entry_t *cf, | 417 | cistpl_cftable_entry_t *cf, |
446 | cistpl_cftable_entry_t *dflt, | 418 | cistpl_cftable_entry_t *dflt, |
@@ -522,15 +494,13 @@ static int simple_config(struct pcmcia_device *link) | |||
522 | 494 | ||
523 | printk(KERN_NOTICE | 495 | printk(KERN_NOTICE |
524 | "serial_cs: no usable port range found, giving up\n"); | 496 | "serial_cs: no usable port range found, giving up\n"); |
525 | cs_error(link, RequestIO, i); | ||
526 | return -1; | 497 | return -1; |
527 | 498 | ||
528 | found_port: | 499 | found_port: |
529 | i = pcmcia_request_irq(link, &link->irq); | 500 | i = pcmcia_request_irq(link, &link->irq); |
530 | if (i != 0) { | 501 | if (i != 0) |
531 | cs_error(link, RequestIRQ, i); | ||
532 | link->irq.AssignedIRQ = 0; | 502 | link->irq.AssignedIRQ = 0; |
533 | } | 503 | |
534 | if (info->multi && (info->manfid == MANFID_3COM)) | 504 | if (info->multi && (info->manfid == MANFID_3COM)) |
535 | link->conf.ConfigIndex &= ~(0x08); | 505 | link->conf.ConfigIndex &= ~(0x08); |
536 | 506 | ||
@@ -541,10 +511,8 @@ found_port: | |||
541 | info->quirk->config(link); | 511 | info->quirk->config(link); |
542 | 512 | ||
543 | i = pcmcia_request_configuration(link, &link->conf); | 513 | i = pcmcia_request_configuration(link, &link->conf); |
544 | if (i != 0) { | 514 | if (i != 0) |
545 | cs_error(link, RequestConfiguration, i); | ||
546 | return -1; | 515 | return -1; |
547 | } | ||
548 | return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ); | 516 | return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ); |
549 | } | 517 | } |
550 | 518 | ||
@@ -613,7 +581,6 @@ static int multi_config(struct pcmcia_device *link) | |||
613 | /* FIXME: comment does not fit, error handling does not fit */ | 581 | /* FIXME: comment does not fit, error handling does not fit */ |
614 | printk(KERN_NOTICE | 582 | printk(KERN_NOTICE |
615 | "serial_cs: no usable port range found, giving up\n"); | 583 | "serial_cs: no usable port range found, giving up\n"); |
616 | cs_error(link, RequestIRQ, i); | ||
617 | link->irq.AssignedIRQ = 0; | 584 | link->irq.AssignedIRQ = 0; |
618 | } | 585 | } |
619 | 586 | ||
@@ -624,10 +591,8 @@ static int multi_config(struct pcmcia_device *link) | |||
624 | info->quirk->config(link); | 591 | info->quirk->config(link); |
625 | 592 | ||
626 | i = pcmcia_request_configuration(link, &link->conf); | 593 | i = pcmcia_request_configuration(link, &link->conf); |
627 | if (i != 0) { | 594 | if (i != 0) |
628 | cs_error(link, RequestConfiguration, i); | ||
629 | return -ENODEV; | 595 | return -ENODEV; |
630 | } | ||
631 | 596 | ||
632 | /* The Oxford Semiconductor OXCF950 cards are in fact single-port: | 597 | /* The Oxford Semiconductor OXCF950 cards are in fact single-port: |
633 | * 8 registers are for the UART, the others are extra registers. | 598 | * 8 registers are for the UART, the others are extra registers. |
@@ -665,6 +630,25 @@ static int multi_config(struct pcmcia_device *link) | |||
665 | return 0; | 630 | return 0; |
666 | } | 631 | } |
667 | 632 | ||
633 | static int serial_check_for_multi(struct pcmcia_device *p_dev, | ||
634 | cistpl_cftable_entry_t *cf, | ||
635 | cistpl_cftable_entry_t *dflt, | ||
636 | unsigned int vcc, | ||
637 | void *priv_data) | ||
638 | { | ||
639 | struct serial_info *info = p_dev->priv; | ||
640 | |||
641 | if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0)) | ||
642 | info->multi = cf->io.win[0].len >> 3; | ||
643 | |||
644 | if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) && | ||
645 | (cf->io.win[1].len == 8)) | ||
646 | info->multi = 2; | ||
647 | |||
648 | return 0; /* break */ | ||
649 | } | ||
650 | |||
651 | |||
668 | /*====================================================================== | 652 | /*====================================================================== |
669 | 653 | ||
670 | serial_config() is scheduled to run after a CARD_INSERTION event | 654 | serial_config() is scheduled to run after a CARD_INSERTION event |
@@ -676,46 +660,14 @@ static int multi_config(struct pcmcia_device *link) | |||
676 | static int serial_config(struct pcmcia_device * link) | 660 | static int serial_config(struct pcmcia_device * link) |
677 | { | 661 | { |
678 | struct serial_info *info = link->priv; | 662 | struct serial_info *info = link->priv; |
679 | struct serial_cfg_mem *cfg_mem; | 663 | int i; |
680 | tuple_t *tuple; | ||
681 | u_char *buf; | ||
682 | cisparse_t *parse; | ||
683 | cistpl_cftable_entry_t *cf; | ||
684 | int i, last_ret, last_fn; | ||
685 | |||
686 | DEBUG(0, "serial_config(0x%p)\n", link); | ||
687 | |||
688 | cfg_mem = kmalloc(sizeof(struct serial_cfg_mem), GFP_KERNEL); | ||
689 | if (!cfg_mem) | ||
690 | goto failed; | ||
691 | 664 | ||
692 | tuple = &cfg_mem->tuple; | 665 | dev_dbg(&link->dev, "serial_config\n"); |
693 | parse = &cfg_mem->parse; | ||
694 | cf = &parse->cftable_entry; | ||
695 | buf = cfg_mem->buf; | ||
696 | |||
697 | tuple->TupleData = (cisdata_t *) buf; | ||
698 | tuple->TupleOffset = 0; | ||
699 | tuple->TupleDataMax = 255; | ||
700 | tuple->Attributes = 0; | ||
701 | |||
702 | /* Get configuration register information */ | ||
703 | tuple->DesiredTuple = CISTPL_CONFIG; | ||
704 | last_ret = first_tuple(link, tuple, parse); | ||
705 | if (last_ret != 0) { | ||
706 | last_fn = ParseTuple; | ||
707 | goto cs_failed; | ||
708 | } | ||
709 | link->conf.ConfigBase = parse->config.base; | ||
710 | link->conf.Present = parse->config.rmask[0]; | ||
711 | 666 | ||
712 | /* Is this a compliant multifunction card? */ | 667 | /* Is this a compliant multifunction card? */ |
713 | tuple->DesiredTuple = CISTPL_LONGLINK_MFC; | 668 | info->multi = (link->socket->functions > 1); |
714 | tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK; | ||
715 | info->multi = (first_tuple(link, tuple, parse) == 0); | ||
716 | 669 | ||
717 | /* Is this a multiport card? */ | 670 | /* Is this a multiport card? */ |
718 | tuple->DesiredTuple = CISTPL_MANFID; | ||
719 | info->manfid = link->manf_id; | 671 | info->manfid = link->manf_id; |
720 | info->prodid = link->card_id; | 672 | info->prodid = link->card_id; |
721 | 673 | ||
@@ -730,20 +682,11 @@ static int serial_config(struct pcmcia_device * link) | |||
730 | 682 | ||
731 | /* Another check for dual-serial cards: look for either serial or | 683 | /* Another check for dual-serial cards: look for either serial or |
732 | multifunction cards that ask for appropriate IO port ranges */ | 684 | multifunction cards that ask for appropriate IO port ranges */ |
733 | tuple->DesiredTuple = CISTPL_FUNCID; | ||
734 | if ((info->multi == 0) && | 685 | if ((info->multi == 0) && |
735 | (link->has_func_id) && | 686 | (link->has_func_id) && |
736 | ((link->func_id == CISTPL_FUNCID_MULTI) || | 687 | ((link->func_id == CISTPL_FUNCID_MULTI) || |
737 | (link->func_id == CISTPL_FUNCID_SERIAL))) { | 688 | (link->func_id == CISTPL_FUNCID_SERIAL))) |
738 | tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; | 689 | pcmcia_loop_config(link, serial_check_for_multi, info); |
739 | if (first_tuple(link, tuple, parse) == 0) { | ||
740 | if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0)) | ||
741 | info->multi = cf->io.win[0].len >> 3; | ||
742 | if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) && | ||
743 | (cf->io.win[1].len == 8)) | ||
744 | info->multi = 2; | ||
745 | } | ||
746 | } | ||
747 | 690 | ||
748 | /* | 691 | /* |
749 | * Apply any multi-port quirk. | 692 | * Apply any multi-port quirk. |
@@ -768,14 +711,10 @@ static int serial_config(struct pcmcia_device * link) | |||
768 | goto failed; | 711 | goto failed; |
769 | 712 | ||
770 | link->dev_node = &info->node[0]; | 713 | link->dev_node = &info->node[0]; |
771 | kfree(cfg_mem); | ||
772 | return 0; | 714 | return 0; |
773 | 715 | ||
774 | cs_failed: | ||
775 | cs_error(link, last_fn, last_ret); | ||
776 | failed: | 716 | failed: |
777 | serial_remove(link); | 717 | serial_remove(link); |
778 | kfree(cfg_mem); | ||
779 | return -ENODEV; | 718 | return -ENODEV; |
780 | } | 719 | } |
781 | 720 | ||
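The serial_cs rewrite drops the hand-rolled first_tuple() CIS walk: multifunction detection now comes from link->socket->functions, and the dual-UART heuristics move into serial_check_for_multi(), which the PCMCIA core invokes for each configuration-table entry via pcmcia_loop_config(). Reduced to a stand-alone sketch of that "core iterates, driver inspects" shape — the table contents, loop_config() and check_for_multi() below are invented for illustration:

#include <stdio.h>

struct cfg_entry {
	int nwin;
	int win0_len;
};

/* Stand-in for pcmcia_loop_config(): call the driver's check on each entry. */
static int loop_config(const struct cfg_entry *tbl, int n,
		       int (*check)(const struct cfg_entry *, void *),
		       void *priv)
{
	int i;

	for (i = 0; i < n; i++)
		if (check(&tbl[i], priv) == 0)
			return 0;	/* callback accepted an entry */
	return -1;
}

/* Stand-in for serial_check_for_multi(): derive the port count. */
static int check_for_multi(const struct cfg_entry *cf, void *priv)
{
	int *multi = priv;

	if (cf->nwin == 1 && cf->win0_len % 8 == 0)
		*multi = cf->win0_len >> 3;
	return 0;	/* stop after the first entry, as the driver does */
}

int main(void)
{
	struct cfg_entry table[] = { { 1, 16 } };	/* one I/O window of 16 ports */
	int multi = 0;

	loop_config(table, 1, check_for_multi, &multi);
	printf("multi = %d\n", multi);			/* 16 / 8 = 2 UARTs */
	return 0;
}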
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c index 96057de133ad..19f75627c3de 100644 --- a/drivers/spi/spi_txx9.c +++ b/drivers/spi/spi_txx9.c | |||
@@ -29,6 +29,8 @@ | |||
29 | 29 | ||
30 | 30 | ||
31 | #define SPI_FIFO_SIZE 4 | 31 | #define SPI_FIFO_SIZE 4 |
32 | #define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */ | ||
33 | #define SPI_MIN_DIVIDER 1 /* Min. value for SPCR1.SER */ | ||
32 | 34 | ||
33 | #define TXx9_SPMCR 0x00 | 35 | #define TXx9_SPMCR 0x00 |
34 | #define TXx9_SPCR0 0x04 | 36 | #define TXx9_SPCR0 0x04 |
@@ -193,11 +195,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) | |||
193 | 195 | ||
194 | if (prev_speed_hz != speed_hz | 196 | if (prev_speed_hz != speed_hz |
195 | || prev_bits_per_word != bits_per_word) { | 197 | || prev_bits_per_word != bits_per_word) { |
196 | u32 n = (c->baseclk + speed_hz - 1) / speed_hz; | 198 | int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; |
197 | if (n < 1) | 199 | n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); |
198 | n = 1; | ||
199 | else if (n > 0xff) | ||
200 | n = 0xff; | ||
201 | /* enter config mode */ | 200 | /* enter config mode */ |
202 | txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, | 201 | txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, |
203 | TXx9_SPMCR); | 202 | TXx9_SPMCR); |
@@ -370,8 +369,8 @@ static int __init txx9spi_probe(struct platform_device *dev) | |||
370 | goto exit; | 369 | goto exit; |
371 | } | 370 | } |
372 | c->baseclk = clk_get_rate(c->clk); | 371 | c->baseclk = clk_get_rate(c->clk); |
373 | c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff; | 372 | c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1); |
374 | c->max_speed_hz = c->baseclk; | 373 | c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1); |
375 | 374 | ||
376 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); | 375 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); |
377 | if (!res) | 376 | if (!res) |
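A quick userspace check of the new spi_txx9 divider math, assuming (as the min/max speed calculation above implies) that the TXx9 SPI bit clock is baseclk / (SER + 1). Rounding the quotient up before subtracting one picks the fastest rate that does not exceed the requested speed; the clock values below are illustrative only.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define SPI_MAX_DIVIDER         0xff    /* Max. value for SPCR1.SER */
#define SPI_MIN_DIVIDER         1       /* Min. value for SPCR1.SER */

int main(void)
{
        unsigned int baseclk = 10000000;        /* hypothetical 10 MHz */
        unsigned int speed_hz = 3000000;        /* requested 3 MHz */

        int n = DIV_ROUND_UP(baseclk, speed_hz) - 1;    /* ceil(10/3) - 1 = 3 */
        if (n < SPI_MIN_DIVIDER)                        /* open-coded clamp() */
                n = SPI_MIN_DIVIDER;
        else if (n > SPI_MAX_DIVIDER)
                n = SPI_MAX_DIVIDER;

        /* prints "SER=3 -> 2500000 Hz", i.e. at or below the requested rate */
        printf("SER=%d -> %u Hz\n", n, baseclk / (n + 1));
        return 0;
}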
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index 100e7a5c5ea1..e72f4046a5e0 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c | |||
@@ -617,136 +617,140 @@ static int ssb_pcmcia_sprom_check_crc(const u16 *sprom, size_t size) | |||
617 | } \ | 617 | } \ |
618 | } while (0) | 618 | } while (0) |
619 | 619 | ||
620 | int ssb_pcmcia_get_invariants(struct ssb_bus *bus, | 620 | static int ssb_pcmcia_get_mac(struct pcmcia_device *p_dev, |
621 | struct ssb_init_invariants *iv) | 621 | tuple_t *tuple, |
622 | void *priv) | ||
622 | { | 623 | { |
623 | tuple_t tuple; | 624 | struct ssb_sprom *sprom = priv; |
624 | int res; | 625 | |
625 | unsigned char buf[32]; | 626 | if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID) |
627 | return -EINVAL; | ||
628 | if (tuple->TupleDataLen != ETH_ALEN + 2) | ||
629 | return -EINVAL; | ||
630 | if (tuple->TupleData[1] != ETH_ALEN) | ||
631 | return -EINVAL; | ||
632 | memcpy(sprom->il0mac, &tuple->TupleData[2], ETH_ALEN); | ||
633 | return 0; | ||
634 | }; | ||
635 | |||
636 | static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev, | ||
637 | tuple_t *tuple, | ||
638 | void *priv) | ||
639 | { | ||
640 | struct ssb_init_invariants *iv = priv; | ||
626 | struct ssb_sprom *sprom = &iv->sprom; | 641 | struct ssb_sprom *sprom = &iv->sprom; |
627 | struct ssb_boardinfo *bi = &iv->boardinfo; | 642 | struct ssb_boardinfo *bi = &iv->boardinfo; |
628 | const char *error_description; | 643 | const char *error_description; |
629 | 644 | ||
645 | GOTO_ERROR_ON(tuple->TupleDataLen < 1, "VEN tpl < 1"); | ||
646 | switch (tuple->TupleData[0]) { | ||
647 | case SSB_PCMCIA_CIS_ID: | ||
648 | GOTO_ERROR_ON((tuple->TupleDataLen != 5) && | ||
649 | (tuple->TupleDataLen != 7), | ||
650 | "id tpl size"); | ||
651 | bi->vendor = tuple->TupleData[1] | | ||
652 | ((u16)tuple->TupleData[2] << 8); | ||
653 | break; | ||
654 | case SSB_PCMCIA_CIS_BOARDREV: | ||
655 | GOTO_ERROR_ON(tuple->TupleDataLen != 2, | ||
656 | "boardrev tpl size"); | ||
657 | sprom->board_rev = tuple->TupleData[1]; | ||
658 | break; | ||
659 | case SSB_PCMCIA_CIS_PA: | ||
660 | GOTO_ERROR_ON((tuple->TupleDataLen != 9) && | ||
661 | (tuple->TupleDataLen != 10), | ||
662 | "pa tpl size"); | ||
663 | sprom->pa0b0 = tuple->TupleData[1] | | ||
664 | ((u16)tuple->TupleData[2] << 8); | ||
665 | sprom->pa0b1 = tuple->TupleData[3] | | ||
666 | ((u16)tuple->TupleData[4] << 8); | ||
667 | sprom->pa0b2 = tuple->TupleData[5] | | ||
668 | ((u16)tuple->TupleData[6] << 8); | ||
669 | sprom->itssi_a = tuple->TupleData[7]; | ||
670 | sprom->itssi_bg = tuple->TupleData[7]; | ||
671 | sprom->maxpwr_a = tuple->TupleData[8]; | ||
672 | sprom->maxpwr_bg = tuple->TupleData[8]; | ||
673 | break; | ||
674 | case SSB_PCMCIA_CIS_OEMNAME: | ||
675 | /* We ignore this. */ | ||
676 | break; | ||
677 | case SSB_PCMCIA_CIS_CCODE: | ||
678 | GOTO_ERROR_ON(tuple->TupleDataLen != 2, | ||
679 | "ccode tpl size"); | ||
680 | sprom->country_code = tuple->TupleData[1]; | ||
681 | break; | ||
682 | case SSB_PCMCIA_CIS_ANTENNA: | ||
683 | GOTO_ERROR_ON(tuple->TupleDataLen != 2, | ||
684 | "ant tpl size"); | ||
685 | sprom->ant_available_a = tuple->TupleData[1]; | ||
686 | sprom->ant_available_bg = tuple->TupleData[1]; | ||
687 | break; | ||
688 | case SSB_PCMCIA_CIS_ANTGAIN: | ||
689 | GOTO_ERROR_ON(tuple->TupleDataLen != 2, | ||
690 | "antg tpl size"); | ||
691 | sprom->antenna_gain.ghz24.a0 = tuple->TupleData[1]; | ||
692 | sprom->antenna_gain.ghz24.a1 = tuple->TupleData[1]; | ||
693 | sprom->antenna_gain.ghz24.a2 = tuple->TupleData[1]; | ||
694 | sprom->antenna_gain.ghz24.a3 = tuple->TupleData[1]; | ||
695 | sprom->antenna_gain.ghz5.a0 = tuple->TupleData[1]; | ||
696 | sprom->antenna_gain.ghz5.a1 = tuple->TupleData[1]; | ||
697 | sprom->antenna_gain.ghz5.a2 = tuple->TupleData[1]; | ||
698 | sprom->antenna_gain.ghz5.a3 = tuple->TupleData[1]; | ||
699 | break; | ||
700 | case SSB_PCMCIA_CIS_BFLAGS: | ||
701 | GOTO_ERROR_ON((tuple->TupleDataLen != 3) && | ||
702 | (tuple->TupleDataLen != 5), | ||
703 | "bfl tpl size"); | ||
704 | sprom->boardflags_lo = tuple->TupleData[1] | | ||
705 | ((u16)tuple->TupleData[2] << 8); | ||
706 | break; | ||
707 | case SSB_PCMCIA_CIS_LEDS: | ||
708 | GOTO_ERROR_ON(tuple->TupleDataLen != 5, | ||
709 | "leds tpl size"); | ||
710 | sprom->gpio0 = tuple->TupleData[1]; | ||
711 | sprom->gpio1 = tuple->TupleData[2]; | ||
712 | sprom->gpio2 = tuple->TupleData[3]; | ||
713 | sprom->gpio3 = tuple->TupleData[4]; | ||
714 | break; | ||
715 | } | ||
716 | return -ENOSPC; /* continue with next entry */ | ||
717 | |||
718 | error: | ||
719 | ssb_printk(KERN_ERR PFX | ||
720 | "PCMCIA: Failed to fetch device invariants: %s\n", | ||
721 | error_description); | ||
722 | return -ENODEV; | ||
723 | } | ||
724 | |||
725 | |||
726 | int ssb_pcmcia_get_invariants(struct ssb_bus *bus, | ||
727 | struct ssb_init_invariants *iv) | ||
728 | { | ||
729 | struct ssb_sprom *sprom = &iv->sprom; | ||
730 | int res; | ||
731 | |||
630 | memset(sprom, 0xFF, sizeof(*sprom)); | 732 | memset(sprom, 0xFF, sizeof(*sprom)); |
631 | sprom->revision = 1; | 733 | sprom->revision = 1; |
632 | sprom->boardflags_lo = 0; | 734 | sprom->boardflags_lo = 0; |
633 | sprom->boardflags_hi = 0; | 735 | sprom->boardflags_hi = 0; |
634 | 736 | ||
635 | /* First fetch the MAC address. */ | 737 | /* First fetch the MAC address. */ |
636 | memset(&tuple, 0, sizeof(tuple)); | 738 | res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE, |
637 | tuple.DesiredTuple = CISTPL_FUNCE; | 739 | ssb_pcmcia_get_mac, sprom); |
638 | tuple.TupleData = buf; | 740 | if (res != 0) { |
639 | tuple.TupleDataMax = sizeof(buf); | 741 | ssb_printk(KERN_ERR PFX |
640 | res = pcmcia_get_first_tuple(bus->host_pcmcia, &tuple); | 742 | "PCMCIA: Failed to fetch MAC address\n"); |
641 | GOTO_ERROR_ON(res != 0, "MAC first tpl"); | 743 | return -ENODEV; |
642 | res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple); | ||
643 | GOTO_ERROR_ON(res != 0, "MAC first tpl data"); | ||
644 | while (1) { | ||
645 | GOTO_ERROR_ON(tuple.TupleDataLen < 1, "MAC tpl < 1"); | ||
646 | if (tuple.TupleData[0] == CISTPL_FUNCE_LAN_NODE_ID) | ||
647 | break; | ||
648 | res = pcmcia_get_next_tuple(bus->host_pcmcia, &tuple); | ||
649 | GOTO_ERROR_ON(res != 0, "MAC next tpl"); | ||
650 | res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple); | ||
651 | GOTO_ERROR_ON(res != 0, "MAC next tpl data"); | ||
652 | } | 744 | } |
653 | GOTO_ERROR_ON(tuple.TupleDataLen != ETH_ALEN + 2, "MAC tpl size"); | ||
654 | memcpy(sprom->il0mac, &tuple.TupleData[2], ETH_ALEN); | ||
655 | 745 | ||
656 | /* Fetch the vendor specific tuples. */ | 746 | /* Fetch the vendor specific tuples. */ |
657 | memset(&tuple, 0, sizeof(tuple)); | 747 | res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, |
658 | tuple.DesiredTuple = SSB_PCMCIA_CIS; | 748 | ssb_pcmcia_do_get_invariants, sprom); |
659 | tuple.TupleData = buf; | 749 | if ((res == 0) || (res == -ENOSPC)) |
660 | tuple.TupleDataMax = sizeof(buf); | 750 | return 0; |
661 | res = pcmcia_get_first_tuple(bus->host_pcmcia, &tuple); | ||
662 | GOTO_ERROR_ON(res != 0, "VEN first tpl"); | ||
663 | res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple); | ||
664 | GOTO_ERROR_ON(res != 0, "VEN first tpl data"); | ||
665 | while (1) { | ||
666 | GOTO_ERROR_ON(tuple.TupleDataLen < 1, "VEN tpl < 1"); | ||
667 | switch (tuple.TupleData[0]) { | ||
668 | case SSB_PCMCIA_CIS_ID: | ||
669 | GOTO_ERROR_ON((tuple.TupleDataLen != 5) && | ||
670 | (tuple.TupleDataLen != 7), | ||
671 | "id tpl size"); | ||
672 | bi->vendor = tuple.TupleData[1] | | ||
673 | ((u16)tuple.TupleData[2] << 8); | ||
674 | break; | ||
675 | case SSB_PCMCIA_CIS_BOARDREV: | ||
676 | GOTO_ERROR_ON(tuple.TupleDataLen != 2, | ||
677 | "boardrev tpl size"); | ||
678 | sprom->board_rev = tuple.TupleData[1]; | ||
679 | break; | ||
680 | case SSB_PCMCIA_CIS_PA: | ||
681 | GOTO_ERROR_ON((tuple.TupleDataLen != 9) && | ||
682 | (tuple.TupleDataLen != 10), | ||
683 | "pa tpl size"); | ||
684 | sprom->pa0b0 = tuple.TupleData[1] | | ||
685 | ((u16)tuple.TupleData[2] << 8); | ||
686 | sprom->pa0b1 = tuple.TupleData[3] | | ||
687 | ((u16)tuple.TupleData[4] << 8); | ||
688 | sprom->pa0b2 = tuple.TupleData[5] | | ||
689 | ((u16)tuple.TupleData[6] << 8); | ||
690 | sprom->itssi_a = tuple.TupleData[7]; | ||
691 | sprom->itssi_bg = tuple.TupleData[7]; | ||
692 | sprom->maxpwr_a = tuple.TupleData[8]; | ||
693 | sprom->maxpwr_bg = tuple.TupleData[8]; | ||
694 | break; | ||
695 | case SSB_PCMCIA_CIS_OEMNAME: | ||
696 | /* We ignore this. */ | ||
697 | break; | ||
698 | case SSB_PCMCIA_CIS_CCODE: | ||
699 | GOTO_ERROR_ON(tuple.TupleDataLen != 2, | ||
700 | "ccode tpl size"); | ||
701 | sprom->country_code = tuple.TupleData[1]; | ||
702 | break; | ||
703 | case SSB_PCMCIA_CIS_ANTENNA: | ||
704 | GOTO_ERROR_ON(tuple.TupleDataLen != 2, | ||
705 | "ant tpl size"); | ||
706 | sprom->ant_available_a = tuple.TupleData[1]; | ||
707 | sprom->ant_available_bg = tuple.TupleData[1]; | ||
708 | break; | ||
709 | case SSB_PCMCIA_CIS_ANTGAIN: | ||
710 | GOTO_ERROR_ON(tuple.TupleDataLen != 2, | ||
711 | "antg tpl size"); | ||
712 | sprom->antenna_gain.ghz24.a0 = tuple.TupleData[1]; | ||
713 | sprom->antenna_gain.ghz24.a1 = tuple.TupleData[1]; | ||
714 | sprom->antenna_gain.ghz24.a2 = tuple.TupleData[1]; | ||
715 | sprom->antenna_gain.ghz24.a3 = tuple.TupleData[1]; | ||
716 | sprom->antenna_gain.ghz5.a0 = tuple.TupleData[1]; | ||
717 | sprom->antenna_gain.ghz5.a1 = tuple.TupleData[1]; | ||
718 | sprom->antenna_gain.ghz5.a2 = tuple.TupleData[1]; | ||
719 | sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1]; | ||
720 | break; | ||
721 | case SSB_PCMCIA_CIS_BFLAGS: | ||
722 | GOTO_ERROR_ON((tuple.TupleDataLen != 3) && | ||
723 | (tuple.TupleDataLen != 5), | ||
724 | "bfl tpl size"); | ||
725 | sprom->boardflags_lo = tuple.TupleData[1] | | ||
726 | ((u16)tuple.TupleData[2] << 8); | ||
727 | break; | ||
728 | case SSB_PCMCIA_CIS_LEDS: | ||
729 | GOTO_ERROR_ON(tuple.TupleDataLen != 5, | ||
730 | "leds tpl size"); | ||
731 | sprom->gpio0 = tuple.TupleData[1]; | ||
732 | sprom->gpio1 = tuple.TupleData[2]; | ||
733 | sprom->gpio2 = tuple.TupleData[3]; | ||
734 | sprom->gpio3 = tuple.TupleData[4]; | ||
735 | break; | ||
736 | } | ||
737 | res = pcmcia_get_next_tuple(bus->host_pcmcia, &tuple); | ||
738 | if (res == -ENOSPC) | ||
739 | break; | ||
740 | GOTO_ERROR_ON(res != 0, "VEN next tpl"); | ||
741 | res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple); | ||
742 | GOTO_ERROR_ON(res != 0, "VEN next tpl data"); | ||
743 | } | ||
744 | 751 | ||
745 | return 0; | ||
746 | error: | ||
747 | ssb_printk(KERN_ERR PFX | 752 | ssb_printk(KERN_ERR PFX |
748 | "PCMCIA: Failed to fetch device invariants: %s\n", | 753 | "PCMCIA: Failed to fetch device invariants\n"); |
749 | error_description); | ||
750 | return -ENODEV; | 754 | return -ENODEV; |
751 | } | 755 | } |
752 | 756 | ||
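The ssb conversion above relies on the pcmcia_loop_tuple() convention that a callback returning 0 stops the walk, while -ENOSPC (as used in ssb_pcmcia_do_get_invariants(), and also what tuple iteration itself returns once the CIS is exhausted) means "continue with the next matching tuple". The fragment below is a minimal illustration of that shape; EXAMPLE_CISTPL_VENDOR and example_get_vendor_word are invented names, modelled on ssb_pcmcia_get_mac() above.

static int example_get_vendor_word(struct pcmcia_device *p_dev,
                                   tuple_t *tuple, void *priv)
{
        u16 *value = priv;

        if (tuple->TupleDataLen < 3)
                return -ENOSPC;         /* not the tuple we want, keep going */

        /* tuple payload is little-endian, as in the SPROM fields above */
        *value = tuple->TupleData[1] | ((u16)tuple->TupleData[2] << 8);
        return 0;                       /* found it, stop the walk */
}

/* Caller side (error handling trimmed):
 *
 *      u16 word;
 *      res = pcmcia_loop_tuple(p_dev, EXAMPLE_CISTPL_VENDOR,
 *                              example_get_vendor_word, &word);
 */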
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c index 80c0df8656f3..39923cb388be 100644 --- a/drivers/staging/comedi/drivers/cb_das16_cs.c +++ b/drivers/staging/comedi/drivers/cb_das16_cs.c | |||
@@ -141,37 +141,14 @@ static int das16cs_timer_insn_config(struct comedi_device *dev, | |||
141 | struct comedi_insn *insn, | 141 | struct comedi_insn *insn, |
142 | unsigned int *data); | 142 | unsigned int *data); |
143 | 143 | ||
144 | static int get_prodid(struct comedi_device *dev, struct pcmcia_device *link) | ||
145 | { | ||
146 | tuple_t tuple; | ||
147 | u_short buf[128]; | ||
148 | int prodid = 0; | ||
149 | |||
150 | tuple.TupleData = (cisdata_t *) buf; | ||
151 | tuple.TupleOffset = 0; | ||
152 | tuple.TupleDataMax = 255; | ||
153 | tuple.DesiredTuple = CISTPL_MANFID; | ||
154 | tuple.Attributes = TUPLE_RETURN_COMMON; | ||
155 | if ((pcmcia_get_first_tuple(link, &tuple) == 0) && | ||
156 | (pcmcia_get_tuple_data(link, &tuple) == 0)) { | ||
157 | prodid = le16_to_cpu(buf[1]); | ||
158 | } | ||
159 | |||
160 | return prodid; | ||
161 | } | ||
162 | |||
163 | static const struct das16cs_board *das16cs_probe(struct comedi_device *dev, | 144 | static const struct das16cs_board *das16cs_probe(struct comedi_device *dev, |
164 | struct pcmcia_device *link) | 145 | struct pcmcia_device *link) |
165 | { | 146 | { |
166 | int id; | ||
167 | int i; | 147 | int i; |
168 | 148 | ||
169 | id = get_prodid(dev, link); | ||
170 | |||
171 | for (i = 0; i < n_boards; i++) { | 149 | for (i = 0; i < n_boards; i++) { |
172 | if (das16cs_boards[i].device_id == id) { | 150 | if (das16cs_boards[i].device_id == link->card_id) |
173 | return das16cs_boards + i; | 151 | return das16cs_boards + i; |
174 | } | ||
175 | } | 152 | } |
176 | 153 | ||
177 | printk("unknown board!\n"); | 154 | printk("unknown board!\n"); |
@@ -660,27 +637,8 @@ static int das16cs_timer_insn_config(struct comedi_device *dev, | |||
660 | 637 | ||
661 | ======================================================================*/ | 638 | ======================================================================*/ |
662 | 639 | ||
663 | /* | ||
664 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
665 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
666 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
667 | be present but disabled -- but it can then be enabled for specific | ||
668 | modules at load time with a 'pc_debug=#' option to insmod. | ||
669 | */ | ||
670 | #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) | 640 | #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) |
671 | 641 | ||
672 | #ifdef PCMCIA_DEBUG | ||
673 | static int pc_debug = PCMCIA_DEBUG; | ||
674 | module_param(pc_debug, int, 0644); | ||
675 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
676 | static char *version = | ||
677 | "cb_das16_cs.c pcmcia code (David Schleef), modified from dummy_cs.c 1.31 2001/08/24 12:13:13 (David Hinds)"; | ||
678 | #else | ||
679 | #define DEBUG(n, args...) | ||
680 | #endif | ||
681 | |||
682 | /*====================================================================*/ | ||
683 | |||
684 | static void das16cs_pcmcia_config(struct pcmcia_device *link); | 642 | static void das16cs_pcmcia_config(struct pcmcia_device *link); |
685 | static void das16cs_pcmcia_release(struct pcmcia_device *link); | 643 | static void das16cs_pcmcia_release(struct pcmcia_device *link); |
686 | static int das16cs_pcmcia_suspend(struct pcmcia_device *p_dev); | 644 | static int das16cs_pcmcia_suspend(struct pcmcia_device *p_dev); |
@@ -733,7 +691,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link) | |||
733 | { | 691 | { |
734 | struct local_info_t *local; | 692 | struct local_info_t *local; |
735 | 693 | ||
736 | DEBUG(0, "das16cs_pcmcia_attach()\n"); | 694 | dev_dbg(&link->dev, "das16cs_pcmcia_attach()\n"); |
737 | 695 | ||
738 | /* Allocate space for private device-specific data */ | 696 | /* Allocate space for private device-specific data */ |
739 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); | 697 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); |
@@ -745,7 +703,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link) | |||
745 | /* Initialize the pcmcia_device structure */ | 703 | /* Initialize the pcmcia_device structure */ |
746 | /* Interrupt setup */ | 704 | /* Interrupt setup */ |
747 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 705 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
748 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
749 | link->irq.Handler = NULL; | 706 | link->irq.Handler = NULL; |
750 | 707 | ||
751 | link->conf.Attributes = 0; | 708 | link->conf.Attributes = 0; |
@@ -760,7 +717,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link) | |||
760 | 717 | ||
761 | static void das16cs_pcmcia_detach(struct pcmcia_device *link) | 718 | static void das16cs_pcmcia_detach(struct pcmcia_device *link) |
762 | { | 719 | { |
763 | DEBUG(0, "das16cs_pcmcia_detach(0x%p)\n", link); | 720 | dev_dbg(&link->dev, "das16cs_pcmcia_detach\n"); |
764 | 721 | ||
765 | if (link->dev_node) { | 722 | if (link->dev_node) { |
766 | ((struct local_info_t *)link->priv)->stop = 1; | 723 | ((struct local_info_t *)link->priv)->stop = 1; |
@@ -771,118 +728,55 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link) | |||
771 | kfree(link->priv); | 728 | kfree(link->priv); |
772 | } /* das16cs_pcmcia_detach */ | 729 | } /* das16cs_pcmcia_detach */ |
773 | 730 | ||
774 | static void das16cs_pcmcia_config(struct pcmcia_device *link) | ||
775 | { | ||
776 | struct local_info_t *dev = link->priv; | ||
777 | tuple_t tuple; | ||
778 | cisparse_t parse; | ||
779 | int last_fn, last_ret; | ||
780 | u_char buf[64]; | ||
781 | cistpl_cftable_entry_t dflt = { 0 }; | ||
782 | 731 | ||
783 | DEBUG(0, "das16cs_pcmcia_config(0x%p)\n", link); | 732 | static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev, |
784 | 733 | cistpl_cftable_entry_t *cfg, | |
785 | /* | 734 | cistpl_cftable_entry_t *dflt, |
786 | This reads the card's CONFIG tuple to find its configuration | 735 | unsigned int vcc, |
787 | registers. | 736 | void *priv_data) |
788 | */ | 737 | { |
789 | tuple.DesiredTuple = CISTPL_CONFIG; | 738 | if (cfg->index == 0) |
790 | tuple.Attributes = 0; | 739 | return -EINVAL; |
791 | tuple.TupleData = buf; | ||
792 | tuple.TupleDataMax = sizeof(buf); | ||
793 | tuple.TupleOffset = 0; | ||
794 | |||
795 | last_fn = GetFirstTuple; | ||
796 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
797 | if (last_ret != 0) | ||
798 | goto cs_failed; | ||
799 | |||
800 | last_fn = GetTupleData; | ||
801 | last_ret = pcmcia_get_tuple_data(link, &tuple); | ||
802 | if (last_ret != 0) | ||
803 | goto cs_failed; | ||
804 | |||
805 | last_fn = ParseTuple; | ||
806 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | ||
807 | if (last_ret != 0) | ||
808 | goto cs_failed; | ||
809 | |||
810 | link->conf.ConfigBase = parse.config.base; | ||
811 | link->conf.Present = parse.config.rmask[0]; | ||
812 | 740 | ||
813 | /* | 741 | /* Do we need to allocate an interrupt? */ |
814 | In this loop, we scan the CIS for configuration table entries, | 742 | if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) |
815 | each of which describes a valid card configuration, including | 743 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; |
816 | voltage, IO window, memory window, and interrupt settings. | 744 | |
817 | 745 | /* IO window settings */ | |
818 | We make no assumptions about the card to be configured: we use | 746 | p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; |
819 | just the information available in the CIS. In an ideal world, | 747 | if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { |
820 | this would work for any PCMCIA card, but it requires a complete | 748 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; |
821 | and accurate CIS. In practice, a driver usually "knows" most of | 749 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
822 | these things without consulting the CIS, and most client drivers | 750 | if (!(io->flags & CISTPL_IO_8BIT)) |
823 | will only use the CIS to fill in implementation-defined details. | 751 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
824 | */ | 752 | if (!(io->flags & CISTPL_IO_16BIT)) |
825 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | 753 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
826 | last_fn = GetFirstTuple; | 754 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; |
827 | 755 | p_dev->io.BasePort1 = io->win[0].base; | |
828 | last_ret = pcmcia_get_first_tuple(link, &tuple); | 756 | p_dev->io.NumPorts1 = io->win[0].len; |
829 | if (last_ret) | 757 | if (io->nwin > 1) { |
830 | goto cs_failed; | 758 | p_dev->io.Attributes2 = p_dev->io.Attributes1; |
831 | 759 | p_dev->io.BasePort2 = io->win[1].base; | |
832 | while (1) { | 760 | p_dev->io.NumPorts2 = io->win[1].len; |
833 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | ||
834 | if (pcmcia_get_tuple_data(link, &tuple)) | ||
835 | goto next_entry; | ||
836 | if (pcmcia_parse_tuple(&tuple, &parse)) | ||
837 | goto next_entry; | ||
838 | |||
839 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
840 | dflt = *cfg; | ||
841 | if (cfg->index == 0) | ||
842 | goto next_entry; | ||
843 | link->conf.ConfigIndex = cfg->index; | ||
844 | |||
845 | /* Does this card need audio output? */ | ||
846 | /* if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
847 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
848 | link->conf.Status = CCSR_AUDIO_ENA; | ||
849 | } | ||
850 | */ | ||
851 | /* Do we need to allocate an interrupt? */ | ||
852 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | ||
853 | link->conf.Attributes |= CONF_ENABLE_IRQ; | ||
854 | |||
855 | /* IO window settings */ | ||
856 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | ||
857 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
858 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
859 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
860 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
861 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
862 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
863 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
864 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
865 | link->io.BasePort1 = io->win[0].base; | ||
866 | link->io.NumPorts1 = io->win[0].len; | ||
867 | if (io->nwin > 1) { | ||
868 | link->io.Attributes2 = link->io.Attributes1; | ||
869 | link->io.BasePort2 = io->win[1].base; | ||
870 | link->io.NumPorts2 = io->win[1].len; | ||
871 | } | ||
872 | /* This reserves IO space but doesn't actually enable it */ | ||
873 | if (pcmcia_request_io(link, &link->io)) | ||
874 | goto next_entry; | ||
875 | } | 761 | } |
762 | /* This reserves IO space but doesn't actually enable it */ | ||
763 | return pcmcia_request_io(p_dev, &p_dev->io); | ||
764 | } | ||
876 | 765 | ||
877 | /* If we got this far, we're cool! */ | 766 | return 0; |
878 | break; | 767 | } |
768 | |||
769 | static void das16cs_pcmcia_config(struct pcmcia_device *link) | ||
770 | { | ||
771 | struct local_info_t *dev = link->priv; | ||
772 | int ret; | ||
879 | 773 | ||
880 | next_entry: | 774 | dev_dbg(&link->dev, "das16cs_pcmcia_config\n"); |
881 | last_fn = GetNextTuple; | ||
882 | 775 | ||
883 | last_ret = pcmcia_get_next_tuple(link, &tuple); | 776 | ret = pcmcia_loop_config(link, das16cs_pcmcia_config_loop, NULL); |
884 | if (last_ret) | 777 | if (ret) { |
885 | goto cs_failed; | 778 | dev_warn(&link->dev, "no configuration found\n"); |
779 | goto failed; | ||
886 | } | 780 | } |
887 | 781 | ||
888 | /* | 782 | /* |
@@ -891,21 +785,18 @@ next_entry: | |||
891 | irq structure is initialized. | 785 | irq structure is initialized. |
892 | */ | 786 | */ |
893 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 787 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
894 | last_fn = RequestIRQ; | 788 | ret = pcmcia_request_irq(link, &link->irq); |
895 | 789 | if (ret) | |
896 | last_ret = pcmcia_request_irq(link, &link->irq); | 790 | goto failed; |
897 | if (last_ret) | ||
898 | goto cs_failed; | ||
899 | } | 791 | } |
900 | /* | 792 | /* |
901 | This actually configures the PCMCIA socket -- setting up | 793 | This actually configures the PCMCIA socket -- setting up |
902 | the I/O windows and the interrupt mapping, and putting the | 794 | the I/O windows and the interrupt mapping, and putting the |
903 | card and host interface into "Memory and IO" mode. | 795 | card and host interface into "Memory and IO" mode. |
904 | */ | 796 | */ |
905 | last_fn = RequestConfiguration; | 797 | ret = pcmcia_request_configuration(link, &link->conf); |
906 | last_ret = pcmcia_request_configuration(link, &link->conf); | 798 | if (ret) |
907 | if (last_ret) | 799 | goto failed; |
908 | goto cs_failed; | ||
909 | 800 | ||
910 | /* | 801 | /* |
911 | At this point, the dev_node_t structure(s) need to be | 802 | At this point, the dev_node_t structure(s) need to be |
@@ -930,14 +821,13 @@ next_entry: | |||
930 | 821 | ||
931 | return; | 822 | return; |
932 | 823 | ||
933 | cs_failed: | 824 | failed: |
934 | cs_error(link, last_fn, last_ret); | ||
935 | das16cs_pcmcia_release(link); | 825 | das16cs_pcmcia_release(link); |
936 | } /* das16cs_pcmcia_config */ | 826 | } /* das16cs_pcmcia_config */ |
937 | 827 | ||
938 | static void das16cs_pcmcia_release(struct pcmcia_device *link) | 828 | static void das16cs_pcmcia_release(struct pcmcia_device *link) |
939 | { | 829 | { |
940 | DEBUG(0, "das16cs_pcmcia_release(0x%p)\n", link); | 830 | dev_dbg(&link->dev, "das16cs_pcmcia_release\n"); |
941 | pcmcia_disable_device(link); | 831 | pcmcia_disable_device(link); |
942 | } /* das16cs_pcmcia_release */ | 832 | } /* das16cs_pcmcia_release */ |
943 | 833 | ||
@@ -983,14 +873,13 @@ struct pcmcia_driver das16cs_driver = { | |||
983 | 873 | ||
984 | static int __init init_das16cs_pcmcia_cs(void) | 874 | static int __init init_das16cs_pcmcia_cs(void) |
985 | { | 875 | { |
986 | DEBUG(0, "%s\n", version); | ||
987 | pcmcia_register_driver(&das16cs_driver); | 876 | pcmcia_register_driver(&das16cs_driver); |
988 | return 0; | 877 | return 0; |
989 | } | 878 | } |
990 | 879 | ||
991 | static void __exit exit_das16cs_pcmcia_cs(void) | 880 | static void __exit exit_das16cs_pcmcia_cs(void) |
992 | { | 881 | { |
993 | DEBUG(0, "das16cs_pcmcia_cs: unloading\n"); | 882 | pr_debug("das16cs_pcmcia_cs: unloading\n"); |
994 | pcmcia_unregister_driver(&das16cs_driver); | 883 | pcmcia_unregister_driver(&das16cs_driver); |
995 | } | 884 | } |
996 | 885 | ||
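The cb_das16_cs hunk, like the other comedi PCMCIA drivers below, drops the compile-time PCMCIA_DEBUG/DEBUG() machinery in favour of dev_dbg()/pr_debug(). A minimal sketch of the replacement pattern follows; example_attach is a made-up function name, and the dynamic-debug control path assumes debugfs is mounted in its usual place.

static int example_attach(struct pcmcia_device *link)
{
        /* prefixed with the device name; compiled out unless DEBUG or
         * CONFIG_DYNAMIC_DEBUG is enabled */
        dev_dbg(&link->dev, "%s\n", __func__);

        /* module-level message when no struct device is at hand */
        pr_debug("example driver loaded\n");
        return 0;
}

/* With CONFIG_DYNAMIC_DEBUG the messages can be switched on per file at
 * run time instead of rebuilding with pc_debug:
 *
 *      echo 'file cb_das16_cs.c +p' > /sys/kernel/debug/dynamic_debug/control
 */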
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c index 9cab21eaaa18..9b945e5fdd32 100644 --- a/drivers/staging/comedi/drivers/das08_cs.c +++ b/drivers/staging/comedi/drivers/das08_cs.c | |||
@@ -110,25 +110,6 @@ static int das08_cs_attach(struct comedi_device *dev, | |||
110 | 110 | ||
111 | ======================================================================*/ | 111 | ======================================================================*/ |
112 | 112 | ||
113 | /* | ||
114 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
115 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
116 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
117 | be present but disabled -- but it can then be enabled for specific | ||
118 | modules at load time with a 'pc_debug=#' option to insmod. | ||
119 | */ | ||
120 | |||
121 | #ifdef PCMCIA_DEBUG | ||
122 | static int pc_debug = PCMCIA_DEBUG; | ||
123 | module_param(pc_debug, int, 0644); | ||
124 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
125 | static const char *version = | ||
126 | "das08.c pcmcia code (Frank Hess), modified from dummy_cs.c 1.31 2001/08/24 12:13:13 (David Hinds)"; | ||
127 | #else | ||
128 | #define DEBUG(n, args...) | ||
129 | #endif | ||
130 | |||
131 | /*====================================================================*/ | ||
132 | static void das08_pcmcia_config(struct pcmcia_device *link); | 113 | static void das08_pcmcia_config(struct pcmcia_device *link); |
133 | static void das08_pcmcia_release(struct pcmcia_device *link); | 114 | static void das08_pcmcia_release(struct pcmcia_device *link); |
134 | static int das08_pcmcia_suspend(struct pcmcia_device *p_dev); | 115 | static int das08_pcmcia_suspend(struct pcmcia_device *p_dev); |
@@ -181,7 +162,7 @@ static int das08_pcmcia_attach(struct pcmcia_device *link) | |||
181 | { | 162 | { |
182 | struct local_info_t *local; | 163 | struct local_info_t *local; |
183 | 164 | ||
184 | DEBUG(0, "das08_pcmcia_attach()\n"); | 165 | dev_dbg(&link->dev, "das08_pcmcia_attach()\n"); |
185 | 166 | ||
186 | /* Allocate space for private device-specific data */ | 167 | /* Allocate space for private device-specific data */ |
187 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); | 168 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); |
@@ -192,7 +173,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link) | |||
192 | 173 | ||
193 | /* Interrupt setup */ | 174 | /* Interrupt setup */ |
194 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 175 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
195 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
196 | link->irq.Handler = NULL; | 176 | link->irq.Handler = NULL; |
197 | 177 | ||
198 | /* | 178 | /* |
@@ -224,7 +204,7 @@ static int das08_pcmcia_attach(struct pcmcia_device *link) | |||
224 | static void das08_pcmcia_detach(struct pcmcia_device *link) | 204 | static void das08_pcmcia_detach(struct pcmcia_device *link) |
225 | { | 205 | { |
226 | 206 | ||
227 | DEBUG(0, "das08_pcmcia_detach(0x%p)\n", link); | 207 | dev_dbg(&link->dev, "das08_pcmcia_detach\n"); |
228 | 208 | ||
229 | if (link->dev_node) { | 209 | if (link->dev_node) { |
230 | ((struct local_info_t *)link->priv)->stop = 1; | 210 | ((struct local_info_t *)link->priv)->stop = 1; |
@@ -237,6 +217,44 @@ static void das08_pcmcia_detach(struct pcmcia_device *link) | |||
237 | 217 | ||
238 | } /* das08_pcmcia_detach */ | 218 | } /* das08_pcmcia_detach */ |
239 | 219 | ||
220 | |||
221 | static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev, | ||
222 | cistpl_cftable_entry_t *cfg, | ||
223 | cistpl_cftable_entry_t *dflt, | ||
224 | unsigned int vcc, | ||
225 | void *priv_data) | ||
226 | { | ||
227 | if (cfg->index == 0) | ||
228 | return -ENODEV; | ||
229 | |||
230 | /* Do we need to allocate an interrupt? */ | ||
231 | if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) | ||
232 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; | ||
233 | |||
234 | /* IO window settings */ | ||
235 | p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; | ||
236 | if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { | ||
237 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; | ||
238 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
239 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
240 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
241 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
242 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
243 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
244 | p_dev->io.BasePort1 = io->win[0].base; | ||
245 | p_dev->io.NumPorts1 = io->win[0].len; | ||
246 | if (io->nwin > 1) { | ||
247 | p_dev->io.Attributes2 = p_dev->io.Attributes1; | ||
248 | p_dev->io.BasePort2 = io->win[1].base; | ||
249 | p_dev->io.NumPorts2 = io->win[1].len; | ||
250 | } | ||
251 | /* This reserves IO space but doesn't actually enable it */ | ||
252 | return pcmcia_request_io(p_dev, &p_dev->io); | ||
253 | } | ||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | |||
240 | /*====================================================================== | 258 | /*====================================================================== |
241 | 259 | ||
242 | das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event | 260 | das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event |
@@ -248,128 +266,20 @@ static void das08_pcmcia_detach(struct pcmcia_device *link) | |||
248 | static void das08_pcmcia_config(struct pcmcia_device *link) | 266 | static void das08_pcmcia_config(struct pcmcia_device *link) |
249 | { | 267 | { |
250 | struct local_info_t *dev = link->priv; | 268 | struct local_info_t *dev = link->priv; |
251 | tuple_t tuple; | 269 | int ret; |
252 | cisparse_t parse; | ||
253 | int last_fn, last_ret; | ||
254 | u_char buf[64]; | ||
255 | cistpl_cftable_entry_t dflt = { 0 }; | ||
256 | |||
257 | DEBUG(0, "das08_pcmcia_config(0x%p)\n", link); | ||
258 | |||
259 | /* | ||
260 | This reads the card's CONFIG tuple to find its configuration | ||
261 | registers. | ||
262 | */ | ||
263 | tuple.DesiredTuple = CISTPL_CONFIG; | ||
264 | tuple.Attributes = 0; | ||
265 | tuple.TupleData = buf; | ||
266 | tuple.TupleDataMax = sizeof(buf); | ||
267 | tuple.TupleOffset = 0; | ||
268 | last_fn = GetFirstTuple; | ||
269 | |||
270 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
271 | if (last_ret) | ||
272 | goto cs_failed; | ||
273 | |||
274 | last_fn = GetTupleData; | ||
275 | |||
276 | last_ret = pcmcia_get_tuple_data(link, &tuple); | ||
277 | if (last_ret) | ||
278 | goto cs_failed; | ||
279 | |||
280 | last_fn = ParseTuple; | ||
281 | |||
282 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | ||
283 | if (last_ret) | ||
284 | goto cs_failed; | ||
285 | |||
286 | link->conf.ConfigBase = parse.config.base; | ||
287 | link->conf.Present = parse.config.rmask[0]; | ||
288 | |||
289 | /* | ||
290 | In this loop, we scan the CIS for configuration table entries, | ||
291 | each of which describes a valid card configuration, including | ||
292 | voltage, IO window, memory window, and interrupt settings. | ||
293 | |||
294 | We make no assumptions about the card to be configured: we use | ||
295 | just the information available in the CIS. In an ideal world, | ||
296 | this would work for any PCMCIA card, but it requires a complete | ||
297 | and accurate CIS. In practice, a driver usually "knows" most of | ||
298 | these things without consulting the CIS, and most client drivers | ||
299 | will only use the CIS to fill in implementation-defined details. | ||
300 | */ | ||
301 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
302 | last_fn = GetFirstTuple; | ||
303 | |||
304 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
305 | if (last_ret) | ||
306 | goto cs_failed; | ||
307 | |||
308 | while (1) { | ||
309 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | ||
310 | |||
311 | last_ret = pcmcia_get_tuple_data(link, &tuple); | ||
312 | if (last_ret) | ||
313 | goto next_entry; | ||
314 | |||
315 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | ||
316 | if (last_ret) | ||
317 | goto next_entry; | ||
318 | |||
319 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
320 | dflt = *cfg; | ||
321 | if (cfg->index == 0) | ||
322 | goto next_entry; | ||
323 | link->conf.ConfigIndex = cfg->index; | ||
324 | |||
325 | /* Does this card need audio output? */ | ||
326 | /* if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
327 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
328 | link->conf.Status = CCSR_AUDIO_ENA; | ||
329 | } | ||
330 | */ | ||
331 | /* Do we need to allocate an interrupt? */ | ||
332 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | ||
333 | link->conf.Attributes |= CONF_ENABLE_IRQ; | ||
334 | |||
335 | /* IO window settings */ | ||
336 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | ||
337 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
338 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
339 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
340 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
341 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
342 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
343 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
344 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
345 | link->io.BasePort1 = io->win[0].base; | ||
346 | link->io.NumPorts1 = io->win[0].len; | ||
347 | if (io->nwin > 1) { | ||
348 | link->io.Attributes2 = link->io.Attributes1; | ||
349 | link->io.BasePort2 = io->win[1].base; | ||
350 | link->io.NumPorts2 = io->win[1].len; | ||
351 | } | ||
352 | /* This reserves IO space but doesn't actually enable it */ | ||
353 | if (pcmcia_request_io(link, &link->io) != 0) | ||
354 | goto next_entry; | ||
355 | } | ||
356 | |||
357 | /* If we got this far, we're cool! */ | ||
358 | break; | ||
359 | 270 | ||
360 | next_entry: | 271 | dev_dbg(&link->dev, "das08_pcmcia_config\n"); |
361 | last_fn = GetNextTuple; | ||
362 | 272 | ||
363 | last_ret = pcmcia_get_next_tuple(link, &tuple); | 273 | ret = pcmcia_loop_config(link, das08_pcmcia_config_loop, NULL); |
364 | if (last_ret) | 274 | if (ret) { |
365 | goto cs_failed; | 275 | dev_warn(&link->dev, "no configuration found\n"); |
276 | goto failed; | ||
366 | } | 277 | } |
367 | 278 | ||
368 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 279 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
369 | last_fn = RequestIRQ; | 280 | ret = pcmcia_request_irq(link, &link->irq); |
370 | last_ret = pcmcia_request_irq(link, &link->irq); | 281 | if (ret) |
371 | if (last_ret) | 282 | goto failed; |
372 | goto cs_failed; | ||
373 | } | 283 | } |
374 | 284 | ||
375 | /* | 285 | /* |
@@ -377,10 +287,9 @@ next_entry: | |||
377 | the I/O windows and the interrupt mapping, and putting the | 287 | the I/O windows and the interrupt mapping, and putting the |
378 | card and host interface into "Memory and IO" mode. | 288 | card and host interface into "Memory and IO" mode. |
379 | */ | 289 | */ |
380 | last_fn = RequestConfiguration; | 290 | ret = pcmcia_request_configuration(link, &link->conf); |
381 | last_ret = pcmcia_request_configuration(link, &link->conf); | 291 | if (ret) |
382 | if (last_ret) | 292 | goto failed; |
383 | goto cs_failed; | ||
384 | 293 | ||
385 | /* | 294 | /* |
386 | At this point, the dev_node_t structure(s) need to be | 295 | At this point, the dev_node_t structure(s) need to be |
@@ -405,8 +314,7 @@ next_entry: | |||
405 | 314 | ||
406 | return; | 315 | return; |
407 | 316 | ||
408 | cs_failed: | 317 | failed: |
409 | cs_error(link, last_fn, last_ret); | ||
410 | das08_pcmcia_release(link); | 318 | das08_pcmcia_release(link); |
411 | 319 | ||
412 | } /* das08_pcmcia_config */ | 320 | } /* das08_pcmcia_config */ |
@@ -421,7 +329,7 @@ cs_failed: | |||
421 | 329 | ||
422 | static void das08_pcmcia_release(struct pcmcia_device *link) | 330 | static void das08_pcmcia_release(struct pcmcia_device *link) |
423 | { | 331 | { |
424 | DEBUG(0, "das08_pcmcia_release(0x%p)\n", link); | 332 | dev_dbg(&link->dev, "das08_pcmcia_release\n"); |
425 | pcmcia_disable_device(link); | 333 | pcmcia_disable_device(link); |
426 | } /* das08_pcmcia_release */ | 334 | } /* das08_pcmcia_release */ |
427 | 335 | ||
@@ -477,14 +385,13 @@ struct pcmcia_driver das08_cs_driver = { | |||
477 | 385 | ||
478 | static int __init init_das08_pcmcia_cs(void) | 386 | static int __init init_das08_pcmcia_cs(void) |
479 | { | 387 | { |
480 | DEBUG(0, "%s\n", version); | ||
481 | pcmcia_register_driver(&das08_cs_driver); | 388 | pcmcia_register_driver(&das08_cs_driver); |
482 | return 0; | 389 | return 0; |
483 | } | 390 | } |
484 | 391 | ||
485 | static void __exit exit_das08_pcmcia_cs(void) | 392 | static void __exit exit_das08_pcmcia_cs(void) |
486 | { | 393 | { |
487 | DEBUG(0, "das08_pcmcia_cs: unloading\n"); | 394 | pr_debug("das08_pcmcia_cs: unloading\n"); |
488 | pcmcia_unregister_driver(&das08_cs_driver); | 395 | pcmcia_unregister_driver(&das08_cs_driver); |
489 | } | 396 | } |
490 | 397 | ||
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c index ec31a3970664..ef5e1183d47d 100644 --- a/drivers/staging/comedi/drivers/ni_daq_700.c +++ b/drivers/staging/comedi/drivers/ni_daq_700.c | |||
@@ -436,25 +436,7 @@ static int dio700_detach(struct comedi_device *dev) | |||
436 | return 0; | 436 | return 0; |
437 | }; | 437 | }; |
438 | 438 | ||
439 | /* PCMCIA crap */ | 439 | /* PCMCIA crap -- watch your words, please! */ |
440 | |||
441 | /* | ||
442 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
443 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
444 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
445 | be present but disabled -- but it can then be enabled for specific | ||
446 | modules at load time with a 'pc_debug=#' option to insmod. | ||
447 | */ | ||
448 | #ifdef PCMCIA_DEBUG | ||
449 | static int pc_debug = PCMCIA_DEBUG; | ||
450 | module_param(pc_debug, int, 0644); | ||
451 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
452 | static char *version = "ni_daq_700.c, based on dummy_cs.c"; | ||
453 | #else | ||
454 | #define DEBUG(n, args...) | ||
455 | #endif | ||
456 | |||
457 | /*====================================================================*/ | ||
458 | 440 | ||
459 | static void dio700_config(struct pcmcia_device *link); | 441 | static void dio700_config(struct pcmcia_device *link); |
460 | static void dio700_release(struct pcmcia_device *link); | 442 | static void dio700_release(struct pcmcia_device *link); |
@@ -510,7 +492,7 @@ static int dio700_cs_attach(struct pcmcia_device *link) | |||
510 | 492 | ||
511 | printk(KERN_INFO "ni_daq_700: cs-attach\n"); | 493 | printk(KERN_INFO "ni_daq_700: cs-attach\n"); |
512 | 494 | ||
513 | DEBUG(0, "dio700_cs_attach()\n"); | 495 | dev_dbg(&link->dev, "dio700_cs_attach()\n"); |
514 | 496 | ||
515 | /* Allocate space for private device-specific data */ | 497 | /* Allocate space for private device-specific data */ |
516 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); | 498 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); |
@@ -521,7 +503,6 @@ static int dio700_cs_attach(struct pcmcia_device *link) | |||
521 | 503 | ||
522 | /* Interrupt setup */ | 504 | /* Interrupt setup */ |
523 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 505 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
524 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
525 | link->irq.Handler = NULL; | 506 | link->irq.Handler = NULL; |
526 | 507 | ||
527 | /* | 508 | /* |
@@ -555,7 +536,7 @@ static void dio700_cs_detach(struct pcmcia_device *link) | |||
555 | 536 | ||
556 | printk(KERN_INFO "ni_daq_700: cs-detach!\n"); | 537 | printk(KERN_INFO "ni_daq_700: cs-detach!\n"); |
557 | 538 | ||
558 | DEBUG(0, "dio700_cs_detach(0x%p)\n", link); | 539 | dev_dbg(&link->dev, "dio700_cs_detach\n"); |
559 | 540 | ||
560 | if (link->dev_node) { | 541 | if (link->dev_node) { |
561 | ((struct local_info_t *)link->priv)->stop = 1; | 542 | ((struct local_info_t *)link->priv)->stop = 1; |
@@ -576,141 +557,85 @@ static void dio700_cs_detach(struct pcmcia_device *link) | |||
576 | 557 | ||
577 | ======================================================================*/ | 558 | ======================================================================*/ |
578 | 559 | ||
579 | static void dio700_config(struct pcmcia_device *link) | 560 | static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev, |
561 | cistpl_cftable_entry_t *cfg, | ||
562 | cistpl_cftable_entry_t *dflt, | ||
563 | unsigned int vcc, | ||
564 | void *priv_data) | ||
580 | { | 565 | { |
581 | struct local_info_t *dev = link->priv; | 566 | win_req_t *req = priv_data; |
582 | tuple_t tuple; | ||
583 | cisparse_t parse; | ||
584 | int last_ret; | ||
585 | u_char buf[64]; | ||
586 | win_req_t req; | ||
587 | memreq_t map; | 567 | memreq_t map; |
588 | cistpl_cftable_entry_t dflt = { 0 }; | ||
589 | 568 | ||
590 | printk(KERN_INFO "ni_daq_700: cs-config\n"); | 569 | if (cfg->index == 0) |
591 | 570 | return -ENODEV; | |
592 | DEBUG(0, "dio700_config(0x%p)\n", link); | ||
593 | 571 | ||
594 | /* | 572 | /* Does this card need audio output? */ |
595 | This reads the card's CONFIG tuple to find its configuration | 573 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { |
596 | registers. | 574 | p_dev->conf.Attributes |= CONF_ENABLE_SPKR; |
597 | */ | 575 | p_dev->conf.Status = CCSR_AUDIO_ENA; |
598 | tuple.DesiredTuple = CISTPL_CONFIG; | ||
599 | tuple.Attributes = 0; | ||
600 | tuple.TupleData = buf; | ||
601 | tuple.TupleDataMax = sizeof(buf); | ||
602 | tuple.TupleOffset = 0; | ||
603 | |||
604 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
605 | if (last_ret) { | ||
606 | cs_error(link, GetFirstTuple, last_ret); | ||
607 | goto cs_failed; | ||
608 | } | 576 | } |
609 | 577 | ||
610 | last_ret = pcmcia_get_tuple_data(link, &tuple); | 578 | /* Do we need to allocate an interrupt? */ |
611 | if (last_ret) { | 579 | if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) |
612 | cs_error(link, GetTupleData, last_ret); | 580 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; |
613 | goto cs_failed; | 581 | |
582 | /* IO window settings */ | ||
583 | p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; | ||
584 | if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { | ||
585 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; | ||
586 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
587 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
588 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
589 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
590 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
591 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
592 | p_dev->io.BasePort1 = io->win[0].base; | ||
593 | p_dev->io.NumPorts1 = io->win[0].len; | ||
594 | if (io->nwin > 1) { | ||
595 | p_dev->io.Attributes2 = p_dev->io.Attributes1; | ||
596 | p_dev->io.BasePort2 = io->win[1].base; | ||
597 | p_dev->io.NumPorts2 = io->win[1].len; | ||
598 | } | ||
599 | /* This reserves IO space but doesn't actually enable it */ | ||
600 | if (pcmcia_request_io(p_dev, &p_dev->io) != 0) | ||
601 | return -ENODEV; | ||
614 | } | 602 | } |
615 | 603 | ||
616 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | 604 | if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) { |
617 | if (last_ret) { | 605 | cistpl_mem_t *mem = |
618 | cs_error(link, ParseTuple, last_ret); | 606 | (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; |
619 | goto cs_failed; | 607 | req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; |
608 | req->Attributes |= WIN_ENABLE; | ||
609 | req->Base = mem->win[0].host_addr; | ||
610 | req->Size = mem->win[0].len; | ||
611 | if (req->Size < 0x1000) | ||
612 | req->Size = 0x1000; | ||
613 | req->AccessSpeed = 0; | ||
614 | if (pcmcia_request_window(p_dev, req, &p_dev->win)) | ||
615 | return -ENODEV; | ||
616 | map.Page = 0; | ||
617 | map.CardOffset = mem->win[0].card_addr; | ||
618 | if (pcmcia_map_mem_page(p_dev, p_dev->win, &map)) | ||
619 | return -ENODEV; | ||
620 | } | 620 | } |
621 | link->conf.ConfigBase = parse.config.base; | 621 | /* If we got this far, we're cool! */ |
622 | link->conf.Present = parse.config.rmask[0]; | 622 | return 0; |
623 | 623 | } | |
624 | /* | ||
625 | In this loop, we scan the CIS for configuration table entries, | ||
626 | each of which describes a valid card configuration, including | ||
627 | voltage, IO window, memory window, and interrupt settings. | ||
628 | |||
629 | We make no assumptions about the card to be configured: we use | ||
630 | just the information available in the CIS. In an ideal world, | ||
631 | this would work for any PCMCIA card, but it requires a complete | ||
632 | and accurate CIS. In practice, a driver usually "knows" most of | ||
633 | these things without consulting the CIS, and most client drivers | ||
634 | will only use the CIS to fill in implementation-defined details. | ||
635 | */ | ||
636 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
637 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
638 | if (last_ret != 0) { | ||
639 | cs_error(link, GetFirstTuple, last_ret); | ||
640 | goto cs_failed; | ||
641 | } | ||
642 | while (1) { | ||
643 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | ||
644 | if (pcmcia_get_tuple_data(link, &tuple) != 0) | ||
645 | goto next_entry; | ||
646 | if (pcmcia_parse_tuple(&tuple, &parse) != 0) | ||
647 | goto next_entry; | ||
648 | |||
649 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
650 | dflt = *cfg; | ||
651 | if (cfg->index == 0) | ||
652 | goto next_entry; | ||
653 | link->conf.ConfigIndex = cfg->index; | ||
654 | |||
655 | /* Does this card need audio output? */ | ||
656 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
657 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
658 | link->conf.Status = CCSR_AUDIO_ENA; | ||
659 | } | ||
660 | 624 | ||
661 | /* Do we need to allocate an interrupt? */ | 625 | static void dio700_config(struct pcmcia_device *link) |
662 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | 626 | { |
663 | link->conf.Attributes |= CONF_ENABLE_IRQ; | 627 | struct local_info_t *dev = link->priv; |
664 | 628 | win_req_t req; | |
665 | /* IO window settings */ | 629 | int ret; |
666 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | ||
667 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
668 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
669 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
670 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
671 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
672 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
673 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
674 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
675 | link->io.BasePort1 = io->win[0].base; | ||
676 | link->io.NumPorts1 = io->win[0].len; | ||
677 | if (io->nwin > 1) { | ||
678 | link->io.Attributes2 = link->io.Attributes1; | ||
679 | link->io.BasePort2 = io->win[1].base; | ||
680 | link->io.NumPorts2 = io->win[1].len; | ||
681 | } | ||
682 | /* This reserves IO space but doesn't actually enable it */ | ||
683 | if (pcmcia_request_io(link, &link->io) != 0) | ||
684 | goto next_entry; | ||
685 | } | ||
686 | 630 | ||
687 | if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { | 631 | printk(KERN_INFO "ni_daq_700: cs-config\n"); |
688 | cistpl_mem_t *mem = | ||
689 | (cfg->mem.nwin) ? &cfg->mem : &dflt.mem; | ||
690 | req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; | ||
691 | req.Attributes |= WIN_ENABLE; | ||
692 | req.Base = mem->win[0].host_addr; | ||
693 | req.Size = mem->win[0].len; | ||
694 | if (req.Size < 0x1000) | ||
695 | req.Size = 0x1000; | ||
696 | req.AccessSpeed = 0; | ||
697 | if (pcmcia_request_window(&link, &req, &link->win)) | ||
698 | goto next_entry; | ||
699 | map.Page = 0; | ||
700 | map.CardOffset = mem->win[0].card_addr; | ||
701 | if (pcmcia_map_mem_page(link->win, &map)) | ||
702 | goto next_entry; | ||
703 | } | ||
704 | /* If we got this far, we're cool! */ | ||
705 | break; | ||
706 | 632 | ||
707 | next_entry: | 633 | dev_dbg(&link->dev, "dio700_config\n"); |
708 | 634 | ||
709 | last_ret = pcmcia_get_next_tuple(link, &tuple); | 635 | ret = pcmcia_loop_config(link, dio700_pcmcia_config_loop, &req); |
710 | if (last_ret) { | 636 | if (ret) { |
711 | cs_error(link, GetNextTuple, last_ret); | 637 | dev_warn(&link->dev, "no configuration found\n"); |
712 | goto cs_failed; | 638 | goto failed; |
713 | } | ||
714 | } | 639 | } |
715 | 640 | ||
716 | /* | 641 | /* |
@@ -719,11 +644,9 @@ next_entry: | |||
719 | irq structure is initialized. | 644 | irq structure is initialized. |
720 | */ | 645 | */ |
721 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 646 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
722 | last_ret = pcmcia_request_irq(link, &link->irq); | 647 | ret = pcmcia_request_irq(link, &link->irq); |
723 | if (last_ret) { | 648 | if (ret) |
724 | cs_error(link, RequestIRQ, last_ret); | 649 | goto failed; |
725 | goto cs_failed; | ||
726 | } | ||
727 | } | 650 | } |
728 | 651 | ||
729 | /* | 652 | /* |
@@ -731,11 +654,9 @@ next_entry: | |||
731 | the I/O windows and the interrupt mapping, and putting the | 654 | the I/O windows and the interrupt mapping, and putting the |
732 | card and host interface into "Memory and IO" mode. | 655 | card and host interface into "Memory and IO" mode. |
733 | */ | 656 | */ |
734 | last_ret = pcmcia_request_configuration(link, &link->conf); | 657 | ret = pcmcia_request_configuration(link, &link->conf); |
735 | if (last_ret != 0) { | 658 | if (ret != 0) |
736 | cs_error(link, RequestConfiguration, last_ret); | 659 | goto failed; |
737 | goto cs_failed; | ||
738 | } | ||
739 | 660 | ||
740 | /* | 661 | /* |
741 | At this point, the dev_node_t structure(s) need to be | 662 | At this point, the dev_node_t structure(s) need to be |
@@ -763,7 +684,7 @@ next_entry: | |||
763 | 684 | ||
764 | return; | 685 | return; |
765 | 686 | ||
766 | cs_failed: | 687 | failed: |
767 | printk(KERN_INFO "ni_daq_700 cs failed"); | 688 | printk(KERN_INFO "ni_daq_700 cs failed"); |
768 | dio700_release(link); | 689 | dio700_release(link); |
769 | 690 | ||
@@ -771,7 +692,7 @@ cs_failed: | |||
771 | 692 | ||
772 | static void dio700_release(struct pcmcia_device *link) | 693 | static void dio700_release(struct pcmcia_device *link) |
773 | { | 694 | { |
774 | DEBUG(0, "dio700_release(0x%p)\n", link); | 695 | dev_dbg(&link->dev, "dio700_release\n"); |
775 | 696 | ||
776 | pcmcia_disable_device(link); | 697 | pcmcia_disable_device(link); |
777 | } /* dio700_release */ | 698 | } /* dio700_release */ |
@@ -830,15 +751,13 @@ struct pcmcia_driver dio700_cs_driver = { | |||
830 | 751 | ||
831 | static int __init init_dio700_cs(void) | 752 | static int __init init_dio700_cs(void) |
832 | { | 753 | { |
833 | printk("ni_daq_700: cs-init \n"); | ||
834 | DEBUG(0, "%s\n", version); | ||
835 | pcmcia_register_driver(&dio700_cs_driver); | 754 | pcmcia_register_driver(&dio700_cs_driver); |
836 | return 0; | 755 | return 0; |
837 | } | 756 | } |
838 | 757 | ||
839 | static void __exit exit_dio700_cs(void) | 758 | static void __exit exit_dio700_cs(void) |
840 | { | 759 | { |
841 | DEBUG(0, "ni_daq_700: unloading\n"); | 760 | pr_debug("ni_daq_700: unloading\n"); |
842 | pcmcia_unregister_driver(&dio700_cs_driver); | 761 | pcmcia_unregister_driver(&dio700_cs_driver); |
843 | } | 762 | } |
844 | 763 | ||
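dio700_pcmcia_config_loop() above also claims a memory window when the CIS advertises one. The helper below condenses that path into one hypothetical function for readability; the calls and flags are taken from the hunk itself, and 4 KiB is simply the driver's minimum window size.

static int example_request_mem(struct pcmcia_device *p_dev, win_req_t *req,
                               cistpl_mem_t *mem)
{
        memreq_t map;

        /* 16-bit common-memory window, enabled as soon as it is mapped */
        req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
        req->Base = mem->win[0].host_addr;
        req->Size = mem->win[0].len;
        if (req->Size < 0x1000)
                req->Size = 0x1000;     /* enforce the 4 KiB minimum */
        req->AccessSpeed = 0;

        if (pcmcia_request_window(p_dev, req, &p_dev->win))
                return -ENODEV;

        map.Page = 0;
        map.CardOffset = mem->win[0].card_addr;
        if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
                return -ENODEV;

        return 0;
}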
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c index 0700a8bddd1e..9017be3a92f1 100644 --- a/drivers/staging/comedi/drivers/ni_daq_dio24.c +++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c | |||
@@ -187,25 +187,7 @@ static int dio24_detach(struct comedi_device *dev) | |||
187 | return 0; | 187 | return 0; |
188 | }; | 188 | }; |
189 | 189 | ||
190 | /* PCMCIA crap */ | 190 | /* PCMCIA crap -- watch your words! */ |
191 | |||
192 | /* | ||
193 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
194 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
195 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
196 | be present but disabled -- but it can then be enabled for specific | ||
197 | modules at load time with a 'pc_debug=#' option to insmod. | ||
198 | */ | ||
199 | #ifdef PCMCIA_DEBUG | ||
200 | static int pc_debug = PCMCIA_DEBUG; | ||
201 | module_param(pc_debug, int, 0644); | ||
202 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
203 | static char *version = "ni_daq_dio24.c, based on dummy_cs.c"; | ||
204 | #else | ||
205 | #define DEBUG(n, args...) | ||
206 | #endif | ||
207 | |||
208 | /*====================================================================*/ | ||
209 | 191 | ||
210 | static void dio24_config(struct pcmcia_device *link); | 192 | static void dio24_config(struct pcmcia_device *link); |
211 | static void dio24_release(struct pcmcia_device *link); | 193 | static void dio24_release(struct pcmcia_device *link); |
@@ -261,7 +243,7 @@ static int dio24_cs_attach(struct pcmcia_device *link) | |||
261 | 243 | ||
262 | printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n"); | 244 | printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n"); |
263 | 245 | ||
264 | DEBUG(0, "dio24_cs_attach()\n"); | 246 | dev_dbg(&link->dev, "dio24_cs_attach()\n"); |
265 | 247 | ||
266 | /* Allocate space for private device-specific data */ | 248 | /* Allocate space for private device-specific data */ |
267 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); | 249 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); |
@@ -272,7 +254,6 @@ static int dio24_cs_attach(struct pcmcia_device *link) | |||
272 | 254 | ||
273 | /* Interrupt setup */ | 255 | /* Interrupt setup */ |
274 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 256 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
275 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
276 | link->irq.Handler = NULL; | 257 | link->irq.Handler = NULL; |
277 | 258 | ||
278 | /* | 259 | /* |
@@ -306,7 +287,7 @@ static void dio24_cs_detach(struct pcmcia_device *link) | |||
306 | 287 | ||
307 | printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n"); | 288 | printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n"); |
308 | 289 | ||
309 | DEBUG(0, "dio24_cs_detach(0x%p)\n", link); | 290 | dev_dbg(&link->dev, "dio24_cs_detach\n"); |
310 | 291 | ||
311 | if (link->dev_node) { | 292 | if (link->dev_node) { |
312 | ((struct local_info_t *)link->priv)->stop = 1; | 293 | ((struct local_info_t *)link->priv)->stop = 1; |
@@ -327,142 +308,85 @@ static void dio24_cs_detach(struct pcmcia_device *link) | |||
327 | 308 | ||
328 | ======================================================================*/ | 309 | ======================================================================*/ |
329 | 310 | ||
330 | static void dio24_config(struct pcmcia_device *link) | 311 | static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev, |
312 | cistpl_cftable_entry_t *cfg, | ||
313 | cistpl_cftable_entry_t *dflt, | ||
314 | unsigned int vcc, | ||
315 | void *priv_data) | ||
331 | { | 316 | { |
332 | struct local_info_t *dev = link->priv; | 317 | win_req_t *req = priv_data; |
333 | tuple_t tuple; | ||
334 | cisparse_t parse; | ||
335 | int last_ret; | ||
336 | u_char buf[64]; | ||
337 | win_req_t req; | ||
338 | memreq_t map; | 318 | memreq_t map; |
339 | cistpl_cftable_entry_t dflt = { 0 }; | ||
340 | 319 | ||
341 | printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n"); | 320 | if (cfg->index == 0) |
342 | 321 | return -ENODEV; | |
343 | DEBUG(0, "dio24_config(0x%p)\n", link); | ||
344 | |||
345 | /* | ||
346 | This reads the card's CONFIG tuple to find its configuration | ||
347 | registers. | ||
348 | */ | ||
349 | tuple.DesiredTuple = CISTPL_CONFIG; | ||
350 | tuple.Attributes = 0; | ||
351 | tuple.TupleData = buf; | ||
352 | tuple.TupleDataMax = sizeof(buf); | ||
353 | tuple.TupleOffset = 0; | ||
354 | |||
355 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
356 | if (last_ret) { | ||
357 | cs_error(link, GetFirstTuple, last_ret); | ||
358 | goto cs_failed; | ||
359 | } | ||
360 | 322 | ||
361 | last_ret = pcmcia_get_tuple_data(link, &tuple); | 323 | /* Does this card need audio output? */ |
362 | if (last_ret) { | 324 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { |
363 | cs_error(link, GetTupleData, last_ret); | 325 | p_dev->conf.Attributes |= CONF_ENABLE_SPKR; |
364 | goto cs_failed; | 326 | p_dev->conf.Status = CCSR_AUDIO_ENA; |
365 | } | 327 | } |
366 | 328 | ||
367 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | 329 | /* Do we need to allocate an interrupt? */ |
368 | if (last_ret) { | 330 | if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) |
369 | cs_error(link, ParseTuple, last_ret); | 331 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; |
370 | goto cs_failed; | 332 | |
333 | /* IO window settings */ | ||
334 | p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; | ||
335 | if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { | ||
336 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; | ||
337 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
338 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
339 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
340 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
341 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
342 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
343 | p_dev->io.BasePort1 = io->win[0].base; | ||
344 | p_dev->io.NumPorts1 = io->win[0].len; | ||
345 | if (io->nwin > 1) { | ||
346 | p_dev->io.Attributes2 = p_dev->io.Attributes1; | ||
347 | p_dev->io.BasePort2 = io->win[1].base; | ||
348 | p_dev->io.NumPorts2 = io->win[1].len; | ||
349 | } | ||
350 | /* This reserves IO space but doesn't actually enable it */ | ||
351 | if (pcmcia_request_io(p_dev, &p_dev->io) != 0) | ||
352 | return -ENODEV; | ||
371 | } | 353 | } |
372 | link->conf.ConfigBase = parse.config.base; | ||
373 | link->conf.Present = parse.config.rmask[0]; | ||
374 | |||
375 | /* | ||
376 | In this loop, we scan the CIS for configuration table entries, | ||
377 | each of which describes a valid card configuration, including | ||
378 | voltage, IO window, memory window, and interrupt settings. | ||
379 | |||
380 | We make no assumptions about the card to be configured: we use | ||
381 | just the information available in the CIS. In an ideal world, | ||
382 | this would work for any PCMCIA card, but it requires a complete | ||
383 | and accurate CIS. In practice, a driver usually "knows" most of | ||
384 | these things without consulting the CIS, and most client drivers | ||
385 | will only use the CIS to fill in implementation-defined details. | ||
386 | */ | ||
387 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
388 | 354 | ||
389 | last_ret = pcmcia_get_first_tuple(link, &tuple); | 355 | if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) { |
390 | if (last_ret) { | 356 | cistpl_mem_t *mem = |
391 | cs_error(link, GetFirstTuple, last_ret); | 357 | (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; |
392 | goto cs_failed; | 358 | req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; |
359 | req->Attributes |= WIN_ENABLE; | ||
360 | req->Base = mem->win[0].host_addr; | ||
361 | req->Size = mem->win[0].len; | ||
362 | if (req->Size < 0x1000) | ||
363 | req->Size = 0x1000; | ||
364 | req->AccessSpeed = 0; | ||
365 | if (pcmcia_request_window(p_dev, req, &p_dev->win)) | ||
366 | return -ENODEV; | ||
367 | map.Page = 0; | ||
368 | map.CardOffset = mem->win[0].card_addr; | ||
369 | if (pcmcia_map_mem_page(p_dev, p_dev->win, &map)) | ||
370 | return -ENODEV; | ||
393 | } | 371 | } |
394 | while (1) { | 372 | /* If we got this far, we're cool! */ |
395 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | 373 | return 0; |
396 | if (pcmcia_get_tuple_data(link, &tuple) != 0) | 374 | } |
397 | goto next_entry; | ||
398 | if (pcmcia_parse_tuple(&tuple, &parse) != 0) | ||
399 | goto next_entry; | ||
400 | |||
401 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
402 | dflt = *cfg; | ||
403 | if (cfg->index == 0) | ||
404 | goto next_entry; | ||
405 | link->conf.ConfigIndex = cfg->index; | ||
406 | |||
407 | /* Does this card need audio output? */ | ||
408 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
409 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
410 | link->conf.Status = CCSR_AUDIO_ENA; | ||
411 | } | ||
412 | 375 | ||
413 | /* Do we need to allocate an interrupt? */ | 376 | static void dio24_config(struct pcmcia_device *link) |
414 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | 377 | { |
415 | link->conf.Attributes |= CONF_ENABLE_IRQ; | 378 | struct local_info_t *dev = link->priv; |
416 | 379 | int ret; | |
417 | /* IO window settings */ | 380 | win_req_t req; |
418 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | ||
419 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
420 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
421 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
422 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
423 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
424 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
425 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
426 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
427 | link->io.BasePort1 = io->win[0].base; | ||
428 | link->io.NumPorts1 = io->win[0].len; | ||
429 | if (io->nwin > 1) { | ||
430 | link->io.Attributes2 = link->io.Attributes1; | ||
431 | link->io.BasePort2 = io->win[1].base; | ||
432 | link->io.NumPorts2 = io->win[1].len; | ||
433 | } | ||
434 | /* This reserves IO space but doesn't actually enable it */ | ||
435 | if (pcmcia_request_io(link, &link->io) != 0) | ||
436 | goto next_entry; | ||
437 | } | ||
438 | 381 | ||
439 | if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { | 382 | printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n"); |
440 | cistpl_mem_t *mem = | ||
441 | (cfg->mem.nwin) ? &cfg->mem : &dflt.mem; | ||
442 | req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; | ||
443 | req.Attributes |= WIN_ENABLE; | ||
444 | req.Base = mem->win[0].host_addr; | ||
445 | req.Size = mem->win[0].len; | ||
446 | if (req.Size < 0x1000) | ||
447 | req.Size = 0x1000; | ||
448 | req.AccessSpeed = 0; | ||
449 | if (pcmcia_request_window(&link, &req, &link->win)) | ||
450 | goto next_entry; | ||
451 | map.Page = 0; | ||
452 | map.CardOffset = mem->win[0].card_addr; | ||
453 | if (pcmcia_map_mem_page(link->win, &map)) | ||
454 | goto next_entry; | ||
455 | } | ||
456 | /* If we got this far, we're cool! */ | ||
457 | break; | ||
458 | 383 | ||
459 | next_entry: | 384 | dev_dbg(&link->dev, "dio24_config\n"); |
460 | 385 | ||
461 | last_ret = pcmcia_get_next_tuple(link, &tuple); | 386 | ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, &req); |
462 | if (last_ret) { | 387 | if (ret) { |
463 | cs_error(link, GetNextTuple, last_ret); | 388 | dev_warn(&link->dev, "no configuration found\n"); |
464 | goto cs_failed; | 389 | goto failed; |
465 | } | ||
466 | } | 390 | } |
467 | 391 | ||
468 | /* | 392 | /* |
@@ -471,11 +395,9 @@ next_entry: | |||
471 | irq structure is initialized. | 395 | irq structure is initialized. |
472 | */ | 396 | */ |
473 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 397 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
474 | last_ret = pcmcia_request_irq(link, &link->irq); | 398 | ret = pcmcia_request_irq(link, &link->irq); |
475 | if (last_ret) { | 399 | if (ret) |
476 | cs_error(link, RequestIRQ, last_ret); | 400 | goto failed; |
477 | goto cs_failed; | ||
478 | } | ||
479 | } | 401 | } |
480 | 402 | ||
481 | /* | 403 | /* |
@@ -483,11 +405,9 @@ next_entry: | |||
483 | the I/O windows and the interrupt mapping, and putting the | 405 | the I/O windows and the interrupt mapping, and putting the |
484 | card and host interface into "Memory and IO" mode. | 406 | card and host interface into "Memory and IO" mode. |
485 | */ | 407 | */ |
486 | last_ret = pcmcia_request_configuration(link, &link->conf); | 408 | ret = pcmcia_request_configuration(link, &link->conf); |
487 | if (last_ret) { | 409 | if (ret) |
488 | cs_error(link, RequestConfiguration, last_ret); | 410 | goto failed; |
489 | goto cs_failed; | ||
490 | } | ||
491 | 411 | ||
492 | /* | 412 | /* |
493 | At this point, the dev_node_t structure(s) need to be | 413 | At this point, the dev_node_t structure(s) need to be |
@@ -515,7 +435,7 @@ next_entry: | |||
515 | 435 | ||
516 | return; | 436 | return; |
517 | 437 | ||
518 | cs_failed: | 438 | failed: |
519 | printk(KERN_INFO "Fallo"); | 439 | printk(KERN_INFO "Fallo"); |
520 | dio24_release(link); | 440 | dio24_release(link); |
521 | 441 | ||
@@ -523,7 +443,7 @@ cs_failed: | |||
523 | 443 | ||
524 | static void dio24_release(struct pcmcia_device *link) | 444 | static void dio24_release(struct pcmcia_device *link) |
525 | { | 445 | { |
526 | DEBUG(0, "dio24_release(0x%p)\n", link); | 446 | dev_dbg(&link->dev, "dio24_release\n"); |
527 | 447 | ||
528 | pcmcia_disable_device(link); | 448 | pcmcia_disable_device(link); |
529 | } /* dio24_release */ | 449 | } /* dio24_release */ |
@@ -582,14 +502,12 @@ struct pcmcia_driver dio24_cs_driver = { | |||
582 | static int __init init_dio24_cs(void) | 502 | static int __init init_dio24_cs(void) |
583 | { | 503 | { |
584 | printk("ni_daq_dio24: HOLA SOY YO!\n"); | 504 | printk("ni_daq_dio24: HOLA SOY YO!\n"); |
585 | DEBUG(0, "%s\n", version); | ||
586 | pcmcia_register_driver(&dio24_cs_driver); | 505 | pcmcia_register_driver(&dio24_cs_driver); |
587 | return 0; | 506 | return 0; |
588 | } | 507 | } |
589 | 508 | ||
590 | static void __exit exit_dio24_cs(void) | 509 | static void __exit exit_dio24_cs(void) |
591 | { | 510 | { |
592 | DEBUG(0, "ni_dio24: unloading\n"); | ||
593 | pcmcia_unregister_driver(&dio24_cs_driver); | 511 | pcmcia_unregister_driver(&dio24_cs_driver); |
594 | } | 512 | } |
595 | 513 | ||
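The dio24 and labpc conversions above replace the open-coded CIS tuple walk (pcmcia_get_first_tuple()/pcmcia_get_next_tuple() plus a manual dflt copy) with pcmcia_loop_config(), which iterates the CISTPL_CFTABLE_ENTRY tuples and hands each candidate entry to a driver callback. A condensed sketch of the pattern these hunks follow, against the PCMCIA API used here (p_dev->io, p_dev->conf); the callback name is illustrative:

    static int example_config_loop(struct pcmcia_device *p_dev,
                                   cistpl_cftable_entry_t *cfg,
                                   cistpl_cftable_entry_t *dflt,
                                   unsigned int vcc, void *priv_data)
    {
            if (cfg->index == 0)
                    return -ENODEV;         /* unusable entry, try the next one */

            if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
                    cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;

                    p_dev->io.BasePort1 = io->win[0].base;
                    p_dev->io.NumPorts1 = io->win[0].len;
                    /* reserves IO space but doesn't enable it yet */
                    if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
                            return -ENODEV;
            }
            return 0;                       /* first entry that works is kept */
    }

    ret = pcmcia_loop_config(link, example_config_loop, NULL);
    if (ret)
            dev_warn(&link->dev, "no configuration found\n");

Returning non-zero from the callback makes the core move on to the next table entry, which is what the old next_entry: label used to do by hand.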
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c index a3053b8da1c6..7d514b3ee754 100644 --- a/drivers/staging/comedi/drivers/ni_labpc_cs.c +++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c | |||
@@ -153,23 +153,6 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
153 | return labpc_common_attach(dev, iobase, irq, 0); | 153 | return labpc_common_attach(dev, iobase, irq, 0); |
154 | } | 154 | } |
155 | 155 | ||
156 | /* | ||
157 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
158 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
159 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
160 | be present but disabled -- but it can then be enabled for specific | ||
161 | modules at load time with a 'pc_debug=#' option to insmod. | ||
162 | */ | ||
163 | #ifdef PCMCIA_DEBUG | ||
164 | static int pc_debug = PCMCIA_DEBUG; | ||
165 | module_param(pc_debug, int, 0644); | ||
166 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
167 | static const char *version = | ||
168 | "ni_labpc.c, based on dummy_cs.c 1.31 2001/08/24 12:13:13"; | ||
169 | #else | ||
170 | #define DEBUG(n, args...) | ||
171 | #endif | ||
172 | |||
173 | /*====================================================================*/ | 156 | /*====================================================================*/ |
174 | 157 | ||
175 | /* | 158 | /* |
@@ -236,7 +219,7 @@ static int labpc_cs_attach(struct pcmcia_device *link) | |||
236 | { | 219 | { |
237 | struct local_info_t *local; | 220 | struct local_info_t *local; |
238 | 221 | ||
239 | DEBUG(0, "labpc_cs_attach()\n"); | 222 | dev_dbg(&link->dev, "labpc_cs_attach()\n"); |
240 | 223 | ||
241 | /* Allocate space for private device-specific data */ | 224 | /* Allocate space for private device-specific data */ |
242 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); | 225 | local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); |
@@ -247,7 +230,6 @@ static int labpc_cs_attach(struct pcmcia_device *link) | |||
247 | 230 | ||
248 | /* Interrupt setup */ | 231 | /* Interrupt setup */ |
249 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; | 232 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; |
250 | link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_PULSE_ID; | ||
251 | link->irq.Handler = NULL; | 233 | link->irq.Handler = NULL; |
252 | 234 | ||
253 | /* | 235 | /* |
@@ -278,7 +260,7 @@ static int labpc_cs_attach(struct pcmcia_device *link) | |||
278 | 260 | ||
279 | static void labpc_cs_detach(struct pcmcia_device *link) | 261 | static void labpc_cs_detach(struct pcmcia_device *link) |
280 | { | 262 | { |
281 | DEBUG(0, "labpc_cs_detach(0x%p)\n", link); | 263 | dev_dbg(&link->dev, "labpc_cs_detach\n"); |
282 | 264 | ||
283 | /* | 265 | /* |
284 | If the device is currently configured and active, we won't | 266 | If the device is currently configured and active, we won't |
@@ -305,135 +287,84 @@ static void labpc_cs_detach(struct pcmcia_device *link) | |||
305 | 287 | ||
306 | ======================================================================*/ | 288 | ======================================================================*/ |
307 | 289 | ||
308 | static void labpc_config(struct pcmcia_device *link) | 290 | static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev, |
291 | cistpl_cftable_entry_t *cfg, | ||
292 | cistpl_cftable_entry_t *dflt, | ||
293 | unsigned int vcc, | ||
294 | void *priv_data) | ||
309 | { | 295 | { |
310 | struct local_info_t *dev = link->priv; | 296 | win_req_t *req = priv_data; |
311 | tuple_t tuple; | ||
312 | cisparse_t parse; | ||
313 | int last_ret; | ||
314 | u_char buf[64]; | ||
315 | win_req_t req; | ||
316 | memreq_t map; | 297 | memreq_t map; |
317 | cistpl_cftable_entry_t dflt = { 0 }; | ||
318 | 298 | ||
319 | DEBUG(0, "labpc_config(0x%p)\n", link); | 299 | if (cfg->index == 0) |
300 | return -ENODEV; | ||
320 | 301 | ||
321 | /* | 302 | /* Does this card need audio output? */ |
322 | This reads the card's CONFIG tuple to find its configuration | 303 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { |
323 | registers. | 304 | p_dev->conf.Attributes |= CONF_ENABLE_SPKR; |
324 | */ | 305 | p_dev->conf.Status = CCSR_AUDIO_ENA; |
325 | tuple.DesiredTuple = CISTPL_CONFIG; | ||
326 | tuple.Attributes = 0; | ||
327 | tuple.TupleData = buf; | ||
328 | tuple.TupleDataMax = sizeof(buf); | ||
329 | tuple.TupleOffset = 0; | ||
330 | |||
331 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
332 | if (last_ret) { | ||
333 | cs_error(link, GetFirstTuple, last_ret); | ||
334 | goto cs_failed; | ||
335 | } | 306 | } |
336 | 307 | ||
337 | last_ret = pcmcia_get_tuple_data(link, &tuple); | 308 | /* Do we need to allocate an interrupt? */ |
338 | if (last_ret) { | 309 | if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) |
339 | cs_error(link, GetTupleData, last_ret); | 310 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; |
340 | goto cs_failed; | 311 | |
312 | /* IO window settings */ | ||
313 | p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; | ||
314 | if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { | ||
315 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; | ||
316 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
317 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
318 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
319 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
320 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
321 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
322 | p_dev->io.BasePort1 = io->win[0].base; | ||
323 | p_dev->io.NumPorts1 = io->win[0].len; | ||
324 | if (io->nwin > 1) { | ||
325 | p_dev->io.Attributes2 = p_dev->io.Attributes1; | ||
326 | p_dev->io.BasePort2 = io->win[1].base; | ||
327 | p_dev->io.NumPorts2 = io->win[1].len; | ||
328 | } | ||
329 | /* This reserves IO space but doesn't actually enable it */ | ||
330 | if (pcmcia_request_io(p_dev, &p_dev->io) != 0) | ||
331 | return -ENODEV; | ||
341 | } | 332 | } |
342 | 333 | ||
343 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | 334 | if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) { |
344 | if (last_ret) { | 335 | cistpl_mem_t *mem = |
345 | cs_error(link, ParseTuple, last_ret); | 336 | (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; |
346 | goto cs_failed; | 337 | req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; |
338 | req->Attributes |= WIN_ENABLE; | ||
339 | req->Base = mem->win[0].host_addr; | ||
340 | req->Size = mem->win[0].len; | ||
341 | if (req->Size < 0x1000) | ||
342 | req->Size = 0x1000; | ||
343 | req->AccessSpeed = 0; | ||
344 | if (pcmcia_request_window(p_dev, req, &p_dev->win)) | ||
345 | return -ENODEV; | ||
346 | map.Page = 0; | ||
347 | map.CardOffset = mem->win[0].card_addr; | ||
348 | if (pcmcia_map_mem_page(p_dev, p_dev->win, &map)) | ||
349 | return -ENODEV; | ||
347 | } | 350 | } |
348 | link->conf.ConfigBase = parse.config.base; | 351 | /* If we got this far, we're cool! */ |
349 | link->conf.Present = parse.config.rmask[0]; | 352 | return 0; |
353 | } | ||
350 | 354 | ||
351 | /* | ||
352 | In this loop, we scan the CIS for configuration table entries, | ||
353 | each of which describes a valid card configuration, including | ||
354 | voltage, IO window, memory window, and interrupt settings. | ||
355 | |||
356 | We make no assumptions about the card to be configured: we use | ||
357 | just the information available in the CIS. In an ideal world, | ||
358 | this would work for any PCMCIA card, but it requires a complete | ||
359 | and accurate CIS. In practice, a driver usually "knows" most of | ||
360 | these things without consulting the CIS, and most client drivers | ||
361 | will only use the CIS to fill in implementation-defined details. | ||
362 | */ | ||
363 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
364 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
365 | if (last_ret) { | ||
366 | cs_error(link, GetFirstTuple, last_ret); | ||
367 | goto cs_failed; | ||
368 | } | ||
369 | while (1) { | ||
370 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | ||
371 | if (pcmcia_get_tuple_data(link, &tuple)) | ||
372 | goto next_entry; | ||
373 | if (pcmcia_parse_tuple(&tuple, &parse)) | ||
374 | goto next_entry; | ||
375 | |||
376 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
377 | dflt = *cfg; | ||
378 | if (cfg->index == 0) | ||
379 | goto next_entry; | ||
380 | link->conf.ConfigIndex = cfg->index; | ||
381 | |||
382 | /* Does this card need audio output? */ | ||
383 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
384 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
385 | link->conf.Status = CCSR_AUDIO_ENA; | ||
386 | } | ||
387 | 355 | ||
388 | /* Do we need to allocate an interrupt? */ | 356 | static void labpc_config(struct pcmcia_device *link) |
389 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | 357 | { |
390 | link->conf.Attributes |= CONF_ENABLE_IRQ; | 358 | struct local_info_t *dev = link->priv; |
391 | 359 | int ret; | |
392 | /* IO window settings */ | 360 | win_req_t req; |
393 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | ||
394 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
395 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
396 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
397 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
398 | link->io.BasePort1 = io->win[0].base; | ||
399 | link->io.NumPorts1 = io->win[0].len; | ||
400 | if (io->nwin > 1) { | ||
401 | link->io.Attributes2 = link->io.Attributes1; | ||
402 | link->io.BasePort2 = io->win[1].base; | ||
403 | link->io.NumPorts2 = io->win[1].len; | ||
404 | } | ||
405 | /* This reserves IO space but doesn't actually enable it */ | ||
406 | if (pcmcia_request_io(link, &link->io)) | ||
407 | goto next_entry; | ||
408 | } | ||
409 | 361 | ||
410 | if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { | 362 | dev_dbg(&link->dev, "labpc_config\n"); |
411 | cistpl_mem_t *mem = | ||
412 | (cfg->mem.nwin) ? &cfg->mem : &dflt.mem; | ||
413 | req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; | ||
414 | req.Attributes |= WIN_ENABLE; | ||
415 | req.Base = mem->win[0].host_addr; | ||
416 | req.Size = mem->win[0].len; | ||
417 | if (req.Size < 0x1000) | ||
418 | req.Size = 0x1000; | ||
419 | req.AccessSpeed = 0; | ||
420 | link->win = (window_handle_t) link; | ||
421 | if (pcmcia_request_window(&link, &req, &link->win)) | ||
422 | goto next_entry; | ||
423 | map.Page = 0; | ||
424 | map.CardOffset = mem->win[0].card_addr; | ||
425 | if (pcmcia_map_mem_page(link->win, &map)) | ||
426 | goto next_entry; | ||
427 | } | ||
428 | /* If we got this far, we're cool! */ | ||
429 | break; | ||
430 | 363 | ||
431 | next_entry: | 364 | ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, &req); |
432 | last_ret = pcmcia_get_next_tuple(link, &tuple); | 365 | if (ret) { |
433 | if (last_ret) { | 366 | dev_warn(&link->dev, "no configuration found\n"); |
434 | cs_error(link, GetNextTuple, last_ret); | 367 | goto failed; |
435 | goto cs_failed; | ||
436 | } | ||
437 | } | 368 | } |
438 | 369 | ||
439 | /* | 370 | /* |
@@ -442,11 +373,9 @@ next_entry: | |||
442 | irq structure is initialized. | 373 | irq structure is initialized. |
443 | */ | 374 | */ |
444 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 375 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
445 | last_ret = pcmcia_request_irq(link, &link->irq); | 376 | ret = pcmcia_request_irq(link, &link->irq); |
446 | if (last_ret) { | 377 | if (ret) |
447 | cs_error(link, RequestIRQ, last_ret); | 378 | goto failed; |
448 | goto cs_failed; | ||
449 | } | ||
450 | } | 379 | } |
451 | 380 | ||
452 | /* | 381 | /* |
@@ -454,11 +383,9 @@ next_entry: | |||
454 | the I/O windows and the interrupt mapping, and putting the | 383 | the I/O windows and the interrupt mapping, and putting the |
455 | card and host interface into "Memory and IO" mode. | 384 | card and host interface into "Memory and IO" mode. |
456 | */ | 385 | */ |
457 | last_ret = pcmcia_request_configuration(link, &link->conf); | 386 | ret = pcmcia_request_configuration(link, &link->conf); |
458 | if (last_ret) { | 387 | if (ret) |
459 | cs_error(link, RequestConfiguration, last_ret); | 388 | goto failed; |
460 | goto cs_failed; | ||
461 | } | ||
462 | 389 | ||
463 | /* | 390 | /* |
464 | At this point, the dev_node_t structure(s) need to be | 391 | At this point, the dev_node_t structure(s) need to be |
@@ -486,14 +413,14 @@ next_entry: | |||
486 | 413 | ||
487 | return; | 414 | return; |
488 | 415 | ||
489 | cs_failed: | 416 | failed: |
490 | labpc_release(link); | 417 | labpc_release(link); |
491 | 418 | ||
492 | } /* labpc_config */ | 419 | } /* labpc_config */ |
493 | 420 | ||
494 | static void labpc_release(struct pcmcia_device *link) | 421 | static void labpc_release(struct pcmcia_device *link) |
495 | { | 422 | { |
496 | DEBUG(0, "labpc_release(0x%p)\n", link); | 423 | dev_dbg(&link->dev, "labpc_release\n"); |
497 | 424 | ||
498 | pcmcia_disable_device(link); | 425 | pcmcia_disable_device(link); |
499 | } /* labpc_release */ | 426 | } /* labpc_release */ |
@@ -551,14 +478,12 @@ struct pcmcia_driver labpc_cs_driver = { | |||
551 | 478 | ||
552 | static int __init init_labpc_cs(void) | 479 | static int __init init_labpc_cs(void) |
553 | { | 480 | { |
554 | DEBUG(0, "%s\n", version); | ||
555 | pcmcia_register_driver(&labpc_cs_driver); | 481 | pcmcia_register_driver(&labpc_cs_driver); |
556 | return 0; | 482 | return 0; |
557 | } | 483 | } |
558 | 484 | ||
559 | static void __exit exit_labpc_cs(void) | 485 | static void __exit exit_labpc_cs(void) |
560 | { | 486 | { |
561 | DEBUG(0, "ni_labpc: unloading\n"); | ||
562 | pcmcia_unregister_driver(&labpc_cs_driver); | 487 | pcmcia_unregister_driver(&labpc_cs_driver); |
563 | } | 488 | } |
564 | 489 | ||
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c index 9aef87fc81dc..d692f4bb47ea 100644 --- a/drivers/staging/comedi/drivers/ni_mio_cs.c +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c | |||
@@ -274,7 +274,6 @@ static int cs_attach(struct pcmcia_device *link) | |||
274 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 274 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
275 | link->io.NumPorts1 = 16; | 275 | link->io.NumPorts1 = 16; |
276 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; | 276 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
277 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
278 | link->conf.Attributes = CONF_ENABLE_IRQ; | 277 | link->conf.Attributes = CONF_ENABLE_IRQ; |
279 | link->conf.IntType = INT_MEMORY_AND_IO; | 278 | link->conf.IntType = INT_MEMORY_AND_IO; |
280 | 279 | ||
@@ -312,96 +311,47 @@ static int mio_cs_resume(struct pcmcia_device *link) | |||
312 | return 0; | 311 | return 0; |
313 | } | 312 | } |
314 | 313 | ||
315 | static void mio_cs_config(struct pcmcia_device *link) | ||
316 | { | ||
317 | tuple_t tuple; | ||
318 | u_short buf[128]; | ||
319 | cisparse_t parse; | ||
320 | int manfid = 0, prodid = 0; | ||
321 | int ret; | ||
322 | |||
323 | DPRINTK("mio_cs_config(link=%p)\n", link); | ||
324 | 314 | ||
325 | tuple.TupleData = (cisdata_t *) buf; | 315 | static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, |
326 | tuple.TupleOffset = 0; | 316 | cistpl_cftable_entry_t *cfg, |
327 | tuple.TupleDataMax = 255; | 317 | cistpl_cftable_entry_t *dflt, |
328 | tuple.Attributes = 0; | 318 | unsigned int vcc, |
319 | void *priv_data) | ||
320 | { | ||
321 | int base, ret; | ||
329 | 322 | ||
330 | tuple.DesiredTuple = CISTPL_CONFIG; | 323 | p_dev->io.NumPorts1 = cfg->io.win[0].len; |
331 | ret = pcmcia_get_first_tuple(link, &tuple); | 324 | p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK; |
332 | ret = pcmcia_get_tuple_data(link, &tuple); | 325 | p_dev->io.NumPorts2 = 0; |
333 | ret = pcmcia_parse_tuple(&tuple, &parse); | ||
334 | link->conf.ConfigBase = parse.config.base; | ||
335 | link->conf.Present = parse.config.rmask[0]; | ||
336 | 326 | ||
337 | #if 0 | 327 | for (base = 0x000; base < 0x400; base += 0x20) { |
338 | tuple.DesiredTuple = CISTPL_LONGLINK_MFC; | 328 | p_dev->io.BasePort1 = base; |
339 | tuple.Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK; | 329 | ret = pcmcia_request_io(p_dev, &p_dev->io); |
340 | info->multi(first_tuple(link, &tuple, &parse) == 0); | 330 | if (!ret) |
341 | #endif | 331 | return 0; |
342 | |||
343 | tuple.DesiredTuple = CISTPL_MANFID; | ||
344 | tuple.Attributes = TUPLE_RETURN_COMMON; | ||
345 | if ((pcmcia_get_first_tuple(link, &tuple) == 0) && | ||
346 | (pcmcia_get_tuple_data(link, &tuple) == 0)) { | ||
347 | manfid = le16_to_cpu(buf[0]); | ||
348 | prodid = le16_to_cpu(buf[1]); | ||
349 | } | 332 | } |
350 | /* printk("manfid = 0x%04x, 0x%04x\n",manfid,prodid); */ | 333 | return -ENODEV; |
334 | } | ||
351 | 335 | ||
352 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | ||
353 | tuple.Attributes = 0; | ||
354 | ret = pcmcia_get_first_tuple(link, &tuple); | ||
355 | ret = pcmcia_get_tuple_data(link, &tuple); | ||
356 | ret = pcmcia_parse_tuple(&tuple, &parse); | ||
357 | 336 | ||
358 | #if 0 | 337 | static void mio_cs_config(struct pcmcia_device *link) |
359 | printk(" index: 0x%x\n", parse.cftable_entry.index); | 338 | { |
360 | printk(" flags: 0x%x\n", parse.cftable_entry.flags); | 339 | int ret; |
361 | printk(" io flags: 0x%x\n", parse.cftable_entry.io.flags); | ||
362 | printk(" io nwin: 0x%x\n", parse.cftable_entry.io.nwin); | ||
363 | printk(" io base: 0x%x\n", parse.cftable_entry.io.win[0].base); | ||
364 | printk(" io len: 0x%x\n", parse.cftable_entry.io.win[0].len); | ||
365 | printk(" irq1: 0x%x\n", parse.cftable_entry.irq.IRQInfo1); | ||
366 | printk(" irq2: 0x%x\n", parse.cftable_entry.irq.IRQInfo2); | ||
367 | printk(" mem flags: 0x%x\n", parse.cftable_entry.mem.flags); | ||
368 | printk(" mem nwin: 0x%x\n", parse.cftable_entry.mem.nwin); | ||
369 | printk(" subtuples: 0x%x\n", parse.cftable_entry.subtuples); | ||
370 | #endif | ||
371 | 340 | ||
372 | #if 0 | 341 | DPRINTK("mio_cs_config(link=%p)\n", link); |
373 | link->io.NumPorts1 = 0x20; | ||
374 | link->io.IOAddrLines = 5; | ||
375 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
376 | #endif | ||
377 | link->io.NumPorts1 = parse.cftable_entry.io.win[0].len; | ||
378 | link->io.IOAddrLines = | ||
379 | parse.cftable_entry.io.flags & CISTPL_IO_LINES_MASK; | ||
380 | link->io.NumPorts2 = 0; | ||
381 | 342 | ||
382 | { | 343 | ret = pcmcia_loop_config(link, mio_pcmcia_config_loop, NULL); |
383 | int base; | 344 | if (ret) { |
384 | for (base = 0x000; base < 0x400; base += 0x20) { | 345 | dev_warn(&link->dev, "no configuration found\n"); |
385 | link->io.BasePort1 = base; | 346 | return; |
386 | ret = pcmcia_request_io(link, &link->io); | ||
387 | /* printk("RequestIO 0x%02x\n",ret); */ | ||
388 | if (!ret) | ||
389 | break; | ||
390 | } | ||
391 | } | 347 | } |
392 | 348 | ||
393 | link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1; | ||
394 | link->irq.IRQInfo2 = parse.cftable_entry.irq.IRQInfo2; | ||
395 | ret = pcmcia_request_irq(link, &link->irq); | 349 | ret = pcmcia_request_irq(link, &link->irq); |
396 | if (ret) { | 350 | if (ret) { |
397 | printk("pcmcia_request_irq() returned error: %i\n", ret); | 351 | printk("pcmcia_request_irq() returned error: %i\n", ret); |
398 | } | 352 | } |
399 | /* printk("RequestIRQ 0x%02x\n",ret); */ | ||
400 | |||
401 | link->conf.ConfigIndex = 1; | ||
402 | 353 | ||
403 | ret = pcmcia_request_configuration(link, &link->conf); | 354 | ret = pcmcia_request_configuration(link, &link->conf); |
404 | /* printk("RequestConfiguration %d\n",ret); */ | ||
405 | 355 | ||
406 | link->dev_node = &dev_node; | 356 | link->dev_node = &dev_node; |
407 | } | 357 | } |
@@ -475,40 +425,17 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
475 | return 0; | 425 | return 0; |
476 | } | 426 | } |
477 | 427 | ||
478 | static int get_prodid(struct comedi_device *dev, struct pcmcia_device *link) | ||
479 | { | ||
480 | tuple_t tuple; | ||
481 | u_short buf[128]; | ||
482 | int prodid = 0; | ||
483 | |||
484 | tuple.TupleData = (cisdata_t *) buf; | ||
485 | tuple.TupleOffset = 0; | ||
486 | tuple.TupleDataMax = 255; | ||
487 | tuple.DesiredTuple = CISTPL_MANFID; | ||
488 | tuple.Attributes = TUPLE_RETURN_COMMON; | ||
489 | if ((pcmcia_get_first_tuple(link, &tuple) == 0) && | ||
490 | (pcmcia_get_tuple_data(link, &tuple) == 0)) { | ||
491 | prodid = le16_to_cpu(buf[1]); | ||
492 | } | ||
493 | |||
494 | return prodid; | ||
495 | } | ||
496 | |||
497 | static int ni_getboardtype(struct comedi_device *dev, | 428 | static int ni_getboardtype(struct comedi_device *dev, |
498 | struct pcmcia_device *link) | 429 | struct pcmcia_device *link) |
499 | { | 430 | { |
500 | int id; | ||
501 | int i; | 431 | int i; |
502 | 432 | ||
503 | id = get_prodid(dev, link); | ||
504 | |||
505 | for (i = 0; i < n_ni_boards; i++) { | 433 | for (i = 0; i < n_ni_boards; i++) { |
506 | if (ni_boards[i].device_id == id) { | 434 | if (ni_boards[i].device_id == link->card_id) |
507 | return i; | 435 | return i; |
508 | } | ||
509 | } | 436 | } |
510 | 437 | ||
511 | printk("unknown board 0x%04x -- pretend it is a ", id); | 438 | printk("unknown board 0x%04x -- pretend it is a ", link->card_id); |
512 | 439 | ||
513 | return 0; | 440 | return 0; |
514 | } | 441 | } |
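The ni_mio_cs.c hunk above can delete get_prodid() because the PCMCIA core already parses CISTPL_MANFID and exposes the result as link->manf_id and link->card_id. The board lookup reduces to the table scan shown in the hunk, consolidated here for readability:

    static int ni_getboardtype(struct comedi_device *dev,
                               struct pcmcia_device *link)
    {
            int i;

            for (i = 0; i < n_ni_boards; i++)
                    if (ni_boards[i].device_id == link->card_id)
                            return i;

            printk("unknown board 0x%04x -- pretend it is a ", link->card_id);
            return 0;
    }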
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c index 344b82353e08..5256fd933162 100644 --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c | |||
@@ -55,23 +55,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308 | |||
55 | #include <pcmcia/cisreg.h> | 55 | #include <pcmcia/cisreg.h> |
56 | #include <pcmcia/ds.h> | 56 | #include <pcmcia/ds.h> |
57 | 57 | ||
58 | /* | ||
59 | All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If | ||
60 | you do not define PCMCIA_DEBUG at all, all the debug code will be | ||
61 | left out. If you compile with PCMCIA_DEBUG=0, the debug code will | ||
62 | be present but disabled -- but it can then be enabled for specific | ||
63 | modules at load time with a 'pc_debug=#' option to insmod. | ||
64 | */ | ||
65 | |||
66 | #ifdef PCMCIA_DEBUG | ||
67 | static int pc_debug = PCMCIA_DEBUG; | ||
68 | module_param(pc_debug, int, 0644); | ||
69 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
70 | static char *version = "quatech_daqp_cs.c 1.10 2003/04/21 (Brent Baccala)"; | ||
71 | #else | ||
72 | #define DEBUG(n, args...) | ||
73 | #endif | ||
74 | |||
75 | /* Maximum number of separate DAQP devices we'll allow */ | 58 | /* Maximum number of separate DAQP devices we'll allow */ |
76 | #define MAX_DEV 4 | 59 | #define MAX_DEV 4 |
77 | 60 | ||
@@ -863,8 +846,6 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
863 | { | 846 | { |
864 | int ret; | 847 | int ret; |
865 | struct local_info_t *local = dev_table[it->options[0]]; | 848 | struct local_info_t *local = dev_table[it->options[0]]; |
866 | tuple_t tuple; | ||
867 | int i; | ||
868 | struct comedi_subdevice *s; | 849 | struct comedi_subdevice *s; |
869 | 850 | ||
870 | if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) { | 851 | if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) { |
@@ -883,29 +864,10 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
883 | 864 | ||
884 | strcpy(local->board_name, "DAQP"); | 865 | strcpy(local->board_name, "DAQP"); |
885 | dev->board_name = local->board_name; | 866 | dev->board_name = local->board_name; |
886 | 867 | if (local->link->prod_id[2]) { | |
887 | tuple.DesiredTuple = CISTPL_VERS_1; | 868 | if (strncmp(local->link->prod_id[2], "DAQP", 4) == 0) { |
888 | if (pcmcia_get_first_tuple(local->link, &tuple) == 0) { | 869 | strncpy(local->board_name, local->link->prod_id[2], |
889 | u_char buf[128]; | 870 | sizeof(local->board_name)); |
890 | |||
891 | buf[0] = buf[sizeof(buf) - 1] = 0; | ||
892 | tuple.TupleData = buf; | ||
893 | tuple.TupleDataMax = sizeof(buf); | ||
894 | tuple.TupleOffset = 2; | ||
895 | if (pcmcia_get_tuple_data(local->link, &tuple) == 0) { | ||
896 | |||
897 | for (i = 0; i < tuple.TupleDataLen - 4; i++) | ||
898 | if (buf[i] == 0) | ||
899 | break; | ||
900 | for (i++; i < tuple.TupleDataLen - 4; i++) | ||
901 | if (buf[i] == 0) | ||
902 | break; | ||
903 | i++; | ||
904 | if ((i < tuple.TupleDataLen - 4) | ||
905 | && (strncmp(buf + i, "DAQP", 4) == 0)) { | ||
906 | strncpy(local->board_name, buf + i, | ||
907 | sizeof(local->board_name)); | ||
908 | } | ||
909 | } | 871 | } |
910 | } | 872 | } |
911 | 873 | ||
@@ -1058,7 +1020,7 @@ static int daqp_cs_attach(struct pcmcia_device *link) | |||
1058 | struct local_info_t *local; | 1020 | struct local_info_t *local; |
1059 | int i; | 1021 | int i; |
1060 | 1022 | ||
1061 | DEBUG(0, "daqp_cs_attach()\n"); | 1023 | dev_dbg(&link->dev, "daqp_cs_attach()\n"); |
1062 | 1024 | ||
1063 | for (i = 0; i < MAX_DEV; i++) | 1025 | for (i = 0; i < MAX_DEV; i++) |
1064 | if (dev_table[i] == NULL) | 1026 | if (dev_table[i] == NULL) |
@@ -1079,10 +1041,8 @@ static int daqp_cs_attach(struct pcmcia_device *link) | |||
1079 | link->priv = local; | 1041 | link->priv = local; |
1080 | 1042 | ||
1081 | /* Interrupt setup */ | 1043 | /* Interrupt setup */ |
1082 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; | 1044 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
1083 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
1084 | link->irq.Handler = daqp_interrupt; | 1045 | link->irq.Handler = daqp_interrupt; |
1085 | link->irq.Instance = local; | ||
1086 | 1046 | ||
1087 | /* | 1047 | /* |
1088 | General socket configuration defaults can go here. In this | 1048 | General socket configuration defaults can go here. In this |
@@ -1112,7 +1072,7 @@ static void daqp_cs_detach(struct pcmcia_device *link) | |||
1112 | { | 1072 | { |
1113 | struct local_info_t *dev = link->priv; | 1073 | struct local_info_t *dev = link->priv; |
1114 | 1074 | ||
1115 | DEBUG(0, "daqp_cs_detach(0x%p)\n", link); | 1075 | dev_dbg(&link->dev, "daqp_cs_detach\n"); |
1116 | 1076 | ||
1117 | if (link->dev_node) { | 1077 | if (link->dev_node) { |
1118 | dev->stop = 1; | 1078 | dev->stop = 1; |
@@ -1134,115 +1094,54 @@ static void daqp_cs_detach(struct pcmcia_device *link) | |||
1134 | 1094 | ||
1135 | ======================================================================*/ | 1095 | ======================================================================*/ |
1136 | 1096 | ||
1137 | static void daqp_cs_config(struct pcmcia_device *link) | ||
1138 | { | ||
1139 | struct local_info_t *dev = link->priv; | ||
1140 | tuple_t tuple; | ||
1141 | cisparse_t parse; | ||
1142 | int last_ret; | ||
1143 | u_char buf[64]; | ||
1144 | |||
1145 | DEBUG(0, "daqp_cs_config(0x%p)\n", link); | ||
1146 | |||
1147 | /* | ||
1148 | This reads the card's CONFIG tuple to find its configuration | ||
1149 | registers. | ||
1150 | */ | ||
1151 | tuple.DesiredTuple = CISTPL_CONFIG; | ||
1152 | tuple.Attributes = 0; | ||
1153 | tuple.TupleData = buf; | ||
1154 | tuple.TupleDataMax = sizeof(buf); | ||
1155 | tuple.TupleOffset = 0; | ||
1156 | |||
1157 | last_ret = pcmcia_get_first_tuple(link, &tuple); | ||
1158 | if (last_ret) { | ||
1159 | cs_error(link, GetFirstTuple, last_ret); | ||
1160 | goto cs_failed; | ||
1161 | } | ||
1162 | 1097 | ||
1163 | last_ret = pcmcia_get_tuple_data(link, &tuple); | 1098 | static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev, |
1164 | if (last_ret) { | 1099 | cistpl_cftable_entry_t *cfg, |
1165 | cs_error(link, GetTupleData, last_ret); | 1100 | cistpl_cftable_entry_t *dflt, |
1166 | goto cs_failed; | 1101 | unsigned int vcc, |
1167 | } | 1102 | void *priv_data) |
1168 | 1103 | { | |
1169 | last_ret = pcmcia_parse_tuple(&tuple, &parse); | 1104 | if (cfg->index == 0) |
1170 | if (last_ret) { | 1105 | return -ENODEV; |
1171 | cs_error(link, ParseTuple, last_ret); | ||
1172 | goto cs_failed; | ||
1173 | } | ||
1174 | link->conf.ConfigBase = parse.config.base; | ||
1175 | link->conf.Present = parse.config.rmask[0]; | ||
1176 | 1106 | ||
1177 | /* | 1107 | /* Do we need to allocate an interrupt? */ |
1178 | In this loop, we scan the CIS for configuration table entries, | 1108 | if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) |
1179 | each of which describes a valid card configuration, including | 1109 | p_dev->conf.Attributes |= CONF_ENABLE_IRQ; |
1180 | voltage, IO window, memory window, and interrupt settings. | 1110 | |
1181 | 1111 | /* IO window settings */ | |
1182 | We make no assumptions about the card to be configured: we use | 1112 | p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; |
1183 | just the information available in the CIS. In an ideal world, | 1113 | if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { |
1184 | this would work for any PCMCIA card, but it requires a complete | 1114 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; |
1185 | and accurate CIS. In practice, a driver usually "knows" most of | 1115 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
1186 | these things without consulting the CIS, and most client drivers | 1116 | if (!(io->flags & CISTPL_IO_8BIT)) |
1187 | will only use the CIS to fill in implementation-defined details. | 1117 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
1188 | */ | 1118 | if (!(io->flags & CISTPL_IO_16BIT)) |
1189 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | 1119 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
1190 | last_ret = pcmcia_get_first_tuple(link, &tuple); | 1120 | p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; |
1191 | if (last_ret) { | 1121 | p_dev->io.BasePort1 = io->win[0].base; |
1192 | cs_error(link, GetFirstTuple, last_ret); | 1122 | p_dev->io.NumPorts1 = io->win[0].len; |
1193 | goto cs_failed; | 1123 | if (io->nwin > 1) { |
1124 | p_dev->io.Attributes2 = p_dev->io.Attributes1; | ||
1125 | p_dev->io.BasePort2 = io->win[1].base; | ||
1126 | p_dev->io.NumPorts2 = io->win[1].len; | ||
1127 | } | ||
1194 | } | 1128 | } |
1195 | 1129 | ||
1196 | while (1) { | 1130 | /* This reserves IO space but doesn't actually enable it */ |
1197 | cistpl_cftable_entry_t dflt = { 0 }; | 1131 | return pcmcia_request_io(p_dev, &p_dev->io); |
1198 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | 1132 | } |
1199 | if (pcmcia_get_tuple_data(link, &tuple)) | ||
1200 | goto next_entry; | ||
1201 | if (pcmcia_parse_tuple(&tuple, &parse)) | ||
1202 | goto next_entry; | ||
1203 | |||
1204 | if (cfg->flags & CISTPL_CFTABLE_DEFAULT) | ||
1205 | dflt = *cfg; | ||
1206 | if (cfg->index == 0) | ||
1207 | goto next_entry; | ||
1208 | link->conf.ConfigIndex = cfg->index; | ||
1209 | |||
1210 | /* Do we need to allocate an interrupt? */ | ||
1211 | if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) | ||
1212 | link->conf.Attributes |= CONF_ENABLE_IRQ; | ||
1213 | |||
1214 | /* IO window settings */ | ||
1215 | link->io.NumPorts1 = link->io.NumPorts2 = 0; | ||
1216 | if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { | ||
1217 | cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; | ||
1218 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | ||
1219 | if (!(io->flags & CISTPL_IO_8BIT)) | ||
1220 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | ||
1221 | if (!(io->flags & CISTPL_IO_16BIT)) | ||
1222 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | ||
1223 | link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; | ||
1224 | link->io.BasePort1 = io->win[0].base; | ||
1225 | link->io.NumPorts1 = io->win[0].len; | ||
1226 | if (io->nwin > 1) { | ||
1227 | link->io.Attributes2 = link->io.Attributes1; | ||
1228 | link->io.BasePort2 = io->win[1].base; | ||
1229 | link->io.NumPorts2 = io->win[1].len; | ||
1230 | } | ||
1231 | } | ||
1232 | 1133 | ||
1233 | /* This reserves IO space but doesn't actually enable it */ | 1134 | static void daqp_cs_config(struct pcmcia_device *link) |
1234 | if (pcmcia_request_io(link, &link->io)) | 1135 | { |
1235 | goto next_entry; | 1136 | struct local_info_t *dev = link->priv; |
1137 | int ret; | ||
1236 | 1138 | ||
1237 | /* If we got this far, we're cool! */ | 1139 | dev_dbg(&link->dev, "daqp_cs_config\n"); |
1238 | break; | ||
1239 | 1140 | ||
1240 | next_entry: | 1141 | ret = pcmcia_loop_config(link, daqp_pcmcia_config_loop, NULL); |
1241 | last_ret = pcmcia_get_next_tuple(link, &tuple); | 1142 | if (ret) { |
1242 | if (last_ret) { | 1143 | dev_warn(&link->dev, "no configuration found\n"); |
1243 | cs_error(link, GetNextTuple, last_ret); | 1144 | goto failed; |
1244 | goto cs_failed; | ||
1245 | } | ||
1246 | } | 1145 | } |
1247 | 1146 | ||
1248 | /* | 1147 | /* |
@@ -1251,11 +1150,9 @@ next_entry: | |||
1251 | irq structure is initialized. | 1150 | irq structure is initialized. |
1252 | */ | 1151 | */ |
1253 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { | 1152 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
1254 | last_ret = pcmcia_request_irq(link, &link->irq); | 1153 | ret = pcmcia_request_irq(link, &link->irq); |
1255 | if (last_ret) { | 1154 | if (ret) |
1256 | cs_error(link, RequestIRQ, last_ret); | 1155 | goto failed; |
1257 | goto cs_failed; | ||
1258 | } | ||
1259 | } | 1156 | } |
1260 | 1157 | ||
1261 | /* | 1158 | /* |
@@ -1263,11 +1160,9 @@ next_entry: | |||
1263 | the I/O windows and the interrupt mapping, and putting the | 1160 | the I/O windows and the interrupt mapping, and putting the |
1264 | card and host interface into "Memory and IO" mode. | 1161 | card and host interface into "Memory and IO" mode. |
1265 | */ | 1162 | */ |
1266 | last_ret = pcmcia_request_configuration(link, &link->conf); | 1163 | ret = pcmcia_request_configuration(link, &link->conf); |
1267 | if (last_ret) { | 1164 | if (ret) |
1268 | cs_error(link, RequestConfiguration, last_ret); | 1165 | goto failed; |
1269 | goto cs_failed; | ||
1270 | } | ||
1271 | 1166 | ||
1272 | /* | 1167 | /* |
1273 | At this point, the dev_node_t structure(s) need to be | 1168 | At this point, the dev_node_t structure(s) need to be |
@@ -1296,14 +1191,14 @@ next_entry: | |||
1296 | 1191 | ||
1297 | return; | 1192 | return; |
1298 | 1193 | ||
1299 | cs_failed: | 1194 | failed: |
1300 | daqp_cs_release(link); | 1195 | daqp_cs_release(link); |
1301 | 1196 | ||
1302 | } /* daqp_cs_config */ | 1197 | } /* daqp_cs_config */ |
1303 | 1198 | ||
1304 | static void daqp_cs_release(struct pcmcia_device *link) | 1199 | static void daqp_cs_release(struct pcmcia_device *link) |
1305 | { | 1200 | { |
1306 | DEBUG(0, "daqp_cs_release(0x%p)\n", link); | 1201 | dev_dbg(&link->dev, "daqp_cs_release\n"); |
1307 | 1202 | ||
1308 | pcmcia_disable_device(link); | 1203 | pcmcia_disable_device(link); |
1309 | } /* daqp_cs_release */ | 1204 | } /* daqp_cs_release */ |
@@ -1363,7 +1258,6 @@ struct pcmcia_driver daqp_cs_driver = { | |||
1363 | 1258 | ||
1364 | int __init init_module(void) | 1259 | int __init init_module(void) |
1365 | { | 1260 | { |
1366 | DEBUG(0, "%s\n", version); | ||
1367 | pcmcia_register_driver(&daqp_cs_driver); | 1261 | pcmcia_register_driver(&daqp_cs_driver); |
1368 | comedi_driver_register(&driver_daqp); | 1262 | comedi_driver_register(&driver_daqp); |
1369 | return 0; | 1263 | return 0; |
@@ -1371,7 +1265,6 @@ int __init init_module(void) | |||
1371 | 1265 | ||
1372 | void __exit cleanup_module(void) | 1266 | void __exit cleanup_module(void) |
1373 | { | 1267 | { |
1374 | DEBUG(0, "daqp_cs: unloading\n"); | ||
1375 | comedi_driver_unregister(&driver_daqp); | 1268 | comedi_driver_unregister(&driver_daqp); |
1376 | pcmcia_unregister_driver(&daqp_cs_driver); | 1269 | pcmcia_unregister_driver(&daqp_cs_driver); |
1377 | } | 1270 | } |
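quatech_daqp_cs.c likewise stops parsing CISTPL_VERS_1 by hand: the product-identification strings are already available as link->prod_id[0..3]. The replacement boils down to a string check on the third field, assuming (as the hunk does) that board_name is large enough to hold it:

    /* prod_id[] is filled in from the CIS by the PCMCIA core */
    if (local->link->prod_id[2] &&
        strncmp(local->link->prod_id[2], "DAQP", 4) == 0)
            strncpy(local->board_name, local->link->prod_id[2],
                    sizeof(local->board_name));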
diff --git a/drivers/staging/hv/BlkVsc.c b/drivers/staging/hv/BlkVsc.c index 51aa861292fc..a48ee3a12646 100644 --- a/drivers/staging/hv/BlkVsc.c +++ b/drivers/staging/hv/BlkVsc.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 16 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
17 | * | 17 | * |
18 | * Authors: | 18 | * Authors: |
19 | * Haiyang Zhang <haiyangz@microsoft.com> | ||
19 | * Hank Janssen <hjanssen@microsoft.com> | 20 | * Hank Janssen <hjanssen@microsoft.com> |
20 | * | 21 | * |
21 | */ | 22 | */ |
diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c index d649ee169d95..746370e82115 100644 --- a/drivers/staging/hv/Channel.c +++ b/drivers/staging/hv/Channel.c | |||
@@ -611,7 +611,7 @@ void VmbusChannelClose(struct vmbus_channel *Channel) | |||
611 | 611 | ||
612 | /* Stop callback and cancel the timer asap */ | 612 | /* Stop callback and cancel the timer asap */ |
613 | Channel->OnChannelCallback = NULL; | 613 | Channel->OnChannelCallback = NULL; |
614 | del_timer(&Channel->poll_timer); | 614 | del_timer_sync(&Channel->poll_timer); |
615 | 615 | ||
616 | /* Send a closing message */ | 616 | /* Send a closing message */ |
617 | info = kmalloc(sizeof(*info) + | 617 | info = kmalloc(sizeof(*info) + |
@@ -978,14 +978,10 @@ void VmbusChannelOnChannelEvent(struct vmbus_channel *Channel) | |||
978 | { | 978 | { |
979 | DumpVmbusChannel(Channel); | 979 | DumpVmbusChannel(Channel); |
980 | ASSERT(Channel->OnChannelCallback); | 980 | ASSERT(Channel->OnChannelCallback); |
981 | #ifdef ENABLE_POLLING | 981 | |
982 | del_timer(&Channel->poll_timer); | ||
983 | Channel->OnChannelCallback(Channel->ChannelCallbackContext); | ||
984 | channel->poll_timer.expires(jiffies + usecs_to_jiffies(100); | ||
985 | add_timer(&channel->poll_timer); | ||
986 | #else | ||
987 | Channel->OnChannelCallback(Channel->ChannelCallbackContext); | 982 | Channel->OnChannelCallback(Channel->ChannelCallbackContext); |
988 | #endif | 983 | |
984 | mod_timer(&Channel->poll_timer, jiffies + usecs_to_jiffies(100)); | ||
989 | } | 985 | } |
990 | 986 | ||
991 | /** | 987 | /** |
@@ -997,10 +993,6 @@ void VmbusChannelOnTimer(unsigned long data) | |||
997 | 993 | ||
998 | if (channel->OnChannelCallback) { | 994 | if (channel->OnChannelCallback) { |
999 | channel->OnChannelCallback(channel->ChannelCallbackContext); | 995 | channel->OnChannelCallback(channel->ChannelCallbackContext); |
1000 | #ifdef ENABLE_POLLING | ||
1001 | channel->poll_timer.expires(jiffies + usecs_to_jiffies(100); | ||
1002 | add_timer(&channel->poll_timer); | ||
1003 | #endif | ||
1004 | } | 996 | } |
1005 | } | 997 | } |
1006 | 998 | ||
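The Channel.c hunks above change the poll-timer handling in two ways. del_timer() only removes a timer that has not fired yet; del_timer_sync() additionally waits until a handler already running on another CPU has finished, which is what VmbusChannelClose() and FreeVmbusChannel() (next hunk) need before the callback or the channel goes away. mod_timer() re-arms the timer in a single call, replacing the broken del_timer()/add_timer() pair that was hidden behind ENABLE_POLLING. The idiom, with the 100 us interval taken from the hunk:

    /* event path: run the callback, then re-arm the poll timer */
    Channel->OnChannelCallback(Channel->ChannelCallbackContext);
    mod_timer(&Channel->poll_timer, jiffies + usecs_to_jiffies(100));

    /* close path: stop new callbacks, then wait for any running handler */
    Channel->OnChannelCallback = NULL;
    del_timer_sync(&Channel->poll_timer);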
diff --git a/drivers/staging/hv/ChannelMgmt.c b/drivers/staging/hv/ChannelMgmt.c index 3db62caedcff..ef38467ed4e2 100644 --- a/drivers/staging/hv/ChannelMgmt.c +++ b/drivers/staging/hv/ChannelMgmt.c | |||
@@ -119,7 +119,7 @@ static inline void ReleaseVmbusChannel(void *context) | |||
119 | */ | 119 | */ |
120 | void FreeVmbusChannel(struct vmbus_channel *Channel) | 120 | void FreeVmbusChannel(struct vmbus_channel *Channel) |
121 | { | 121 | { |
122 | del_timer(&Channel->poll_timer); | 122 | del_timer_sync(&Channel->poll_timer); |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * We have to release the channel's workqueue/thread in the vmbus's | 125 | * We have to release the channel's workqueue/thread in the vmbus's |
diff --git a/drivers/staging/hv/NetVsc.c b/drivers/staging/hv/NetVsc.c index d384c0ddf069..1c717f9a554e 100644 --- a/drivers/staging/hv/NetVsc.c +++ b/drivers/staging/hv/NetVsc.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
16 | * | 16 | * |
17 | * Authors: | 17 | * Authors: |
18 | * Haiyang Zhang <haiyangz@microsoft.com> | ||
18 | * Hank Janssen <hjanssen@microsoft.com> | 19 | * Hank Janssen <hjanssen@microsoft.com> |
19 | */ | 20 | */ |
20 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
diff --git a/drivers/staging/hv/NetVsc.h b/drivers/staging/hv/NetVsc.h index 3e7112f7c755..6e0e03494126 100644 --- a/drivers/staging/hv/NetVsc.h +++ b/drivers/staging/hv/NetVsc.h | |||
@@ -16,6 +16,7 @@ | |||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 16 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
17 | * | 17 | * |
18 | * Authors: | 18 | * Authors: |
19 | * Haiyang Zhang <haiyangz@microsoft.com> | ||
19 | * Hank Janssen <hjanssen@microsoft.com> | 20 | * Hank Janssen <hjanssen@microsoft.com> |
20 | * | 21 | * |
21 | */ | 22 | */ |
diff --git a/drivers/staging/hv/StorVsc.c b/drivers/staging/hv/StorVsc.c index 14015c927940..2f7c425896f7 100644 --- a/drivers/staging/hv/StorVsc.c +++ b/drivers/staging/hv/StorVsc.c | |||
@@ -196,7 +196,7 @@ static int StorVscChannelInit(struct hv_device *Device) | |||
196 | * Now, initiate the vsc/vsp initialization protocol on the open | 196 | * Now, initiate the vsc/vsp initialization protocol on the open |
197 | * channel | 197 | * channel |
198 | */ | 198 | */ |
199 | memset(request, sizeof(struct storvsc_request_extension), 0); | 199 | memset(request, 0, sizeof(struct storvsc_request_extension)); |
200 | request->WaitEvent = osd_WaitEventCreate(); | 200 | request->WaitEvent = osd_WaitEventCreate(); |
201 | 201 | ||
202 | vstorPacket->Operation = VStorOperationBeginInitialization; | 202 | vstorPacket->Operation = VStorOperationBeginInitialization; |
@@ -233,7 +233,7 @@ static int StorVscChannelInit(struct hv_device *Device) | |||
233 | DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION..."); | 233 | DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION..."); |
234 | 234 | ||
235 | /* reuse the packet for version range supported */ | 235 | /* reuse the packet for version range supported */ |
236 | memset(vstorPacket, sizeof(struct vstor_packet), 0); | 236 | memset(vstorPacket, 0, sizeof(struct vstor_packet)); |
237 | vstorPacket->Operation = VStorOperationQueryProtocolVersion; | 237 | vstorPacket->Operation = VStorOperationQueryProtocolVersion; |
238 | vstorPacket->Flags = REQUEST_COMPLETION_FLAG; | 238 | vstorPacket->Flags = REQUEST_COMPLETION_FLAG; |
239 | 239 | ||
@@ -266,7 +266,7 @@ static int StorVscChannelInit(struct hv_device *Device) | |||
266 | /* Query channel properties */ | 266 | /* Query channel properties */ |
267 | DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION..."); | 267 | DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION..."); |
268 | 268 | ||
269 | memset(vstorPacket, sizeof(struct vstor_packet), 0); | 269 | memset(vstorPacket, 0, sizeof(struct vstor_packet)); |
270 | vstorPacket->Operation = VStorOperationQueryProperties; | 270 | vstorPacket->Operation = VStorOperationQueryProperties; |
271 | vstorPacket->Flags = REQUEST_COMPLETION_FLAG; | 271 | vstorPacket->Flags = REQUEST_COMPLETION_FLAG; |
272 | vstorPacket->StorageChannelProperties.PortNumber = | 272 | vstorPacket->StorageChannelProperties.PortNumber = |
@@ -305,7 +305,7 @@ static int StorVscChannelInit(struct hv_device *Device) | |||
305 | 305 | ||
306 | DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION..."); | 306 | DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION..."); |
307 | 307 | ||
308 | memset(vstorPacket, sizeof(struct vstor_packet), 0); | 308 | memset(vstorPacket, 0, sizeof(struct vstor_packet)); |
309 | vstorPacket->Operation = VStorOperationEndInitialization; | 309 | vstorPacket->Operation = VStorOperationEndInitialization; |
310 | vstorPacket->Flags = REQUEST_COMPLETION_FLAG; | 310 | vstorPacket->Flags = REQUEST_COMPLETION_FLAG; |
311 | 311 | ||
@@ -508,7 +508,7 @@ static int StorVscConnectToVsp(struct hv_device *Device) | |||
508 | int ret; | 508 | int ret; |
509 | 509 | ||
510 | storDriver = (struct storvsc_driver_object *)Device->Driver; | 510 | storDriver = (struct storvsc_driver_object *)Device->Driver; |
511 | memset(&props, sizeof(struct vmstorage_channel_properties), 0); | 511 | memset(&props, 0, sizeof(struct vmstorage_channel_properties)); |
512 | 512 | ||
513 | /* Open the channel */ | 513 | /* Open the channel */ |
514 | ret = Device->Driver->VmbusChannelInterface.Open(Device, | 514 | ret = Device->Driver->VmbusChannelInterface.Open(Device, |
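The StorVsc.c hunks above fix a swapped-argument bug: memset() takes the fill byte as its second argument and the length as its third, so the old memset(p, sizeof(...), 0) cleared zero bytes and left the packet uninitialised. For reference:

    /* void *memset(void *s, int c, size_t n) -- c is the byte value, n the length */
    memset(vstorPacket, 0, sizeof(struct vstor_packet));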
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 99c49261a8b4..62b282844a53 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
16 | * | 16 | * |
17 | * Authors: | 17 | * Authors: |
18 | * Haiyang Zhang <haiyangz@microsoft.com> | ||
18 | * Hank Janssen <hjanssen@microsoft.com> | 19 | * Hank Janssen <hjanssen@microsoft.com> |
19 | */ | 20 | */ |
20 | #include <linux/init.h> | 21 | #include <linux/init.h> |
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 3192d50f7251..0d7459e2d036 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 15 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
16 | * | 16 | * |
17 | * Authors: | 17 | * Authors: |
18 | * Haiyang Zhang <haiyangz@microsoft.com> | ||
18 | * Hank Janssen <hjanssen@microsoft.com> | 19 | * Hank Janssen <hjanssen@microsoft.com> |
19 | */ | 20 | */ |
20 | #include <linux/init.h> | 21 | #include <linux/init.h> |
diff --git a/drivers/staging/rtl8187se/TODO b/drivers/staging/rtl8187se/TODO index c09a9160739d..a762e79873e9 100644 --- a/drivers/staging/rtl8187se/TODO +++ b/drivers/staging/rtl8187se/TODO | |||
@@ -11,5 +11,4 @@ TODO: | |||
11 | - sparse fixes | 11 | - sparse fixes |
12 | - integrate with drivers/net/wireless/rtl818x | 12 | - integrate with drivers/net/wireless/rtl818x |
13 | 13 | ||
14 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and | 14 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>. |
15 | Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>. | ||
diff --git a/drivers/staging/rtl8192su/TODO b/drivers/staging/rtl8192su/TODO index b13be9edb278..f11eec700030 100644 --- a/drivers/staging/rtl8192su/TODO +++ b/drivers/staging/rtl8192su/TODO | |||
@@ -14,5 +14,4 @@ TODO: | |||
14 | - sparse fixes | 14 | - sparse fixes |
15 | - integrate with drivers/net/wireless/rtl818x | 15 | - integrate with drivers/net/wireless/rtl818x |
16 | 16 | ||
17 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and | 17 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>. |
18 | Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>. | ||
diff --git a/drivers/staging/vt6655/TODO b/drivers/staging/vt6655/TODO index 8462cd17eb61..cb04aaafc46f 100644 --- a/drivers/staging/vt6655/TODO +++ b/drivers/staging/vt6655/TODO | |||
@@ -16,6 +16,5 @@ TODO: | |||
16 | - sparse fixes | 16 | - sparse fixes |
17 | - integrate with drivers/net/wireless | 17 | - integrate with drivers/net/wireless |
18 | 18 | ||
19 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, | 19 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> |
20 | Forest Bond <forest@alittletooquiet.net> and Bartlomiej Zolnierkiewicz | 20 | and Forest Bond <forest@alittletooquiet.net>. |
21 | <bzolnier@gmail.com>. | ||
diff --git a/drivers/staging/vt6656/TODO b/drivers/staging/vt6656/TODO index 17cf50c6735e..a318995ba07f 100644 --- a/drivers/staging/vt6656/TODO +++ b/drivers/staging/vt6656/TODO | |||
@@ -15,6 +15,5 @@ TODO: | |||
15 | - sparse fixes | 15 | - sparse fixes |
16 | - integrate with drivers/net/wireless | 16 | - integrate with drivers/net/wireless |
17 | 17 | ||
18 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, | 18 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> |
19 | Forest Bond <forest@alittletooquiet.net> and Bartlomiej Zolnierkiewicz | 19 | and Forest Bond <forest@alittletooquiet.net>. |
20 | <bzolnier@gmail.com>. | ||
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c index 347c3ed1d9f1..d442fd35620a 100644 --- a/drivers/telephony/ixj_pcmcia.c +++ b/drivers/telephony/ixj_pcmcia.c | |||
@@ -19,13 +19,6 @@ | |||
19 | * PCMCIA service support for Quicknet cards | 19 | * PCMCIA service support for Quicknet cards |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #ifdef PCMCIA_DEBUG | ||
23 | static int pc_debug = PCMCIA_DEBUG; | ||
24 | module_param(pc_debug, int, 0644); | ||
25 | #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) | ||
26 | #else | ||
27 | #define DEBUG(n, args...) | ||
28 | #endif | ||
29 | 22 | ||
30 | typedef struct ixj_info_t { | 23 | typedef struct ixj_info_t { |
31 | int ndev; | 24 | int ndev; |
@@ -39,7 +32,7 @@ static void ixj_cs_release(struct pcmcia_device * link); | |||
39 | 32 | ||
40 | static int ixj_probe(struct pcmcia_device *p_dev) | 33 | static int ixj_probe(struct pcmcia_device *p_dev) |
41 | { | 34 | { |
42 | DEBUG(0, "ixj_attach()\n"); | 35 | dev_dbg(&p_dev->dev, "ixj_attach()\n"); |
43 | /* Create new ixj device */ | 36 | /* Create new ixj device */ |
44 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 37 | p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
45 | p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 38 | p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
@@ -55,33 +48,30 @@ static int ixj_probe(struct pcmcia_device *p_dev) | |||
55 | 48 | ||
56 | static void ixj_detach(struct pcmcia_device *link) | 49 | static void ixj_detach(struct pcmcia_device *link) |
57 | { | 50 | { |
58 | DEBUG(0, "ixj_detach(0x%p)\n", link); | 51 | dev_dbg(&link->dev, "ixj_detach\n"); |
59 | 52 | ||
60 | ixj_cs_release(link); | 53 | ixj_cs_release(link); |
61 | 54 | ||
62 | kfree(link->priv); | 55 | kfree(link->priv); |
63 | } | 56 | } |
64 | 57 | ||
65 | #define CS_CHECK(fn, ret) \ | ||
66 | do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) | ||
67 | |||
68 | static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) | 58 | static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) |
69 | { | 59 | { |
70 | char *str; | 60 | char *str; |
71 | int i, place; | 61 | int i, place; |
72 | DEBUG(0, "ixj_get_serial(0x%p)\n", link); | 62 | dev_dbg(&link->dev, "ixj_get_serial\n"); |
73 | 63 | ||
74 | str = link->prod_id[0]; | 64 | str = link->prod_id[0]; |
75 | if (!str) | 65 | if (!str) |
76 | goto cs_failed; | 66 | goto failed; |
77 | printk("%s", str); | 67 | printk("%s", str); |
78 | str = link->prod_id[1]; | 68 | str = link->prod_id[1]; |
79 | if (!str) | 69 | if (!str) |
80 | goto cs_failed; | 70 | goto failed; |
81 | printk(" %s", str); | 71 | printk(" %s", str); |
82 | str = link->prod_id[2]; | 72 | str = link->prod_id[2]; |
83 | if (!str) | 73 | if (!str) |
84 | goto cs_failed; | 74 | goto failed; |
85 | place = 1; | 75 | place = 1; |
86 | for (i = strlen(str) - 1; i >= 0; i--) { | 76 | for (i = strlen(str) - 1; i >= 0; i--) { |
87 | switch (str[i]) { | 77 | switch (str[i]) { |
@@ -118,9 +108,9 @@ static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) | |||
118 | } | 108 | } |
119 | str = link->prod_id[3]; | 109 | str = link->prod_id[3]; |
120 | if (!str) | 110 | if (!str) |
121 | goto cs_failed; | 111 | goto failed; |
122 | printk(" version %s\n", str); | 112 | printk(" version %s\n", str); |
123 | cs_failed: | 113 | failed: |
124 | return; | 114 | return; |
125 | } | 115 | } |
126 | 116 | ||
@@ -151,13 +141,13 @@ static int ixj_config(struct pcmcia_device * link) | |||
151 | cistpl_cftable_entry_t dflt = { 0 }; | 141 | cistpl_cftable_entry_t dflt = { 0 }; |
152 | 142 | ||
153 | info = link->priv; | 143 | info = link->priv; |
154 | DEBUG(0, "ixj_config(0x%p)\n", link); | 144 | dev_dbg(&link->dev, "ixj_config\n"); |
155 | 145 | ||
156 | if (pcmcia_loop_config(link, ixj_config_check, &dflt)) | 146 | if (pcmcia_loop_config(link, ixj_config_check, &dflt)) |
157 | goto cs_failed; | 147 | goto failed; |
158 | 148 | ||
159 | if (pcmcia_request_configuration(link, &link->conf)) | 149 | if (pcmcia_request_configuration(link, &link->conf)) |
160 | goto cs_failed; | 150 | goto failed; |
161 | 151 | ||
162 | /* | 152 | /* |
163 | * Register the card with the core. | 153 | * Register the card with the core. |
@@ -170,7 +160,7 @@ static int ixj_config(struct pcmcia_device * link) | |||
170 | ixj_get_serial(link, j); | 160 | ixj_get_serial(link, j); |
171 | return 0; | 161 | return 0; |
172 | 162 | ||
173 | cs_failed: | 163 | failed: |
174 | ixj_cs_release(link); | 164 | ixj_cs_release(link); |
175 | return -ENODEV; | 165 | return -ENODEV; |
176 | } | 166 | } |
@@ -178,7 +168,7 @@ static int ixj_config(struct pcmcia_device * link) | |||
178 | static void ixj_cs_release(struct pcmcia_device *link) | 168 | static void ixj_cs_release(struct pcmcia_device *link) |
179 | { | 169 | { |
180 | ixj_info_t *info = link->priv; | 170 | ixj_info_t *info = link->priv; |
181 | DEBUG(0, "ixj_cs_release(0x%p)\n", link); | 171 | dev_dbg(&link->dev, "ixj_cs_release\n"); |
182 | info->ndev = 0; | 172 | info->ndev = 0; |
183 | pcmcia_disable_device(link); | 173 | pcmcia_disable_device(link); |
184 | } | 174 | } |
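
The ixj_pcmcia changes above drop the driver-private PCMCIA_DEBUG/DEBUG() macros in favour of dev_dbg(), which ties messages to the struct device and the kernel's standard debug facilities, and replace the CS_CHECK() wrapper with open-coded return-value checks feeding a single error label. A sketch of that error-handling pattern; example_config() is a hypothetical skeleton, the PCMCIA calls are the ones used in the hunks above:

    static int example_config(struct pcmcia_device *link)
    {
        int ret;

        ret = pcmcia_request_configuration(link, &link->conf);
        if (ret)
            goto failed;            /* one shared error label instead of CS_CHECK() */
        return 0;

    failed:
        pcmcia_disable_device(link);
        return -ENODEV;
    }
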
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5ce839137ad6..0f857e645058 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -444,7 +444,7 @@ resubmit: | |||
444 | static inline int | 444 | static inline int |
445 | hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) | 445 | hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) |
446 | { | 446 | { |
447 | return usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), | 447 | return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), |
448 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, | 448 | HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, |
449 | tt, NULL, 0, 1000); | 449 | tt, NULL, 0, 1000); |
450 | } | 450 | } |
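
The one-line hub.c change switches Clear_TT_Buffer from the control IN pipe to the control OUT pipe. Clear_TT_Buffer is a class request with no data stage and a host-to-device bmRequestType, so it must be addressed to the send pipe. The corrected call, with the rationale as a comment (hdev, devinfo and tt are the parameters of hub_clear_tt_buffer() above):

    /* hub class request, no data stage, direction bit clear in USB_RT_PORT,
     * so the OUT (send) control pipe is the correct one */
    return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
            HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
            tt, NULL, 0, 1000);
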
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c index d5b65962dd36..731150d4b1d9 100644 --- a/drivers/usb/gadget/amd5536udc.c +++ b/drivers/usb/gadget/amd5536udc.c | |||
@@ -1213,7 +1213,12 @@ udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp) | |||
1213 | tmp &= AMD_UNMASK_BIT(ep->num); | 1213 | tmp &= AMD_UNMASK_BIT(ep->num); |
1214 | writel(tmp, &dev->regs->ep_irqmsk); | 1214 | writel(tmp, &dev->regs->ep_irqmsk); |
1215 | } | 1215 | } |
1216 | } | 1216 | } else if (ep->in) { |
1217 | /* enable ep irq */ | ||
1218 | tmp = readl(&dev->regs->ep_irqmsk); | ||
1219 | tmp &= AMD_UNMASK_BIT(ep->num); | ||
1220 | writel(tmp, &dev->regs->ep_irqmsk); | ||
1221 | } | ||
1217 | 1222 | ||
1218 | } else if (ep->dma) { | 1223 | } else if (ep->dma) { |
1219 | 1224 | ||
@@ -2005,18 +2010,17 @@ __acquires(dev->lock) | |||
2005 | { | 2010 | { |
2006 | int tmp; | 2011 | int tmp; |
2007 | 2012 | ||
2008 | /* empty queues and init hardware */ | ||
2009 | udc_basic_init(dev); | ||
2010 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | ||
2011 | empty_req_queue(&dev->ep[tmp]); | ||
2012 | } | ||
2013 | |||
2014 | if (dev->gadget.speed != USB_SPEED_UNKNOWN) { | 2013 | if (dev->gadget.speed != USB_SPEED_UNKNOWN) { |
2015 | spin_unlock(&dev->lock); | 2014 | spin_unlock(&dev->lock); |
2016 | driver->disconnect(&dev->gadget); | 2015 | driver->disconnect(&dev->gadget); |
2017 | spin_lock(&dev->lock); | 2016 | spin_lock(&dev->lock); |
2018 | } | 2017 | } |
2019 | /* init */ | 2018 | |
2019 | /* empty queues and init hardware */ | ||
2020 | udc_basic_init(dev); | ||
2021 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) | ||
2022 | empty_req_queue(&dev->ep[tmp]); | ||
2023 | |||
2020 | udc_setup_endpoints(dev); | 2024 | udc_setup_endpoints(dev); |
2021 | } | 2025 | } |
2022 | 2026 | ||
@@ -2472,6 +2476,13 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix) | |||
2472 | } | 2476 | } |
2473 | } | 2477 | } |
2474 | 2478 | ||
2479 | } else if (!use_dma && ep->in) { | ||
2480 | /* disable interrupt */ | ||
2481 | tmp = readl( | ||
2482 | &dev->regs->ep_irqmsk); | ||
2483 | tmp |= AMD_BIT(ep->num); | ||
2484 | writel(tmp, | ||
2485 | &dev->regs->ep_irqmsk); | ||
2475 | } | 2486 | } |
2476 | } | 2487 | } |
2477 | /* clear status bits */ | 2488 | /* clear status bits */ |
@@ -3279,6 +3290,17 @@ static int udc_pci_probe( | |||
3279 | goto finished; | 3290 | goto finished; |
3280 | } | 3291 | } |
3281 | 3292 | ||
3293 | spin_lock_init(&dev->lock); | ||
3294 | /* udc csr registers base */ | ||
3295 | dev->csr = dev->virt_addr + UDC_CSR_ADDR; | ||
3296 | /* dev registers base */ | ||
3297 | dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR; | ||
3298 | /* ep registers base */ | ||
3299 | dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR; | ||
3300 | /* fifo's base */ | ||
3301 | dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR); | ||
3302 | dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR); | ||
3303 | |||
3282 | if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { | 3304 | if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { |
3283 | dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq); | 3305 | dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq); |
3284 | kfree(dev); | 3306 | kfree(dev); |
@@ -3331,7 +3353,6 @@ static int udc_probe(struct udc *dev) | |||
3331 | udc_pollstall_timer.data = 0; | 3353 | udc_pollstall_timer.data = 0; |
3332 | 3354 | ||
3333 | /* device struct setup */ | 3355 | /* device struct setup */ |
3334 | spin_lock_init(&dev->lock); | ||
3335 | dev->gadget.ops = &udc_ops; | 3356 | dev->gadget.ops = &udc_ops; |
3336 | 3357 | ||
3337 | dev_set_name(&dev->gadget.dev, "gadget"); | 3358 | dev_set_name(&dev->gadget.dev, "gadget"); |
@@ -3340,16 +3361,6 @@ static int udc_probe(struct udc *dev) | |||
3340 | dev->gadget.name = name; | 3361 | dev->gadget.name = name; |
3341 | dev->gadget.is_dualspeed = 1; | 3362 | dev->gadget.is_dualspeed = 1; |
3342 | 3363 | ||
3343 | /* udc csr registers base */ | ||
3344 | dev->csr = dev->virt_addr + UDC_CSR_ADDR; | ||
3345 | /* dev registers base */ | ||
3346 | dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR; | ||
3347 | /* ep registers base */ | ||
3348 | dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR; | ||
3349 | /* fifo's base */ | ||
3350 | dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR); | ||
3351 | dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR); | ||
3352 | |||
3353 | /* init registers, interrupts, ... */ | 3364 | /* init registers, interrupts, ... */ |
3354 | startup_registers(dev); | 3365 | startup_registers(dev); |
3355 | 3366 | ||
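
The amd5536udc changes above move spin_lock_init() and the register/FIFO base setup from udc_probe() into udc_pci_probe(), ahead of request_irq(). With IRQF_SHARED the handler can run as soon as request_irq() returns, even for another device on the shared line, so everything the handler touches must already be initialised. A rough sketch of the ordering; example_probe() is hypothetical, the field and constant names are taken from the hunks above:

    static int example_probe(struct pci_dev *pdev, struct udc *dev)
    {
        /* 1. make everything the ISR uses ready ... */
        spin_lock_init(&dev->lock);
        dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;

        /* 2. ... before the shared IRQ line is requested, because the
         * handler may be invoked immediately after this call */
        if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, "example", dev))
            return -EBUSY;

        return 0;
    }
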
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 9835e0713943..f5f5601701c9 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/timer.h> | 30 | #include <linux/timer.h> |
31 | #include <linux/ktime.h> | ||
31 | #include <linux/list.h> | 32 | #include <linux/list.h> |
32 | #include <linux/interrupt.h> | 33 | #include <linux/interrupt.h> |
33 | #include <linux/usb.h> | 34 | #include <linux/usb.h> |
@@ -676,6 +677,7 @@ static int ehci_run (struct usb_hcd *hcd) | |||
676 | ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ | 677 | ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ |
677 | msleep(5); | 678 | msleep(5); |
678 | up_write(&ehci_cf_port_reset_rwsem); | 679 | up_write(&ehci_cf_port_reset_rwsem); |
680 | ehci->last_periodic_enable = ktime_get_real(); | ||
679 | 681 | ||
680 | temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 682 | temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); |
681 | ehci_info (ehci, | 683 | ehci_info (ehci, |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 378861b9d79a..ead5f4f2aa5a 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -111,6 +111,10 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
111 | switch (pdev->vendor) { | 111 | switch (pdev->vendor) { |
112 | case PCI_VENDOR_ID_INTEL: | 112 | case PCI_VENDOR_ID_INTEL: |
113 | ehci->need_io_watchdog = 0; | 113 | ehci->need_io_watchdog = 0; |
114 | if (pdev->device == 0x27cc) { | ||
115 | ehci->broken_periodic = 1; | ||
116 | ehci_info(ehci, "using broken periodic workaround\n"); | ||
117 | } | ||
114 | break; | 118 | break; |
115 | case PCI_VENDOR_ID_TDI: | 119 | case PCI_VENDOR_ID_TDI: |
116 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { | 120 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 00ad9ce392ed..139a2cc3f641 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -487,8 +487,20 @@ halt: | |||
487 | * we must clear the TT buffer (11.17.5). | 487 | * we must clear the TT buffer (11.17.5). |
488 | */ | 488 | */ |
489 | if (unlikely(last_status != -EINPROGRESS && | 489 | if (unlikely(last_status != -EINPROGRESS && |
490 | last_status != -EREMOTEIO)) | 490 | last_status != -EREMOTEIO)) { |
491 | ehci_clear_tt_buffer(ehci, qh, urb, token); | 491 | /* The TT's in some hubs malfunction when they |
492 | * receive this request following a STALL (they | ||
493 | * stop sending isochronous packets). Since a | ||
494 | * STALL can't leave the TT buffer in a busy | ||
495 | * state (if you believe Figures 11-48 - 11-51 | ||
496 | * in the USB 2.0 spec), we won't clear the TT | ||
497 | * buffer in this case. Strictly speaking this | ||
498 | * is a violation of the spec. | ||
499 | */ | ||
500 | if (last_status != -EPIPE) | ||
501 | ehci_clear_tt_buffer(ehci, qh, urb, | ||
502 | token); | ||
503 | } | ||
492 | } | 504 | } |
493 | 505 | ||
494 | /* if we're removing something not at the queue head, | 506 | /* if we're removing something not at the queue head, |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index b25cdea93a1f..a5535b5e3fe2 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -475,6 +475,8 @@ static int enable_periodic (struct ehci_hcd *ehci) | |||
475 | /* make sure ehci_work scans these */ | 475 | /* make sure ehci_work scans these */ |
476 | ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index) | 476 | ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index) |
477 | % (ehci->periodic_size << 3); | 477 | % (ehci->periodic_size << 3); |
478 | if (unlikely(ehci->broken_periodic)) | ||
479 | ehci->last_periodic_enable = ktime_get_real(); | ||
478 | return 0; | 480 | return 0; |
479 | } | 481 | } |
480 | 482 | ||
@@ -486,6 +488,16 @@ static int disable_periodic (struct ehci_hcd *ehci) | |||
486 | if (--ehci->periodic_sched) | 488 | if (--ehci->periodic_sched) |
487 | return 0; | 489 | return 0; |
488 | 490 | ||
491 | if (unlikely(ehci->broken_periodic)) { | ||
492 | /* delay experimentally determined */ | ||
493 | ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000); | ||
494 | ktime_t now = ktime_get_real(); | ||
495 | s64 delay = ktime_us_delta(safe, now); | ||
496 | |||
497 | if (unlikely(delay > 0)) | ||
498 | udelay(delay); | ||
499 | } | ||
500 | |||
489 | /* did setting PSE not take effect yet? | 501 | /* did setting PSE not take effect yet? |
490 | * takes effect only at frame boundaries... | 502 | * takes effect only at frame boundaries... |
491 | */ | 503 | */ |
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 064e76821ff5..2d85e21ff282 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -118,6 +118,7 @@ struct ehci_hcd { /* one per controller */ | |||
118 | unsigned stamp; | 118 | unsigned stamp; |
119 | unsigned random_frame; | 119 | unsigned random_frame; |
120 | unsigned long next_statechange; | 120 | unsigned long next_statechange; |
121 | ktime_t last_periodic_enable; | ||
121 | u32 command; | 122 | u32 command; |
122 | 123 | ||
123 | /* SILICON QUIRKS */ | 124 | /* SILICON QUIRKS */ |
@@ -127,6 +128,7 @@ struct ehci_hcd { /* one per controller */ | |||
127 | unsigned big_endian_desc:1; | 128 | unsigned big_endian_desc:1; |
128 | unsigned has_amcc_usb23:1; | 129 | unsigned has_amcc_usb23:1; |
129 | unsigned need_io_watchdog:1; | 130 | unsigned need_io_watchdog:1; |
131 | unsigned broken_periodic:1; | ||
130 | 132 | ||
131 | /* required for usb32 quirk */ | 133 | /* required for usb32 quirk */ |
132 | #define OHCI_CTRL_HCFS (3 << 6) | 134 | #define OHCI_CTRL_HCFS (3 << 6) |
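
The EHCI hunks above add a broken_periodic quirk, enabled for one Intel controller (PCI device 0x27cc): the time of the last periodic-schedule enable is recorded with ktime_get_real(), and disable_periodic() then waits until at least 1 ms has passed before turning the schedule off again, apparently to work around controller misbehaviour. A short sketch of the delay computation used in the hunks (the 1000 us figure is the experimentally determined value quoted there):

    ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000);
    s64 delay = ktime_us_delta(safe, ktime_get_real());

    if (delay > 0)
        udelay(delay);      /* busy-wait out the remaining window */
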
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c index 516848dd9b48..39d253e841f6 100644 --- a/drivers/usb/host/sl811_cs.c +++ b/drivers/usb/host/sl811_cs.c | |||
@@ -37,28 +37,8 @@ MODULE_LICENSE("GPL"); | |||
37 | /* MACROS */ | 37 | /* MACROS */ |
38 | /*====================================================================*/ | 38 | /*====================================================================*/ |
39 | 39 | ||
40 | #if defined(DEBUG) || defined(PCMCIA_DEBUG) | ||
41 | |||
42 | static int pc_debug = 0; | ||
43 | module_param(pc_debug, int, 0644); | ||
44 | |||
45 | #define DBG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG "sl811_cs: " args) | ||
46 | |||
47 | #else | ||
48 | #define DBG(n, args...) do{}while(0) | ||
49 | #endif /* no debugging */ | ||
50 | |||
51 | #define INFO(args...) printk(KERN_INFO "sl811_cs: " args) | 40 | #define INFO(args...) printk(KERN_INFO "sl811_cs: " args) |
52 | 41 | ||
53 | #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) | ||
54 | |||
55 | #define CS_CHECK(fn, ret) \ | ||
56 | do { \ | ||
57 | last_fn = (fn); \ | ||
58 | if ((last_ret = (ret)) != 0) \ | ||
59 | goto cs_failed; \ | ||
60 | } while (0) | ||
61 | |||
62 | /*====================================================================*/ | 42 | /*====================================================================*/ |
63 | /* VARIABLES */ | 43 | /* VARIABLES */ |
64 | /*====================================================================*/ | 44 | /*====================================================================*/ |
@@ -76,7 +56,7 @@ static void sl811_cs_release(struct pcmcia_device * link); | |||
76 | 56 | ||
77 | static void release_platform_dev(struct device * dev) | 57 | static void release_platform_dev(struct device * dev) |
78 | { | 58 | { |
79 | DBG(0, "sl811_cs platform_dev release\n"); | 59 | dev_dbg(dev, "sl811_cs platform_dev release\n"); |
80 | dev->parent = NULL; | 60 | dev->parent = NULL; |
81 | } | 61 | } |
82 | 62 | ||
@@ -140,7 +120,7 @@ static int sl811_hc_init(struct device *parent, resource_size_t base_addr, | |||
140 | 120 | ||
141 | static void sl811_cs_detach(struct pcmcia_device *link) | 121 | static void sl811_cs_detach(struct pcmcia_device *link) |
142 | { | 122 | { |
143 | DBG(0, "sl811_cs_detach(0x%p)\n", link); | 123 | dev_dbg(&link->dev, "sl811_cs_detach\n"); |
144 | 124 | ||
145 | sl811_cs_release(link); | 125 | sl811_cs_release(link); |
146 | 126 | ||
@@ -150,7 +130,7 @@ static void sl811_cs_detach(struct pcmcia_device *link) | |||
150 | 130 | ||
151 | static void sl811_cs_release(struct pcmcia_device * link) | 131 | static void sl811_cs_release(struct pcmcia_device * link) |
152 | { | 132 | { |
153 | DBG(0, "sl811_cs_release(0x%p)\n", link); | 133 | dev_dbg(&link->dev, "sl811_cs_release\n"); |
154 | 134 | ||
155 | pcmcia_disable_device(link); | 135 | pcmcia_disable_device(link); |
156 | platform_device_unregister(&platform_dev); | 136 | platform_device_unregister(&platform_dev); |
@@ -205,11 +185,11 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev, | |||
205 | 185 | ||
206 | static int sl811_cs_config(struct pcmcia_device *link) | 186 | static int sl811_cs_config(struct pcmcia_device *link) |
207 | { | 187 | { |
208 | struct device *parent = &handle_to_dev(link); | 188 | struct device *parent = &link->dev; |
209 | local_info_t *dev = link->priv; | 189 | local_info_t *dev = link->priv; |
210 | int last_fn, last_ret; | 190 | int ret; |
211 | 191 | ||
212 | DBG(0, "sl811_cs_config(0x%p)\n", link); | 192 | dev_dbg(&link->dev, "sl811_cs_config\n"); |
213 | 193 | ||
214 | if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) | 194 | if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) |
215 | goto failed; | 195 | goto failed; |
@@ -217,14 +197,16 @@ static int sl811_cs_config(struct pcmcia_device *link) | |||
217 | /* require an IRQ and two registers */ | 197 | /* require an IRQ and two registers */ |
218 | if (!link->io.NumPorts1 || link->io.NumPorts1 < 2) | 198 | if (!link->io.NumPorts1 || link->io.NumPorts1 < 2) |
219 | goto failed; | 199 | goto failed; |
220 | if (link->conf.Attributes & CONF_ENABLE_IRQ) | 200 | if (link->conf.Attributes & CONF_ENABLE_IRQ) { |
221 | CS_CHECK(RequestIRQ, | 201 | ret = pcmcia_request_irq(link, &link->irq); |
222 | pcmcia_request_irq(link, &link->irq)); | 202 | if (ret) |
223 | else | 203 | goto failed; |
204 | } else | ||
224 | goto failed; | 205 | goto failed; |
225 | 206 | ||
226 | CS_CHECK(RequestConfiguration, | 207 | ret = pcmcia_request_configuration(link, &link->conf); |
227 | pcmcia_request_configuration(link, &link->conf)); | 208 | if (ret) |
209 | goto failed; | ||
228 | 210 | ||
229 | sprintf(dev->node.dev_name, driver_name); | 211 | sprintf(dev->node.dev_name, driver_name); |
230 | dev->node.major = dev->node.minor = 0; | 212 | dev->node.major = dev->node.minor = 0; |
@@ -241,8 +223,6 @@ static int sl811_cs_config(struct pcmcia_device *link) | |||
241 | 223 | ||
242 | if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) | 224 | if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) |
243 | < 0) { | 225 | < 0) { |
244 | cs_failed: | ||
245 | cs_error(link, last_fn, last_ret); | ||
246 | failed: | 226 | failed: |
247 | printk(KERN_WARNING "sl811_cs_config failed\n"); | 227 | printk(KERN_WARNING "sl811_cs_config failed\n"); |
248 | sl811_cs_release(link); | 228 | sl811_cs_release(link); |
@@ -263,7 +243,6 @@ static int sl811_cs_probe(struct pcmcia_device *link) | |||
263 | 243 | ||
264 | /* Initialize */ | 244 | /* Initialize */ |
265 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 245 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
266 | link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID; | ||
267 | link->irq.Handler = NULL; | 246 | link->irq.Handler = NULL; |
268 | 247 | ||
269 | link->conf.Attributes = 0; | 248 | link->conf.Attributes = 0; |
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index c3577bbbae6c..ef2332a9941d 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c | |||
@@ -1442,11 +1442,6 @@ static int cppi_channel_abort(struct dma_channel *channel) | |||
1442 | musb_writew(regs, MUSB_TXCSR, value); | 1442 | musb_writew(regs, MUSB_TXCSR, value); |
1443 | musb_writew(regs, MUSB_TXCSR, value); | 1443 | musb_writew(regs, MUSB_TXCSR, value); |
1444 | 1444 | ||
1445 | /* re-enable interrupt */ | ||
1446 | if (enabled) | ||
1447 | musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, | ||
1448 | (1 << cppi_ch->index)); | ||
1449 | |||
1450 | /* While we scrub the TX state RAM, ensure that we clean | 1445 | /* While we scrub the TX state RAM, ensure that we clean |
1451 | * up any interrupt that's currently asserted: | 1446 | * up any interrupt that's currently asserted: |
1452 | * 1. Write to completion Ptr value 0x1(bit 0 set) | 1447 | * 1. Write to completion Ptr value 0x1(bit 0 set) |
@@ -1459,6 +1454,11 @@ static int cppi_channel_abort(struct dma_channel *channel) | |||
1459 | cppi_reset_tx(tx_ram, 1); | 1454 | cppi_reset_tx(tx_ram, 1); |
1460 | musb_writel(&tx_ram->tx_complete, 0, 0); | 1455 | musb_writel(&tx_ram->tx_complete, 0, 0); |
1461 | 1456 | ||
1457 | /* re-enable interrupt */ | ||
1458 | if (enabled) | ||
1459 | musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, | ||
1460 | (1 << cppi_ch->index)); | ||
1461 | |||
1462 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); | 1462 | cppi_dump_tx(5, cppi_ch, " (done teardown)"); |
1463 | 1463 | ||
1464 | /* REVISIT tx side _should_ clean up the same way | 1464 | /* REVISIT tx side _should_ clean up the same way |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 3a61ddb62bd2..547e0e390726 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1450,7 +1450,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1450 | #endif | 1450 | #endif |
1451 | 1451 | ||
1452 | if (hw_ep->max_packet_sz_tx) { | 1452 | if (hw_ep->max_packet_sz_tx) { |
1453 | printk(KERN_DEBUG | 1453 | DBG(1, |
1454 | "%s: hw_ep %d%s, %smax %d\n", | 1454 | "%s: hw_ep %d%s, %smax %d\n", |
1455 | musb_driver_name, i, | 1455 | musb_driver_name, i, |
1456 | hw_ep->is_shared_fifo ? "shared" : "tx", | 1456 | hw_ep->is_shared_fifo ? "shared" : "tx", |
@@ -1459,7 +1459,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1459 | hw_ep->max_packet_sz_tx); | 1459 | hw_ep->max_packet_sz_tx); |
1460 | } | 1460 | } |
1461 | if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { | 1461 | if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { |
1462 | printk(KERN_DEBUG | 1462 | DBG(1, |
1463 | "%s: hw_ep %d%s, %smax %d\n", | 1463 | "%s: hw_ep %d%s, %smax %d\n", |
1464 | musb_driver_name, i, | 1464 | musb_driver_name, i, |
1465 | "rx", | 1465 | "rx", |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 8b3c4e2ed7b8..74073f9a43f0 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright 2005 Mentor Graphics Corporation | 4 | * Copyright 2005 Mentor Graphics Corporation |
5 | * Copyright (C) 2005-2006 by Texas Instruments | 5 | * Copyright (C) 2005-2006 by Texas Instruments |
6 | * Copyright (C) 2006-2007 Nokia Corporation | 6 | * Copyright (C) 2006-2007 Nokia Corporation |
7 | * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -436,14 +437,6 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
436 | csr |= MUSB_TXCSR_P_WZC_BITS; | 437 | csr |= MUSB_TXCSR_P_WZC_BITS; |
437 | csr &= ~MUSB_TXCSR_P_SENTSTALL; | 438 | csr &= ~MUSB_TXCSR_P_SENTSTALL; |
438 | musb_writew(epio, MUSB_TXCSR, csr); | 439 | musb_writew(epio, MUSB_TXCSR, csr); |
439 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
440 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
441 | musb->dma_controller->channel_abort(dma); | ||
442 | } | ||
443 | |||
444 | if (request) | ||
445 | musb_g_giveback(musb_ep, request, -EPIPE); | ||
446 | |||
447 | break; | 440 | break; |
448 | } | 441 | } |
449 | 442 | ||
@@ -582,15 +575,25 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
582 | */ | 575 | */ |
583 | static void rxstate(struct musb *musb, struct musb_request *req) | 576 | static void rxstate(struct musb *musb, struct musb_request *req) |
584 | { | 577 | { |
585 | u16 csr = 0; | ||
586 | const u8 epnum = req->epnum; | 578 | const u8 epnum = req->epnum; |
587 | struct usb_request *request = &req->request; | 579 | struct usb_request *request = &req->request; |
588 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; | 580 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; |
589 | void __iomem *epio = musb->endpoints[epnum].regs; | 581 | void __iomem *epio = musb->endpoints[epnum].regs; |
590 | unsigned fifo_count = 0; | 582 | unsigned fifo_count = 0; |
591 | u16 len = musb_ep->packet_sz; | 583 | u16 len = musb_ep->packet_sz; |
584 | u16 csr = musb_readw(epio, MUSB_RXCSR); | ||
592 | 585 | ||
593 | csr = musb_readw(epio, MUSB_RXCSR); | 586 | /* We shouldn't get here while DMA is active, but we do... */ |
587 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | ||
588 | DBG(4, "DMA pending...\n"); | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | if (csr & MUSB_RXCSR_P_SENDSTALL) { | ||
593 | DBG(5, "%s stalling, RXCSR %04x\n", | ||
594 | musb_ep->end_point.name, csr); | ||
595 | return; | ||
596 | } | ||
594 | 597 | ||
595 | if (is_cppi_enabled() && musb_ep->dma) { | 598 | if (is_cppi_enabled() && musb_ep->dma) { |
596 | struct dma_controller *c = musb->dma_controller; | 599 | struct dma_controller *c = musb->dma_controller; |
@@ -761,19 +764,10 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
761 | csr, dma ? " (dma)" : "", request); | 764 | csr, dma ? " (dma)" : "", request); |
762 | 765 | ||
763 | if (csr & MUSB_RXCSR_P_SENTSTALL) { | 766 | if (csr & MUSB_RXCSR_P_SENTSTALL) { |
764 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
765 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
766 | (void) musb->dma_controller->channel_abort(dma); | ||
767 | request->actual += musb_ep->dma->actual_len; | ||
768 | } | ||
769 | |||
770 | csr |= MUSB_RXCSR_P_WZC_BITS; | 767 | csr |= MUSB_RXCSR_P_WZC_BITS; |
771 | csr &= ~MUSB_RXCSR_P_SENTSTALL; | 768 | csr &= ~MUSB_RXCSR_P_SENTSTALL; |
772 | musb_writew(epio, MUSB_RXCSR, csr); | 769 | musb_writew(epio, MUSB_RXCSR, csr); |
773 | 770 | return; | |
774 | if (request) | ||
775 | musb_g_giveback(musb_ep, request, -EPIPE); | ||
776 | goto done; | ||
777 | } | 771 | } |
778 | 772 | ||
779 | if (csr & MUSB_RXCSR_P_OVERRUN) { | 773 | if (csr & MUSB_RXCSR_P_OVERRUN) { |
@@ -795,7 +789,7 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
795 | DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, | 789 | DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, |
796 | "%s busy, csr %04x\n", | 790 | "%s busy, csr %04x\n", |
797 | musb_ep->end_point.name, csr); | 791 | musb_ep->end_point.name, csr); |
798 | goto done; | 792 | return; |
799 | } | 793 | } |
800 | 794 | ||
801 | if (dma && (csr & MUSB_RXCSR_DMAENAB)) { | 795 | if (dma && (csr & MUSB_RXCSR_DMAENAB)) { |
@@ -826,22 +820,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
826 | if ((request->actual < request->length) | 820 | if ((request->actual < request->length) |
827 | && (musb_ep->dma->actual_len | 821 | && (musb_ep->dma->actual_len |
828 | == musb_ep->packet_sz)) | 822 | == musb_ep->packet_sz)) |
829 | goto done; | 823 | return; |
830 | #endif | 824 | #endif |
831 | musb_g_giveback(musb_ep, request, 0); | 825 | musb_g_giveback(musb_ep, request, 0); |
832 | 826 | ||
833 | request = next_request(musb_ep); | 827 | request = next_request(musb_ep); |
834 | if (!request) | 828 | if (!request) |
835 | goto done; | 829 | return; |
836 | |||
837 | /* don't start more i/o till the stall clears */ | ||
838 | musb_ep_select(mbase, epnum); | ||
839 | csr = musb_readw(epio, MUSB_RXCSR); | ||
840 | if (csr & MUSB_RXCSR_P_SENDSTALL) | ||
841 | goto done; | ||
842 | } | 830 | } |
843 | 831 | ||
844 | |||
845 | /* analyze request if the ep is hot */ | 832 | /* analyze request if the ep is hot */ |
846 | if (request) | 833 | if (request) |
847 | rxstate(musb, to_musb_request(request)); | 834 | rxstate(musb, to_musb_request(request)); |
@@ -849,8 +836,6 @@ void musb_g_rx(struct musb *musb, u8 epnum) | |||
849 | DBG(3, "packet waiting for %s%s request\n", | 836 | DBG(3, "packet waiting for %s%s request\n", |
850 | musb_ep->desc ? "" : "inactive ", | 837 | musb_ep->desc ? "" : "inactive ", |
851 | musb_ep->end_point.name); | 838 | musb_ep->end_point.name); |
852 | |||
853 | done: | ||
854 | return; | 839 | return; |
855 | } | 840 | } |
856 | 841 | ||
@@ -1244,7 +1229,7 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1244 | void __iomem *mbase; | 1229 | void __iomem *mbase; |
1245 | unsigned long flags; | 1230 | unsigned long flags; |
1246 | u16 csr; | 1231 | u16 csr; |
1247 | struct musb_request *request = NULL; | 1232 | struct musb_request *request; |
1248 | int status = 0; | 1233 | int status = 0; |
1249 | 1234 | ||
1250 | if (!ep) | 1235 | if (!ep) |
@@ -1260,24 +1245,29 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1260 | 1245 | ||
1261 | musb_ep_select(mbase, epnum); | 1246 | musb_ep_select(mbase, epnum); |
1262 | 1247 | ||
1263 | /* cannot portably stall with non-empty FIFO */ | ||
1264 | request = to_musb_request(next_request(musb_ep)); | 1248 | request = to_musb_request(next_request(musb_ep)); |
1265 | if (value && musb_ep->is_in) { | 1249 | if (value) { |
1266 | csr = musb_readw(epio, MUSB_TXCSR); | 1250 | if (request) { |
1267 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 1251 | DBG(3, "request in progress, cannot halt %s\n", |
1268 | DBG(3, "%s fifo busy, cannot halt\n", ep->name); | 1252 | ep->name); |
1269 | spin_unlock_irqrestore(&musb->lock, flags); | 1253 | status = -EAGAIN; |
1270 | return -EAGAIN; | 1254 | goto done; |
1255 | } | ||
1256 | /* Cannot portably stall with non-empty FIFO */ | ||
1257 | if (musb_ep->is_in) { | ||
1258 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1259 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
1260 | DBG(3, "FIFO busy, cannot halt %s\n", ep->name); | ||
1261 | status = -EAGAIN; | ||
1262 | goto done; | ||
1263 | } | ||
1271 | } | 1264 | } |
1272 | |||
1273 | } | 1265 | } |
1274 | 1266 | ||
1275 | /* set/clear the stall and toggle bits */ | 1267 | /* set/clear the stall and toggle bits */ |
1276 | DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); | 1268 | DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); |
1277 | if (musb_ep->is_in) { | 1269 | if (musb_ep->is_in) { |
1278 | csr = musb_readw(epio, MUSB_TXCSR); | 1270 | csr = musb_readw(epio, MUSB_TXCSR); |
1279 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) | ||
1280 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
1281 | csr |= MUSB_TXCSR_P_WZC_BITS | 1271 | csr |= MUSB_TXCSR_P_WZC_BITS |
1282 | | MUSB_TXCSR_CLRDATATOG; | 1272 | | MUSB_TXCSR_CLRDATATOG; |
1283 | if (value) | 1273 | if (value) |
@@ -1300,14 +1290,13 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value) | |||
1300 | musb_writew(epio, MUSB_RXCSR, csr); | 1290 | musb_writew(epio, MUSB_RXCSR, csr); |
1301 | } | 1291 | } |
1302 | 1292 | ||
1303 | done: | ||
1304 | |||
1305 | /* maybe start the first request in the queue */ | 1293 | /* maybe start the first request in the queue */ |
1306 | if (!musb_ep->busy && !value && request) { | 1294 | if (!musb_ep->busy && !value && request) { |
1307 | DBG(3, "restarting the request\n"); | 1295 | DBG(3, "restarting the request\n"); |
1308 | musb_ep_restart(musb, request); | 1296 | musb_ep_restart(musb, request); |
1309 | } | 1297 | } |
1310 | 1298 | ||
1299 | done: | ||
1311 | spin_unlock_irqrestore(&musb->lock, flags); | 1300 | spin_unlock_irqrestore(&musb->lock, flags); |
1312 | return status; | 1301 | return status; |
1313 | } | 1302 | } |
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 7a6778675ad3..522efb31b56b 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -511,7 +511,8 @@ static void ep0_txstate(struct musb *musb) | |||
511 | 511 | ||
512 | /* update the flags */ | 512 | /* update the flags */ |
513 | if (fifo_count < MUSB_MAX_END0_PACKET | 513 | if (fifo_count < MUSB_MAX_END0_PACKET |
514 | || request->actual == request->length) { | 514 | || (request->actual == request->length |
515 | && !request->zero)) { | ||
515 | musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; | 516 | musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; |
516 | csr |= MUSB_CSR0_P_DATAEND; | 517 | csr |= MUSB_CSR0_P_DATAEND; |
517 | } else | 518 | } else |
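
The ep0_txstate() change above keeps the IN data stage open when the data ends exactly on a packet boundary and request->zero is set. In the gadget API, request->zero asks for a trailing zero-length packet in that case (typically because the host requested more data than is being returned), so the host can tell the transfer is complete. A small self-contained helper expressing that rule; needs_zlp() is a hypothetical name for illustration:

    #include <stdbool.h>
    #include <stddef.h>

    /* true if an IN transfer must be terminated by an extra zero-length packet */
    static bool needs_zlp(size_t sent, size_t maxpacket, bool zero_flag)
    {
        return zero_flag && maxpacket != 0 && (sent % maxpacket) == 0;
    }
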
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index cf94511485f2..e3ab40a966eb 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -1301,8 +1301,11 @@ void musb_host_tx(struct musb *musb, u8 epnum) | |||
1301 | return; | 1301 | return; |
1302 | } else if (usb_pipeisoc(pipe) && dma) { | 1302 | } else if (usb_pipeisoc(pipe) && dma) { |
1303 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, | 1303 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, |
1304 | offset, length)) | 1304 | offset, length)) { |
1305 | if (is_cppi_enabled() || tusb_dma_omap()) | ||
1306 | musb_h_tx_dma_start(hw_ep); | ||
1305 | return; | 1307 | return; |
1308 | } | ||
1306 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { | 1309 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { |
1307 | DBG(1, "not complete, but DMA enabled?\n"); | 1310 | DBG(1, "not complete, but DMA enabled?\n"); |
1308 | return; | 1311 | return; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9c60d6d4908a..ebcc6d0e2e91 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -1937,7 +1937,7 @@ static void ftdi_write_bulk_callback(struct urb *urb) | |||
1937 | return; | 1937 | return; |
1938 | } | 1938 | } |
1939 | /* account for transferred data */ | 1939 | /* account for transferred data */ |
1940 | countback = urb->actual_length; | 1940 | countback = urb->transfer_buffer_length; |
1941 | data_offset = priv->write_offset; | 1941 | data_offset = priv->write_offset; |
1942 | if (data_offset > 0) { | 1942 | if (data_offset > 0) { |
1943 | /* Subtract the control bytes */ | 1943 | /* Subtract the control bytes */ |
@@ -1950,7 +1950,6 @@ static void ftdi_write_bulk_callback(struct urb *urb) | |||
1950 | 1950 | ||
1951 | if (status) { | 1951 | if (status) { |
1952 | dbg("nonzero write bulk status received: %d", status); | 1952 | dbg("nonzero write bulk status received: %d", status); |
1953 | return; | ||
1954 | } | 1953 | } |
1955 | 1954 | ||
1956 | usb_serial_port_softint(port); | 1955 | usb_serial_port_softint(port); |
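
The ftdi_sio changes above make the write-completion path account with urb->transfer_buffer_length rather than urb->actual_length, and no longer return early on a nonzero status. The submit path presumably charges the full submitted length to its outstanding-byte counter, so the completion must subtract the same amount even when the transfer failed or was short, and the tty layer should still be kicked so queued writes keep flowing. A hedged sketch of that callback shape; account_completed() is a hypothetical stand-in for the driver's private bookkeeping:

    static void example_write_bulk_callback(struct urb *urb)
    {
        struct usb_serial_port *port = urb->context;

        /* balance the submit-time accounting with what was queued,
         * not with urb->actual_length, which may be short on error */
        account_completed(port, urb->transfer_buffer_length);

        if (urb->status)
            dbg("nonzero write bulk status received: %d", urb->status);

        /* still notify the tty layer so further writes are not stalled */
        usb_serial_port_softint(port);
    }
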
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 319aaf9725b3..0577e4b61114 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -336,6 +336,10 @@ static int option_resume(struct usb_serial *serial); | |||
336 | #define AIRPLUS_VENDOR_ID 0x1011 | 336 | #define AIRPLUS_VENDOR_ID 0x1011 |
337 | #define AIRPLUS_PRODUCT_MCD650 0x3198 | 337 | #define AIRPLUS_PRODUCT_MCD650 0x3198 |
338 | 338 | ||
339 | /* 4G Systems products */ | ||
340 | #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e | ||
341 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 | ||
342 | |||
339 | static struct usb_device_id option_ids[] = { | 343 | static struct usb_device_id option_ids[] = { |
340 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 344 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
341 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 345 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -599,6 +603,7 @@ static struct usb_device_id option_ids[] = { | |||
599 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | 603 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, |
600 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, | 604 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, |
601 | { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, | 605 | { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, |
606 | { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) }, | ||
602 | { } /* Terminating entry */ | 607 | { } /* Terminating entry */ |
603 | }; | 608 | }; |
604 | MODULE_DEVICE_TABLE(usb, option_ids); | 609 | MODULE_DEVICE_TABLE(usb, option_ids); |
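
The option.c hunk adds the 4G Systems W14 modem by defining its vendor/product IDs and appending an entry to option_ids[]. USB_DEVICE() expands to a table entry that matches on idVendor and idProduct only. A sketch of the pattern with deliberately made-up IDs:

    #define EXAMPLE_VENDOR_ID   0x1234      /* hypothetical IDs for illustration */
    #define EXAMPLE_PRODUCT_FOO 0x5678

    static struct usb_device_id example_ids[] = {
        { USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_FOO) },
        { }                                 /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_ids);
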
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 035d56835b75..ea1fd3f47511 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c | |||
@@ -554,11 +554,11 @@ static int fb_check_var(struct fb_var_screeninfo *var, | |||
554 | var->transp.length = 0; | 554 | var->transp.length = 0; |
555 | break; | 555 | break; |
556 | case 16: /* RGB 565 */ | 556 | case 16: /* RGB 565 */ |
557 | var->red.offset = 0; | 557 | var->red.offset = 11; |
558 | var->red.length = 5; | 558 | var->red.length = 5; |
559 | var->green.offset = 5; | 559 | var->green.offset = 5; |
560 | var->green.length = 6; | 560 | var->green.length = 6; |
561 | var->blue.offset = 11; | 561 | var->blue.offset = 0; |
562 | var->blue.length = 5; | 562 | var->blue.length = 5; |
563 | var->transp.offset = 0; | 563 | var->transp.offset = 0; |
564 | var->transp.length = 0; | 564 | var->transp.length = 0; |
@@ -591,7 +591,7 @@ static int __devexit fb_remove(struct platform_device *dev) | |||
591 | unregister_framebuffer(info); | 591 | unregister_framebuffer(info); |
592 | fb_dealloc_cmap(&info->cmap); | 592 | fb_dealloc_cmap(&info->cmap); |
593 | dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, | 593 | dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, |
594 | info->screen_base, | 594 | info->screen_base - PAGE_SIZE, |
595 | info->fix.smem_start); | 595 | info->fix.smem_start); |
596 | free_irq(par->irq, par); | 596 | free_irq(par->irq, par); |
597 | clk_disable(par->lcdc_clk); | 597 | clk_disable(par->lcdc_clk); |
@@ -749,6 +749,7 @@ static int __init fb_probe(struct platform_device *device) | |||
749 | (PAGE_SIZE - par->palette_sz); | 749 | (PAGE_SIZE - par->palette_sz); |
750 | 750 | ||
751 | /* the rest of the frame buffer is pixel data */ | 751 | /* the rest of the frame buffer is pixel data */ |
752 | da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz; | ||
752 | da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz; | 753 | da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz; |
753 | da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; | 754 | da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; |
754 | da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; | 755 | da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; |
@@ -787,6 +788,8 @@ static int __init fb_probe(struct platform_device *device) | |||
787 | da8xx_fb_info->var = da8xx_fb_var; | 788 | da8xx_fb_info->var = da8xx_fb_var; |
788 | da8xx_fb_info->fbops = &da8xx_fb_ops; | 789 | da8xx_fb_info->fbops = &da8xx_fb_ops; |
789 | da8xx_fb_info->pseudo_palette = par->pseudo_palette; | 790 | da8xx_fb_info->pseudo_palette = par->pseudo_palette; |
791 | da8xx_fb_info->fix.visual = (da8xx_fb_info->var.bits_per_pixel <= 8) ? | ||
792 | FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; | ||
790 | 793 | ||
791 | ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); | 794 | ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); |
792 | if (ret) | 795 | if (ret) |
@@ -825,7 +828,7 @@ err_free_irq: | |||
825 | 828 | ||
826 | err_release_fb_mem: | 829 | err_release_fb_mem: |
827 | dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, | 830 | dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, |
828 | da8xx_fb_info->screen_base, | 831 | da8xx_fb_info->screen_base - PAGE_SIZE, |
829 | da8xx_fb_info->fix.smem_start); | 832 | da8xx_fb_info->fix.smem_start); |
830 | 833 | ||
831 | err_release_fb: | 834 | err_release_fb: |
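
The fb_check_var() change above corrects the RGB 565 field description: in the usual RGB565 layout red occupies the top five bits (offset 11), green the middle six (offset 5) and blue the low five (offset 0), the opposite of what the old code reported. The companion dma_free_coherent() changes free the buffer from its true start, one page below screen_base, matching the extra page allocated for the palette. A self-contained sketch of the packing the corrected offsets describe:

    #include <stdint.h>

    /* pack 5:6:5 colour components into one RGB565 pixel */
    static uint16_t rgb565(uint8_t r5, uint8_t g6, uint8_t b5)
    {
        return (uint16_t)(((r5 & 0x1f) << 11) |   /* red:   offset 11, length 5 */
                          ((g6 & 0x3f) << 5)  |   /* green: offset 5,  length 6 */
                           (b5 & 0x1f));          /* blue:  offset 0,  length 5 */
    }
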
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c index 1a83709f9611..f67db4268374 100644 --- a/drivers/video/gbefb.c +++ b/drivers/video/gbefb.c | |||
@@ -1147,7 +1147,7 @@ static int __init gbefb_probe(struct platform_device *p_dev) | |||
1147 | gbefb_setup(options); | 1147 | gbefb_setup(options); |
1148 | #endif | 1148 | #endif |
1149 | 1149 | ||
1150 | if (!request_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) { | 1150 | if (!request_mem_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) { |
1151 | printk(KERN_ERR "gbefb: couldn't reserve mmio region\n"); | 1151 | printk(KERN_ERR "gbefb: couldn't reserve mmio region\n"); |
1152 | ret = -EBUSY; | 1152 | ret = -EBUSY; |
1153 | goto out_release_framebuffer; | 1153 | goto out_release_framebuffer; |
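
The gbefb change swaps request_region() for request_mem_region(): GBE_BASE is a memory-mapped register block, and the two calls reserve different resource spaces (legacy I/O ports versus MMIO). A sketch of the usual MMIO claim-and-map sequence; base, size and regs are hypothetical locals:

    /* claim the physical range, then map it; both steps must succeed */
    if (!request_mem_region(base, size, "GBE"))
        return -EBUSY;

    regs = ioremap(base, size);
    if (!regs) {
        release_mem_region(base, size);
        return -ENOMEM;
    }
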
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index f6cccc9df022..bf12d06b5877 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c | |||
@@ -62,7 +62,7 @@ extern unsigned int idt_cpu_freq; | |||
62 | static int timeout = WATCHDOG_TIMEOUT; | 62 | static int timeout = WATCHDOG_TIMEOUT; |
63 | module_param(timeout, int, 0); | 63 | module_param(timeout, int, 0); |
64 | MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default=" | 64 | MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default=" |
65 | WATCHDOG_TIMEOUT ")"); | 65 | __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); |
66 | 66 | ||
67 | static int nowayout = WATCHDOG_NOWAYOUT; | 67 | static int nowayout = WATCHDOG_NOWAYOUT; |
68 | module_param(nowayout, int, 0); | 68 | module_param(nowayout, int, 0); |
@@ -276,7 +276,7 @@ static int __devinit rc32434_wdt_probe(struct platform_device *pdev) | |||
276 | return -ENODEV; | 276 | return -ENODEV; |
277 | } | 277 | } |
278 | 278 | ||
279 | wdt_reg = ioremap_nocache(r->start, r->end - r->start); | 279 | wdt_reg = ioremap_nocache(r->start, resource_size(r)); |
280 | if (!wdt_reg) { | 280 | if (!wdt_reg) { |
281 | printk(KERN_ERR PFX "failed to remap I/O resources\n"); | 281 | printk(KERN_ERR PFX "failed to remap I/O resources\n"); |
282 | return -ENXIO; | 282 | return -ENXIO; |
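
Two small fixes in rc32434_wdt: MODULE_PARM_DESC needs __MODULE_STRING() to turn the numeric WATCHDOG_TIMEOUT macro into a string at preprocessing time, and the ioremap length now uses resource_size(), which includes the final byte that the old `r->end - r->start` expression left out. For reference, the helper as defined in <linux/ioport.h>:

    /* resource ranges are inclusive: [start, end] */
    static inline resource_size_t resource_size(const struct resource *res)
    {
        return res->end - res->start + 1;
    }
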
diff --git a/fs/9p/cache.c b/fs/9p/cache.c index bcc5357a9069..e777961939f3 100644 --- a/fs/9p/cache.c +++ b/fs/9p/cache.c | |||
@@ -343,7 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) | |||
343 | 343 | ||
344 | BUG_ON(!vcookie->fscache); | 344 | BUG_ON(!vcookie->fscache); |
345 | 345 | ||
346 | return fscache_maybe_release_page(vnode->cache, page, gfp); | 346 | return fscache_maybe_release_page(vcookie->fscache, page, gfp); |
347 | } | 347 | } |
348 | 348 | ||
349 | void __v9fs_fscache_invalidate_page(struct page *page) | 349 | void __v9fs_fscache_invalidate_page(struct page *page) |
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 1d8332563863..a6c8c6fe8df9 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/mount.h> | 12 | #include <linux/mount.h> |
13 | #include <linux/file.h> | 13 | #include <linux/file.h> |
14 | #include <linux/ima.h> | ||
14 | #include "internal.h" | 15 | #include "internal.h" |
15 | 16 | ||
16 | /* | 17 | /* |
@@ -922,6 +923,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) | |||
922 | if (IS_ERR(file)) { | 923 | if (IS_ERR(file)) { |
923 | ret = PTR_ERR(file); | 924 | ret = PTR_ERR(file); |
924 | } else { | 925 | } else { |
926 | ima_counts_get(file); | ||
925 | ret = -EIO; | 927 | ret = -EIO; |
926 | if (file->f_op->write) { | 928 | if (file->f_op->write) { |
927 | pos = (loff_t) page->index << PAGE_SHIFT; | 929 | pos = (loff_t) page->index << PAGE_SHIFT; |
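diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||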
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/proc_fs.h> | 46 | #include <linux/proc_fs.h> |
47 | #include <linux/mount.h> | 47 | #include <linux/mount.h> |
48 | #include <linux/security.h> | 48 | #include <linux/security.h> |
49 | #include <linux/ima.h> | ||
50 | #include <linux/syscalls.h> | 49 | #include <linux/syscalls.h> |
51 | #include <linux/tsacct_kern.h> | 50 | #include <linux/tsacct_kern.h> |
52 | #include <linux/cn_proc.h> | 51 | #include <linux/cn_proc.h> |
@@ -1209,9 +1208,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) | |||
1209 | retval = security_bprm_check(bprm); | 1208 | retval = security_bprm_check(bprm); |
1210 | if (retval) | 1209 | if (retval) |
1211 | return retval; | 1210 | return retval; |
1212 | retval = ima_bprm_check(bprm); | ||
1213 | if (retval) | ||
1214 | return retval; | ||
1215 | 1211 | ||
1216 | /* kernel module loader fixup */ | 1212 | /* kernel module loader fixup */ |
1217 | /* so we don't try to load run modprobe in kernel space. */ | 1213 | /* so we don't try to load run modprobe in kernel space. */ |
diff --git a/fs/file_table.c b/fs/file_table.c index 8eb44042e009..4bef4c01ec6f 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | #include <linux/security.h> | 15 | #include <linux/security.h> |
16 | #include <linux/ima.h> | ||
17 | #include <linux/eventpoll.h> | 16 | #include <linux/eventpoll.h> |
18 | #include <linux/rcupdate.h> | 17 | #include <linux/rcupdate.h> |
19 | #include <linux/mount.h> | 18 | #include <linux/mount.h> |
@@ -280,7 +279,6 @@ void __fput(struct file *file) | |||
280 | if (file->f_op && file->f_op->release) | 279 | if (file->f_op && file->f_op->release) |
281 | file->f_op->release(inode, file); | 280 | file->f_op->release(inode, file); |
282 | security_file_free(file); | 281 | security_file_free(file); |
283 | ima_file_free(file); | ||
284 | if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) | 282 | if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) |
285 | cdev_put(inode->i_cdev); | 283 | cdev_put(inode->i_cdev); |
286 | fops_put(file->f_op); | 284 | fops_put(file->f_op); |
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index 5971359d2090..4dcddf83326f 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig | |||
@@ -8,6 +8,8 @@ config GFS2_FS | |||
8 | select FS_POSIX_ACL | 8 | select FS_POSIX_ACL |
9 | select CRC32 | 9 | select CRC32 |
10 | select SLOW_WORK | 10 | select SLOW_WORK |
11 | select QUOTA | ||
12 | select QUOTACTL | ||
11 | help | 13 | help |
12 | A cluster filesystem. | 14 | A cluster filesystem. |
13 | 15 | ||
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 3fc4e3ac7d84..3eb1ea846173 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/completion.h> | 13 | #include <linux/completion.h> |
14 | #include <linux/buffer_head.h> | 14 | #include <linux/buffer_head.h> |
15 | #include <linux/xattr.h> | ||
15 | #include <linux/posix_acl.h> | 16 | #include <linux/posix_acl.h> |
16 | #include <linux/posix_acl_xattr.h> | 17 | #include <linux/posix_acl_xattr.h> |
17 | #include <linux/gfs2_ondisk.h> | 18 | #include <linux/gfs2_ondisk.h> |
@@ -26,108 +27,44 @@ | |||
26 | #include "trans.h" | 27 | #include "trans.h" |
27 | #include "util.h" | 28 | #include "util.h" |
28 | 29 | ||
29 | #define ACL_ACCESS 1 | 30 | static const char *gfs2_acl_name(int type) |
30 | #define ACL_DEFAULT 0 | ||
31 | |||
32 | int gfs2_acl_validate_set(struct gfs2_inode *ip, int access, | ||
33 | struct gfs2_ea_request *er, int *remove, mode_t *mode) | ||
34 | { | 31 | { |
35 | struct posix_acl *acl; | 32 | switch (type) { |
36 | int error; | 33 | case ACL_TYPE_ACCESS: |
37 | 34 | return GFS2_POSIX_ACL_ACCESS; | |
38 | error = gfs2_acl_validate_remove(ip, access); | 35 | case ACL_TYPE_DEFAULT: |
39 | if (error) | 36 | return GFS2_POSIX_ACL_DEFAULT; |
40 | return error; | ||
41 | |||
42 | if (!er->er_data) | ||
43 | return -EINVAL; | ||
44 | |||
45 | acl = posix_acl_from_xattr(er->er_data, er->er_data_len); | ||
46 | if (IS_ERR(acl)) | ||
47 | return PTR_ERR(acl); | ||
48 | if (!acl) { | ||
49 | *remove = 1; | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | error = posix_acl_valid(acl); | ||
54 | if (error) | ||
55 | goto out; | ||
56 | |||
57 | if (access) { | ||
58 | error = posix_acl_equiv_mode(acl, mode); | ||
59 | if (!error) | ||
60 | *remove = 1; | ||
61 | else if (error > 0) | ||
62 | error = 0; | ||
63 | } | 37 | } |
64 | 38 | return NULL; | |
65 | out: | ||
66 | posix_acl_release(acl); | ||
67 | return error; | ||
68 | } | ||
69 | |||
70 | int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access) | ||
71 | { | ||
72 | if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl) | ||
73 | return -EOPNOTSUPP; | ||
74 | if (!is_owner_or_cap(&ip->i_inode)) | ||
75 | return -EPERM; | ||
76 | if (S_ISLNK(ip->i_inode.i_mode)) | ||
77 | return -EOPNOTSUPP; | ||
78 | if (!access && !S_ISDIR(ip->i_inode.i_mode)) | ||
79 | return -EACCES; | ||
80 | |||
81 | return 0; | ||
82 | } | 39 | } |
83 | 40 | ||
84 | static int acl_get(struct gfs2_inode *ip, const char *name, | 41 | static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type) |
85 | struct posix_acl **acl, struct gfs2_ea_location *el, | ||
86 | char **datap, unsigned int *lenp) | ||
87 | { | 42 | { |
43 | struct posix_acl *acl; | ||
44 | const char *name; | ||
88 | char *data; | 45 | char *data; |
89 | unsigned int len; | 46 | int len; |
90 | int error; | ||
91 | |||
92 | el->el_bh = NULL; | ||
93 | 47 | ||
94 | if (!ip->i_eattr) | 48 | if (!ip->i_eattr) |
95 | return 0; | 49 | return NULL; |
96 | |||
97 | error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el); | ||
98 | if (error) | ||
99 | return error; | ||
100 | if (!el->el_ea) | ||
101 | return 0; | ||
102 | if (!GFS2_EA_DATA_LEN(el->el_ea)) | ||
103 | goto out; | ||
104 | 50 | ||
105 | len = GFS2_EA_DATA_LEN(el->el_ea); | 51 | acl = get_cached_acl(&ip->i_inode, type); |
106 | data = kmalloc(len, GFP_NOFS); | 52 | if (acl != ACL_NOT_CACHED) |
107 | error = -ENOMEM; | 53 | return acl; |
108 | if (!data) | ||
109 | goto out; | ||
110 | 54 | ||
111 | error = gfs2_ea_get_copy(ip, el, data, len); | 55 | name = gfs2_acl_name(type); |
112 | if (error < 0) | 56 | if (name == NULL) |
113 | goto out_kfree; | 57 | return ERR_PTR(-EINVAL); |
114 | error = 0; | ||
115 | 58 | ||
116 | if (acl) { | 59 | len = gfs2_xattr_acl_get(ip, name, &data); |
117 | *acl = posix_acl_from_xattr(data, len); | 60 | if (len < 0) |
118 | if (IS_ERR(*acl)) | 61 | return ERR_PTR(len); |
119 | error = PTR_ERR(*acl); | 62 | if (len == 0) |
120 | } | 63 | return NULL; |
121 | 64 | ||
122 | out_kfree: | 65 | acl = posix_acl_from_xattr(data, len); |
123 | if (error || !datap) { | 66 | kfree(data); |
124 | kfree(data); | 67 | return acl; |
125 | } else { | ||
126 | *datap = data; | ||
127 | *lenp = len; | ||
128 | } | ||
129 | out: | ||
130 | return error; | ||
131 | } | 68 | } |
132 | 69 | ||
133 | /** | 70 | /** |
@@ -140,14 +77,12 @@ out: | |||
140 | 77 | ||
141 | int gfs2_check_acl(struct inode *inode, int mask) | 78 | int gfs2_check_acl(struct inode *inode, int mask) |
142 | { | 79 | { |
143 | struct gfs2_ea_location el; | 80 | struct posix_acl *acl; |
144 | struct posix_acl *acl = NULL; | ||
145 | int error; | 81 | int error; |
146 | 82 | ||
147 | error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL); | 83 | acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS); |
148 | brelse(el.el_bh); | 84 | if (IS_ERR(acl)) |
149 | if (error) | 85 | return PTR_ERR(acl); |
150 | return error; | ||
151 | 86 | ||
152 | if (acl) { | 87 | if (acl) { |
153 | error = posix_acl_permission(inode, acl, mask); | 88 | error = posix_acl_permission(inode, acl, mask); |
@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask) | |||
158 | return -EAGAIN; | 93 | return -EAGAIN; |
159 | } | 94 | } |
160 | 95 | ||
161 | static int munge_mode(struct gfs2_inode *ip, mode_t mode) | 96 | static int gfs2_set_mode(struct inode *inode, mode_t mode) |
162 | { | 97 | { |
163 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 98 | int error = 0; |
164 | struct buffer_head *dibh; | ||
165 | int error; | ||
166 | 99 | ||
167 | error = gfs2_trans_begin(sdp, RES_DINODE, 0); | 100 | if (mode != inode->i_mode) { |
168 | if (error) | 101 | struct iattr iattr; |
169 | return error; | ||
170 | 102 | ||
171 | error = gfs2_meta_inode_buffer(ip, &dibh); | 103 | iattr.ia_valid = ATTR_MODE; |
172 | if (!error) { | 104 | iattr.ia_mode = mode; |
173 | gfs2_assert_withdraw(sdp, | 105 | |
174 | (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT)); | 106 | error = gfs2_setattr_simple(GFS2_I(inode), &iattr); |
175 | ip->i_inode.i_mode = mode; | ||
176 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
177 | gfs2_dinode_out(ip, dibh->b_data); | ||
178 | brelse(dibh); | ||
179 | } | 107 | } |
180 | 108 | ||
181 | gfs2_trans_end(sdp); | 109 | return error; |
110 | } | ||
111 | |||
112 | static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl) | ||
113 | { | ||
114 | int error; | ||
115 | int len; | ||
116 | char *data; | ||
117 | const char *name = gfs2_acl_name(type); | ||
182 | 118 | ||
183 | return 0; | 119 | BUG_ON(name == NULL); |
120 | len = posix_acl_to_xattr(acl, NULL, 0); | ||
121 | if (len == 0) | ||
122 | return 0; | ||
123 | data = kmalloc(len, GFP_NOFS); | ||
124 | if (data == NULL) | ||
125 | return -ENOMEM; | ||
126 | error = posix_acl_to_xattr(acl, data, len); | ||
127 | if (error < 0) | ||
128 | goto out; | ||
129 | error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0); | ||
130 | if (!error) | ||
131 | set_cached_acl(inode, type, acl); | ||
132 | out: | ||
133 | kfree(data); | ||
134 | return error; | ||
184 | } | 135 | } |
185 | 136 | ||
186 | int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) | 137 | int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode) |
187 | { | 138 | { |
188 | struct gfs2_ea_location el; | ||
189 | struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); | 139 | struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); |
190 | struct posix_acl *acl = NULL, *clone; | 140 | struct posix_acl *acl, *clone; |
191 | mode_t mode = ip->i_inode.i_mode; | 141 | mode_t mode = inode->i_mode; |
192 | char *data = NULL; | 142 | int error = 0; |
193 | unsigned int len; | ||
194 | int error; | ||
195 | 143 | ||
196 | if (!sdp->sd_args.ar_posix_acl) | 144 | if (!sdp->sd_args.ar_posix_acl) |
197 | return 0; | 145 | return 0; |
198 | if (S_ISLNK(ip->i_inode.i_mode)) | 146 | if (S_ISLNK(inode->i_mode)) |
199 | return 0; | 147 | return 0; |
200 | 148 | ||
201 | error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len); | 149 | acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT); |
202 | brelse(el.el_bh); | 150 | if (IS_ERR(acl)) |
203 | if (error) | 151 | return PTR_ERR(acl); |
204 | return error; | ||
205 | if (!acl) { | 152 | if (!acl) { |
206 | mode &= ~current_umask(); | 153 | mode &= ~current_umask(); |
207 | if (mode != ip->i_inode.i_mode) | 154 | if (mode != inode->i_mode) |
208 | error = munge_mode(ip, mode); | 155 | error = gfs2_set_mode(inode, mode); |
209 | return error; | 156 | return error; |
210 | } | 157 | } |
211 | 158 | ||
159 | if (S_ISDIR(inode->i_mode)) { | ||
160 | error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl); | ||
161 | if (error) | ||
162 | goto out; | ||
163 | } | ||
164 | |||
212 | clone = posix_acl_clone(acl, GFP_NOFS); | 165 | clone = posix_acl_clone(acl, GFP_NOFS); |
213 | error = -ENOMEM; | 166 | error = -ENOMEM; |
214 | if (!clone) | 167 | if (!clone) |
@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) | |||
216 | posix_acl_release(acl); | 169 | posix_acl_release(acl); |
217 | acl = clone; | 170 | acl = clone; |
218 | 171 | ||
219 | if (S_ISDIR(ip->i_inode.i_mode)) { | ||
220 | error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS, | ||
221 | GFS2_POSIX_ACL_DEFAULT, data, len, 0); | ||
222 | if (error) | ||
223 | goto out; | ||
224 | } | ||
225 | |||
226 | error = posix_acl_create_masq(acl, &mode); | 172 | error = posix_acl_create_masq(acl, &mode); |
227 | if (error < 0) | 173 | if (error < 0) |
228 | goto out; | 174 | goto out; |
229 | if (error == 0) | 175 | if (error == 0) |
230 | goto munge; | 176 | goto munge; |
231 | 177 | ||
232 | posix_acl_to_xattr(acl, data, len); | 178 | error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl); |
233 | error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS, | ||
234 | GFS2_POSIX_ACL_ACCESS, data, len, 0); | ||
235 | if (error) | 179 | if (error) |
236 | goto out; | 180 | goto out; |
237 | munge: | 181 | munge: |
238 | error = munge_mode(ip, mode); | 182 | error = gfs2_set_mode(inode, mode); |
239 | out: | 183 | out: |
240 | posix_acl_release(acl); | 184 | posix_acl_release(acl); |
241 | kfree(data); | ||
242 | return error; | 185 | return error; |
243 | } | 186 | } |
244 | 187 | ||
245 | int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) | 188 | int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) |
246 | { | 189 | { |
247 | struct posix_acl *acl = NULL, *clone; | 190 | struct posix_acl *acl, *clone; |
248 | struct gfs2_ea_location el; | ||
249 | char *data; | 191 | char *data; |
250 | unsigned int len; | 192 | unsigned int len; |
251 | int error; | 193 | int error; |
252 | 194 | ||
253 | error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len); | 195 | acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS); |
254 | if (error) | 196 | if (IS_ERR(acl)) |
255 | goto out_brelse; | 197 | return PTR_ERR(acl); |
256 | if (!acl) | 198 | if (!acl) |
257 | return gfs2_setattr_simple(ip, attr); | 199 | return gfs2_setattr_simple(ip, attr); |
258 | 200 | ||
@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) | |||
265 | 207 | ||
266 | error = posix_acl_chmod_masq(acl, attr->ia_mode); | 208 | error = posix_acl_chmod_masq(acl, attr->ia_mode); |
267 | if (!error) { | 209 | if (!error) { |
210 | len = posix_acl_to_xattr(acl, NULL, 0); | ||
211 | data = kmalloc(len, GFP_NOFS); | ||
212 | error = -ENOMEM; | ||
213 | if (data == NULL) | ||
214 | goto out; | ||
268 | posix_acl_to_xattr(acl, data, len); | 215 | posix_acl_to_xattr(acl, data, len); |
269 | error = gfs2_ea_acl_chmod(ip, &el, attr, data); | 216 | error = gfs2_xattr_acl_chmod(ip, attr, data); |
217 | kfree(data); | ||
218 | set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl); | ||
270 | } | 219 | } |
271 | 220 | ||
272 | out: | 221 | out: |
273 | posix_acl_release(acl); | 222 | posix_acl_release(acl); |
274 | kfree(data); | ||
275 | out_brelse: | ||
276 | brelse(el.el_bh); | ||
277 | return error; | 223 | return error; |
278 | } | 224 | } |
279 | 225 | ||
226 | static int gfs2_acl_type(const char *name) | ||
227 | { | ||
228 | if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0) | ||
229 | return ACL_TYPE_ACCESS; | ||
230 | if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0) | ||
231 | return ACL_TYPE_DEFAULT; | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | static int gfs2_xattr_system_get(struct inode *inode, const char *name, | ||
236 | void *buffer, size_t size) | ||
237 | { | ||
238 | struct posix_acl *acl; | ||
239 | int type; | ||
240 | int error; | ||
241 | |||
242 | type = gfs2_acl_type(name); | ||
243 | if (type < 0) | ||
244 | return type; | ||
245 | |||
246 | acl = gfs2_acl_get(GFS2_I(inode), type); | ||
247 | if (IS_ERR(acl)) | ||
248 | return PTR_ERR(acl); | ||
249 | if (acl == NULL) | ||
250 | return -ENODATA; | ||
251 | |||
252 | error = posix_acl_to_xattr(acl, buffer, size); | ||
253 | posix_acl_release(acl); | ||
254 | |||
255 | return error; | ||
256 | } | ||
257 | |||
258 | static int gfs2_xattr_system_set(struct inode *inode, const char *name, | ||
259 | const void *value, size_t size, int flags) | ||
260 | { | ||
261 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
262 | struct posix_acl *acl = NULL; | ||
263 | int error = 0, type; | ||
264 | |||
265 | if (!sdp->sd_args.ar_posix_acl) | ||
266 | return -EOPNOTSUPP; | ||
267 | |||
268 | type = gfs2_acl_type(name); | ||
269 | if (type < 0) | ||
270 | return type; | ||
271 | if (flags & XATTR_CREATE) | ||
272 | return -EINVAL; | ||
273 | if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) | ||
274 | return value ? -EACCES : 0; | ||
275 | if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER)) | ||
276 | return -EPERM; | ||
277 | if (S_ISLNK(inode->i_mode)) | ||
278 | return -EOPNOTSUPP; | ||
279 | |||
280 | if (!value) | ||
281 | goto set_acl; | ||
282 | |||
283 | acl = posix_acl_from_xattr(value, size); | ||
284 | if (!acl) { | ||
285 | /* | ||
286 | * acl_set_file(3) may request that we set default ACLs with | ||
287 | * zero length -- defend (gracefully) against that here. | ||
288 | */ | ||
289 | goto out; | ||
290 | } | ||
291 | if (IS_ERR(acl)) { | ||
292 | error = PTR_ERR(acl); | ||
293 | goto out; | ||
294 | } | ||
295 | |||
296 | error = posix_acl_valid(acl); | ||
297 | if (error) | ||
298 | goto out_release; | ||
299 | |||
300 | error = -EINVAL; | ||
301 | if (acl->a_count > GFS2_ACL_MAX_ENTRIES) | ||
302 | goto out_release; | ||
303 | |||
304 | if (type == ACL_TYPE_ACCESS) { | ||
305 | mode_t mode = inode->i_mode; | ||
306 | error = posix_acl_equiv_mode(acl, &mode); | ||
307 | |||
308 | if (error <= 0) { | ||
309 | posix_acl_release(acl); | ||
310 | acl = NULL; | ||
311 | |||
312 | if (error < 0) | ||
313 | return error; | ||
314 | } | ||
315 | |||
316 | error = gfs2_set_mode(inode, mode); | ||
317 | if (error) | ||
318 | goto out_release; | ||
319 | } | ||
320 | |||
321 | set_acl: | ||
322 | error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0); | ||
323 | if (!error) { | ||
324 | if (acl) | ||
325 | set_cached_acl(inode, type, acl); | ||
326 | else | ||
327 | forget_cached_acl(inode, type); | ||
328 | } | ||
329 | out_release: | ||
330 | posix_acl_release(acl); | ||
331 | out: | ||
332 | return error; | ||
333 | } | ||
334 | |||
335 | struct xattr_handler gfs2_xattr_system_handler = { | ||
336 | .prefix = XATTR_SYSTEM_PREFIX, | ||
337 | .get = gfs2_xattr_system_get, | ||
338 | .set = gfs2_xattr_system_set, | ||
339 | }; | ||
340 | |||
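
The rewritten acl.c above keys everything off ACL_TYPE_ACCESS/ACL_TYPE_DEFAULT and the generic in-core ACL cache instead of the old gfs2_ea_location plumbing. One pattern worth calling out is the two-pass posix_acl_to_xattr() sizing idiom used by the new gfs2_acl_set(): convert once with a NULL buffer to learn the length, then allocate and convert for real. A minimal sketch of that write path, assuming only what the hunk above already shows (gfs2_xattr_set() storing into the GFS2_EATYPE_SYS namespace and set_cached_acl() keeping the cache coherent):

/* Sketch, not the exact kernel code: serialise and store a POSIX ACL. */
static int store_acl_sketch(struct inode *inode, int type, const char *name,
			    struct posix_acl *acl)
{
	char *data;
	int len, error;

	len = posix_acl_to_xattr(acl, NULL, 0);		/* sizing pass */
	if (len == 0)
		return 0;				/* nothing to store */
	data = kmalloc(len, GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	error = posix_acl_to_xattr(acl, data, len);	/* real conversion */
	if (error < 0)
		goto out;
	error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
	if (!error)
		set_cached_acl(inode, type, acl);	/* cache stays coherent */
out:
	kfree(data);
	return error;
}
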
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h index 6751930bfb64..9306a2e6620c 100644 --- a/fs/gfs2/acl.h +++ b/fs/gfs2/acl.h | |||
@@ -13,26 +13,12 @@ | |||
13 | #include "incore.h" | 13 | #include "incore.h" |
14 | 14 | ||
15 | #define GFS2_POSIX_ACL_ACCESS "posix_acl_access" | 15 | #define GFS2_POSIX_ACL_ACCESS "posix_acl_access" |
16 | #define GFS2_POSIX_ACL_ACCESS_LEN 16 | ||
17 | #define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" | 16 | #define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" |
18 | #define GFS2_POSIX_ACL_DEFAULT_LEN 17 | 17 | #define GFS2_ACL_MAX_ENTRIES 25 |
19 | 18 | ||
20 | #define GFS2_ACL_IS_ACCESS(name, len) \ | 19 | extern int gfs2_check_acl(struct inode *inode, int mask); |
21 | ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \ | 20 | extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode); |
22 | !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len))) | 21 | extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); |
23 | 22 | extern struct xattr_handler gfs2_xattr_system_handler; | |
24 | #define GFS2_ACL_IS_DEFAULT(name, len) \ | ||
25 | ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \ | ||
26 | !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len))) | ||
27 | |||
28 | struct gfs2_ea_request; | ||
29 | |||
30 | int gfs2_acl_validate_set(struct gfs2_inode *ip, int access, | ||
31 | struct gfs2_ea_request *er, | ||
32 | int *remove, mode_t *mode); | ||
33 | int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access); | ||
34 | int gfs2_check_acl(struct inode *inode, int mask); | ||
35 | int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip); | ||
36 | int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); | ||
37 | 23 | ||
38 | #endif /* __ACL_DOT_H__ */ | 24 | #endif /* __ACL_DOT_H__ */ |
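
acl.h now exports gfs2_xattr_system_handler so the VFS can route "system.posix_acl_access" and "system.posix_acl_default" lookups to the get/set routines in acl.c. A hedged sketch of how such a handler is typically wired up; the gfs2_xattr_handlers[] table itself lives in xattr.c and is only assumed here:

/* Sketch: a system-namespace handler table as the VFS expects it. */
static struct xattr_handler *sketch_xattr_handlers[] = {
	&gfs2_xattr_system_handler,	/* system.posix_acl_* names */
	/* user/security handlers would sit alongside it */
	NULL,
};

/* At mount time, fill_super() points the superblock at the table:
 *	sb->s_xattr = gfs2_xattr_handlers;
 * (the real assignment appears in the ops_fstype.c hunk further down). */
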
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 694b5d48f036..7b8da9415267 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, | |||
269 | pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; | 269 | pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; |
270 | unsigned offset = i_size & (PAGE_CACHE_SIZE-1); | 270 | unsigned offset = i_size & (PAGE_CACHE_SIZE-1); |
271 | unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); | 271 | unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); |
272 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
273 | int i; | 272 | int i; |
274 | int ret; | 273 | int ret; |
275 | 274 | ||
@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, | |||
313 | 312 | ||
314 | if (ret || (--(wbc->nr_to_write) <= 0)) | 313 | if (ret || (--(wbc->nr_to_write) <= 0)) |
315 | ret = 1; | 314 | ret = 1; |
316 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | ||
317 | wbc->encountered_congestion = 1; | ||
318 | ret = 1; | ||
319 | } | ||
320 | |||
321 | } | 315 | } |
322 | gfs2_trans_end(sdp); | 316 | gfs2_trans_end(sdp); |
323 | return ret; | 317 | return ret; |
@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, | |||
338 | static int gfs2_write_cache_jdata(struct address_space *mapping, | 332 | static int gfs2_write_cache_jdata(struct address_space *mapping, |
339 | struct writeback_control *wbc) | 333 | struct writeback_control *wbc) |
340 | { | 334 | { |
341 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
342 | int ret = 0; | 335 | int ret = 0; |
343 | int done = 0; | 336 | int done = 0; |
344 | struct pagevec pvec; | 337 | struct pagevec pvec; |
@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping, | |||
348 | int scanned = 0; | 341 | int scanned = 0; |
349 | int range_whole = 0; | 342 | int range_whole = 0; |
350 | 343 | ||
351 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | ||
352 | wbc->encountered_congestion = 1; | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | pagevec_init(&pvec, 0); | 344 | pagevec_init(&pvec, 0); |
357 | if (wbc->range_cyclic) { | 345 | if (wbc->range_cyclic) { |
358 | index = mapping->writeback_index; /* Start from prev offset */ | 346 | index = mapping->writeback_index; /* Start from prev offset */ |
@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, | |||
819 | mark_inode_dirty(inode); | 807 | mark_inode_dirty(inode); |
820 | } | 808 | } |
821 | 809 | ||
822 | if (inode == sdp->sd_rindex) | 810 | if (inode == sdp->sd_rindex) { |
823 | adjust_fs_space(inode); | 811 | adjust_fs_space(inode); |
812 | ip->i_gh.gh_flags |= GL_NOCACHE; | ||
813 | } | ||
824 | 814 | ||
825 | brelse(dibh); | 815 | brelse(dibh); |
826 | gfs2_trans_end(sdp); | 816 | gfs2_trans_end(sdp); |
@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, | |||
889 | mark_inode_dirty(inode); | 879 | mark_inode_dirty(inode); |
890 | } | 880 | } |
891 | 881 | ||
892 | if (inode == sdp->sd_rindex) | 882 | if (inode == sdp->sd_rindex) { |
893 | adjust_fs_space(inode); | 883 | adjust_fs_space(inode); |
884 | ip->i_gh.gh_flags |= GL_NOCACHE; | ||
885 | } | ||
894 | 886 | ||
895 | brelse(dibh); | 887 | brelse(dibh); |
896 | gfs2_trans_end(sdp); | 888 | gfs2_trans_end(sdp); |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 297d7e5cebad..25fddc100f18 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c | |||
@@ -525,38 +525,6 @@ consist_inode: | |||
525 | return ERR_PTR(-EIO); | 525 | return ERR_PTR(-EIO); |
526 | } | 526 | } |
527 | 527 | ||
528 | |||
529 | /** | ||
530 | * dirent_first - Return the first dirent | ||
531 | * @dip: the directory | ||
532 | * @bh: The buffer | ||
533 | * @dent: Pointer to list of dirents | ||
534 | * | ||
535 | * return first dirent whether bh points to leaf or stuffed dinode | ||
536 | * | ||
537 | * Returns: IS_LEAF, IS_DINODE, or -errno | ||
538 | */ | ||
539 | |||
540 | static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh, | ||
541 | struct gfs2_dirent **dent) | ||
542 | { | ||
543 | struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data; | ||
544 | |||
545 | if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) { | ||
546 | if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh)) | ||
547 | return -EIO; | ||
548 | *dent = (struct gfs2_dirent *)(bh->b_data + | ||
549 | sizeof(struct gfs2_leaf)); | ||
550 | return IS_LEAF; | ||
551 | } else { | ||
552 | if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI)) | ||
553 | return -EIO; | ||
554 | *dent = (struct gfs2_dirent *)(bh->b_data + | ||
555 | sizeof(struct gfs2_dinode)); | ||
556 | return IS_DINODE; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | static int dirent_check_reclen(struct gfs2_inode *dip, | 528 | static int dirent_check_reclen(struct gfs2_inode *dip, |
561 | const struct gfs2_dirent *d, const void *end_p) | 529 | const struct gfs2_dirent *d, const void *end_p) |
562 | { | 530 | { |
@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) | |||
1006 | divider = (start + half_len) << (32 - dip->i_depth); | 974 | divider = (start + half_len) << (32 - dip->i_depth); |
1007 | 975 | ||
1008 | /* Copy the entries */ | 976 | /* Copy the entries */ |
1009 | dirent_first(dip, obh, &dent); | 977 | dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf)); |
1010 | 978 | ||
1011 | do { | 979 | do { |
1012 | next = dent; | 980 | next = dent; |
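
dirent_first() disappears because its only remaining caller, dir_split_leaf(), always works on a leaf block, so the first entry can be found with plain pointer arithmetic rather than a metatype check. A tiny sketch of that layout assumption, matching the replacement line in the hunk above:

/* Sketch: in a leaf block the first dirent follows the leaf header directly. */
static struct gfs2_dirent *first_leaf_dirent_sketch(struct buffer_head *bh)
{
	return (struct gfs2_dirent *)(bh->b_data + sizeof(struct gfs2_leaf));
}
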
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 8b674b1f3a55..f455a03a09e2 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl) | |||
241 | int rv = 0; | 241 | int rv = 0; |
242 | 242 | ||
243 | write_lock(gl_lock_addr(gl->gl_hash)); | 243 | write_lock(gl_lock_addr(gl->gl_hash)); |
244 | if (atomic_dec_and_test(&gl->gl_ref)) { | 244 | if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) { |
245 | hlist_del(&gl->gl_list); | 245 | hlist_del(&gl->gl_list); |
246 | write_unlock(gl_lock_addr(gl->gl_hash)); | ||
247 | spin_lock(&lru_lock); | ||
248 | if (!list_empty(&gl->gl_lru)) { | 246 | if (!list_empty(&gl->gl_lru)) { |
249 | list_del_init(&gl->gl_lru); | 247 | list_del_init(&gl->gl_lru); |
250 | atomic_dec(&lru_count); | 248 | atomic_dec(&lru_count); |
251 | } | 249 | } |
252 | spin_unlock(&lru_lock); | 250 | spin_unlock(&lru_lock); |
251 | write_unlock(gl_lock_addr(gl->gl_hash)); | ||
253 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); | 252 | GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); |
254 | glock_free(gl); | 253 | glock_free(gl); |
255 | rv = 1; | 254 | rv = 1; |
@@ -513,7 +512,6 @@ retry: | |||
513 | GLOCK_BUG_ON(gl, 1); | 512 | GLOCK_BUG_ON(gl, 1); |
514 | } | 513 | } |
515 | spin_unlock(&gl->gl_spin); | 514 | spin_unlock(&gl->gl_spin); |
516 | gfs2_glock_put(gl); | ||
517 | return; | 515 | return; |
518 | } | 516 | } |
519 | 517 | ||
@@ -524,8 +522,6 @@ retry: | |||
524 | if (glops->go_xmote_bh) { | 522 | if (glops->go_xmote_bh) { |
525 | spin_unlock(&gl->gl_spin); | 523 | spin_unlock(&gl->gl_spin); |
526 | rv = glops->go_xmote_bh(gl, gh); | 524 | rv = glops->go_xmote_bh(gl, gh); |
527 | if (rv == -EAGAIN) | ||
528 | return; | ||
529 | spin_lock(&gl->gl_spin); | 525 | spin_lock(&gl->gl_spin); |
530 | if (rv) { | 526 | if (rv) { |
531 | do_error(gl, rv); | 527 | do_error(gl, rv); |
@@ -540,7 +536,6 @@ out: | |||
540 | clear_bit(GLF_LOCK, &gl->gl_flags); | 536 | clear_bit(GLF_LOCK, &gl->gl_flags); |
541 | out_locked: | 537 | out_locked: |
542 | spin_unlock(&gl->gl_spin); | 538 | spin_unlock(&gl->gl_spin); |
543 | gfs2_glock_put(gl); | ||
544 | } | 539 | } |
545 | 540 | ||
546 | static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, | 541 | static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, |
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin) | |||
600 | 595 | ||
601 | if (!(ret & LM_OUT_ASYNC)) { | 596 | if (!(ret & LM_OUT_ASYNC)) { |
602 | finish_xmote(gl, ret); | 597 | finish_xmote(gl, ret); |
603 | gfs2_glock_hold(gl); | ||
604 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 598 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
605 | gfs2_glock_put(gl); | 599 | gfs2_glock_put(gl); |
606 | } else { | 600 | } else { |
@@ -672,12 +666,17 @@ out: | |||
672 | return; | 666 | return; |
673 | 667 | ||
674 | out_sched: | 668 | out_sched: |
669 | clear_bit(GLF_LOCK, &gl->gl_flags); | ||
670 | smp_mb__after_clear_bit(); | ||
675 | gfs2_glock_hold(gl); | 671 | gfs2_glock_hold(gl); |
676 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 672 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
677 | gfs2_glock_put_nolock(gl); | 673 | gfs2_glock_put_nolock(gl); |
674 | return; | ||
675 | |||
678 | out_unlock: | 676 | out_unlock: |
679 | clear_bit(GLF_LOCK, &gl->gl_flags); | 677 | clear_bit(GLF_LOCK, &gl->gl_flags); |
680 | goto out; | 678 | smp_mb__after_clear_bit(); |
679 | return; | ||
681 | } | 680 | } |
682 | 681 | ||
683 | static void delete_work_func(struct work_struct *work) | 682 | static void delete_work_func(struct work_struct *work) |
@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work) | |||
707 | { | 706 | { |
708 | unsigned long delay = 0; | 707 | unsigned long delay = 0; |
709 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); | 708 | struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); |
709 | int drop_ref = 0; | ||
710 | 710 | ||
711 | if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) | 711 | if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { |
712 | finish_xmote(gl, gl->gl_reply); | 712 | finish_xmote(gl, gl->gl_reply); |
713 | drop_ref = 1; | ||
714 | } | ||
713 | down_read(&gfs2_umount_flush_sem); | 715 | down_read(&gfs2_umount_flush_sem); |
714 | spin_lock(&gl->gl_spin); | 716 | spin_lock(&gl->gl_spin); |
715 | if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && | 717 | if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && |
@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work) | |||
727 | if (!delay || | 729 | if (!delay || |
728 | queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) | 730 | queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) |
729 | gfs2_glock_put(gl); | 731 | gfs2_glock_put(gl); |
732 | if (drop_ref) | ||
733 | gfs2_glock_put(gl); | ||
730 | } | 734 | } |
731 | 735 | ||
732 | /** | 736 | /** |
@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) | |||
1361 | list_del_init(&gl->gl_lru); | 1365 | list_del_init(&gl->gl_lru); |
1362 | atomic_dec(&lru_count); | 1366 | atomic_dec(&lru_count); |
1363 | 1367 | ||
1364 | /* Check if glock is about to be freed */ | ||
1365 | if (atomic_read(&gl->gl_ref) == 0) | ||
1366 | continue; | ||
1367 | |||
1368 | /* Test for being demotable */ | 1368 | /* Test for being demotable */ |
1369 | if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 1369 | if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
1370 | gfs2_glock_hold(gl); | 1370 | gfs2_glock_hold(gl); |
@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) | |||
1375 | handle_callback(gl, LM_ST_UNLOCKED, 0); | 1375 | handle_callback(gl, LM_ST_UNLOCKED, 0); |
1376 | nr--; | 1376 | nr--; |
1377 | } | 1377 | } |
1378 | clear_bit(GLF_LOCK, &gl->gl_flags); | ||
1379 | smp_mb__after_clear_bit(); | ||
1378 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 1380 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
1379 | gfs2_glock_put_nolock(gl); | 1381 | gfs2_glock_put_nolock(gl); |
1380 | spin_unlock(&gl->gl_spin); | 1382 | spin_unlock(&gl->gl_spin); |
1381 | clear_bit(GLF_LOCK, &gl->gl_flags); | ||
1382 | spin_lock(&lru_lock); | 1383 | spin_lock(&lru_lock); |
1383 | continue; | 1384 | continue; |
1384 | } | 1385 | } |
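
The glock.c changes above tighten the reference-count teardown: gfs2_glock_put() now uses atomic_dec_and_lock() so the final reference drop and the LRU unlink happen atomically under lru_lock, which is why the shrinker no longer needs its "about to be freed" recheck. A self-contained sketch of that teardown pattern with hypothetical names (obj_sketch, sketch_lru_lock):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical object used only to illustrate the pattern. */
struct obj_sketch {
	atomic_t ref;
	struct list_head lru;
};

static DEFINE_SPINLOCK(sketch_lru_lock);

/* Drop a reference; only the final dropper takes the LRU lock, so nobody
 * can observe the object on the LRU after its count has reached zero. */
static void obj_put_sketch(struct obj_sketch *obj)
{
	if (atomic_dec_and_lock(&obj->ref, &sketch_lru_lock)) {
		if (!list_empty(&obj->lru))
			list_del_init(&obj->lru);
		spin_unlock(&sketch_lru_lock);
		kfree(obj);		/* real code calls its destructor here */
	}
}
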
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index c609894ec0d0..13f0bd228132 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h | |||
@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl) | |||
180 | return gl->gl_state == LM_ST_SHARED; | 180 | return gl->gl_state == LM_ST_SHARED; |
181 | } | 181 | } |
182 | 182 | ||
183 | static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl) | ||
184 | { | ||
185 | int ret; | ||
186 | spin_lock(&gl->gl_spin); | ||
187 | ret = test_bit(GLF_DEMOTE, &gl->gl_flags); | ||
188 | spin_unlock(&gl->gl_spin); | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | int gfs2_glock_get(struct gfs2_sbd *sdp, | 183 | int gfs2_glock_get(struct gfs2_sbd *sdp, |
193 | u64 number, const struct gfs2_glock_operations *glops, | 184 | u64 number, const struct gfs2_glock_operations *glops, |
194 | int create, struct gfs2_glock **glp); | 185 | int create, struct gfs2_glock **glp); |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 6985eef06c39..78554acc0605 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/buffer_head.h> | 13 | #include <linux/buffer_head.h> |
14 | #include <linux/gfs2_ondisk.h> | 14 | #include <linux/gfs2_ondisk.h> |
15 | #include <linux/bio.h> | 15 | #include <linux/bio.h> |
16 | #include <linux/posix_acl.h> | ||
16 | 17 | ||
17 | #include "gfs2.h" | 18 | #include "gfs2.h" |
18 | #include "incore.h" | 19 | #include "incore.h" |
@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) | |||
184 | if (flags & DIO_METADATA) { | 185 | if (flags & DIO_METADATA) { |
185 | struct address_space *mapping = gl->gl_aspace->i_mapping; | 186 | struct address_space *mapping = gl->gl_aspace->i_mapping; |
186 | truncate_inode_pages(mapping, 0); | 187 | truncate_inode_pages(mapping, 0); |
187 | if (ip) | 188 | if (ip) { |
188 | set_bit(GIF_INVALID, &ip->i_flags); | 189 | set_bit(GIF_INVALID, &ip->i_flags); |
190 | forget_all_cached_acls(&ip->i_inode); | ||
191 | } | ||
189 | } | 192 | } |
190 | 193 | ||
191 | if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) | 194 | if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) |
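
With ACLs now cached in core, inode_go_inval() has to drop those cached copies whenever a glock invalidation discards the inode's metadata, otherwise an ACL changed on another node could be shadowed by a stale local copy. A small hedged sketch of that pairing:

/* Sketch: when cluster locking invalidates cached metadata, throw away the
 * in-core ACLs that were derived from it as well. */
static void invalidate_metadata_sketch(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, 0);	/* drop cached pages */
	forget_all_cached_acls(inode);			/* drop cached ACLs */
}
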
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 6edb423f90b3..4792200978c8 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -429,7 +429,11 @@ struct gfs2_args { | |||
429 | unsigned int ar_meta:1; /* mount metafs */ | 429 | unsigned int ar_meta:1; /* mount metafs */ |
430 | unsigned int ar_discard:1; /* discard requests */ | 430 | unsigned int ar_discard:1; /* discard requests */ |
431 | unsigned int ar_errors:2; /* errors=withdraw | panic */ | 431 | unsigned int ar_errors:2; /* errors=withdraw | panic */ |
432 | unsigned int ar_nobarrier:1; /* do not send barriers */ | ||
432 | int ar_commit; /* Commit interval */ | 433 | int ar_commit; /* Commit interval */ |
434 | int ar_statfs_quantum; /* The fast statfs interval */ | ||
435 | int ar_quota_quantum; /* The quota interval */ | ||
436 | int ar_statfs_percent; /* The % change to force sync */ | ||
433 | }; | 437 | }; |
434 | 438 | ||
435 | struct gfs2_tune { | 439 | struct gfs2_tune { |
@@ -558,6 +562,7 @@ struct gfs2_sbd { | |||
558 | spinlock_t sd_statfs_spin; | 562 | spinlock_t sd_statfs_spin; |
559 | struct gfs2_statfs_change_host sd_statfs_master; | 563 | struct gfs2_statfs_change_host sd_statfs_master; |
560 | struct gfs2_statfs_change_host sd_statfs_local; | 564 | struct gfs2_statfs_change_host sd_statfs_local; |
565 | int sd_statfs_force_sync; | ||
561 | 566 | ||
562 | /* Resource group stuff */ | 567 | /* Resource group stuff */ |
563 | 568 | ||
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index fb15d3b1f409..26ba2a4c4a2d 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, | |||
871 | if (error) | 871 | if (error) |
872 | goto fail_gunlock2; | 872 | goto fail_gunlock2; |
873 | 873 | ||
874 | error = gfs2_acl_create(dip, GFS2_I(inode)); | 874 | error = gfs2_acl_create(dip, inode); |
875 | if (error) | 875 | if (error) |
876 | goto fail_gunlock2; | 876 | goto fail_gunlock2; |
877 | 877 | ||
@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) | |||
947 | 947 | ||
948 | str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); | 948 | str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); |
949 | str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); | 949 | str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); |
950 | str->di_header.__pad0 = 0; | ||
951 | str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); | 950 | str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); |
952 | str->di_header.__pad1 = 0; | ||
953 | str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); | 951 | str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); |
954 | str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); | 952 | str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); |
955 | str->di_mode = cpu_to_be32(ip->i_inode.i_mode); | 953 | str->di_mode = cpu_to_be32(ip->i_inode.i_mode); |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 13c6237c5f67..4511b08fc451 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | |||
596 | memset(lh, 0, sizeof(struct gfs2_log_header)); | 596 | memset(lh, 0, sizeof(struct gfs2_log_header)); |
597 | lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); | 597 | lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); |
598 | lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); | 598 | lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); |
599 | lh->lh_header.__pad0 = cpu_to_be64(0); | ||
599 | lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); | 600 | lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); |
601 | lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); | ||
600 | lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); | 602 | lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); |
601 | lh->lh_flags = cpu_to_be32(flags); | 603 | lh->lh_flags = cpu_to_be32(flags); |
602 | lh->lh_tail = cpu_to_be32(tail); | 604 | lh->lh_tail = cpu_to_be32(tail); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 9969ff062c5b..de97632ba32f 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type) | |||
132 | static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | 132 | static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) |
133 | { | 133 | { |
134 | struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); | 134 | struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); |
135 | struct gfs2_meta_header *mh; | ||
135 | struct gfs2_trans *tr; | 136 | struct gfs2_trans *tr; |
136 | 137 | ||
137 | lock_buffer(bd->bd_bh); | 138 | lock_buffer(bd->bd_bh); |
@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | |||
148 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); | 149 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); |
149 | gfs2_meta_check(sdp, bd->bd_bh); | 150 | gfs2_meta_check(sdp, bd->bd_bh); |
150 | gfs2_pin(sdp, bd->bd_bh); | 151 | gfs2_pin(sdp, bd->bd_bh); |
152 | mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; | ||
153 | mh->__pad0 = cpu_to_be64(0); | ||
154 | mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); | ||
151 | sdp->sd_log_num_buf++; | 155 | sdp->sd_log_num_buf++; |
152 | list_add(&le->le_list, &sdp->sd_log_le_buf); | 156 | list_add(&le->le_list, &sdp->sd_log_le_buf); |
153 | tr->tr_num_buf_new++; | 157 | tr->tr_num_buf_new++; |
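
The log.c and lops.c hunks above start stamping the journal id into the previously reserved header fields: the log header gets mh_jid directly, and every metadata buffer is tagged as it is pinned for the journal, presumably so a later reader (fsck or recovery) can tell which journal last wrote a block. A sketch of the stamping step, using only the fields the hunks themselves touch:

/* Sketch: tag a metadata block with the id of the journal writing it. */
static void stamp_jid_sketch(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	mh->__pad0 = cpu_to_be64(0);			/* keep reserved bytes zeroed */
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
}
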
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 52fb6c048981..edfee24f3636 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/mount.h> | 18 | #include <linux/mount.h> |
19 | #include <linux/gfs2_ondisk.h> | 19 | #include <linux/gfs2_ondisk.h> |
20 | #include <linux/slow-work.h> | 20 | #include <linux/slow-work.h> |
21 | #include <linux/quotaops.h> | ||
21 | 22 | ||
22 | #include "gfs2.h" | 23 | #include "gfs2.h" |
23 | #include "incore.h" | 24 | #include "incore.h" |
@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt) | |||
62 | gt->gt_quota_warn_period = 10; | 63 | gt->gt_quota_warn_period = 10; |
63 | gt->gt_quota_scale_num = 1; | 64 | gt->gt_quota_scale_num = 1; |
64 | gt->gt_quota_scale_den = 1; | 65 | gt->gt_quota_scale_den = 1; |
65 | gt->gt_quota_quantum = 60; | ||
66 | gt->gt_new_files_jdata = 0; | 66 | gt->gt_new_files_jdata = 0; |
67 | gt->gt_max_readahead = 1 << 18; | 67 | gt->gt_max_readahead = 1 << 18; |
68 | gt->gt_stall_secs = 600; | 68 | gt->gt_stall_secs = 600; |
69 | gt->gt_complain_secs = 10; | 69 | gt->gt_complain_secs = 10; |
70 | gt->gt_statfs_quantum = 30; | ||
71 | gt->gt_statfs_slow = 0; | ||
72 | } | 70 | } |
73 | 71 | ||
74 | static struct gfs2_sbd *init_sbd(struct super_block *sb) | 72 | static struct gfs2_sbd *init_sbd(struct super_block *sb) |
@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp) | |||
1114 | * Returns: errno | 1112 | * Returns: errno |
1115 | */ | 1113 | */ |
1116 | 1114 | ||
1117 | static int fill_super(struct super_block *sb, void *data, int silent) | 1115 | static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent) |
1118 | { | 1116 | { |
1119 | struct gfs2_sbd *sdp; | 1117 | struct gfs2_sbd *sdp; |
1120 | struct gfs2_holder mount_gh; | 1118 | struct gfs2_holder mount_gh; |
@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent) | |||
1125 | printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); | 1123 | printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); |
1126 | return -ENOMEM; | 1124 | return -ENOMEM; |
1127 | } | 1125 | } |
1128 | 1126 | sdp->sd_args = *args; | |
1129 | sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT; | ||
1130 | sdp->sd_args.ar_data = GFS2_DATA_DEFAULT; | ||
1131 | sdp->sd_args.ar_commit = 60; | ||
1132 | sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT; | ||
1133 | |||
1134 | error = gfs2_mount_args(sdp, &sdp->sd_args, data); | ||
1135 | if (error) { | ||
1136 | printk(KERN_WARNING "GFS2: can't parse mount arguments\n"); | ||
1137 | goto fail; | ||
1138 | } | ||
1139 | 1127 | ||
1140 | if (sdp->sd_args.ar_spectator) { | 1128 | if (sdp->sd_args.ar_spectator) { |
1141 | sb->s_flags |= MS_RDONLY; | 1129 | sb->s_flags |= MS_RDONLY; |
@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent) | |||
1143 | } | 1131 | } |
1144 | if (sdp->sd_args.ar_posix_acl) | 1132 | if (sdp->sd_args.ar_posix_acl) |
1145 | sb->s_flags |= MS_POSIXACL; | 1133 | sb->s_flags |= MS_POSIXACL; |
1134 | if (sdp->sd_args.ar_nobarrier) | ||
1135 | set_bit(SDF_NOBARRIERS, &sdp->sd_flags); | ||
1146 | 1136 | ||
1147 | sb->s_magic = GFS2_MAGIC; | 1137 | sb->s_magic = GFS2_MAGIC; |
1148 | sb->s_op = &gfs2_super_ops; | 1138 | sb->s_op = &gfs2_super_ops; |
1149 | sb->s_export_op = &gfs2_export_ops; | 1139 | sb->s_export_op = &gfs2_export_ops; |
1150 | sb->s_xattr = gfs2_xattr_handlers; | 1140 | sb->s_xattr = gfs2_xattr_handlers; |
1141 | sb->s_qcop = &gfs2_quotactl_ops; | ||
1142 | sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; | ||
1151 | sb->s_time_gran = 1; | 1143 | sb->s_time_gran = 1; |
1152 | sb->s_maxbytes = MAX_LFS_FILESIZE; | 1144 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
1153 | 1145 | ||
@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent) | |||
1160 | sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; | 1152 | sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; |
1161 | 1153 | ||
1162 | sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; | 1154 | sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; |
1155 | sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum; | ||
1156 | if (sdp->sd_args.ar_statfs_quantum) { | ||
1157 | sdp->sd_tune.gt_statfs_slow = 0; | ||
1158 | sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum; | ||
1159 | } | ||
1160 | else { | ||
1161 | sdp->sd_tune.gt_statfs_slow = 1; | ||
1162 | sdp->sd_tune.gt_statfs_quantum = 30; | ||
1163 | } | ||
1163 | 1164 | ||
1164 | error = init_names(sdp, silent); | 1165 | error = init_names(sdp, silent); |
1165 | if (error) | 1166 | if (error) |
@@ -1243,18 +1244,127 @@ fail: | |||
1243 | return error; | 1244 | return error; |
1244 | } | 1245 | } |
1245 | 1246 | ||
1246 | static int gfs2_get_sb(struct file_system_type *fs_type, int flags, | 1247 | static int set_gfs2_super(struct super_block *s, void *data) |
1247 | const char *dev_name, void *data, struct vfsmount *mnt) | ||
1248 | { | 1248 | { |
1249 | return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt); | 1249 | s->s_bdev = data; |
1250 | s->s_dev = s->s_bdev->bd_dev; | ||
1251 | |||
1252 | /* | ||
1253 | * We set the bdi here to the queue backing, file systems can | ||
1254 | * overwrite this in ->fill_super() | ||
1255 | */ | ||
1256 | s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; | ||
1257 | return 0; | ||
1250 | } | 1258 | } |
1251 | 1259 | ||
1252 | static int test_meta_super(struct super_block *s, void *ptr) | 1260 | static int test_gfs2_super(struct super_block *s, void *ptr) |
1253 | { | 1261 | { |
1254 | struct block_device *bdev = ptr; | 1262 | struct block_device *bdev = ptr; |
1255 | return (bdev == s->s_bdev); | 1263 | return (bdev == s->s_bdev); |
1256 | } | 1264 | } |
1257 | 1265 | ||
1266 | /** | ||
1267 | * gfs2_get_sb - Get the GFS2 superblock | ||
1268 | * @fs_type: The GFS2 filesystem type | ||
1269 | * @flags: Mount flags | ||
1270 | * @dev_name: The name of the device | ||
1271 | * @data: The mount arguments | ||
1272 | * @mnt: The vfsmnt for this mount | ||
1273 | * | ||
1274 | * Q. Why not use get_sb_bdev() ? | ||
1275 | * A. We need to select one of two root directories to mount, independent | ||
1276 | * of whether this is the initial, or subsequent, mount of this sb | ||
1277 | * | ||
1278 | * Returns: 0 or -ve on error | ||
1279 | */ | ||
1280 | |||
1281 | static int gfs2_get_sb(struct file_system_type *fs_type, int flags, | ||
1282 | const char *dev_name, void *data, struct vfsmount *mnt) | ||
1283 | { | ||
1284 | struct block_device *bdev; | ||
1285 | struct super_block *s; | ||
1286 | fmode_t mode = FMODE_READ; | ||
1287 | int error; | ||
1288 | struct gfs2_args args; | ||
1289 | struct gfs2_sbd *sdp; | ||
1290 | |||
1291 | if (!(flags & MS_RDONLY)) | ||
1292 | mode |= FMODE_WRITE; | ||
1293 | |||
1294 | bdev = open_bdev_exclusive(dev_name, mode, fs_type); | ||
1295 | if (IS_ERR(bdev)) | ||
1296 | return PTR_ERR(bdev); | ||
1297 | |||
1298 | /* | ||
1299 | * once the super is inserted into the list by sget, s_umount | ||
1300 | * will protect the lockfs code from trying to start a snapshot | ||
1301 | * while we are mounting | ||
1302 | */ | ||
1303 | mutex_lock(&bdev->bd_fsfreeze_mutex); | ||
1304 | if (bdev->bd_fsfreeze_count > 0) { | ||
1305 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | ||
1306 | error = -EBUSY; | ||
1307 | goto error_bdev; | ||
1308 | } | ||
1309 | s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev); | ||
1310 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | ||
1311 | error = PTR_ERR(s); | ||
1312 | if (IS_ERR(s)) | ||
1313 | goto error_bdev; | ||
1314 | |||
1315 | memset(&args, 0, sizeof(args)); | ||
1316 | args.ar_quota = GFS2_QUOTA_DEFAULT; | ||
1317 | args.ar_data = GFS2_DATA_DEFAULT; | ||
1318 | args.ar_commit = 60; | ||
1319 | args.ar_statfs_quantum = 30; | ||
1320 | args.ar_quota_quantum = 60; | ||
1321 | args.ar_errors = GFS2_ERRORS_DEFAULT; | ||
1322 | |||
1323 | error = gfs2_mount_args(&args, data); | ||
1324 | if (error) { | ||
1325 | printk(KERN_WARNING "GFS2: can't parse mount arguments\n"); | ||
1326 | if (s->s_root) | ||
1327 | goto error_super; | ||
1328 | deactivate_locked_super(s); | ||
1329 | return error; | ||
1330 | } | ||
1331 | |||
1332 | if (s->s_root) { | ||
1333 | error = -EBUSY; | ||
1334 | if ((flags ^ s->s_flags) & MS_RDONLY) | ||
1335 | goto error_super; | ||
1336 | close_bdev_exclusive(bdev, mode); | ||
1337 | } else { | ||
1338 | char b[BDEVNAME_SIZE]; | ||
1339 | |||
1340 | s->s_flags = flags; | ||
1341 | s->s_mode = mode; | ||
1342 | strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); | ||
1343 | sb_set_blocksize(s, block_size(bdev)); | ||
1344 | error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0); | ||
1345 | if (error) { | ||
1346 | deactivate_locked_super(s); | ||
1347 | return error; | ||
1348 | } | ||
1349 | s->s_flags |= MS_ACTIVE; | ||
1350 | bdev->bd_super = s; | ||
1351 | } | ||
1352 | |||
1353 | sdp = s->s_fs_info; | ||
1354 | mnt->mnt_sb = s; | ||
1355 | if (args.ar_meta) | ||
1356 | mnt->mnt_root = dget(sdp->sd_master_dir); | ||
1357 | else | ||
1358 | mnt->mnt_root = dget(sdp->sd_root_dir); | ||
1359 | return 0; | ||
1360 | |||
1361 | error_super: | ||
1362 | deactivate_locked_super(s); | ||
1363 | error_bdev: | ||
1364 | close_bdev_exclusive(bdev, mode); | ||
1365 | return error; | ||
1366 | } | ||
1367 | |||
1258 | static int set_meta_super(struct super_block *s, void *ptr) | 1368 | static int set_meta_super(struct super_block *s, void *ptr) |
1259 | { | 1369 | { |
1260 | return -EINVAL; | 1370 | return -EINVAL; |
@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags, | |||
1274 | dev_name, error); | 1384 | dev_name, error); |
1275 | return error; | 1385 | return error; |
1276 | } | 1386 | } |
1277 | s = sget(&gfs2_fs_type, test_meta_super, set_meta_super, | 1387 | s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, |
1278 | path.dentry->d_inode->i_sb->s_bdev); | 1388 | path.dentry->d_inode->i_sb->s_bdev); |
1279 | path_put(&path); | 1389 | path_put(&path); |
1280 | if (IS_ERR(s)) { | 1390 | if (IS_ERR(s)) { |
1281 | printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); | 1391 | printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); |
1282 | return PTR_ERR(s); | 1392 | return PTR_ERR(s); |
1283 | } | 1393 | } |
1394 | if ((flags ^ s->s_flags) & MS_RDONLY) { | ||
1395 | deactivate_locked_super(s); | ||
1396 | return -EBUSY; | ||
1397 | } | ||
1284 | sdp = s->s_fs_info; | 1398 | sdp = s->s_fs_info; |
1285 | mnt->mnt_sb = s; | 1399 | mnt->mnt_sb = s; |
1286 | mnt->mnt_root = dget(sdp->sd_master_dir); | 1400 | mnt->mnt_root = dget(sdp->sd_master_dir); |
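
gfs2_get_sb() now open-codes what get_sb_bdev() used to do, because after the superblock exists it still has to choose between two root dentries (sd_master_dir for meta mounts, sd_root_dir otherwise). The heart of it is the sget() idiom: a test callback that matches an existing superblock for the same block device and a set callback that initialises a fresh one. A compressed, hedged sketch of the control flow with error handling trimmed; the real function above also honours bd_fsfreeze_count and parses mount arguments first:

/* Sketch of the sget()-based mount path (not complete code). */
static int mount_sketch(struct file_system_type *fs, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	fmode_t mode = (flags & MS_RDONLY) ? FMODE_READ : FMODE_READ | FMODE_WRITE;
	struct block_device *bdev = open_bdev_exclusive(dev_name, mode, fs);
	struct super_block *s;

	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* Find an existing superblock for this bdev, or allocate a new one. */
	s = sget(fs, test_gfs2_super, set_gfs2_super, bdev);
	if (IS_ERR(s)) {
		close_bdev_exclusive(bdev, mode);
		return PTR_ERR(s);
	}

	if (s->s_root) {
		/* Already mounted: the extra device reference is not needed. */
		close_bdev_exclusive(bdev, mode);
	} else {
		/* First mount: initialise before making the sb visible. */
		s->s_flags = flags;
		s->s_mode = mode;
		sb_set_blocksize(s, block_size(bdev));
		/* fill_super(s, &args, silent) runs here and sets s->s_root */
		s->s_flags |= MS_ACTIVE;
	}

	mnt->mnt_sb = s;
	mnt->mnt_root = dget(s->s_root);  /* real code picks master vs. root dir */
	return 0;
}
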
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 2e9b9326bfc9..e3bf6eab8750 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * fuzziness in the current usage value of IDs that are being used on different | 15 | * fuzziness in the current usage value of IDs that are being used on different |
16 | * nodes in the cluster simultaneously. So, it is possible for a user on | 16 | * nodes in the cluster simultaneously. So, it is possible for a user on |
17 | * multiple nodes to overrun their quota, but that overrun is controlable. | 17 | * multiple nodes to overrun their quota, but that overrun is controlable. |
18 | * Since quota tags are part of transactions, there is no need to a quota check | 18 | * Since quota tags are part of transactions, there is no need for a quota check |
19 | * program to be run on node crashes or anything like that. | 19 | * program to be run on node crashes or anything like that. |
20 | * | 20 | * |
21 | * There are couple of knobs that let the administrator manage the quota | 21 | * There are couple of knobs that let the administrator manage the quota |
@@ -47,6 +47,8 @@ | |||
47 | #include <linux/gfs2_ondisk.h> | 47 | #include <linux/gfs2_ondisk.h> |
48 | #include <linux/kthread.h> | 48 | #include <linux/kthread.h> |
49 | #include <linux/freezer.h> | 49 | #include <linux/freezer.h> |
50 | #include <linux/quota.h> | ||
51 | #include <linux/dqblk_xfs.h> | ||
50 | 52 | ||
51 | #include "gfs2.h" | 53 | #include "gfs2.h" |
52 | #include "incore.h" | 54 | #include "incore.h" |
@@ -65,13 +67,6 @@ | |||
65 | #define QUOTA_USER 1 | 67 | #define QUOTA_USER 1 |
66 | #define QUOTA_GROUP 0 | 68 | #define QUOTA_GROUP 0 |
67 | 69 | ||
68 | struct gfs2_quota_host { | ||
69 | u64 qu_limit; | ||
70 | u64 qu_warn; | ||
71 | s64 qu_value; | ||
72 | u32 qu_ll_next; | ||
73 | }; | ||
74 | |||
75 | struct gfs2_quota_change_host { | 70 | struct gfs2_quota_change_host { |
76 | u64 qc_change; | 71 | u64 qc_change; |
77 | u32 qc_flags; /* GFS2_QCF_... */ | 72 | u32 qc_flags; /* GFS2_QCF_... */ |
@@ -164,7 +159,7 @@ fail: | |||
164 | return error; | 159 | return error; |
165 | } | 160 | } |
166 | 161 | ||
167 | static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, | 162 | static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, |
168 | struct gfs2_quota_data **qdp) | 163 | struct gfs2_quota_data **qdp) |
169 | { | 164 | { |
170 | struct gfs2_quota_data *qd = NULL, *new_qd = NULL; | 165 | struct gfs2_quota_data *qd = NULL, *new_qd = NULL; |
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, | |||
202 | 197 | ||
203 | spin_unlock(&qd_lru_lock); | 198 | spin_unlock(&qd_lru_lock); |
204 | 199 | ||
205 | if (qd || !create) { | 200 | if (qd) { |
206 | if (new_qd) { | 201 | if (new_qd) { |
207 | gfs2_glock_put(new_qd->qd_gl); | 202 | gfs2_glock_put(new_qd->qd_gl); |
208 | kmem_cache_free(gfs2_quotad_cachep, new_qd); | 203 | kmem_cache_free(gfs2_quotad_cachep, new_qd); |
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd) | |||
461 | qd_put(qd); | 456 | qd_put(qd); |
462 | } | 457 | } |
463 | 458 | ||
464 | static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create, | 459 | static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, |
465 | struct gfs2_quota_data **qdp) | 460 | struct gfs2_quota_data **qdp) |
466 | { | 461 | { |
467 | int error; | 462 | int error; |
468 | 463 | ||
469 | error = qd_get(sdp, user, id, create, qdp); | 464 | error = qd_get(sdp, user, id, qdp); |
470 | if (error) | 465 | if (error) |
471 | return error; | 466 | return error; |
472 | 467 | ||
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid) | |||
508 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) | 503 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) |
509 | return 0; | 504 | return 0; |
510 | 505 | ||
511 | error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd); | 506 | error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd); |
512 | if (error) | 507 | if (error) |
513 | goto out; | 508 | goto out; |
514 | al->al_qd_num++; | 509 | al->al_qd_num++; |
515 | qd++; | 510 | qd++; |
516 | 511 | ||
517 | error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd); | 512 | error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd); |
518 | if (error) | 513 | if (error) |
519 | goto out; | 514 | goto out; |
520 | al->al_qd_num++; | 515 | al->al_qd_num++; |
521 | qd++; | 516 | qd++; |
522 | 517 | ||
523 | if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { | 518 | if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { |
524 | error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); | 519 | error = qdsb_get(sdp, QUOTA_USER, uid, qd); |
525 | if (error) | 520 | if (error) |
526 | goto out; | 521 | goto out; |
527 | al->al_qd_num++; | 522 | al->al_qd_num++; |
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid) | |||
529 | } | 524 | } |
530 | 525 | ||
531 | if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { | 526 | if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { |
532 | error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); | 527 | error = qdsb_get(sdp, QUOTA_GROUP, gid, qd); |
533 | if (error) | 528 | if (error) |
534 | goto out; | 529 | goto out; |
535 | al->al_qd_num++; | 530 | al->al_qd_num++; |
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) | |||
617 | mutex_unlock(&sdp->sd_quota_mutex); | 612 | mutex_unlock(&sdp->sd_quota_mutex); |
618 | } | 613 | } |
619 | 614 | ||
620 | static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf) | ||
621 | { | ||
622 | const struct gfs2_quota *str = buf; | ||
623 | |||
624 | qu->qu_limit = be64_to_cpu(str->qu_limit); | ||
625 | qu->qu_warn = be64_to_cpu(str->qu_warn); | ||
626 | qu->qu_value = be64_to_cpu(str->qu_value); | ||
627 | qu->qu_ll_next = be32_to_cpu(str->qu_ll_next); | ||
628 | } | ||
629 | |||
630 | static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf) | ||
631 | { | ||
632 | struct gfs2_quota *str = buf; | ||
633 | |||
634 | str->qu_limit = cpu_to_be64(qu->qu_limit); | ||
635 | str->qu_warn = cpu_to_be64(qu->qu_warn); | ||
636 | str->qu_value = cpu_to_be64(qu->qu_value); | ||
637 | str->qu_ll_next = cpu_to_be32(qu->qu_ll_next); | ||
638 | memset(&str->qu_reserved, 0, sizeof(str->qu_reserved)); | ||
639 | } | ||
640 | |||
641 | /** | 615 | /** |
642 | * gfs2_adjust_quota | 616 | * gfs2_adjust_quota - adjust record of current block usage |
617 | * @ip: The quota inode | ||
618 | * @loc: Offset of the entry in the quota file | ||
619 | * @change: The amount of usage change to record | ||
620 | * @qd: The quota data | ||
621 | * @fdq: The updated limits to record | ||
643 | * | 622 | * |
644 | * This function was mostly borrowed from gfs2_block_truncate_page which was | 623 | * This function was mostly borrowed from gfs2_block_truncate_page which was |
645 | * in turn mostly borrowed from ext3 | 624 | * in turn mostly borrowed from ext3 |
625 | * | ||
626 | * Returns: 0 or -ve on error | ||
646 | */ | 627 | */ |
628 | |||
647 | static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, | 629 | static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, |
648 | s64 change, struct gfs2_quota_data *qd) | 630 | s64 change, struct gfs2_quota_data *qd, |
631 | struct fs_disk_quota *fdq) | ||
649 | { | 632 | { |
650 | struct inode *inode = &ip->i_inode; | 633 | struct inode *inode = &ip->i_inode; |
651 | struct address_space *mapping = inode->i_mapping; | 634 | struct address_space *mapping = inode->i_mapping; |
652 | unsigned long index = loc >> PAGE_CACHE_SHIFT; | 635 | unsigned long index = loc >> PAGE_CACHE_SHIFT; |
653 | unsigned offset = loc & (PAGE_CACHE_SIZE - 1); | 636 | unsigned offset = loc & (PAGE_CACHE_SIZE - 1); |
654 | unsigned blocksize, iblock, pos; | 637 | unsigned blocksize, iblock, pos; |
655 | struct buffer_head *bh; | 638 | struct buffer_head *bh, *dibh; |
656 | struct page *page; | 639 | struct page *page; |
657 | void *kaddr; | 640 | void *kaddr; |
658 | char *ptr; | 641 | struct gfs2_quota *qp; |
659 | struct gfs2_quota_host qp; | ||
660 | s64 value; | 642 | s64 value; |
661 | int err = -EIO; | 643 | int err = -EIO; |
644 | u64 size; | ||
662 | 645 | ||
663 | if (gfs2_is_stuffed(ip)) | 646 | if (gfs2_is_stuffed(ip)) |
664 | gfs2_unstuff_dinode(ip, NULL); | 647 | gfs2_unstuff_dinode(ip, NULL); |
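
The gfs2_quota_host staging struct and its in/out converters are gone; as the following hunk shows, gfs2_adjust_quota() now edits struct gfs2_quota in place through the mapped page and does the endian conversion inline. A tiny sketch of that in-place update, with the field names taken from the hunk below:

/* Sketch: bump the on-disk usage value directly, no host-order staging copy. */
static void adjust_value_sketch(struct gfs2_quota *qp, s64 change)
{
	s64 value = (s64)be64_to_cpu(qp->qu_value) + change;

	qp->qu_value = cpu_to_be64(value);
}
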
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, | |||
700 | gfs2_trans_add_bh(ip->i_gl, bh, 0); | 683 | gfs2_trans_add_bh(ip->i_gl, bh, 0); |
701 | 684 | ||
702 | kaddr = kmap_atomic(page, KM_USER0); | 685 | kaddr = kmap_atomic(page, KM_USER0); |
703 | ptr = kaddr + offset; | 686 | qp = kaddr + offset; |
704 | gfs2_quota_in(&qp, ptr); | 687 | value = (s64)be64_to_cpu(qp->qu_value) + change; |
705 | qp.qu_value += change; | 688 | qp->qu_value = cpu_to_be64(value); |
706 | value = qp.qu_value; | 689 | qd->qd_qb.qb_value = qp->qu_value; |
707 | gfs2_quota_out(&qp, ptr); | 690 | if (fdq) { |
691 | if (fdq->d_fieldmask & FS_DQ_BSOFT) { | ||
692 | qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); | ||
693 | qd->qd_qb.qb_warn = qp->qu_warn; | ||
694 | } | ||
695 | if (fdq->d_fieldmask & FS_DQ_BHARD) { | ||
696 | qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); | ||
697 | qd->qd_qb.qb_limit = qp->qu_limit; | ||
698 | } | ||
699 | } | ||
708 | flush_dcache_page(page); | 700 | flush_dcache_page(page); |
709 | kunmap_atomic(kaddr, KM_USER0); | 701 | kunmap_atomic(kaddr, KM_USER0); |
710 | err = 0; | 702 | |
711 | qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC); | 703 | err = gfs2_meta_inode_buffer(ip, &dibh); |
712 | qd->qd_qb.qb_value = cpu_to_be64(value); | 704 | if (err) |
713 | ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC); | 705 | goto unlock; |
714 | ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value); | 706 | |
707 | size = loc + sizeof(struct gfs2_quota); | ||
708 | if (size > inode->i_size) { | ||
709 | ip->i_disksize = size; | ||
710 | i_size_write(inode, size); | ||
711 | } | ||
712 | inode->i_mtime = inode->i_atime = CURRENT_TIME; | ||
713 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
714 | gfs2_dinode_out(ip, dibh->b_data); | ||
715 | brelse(dibh); | ||
716 | mark_inode_dirty(inode); | ||
717 | |||
715 | unlock: | 718 | unlock: |
716 | unlock_page(page); | 719 | unlock_page(page); |
717 | page_cache_release(page); | 720 | page_cache_release(page); |
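
gfs2_adjust_quota() also gains an optional struct fs_disk_quota argument so the XFS-style quotactl path can update the warn and hard limits in the same on-disk write; d_fieldmask tells it which limits the caller actually supplied. A hedged sketch of that selective update, using the standard fs_disk_quota flags from linux/dqblk_xfs.h:

#include <linux/dqblk_xfs.h>

/* Sketch: apply only the limits the caller marked in d_fieldmask. */
static void apply_limits_sketch(struct gfs2_quota *qp,
				const struct fs_disk_quota *fdq)
{
	if (fdq->d_fieldmask & FS_DQ_BSOFT)
		qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
	if (fdq->d_fieldmask & FS_DQ_BHARD)
		qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
}
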
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) | |||
739 | return -ENOMEM; | 742 | return -ENOMEM; |
740 | 743 | ||
741 | sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); | 744 | sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); |
745 | mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA); | ||
742 | for (qx = 0; qx < num_qd; qx++) { | 746 | for (qx = 0; qx < num_qd; qx++) { |
743 | error = gfs2_glock_nq_init(qda[qx]->qd_gl, | 747 | error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, |
744 | LM_ST_EXCLUSIVE, | ||
745 | GL_NOCACHE, &ghs[qx]); | 748 | GL_NOCACHE, &ghs[qx]); |
746 | if (error) | 749 | if (error) |
747 | goto out; | 750 | goto out; |
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) | |||
795 | for (x = 0; x < num_qd; x++) { | 798 | for (x = 0; x < num_qd; x++) { |
796 | qd = qda[x]; | 799 | qd = qda[x]; |
797 | offset = qd2offset(qd); | 800 | offset = qd2offset(qd); |
798 | error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, | 801 | error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL); |
799 | (struct gfs2_quota_data *) | ||
800 | qd); | ||
801 | if (error) | 802 | if (error) |
802 | goto out_end_trans; | 803 | goto out_end_trans; |
803 | 804 | ||
@@ -817,21 +818,44 @@ out_gunlock: | |||
817 | out: | 818 | out: |
818 | while (qx--) | 819 | while (qx--) |
819 | gfs2_glock_dq_uninit(&ghs[qx]); | 820 | gfs2_glock_dq_uninit(&ghs[qx]); |
821 | mutex_unlock(&ip->i_inode.i_mutex); | ||
820 | kfree(ghs); | 822 | kfree(ghs); |
821 | gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); | 823 | gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); |
822 | return error; | 824 | return error; |
823 | } | 825 | } |
824 | 826 | ||
827 | static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) | ||
828 | { | ||
829 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); | ||
830 | struct gfs2_quota q; | ||
831 | struct gfs2_quota_lvb *qlvb; | ||
832 | loff_t pos; | ||
833 | int error; | ||
834 | |||
835 | memset(&q, 0, sizeof(struct gfs2_quota)); | ||
836 | pos = qd2offset(qd); | ||
837 | error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q)); | ||
838 | if (error < 0) | ||
839 | return error; | ||
840 | |||
841 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | ||
842 | qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); | ||
843 | qlvb->__pad = 0; | ||
844 | qlvb->qb_limit = q.qu_limit; | ||
845 | qlvb->qb_warn = q.qu_warn; | ||
846 | qlvb->qb_value = q.qu_value; | ||
847 | qd->qd_qb = *qlvb; | ||
848 | |||
849 | return 0; | ||
850 | } | ||
851 | |||
825 | static int do_glock(struct gfs2_quota_data *qd, int force_refresh, | 852 | static int do_glock(struct gfs2_quota_data *qd, int force_refresh, |
826 | struct gfs2_holder *q_gh) | 853 | struct gfs2_holder *q_gh) |
827 | { | 854 | { |
828 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; | 855 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; |
829 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); | 856 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); |
830 | struct gfs2_holder i_gh; | 857 | struct gfs2_holder i_gh; |
831 | struct gfs2_quota_host q; | ||
832 | char buf[sizeof(struct gfs2_quota)]; | ||
833 | int error; | 858 | int error; |
834 | struct gfs2_quota_lvb *qlvb; | ||
835 | 859 | ||
836 | restart: | 860 | restart: |
837 | error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); | 861 | error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); |
@@ -841,11 +865,9 @@ restart: | |||
841 | qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | 865 | qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; |
842 | 866 | ||
843 | if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { | 867 | if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { |
844 | loff_t pos; | ||
845 | gfs2_glock_dq_uninit(q_gh); | 868 | gfs2_glock_dq_uninit(q_gh); |
846 | error = gfs2_glock_nq_init(qd->qd_gl, | 869 | error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, |
847 | LM_ST_EXCLUSIVE, GL_NOCACHE, | 870 | GL_NOCACHE, q_gh); |
848 | q_gh); | ||
849 | if (error) | 871 | if (error) |
850 | return error; | 872 | return error; |
851 | 873 | ||
@@ -853,29 +875,14 @@ restart: | |||
853 | if (error) | 875 | if (error) |
854 | goto fail; | 876 | goto fail; |
855 | 877 | ||
856 | memset(buf, 0, sizeof(struct gfs2_quota)); | 878 | error = update_qd(sdp, qd); |
857 | pos = qd2offset(qd); | 879 | if (error) |
858 | error = gfs2_internal_read(ip, NULL, buf, &pos, | ||
859 | sizeof(struct gfs2_quota)); | ||
860 | if (error < 0) | ||
861 | goto fail_gunlock; | 880 | goto fail_gunlock; |
862 | 881 | ||
863 | gfs2_glock_dq_uninit(&i_gh); | 882 | gfs2_glock_dq_uninit(&i_gh); |
864 | 883 | gfs2_glock_dq_uninit(q_gh); | |
865 | gfs2_quota_in(&q, buf); | 884 | force_refresh = 0; |
866 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | 885 | goto restart; |
867 | qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); | ||
868 | qlvb->__pad = 0; | ||
869 | qlvb->qb_limit = cpu_to_be64(q.qu_limit); | ||
870 | qlvb->qb_warn = cpu_to_be64(q.qu_warn); | ||
871 | qlvb->qb_value = cpu_to_be64(q.qu_value); | ||
872 | qd->qd_qb = *qlvb; | ||
873 | |||
874 | if (gfs2_glock_is_blocking(qd->qd_gl)) { | ||
875 | gfs2_glock_dq_uninit(q_gh); | ||
876 | force_refresh = 0; | ||
877 | goto restart; | ||
878 | } | ||
879 | } | 886 | } |
880 | 887 | ||
881 | return 0; | 888 | return 0; |
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type) | |||
995 | { | 1002 | { |
996 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; | 1003 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; |
997 | 1004 | ||
998 | printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n", | 1005 | printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n", |
999 | sdp->sd_fsname, type, | 1006 | sdp->sd_fsname, type, |
1000 | (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", | 1007 | (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", |
1001 | qd->qd_id); | 1008 | qd->qd_id); |
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid) | |||
1032 | 1039 | ||
1033 | if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { | 1040 | if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { |
1034 | print_message(qd, "exceeded"); | 1041 | print_message(qd, "exceeded"); |
1042 | quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ? | ||
1043 | USRQUOTA : GRPQUOTA, qd->qd_id, | ||
1044 | sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN); | ||
1045 | |||
1035 | error = -EDQUOT; | 1046 | error = -EDQUOT; |
1036 | break; | 1047 | break; |
1037 | } else if (be64_to_cpu(qd->qd_qb.qb_warn) && | 1048 | } else if (be64_to_cpu(qd->qd_qb.qb_warn) && |
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid) | |||
1039 | time_after_eq(jiffies, qd->qd_last_warn + | 1050 | time_after_eq(jiffies, qd->qd_last_warn + |
1040 | gfs2_tune_get(sdp, | 1051 | gfs2_tune_get(sdp, |
1041 | gt_quota_warn_period) * HZ)) { | 1052 | gt_quota_warn_period) * HZ)) { |
1053 | quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ? | ||
1054 | USRQUOTA : GRPQUOTA, qd->qd_id, | ||
1055 | sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); | ||
1042 | error = print_message(qd, "warning"); | 1056 | error = print_message(qd, "warning"); |
1043 | qd->qd_last_warn = jiffies; | 1057 | qd->qd_last_warn = jiffies; |
1044 | } | 1058 | } |
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, | |||
1069 | } | 1083 | } |
1070 | } | 1084 | } |
1071 | 1085 | ||
1072 | int gfs2_quota_sync(struct gfs2_sbd *sdp) | 1086 | int gfs2_quota_sync(struct super_block *sb, int type) |
1073 | { | 1087 | { |
1088 | struct gfs2_sbd *sdp = sb->s_fs_info; | ||
1074 | struct gfs2_quota_data **qda; | 1089 | struct gfs2_quota_data **qda; |
1075 | unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); | 1090 | unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); |
1076 | unsigned int num_qd; | 1091 | unsigned int num_qd; |
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) | |||
1118 | struct gfs2_holder q_gh; | 1133 | struct gfs2_holder q_gh; |
1119 | int error; | 1134 | int error; |
1120 | 1135 | ||
1121 | error = qd_get(sdp, user, id, CREATE, &qd); | 1136 | error = qd_get(sdp, user, id, &qd); |
1122 | if (error) | 1137 | if (error) |
1123 | return error; | 1138 | return error; |
1124 | 1139 | ||
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) | |||
1127 | gfs2_glock_dq_uninit(&q_gh); | 1142 | gfs2_glock_dq_uninit(&q_gh); |
1128 | 1143 | ||
1129 | qd_put(qd); | 1144 | qd_put(qd); |
1130 | |||
1131 | return error; | 1145 | return error; |
1132 | } | 1146 | } |
1133 | 1147 | ||
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error) | |||
1298 | } | 1312 | } |
1299 | 1313 | ||
1300 | static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, | 1314 | static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, |
1301 | int (*fxn)(struct gfs2_sbd *sdp), | 1315 | int (*fxn)(struct super_block *sb, int type), |
1302 | unsigned long t, unsigned long *timeo, | 1316 | unsigned long t, unsigned long *timeo, |
1303 | unsigned int *new_timeo) | 1317 | unsigned int *new_timeo) |
1304 | { | 1318 | { |
1305 | if (t >= *timeo) { | 1319 | if (t >= *timeo) { |
1306 | int error = fxn(sdp); | 1320 | int error = fxn(sdp->sd_vfs, 0); |
1307 | quotad_error(sdp, msg, error); | 1321 | quotad_error(sdp, msg, error); |
1308 | *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; | 1322 | *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; |
1309 | } else { | 1323 | } else { |
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp) | |||
1330 | } | 1344 | } |
1331 | } | 1345 | } |
1332 | 1346 | ||
1347 | void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) { | ||
1348 | if (!sdp->sd_statfs_force_sync) { | ||
1349 | sdp->sd_statfs_force_sync = 1; | ||
1350 | wake_up(&sdp->sd_quota_wait); | ||
1351 | } | ||
1352 | } | ||
1353 | |||
1354 | |||
1333 | /** | 1355 | /** |
1334 | * gfs2_quotad - Write cached quota changes into the quota file | 1356 | * gfs2_quotad - Write cached quota changes into the quota file |
1335 | * @sdp: Pointer to GFS2 superblock | 1357 | * @sdp: Pointer to GFS2 superblock |
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data) | |||
1349 | while (!kthread_should_stop()) { | 1371 | while (!kthread_should_stop()) { |
1350 | 1372 | ||
1351 | /* Update the master statfs file */ | 1373 | /* Update the master statfs file */ |
1352 | quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, | 1374 | if (sdp->sd_statfs_force_sync) { |
1353 | &statfs_timeo, &tune->gt_statfs_quantum); | 1375 | int error = gfs2_statfs_sync(sdp->sd_vfs, 0); |
1376 | quotad_error(sdp, "statfs", error); | ||
1377 | statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ; | ||
1378 | } | ||
1379 | else | ||
1380 | quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, | ||
1381 | &statfs_timeo, | ||
1382 | &tune->gt_statfs_quantum); | ||
1354 | 1383 | ||
1355 | /* Update quota file */ | 1384 | /* Update quota file */ |
1356 | quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, | 1385 | quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, |
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data) | |||
1367 | spin_lock(&sdp->sd_trunc_lock); | 1396 | spin_lock(&sdp->sd_trunc_lock); |
1368 | empty = list_empty(&sdp->sd_trunc_list); | 1397 | empty = list_empty(&sdp->sd_trunc_list); |
1369 | spin_unlock(&sdp->sd_trunc_lock); | 1398 | spin_unlock(&sdp->sd_trunc_lock); |
1370 | if (empty) | 1399 | if (empty && !sdp->sd_statfs_force_sync) |
1371 | t -= schedule_timeout(t); | 1400 | t -= schedule_timeout(t); |
1372 | else | 1401 | else |
1373 | t = 0; | 1402 | t = 0; |
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data) | |||
1377 | return 0; | 1406 | return 0; |
1378 | } | 1407 | } |
1379 | 1408 | ||
1409 | static int gfs2_quota_get_xstate(struct super_block *sb, | ||
1410 | struct fs_quota_stat *fqs) | ||
1411 | { | ||
1412 | struct gfs2_sbd *sdp = sb->s_fs_info; | ||
1413 | |||
1414 | memset(fqs, 0, sizeof(struct fs_quota_stat)); | ||
1415 | fqs->qs_version = FS_QSTAT_VERSION; | ||
1416 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON) | ||
1417 | fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD); | ||
1418 | else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT) | ||
1419 | fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT); | ||
1420 | if (sdp->sd_quota_inode) { | ||
1421 | fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr; | ||
1422 | fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks; | ||
1423 | } | ||
1424 | fqs->qs_uquota.qfs_nextents = 1; /* unsupported */ | ||
1425 | fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */ | ||
1426 | fqs->qs_incoredqs = atomic_read(&qd_lru_count); | ||
1427 | return 0; | ||
1428 | } | ||
1429 | |||
1430 | static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id, | ||
1431 | struct fs_disk_quota *fdq) | ||
1432 | { | ||
1433 | struct gfs2_sbd *sdp = sb->s_fs_info; | ||
1434 | struct gfs2_quota_lvb *qlvb; | ||
1435 | struct gfs2_quota_data *qd; | ||
1436 | struct gfs2_holder q_gh; | ||
1437 | int error; | ||
1438 | |||
1439 | memset(fdq, 0, sizeof(struct fs_disk_quota)); | ||
1440 | |||
1441 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) | ||
1442 | return -ESRCH; /* Crazy XFS error code */ | ||
1443 | |||
1444 | if (type == USRQUOTA) | ||
1445 | type = QUOTA_USER; | ||
1446 | else if (type == GRPQUOTA) | ||
1447 | type = QUOTA_GROUP; | ||
1448 | else | ||
1449 | return -EINVAL; | ||
1450 | |||
1451 | error = qd_get(sdp, type, id, &qd); | ||
1452 | if (error) | ||
1453 | return error; | ||
1454 | error = do_glock(qd, FORCE, &q_gh); | ||
1455 | if (error) | ||
1456 | goto out; | ||
1457 | |||
1458 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | ||
1459 | fdq->d_version = FS_DQUOT_VERSION; | ||
1460 | fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA; | ||
1461 | fdq->d_id = id; | ||
1462 | fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit); | ||
1463 | fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn); | ||
1464 | fdq->d_bcount = be64_to_cpu(qlvb->qb_value); | ||
1465 | |||
1466 | gfs2_glock_dq_uninit(&q_gh); | ||
1467 | out: | ||
1468 | qd_put(qd); | ||
1469 | return error; | ||
1470 | } | ||
1471 | |||
1472 | /* GFS2 only supports a subset of the XFS fields */ | ||
1473 | #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD) | ||
1474 | |||
1475 | static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id, | ||
1476 | struct fs_disk_quota *fdq) | ||
1477 | { | ||
1478 | struct gfs2_sbd *sdp = sb->s_fs_info; | ||
1479 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); | ||
1480 | struct gfs2_quota_data *qd; | ||
1481 | struct gfs2_holder q_gh, i_gh; | ||
1482 | unsigned int data_blocks, ind_blocks; | ||
1483 | unsigned int blocks = 0; | ||
1484 | int alloc_required; | ||
1485 | struct gfs2_alloc *al; | ||
1486 | loff_t offset; | ||
1487 | int error; | ||
1488 | |||
1489 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) | ||
1490 | return -ESRCH; /* Crazy XFS error code */ | ||
1491 | |||
1492 | switch(type) { | ||
1493 | case USRQUOTA: | ||
1494 | type = QUOTA_USER; | ||
1495 | if (fdq->d_flags != XFS_USER_QUOTA) | ||
1496 | return -EINVAL; | ||
1497 | break; | ||
1498 | case GRPQUOTA: | ||
1499 | type = QUOTA_GROUP; | ||
1500 | if (fdq->d_flags != XFS_GROUP_QUOTA) | ||
1501 | return -EINVAL; | ||
1502 | break; | ||
1503 | default: | ||
1504 | return -EINVAL; | ||
1505 | } | ||
1506 | |||
1507 | if (fdq->d_fieldmask & ~GFS2_FIELDMASK) | ||
1508 | return -EINVAL; | ||
1509 | if (fdq->d_id != id) | ||
1510 | return -EINVAL; | ||
1511 | |||
1512 | error = qd_get(sdp, type, id, &qd); | ||
1513 | if (error) | ||
1514 | return error; | ||
1515 | |||
1516 | mutex_lock(&ip->i_inode.i_mutex); | ||
1517 | error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh); | ||
1518 | if (error) | ||
1519 | goto out_put; | ||
1520 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); | ||
1521 | if (error) | ||
1522 | goto out_q; | ||
1523 | |||
1524 | /* Check for existing entry, if none then alloc new blocks */ | ||
1525 | error = update_qd(sdp, qd); | ||
1526 | if (error) | ||
1527 | goto out_i; | ||
1528 | |||
1529 | /* If nothing has changed, this is a no-op */ | ||
1530 | if ((fdq->d_fieldmask & FS_DQ_BSOFT) && | ||
1531 | (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn))) | ||
1532 | fdq->d_fieldmask ^= FS_DQ_BSOFT; | ||
1533 | if ((fdq->d_fieldmask & FS_DQ_BHARD) && | ||
1534 | (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit))) | ||
1535 | fdq->d_fieldmask ^= FS_DQ_BHARD; | ||
1536 | if (fdq->d_fieldmask == 0) | ||
1537 | goto out_i; | ||
1538 | |||
1539 | offset = qd2offset(qd); | ||
1540 | error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota), | ||
1541 | &alloc_required); | ||
1542 | if (error) | ||
1543 | goto out_i; | ||
1544 | if (alloc_required) { | ||
1545 | al = gfs2_alloc_get(ip); | ||
1546 | if (al == NULL) | ||
1547 | goto out_i; | ||
1548 | gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), | ||
1549 | &data_blocks, &ind_blocks); | ||
1550 | blocks = al->al_requested = 1 + data_blocks + ind_blocks; | ||
1551 | error = gfs2_inplace_reserve(ip); | ||
1552 | if (error) | ||
1553 | goto out_alloc; | ||
1554 | } | ||
1555 | |||
1556 | error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0); | ||
1557 | if (error) | ||
1558 | goto out_release; | ||
1559 | |||
1560 | /* Apply changes */ | ||
1561 | error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); | ||
1562 | |||
1563 | gfs2_trans_end(sdp); | ||
1564 | out_release: | ||
1565 | if (alloc_required) { | ||
1566 | gfs2_inplace_release(ip); | ||
1567 | out_alloc: | ||
1568 | gfs2_alloc_put(ip); | ||
1569 | } | ||
1570 | out_i: | ||
1571 | gfs2_glock_dq_uninit(&i_gh); | ||
1572 | out_q: | ||
1573 | gfs2_glock_dq_uninit(&q_gh); | ||
1574 | out_put: | ||
1575 | mutex_unlock(&ip->i_inode.i_mutex); | ||
1576 | qd_put(qd); | ||
1577 | return error; | ||
1578 | } | ||
1579 | |||
1580 | const struct quotactl_ops gfs2_quotactl_ops = { | ||
1581 | .quota_sync = gfs2_quota_sync, | ||
1582 | .get_xstate = gfs2_quota_get_xstate, | ||
1583 | .get_xquota = gfs2_xquota_get, | ||
1584 | .set_xquota = gfs2_xquota_set, | ||
1585 | }; | ||
1586 | |||
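The hunks above convert gfs2_quota_sync() to the generic (struct super_block *, int) signature and collect it, together with the new gfs2_quota_get_xstate(), gfs2_xquota_get() and gfs2_xquota_set() helpers, into a quotactl_ops table. A minimal sketch of how that table is expected to be consumed, assuming the superblock is pointed at it somewhere in GFS2's mount path (that hunk is not part of this section); gfs2_wire_up_quotactl() is an illustrative name, not a function from the patch:

/* The VFS quotactl(2) dispatcher routes the XFS-style commands to the
 * callbacks registered on sb->s_qcop, roughly:
 *   Q_XGETQSTAT -> .get_xstate  (gfs2_quota_get_xstate)
 *   Q_XGETQUOTA -> .get_xquota  (gfs2_xquota_get)
 *   Q_XSETQLIM  -> .set_xquota  (gfs2_xquota_set)
 *   Q_SYNC      -> .quota_sync  (gfs2_quota_sync)
 */
static void gfs2_wire_up_quotactl(struct super_block *sb)
{
	sb->s_qcop = &gfs2_quotactl_ops;
}

This is also why the helpers speak in struct fs_disk_quota / fs_quota_stat terms: user space reaches them through the XFS-compatible quotactl(2) commands rather than the classic Q_GETQUOTA path.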
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 0fa5fa63d0e8..e271fa07ad02 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h | |||
@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); | |||
25 | extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, | 25 | extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, |
26 | u32 uid, u32 gid); | 26 | u32 uid, u32 gid); |
27 | 27 | ||
28 | extern int gfs2_quota_sync(struct gfs2_sbd *sdp); | 28 | extern int gfs2_quota_sync(struct super_block *sb, int type); |
29 | extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); | 29 | extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); |
30 | 30 | ||
31 | extern int gfs2_quota_init(struct gfs2_sbd *sdp); | 31 | extern int gfs2_quota_init(struct gfs2_sbd *sdp); |
32 | extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); | 32 | extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); |
33 | extern int gfs2_quotad(void *data); | 33 | extern int gfs2_quotad(void *data); |
34 | 34 | ||
35 | extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp); | ||
36 | |||
35 | static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) | 37 | static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) |
36 | { | 38 | { |
37 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 39 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) | |||
50 | } | 52 | } |
51 | 53 | ||
52 | extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); | 54 | extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); |
55 | extern const struct quotactl_ops gfs2_quotactl_ops; | ||
53 | 56 | ||
54 | #endif /* __QUOTA_DOT_H__ */ | 57 | #endif /* __QUOTA_DOT_H__ */ |
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 09fa31965576..4b9bece3d437 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c | |||
@@ -410,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea | |||
410 | memset(lh, 0, sizeof(struct gfs2_log_header)); | 410 | memset(lh, 0, sizeof(struct gfs2_log_header)); |
411 | lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); | 411 | lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); |
412 | lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); | 412 | lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); |
413 | lh->lh_header.__pad0 = cpu_to_be64(0); | ||
413 | lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); | 414 | lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); |
415 | lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); | ||
414 | lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); | 416 | lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); |
415 | lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); | 417 | lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); |
416 | lh->lh_blkno = cpu_to_be32(lblock); | 418 | lh->lh_blkno = cpu_to_be32(lblock); |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 8f1cfb02a6cb..0608f490c295 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) | |||
1710 | { | 1710 | { |
1711 | struct gfs2_rgrpd *rgd; | 1711 | struct gfs2_rgrpd *rgd; |
1712 | struct gfs2_holder ri_gh, rgd_gh; | 1712 | struct gfs2_holder ri_gh, rgd_gh; |
1713 | struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); | ||
1714 | int ri_locked = 0; | ||
1713 | int error; | 1715 | int error; |
1714 | 1716 | ||
1715 | error = gfs2_rindex_hold(sdp, &ri_gh); | 1717 | if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { |
1716 | if (error) | 1718 | error = gfs2_rindex_hold(sdp, &ri_gh); |
1717 | goto fail; | 1719 | if (error) |
1720 | goto fail; | ||
1721 | ri_locked = 1; | ||
1722 | } | ||
1718 | 1723 | ||
1719 | error = -EINVAL; | 1724 | error = -EINVAL; |
1720 | rgd = gfs2_blk2rgrpd(sdp, no_addr); | 1725 | rgd = gfs2_blk2rgrpd(sdp, no_addr); |
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) | |||
1730 | 1735 | ||
1731 | gfs2_glock_dq_uninit(&rgd_gh); | 1736 | gfs2_glock_dq_uninit(&rgd_gh); |
1732 | fail_rindex: | 1737 | fail_rindex: |
1733 | gfs2_glock_dq_uninit(&ri_gh); | 1738 | if (ri_locked) |
1739 | gfs2_glock_dq_uninit(&ri_gh); | ||
1734 | fail: | 1740 | fail: |
1735 | return error; | 1741 | return error; |
1736 | } | 1742 | } |
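The change above lets gfs2_check_blk_type() run whether or not the caller already holds the rindex glock: it only takes (and therefore only drops) the lock when it was not already held. The same pattern in isolation; do_with_rindex() is an illustrative name, not a function from the patch:

static int do_with_rindex(struct gfs2_sbd *sdp, struct gfs2_inode *rindex_ip)
{
	struct gfs2_holder ri_gh;
	int ri_locked = 0;
	int error = 0;

	if (!gfs2_glock_is_locked_by_me(rindex_ip->i_gl)) {
		error = gfs2_rindex_hold(sdp, &ri_gh);	/* take it ourselves */
		if (error)
			return error;
		ri_locked = 1;
	}

	/* ... work that needs the resource index to be stable ... */

	if (ri_locked)
		gfs2_glock_dq_uninit(&ri_gh);	/* drop only what we took */
	return error;
}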
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 0ec3ec672de1..c282ad41f3d1 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -70,6 +70,11 @@ enum { | |||
70 | Opt_commit, | 70 | Opt_commit, |
71 | Opt_err_withdraw, | 71 | Opt_err_withdraw, |
72 | Opt_err_panic, | 72 | Opt_err_panic, |
73 | Opt_statfs_quantum, | ||
74 | Opt_statfs_percent, | ||
75 | Opt_quota_quantum, | ||
76 | Opt_barrier, | ||
77 | Opt_nobarrier, | ||
73 | Opt_error, | 78 | Opt_error, |
74 | }; | 79 | }; |
75 | 80 | ||
@@ -101,18 +106,23 @@ static const match_table_t tokens = { | |||
101 | {Opt_commit, "commit=%d"}, | 106 | {Opt_commit, "commit=%d"}, |
102 | {Opt_err_withdraw, "errors=withdraw"}, | 107 | {Opt_err_withdraw, "errors=withdraw"}, |
103 | {Opt_err_panic, "errors=panic"}, | 108 | {Opt_err_panic, "errors=panic"}, |
109 | {Opt_statfs_quantum, "statfs_quantum=%d"}, | ||
110 | {Opt_statfs_percent, "statfs_percent=%d"}, | ||
111 | {Opt_quota_quantum, "quota_quantum=%d"}, | ||
112 | {Opt_barrier, "barrier"}, | ||
113 | {Opt_nobarrier, "nobarrier"}, | ||
104 | {Opt_error, NULL} | 114 | {Opt_error, NULL} |
105 | }; | 115 | }; |
106 | 116 | ||
107 | /** | 117 | /** |
108 | * gfs2_mount_args - Parse mount options | 118 | * gfs2_mount_args - Parse mount options |
109 | * @sdp: | 119 | * @args: The structure into which the parsed options will be written |
110 | * @data: | 120 | * @options: The options to parse |
111 | * | 121 | * |
112 | * Return: errno | 122 | * Return: errno |
113 | */ | 123 | */ |
114 | 124 | ||
115 | int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) | 125 | int gfs2_mount_args(struct gfs2_args *args, char *options) |
116 | { | 126 | { |
117 | char *o; | 127 | char *o; |
118 | int token; | 128 | int token; |
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) | |||
157 | break; | 167 | break; |
158 | case Opt_debug: | 168 | case Opt_debug: |
159 | if (args->ar_errors == GFS2_ERRORS_PANIC) { | 169 | if (args->ar_errors == GFS2_ERRORS_PANIC) { |
160 | fs_info(sdp, "-o debug and -o errors=panic " | 170 | printk(KERN_WARNING "GFS2: -o debug and -o errors=panic " |
161 | "are mutually exclusive.\n"); | 171 | "are mutually exclusive.\n"); |
162 | return -EINVAL; | 172 | return -EINVAL; |
163 | } | 173 | } |
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) | |||
210 | case Opt_commit: | 220 | case Opt_commit: |
211 | rv = match_int(&tmp[0], &args->ar_commit); | 221 | rv = match_int(&tmp[0], &args->ar_commit); |
212 | if (rv || args->ar_commit <= 0) { | 222 | if (rv || args->ar_commit <= 0) { |
213 | fs_info(sdp, "commit mount option requires a positive numeric argument\n"); | 223 | printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n"); |
224 | return rv ? rv : -EINVAL; | ||
225 | } | ||
226 | break; | ||
227 | case Opt_statfs_quantum: | ||
228 | rv = match_int(&tmp[0], &args->ar_statfs_quantum); | ||
229 | if (rv || args->ar_statfs_quantum < 0) { | ||
230 | printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n"); | ||
231 | return rv ? rv : -EINVAL; | ||
232 | } | ||
233 | break; | ||
234 | case Opt_quota_quantum: | ||
235 | rv = match_int(&tmp[0], &args->ar_quota_quantum); | ||
236 | if (rv || args->ar_quota_quantum <= 0) { | ||
237 | printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n"); | ||
238 | return rv ? rv : -EINVAL; | ||
239 | } | ||
240 | break; | ||
241 | case Opt_statfs_percent: | ||
242 | rv = match_int(&tmp[0], &args->ar_statfs_percent); | ||
243 | if (rv || args->ar_statfs_percent < 0 || | ||
244 | args->ar_statfs_percent > 100) { | ||
245 | printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n"); | ||
214 | return rv ? rv : -EINVAL; | 246 | return rv ? rv : -EINVAL; |
215 | } | 247 | } |
216 | break; | 248 | break; |
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) | |||
219 | break; | 251 | break; |
220 | case Opt_err_panic: | 252 | case Opt_err_panic: |
221 | if (args->ar_debug) { | 253 | if (args->ar_debug) { |
222 | fs_info(sdp, "-o debug and -o errors=panic " | 254 | printk(KERN_WARNING "GFS2: -o debug and -o errors=panic " |
223 | "are mutually exclusive.\n"); | 255 | "are mutually exclusive.\n"); |
224 | return -EINVAL; | 256 | return -EINVAL; |
225 | } | 257 | } |
226 | args->ar_errors = GFS2_ERRORS_PANIC; | 258 | args->ar_errors = GFS2_ERRORS_PANIC; |
227 | break; | 259 | break; |
260 | case Opt_barrier: | ||
261 | args->ar_nobarrier = 0; | ||
262 | break; | ||
263 | case Opt_nobarrier: | ||
264 | args->ar_nobarrier = 1; | ||
265 | break; | ||
228 | case Opt_error: | 266 | case Opt_error: |
229 | default: | 267 | default: |
230 | fs_info(sdp, "invalid mount option: %s\n", o); | 268 | printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o); |
231 | return -EINVAL; | 269 | return -EINVAL; |
232 | } | 270 | } |
233 | } | 271 | } |
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, | |||
442 | { | 480 | { |
443 | struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); | 481 | struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); |
444 | struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; | 482 | struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; |
483 | struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; | ||
445 | struct buffer_head *l_bh; | 484 | struct buffer_head *l_bh; |
485 | s64 x, y; | ||
486 | int need_sync = 0; | ||
446 | int error; | 487 | int error; |
447 | 488 | ||
448 | error = gfs2_meta_inode_buffer(l_ip, &l_bh); | 489 | error = gfs2_meta_inode_buffer(l_ip, &l_bh); |
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, | |||
456 | l_sc->sc_free += free; | 497 | l_sc->sc_free += free; |
457 | l_sc->sc_dinodes += dinodes; | 498 | l_sc->sc_dinodes += dinodes; |
458 | gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); | 499 | gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); |
500 | if (sdp->sd_args.ar_statfs_percent) { | ||
501 | x = 100 * l_sc->sc_free; | ||
502 | y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent; | ||
503 | if (x >= y || x <= -y) | ||
504 | need_sync = 1; | ||
505 | } | ||
459 | spin_unlock(&sdp->sd_statfs_spin); | 506 | spin_unlock(&sdp->sd_statfs_spin); |
460 | 507 | ||
461 | brelse(l_bh); | 508 | brelse(l_bh); |
509 | if (need_sync) | ||
510 | gfs2_wake_up_statfs(sdp); | ||
462 | } | 511 | } |
463 | 512 | ||
464 | void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, | 513 | void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, |
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, | |||
484 | gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); | 533 | gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); |
485 | } | 534 | } |
486 | 535 | ||
487 | int gfs2_statfs_sync(struct gfs2_sbd *sdp) | 536 | int gfs2_statfs_sync(struct super_block *sb, int type) |
488 | { | 537 | { |
538 | struct gfs2_sbd *sdp = sb->s_fs_info; | ||
489 | struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); | 539 | struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); |
490 | struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); | 540 | struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); |
491 | struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; | 541 | struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; |
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp) | |||
521 | goto out_bh2; | 571 | goto out_bh2; |
522 | 572 | ||
523 | update_statfs(sdp, m_bh, l_bh); | 573 | update_statfs(sdp, m_bh, l_bh); |
574 | sdp->sd_statfs_force_sync = 0; | ||
524 | 575 | ||
525 | gfs2_trans_end(sdp); | 576 | gfs2_trans_end(sdp); |
526 | 577 | ||
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) | |||
712 | int error; | 763 | int error; |
713 | 764 | ||
714 | flush_workqueue(gfs2_delete_workqueue); | 765 | flush_workqueue(gfs2_delete_workqueue); |
715 | gfs2_quota_sync(sdp); | 766 | gfs2_quota_sync(sdp->sd_vfs, 0); |
716 | gfs2_statfs_sync(sdp); | 767 | gfs2_statfs_sync(sdp->sd_vfs, 0); |
717 | 768 | ||
718 | error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, | 769 | error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, |
719 | &t_gh); | 770 | &t_gh); |
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data) | |||
1061 | 1112 | ||
1062 | spin_lock(>->gt_spin); | 1113 | spin_lock(>->gt_spin); |
1063 | args.ar_commit = gt->gt_log_flush_secs; | 1114 | args.ar_commit = gt->gt_log_flush_secs; |
1115 | args.ar_quota_quantum = gt->gt_quota_quantum; | ||
1116 | if (gt->gt_statfs_slow) | ||
1117 | args.ar_statfs_quantum = 0; | ||
1118 | else | ||
1119 | args.ar_statfs_quantum = gt->gt_statfs_quantum; | ||
1064 | spin_unlock(>->gt_spin); | 1120 | spin_unlock(>->gt_spin); |
1065 | error = gfs2_mount_args(sdp, &args, data); | 1121 | error = gfs2_mount_args(&args, data); |
1066 | if (error) | 1122 | if (error) |
1067 | return error; | 1123 | return error; |
1068 | 1124 | ||
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data) | |||
1097 | sb->s_flags |= MS_POSIXACL; | 1153 | sb->s_flags |= MS_POSIXACL; |
1098 | else | 1154 | else |
1099 | sb->s_flags &= ~MS_POSIXACL; | 1155 | sb->s_flags &= ~MS_POSIXACL; |
1156 | if (sdp->sd_args.ar_nobarrier) | ||
1157 | set_bit(SDF_NOBARRIERS, &sdp->sd_flags); | ||
1158 | else | ||
1159 | clear_bit(SDF_NOBARRIERS, &sdp->sd_flags); | ||
1100 | spin_lock(>->gt_spin); | 1160 | spin_lock(>->gt_spin); |
1101 | gt->gt_log_flush_secs = args.ar_commit; | 1161 | gt->gt_log_flush_secs = args.ar_commit; |
1162 | gt->gt_quota_quantum = args.ar_quota_quantum; | ||
1163 | if (args.ar_statfs_quantum) { | ||
1164 | gt->gt_statfs_slow = 0; | ||
1165 | gt->gt_statfs_quantum = args.ar_statfs_quantum; | ||
1166 | } | ||
1167 | else { | ||
1168 | gt->gt_statfs_slow = 1; | ||
1169 | gt->gt_statfs_quantum = 30; | ||
1170 | } | ||
1102 | spin_unlock(>->gt_spin); | 1171 | spin_unlock(>->gt_spin); |
1103 | 1172 | ||
1104 | gfs2_online_uevent(sdp); | 1173 | gfs2_online_uevent(sdp); |
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1179 | { | 1248 | { |
1180 | struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; | 1249 | struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; |
1181 | struct gfs2_args *args = &sdp->sd_args; | 1250 | struct gfs2_args *args = &sdp->sd_args; |
1182 | int lfsecs; | 1251 | int val; |
1183 | 1252 | ||
1184 | if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) | 1253 | if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) |
1185 | seq_printf(s, ",meta"); | 1254 | seq_printf(s, ",meta"); |
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1240 | } | 1309 | } |
1241 | if (args->ar_discard) | 1310 | if (args->ar_discard) |
1242 | seq_printf(s, ",discard"); | 1311 | seq_printf(s, ",discard"); |
1243 | lfsecs = sdp->sd_tune.gt_log_flush_secs; | 1312 | val = sdp->sd_tune.gt_log_flush_secs; |
1244 | if (lfsecs != 60) | 1313 | if (val != 60) |
1245 | seq_printf(s, ",commit=%d", lfsecs); | 1314 | seq_printf(s, ",commit=%d", val); |
1315 | val = sdp->sd_tune.gt_statfs_quantum; | ||
1316 | if (val != 30) | ||
1317 | seq_printf(s, ",statfs_quantum=%d", val); | ||
1318 | val = sdp->sd_tune.gt_quota_quantum; | ||
1319 | if (val != 60) | ||
1320 | seq_printf(s, ",quota_quantum=%d", val); | ||
1321 | if (args->ar_statfs_percent) | ||
1322 | seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent); | ||
1246 | if (args->ar_errors != GFS2_ERRORS_DEFAULT) { | 1323 | if (args->ar_errors != GFS2_ERRORS_DEFAULT) { |
1247 | const char *state; | 1324 | const char *state; |
1248 | 1325 | ||
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1259 | } | 1336 | } |
1260 | seq_printf(s, ",errors=%s", state); | 1337 | seq_printf(s, ",errors=%s", state); |
1261 | } | 1338 | } |
1339 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) | ||
1340 | seq_printf(s, ",nobarrier"); | ||
1341 | |||
1262 | return 0; | 1342 | return 0; |
1263 | } | 1343 | } |
1264 | 1344 | ||
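Of the super.c changes above, the statfs_percent test added to gfs2_statfs_change() is the one piece of arithmetic worth spelling out: a master statfs sync is requested once the accumulated local free-block delta reaches statfs_percent per cent of the master free count, in either direction. A self-contained sketch with illustrative numbers; statfs_needs_sync() is a made-up name, not part of the patch:

/* With statfs_percent = 10, a local delta of 120 blocks against a master
 * free count of 1000 gives x = 100 * 120 = 12000 and y = 1000 * 10 = 10000,
 * so x >= y and a sync of the master statfs file is requested. */
static int statfs_needs_sync(long long local_free_delta, long long master_free,
			     unsigned int statfs_percent)
{
	long long x, y;

	if (!statfs_percent)
		return 0;			/* 0 (the default) disables the check */
	x = 100 * local_free_delta;		/* delta scaled to percent units */
	y = master_free * statfs_percent;	/* threshold in the same units */
	return x >= y || x <= -y;		/* |delta| crossed the threshold */
}

When the threshold is crossed, gfs2_statfs_change() calls gfs2_wake_up_statfs(), and the quotad thread shown earlier performs the actual gfs2_statfs_sync() outside the hot path.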
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index 235db3682885..3df60f2d84e3 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h | |||
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) | |||
27 | 27 | ||
28 | extern void gfs2_jindex_free(struct gfs2_sbd *sdp); | 28 | extern void gfs2_jindex_free(struct gfs2_sbd *sdp); |
29 | 29 | ||
30 | extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data); | 30 | extern int gfs2_mount_args(struct gfs2_args *args, char *data); |
31 | 31 | ||
32 | extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); | 32 | extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); |
33 | extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); | 33 | extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); |
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, | |||
44 | const void *buf); | 44 | const void *buf); |
45 | extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, | 45 | extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, |
46 | struct buffer_head *l_bh); | 46 | struct buffer_head *l_bh); |
47 | extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); | 47 | extern int gfs2_statfs_sync(struct super_block *sb, int type); |
48 | 48 | ||
49 | extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); | 49 | extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); |
50 | extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); | 50 | extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); |
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 446329728d52..c5dad1eb7b91 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf, | |||
158 | if (simple_strtol(buf, NULL, 0) != 1) | 158 | if (simple_strtol(buf, NULL, 0) != 1) |
159 | return -EINVAL; | 159 | return -EINVAL; |
160 | 160 | ||
161 | gfs2_statfs_sync(sdp); | 161 | gfs2_statfs_sync(sdp->sd_vfs, 0); |
162 | return len; | 162 | return len; |
163 | } | 163 | } |
164 | 164 | ||
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, | |||
171 | if (simple_strtol(buf, NULL, 0) != 1) | 171 | if (simple_strtol(buf, NULL, 0) != 1) |
172 | return -EINVAL; | 172 | return -EINVAL; |
173 | 173 | ||
174 | gfs2_quota_sync(sdp); | 174 | gfs2_quota_sync(sdp->sd_vfs, 0); |
175 | return len; | 175 | return len; |
176 | } | 176 | } |
177 | 177 | ||
178 | static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, | 178 | static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, |
179 | size_t len) | 179 | size_t len) |
180 | { | 180 | { |
181 | int error; | ||
181 | u32 id; | 182 | u32 id; |
182 | 183 | ||
183 | if (!capable(CAP_SYS_ADMIN)) | 184 | if (!capable(CAP_SYS_ADMIN)) |
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, | |||
185 | 186 | ||
186 | id = simple_strtoul(buf, NULL, 0); | 187 | id = simple_strtoul(buf, NULL, 0); |
187 | 188 | ||
188 | gfs2_quota_refresh(sdp, 1, id); | 189 | error = gfs2_quota_refresh(sdp, 1, id); |
189 | return len; | 190 | return error ? error : len; |
190 | } | 191 | } |
191 | 192 | ||
192 | static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, | 193 | static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, |
193 | size_t len) | 194 | size_t len) |
194 | { | 195 | { |
196 | int error; | ||
195 | u32 id; | 197 | u32 id; |
196 | 198 | ||
197 | if (!capable(CAP_SYS_ADMIN)) | 199 | if (!capable(CAP_SYS_ADMIN)) |
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, | |||
199 | 201 | ||
200 | id = simple_strtoul(buf, NULL, 0); | 202 | id = simple_strtoul(buf, NULL, 0); |
201 | 203 | ||
202 | gfs2_quota_refresh(sdp, 0, id); | 204 | error = gfs2_quota_refresh(sdp, 0, id); |
203 | return len; | 205 | return error ? error : len; |
204 | } | 206 | } |
205 | 207 | ||
206 | static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | 208 | static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) |
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 8a0f8ef6ee27..912f5cbc4740 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c | |||
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh, | |||
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, | 189 | static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, |
190 | struct gfs2_ea_location *el) | 190 | struct gfs2_ea_location *el) |
191 | { | 191 | { |
192 | struct ea_find ef; | 192 | struct ea_find ef; |
193 | int error; | 193 | int error; |
@@ -516,8 +516,8 @@ out: | |||
516 | return error; | 516 | return error; |
517 | } | 517 | } |
518 | 518 | ||
519 | int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, | 519 | static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, |
520 | char *data, size_t size) | 520 | char *data, size_t size) |
521 | { | 521 | { |
522 | int ret; | 522 | int ret; |
523 | size_t len = GFS2_EA_DATA_LEN(el->el_ea); | 523 | size_t len = GFS2_EA_DATA_LEN(el->el_ea); |
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, | |||
534 | return len; | 534 | return len; |
535 | } | 535 | } |
536 | 536 | ||
537 | int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata) | ||
538 | { | ||
539 | struct gfs2_ea_location el; | ||
540 | int error; | ||
541 | int len; | ||
542 | char *data; | ||
543 | |||
544 | error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el); | ||
545 | if (error) | ||
546 | return error; | ||
547 | if (!el.el_ea) | ||
548 | goto out; | ||
549 | if (!GFS2_EA_DATA_LEN(el.el_ea)) | ||
550 | goto out; | ||
551 | |||
552 | len = GFS2_EA_DATA_LEN(el.el_ea); | ||
553 | data = kmalloc(len, GFP_NOFS); | ||
554 | error = -ENOMEM; | ||
555 | if (data == NULL) | ||
556 | goto out; | ||
557 | |||
558 | error = gfs2_ea_get_copy(ip, &el, data, len); | ||
559 | if (error == 0) | ||
560 | error = len; | ||
561 | *ppdata = data; | ||
562 | out: | ||
563 | brelse(el.el_bh); | ||
564 | return error; | ||
565 | } | ||
566 | |||
537 | /** | 567 | /** |
538 | * gfs2_xattr_get - Get a GFS2 extended attribute | 568 | * gfs2_xattr_get - Get a GFS2 extended attribute |
539 | * @inode: The inode | 569 | * @inode: The inode |
@@ -1259,22 +1289,26 @@ fail: | |||
1259 | return error; | 1289 | return error; |
1260 | } | 1290 | } |
1261 | 1291 | ||
1262 | int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, | 1292 | int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) |
1263 | struct iattr *attr, char *data) | ||
1264 | { | 1293 | { |
1294 | struct gfs2_ea_location el; | ||
1265 | struct buffer_head *dibh; | 1295 | struct buffer_head *dibh; |
1266 | int error; | 1296 | int error; |
1267 | 1297 | ||
1268 | if (GFS2_EA_IS_STUFFED(el->el_ea)) { | 1298 | error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el); |
1299 | if (error) | ||
1300 | return error; | ||
1301 | |||
1302 | if (GFS2_EA_IS_STUFFED(el.el_ea)) { | ||
1269 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); | 1303 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); |
1270 | if (error) | 1304 | if (error) |
1271 | return error; | 1305 | return error; |
1272 | 1306 | ||
1273 | gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); | 1307 | gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1); |
1274 | memcpy(GFS2_EA2DATA(el->el_ea), data, | 1308 | memcpy(GFS2_EA2DATA(el.el_ea), data, |
1275 | GFS2_EA_DATA_LEN(el->el_ea)); | 1309 | GFS2_EA_DATA_LEN(el.el_ea)); |
1276 | } else | 1310 | } else |
1277 | error = ea_acl_chmod_unstuffed(ip, el->el_ea, data); | 1311 | error = ea_acl_chmod_unstuffed(ip, el.el_ea, data); |
1278 | 1312 | ||
1279 | if (error) | 1313 | if (error) |
1280 | return error; | 1314 | return error; |
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name, | |||
1507 | return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); | 1541 | return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); |
1508 | } | 1542 | } |
1509 | 1543 | ||
1510 | static int gfs2_xattr_system_get(struct inode *inode, const char *name, | ||
1511 | void *buffer, size_t size) | ||
1512 | { | ||
1513 | return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size); | ||
1514 | } | ||
1515 | |||
1516 | static int gfs2_xattr_system_set(struct inode *inode, const char *name, | ||
1517 | const void *value, size_t size, int flags) | ||
1518 | { | ||
1519 | return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags); | ||
1520 | } | ||
1521 | |||
1522 | static int gfs2_xattr_security_get(struct inode *inode, const char *name, | 1544 | static int gfs2_xattr_security_get(struct inode *inode, const char *name, |
1523 | void *buffer, size_t size) | 1545 | void *buffer, size_t size) |
1524 | { | 1546 | { |
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = { | |||
1543 | .set = gfs2_xattr_security_set, | 1565 | .set = gfs2_xattr_security_set, |
1544 | }; | 1566 | }; |
1545 | 1567 | ||
1546 | static struct xattr_handler gfs2_xattr_system_handler = { | ||
1547 | .prefix = XATTR_SYSTEM_PREFIX, | ||
1548 | .get = gfs2_xattr_system_get, | ||
1549 | .set = gfs2_xattr_system_set, | ||
1550 | }; | ||
1551 | |||
1552 | struct xattr_handler *gfs2_xattr_handlers[] = { | 1568 | struct xattr_handler *gfs2_xattr_handlers[] = { |
1553 | &gfs2_xattr_user_handler, | 1569 | &gfs2_xattr_user_handler, |
1554 | &gfs2_xattr_security_handler, | 1570 | &gfs2_xattr_security_handler, |
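The new gfs2_xattr_acl_get() above folds the gfs2_ea_find()/gfs2_ea_get_copy() sequence that used to be exported piecemeal into one helper for the ACL code (the matching fs/gfs2/acl.c hunks are not part of this section). A hedged sketch of a caller; example_read_access_acl() is an illustrative name:

static int example_read_access_acl(struct gfs2_inode *ip)
{
	char *data = NULL;
	int len;

	len = gfs2_xattr_acl_get(ip, GFS2_POSIX_ACL_ACCESS, &data);
	if (len < 0)
		return len;	/* lookup or I/O error */
	if (len == 0)
		return 0;	/* no ACL xattr on this inode; nothing allocated */

	/* parse the raw xattr body in data[0..len), e.g. into a posix_acl */

	kfree(data);
	return 0;
}

Note the ownership rule the helper establishes: on a positive return the buffer was allocated with GFP_NOFS inside gfs2_xattr_acl_get() and the caller is responsible for freeing it.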
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h index cbdfd7743733..8d6ae5813c4d 100644 --- a/fs/gfs2/xattr.h +++ b/fs/gfs2/xattr.h | |||
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip); | |||
62 | 62 | ||
63 | /* Exported to acl.c */ | 63 | /* Exported to acl.c */ |
64 | 64 | ||
65 | extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, | 65 | extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data); |
66 | struct gfs2_ea_location *el); | 66 | extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data); |
67 | extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, | ||
68 | char *data, size_t size); | ||
69 | extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, | ||
70 | struct iattr *attr, char *data); | ||
71 | 67 | ||
72 | #endif /* __EATTR_DOT_H__ */ | 68 | #endif /* __EATTR_DOT_H__ */ |
diff --git a/fs/inode.c b/fs/inode.c index 4d8e3be55976..06c1f02de611 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/hash.h> | 18 | #include <linux/hash.h> |
19 | #include <linux/swap.h> | 19 | #include <linux/swap.h> |
20 | #include <linux/security.h> | 20 | #include <linux/security.h> |
21 | #include <linux/ima.h> | ||
22 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
23 | #include <linux/cdev.h> | 22 | #include <linux/cdev.h> |
24 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
@@ -157,11 +156,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode) | |||
157 | 156 | ||
158 | if (security_inode_alloc(inode)) | 157 | if (security_inode_alloc(inode)) |
159 | goto out; | 158 | goto out; |
160 | |||
161 | /* allocate and initialize an i_integrity */ | ||
162 | if (ima_inode_alloc(inode)) | ||
163 | goto out_free_security; | ||
164 | |||
165 | spin_lock_init(&inode->i_lock); | 159 | spin_lock_init(&inode->i_lock); |
166 | lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); | 160 | lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); |
167 | 161 | ||
@@ -201,9 +195,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode) | |||
201 | #endif | 195 | #endif |
202 | 196 | ||
203 | return 0; | 197 | return 0; |
204 | |||
205 | out_free_security: | ||
206 | security_inode_free(inode); | ||
207 | out: | 198 | out: |
208 | return -ENOMEM; | 199 | return -ENOMEM; |
209 | } | 200 | } |
@@ -235,7 +226,6 @@ static struct inode *alloc_inode(struct super_block *sb) | |||
235 | void __destroy_inode(struct inode *inode) | 226 | void __destroy_inode(struct inode *inode) |
236 | { | 227 | { |
237 | BUG_ON(inode_has_buffers(inode)); | 228 | BUG_ON(inode_has_buffers(inode)); |
238 | ima_inode_free(inode); | ||
239 | security_inode_free(inode); | 229 | security_inode_free(inode); |
240 | fsnotify_inode_delete(inode); | 230 | fsnotify_inode_delete(inode); |
241 | #ifdef CONFIG_FS_POSIX_ACL | 231 | #ifdef CONFIG_FS_POSIX_ACL |
diff --git a/fs/namespace.c b/fs/namespace.c index bdc3cb4fd222..7d70d63ceb29 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1921,6 +1921,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, | |||
1921 | if (data_page) | 1921 | if (data_page) |
1922 | ((char *)data_page)[PAGE_SIZE - 1] = 0; | 1922 | ((char *)data_page)[PAGE_SIZE - 1] = 0; |
1923 | 1923 | ||
1924 | /* ... and get the mountpoint */ | ||
1925 | retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); | ||
1926 | if (retval) | ||
1927 | return retval; | ||
1928 | |||
1929 | retval = security_sb_mount(dev_name, &path, | ||
1930 | type_page, flags, data_page); | ||
1931 | if (retval) | ||
1932 | goto dput_out; | ||
1933 | |||
1924 | /* Default to relatime unless overriden */ | 1934 | /* Default to relatime unless overriden */ |
1925 | if (!(flags & MS_NOATIME)) | 1935 | if (!(flags & MS_NOATIME)) |
1926 | mnt_flags |= MNT_RELATIME; | 1936 | mnt_flags |= MNT_RELATIME; |
@@ -1945,16 +1955,6 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, | |||
1945 | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | | 1955 | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | |
1946 | MS_STRICTATIME); | 1956 | MS_STRICTATIME); |
1947 | 1957 | ||
1948 | /* ... and get the mountpoint */ | ||
1949 | retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); | ||
1950 | if (retval) | ||
1951 | return retval; | ||
1952 | |||
1953 | retval = security_sb_mount(dev_name, &path, | ||
1954 | type_page, flags, data_page); | ||
1955 | if (retval) | ||
1956 | goto dput_out; | ||
1957 | |||
1958 | if (flags & MS_REMOUNT) | 1958 | if (flags & MS_REMOUNT) |
1959 | retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, | 1959 | retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, |
1960 | data_page); | 1960 | data_page); |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -587,6 +587,9 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename) | |||
587 | error = -EPERM; | 587 | error = -EPERM; |
588 | if (!capable(CAP_SYS_CHROOT)) | 588 | if (!capable(CAP_SYS_CHROOT)) |
589 | goto dput_and_out; | 589 | goto dput_and_out; |
590 | error = security_path_chroot(&path); | ||
591 | if (error) | ||
592 | goto dput_and_out; | ||
590 | 593 | ||
591 | set_fs_root(current->fs, &path); | 594 | set_fs_root(current->fs, &path); |
592 | error = 0; | 595 | error = 0; |
@@ -617,11 +620,15 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode) | |||
617 | if (err) | 620 | if (err) |
618 | goto out_putf; | 621 | goto out_putf; |
619 | mutex_lock(&inode->i_mutex); | 622 | mutex_lock(&inode->i_mutex); |
623 | err = security_path_chmod(dentry, file->f_vfsmnt, mode); | ||
624 | if (err) | ||
625 | goto out_unlock; | ||
620 | if (mode == (mode_t) -1) | 626 | if (mode == (mode_t) -1) |
621 | mode = inode->i_mode; | 627 | mode = inode->i_mode; |
622 | newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); | 628 | newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); |
623 | newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; | 629 | newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; |
624 | err = notify_change(dentry, &newattrs); | 630 | err = notify_change(dentry, &newattrs); |
631 | out_unlock: | ||
625 | mutex_unlock(&inode->i_mutex); | 632 | mutex_unlock(&inode->i_mutex); |
626 | mnt_drop_write(file->f_path.mnt); | 633 | mnt_drop_write(file->f_path.mnt); |
627 | out_putf: | 634 | out_putf: |
@@ -646,11 +653,15 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode) | |||
646 | if (error) | 653 | if (error) |
647 | goto dput_and_out; | 654 | goto dput_and_out; |
648 | mutex_lock(&inode->i_mutex); | 655 | mutex_lock(&inode->i_mutex); |
656 | error = security_path_chmod(path.dentry, path.mnt, mode); | ||
657 | if (error) | ||
658 | goto out_unlock; | ||
649 | if (mode == (mode_t) -1) | 659 | if (mode == (mode_t) -1) |
650 | mode = inode->i_mode; | 660 | mode = inode->i_mode; |
651 | newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); | 661 | newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); |
652 | newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; | 662 | newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; |
653 | error = notify_change(path.dentry, &newattrs); | 663 | error = notify_change(path.dentry, &newattrs); |
664 | out_unlock: | ||
654 | mutex_unlock(&inode->i_mutex); | 665 | mutex_unlock(&inode->i_mutex); |
655 | mnt_drop_write(path.mnt); | 666 | mnt_drop_write(path.mnt); |
656 | dput_and_out: | 667 | dput_and_out: |
@@ -664,9 +675,9 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode) | |||
664 | return sys_fchmodat(AT_FDCWD, filename, mode); | 675 | return sys_fchmodat(AT_FDCWD, filename, mode); |
665 | } | 676 | } |
666 | 677 | ||
667 | static int chown_common(struct dentry * dentry, uid_t user, gid_t group) | 678 | static int chown_common(struct path *path, uid_t user, gid_t group) |
668 | { | 679 | { |
669 | struct inode *inode = dentry->d_inode; | 680 | struct inode *inode = path->dentry->d_inode; |
670 | int error; | 681 | int error; |
671 | struct iattr newattrs; | 682 | struct iattr newattrs; |
672 | 683 | ||
@@ -683,7 +694,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group) | |||
683 | newattrs.ia_valid |= | 694 | newattrs.ia_valid |= |
684 | ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; | 695 | ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; |
685 | mutex_lock(&inode->i_mutex); | 696 | mutex_lock(&inode->i_mutex); |
686 | error = notify_change(dentry, &newattrs); | 697 | error = security_path_chown(path, user, group); |
698 | if (!error) | ||
699 | error = notify_change(path->dentry, &newattrs); | ||
687 | mutex_unlock(&inode->i_mutex); | 700 | mutex_unlock(&inode->i_mutex); |
688 | 701 | ||
689 | return error; | 702 | return error; |
@@ -700,7 +713,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) | |||
700 | error = mnt_want_write(path.mnt); | 713 | error = mnt_want_write(path.mnt); |
701 | if (error) | 714 | if (error) |
702 | goto out_release; | 715 | goto out_release; |
703 | error = chown_common(path.dentry, user, group); | 716 | error = chown_common(&path, user, group); |
704 | mnt_drop_write(path.mnt); | 717 | mnt_drop_write(path.mnt); |
705 | out_release: | 718 | out_release: |
706 | path_put(&path); | 719 | path_put(&path); |
@@ -725,7 +738,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, | |||
725 | error = mnt_want_write(path.mnt); | 738 | error = mnt_want_write(path.mnt); |
726 | if (error) | 739 | if (error) |
727 | goto out_release; | 740 | goto out_release; |
728 | error = chown_common(path.dentry, user, group); | 741 | error = chown_common(&path, user, group); |
729 | mnt_drop_write(path.mnt); | 742 | mnt_drop_write(path.mnt); |
730 | out_release: | 743 | out_release: |
731 | path_put(&path); | 744 | path_put(&path); |
@@ -744,7 +757,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group | |||
744 | error = mnt_want_write(path.mnt); | 757 | error = mnt_want_write(path.mnt); |
745 | if (error) | 758 | if (error) |
746 | goto out_release; | 759 | goto out_release; |
747 | error = chown_common(path.dentry, user, group); | 760 | error = chown_common(&path, user, group); |
748 | mnt_drop_write(path.mnt); | 761 | mnt_drop_write(path.mnt); |
749 | out_release: | 762 | out_release: |
750 | path_put(&path); | 763 | path_put(&path); |
@@ -767,7 +780,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) | |||
767 | goto out_fput; | 780 | goto out_fput; |
768 | dentry = file->f_path.dentry; | 781 | dentry = file->f_path.dentry; |
769 | audit_inode(NULL, dentry); | 782 | audit_inode(NULL, dentry); |
770 | error = chown_common(dentry, user, group); | 783 | error = chown_common(&file->f_path, user, group); |
771 | mnt_drop_write(file->f_path.mnt); | 784 | mnt_drop_write(file->f_path.mnt); |
772 | out_fput: | 785 | out_fput: |
773 | fput(file); | 786 | fput(file); |
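The open.c changes above exist so that pathname-based security modules see the whole struct path (dentry plus vfsmount) at chmod/chown/chroot time, which is why chown_common() now takes a struct path * instead of a bare dentry. Reconstructed from the new call sites, the hook prototypes look roughly like this (the real declarations live in include/linux/security.h and collapse to inline stubs returning 0 when the path hooks are compiled out; parameter names here are illustrative):

int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, mode_t mode);
int security_path_chown(struct path *path, uid_t uid, gid_t gid);
int security_path_chroot(struct path *path);

All three are called before notify_change()/set_fs_root() does any work, so a module can veto the operation without anything having changed on disk.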
diff --git a/fs/proc/array.c b/fs/proc/array.c index 822c2d506518..4badde179b18 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -410,6 +410,16 @@ static void task_show_stack_usage(struct seq_file *m, struct task_struct *task) | |||
410 | } | 410 | } |
411 | #endif /* CONFIG_MMU */ | 411 | #endif /* CONFIG_MMU */ |
412 | 412 | ||
413 | static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) | ||
414 | { | ||
415 | seq_printf(m, "Cpus_allowed:\t"); | ||
416 | seq_cpumask(m, &task->cpus_allowed); | ||
417 | seq_printf(m, "\n"); | ||
418 | seq_printf(m, "Cpus_allowed_list:\t"); | ||
419 | seq_cpumask_list(m, &task->cpus_allowed); | ||
420 | seq_printf(m, "\n"); | ||
421 | } | ||
422 | |||
413 | int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | 423 | int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, |
414 | struct pid *pid, struct task_struct *task) | 424 | struct pid *pid, struct task_struct *task) |
415 | { | 425 | { |
@@ -424,6 +434,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | |||
424 | } | 434 | } |
425 | task_sig(m, task); | 435 | task_sig(m, task); |
426 | task_cap(m, task); | 436 | task_cap(m, task); |
437 | task_cpus_allowed(m, task); | ||
427 | cpuset_task_status_allowed(m, task); | 438 | cpuset_task_status_allowed(m, task); |
428 | #if defined(CONFIG_S390) | 439 | #if defined(CONFIG_S390) |
429 | task_show_regs(m, task); | 440 | task_show_regs(m, task); |
@@ -495,20 +506,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
495 | 506 | ||
496 | /* add up live thread stats at the group level */ | 507 | /* add up live thread stats at the group level */ |
497 | if (whole) { | 508 | if (whole) { |
498 | struct task_cputime cputime; | ||
499 | struct task_struct *t = task; | 509 | struct task_struct *t = task; |
500 | do { | 510 | do { |
501 | min_flt += t->min_flt; | 511 | min_flt += t->min_flt; |
502 | maj_flt += t->maj_flt; | 512 | maj_flt += t->maj_flt; |
503 | gtime = cputime_add(gtime, task_gtime(t)); | 513 | gtime = cputime_add(gtime, t->gtime); |
504 | t = next_thread(t); | 514 | t = next_thread(t); |
505 | } while (t != task); | 515 | } while (t != task); |
506 | 516 | ||
507 | min_flt += sig->min_flt; | 517 | min_flt += sig->min_flt; |
508 | maj_flt += sig->maj_flt; | 518 | maj_flt += sig->maj_flt; |
509 | thread_group_cputime(task, &cputime); | 519 | thread_group_times(task, &utime, &stime); |
510 | utime = cputime.utime; | ||
511 | stime = cputime.stime; | ||
512 | gtime = cputime_add(gtime, sig->gtime); | 520 | gtime = cputime_add(gtime, sig->gtime); |
513 | } | 521 | } |
514 | 522 | ||
@@ -524,9 +532,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
524 | if (!whole) { | 532 | if (!whole) { |
525 | min_flt = task->min_flt; | 533 | min_flt = task->min_flt; |
526 | maj_flt = task->maj_flt; | 534 | maj_flt = task->maj_flt; |
527 | utime = task_utime(task); | 535 | task_times(task, &utime, &stime); |
528 | stime = task_stime(task); | 536 | gtime = task->gtime; |
529 | gtime = task_gtime(task); | ||
530 | } | 537 | } |
531 | 538 | ||
532 | /* scale priority and nice values from timeslices to -20..20 */ | 539 | /* scale priority and nice values from timeslices to -20..20 */ |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 7cc726c6d70a..b9b7aad2003d 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -27,7 +27,7 @@ static int show_stat(struct seq_file *p, void *v) | |||
27 | int i, j; | 27 | int i, j; |
28 | unsigned long jif; | 28 | unsigned long jif; |
29 | cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; | 29 | cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; |
30 | cputime64_t guest; | 30 | cputime64_t guest, guest_nice; |
31 | u64 sum = 0; | 31 | u64 sum = 0; |
32 | u64 sum_softirq = 0; | 32 | u64 sum_softirq = 0; |
33 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; | 33 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; |
@@ -36,7 +36,7 @@ static int show_stat(struct seq_file *p, void *v) | |||
36 | 36 | ||
37 | user = nice = system = idle = iowait = | 37 | user = nice = system = idle = iowait = |
38 | irq = softirq = steal = cputime64_zero; | 38 | irq = softirq = steal = cputime64_zero; |
39 | guest = cputime64_zero; | 39 | guest = guest_nice = cputime64_zero; |
40 | getboottime(&boottime); | 40 | getboottime(&boottime); |
41 | jif = boottime.tv_sec; | 41 | jif = boottime.tv_sec; |
42 | 42 | ||
@@ -51,6 +51,8 @@ static int show_stat(struct seq_file *p, void *v) | |||
51 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); | 51 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); |
52 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); | 52 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); |
53 | guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); | 53 | guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); |
54 | guest_nice = cputime64_add(guest_nice, | ||
55 | kstat_cpu(i).cpustat.guest_nice); | ||
54 | for_each_irq_nr(j) { | 56 | for_each_irq_nr(j) { |
55 | sum += kstat_irqs_cpu(j, i); | 57 | sum += kstat_irqs_cpu(j, i); |
56 | } | 58 | } |
@@ -65,7 +67,8 @@ static int show_stat(struct seq_file *p, void *v) | |||
65 | } | 67 | } |
66 | sum += arch_irq_stat(); | 68 | sum += arch_irq_stat(); |
67 | 69 | ||
68 | seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", | 70 | seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu " |
71 | "%llu\n", | ||
69 | (unsigned long long)cputime64_to_clock_t(user), | 72 | (unsigned long long)cputime64_to_clock_t(user), |
70 | (unsigned long long)cputime64_to_clock_t(nice), | 73 | (unsigned long long)cputime64_to_clock_t(nice), |
71 | (unsigned long long)cputime64_to_clock_t(system), | 74 | (unsigned long long)cputime64_to_clock_t(system), |
@@ -74,7 +77,8 @@ static int show_stat(struct seq_file *p, void *v) | |||
74 | (unsigned long long)cputime64_to_clock_t(irq), | 77 | (unsigned long long)cputime64_to_clock_t(irq), |
75 | (unsigned long long)cputime64_to_clock_t(softirq), | 78 | (unsigned long long)cputime64_to_clock_t(softirq), |
76 | (unsigned long long)cputime64_to_clock_t(steal), | 79 | (unsigned long long)cputime64_to_clock_t(steal), |
77 | (unsigned long long)cputime64_to_clock_t(guest)); | 80 | (unsigned long long)cputime64_to_clock_t(guest), |
81 | (unsigned long long)cputime64_to_clock_t(guest_nice)); | ||
78 | for_each_online_cpu(i) { | 82 | for_each_online_cpu(i) { |
79 | 83 | ||
80 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ | 84 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ |
@@ -88,8 +92,10 @@ static int show_stat(struct seq_file *p, void *v) | |||
88 | softirq = kstat_cpu(i).cpustat.softirq; | 92 | softirq = kstat_cpu(i).cpustat.softirq; |
89 | steal = kstat_cpu(i).cpustat.steal; | 93 | steal = kstat_cpu(i).cpustat.steal; |
90 | guest = kstat_cpu(i).cpustat.guest; | 94 | guest = kstat_cpu(i).cpustat.guest; |
95 | guest_nice = kstat_cpu(i).cpustat.guest_nice; | ||
91 | seq_printf(p, | 96 | seq_printf(p, |
92 | "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", | 97 | "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " |
98 | "%llu\n", | ||
93 | i, | 99 | i, |
94 | (unsigned long long)cputime64_to_clock_t(user), | 100 | (unsigned long long)cputime64_to_clock_t(user), |
95 | (unsigned long long)cputime64_to_clock_t(nice), | 101 | (unsigned long long)cputime64_to_clock_t(nice), |
@@ -99,7 +105,8 @@ static int show_stat(struct seq_file *p, void *v) | |||
99 | (unsigned long long)cputime64_to_clock_t(irq), | 105 | (unsigned long long)cputime64_to_clock_t(irq), |
100 | (unsigned long long)cputime64_to_clock_t(softirq), | 106 | (unsigned long long)cputime64_to_clock_t(softirq), |
101 | (unsigned long long)cputime64_to_clock_t(steal), | 107 | (unsigned long long)cputime64_to_clock_t(steal), |
102 | (unsigned long long)cputime64_to_clock_t(guest)); | 108 | (unsigned long long)cputime64_to_clock_t(guest), |
109 | (unsigned long long)cputime64_to_clock_t(guest_nice)); | ||
103 | } | 110 | } |
104 | seq_printf(p, "intr %llu", (unsigned long long)sum); | 111 | seq_printf(p, "intr %llu", (unsigned long long)sum); |
105 | 112 | ||
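With guest_nice appended, the "cpu" lines in /proc/stat now carry ten counters instead of nine. A small user-space sketch (assuming this kernel's field order; older kernels return only nine fields) that reads the aggregate line:

        /* Sketch: parse the first /proc/stat line in the order printed above. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long user, nice, system, idle, iowait,
                                   irq, softirq, steal, guest, guest_nice;
                FILE *f = fopen("/proc/stat", "r");

                if (!f)
                        return 1;
                if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                           &user, &nice, &system, &idle, &iowait,
                           &irq, &softirq, &steal, &guest, &guest_nice) == 10)
                        printf("guest=%llu guest_nice=%llu\n", guest, guest_nice);
                fclose(f);
                return 0;
        }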
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index 8047e01ef46b..353e78a9ebee 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig | |||
@@ -17,7 +17,7 @@ config QUOTA | |||
17 | 17 | ||
18 | config QUOTA_NETLINK_INTERFACE | 18 | config QUOTA_NETLINK_INTERFACE |
19 | bool "Report quota messages through netlink interface" | 19 | bool "Report quota messages through netlink interface" |
20 | depends on QUOTA && NET | 20 | depends on QUOTACTL && NET |
21 | help | 21 | help |
22 | If you say Y here, quota warnings (about exceeding softlimit, reaching | 22 | If you say Y here, quota warnings (about exceeding softlimit, reaching |
23 | hardlimit, etc.) will be reported through netlink interface. If unsure, | 23 | hardlimit, etc.) will be reported through netlink interface. If unsure, |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 39b49c42a7ed..9b6ad908dcb2 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -77,10 +77,6 @@ | |||
77 | #include <linux/capability.h> | 77 | #include <linux/capability.h> |
78 | #include <linux/quotaops.h> | 78 | #include <linux/quotaops.h> |
79 | #include <linux/writeback.h> /* for inode_lock, oddly enough.. */ | 79 | #include <linux/writeback.h> /* for inode_lock, oddly enough.. */ |
80 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
81 | #include <net/netlink.h> | ||
82 | #include <net/genetlink.h> | ||
83 | #endif | ||
84 | 80 | ||
85 | #include <asm/uaccess.h> | 81 | #include <asm/uaccess.h> |
86 | 82 | ||
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype) | |||
1071 | } | 1067 | } |
1072 | #endif | 1068 | #endif |
1073 | 1069 | ||
1074 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1075 | |||
1076 | /* Netlink family structure for quota */ | ||
1077 | static struct genl_family quota_genl_family = { | ||
1078 | .id = GENL_ID_GENERATE, | ||
1079 | .hdrsize = 0, | ||
1080 | .name = "VFS_DQUOT", | ||
1081 | .version = 1, | ||
1082 | .maxattr = QUOTA_NL_A_MAX, | ||
1083 | }; | ||
1084 | |||
1085 | /* Send warning to userspace about user which exceeded quota */ | ||
1086 | static void send_warning(const struct dquot *dquot, const char warntype) | ||
1087 | { | ||
1088 | static atomic_t seq; | ||
1089 | struct sk_buff *skb; | ||
1090 | void *msg_head; | ||
1091 | int ret; | ||
1092 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
1093 | 2 * nla_total_size(sizeof(u64)); | ||
1094 | |||
1095 | /* We have to allocate using GFP_NOFS as we are called from a | ||
1096 | * filesystem performing write and thus further recursion into | ||
1097 | * the fs to free some data could cause deadlocks. */ | ||
1098 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
1099 | if (!skb) { | ||
1100 | printk(KERN_ERR | ||
1101 | "VFS: Not enough memory to send quota warning.\n"); | ||
1102 | return; | ||
1103 | } | ||
1104 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
1105 | "a_genl_family, 0, QUOTA_NL_C_WARNING); | ||
1106 | if (!msg_head) { | ||
1107 | printk(KERN_ERR | ||
1108 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
1109 | goto err_out; | ||
1110 | } | ||
1111 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type); | ||
1112 | if (ret) | ||
1113 | goto attr_err_out; | ||
1114 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id); | ||
1115 | if (ret) | ||
1116 | goto attr_err_out; | ||
1117 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
1118 | if (ret) | ||
1119 | goto attr_err_out; | ||
1120 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, | ||
1121 | MAJOR(dquot->dq_sb->s_dev)); | ||
1122 | if (ret) | ||
1123 | goto attr_err_out; | ||
1124 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, | ||
1125 | MINOR(dquot->dq_sb->s_dev)); | ||
1126 | if (ret) | ||
1127 | goto attr_err_out; | ||
1128 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
1129 | if (ret) | ||
1130 | goto attr_err_out; | ||
1131 | genlmsg_end(skb, msg_head); | ||
1132 | |||
1133 | genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
1134 | return; | ||
1135 | attr_err_out: | ||
1136 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
1137 | err_out: | ||
1138 | kfree_skb(skb); | ||
1139 | } | ||
1140 | #endif | ||
1141 | /* | 1070 | /* |
1142 | * Write warnings to the console and send warning messages over netlink. | 1071 | * Write warnings to the console and send warning messages over netlink. |
1143 | * | 1072 | * |
@@ -1145,18 +1074,20 @@ err_out: | |||
1145 | */ | 1074 | */ |
1146 | static void flush_warnings(struct dquot *const *dquots, char *warntype) | 1075 | static void flush_warnings(struct dquot *const *dquots, char *warntype) |
1147 | { | 1076 | { |
1077 | struct dquot *dq; | ||
1148 | int i; | 1078 | int i; |
1149 | 1079 | ||
1150 | for (i = 0; i < MAXQUOTAS; i++) | 1080 | for (i = 0; i < MAXQUOTAS; i++) { |
1151 | if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && | 1081 | dq = dquots[i]; |
1152 | !warning_issued(dquots[i], warntype[i])) { | 1082 | if (dq && warntype[i] != QUOTA_NL_NOWARN && |
1083 | !warning_issued(dq, warntype[i])) { | ||
1153 | #ifdef CONFIG_PRINT_QUOTA_WARNING | 1084 | #ifdef CONFIG_PRINT_QUOTA_WARNING |
1154 | print_warning(dquots[i], warntype[i]); | 1085 | print_warning(dq, warntype[i]); |
1155 | #endif | ||
1156 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1157 | send_warning(dquots[i], warntype[i]); | ||
1158 | #endif | 1086 | #endif |
1087 | quota_send_warning(dq->dq_type, dq->dq_id, | ||
1088 | dq->dq_sb->s_dev, warntype[i]); | ||
1159 | } | 1089 | } |
1090 | } | ||
1160 | } | 1091 | } |
1161 | 1092 | ||
1162 | static int ignore_hardlimit(struct dquot *dquot) | 1093 | static int ignore_hardlimit(struct dquot *dquot) |
@@ -2607,12 +2538,6 @@ static int __init dquot_init(void) | |||
2607 | 2538 | ||
2608 | register_shrinker(&dqcache_shrinker); | 2539 | register_shrinker(&dqcache_shrinker); |
2609 | 2540 | ||
2610 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
2611 | if (genl_register_family(&quota_genl_family) != 0) | ||
2612 | printk(KERN_ERR | ||
2613 | "VFS: Failed to create quota netlink interface.\n"); | ||
2614 | #endif | ||
2615 | |||
2616 | return 0; | 2541 | return 0; |
2617 | } | 2542 | } |
2618 | module_init(dquot_init); | 2543 | module_init(dquot_init); |
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 95c5b42384b2..ee91e2756950 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/capability.h> | 18 | #include <linux/capability.h> |
19 | #include <linux/quotaops.h> | 19 | #include <linux/quotaops.h> |
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <net/netlink.h> | ||
22 | #include <net/genetlink.h> | ||
21 | 23 | ||
22 | /* Check validity of generic quotactl commands */ | 24 | /* Check validity of generic quotactl commands */ |
23 | static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, | 25 | static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, |
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, | |||
525 | return ret; | 527 | return ret; |
526 | } | 528 | } |
527 | #endif | 529 | #endif |
530 | |||
531 | |||
532 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
533 | |||
534 | /* Netlink family structure for quota */ | ||
535 | static struct genl_family quota_genl_family = { | ||
536 | .id = GENL_ID_GENERATE, | ||
537 | .hdrsize = 0, | ||
538 | .name = "VFS_DQUOT", | ||
539 | .version = 1, | ||
540 | .maxattr = QUOTA_NL_A_MAX, | ||
541 | }; | ||
542 | |||
543 | /** | ||
544 | * quota_send_warning - Send warning to userspace about exceeded quota | ||
545 | * @type: The quota type: USRQQUOTA, GRPQUOTA,... | ||
546 | * @id: The user or group id of the quota that was exceeded | ||
547 | * @dev: The device on which the fs is mounted (sb->s_dev) | ||
548 | * @warntype: The type of the warning: QUOTA_NL_... | ||
549 | * | ||
550 | * This can be used by filesystems (including those which don't use | ||
551 | * dquot) to send a message to userspace relating to quota limits. | ||
552 | * | ||
553 | */ | ||
554 | |||
555 | void quota_send_warning(short type, unsigned int id, dev_t dev, | ||
556 | const char warntype) | ||
557 | { | ||
558 | static atomic_t seq; | ||
559 | struct sk_buff *skb; | ||
560 | void *msg_head; | ||
561 | int ret; | ||
562 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
563 | 2 * nla_total_size(sizeof(u64)); | ||
564 | |||
565 | /* We have to allocate using GFP_NOFS as we are called from a | ||
566 | * filesystem performing write and thus further recursion into | ||
567 | * the fs to free some data could cause deadlocks. */ | ||
568 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
569 | if (!skb) { | ||
570 | printk(KERN_ERR | ||
571 | "VFS: Not enough memory to send quota warning.\n"); | ||
572 | return; | ||
573 | } | ||
574 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
575 | &quota_genl_family, 0, QUOTA_NL_C_WARNING); | ||
576 | if (!msg_head) { | ||
577 | printk(KERN_ERR | ||
578 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
579 | goto err_out; | ||
580 | } | ||
581 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type); | ||
582 | if (ret) | ||
583 | goto attr_err_out; | ||
584 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id); | ||
585 | if (ret) | ||
586 | goto attr_err_out; | ||
587 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
588 | if (ret) | ||
589 | goto attr_err_out; | ||
590 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev)); | ||
591 | if (ret) | ||
592 | goto attr_err_out; | ||
593 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); | ||
594 | if (ret) | ||
595 | goto attr_err_out; | ||
596 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
597 | if (ret) | ||
598 | goto attr_err_out; | ||
599 | genlmsg_end(skb, msg_head); | ||
600 | |||
601 | genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
602 | return; | ||
603 | attr_err_out: | ||
604 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
605 | err_out: | ||
606 | kfree_skb(skb); | ||
607 | } | ||
608 | EXPORT_SYMBOL(quota_send_warning); | ||
609 | |||
610 | static int __init quota_init(void) | ||
611 | { | ||
612 | if (genl_register_family(&quota_genl_family) != 0) | ||
613 | printk(KERN_ERR | ||
614 | "VFS: Failed to create quota netlink interface.\n"); | ||
615 | return 0; | ||
616 | }; | ||
617 | |||
618 | module_init(quota_init); | ||
619 | #endif | ||
620 | |||
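Moving the netlink plumbing into fs/quota/quota.c behind the exported quota_send_warning() lets filesystems that do not use the dquot core report quota events the same way. A hedged caller sketch; only quota_send_warning(), USRQUOTA and the QUOTA_NL_* warning constants are from the tree, the surrounding function is illustrative:

        /* Sketch: a filesystem noticing a user crossed a block soft limit
         * could report it through the new helper with the same arguments
         * the dquot code now passes in flush_warnings(). */
        #include <linux/fs.h>
        #include <linux/quota.h>

        static void my_fs_report_softlimit(struct super_block *sb, unsigned int uid)
        {
                quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BSOFTWARN);
        }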
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c index c6ad7c7e3ee9..05ac0fe9c4d3 100644 --- a/fs/xattr_acl.c +++ b/fs/xattr_acl.c | |||
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size) | |||
36 | if (count == 0) | 36 | if (count == 0) |
37 | return NULL; | 37 | return NULL; |
38 | 38 | ||
39 | acl = posix_acl_alloc(count, GFP_KERNEL); | 39 | acl = posix_acl_alloc(count, GFP_NOFS); |
40 | if (!acl) | 40 | if (!acl) |
41 | return ERR_PTR(-ENOMEM); | 41 | return ERR_PTR(-ENOMEM); |
42 | acl_e = acl->a_entries; | 42 | acl_e = acl->a_entries; |
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 1feed71551c9..5a5385749e16 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -330,6 +330,7 @@ unifdef-y += scc.h | |||
330 | unifdef-y += sched.h | 330 | unifdef-y += sched.h |
331 | unifdef-y += screen_info.h | 331 | unifdef-y += screen_info.h |
332 | unifdef-y += sdla.h | 332 | unifdef-y += sdla.h |
333 | unifdef-y += securebits.h | ||
333 | unifdef-y += selinux_netlink.h | 334 | unifdef-y += selinux_netlink.h |
334 | unifdef-y += sem.h | 335 | unifdef-y += sem.h |
335 | unifdef-y += serial_core.h | 336 | unifdef-y += serial_core.h |
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index dd97fb8408a8..b10ec49ee2dd 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat, | |||
53 | unsigned long addr, | 53 | unsigned long addr, |
54 | unsigned long size); | 54 | unsigned long size); |
55 | extern void free_bootmem(unsigned long addr, unsigned long size); | 55 | extern void free_bootmem(unsigned long addr, unsigned long size); |
56 | extern void free_bootmem_late(unsigned long addr, unsigned long size); | ||
56 | 57 | ||
57 | /* | 58 | /* |
58 | * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, | 59 | * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, |
diff --git a/include/linux/capability.h b/include/linux/capability.h index c8f2a5f70ed5..39e5ff512fbe 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -92,9 +92,7 @@ struct vfs_cap_data { | |||
92 | #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 | 92 | #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 |
93 | #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 | 93 | #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 |
94 | 94 | ||
95 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
96 | extern int file_caps_enabled; | 95 | extern int file_caps_enabled; |
97 | #endif | ||
98 | 96 | ||
99 | typedef struct kernel_cap_struct { | 97 | typedef struct kernel_cap_struct { |
100 | __u32 cap[_KERNEL_CAPABILITY_U32S]; | 98 | __u32 cap[_KERNEL_CAPABILITY_U32S]; |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index a3ed7cb8ca34..73dcf804bc94 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -79,6 +79,7 @@ | |||
79 | #define noinline __attribute__((noinline)) | 79 | #define noinline __attribute__((noinline)) |
80 | #define __attribute_const__ __attribute__((__const__)) | 80 | #define __attribute_const__ __attribute__((__const__)) |
81 | #define __maybe_unused __attribute__((unused)) | 81 | #define __maybe_unused __attribute__((unused)) |
82 | #define __always_unused __attribute__((unused)) | ||
82 | 83 | ||
83 | #define __gcc_header(x) #x | 84 | #define __gcc_header(x) #x |
84 | #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) | 85 | #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) |
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 450fa597c94d..94dea3ffbfa1 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h | |||
@@ -36,4 +36,26 @@ | |||
36 | the kernel context */ | 36 | the kernel context */ |
37 | #define __cold __attribute__((__cold__)) | 37 | #define __cold __attribute__((__cold__)) |
38 | 38 | ||
39 | |||
40 | #if __GNUC_MINOR__ >= 5 | ||
41 | /* | ||
42 | * Mark a position in code as unreachable. This can be used to | ||
43 | * suppress control flow warnings after asm blocks that transfer | ||
44 | * control elsewhere. | ||
45 | * | ||
46 | * Early snapshots of gcc 4.5 don't support this and we can't detect | ||
47 | * this in the preprocessor, but we can live with this because they're | ||
48 | * unreleased. Really, we need to have autoconf for the kernel. | ||
49 | */ | ||
50 | #define unreachable() __builtin_unreachable() | ||
51 | #endif | ||
52 | |||
53 | #endif | ||
54 | |||
55 | #if __GNUC_MINOR__ > 0 | ||
56 | #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) | ||
57 | #endif | ||
58 | #if __GNUC_MINOR__ >= 4 | ||
59 | #define __compiletime_warning(message) __attribute__((warning(message))) | ||
60 | #define __compiletime_error(message) __attribute__((error(message))) | ||
39 | #endif | 61 | #endif |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 04fb5135b4e1..5be3dab4a695 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -144,6 +144,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
144 | # define barrier() __memory_barrier() | 144 | # define barrier() __memory_barrier() |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | /* Unreachable code */ | ||
148 | #ifndef unreachable | ||
149 | # define unreachable() do { } while (1) | ||
150 | #endif | ||
151 | |||
147 | #ifndef RELOC_HIDE | 152 | #ifndef RELOC_HIDE |
148 | # define RELOC_HIDE(ptr, off) \ | 153 | # define RELOC_HIDE(ptr, off) \ |
149 | ({ unsigned long __ptr; \ | 154 | ({ unsigned long __ptr; \ |
@@ -213,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
213 | # define __maybe_unused /* unimplemented */ | 218 | # define __maybe_unused /* unimplemented */ |
214 | #endif | 219 | #endif |
215 | 220 | ||
221 | #ifndef __always_unused | ||
222 | # define __always_unused /* unimplemented */ | ||
223 | #endif | ||
224 | |||
216 | #ifndef noinline | 225 | #ifndef noinline |
217 | #define noinline | 226 | #define noinline |
218 | #endif | 227 | #endif |
@@ -266,6 +275,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
266 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | 275 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) |
267 | #endif | 276 | #endif |
268 | 277 | ||
278 | /* Compile time object size, -1 for unknown */ | ||
279 | #ifndef __compiletime_object_size | ||
280 | # define __compiletime_object_size(obj) -1 | ||
281 | #endif | ||
282 | #ifndef __compiletime_warning | ||
283 | # define __compiletime_warning(message) | ||
284 | #endif | ||
285 | #ifndef __compiletime_error | ||
286 | # define __compiletime_error(message) | ||
287 | #endif | ||
288 | |||
269 | /* | 289 | /* |
270 | * Prevent the compiler from merging or refetching accesses. The compiler | 290 | * Prevent the compiler from merging or refetching accesses. The compiler |
271 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), | 291 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), |
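The new annotations are easiest to see in use. A sketch (not from this patch) combining the gcc-4.x definitions with the fallbacks above; the function itself is illustrative:

        /* Sketch: __compiletime_object_size() evaluates to -1 when gcc cannot
         * determine the object size; unreachable() tells gcc >= 4.5 that control
         * never flows past BUG(), silencing "control reaches end of function". */
        #include <linux/compiler.h>
        #include <linux/bug.h>
        #include <linux/errno.h>

        static __always_unused int demo(int state, char *dst, int n)
        {
                int dst_size = __compiletime_object_size(dst); /* usually -1 here */

                if (dst_size != -1 && n > dst_size)
                        return -EINVAL;

                switch (state) {
                case 0:
                        return 0;
                case 1:
                        return 1;
                default:
                        BUG();
                        unreachable();
                }
        }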
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 4a2b162c256a..5de4c9e5856d 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -208,16 +208,9 @@ struct dmar_atsr_unit { | |||
208 | u8 include_all:1; /* include all ports */ | 208 | u8 include_all:1; /* include all ports */ |
209 | }; | 209 | }; |
210 | 210 | ||
211 | /* Intel DMAR initialization functions */ | ||
212 | extern int intel_iommu_init(void); | 211 | extern int intel_iommu_init(void); |
213 | #else | 212 | #else /* !CONFIG_DMAR: */ |
214 | static inline int intel_iommu_init(void) | 213 | static inline int intel_iommu_init(void) { return -ENODEV; } |
215 | { | 214 | #endif /* CONFIG_DMAR */ |
216 | #ifdef CONFIG_INTR_REMAP | 215 | |
217 | return dmar_dev_scope_init(); | ||
218 | #else | ||
219 | return -ENODEV; | ||
220 | #endif | ||
221 | } | ||
222 | #endif /* !CONFIG_DMAR */ | ||
223 | #endif /* __DMAR_H__ */ | 216 | #endif /* __DMAR_H__ */ |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4ec5e67e18cf..47bbdf9c38d0 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -117,12 +117,12 @@ struct ftrace_event_call { | |||
117 | struct dentry *dir; | 117 | struct dentry *dir; |
118 | struct trace_event *event; | 118 | struct trace_event *event; |
119 | int enabled; | 119 | int enabled; |
120 | int (*regfunc)(void *); | 120 | int (*regfunc)(struct ftrace_event_call *); |
121 | void (*unregfunc)(void *); | 121 | void (*unregfunc)(struct ftrace_event_call *); |
122 | int id; | 122 | int id; |
123 | int (*raw_init)(void); | 123 | int (*raw_init)(struct ftrace_event_call *); |
124 | int (*show_format)(struct ftrace_event_call *call, | 124 | int (*show_format)(struct ftrace_event_call *, |
125 | struct trace_seq *s); | 125 | struct trace_seq *); |
126 | int (*define_fields)(struct ftrace_event_call *); | 126 | int (*define_fields)(struct ftrace_event_call *); |
127 | struct list_head fields; | 127 | struct list_head fields; |
128 | int filter_active; | 128 | int filter_active; |
@@ -131,20 +131,20 @@ struct ftrace_event_call { | |||
131 | void *data; | 131 | void *data; |
132 | 132 | ||
133 | atomic_t profile_count; | 133 | atomic_t profile_count; |
134 | int (*profile_enable)(void); | 134 | int (*profile_enable)(struct ftrace_event_call *); |
135 | void (*profile_disable)(void); | 135 | void (*profile_disable)(struct ftrace_event_call *); |
136 | }; | 136 | }; |
137 | 137 | ||
138 | #define FTRACE_MAX_PROFILE_SIZE 2048 | 138 | #define FTRACE_MAX_PROFILE_SIZE 2048 |
139 | 139 | ||
140 | extern char *trace_profile_buf; | 140 | extern char *perf_trace_buf; |
141 | extern char *trace_profile_buf_nmi; | 141 | extern char *perf_trace_buf_nmi; |
142 | 142 | ||
143 | #define MAX_FILTER_PRED 32 | 143 | #define MAX_FILTER_PRED 32 |
144 | #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ | 144 | #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ |
145 | 145 | ||
146 | extern void destroy_preds(struct ftrace_event_call *call); | 146 | extern void destroy_preds(struct ftrace_event_call *call); |
147 | extern int filter_match_preds(struct ftrace_event_call *call, void *rec); | 147 | extern int filter_match_preds(struct event_filter *filter, void *rec); |
148 | extern int filter_current_check_discard(struct ring_buffer *buffer, | 148 | extern int filter_current_check_discard(struct ring_buffer *buffer, |
149 | struct ftrace_event_call *call, | 149 | struct ftrace_event_call *call, |
150 | void *rec, | 150 | void *rec, |
@@ -157,11 +157,12 @@ enum { | |||
157 | FILTER_PTR_STRING, | 157 | FILTER_PTR_STRING, |
158 | }; | 158 | }; |
159 | 159 | ||
160 | extern int trace_define_field(struct ftrace_event_call *call, | ||
161 | const char *type, const char *name, | ||
162 | int offset, int size, int is_signed, | ||
163 | int filter_type); | ||
164 | extern int trace_define_common_fields(struct ftrace_event_call *call); | 160 | extern int trace_define_common_fields(struct ftrace_event_call *call); |
161 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, | ||
162 | const char *name, int offset, int size, | ||
163 | int is_signed, int filter_type); | ||
164 | extern int trace_add_event_call(struct ftrace_event_call *call); | ||
165 | extern void trace_remove_event_call(struct ftrace_event_call *call); | ||
165 | 166 | ||
166 | #define is_signed_type(type) (((type)(-1)) < 0) | 167 | #define is_signed_type(type) (((type)(-1)) < 0) |
167 | 168 | ||
@@ -186,4 +187,13 @@ do { \ | |||
186 | __trace_printk(ip, fmt, ##args); \ | 187 | __trace_printk(ip, fmt, ##args); \ |
187 | } while (0) | 188 | } while (0) |
188 | 189 | ||
190 | #ifdef CONFIG_EVENT_PROFILE | ||
191 | struct perf_event; | ||
192 | extern int ftrace_profile_enable(int event_id); | ||
193 | extern void ftrace_profile_disable(int event_id); | ||
194 | extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, | ||
195 | char *filter_str); | ||
196 | extern void ftrace_profile_free_filter(struct perf_event *event); | ||
197 | #endif | ||
198 | |||
189 | #endif /* _LINUX_FTRACE_EVENT_H */ | 199 | #endif /* _LINUX_FTRACE_EVENT_H */ |
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h index b80c88dedbbb..81f90a59cda6 100644 --- a/include/linux/gfs2_ondisk.h +++ b/include/linux/gfs2_ondisk.h | |||
@@ -81,7 +81,11 @@ struct gfs2_meta_header { | |||
81 | __be32 mh_type; | 81 | __be32 mh_type; |
82 | __be64 __pad0; /* Was generation number in gfs1 */ | 82 | __be64 __pad0; /* Was generation number in gfs1 */ |
83 | __be32 mh_format; | 83 | __be32 mh_format; |
84 | __be32 __pad1; /* Was incarnation number in gfs1 */ | 84 | /* This union is to keep userspace happy */ |
85 | union { | ||
86 | __be32 mh_jid; /* Was incarnation number in gfs1 */ | ||
87 | __be32 __pad1; | ||
88 | }; | ||
85 | }; | 89 | }; |
86 | 90 | ||
87 | /* | 91 | /* |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 6d527ee82b2b..d5b387669dab 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
@@ -139,10 +139,34 @@ static inline void account_system_vtime(struct task_struct *tsk) | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #if defined(CONFIG_NO_HZ) | 141 | #if defined(CONFIG_NO_HZ) |
142 | #if defined(CONFIG_TINY_RCU) | ||
143 | extern void rcu_enter_nohz(void); | ||
144 | extern void rcu_exit_nohz(void); | ||
145 | |||
146 | static inline void rcu_irq_enter(void) | ||
147 | { | ||
148 | rcu_exit_nohz(); | ||
149 | } | ||
150 | |||
151 | static inline void rcu_irq_exit(void) | ||
152 | { | ||
153 | rcu_enter_nohz(); | ||
154 | } | ||
155 | |||
156 | static inline void rcu_nmi_enter(void) | ||
157 | { | ||
158 | } | ||
159 | |||
160 | static inline void rcu_nmi_exit(void) | ||
161 | { | ||
162 | } | ||
163 | |||
164 | #else | ||
142 | extern void rcu_irq_enter(void); | 165 | extern void rcu_irq_enter(void); |
143 | extern void rcu_irq_exit(void); | 166 | extern void rcu_irq_exit(void); |
144 | extern void rcu_nmi_enter(void); | 167 | extern void rcu_nmi_enter(void); |
145 | extern void rcu_nmi_exit(void); | 168 | extern void rcu_nmi_exit(void); |
169 | #endif | ||
146 | #else | 170 | #else |
147 | # define rcu_irq_enter() do { } while (0) | 171 | # define rcu_irq_enter() do { } while (0) |
148 | # define rcu_irq_exit() do { } while (0) | 172 | # define rcu_irq_exit() do { } while (0) |
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h new file mode 100644 index 000000000000..a03daed08c59 --- /dev/null +++ b/include/linux/hw_breakpoint.h | |||
@@ -0,0 +1,131 @@ | |||
1 | #ifndef _LINUX_HW_BREAKPOINT_H | ||
2 | #define _LINUX_HW_BREAKPOINT_H | ||
3 | |||
4 | enum { | ||
5 | HW_BREAKPOINT_LEN_1 = 1, | ||
6 | HW_BREAKPOINT_LEN_2 = 2, | ||
7 | HW_BREAKPOINT_LEN_4 = 4, | ||
8 | HW_BREAKPOINT_LEN_8 = 8, | ||
9 | }; | ||
10 | |||
11 | enum { | ||
12 | HW_BREAKPOINT_R = 1, | ||
13 | HW_BREAKPOINT_W = 2, | ||
14 | HW_BREAKPOINT_X = 4, | ||
15 | }; | ||
16 | |||
17 | #ifdef __KERNEL__ | ||
18 | |||
19 | #include <linux/perf_event.h> | ||
20 | |||
21 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
22 | |||
23 | /* As it's for in-kernel or ptrace use, we want it to be pinned */ | ||
24 | #define DEFINE_BREAKPOINT_ATTR(name) \ | ||
25 | struct perf_event_attr name = { \ | ||
26 | .type = PERF_TYPE_BREAKPOINT, \ | ||
27 | .size = sizeof(name), \ | ||
28 | .pinned = 1, \ | ||
29 | }; | ||
30 | |||
31 | static inline void hw_breakpoint_init(struct perf_event_attr *attr) | ||
32 | { | ||
33 | attr->type = PERF_TYPE_BREAKPOINT; | ||
34 | attr->size = sizeof(*attr); | ||
35 | attr->pinned = 1; | ||
36 | } | ||
37 | |||
38 | static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) | ||
39 | { | ||
40 | return bp->attr.bp_addr; | ||
41 | } | ||
42 | |||
43 | static inline int hw_breakpoint_type(struct perf_event *bp) | ||
44 | { | ||
45 | return bp->attr.bp_type; | ||
46 | } | ||
47 | |||
48 | static inline int hw_breakpoint_len(struct perf_event *bp) | ||
49 | { | ||
50 | return bp->attr.bp_len; | ||
51 | } | ||
52 | |||
53 | extern struct perf_event * | ||
54 | register_user_hw_breakpoint(struct perf_event_attr *attr, | ||
55 | perf_callback_t triggered, | ||
56 | struct task_struct *tsk); | ||
57 | |||
58 | /* FIXME: only change from the attr, and don't unregister */ | ||
59 | extern struct perf_event * | ||
60 | modify_user_hw_breakpoint(struct perf_event *bp, | ||
61 | struct perf_event_attr *attr, | ||
62 | perf_callback_t triggered, | ||
63 | struct task_struct *tsk); | ||
64 | |||
65 | /* | ||
66 | * Kernel breakpoints are not associated with any particular thread. | ||
67 | */ | ||
68 | extern struct perf_event * | ||
69 | register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, | ||
70 | perf_callback_t triggered, | ||
71 | int cpu); | ||
72 | |||
73 | extern struct perf_event ** | ||
74 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | ||
75 | perf_callback_t triggered); | ||
76 | |||
77 | extern int register_perf_hw_breakpoint(struct perf_event *bp); | ||
78 | extern int __register_perf_hw_breakpoint(struct perf_event *bp); | ||
79 | extern void unregister_hw_breakpoint(struct perf_event *bp); | ||
80 | extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events); | ||
81 | |||
82 | extern int reserve_bp_slot(struct perf_event *bp); | ||
83 | extern void release_bp_slot(struct perf_event *bp); | ||
84 | |||
85 | extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); | ||
86 | |||
87 | static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) | ||
88 | { | ||
89 | return &bp->hw.info; | ||
90 | } | ||
91 | |||
92 | #else /* !CONFIG_HAVE_HW_BREAKPOINT */ | ||
93 | |||
94 | static inline struct perf_event * | ||
95 | register_user_hw_breakpoint(struct perf_event_attr *attr, | ||
96 | perf_callback_t triggered, | ||
97 | struct task_struct *tsk) { return NULL; } | ||
98 | static inline struct perf_event * | ||
99 | modify_user_hw_breakpoint(struct perf_event *bp, | ||
100 | struct perf_event_attr *attr, | ||
101 | perf_callback_t triggered, | ||
102 | struct task_struct *tsk) { return NULL; } | ||
103 | static inline struct perf_event * | ||
104 | register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, | ||
105 | perf_callback_t triggered, | ||
106 | int cpu) { return NULL; } | ||
107 | static inline struct perf_event ** | ||
108 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | ||
109 | perf_callback_t triggered) { return NULL; } | ||
110 | static inline int | ||
111 | register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } | ||
112 | static inline int | ||
113 | __register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } | ||
114 | static inline void unregister_hw_breakpoint(struct perf_event *bp) { } | ||
115 | static inline void | ||
116 | unregister_wide_hw_breakpoint(struct perf_event **cpu_events) { } | ||
117 | static inline int | ||
118 | reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } | ||
119 | static inline void release_bp_slot(struct perf_event *bp) { } | ||
120 | |||
121 | static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { } | ||
122 | |||
123 | static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) | ||
124 | { | ||
125 | return NULL; | ||
126 | } | ||
127 | |||
128 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
129 | #endif /* __KERNEL__ */ | ||
130 | |||
131 | #endif /* _LINUX_HW_BREAKPOINT_H */ | ||
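A hedged sketch of arming a data watchpoint with the new interface. The functions, the attr helpers and the perf_callback_t signature are the ones declared above; the watched variable, the error-return convention and the my_* names are illustrative:

        /* Sketch: register a system-wide write watchpoint on a kernel variable
         * and tear it down again. Fragment of a module, boilerplate omitted. */
        #include <linux/hw_breakpoint.h>
        #include <linux/kernel.h>
        #include <linux/init.h>
        #include <linux/err.h>

        static int my_watched_value;            /* placeholder data to watch */
        static struct perf_event **my_bp_events;

        static void my_bp_handler(struct perf_event *bp, void *data)
        {
                pr_info("write hit at 0x%lx\n", hw_breakpoint_addr(bp));
        }

        static int __init my_bp_init(void)
        {
                DEFINE_BREAKPOINT_ATTR(attr);   /* pinned PERF_TYPE_BREAKPOINT */

                attr.bp_addr = (unsigned long)&my_watched_value;
                attr.bp_len  = HW_BREAKPOINT_LEN_4;
                attr.bp_type = HW_BREAKPOINT_W;

                my_bp_events = register_wide_hw_breakpoint(&attr, my_bp_handler);
                if (IS_ERR(my_bp_events))       /* error convention assumed */
                        return PTR_ERR(my_bp_events);
                return 0;
        }

        static void __exit my_bp_exit(void)
        {
                unregister_wide_hw_breakpoint(my_bp_events);
        }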
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 21a6f5d9af22..8d10aa7fd4c9 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -83,16 +83,12 @@ extern struct group_info init_groups; | |||
83 | #define INIT_IDS | 83 | #define INIT_IDS |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
87 | /* | 86 | /* |
88 | * Because of the reduced scope of CAP_SETPCAP when filesystem | 87 | * Because of the reduced scope of CAP_SETPCAP when filesystem |
89 | * capabilities are in effect, it is safe to allow CAP_SETPCAP to | 88 | * capabilities are in effect, it is safe to allow CAP_SETPCAP to |
90 | * be available in the default configuration. | 89 | * be available in the default configuration. |
91 | */ | 90 | */ |
92 | # define CAP_INIT_BSET CAP_FULL_SET | 91 | # define CAP_INIT_BSET CAP_FULL_SET |
93 | #else | ||
94 | # define CAP_INIT_BSET CAP_INIT_EFF_SET | ||
95 | #endif | ||
96 | 92 | ||
97 | #ifdef CONFIG_TREE_PREEMPT_RCU | 93 | #ifdef CONFIG_TREE_PREEMPT_RCU |
98 | #define INIT_TASK_RCU_PREEMPT(tsk) \ | 94 | #define INIT_TASK_RCU_PREEMPT(tsk) \ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 7ca72b74eec7..75f3f00ac1e5 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -603,12 +603,6 @@ static inline void init_irq_proc(void) | |||
603 | } | 603 | } |
604 | #endif | 604 | #endif |
605 | 605 | ||
606 | #if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ) | ||
607 | extern void debug_poll_all_shared_irqs(void); | ||
608 | #else | ||
609 | static inline void debug_poll_all_shared_irqs(void) { } | ||
610 | #endif | ||
611 | |||
612 | struct seq_file; | 606 | struct seq_file; |
613 | int show_interrupts(struct seq_file *p, void *v); | 607 | int show_interrupts(struct seq_file *p, void *v); |
614 | 608 | ||
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index b02a3f1d46a0..006bf45eae30 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h | |||
@@ -124,6 +124,6 @@ | |||
124 | typecheck(unsigned long, flags); \ | 124 | typecheck(unsigned long, flags); \ |
125 | raw_irqs_disabled_flags(flags); \ | 125 | raw_irqs_disabled_flags(flags); \ |
126 | }) | 126 | }) |
127 | #endif /* CONFIG_X86 */ | 127 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
128 | 128 | ||
129 | #endif | 129 | #endif |
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 1a9cf78bfce5..6811f4bfc6e7 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x); | |||
307 | extern unsigned long clock_t_to_jiffies(unsigned long x); | 307 | extern unsigned long clock_t_to_jiffies(unsigned long x); |
308 | extern u64 jiffies_64_to_clock_t(u64 x); | 308 | extern u64 jiffies_64_to_clock_t(u64 x); |
309 | extern u64 nsec_to_clock_t(u64 x); | 309 | extern u64 nsec_to_clock_t(u64 x); |
310 | extern unsigned long nsecs_to_jiffies(u64 n); | ||
310 | 311 | ||
311 | #define TIMESTAMP_SIZE 30 | 312 | #define TIMESTAMP_SIZE 30 |
312 | 313 | ||
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f4e3184fa054..3fa4c590cf12 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
16 | #include <linux/log2.h> | 16 | #include <linux/log2.h> |
17 | #include <linux/typecheck.h> | 17 | #include <linux/typecheck.h> |
18 | #include <linux/ratelimit.h> | ||
19 | #include <linux/dynamic_debug.h> | 18 | #include <linux/dynamic_debug.h> |
20 | #include <asm/byteorder.h> | 19 | #include <asm/byteorder.h> |
21 | #include <asm/bug.h> | 20 | #include <asm/bug.h> |
@@ -241,8 +240,8 @@ asmlinkage int vprintk(const char *fmt, va_list args) | |||
241 | asmlinkage int printk(const char * fmt, ...) | 240 | asmlinkage int printk(const char * fmt, ...) |
242 | __attribute__ ((format (printf, 1, 2))) __cold; | 241 | __attribute__ ((format (printf, 1, 2))) __cold; |
243 | 242 | ||
244 | extern struct ratelimit_state printk_ratelimit_state; | 243 | extern int __printk_ratelimit(const char *func); |
245 | extern int printk_ratelimit(void); | 244 | #define printk_ratelimit() __printk_ratelimit(__func__) |
246 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | 245 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, |
247 | unsigned int interval_msec); | 246 | unsigned int interval_msec); |
248 | 247 | ||
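printk_ratelimit() keeps its old calling convention but now expands to __printk_ratelimit(__func__), so the shared ratelimit state can report which caller is being throttled. The usual pattern is unchanged, as a quick sketch:

        /* Sketch: classic rate-limited warning; __func__ is passed along
         * automatically by the printk_ratelimit() macro above. */
        #include <linux/kernel.h>

        static void my_report_overrun(int queue_id)     /* illustrative helper */
        {
                if (printk_ratelimit())
                        printk(KERN_WARNING "queue %d overrun\n", queue_id);
        }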
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 348fa8874b52..c059044bc6dc 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -25,6 +25,7 @@ struct cpu_usage_stat { | |||
25 | cputime64_t iowait; | 25 | cputime64_t iowait; |
26 | cputime64_t steal; | 26 | cputime64_t steal; |
27 | cputime64_t guest; | 27 | cputime64_t guest; |
28 | cputime64_t guest_nice; | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | struct kernel_stat { | 31 | struct kernel_stat { |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 3a46b7b7abb2..1b672f74a32f 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -296,6 +296,8 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); | |||
296 | int disable_kprobe(struct kprobe *kp); | 296 | int disable_kprobe(struct kprobe *kp); |
297 | int enable_kprobe(struct kprobe *kp); | 297 | int enable_kprobe(struct kprobe *kp); |
298 | 298 | ||
299 | void dump_kprobe(struct kprobe *kp); | ||
300 | |||
299 | #else /* !CONFIG_KPROBES: */ | 301 | #else /* !CONFIG_KPROBES: */ |
300 | 302 | ||
301 | static inline int kprobes_built_in(void) | 303 | static inline int kprobes_built_in(void) |
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 190c37854870..f78f83d7663f 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h | |||
@@ -26,14 +26,15 @@ | |||
26 | 26 | ||
27 | /* Auxiliary data to use in generating the audit record. */ | 27 | /* Auxiliary data to use in generating the audit record. */ |
28 | struct common_audit_data { | 28 | struct common_audit_data { |
29 | char type; | 29 | char type; |
30 | #define LSM_AUDIT_DATA_FS 1 | 30 | #define LSM_AUDIT_DATA_FS 1 |
31 | #define LSM_AUDIT_DATA_NET 2 | 31 | #define LSM_AUDIT_DATA_NET 2 |
32 | #define LSM_AUDIT_DATA_CAP 3 | 32 | #define LSM_AUDIT_DATA_CAP 3 |
33 | #define LSM_AUDIT_DATA_IPC 4 | 33 | #define LSM_AUDIT_DATA_IPC 4 |
34 | #define LSM_AUDIT_DATA_TASK 5 | 34 | #define LSM_AUDIT_DATA_TASK 5 |
35 | #define LSM_AUDIT_DATA_KEY 6 | 35 | #define LSM_AUDIT_DATA_KEY 6 |
36 | #define LSM_AUDIT_NO_AUDIT 7 | 36 | #define LSM_AUDIT_NO_AUDIT 7 |
37 | #define LSM_AUDIT_DATA_KMOD 8 | ||
37 | struct task_struct *tsk; | 38 | struct task_struct *tsk; |
38 | union { | 39 | union { |
39 | struct { | 40 | struct { |
@@ -66,6 +67,7 @@ struct common_audit_data { | |||
66 | char *key_desc; | 67 | char *key_desc; |
67 | } key_struct; | 68 | } key_struct; |
68 | #endif | 69 | #endif |
70 | char *kmod_name; | ||
69 | } u; | 71 | } u; |
70 | /* this union contains LSM specific data */ | 72 | /* this union contains LSM specific data */ |
71 | union { | 73 | union { |
diff --git a/drivers/mfd/mcp.h b/include/linux/mfd/mcp.h index c093a93b8808..ee496708e38b 100644 --- a/drivers/mfd/mcp.h +++ b/include/linux/mfd/mcp.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef MCP_H | 10 | #ifndef MCP_H |
11 | #define MCP_H | 11 | #define MCP_H |
12 | 12 | ||
13 | #include <mach/dma.h> | ||
14 | |||
13 | struct mcp_ops; | 15 | struct mcp_ops; |
14 | 16 | ||
15 | struct mcp { | 17 | struct mcp { |
@@ -24,6 +26,7 @@ struct mcp { | |||
24 | dma_device_t dma_telco_rd; | 26 | dma_device_t dma_telco_rd; |
25 | dma_device_t dma_telco_wr; | 27 | dma_device_t dma_telco_wr; |
26 | struct device attached_device; | 28 | struct device attached_device; |
29 | int gpio_base; | ||
27 | }; | 30 | }; |
28 | 31 | ||
29 | struct mcp_ops { | 32 | struct mcp_ops { |
diff --git a/drivers/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h index a8ad8a0ed5db..aa9c3789bed4 100644 --- a/drivers/mfd/ucb1x00.h +++ b/include/linux/mfd/ucb1x00.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mfd/ucb1x00.h | 2 | * linux/include/mfd/ucb1x00.h |
3 | * | 3 | * |
4 | * Copyright (C) 2001 Russell King, All Rights Reserved. | 4 | * Copyright (C) 2001 Russell King, All Rights Reserved. |
5 | * | 5 | * |
@@ -10,6 +10,9 @@ | |||
10 | #ifndef UCB1200_H | 10 | #ifndef UCB1200_H |
11 | #define UCB1200_H | 11 | #define UCB1200_H |
12 | 12 | ||
13 | #include <linux/mfd/mcp.h> | ||
14 | #include <linux/gpio.h> | ||
15 | |||
13 | #define UCB_IO_DATA 0x00 | 16 | #define UCB_IO_DATA 0x00 |
14 | #define UCB_IO_DIR 0x01 | 17 | #define UCB_IO_DIR 0x01 |
15 | 18 | ||
@@ -100,7 +103,6 @@ | |||
100 | #define UCB_MODE_DYN_VFLAG_ENA (1 << 12) | 103 | #define UCB_MODE_DYN_VFLAG_ENA (1 << 12) |
101 | #define UCB_MODE_AUD_OFF_CAN (1 << 13) | 104 | #define UCB_MODE_AUD_OFF_CAN (1 << 13) |
102 | 105 | ||
103 | #include "mcp.h" | ||
104 | 106 | ||
105 | struct ucb1x00_irq { | 107 | struct ucb1x00_irq { |
106 | void *devid; | 108 | void *devid; |
@@ -123,6 +125,7 @@ struct ucb1x00 { | |||
123 | struct device dev; | 125 | struct device dev; |
124 | struct list_head node; | 126 | struct list_head node; |
125 | struct list_head devs; | 127 | struct list_head devs; |
128 | struct gpio_chip gpio; | ||
126 | }; | 129 | }; |
127 | 130 | ||
128 | struct ucb1x00_driver; | 131 | struct ucb1x00_driver; |
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index f95466343fb2..955d30fc6a27 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h | |||
@@ -1212,7 +1212,7 @@ | |||
1212 | #define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ | 1212 | #define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ |
1213 | #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ | 1213 | #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ |
1214 | 1214 | ||
1215 | #define WM831X_ISINK_MAX_ISEL 56 | 1215 | #define WM831X_ISINK_MAX_ISEL 55 |
1216 | extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; | 1216 | extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; |
1217 | 1217 | ||
1218 | #endif | 1218 | #endif |
diff --git a/include/linux/net.h b/include/linux/net.h index 529a0931711d..d7e26e30c8c2 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -358,6 +358,7 @@ static const struct proto_ops name##_ops = { \ | |||
358 | 358 | ||
359 | #ifdef CONFIG_SYSCTL | 359 | #ifdef CONFIG_SYSCTL |
360 | #include <linux/sysctl.h> | 360 | #include <linux/sysctl.h> |
361 | #include <linux/ratelimit.h> | ||
361 | extern struct ratelimit_state net_ratelimit_state; | 362 | extern struct ratelimit_state net_ratelimit_state; |
362 | #endif | 363 | #endif |
363 | 364 | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 84cf1f3b7838..daecca3c8300 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1633,6 +1633,8 @@ | |||
1633 | #define PCI_DEVICE_ID_O2_6730 0x673a | 1633 | #define PCI_DEVICE_ID_O2_6730 0x673a |
1634 | #define PCI_DEVICE_ID_O2_6832 0x6832 | 1634 | #define PCI_DEVICE_ID_O2_6832 0x6832 |
1635 | #define PCI_DEVICE_ID_O2_6836 0x6836 | 1635 | #define PCI_DEVICE_ID_O2_6836 0x6836 |
1636 | #define PCI_DEVICE_ID_O2_6812 0x6872 | ||
1637 | #define PCI_DEVICE_ID_O2_6933 0x6933 | ||
1636 | 1638 | ||
1637 | #define PCI_VENDOR_ID_3DFX 0x121a | 1639 | #define PCI_VENDOR_ID_3DFX 0x121a |
1638 | #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 | 1640 | #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 |
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7b7fbf433cff..e3fb25606706 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h | |||
@@ -106,6 +106,8 @@ enum perf_sw_ids { | |||
106 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 106 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
107 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 107 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
108 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 108 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
109 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
110 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
109 | 111 | ||
110 | PERF_COUNT_SW_MAX, /* non-ABI */ | 112 | PERF_COUNT_SW_MAX, /* non-ABI */ |
111 | }; | 113 | }; |
@@ -225,6 +227,7 @@ struct perf_counter_attr { | |||
225 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | 227 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) |
226 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | 228 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) |
227 | #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) | 229 | #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) |
230 | #define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) | ||
228 | 231 | ||
229 | enum perf_counter_ioc_flags { | 232 | enum perf_counter_ioc_flags { |
230 | PERF_IOC_FLAG_GROUP = 1U << 0, | 233 | PERF_IOC_FLAG_GROUP = 1U << 0, |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9e7012689a84..43adbd7f0010 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -18,6 +18,10 @@ | |||
18 | #include <linux/ioctl.h> | 18 | #include <linux/ioctl.h> |
19 | #include <asm/byteorder.h> | 19 | #include <asm/byteorder.h> |
20 | 20 | ||
21 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
22 | #include <asm/hw_breakpoint.h> | ||
23 | #endif | ||
24 | |||
21 | /* | 25 | /* |
22 | * User-space ABI bits: | 26 | * User-space ABI bits: |
23 | */ | 27 | */ |
@@ -31,6 +35,7 @@ enum perf_type_id { | |||
31 | PERF_TYPE_TRACEPOINT = 2, | 35 | PERF_TYPE_TRACEPOINT = 2, |
32 | PERF_TYPE_HW_CACHE = 3, | 36 | PERF_TYPE_HW_CACHE = 3, |
33 | PERF_TYPE_RAW = 4, | 37 | PERF_TYPE_RAW = 4, |
38 | PERF_TYPE_BREAKPOINT = 5, | ||
34 | 39 | ||
35 | PERF_TYPE_MAX, /* non-ABI */ | 40 | PERF_TYPE_MAX, /* non-ABI */ |
36 | }; | 41 | }; |
@@ -102,6 +107,8 @@ enum perf_sw_ids { | |||
102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 107 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 108 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 109 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
110 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
111 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
105 | 112 | ||
106 | PERF_COUNT_SW_MAX, /* non-ABI */ | 113 | PERF_COUNT_SW_MAX, /* non-ABI */ |
107 | }; | 114 | }; |
@@ -207,6 +214,15 @@ struct perf_event_attr { | |||
207 | __u32 wakeup_events; /* wakeup every n events */ | 214 | __u32 wakeup_events; /* wakeup every n events */ |
208 | __u32 wakeup_watermark; /* bytes before wakeup */ | 215 | __u32 wakeup_watermark; /* bytes before wakeup */ |
209 | }; | 216 | }; |
217 | |||
218 | union { | ||
219 | struct { /* Hardware breakpoint info */ | ||
220 | __u64 bp_addr; | ||
221 | __u32 bp_type; | ||
222 | __u32 bp_len; | ||
223 | }; | ||
224 | }; | ||
225 | |||
210 | __u32 __reserved_2; | 226 | __u32 __reserved_2; |
211 | 227 | ||
212 | __u64 __reserved_3; | 228 | __u64 __reserved_3; |
@@ -219,8 +235,9 @@ struct perf_event_attr { | |||
219 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) | 235 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) |
220 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) | 236 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) |
221 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) | 237 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) |
222 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) | 238 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) |
223 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) | 239 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) |
240 | #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) | ||
224 | 241 | ||
225 | enum perf_event_ioc_flags { | 242 | enum perf_event_ioc_flags { |
226 | PERF_IOC_FLAG_GROUP = 1U << 0, | 243 | PERF_IOC_FLAG_GROUP = 1U << 0, |
@@ -475,6 +492,11 @@ struct hw_perf_event { | |||
475 | s64 remaining; | 492 | s64 remaining; |
476 | struct hrtimer hrtimer; | 493 | struct hrtimer hrtimer; |
477 | }; | 494 | }; |
495 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
496 | union { /* breakpoint */ | ||
497 | struct arch_hw_breakpoint info; | ||
498 | }; | ||
499 | #endif | ||
478 | }; | 500 | }; |
479 | atomic64_t prev_count; | 501 | atomic64_t prev_count; |
480 | u64 sample_period; | 502 | u64 sample_period; |
@@ -543,6 +565,10 @@ struct perf_pending_entry { | |||
543 | void (*func)(struct perf_pending_entry *); | 565 | void (*func)(struct perf_pending_entry *); |
544 | }; | 566 | }; |
545 | 567 | ||
568 | typedef void (*perf_callback_t)(struct perf_event *, void *); | ||
569 | |||
570 | struct perf_sample_data; | ||
571 | |||
546 | /** | 572 | /** |
547 | * struct perf_event - performance event kernel representation: | 573 | * struct perf_event - performance event kernel representation: |
548 | */ | 574 | */ |
@@ -585,7 +611,7 @@ struct perf_event { | |||
585 | u64 tstamp_running; | 611 | u64 tstamp_running; |
586 | u64 tstamp_stopped; | 612 | u64 tstamp_stopped; |
587 | 613 | ||
588 | struct perf_event_attr attr; | 614 | struct perf_event_attr attr; |
589 | struct hw_perf_event hw; | 615 | struct hw_perf_event hw; |
590 | 616 | ||
591 | struct perf_event_context *ctx; | 617 | struct perf_event_context *ctx; |
@@ -633,7 +659,20 @@ struct perf_event { | |||
633 | 659 | ||
634 | struct pid_namespace *ns; | 660 | struct pid_namespace *ns; |
635 | u64 id; | 661 | u64 id; |
662 | |||
663 | void (*overflow_handler)(struct perf_event *event, | ||
664 | int nmi, struct perf_sample_data *data, | ||
665 | struct pt_regs *regs); | ||
666 | |||
667 | #ifdef CONFIG_EVENT_PROFILE | ||
668 | struct event_filter *filter; | ||
636 | #endif | 669 | #endif |
670 | |||
671 | perf_callback_t callback; | ||
672 | |||
673 | perf_callback_t event_callback; | ||
674 | |||
675 | #endif /* CONFIG_PERF_EVENTS */ | ||
637 | }; | 676 | }; |
638 | 677 | ||
639 | /** | 678 | /** |
@@ -706,7 +745,6 @@ struct perf_output_handle { | |||
706 | int nmi; | 745 | int nmi; |
707 | int sample; | 746 | int sample; |
708 | int locked; | 747 | int locked; |
709 | unsigned long flags; | ||
710 | }; | 748 | }; |
711 | 749 | ||
712 | #ifdef CONFIG_PERF_EVENTS | 750 | #ifdef CONFIG_PERF_EVENTS |
@@ -738,6 +776,14 @@ extern int hw_perf_group_sched_in(struct perf_event *group_leader, | |||
738 | struct perf_cpu_context *cpuctx, | 776 | struct perf_cpu_context *cpuctx, |
739 | struct perf_event_context *ctx, int cpu); | 777 | struct perf_event_context *ctx, int cpu); |
740 | extern void perf_event_update_userpage(struct perf_event *event); | 778 | extern void perf_event_update_userpage(struct perf_event *event); |
779 | extern int perf_event_release_kernel(struct perf_event *event); | ||
780 | extern struct perf_event * | ||
781 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | ||
782 | int cpu, | ||
783 | pid_t pid, | ||
784 | perf_callback_t callback); | ||
785 | extern u64 perf_event_read_value(struct perf_event *event, | ||
786 | u64 *enabled, u64 *running); | ||
741 | 787 | ||
742 | struct perf_sample_data { | 788 | struct perf_sample_data { |
743 | u64 type; | 789 | u64 type; |
@@ -814,6 +860,7 @@ extern int sysctl_perf_event_sample_rate; | |||
814 | extern void perf_event_init(void); | 860 | extern void perf_event_init(void); |
815 | extern void perf_tp_event(int event_id, u64 addr, u64 count, | 861 | extern void perf_tp_event(int event_id, u64 addr, u64 count, |
816 | void *record, int entry_size); | 862 | void *record, int entry_size); |
863 | extern void perf_bp_event(struct perf_event *event, void *data); | ||
817 | 864 | ||
818 | #ifndef perf_misc_flags | 865 | #ifndef perf_misc_flags |
819 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ | 866 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ |
@@ -827,6 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle, | |||
827 | extern void perf_output_end(struct perf_output_handle *handle); | 874 | extern void perf_output_end(struct perf_output_handle *handle); |
828 | extern void perf_output_copy(struct perf_output_handle *handle, | 875 | extern void perf_output_copy(struct perf_output_handle *handle, |
829 | const void *buf, unsigned int len); | 876 | const void *buf, unsigned int len); |
877 | extern int perf_swevent_get_recursion_context(void); | ||
878 | extern void perf_swevent_put_recursion_context(int rctx); | ||
830 | #else | 879 | #else |
831 | static inline void | 880 | static inline void |
832 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } | 881 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } |
@@ -848,11 +897,15 @@ static inline int perf_event_task_enable(void) { return -EINVAL; } | |||
848 | static inline void | 897 | static inline void |
849 | perf_sw_event(u32 event_id, u64 nr, int nmi, | 898 | perf_sw_event(u32 event_id, u64 nr, int nmi, |
850 | struct pt_regs *regs, u64 addr) { } | 899 | struct pt_regs *regs, u64 addr) { } |
900 | static inline void | ||
901 | perf_bp_event(struct perf_event *event, void *data) { } | ||
851 | 902 | ||
852 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 903 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
853 | static inline void perf_event_comm(struct task_struct *tsk) { } | 904 | static inline void perf_event_comm(struct task_struct *tsk) { } |
854 | static inline void perf_event_fork(struct task_struct *tsk) { } | 905 | static inline void perf_event_fork(struct task_struct *tsk) { } |
855 | static inline void perf_event_init(void) { } | 906 | static inline void perf_event_init(void) { } |
907 | static inline int perf_swevent_get_recursion_context(void) { return -1; } | ||
908 | static inline void perf_swevent_put_recursion_context(int rctx) { } | ||
856 | 909 | ||
857 | #endif | 910 | #endif |
858 | 911 | ||
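
The perf_event.h hunks above export the in-kernel counter API (perf_event_create_kernel_counter(), perf_event_read_value(), perf_event_release_kernel()) together with the software-event recursion helpers. A minimal sketch of the counter API follows; the attr field values, the error-pointer convention and the pr_info() reporting are assumptions beyond what the hunk itself shows, and the callback is left NULL because the counter is only read synchronously.

    #include <linux/perf_event.h>
    #include <linux/err.h>

    static struct perf_event *cycles_event;

    static int start_cycle_counter(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_HARDWARE,
                    .config = PERF_COUNT_HW_CPU_CYCLES,
                    .size   = sizeof(attr),
            };

            /* cpu 0, pid -1: count all tasks on that CPU, no callback */
            cycles_event = perf_event_create_kernel_counter(&attr, 0, -1, NULL);
            if (IS_ERR(cycles_event))
                    return PTR_ERR(cycles_event);
            return 0;
    }

    static void stop_cycle_counter(void)
    {
            u64 enabled, running;
            u64 count = perf_event_read_value(cycles_event, &enabled, &running);

            pr_info("cycles: %llu\n", (unsigned long long)count);
            perf_event_release_kernel(cycles_event);
    }
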
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 065a3652a3ea..67608161df6b 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h | |||
@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type) | |||
147 | if (old != ACL_NOT_CACHED) | 147 | if (old != ACL_NOT_CACHED) |
148 | posix_acl_release(old); | 148 | posix_acl_release(old); |
149 | } | 149 | } |
150 | |||
151 | static inline void forget_all_cached_acls(struct inode *inode) | ||
152 | { | ||
153 | struct posix_acl *old_access, *old_default; | ||
154 | spin_lock(&inode->i_lock); | ||
155 | old_access = inode->i_acl; | ||
156 | old_default = inode->i_default_acl; | ||
157 | inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; | ||
158 | spin_unlock(&inode->i_lock); | ||
159 | if (old_access != ACL_NOT_CACHED) | ||
160 | posix_acl_release(old_access); | ||
161 | if (old_default != ACL_NOT_CACHED) | ||
162 | posix_acl_release(old_default); | ||
163 | } | ||
150 | #endif | 164 | #endif |
151 | 165 | ||
152 | static inline void cache_no_acl(struct inode *inode) | 166 | static inline void cache_no_acl(struct inode *inode) |
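
forget_all_cached_acls() complements the existing forget_cached_acl(): it drops both the cached access and default ACLs under i_lock so they are re-read from backing store on the next lookup. A sketch of the intended kind of call site, e.g. an inode revalidation path in a network filesystem (the function name is illustrative):

    static void example_revalidate_acls(struct inode *inode)
    {
            /* both cached ACLs may be stale; drop them and refetch lazily */
            forget_all_cached_acls(inode);
    }
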
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 72b1a10a59b6..2e681d9555bd 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
@@ -105,6 +105,11 @@ struct preempt_notifier; | |||
105 | * @sched_out: we've just been preempted | 105 | * @sched_out: we've just been preempted |
106 | * notifier: struct preempt_notifier for the task being preempted | 106 | * notifier: struct preempt_notifier for the task being preempted |
107 | * next: the task that's kicking us out | 107 | * next: the task that's kicking us out |
108 | * | ||
109 | * Please note that sched_in and out are called under different | ||
110 | * contexts. sched_out is called with rq lock held and irq disabled | ||
111 | * while sched_in is called without rq lock and irq enabled. This | ||
112 | * difference is intentional and depended upon by its users. | ||
108 | */ | 113 | */ |
109 | struct preempt_ops { | 114 | struct preempt_ops { |
110 | void (*sched_in)(struct preempt_notifier *notifier, int cpu); | 115 | void (*sched_in)(struct preempt_notifier *notifier, int cpu); |
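
The new comment pins down the calling contexts of the two preempt_ops callbacks. A sketch of a notifier user written under those constraints (the my_* names are illustrative; registration via preempt_notifier_init()/preempt_notifier_register() is omitted):

    static void my_sched_in(struct preempt_notifier *pn, int cpu)
    {
            /* runs with IRQs enabled and no rq lock: may take sleeping locks */
    }

    static void my_sched_out(struct preempt_notifier *pn,
                             struct task_struct *next)
    {
            /* runs with the rq lock held and IRQs disabled: must not sleep */
    }

    static struct preempt_ops my_preempt_ops = {
            .sched_in  = my_sched_in,
            .sched_out = my_sched_out,
    };
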
diff --git a/include/linux/quota.h b/include/linux/quota.h index 78c48895b12a..ce9a9b2e5cd4 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type) | |||
376 | return flags >> _DQUOT_STATE_FLAGS; | 376 | return flags >> _DQUOT_STATE_FLAGS; |
377 | } | 377 | } |
378 | 378 | ||
379 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
380 | extern void quota_send_warning(short type, unsigned int id, dev_t dev, | ||
381 | const char warntype); | ||
382 | #else | ||
383 | static inline void quota_send_warning(short type, unsigned int id, dev_t dev, | ||
384 | const char warntype) | ||
385 | { | ||
386 | return; | ||
387 | } | ||
388 | #endif /* CONFIG_QUOTA_NETLINK_INTERFACE */ | ||
389 | |||
379 | struct quota_info { | 390 | struct quota_info { |
380 | unsigned int flags; /* Flags for diskquotas on this device */ | 391 | unsigned int flags; /* Flags for diskquotas on this device */ |
381 | struct mutex dqio_mutex; /* lock device while I/O in progress */ | 392 | struct mutex dqio_mutex; /* lock device while I/O in progress */ |
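
quota_send_warning() lets filesystems that enforce quota internally emit the same netlink quota warnings as the generic dquot code, and compiles to a no-op when CONFIG_QUOTA_NETLINK_INTERFACE is off. A sketch of a call site, assuming sb and uid come from the caller and that the chosen QUOTA_NL_* code matches the condition detected:

    /* user "uid" has hit a hard block limit on this filesystem */
    quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BHARDWARN);
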
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 00044b856453..668cf1bef030 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
@@ -1,20 +1,31 @@ | |||
1 | #ifndef _LINUX_RATELIMIT_H | 1 | #ifndef _LINUX_RATELIMIT_H |
2 | #define _LINUX_RATELIMIT_H | 2 | #define _LINUX_RATELIMIT_H |
3 | |||
3 | #include <linux/param.h> | 4 | #include <linux/param.h> |
5 | #include <linux/spinlock_types.h> | ||
4 | 6 | ||
5 | #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) | 7 | #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) |
6 | #define DEFAULT_RATELIMIT_BURST 10 | 8 | #define DEFAULT_RATELIMIT_BURST 10 |
7 | 9 | ||
8 | struct ratelimit_state { | 10 | struct ratelimit_state { |
9 | int interval; | 11 | spinlock_t lock; /* protect the state */ |
10 | int burst; | 12 | |
11 | int printed; | 13 | int interval; |
12 | int missed; | 14 | int burst; |
13 | unsigned long begin; | 15 | int printed; |
16 | int missed; | ||
17 | unsigned long begin; | ||
14 | }; | 18 | }; |
15 | 19 | ||
16 | #define DEFINE_RATELIMIT_STATE(name, interval, burst) \ | 20 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ |
17 | struct ratelimit_state name = {interval, burst,} | 21 | \ |
22 | struct ratelimit_state name = { \ | ||
23 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ | ||
24 | .interval = interval_init, \ | ||
25 | .burst = burst_init, \ | ||
26 | } | ||
27 | |||
28 | extern int ___ratelimit(struct ratelimit_state *rs, const char *func); | ||
29 | #define __ratelimit(state) ___ratelimit(state, __func__) | ||
18 | 30 | ||
19 | extern int __ratelimit(struct ratelimit_state *rs); | 31 | #endif /* _LINUX_RATELIMIT_H */ |
20 | #endif | ||
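
The ratelimit state now carries its own spinlock, so DEFINE_RATELIMIT_STATE() initializes the lock as well as the interval and burst, and __ratelimit() becomes a wrapper that passes the caller's function name to ___ratelimit() for the "callbacks suppressed" message. Typical usage is unchanged; a sketch:

    static DEFINE_RATELIMIT_STATE(my_rs, DEFAULT_RATELIMIT_INTERVAL,
                                  DEFAULT_RATELIMIT_BURST);

    static void report_bad_packet(void)
    {
            /* __ratelimit() expands to ___ratelimit(&my_rs, __func__) */
            if (__ratelimit(&my_rs))
                    printk(KERN_WARNING "dropping malformed packet\n");
    }
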
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 3ebd0b7bcb08..24440f4bf476 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -52,11 +52,6 @@ struct rcu_head { | |||
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* Exported common interfaces */ | 54 | /* Exported common interfaces */ |
55 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
56 | extern void synchronize_rcu(void); | ||
57 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
58 | #define synchronize_rcu synchronize_sched | ||
59 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
60 | extern void synchronize_rcu_bh(void); | 55 | extern void synchronize_rcu_bh(void); |
61 | extern void synchronize_sched(void); | 56 | extern void synchronize_sched(void); |
62 | extern void rcu_barrier(void); | 57 | extern void rcu_barrier(void); |
@@ -67,12 +62,11 @@ extern int sched_expedited_torture_stats(char *page); | |||
67 | 62 | ||
68 | /* Internal to kernel */ | 63 | /* Internal to kernel */ |
69 | extern void rcu_init(void); | 64 | extern void rcu_init(void); |
70 | extern void rcu_scheduler_starting(void); | ||
71 | extern int rcu_needs_cpu(int cpu); | ||
72 | extern int rcu_scheduler_active; | ||
73 | 65 | ||
74 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 66 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
75 | #include <linux/rcutree.h> | 67 | #include <linux/rcutree.h> |
68 | #elif defined(CONFIG_TINY_RCU) | ||
69 | #include <linux/rcutiny.h> | ||
76 | #else | 70 | #else |
77 | #error "Unknown RCU implementation specified to kernel configuration" | 71 | #error "Unknown RCU implementation specified to kernel configuration" |
78 | #endif | 72 | #endif |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h new file mode 100644 index 000000000000..c4ba9a78721e --- /dev/null +++ b/include/linux/rcutiny.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2008 | ||
19 | * | ||
20 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
21 | * | ||
22 | * For detailed explanation of Read-Copy Update mechanism see - | ||
23 | * Documentation/RCU | ||
24 | */ | ||
25 | #ifndef __LINUX_TINY_H | ||
26 | #define __LINUX_TINY_H | ||
27 | |||
28 | #include <linux/cache.h> | ||
29 | |||
30 | void rcu_sched_qs(int cpu); | ||
31 | void rcu_bh_qs(int cpu); | ||
32 | |||
33 | #define __rcu_read_lock() preempt_disable() | ||
34 | #define __rcu_read_unlock() preempt_enable() | ||
35 | #define __rcu_read_lock_bh() local_bh_disable() | ||
36 | #define __rcu_read_unlock_bh() local_bh_enable() | ||
37 | #define call_rcu_sched call_rcu | ||
38 | |||
39 | #define rcu_init_sched() do { } while (0) | ||
40 | extern void rcu_check_callbacks(int cpu, int user); | ||
41 | |||
42 | static inline int rcu_needs_cpu(int cpu) | ||
43 | { | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Return the number of grace periods. | ||
49 | */ | ||
50 | static inline long rcu_batches_completed(void) | ||
51 | { | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Return the number of bottom-half grace periods. | ||
57 | */ | ||
58 | static inline long rcu_batches_completed_bh(void) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | extern int rcu_expedited_torture_stats(char *page); | ||
64 | |||
65 | #define synchronize_rcu synchronize_sched | ||
66 | |||
67 | static inline void synchronize_rcu_expedited(void) | ||
68 | { | ||
69 | synchronize_sched(); | ||
70 | } | ||
71 | |||
72 | static inline void synchronize_rcu_bh_expedited(void) | ||
73 | { | ||
74 | synchronize_sched(); | ||
75 | } | ||
76 | |||
77 | struct notifier_block; | ||
78 | |||
79 | #ifdef CONFIG_NO_HZ | ||
80 | |||
81 | extern void rcu_enter_nohz(void); | ||
82 | extern void rcu_exit_nohz(void); | ||
83 | |||
84 | #else /* #ifdef CONFIG_NO_HZ */ | ||
85 | |||
86 | static inline void rcu_enter_nohz(void) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | static inline void rcu_exit_nohz(void) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
95 | |||
96 | static inline void rcu_scheduler_starting(void) | ||
97 | { | ||
98 | } | ||
99 | |||
100 | static inline void exit_rcu(void) | ||
101 | { | ||
102 | } | ||
103 | |||
104 | #endif /* __LINUX_RCUTINY_H */ | ||
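
rcutiny.h is the "Bloatwatch" RCU for !SMP kernels: rcu_read_lock() collapses to preempt_disable(), synchronize_rcu() maps to synchronize_sched(), and rcu_needs_cpu() always reports idle. Client code is written against the common API and is unaffected by which header rcupdate.h pulls in; a minimal sketch (my_data and gp are illustrative names, and updaters are assumed to be serialized externally):

    struct my_data {
            int value;
    };
    static struct my_data *gp;

    int read_value(void)
    {
            struct my_data *p;
            int v;

            rcu_read_lock();                /* preempt_disable() under TINY_RCU */
            p = rcu_dereference(gp);
            v = p ? p->value : -1;
            rcu_read_unlock();
            return v;
    }

    void publish_value(struct my_data *newp)
    {
            struct my_data *old = gp;

            rcu_assign_pointer(gp, newp);
            synchronize_rcu();              /* synchronize_sched() under TINY_RCU */
            kfree(old);
    }
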
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 9642c6bcb399..c93eee5911b0 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -34,15 +34,15 @@ struct notifier_block; | |||
34 | 34 | ||
35 | extern void rcu_sched_qs(int cpu); | 35 | extern void rcu_sched_qs(int cpu); |
36 | extern void rcu_bh_qs(int cpu); | 36 | extern void rcu_bh_qs(int cpu); |
37 | extern int rcu_cpu_notify(struct notifier_block *self, | ||
38 | unsigned long action, void *hcpu); | ||
39 | extern int rcu_needs_cpu(int cpu); | 37 | extern int rcu_needs_cpu(int cpu); |
38 | extern void rcu_scheduler_starting(void); | ||
40 | extern int rcu_expedited_torture_stats(char *page); | 39 | extern int rcu_expedited_torture_stats(char *page); |
41 | 40 | ||
42 | #ifdef CONFIG_TREE_PREEMPT_RCU | 41 | #ifdef CONFIG_TREE_PREEMPT_RCU |
43 | 42 | ||
44 | extern void __rcu_read_lock(void); | 43 | extern void __rcu_read_lock(void); |
45 | extern void __rcu_read_unlock(void); | 44 | extern void __rcu_read_unlock(void); |
45 | extern void synchronize_rcu(void); | ||
46 | extern void exit_rcu(void); | 46 | extern void exit_rcu(void); |
47 | 47 | ||
48 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 48 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
@@ -57,7 +57,7 @@ static inline void __rcu_read_unlock(void) | |||
57 | preempt_enable(); | 57 | preempt_enable(); |
58 | } | 58 | } |
59 | 59 | ||
60 | #define __synchronize_sched() synchronize_rcu() | 60 | #define synchronize_rcu synchronize_sched |
61 | 61 | ||
62 | static inline void exit_rcu(void) | 62 | static inline void exit_rcu(void) |
63 | { | 63 | { |
@@ -83,7 +83,6 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
83 | synchronize_sched_expedited(); | 83 | synchronize_sched_expedited(); |
84 | } | 84 | } |
85 | 85 | ||
86 | extern void __rcu_init(void); | ||
87 | extern void rcu_check_callbacks(int cpu, int user); | 86 | extern void rcu_check_callbacks(int cpu, int user); |
88 | 87 | ||
89 | extern long rcu_batches_completed(void); | 88 | extern long rcu_batches_completed(void); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 75e6e60bf583..89115ec7d43f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -145,7 +145,6 @@ extern unsigned long this_cpu_load(void); | |||
145 | 145 | ||
146 | 146 | ||
147 | extern void calc_global_load(void); | 147 | extern void calc_global_load(void); |
148 | extern u64 cpu_nr_migrations(int cpu); | ||
149 | 148 | ||
150 | extern unsigned long get_parent_ip(unsigned long addr); | 149 | extern unsigned long get_parent_ip(unsigned long addr); |
151 | 150 | ||
@@ -171,8 +170,6 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
171 | } | 170 | } |
172 | #endif | 171 | #endif |
173 | 172 | ||
174 | extern unsigned long long time_sync_thresh; | ||
175 | |||
176 | /* | 173 | /* |
177 | * Task state bitmask. NOTE! These bits are also | 174 | * Task state bitmask. NOTE! These bits are also |
178 | * encoded in fs/proc/array.c: get_task_state(). | 175 | * encoded in fs/proc/array.c: get_task_state(). |
@@ -349,7 +346,6 @@ extern signed long schedule_timeout(signed long timeout); | |||
349 | extern signed long schedule_timeout_interruptible(signed long timeout); | 346 | extern signed long schedule_timeout_interruptible(signed long timeout); |
350 | extern signed long schedule_timeout_killable(signed long timeout); | 347 | extern signed long schedule_timeout_killable(signed long timeout); |
351 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | 348 | extern signed long schedule_timeout_uninterruptible(signed long timeout); |
352 | asmlinkage void __schedule(void); | ||
353 | asmlinkage void schedule(void); | 349 | asmlinkage void schedule(void); |
354 | extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); | 350 | extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); |
355 | 351 | ||
@@ -628,6 +624,9 @@ struct signal_struct { | |||
628 | cputime_t utime, stime, cutime, cstime; | 624 | cputime_t utime, stime, cutime, cstime; |
629 | cputime_t gtime; | 625 | cputime_t gtime; |
630 | cputime_t cgtime; | 626 | cputime_t cgtime; |
627 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
628 | cputime_t prev_utime, prev_stime; | ||
629 | #endif | ||
631 | unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; | 630 | unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; |
632 | unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; | 631 | unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; |
633 | unsigned long inblock, oublock, cinblock, coublock; | 632 | unsigned long inblock, oublock, cinblock, coublock; |
@@ -1013,9 +1012,13 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | |||
1013 | return to_cpumask(sd->span); | 1012 | return to_cpumask(sd->span); |
1014 | } | 1013 | } |
1015 | 1014 | ||
1016 | extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | 1015 | extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
1017 | struct sched_domain_attr *dattr_new); | 1016 | struct sched_domain_attr *dattr_new); |
1018 | 1017 | ||
1018 | /* Allocate an array of sched domains, for partition_sched_domains(). */ | ||
1019 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms); | ||
1020 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); | ||
1021 | |||
1019 | /* Test a flag in parent sched domain */ | 1022 | /* Test a flag in parent sched domain */ |
1020 | static inline int test_sd_parent(struct sched_domain *sd, int flag) | 1023 | static inline int test_sd_parent(struct sched_domain *sd, int flag) |
1021 | { | 1024 | { |
@@ -1033,7 +1036,7 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); | |||
1033 | struct sched_domain_attr; | 1036 | struct sched_domain_attr; |
1034 | 1037 | ||
1035 | static inline void | 1038 | static inline void |
1036 | partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | 1039 | partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
1037 | struct sched_domain_attr *dattr_new) | 1040 | struct sched_domain_attr *dattr_new) |
1038 | { | 1041 | { |
1039 | } | 1042 | } |
@@ -1331,7 +1334,9 @@ struct task_struct { | |||
1331 | 1334 | ||
1332 | cputime_t utime, stime, utimescaled, stimescaled; | 1335 | cputime_t utime, stime, utimescaled, stimescaled; |
1333 | cputime_t gtime; | 1336 | cputime_t gtime; |
1337 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
1334 | cputime_t prev_utime, prev_stime; | 1338 | cputime_t prev_utime, prev_stime; |
1339 | #endif | ||
1335 | unsigned long nvcsw, nivcsw; /* context switch counts */ | 1340 | unsigned long nvcsw, nivcsw; /* context switch counts */ |
1336 | struct timespec start_time; /* monotonic time */ | 1341 | struct timespec start_time; /* monotonic time */ |
1337 | struct timespec real_start_time; /* boot based time */ | 1342 | struct timespec real_start_time; /* boot based time */ |
@@ -1421,17 +1426,17 @@ struct task_struct { | |||
1421 | #endif | 1426 | #endif |
1422 | #ifdef CONFIG_TRACE_IRQFLAGS | 1427 | #ifdef CONFIG_TRACE_IRQFLAGS |
1423 | unsigned int irq_events; | 1428 | unsigned int irq_events; |
1424 | int hardirqs_enabled; | ||
1425 | unsigned long hardirq_enable_ip; | 1429 | unsigned long hardirq_enable_ip; |
1426 | unsigned int hardirq_enable_event; | ||
1427 | unsigned long hardirq_disable_ip; | 1430 | unsigned long hardirq_disable_ip; |
1431 | unsigned int hardirq_enable_event; | ||
1428 | unsigned int hardirq_disable_event; | 1432 | unsigned int hardirq_disable_event; |
1429 | int softirqs_enabled; | 1433 | int hardirqs_enabled; |
1434 | int hardirq_context; | ||
1430 | unsigned long softirq_disable_ip; | 1435 | unsigned long softirq_disable_ip; |
1431 | unsigned int softirq_disable_event; | ||
1432 | unsigned long softirq_enable_ip; | 1436 | unsigned long softirq_enable_ip; |
1437 | unsigned int softirq_disable_event; | ||
1433 | unsigned int softirq_enable_event; | 1438 | unsigned int softirq_enable_event; |
1434 | int hardirq_context; | 1439 | int softirqs_enabled; |
1435 | int softirq_context; | 1440 | int softirq_context; |
1436 | #endif | 1441 | #endif |
1437 | #ifdef CONFIG_LOCKDEP | 1442 | #ifdef CONFIG_LOCKDEP |
@@ -1720,9 +1725,8 @@ static inline void put_task_struct(struct task_struct *t) | |||
1720 | __put_task_struct(t); | 1725 | __put_task_struct(t); |
1721 | } | 1726 | } |
1722 | 1727 | ||
1723 | extern cputime_t task_utime(struct task_struct *p); | 1728 | extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); |
1724 | extern cputime_t task_stime(struct task_struct *p); | 1729 | extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); |
1725 | extern cputime_t task_gtime(struct task_struct *p); | ||
1726 | 1730 | ||
1727 | /* | 1731 | /* |
1728 | * Per process flags | 1732 | * Per process flags |
@@ -2086,11 +2090,18 @@ static inline int is_si_special(const struct siginfo *info) | |||
2086 | return info <= SEND_SIG_FORCED; | 2090 | return info <= SEND_SIG_FORCED; |
2087 | } | 2091 | } |
2088 | 2092 | ||
2089 | /* True if we are on the alternate signal stack. */ | 2093 | /* |
2090 | 2094 | * True if we are on the alternate signal stack. | |
2095 | */ | ||
2091 | static inline int on_sig_stack(unsigned long sp) | 2096 | static inline int on_sig_stack(unsigned long sp) |
2092 | { | 2097 | { |
2093 | return (sp - current->sas_ss_sp < current->sas_ss_size); | 2098 | #ifdef CONFIG_STACK_GROWSUP |
2099 | return sp >= current->sas_ss_sp && | ||
2100 | sp - current->sas_ss_sp < current->sas_ss_size; | ||
2101 | #else | ||
2102 | return sp > current->sas_ss_sp && | ||
2103 | sp - current->sas_ss_sp <= current->sas_ss_size; | ||
2104 | #endif | ||
2094 | } | 2105 | } |
2095 | 2106 | ||
2096 | static inline int sas_ss_flags(unsigned long sp) | 2107 | static inline int sas_ss_flags(unsigned long sp) |
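
Two of the sched.h hunks change calling conventions rather than add features: partition_sched_domains() now takes an array of cpumask_var_t obtained from alloc_sched_domains(), and the task_utime()/task_stime()/task_gtime() accessors are replaced by task_times() and thread_group_times(), which return both adjusted values through one call. A sketch of the new time accessors, assuming "task" is a task_struct pointer held by process-reporting code:

    cputime_t ut, st;

    task_times(task, &ut, &st);             /* adjusted utime/stime for one task */
    thread_group_times(task, &ut, &st);     /* same, summed over the thread group */
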
diff --git a/include/linux/securebits.h b/include/linux/securebits.h index d2c5ed845bcc..33406174cbe8 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h | |||
@@ -1,6 +1,15 @@ | |||
1 | #ifndef _LINUX_SECUREBITS_H | 1 | #ifndef _LINUX_SECUREBITS_H |
2 | #define _LINUX_SECUREBITS_H 1 | 2 | #define _LINUX_SECUREBITS_H 1 |
3 | 3 | ||
4 | /* Each securesetting is implemented using two bits. One bit specifies | ||
5 | whether the setting is on or off. The other bit specify whether the | ||
6 | setting is locked or not. A setting which is locked cannot be | ||
7 | changed from user-level. */ | ||
8 | #define issecure_mask(X) (1 << (X)) | ||
9 | #ifdef __KERNEL__ | ||
10 | #define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) | ||
11 | #endif | ||
12 | |||
4 | #define SECUREBITS_DEFAULT 0x00000000 | 13 | #define SECUREBITS_DEFAULT 0x00000000 |
5 | 14 | ||
6 | /* When set UID 0 has no special privileges. When unset, we support | 15 | /* When set UID 0 has no special privileges. When unset, we support |
@@ -12,6 +21,9 @@ | |||
12 | #define SECURE_NOROOT 0 | 21 | #define SECURE_NOROOT 0 |
13 | #define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ | 22 | #define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ |
14 | 23 | ||
24 | #define SECBIT_NOROOT (issecure_mask(SECURE_NOROOT)) | ||
25 | #define SECBIT_NOROOT_LOCKED (issecure_mask(SECURE_NOROOT_LOCKED)) | ||
26 | |||
15 | /* When set, setuid to/from uid 0 does not trigger capability-"fixup". | 27 | /* When set, setuid to/from uid 0 does not trigger capability-"fixup". |
16 | When unset, to provide compatiblility with old programs relying on | 28 | When unset, to provide compatiblility with old programs relying on |
17 | set*uid to gain/lose privilege, transitions to/from uid 0 cause | 29 | set*uid to gain/lose privilege, transitions to/from uid 0 cause |
@@ -19,6 +31,10 @@ | |||
19 | #define SECURE_NO_SETUID_FIXUP 2 | 31 | #define SECURE_NO_SETUID_FIXUP 2 |
20 | #define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ | 32 | #define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ |
21 | 33 | ||
34 | #define SECBIT_NO_SETUID_FIXUP (issecure_mask(SECURE_NO_SETUID_FIXUP)) | ||
35 | #define SECBIT_NO_SETUID_FIXUP_LOCKED \ | ||
36 | (issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)) | ||
37 | |||
22 | /* When set, a process can retain its capabilities even after | 38 | /* When set, a process can retain its capabilities even after |
23 | transitioning to a non-root user (the set-uid fixup suppressed by | 39 | transitioning to a non-root user (the set-uid fixup suppressed by |
24 | bit 2). Bit-4 is cleared when a process calls exec(); setting both | 40 | bit 2). Bit-4 is cleared when a process calls exec(); setting both |
@@ -27,12 +43,8 @@ | |||
27 | #define SECURE_KEEP_CAPS 4 | 43 | #define SECURE_KEEP_CAPS 4 |
28 | #define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ | 44 | #define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ |
29 | 45 | ||
30 | /* Each securesetting is implemented using two bits. One bit specifies | 46 | #define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS)) |
31 | whether the setting is on or off. The other bit specify whether the | 47 | #define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED)) |
32 | setting is locked or not. A setting which is locked cannot be | ||
33 | changed from user-level. */ | ||
34 | #define issecure_mask(X) (1 << (X)) | ||
35 | #define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) | ||
36 | 48 | ||
37 | #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ | 49 | #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ |
38 | issecure_mask(SECURE_NO_SETUID_FIXUP) | \ | 50 | issecure_mask(SECURE_NO_SETUID_FIXUP) | \ |
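
Moving issecure_mask() above the per-bit definitions lets each securebit gain a ready-made SECBIT_* mask next to its bit number. The masks are aimed at userspace callers of prctl(); a sketch, assuming the calling process holds CAP_SETPCAP:

    #include <sys/prctl.h>
    #include <linux/securebits.h>

    /* keep capabilities across setuid, and lock that choice in place */
    prctl(PR_SET_SECUREBITS, SECBIT_KEEP_CAPS | SECBIT_KEEP_CAPS_LOCKED);
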
diff --git a/include/linux/security.h b/include/linux/security.h index 239e40d0450b..466cbadbd1ef 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -447,6 +447,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
447 | * @new_dir contains the path structure for parent of the new link. | 447 | * @new_dir contains the path structure for parent of the new link. |
448 | * @new_dentry contains the dentry structure of the new link. | 448 | * @new_dentry contains the dentry structure of the new link. |
449 | * Return 0 if permission is granted. | 449 | * Return 0 if permission is granted. |
450 | * @path_chmod: | ||
451 | * Check for permission to change DAC's permission of a file or directory. | ||
452 | * @dentry contains the dentry structure. | ||
453 | * @mnt contains the vfsmnt structure. | ||
454 | * @mode contains DAC's mode. | ||
455 | * Return 0 if permission is granted. | ||
456 | * @path_chown: | ||
457 | * Check for permission to change owner/group of a file or directory. | ||
458 | * @path contains the path structure. | ||
459 | * @uid contains new owner's ID. | ||
460 | * @gid contains new group's ID. | ||
461 | * Return 0 if permission is granted. | ||
462 | * @path_chroot: | ||
463 | * Check for permission to change root directory. | ||
464 | * @path contains the path structure. | ||
465 | * Return 0 if permission is granted. | ||
450 | * @inode_readlink: | 466 | * @inode_readlink: |
451 | * Check the permission to read the symbolic link. | 467 | * Check the permission to read the symbolic link. |
452 | * @dentry contains the dentry structure for the file link. | 468 | * @dentry contains the dentry structure for the file link. |
@@ -690,6 +706,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
690 | * @kernel_module_request: | 706 | * @kernel_module_request: |
691 | * Ability to trigger the kernel to automatically upcall to userspace for | 707 | * Ability to trigger the kernel to automatically upcall to userspace for |
692 | * userspace to load a kernel module with the given name. | 708 | * userspace to load a kernel module with the given name. |
709 | * @kmod_name name of the module requested by the kernel | ||
693 | * Return 0 if successful. | 710 | * Return 0 if successful. |
694 | * @task_setuid: | 711 | * @task_setuid: |
695 | * Check permission before setting one or more of the user identity | 712 | * Check permission before setting one or more of the user identity |
@@ -1488,6 +1505,10 @@ struct security_operations { | |||
1488 | struct dentry *new_dentry); | 1505 | struct dentry *new_dentry); |
1489 | int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, | 1506 | int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, |
1490 | struct path *new_dir, struct dentry *new_dentry); | 1507 | struct path *new_dir, struct dentry *new_dentry); |
1508 | int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt, | ||
1509 | mode_t mode); | ||
1510 | int (*path_chown) (struct path *path, uid_t uid, gid_t gid); | ||
1511 | int (*path_chroot) (struct path *path); | ||
1491 | #endif | 1512 | #endif |
1492 | 1513 | ||
1493 | int (*inode_alloc_security) (struct inode *inode); | 1514 | int (*inode_alloc_security) (struct inode *inode); |
@@ -1557,7 +1578,7 @@ struct security_operations { | |||
1557 | void (*cred_transfer)(struct cred *new, const struct cred *old); | 1578 | void (*cred_transfer)(struct cred *new, const struct cred *old); |
1558 | int (*kernel_act_as)(struct cred *new, u32 secid); | 1579 | int (*kernel_act_as)(struct cred *new, u32 secid); |
1559 | int (*kernel_create_files_as)(struct cred *new, struct inode *inode); | 1580 | int (*kernel_create_files_as)(struct cred *new, struct inode *inode); |
1560 | int (*kernel_module_request)(void); | 1581 | int (*kernel_module_request)(char *kmod_name); |
1561 | int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); | 1582 | int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); |
1562 | int (*task_fix_setuid) (struct cred *new, const struct cred *old, | 1583 | int (*task_fix_setuid) (struct cred *new, const struct cred *old, |
1563 | int flags); | 1584 | int flags); |
@@ -1822,7 +1843,7 @@ void security_commit_creds(struct cred *new, const struct cred *old); | |||
1822 | void security_transfer_creds(struct cred *new, const struct cred *old); | 1843 | void security_transfer_creds(struct cred *new, const struct cred *old); |
1823 | int security_kernel_act_as(struct cred *new, u32 secid); | 1844 | int security_kernel_act_as(struct cred *new, u32 secid); |
1824 | int security_kernel_create_files_as(struct cred *new, struct inode *inode); | 1845 | int security_kernel_create_files_as(struct cred *new, struct inode *inode); |
1825 | int security_kernel_module_request(void); | 1846 | int security_kernel_module_request(char *kmod_name); |
1826 | int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); | 1847 | int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); |
1827 | int security_task_fix_setuid(struct cred *new, const struct cred *old, | 1848 | int security_task_fix_setuid(struct cred *new, const struct cred *old, |
1828 | int flags); | 1849 | int flags); |
@@ -2387,7 +2408,7 @@ static inline int security_kernel_create_files_as(struct cred *cred, | |||
2387 | return 0; | 2408 | return 0; |
2388 | } | 2409 | } |
2389 | 2410 | ||
2390 | static inline int security_kernel_module_request(void) | 2411 | static inline int security_kernel_module_request(char *kmod_name) |
2391 | { | 2412 | { |
2392 | return 0; | 2413 | return 0; |
2393 | } | 2414 | } |
@@ -2952,6 +2973,10 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir, | |||
2952 | struct dentry *new_dentry); | 2973 | struct dentry *new_dentry); |
2953 | int security_path_rename(struct path *old_dir, struct dentry *old_dentry, | 2974 | int security_path_rename(struct path *old_dir, struct dentry *old_dentry, |
2954 | struct path *new_dir, struct dentry *new_dentry); | 2975 | struct path *new_dir, struct dentry *new_dentry); |
2976 | int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, | ||
2977 | mode_t mode); | ||
2978 | int security_path_chown(struct path *path, uid_t uid, gid_t gid); | ||
2979 | int security_path_chroot(struct path *path); | ||
2955 | #else /* CONFIG_SECURITY_PATH */ | 2980 | #else /* CONFIG_SECURITY_PATH */ |
2956 | static inline int security_path_unlink(struct path *dir, struct dentry *dentry) | 2981 | static inline int security_path_unlink(struct path *dir, struct dentry *dentry) |
2957 | { | 2982 | { |
@@ -3001,6 +3026,23 @@ static inline int security_path_rename(struct path *old_dir, | |||
3001 | { | 3026 | { |
3002 | return 0; | 3027 | return 0; |
3003 | } | 3028 | } |
3029 | |||
3030 | static inline int security_path_chmod(struct dentry *dentry, | ||
3031 | struct vfsmount *mnt, | ||
3032 | mode_t mode) | ||
3033 | { | ||
3034 | return 0; | ||
3035 | } | ||
3036 | |||
3037 | static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid) | ||
3038 | { | ||
3039 | return 0; | ||
3040 | } | ||
3041 | |||
3042 | static inline int security_path_chroot(struct path *path) | ||
3043 | { | ||
3044 | return 0; | ||
3045 | } | ||
3004 | #endif /* CONFIG_SECURITY_PATH */ | 3046 | #endif /* CONFIG_SECURITY_PATH */ |
3005 | 3047 | ||
3006 | #ifdef CONFIG_KEYS | 3048 | #ifdef CONFIG_KEYS |
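
The security.h changes add three pathname-based hooks (path_chmod, path_chown, path_chroot) with no-op fallbacks when CONFIG_SECURITY_PATH is off, and pass the requested module name to the kernel_module_request hook. An illustrative (not in-tree) LSM fragment wiring up two of the new hooks:

    static int example_path_chmod(struct dentry *dentry,
                                  struct vfsmount *mnt, mode_t mode)
    {
            return 0;       /* allow; a real LSM would check dentry/mnt/mode */
    }

    static int example_path_chroot(struct path *path)
    {
            return 0;
    }

    static struct security_operations example_ops = {
            .name        = "example",
            .path_chmod  = example_path_chmod,
            .path_chroot = example_path_chroot,
    };
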
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index 5035a2691739..13337bf6c3f5 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/timer.h> | 20 | #include <linux/timer.h> |
21 | 21 | ||
22 | struct slow_work; | 22 | struct slow_work; |
23 | #ifdef CONFIG_SLOW_WORK_PROC | 23 | #ifdef CONFIG_SLOW_WORK_DEBUG |
24 | struct seq_file; | 24 | struct seq_file; |
25 | #endif | 25 | #endif |
26 | 26 | ||
@@ -42,8 +42,8 @@ struct slow_work_ops { | |||
42 | /* execute a work item */ | 42 | /* execute a work item */ |
43 | void (*execute)(struct slow_work *work); | 43 | void (*execute)(struct slow_work *work); |
44 | 44 | ||
45 | #ifdef CONFIG_SLOW_WORK_PROC | 45 | #ifdef CONFIG_SLOW_WORK_DEBUG |
46 | /* describe a work item for /proc */ | 46 | /* describe a work item for debugfs */ |
47 | void (*desc)(struct slow_work *work, struct seq_file *m); | 47 | void (*desc)(struct slow_work *work, struct seq_file *m); |
48 | #endif | 48 | #endif |
49 | }; | 49 | }; |
@@ -64,7 +64,7 @@ struct slow_work { | |||
64 | #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ | 64 | #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ |
65 | const struct slow_work_ops *ops; /* operations table for this item */ | 65 | const struct slow_work_ops *ops; /* operations table for this item */ |
66 | struct list_head link; /* link in queue */ | 66 | struct list_head link; /* link in queue */ |
67 | #ifdef CONFIG_SLOW_WORK_PROC | 67 | #ifdef CONFIG_SLOW_WORK_DEBUG |
68 | struct timespec mark; /* jiffies at which queued or exec begun */ | 68 | struct timespec mark; /* jiffies at which queued or exec begun */ |
69 | #endif | 69 | #endif |
70 | }; | 70 | }; |
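
The slow-work rename from CONFIG_SLOW_WORK_PROC to CONFIG_SLOW_WORK_DEBUG reflects the move of the item-description output from /proc to debugfs; the optional desc callback keeps its signature. A heavily abbreviated sketch (example_execute is assumed to be defined elsewhere and the ref-counting ops are omitted):

    #ifdef CONFIG_SLOW_WORK_DEBUG
    static void example_desc(struct slow_work *work, struct seq_file *m)
    {
            seq_puts(m, "example work item");
    }
    #endif

    static const struct slow_work_ops example_slow_ops = {
            .execute = example_execute,
    #ifdef CONFIG_SLOW_WORK_DEBUG
            .desc    = example_desc,
    #endif
    };
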
diff --git a/include/linux/smp.h b/include/linux/smp.h index 39c64bae776d..7a0570e6a596 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -76,6 +76,9 @@ void smp_call_function_many(const struct cpumask *mask, | |||
76 | void __smp_call_function_single(int cpuid, struct call_single_data *data, | 76 | void __smp_call_function_single(int cpuid, struct call_single_data *data, |
77 | int wait); | 77 | int wait); |
78 | 78 | ||
79 | int smp_call_function_any(const struct cpumask *mask, | ||
80 | void (*func)(void *info), void *info, int wait); | ||
81 | |||
79 | /* | 82 | /* |
80 | * Generic and arch helpers | 83 | * Generic and arch helpers |
81 | */ | 84 | */ |
@@ -137,9 +140,15 @@ static inline void smp_send_reschedule(int cpu) { } | |||
137 | #define smp_prepare_boot_cpu() do {} while (0) | 140 | #define smp_prepare_boot_cpu() do {} while (0) |
138 | #define smp_call_function_many(mask, func, info, wait) \ | 141 | #define smp_call_function_many(mask, func, info, wait) \ |
139 | (up_smp_call_function(func, info)) | 142 | (up_smp_call_function(func, info)) |
140 | static inline void init_call_single_data(void) | 143 | static inline void init_call_single_data(void) { } |
144 | |||
145 | static inline int | ||
146 | smp_call_function_any(const struct cpumask *mask, void (*func)(void *info), | ||
147 | void *info, int wait) | ||
141 | { | 148 | { |
149 | return smp_call_function_single(0, func, info, wait); | ||
142 | } | 150 | } |
151 | |||
143 | #endif /* !SMP */ | 152 | #endif /* !SMP */ |
144 | 153 | ||
145 | /* | 154 | /* |
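
smp_call_function_any() runs func on some CPU in mask, preferring the calling CPU when it qualifies so the cross-call can often be avoided; the UP stub simply runs the function as CPU 0. A sketch (do_probe is an illustrative handler):

    static void do_probe(void *info)
    {
            *(int *)info = smp_processor_id();
    }

    int probed_cpu, ret;

    /* wait=1: return only after do_probe() has run somewhere in the mask */
    ret = smp_call_function_any(cpu_online_mask, do_probe, &probed_cpu, 1);
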
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 813be59bf345..2ea1dd1ba21c 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h | |||
@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task) | |||
24 | return 0; | 24 | return 0; |
25 | } | 25 | } |
26 | 26 | ||
27 | extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); | 27 | extern void __lockfunc |
28 | extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); | 28 | _lock_kernel(const char *func, const char *file, int line) |
29 | __acquires(kernel_lock); | ||
30 | |||
31 | extern void __lockfunc | ||
32 | _unlock_kernel(const char *func, const char *file, int line) | ||
33 | __releases(kernel_lock); | ||
34 | |||
35 | #define lock_kernel() do { \ | ||
36 | _lock_kernel(__func__, __FILE__, __LINE__); \ | ||
37 | } while (0) | ||
38 | |||
39 | #define unlock_kernel() do { \ | ||
40 | _unlock_kernel(__func__, __FILE__, __LINE__); \ | ||
41 | } while (0) | ||
29 | 42 | ||
30 | /* | 43 | /* |
31 | * Various legacy drivers don't really need the BKL in a specific | 44 | * Various legacy drivers don't really need the BKL in a specific |
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void) | |||
41 | 54 | ||
42 | #else | 55 | #else |
43 | 56 | ||
44 | #define lock_kernel() do { } while(0) | 57 | #define lock_kernel() |
45 | #define unlock_kernel() do { } while(0) | 58 | #define unlock_kernel() |
46 | #define release_kernel_lock(task) do { } while(0) | 59 | #define release_kernel_lock(task) do { } while(0) |
47 | #define cycle_kernel_lock() do { } while(0) | 60 | #define cycle_kernel_lock() do { } while(0) |
48 | #define reacquire_kernel_lock(task) 0 | 61 | #define reacquire_kernel_lock(task) 0 |
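
lock_kernel()/unlock_kernel() become macros that forward the call site (__func__, __FILE__, __LINE__) to _lock_kernel()/_unlock_kernel(), presumably so BKL acquisition can be traced; existing callers compile unchanged:

    lock_kernel();
    /* ... legacy BKL-protected section ... */
    unlock_kernel();
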
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index f0ca7a7a1757..71dccfeb0d88 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -79,8 +79,6 @@ | |||
79 | */ | 79 | */ |
80 | #include <linux/spinlock_types.h> | 80 | #include <linux/spinlock_types.h> |
81 | 81 | ||
82 | extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); | ||
83 | |||
84 | /* | 82 | /* |
85 | * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): | 83 | * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): |
86 | */ | 84 | */ |
@@ -102,7 +100,7 @@ do { \ | |||
102 | 100 | ||
103 | #else | 101 | #else |
104 | # define spin_lock_init(lock) \ | 102 | # define spin_lock_init(lock) \ |
105 | do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) | 103 | do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) |
106 | #endif | 104 | #endif |
107 | 105 | ||
108 | #ifdef CONFIG_DEBUG_SPINLOCK | 106 | #ifdef CONFIG_DEBUG_SPINLOCK |
@@ -116,7 +114,7 @@ do { \ | |||
116 | } while (0) | 114 | } while (0) |
117 | #else | 115 | #else |
118 | # define rwlock_init(lock) \ | 116 | # define rwlock_init(lock) \ |
119 | do { *(lock) = RW_LOCK_UNLOCKED; } while (0) | 117 | do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) |
120 | #endif | 118 | #endif |
121 | 119 | ||
122 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | 120 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) |
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 7a7e18fc2415..8264a7f459bc 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
61 | __releases(lock); | 61 | __releases(lock); |
62 | 62 | ||
63 | /* | 63 | #ifdef CONFIG_INLINE_SPIN_LOCK |
64 | * We inline the unlock functions in the nondebug case: | ||
65 | */ | ||
66 | #if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT) | ||
67 | #define __always_inline__spin_unlock | ||
68 | #define __always_inline__read_unlock | ||
69 | #define __always_inline__write_unlock | ||
70 | #define __always_inline__spin_unlock_irq | ||
71 | #define __always_inline__read_unlock_irq | ||
72 | #define __always_inline__write_unlock_irq | ||
73 | #endif | ||
74 | |||
75 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
76 | #ifndef CONFIG_GENERIC_LOCKBREAK | ||
77 | |||
78 | #ifdef __always_inline__spin_lock | ||
79 | #define _spin_lock(lock) __spin_lock(lock) | 64 | #define _spin_lock(lock) __spin_lock(lock) |
80 | #endif | 65 | #endif |
81 | 66 | ||
82 | #ifdef __always_inline__read_lock | 67 | #ifdef CONFIG_INLINE_READ_LOCK |
83 | #define _read_lock(lock) __read_lock(lock) | 68 | #define _read_lock(lock) __read_lock(lock) |
84 | #endif | 69 | #endif |
85 | 70 | ||
86 | #ifdef __always_inline__write_lock | 71 | #ifdef CONFIG_INLINE_WRITE_LOCK |
87 | #define _write_lock(lock) __write_lock(lock) | 72 | #define _write_lock(lock) __write_lock(lock) |
88 | #endif | 73 | #endif |
89 | 74 | ||
90 | #ifdef __always_inline__spin_lock_bh | 75 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH |
91 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | 76 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) |
92 | #endif | 77 | #endif |
93 | 78 | ||
94 | #ifdef __always_inline__read_lock_bh | 79 | #ifdef CONFIG_INLINE_READ_LOCK_BH |
95 | #define _read_lock_bh(lock) __read_lock_bh(lock) | 80 | #define _read_lock_bh(lock) __read_lock_bh(lock) |
96 | #endif | 81 | #endif |
97 | 82 | ||
98 | #ifdef __always_inline__write_lock_bh | 83 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH |
99 | #define _write_lock_bh(lock) __write_lock_bh(lock) | 84 | #define _write_lock_bh(lock) __write_lock_bh(lock) |
100 | #endif | 85 | #endif |
101 | 86 | ||
102 | #ifdef __always_inline__spin_lock_irq | 87 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ |
103 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | 88 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) |
104 | #endif | 89 | #endif |
105 | 90 | ||
106 | #ifdef __always_inline__read_lock_irq | 91 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ |
107 | #define _read_lock_irq(lock) __read_lock_irq(lock) | 92 | #define _read_lock_irq(lock) __read_lock_irq(lock) |
108 | #endif | 93 | #endif |
109 | 94 | ||
110 | #ifdef __always_inline__write_lock_irq | 95 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ |
111 | #define _write_lock_irq(lock) __write_lock_irq(lock) | 96 | #define _write_lock_irq(lock) __write_lock_irq(lock) |
112 | #endif | 97 | #endif |
113 | 98 | ||
114 | #ifdef __always_inline__spin_lock_irqsave | 99 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE |
115 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | 100 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) |
116 | #endif | 101 | #endif |
117 | 102 | ||
118 | #ifdef __always_inline__read_lock_irqsave | 103 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE |
119 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | 104 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) |
120 | #endif | 105 | #endif |
121 | 106 | ||
122 | #ifdef __always_inline__write_lock_irqsave | 107 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE |
123 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | 108 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) |
124 | #endif | 109 | #endif |
125 | 110 | ||
126 | #endif /* !CONFIG_GENERIC_LOCKBREAK */ | 111 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK |
127 | |||
128 | #ifdef __always_inline__spin_trylock | ||
129 | #define _spin_trylock(lock) __spin_trylock(lock) | 112 | #define _spin_trylock(lock) __spin_trylock(lock) |
130 | #endif | 113 | #endif |
131 | 114 | ||
132 | #ifdef __always_inline__read_trylock | 115 | #ifdef CONFIG_INLINE_READ_TRYLOCK |
133 | #define _read_trylock(lock) __read_trylock(lock) | 116 | #define _read_trylock(lock) __read_trylock(lock) |
134 | #endif | 117 | #endif |
135 | 118 | ||
136 | #ifdef __always_inline__write_trylock | 119 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK |
137 | #define _write_trylock(lock) __write_trylock(lock) | 120 | #define _write_trylock(lock) __write_trylock(lock) |
138 | #endif | 121 | #endif |
139 | 122 | ||
140 | #ifdef __always_inline__spin_trylock_bh | 123 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH |
141 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | 124 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) |
142 | #endif | 125 | #endif |
143 | 126 | ||
144 | #ifdef __always_inline__spin_unlock | 127 | #ifdef CONFIG_INLINE_SPIN_UNLOCK |
145 | #define _spin_unlock(lock) __spin_unlock(lock) | 128 | #define _spin_unlock(lock) __spin_unlock(lock) |
146 | #endif | 129 | #endif |
147 | 130 | ||
148 | #ifdef __always_inline__read_unlock | 131 | #ifdef CONFIG_INLINE_READ_UNLOCK |
149 | #define _read_unlock(lock) __read_unlock(lock) | 132 | #define _read_unlock(lock) __read_unlock(lock) |
150 | #endif | 133 | #endif |
151 | 134 | ||
152 | #ifdef __always_inline__write_unlock | 135 | #ifdef CONFIG_INLINE_WRITE_UNLOCK |
153 | #define _write_unlock(lock) __write_unlock(lock) | 136 | #define _write_unlock(lock) __write_unlock(lock) |
154 | #endif | 137 | #endif |
155 | 138 | ||
156 | #ifdef __always_inline__spin_unlock_bh | 139 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH |
157 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | 140 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) |
158 | #endif | 141 | #endif |
159 | 142 | ||
160 | #ifdef __always_inline__read_unlock_bh | 143 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH |
161 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | 144 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) |
162 | #endif | 145 | #endif |
163 | 146 | ||
164 | #ifdef __always_inline__write_unlock_bh | 147 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH |
165 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | 148 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) |
166 | #endif | 149 | #endif |
167 | 150 | ||
168 | #ifdef __always_inline__spin_unlock_irq | 151 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
169 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | 152 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) |
170 | #endif | 153 | #endif |
171 | 154 | ||
172 | #ifdef __always_inline__read_unlock_irq | 155 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ |
173 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | 156 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) |
174 | #endif | 157 | #endif |
175 | 158 | ||
176 | #ifdef __always_inline__write_unlock_irq | 159 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
177 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | 160 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) |
178 | #endif | 161 | #endif |
179 | 162 | ||
180 | #ifdef __always_inline__spin_unlock_irqrestore | 163 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
181 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | 164 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) |
182 | #endif | 165 | #endif |
183 | 166 | ||
184 | #ifdef __always_inline__read_unlock_irqrestore | 167 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
185 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | 168 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) |
186 | #endif | 169 | #endif |
187 | 170 | ||
188 | #ifdef __always_inline__write_unlock_irqrestore | 171 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
189 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | 172 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) |
190 | #endif | 173 | #endif |
191 | 174 | ||
192 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
193 | |||
194 | static inline int __spin_trylock(spinlock_t *lock) | 175 | static inline int __spin_trylock(spinlock_t *lock) |
195 | { | 176 | { |
196 | preempt_disable(); | 177 | preempt_disable(); |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index aca0eee53930..4765d97dcafb 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -48,6 +48,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp); | |||
48 | int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); | 48 | int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); |
49 | void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); | 49 | void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); |
50 | void synchronize_srcu(struct srcu_struct *sp); | 50 | void synchronize_srcu(struct srcu_struct *sp); |
51 | void synchronize_srcu_expedited(struct srcu_struct *sp); | ||
51 | long srcu_batches_completed(struct srcu_struct *sp); | 52 | long srcu_batches_completed(struct srcu_struct *sp); |
52 | 53 | ||
53 | #endif | 54 | #endif |
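
synchronize_srcu_expedited() gives SRCU updaters a low-latency (but more CPU-hungry) grace-period wait alongside the existing synchronize_srcu(). A sketch against an srcu_struct assumed to have been set up with init_srcu_struct():

    static struct srcu_struct my_srcu;

    static int read_side(void)
    {
            int idx = srcu_read_lock(&my_srcu);
            /* ... read-side critical section ... */
            srcu_read_unlock(&my_srcu, idx);
            return 0;
    }

    static void update_side(void)
    {
            /* latency-sensitive writer: use the expedited grace period */
            synchronize_srcu_expedited(&my_srcu);
    }
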
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 73b1f1cec423..febedcf67c7e 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -7,6 +7,8 @@ struct device; | |||
7 | struct dma_attrs; | 7 | struct dma_attrs; |
8 | struct scatterlist; | 8 | struct scatterlist; |
9 | 9 | ||
10 | extern int swiotlb_force; | ||
11 | |||
10 | /* | 12 | /* |
11 | * Maximum allowable number of contiguous slabs to map, | 13 | * Maximum allowable number of contiguous slabs to map, |
12 | * must be a power of 2. What is the appropriate value ? | 14 | * must be a power of 2. What is the appropriate value ? |
@@ -20,8 +22,7 @@ struct scatterlist; | |||
20 | */ | 22 | */ |
21 | #define IO_TLB_SHIFT 11 | 23 | #define IO_TLB_SHIFT 11 |
22 | 24 | ||
23 | extern void | 25 | extern void swiotlb_init(int verbose); |
24 | swiotlb_init(void); | ||
25 | 26 | ||
26 | extern void | 27 | extern void |
27 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 28 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
@@ -88,4 +89,11 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | |||
88 | extern int | 89 | extern int |
89 | swiotlb_dma_supported(struct device *hwdev, u64 mask); | 90 | swiotlb_dma_supported(struct device *hwdev, u64 mask); |
90 | 91 | ||
92 | #ifdef CONFIG_SWIOTLB | ||
93 | extern void __init swiotlb_free(void); | ||
94 | #else | ||
95 | static inline void swiotlb_free(void) { } | ||
96 | #endif | ||
97 | |||
98 | extern void swiotlb_print_info(void); | ||
91 | #endif /* __LINUX_SWIOTLB_H */ | 99 | #endif /* __LINUX_SWIOTLB_H */ |
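
The swiotlb interface gains a verbosity flag on swiotlb_init(), a swiotlb_free() for architectures that later decide the bounce buffers are unnecessary, and a separate swiotlb_print_info(). A sketch of how an architecture's DMA setup might use them (have_hardware_iommu is a placeholder for whatever detection the arch performs):

    swiotlb_init(0);                /* allocate quietly during early boot */

    if (have_hardware_iommu)
            swiotlb_free();         /* bounce buffers not needed; give the memory back */
    else
            swiotlb_print_info();   /* report the aperture now that we keep it */
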
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a990ace1a838..e79e2f3ccc51 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -99,37 +99,16 @@ struct perf_event_attr; | |||
99 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) | 99 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) |
100 | 100 | ||
101 | #ifdef CONFIG_EVENT_PROFILE | 101 | #ifdef CONFIG_EVENT_PROFILE |
102 | #define TRACE_SYS_ENTER_PROFILE(sname) \ | ||
103 | static int prof_sysenter_enable_##sname(void) \ | ||
104 | { \ | ||
105 | return reg_prof_syscall_enter("sys"#sname); \ | ||
106 | } \ | ||
107 | \ | ||
108 | static void prof_sysenter_disable_##sname(void) \ | ||
109 | { \ | ||
110 | unreg_prof_syscall_enter("sys"#sname); \ | ||
111 | } | ||
112 | |||
113 | #define TRACE_SYS_EXIT_PROFILE(sname) \ | ||
114 | static int prof_sysexit_enable_##sname(void) \ | ||
115 | { \ | ||
116 | return reg_prof_syscall_exit("sys"#sname); \ | ||
117 | } \ | ||
118 | \ | ||
119 | static void prof_sysexit_disable_##sname(void) \ | ||
120 | { \ | ||
121 | unreg_prof_syscall_exit("sys"#sname); \ | ||
122 | } | ||
123 | 102 | ||
124 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ | 103 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ |
125 | .profile_count = ATOMIC_INIT(-1), \ | 104 | .profile_count = ATOMIC_INIT(-1), \ |
126 | .profile_enable = prof_sysenter_enable_##sname, \ | 105 | .profile_enable = prof_sysenter_enable, \ |
127 | .profile_disable = prof_sysenter_disable_##sname, | 106 | .profile_disable = prof_sysenter_disable, |
128 | 107 | ||
129 | #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ | 108 | #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ |
130 | .profile_count = ATOMIC_INIT(-1), \ | 109 | .profile_count = ATOMIC_INIT(-1), \ |
131 | .profile_enable = prof_sysexit_enable_##sname, \ | 110 | .profile_enable = prof_sysexit_enable, \ |
132 | .profile_disable = prof_sysexit_disable_##sname, | 111 | .profile_disable = prof_sysexit_disable, |
133 | #else | 112 | #else |
134 | #define TRACE_SYS_ENTER_PROFILE(sname) | 113 | #define TRACE_SYS_ENTER_PROFILE(sname) |
135 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) | 114 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) |
@@ -153,74 +132,46 @@ static void prof_sysexit_disable_##sname(void) \ | |||
153 | #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) | 132 | #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) |
154 | 133 | ||
155 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ | 134 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ |
135 | static const struct syscall_metadata __syscall_meta_##sname; \ | ||
156 | static struct ftrace_event_call event_enter_##sname; \ | 136 | static struct ftrace_event_call event_enter_##sname; \ |
157 | struct trace_event enter_syscall_print_##sname = { \ | 137 | static struct trace_event enter_syscall_print_##sname = { \ |
158 | .trace = print_syscall_enter, \ | 138 | .trace = print_syscall_enter, \ |
159 | }; \ | 139 | }; \ |
160 | static int init_enter_##sname(void) \ | ||
161 | { \ | ||
162 | int num, id; \ | ||
163 | num = syscall_name_to_nr("sys"#sname); \ | ||
164 | if (num < 0) \ | ||
165 | return -ENOSYS; \ | ||
166 | id = register_ftrace_event(&enter_syscall_print_##sname);\ | ||
167 | if (!id) \ | ||
168 | return -ENODEV; \ | ||
169 | event_enter_##sname.id = id; \ | ||
170 | set_syscall_enter_id(num, id); \ | ||
171 | INIT_LIST_HEAD(&event_enter_##sname.fields); \ | ||
172 | return 0; \ | ||
173 | } \ | ||
174 | TRACE_SYS_ENTER_PROFILE(sname); \ | ||
175 | static struct ftrace_event_call __used \ | 140 | static struct ftrace_event_call __used \ |
176 | __attribute__((__aligned__(4))) \ | 141 | __attribute__((__aligned__(4))) \ |
177 | __attribute__((section("_ftrace_events"))) \ | 142 | __attribute__((section("_ftrace_events"))) \ |
178 | event_enter_##sname = { \ | 143 | event_enter_##sname = { \ |
179 | .name = "sys_enter"#sname, \ | 144 | .name = "sys_enter"#sname, \ |
180 | .system = "syscalls", \ | 145 | .system = "syscalls", \ |
181 | .event = &event_syscall_enter, \ | 146 | .event = &enter_syscall_print_##sname, \ |
182 | .raw_init = init_enter_##sname, \ | 147 | .raw_init = init_syscall_trace, \ |
183 | .show_format = syscall_enter_format, \ | 148 | .show_format = syscall_enter_format, \ |
184 | .define_fields = syscall_enter_define_fields, \ | 149 | .define_fields = syscall_enter_define_fields, \ |
185 | .regfunc = reg_event_syscall_enter, \ | 150 | .regfunc = reg_event_syscall_enter, \ |
186 | .unregfunc = unreg_event_syscall_enter, \ | 151 | .unregfunc = unreg_event_syscall_enter, \ |
187 | .data = "sys"#sname, \ | 152 | .data = (void *)&__syscall_meta_##sname,\ |
188 | TRACE_SYS_ENTER_PROFILE_INIT(sname) \ | 153 | TRACE_SYS_ENTER_PROFILE_INIT(sname) \ |
189 | } | 154 | } |
190 | 155 | ||
191 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ | 156 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ |
157 | static const struct syscall_metadata __syscall_meta_##sname; \ | ||
192 | static struct ftrace_event_call event_exit_##sname; \ | 158 | static struct ftrace_event_call event_exit_##sname; \ |
193 | struct trace_event exit_syscall_print_##sname = { \ | 159 | static struct trace_event exit_syscall_print_##sname = { \ |
194 | .trace = print_syscall_exit, \ | 160 | .trace = print_syscall_exit, \ |
195 | }; \ | 161 | }; \ |
196 | static int init_exit_##sname(void) \ | ||
197 | { \ | ||
198 | int num, id; \ | ||
199 | num = syscall_name_to_nr("sys"#sname); \ | ||
200 | if (num < 0) \ | ||
201 | return -ENOSYS; \ | ||
202 | id = register_ftrace_event(&exit_syscall_print_##sname);\ | ||
203 | if (!id) \ | ||
204 | return -ENODEV; \ | ||
205 | event_exit_##sname.id = id; \ | ||
206 | set_syscall_exit_id(num, id); \ | ||
207 | INIT_LIST_HEAD(&event_exit_##sname.fields); \ | ||
208 | return 0; \ | ||
209 | } \ | ||
210 | TRACE_SYS_EXIT_PROFILE(sname); \ | ||
211 | static struct ftrace_event_call __used \ | 162 | static struct ftrace_event_call __used \ |
212 | __attribute__((__aligned__(4))) \ | 163 | __attribute__((__aligned__(4))) \ |
213 | __attribute__((section("_ftrace_events"))) \ | 164 | __attribute__((section("_ftrace_events"))) \ |
214 | event_exit_##sname = { \ | 165 | event_exit_##sname = { \ |
215 | .name = "sys_exit"#sname, \ | 166 | .name = "sys_exit"#sname, \ |
216 | .system = "syscalls", \ | 167 | .system = "syscalls", \ |
217 | .event = &event_syscall_exit, \ | 168 | .event = &exit_syscall_print_##sname, \ |
218 | .raw_init = init_exit_##sname, \ | 169 | .raw_init = init_syscall_trace, \ |
219 | .show_format = syscall_exit_format, \ | 170 | .show_format = syscall_exit_format, \ |
220 | .define_fields = syscall_exit_define_fields, \ | 171 | .define_fields = syscall_exit_define_fields, \ |
221 | .regfunc = reg_event_syscall_exit, \ | 172 | .regfunc = reg_event_syscall_exit, \ |
222 | .unregfunc = unreg_event_syscall_exit, \ | 173 | .unregfunc = unreg_event_syscall_exit, \ |
223 | .data = "sys"#sname, \ | 174 | .data = (void *)&__syscall_meta_##sname,\ |
224 | TRACE_SYS_EXIT_PROFILE_INIT(sname) \ | 175 | TRACE_SYS_EXIT_PROFILE_INIT(sname) \ |
225 | } | 176 | } |
226 | 177 | ||
diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 3338b3f5c21a..ac5d1c1285d9 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h | |||
@@ -27,9 +27,16 @@ | |||
27 | */ | 27 | */ |
28 | #define TPM_ANY_NUM 0xFFFF | 28 | #define TPM_ANY_NUM 0xFFFF |
29 | 29 | ||
30 | #if defined(CONFIG_TCG_TPM) | 30 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) |
31 | 31 | ||
32 | extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); | 32 | extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); |
33 | extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); | 33 | extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); |
34 | #else | ||
35 | static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { | ||
36 | return -ENODEV; | ||
37 | } | ||
38 | static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { | ||
39 | return -ENODEV; | ||
40 | } | ||
34 | #endif | 41 | #endif |
35 | #endif | 42 | #endif |
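With the new inline stubs, callers no longer need their own CONFIG_TCG_TPM conditionals. A minimal hypothetical caller (the function name and PCR index are invented for illustration) now compiles unconditionally and simply sees -ENODEV when no TPM driver is built:

	#include <linux/kernel.h>
	#include <linux/tpm.h>

	static int example_extend_pcr(const u8 *hash)
	{
		int rc = tpm_pcr_extend(TPM_ANY_NUM, 10, hash);

		if (rc == -ENODEV)
			pr_info("no TPM available, skipping PCR extend\n");
		return rc;
	}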
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 2aac8a83e89b..f59604ed0ec6 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -280,6 +280,12 @@ static inline void tracepoint_synchronize_unregister(void) | |||
280 | * TRACE_EVENT_FN to perform any (un)registration work. | 280 | * TRACE_EVENT_FN to perform any (un)registration work. |
281 | */ | 281 | */ |
282 | 282 | ||
283 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) | ||
284 | #define DEFINE_EVENT(template, name, proto, args) \ | ||
285 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
286 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
287 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
288 | |||
283 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | 289 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ |
284 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 290 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
285 | #define TRACE_EVENT_FN(name, proto, args, struct, \ | 291 | #define TRACE_EVENT_FN(name, proto, args, struct, \ |
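Seen through plain tracepoint.h, the class itself produces no code; only the instances do. A hypothetical instance (names invented here):

	DEFINE_EVENT(sample_class, sample_event, TP_PROTO(int foo), TP_ARGS(foo));
	/* is handled exactly like */
	DECLARE_TRACE(sample_event, TP_PROTO(int foo), TP_ARGS(foo));

so callers still get the usual trace_sample_event(foo) inline and register/unregister hooks, just as a TRACE_EVENT() of the same name would provide, while DECLARE_EVENT_CLASS() expands to nothing at this level. The struct/assign/print arguments only matter once the header is processed by the ftrace event machinery.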
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h index 904468a191ef..afc2bfb9e917 100644 --- a/include/pcmcia/cs.h +++ b/include/pcmcia/cs.h | |||
@@ -15,6 +15,10 @@ | |||
15 | #ifndef _LINUX_CS_H | 15 | #ifndef _LINUX_CS_H |
16 | #define _LINUX_CS_H | 16 | #define _LINUX_CS_H |
17 | 17 | ||
18 | #ifdef __KERNEL__ | ||
19 | #include <linux/interrupt.h> | ||
20 | #endif | ||
21 | |||
18 | /* For AccessConfigurationRegister */ | 22 | /* For AccessConfigurationRegister */ |
19 | typedef struct conf_reg_t { | 23 | typedef struct conf_reg_t { |
20 | u_char Function; | 24 | u_char Function; |
@@ -111,11 +115,9 @@ typedef struct io_req_t { | |||
111 | 115 | ||
112 | /* For RequestIRQ and ReleaseIRQ */ | 116 | /* For RequestIRQ and ReleaseIRQ */ |
113 | typedef struct irq_req_t { | 117 | typedef struct irq_req_t { |
114 | u_int Attributes; | 118 | u_int Attributes; |
115 | u_int AssignedIRQ; | 119 | u_int AssignedIRQ; |
116 | u_int IRQInfo1, IRQInfo2; /* IRQInfo2 is ignored */ | 120 | irq_handler_t Handler; |
117 | void *Handler; | ||
118 | void *Instance; | ||
119 | } irq_req_t; | 121 | } irq_req_t; |
120 | 122 | ||
121 | /* Attributes for RequestIRQ and ReleaseIRQ */ | 123 | /* Attributes for RequestIRQ and ReleaseIRQ */ |
@@ -125,7 +127,7 @@ typedef struct irq_req_t { | |||
125 | #define IRQ_TYPE_DYNAMIC_SHARING 0x02 | 127 | #define IRQ_TYPE_DYNAMIC_SHARING 0x02 |
126 | #define IRQ_FORCED_PULSE 0x04 | 128 | #define IRQ_FORCED_PULSE 0x04 |
127 | #define IRQ_FIRST_SHARED 0x08 | 129 | #define IRQ_FIRST_SHARED 0x08 |
128 | #define IRQ_HANDLE_PRESENT 0x10 | 130 | //#define IRQ_HANDLE_PRESENT 0x10 |
129 | #define IRQ_PULSE_ALLOCATED 0x100 | 131 | #define IRQ_PULSE_ALLOCATED 0x100 |
130 | 132 | ||
131 | /* Bits in IRQInfo1 field */ | 133 | /* Bits in IRQInfo1 field */ |
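With Handler now a properly typed irq_handler_t and the Instance/IRQInfo fields gone, a driver fills irq_req_t with an ordinary interrupt handler and no casts. A hypothetical sketch (handler name invented, and assuming the usual irq_req_t embedded as "irq" in struct pcmcia_device):

	static irqreturn_t example_cs_interrupt(int irq, void *dev_id)
	{
		/* ... acknowledge and service the card ... */
		return IRQ_HANDLED;
	}

	/* in the driver's config/probe path: */
	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
	link->irq.Handler    = example_cs_interrupt;	/* typed; no IRQ_HANDLE_PRESENT flag needed */
	ret = pcmcia_request_irq(link, &link->irq);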
diff --git a/include/pcmcia/cs_types.h b/include/pcmcia/cs_types.h index 315965a37930..f5e3b8386c8f 100644 --- a/include/pcmcia/cs_types.h +++ b/include/pcmcia/cs_types.h | |||
@@ -26,8 +26,7 @@ typedef u_int event_t; | |||
26 | typedef u_char cisdata_t; | 26 | typedef u_char cisdata_t; |
27 | typedef u_short page_t; | 27 | typedef u_short page_t; |
28 | 28 | ||
29 | struct window_t; | 29 | typedef unsigned long window_handle_t; |
30 | typedef struct window_t *window_handle_t; | ||
31 | 30 | ||
32 | struct region_t; | 31 | struct region_t; |
33 | typedef struct region_t *memory_handle_t; | 32 | typedef struct region_t *memory_handle_t; |
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index a2be80b9a095..d403c12f7978 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h | |||
@@ -34,6 +34,7 @@ | |||
34 | struct pcmcia_socket; | 34 | struct pcmcia_socket; |
35 | struct pcmcia_device; | 35 | struct pcmcia_device; |
36 | struct config_t; | 36 | struct config_t; |
37 | struct net_device; | ||
37 | 38 | ||
38 | /* dynamic device IDs for PCMCIA device drivers. See | 39 | /* dynamic device IDs for PCMCIA device drivers. See |
39 | * Documentation/pcmcia/driver.txt for details. | 40 | * Documentation/pcmcia/driver.txt for details. |
@@ -137,65 +138,39 @@ struct pcmcia_device { | |||
137 | #define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) | 138 | #define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) |
138 | #define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) | 139 | #define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) |
139 | 140 | ||
140 | /* deprecated -- don't use! */ | ||
141 | #define handle_to_dev(handle) (handle->dev) | ||
142 | 141 | ||
143 | 142 | /* | |
144 | /* (deprecated) error reporting by PCMCIA devices. Use dev_printk() | 143 | * CIS access. |
145 | * or dev_dbg() directly in the driver, without referring to pcmcia_error_func() | 144 | * |
146 | * and/or pcmcia_error_ret() for those functions will go away soon. | 145 | * Please use the following functions to access CIS tuples: |
147 | */ | 146 | * - pcmcia_get_tuple() |
148 | enum service { | 147 | * - pcmcia_loop_tuple() |
149 | AccessConfigurationRegister, AddSocketServices, | 148 | * - pcmcia_get_mac_from_cis() |
150 | AdjustResourceInfo, CheckEraseQueue, CloseMemory, CopyMemory, | 149 | * |
151 | DeregisterClient, DeregisterEraseQueue, GetCardServicesInfo, | 150 | * To parse a tuple_t, pcmcia_parse_tuple() exists. Its interface |
152 | GetClientInfo, GetConfigurationInfo, GetEventMask, | 151 | * might change in future. |
153 | GetFirstClient, GetFirstPartion, GetFirstRegion, GetFirstTuple, | ||
154 | GetNextClient, GetNextPartition, GetNextRegion, GetNextTuple, | ||
155 | GetStatus, GetTupleData, MapLogSocket, MapLogWindow, MapMemPage, | ||
156 | MapPhySocket, MapPhyWindow, ModifyConfiguration, ModifyWindow, | ||
157 | OpenMemory, ParseTuple, ReadMemory, RegisterClient, | ||
158 | RegisterEraseQueue, RegisterMTD, RegisterTimer, | ||
159 | ReleaseConfiguration, ReleaseExclusive, ReleaseIO, ReleaseIRQ, | ||
160 | ReleaseSocketMask, ReleaseWindow, ReplaceSocketServices, | ||
161 | RequestConfiguration, RequestExclusive, RequestIO, RequestIRQ, | ||
162 | RequestSocketMask, RequestWindow, ResetCard, ReturnSSEntry, | ||
163 | SetEventMask, SetRegion, ValidateCIS, VendorSpecific, | ||
164 | WriteMemory, BindDevice, BindMTD, ReportError, | ||
165 | SuspendCard, ResumeCard, EjectCard, InsertCard, ReplaceCIS, | ||
166 | GetFirstWindow, GetNextWindow, GetMemPage | ||
167 | }; | ||
168 | const char *pcmcia_error_func(int func); | ||
169 | const char *pcmcia_error_ret(int ret); | ||
170 | |||
171 | #define cs_error(p_dev, func, ret) \ | ||
172 | { \ | ||
173 | dev_printk(KERN_NOTICE, &p_dev->dev, \ | ||
174 | "%s : %s\n", \ | ||
175 | pcmcia_error_func(func), \ | ||
176 | pcmcia_error_ret(ret)); \ | ||
177 | } | ||
178 | |||
179 | /* CIS access. | ||
180 | * Use the pcmcia_* versions in PCMCIA drivers | ||
181 | */ | 152 | */ |
182 | int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse); | ||
183 | 153 | ||
184 | int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, | 154 | /* get the very first CIS entry of type @code. Note that buf is pointer |
185 | tuple_t *tuple); | 155 | * to u8 *buf; and that you need to kfree(buf) afterwards. */ |
186 | #define pcmcia_get_first_tuple(p_dev, tuple) \ | 156 | size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code, |
187 | pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple) | 157 | u8 **buf); |
188 | 158 | ||
189 | int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, | 159 | /* loop over CIS entries */ |
190 | tuple_t *tuple); | 160 | int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code, |
191 | #define pcmcia_get_next_tuple(p_dev, tuple) \ | 161 | int (*loop_tuple) (struct pcmcia_device *p_dev, |
192 | pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple) | 162 | tuple_t *tuple, |
163 | void *priv_data), | ||
164 | void *priv_data); | ||
193 | 165 | ||
194 | int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple); | 166 | /* get the MAC address from CISTPL_FUNCE */ |
195 | #define pcmcia_get_tuple_data(p_dev, tuple) \ | 167 | int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, |
196 | pccard_get_tuple_data(p_dev->socket, tuple) | 168 | struct net_device *dev); |
197 | 169 | ||
198 | 170 | ||
171 | /* parse a tuple_t */ | ||
172 | int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse); | ||
173 | |||
199 | /* loop CIS entries for valid configuration */ | 174 | /* loop CIS entries for valid configuration */ |
200 | int pcmcia_loop_config(struct pcmcia_device *p_dev, | 175 | int pcmcia_loop_config(struct pcmcia_device *p_dev, |
201 | int (*conf_check) (struct pcmcia_device *p_dev, | 176 | int (*conf_check) (struct pcmcia_device *p_dev, |
@@ -221,12 +196,11 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req); | |||
221 | int pcmcia_request_configuration(struct pcmcia_device *p_dev, | 196 | int pcmcia_request_configuration(struct pcmcia_device *p_dev, |
222 | config_req_t *req); | 197 | config_req_t *req); |
223 | 198 | ||
224 | int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, | 199 | int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, |
225 | window_handle_t *wh); | 200 | window_handle_t *wh); |
226 | int pcmcia_release_window(window_handle_t win); | 201 | int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t win); |
227 | 202 | int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t win, | |
228 | int pcmcia_get_mem_page(window_handle_t win, memreq_t *req); | 203 | memreq_t *req); |
229 | int pcmcia_map_mem_page(window_handle_t win, memreq_t *req); | ||
230 | 204 | ||
231 | int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); | 205 | int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); |
232 | void pcmcia_disable_device(struct pcmcia_device *p_dev); | 206 | void pcmcia_disable_device(struct pcmcia_device *p_dev); |
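Following the comment block above, a typical consumer of the new CIS helpers looks roughly like this hypothetical snippet (CISTPL_VERS_1 is just an example tuple code, and error handling is trimmed):

	/* hypothetical consumer, e.g. in a driver's probe path */
	static void example_read_cis(struct pcmcia_device *p_dev)
	{
		u8 *buf;
		size_t len;

		/* first CISTPL_VERS_1 tuple; the helper kmallocs the buffer */
		len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
		if (len) {
			/* ... inspect len bytes at buf ... */
			kfree(buf);	/* caller must free, per the comment above */
		}
	}

Network drivers can likewise hand their struct net_device to pcmcia_get_mac_from_cis() and let it pull the MAC address out of CISTPL_FUNCE, instead of walking the tuples by hand.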
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index e0f6feb8588c..7c23be706f12 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h | |||
@@ -107,15 +107,6 @@ typedef struct io_window_t { | |||
107 | struct resource *res; | 107 | struct resource *res; |
108 | } io_window_t; | 108 | } io_window_t; |
109 | 109 | ||
110 | #define WINDOW_MAGIC 0xB35C | ||
111 | typedef struct window_t { | ||
112 | u_short magic; | ||
113 | u_short index; | ||
114 | struct pcmcia_device *handle; | ||
115 | struct pcmcia_socket *sock; | ||
116 | pccard_mem_map ctl; | ||
117 | } window_t; | ||
118 | |||
119 | /* Maximum number of IO windows per socket */ | 110 | /* Maximum number of IO windows per socket */ |
120 | #define MAX_IO_WIN 2 | 111 | #define MAX_IO_WIN 2 |
121 | 112 | ||
@@ -155,7 +146,7 @@ struct pcmcia_socket { | |||
155 | u_int Config; | 146 | u_int Config; |
156 | } irq; | 147 | } irq; |
157 | io_window_t io[MAX_IO_WIN]; | 148 | io_window_t io[MAX_IO_WIN]; |
158 | window_t win[MAX_WIN]; | 149 | pccard_mem_map win[MAX_WIN]; |
159 | struct list_head cis_cache; | 150 | struct list_head cis_cache; |
160 | size_t fake_cis_len; | 151 | size_t fake_cis_len; |
161 | u8 *fake_cis; | 152 | u8 *fake_cis; |
@@ -172,7 +163,7 @@ struct pcmcia_socket { | |||
172 | u_int irq_mask; | 163 | u_int irq_mask; |
173 | u_int map_size; | 164 | u_int map_size; |
174 | u_int io_offset; | 165 | u_int io_offset; |
175 | u_char pci_irq; | 166 | u_int pci_irq; |
176 | struct pci_dev * cb_dev; | 167 | struct pci_dev * cb_dev; |
177 | 168 | ||
178 | 169 | ||
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 2a4b3bf74033..5acfb1eb4df9 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h | |||
@@ -31,6 +31,14 @@ | |||
31 | assign, print, reg, unreg) \ | 31 | assign, print, reg, unreg) \ |
32 | DEFINE_TRACE_FN(name, reg, unreg) | 32 | DEFINE_TRACE_FN(name, reg, unreg) |
33 | 33 | ||
34 | #undef DEFINE_EVENT | ||
35 | #define DEFINE_EVENT(template, name, proto, args) \ | ||
36 | DEFINE_TRACE(name) | ||
37 | |||
38 | #undef DEFINE_EVENT_PRINT | ||
39 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
40 | DEFINE_TRACE(name) | ||
41 | |||
34 | #undef DECLARE_TRACE | 42 | #undef DECLARE_TRACE |
35 | #define DECLARE_TRACE(name, proto, args) \ | 43 | #define DECLARE_TRACE(name, proto, args) \ |
36 | DEFINE_TRACE(name) | 44 | DEFINE_TRACE(name) |
@@ -63,6 +71,9 @@ | |||
63 | 71 | ||
64 | #undef TRACE_EVENT | 72 | #undef TRACE_EVENT |
65 | #undef TRACE_EVENT_FN | 73 | #undef TRACE_EVENT_FN |
74 | #undef DECLARE_EVENT_CLASS | ||
75 | #undef DEFINE_EVENT | ||
76 | #undef DEFINE_EVENT_PRINT | ||
66 | #undef TRACE_HEADER_MULTI_READ | 77 | #undef TRACE_HEADER_MULTI_READ |
67 | 78 | ||
68 | /* Only undef what we defined in this file */ | 79 | /* Only undef what we defined in this file */ |
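The effect is that each instance, and only the instance, receives a real tracepoint definition during this DEFINE_TRACE pass; the class contributes no storage of its own. For example, an instance of the kind defined later in events/block.h:

	DEFINE_EVENT(block_rq, block_rq_insert,
		TP_PROTO(struct request_queue *q, struct request *rq),
		TP_ARGS(q, rq));
	/* ...is reduced here to plain... */
	DEFINE_TRACE(block_rq_insert);

which is exactly what the equivalent TRACE_EVENT() used to produce.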
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h new file mode 100644 index 000000000000..1af72dc24278 --- /dev/null +++ b/include/trace/events/bkl.h | |||
@@ -0,0 +1,61 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM bkl | ||
3 | |||
4 | #if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_BKL_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | TRACE_EVENT(lock_kernel, | ||
10 | |||
11 | TP_PROTO(const char *func, const char *file, int line), | ||
12 | |||
13 | TP_ARGS(func, file, line), | ||
14 | |||
15 | TP_STRUCT__entry( | ||
16 | __field( int, depth ) | ||
17 | __field_ext( const char *, func, FILTER_PTR_STRING ) | ||
18 | __field_ext( const char *, file, FILTER_PTR_STRING ) | ||
19 | __field( int, line ) | ||
20 | ), | ||
21 | |||
22 | TP_fast_assign( | ||
23 | /* We want to record the lock_depth after lock is acquired */ | ||
24 | __entry->depth = current->lock_depth + 1; | ||
25 | __entry->func = func; | ||
26 | __entry->file = file; | ||
27 | __entry->line = line; | ||
28 | ), | ||
29 | |||
30 | TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, | ||
31 | __entry->file, __entry->line, __entry->func) | ||
32 | ); | ||
33 | |||
34 | TRACE_EVENT(unlock_kernel, | ||
35 | |||
36 | TP_PROTO(const char *func, const char *file, int line), | ||
37 | |||
38 | TP_ARGS(func, file, line), | ||
39 | |||
40 | TP_STRUCT__entry( | ||
41 | __field(int, depth ) | ||
42 | __field(const char *, func ) | ||
43 | __field(const char *, file ) | ||
44 | __field(int, line ) | ||
45 | ), | ||
46 | |||
47 | TP_fast_assign( | ||
48 | __entry->depth = current->lock_depth; | ||
49 | __entry->func = func; | ||
50 | __entry->file = file; | ||
51 | __entry->line = line; | ||
52 | ), | ||
53 | |||
54 | TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, | ||
55 | __entry->file, __entry->line, __entry->func) | ||
56 | ); | ||
57 | |||
58 | #endif /* _TRACE_BKL_H */ | ||
59 | |||
60 | /* This part must be outside protection */ | ||
61 | #include <trace/define_trace.h> | ||
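The producer side of these two events is not part of this header. Given the prototypes, emitting them is a one-liner such as the following hypothetical call site (where exactly the BKL code places it relative to the lock acquisition is an assumption, not shown in this patch):

	trace_lock_kernel(__func__, __FILE__, __LINE__);

The depth=... value printed for lock_kernel is current->lock_depth + 1, per the fast-assign comment above, so the recorded depth reflects the state after the lock has been taken.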
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 00405b5f624a..5fb72733331e 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/tracepoint.h> | 9 | #include <linux/tracepoint.h> |
10 | 10 | ||
11 | TRACE_EVENT(block_rq_abort, | 11 | DECLARE_EVENT_CLASS(block_rq_with_error, |
12 | 12 | ||
13 | TP_PROTO(struct request_queue *q, struct request *rq), | 13 | TP_PROTO(struct request_queue *q, struct request *rq), |
14 | 14 | ||
@@ -40,41 +40,28 @@ TRACE_EVENT(block_rq_abort, | |||
40 | __entry->nr_sector, __entry->errors) | 40 | __entry->nr_sector, __entry->errors) |
41 | ); | 41 | ); |
42 | 42 | ||
43 | TRACE_EVENT(block_rq_insert, | 43 | DEFINE_EVENT(block_rq_with_error, block_rq_abort, |
44 | 44 | ||
45 | TP_PROTO(struct request_queue *q, struct request *rq), | 45 | TP_PROTO(struct request_queue *q, struct request *rq), |
46 | 46 | ||
47 | TP_ARGS(q, rq), | 47 | TP_ARGS(q, rq) |
48 | ); | ||
48 | 49 | ||
49 | TP_STRUCT__entry( | 50 | DEFINE_EVENT(block_rq_with_error, block_rq_requeue, |
50 | __field( dev_t, dev ) | ||
51 | __field( sector_t, sector ) | ||
52 | __field( unsigned int, nr_sector ) | ||
53 | __field( unsigned int, bytes ) | ||
54 | __array( char, rwbs, 6 ) | ||
55 | __array( char, comm, TASK_COMM_LEN ) | ||
56 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
57 | ), | ||
58 | 51 | ||
59 | TP_fast_assign( | 52 | TP_PROTO(struct request_queue *q, struct request *rq), |
60 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
61 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
62 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
63 | __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; | ||
64 | 53 | ||
65 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 54 | TP_ARGS(q, rq) |
66 | blk_dump_cmd(__get_str(cmd), rq); | 55 | ); |
67 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
68 | ), | ||
69 | 56 | ||
70 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | 57 | DEFINE_EVENT(block_rq_with_error, block_rq_complete, |
71 | MAJOR(__entry->dev), MINOR(__entry->dev), | 58 | |
72 | __entry->rwbs, __entry->bytes, __get_str(cmd), | 59 | TP_PROTO(struct request_queue *q, struct request *rq), |
73 | (unsigned long long)__entry->sector, | 60 | |
74 | __entry->nr_sector, __entry->comm) | 61 | TP_ARGS(q, rq) |
75 | ); | 62 | ); |
76 | 63 | ||
77 | TRACE_EVENT(block_rq_issue, | 64 | DECLARE_EVENT_CLASS(block_rq, |
78 | 65 | ||
79 | TP_PROTO(struct request_queue *q, struct request *rq), | 66 | TP_PROTO(struct request_queue *q, struct request *rq), |
80 | 67 | ||
@@ -86,7 +73,7 @@ TRACE_EVENT(block_rq_issue, | |||
86 | __field( unsigned int, nr_sector ) | 73 | __field( unsigned int, nr_sector ) |
87 | __field( unsigned int, bytes ) | 74 | __field( unsigned int, bytes ) |
88 | __array( char, rwbs, 6 ) | 75 | __array( char, rwbs, 6 ) |
89 | __array( char, comm, TASK_COMM_LEN ) | 76 | __array( char, comm, TASK_COMM_LEN ) |
90 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | 77 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) |
91 | ), | 78 | ), |
92 | 79 | ||
@@ -108,68 +95,18 @@ TRACE_EVENT(block_rq_issue, | |||
108 | __entry->nr_sector, __entry->comm) | 95 | __entry->nr_sector, __entry->comm) |
109 | ); | 96 | ); |
110 | 97 | ||
111 | TRACE_EVENT(block_rq_requeue, | 98 | DEFINE_EVENT(block_rq, block_rq_insert, |
112 | 99 | ||
113 | TP_PROTO(struct request_queue *q, struct request *rq), | 100 | TP_PROTO(struct request_queue *q, struct request *rq), |
114 | 101 | ||
115 | TP_ARGS(q, rq), | 102 | TP_ARGS(q, rq) |
116 | |||
117 | TP_STRUCT__entry( | ||
118 | __field( dev_t, dev ) | ||
119 | __field( sector_t, sector ) | ||
120 | __field( unsigned int, nr_sector ) | ||
121 | __field( int, errors ) | ||
122 | __array( char, rwbs, 6 ) | ||
123 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
124 | ), | ||
125 | |||
126 | TP_fast_assign( | ||
127 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
128 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
129 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
130 | __entry->errors = rq->errors; | ||
131 | |||
132 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
133 | blk_dump_cmd(__get_str(cmd), rq); | ||
134 | ), | ||
135 | |||
136 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
137 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
138 | __entry->rwbs, __get_str(cmd), | ||
139 | (unsigned long long)__entry->sector, | ||
140 | __entry->nr_sector, __entry->errors) | ||
141 | ); | 103 | ); |
142 | 104 | ||
143 | TRACE_EVENT(block_rq_complete, | 105 | DEFINE_EVENT(block_rq, block_rq_issue, |
144 | 106 | ||
145 | TP_PROTO(struct request_queue *q, struct request *rq), | 107 | TP_PROTO(struct request_queue *q, struct request *rq), |
146 | 108 | ||
147 | TP_ARGS(q, rq), | 109 | TP_ARGS(q, rq) |
148 | |||
149 | TP_STRUCT__entry( | ||
150 | __field( dev_t, dev ) | ||
151 | __field( sector_t, sector ) | ||
152 | __field( unsigned int, nr_sector ) | ||
153 | __field( int, errors ) | ||
154 | __array( char, rwbs, 6 ) | ||
155 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
156 | ), | ||
157 | |||
158 | TP_fast_assign( | ||
159 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
160 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
161 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
162 | __entry->errors = rq->errors; | ||
163 | |||
164 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
165 | blk_dump_cmd(__get_str(cmd), rq); | ||
166 | ), | ||
167 | |||
168 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
169 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
170 | __entry->rwbs, __get_str(cmd), | ||
171 | (unsigned long long)__entry->sector, | ||
172 | __entry->nr_sector, __entry->errors) | ||
173 | ); | 110 | ); |
174 | 111 | ||
175 | TRACE_EVENT(block_bio_bounce, | 112 | TRACE_EVENT(block_bio_bounce, |
@@ -228,7 +165,7 @@ TRACE_EVENT(block_bio_complete, | |||
228 | __entry->nr_sector, __entry->error) | 165 | __entry->nr_sector, __entry->error) |
229 | ); | 166 | ); |
230 | 167 | ||
231 | TRACE_EVENT(block_bio_backmerge, | 168 | DECLARE_EVENT_CLASS(block_bio, |
232 | 169 | ||
233 | TP_PROTO(struct request_queue *q, struct bio *bio), | 170 | TP_PROTO(struct request_queue *q, struct bio *bio), |
234 | 171 | ||
@@ -256,63 +193,28 @@ TRACE_EVENT(block_bio_backmerge, | |||
256 | __entry->nr_sector, __entry->comm) | 193 | __entry->nr_sector, __entry->comm) |
257 | ); | 194 | ); |
258 | 195 | ||
259 | TRACE_EVENT(block_bio_frontmerge, | 196 | DEFINE_EVENT(block_bio, block_bio_backmerge, |
260 | 197 | ||
261 | TP_PROTO(struct request_queue *q, struct bio *bio), | 198 | TP_PROTO(struct request_queue *q, struct bio *bio), |
262 | 199 | ||
263 | TP_ARGS(q, bio), | 200 | TP_ARGS(q, bio) |
264 | |||
265 | TP_STRUCT__entry( | ||
266 | __field( dev_t, dev ) | ||
267 | __field( sector_t, sector ) | ||
268 | __field( unsigned, nr_sector ) | ||
269 | __array( char, rwbs, 6 ) | ||
270 | __array( char, comm, TASK_COMM_LEN ) | ||
271 | ), | ||
272 | |||
273 | TP_fast_assign( | ||
274 | __entry->dev = bio->bi_bdev->bd_dev; | ||
275 | __entry->sector = bio->bi_sector; | ||
276 | __entry->nr_sector = bio->bi_size >> 9; | ||
277 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
278 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
279 | ), | ||
280 | |||
281 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
282 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
283 | (unsigned long long)__entry->sector, | ||
284 | __entry->nr_sector, __entry->comm) | ||
285 | ); | 201 | ); |
286 | 202 | ||
287 | TRACE_EVENT(block_bio_queue, | 203 | DEFINE_EVENT(block_bio, block_bio_frontmerge, |
288 | 204 | ||
289 | TP_PROTO(struct request_queue *q, struct bio *bio), | 205 | TP_PROTO(struct request_queue *q, struct bio *bio), |
290 | 206 | ||
291 | TP_ARGS(q, bio), | 207 | TP_ARGS(q, bio) |
208 | ); | ||
292 | 209 | ||
293 | TP_STRUCT__entry( | 210 | DEFINE_EVENT(block_bio, block_bio_queue, |
294 | __field( dev_t, dev ) | ||
295 | __field( sector_t, sector ) | ||
296 | __field( unsigned int, nr_sector ) | ||
297 | __array( char, rwbs, 6 ) | ||
298 | __array( char, comm, TASK_COMM_LEN ) | ||
299 | ), | ||
300 | 211 | ||
301 | TP_fast_assign( | 212 | TP_PROTO(struct request_queue *q, struct bio *bio), |
302 | __entry->dev = bio->bi_bdev->bd_dev; | ||
303 | __entry->sector = bio->bi_sector; | ||
304 | __entry->nr_sector = bio->bi_size >> 9; | ||
305 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
306 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
307 | ), | ||
308 | 213 | ||
309 | TP_printk("%d,%d %s %llu + %u [%s]", | 214 | TP_ARGS(q, bio) |
310 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
311 | (unsigned long long)__entry->sector, | ||
312 | __entry->nr_sector, __entry->comm) | ||
313 | ); | 215 | ); |
314 | 216 | ||
315 | TRACE_EVENT(block_getrq, | 217 | DECLARE_EVENT_CLASS(block_get_rq, |
316 | 218 | ||
317 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | 219 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
318 | 220 | ||
@@ -341,33 +243,18 @@ TRACE_EVENT(block_getrq, | |||
341 | __entry->nr_sector, __entry->comm) | 243 | __entry->nr_sector, __entry->comm) |
342 | ); | 244 | ); |
343 | 245 | ||
344 | TRACE_EVENT(block_sleeprq, | 246 | DEFINE_EVENT(block_get_rq, block_getrq, |
345 | 247 | ||
346 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | 248 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
347 | 249 | ||
348 | TP_ARGS(q, bio, rw), | 250 | TP_ARGS(q, bio, rw) |
251 | ); | ||
349 | 252 | ||
350 | TP_STRUCT__entry( | 253 | DEFINE_EVENT(block_get_rq, block_sleeprq, |
351 | __field( dev_t, dev ) | ||
352 | __field( sector_t, sector ) | ||
353 | __field( unsigned int, nr_sector ) | ||
354 | __array( char, rwbs, 6 ) | ||
355 | __array( char, comm, TASK_COMM_LEN ) | ||
356 | ), | ||
357 | 254 | ||
358 | TP_fast_assign( | 255 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
359 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | ||
360 | __entry->sector = bio ? bio->bi_sector : 0; | ||
361 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | ||
362 | blk_fill_rwbs(__entry->rwbs, | ||
363 | bio ? bio->bi_rw : 0, __entry->nr_sector); | ||
364 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
365 | ), | ||
366 | 256 | ||
367 | TP_printk("%d,%d %s %llu + %u [%s]", | 257 | TP_ARGS(q, bio, rw) |
368 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
369 | (unsigned long long)__entry->sector, | ||
370 | __entry->nr_sector, __entry->comm) | ||
371 | ); | 258 | ); |
372 | 259 | ||
373 | TRACE_EVENT(block_plug, | 260 | TRACE_EVENT(block_plug, |
@@ -387,7 +274,7 @@ TRACE_EVENT(block_plug, | |||
387 | TP_printk("[%s]", __entry->comm) | 274 | TP_printk("[%s]", __entry->comm) |
388 | ); | 275 | ); |
389 | 276 | ||
390 | TRACE_EVENT(block_unplug_timer, | 277 | DECLARE_EVENT_CLASS(block_unplug, |
391 | 278 | ||
392 | TP_PROTO(struct request_queue *q), | 279 | TP_PROTO(struct request_queue *q), |
393 | 280 | ||
@@ -406,23 +293,18 @@ TRACE_EVENT(block_unplug_timer, | |||
406 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | 293 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) |
407 | ); | 294 | ); |
408 | 295 | ||
409 | TRACE_EVENT(block_unplug_io, | 296 | DEFINE_EVENT(block_unplug, block_unplug_timer, |
410 | 297 | ||
411 | TP_PROTO(struct request_queue *q), | 298 | TP_PROTO(struct request_queue *q), |
412 | 299 | ||
413 | TP_ARGS(q), | 300 | TP_ARGS(q) |
301 | ); | ||
414 | 302 | ||
415 | TP_STRUCT__entry( | 303 | DEFINE_EVENT(block_unplug, block_unplug_io, |
416 | __field( int, nr_rq ) | ||
417 | __array( char, comm, TASK_COMM_LEN ) | ||
418 | ), | ||
419 | 304 | ||
420 | TP_fast_assign( | 305 | TP_PROTO(struct request_queue *q), |
421 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | ||
422 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
423 | ), | ||
424 | 306 | ||
425 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | 307 | TP_ARGS(q) |
426 | ); | 308 | ); |
427 | 309 | ||
428 | TRACE_EVENT(block_split, | 310 | TRACE_EVENT(block_split, |
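The pattern is worth spelling out once: DECLARE_EVENT_CLASS carries the TP_STRUCT__entry / TP_fast_assign / TP_printk exactly as the individual TRACE_EVENTs did before, and each DEFINE_EVENT only restates the prototype and arguments. Adding another error-reporting request event would therefore be as small as this purely hypothetical instance:

	DEFINE_EVENT(block_rq_with_error, block_rq_example,

		TP_PROTO(struct request_queue *q, struct request *rq),

		TP_ARGS(q, rq)
	);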
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index d09550bf3f95..318f76535bd4 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h | |||
@@ -90,7 +90,7 @@ TRACE_EVENT(ext4_allocate_inode, | |||
90 | (unsigned long) __entry->dir, __entry->mode) | 90 | (unsigned long) __entry->dir, __entry->mode) |
91 | ); | 91 | ); |
92 | 92 | ||
93 | TRACE_EVENT(ext4_write_begin, | 93 | DECLARE_EVENT_CLASS(ext4__write_begin, |
94 | 94 | ||
95 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | 95 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, |
96 | unsigned int flags), | 96 | unsigned int flags), |
@@ -118,7 +118,23 @@ TRACE_EVENT(ext4_write_begin, | |||
118 | __entry->pos, __entry->len, __entry->flags) | 118 | __entry->pos, __entry->len, __entry->flags) |
119 | ); | 119 | ); |
120 | 120 | ||
121 | TRACE_EVENT(ext4_ordered_write_end, | 121 | DEFINE_EVENT(ext4__write_begin, ext4_write_begin, |
122 | |||
123 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | ||
124 | unsigned int flags), | ||
125 | |||
126 | TP_ARGS(inode, pos, len, flags) | ||
127 | ); | ||
128 | |||
129 | DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin, | ||
130 | |||
131 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | ||
132 | unsigned int flags), | ||
133 | |||
134 | TP_ARGS(inode, pos, len, flags) | ||
135 | ); | ||
136 | |||
137 | DECLARE_EVENT_CLASS(ext4__write_end, | ||
122 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | 138 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, |
123 | unsigned int copied), | 139 | unsigned int copied), |
124 | 140 | ||
@@ -145,57 +161,36 @@ TRACE_EVENT(ext4_ordered_write_end, | |||
145 | __entry->pos, __entry->len, __entry->copied) | 161 | __entry->pos, __entry->len, __entry->copied) |
146 | ); | 162 | ); |
147 | 163 | ||
148 | TRACE_EVENT(ext4_writeback_write_end, | 164 | DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end, |
165 | |||
149 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | 166 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, |
150 | unsigned int copied), | 167 | unsigned int copied), |
151 | 168 | ||
152 | TP_ARGS(inode, pos, len, copied), | 169 | TP_ARGS(inode, pos, len, copied) |
170 | ); | ||
153 | 171 | ||
154 | TP_STRUCT__entry( | 172 | DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end, |
155 | __field( dev_t, dev ) | ||
156 | __field( ino_t, ino ) | ||
157 | __field( loff_t, pos ) | ||
158 | __field( unsigned int, len ) | ||
159 | __field( unsigned int, copied ) | ||
160 | ), | ||
161 | 173 | ||
162 | TP_fast_assign( | 174 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, |
163 | __entry->dev = inode->i_sb->s_dev; | 175 | unsigned int copied), |
164 | __entry->ino = inode->i_ino; | ||
165 | __entry->pos = pos; | ||
166 | __entry->len = len; | ||
167 | __entry->copied = copied; | ||
168 | ), | ||
169 | 176 | ||
170 | TP_printk("dev %s ino %lu pos %llu len %u copied %u", | 177 | TP_ARGS(inode, pos, len, copied) |
171 | jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, | ||
172 | __entry->pos, __entry->len, __entry->copied) | ||
173 | ); | 178 | ); |
174 | 179 | ||
175 | TRACE_EVENT(ext4_journalled_write_end, | 180 | DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end, |
181 | |||
176 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | 182 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, |
177 | unsigned int copied), | 183 | unsigned int copied), |
178 | TP_ARGS(inode, pos, len, copied), | ||
179 | 184 | ||
180 | TP_STRUCT__entry( | 185 | TP_ARGS(inode, pos, len, copied) |
181 | __field( dev_t, dev ) | 186 | ); |
182 | __field( ino_t, ino ) | ||
183 | __field( loff_t, pos ) | ||
184 | __field( unsigned int, len ) | ||
185 | __field( unsigned int, copied ) | ||
186 | ), | ||
187 | 187 | ||
188 | TP_fast_assign( | 188 | DEFINE_EVENT(ext4__write_end, ext4_da_write_end, |
189 | __entry->dev = inode->i_sb->s_dev; | ||
190 | __entry->ino = inode->i_ino; | ||
191 | __entry->pos = pos; | ||
192 | __entry->len = len; | ||
193 | __entry->copied = copied; | ||
194 | ), | ||
195 | 189 | ||
196 | TP_printk("dev %s ino %lu pos %llu len %u copied %u", | 190 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, |
197 | jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, | 191 | unsigned int copied), |
198 | __entry->pos, __entry->len, __entry->copied) | 192 | |
193 | TP_ARGS(inode, pos, len, copied) | ||
199 | ); | 194 | ); |
200 | 195 | ||
201 | TRACE_EVENT(ext4_writepage, | 196 | TRACE_EVENT(ext4_writepage, |
@@ -337,60 +332,6 @@ TRACE_EVENT(ext4_da_writepages_result, | |||
337 | (unsigned long) __entry->writeback_index) | 332 | (unsigned long) __entry->writeback_index) |
338 | ); | 333 | ); |
339 | 334 | ||
340 | TRACE_EVENT(ext4_da_write_begin, | ||
341 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | ||
342 | unsigned int flags), | ||
343 | |||
344 | TP_ARGS(inode, pos, len, flags), | ||
345 | |||
346 | TP_STRUCT__entry( | ||
347 | __field( dev_t, dev ) | ||
348 | __field( ino_t, ino ) | ||
349 | __field( loff_t, pos ) | ||
350 | __field( unsigned int, len ) | ||
351 | __field( unsigned int, flags ) | ||
352 | ), | ||
353 | |||
354 | TP_fast_assign( | ||
355 | __entry->dev = inode->i_sb->s_dev; | ||
356 | __entry->ino = inode->i_ino; | ||
357 | __entry->pos = pos; | ||
358 | __entry->len = len; | ||
359 | __entry->flags = flags; | ||
360 | ), | ||
361 | |||
362 | TP_printk("dev %s ino %lu pos %llu len %u flags %u", | ||
363 | jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, | ||
364 | __entry->pos, __entry->len, __entry->flags) | ||
365 | ); | ||
366 | |||
367 | TRACE_EVENT(ext4_da_write_end, | ||
368 | TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, | ||
369 | unsigned int copied), | ||
370 | |||
371 | TP_ARGS(inode, pos, len, copied), | ||
372 | |||
373 | TP_STRUCT__entry( | ||
374 | __field( dev_t, dev ) | ||
375 | __field( ino_t, ino ) | ||
376 | __field( loff_t, pos ) | ||
377 | __field( unsigned int, len ) | ||
378 | __field( unsigned int, copied ) | ||
379 | ), | ||
380 | |||
381 | TP_fast_assign( | ||
382 | __entry->dev = inode->i_sb->s_dev; | ||
383 | __entry->ino = inode->i_ino; | ||
384 | __entry->pos = pos; | ||
385 | __entry->len = len; | ||
386 | __entry->copied = copied; | ||
387 | ), | ||
388 | |||
389 | TP_printk("dev %s ino %lu pos %llu len %u copied %u", | ||
390 | jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, | ||
391 | __entry->pos, __entry->len, __entry->copied) | ||
392 | ); | ||
393 | |||
394 | TRACE_EVENT(ext4_discard_blocks, | 335 | TRACE_EVENT(ext4_discard_blocks, |
395 | TP_PROTO(struct super_block *sb, unsigned long long blk, | 336 | TP_PROTO(struct super_block *sb, unsigned long long blk, |
396 | unsigned long long count), | 337 | unsigned long long count), |
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index b89f9db4a404..0e4cfb694fe7 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h | |||
@@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry, | |||
48 | __assign_str(name, action->name); | 48 | __assign_str(name, action->name); |
49 | ), | 49 | ), |
50 | 50 | ||
51 | TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) | 51 | TP_printk("irq=%d name=%s", __entry->irq, __get_str(name)) |
52 | ); | 52 | ); |
53 | 53 | ||
54 | /** | 54 | /** |
@@ -78,22 +78,11 @@ TRACE_EVENT(irq_handler_exit, | |||
78 | __entry->ret = ret; | 78 | __entry->ret = ret; |
79 | ), | 79 | ), |
80 | 80 | ||
81 | TP_printk("irq=%d return=%s", | 81 | TP_printk("irq=%d ret=%s", |
82 | __entry->irq, __entry->ret ? "handled" : "unhandled") | 82 | __entry->irq, __entry->ret ? "handled" : "unhandled") |
83 | ); | 83 | ); |
84 | 84 | ||
85 | /** | 85 | DECLARE_EVENT_CLASS(softirq, |
86 | * softirq_entry - called immediately before the softirq handler | ||
87 | * @h: pointer to struct softirq_action | ||
88 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
89 | * | ||
90 | * The @h parameter, contains a pointer to the struct softirq_action | ||
91 | * which has a pointer to the action handler that is called. By subtracting | ||
92 | * the @vec pointer from the @h pointer, we can determine the softirq | ||
93 | * number. Also, when used in combination with the softirq_exit tracepoint | ||
94 | * we can determine the softirq latency. | ||
95 | */ | ||
96 | TRACE_EVENT(softirq_entry, | ||
97 | 86 | ||
98 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | 87 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), |
99 | 88 | ||
@@ -107,11 +96,29 @@ TRACE_EVENT(softirq_entry, | |||
107 | __entry->vec = (int)(h - vec); | 96 | __entry->vec = (int)(h - vec); |
108 | ), | 97 | ), |
109 | 98 | ||
110 | TP_printk("softirq=%d action=%s", __entry->vec, | 99 | TP_printk("vec=%d [action=%s]", __entry->vec, |
111 | show_softirq_name(__entry->vec)) | 100 | show_softirq_name(__entry->vec)) |
112 | ); | 101 | ); |
113 | 102 | ||
114 | /** | 103 | /** |
104 | * softirq_entry - called immediately before the softirq handler | ||
105 | * @h: pointer to struct softirq_action | ||
106 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
107 | * | ||
108 | * The @h parameter, contains a pointer to the struct softirq_action | ||
109 | * which has a pointer to the action handler that is called. By subtracting | ||
110 | * the @vec pointer from the @h pointer, we can determine the softirq | ||
111 | * number. Also, when used in combination with the softirq_exit tracepoint | ||
112 | * we can determine the softirq latency. | ||
113 | */ | ||
114 | DEFINE_EVENT(softirq, softirq_entry, | ||
115 | |||
116 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
117 | |||
118 | TP_ARGS(h, vec) | ||
119 | ); | ||
120 | |||
121 | /** | ||
115 | * softirq_exit - called immediately after the softirq handler returns | 122 | * softirq_exit - called immediately after the softirq handler returns |
116 | * @h: pointer to struct softirq_action | 123 | * @h: pointer to struct softirq_action |
117 | * @vec: pointer to first struct softirq_action in softirq_vec array | 124 | * @vec: pointer to first struct softirq_action in softirq_vec array |
@@ -122,22 +129,11 @@ TRACE_EVENT(softirq_entry, | |||
122 | * combination with the softirq_entry tracepoint we can determine the softirq | 129 | * combination with the softirq_entry tracepoint we can determine the softirq |
123 | * latency. | 130 | * latency. |
124 | */ | 131 | */ |
125 | TRACE_EVENT(softirq_exit, | 132 | DEFINE_EVENT(softirq, softirq_exit, |
126 | 133 | ||
127 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | 134 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), |
128 | 135 | ||
129 | TP_ARGS(h, vec), | 136 | TP_ARGS(h, vec) |
130 | |||
131 | TP_STRUCT__entry( | ||
132 | __field( int, vec ) | ||
133 | ), | ||
134 | |||
135 | TP_fast_assign( | ||
136 | __entry->vec = (int)(h - vec); | ||
137 | ), | ||
138 | |||
139 | TP_printk("softirq=%d action=%s", __entry->vec, | ||
140 | show_softirq_name(__entry->vec)) | ||
141 | ); | 137 | ); |
142 | 138 | ||
143 | #endif /* _TRACE_IRQ_H */ | 139 | #endif /* _TRACE_IRQ_H */ |
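As the comment block explains, the softirq number is recovered purely by pointer arithmetic on the softirq_vec array. Conceptually the call sites in __do_softirq() bracket the handler like this (a sketch of the surrounding kernel code, not part of this hunk):

	trace_softirq_entry(h, softirq_vec);
	h->action(h);
	trace_softirq_exit(h, softirq_vec);

so __entry->vec = (int)(h - vec) is simply the index of the running softirq, and show_softirq_name() turns that index into the action name printed as "vec=%d [action=%s]".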
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 3c60b75adb9e..96b370a050de 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h | |||
@@ -30,7 +30,7 @@ TRACE_EVENT(jbd2_checkpoint, | |||
30 | jbd2_dev_to_name(__entry->dev), __entry->result) | 30 | jbd2_dev_to_name(__entry->dev), __entry->result) |
31 | ); | 31 | ); |
32 | 32 | ||
33 | TRACE_EVENT(jbd2_start_commit, | 33 | DECLARE_EVENT_CLASS(jbd2_commit, |
34 | 34 | ||
35 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), | 35 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), |
36 | 36 | ||
@@ -53,73 +53,32 @@ TRACE_EVENT(jbd2_start_commit, | |||
53 | __entry->sync_commit) | 53 | __entry->sync_commit) |
54 | ); | 54 | ); |
55 | 55 | ||
56 | TRACE_EVENT(jbd2_commit_locking, | 56 | DEFINE_EVENT(jbd2_commit, jbd2_start_commit, |
57 | 57 | ||
58 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), | 58 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), |
59 | 59 | ||
60 | TP_ARGS(journal, commit_transaction), | 60 | TP_ARGS(journal, commit_transaction) |
61 | |||
62 | TP_STRUCT__entry( | ||
63 | __field( dev_t, dev ) | ||
64 | __field( char, sync_commit ) | ||
65 | __field( int, transaction ) | ||
66 | ), | ||
67 | |||
68 | TP_fast_assign( | ||
69 | __entry->dev = journal->j_fs_dev->bd_dev; | ||
70 | __entry->sync_commit = commit_transaction->t_synchronous_commit; | ||
71 | __entry->transaction = commit_transaction->t_tid; | ||
72 | ), | ||
73 | |||
74 | TP_printk("dev %s transaction %d sync %d", | ||
75 | jbd2_dev_to_name(__entry->dev), __entry->transaction, | ||
76 | __entry->sync_commit) | ||
77 | ); | 61 | ); |
78 | 62 | ||
79 | TRACE_EVENT(jbd2_commit_flushing, | 63 | DEFINE_EVENT(jbd2_commit, jbd2_commit_locking, |
80 | 64 | ||
81 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), | 65 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), |
82 | 66 | ||
83 | TP_ARGS(journal, commit_transaction), | 67 | TP_ARGS(journal, commit_transaction) |
84 | |||
85 | TP_STRUCT__entry( | ||
86 | __field( dev_t, dev ) | ||
87 | __field( char, sync_commit ) | ||
88 | __field( int, transaction ) | ||
89 | ), | ||
90 | |||
91 | TP_fast_assign( | ||
92 | __entry->dev = journal->j_fs_dev->bd_dev; | ||
93 | __entry->sync_commit = commit_transaction->t_synchronous_commit; | ||
94 | __entry->transaction = commit_transaction->t_tid; | ||
95 | ), | ||
96 | |||
97 | TP_printk("dev %s transaction %d sync %d", | ||
98 | jbd2_dev_to_name(__entry->dev), __entry->transaction, | ||
99 | __entry->sync_commit) | ||
100 | ); | 68 | ); |
101 | 69 | ||
102 | TRACE_EVENT(jbd2_commit_logging, | 70 | DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing, |
103 | 71 | ||
104 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), | 72 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), |
105 | 73 | ||
106 | TP_ARGS(journal, commit_transaction), | 74 | TP_ARGS(journal, commit_transaction) |
75 | ); | ||
107 | 76 | ||
108 | TP_STRUCT__entry( | 77 | DEFINE_EVENT(jbd2_commit, jbd2_commit_logging, |
109 | __field( dev_t, dev ) | ||
110 | __field( char, sync_commit ) | ||
111 | __field( int, transaction ) | ||
112 | ), | ||
113 | 78 | ||
114 | TP_fast_assign( | 79 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), |
115 | __entry->dev = journal->j_fs_dev->bd_dev; | ||
116 | __entry->sync_commit = commit_transaction->t_synchronous_commit; | ||
117 | __entry->transaction = commit_transaction->t_tid; | ||
118 | ), | ||
119 | 80 | ||
120 | TP_printk("dev %s transaction %d sync %d", | 81 | TP_ARGS(journal, commit_transaction) |
121 | jbd2_dev_to_name(__entry->dev), __entry->transaction, | ||
122 | __entry->sync_commit) | ||
123 | ); | 82 | ); |
124 | 83 | ||
125 | TRACE_EVENT(jbd2_end_commit, | 84 | TRACE_EVENT(jbd2_end_commit, |
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index eaf46bdd18a5..3adca0ca9dbe 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h | |||
@@ -44,7 +44,7 @@ | |||
44 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ | 44 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ |
45 | ) : "GFP_NOWAIT" | 45 | ) : "GFP_NOWAIT" |
46 | 46 | ||
47 | TRACE_EVENT(kmalloc, | 47 | DECLARE_EVENT_CLASS(kmem_alloc, |
48 | 48 | ||
49 | TP_PROTO(unsigned long call_site, | 49 | TP_PROTO(unsigned long call_site, |
50 | const void *ptr, | 50 | const void *ptr, |
@@ -78,41 +78,23 @@ TRACE_EVENT(kmalloc, | |||
78 | show_gfp_flags(__entry->gfp_flags)) | 78 | show_gfp_flags(__entry->gfp_flags)) |
79 | ); | 79 | ); |
80 | 80 | ||
81 | TRACE_EVENT(kmem_cache_alloc, | 81 | DEFINE_EVENT(kmem_alloc, kmalloc, |
82 | 82 | ||
83 | TP_PROTO(unsigned long call_site, | 83 | TP_PROTO(unsigned long call_site, const void *ptr, |
84 | const void *ptr, | 84 | size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), |
85 | size_t bytes_req, | ||
86 | size_t bytes_alloc, | ||
87 | gfp_t gfp_flags), | ||
88 | 85 | ||
89 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | 86 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) |
87 | ); | ||
90 | 88 | ||
91 | TP_STRUCT__entry( | 89 | DEFINE_EVENT(kmem_alloc, kmem_cache_alloc, |
92 | __field( unsigned long, call_site ) | ||
93 | __field( const void *, ptr ) | ||
94 | __field( size_t, bytes_req ) | ||
95 | __field( size_t, bytes_alloc ) | ||
96 | __field( gfp_t, gfp_flags ) | ||
97 | ), | ||
98 | 90 | ||
99 | TP_fast_assign( | 91 | TP_PROTO(unsigned long call_site, const void *ptr, |
100 | __entry->call_site = call_site; | 92 | size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), |
101 | __entry->ptr = ptr; | ||
102 | __entry->bytes_req = bytes_req; | ||
103 | __entry->bytes_alloc = bytes_alloc; | ||
104 | __entry->gfp_flags = gfp_flags; | ||
105 | ), | ||
106 | 93 | ||
107 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", | 94 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) |
108 | __entry->call_site, | ||
109 | __entry->ptr, | ||
110 | __entry->bytes_req, | ||
111 | __entry->bytes_alloc, | ||
112 | show_gfp_flags(__entry->gfp_flags)) | ||
113 | ); | 95 | ); |
114 | 96 | ||
115 | TRACE_EVENT(kmalloc_node, | 97 | DECLARE_EVENT_CLASS(kmem_alloc_node, |
116 | 98 | ||
117 | TP_PROTO(unsigned long call_site, | 99 | TP_PROTO(unsigned long call_site, |
118 | const void *ptr, | 100 | const void *ptr, |
@@ -150,45 +132,25 @@ TRACE_EVENT(kmalloc_node, | |||
150 | __entry->node) | 132 | __entry->node) |
151 | ); | 133 | ); |
152 | 134 | ||
153 | TRACE_EVENT(kmem_cache_alloc_node, | 135 | DEFINE_EVENT(kmem_alloc_node, kmalloc_node, |
154 | 136 | ||
155 | TP_PROTO(unsigned long call_site, | 137 | TP_PROTO(unsigned long call_site, const void *ptr, |
156 | const void *ptr, | 138 | size_t bytes_req, size_t bytes_alloc, |
157 | size_t bytes_req, | 139 | gfp_t gfp_flags, int node), |
158 | size_t bytes_alloc, | ||
159 | gfp_t gfp_flags, | ||
160 | int node), | ||
161 | 140 | ||
162 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | 141 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) |
142 | ); | ||
163 | 143 | ||
164 | TP_STRUCT__entry( | 144 | DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node, |
165 | __field( unsigned long, call_site ) | ||
166 | __field( const void *, ptr ) | ||
167 | __field( size_t, bytes_req ) | ||
168 | __field( size_t, bytes_alloc ) | ||
169 | __field( gfp_t, gfp_flags ) | ||
170 | __field( int, node ) | ||
171 | ), | ||
172 | 145 | ||
173 | TP_fast_assign( | 146 | TP_PROTO(unsigned long call_site, const void *ptr, |
174 | __entry->call_site = call_site; | 147 | size_t bytes_req, size_t bytes_alloc, |
175 | __entry->ptr = ptr; | 148 | gfp_t gfp_flags, int node), |
176 | __entry->bytes_req = bytes_req; | ||
177 | __entry->bytes_alloc = bytes_alloc; | ||
178 | __entry->gfp_flags = gfp_flags; | ||
179 | __entry->node = node; | ||
180 | ), | ||
181 | 149 | ||
182 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", | 150 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) |
183 | __entry->call_site, | ||
184 | __entry->ptr, | ||
185 | __entry->bytes_req, | ||
186 | __entry->bytes_alloc, | ||
187 | show_gfp_flags(__entry->gfp_flags), | ||
188 | __entry->node) | ||
189 | ); | 151 | ); |
190 | 152 | ||
191 | TRACE_EVENT(kfree, | 153 | DECLARE_EVENT_CLASS(kmem_free, |
192 | 154 | ||
193 | TP_PROTO(unsigned long call_site, const void *ptr), | 155 | TP_PROTO(unsigned long call_site, const void *ptr), |
194 | 156 | ||
@@ -207,23 +169,18 @@ TRACE_EVENT(kfree, | |||
207 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | 169 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) |
208 | ); | 170 | ); |
209 | 171 | ||
210 | TRACE_EVENT(kmem_cache_free, | 172 | DEFINE_EVENT(kmem_free, kfree, |
211 | 173 | ||
212 | TP_PROTO(unsigned long call_site, const void *ptr), | 174 | TP_PROTO(unsigned long call_site, const void *ptr), |
213 | 175 | ||
214 | TP_ARGS(call_site, ptr), | 176 | TP_ARGS(call_site, ptr) |
177 | ); | ||
215 | 178 | ||
216 | TP_STRUCT__entry( | 179 | DEFINE_EVENT(kmem_free, kmem_cache_free, |
217 | __field( unsigned long, call_site ) | ||
218 | __field( const void *, ptr ) | ||
219 | ), | ||
220 | 180 | ||
221 | TP_fast_assign( | 181 | TP_PROTO(unsigned long call_site, const void *ptr), |
222 | __entry->call_site = call_site; | ||
223 | __entry->ptr = ptr; | ||
224 | ), | ||
225 | 182 | ||
226 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | 183 | TP_ARGS(call_site, ptr) |
227 | ); | 184 | ); |
228 | 185 | ||
229 | TRACE_EVENT(mm_page_free_direct, | 186 | TRACE_EVENT(mm_page_free_direct, |
@@ -299,7 +256,7 @@ TRACE_EVENT(mm_page_alloc, | |||
299 | show_gfp_flags(__entry->gfp_flags)) | 256 | show_gfp_flags(__entry->gfp_flags)) |
300 | ); | 257 | ); |
301 | 258 | ||
302 | TRACE_EVENT(mm_page_alloc_zone_locked, | 259 | DECLARE_EVENT_CLASS(mm_page, |
303 | 260 | ||
304 | TP_PROTO(struct page *page, unsigned int order, int migratetype), | 261 | TP_PROTO(struct page *page, unsigned int order, int migratetype), |
305 | 262 | ||
@@ -325,29 +282,22 @@ TRACE_EVENT(mm_page_alloc_zone_locked, | |||
325 | __entry->order == 0) | 282 | __entry->order == 0) |
326 | ); | 283 | ); |
327 | 284 | ||
328 | TRACE_EVENT(mm_page_pcpu_drain, | 285 | DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, |
329 | 286 | ||
330 | TP_PROTO(struct page *page, int order, int migratetype), | 287 | TP_PROTO(struct page *page, unsigned int order, int migratetype), |
331 | 288 | ||
332 | TP_ARGS(page, order, migratetype), | 289 | TP_ARGS(page, order, migratetype) |
290 | ); | ||
333 | 291 | ||
334 | TP_STRUCT__entry( | 292 | DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, |
335 | __field( struct page *, page ) | ||
336 | __field( int, order ) | ||
337 | __field( int, migratetype ) | ||
338 | ), | ||
339 | 293 | ||
340 | TP_fast_assign( | 294 | TP_PROTO(struct page *page, unsigned int order, int migratetype), |
341 | __entry->page = page; | 295 | |
342 | __entry->order = order; | 296 | TP_ARGS(page, order, migratetype), |
343 | __entry->migratetype = migratetype; | ||
344 | ), | ||
345 | 297 | ||
346 | TP_printk("page=%p pfn=%lu order=%d migratetype=%d", | 298 | TP_printk("page=%p pfn=%lu order=%d migratetype=%d", |
347 | __entry->page, | 299 | __entry->page, page_to_pfn(__entry->page), |
348 | page_to_pfn(__entry->page), | 300 | __entry->order, __entry->migratetype) |
349 | __entry->order, | ||
350 | __entry->migratetype) | ||
351 | ); | 301 | ); |
352 | 302 | ||
353 | TRACE_EVENT(mm_page_alloc_extfrag, | 303 | TRACE_EVENT(mm_page_alloc_extfrag, |
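mm_page_pcpu_drain is the one instance here that overrides the class output: DEFINE_EVENT_PRINT supplies its own TP_printk while still inheriting the class's fields and assignment. A further hypothetical instance reusing the mm_page class could, in the same way, print only what it cares about:

	DEFINE_EVENT_PRINT(mm_page, mm_page_example,

		TP_PROTO(struct page *page, unsigned int order, int migratetype),

		TP_ARGS(page, order, migratetype),

		TP_printk("page=%p order=%d", __entry->page, __entry->order)
	);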
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lock.h index bcf1d209a00d..a870ba125aa8 100644 --- a/include/trace/events/lockdep.h +++ b/include/trace/events/lock.h | |||
@@ -1,8 +1,8 @@ | |||
1 | #undef TRACE_SYSTEM | 1 | #undef TRACE_SYSTEM |
2 | #define TRACE_SYSTEM lockdep | 2 | #define TRACE_SYSTEM lock |
3 | 3 | ||
4 | #if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ) | 4 | #if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) |
5 | #define _TRACE_LOCKDEP_H | 5 | #define _TRACE_LOCK_H |
6 | 6 | ||
7 | #include <linux/lockdep.h> | 7 | #include <linux/lockdep.h> |
8 | #include <linux/tracepoint.h> | 8 | #include <linux/tracepoint.h> |
@@ -90,7 +90,7 @@ TRACE_EVENT(lock_acquired, | |||
90 | #endif | 90 | #endif |
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | #endif /* _TRACE_LOCKDEP_H */ | 93 | #endif /* _TRACE_LOCK_H */ |
94 | 94 | ||
95 | /* This part must be outside protection */ | 95 | /* This part must be outside protection */ |
96 | #include <trace/define_trace.h> | 96 | #include <trace/define_trace.h> |
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h new file mode 100644 index 000000000000..7eee77895cb3 --- /dev/null +++ b/include/trace/events/mce.h | |||
@@ -0,0 +1,69 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM mce | ||
3 | |||
4 | #if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_MCE_H | ||
6 | |||
7 | #include <linux/ktime.h> | ||
8 | #include <linux/tracepoint.h> | ||
9 | #include <asm/mce.h> | ||
10 | |||
11 | TRACE_EVENT(mce_record, | ||
12 | |||
13 | TP_PROTO(struct mce *m), | ||
14 | |||
15 | TP_ARGS(m), | ||
16 | |||
17 | TP_STRUCT__entry( | ||
18 | __field( u64, mcgcap ) | ||
19 | __field( u64, mcgstatus ) | ||
20 | __field( u8, bank ) | ||
21 | __field( u64, status ) | ||
22 | __field( u64, addr ) | ||
23 | __field( u64, misc ) | ||
24 | __field( u64, ip ) | ||
25 | __field( u8, cs ) | ||
26 | __field( u64, tsc ) | ||
27 | __field( u64, walltime ) | ||
28 | __field( u32, cpu ) | ||
29 | __field( u32, cpuid ) | ||
30 | __field( u32, apicid ) | ||
31 | __field( u32, socketid ) | ||
32 | __field( u8, cpuvendor ) | ||
33 | ), | ||
34 | |||
35 | TP_fast_assign( | ||
36 | __entry->mcgcap = m->mcgcap; | ||
37 | __entry->mcgstatus = m->mcgstatus; | ||
38 | __entry->bank = m->bank; | ||
39 | __entry->status = m->status; | ||
40 | __entry->addr = m->addr; | ||
41 | __entry->misc = m->misc; | ||
42 | __entry->ip = m->ip; | ||
43 | __entry->cs = m->cs; | ||
44 | __entry->tsc = m->tsc; | ||
45 | __entry->walltime = m->time; | ||
46 | __entry->cpu = m->extcpu; | ||
47 | __entry->cpuid = m->cpuid; | ||
48 | __entry->apicid = m->apicid; | ||
49 | __entry->socketid = m->socketid; | ||
50 | __entry->cpuvendor = m->cpuvendor; | ||
51 | ), | ||
52 | |||
53 | TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", | ||
54 | __entry->cpu, | ||
55 | __entry->mcgcap, __entry->mcgstatus, | ||
56 | __entry->bank, __entry->status, | ||
57 | __entry->addr, __entry->misc, | ||
58 | __entry->cs, __entry->ip, | ||
59 | __entry->tsc, | ||
60 | __entry->cpuvendor, __entry->cpuid, | ||
61 | __entry->walltime, | ||
62 | __entry->socketid, | ||
63 | __entry->apicid) | ||
64 | ); | ||
65 | |||
66 | #endif /* _TRACE_MCE_H */ | ||
67 | |||
68 | /* This part must be outside protection */ | ||
69 | #include <trace/define_trace.h> | ||
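The header only declares the event; emitting it is then a single call from the machine-check handling code, roughly as in this hypothetical wrapper (the real call site is not part of this hunk):

	static void example_log_mce(struct mce *m)
	{
		/* one mce_record event carrying all the fields captured above */
		trace_mce_record(m);
	}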
diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 84160fb18478..4b0f48ba16a6 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h | |||
@@ -51,7 +51,7 @@ TRACE_EVENT(module_free, | |||
51 | TP_printk("%s", __get_str(name)) | 51 | TP_printk("%s", __get_str(name)) |
52 | ); | 52 | ); |
53 | 53 | ||
54 | TRACE_EVENT(module_get, | 54 | DECLARE_EVENT_CLASS(module_refcnt, |
55 | 55 | ||
56 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 56 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), |
57 | 57 | ||
@@ -73,26 +73,18 @@ TRACE_EVENT(module_get, | |||
73 | __get_str(name), (void *)__entry->ip, __entry->refcnt) | 73 | __get_str(name), (void *)__entry->ip, __entry->refcnt) |
74 | ); | 74 | ); |
75 | 75 | ||
76 | TRACE_EVENT(module_put, | 76 | DEFINE_EVENT(module_refcnt, module_get, |
77 | 77 | ||
78 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 78 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), |
79 | 79 | ||
80 | TP_ARGS(mod, ip, refcnt), | 80 | TP_ARGS(mod, ip, refcnt) |
81 | ); | ||
81 | 82 | ||
82 | TP_STRUCT__entry( | 83 | DEFINE_EVENT(module_refcnt, module_put, |
83 | __field( unsigned long, ip ) | ||
84 | __field( int, refcnt ) | ||
85 | __string( name, mod->name ) | ||
86 | ), | ||
87 | 84 | ||
88 | TP_fast_assign( | 85 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), |
89 | __entry->ip = ip; | ||
90 | __entry->refcnt = refcnt; | ||
91 | __assign_str(name, mod->name); | ||
92 | ), | ||
93 | 86 | ||
94 | TP_printk("%s call_site=%pf refcnt=%d", | 87 | TP_ARGS(mod, ip, refcnt) |
95 | __get_str(name), (void *)__entry->ip, __entry->refcnt) | ||
96 | ); | 88 | ); |
97 | 89 | ||
98 | TRACE_EVENT(module_request, | 90 | TRACE_EVENT(module_request, |
diff --git a/include/trace/events/power.h b/include/trace/events/power.h index ea6d579261ad..c4efe9b8280d 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h | |||
@@ -16,9 +16,7 @@ enum { | |||
16 | }; | 16 | }; |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | 19 | DECLARE_EVENT_CLASS(power, | |
20 | |||
21 | TRACE_EVENT(power_start, | ||
22 | 20 | ||
23 | TP_PROTO(unsigned int type, unsigned int state), | 21 | TP_PROTO(unsigned int type, unsigned int state), |
24 | 22 | ||
@@ -37,42 +35,36 @@ TRACE_EVENT(power_start, | |||
37 | TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state) | 35 | TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state) |
38 | ); | 36 | ); |
39 | 37 | ||
40 | TRACE_EVENT(power_end, | 38 | DEFINE_EVENT(power, power_start, |
41 | |||
42 | TP_PROTO(int dummy), | ||
43 | 39 | ||
44 | TP_ARGS(dummy), | 40 | TP_PROTO(unsigned int type, unsigned int state), |
45 | 41 | ||
46 | TP_STRUCT__entry( | 42 | TP_ARGS(type, state) |
47 | __field( u64, dummy ) | 43 | ); |
48 | ), | ||
49 | 44 | ||
50 | TP_fast_assign( | 45 | DEFINE_EVENT(power, power_frequency, |
51 | __entry->dummy = 0xffff; | ||
52 | ), | ||
53 | 46 | ||
54 | TP_printk("dummy=%lu", (unsigned long)__entry->dummy) | 47 | TP_PROTO(unsigned int type, unsigned int state), |
55 | 48 | ||
49 | TP_ARGS(type, state) | ||
56 | ); | 50 | ); |
57 | 51 | ||
52 | TRACE_EVENT(power_end, | ||
58 | 53 | ||
59 | TRACE_EVENT(power_frequency, | 54 | TP_PROTO(int dummy), |
60 | |||
61 | TP_PROTO(unsigned int type, unsigned int state), | ||
62 | 55 | ||
63 | TP_ARGS(type, state), | 56 | TP_ARGS(dummy), |
64 | 57 | ||
65 | TP_STRUCT__entry( | 58 | TP_STRUCT__entry( |
66 | __field( u64, type ) | 59 | __field( u64, dummy ) |
67 | __field( u64, state ) | ||
68 | ), | 60 | ), |
69 | 61 | ||
70 | TP_fast_assign( | 62 | TP_fast_assign( |
71 | __entry->type = type; | 63 | __entry->dummy = 0xffff; |
72 | __entry->state = state; | ||
73 | ), | 64 | ), |
74 | 65 | ||
75 | TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state) | 66 | TP_printk("dummy=%lu", (unsigned long)__entry->dummy) |
67 | |||
76 | ); | 68 | ); |
77 | 69 | ||
78 | #endif /* _TRACE_POWER_H */ | 70 | #endif /* _TRACE_POWER_H */ |
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 4069c43f4187..cfceb0b73e20 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
@@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop, | |||
26 | __entry->pid = t->pid; | 26 | __entry->pid = t->pid; |
27 | ), | 27 | ), |
28 | 28 | ||
29 | TP_printk("task %s:%d", __entry->comm, __entry->pid) | 29 | TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) |
30 | ); | 30 | ); |
31 | 31 | ||
32 | /* | 32 | /* |
@@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret, | |||
46 | __entry->ret = ret; | 46 | __entry->ret = ret; |
47 | ), | 47 | ), |
48 | 48 | ||
49 | TP_printk("ret %d", __entry->ret) | 49 | TP_printk("ret=%d", __entry->ret) |
50 | ); | 50 | ); |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task, | |||
73 | __entry->prio = p->prio; | 73 | __entry->prio = p->prio; |
74 | ), | 74 | ), |
75 | 75 | ||
76 | TP_printk("task %s:%d [%d]", | 76 | TP_printk("comm=%s pid=%d prio=%d", |
77 | __entry->comm, __entry->pid, __entry->prio) | 77 | __entry->comm, __entry->pid, __entry->prio) |
78 | ); | 78 | ); |
79 | 79 | ||
@@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task, | |||
83 | * (NOTE: the 'rq' argument is not used by generic trace events, | 83 | * (NOTE: the 'rq' argument is not used by generic trace events, |
84 | * but used by the latency tracer plugin. ) | 84 | * but used by the latency tracer plugin. ) |
85 | */ | 85 | */ |
86 | TRACE_EVENT(sched_wakeup, | 86 | DECLARE_EVENT_CLASS(sched_wakeup_template, |
87 | 87 | ||
88 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | 88 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), |
89 | 89 | ||
@@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup, | |||
94 | __field( pid_t, pid ) | 94 | __field( pid_t, pid ) |
95 | __field( int, prio ) | 95 | __field( int, prio ) |
96 | __field( int, success ) | 96 | __field( int, success ) |
97 | __field( int, cpu ) | 97 | __field( int, target_cpu ) |
98 | ), | 98 | ), |
99 | 99 | ||
100 | TP_fast_assign( | 100 | TP_fast_assign( |
@@ -102,46 +102,27 @@ TRACE_EVENT(sched_wakeup, | |||
102 | __entry->pid = p->pid; | 102 | __entry->pid = p->pid; |
103 | __entry->prio = p->prio; | 103 | __entry->prio = p->prio; |
104 | __entry->success = success; | 104 | __entry->success = success; |
105 | __entry->cpu = task_cpu(p); | 105 | __entry->target_cpu = task_cpu(p); |
106 | ), | 106 | ), |
107 | 107 | ||
108 | TP_printk("task %s:%d [%d] success=%d [%03d]", | 108 | TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", |
109 | __entry->comm, __entry->pid, __entry->prio, | 109 | __entry->comm, __entry->pid, __entry->prio, |
110 | __entry->success, __entry->cpu) | 110 | __entry->success, __entry->target_cpu) |
111 | ); | 111 | ); |
112 | 112 | ||
113 | DEFINE_EVENT(sched_wakeup_template, sched_wakeup, | ||
114 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | ||
115 | TP_ARGS(rq, p, success)); | ||
116 | |||
113 | /* | 117 | /* |
114 | * Tracepoint for waking up a new task: | 118 | * Tracepoint for waking up a new task: |
115 | * | 119 | * |
116 | * (NOTE: the 'rq' argument is not used by generic trace events, | 120 | * (NOTE: the 'rq' argument is not used by generic trace events, |
117 | * but used by the latency tracer plugin. ) | 121 | * but used by the latency tracer plugin. ) |
118 | */ | 122 | */ |
119 | TRACE_EVENT(sched_wakeup_new, | 123 | DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, |
120 | 124 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | |
121 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | 125 | TP_ARGS(rq, p, success)); |
122 | |||
123 | TP_ARGS(rq, p, success), | ||
124 | |||
125 | TP_STRUCT__entry( | ||
126 | __array( char, comm, TASK_COMM_LEN ) | ||
127 | __field( pid_t, pid ) | ||
128 | __field( int, prio ) | ||
129 | __field( int, success ) | ||
130 | __field( int, cpu ) | ||
131 | ), | ||
132 | |||
133 | TP_fast_assign( | ||
134 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | ||
135 | __entry->pid = p->pid; | ||
136 | __entry->prio = p->prio; | ||
137 | __entry->success = success; | ||
138 | __entry->cpu = task_cpu(p); | ||
139 | ), | ||
140 | |||
141 | TP_printk("task %s:%d [%d] success=%d [%03d]", | ||
142 | __entry->comm, __entry->pid, __entry->prio, | ||
143 | __entry->success, __entry->cpu) | ||
144 | ); | ||
145 | 126 | ||
146 | /* | 127 | /* |
147 | * Tracepoint for task switches, performed by the scheduler: | 128 | * Tracepoint for task switches, performed by the scheduler: |
@@ -176,7 +157,7 @@ TRACE_EVENT(sched_switch, | |||
176 | __entry->next_prio = next->prio; | 157 | __entry->next_prio = next->prio; |
177 | ), | 158 | ), |
178 | 159 | ||
179 | TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", | 160 | TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", |
180 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | 161 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, |
181 | __entry->prev_state ? | 162 | __entry->prev_state ? |
182 | __print_flags(__entry->prev_state, "|", | 163 | __print_flags(__entry->prev_state, "|", |
@@ -211,15 +192,12 @@ TRACE_EVENT(sched_migrate_task, | |||
211 | __entry->dest_cpu = dest_cpu; | 192 | __entry->dest_cpu = dest_cpu; |
212 | ), | 193 | ), |
213 | 194 | ||
214 | TP_printk("task %s:%d [%d] from: %d to: %d", | 195 | TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", |
215 | __entry->comm, __entry->pid, __entry->prio, | 196 | __entry->comm, __entry->pid, __entry->prio, |
216 | __entry->orig_cpu, __entry->dest_cpu) | 197 | __entry->orig_cpu, __entry->dest_cpu) |
217 | ); | 198 | ); |
218 | 199 | ||
219 | /* | 200 | DECLARE_EVENT_CLASS(sched_process_template, |
220 | * Tracepoint for freeing a task: | ||
221 | */ | ||
222 | TRACE_EVENT(sched_process_free, | ||
223 | 201 | ||
224 | TP_PROTO(struct task_struct *p), | 202 | TP_PROTO(struct task_struct *p), |
225 | 203 | ||
@@ -237,34 +215,24 @@ TRACE_EVENT(sched_process_free, | |||
237 | __entry->prio = p->prio; | 215 | __entry->prio = p->prio; |
238 | ), | 216 | ), |
239 | 217 | ||
240 | TP_printk("task %s:%d [%d]", | 218 | TP_printk("comm=%s pid=%d prio=%d", |
241 | __entry->comm, __entry->pid, __entry->prio) | 219 | __entry->comm, __entry->pid, __entry->prio) |
242 | ); | 220 | ); |
243 | 221 | ||
244 | /* | 222 | /* |
245 | * Tracepoint for a task exiting: | 223 | * Tracepoint for freeing a task: |
246 | */ | 224 | */ |
247 | TRACE_EVENT(sched_process_exit, | 225 | DEFINE_EVENT(sched_process_template, sched_process_free, |
226 | TP_PROTO(struct task_struct *p), | ||
227 | TP_ARGS(p)); | ||
228 | |||
248 | 229 | ||
249 | TP_PROTO(struct task_struct *p), | 230 | /* |
250 | 231 | * Tracepoint for a task exiting: | |
251 | TP_ARGS(p), | 232 | */ |
252 | 233 | DEFINE_EVENT(sched_process_template, sched_process_exit, | |
253 | TP_STRUCT__entry( | 234 | TP_PROTO(struct task_struct *p), |
254 | __array( char, comm, TASK_COMM_LEN ) | 235 | TP_ARGS(p)); |
255 | __field( pid_t, pid ) | ||
256 | __field( int, prio ) | ||
257 | ), | ||
258 | |||
259 | TP_fast_assign( | ||
260 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | ||
261 | __entry->pid = p->pid; | ||
262 | __entry->prio = p->prio; | ||
263 | ), | ||
264 | |||
265 | TP_printk("task %s:%d [%d]", | ||
266 | __entry->comm, __entry->pid, __entry->prio) | ||
267 | ); | ||
268 | 236 | ||
269 | /* | 237 | /* |
270 | * Tracepoint for a waiting task: | 238 | * Tracepoint for a waiting task: |
@@ -287,7 +255,7 @@ TRACE_EVENT(sched_process_wait, | |||
287 | __entry->prio = current->prio; | 255 | __entry->prio = current->prio; |
288 | ), | 256 | ), |
289 | 257 | ||
290 | TP_printk("task %s:%d [%d]", | 258 | TP_printk("comm=%s pid=%d prio=%d", |
291 | __entry->comm, __entry->pid, __entry->prio) | 259 | __entry->comm, __entry->pid, __entry->prio) |
292 | ); | 260 | ); |
293 | 261 | ||
@@ -314,46 +282,16 @@ TRACE_EVENT(sched_process_fork, | |||
314 | __entry->child_pid = child->pid; | 282 | __entry->child_pid = child->pid; |
315 | ), | 283 | ), |
316 | 284 | ||
317 | TP_printk("parent %s:%d child %s:%d", | 285 | TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", |
318 | __entry->parent_comm, __entry->parent_pid, | 286 | __entry->parent_comm, __entry->parent_pid, |
319 | __entry->child_comm, __entry->child_pid) | 287 | __entry->child_comm, __entry->child_pid) |
320 | ); | 288 | ); |
321 | 289 | ||
322 | /* | 290 | /* |
323 | * Tracepoint for sending a signal: | ||
324 | */ | ||
325 | TRACE_EVENT(sched_signal_send, | ||
326 | |||
327 | TP_PROTO(int sig, struct task_struct *p), | ||
328 | |||
329 | TP_ARGS(sig, p), | ||
330 | |||
331 | TP_STRUCT__entry( | ||
332 | __field( int, sig ) | ||
333 | __array( char, comm, TASK_COMM_LEN ) | ||
334 | __field( pid_t, pid ) | ||
335 | ), | ||
336 | |||
337 | TP_fast_assign( | ||
338 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | ||
339 | __entry->pid = p->pid; | ||
340 | __entry->sig = sig; | ||
341 | ), | ||
342 | |||
343 | TP_printk("sig: %d task %s:%d", | ||
344 | __entry->sig, __entry->comm, __entry->pid) | ||
345 | ); | ||
346 | |||
347 | /* | ||
348 | * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE | 291 | * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE |
349 | * adding sched_stat support to SCHED_FIFO/RR would be welcome. | 292 | * adding sched_stat support to SCHED_FIFO/RR would be welcome. |
350 | */ | 293 | */ |
351 | 294 | DECLARE_EVENT_CLASS(sched_stat_template, | |
352 | /* | ||
353 | * Tracepoint for accounting wait time (time the task is runnable | ||
354 | * but not actually running due to scheduler contention). | ||
355 | */ | ||
356 | TRACE_EVENT(sched_stat_wait, | ||
357 | 295 | ||
358 | TP_PROTO(struct task_struct *tsk, u64 delay), | 296 | TP_PROTO(struct task_struct *tsk, u64 delay), |
359 | 297 | ||
@@ -374,11 +312,36 @@ TRACE_EVENT(sched_stat_wait, | |||
374 | __perf_count(delay); | 312 | __perf_count(delay); |
375 | ), | 313 | ), |
376 | 314 | ||
377 | TP_printk("task: %s:%d wait: %Lu [ns]", | 315 | TP_printk("comm=%s pid=%d delay=%Lu [ns]", |
378 | __entry->comm, __entry->pid, | 316 | __entry->comm, __entry->pid, |
379 | (unsigned long long)__entry->delay) | 317 | (unsigned long long)__entry->delay) |
380 | ); | 318 | ); |
381 | 319 | ||
320 | |||
321 | /* | ||
322 | * Tracepoint for accounting wait time (time the task is runnable | ||
323 | * but not actually running due to scheduler contention). | ||
324 | */ | ||
325 | DEFINE_EVENT(sched_stat_template, sched_stat_wait, | ||
326 | TP_PROTO(struct task_struct *tsk, u64 delay), | ||
327 | TP_ARGS(tsk, delay)); | ||
328 | |||
329 | /* | ||
330 | * Tracepoint for accounting sleep time (time the task is not runnable, | ||
331 | * including iowait, see below). | ||
332 | */ | ||
333 | DEFINE_EVENT(sched_stat_template, sched_stat_sleep, | ||
334 | TP_PROTO(struct task_struct *tsk, u64 delay), | ||
335 | TP_ARGS(tsk, delay)); | ||
336 | |||
337 | /* | ||
338 | * Tracepoint for accounting iowait time (time the task is not runnable | ||
339 | * due to waiting on IO to complete). | ||
340 | */ | ||
341 | DEFINE_EVENT(sched_stat_template, sched_stat_iowait, | ||
342 | TP_PROTO(struct task_struct *tsk, u64 delay), | ||
343 | TP_ARGS(tsk, delay)); | ||
344 | |||
382 | /* | 345 | /* |
383 | * Tracepoint for accounting runtime (time the task is executing | 346 | * Tracepoint for accounting runtime (time the task is executing |
384 | * on a CPU). | 347 | * on a CPU). |
@@ -406,72 +369,12 @@ TRACE_EVENT(sched_stat_runtime, | |||
406 | __perf_count(runtime); | 369 | __perf_count(runtime); |
407 | ), | 370 | ), |
408 | 371 | ||
409 | TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]", | 372 | TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", |
410 | __entry->comm, __entry->pid, | 373 | __entry->comm, __entry->pid, |
411 | (unsigned long long)__entry->runtime, | 374 | (unsigned long long)__entry->runtime, |
412 | (unsigned long long)__entry->vruntime) | 375 | (unsigned long long)__entry->vruntime) |
413 | ); | 376 | ); |
414 | 377 | ||
415 | /* | ||
416 | * Tracepoint for accounting sleep time (time the task is not runnable, | ||
417 | * including iowait, see below). | ||
418 | */ | ||
419 | TRACE_EVENT(sched_stat_sleep, | ||
420 | |||
421 | TP_PROTO(struct task_struct *tsk, u64 delay), | ||
422 | |||
423 | TP_ARGS(tsk, delay), | ||
424 | |||
425 | TP_STRUCT__entry( | ||
426 | __array( char, comm, TASK_COMM_LEN ) | ||
427 | __field( pid_t, pid ) | ||
428 | __field( u64, delay ) | ||
429 | ), | ||
430 | |||
431 | TP_fast_assign( | ||
432 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); | ||
433 | __entry->pid = tsk->pid; | ||
434 | __entry->delay = delay; | ||
435 | ) | ||
436 | TP_perf_assign( | ||
437 | __perf_count(delay); | ||
438 | ), | ||
439 | |||
440 | TP_printk("task: %s:%d sleep: %Lu [ns]", | ||
441 | __entry->comm, __entry->pid, | ||
442 | (unsigned long long)__entry->delay) | ||
443 | ); | ||
444 | |||
445 | /* | ||
446 | * Tracepoint for accounting iowait time (time the task is not runnable | ||
447 | * due to waiting on IO to complete). | ||
448 | */ | ||
449 | TRACE_EVENT(sched_stat_iowait, | ||
450 | |||
451 | TP_PROTO(struct task_struct *tsk, u64 delay), | ||
452 | |||
453 | TP_ARGS(tsk, delay), | ||
454 | |||
455 | TP_STRUCT__entry( | ||
456 | __array( char, comm, TASK_COMM_LEN ) | ||
457 | __field( pid_t, pid ) | ||
458 | __field( u64, delay ) | ||
459 | ), | ||
460 | |||
461 | TP_fast_assign( | ||
462 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); | ||
463 | __entry->pid = tsk->pid; | ||
464 | __entry->delay = delay; | ||
465 | ) | ||
466 | TP_perf_assign( | ||
467 | __perf_count(delay); | ||
468 | ), | ||
469 | |||
470 | TP_printk("task: %s:%d iowait: %Lu [ns]", | ||
471 | __entry->comm, __entry->pid, | ||
472 | (unsigned long long)__entry->delay) | ||
473 | ); | ||
474 | |||
475 | #endif /* _TRACE_SCHED_H */ | 378 | #endif /* _TRACE_SCHED_H */ |
476 | 379 | ||
477 | /* This part must be outside protection */ | 380 | /* This part must be outside protection */ |
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h new file mode 100644 index 000000000000..a510b75ac304 --- /dev/null +++ b/include/trace/events/signal.h | |||
@@ -0,0 +1,173 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM signal | ||
3 | |||
4 | #if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_SIGNAL_H | ||
6 | |||
7 | #include <linux/signal.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/tracepoint.h> | ||
10 | |||
11 | #define TP_STORE_SIGINFO(__entry, info) \ | ||
12 | do { \ | ||
13 | if (info == SEND_SIG_NOINFO) { \ | ||
14 | __entry->errno = 0; \ | ||
15 | __entry->code = SI_USER; \ | ||
16 | } else if (info == SEND_SIG_PRIV) { \ | ||
17 | __entry->errno = 0; \ | ||
18 | __entry->code = SI_KERNEL; \ | ||
19 | } else { \ | ||
20 | __entry->errno = info->si_errno; \ | ||
21 | __entry->code = info->si_code; \ | ||
22 | } \ | ||
23 | } while (0) | ||
24 | |||
25 | /** | ||
26 | * signal_generate - called when a signal is generated | ||
27 | * @sig: signal number | ||
28 | * @info: pointer to struct siginfo | ||
29 | * @task: pointer to struct task_struct | ||
30 | * | ||
31 | * The current process sends signal 'sig' to process 'task' with | ||
32 | * siginfo 'info'. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV, | ||
33 | * it is not a real pointer and its fields must not be accessed. | ||
34 | * Instead, SEND_SIG_NOINFO means that si_code is SI_USER, and | ||
35 | * SEND_SIG_PRIV means that si_code is SI_KERNEL. | ||
36 | */ | ||
37 | TRACE_EVENT(signal_generate, | ||
38 | |||
39 | TP_PROTO(int sig, struct siginfo *info, struct task_struct *task), | ||
40 | |||
41 | TP_ARGS(sig, info, task), | ||
42 | |||
43 | TP_STRUCT__entry( | ||
44 | __field( int, sig ) | ||
45 | __field( int, errno ) | ||
46 | __field( int, code ) | ||
47 | __array( char, comm, TASK_COMM_LEN ) | ||
48 | __field( pid_t, pid ) | ||
49 | ), | ||
50 | |||
51 | TP_fast_assign( | ||
52 | __entry->sig = sig; | ||
53 | TP_STORE_SIGINFO(__entry, info); | ||
54 | memcpy(__entry->comm, task->comm, TASK_COMM_LEN); | ||
55 | __entry->pid = task->pid; | ||
56 | ), | ||
57 | |||
58 | TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d", | ||
59 | __entry->sig, __entry->errno, __entry->code, | ||
60 | __entry->comm, __entry->pid) | ||
61 | ); | ||
62 | |||
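As a usage sketch (not part of this patch; the consumer module below is hypothetical), a module could attach a probe to the new signal_generate tracepoint through the register_trace_signal_generate() helper generated by the declaration above, assuming the probe signature follows TP_PROTO for this kernel:

#include <linux/module.h>
#include <trace/events/signal.h>

/* Probe matching TP_PROTO(int sig, struct siginfo *info, struct task_struct *task). */
static void probe_signal_generate(int sig, struct siginfo *info,
				  struct task_struct *task)
{
	if (sig == SIGKILL)
		pr_info("SIGKILL queued for %s[%d]\n", task->comm, task->pid);
}

static int __init sig_probe_init(void)
{
	return register_trace_signal_generate(probe_signal_generate);
}

static void __exit sig_probe_exit(void)
{
	unregister_trace_signal_generate(probe_signal_generate);
	tracepoint_synchronize_unregister();
}

module_init(sig_probe_init);
module_exit(sig_probe_exit);
MODULE_LICENSE("GPL");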
63 | /** | ||
64 | * signal_deliver - called when a signal is delivered | ||
65 | * @sig: signal number | ||
66 | * @info: pointer to struct siginfo | ||
67 | * @ka: pointer to struct k_sigaction | ||
68 | * | ||
69 | * Signal 'sig' is delivered to the current process with siginfo 'info', | ||
70 | * and will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or | ||
71 | * SIG_DFL. | ||
72 | * Note that some signals reported by the signal_generate tracepoint can | ||
73 | * be lost, ignored or modified (by a debugger) before reaching this one. | ||
74 | * This tracepoint shows which signals are actually delivered, but | ||
75 | * matching generated signals to delivered signals may not be reliable. | ||
76 | */ | ||
77 | TRACE_EVENT(signal_deliver, | ||
78 | |||
79 | TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka), | ||
80 | |||
81 | TP_ARGS(sig, info, ka), | ||
82 | |||
83 | TP_STRUCT__entry( | ||
84 | __field( int, sig ) | ||
85 | __field( int, errno ) | ||
86 | __field( int, code ) | ||
87 | __field( unsigned long, sa_handler ) | ||
88 | __field( unsigned long, sa_flags ) | ||
89 | ), | ||
90 | |||
91 | TP_fast_assign( | ||
92 | __entry->sig = sig; | ||
93 | TP_STORE_SIGINFO(__entry, info); | ||
94 | __entry->sa_handler = (unsigned long)ka->sa.sa_handler; | ||
95 | __entry->sa_flags = ka->sa.sa_flags; | ||
96 | ), | ||
97 | |||
98 | TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx", | ||
99 | __entry->sig, __entry->errno, __entry->code, | ||
100 | __entry->sa_handler, __entry->sa_flags) | ||
101 | ); | ||
102 | |||
103 | /** | ||
104 | * signal_overflow_fail - called when the signal queue overflows | ||
105 | * @sig: signal number | ||
106 | * @group: signal to process group or not (bool) | ||
107 | * @info: pointer to struct siginfo | ||
108 | * | ||
109 | * The kernel fails to generate signal 'sig' with siginfo 'info' because | ||
110 | * the siginfo queue has overflowed, so the signal is dropped. | ||
111 | * 'group' is non-zero if the signal will be sent to a process group. | ||
112 | * 'sig' is always one of the RT signals. | ||
113 | */ | ||
114 | TRACE_EVENT(signal_overflow_fail, | ||
115 | |||
116 | TP_PROTO(int sig, int group, struct siginfo *info), | ||
117 | |||
118 | TP_ARGS(sig, group, info), | ||
119 | |||
120 | TP_STRUCT__entry( | ||
121 | __field( int, sig ) | ||
122 | __field( int, group ) | ||
123 | __field( int, errno ) | ||
124 | __field( int, code ) | ||
125 | ), | ||
126 | |||
127 | TP_fast_assign( | ||
128 | __entry->sig = sig; | ||
129 | __entry->group = group; | ||
130 | TP_STORE_SIGINFO(__entry, info); | ||
131 | ), | ||
132 | |||
133 | TP_printk("sig=%d group=%d errno=%d code=%d", | ||
134 | __entry->sig, __entry->group, __entry->errno, __entry->code) | ||
135 | ); | ||
136 | |||
137 | /** | ||
138 | * signal_lose_info - called when siginfo is lost | ||
139 | * @sig: signal number | ||
140 | * @group: signal to process group or not (bool) | ||
141 | * @info: pointer to struct siginfo | ||
142 | * | ||
143 | * The kernel generates signal 'sig' but loses its 'info' siginfo because | ||
144 | * the siginfo queue has overflowed. | ||
145 | * 'group' is non-zero if the signal will be sent to a process group. | ||
146 | * 'sig' is always one of the non-RT signals. | ||
147 | */ | ||
148 | TRACE_EVENT(signal_lose_info, | ||
149 | |||
150 | TP_PROTO(int sig, int group, struct siginfo *info), | ||
151 | |||
152 | TP_ARGS(sig, group, info), | ||
153 | |||
154 | TP_STRUCT__entry( | ||
155 | __field( int, sig ) | ||
156 | __field( int, group ) | ||
157 | __field( int, errno ) | ||
158 | __field( int, code ) | ||
159 | ), | ||
160 | |||
161 | TP_fast_assign( | ||
162 | __entry->sig = sig; | ||
163 | __entry->group = group; | ||
164 | TP_STORE_SIGINFO(__entry, info); | ||
165 | ), | ||
166 | |||
167 | TP_printk("sig=%d group=%d errno=%d code=%d", | ||
168 | __entry->sig, __entry->group, __entry->errno, __entry->code) | ||
169 | ); | ||
170 | #endif /* _TRACE_SIGNAL_H */ | ||
171 | |||
172 | /* This part must be outside protection */ | ||
173 | #include <trace/define_trace.h> | ||
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 397dff2dbd5a..fb726ac7caee 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h | |||
@@ -1,5 +1,6 @@ | |||
1 | #undef TRACE_SYSTEM | 1 | #undef TRACE_SYSTEM |
2 | #define TRACE_SYSTEM syscalls | 2 | #define TRACE_SYSTEM raw_syscalls |
3 | #define TRACE_INCLUDE_FILE syscalls | ||
3 | 4 | ||
4 | #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) | 5 | #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) |
5 | #define _TRACE_EVENTS_SYSCALLS_H | 6 | #define _TRACE_EVENTS_SYSCALLS_H |
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1844c48d640e..e5ce87a0498d 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h | |||
@@ -26,7 +26,7 @@ TRACE_EVENT(timer_init, | |||
26 | __entry->timer = timer; | 26 | __entry->timer = timer; |
27 | ), | 27 | ), |
28 | 28 | ||
29 | TP_printk("timer %p", __entry->timer) | 29 | TP_printk("timer=%p", __entry->timer) |
30 | ); | 30 | ); |
31 | 31 | ||
32 | /** | 32 | /** |
@@ -54,7 +54,7 @@ TRACE_EVENT(timer_start, | |||
54 | __entry->now = jiffies; | 54 | __entry->now = jiffies; |
55 | ), | 55 | ), |
56 | 56 | ||
57 | TP_printk("timer %p: func %pf, expires %lu, timeout %ld", | 57 | TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]", |
58 | __entry->timer, __entry->function, __entry->expires, | 58 | __entry->timer, __entry->function, __entry->expires, |
59 | (long)__entry->expires - __entry->now) | 59 | (long)__entry->expires - __entry->now) |
60 | ); | 60 | ); |
@@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry, | |||
81 | __entry->now = jiffies; | 81 | __entry->now = jiffies; |
82 | ), | 82 | ), |
83 | 83 | ||
84 | TP_printk("timer %p: now %lu", __entry->timer, __entry->now) | 84 | TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) |
85 | ); | 85 | ); |
86 | 86 | ||
87 | /** | 87 | /** |
@@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit, | |||
108 | __entry->timer = timer; | 108 | __entry->timer = timer; |
109 | ), | 109 | ), |
110 | 110 | ||
111 | TP_printk("timer %p", __entry->timer) | 111 | TP_printk("timer=%p", __entry->timer) |
112 | ); | 112 | ); |
113 | 113 | ||
114 | /** | 114 | /** |
@@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel, | |||
129 | __entry->timer = timer; | 129 | __entry->timer = timer; |
130 | ), | 130 | ), |
131 | 131 | ||
132 | TP_printk("timer %p", __entry->timer) | 132 | TP_printk("timer=%p", __entry->timer) |
133 | ); | 133 | ); |
134 | 134 | ||
135 | /** | 135 | /** |
@@ -140,24 +140,24 @@ TRACE_EVENT(timer_cancel, | |||
140 | */ | 140 | */ |
141 | TRACE_EVENT(hrtimer_init, | 141 | TRACE_EVENT(hrtimer_init, |
142 | 142 | ||
143 | TP_PROTO(struct hrtimer *timer, clockid_t clockid, | 143 | TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, |
144 | enum hrtimer_mode mode), | 144 | enum hrtimer_mode mode), |
145 | 145 | ||
146 | TP_ARGS(timer, clockid, mode), | 146 | TP_ARGS(hrtimer, clockid, mode), |
147 | 147 | ||
148 | TP_STRUCT__entry( | 148 | TP_STRUCT__entry( |
149 | __field( void *, timer ) | 149 | __field( void *, hrtimer ) |
150 | __field( clockid_t, clockid ) | 150 | __field( clockid_t, clockid ) |
151 | __field( enum hrtimer_mode, mode ) | 151 | __field( enum hrtimer_mode, mode ) |
152 | ), | 152 | ), |
153 | 153 | ||
154 | TP_fast_assign( | 154 | TP_fast_assign( |
155 | __entry->timer = timer; | 155 | __entry->hrtimer = hrtimer; |
156 | __entry->clockid = clockid; | 156 | __entry->clockid = clockid; |
157 | __entry->mode = mode; | 157 | __entry->mode = mode; |
158 | ), | 158 | ), |
159 | 159 | ||
160 | TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, | 160 | TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, |
161 | __entry->clockid == CLOCK_REALTIME ? | 161 | __entry->clockid == CLOCK_REALTIME ? |
162 | "CLOCK_REALTIME" : "CLOCK_MONOTONIC", | 162 | "CLOCK_REALTIME" : "CLOCK_MONOTONIC", |
163 | __entry->mode == HRTIMER_MODE_ABS ? | 163 | __entry->mode == HRTIMER_MODE_ABS ? |
@@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init, | |||
170 | */ | 170 | */ |
171 | TRACE_EVENT(hrtimer_start, | 171 | TRACE_EVENT(hrtimer_start, |
172 | 172 | ||
173 | TP_PROTO(struct hrtimer *timer), | 173 | TP_PROTO(struct hrtimer *hrtimer), |
174 | 174 | ||
175 | TP_ARGS(timer), | 175 | TP_ARGS(hrtimer), |
176 | 176 | ||
177 | TP_STRUCT__entry( | 177 | TP_STRUCT__entry( |
178 | __field( void *, timer ) | 178 | __field( void *, hrtimer ) |
179 | __field( void *, function ) | 179 | __field( void *, function ) |
180 | __field( s64, expires ) | 180 | __field( s64, expires ) |
181 | __field( s64, softexpires ) | 181 | __field( s64, softexpires ) |
182 | ), | 182 | ), |
183 | 183 | ||
184 | TP_fast_assign( | 184 | TP_fast_assign( |
185 | __entry->timer = timer; | 185 | __entry->hrtimer = hrtimer; |
186 | __entry->function = timer->function; | 186 | __entry->function = hrtimer->function; |
187 | __entry->expires = hrtimer_get_expires(timer).tv64; | 187 | __entry->expires = hrtimer_get_expires(hrtimer).tv64; |
188 | __entry->softexpires = hrtimer_get_softexpires(timer).tv64; | 188 | __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; |
189 | ), | 189 | ), |
190 | 190 | ||
191 | TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", | 191 | TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", |
192 | __entry->timer, __entry->function, | 192 | __entry->hrtimer, __entry->function, |
193 | (unsigned long long)ktime_to_ns((ktime_t) { | 193 | (unsigned long long)ktime_to_ns((ktime_t) { |
194 | .tv64 = __entry->expires }), | 194 | .tv64 = __entry->expires }), |
195 | (unsigned long long)ktime_to_ns((ktime_t) { | 195 | (unsigned long long)ktime_to_ns((ktime_t) { |
@@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start, | |||
206 | */ | 206 | */ |
207 | TRACE_EVENT(hrtimer_expire_entry, | 207 | TRACE_EVENT(hrtimer_expire_entry, |
208 | 208 | ||
209 | TP_PROTO(struct hrtimer *timer, ktime_t *now), | 209 | TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), |
210 | 210 | ||
211 | TP_ARGS(timer, now), | 211 | TP_ARGS(hrtimer, now), |
212 | 212 | ||
213 | TP_STRUCT__entry( | 213 | TP_STRUCT__entry( |
214 | __field( void *, timer ) | 214 | __field( void *, hrtimer ) |
215 | __field( s64, now ) | 215 | __field( s64, now ) |
216 | ), | 216 | ), |
217 | 217 | ||
218 | TP_fast_assign( | 218 | TP_fast_assign( |
219 | __entry->timer = timer; | 219 | __entry->hrtimer = hrtimer; |
220 | __entry->now = now->tv64; | 220 | __entry->now = now->tv64; |
221 | ), | 221 | ), |
222 | 222 | ||
223 | TP_printk("hrtimer %p, now %llu", __entry->timer, | 223 | TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, |
224 | (unsigned long long)ktime_to_ns((ktime_t) { | 224 | (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) |
225 | .tv64 = __entry->now })) | ||
226 | ); | 225 | ); |
227 | 226 | ||
228 | /** | 227 | /** |
@@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry, | |||
234 | */ | 233 | */ |
235 | TRACE_EVENT(hrtimer_expire_exit, | 234 | TRACE_EVENT(hrtimer_expire_exit, |
236 | 235 | ||
237 | TP_PROTO(struct hrtimer *timer), | 236 | TP_PROTO(struct hrtimer *hrtimer), |
238 | 237 | ||
239 | TP_ARGS(timer), | 238 | TP_ARGS(hrtimer), |
240 | 239 | ||
241 | TP_STRUCT__entry( | 240 | TP_STRUCT__entry( |
242 | __field( void *, timer ) | 241 | __field( void *, hrtimer ) |
243 | ), | 242 | ), |
244 | 243 | ||
245 | TP_fast_assign( | 244 | TP_fast_assign( |
246 | __entry->timer = timer; | 245 | __entry->hrtimer = hrtimer; |
247 | ), | 246 | ), |
248 | 247 | ||
249 | TP_printk("hrtimer %p", __entry->timer) | 248 | TP_printk("hrtimer=%p", __entry->hrtimer) |
250 | ); | 249 | ); |
251 | 250 | ||
252 | /** | 251 | /** |
253 | * hrtimer_cancel - called when the hrtimer is canceled | 252 | * hrtimer_cancel - called when the hrtimer is canceled |
254 | * @timer: pointer to struct hrtimer | 253 | * @hrtimer: pointer to struct hrtimer |
255 | */ | 254 | */ |
256 | TRACE_EVENT(hrtimer_cancel, | 255 | TRACE_EVENT(hrtimer_cancel, |
257 | 256 | ||
258 | TP_PROTO(struct hrtimer *timer), | 257 | TP_PROTO(struct hrtimer *hrtimer), |
259 | 258 | ||
260 | TP_ARGS(timer), | 259 | TP_ARGS(hrtimer), |
261 | 260 | ||
262 | TP_STRUCT__entry( | 261 | TP_STRUCT__entry( |
263 | __field( void *, timer ) | 262 | __field( void *, hrtimer ) |
264 | ), | 263 | ), |
265 | 264 | ||
266 | TP_fast_assign( | 265 | TP_fast_assign( |
267 | __entry->timer = timer; | 266 | __entry->hrtimer = hrtimer; |
268 | ), | 267 | ), |
269 | 268 | ||
270 | TP_printk("hrtimer %p", __entry->timer) | 269 | TP_printk("hrtimer=%p", __entry->hrtimer) |
271 | ); | 270 | ); |
272 | 271 | ||
273 | /** | 272 | /** |
@@ -302,7 +301,7 @@ TRACE_EVENT(itimer_state, | |||
302 | __entry->interval_usec = value->it_interval.tv_usec; | 301 | __entry->interval_usec = value->it_interval.tv_usec; |
303 | ), | 302 | ), |
304 | 303 | ||
305 | TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", | 304 | TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu", |
306 | __entry->which, __entry->expires, | 305 | __entry->which, __entry->expires, |
307 | __entry->value_sec, __entry->value_usec, | 306 | __entry->value_sec, __entry->value_usec, |
308 | __entry->interval_sec, __entry->interval_usec) | 307 | __entry->interval_sec, __entry->interval_usec) |
@@ -332,7 +331,7 @@ TRACE_EVENT(itimer_expire, | |||
332 | __entry->pid = pid_nr(pid); | 331 | __entry->pid = pid_nr(pid); |
333 | ), | 332 | ), |
334 | 333 | ||
335 | TP_printk("which %d, pid %d, now %lu", __entry->which, | 334 | TP_printk("which=%d pid=%d now=%lu", __entry->which, |
336 | (int) __entry->pid, __entry->now) | 335 | (int) __entry->pid, __entry->now) |
337 | ); | 336 | ); |
338 | 337 | ||
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index e4612dbd7ba6..d6c974474e70 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/tracepoint.h> | 9 | #include <linux/tracepoint.h> |
10 | 10 | ||
11 | TRACE_EVENT(workqueue_insertion, | 11 | DECLARE_EVENT_CLASS(workqueue, |
12 | 12 | ||
13 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | 13 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), |
14 | 14 | ||
@@ -30,26 +30,18 @@ TRACE_EVENT(workqueue_insertion, | |||
30 | __entry->thread_pid, __entry->func) | 30 | __entry->thread_pid, __entry->func) |
31 | ); | 31 | ); |
32 | 32 | ||
33 | TRACE_EVENT(workqueue_execution, | 33 | DEFINE_EVENT(workqueue, workqueue_insertion, |
34 | 34 | ||
35 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | 35 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), |
36 | 36 | ||
37 | TP_ARGS(wq_thread, work), | 37 | TP_ARGS(wq_thread, work) |
38 | ); | ||
38 | 39 | ||
39 | TP_STRUCT__entry( | 40 | DEFINE_EVENT(workqueue, workqueue_execution, |
40 | __array(char, thread_comm, TASK_COMM_LEN) | ||
41 | __field(pid_t, thread_pid) | ||
42 | __field(work_func_t, func) | ||
43 | ), | ||
44 | 41 | ||
45 | TP_fast_assign( | 42 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), |
46 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
47 | __entry->thread_pid = wq_thread->pid; | ||
48 | __entry->func = work->func; | ||
49 | ), | ||
50 | 43 | ||
51 | TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, | 44 | TP_ARGS(wq_thread, work) |
52 | __entry->thread_pid, __entry->func) | ||
53 | ); | 45 | ); |
54 | 46 | ||
55 | /* Trace the creation of one workqueue thread on a cpu */ | 47 | /* Trace the creation of one workqueue thread on a cpu */ |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index cc0d9667e182..d1b3de9c1a71 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -18,6 +18,26 @@ | |||
18 | 18 | ||
19 | #include <linux/ftrace_event.h> | 19 | #include <linux/ftrace_event.h> |
20 | 20 | ||
21 | /* | ||
22 | * DECLARE_EVENT_CLASS can be used to add generic function | ||
23 | * handlers for events; that is, for events that all take the same | ||
24 | * parameters and differ only in their tracepoints. | ||
25 | * Each tracepoint is then defined with DEFINE_EVENT, which maps | ||
26 | * the DECLARE_EVENT_CLASS to that tracepoint. | ||
27 | * | ||
28 | * TRACE_EVENT is a one-to-one mapping between a tracepoint and a template. | ||
29 | */ | ||
30 | #undef TRACE_EVENT | ||
31 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | ||
32 | DECLARE_EVENT_CLASS(name, \ | ||
33 | PARAMS(proto), \ | ||
34 | PARAMS(args), \ | ||
35 | PARAMS(tstruct), \ | ||
36 | PARAMS(assign), \ | ||
37 | PARAMS(print)); \ | ||
38 | DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); | ||
39 | |||
40 | |||
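To make the class/instance split concrete, here is a minimal sketch of the pattern the converted headers above now follow (the foo_template class and the foo_set/foo_clear events are hypothetical, not part of this patch): one DECLARE_EVENT_CLASS carries the prototype, entry layout, assignment and print format, and each DEFINE_EVENT only names a tracepoint that reuses them.

/* Hypothetical example: two events sharing one class, mirroring the
 * module_get/module_put conversion earlier in this patch. */
DECLARE_EVENT_CLASS(foo_template,

	TP_PROTO(int id, int val),

	TP_ARGS(id, val),

	TP_STRUCT__entry(
		__field(	int,	id	)
		__field(	int,	val	)
	),

	TP_fast_assign(
		__entry->id	= id;
		__entry->val	= val;
	),

	TP_printk("id=%d val=%d", __entry->id, __entry->val)
);

DEFINE_EVENT(foo_template, foo_set,

	TP_PROTO(int id, int val),

	TP_ARGS(id, val)
);

DEFINE_EVENT(foo_template, foo_clear,

	TP_PROTO(int id, int val),

	TP_ARGS(id, val)
);

Because the TRACE_EVENT macro above expands to a class plus a single instance of the same name, existing one-off event definitions keep working unchanged.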
21 | #undef __field | 41 | #undef __field |
22 | #define __field(type, item) type item; | 42 | #define __field(type, item) type item; |
23 | 43 | ||
@@ -36,15 +56,21 @@ | |||
36 | #undef TP_STRUCT__entry | 56 | #undef TP_STRUCT__entry |
37 | #define TP_STRUCT__entry(args...) args | 57 | #define TP_STRUCT__entry(args...) args |
38 | 58 | ||
39 | #undef TRACE_EVENT | 59 | #undef DECLARE_EVENT_CLASS |
40 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | 60 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ |
41 | struct ftrace_raw_##name { \ | 61 | struct ftrace_raw_##name { \ |
42 | struct trace_entry ent; \ | 62 | struct trace_entry ent; \ |
43 | tstruct \ | 63 | tstruct \ |
44 | char __data[0]; \ | 64 | char __data[0]; \ |
45 | }; \ | 65 | }; |
66 | #undef DEFINE_EVENT | ||
67 | #define DEFINE_EVENT(template, name, proto, args) \ | ||
46 | static struct ftrace_event_call event_##name | 68 | static struct ftrace_event_call event_##name |
47 | 69 | ||
70 | #undef DEFINE_EVENT_PRINT | ||
71 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
72 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
73 | |||
48 | #undef __cpparg | 74 | #undef __cpparg |
49 | #define __cpparg(arg...) arg | 75 | #define __cpparg(arg...) arg |
50 | 76 | ||
@@ -89,12 +115,19 @@ | |||
89 | #undef __string | 115 | #undef __string |
90 | #define __string(item, src) __dynamic_array(char, item, -1) | 116 | #define __string(item, src) __dynamic_array(char, item, -1) |
91 | 117 | ||
92 | #undef TRACE_EVENT | 118 | #undef DECLARE_EVENT_CLASS |
93 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 119 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ |
94 | struct ftrace_data_offsets_##call { \ | 120 | struct ftrace_data_offsets_##call { \ |
95 | tstruct; \ | 121 | tstruct; \ |
96 | }; | 122 | }; |
97 | 123 | ||
124 | #undef DEFINE_EVENT | ||
125 | #define DEFINE_EVENT(template, name, proto, args) | ||
126 | |||
127 | #undef DEFINE_EVENT_PRINT | ||
128 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
129 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
130 | |||
98 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 131 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
99 | 132 | ||
100 | /* | 133 | /* |
@@ -120,9 +153,10 @@ | |||
120 | #undef __field | 153 | #undef __field |
121 | #define __field(type, item) \ | 154 | #define __field(type, item) \ |
122 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | 155 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ |
123 | "offset:%u;\tsize:%u;\n", \ | 156 | "offset:%u;\tsize:%u;\tsigned:%u;\n", \ |
124 | (unsigned int)offsetof(typeof(field), item), \ | 157 | (unsigned int)offsetof(typeof(field), item), \ |
125 | (unsigned int)sizeof(field.item)); \ | 158 | (unsigned int)sizeof(field.item), \ |
159 | (unsigned int)is_signed_type(type)); \ | ||
126 | if (!ret) \ | 160 | if (!ret) \ |
127 | return 0; | 161 | return 0; |
128 | 162 | ||
@@ -132,19 +166,21 @@ | |||
132 | #undef __array | 166 | #undef __array |
133 | #define __array(type, item, len) \ | 167 | #define __array(type, item, len) \ |
134 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | 168 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ |
135 | "offset:%u;\tsize:%u;\n", \ | 169 | "offset:%u;\tsize:%u;\tsigned:%u;\n", \ |
136 | (unsigned int)offsetof(typeof(field), item), \ | 170 | (unsigned int)offsetof(typeof(field), item), \ |
137 | (unsigned int)sizeof(field.item)); \ | 171 | (unsigned int)sizeof(field.item), \ |
172 | (unsigned int)is_signed_type(type)); \ | ||
138 | if (!ret) \ | 173 | if (!ret) \ |
139 | return 0; | 174 | return 0; |
140 | 175 | ||
141 | #undef __dynamic_array | 176 | #undef __dynamic_array |
142 | #define __dynamic_array(type, item, len) \ | 177 | #define __dynamic_array(type, item, len) \ |
143 | ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\ | 178 | ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\ |
144 | "offset:%u;\tsize:%u;\n", \ | 179 | "offset:%u;\tsize:%u;\tsigned:%u;\n", \ |
145 | (unsigned int)offsetof(typeof(field), \ | 180 | (unsigned int)offsetof(typeof(field), \ |
146 | __data_loc_##item), \ | 181 | __data_loc_##item), \ |
147 | (unsigned int)sizeof(field.__data_loc_##item)); \ | 182 | (unsigned int)sizeof(field.__data_loc_##item), \ |
183 | (unsigned int)is_signed_type(type)); \ | ||
148 | if (!ret) \ | 184 | if (!ret) \ |
149 | return 0; | 185 | return 0; |
150 | 186 | ||
@@ -159,7 +195,7 @@ | |||
159 | #undef __get_str | 195 | #undef __get_str |
160 | 196 | ||
161 | #undef TP_printk | 197 | #undef TP_printk |
162 | #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) | 198 | #define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args) |
163 | 199 | ||
164 | #undef TP_fast_assign | 200 | #undef TP_fast_assign |
165 | #define TP_fast_assign(args...) args | 201 | #define TP_fast_assign(args...) args |
@@ -167,17 +203,50 @@ | |||
167 | #undef TP_perf_assign | 203 | #undef TP_perf_assign |
168 | #define TP_perf_assign(args...) | 204 | #define TP_perf_assign(args...) |
169 | 205 | ||
170 | #undef TRACE_EVENT | 206 | #undef DECLARE_EVENT_CLASS |
171 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 207 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ |
172 | static int \ | 208 | static int \ |
173 | ftrace_format_##call(struct ftrace_event_call *unused, \ | 209 | ftrace_format_setup_##call(struct ftrace_event_call *unused, \ |
174 | struct trace_seq *s) \ | 210 | struct trace_seq *s) \ |
175 | { \ | 211 | { \ |
176 | struct ftrace_raw_##call field __attribute__((unused)); \ | 212 | struct ftrace_raw_##call field __attribute__((unused)); \ |
177 | int ret = 0; \ | 213 | int ret = 0; \ |
178 | \ | 214 | \ |
179 | tstruct; \ | 215 | tstruct; \ |
180 | \ | 216 | \ |
217 | return ret; \ | ||
218 | } \ | ||
219 | \ | ||
220 | static int \ | ||
221 | ftrace_format_##call(struct ftrace_event_call *unused, \ | ||
222 | struct trace_seq *s) \ | ||
223 | { \ | ||
224 | int ret = 0; \ | ||
225 | \ | ||
226 | ret = ftrace_format_setup_##call(unused, s); \ | ||
227 | if (!ret) \ | ||
228 | return ret; \ | ||
229 | \ | ||
230 | ret = trace_seq_printf(s, "\nprint fmt: " print); \ | ||
231 | \ | ||
232 | return ret; \ | ||
233 | } | ||
234 | |||
235 | #undef DEFINE_EVENT | ||
236 | #define DEFINE_EVENT(template, name, proto, args) | ||
237 | |||
238 | #undef DEFINE_EVENT_PRINT | ||
239 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
240 | static int \ | ||
241 | ftrace_format_##name(struct ftrace_event_call *unused, \ | ||
242 | struct trace_seq *s) \ | ||
243 | { \ | ||
244 | int ret = 0; \ | ||
245 | \ | ||
246 | ret = ftrace_format_setup_##template(unused, s); \ | ||
247 | if (!ret) \ | ||
248 | return ret; \ | ||
249 | \ | ||
181 | trace_seq_printf(s, "\nprint fmt: " print); \ | 250 | trace_seq_printf(s, "\nprint fmt: " print); \ |
182 | \ | 251 | \ |
183 | return ret; \ | 252 | return ret; \ |
@@ -252,10 +321,11 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ | |||
252 | ftrace_print_symbols_seq(p, value, symbols); \ | 321 | ftrace_print_symbols_seq(p, value, symbols); \ |
253 | }) | 322 | }) |
254 | 323 | ||
255 | #undef TRACE_EVENT | 324 | #undef DECLARE_EVENT_CLASS |
256 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 325 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ |
257 | static enum print_line_t \ | 326 | static enum print_line_t \ |
258 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | 327 | ftrace_raw_output_id_##call(int event_id, const char *name, \ |
328 | struct trace_iterator *iter, int flags) \ | ||
259 | { \ | 329 | { \ |
260 | struct trace_seq *s = &iter->seq; \ | 330 | struct trace_seq *s = &iter->seq; \ |
261 | struct ftrace_raw_##call *field; \ | 331 | struct ftrace_raw_##call *field; \ |
@@ -265,6 +335,47 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
265 | \ | 335 | \ |
266 | entry = iter->ent; \ | 336 | entry = iter->ent; \ |
267 | \ | 337 | \ |
338 | if (entry->type != event_id) { \ | ||
339 | WARN_ON_ONCE(1); \ | ||
340 | return TRACE_TYPE_UNHANDLED; \ | ||
341 | } \ | ||
342 | \ | ||
343 | field = (typeof(field))entry; \ | ||
344 | \ | ||
345 | p = &get_cpu_var(ftrace_event_seq); \ | ||
346 | trace_seq_init(p); \ | ||
347 | ret = trace_seq_printf(s, "%s: ", name); \ | ||
348 | if (ret) \ | ||
349 | ret = trace_seq_printf(s, print); \ | ||
350 | put_cpu(); \ | ||
351 | if (!ret) \ | ||
352 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
353 | \ | ||
354 | return TRACE_TYPE_HANDLED; \ | ||
355 | } | ||
356 | |||
357 | #undef DEFINE_EVENT | ||
358 | #define DEFINE_EVENT(template, name, proto, args) \ | ||
359 | static enum print_line_t \ | ||
360 | ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \ | ||
361 | { \ | ||
362 | return ftrace_raw_output_id_##template(event_##name.id, \ | ||
363 | #name, iter, flags); \ | ||
364 | } | ||
365 | |||
366 | #undef DEFINE_EVENT_PRINT | ||
367 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | ||
368 | static enum print_line_t \ | ||
369 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | ||
370 | { \ | ||
371 | struct trace_seq *s = &iter->seq; \ | ||
372 | struct ftrace_raw_##template *field; \ | ||
373 | struct trace_entry *entry; \ | ||
374 | struct trace_seq *p; \ | ||
375 | int ret; \ | ||
376 | \ | ||
377 | entry = iter->ent; \ | ||
378 | \ | ||
268 | if (entry->type != event_##call.id) { \ | 379 | if (entry->type != event_##call.id) { \ |
269 | WARN_ON_ONCE(1); \ | 380 | WARN_ON_ONCE(1); \ |
270 | return TRACE_TYPE_UNHANDLED; \ | 381 | return TRACE_TYPE_UNHANDLED; \ |
@@ -274,14 +385,16 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
274 | \ | 385 | \ |
275 | p = &get_cpu_var(ftrace_event_seq); \ | 386 | p = &get_cpu_var(ftrace_event_seq); \ |
276 | trace_seq_init(p); \ | 387 | trace_seq_init(p); \ |
277 | ret = trace_seq_printf(s, #call ": " print); \ | 388 | ret = trace_seq_printf(s, "%s: ", #call); \ |
389 | if (ret) \ | ||
390 | ret = trace_seq_printf(s, print); \ | ||
278 | put_cpu(); \ | 391 | put_cpu(); \ |
279 | if (!ret) \ | 392 | if (!ret) \ |
280 | return TRACE_TYPE_PARTIAL_LINE; \ | 393 | return TRACE_TYPE_PARTIAL_LINE; \ |
281 | \ | 394 | \ |
282 | return TRACE_TYPE_HANDLED; \ | 395 | return TRACE_TYPE_HANDLED; \ |
283 | } | 396 | } |
284 | 397 | ||
285 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 398 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
286 | 399 | ||
287 | #undef __field_ext | 400 | #undef __field_ext |
@@ -315,8 +428,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
315 | #undef __string | 428 | #undef __string |
316 | #define __string(item, src) __dynamic_array(char, item, -1) | 429 | #define __string(item, src) __dynamic_array(char, item, -1) |
317 | 430 | ||
318 | #undef TRACE_EVENT | 431 | #undef DECLARE_EVENT_CLASS |
319 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 432 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ |
320 | static int \ | 433 | static int \ |
321 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | 434 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ |
322 | { \ | 435 | { \ |
@@ -332,6 +445,13 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | |||
332 | return ret; \ | 445 | return ret; \ |
333 | } | 446 | } |
334 | 447 | ||
448 | #undef DEFINE_EVENT | ||
449 | #define DEFINE_EVENT(template, name, proto, args) | ||
450 | |||
451 | #undef DEFINE_EVENT_PRINT | ||
452 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
453 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
454 | |||
335 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 455 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
336 | 456 | ||
337 | /* | 457 | /* |
@@ -358,10 +478,10 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | |||
358 | __data_size += (len) * sizeof(type); | 478 | __data_size += (len) * sizeof(type); |
359 | 479 | ||
360 | #undef __string | 480 | #undef __string |
361 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ | 481 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) |
362 | 482 | ||
363 | #undef TRACE_EVENT | 483 | #undef DECLARE_EVENT_CLASS |
364 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 484 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ |
365 | static inline int ftrace_get_offsets_##call( \ | 485 | static inline int ftrace_get_offsets_##call( \ |
366 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ | 486 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ |
367 | { \ | 487 | { \ |
@@ -373,6 +493,13 @@ static inline int ftrace_get_offsets_##call( \ | |||
373 | return __data_size; \ | 493 | return __data_size; \ |
374 | } | 494 | } |
375 | 495 | ||
496 | #undef DEFINE_EVENT | ||
497 | #define DEFINE_EVENT(template, name, proto, args) | ||
498 | |||
499 | #undef DEFINE_EVENT_PRINT | ||
500 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
501 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
502 | |||
376 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 503 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
377 | 504 | ||
378 | #ifdef CONFIG_EVENT_PROFILE | 505 | #ifdef CONFIG_EVENT_PROFILE |
@@ -394,21 +521,28 @@ static inline int ftrace_get_offsets_##call( \ | |||
394 | * | 521 | * |
395 | */ | 522 | */ |
396 | 523 | ||
397 | #undef TRACE_EVENT | 524 | #undef DECLARE_EVENT_CLASS |
398 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 525 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) |
526 | |||
527 | #undef DEFINE_EVENT | ||
528 | #define DEFINE_EVENT(template, name, proto, args) \ | ||
399 | \ | 529 | \ |
400 | static void ftrace_profile_##call(proto); \ | 530 | static void ftrace_profile_##name(proto); \ |
401 | \ | 531 | \ |
402 | static int ftrace_profile_enable_##call(void) \ | 532 | static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\ |
403 | { \ | 533 | { \ |
404 | return register_trace_##call(ftrace_profile_##call); \ | 534 | return register_trace_##name(ftrace_profile_##name); \ |
405 | } \ | 535 | } \ |
406 | \ | 536 | \ |
407 | static void ftrace_profile_disable_##call(void) \ | 537 | static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ |
408 | { \ | 538 | { \ |
409 | unregister_trace_##call(ftrace_profile_##call); \ | 539 | unregister_trace_##name(ftrace_profile_##name); \ |
410 | } | 540 | } |
411 | 541 | ||
542 | #undef DEFINE_EVENT_PRINT | ||
543 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
544 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
545 | |||
412 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 546 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
413 | 547 | ||
414 | #endif | 548 | #endif |
@@ -423,7 +557,7 @@ static void ftrace_profile_disable_##call(void) \ | |||
423 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | 557 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); |
424 | * } | 558 | * } |
425 | * | 559 | * |
426 | * static int ftrace_reg_event_<call>(void) | 560 | * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused) |
427 | * { | 561 | * { |
428 | * int ret; | 562 | * int ret; |
429 | * | 563 | * |
@@ -434,7 +568,7 @@ static void ftrace_profile_disable_##call(void) \ | |||
434 | * return ret; | 568 | * return ret; |
435 | * } | 569 | * } |
436 | * | 570 | * |
437 | * static void ftrace_unreg_event_<call>(void) | 571 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) |
438 | * { | 572 | * { |
439 | * unregister_trace_<call>(ftrace_event_<call>); | 573 | * unregister_trace_<call>(ftrace_event_<call>); |
440 | * } | 574 | * } |
@@ -469,7 +603,7 @@ static void ftrace_profile_disable_##call(void) \ | |||
469 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); | 603 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); |
470 | * } | 604 | * } |
471 | * | 605 | * |
472 | * static int ftrace_raw_reg_event_<call>(void) | 606 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) |
473 | * { | 607 | * { |
474 | * int ret; | 608 | * int ret; |
475 | * | 609 | * |
@@ -480,7 +614,7 @@ static void ftrace_profile_disable_##call(void) \ | |||
480 | * return ret; | 614 | * return ret; |
481 | * } | 615 | * } |
482 | * | 616 | * |
483 | * static void ftrace_unreg_event_<call>(void) | 617 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) |
484 | * { | 618 | * { |
485 | * unregister_trace_<call>(ftrace_raw_event_<call>); | 619 | * unregister_trace_<call>(ftrace_raw_event_<call>); |
486 | * } | 620 | * } |
@@ -489,7 +623,7 @@ static void ftrace_profile_disable_##call(void) \ | |||
489 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | 623 | * .trace = ftrace_raw_output_<call>, <-- stage 2 |
490 | * }; | 624 | * }; |
491 | * | 625 | * |
492 | * static int ftrace_raw_init_event_<call>(void) | 626 | * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused) |
493 | * { | 627 | * { |
494 | * int id; | 628 | * int id; |
495 | * | 629 | * |
@@ -547,15 +681,13 @@ static void ftrace_profile_disable_##call(void) \ | |||
547 | #define __assign_str(dst, src) \ | 681 | #define __assign_str(dst, src) \ |
548 | strcpy(__get_str(dst), src); | 682 | strcpy(__get_str(dst), src); |
549 | 683 | ||
550 | #undef TRACE_EVENT | 684 | #undef DECLARE_EVENT_CLASS |
551 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 685 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ |
552 | \ | 686 | \ |
553 | static struct ftrace_event_call event_##call; \ | 687 | static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ |
554 | \ | 688 | proto) \ |
555 | static void ftrace_raw_event_##call(proto) \ | ||
556 | { \ | 689 | { \ |
557 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | 690 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ |
558 | struct ftrace_event_call *event_call = &event_##call; \ | ||
559 | struct ring_buffer_event *event; \ | 691 | struct ring_buffer_event *event; \ |
560 | struct ftrace_raw_##call *entry; \ | 692 | struct ftrace_raw_##call *entry; \ |
561 | struct ring_buffer *buffer; \ | 693 | struct ring_buffer *buffer; \ |
@@ -569,7 +701,7 @@ static void ftrace_raw_event_##call(proto) \ | |||
569 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | 701 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ |
570 | \ | 702 | \ |
571 | event = trace_current_buffer_lock_reserve(&buffer, \ | 703 | event = trace_current_buffer_lock_reserve(&buffer, \ |
572 | event_##call.id, \ | 704 | event_call->id, \ |
573 | sizeof(*entry) + __data_size, \ | 705 | sizeof(*entry) + __data_size, \ |
574 | irq_flags, pc); \ | 706 | irq_flags, pc); \ |
575 | if (!event) \ | 707 | if (!event) \ |
@@ -584,9 +716,17 @@ static void ftrace_raw_event_##call(proto) \ | |||
584 | if (!filter_current_check_discard(buffer, event_call, entry, event)) \ | 716 | if (!filter_current_check_discard(buffer, event_call, entry, event)) \ |
585 | trace_nowake_buffer_unlock_commit(buffer, \ | 717 | trace_nowake_buffer_unlock_commit(buffer, \ |
586 | event, irq_flags, pc); \ | 718 | event, irq_flags, pc); \ |
719 | } | ||
720 | |||
721 | #undef DEFINE_EVENT | ||
722 | #define DEFINE_EVENT(template, call, proto, args) \ | ||
723 | \ | ||
724 | static void ftrace_raw_event_##call(proto) \ | ||
725 | { \ | ||
726 | ftrace_raw_event_id_##template(&event_##call, args); \ | ||
587 | } \ | 727 | } \ |
588 | \ | 728 | \ |
589 | static int ftrace_raw_reg_event_##call(void *ptr) \ | 729 | static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\ |
590 | { \ | 730 | { \ |
591 | int ret; \ | 731 | int ret; \ |
592 | \ | 732 | \ |
@@ -597,7 +737,7 @@ static int ftrace_raw_reg_event_##call(void *ptr) \ | |||
597 | return ret; \ | 737 | return ret; \ |
598 | } \ | 738 | } \ |
599 | \ | 739 | \ |
600 | static void ftrace_raw_unreg_event_##call(void *ptr) \ | 740 | static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\ |
601 | { \ | 741 | { \ |
602 | unregister_trace_##call(ftrace_raw_event_##call); \ | 742 | unregister_trace_##call(ftrace_raw_event_##call); \ |
603 | } \ | 743 | } \ |
@@ -606,7 +746,7 @@ static struct trace_event ftrace_event_type_##call = { \ | |||
606 | .trace = ftrace_raw_output_##call, \ | 746 | .trace = ftrace_raw_output_##call, \ |
607 | }; \ | 747 | }; \ |
608 | \ | 748 | \ |
609 | static int ftrace_raw_init_event_##call(void) \ | 749 | static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\ |
610 | { \ | 750 | { \ |
611 | int id; \ | 751 | int id; \ |
612 | \ | 752 | \ |
@@ -616,7 +756,36 @@ static int ftrace_raw_init_event_##call(void) \ | |||
616 | event_##call.id = id; \ | 756 | event_##call.id = id; \ |
617 | INIT_LIST_HEAD(&event_##call.fields); \ | 757 | INIT_LIST_HEAD(&event_##call.fields); \ |
618 | return 0; \ | 758 | return 0; \ |
619 | } \ | 759 | } |
760 | |||
761 | #undef DEFINE_EVENT_PRINT | ||
762 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
763 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
764 | |||
765 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
766 | |||
767 | #undef DECLARE_EVENT_CLASS | ||
768 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) | ||
769 | |||
770 | #undef DEFINE_EVENT | ||
771 | #define DEFINE_EVENT(template, call, proto, args) \ | ||
772 | \ | ||
773 | static struct ftrace_event_call __used \ | ||
774 | __attribute__((__aligned__(4))) \ | ||
775 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
776 | .name = #call, \ | ||
777 | .system = __stringify(TRACE_SYSTEM), \ | ||
778 | .event = &ftrace_event_type_##call, \ | ||
779 | .raw_init = ftrace_raw_init_event_##call, \ | ||
780 | .regfunc = ftrace_raw_reg_event_##call, \ | ||
781 | .unregfunc = ftrace_raw_unreg_event_##call, \ | ||
782 | .show_format = ftrace_format_##template, \ | ||
783 | .define_fields = ftrace_define_fields_##template, \ | ||
784 | _TRACE_PROFILE_INIT(call) \ | ||
785 | } | ||
786 | |||
787 | #undef DEFINE_EVENT_PRINT | ||
788 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | ||
620 | \ | 789 | \ |
621 | static struct ftrace_event_call __used \ | 790 | static struct ftrace_event_call __used \ |
622 | __attribute__((__aligned__(4))) \ | 791 | __attribute__((__aligned__(4))) \ |
@@ -628,7 +797,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
628 | .regfunc = ftrace_raw_reg_event_##call, \ | 797 | .regfunc = ftrace_raw_reg_event_##call, \ |
629 | .unregfunc = ftrace_raw_unreg_event_##call, \ | 798 | .unregfunc = ftrace_raw_unreg_event_##call, \ |
630 | .show_format = ftrace_format_##call, \ | 799 | .show_format = ftrace_format_##call, \ |
631 | .define_fields = ftrace_define_fields_##call, \ | 800 | .define_fields = ftrace_define_fields_##template, \ |
632 | _TRACE_PROFILE_INIT(call) \ | 801 | _TRACE_PROFILE_INIT(call) \ |
633 | } | 802 | } |
634 | 803 | ||
@@ -646,6 +815,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
646 | * struct ftrace_event_call *event_call = &event_<call>; | 815 | * struct ftrace_event_call *event_call = &event_<call>; |
647 | * extern void perf_tp_event(int, u64, u64, void *, int); | 816 | * extern void perf_tp_event(int, u64, u64, void *, int); |
648 | * struct ftrace_raw_##call *entry; | 817 | * struct ftrace_raw_##call *entry; |
818 | * struct perf_trace_buf *trace_buf; | ||
649 | * u64 __addr = 0, __count = 1; | 819 | * u64 __addr = 0, __count = 1; |
650 | * unsigned long irq_flags; | 820 | * unsigned long irq_flags; |
651 | * struct trace_entry *ent; | 821 | * struct trace_entry *ent; |
@@ -670,14 +840,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
670 | * __cpu = smp_processor_id(); | 840 | * __cpu = smp_processor_id(); |
671 | * | 841 | * |
672 | * if (in_nmi()) | 842 | * if (in_nmi()) |
673 | * raw_data = rcu_dereference(trace_profile_buf_nmi); | 843 | * trace_buf = rcu_dereference(perf_trace_buf_nmi); |
674 | * else | 844 | * else |
675 | * raw_data = rcu_dereference(trace_profile_buf); | 845 | * trace_buf = rcu_dereference(perf_trace_buf); |
676 | * | 846 | * |
677 | * if (!raw_data) | 847 | * if (!trace_buf) |
678 | * goto end; | 848 | * goto end; |
679 | * | 849 | * |
680 | * raw_data = per_cpu_ptr(raw_data, __cpu); | 850 | * trace_buf = per_cpu_ptr(trace_buf, __cpu); |
851 | * | ||
852 | * // Avoid recursion from perf that could mess up the buffer | ||
853 | * if (trace_buf->recursion++) | ||
854 | * goto end_recursion; | ||
855 | * | ||
856 | * raw_data = trace_buf->buf; | ||
857 | * | ||
858 | * // Make recursion update visible before entering perf_tp_event | ||
859 | * // so that we are protected against perf recursion. | ||
860 | * | ||
861 | * barrier(); | ||
681 | * | 862 | * |
682 | * //zero dead bytes from alignment to avoid stack leak to userspace: | 863 | * //zero dead bytes from alignment to avoid stack leak to userspace: |
683 | * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; | 864 | * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; |
@@ -704,21 +885,26 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
704 | #undef __perf_count | 885 | #undef __perf_count |
705 | #define __perf_count(c) __count = (c) | 886 | #define __perf_count(c) __count = (c) |
706 | 887 | ||
707 | #undef TRACE_EVENT | 888 | #undef DECLARE_EVENT_CLASS |
708 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 889 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ |
709 | static void ftrace_profile_##call(proto) \ | 890 | static void \ |
891 | ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ | ||
892 | proto) \ | ||
710 | { \ | 893 | { \ |
711 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | 894 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ |
712 | struct ftrace_event_call *event_call = &event_##call; \ | 895 | extern int perf_swevent_get_recursion_context(void); \ |
713 | extern void perf_tp_event(int, u64, u64, void *, int); \ | 896 | extern void perf_swevent_put_recursion_context(int rctx); \ |
897 | extern void perf_tp_event(int, u64, u64, void *, int); \ | ||
714 | struct ftrace_raw_##call *entry; \ | 898 | struct ftrace_raw_##call *entry; \ |
715 | u64 __addr = 0, __count = 1; \ | 899 | u64 __addr = 0, __count = 1; \ |
716 | unsigned long irq_flags; \ | 900 | unsigned long irq_flags; \ |
717 | struct trace_entry *ent; \ | 901 | struct trace_entry *ent; \ |
718 | int __entry_size; \ | 902 | int __entry_size; \ |
719 | int __data_size; \ | 903 | int __data_size; \ |
904 | char *trace_buf; \ | ||
720 | char *raw_data; \ | 905 | char *raw_data; \ |
721 | int __cpu; \ | 906 | int __cpu; \ |
907 | int rctx; \ | ||
722 | int pc; \ | 908 | int pc; \ |
723 | \ | 909 | \ |
724 | pc = preempt_count(); \ | 910 | pc = preempt_count(); \ |
@@ -733,17 +919,22 @@ static void ftrace_profile_##call(proto) \ | |||
733 | return; \ | 919 | return; \ |
734 | \ | 920 | \ |
735 | local_irq_save(irq_flags); \ | 921 | local_irq_save(irq_flags); \ |
922 | \ | ||
923 | rctx = perf_swevent_get_recursion_context(); \ | ||
924 | if (rctx < 0) \ | ||
925 | goto end_recursion; \ | ||
926 | \ | ||
736 | __cpu = smp_processor_id(); \ | 927 | __cpu = smp_processor_id(); \ |
737 | \ | 928 | \ |
738 | if (in_nmi()) \ | 929 | if (in_nmi()) \ |
739 | raw_data = rcu_dereference(trace_profile_buf_nmi); \ | 930 | trace_buf = rcu_dereference(perf_trace_buf_nmi); \ |
740 | else \ | 931 | else \ |
741 | raw_data = rcu_dereference(trace_profile_buf); \ | 932 | trace_buf = rcu_dereference(perf_trace_buf); \ |
742 | \ | 933 | \ |
743 | if (!raw_data) \ | 934 | if (!trace_buf) \ |
744 | goto end; \ | 935 | goto end; \ |
745 | \ | 936 | \ |
746 | raw_data = per_cpu_ptr(raw_data, __cpu); \ | 937 | raw_data = per_cpu_ptr(trace_buf, __cpu); \ |
747 | \ | 938 | \ |
748 | *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ | 939 | *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ |
749 | entry = (struct ftrace_raw_##call *)raw_data; \ | 940 | entry = (struct ftrace_raw_##call *)raw_data; \ |
@@ -759,10 +950,25 @@ static void ftrace_profile_##call(proto) \ | |||
759 | __entry_size); \ | 950 | __entry_size); \ |
760 | \ | 951 | \ |
761 | end: \ | 952 | end: \ |
953 | perf_swevent_put_recursion_context(rctx); \ | ||
954 | end_recursion: \ | ||
762 | local_irq_restore(irq_flags); \ | 955 | local_irq_restore(irq_flags); \ |
763 | \ | 956 | \ |
764 | } | 957 | } |
765 | 958 | ||
959 | #undef DEFINE_EVENT | ||
960 | #define DEFINE_EVENT(template, call, proto, args) \ | ||
961 | static void ftrace_profile_##call(proto) \ | ||
962 | { \ | ||
963 | struct ftrace_event_call *event_call = &event_##call; \ | ||
964 | \ | ||
965 | ftrace_profile_templ_##template(event_call, args); \ | ||
966 | } | ||
967 | |||
968 | #undef DEFINE_EVENT_PRINT | ||
969 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
970 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
971 | |||
766 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 972 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
767 | #endif /* CONFIG_EVENT_PROFILE */ | 973 | #endif /* CONFIG_EVENT_PROFILE */ |
768 | 974 | ||
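The ftrace.h rework above splits the old TRACE_EVENT() expansion in two: DECLARE_EVENT_CLASS() now emits the shared ftrace_raw_event_id_*() and ftrace_profile_templ_*() bodies once per class, while DEFINE_EVENT() only emits the thin per-event wrappers plus the ftrace_event_call instance that passes &event_<name> into them. TRACE_EVENT() itself is kept as a compatibility wrapper that expands to DECLARE_EVENT_CLASS() plus a single DEFINE_EVENT() of the same name. A rough sketch of a trace header under the new pair; the "sample" subsystem, event names and fields below are made up for illustration:

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM sample

    #if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_SAMPLE_H

    #include <linux/tracepoint.h>

    /* One class: the event bodies above are generated only once for it. */
    DECLARE_EVENT_CLASS(sample_template,

            TP_PROTO(int id, unsigned long len),

            TP_ARGS(id, len),

            TP_STRUCT__entry(
                    __field(int,            id)
                    __field(unsigned long,  len)
            ),

            TP_fast_assign(
                    __entry->id  = id;
                    __entry->len = len;
            ),

            TP_printk("id=%d len=%lu", __entry->id, __entry->len)
    );

    /* Each DEFINE_EVENT only adds the small wrappers and one ftrace_event_call. */
    DEFINE_EVENT(sample_template, sample_start,
            TP_PROTO(int id, unsigned long len),
            TP_ARGS(id, len));

    DEFINE_EVENT(sample_template, sample_done,
            TP_PROTO(int id, unsigned long len),
            TP_ARGS(id, len));

    #endif /* _TRACE_SAMPLE_H */

    #include <trace/define_trace.h>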
diff --git a/include/trace/power.h b/include/trace/power.h deleted file mode 100644 index ef204666e983..000000000000 --- a/include/trace/power.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | #ifndef _TRACE_POWER_H | ||
2 | #define _TRACE_POWER_H | ||
3 | |||
4 | #include <linux/ktime.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | enum { | ||
8 | POWER_NONE = 0, | ||
9 | POWER_CSTATE = 1, | ||
10 | POWER_PSTATE = 2, | ||
11 | }; | ||
12 | |||
13 | struct power_trace { | ||
14 | ktime_t stamp; | ||
15 | ktime_t end; | ||
16 | int type; | ||
17 | int state; | ||
18 | }; | ||
19 | |||
20 | DECLARE_TRACE(power_start, | ||
21 | TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state), | ||
22 | TP_ARGS(it, type, state)); | ||
23 | |||
24 | DECLARE_TRACE(power_mark, | ||
25 | TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state), | ||
26 | TP_ARGS(it, type, state)); | ||
27 | |||
28 | DECLARE_TRACE(power_end, | ||
29 | TP_PROTO(struct power_trace *it), | ||
30 | TP_ARGS(it)); | ||
31 | |||
32 | #endif /* _TRACE_POWER_H */ | ||
diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 5dc283ba5ae0..961fda3556bb 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h | |||
@@ -12,51 +12,48 @@ | |||
12 | * A syscall entry in the ftrace syscalls array. | 12 | * A syscall entry in the ftrace syscalls array. |
13 | * | 13 | * |
14 | * @name: name of the syscall | 14 | * @name: name of the syscall |
15 | * @syscall_nr: number of the syscall | ||
15 | * @nb_args: number of parameters it takes | 16 | * @nb_args: number of parameters it takes |
16 | * @types: list of types as strings | 17 | * @types: list of types as strings |
17 | * @args: list of args as strings (args[i] matches types[i]) | 18 | * @args: list of args as strings (args[i] matches types[i]) |
18 | * @enter_id: associated ftrace enter event id | ||
19 | * @exit_id: associated ftrace exit event id | ||
20 | * @enter_event: associated syscall_enter trace event | 19 | * @enter_event: associated syscall_enter trace event |
21 | * @exit_event: associated syscall_exit trace event | 20 | * @exit_event: associated syscall_exit trace event |
22 | */ | 21 | */ |
23 | struct syscall_metadata { | 22 | struct syscall_metadata { |
24 | const char *name; | 23 | const char *name; |
24 | int syscall_nr; | ||
25 | int nb_args; | 25 | int nb_args; |
26 | const char **types; | 26 | const char **types; |
27 | const char **args; | 27 | const char **args; |
28 | int enter_id; | ||
29 | int exit_id; | ||
30 | 28 | ||
31 | struct ftrace_event_call *enter_event; | 29 | struct ftrace_event_call *enter_event; |
32 | struct ftrace_event_call *exit_event; | 30 | struct ftrace_event_call *exit_event; |
33 | }; | 31 | }; |
34 | 32 | ||
35 | #ifdef CONFIG_FTRACE_SYSCALLS | 33 | #ifdef CONFIG_FTRACE_SYSCALLS |
36 | extern struct syscall_metadata *syscall_nr_to_meta(int nr); | 34 | extern unsigned long arch_syscall_addr(int nr); |
37 | extern int syscall_name_to_nr(char *name); | 35 | extern int init_syscall_trace(struct ftrace_event_call *call); |
38 | void set_syscall_enter_id(int num, int id); | 36 | |
39 | void set_syscall_exit_id(int num, int id); | ||
40 | extern struct trace_event event_syscall_enter; | ||
41 | extern struct trace_event event_syscall_exit; | ||
42 | extern int reg_event_syscall_enter(void *ptr); | ||
43 | extern void unreg_event_syscall_enter(void *ptr); | ||
44 | extern int reg_event_syscall_exit(void *ptr); | ||
45 | extern void unreg_event_syscall_exit(void *ptr); | ||
46 | extern int syscall_enter_format(struct ftrace_event_call *call, | 37 | extern int syscall_enter_format(struct ftrace_event_call *call, |
47 | struct trace_seq *s); | 38 | struct trace_seq *s); |
48 | extern int syscall_exit_format(struct ftrace_event_call *call, | 39 | extern int syscall_exit_format(struct ftrace_event_call *call, |
49 | struct trace_seq *s); | 40 | struct trace_seq *s); |
50 | extern int syscall_enter_define_fields(struct ftrace_event_call *call); | 41 | extern int syscall_enter_define_fields(struct ftrace_event_call *call); |
51 | extern int syscall_exit_define_fields(struct ftrace_event_call *call); | 42 | extern int syscall_exit_define_fields(struct ftrace_event_call *call); |
43 | extern int reg_event_syscall_enter(struct ftrace_event_call *call); | ||
44 | extern void unreg_event_syscall_enter(struct ftrace_event_call *call); | ||
45 | extern int reg_event_syscall_exit(struct ftrace_event_call *call); | ||
46 | extern void unreg_event_syscall_exit(struct ftrace_event_call *call); | ||
47 | extern int | ||
48 | ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); | ||
52 | enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); | 49 | enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); |
53 | enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); | 50 | enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); |
54 | #endif | 51 | #endif |
55 | #ifdef CONFIG_EVENT_PROFILE | 52 | #ifdef CONFIG_EVENT_PROFILE |
56 | int reg_prof_syscall_enter(char *name); | 53 | int prof_sysenter_enable(struct ftrace_event_call *call); |
57 | void unreg_prof_syscall_enter(char *name); | 54 | void prof_sysenter_disable(struct ftrace_event_call *call); |
58 | int reg_prof_syscall_exit(char *name); | 55 | int prof_sysexit_enable(struct ftrace_event_call *call); |
59 | void unreg_prof_syscall_exit(char *name); | 56 | void prof_sysexit_disable(struct ftrace_event_call *call); |
60 | 57 | ||
61 | #endif | 58 | #endif |
62 | 59 | ||
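With syscall_nr and the enter/exit event pointers folded into struct syscall_metadata, the registration, format and profiling hooks above now take the struct ftrace_event_call itself instead of a syscall name. A hedged sketch of how a syscall-enter event could be wired to the new prototypes; the real wiring is generated by the syscall trace-event macros, and the event name here is illustrative only:

    /* Sketch only -- generated in practice, not hand-written. */
    static struct ftrace_event_call event_enter_sys_open = {
            .name           = "sys_enter_open",             /* illustrative */
            .system         = "syscalls",
            .raw_init       = init_syscall_trace,           /* per-event init hook declared above */
            .regfunc        = reg_event_syscall_enter,      /* now receives the call, not a name */
            .unregfunc      = unreg_event_syscall_enter,
            .show_format    = syscall_enter_format,
            .define_fields  = syscall_enter_define_fields,
    };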
diff --git a/init/Kconfig b/init/Kconfig index ab5c64801fe5..9ee778294756 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -334,6 +334,15 @@ config TREE_PREEMPT_RCU | |||
334 | is also required. It also scales down nicely to | 334 | is also required. It also scales down nicely to |
335 | smaller systems. | 335 | smaller systems. |
336 | 336 | ||
337 | config TINY_RCU | ||
338 | bool "UP-only small-memory-footprint RCU" | ||
339 | depends on !SMP | ||
340 | help | ||
341 | This option selects the RCU implementation that is | ||
342 | designed for UP systems on which real-time response | ||
343 | is not required. This option greatly reduces the | ||
344 | memory footprint of RCU. | ||
345 | |||
337 | endchoice | 346 | endchoice |
338 | 347 | ||
339 | config RCU_TRACE | 348 | config RCU_TRACE |
@@ -606,7 +615,7 @@ config SYSFS_DEPRECATED | |||
606 | bool | 615 | bool |
607 | 616 | ||
608 | config SYSFS_DEPRECATED_V2 | 617 | config SYSFS_DEPRECATED_V2 |
609 | bool "remove sysfs features which may confuse old userspace tools" | 618 | bool "enable deprecated sysfs features which may confuse old userspace tools" |
610 | depends on SYSFS | 619 | depends on SYSFS |
611 | default n | 620 | default n |
612 | select SYSFS_DEPRECATED | 621 | select SYSFS_DEPRECATED |
@@ -1098,12 +1107,12 @@ config SLOW_WORK | |||
1098 | 1107 | ||
1099 | See Documentation/slow-work.txt. | 1108 | See Documentation/slow-work.txt. |
1100 | 1109 | ||
1101 | config SLOW_WORK_PROC | 1110 | config SLOW_WORK_DEBUG |
1102 | bool "Slow work debugging through /proc" | 1111 | bool "Slow work debugging through debugfs" |
1103 | default n | 1112 | default n |
1104 | depends on SLOW_WORK && PROC_FS | 1113 | depends on SLOW_WORK && DEBUG_FS |
1105 | help | 1114 | help |
1106 | Display the contents of the slow work run queue through /proc, | 1115 | Display the contents of the slow work run queue through debugfs, |
1107 | including items currently executing. | 1116 | including items currently executing. |
1108 | 1117 | ||
1109 | See Documentation/slow-work.txt. | 1118 | See Documentation/slow-work.txt. |
@@ -1220,3 +1229,4 @@ source "block/Kconfig" | |||
1220 | config PREEMPT_NOTIFIERS | 1229 | config PREEMPT_NOTIFIERS |
1221 | bool | 1230 | bool |
1222 | 1231 | ||
1232 | source "kernel/Kconfig.locks" | ||
diff --git a/init/main.c b/init/main.c index 5988debfc505..4051d75dd2d6 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -251,7 +251,7 @@ early_param("loglevel", loglevel); | |||
251 | 251 | ||
252 | /* | 252 | /* |
253 | * Unknown boot options get handed to init, unless they look like | 253 | * Unknown boot options get handed to init, unless they look like |
254 | * failed parameters | 254 | * unused parameters (modprobe will find them in /proc/cmdline). |
255 | */ | 255 | */ |
256 | static int __init unknown_bootoption(char *param, char *val) | 256 | static int __init unknown_bootoption(char *param, char *val) |
257 | { | 257 | { |
@@ -272,14 +272,9 @@ static int __init unknown_bootoption(char *param, char *val) | |||
272 | if (obsolete_checksetup(param)) | 272 | if (obsolete_checksetup(param)) |
273 | return 0; | 273 | return 0; |
274 | 274 | ||
275 | /* | 275 | /* Unused module parameter. */ |
276 | * Preemptive maintenance for "why didn't my misspelled command | 276 | if (strchr(param, '.') && (!val || strchr(param, '.') < val)) |
277 | * line work?" | ||
278 | */ | ||
279 | if (strchr(param, '.') && (!val || strchr(param, '.') < val)) { | ||
280 | printk(KERN_ERR "Unknown boot option `%s': ignoring\n", param); | ||
281 | return 0; | 277 | return 0; |
282 | } | ||
283 | 278 | ||
284 | if (panic_later) | 279 | if (panic_later) |
285 | return 0; | 280 | return 0; |
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks new file mode 100644 index 000000000000..88c92fb44618 --- /dev/null +++ b/kernel/Kconfig.locks | |||
@@ -0,0 +1,202 @@ | |||
1 | # | ||
2 | # The ARCH_INLINE foo is necessary because select ignores "depends on" | ||
3 | # | ||
4 | config ARCH_INLINE_SPIN_TRYLOCK | ||
5 | bool | ||
6 | |||
7 | config ARCH_INLINE_SPIN_TRYLOCK_BH | ||
8 | bool | ||
9 | |||
10 | config ARCH_INLINE_SPIN_LOCK | ||
11 | bool | ||
12 | |||
13 | config ARCH_INLINE_SPIN_LOCK_BH | ||
14 | bool | ||
15 | |||
16 | config ARCH_INLINE_SPIN_LOCK_IRQ | ||
17 | bool | ||
18 | |||
19 | config ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
20 | bool | ||
21 | |||
22 | config ARCH_INLINE_SPIN_UNLOCK | ||
23 | bool | ||
24 | |||
25 | config ARCH_INLINE_SPIN_UNLOCK_BH | ||
26 | bool | ||
27 | |||
28 | config ARCH_INLINE_SPIN_UNLOCK_IRQ | ||
29 | bool | ||
30 | |||
31 | config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
32 | bool | ||
33 | |||
34 | |||
35 | config ARCH_INLINE_READ_TRYLOCK | ||
36 | bool | ||
37 | |||
38 | config ARCH_INLINE_READ_LOCK | ||
39 | bool | ||
40 | |||
41 | config ARCH_INLINE_READ_LOCK_BH | ||
42 | bool | ||
43 | |||
44 | config ARCH_INLINE_READ_LOCK_IRQ | ||
45 | bool | ||
46 | |||
47 | config ARCH_INLINE_READ_LOCK_IRQSAVE | ||
48 | bool | ||
49 | |||
50 | config ARCH_INLINE_READ_UNLOCK | ||
51 | bool | ||
52 | |||
53 | config ARCH_INLINE_READ_UNLOCK_BH | ||
54 | bool | ||
55 | |||
56 | config ARCH_INLINE_READ_UNLOCK_IRQ | ||
57 | bool | ||
58 | |||
59 | config ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
60 | bool | ||
61 | |||
62 | |||
63 | config ARCH_INLINE_WRITE_TRYLOCK | ||
64 | bool | ||
65 | |||
66 | config ARCH_INLINE_WRITE_LOCK | ||
67 | bool | ||
68 | |||
69 | config ARCH_INLINE_WRITE_LOCK_BH | ||
70 | bool | ||
71 | |||
72 | config ARCH_INLINE_WRITE_LOCK_IRQ | ||
73 | bool | ||
74 | |||
75 | config ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
76 | bool | ||
77 | |||
78 | config ARCH_INLINE_WRITE_UNLOCK | ||
79 | bool | ||
80 | |||
81 | config ARCH_INLINE_WRITE_UNLOCK_BH | ||
82 | bool | ||
83 | |||
84 | config ARCH_INLINE_WRITE_UNLOCK_IRQ | ||
85 | bool | ||
86 | |||
87 | config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
88 | bool | ||
89 | |||
90 | # | ||
91 | # lock_* functions are inlined when: | ||
92 | # - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y | ||
93 | # | ||
94 | # trylock_* functions are inlined when: | ||
95 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
96 | # | ||
97 | # unlock and unlock_irq functions are inlined when: | ||
98 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
99 | # or | ||
100 | # - DEBUG_SPINLOCK=n and PREEMPT=n | ||
101 | # | ||
102 | # unlock_bh and unlock_irqrestore functions are inlined when: | ||
103 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
104 | # | ||
105 | |||
106 | config INLINE_SPIN_TRYLOCK | ||
107 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK | ||
108 | |||
109 | config INLINE_SPIN_TRYLOCK_BH | ||
110 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH | ||
111 | |||
112 | config INLINE_SPIN_LOCK | ||
113 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK | ||
114 | |||
115 | config INLINE_SPIN_LOCK_BH | ||
116 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
117 | ARCH_INLINE_SPIN_LOCK_BH | ||
118 | |||
119 | config INLINE_SPIN_LOCK_IRQ | ||
120 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
121 | ARCH_INLINE_SPIN_LOCK_IRQ | ||
122 | |||
123 | config INLINE_SPIN_LOCK_IRQSAVE | ||
124 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
125 | ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
126 | |||
127 | config INLINE_SPIN_UNLOCK | ||
128 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK) | ||
129 | |||
130 | config INLINE_SPIN_UNLOCK_BH | ||
131 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH | ||
132 | |||
133 | config INLINE_SPIN_UNLOCK_IRQ | ||
134 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ) | ||
135 | |||
136 | config INLINE_SPIN_UNLOCK_IRQRESTORE | ||
137 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
138 | |||
139 | |||
140 | config INLINE_READ_TRYLOCK | ||
141 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK | ||
142 | |||
143 | config INLINE_READ_LOCK | ||
144 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK | ||
145 | |||
146 | config INLINE_READ_LOCK_BH | ||
147 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
148 | ARCH_INLINE_READ_LOCK_BH | ||
149 | |||
150 | config INLINE_READ_LOCK_IRQ | ||
151 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
152 | ARCH_INLINE_READ_LOCK_IRQ | ||
153 | |||
154 | config INLINE_READ_LOCK_IRQSAVE | ||
155 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
156 | ARCH_INLINE_READ_LOCK_IRQSAVE | ||
157 | |||
158 | config INLINE_READ_UNLOCK | ||
159 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK) | ||
160 | |||
161 | config INLINE_READ_UNLOCK_BH | ||
162 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH | ||
163 | |||
164 | config INLINE_READ_UNLOCK_IRQ | ||
165 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ) | ||
166 | |||
167 | config INLINE_READ_UNLOCK_IRQRESTORE | ||
168 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
169 | |||
170 | |||
171 | config INLINE_WRITE_TRYLOCK | ||
172 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK | ||
173 | |||
174 | config INLINE_WRITE_LOCK | ||
175 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK | ||
176 | |||
177 | config INLINE_WRITE_LOCK_BH | ||
178 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
179 | ARCH_INLINE_WRITE_LOCK_BH | ||
180 | |||
181 | config INLINE_WRITE_LOCK_IRQ | ||
182 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
183 | ARCH_INLINE_WRITE_LOCK_IRQ | ||
184 | |||
185 | config INLINE_WRITE_LOCK_IRQSAVE | ||
186 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
187 | ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
188 | |||
189 | config INLINE_WRITE_UNLOCK | ||
190 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK) | ||
191 | |||
192 | config INLINE_WRITE_UNLOCK_BH | ||
193 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH | ||
194 | |||
195 | config INLINE_WRITE_UNLOCK_IRQ | ||
196 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ) | ||
197 | |||
198 | config INLINE_WRITE_UNLOCK_IRQRESTORE | ||
199 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
200 | |||
201 | config MUTEX_SPIN_ON_OWNER | ||
202 | def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES | ||
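The INLINE_* symbols computed above are consumed on the C side, where each lock operation is expected to switch between an always-inlined body in spinlock_api_smp.h and the exported out-of-line wrapper in kernel/spinlock.c. A rough sketch of that pattern for spin_lock(); treat the exact body and file placement as an assumption rather than a quote of the patch:

    /* include/linux/spinlock_api_smp.h (sketch) */
    static inline void __spin_lock(spinlock_t *lock)
    {
            preempt_disable();
            spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
            LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
    }

    #ifdef CONFIG_INLINE_SPIN_LOCK
    #define _spin_lock(lock) __spin_lock(lock)
    #endif

    /* kernel/spinlock.c (sketch) */
    #ifndef CONFIG_INLINE_SPIN_LOCK
    void __lockfunc _spin_lock(spinlock_t *lock)
    {
            __spin_lock(lock);
    }
    EXPORT_SYMBOL(_spin_lock);
    #endif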
diff --git a/kernel/Makefile b/kernel/Makefile index 776ffed1556d..982c50e2ce53 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg | |||
21 | CFLAGS_REMOVE_rtmutex-debug.o = -pg | 21 | CFLAGS_REMOVE_rtmutex-debug.o = -pg |
22 | CFLAGS_REMOVE_cgroup-debug.o = -pg | 22 | CFLAGS_REMOVE_cgroup-debug.o = -pg |
23 | CFLAGS_REMOVE_sched_clock.o = -pg | 23 | CFLAGS_REMOVE_sched_clock.o = -pg |
24 | CFLAGS_REMOVE_perf_event.o = -pg | ||
24 | endif | 25 | endif |
25 | 26 | ||
26 | obj-$(CONFIG_FREEZER) += freezer.o | 27 | obj-$(CONFIG_FREEZER) += freezer.o |
@@ -82,6 +83,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o | |||
82 | obj-$(CONFIG_TREE_RCU) += rcutree.o | 83 | obj-$(CONFIG_TREE_RCU) += rcutree.o |
83 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o | 84 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o |
84 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o | 85 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o |
86 | obj-$(CONFIG_TINY_RCU) += rcutiny.o | ||
85 | obj-$(CONFIG_RELAY) += relay.o | 87 | obj-$(CONFIG_RELAY) += relay.o |
86 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | 88 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o |
87 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | 89 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o |
@@ -94,8 +96,9 @@ obj-$(CONFIG_X86_DS) += trace/ | |||
94 | obj-$(CONFIG_RING_BUFFER) += trace/ | 96 | obj-$(CONFIG_RING_BUFFER) += trace/ |
95 | obj-$(CONFIG_SMP) += sched_cpupri.o | 97 | obj-$(CONFIG_SMP) += sched_cpupri.o |
96 | obj-$(CONFIG_SLOW_WORK) += slow-work.o | 98 | obj-$(CONFIG_SLOW_WORK) += slow-work.o |
97 | obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o | 99 | obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o |
98 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | 100 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o |
101 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | ||
99 | 102 | ||
100 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) | 103 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) |
101 | # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is | 104 | # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is |
diff --git a/kernel/capability.c b/kernel/capability.c index 4e17041963f5..7f876e60521f 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(__cap_empty_set); | |||
29 | EXPORT_SYMBOL(__cap_full_set); | 29 | EXPORT_SYMBOL(__cap_full_set); |
30 | EXPORT_SYMBOL(__cap_init_eff_set); | 30 | EXPORT_SYMBOL(__cap_init_eff_set); |
31 | 31 | ||
32 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
33 | int file_caps_enabled = 1; | 32 | int file_caps_enabled = 1; |
34 | 33 | ||
35 | static int __init file_caps_disable(char *str) | 34 | static int __init file_caps_disable(char *str) |
@@ -38,7 +37,6 @@ static int __init file_caps_disable(char *str) | |||
38 | return 1; | 37 | return 1; |
39 | } | 38 | } |
40 | __setup("no_file_caps", file_caps_disable); | 39 | __setup("no_file_caps", file_caps_disable); |
41 | #endif | ||
42 | 40 | ||
43 | /* | 41 | /* |
44 | * More recent versions of libcap are available from: | 42 | * More recent versions of libcap are available from: |
@@ -169,8 +167,8 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr) | |||
169 | kernel_cap_t pE, pI, pP; | 167 | kernel_cap_t pE, pI, pP; |
170 | 168 | ||
171 | ret = cap_validate_magic(header, &tocopy); | 169 | ret = cap_validate_magic(header, &tocopy); |
172 | if (ret != 0) | 170 | if ((dataptr == NULL) || (ret != 0)) |
173 | return ret; | 171 | return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret; |
174 | 172 | ||
175 | if (get_user(pid, &header->pid)) | 173 | if (get_user(pid, &header->pid)) |
176 | return -EFAULT; | 174 | return -EFAULT; |
@@ -238,7 +236,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr) | |||
238 | SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) | 236 | SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) |
239 | { | 237 | { |
240 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; | 238 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; |
241 | unsigned i, tocopy; | 239 | unsigned i, tocopy, copybytes; |
242 | kernel_cap_t inheritable, permitted, effective; | 240 | kernel_cap_t inheritable, permitted, effective; |
243 | struct cred *new; | 241 | struct cred *new; |
244 | int ret; | 242 | int ret; |
@@ -255,8 +253,11 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) | |||
255 | if (pid != 0 && pid != task_pid_vnr(current)) | 253 | if (pid != 0 && pid != task_pid_vnr(current)) |
256 | return -EPERM; | 254 | return -EPERM; |
257 | 255 | ||
258 | if (copy_from_user(&kdata, data, | 256 | copybytes = tocopy * sizeof(struct __user_cap_data_struct); |
259 | tocopy * sizeof(struct __user_cap_data_struct))) | 257 | if (copybytes > sizeof(kdata)) |
258 | return -EFAULT; | ||
259 | |||
260 | if (copy_from_user(&kdata, data, copybytes)) | ||
260 | return -EFAULT; | 261 | return -EFAULT; |
261 | 262 | ||
262 | for (i = 0; i < tocopy; i++) { | 263 | for (i = 0; i < tocopy; i++) { |
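Besides the bounds check on the capset() copy, the capget() change turns a NULL dataptr into a supported way to probe the kernel's preferred capability version: cap_validate_magic() still writes the preferred version back into header->version, but the call now returns 0 instead of -EINVAL. A minimal user-space sketch of that probe, using the raw syscall to avoid depending on libcap:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/capability.h>

    int main(void)
    {
            struct __user_cap_header_struct hdr = {
                    .version = 0,   /* deliberately not a valid version */
                    .pid     = 0,   /* current task */
            };

            /* With a NULL data pointer this now succeeds and only reports
             * the preferred version back through hdr.version. */
            if (syscall(SYS_capget, &hdr, NULL) != 0)
                    perror("capget");

            printf("preferred capability version: 0x%x\n", hdr.version);
            return 0;
    }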
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index b5cb469d2545..3cf2183b472d 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -537,8 +537,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) | |||
537 | * element of the partition (one sched domain) to be passed to | 537 | * element of the partition (one sched domain) to be passed to |
538 | * partition_sched_domains(). | 538 | * partition_sched_domains(). |
539 | */ | 539 | */ |
540 | /* FIXME: see the FIXME in partition_sched_domains() */ | 540 | static int generate_sched_domains(cpumask_var_t **domains, |
541 | static int generate_sched_domains(struct cpumask **domains, | ||
542 | struct sched_domain_attr **attributes) | 541 | struct sched_domain_attr **attributes) |
543 | { | 542 | { |
544 | LIST_HEAD(q); /* queue of cpusets to be scanned */ | 543 | LIST_HEAD(q); /* queue of cpusets to be scanned */ |
@@ -546,7 +545,7 @@ static int generate_sched_domains(struct cpumask **domains, | |||
546 | struct cpuset **csa; /* array of all cpuset ptrs */ | 545 | struct cpuset **csa; /* array of all cpuset ptrs */ |
547 | int csn; /* how many cpuset ptrs in csa so far */ | 546 | int csn; /* how many cpuset ptrs in csa so far */ |
548 | int i, j, k; /* indices for partition finding loops */ | 547 | int i, j, k; /* indices for partition finding loops */ |
549 | struct cpumask *doms; /* resulting partition; i.e. sched domains */ | 548 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
550 | struct sched_domain_attr *dattr; /* attributes for custom domains */ | 549 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
551 | int ndoms = 0; /* number of sched domains in result */ | 550 | int ndoms = 0; /* number of sched domains in result */ |
552 | int nslot; /* next empty doms[] struct cpumask slot */ | 551 | int nslot; /* next empty doms[] struct cpumask slot */ |
@@ -557,7 +556,8 @@ static int generate_sched_domains(struct cpumask **domains, | |||
557 | 556 | ||
558 | /* Special case for the 99% of systems with one, full, sched domain */ | 557 | /* Special case for the 99% of systems with one, full, sched domain */ |
559 | if (is_sched_load_balance(&top_cpuset)) { | 558 | if (is_sched_load_balance(&top_cpuset)) { |
560 | doms = kmalloc(cpumask_size(), GFP_KERNEL); | 559 | ndoms = 1; |
560 | doms = alloc_sched_domains(ndoms); | ||
561 | if (!doms) | 561 | if (!doms) |
562 | goto done; | 562 | goto done; |
563 | 563 | ||
@@ -566,9 +566,8 @@ static int generate_sched_domains(struct cpumask **domains, | |||
566 | *dattr = SD_ATTR_INIT; | 566 | *dattr = SD_ATTR_INIT; |
567 | update_domain_attr_tree(dattr, &top_cpuset); | 567 | update_domain_attr_tree(dattr, &top_cpuset); |
568 | } | 568 | } |
569 | cpumask_copy(doms, top_cpuset.cpus_allowed); | 569 | cpumask_copy(doms[0], top_cpuset.cpus_allowed); |
570 | 570 | ||
571 | ndoms = 1; | ||
572 | goto done; | 571 | goto done; |
573 | } | 572 | } |
574 | 573 | ||
@@ -636,7 +635,7 @@ restart: | |||
636 | * Now we know how many domains to create. | 635 | * Now we know how many domains to create. |
637 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | 636 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. |
638 | */ | 637 | */ |
639 | doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL); | 638 | doms = alloc_sched_domains(ndoms); |
640 | if (!doms) | 639 | if (!doms) |
641 | goto done; | 640 | goto done; |
642 | 641 | ||
@@ -656,7 +655,7 @@ restart: | |||
656 | continue; | 655 | continue; |
657 | } | 656 | } |
658 | 657 | ||
659 | dp = doms + nslot; | 658 | dp = doms[nslot]; |
660 | 659 | ||
661 | if (nslot == ndoms) { | 660 | if (nslot == ndoms) { |
662 | static int warnings = 10; | 661 | static int warnings = 10; |
@@ -718,7 +717,7 @@ done: | |||
718 | static void do_rebuild_sched_domains(struct work_struct *unused) | 717 | static void do_rebuild_sched_domains(struct work_struct *unused) |
719 | { | 718 | { |
720 | struct sched_domain_attr *attr; | 719 | struct sched_domain_attr *attr; |
721 | struct cpumask *doms; | 720 | cpumask_var_t *doms; |
722 | int ndoms; | 721 | int ndoms; |
723 | 722 | ||
724 | get_online_cpus(); | 723 | get_online_cpus(); |
@@ -2052,7 +2051,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, | |||
2052 | unsigned long phase, void *unused_cpu) | 2051 | unsigned long phase, void *unused_cpu) |
2053 | { | 2052 | { |
2054 | struct sched_domain_attr *attr; | 2053 | struct sched_domain_attr *attr; |
2055 | struct cpumask *doms; | 2054 | cpumask_var_t *doms; |
2056 | int ndoms; | 2055 | int ndoms; |
2057 | 2056 | ||
2058 | switch (phase) { | 2057 | switch (phase) { |
@@ -2537,15 +2536,9 @@ const struct file_operations proc_cpuset_operations = { | |||
2537 | }; | 2536 | }; |
2538 | #endif /* CONFIG_PROC_PID_CPUSET */ | 2537 | #endif /* CONFIG_PROC_PID_CPUSET */ |
2539 | 2538 | ||
2540 | /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ | 2539 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
2541 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) | 2540 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
2542 | { | 2541 | { |
2543 | seq_printf(m, "Cpus_allowed:\t"); | ||
2544 | seq_cpumask(m, &task->cpus_allowed); | ||
2545 | seq_printf(m, "\n"); | ||
2546 | seq_printf(m, "Cpus_allowed_list:\t"); | ||
2547 | seq_cpumask_list(m, &task->cpus_allowed); | ||
2548 | seq_printf(m, "\n"); | ||
2549 | seq_printf(m, "Mems_allowed:\t"); | 2542 | seq_printf(m, "Mems_allowed:\t"); |
2550 | seq_nodemask(m, &task->mems_allowed); | 2543 | seq_nodemask(m, &task->mems_allowed); |
2551 | seq_printf(m, "\n"); | 2544 | seq_printf(m, "\n"); |
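The cpuset changes follow the scheduler-side switch from one large cpumask block to an array of cpumask_var_t, so each sched-domain mask is now reached as doms[i] instead of by pointer arithmetic. A hedged sketch of the allocation pattern this relies on; alloc_sched_domains() is assumed to be the helper introduced alongside this change, and build_mask_for() is a hypothetical stand-in for whatever fills each partition:

    /* Sketch: build ndoms sched-domain masks and hand them to the scheduler. */
    static void rebuild_partitions(int ndoms)
    {
            cpumask_var_t *doms;
            int i;

            doms = alloc_sched_domains(ndoms);      /* ndoms separately allocated masks */
            if (!doms)
                    return;

            for (i = 0; i < ndoms; i++)
                    cpumask_copy(doms[i], build_mask_for(i));       /* hypothetical helper */

            /* partition_sched_domains() takes ownership of doms; the caller
             * must not free it afterwards. */
            partition_sched_domains(ndoms, doms, NULL);
    }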
diff --git a/kernel/exit.c b/kernel/exit.c index f7864ac2ecc1..80ae941cfd2e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/init_task.h> | 49 | #include <linux/init_task.h> |
50 | #include <linux/perf_event.h> | 50 | #include <linux/perf_event.h> |
51 | #include <trace/events/sched.h> | 51 | #include <trace/events/sched.h> |
52 | #include <linux/hw_breakpoint.h> | ||
52 | 53 | ||
53 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
54 | #include <asm/unistd.h> | 55 | #include <asm/unistd.h> |
@@ -110,9 +111,9 @@ static void __exit_signal(struct task_struct *tsk) | |||
110 | * We won't ever get here for the group leader, since it | 111 | * We won't ever get here for the group leader, since it |
111 | * will have been the last reference on the signal_struct. | 112 | * will have been the last reference on the signal_struct. |
112 | */ | 113 | */ |
113 | sig->utime = cputime_add(sig->utime, task_utime(tsk)); | 114 | sig->utime = cputime_add(sig->utime, tsk->utime); |
114 | sig->stime = cputime_add(sig->stime, task_stime(tsk)); | 115 | sig->stime = cputime_add(sig->stime, tsk->stime); |
115 | sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); | 116 | sig->gtime = cputime_add(sig->gtime, tsk->gtime); |
116 | sig->min_flt += tsk->min_flt; | 117 | sig->min_flt += tsk->min_flt; |
117 | sig->maj_flt += tsk->maj_flt; | 118 | sig->maj_flt += tsk->maj_flt; |
118 | sig->nvcsw += tsk->nvcsw; | 119 | sig->nvcsw += tsk->nvcsw; |
@@ -978,6 +979,10 @@ NORET_TYPE void do_exit(long code) | |||
978 | proc_exit_connector(tsk); | 979 | proc_exit_connector(tsk); |
979 | 980 | ||
980 | /* | 981 | /* |
982 | * FIXME: do that only when needed, using sched_exit tracepoint | ||
983 | */ | ||
984 | flush_ptrace_hw_breakpoint(tsk); | ||
985 | /* | ||
981 | * Flush inherited counters to the parent - before the parent | 986 | * Flush inherited counters to the parent - before the parent |
982 | * gets woken up by child-exit notifications. | 987 | * gets woken up by child-exit notifications. |
983 | */ | 988 | */ |
@@ -1205,6 +1210,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1205 | struct signal_struct *psig; | 1210 | struct signal_struct *psig; |
1206 | struct signal_struct *sig; | 1211 | struct signal_struct *sig; |
1207 | unsigned long maxrss; | 1212 | unsigned long maxrss; |
1213 | cputime_t tgutime, tgstime; | ||
1208 | 1214 | ||
1209 | /* | 1215 | /* |
1210 | * The resource counters for the group leader are in its | 1216 | * The resource counters for the group leader are in its |
@@ -1220,20 +1226,23 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1220 | * need to protect the access to parent->signal fields, | 1226 | * need to protect the access to parent->signal fields, |
1221 | * as other threads in the parent group can be right | 1227 | * as other threads in the parent group can be right |
1222 | * here reaping other children at the same time. | 1228 | * here reaping other children at the same time. |
1229 | * | ||
1230 | * We use thread_group_times() to get times for the thread | ||
1231 | * group, which consolidates times for all threads in the | ||
1232 | * group including the group leader. | ||
1223 | */ | 1233 | */ |
1234 | thread_group_times(p, &tgutime, &tgstime); | ||
1224 | spin_lock_irq(&p->real_parent->sighand->siglock); | 1235 | spin_lock_irq(&p->real_parent->sighand->siglock); |
1225 | psig = p->real_parent->signal; | 1236 | psig = p->real_parent->signal; |
1226 | sig = p->signal; | 1237 | sig = p->signal; |
1227 | psig->cutime = | 1238 | psig->cutime = |
1228 | cputime_add(psig->cutime, | 1239 | cputime_add(psig->cutime, |
1229 | cputime_add(p->utime, | 1240 | cputime_add(tgutime, |
1230 | cputime_add(sig->utime, | 1241 | sig->cutime)); |
1231 | sig->cutime))); | ||
1232 | psig->cstime = | 1242 | psig->cstime = |
1233 | cputime_add(psig->cstime, | 1243 | cputime_add(psig->cstime, |
1234 | cputime_add(p->stime, | 1244 | cputime_add(tgstime, |
1235 | cputime_add(sig->stime, | 1245 | sig->cstime)); |
1236 | sig->cstime))); | ||
1237 | psig->cgtime = | 1246 | psig->cgtime = |
1238 | cputime_add(psig->cgtime, | 1247 | cputime_add(psig->cgtime, |
1239 | cputime_add(p->gtime, | 1248 | cputime_add(p->gtime, |
diff --git a/kernel/fork.c b/kernel/fork.c index 166b8c49257c..3d6f121bbe8a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -884,6 +884,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
884 | sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; | 884 | sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; |
885 | sig->gtime = cputime_zero; | 885 | sig->gtime = cputime_zero; |
886 | sig->cgtime = cputime_zero; | 886 | sig->cgtime = cputime_zero; |
887 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
888 | sig->prev_utime = sig->prev_stime = cputime_zero; | ||
889 | #endif | ||
887 | sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; | 890 | sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; |
888 | sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; | 891 | sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; |
889 | sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; | 892 | sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; |
@@ -1066,8 +1069,10 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1066 | p->gtime = cputime_zero; | 1069 | p->gtime = cputime_zero; |
1067 | p->utimescaled = cputime_zero; | 1070 | p->utimescaled = cputime_zero; |
1068 | p->stimescaled = cputime_zero; | 1071 | p->stimescaled = cputime_zero; |
1072 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
1069 | p->prev_utime = cputime_zero; | 1073 | p->prev_utime = cputime_zero; |
1070 | p->prev_stime = cputime_zero; | 1074 | p->prev_stime = cputime_zero; |
1075 | #endif | ||
1071 | 1076 | ||
1072 | p->default_timer_slack_ns = current->timer_slack_ns; | 1077 | p->default_timer_slack_ns = current->timer_slack_ns; |
1073 | 1078 | ||
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index d4e841747400..0c642d51aac2 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
@@ -144,7 +144,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) | |||
144 | 144 | ||
145 | rcu_read_lock(); | 145 | rcu_read_lock(); |
146 | do_each_thread(g, t) { | 146 | do_each_thread(g, t) { |
147 | if (!--max_count) | 147 | if (!max_count--) |
148 | goto unlock; | 148 | goto unlock; |
149 | if (!--batch_count) { | 149 | if (!--batch_count) { |
150 | batch_count = HUNG_TASK_BATCHING; | 150 | batch_count = HUNG_TASK_BATCHING; |
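The hung_task change is a one-character off-by-one fix: with the old pre-decrement guard a check budget of N only covered N-1 tasks, and a budget of 0 never stopped the walk at all. A small stand-alone illustration in plain user-space C (not kernel code):

    #include <stdio.h>

    /* Walk up to 10 items under a budget, using either the old or new guard. */
    static int walk(long budget, int old_guard)
    {
            int visited = 0;
            int item;

            for (item = 0; item < 10; item++) {
                    if (old_guard ? !--budget : !budget--)
                            break;
                    visited++;
            }
            return visited;
    }

    int main(void)
    {
            /* budget 3: old guard visits 2, new guard visits 3 */
            printf("budget 3: old=%d new=%d\n", walk(3, 1), walk(3, 0));
            /* budget 0: old guard never fires and visits all 10, new visits 0 */
            printf("budget 0: old=%d new=%d\n", walk(0, 1), walk(0, 0));
            return 0;
    }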
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..cf5ee1628411 --- /dev/null +++ b/kernel/hw_breakpoint.c | |||
@@ -0,0 +1,423 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2007 Alan Stern | ||
17 | * Copyright (C) IBM Corporation, 2009 | ||
18 | * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com> | ||
19 | * | ||
20 | * Thanks to Ingo Molnar for his many suggestions. | ||
21 | * | ||
22 | * Authors: Alan Stern <stern@rowland.harvard.edu> | ||
23 | * K.Prasad <prasad@linux.vnet.ibm.com> | ||
24 | * Frederic Weisbecker <fweisbec@gmail.com> | ||
25 | */ | ||
26 | |||
27 | /* | ||
28 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, | ||
29 | * using the CPU's debug registers. | ||
30 | * This file contains the arch-independent routines. | ||
31 | */ | ||
32 | |||
33 | #include <linux/irqflags.h> | ||
34 | #include <linux/kallsyms.h> | ||
35 | #include <linux/notifier.h> | ||
36 | #include <linux/kprobes.h> | ||
37 | #include <linux/kdebug.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/percpu.h> | ||
41 | #include <linux/sched.h> | ||
42 | #include <linux/init.h> | ||
43 | #include <linux/smp.h> | ||
44 | |||
45 | #include <linux/hw_breakpoint.h> | ||
46 | |||
47 | /* | ||
48 | * Constraints data | ||
49 | */ | ||
50 | |||
51 | /* Number of pinned cpu breakpoints in a cpu */ | ||
52 | static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned); | ||
53 | |||
54 | /* Number of pinned task breakpoints in a cpu */ | ||
55 | static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]); | ||
56 | |||
57 | /* Number of non-pinned cpu/task breakpoints in a cpu */ | ||
58 | static DEFINE_PER_CPU(unsigned int, nr_bp_flexible); | ||
59 | |||
60 | /* Gather the number of total pinned and un-pinned bp in a cpuset */ | ||
61 | struct bp_busy_slots { | ||
62 | unsigned int pinned; | ||
63 | unsigned int flexible; | ||
64 | }; | ||
65 | |||
66 | /* Serialize accesses to the above constraints */ | ||
67 | static DEFINE_MUTEX(nr_bp_mutex); | ||
68 | |||
69 | /* | ||
70 | * Report the maximum number of pinned breakpoints a task | ||
71 | * has on this cpu | ||
72 | */ | ||
73 | static unsigned int max_task_bp_pinned(int cpu) | ||
74 | { | ||
75 | int i; | ||
76 | unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu); | ||
77 | |||
78 | for (i = HBP_NUM -1; i >= 0; i--) { | ||
79 | if (tsk_pinned[i] > 0) | ||
80 | return i + 1; | ||
81 | } | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * Report the number of pinned/un-pinned breakpoints we have in | ||
88 | * a given cpu (cpu > -1) or in all of them (cpu = -1). | ||
89 | */ | ||
90 | static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu) | ||
91 | { | ||
92 | if (cpu >= 0) { | ||
93 | slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu); | ||
94 | slots->pinned += max_task_bp_pinned(cpu); | ||
95 | slots->flexible = per_cpu(nr_bp_flexible, cpu); | ||
96 | |||
97 | return; | ||
98 | } | ||
99 | |||
100 | for_each_online_cpu(cpu) { | ||
101 | unsigned int nr; | ||
102 | |||
103 | nr = per_cpu(nr_cpu_bp_pinned, cpu); | ||
104 | nr += max_task_bp_pinned(cpu); | ||
105 | |||
106 | if (nr > slots->pinned) | ||
107 | slots->pinned = nr; | ||
108 | |||
109 | nr = per_cpu(nr_bp_flexible, cpu); | ||
110 | |||
111 | if (nr > slots->flexible) | ||
112 | slots->flexible = nr; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Add a pinned breakpoint for the given task in our constraint table | ||
118 | */ | ||
119 | static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable) | ||
120 | { | ||
121 | int count = 0; | ||
122 | struct perf_event *bp; | ||
123 | struct perf_event_context *ctx = tsk->perf_event_ctxp; | ||
124 | unsigned int *tsk_pinned; | ||
125 | struct list_head *list; | ||
126 | unsigned long flags; | ||
127 | |||
128 | if (WARN_ONCE(!ctx, "No perf context for this task")) | ||
129 | return; | ||
130 | |||
131 | list = &ctx->event_list; | ||
132 | |||
133 | spin_lock_irqsave(&ctx->lock, flags); | ||
134 | |||
135 | /* | ||
136 | * The current breakpoint counter is not included in the list | ||
137 | * at the open() callback time | ||
138 | */ | ||
139 | list_for_each_entry(bp, list, event_entry) { | ||
140 | if (bp->attr.type == PERF_TYPE_BREAKPOINT) | ||
141 | count++; | ||
142 | } | ||
143 | |||
144 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
145 | |||
146 | if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list")) | ||
147 | return; | ||
148 | |||
149 | tsk_pinned = per_cpu(task_bp_pinned, cpu); | ||
150 | if (enable) { | ||
151 | tsk_pinned[count]++; | ||
152 | if (count > 0) | ||
153 | tsk_pinned[count-1]--; | ||
154 | } else { | ||
155 | tsk_pinned[count]--; | ||
156 | if (count > 0) | ||
157 | tsk_pinned[count-1]++; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Add/remove the given breakpoint in our constraint table | ||
163 | */ | ||
164 | static void toggle_bp_slot(struct perf_event *bp, bool enable) | ||
165 | { | ||
166 | int cpu = bp->cpu; | ||
167 | struct task_struct *tsk = bp->ctx->task; | ||
168 | |||
169 | /* Pinned counter task profiling */ | ||
170 | if (tsk) { | ||
171 | if (cpu >= 0) { | ||
172 | toggle_bp_task_slot(tsk, cpu, enable); | ||
173 | return; | ||
174 | } | ||
175 | |||
176 | for_each_online_cpu(cpu) | ||
177 | toggle_bp_task_slot(tsk, cpu, enable); | ||
178 | return; | ||
179 | } | ||
180 | |||
181 | /* Pinned counter cpu profiling */ | ||
182 | if (enable) | ||
183 | per_cpu(nr_cpu_bp_pinned, bp->cpu)++; | ||
184 | else | ||
185 | per_cpu(nr_cpu_bp_pinned, bp->cpu)--; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * Constraints to check before allowing this new breakpoint counter: | ||
190 | * | ||
191 | * == Non-pinned counter == (Considered as pinned for now) | ||
192 | * | ||
193 | * - If attached to a single cpu, check: | ||
194 | * | ||
195 | * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) | ||
196 | * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM | ||
197 | * | ||
198 | * -> If there are already non-pinned counters in this cpu, it means | ||
199 | * there is already a free slot for them. | ||
200 | * Otherwise, we check that the maximum number of per task | ||
201 | * breakpoints (for this cpu) plus the number of per cpu breakpoints | ||
202 | * (for this cpu) doesn't use up every register. | ||
203 | * | ||
204 | * - If attached to every cpus, check: | ||
205 | * | ||
206 | * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) | ||
207 | * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM | ||
208 | * | ||
209 | * -> This is roughly the same, except we check the number of per cpu | ||
210 | * bp for every cpu and we keep the max one. Same for the per task | ||
211 | * breakpoints. | ||
212 | * | ||
213 | * | ||
214 | * == Pinned counter == | ||
215 | * | ||
216 | * - If attached to a single cpu, check: | ||
217 | * | ||
218 | * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) | ||
219 | * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM | ||
220 | * | ||
221 | * -> Same checks as before. But now the nr_bp_flexible, if any, must keep | ||
222 | * at least one register free (or the flexible counters will never run). | ||
223 | * | ||
224 | * - If attached to every cpus, check: | ||
225 | * | ||
226 | * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) | ||
227 | * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM | ||
228 | */ | ||
229 | int reserve_bp_slot(struct perf_event *bp) | ||
230 | { | ||
231 | struct bp_busy_slots slots = {0}; | ||
232 | int ret = 0; | ||
233 | |||
234 | mutex_lock(&nr_bp_mutex); | ||
235 | |||
236 | fetch_bp_busy_slots(&slots, bp->cpu); | ||
237 | |||
238 | /* Flexible counters need to keep at least one slot */ | ||
239 | if (slots.pinned + (!!slots.flexible) == HBP_NUM) { | ||
240 | ret = -ENOSPC; | ||
241 | goto end; | ||
242 | } | ||
243 | |||
244 | toggle_bp_slot(bp, true); | ||
245 | |||
246 | end: | ||
247 | mutex_unlock(&nr_bp_mutex); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | void release_bp_slot(struct perf_event *bp) | ||
253 | { | ||
254 | mutex_lock(&nr_bp_mutex); | ||
255 | |||
256 | toggle_bp_slot(bp, false); | ||
257 | |||
258 | mutex_unlock(&nr_bp_mutex); | ||
259 | } | ||
260 | |||
261 | |||
262 | int __register_perf_hw_breakpoint(struct perf_event *bp) | ||
263 | { | ||
264 | int ret; | ||
265 | |||
266 | ret = reserve_bp_slot(bp); | ||
267 | if (ret) | ||
268 | return ret; | ||
269 | |||
270 | /* | ||
271 | * Ptrace breakpoints can be temporary perf events only | ||
272 | * meant to reserve a slot. In this case, it is created disabled and | ||
273 | * we don't want to check the params right now (as we put a null addr) | ||
274 | * But perf tools create events as disabled and we want to check | ||
275 | * the params for them. | ||
276 | * This is a quick hack that will be removed soon, once we remove | ||
277 | * the tmp breakpoints from ptrace | ||
278 | */ | ||
279 | if (!bp->attr.disabled || bp->callback == perf_bp_event) | ||
280 | ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task); | ||
281 | |||
282 | return ret; | ||
283 | } | ||
284 | |||
285 | int register_perf_hw_breakpoint(struct perf_event *bp) | ||
286 | { | ||
287 | bp->callback = perf_bp_event; | ||
288 | |||
289 | return __register_perf_hw_breakpoint(bp); | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * register_user_hw_breakpoint - register a hardware breakpoint for user space | ||
294 | * @attr: breakpoint attributes | ||
295 | * @triggered: callback to trigger when we hit the breakpoint | ||
296 | * @tsk: pointer to 'task_struct' of the process to which the address belongs | ||
297 | */ | ||
298 | struct perf_event * | ||
299 | register_user_hw_breakpoint(struct perf_event_attr *attr, | ||
300 | perf_callback_t triggered, | ||
301 | struct task_struct *tsk) | ||
302 | { | ||
303 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | ||
304 | } | ||
305 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | ||
306 | |||
307 | /** | ||
308 | * modify_user_hw_breakpoint - modify a user-space hardware breakpoint | ||
309 | * @bp: the breakpoint structure to modify | ||
310 | * @attr: new breakpoint attributes | ||
311 | * @triggered: callback to trigger when we hit the breakpoint | ||
312 | * @tsk: pointer to 'task_struct' of the process to which the address belongs | ||
313 | */ | ||
314 | struct perf_event * | ||
315 | modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr, | ||
316 | perf_callback_t triggered, | ||
317 | struct task_struct *tsk) | ||
318 | { | ||
319 | /* | ||
320 | * FIXME: do it without unregistering | ||
321 | * - We don't want to lose our slot | ||
322 | * - If the new bp is incorrect, don't lose the older one | ||
323 | */ | ||
324 | unregister_hw_breakpoint(bp); | ||
325 | |||
326 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | ||
327 | } | ||
328 | EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); | ||
329 | |||
330 | /** | ||
331 | * unregister_hw_breakpoint - unregister a user-space hardware breakpoint | ||
332 | * @bp: the breakpoint structure to unregister | ||
333 | */ | ||
334 | void unregister_hw_breakpoint(struct perf_event *bp) | ||
335 | { | ||
336 | if (!bp) | ||
337 | return; | ||
338 | perf_event_release_kernel(bp); | ||
339 | } | ||
340 | EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); | ||
341 | |||
342 | /** | ||
343 | * register_wide_hw_breakpoint - register a wide breakpoint in the kernel | ||
344 | * @attr: breakpoint attributes | ||
345 | * @triggered: callback to trigger when we hit the breakpoint | ||
346 | * | ||
347 | * @return a set of per_cpu pointers to perf events | ||
348 | */ | ||
349 | struct perf_event ** | ||
350 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | ||
351 | perf_callback_t triggered) | ||
352 | { | ||
353 | struct perf_event **cpu_events, **pevent, *bp; | ||
354 | long err; | ||
355 | int cpu; | ||
356 | |||
357 | cpu_events = alloc_percpu(typeof(*cpu_events)); | ||
358 | if (!cpu_events) | ||
359 | return ERR_PTR(-ENOMEM); | ||
360 | |||
361 | for_each_possible_cpu(cpu) { | ||
362 | pevent = per_cpu_ptr(cpu_events, cpu); | ||
363 | bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered); | ||
364 | |||
365 | *pevent = bp; | ||
366 | |||
367 | if (IS_ERR(bp)) { | ||
368 | err = PTR_ERR(bp); | ||
369 | goto fail; | ||
370 | } | ||
371 | } | ||
372 | |||
373 | return cpu_events; | ||
374 | |||
375 | fail: | ||
376 | for_each_possible_cpu(cpu) { | ||
377 | pevent = per_cpu_ptr(cpu_events, cpu); | ||
378 | if (IS_ERR(*pevent)) | ||
379 | break; | ||
380 | unregister_hw_breakpoint(*pevent); | ||
381 | } | ||
382 | free_percpu(cpu_events); | ||
383 | /* return the error if any */ | ||
384 | return ERR_PTR(err); | ||
385 | } | ||
386 | EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); | ||
387 | |||
388 | /** | ||
389 | * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel | ||
390 | * @cpu_events: the per cpu set of events to unregister | ||
391 | */ | ||
392 | void unregister_wide_hw_breakpoint(struct perf_event **cpu_events) | ||
393 | { | ||
394 | int cpu; | ||
395 | struct perf_event **pevent; | ||
396 | |||
397 | for_each_possible_cpu(cpu) { | ||
398 | pevent = per_cpu_ptr(cpu_events, cpu); | ||
399 | unregister_hw_breakpoint(*pevent); | ||
400 | } | ||
401 | free_percpu(cpu_events); | ||
402 | } | ||
403 | EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); | ||
404 | |||
405 | static struct notifier_block hw_breakpoint_exceptions_nb = { | ||
406 | .notifier_call = hw_breakpoint_exceptions_notify, | ||
407 | /* we need to be notified first */ | ||
408 | .priority = 0x7fffffff | ||
409 | }; | ||
410 | |||
411 | static int __init init_hw_breakpoint(void) | ||
412 | { | ||
413 | return register_die_notifier(&hw_breakpoint_exceptions_nb); | ||
414 | } | ||
415 | core_initcall(init_hw_breakpoint); | ||
416 | |||
417 | |||
418 | struct pmu perf_ops_bp = { | ||
419 | .enable = arch_install_hw_breakpoint, | ||
420 | .disable = arch_uninstall_hw_breakpoint, | ||
421 | .read = hw_breakpoint_pmu_read, | ||
422 | .unthrottle = hw_breakpoint_pmu_unthrottle | ||
423 | }; | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c1660194d115..ba566c261adc 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -166,11 +166,11 @@ int set_irq_data(unsigned int irq, void *data) | |||
166 | EXPORT_SYMBOL(set_irq_data); | 166 | EXPORT_SYMBOL(set_irq_data); |
167 | 167 | ||
168 | /** | 168 | /** |
169 | * set_irq_data - set irq type data for an irq | 169 | * set_irq_msi - set MSI descriptor data for an irq |
170 | * @irq: Interrupt number | 170 | * @irq: Interrupt number |
171 | * @entry: Pointer to MSI descriptor data | 171 | * @entry: Pointer to MSI descriptor data |
172 | * | 172 | * |
173 | * Set the hardware irq controller data for an irq | 173 | * Set the MSI descriptor entry for an irq |
174 | */ | 174 | */ |
175 | int set_irq_msi(unsigned int irq, struct msi_desc *entry) | 175 | int set_irq_msi(unsigned int irq, struct msi_desc *entry) |
176 | { | 176 | { |
@@ -590,7 +590,7 @@ out_unlock: | |||
590 | } | 590 | } |
591 | 591 | ||
592 | /** | 592 | /** |
593 | * handle_percpu_IRQ - Per CPU local irq handler | 593 | * handle_percpu_irq - Per CPU local irq handler |
594 | * @irq: the interrupt number | 594 | * @irq: the interrupt number |
595 | * @desc: the interrupt description structure for this irq | 595 | * @desc: the interrupt description structure for this irq |
596 | * | 596 | * |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 692363dd591f..0832145fea97 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -136,7 +136,7 @@ out: | |||
136 | 136 | ||
137 | static int default_affinity_open(struct inode *inode, struct file *file) | 137 | static int default_affinity_open(struct inode *inode, struct file *file) |
138 | { | 138 | { |
139 | return single_open(file, default_affinity_show, NULL); | 139 | return single_open(file, default_affinity_show, PDE(inode)->data); |
140 | } | 140 | } |
141 | 141 | ||
142 | static const struct file_operations default_affinity_proc_fops = { | 142 | static const struct file_operations default_affinity_proc_fops = { |
@@ -148,18 +148,28 @@ static const struct file_operations default_affinity_proc_fops = { | |||
148 | }; | 148 | }; |
149 | #endif | 149 | #endif |
150 | 150 | ||
151 | static int irq_spurious_read(char *page, char **start, off_t off, | 151 | static int irq_spurious_proc_show(struct seq_file *m, void *v) |
152 | int count, int *eof, void *data) | ||
153 | { | 152 | { |
154 | struct irq_desc *desc = irq_to_desc((long) data); | 153 | struct irq_desc *desc = irq_to_desc((long) m->private); |
155 | return sprintf(page, "count %u\n" | 154 | |
156 | "unhandled %u\n" | 155 | seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", |
157 | "last_unhandled %u ms\n", | 156 | desc->irq_count, desc->irqs_unhandled, |
158 | desc->irq_count, | 157 | jiffies_to_msecs(desc->last_unhandled)); |
159 | desc->irqs_unhandled, | 158 | return 0; |
160 | jiffies_to_msecs(desc->last_unhandled)); | 159 | } |
160 | |||
161 | static int irq_spurious_proc_open(struct inode *inode, struct file *file) | ||
162 | { | ||
163 | return single_open(file, irq_spurious_proc_show, NULL); | ||
161 | } | 164 | } |
162 | 165 | ||
166 | static const struct file_operations irq_spurious_proc_fops = { | ||
167 | .open = irq_spurious_proc_open, | ||
168 | .read = seq_read, | ||
169 | .llseek = seq_lseek, | ||
170 | .release = single_release, | ||
171 | }; | ||
172 | |||
163 | #define MAX_NAMELEN 128 | 173 | #define MAX_NAMELEN 128 |
164 | 174 | ||
165 | static int name_unique(unsigned int irq, struct irqaction *new_action) | 175 | static int name_unique(unsigned int irq, struct irqaction *new_action) |
@@ -204,7 +214,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) | |||
204 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) | 214 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
205 | { | 215 | { |
206 | char name [MAX_NAMELEN]; | 216 | char name [MAX_NAMELEN]; |
207 | struct proc_dir_entry *entry; | ||
208 | 217 | ||
209 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) | 218 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) |
210 | return; | 219 | return; |
@@ -214,6 +223,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
214 | 223 | ||
215 | /* create /proc/irq/1234 */ | 224 | /* create /proc/irq/1234 */ |
216 | desc->dir = proc_mkdir(name, root_irq_dir); | 225 | desc->dir = proc_mkdir(name, root_irq_dir); |
226 | if (!desc->dir) | ||
227 | return; | ||
217 | 228 | ||
218 | #ifdef CONFIG_SMP | 229 | #ifdef CONFIG_SMP |
219 | /* create /proc/irq/<irq>/smp_affinity */ | 230 | /* create /proc/irq/<irq>/smp_affinity */ |
@@ -221,11 +232,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
221 | &irq_affinity_proc_fops, (void *)(long)irq); | 232 | &irq_affinity_proc_fops, (void *)(long)irq); |
222 | #endif | 233 | #endif |
223 | 234 | ||
224 | entry = create_proc_entry("spurious", 0444, desc->dir); | 235 | proc_create_data("spurious", 0444, desc->dir, |
225 | if (entry) { | 236 | &irq_spurious_proc_fops, (void *)(long)irq); |
226 | entry->data = (void *)(long)irq; | ||
227 | entry->read_proc = irq_spurious_read; | ||
228 | } | ||
229 | } | 237 | } |
230 | 238 | ||
231 | #undef MAX_NAMELEN | 239 | #undef MAX_NAMELEN |
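The proc conversion above drops the legacy read_proc hook in favour of the seq_file single_open() pattern. A minimal sketch of the same pattern for a hypothetical /proc/example file follows; all example_* names and the 42 payload are illustrative, while single_open(), seq_printf(), PDE() and proc_create_data() are the interfaces used in the hunk above.

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>
	#include <linux/init.h>

	static int example_proc_show(struct seq_file *m, void *v)
	{
		/* the data passed to proc_create_data() comes back via m->private */
		seq_printf(m, "value %ld\n", (long)m->private);
		return 0;
	}

	static int example_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_proc_show, PDE(inode)->data);
	}

	static const struct file_operations example_proc_fops = {
		.open		= example_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	static int __init example_proc_init(void)
	{
		/* creates /proc/example; 42 is an arbitrary illustrative payload */
		if (!proc_create_data("example", 0444, NULL,
				      &example_proc_fops, (void *)42L))
			return -ENOMEM;
		return 0;
	}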
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index bd7273e6282e..22b0a6eedf24 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq) | |||
104 | return ok; | 104 | return ok; |
105 | } | 105 | } |
106 | 106 | ||
107 | static void poll_all_shared_irqs(void) | 107 | static void poll_spurious_irqs(unsigned long dummy) |
108 | { | 108 | { |
109 | struct irq_desc *desc; | 109 | struct irq_desc *desc; |
110 | int i; | 110 | int i; |
@@ -125,23 +125,11 @@ static void poll_all_shared_irqs(void) | |||
125 | try_one_irq(i, desc); | 125 | try_one_irq(i, desc); |
126 | local_irq_enable(); | 126 | local_irq_enable(); |
127 | } | 127 | } |
128 | } | ||
129 | |||
130 | static void poll_spurious_irqs(unsigned long dummy) | ||
131 | { | ||
132 | poll_all_shared_irqs(); | ||
133 | 128 | ||
134 | mod_timer(&poll_spurious_irq_timer, | 129 | mod_timer(&poll_spurious_irq_timer, |
135 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 130 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
136 | } | 131 | } |
137 | 132 | ||
138 | #ifdef CONFIG_DEBUG_SHIRQ | ||
139 | void debug_poll_all_shared_irqs(void) | ||
140 | { | ||
141 | poll_all_shared_irqs(); | ||
142 | } | ||
143 | #endif | ||
144 | |||
145 | /* | 133 | /* |
146 | * If 99,900 of the previous 100,000 interrupts have not been handled | 134 | * If 99,900 of the previous 100,000 interrupts have not been handled |
147 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic | 135 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic |
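The spurious-IRQ change above folds the polling work directly into the timer callback, which keeps itself alive by re-arming with mod_timer(). Below is a minimal sketch of that self-rearming timer idiom, assuming the four-argument DEFINE_TIMER() and unsigned-long-data handler form of this kernel series; the example_* names and the HZ/10 period are illustrative.

	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/init.h>

	#define EXAMPLE_POLL_INTERVAL	(HZ / 10)	/* illustrative period */

	static void example_poll(unsigned long dummy);
	static DEFINE_TIMER(example_poll_timer, example_poll, 0, 0);

	static void example_poll(unsigned long dummy)
	{
		/* ... periodic work goes here ... */

		/* the timer keeps ticking only because the handler re-arms it */
		mod_timer(&example_poll_timer, jiffies + EXAMPLE_POLL_INTERVAL);
	}

	static int __init example_poll_init(void)
	{
		/* queue the first expiry; each later one is queued by the handler */
		mod_timer(&example_poll_timer, jiffies + EXAMPLE_POLL_INTERVAL);
		return 0;
	}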
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 8b6b8b697c68..8e5288a8a355 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
@@ -181,6 +181,7 @@ unsigned long kallsyms_lookup_name(const char *name) | |||
181 | } | 181 | } |
182 | return module_kallsyms_lookup_name(name); | 182 | return module_kallsyms_lookup_name(name); |
183 | } | 183 | } |
184 | EXPORT_SYMBOL_GPL(kallsyms_lookup_name); | ||
184 | 185 | ||
185 | int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, | 186 | int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, |
186 | unsigned long), | 187 | unsigned long), |
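With kallsyms_lookup_name() now exported GPL, modules can resolve a symbol address at runtime (given CONFIG_KALLSYMS). A minimal sketch, where "jiffies" is only an illustrative symbol name:

	#include <linux/kallsyms.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static int __init lookup_example_init(void)
	{
		unsigned long addr = kallsyms_lookup_name("jiffies");

		if (!addr)
			return -ENOENT;	/* symbol not present or kallsyms disabled */
		printk(KERN_INFO "jiffies resolved to %p\n", (void *)addr);
		return 0;
	}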
diff --git a/kernel/kgdb.c b/kernel/kgdb.c index 9147a3190c9d..7d7014634022 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c | |||
@@ -870,7 +870,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks) | |||
870 | 870 | ||
871 | /* | 871 | /* |
872 | * All threads that don't have debuggerinfo should be | 872 | * All threads that don't have debuggerinfo should be |
873 | * in __schedule() sleeping, since all other CPUs | 873 | * in schedule() sleeping, since all other CPUs |
874 | * are in kgdb_wait, and thus have debuggerinfo. | 874 | * are in kgdb_wait, and thus have debuggerinfo. |
875 | */ | 875 | */ |
876 | if (local_debuggerinfo) { | 876 | if (local_debuggerinfo) { |
diff --git a/kernel/kmod.c b/kernel/kmod.c index 9fcb53a11f87..25b103190364 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -80,16 +80,16 @@ int __request_module(bool wait, const char *fmt, ...) | |||
80 | #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ | 80 | #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ |
81 | static int kmod_loop_msg; | 81 | static int kmod_loop_msg; |
82 | 82 | ||
83 | ret = security_kernel_module_request(); | ||
84 | if (ret) | ||
85 | return ret; | ||
86 | |||
87 | va_start(args, fmt); | 83 | va_start(args, fmt); |
88 | ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); | 84 | ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); |
89 | va_end(args); | 85 | va_end(args); |
90 | if (ret >= MODULE_NAME_LEN) | 86 | if (ret >= MODULE_NAME_LEN) |
91 | return -ENAMETOOLONG; | 87 | return -ENAMETOOLONG; |
92 | 88 | ||
89 | ret = security_kernel_module_request(module_name); | ||
90 | if (ret) | ||
91 | return ret; | ||
92 | |||
93 | /* If modprobe needs a service that is in a module, we get a recursive | 93 | /* If modprobe needs a service that is in a module, we get a recursive |
94 | * loop. Limit the number of running kmod threads to max_threads/2 or | 94 | * loop. Limit the number of running kmod threads to max_threads/2 or |
95 | * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method | 95 | * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method |
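After the reorder above, security_kernel_module_request() is handed the fully expanded module name rather than being called before the name is even formatted. A caller-side sketch of the path that ends up in __request_module(); load_char_driver() and its major/minor arguments are illustrative.

	#include <linux/kmod.h>
	#include <linux/kernel.h>

	static int load_char_driver(int major, int minor)
	{
		/* the LSM hook now sees e.g. "char-major-4-64", not an empty request */
		int ret = request_module("char-major-%d-%d", major, minor);

		if (ret)
			printk(KERN_WARNING "modprobe of char-major-%d-%d failed: %d\n",
			       major, minor, ret);
		return ret;
	}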
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 5240d75f4c60..e5342a344c43 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -90,6 +90,9 @@ static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) | |||
90 | */ | 90 | */ |
91 | static struct kprobe_blackpoint kprobe_blacklist[] = { | 91 | static struct kprobe_blackpoint kprobe_blacklist[] = { |
92 | {"preempt_schedule",}, | 92 | {"preempt_schedule",}, |
93 | {"native_get_debugreg",}, | ||
94 | {"irq_entries_start",}, | ||
95 | {"common_interrupt",}, | ||
93 | {NULL} /* Terminator */ | 96 | {NULL} /* Terminator */ |
94 | }; | 97 | }; |
95 | 98 | ||
@@ -673,6 +676,40 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) | |||
673 | return (kprobe_opcode_t *)(((char *)addr) + p->offset); | 676 | return (kprobe_opcode_t *)(((char *)addr) + p->offset); |
674 | } | 677 | } |
675 | 678 | ||
679 | /* Check passed kprobe is valid and return kprobe in kprobe_table. */ | ||
680 | static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) | ||
681 | { | ||
682 | struct kprobe *old_p, *list_p; | ||
683 | |||
684 | old_p = get_kprobe(p->addr); | ||
685 | if (unlikely(!old_p)) | ||
686 | return NULL; | ||
687 | |||
688 | if (p != old_p) { | ||
689 | list_for_each_entry_rcu(list_p, &old_p->list, list) | ||
690 | if (list_p == p) | ||
691 | /* kprobe p is a valid probe */ | ||
692 | goto valid; | ||
693 | return NULL; | ||
694 | } | ||
695 | valid: | ||
696 | return old_p; | ||
697 | } | ||
698 | |||
699 | /* Return error if the kprobe is being re-registered */ | ||
700 | static inline int check_kprobe_rereg(struct kprobe *p) | ||
701 | { | ||
702 | int ret = 0; | ||
703 | struct kprobe *old_p; | ||
704 | |||
705 | mutex_lock(&kprobe_mutex); | ||
706 | old_p = __get_valid_kprobe(p); | ||
707 | if (old_p) | ||
708 | ret = -EINVAL; | ||
709 | mutex_unlock(&kprobe_mutex); | ||
710 | return ret; | ||
711 | } | ||
712 | |||
676 | int __kprobes register_kprobe(struct kprobe *p) | 713 | int __kprobes register_kprobe(struct kprobe *p) |
677 | { | 714 | { |
678 | int ret = 0; | 715 | int ret = 0; |
@@ -685,6 +722,10 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
685 | return -EINVAL; | 722 | return -EINVAL; |
686 | p->addr = addr; | 723 | p->addr = addr; |
687 | 724 | ||
725 | ret = check_kprobe_rereg(p); | ||
726 | if (ret) | ||
727 | return ret; | ||
728 | |||
688 | preempt_disable(); | 729 | preempt_disable(); |
689 | if (!kernel_text_address((unsigned long) p->addr) || | 730 | if (!kernel_text_address((unsigned long) p->addr) || |
690 | in_kprobes_functions((unsigned long) p->addr)) { | 731 | in_kprobes_functions((unsigned long) p->addr)) { |
@@ -754,26 +795,6 @@ out: | |||
754 | } | 795 | } |
755 | EXPORT_SYMBOL_GPL(register_kprobe); | 796 | EXPORT_SYMBOL_GPL(register_kprobe); |
756 | 797 | ||
757 | /* Check passed kprobe is valid and return kprobe in kprobe_table. */ | ||
758 | static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) | ||
759 | { | ||
760 | struct kprobe *old_p, *list_p; | ||
761 | |||
762 | old_p = get_kprobe(p->addr); | ||
763 | if (unlikely(!old_p)) | ||
764 | return NULL; | ||
765 | |||
766 | if (p != old_p) { | ||
767 | list_for_each_entry_rcu(list_p, &old_p->list, list) | ||
768 | if (list_p == p) | ||
769 | /* kprobe p is a valid probe */ | ||
770 | goto valid; | ||
771 | return NULL; | ||
772 | } | ||
773 | valid: | ||
774 | return old_p; | ||
775 | } | ||
776 | |||
777 | /* | 798 | /* |
778 | * Unregister a kprobe without a scheduler synchronization. | 799 | * Unregister a kprobe without a scheduler synchronization. |
779 | */ | 800 | */ |
@@ -1014,9 +1035,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp) | |||
1014 | /* Pre-allocate memory for max kretprobe instances */ | 1035 | /* Pre-allocate memory for max kretprobe instances */ |
1015 | if (rp->maxactive <= 0) { | 1036 | if (rp->maxactive <= 0) { |
1016 | #ifdef CONFIG_PREEMPT | 1037 | #ifdef CONFIG_PREEMPT |
1017 | rp->maxactive = max(10, 2 * NR_CPUS); | 1038 | rp->maxactive = max(10, 2 * num_possible_cpus()); |
1018 | #else | 1039 | #else |
1019 | rp->maxactive = NR_CPUS; | 1040 | rp->maxactive = num_possible_cpus(); |
1020 | #endif | 1041 | #endif |
1021 | } | 1042 | } |
1022 | spin_lock_init(&rp->lock); | 1043 | spin_lock_init(&rp->lock); |
@@ -1141,6 +1162,13 @@ static void __kprobes kill_kprobe(struct kprobe *p) | |||
1141 | arch_remove_kprobe(p); | 1162 | arch_remove_kprobe(p); |
1142 | } | 1163 | } |
1143 | 1164 | ||
1165 | void __kprobes dump_kprobe(struct kprobe *kp) | ||
1166 | { | ||
1167 | printk(KERN_WARNING "Dumping kprobe:\n"); | ||
1168 | printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", | ||
1169 | kp->symbol_name, kp->addr, kp->offset); | ||
1170 | } | ||
1171 | |||
1144 | /* Module notifier call back, checking kprobes on the module */ | 1172 | /* Module notifier call back, checking kprobes on the module */ |
1145 | static int __kprobes kprobes_module_callback(struct notifier_block *nb, | 1173 | static int __kprobes kprobes_module_callback(struct notifier_block *nb, |
1146 | unsigned long val, void *data) | 1174 | unsigned long val, void *data) |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 9af56723c096..f5dcd36d3151 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include "lockdep_internals.h" | 49 | #include "lockdep_internals.h" |
50 | 50 | ||
51 | #define CREATE_TRACE_POINTS | 51 | #define CREATE_TRACE_POINTS |
52 | #include <trace/events/lockdep.h> | 52 | #include <trace/events/lock.h> |
53 | 53 | ||
54 | #ifdef CONFIG_PROVE_LOCKING | 54 | #ifdef CONFIG_PROVE_LOCKING |
55 | int prove_locking = 1; | 55 | int prove_locking = 1; |
diff --git a/kernel/module.c b/kernel/module.c index 8b7d8805819d..5842a71cf052 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, | |||
1187 | 1187 | ||
1188 | /* Count loaded sections and allocate structures */ | 1188 | /* Count loaded sections and allocate structures */ |
1189 | for (i = 0; i < nsect; i++) | 1189 | for (i = 0; i < nsect; i++) |
1190 | if (sechdrs[i].sh_flags & SHF_ALLOC) | 1190 | if (sechdrs[i].sh_flags & SHF_ALLOC |
1191 | && sechdrs[i].sh_size) | ||
1191 | nloaded++; | 1192 | nloaded++; |
1192 | size[0] = ALIGN(sizeof(*sect_attrs) | 1193 | size[0] = ALIGN(sizeof(*sect_attrs) |
1193 | + nloaded * sizeof(sect_attrs->attrs[0]), | 1194 | + nloaded * sizeof(sect_attrs->attrs[0]), |
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, | |||
1207 | for (i = 0; i < nsect; i++) { | 1208 | for (i = 0; i < nsect; i++) { |
1208 | if (! (sechdrs[i].sh_flags & SHF_ALLOC)) | 1209 | if (! (sechdrs[i].sh_flags & SHF_ALLOC)) |
1209 | continue; | 1210 | continue; |
1211 | if (!sechdrs[i].sh_size) | ||
1212 | continue; | ||
1210 | sattr->address = sechdrs[i].sh_addr; | 1213 | sattr->address = sechdrs[i].sh_addr; |
1211 | sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, | 1214 | sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, |
1212 | GFP_KERNEL); | 1215 | GFP_KERNEL); |
diff --git a/kernel/mutex.c b/kernel/mutex.c index 947b3ad551f8..632f04c57d82 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
148 | 148 | ||
149 | preempt_disable(); | 149 | preempt_disable(); |
150 | mutex_acquire(&lock->dep_map, subclass, 0, ip); | 150 | mutex_acquire(&lock->dep_map, subclass, 0, ip); |
151 | #if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ | 151 | |
152 | !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) | 152 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
153 | /* | 153 | /* |
154 | * Optimistic spinning. | 154 | * Optimistic spinning. |
155 | * | 155 | * |
diff --git a/kernel/notifier.c b/kernel/notifier.c index 61d5aa5eced3..acd24e7643eb 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c | |||
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier); | |||
558 | 558 | ||
559 | static ATOMIC_NOTIFIER_HEAD(die_chain); | 559 | static ATOMIC_NOTIFIER_HEAD(die_chain); |
560 | 560 | ||
561 | int notrace notify_die(enum die_val val, const char *str, | 561 | int notrace __kprobes notify_die(enum die_val val, const char *str, |
562 | struct pt_regs *regs, long err, int trap, int sig) | 562 | struct pt_regs *regs, long err, int trap, int sig) |
563 | { | 563 | { |
564 | struct die_args args = { | 564 | struct die_args args = { |
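Marking notify_die() __kprobes above keeps the die-notification path itself off-limits to probes, which matters for users of the die chain such as the hw_breakpoint exception notifier registered earlier in this series at maximum priority. A minimal sketch of hooking that chain; the example_* names are illustrative and DIE_OOPS is assumed as on x86.

	#include <linux/kdebug.h>
	#include <linux/notifier.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static int example_die_handler(struct notifier_block *nb, unsigned long val,
				       void *data)
	{
		struct die_args *args = data;

		if (val == DIE_OOPS)
			printk(KERN_EMERG "oops notification, err %ld\n", args->err);
		return NOTIFY_DONE;
	}

	static struct notifier_block example_die_nb = {
		.notifier_call	= example_die_handler,
		/* modest priority; hw_breakpoint registers at 0x7fffffff to run first */
		.priority	= 0,
	};

	static int __init example_die_init(void)
	{
		return register_die_notifier(&example_die_nb);
	}

	static void __exit example_die_exit(void)
	{
		unregister_die_notifier(&example_die_nb);
	}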
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 7f29643c8985..6b7ddba1dd64 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/anon_inodes.h> | 28 | #include <linux/anon_inodes.h> |
29 | #include <linux/kernel_stat.h> | 29 | #include <linux/kernel_stat.h> |
30 | #include <linux/perf_event.h> | 30 | #include <linux/perf_event.h> |
31 | #include <linux/ftrace_event.h> | ||
32 | #include <linux/hw_breakpoint.h> | ||
31 | 33 | ||
32 | #include <asm/irq_regs.h> | 34 | #include <asm/irq_regs.h> |
33 | 35 | ||
@@ -244,6 +246,49 @@ static void perf_unpin_context(struct perf_event_context *ctx) | |||
244 | put_ctx(ctx); | 246 | put_ctx(ctx); |
245 | } | 247 | } |
246 | 248 | ||
249 | static inline u64 perf_clock(void) | ||
250 | { | ||
251 | return cpu_clock(smp_processor_id()); | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Update the record of the current time in a context. | ||
256 | */ | ||
257 | static void update_context_time(struct perf_event_context *ctx) | ||
258 | { | ||
259 | u64 now = perf_clock(); | ||
260 | |||
261 | ctx->time += now - ctx->timestamp; | ||
262 | ctx->timestamp = now; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * Update the total_time_enabled and total_time_running fields for a event. | ||
267 | */ | ||
268 | static void update_event_times(struct perf_event *event) | ||
269 | { | ||
270 | struct perf_event_context *ctx = event->ctx; | ||
271 | u64 run_end; | ||
272 | |||
273 | if (event->state < PERF_EVENT_STATE_INACTIVE || | ||
274 | event->group_leader->state < PERF_EVENT_STATE_INACTIVE) | ||
275 | return; | ||
276 | |||
277 | if (ctx->is_active) | ||
278 | run_end = ctx->time; | ||
279 | else | ||
280 | run_end = event->tstamp_stopped; | ||
281 | |||
282 | event->total_time_enabled = run_end - event->tstamp_enabled; | ||
283 | |||
284 | if (event->state == PERF_EVENT_STATE_INACTIVE) | ||
285 | run_end = event->tstamp_stopped; | ||
286 | else | ||
287 | run_end = ctx->time; | ||
288 | |||
289 | event->total_time_running = run_end - event->tstamp_running; | ||
290 | } | ||
291 | |||
247 | /* | 292 | /* |
248 | * Add a event from the lists for its context. | 293 | * Add a event from the lists for its context. |
249 | * Must be called with ctx->mutex and ctx->lock held. | 294 | * Must be called with ctx->mutex and ctx->lock held. |
@@ -292,6 +337,18 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) | |||
292 | if (event->group_leader != event) | 337 | if (event->group_leader != event) |
293 | event->group_leader->nr_siblings--; | 338 | event->group_leader->nr_siblings--; |
294 | 339 | ||
340 | update_event_times(event); | ||
341 | |||
342 | /* | ||
343 | * If event was in error state, then keep it | ||
344 | * that way, otherwise bogus counts will be | ||
345 | * returned on read(). The only way to get out | ||
346 | * of error state is by explicit re-enabling | ||
347 | * of the event | ||
348 | */ | ||
349 | if (event->state > PERF_EVENT_STATE_OFF) | ||
350 | event->state = PERF_EVENT_STATE_OFF; | ||
351 | |||
295 | /* | 352 | /* |
296 | * If this was a group event with sibling events then | 353 | * If this was a group event with sibling events then |
297 | * upgrade the siblings to singleton events by adding them | 354 | * upgrade the siblings to singleton events by adding them |
@@ -445,50 +502,11 @@ retry: | |||
445 | * can remove the event safely, if the call above did not | 502 | * can remove the event safely, if the call above did not |
446 | * succeed. | 503 | * succeed. |
447 | */ | 504 | */ |
448 | if (!list_empty(&event->group_entry)) { | 505 | if (!list_empty(&event->group_entry)) |
449 | list_del_event(event, ctx); | 506 | list_del_event(event, ctx); |
450 | } | ||
451 | spin_unlock_irq(&ctx->lock); | 507 | spin_unlock_irq(&ctx->lock); |
452 | } | 508 | } |
453 | 509 | ||
454 | static inline u64 perf_clock(void) | ||
455 | { | ||
456 | return cpu_clock(smp_processor_id()); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Update the record of the current time in a context. | ||
461 | */ | ||
462 | static void update_context_time(struct perf_event_context *ctx) | ||
463 | { | ||
464 | u64 now = perf_clock(); | ||
465 | |||
466 | ctx->time += now - ctx->timestamp; | ||
467 | ctx->timestamp = now; | ||
468 | } | ||
469 | |||
470 | /* | ||
471 | * Update the total_time_enabled and total_time_running fields for a event. | ||
472 | */ | ||
473 | static void update_event_times(struct perf_event *event) | ||
474 | { | ||
475 | struct perf_event_context *ctx = event->ctx; | ||
476 | u64 run_end; | ||
477 | |||
478 | if (event->state < PERF_EVENT_STATE_INACTIVE || | ||
479 | event->group_leader->state < PERF_EVENT_STATE_INACTIVE) | ||
480 | return; | ||
481 | |||
482 | event->total_time_enabled = ctx->time - event->tstamp_enabled; | ||
483 | |||
484 | if (event->state == PERF_EVENT_STATE_INACTIVE) | ||
485 | run_end = event->tstamp_stopped; | ||
486 | else | ||
487 | run_end = ctx->time; | ||
488 | |||
489 | event->total_time_running = run_end - event->tstamp_running; | ||
490 | } | ||
491 | |||
492 | /* | 510 | /* |
493 | * Update total_time_enabled and total_time_running for all events in a group. | 511 | * Update total_time_enabled and total_time_running for all events in a group. |
494 | */ | 512 | */ |
@@ -1031,10 +1049,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx, | |||
1031 | update_context_time(ctx); | 1049 | update_context_time(ctx); |
1032 | 1050 | ||
1033 | perf_disable(); | 1051 | perf_disable(); |
1034 | if (ctx->nr_active) | 1052 | if (ctx->nr_active) { |
1035 | list_for_each_entry(event, &ctx->group_list, group_entry) | 1053 | list_for_each_entry(event, &ctx->group_list, group_entry) |
1036 | group_sched_out(event, cpuctx, ctx); | 1054 | group_sched_out(event, cpuctx, ctx); |
1037 | 1055 | } | |
1038 | perf_enable(); | 1056 | perf_enable(); |
1039 | out: | 1057 | out: |
1040 | spin_unlock(&ctx->lock); | 1058 | spin_unlock(&ctx->lock); |
@@ -1059,8 +1077,6 @@ static int context_equiv(struct perf_event_context *ctx1, | |||
1059 | && !ctx1->pin_count && !ctx2->pin_count; | 1077 | && !ctx1->pin_count && !ctx2->pin_count; |
1060 | } | 1078 | } |
1061 | 1079 | ||
1062 | static void __perf_event_read(void *event); | ||
1063 | |||
1064 | static void __perf_event_sync_stat(struct perf_event *event, | 1080 | static void __perf_event_sync_stat(struct perf_event *event, |
1065 | struct perf_event *next_event) | 1081 | struct perf_event *next_event) |
1066 | { | 1082 | { |
@@ -1078,8 +1094,8 @@ static void __perf_event_sync_stat(struct perf_event *event, | |||
1078 | */ | 1094 | */ |
1079 | switch (event->state) { | 1095 | switch (event->state) { |
1080 | case PERF_EVENT_STATE_ACTIVE: | 1096 | case PERF_EVENT_STATE_ACTIVE: |
1081 | __perf_event_read(event); | 1097 | event->pmu->read(event); |
1082 | break; | 1098 | /* fall-through */ |
1083 | 1099 | ||
1084 | case PERF_EVENT_STATE_INACTIVE: | 1100 | case PERF_EVENT_STATE_INACTIVE: |
1085 | update_event_times(event); | 1101 | update_event_times(event); |
@@ -1118,6 +1134,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx, | |||
1118 | if (!ctx->nr_stat) | 1134 | if (!ctx->nr_stat) |
1119 | return; | 1135 | return; |
1120 | 1136 | ||
1137 | update_context_time(ctx); | ||
1138 | |||
1121 | event = list_first_entry(&ctx->event_list, | 1139 | event = list_first_entry(&ctx->event_list, |
1122 | struct perf_event, event_entry); | 1140 | struct perf_event, event_entry); |
1123 | 1141 | ||
@@ -1161,8 +1179,6 @@ void perf_event_task_sched_out(struct task_struct *task, | |||
1161 | if (likely(!ctx || !cpuctx->task_ctx)) | 1179 | if (likely(!ctx || !cpuctx->task_ctx)) |
1162 | return; | 1180 | return; |
1163 | 1181 | ||
1164 | update_context_time(ctx); | ||
1165 | |||
1166 | rcu_read_lock(); | 1182 | rcu_read_lock(); |
1167 | parent = rcu_dereference(ctx->parent_ctx); | 1183 | parent = rcu_dereference(ctx->parent_ctx); |
1168 | next_ctx = next->perf_event_ctxp; | 1184 | next_ctx = next->perf_event_ctxp; |
@@ -1515,7 +1531,6 @@ static void __perf_event_read(void *info) | |||
1515 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 1531 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
1516 | struct perf_event *event = info; | 1532 | struct perf_event *event = info; |
1517 | struct perf_event_context *ctx = event->ctx; | 1533 | struct perf_event_context *ctx = event->ctx; |
1518 | unsigned long flags; | ||
1519 | 1534 | ||
1520 | /* | 1535 | /* |
1521 | * If this is a task context, we need to check whether it is | 1536 | * If this is a task context, we need to check whether it is |
@@ -1527,12 +1542,12 @@ static void __perf_event_read(void *info) | |||
1527 | if (ctx->task && cpuctx->task_ctx != ctx) | 1542 | if (ctx->task && cpuctx->task_ctx != ctx) |
1528 | return; | 1543 | return; |
1529 | 1544 | ||
1530 | local_irq_save(flags); | 1545 | spin_lock(&ctx->lock); |
1531 | if (ctx->is_active) | 1546 | update_context_time(ctx); |
1532 | update_context_time(ctx); | ||
1533 | event->pmu->read(event); | ||
1534 | update_event_times(event); | 1547 | update_event_times(event); |
1535 | local_irq_restore(flags); | 1548 | spin_unlock(&ctx->lock); |
1549 | |||
1550 | event->pmu->read(event); | ||
1536 | } | 1551 | } |
1537 | 1552 | ||
1538 | static u64 perf_event_read(struct perf_event *event) | 1553 | static u64 perf_event_read(struct perf_event *event) |
@@ -1545,7 +1560,13 @@ static u64 perf_event_read(struct perf_event *event) | |||
1545 | smp_call_function_single(event->oncpu, | 1560 | smp_call_function_single(event->oncpu, |
1546 | __perf_event_read, event, 1); | 1561 | __perf_event_read, event, 1); |
1547 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { | 1562 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { |
1563 | struct perf_event_context *ctx = event->ctx; | ||
1564 | unsigned long flags; | ||
1565 | |||
1566 | spin_lock_irqsave(&ctx->lock, flags); | ||
1567 | update_context_time(ctx); | ||
1548 | update_event_times(event); | 1568 | update_event_times(event); |
1569 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
1549 | } | 1570 | } |
1550 | 1571 | ||
1551 | return atomic64_read(&event->count); | 1572 | return atomic64_read(&event->count); |
@@ -1658,6 +1679,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) | |||
1658 | return ERR_PTR(err); | 1679 | return ERR_PTR(err); |
1659 | } | 1680 | } |
1660 | 1681 | ||
1682 | static void perf_event_free_filter(struct perf_event *event); | ||
1683 | |||
1661 | static void free_event_rcu(struct rcu_head *head) | 1684 | static void free_event_rcu(struct rcu_head *head) |
1662 | { | 1685 | { |
1663 | struct perf_event *event; | 1686 | struct perf_event *event; |
@@ -1665,6 +1688,7 @@ static void free_event_rcu(struct rcu_head *head) | |||
1665 | event = container_of(head, struct perf_event, rcu_head); | 1688 | event = container_of(head, struct perf_event, rcu_head); |
1666 | if (event->ns) | 1689 | if (event->ns) |
1667 | put_pid_ns(event->ns); | 1690 | put_pid_ns(event->ns); |
1691 | perf_event_free_filter(event); | ||
1668 | kfree(event); | 1692 | kfree(event); |
1669 | } | 1693 | } |
1670 | 1694 | ||
@@ -1696,16 +1720,10 @@ static void free_event(struct perf_event *event) | |||
1696 | call_rcu(&event->rcu_head, free_event_rcu); | 1720 | call_rcu(&event->rcu_head, free_event_rcu); |
1697 | } | 1721 | } |
1698 | 1722 | ||
1699 | /* | 1723 | int perf_event_release_kernel(struct perf_event *event) |
1700 | * Called when the last reference to the file is gone. | ||
1701 | */ | ||
1702 | static int perf_release(struct inode *inode, struct file *file) | ||
1703 | { | 1724 | { |
1704 | struct perf_event *event = file->private_data; | ||
1705 | struct perf_event_context *ctx = event->ctx; | 1725 | struct perf_event_context *ctx = event->ctx; |
1706 | 1726 | ||
1707 | file->private_data = NULL; | ||
1708 | |||
1709 | WARN_ON_ONCE(ctx->parent_ctx); | 1727 | WARN_ON_ONCE(ctx->parent_ctx); |
1710 | mutex_lock(&ctx->mutex); | 1728 | mutex_lock(&ctx->mutex); |
1711 | perf_event_remove_from_context(event); | 1729 | perf_event_remove_from_context(event); |
@@ -1720,6 +1738,19 @@ static int perf_release(struct inode *inode, struct file *file) | |||
1720 | 1738 | ||
1721 | return 0; | 1739 | return 0; |
1722 | } | 1740 | } |
1741 | EXPORT_SYMBOL_GPL(perf_event_release_kernel); | ||
1742 | |||
1743 | /* | ||
1744 | * Called when the last reference to the file is gone. | ||
1745 | */ | ||
1746 | static int perf_release(struct inode *inode, struct file *file) | ||
1747 | { | ||
1748 | struct perf_event *event = file->private_data; | ||
1749 | |||
1750 | file->private_data = NULL; | ||
1751 | |||
1752 | return perf_event_release_kernel(event); | ||
1753 | } | ||
1723 | 1754 | ||
1724 | static int perf_event_read_size(struct perf_event *event) | 1755 | static int perf_event_read_size(struct perf_event *event) |
1725 | { | 1756 | { |
@@ -1746,91 +1777,94 @@ static int perf_event_read_size(struct perf_event *event) | |||
1746 | return size; | 1777 | return size; |
1747 | } | 1778 | } |
1748 | 1779 | ||
1749 | static u64 perf_event_read_value(struct perf_event *event) | 1780 | u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) |
1750 | { | 1781 | { |
1751 | struct perf_event *child; | 1782 | struct perf_event *child; |
1752 | u64 total = 0; | 1783 | u64 total = 0; |
1753 | 1784 | ||
1785 | *enabled = 0; | ||
1786 | *running = 0; | ||
1787 | |||
1788 | mutex_lock(&event->child_mutex); | ||
1754 | total += perf_event_read(event); | 1789 | total += perf_event_read(event); |
1755 | list_for_each_entry(child, &event->child_list, child_list) | 1790 | *enabled += event->total_time_enabled + |
1791 | atomic64_read(&event->child_total_time_enabled); | ||
1792 | *running += event->total_time_running + | ||
1793 | atomic64_read(&event->child_total_time_running); | ||
1794 | |||
1795 | list_for_each_entry(child, &event->child_list, child_list) { | ||
1756 | total += perf_event_read(child); | 1796 | total += perf_event_read(child); |
1797 | *enabled += child->total_time_enabled; | ||
1798 | *running += child->total_time_running; | ||
1799 | } | ||
1800 | mutex_unlock(&event->child_mutex); | ||
1757 | 1801 | ||
1758 | return total; | 1802 | return total; |
1759 | } | 1803 | } |
1760 | 1804 | EXPORT_SYMBOL_GPL(perf_event_read_value); | |
1761 | static int perf_event_read_entry(struct perf_event *event, | ||
1762 | u64 read_format, char __user *buf) | ||
1763 | { | ||
1764 | int n = 0, count = 0; | ||
1765 | u64 values[2]; | ||
1766 | |||
1767 | values[n++] = perf_event_read_value(event); | ||
1768 | if (read_format & PERF_FORMAT_ID) | ||
1769 | values[n++] = primary_event_id(event); | ||
1770 | |||
1771 | count = n * sizeof(u64); | ||
1772 | |||
1773 | if (copy_to_user(buf, values, count)) | ||
1774 | return -EFAULT; | ||
1775 | |||
1776 | return count; | ||
1777 | } | ||
1778 | 1805 | ||
1779 | static int perf_event_read_group(struct perf_event *event, | 1806 | static int perf_event_read_group(struct perf_event *event, |
1780 | u64 read_format, char __user *buf) | 1807 | u64 read_format, char __user *buf) |
1781 | { | 1808 | { |
1782 | struct perf_event *leader = event->group_leader, *sub; | 1809 | struct perf_event *leader = event->group_leader, *sub; |
1783 | int n = 0, size = 0, err = -EFAULT; | 1810 | int n = 0, size = 0, ret = -EFAULT; |
1784 | u64 values[3]; | 1811 | struct perf_event_context *ctx = leader->ctx; |
1812 | u64 values[5]; | ||
1813 | u64 count, enabled, running; | ||
1814 | |||
1815 | mutex_lock(&ctx->mutex); | ||
1816 | count = perf_event_read_value(leader, &enabled, &running); | ||
1785 | 1817 | ||
1786 | values[n++] = 1 + leader->nr_siblings; | 1818 | values[n++] = 1 + leader->nr_siblings; |
1787 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 1819 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
1788 | values[n++] = leader->total_time_enabled + | 1820 | values[n++] = enabled; |
1789 | atomic64_read(&leader->child_total_time_enabled); | 1821 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
1790 | } | 1822 | values[n++] = running; |
1791 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 1823 | values[n++] = count; |
1792 | values[n++] = leader->total_time_running + | 1824 | if (read_format & PERF_FORMAT_ID) |
1793 | atomic64_read(&leader->child_total_time_running); | 1825 | values[n++] = primary_event_id(leader); |
1794 | } | ||
1795 | 1826 | ||
1796 | size = n * sizeof(u64); | 1827 | size = n * sizeof(u64); |
1797 | 1828 | ||
1798 | if (copy_to_user(buf, values, size)) | 1829 | if (copy_to_user(buf, values, size)) |
1799 | return -EFAULT; | 1830 | goto unlock; |
1800 | |||
1801 | err = perf_event_read_entry(leader, read_format, buf + size); | ||
1802 | if (err < 0) | ||
1803 | return err; | ||
1804 | 1831 | ||
1805 | size += err; | 1832 | ret = size; |
1806 | 1833 | ||
1807 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | 1834 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { |
1808 | err = perf_event_read_entry(sub, read_format, | 1835 | n = 0; |
1809 | buf + size); | 1836 | |
1810 | if (err < 0) | 1837 | values[n++] = perf_event_read_value(sub, &enabled, &running); |
1811 | return err; | 1838 | if (read_format & PERF_FORMAT_ID) |
1839 | values[n++] = primary_event_id(sub); | ||
1840 | |||
1841 | size = n * sizeof(u64); | ||
1812 | 1842 | ||
1813 | size += err; | 1843 | if (copy_to_user(buf + ret, values, size)) { |
1844 | ret = -EFAULT; | ||
1845 | goto unlock; | ||
1846 | } | ||
1847 | |||
1848 | ret += size; | ||
1814 | } | 1849 | } |
1850 | unlock: | ||
1851 | mutex_unlock(&ctx->mutex); | ||
1815 | 1852 | ||
1816 | return size; | 1853 | return ret; |
1817 | } | 1854 | } |
1818 | 1855 | ||
1819 | static int perf_event_read_one(struct perf_event *event, | 1856 | static int perf_event_read_one(struct perf_event *event, |
1820 | u64 read_format, char __user *buf) | 1857 | u64 read_format, char __user *buf) |
1821 | { | 1858 | { |
1859 | u64 enabled, running; | ||
1822 | u64 values[4]; | 1860 | u64 values[4]; |
1823 | int n = 0; | 1861 | int n = 0; |
1824 | 1862 | ||
1825 | values[n++] = perf_event_read_value(event); | 1863 | values[n++] = perf_event_read_value(event, &enabled, &running); |
1826 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 1864 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
1827 | values[n++] = event->total_time_enabled + | 1865 | values[n++] = enabled; |
1828 | atomic64_read(&event->child_total_time_enabled); | 1866 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
1829 | } | 1867 | values[n++] = running; |
1830 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1831 | values[n++] = event->total_time_running + | ||
1832 | atomic64_read(&event->child_total_time_running); | ||
1833 | } | ||
1834 | if (read_format & PERF_FORMAT_ID) | 1868 | if (read_format & PERF_FORMAT_ID) |
1835 | values[n++] = primary_event_id(event); | 1869 | values[n++] = primary_event_id(event); |
1836 | 1870 | ||
@@ -1861,12 +1895,10 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count) | |||
1861 | return -ENOSPC; | 1895 | return -ENOSPC; |
1862 | 1896 | ||
1863 | WARN_ON_ONCE(event->ctx->parent_ctx); | 1897 | WARN_ON_ONCE(event->ctx->parent_ctx); |
1864 | mutex_lock(&event->child_mutex); | ||
1865 | if (read_format & PERF_FORMAT_GROUP) | 1898 | if (read_format & PERF_FORMAT_GROUP) |
1866 | ret = perf_event_read_group(event, read_format, buf); | 1899 | ret = perf_event_read_group(event, read_format, buf); |
1867 | else | 1900 | else |
1868 | ret = perf_event_read_one(event, read_format, buf); | 1901 | ret = perf_event_read_one(event, read_format, buf); |
1869 | mutex_unlock(&event->child_mutex); | ||
1870 | 1902 | ||
1871 | return ret; | 1903 | return ret; |
1872 | } | 1904 | } |
@@ -1974,7 +2006,8 @@ unlock: | |||
1974 | return ret; | 2006 | return ret; |
1975 | } | 2007 | } |
1976 | 2008 | ||
1977 | int perf_event_set_output(struct perf_event *event, int output_fd); | 2009 | static int perf_event_set_output(struct perf_event *event, int output_fd); |
2010 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); | ||
1978 | 2011 | ||
1979 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 2012 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
1980 | { | 2013 | { |
@@ -2002,6 +2035,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2002 | case PERF_EVENT_IOC_SET_OUTPUT: | 2035 | case PERF_EVENT_IOC_SET_OUTPUT: |
2003 | return perf_event_set_output(event, arg); | 2036 | return perf_event_set_output(event, arg); |
2004 | 2037 | ||
2038 | case PERF_EVENT_IOC_SET_FILTER: | ||
2039 | return perf_event_set_filter(event, (void __user *)arg); | ||
2040 | |||
2005 | default: | 2041 | default: |
2006 | return -ENOTTY; | 2042 | return -ENOTTY; |
2007 | } | 2043 | } |
@@ -2174,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data) | |||
2174 | perf_mmap_free_page((unsigned long)data->user_page); | 2210 | perf_mmap_free_page((unsigned long)data->user_page); |
2175 | for (i = 0; i < data->nr_pages; i++) | 2211 | for (i = 0; i < data->nr_pages; i++) |
2176 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | 2212 | perf_mmap_free_page((unsigned long)data->data_pages[i]); |
2213 | kfree(data); | ||
2177 | } | 2214 | } |
2178 | 2215 | ||
2179 | #else | 2216 | #else |
@@ -2214,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work) | |||
2214 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | 2251 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); |
2215 | 2252 | ||
2216 | vfree(base); | 2253 | vfree(base); |
2254 | kfree(data); | ||
2217 | } | 2255 | } |
2218 | 2256 | ||
2219 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2257 | static void perf_mmap_data_free(struct perf_mmap_data *data) |
@@ -2307,7 +2345,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) | |||
2307 | } | 2345 | } |
2308 | 2346 | ||
2309 | if (!data->watermark) | 2347 | if (!data->watermark) |
2310 | data->watermark = max_t(long, PAGE_SIZE, max_size / 2); | 2348 | data->watermark = max_size / 2; |
2311 | 2349 | ||
2312 | 2350 | ||
2313 | rcu_assign_pointer(event->data, data); | 2351 | rcu_assign_pointer(event->data, data); |
@@ -2319,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | |||
2319 | 2357 | ||
2320 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | 2358 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); |
2321 | perf_mmap_data_free(data); | 2359 | perf_mmap_data_free(data); |
2322 | kfree(data); | ||
2323 | } | 2360 | } |
2324 | 2361 | ||
2325 | static void perf_mmap_data_release(struct perf_event *event) | 2362 | static void perf_mmap_data_release(struct perf_event *event) |
@@ -2666,20 +2703,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle) | |||
2666 | static void perf_output_lock(struct perf_output_handle *handle) | 2703 | static void perf_output_lock(struct perf_output_handle *handle) |
2667 | { | 2704 | { |
2668 | struct perf_mmap_data *data = handle->data; | 2705 | struct perf_mmap_data *data = handle->data; |
2669 | int cpu; | 2706 | int cur, cpu = get_cpu(); |
2670 | 2707 | ||
2671 | handle->locked = 0; | 2708 | handle->locked = 0; |
2672 | 2709 | ||
2673 | local_irq_save(handle->flags); | 2710 | for (;;) { |
2674 | cpu = smp_processor_id(); | 2711 | cur = atomic_cmpxchg(&data->lock, -1, cpu); |
2675 | 2712 | if (cur == -1) { | |
2676 | if (in_nmi() && atomic_read(&data->lock) == cpu) | 2713 | handle->locked = 1; |
2677 | return; | 2714 | break; |
2715 | } | ||
2716 | if (cur == cpu) | ||
2717 | break; | ||
2678 | 2718 | ||
2679 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2680 | cpu_relax(); | 2719 | cpu_relax(); |
2681 | 2720 | } | |
2682 | handle->locked = 1; | ||
2683 | } | 2721 | } |
2684 | 2722 | ||
2685 | static void perf_output_unlock(struct perf_output_handle *handle) | 2723 | static void perf_output_unlock(struct perf_output_handle *handle) |
@@ -2725,7 +2763,7 @@ again: | |||
2725 | if (atomic_xchg(&data->wakeup, 0)) | 2763 | if (atomic_xchg(&data->wakeup, 0)) |
2726 | perf_output_wakeup(handle); | 2764 | perf_output_wakeup(handle); |
2727 | out: | 2765 | out: |
2728 | local_irq_restore(handle->flags); | 2766 | put_cpu(); |
2729 | } | 2767 | } |
2730 | 2768 | ||
2731 | void perf_output_copy(struct perf_output_handle *handle, | 2769 | void perf_output_copy(struct perf_output_handle *handle, |
@@ -3236,15 +3274,10 @@ static void perf_event_task_ctx(struct perf_event_context *ctx, | |||
3236 | { | 3274 | { |
3237 | struct perf_event *event; | 3275 | struct perf_event *event; |
3238 | 3276 | ||
3239 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3240 | return; | ||
3241 | |||
3242 | rcu_read_lock(); | ||
3243 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3277 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
3244 | if (perf_event_task_match(event)) | 3278 | if (perf_event_task_match(event)) |
3245 | perf_event_task_output(event, task_event); | 3279 | perf_event_task_output(event, task_event); |
3246 | } | 3280 | } |
3247 | rcu_read_unlock(); | ||
3248 | } | 3281 | } |
3249 | 3282 | ||
3250 | static void perf_event_task_event(struct perf_task_event *task_event) | 3283 | static void perf_event_task_event(struct perf_task_event *task_event) |
@@ -3252,11 +3285,11 @@ static void perf_event_task_event(struct perf_task_event *task_event) | |||
3252 | struct perf_cpu_context *cpuctx; | 3285 | struct perf_cpu_context *cpuctx; |
3253 | struct perf_event_context *ctx = task_event->task_ctx; | 3286 | struct perf_event_context *ctx = task_event->task_ctx; |
3254 | 3287 | ||
3288 | rcu_read_lock(); | ||
3255 | cpuctx = &get_cpu_var(perf_cpu_context); | 3289 | cpuctx = &get_cpu_var(perf_cpu_context); |
3256 | perf_event_task_ctx(&cpuctx->ctx, task_event); | 3290 | perf_event_task_ctx(&cpuctx->ctx, task_event); |
3257 | put_cpu_var(perf_cpu_context); | 3291 | put_cpu_var(perf_cpu_context); |
3258 | 3292 | ||
3259 | rcu_read_lock(); | ||
3260 | if (!ctx) | 3293 | if (!ctx) |
3261 | ctx = rcu_dereference(task_event->task->perf_event_ctxp); | 3294 | ctx = rcu_dereference(task_event->task->perf_event_ctxp); |
3262 | if (ctx) | 3295 | if (ctx) |
@@ -3348,15 +3381,10 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx, | |||
3348 | { | 3381 | { |
3349 | struct perf_event *event; | 3382 | struct perf_event *event; |
3350 | 3383 | ||
3351 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3352 | return; | ||
3353 | |||
3354 | rcu_read_lock(); | ||
3355 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3384 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
3356 | if (perf_event_comm_match(event)) | 3385 | if (perf_event_comm_match(event)) |
3357 | perf_event_comm_output(event, comm_event); | 3386 | perf_event_comm_output(event, comm_event); |
3358 | } | 3387 | } |
3359 | rcu_read_unlock(); | ||
3360 | } | 3388 | } |
3361 | 3389 | ||
3362 | static void perf_event_comm_event(struct perf_comm_event *comm_event) | 3390 | static void perf_event_comm_event(struct perf_comm_event *comm_event) |
@@ -3367,7 +3395,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
3367 | char comm[TASK_COMM_LEN]; | 3395 | char comm[TASK_COMM_LEN]; |
3368 | 3396 | ||
3369 | memset(comm, 0, sizeof(comm)); | 3397 | memset(comm, 0, sizeof(comm)); |
3370 | strncpy(comm, comm_event->task->comm, sizeof(comm)); | 3398 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); |
3371 | size = ALIGN(strlen(comm)+1, sizeof(u64)); | 3399 | size = ALIGN(strlen(comm)+1, sizeof(u64)); |
3372 | 3400 | ||
3373 | comm_event->comm = comm; | 3401 | comm_event->comm = comm; |
@@ -3375,11 +3403,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
3375 | 3403 | ||
3376 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 3404 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
3377 | 3405 | ||
3406 | rcu_read_lock(); | ||
3378 | cpuctx = &get_cpu_var(perf_cpu_context); | 3407 | cpuctx = &get_cpu_var(perf_cpu_context); |
3379 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | 3408 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); |
3380 | put_cpu_var(perf_cpu_context); | 3409 | put_cpu_var(perf_cpu_context); |
3381 | 3410 | ||
3382 | rcu_read_lock(); | ||
3383 | /* | 3411 | /* |
3384 | * doesn't really matter which of the child contexts the | 3412 | * doesn't really matter which of the child contexts the |
3385 | * events ends up in. | 3413 | * events ends up in. |
@@ -3472,15 +3500,10 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx, | |||
3472 | { | 3500 | { |
3473 | struct perf_event *event; | 3501 | struct perf_event *event; |
3474 | 3502 | ||
3475 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3476 | return; | ||
3477 | |||
3478 | rcu_read_lock(); | ||
3479 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3503 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
3480 | if (perf_event_mmap_match(event, mmap_event)) | 3504 | if (perf_event_mmap_match(event, mmap_event)) |
3481 | perf_event_mmap_output(event, mmap_event); | 3505 | perf_event_mmap_output(event, mmap_event); |
3482 | } | 3506 | } |
3483 | rcu_read_unlock(); | ||
3484 | } | 3507 | } |
3485 | 3508 | ||
3486 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | 3509 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) |
@@ -3536,11 +3559,11 @@ got_name: | |||
3536 | 3559 | ||
3537 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | 3560 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
3538 | 3561 | ||
3562 | rcu_read_lock(); | ||
3539 | cpuctx = &get_cpu_var(perf_cpu_context); | 3563 | cpuctx = &get_cpu_var(perf_cpu_context); |
3540 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); | 3564 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); |
3541 | put_cpu_var(perf_cpu_context); | 3565 | put_cpu_var(perf_cpu_context); |
3542 | 3566 | ||
3543 | rcu_read_lock(); | ||
3544 | /* | 3567 | /* |
3545 | * doesn't really matter which of the child contexts the | 3568 | * doesn't really matter which of the child contexts the |
3546 | * events ends up in. | 3569 | * events ends up in. |
@@ -3679,7 +3702,11 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, | |||
3679 | perf_event_disable(event); | 3702 | perf_event_disable(event); |
3680 | } | 3703 | } |
3681 | 3704 | ||
3682 | perf_event_output(event, nmi, data, regs); | 3705 | if (event->overflow_handler) |
3706 | event->overflow_handler(event, nmi, data, regs); | ||
3707 | else | ||
3708 | perf_event_output(event, nmi, data, regs); | ||
3709 | |||
3683 | return ret; | 3710 | return ret; |
3684 | } | 3711 | } |
3685 | 3712 | ||
@@ -3724,16 +3751,16 @@ again: | |||
3724 | return nr; | 3751 | return nr; |
3725 | } | 3752 | } |
3726 | 3753 | ||
3727 | static void perf_swevent_overflow(struct perf_event *event, | 3754 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, |
3728 | int nmi, struct perf_sample_data *data, | 3755 | int nmi, struct perf_sample_data *data, |
3729 | struct pt_regs *regs) | 3756 | struct pt_regs *regs) |
3730 | { | 3757 | { |
3731 | struct hw_perf_event *hwc = &event->hw; | 3758 | struct hw_perf_event *hwc = &event->hw; |
3732 | int throttle = 0; | 3759 | int throttle = 0; |
3733 | u64 overflow; | ||
3734 | 3760 | ||
3735 | data->period = event->hw.last_period; | 3761 | data->period = event->hw.last_period; |
3736 | overflow = perf_swevent_set_period(event); | 3762 | if (!overflow) |
3763 | overflow = perf_swevent_set_period(event); | ||
3737 | 3764 | ||
3738 | if (hwc->interrupts == MAX_INTERRUPTS) | 3765 | if (hwc->interrupts == MAX_INTERRUPTS) |
3739 | return; | 3766 | return; |
@@ -3766,14 +3793,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, | |||
3766 | 3793 | ||
3767 | atomic64_add(nr, &event->count); | 3794 | atomic64_add(nr, &event->count); |
3768 | 3795 | ||
3796 | if (!regs) | ||
3797 | return; | ||
3798 | |||
3769 | if (!hwc->sample_period) | 3799 | if (!hwc->sample_period) |
3770 | return; | 3800 | return; |
3771 | 3801 | ||
3772 | if (!regs) | 3802 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
3803 | return perf_swevent_overflow(event, 1, nmi, data, regs); | ||
3804 | |||
3805 | if (atomic64_add_negative(nr, &hwc->period_left)) | ||
3773 | return; | 3806 | return; |
3774 | 3807 | ||
3775 | if (!atomic64_add_negative(nr, &hwc->period_left)) | 3808 | perf_swevent_overflow(event, 0, nmi, data, regs); |
3776 | perf_swevent_overflow(event, nmi, data, regs); | ||
3777 | } | 3809 | } |
3778 | 3810 | ||
3779 | static int perf_swevent_is_counting(struct perf_event *event) | 3811 | static int perf_swevent_is_counting(struct perf_event *event) |
@@ -3806,25 +3838,44 @@ static int perf_swevent_is_counting(struct perf_event *event) | |||
3806 | return 1; | 3838 | return 1; |
3807 | } | 3839 | } |
3808 | 3840 | ||
3841 | static int perf_tp_event_match(struct perf_event *event, | ||
3842 | struct perf_sample_data *data); | ||
3843 | |||
3844 | static int perf_exclude_event(struct perf_event *event, | ||
3845 | struct pt_regs *regs) | ||
3846 | { | ||
3847 | if (regs) { | ||
3848 | if (event->attr.exclude_user && user_mode(regs)) | ||
3849 | return 1; | ||
3850 | |||
3851 | if (event->attr.exclude_kernel && !user_mode(regs)) | ||
3852 | return 1; | ||
3853 | } | ||
3854 | |||
3855 | return 0; | ||
3856 | } | ||
3857 | |||
3809 | static int perf_swevent_match(struct perf_event *event, | 3858 | static int perf_swevent_match(struct perf_event *event, |
3810 | enum perf_type_id type, | 3859 | enum perf_type_id type, |
3811 | u32 event_id, struct pt_regs *regs) | 3860 | u32 event_id, |
3861 | struct perf_sample_data *data, | ||
3862 | struct pt_regs *regs) | ||
3812 | { | 3863 | { |
3813 | if (!perf_swevent_is_counting(event)) | 3864 | if (!perf_swevent_is_counting(event)) |
3814 | return 0; | 3865 | return 0; |
3815 | 3866 | ||
3816 | if (event->attr.type != type) | 3867 | if (event->attr.type != type) |
3817 | return 0; | 3868 | return 0; |
3869 | |||
3818 | if (event->attr.config != event_id) | 3870 | if (event->attr.config != event_id) |
3819 | return 0; | 3871 | return 0; |
3820 | 3872 | ||
3821 | if (regs) { | 3873 | if (perf_exclude_event(event, regs)) |
3822 | if (event->attr.exclude_user && user_mode(regs)) | 3874 | return 0; |
3823 | return 0; | ||
3824 | 3875 | ||
3825 | if (event->attr.exclude_kernel && !user_mode(regs)) | 3876 | if (event->attr.type == PERF_TYPE_TRACEPOINT && |
3826 | return 0; | 3877 | !perf_tp_event_match(event, data)) |
3827 | } | 3878 | return 0; |
3828 | 3879 | ||
3829 | return 1; | 3880 | return 1; |
3830 | } | 3881 | } |
@@ -3837,49 +3888,59 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, | |||
3837 | { | 3888 | { |
3838 | struct perf_event *event; | 3889 | struct perf_event *event; |
3839 | 3890 | ||
3840 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3841 | return; | ||
3842 | |||
3843 | rcu_read_lock(); | ||
3844 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3891 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
3845 | if (perf_swevent_match(event, type, event_id, regs)) | 3892 | if (perf_swevent_match(event, type, event_id, data, regs)) |
3846 | perf_swevent_add(event, nr, nmi, data, regs); | 3893 | perf_swevent_add(event, nr, nmi, data, regs); |
3847 | } | 3894 | } |
3848 | rcu_read_unlock(); | ||
3849 | } | 3895 | } |
3850 | 3896 | ||
3851 | static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) | 3897 | int perf_swevent_get_recursion_context(void) |
3852 | { | 3898 | { |
3899 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | ||
3900 | int rctx; | ||
3901 | |||
3853 | if (in_nmi()) | 3902 | if (in_nmi()) |
3854 | return &cpuctx->recursion[3]; | 3903 | rctx = 3; |
3904 | else if (in_irq()) | ||
3905 | rctx = 2; | ||
3906 | else if (in_softirq()) | ||
3907 | rctx = 1; | ||
3908 | else | ||
3909 | rctx = 0; | ||
3910 | |||
3911 | if (cpuctx->recursion[rctx]) { | ||
3912 | put_cpu_var(perf_cpu_context); | ||
3913 | return -1; | ||
3914 | } | ||
3855 | 3915 | ||
3856 | if (in_irq()) | 3916 | cpuctx->recursion[rctx]++; |
3857 | return &cpuctx->recursion[2]; | 3917 | barrier(); |
3858 | 3918 | ||
3859 | if (in_softirq()) | 3919 | return rctx; |
3860 | return &cpuctx->recursion[1]; | 3920 | } |
3921 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | ||
3861 | 3922 | ||
3862 | return &cpuctx->recursion[0]; | 3923 | void perf_swevent_put_recursion_context(int rctx) |
3924 | { | ||
3925 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
3926 | barrier(); | ||
3927 | cpuctx->recursion[rctx]--; | ||
3928 | put_cpu_var(perf_cpu_context); | ||
3863 | } | 3929 | } |
3930 | EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); | ||
3864 | 3931 | ||
3865 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | 3932 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
3866 | u64 nr, int nmi, | 3933 | u64 nr, int nmi, |
3867 | struct perf_sample_data *data, | 3934 | struct perf_sample_data *data, |
3868 | struct pt_regs *regs) | 3935 | struct pt_regs *regs) |
3869 | { | 3936 | { |
3870 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | 3937 | struct perf_cpu_context *cpuctx; |
3871 | int *recursion = perf_swevent_recursion_context(cpuctx); | ||
3872 | struct perf_event_context *ctx; | 3938 | struct perf_event_context *ctx; |
3873 | 3939 | ||
3874 | if (*recursion) | 3940 | cpuctx = &__get_cpu_var(perf_cpu_context); |
3875 | goto out; | 3941 | rcu_read_lock(); |
3876 | |||
3877 | (*recursion)++; | ||
3878 | barrier(); | ||
3879 | |||
3880 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, | 3942 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, |
3881 | nr, nmi, data, regs); | 3943 | nr, nmi, data, regs); |
3882 | rcu_read_lock(); | ||
3883 | /* | 3944 | /* |
3884 | * doesn't really matter which of the child contexts the | 3945 | * doesn't really matter which of the child contexts the |
3885 | * events ends up in. | 3946 | * events ends up in. |
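The hunk above replaces the old per-cpu recursion pointer with an exported get/put pair, so callers outside perf_event.c (tracepoints, breakpoints) can take the same guard. Below is a stripped-down, single-CPU model of that guard, not kernel code: the context probe and the missing compiler barriers stand in for in_nmi()/in_irq()/in_softirq() and barrier().

/* Minimal model of the four-level recursion guard: one counter per
 * context (task, softirq, hardirq, NMI).  An event raised while the
 * same context is already instrumenting itself is simply dropped. */
#include <stdio.h>

enum ctx_level { CTX_TASK, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI, CTX_MAX };

static int recursion[CTX_MAX];

/* Stand-in for the in_nmi()/in_irq()/in_softirq() tests. */
static enum ctx_level current_context(void)
{
        return CTX_TASK;
}

static int get_recursion_context(void)
{
        int rctx = current_context();

        if (recursion[rctx])
                return -1;              /* already inside a swevent: drop */
        recursion[rctx]++;              /* kernel adds barrier() here     */
        return rctx;
}

static void put_recursion_context(int rctx)
{
        recursion[rctx]--;              /* kernel adds barrier() here too */
}

int main(void)
{
        int rctx = get_recursion_context();

        if (rctx >= 0) {
                printf("event accepted in context %d\n", rctx);
                put_recursion_context(rctx);
        }
        return 0;
}

Returning an index instead of a pointer is what lets the context be handed across translation units, which the __perf_sw_event() rewrite in the next hunk relies on.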
@@ -3888,23 +3949,24 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | |||
3888 | if (ctx) | 3949 | if (ctx) |
3889 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); | 3950 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); |
3890 | rcu_read_unlock(); | 3951 | rcu_read_unlock(); |
3891 | |||
3892 | barrier(); | ||
3893 | (*recursion)--; | ||
3894 | |||
3895 | out: | ||
3896 | put_cpu_var(perf_cpu_context); | ||
3897 | } | 3952 | } |
3898 | 3953 | ||
3899 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 3954 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, |
3900 | struct pt_regs *regs, u64 addr) | 3955 | struct pt_regs *regs, u64 addr) |
3901 | { | 3956 | { |
3902 | struct perf_sample_data data = { | 3957 | struct perf_sample_data data; |
3903 | .addr = addr, | 3958 | int rctx; |
3904 | }; | ||
3905 | 3959 | ||
3906 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, | 3960 | rctx = perf_swevent_get_recursion_context(); |
3907 | &data, regs); | 3961 | if (rctx < 0) |
3962 | return; | ||
3963 | |||
3964 | data.addr = addr; | ||
3965 | data.raw = NULL; | ||
3966 | |||
3967 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); | ||
3968 | |||
3969 | perf_swevent_put_recursion_context(rctx); | ||
3908 | } | 3970 | } |
3909 | 3971 | ||
3910 | static void perf_swevent_read(struct perf_event *event) | 3972 | static void perf_swevent_read(struct perf_event *event) |
@@ -3949,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
3949 | event->pmu->read(event); | 4011 | event->pmu->read(event); |
3950 | 4012 | ||
3951 | data.addr = 0; | 4013 | data.addr = 0; |
4014 | data.period = event->hw.last_period; | ||
3952 | regs = get_irq_regs(); | 4015 | regs = get_irq_regs(); |
3953 | /* | 4016 | /* |
3954 | * In case we exclude kernel IPs or are somehow not in interrupt | 4017 | * In case we exclude kernel IPs or are somehow not in interrupt |
@@ -4108,6 +4171,7 @@ static const struct pmu perf_ops_task_clock = { | |||
4108 | }; | 4171 | }; |
4109 | 4172 | ||
4110 | #ifdef CONFIG_EVENT_PROFILE | 4173 | #ifdef CONFIG_EVENT_PROFILE |
4174 | |||
4111 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | 4175 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, |
4112 | int entry_size) | 4176 | int entry_size) |
4113 | { | 4177 | { |
@@ -4126,13 +4190,21 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | |||
4126 | if (!regs) | 4190 | if (!regs) |
4127 | regs = task_pt_regs(current); | 4191 | regs = task_pt_regs(current); |
4128 | 4192 | ||
4193 | /* Trace events already protected against recursion */ | ||
4129 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, | 4194 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, |
4130 | &data, regs); | 4195 | &data, regs); |
4131 | } | 4196 | } |
4132 | EXPORT_SYMBOL_GPL(perf_tp_event); | 4197 | EXPORT_SYMBOL_GPL(perf_tp_event); |
4133 | 4198 | ||
4134 | extern int ftrace_profile_enable(int); | 4199 | static int perf_tp_event_match(struct perf_event *event, |
4135 | extern void ftrace_profile_disable(int); | 4200 | struct perf_sample_data *data) |
4201 | { | ||
4202 | void *record = data->raw->data; | ||
4203 | |||
4204 | if (likely(!event->filter) || filter_match_preds(event->filter, record)) | ||
4205 | return 1; | ||
4206 | return 0; | ||
4207 | } | ||
4136 | 4208 | ||
4137 | static void tp_perf_event_destroy(struct perf_event *event) | 4209 | static void tp_perf_event_destroy(struct perf_event *event) |
4138 | { | 4210 | { |
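perf_tp_event() now feeds every tracepoint sample through perf_tp_event_match(), so a filter attached to the event sees the raw record before it is counted. A hypothetical kernel-side caller would look roughly like the sketch below; the record layout and the event id 42 are illustrative, not taken from any real trace event.

#include <linux/perf_event.h>
#include <linux/sched.h>

/* Hypothetical trace-event profiling hook (sketch only). */
struct my_trace_entry {
        unsigned short  type;           /* event id, as ftrace records it */
        int             pid;
        unsigned long   arg;            /* field a filter could test      */
};

static void my_event_profile_hook(unsigned long arg)
{
        struct my_trace_entry rec = {
                .type   = 42,
                .pid    = current->pid,
                .arg    = arg,
        };

        /* addr = 0, count = 1; the record/entry_size pair becomes the raw
         * sample that perf_tp_event_match() hands to filter_match_preds(). */
        perf_tp_event(42, 0, 1, &rec, sizeof(rec));
}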
@@ -4157,11 +4229,99 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) | |||
4157 | 4229 | ||
4158 | return &perf_ops_generic; | 4230 | return &perf_ops_generic; |
4159 | } | 4231 | } |
4232 | |||
4233 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | ||
4234 | { | ||
4235 | char *filter_str; | ||
4236 | int ret; | ||
4237 | |||
4238 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | ||
4239 | return -EINVAL; | ||
4240 | |||
4241 | filter_str = strndup_user(arg, PAGE_SIZE); | ||
4242 | if (IS_ERR(filter_str)) | ||
4243 | return PTR_ERR(filter_str); | ||
4244 | |||
4245 | ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); | ||
4246 | |||
4247 | kfree(filter_str); | ||
4248 | return ret; | ||
4249 | } | ||
4250 | |||
4251 | static void perf_event_free_filter(struct perf_event *event) | ||
4252 | { | ||
4253 | ftrace_profile_free_filter(event); | ||
4254 | } | ||
4255 | |||
4160 | #else | 4256 | #else |
4257 | |||
4258 | static int perf_tp_event_match(struct perf_event *event, | ||
4259 | struct perf_sample_data *data) | ||
4260 | { | ||
4261 | return 1; | ||
4262 | } | ||
4263 | |||
4161 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | 4264 | static const struct pmu *tp_perf_event_init(struct perf_event *event) |
4162 | { | 4265 | { |
4163 | return NULL; | 4266 | return NULL; |
4164 | } | 4267 | } |
4268 | |||
4269 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | ||
4270 | { | ||
4271 | return -ENOENT; | ||
4272 | } | ||
4273 | |||
4274 | static void perf_event_free_filter(struct perf_event *event) | ||
4275 | { | ||
4276 | } | ||
4277 | |||
4278 | #endif /* CONFIG_EVENT_PROFILE */ | ||
4279 | |||
4280 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
4281 | static void bp_perf_event_destroy(struct perf_event *event) | ||
4282 | { | ||
4283 | release_bp_slot(event); | ||
4284 | } | ||
4285 | |||
4286 | static const struct pmu *bp_perf_event_init(struct perf_event *bp) | ||
4287 | { | ||
4288 | int err; | ||
4289 | /* | ||
4290 | * The breakpoint is already filled if we haven't created the counter | ||
4291 | * through perf syscall | ||
4292 | * FIXME: manage to get triggered to NULL if it comes from syscalls | ||
4293 | */ | ||
4294 | if (!bp->callback) | ||
4295 | err = register_perf_hw_breakpoint(bp); | ||
4296 | else | ||
4297 | err = __register_perf_hw_breakpoint(bp); | ||
4298 | if (err) | ||
4299 | return ERR_PTR(err); | ||
4300 | |||
4301 | bp->destroy = bp_perf_event_destroy; | ||
4302 | |||
4303 | return &perf_ops_bp; | ||
4304 | } | ||
4305 | |||
4306 | void perf_bp_event(struct perf_event *bp, void *data) | ||
4307 | { | ||
4308 | struct perf_sample_data sample; | ||
4309 | struct pt_regs *regs = data; | ||
4310 | |||
4311 | sample.addr = bp->attr.bp_addr; | ||
4312 | |||
4313 | if (!perf_exclude_event(bp, regs)) | ||
4314 | perf_swevent_add(bp, 1, 1, &sample, regs); | ||
4315 | } | ||
4316 | #else | ||
4317 | static const struct pmu *bp_perf_event_init(struct perf_event *bp) | ||
4318 | { | ||
4319 | return NULL; | ||
4320 | } | ||
4321 | |||
4322 | void perf_bp_event(struct perf_event *bp, void *regs) | ||
4323 | { | ||
4324 | } | ||
4165 | #endif | 4325 | #endif |
4166 | 4326 | ||
4167 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 4327 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
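perf_event_set_filter() is reached from the perf_event file descriptor; assuming the ioctl request is PERF_EVENT_IOC_SET_FILTER (not shown in this hunk), a user-space caller would attach a filter to a tracepoint counter roughly as below. The tracepoint id 1234 and the filter expression are placeholders; the id would normally be read from the tracepoint's debugfs "id" file.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type   = PERF_TYPE_TRACEPOINT;
        attr.size   = sizeof(attr);
        attr.config = 1234;     /* placeholder: tracepoint id from debugfs */

        /* No glibc wrapper exists for perf_event_open. */
        fd = syscall(__NR_perf_event_open, &attr, 0 /* pid: self */,
                     -1 /* any cpu */, -1 /* no group */, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* The kernel copies the string with strndup_user() and hands it to
         * ftrace_profile_set_filter(); non-tracepoint events get -EINVAL,
         * as perf_event_set_filter() above shows. */
        if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "arg > 100"))
                perror("PERF_EVENT_IOC_SET_FILTER");

        close(fd);
        return 0;
}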
@@ -4208,6 +4368,8 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event) | |||
4208 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | 4368 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: |
4209 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | 4369 | case PERF_COUNT_SW_CONTEXT_SWITCHES: |
4210 | case PERF_COUNT_SW_CPU_MIGRATIONS: | 4370 | case PERF_COUNT_SW_CPU_MIGRATIONS: |
4371 | case PERF_COUNT_SW_ALIGNMENT_FAULTS: | ||
4372 | case PERF_COUNT_SW_EMULATION_FAULTS: | ||
4211 | if (!event->parent) { | 4373 | if (!event->parent) { |
4212 | atomic_inc(&perf_swevent_enabled[event_id]); | 4374 | atomic_inc(&perf_swevent_enabled[event_id]); |
4213 | event->destroy = sw_perf_event_destroy; | 4375 | event->destroy = sw_perf_event_destroy; |
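The two new software event ids give architectures a standard hook for counting fixups. An arch's alignment-fault handler would bump the counter through the perf_sw_event() wrapper (assumed here from include/linux/perf_event.h), roughly:

#include <linux/perf_event.h>

/* Hypothetical arch fixup path: count the fault, then emulate the access. */
static int my_arch_fixup_alignment(struct pt_regs *regs, unsigned long addr)
{
        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0 /* !nmi */,
                      regs, addr);

        /* ... the actual fixup/emulation would go here ... */
        return 0;
}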
@@ -4228,6 +4390,7 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4228 | struct perf_event_context *ctx, | 4390 | struct perf_event_context *ctx, |
4229 | struct perf_event *group_leader, | 4391 | struct perf_event *group_leader, |
4230 | struct perf_event *parent_event, | 4392 | struct perf_event *parent_event, |
4393 | perf_callback_t callback, | ||
4231 | gfp_t gfpflags) | 4394 | gfp_t gfpflags) |
4232 | { | 4395 | { |
4233 | const struct pmu *pmu; | 4396 | const struct pmu *pmu; |
@@ -4270,6 +4433,11 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4270 | 4433 | ||
4271 | event->state = PERF_EVENT_STATE_INACTIVE; | 4434 | event->state = PERF_EVENT_STATE_INACTIVE; |
4272 | 4435 | ||
4436 | if (!callback && parent_event) | ||
4437 | callback = parent_event->callback; | ||
4438 | |||
4439 | event->callback = callback; | ||
4440 | |||
4273 | if (attr->disabled) | 4441 | if (attr->disabled) |
4274 | event->state = PERF_EVENT_STATE_OFF; | 4442 | event->state = PERF_EVENT_STATE_OFF; |
4275 | 4443 | ||
@@ -4304,6 +4472,11 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4304 | pmu = tp_perf_event_init(event); | 4472 | pmu = tp_perf_event_init(event); |
4305 | break; | 4473 | break; |
4306 | 4474 | ||
4475 | case PERF_TYPE_BREAKPOINT: | ||
4476 | pmu = bp_perf_event_init(event); | ||
4477 | break; | ||
4478 | |||
4479 | |||
4307 | default: | 4480 | default: |
4308 | break; | 4481 | break; |
4309 | } | 4482 | } |
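With the new PERF_TYPE_BREAKPOINT case, a breakpoint counter is requested through the same perf_event_attr as every other event type. The bp_addr/bp_type/bp_len fields and the HW_BREAKPOINT_* constants used below come from the hw_breakpoint rework elsewhere in this series and are assumed, not shown in this hunk.

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/string.h>

/* Sketch: describe a 4-byte write watchpoint on some kernel address.
 * The attr is consumed by perf_event_alloc() -> bp_perf_event_init(). */
static void my_fill_bp_attr(struct perf_event_attr *attr, void *kaddr)
{
        memset(attr, 0, sizeof(*attr));
        attr->type          = PERF_TYPE_BREAKPOINT;
        attr->size          = sizeof(*attr);
        attr->bp_addr       = (unsigned long)kaddr;
        attr->bp_type       = HW_BREAKPOINT_W;
        attr->bp_len        = HW_BREAKPOINT_LEN_4;
        attr->sample_period = 1;        /* report every hit */
}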
@@ -4416,7 +4589,7 @@ err_size: | |||
4416 | goto out; | 4589 | goto out; |
4417 | } | 4590 | } |
4418 | 4591 | ||
4419 | int perf_event_set_output(struct perf_event *event, int output_fd) | 4592 | static int perf_event_set_output(struct perf_event *event, int output_fd) |
4420 | { | 4593 | { |
4421 | struct perf_event *output_event = NULL; | 4594 | struct perf_event *output_event = NULL; |
4422 | struct file *output_file = NULL; | 4595 | struct file *output_file = NULL; |
@@ -4546,7 +4719,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
4546 | } | 4719 | } |
4547 | 4720 | ||
4548 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, | 4721 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, |
4549 | NULL, GFP_KERNEL); | 4722 | NULL, NULL, GFP_KERNEL); |
4550 | err = PTR_ERR(event); | 4723 | err = PTR_ERR(event); |
4551 | if (IS_ERR(event)) | 4724 | if (IS_ERR(event)) |
4552 | goto err_put_context; | 4725 | goto err_put_context; |
@@ -4594,6 +4767,60 @@ err_put_context: | |||
4594 | return err; | 4767 | return err; |
4595 | } | 4768 | } |
4596 | 4769 | ||
4770 | /** | ||
4771 | * perf_event_create_kernel_counter | ||
4772 | * | ||
4773 | * @attr: attributes of the counter to create | ||
4774 | * @cpu: cpu on which the counter is bound | ||
4775 | * @pid: task to profile | ||
4776 | */ | ||
4777 | struct perf_event * | ||
4778 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | ||
4779 | pid_t pid, perf_callback_t callback) | ||
4780 | { | ||
4781 | struct perf_event *event; | ||
4782 | struct perf_event_context *ctx; | ||
4783 | int err; | ||
4784 | |||
4785 | /* | ||
4786 | * Get the target context (task or percpu): | ||
4787 | */ | ||
4788 | |||
4789 | ctx = find_get_context(pid, cpu); | ||
4790 | if (IS_ERR(ctx)) { | ||
4791 | err = PTR_ERR(ctx); | ||
4792 | goto err_exit; | ||
4793 | } | ||
4794 | |||
4795 | event = perf_event_alloc(attr, cpu, ctx, NULL, | ||
4796 | NULL, callback, GFP_KERNEL); | ||
4797 | if (IS_ERR(event)) { | ||
4798 | err = PTR_ERR(event); | ||
4799 | goto err_put_context; | ||
4800 | } | ||
4801 | |||
4802 | event->filp = NULL; | ||
4803 | WARN_ON_ONCE(ctx->parent_ctx); | ||
4804 | mutex_lock(&ctx->mutex); | ||
4805 | perf_install_in_context(ctx, event, cpu); | ||
4806 | ++ctx->generation; | ||
4807 | mutex_unlock(&ctx->mutex); | ||
4808 | |||
4809 | event->owner = current; | ||
4810 | get_task_struct(current); | ||
4811 | mutex_lock(¤t->perf_event_mutex); | ||
4812 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); | ||
4813 | mutex_unlock(¤t->perf_event_mutex); | ||
4814 | |||
4815 | return event; | ||
4816 | |||
4817 | err_put_context: | ||
4818 | put_ctx(ctx); | ||
4819 | err_exit: | ||
4820 | return ERR_PTR(err); | ||
4821 | } | ||
4822 | EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); | ||
4823 | |||
4597 | /* | 4824 | /* |
4598 | * inherit a event from parent task to child task: | 4825 | * inherit a event from parent task to child task: |
4599 | */ | 4826 | */ |
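A minimal in-kernel user of the new helper might look like the sketch below, under the assumption that perf_callback_t is a plain (struct perf_event *, void *) callback as the breakpoint code above uses it; teardown and error paths are omitted.

#include <linux/perf_event.h>
#include <linux/err.h>

static struct perf_event *my_event;

/* Assumed perf_callback_t shape; the typedef itself is not in this hunk. */
static void my_event_callback(struct perf_event *event, void *data)
{
        /* runs when the counter reaches its sample period */
}

static int my_counter_start(void)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_SOFTWARE,
                .config         = PERF_COUNT_SW_CPU_CLOCK,
                .size           = sizeof(attr),
                .sample_period  = 10000000,     /* ~10ms of CPU time */
        };

        /* cpu 0, pid -1: a per-CPU counter owned by the kernel. */
        my_event = perf_event_create_kernel_counter(&attr, 0, -1,
                                                    my_event_callback);
        if (IS_ERR(my_event))
                return PTR_ERR(my_event);
        return 0;
}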
@@ -4619,7 +4846,7 @@ inherit_event(struct perf_event *parent_event, | |||
4619 | child_event = perf_event_alloc(&parent_event->attr, | 4846 | child_event = perf_event_alloc(&parent_event->attr, |
4620 | parent_event->cpu, child_ctx, | 4847 | parent_event->cpu, child_ctx, |
4621 | group_leader, parent_event, | 4848 | group_leader, parent_event, |
4622 | GFP_KERNEL); | 4849 | NULL, GFP_KERNEL); |
4623 | if (IS_ERR(child_event)) | 4850 | if (IS_ERR(child_event)) |
4624 | return child_event; | 4851 | return child_event; |
4625 | get_ctx(child_ctx); | 4852 | get_ctx(child_ctx); |
@@ -4637,6 +4864,8 @@ inherit_event(struct perf_event *parent_event, | |||
4637 | if (parent_event->attr.freq) | 4864 | if (parent_event->attr.freq) |
4638 | child_event->hw.sample_period = parent_event->hw.sample_period; | 4865 | child_event->hw.sample_period = parent_event->hw.sample_period; |
4639 | 4866 | ||
4867 | child_event->overflow_handler = parent_event->overflow_handler; | ||
4868 | |||
4640 | /* | 4869 | /* |
4641 | * Link it up in the child's context: | 4870 | * Link it up in the child's context: |
4642 | */ | 4871 | */ |
@@ -4726,7 +4955,6 @@ __perf_event_exit_task(struct perf_event *child_event, | |||
4726 | { | 4955 | { |
4727 | struct perf_event *parent_event; | 4956 | struct perf_event *parent_event; |
4728 | 4957 | ||
4729 | update_event_times(child_event); | ||
4730 | perf_event_remove_from_context(child_event); | 4958 | perf_event_remove_from_context(child_event); |
4731 | 4959 | ||
4732 | parent_event = child_event->parent; | 4960 | parent_event = child_event->parent; |
@@ -4778,6 +5006,7 @@ void perf_event_exit_task(struct task_struct *child) | |||
4778 | * the events from it. | 5006 | * the events from it. |
4779 | */ | 5007 | */ |
4780 | unclone_ctx(child_ctx); | 5008 | unclone_ctx(child_ctx); |
5009 | update_context_time(child_ctx); | ||
4781 | spin_unlock_irqrestore(&child_ctx->lock, flags); | 5010 | spin_unlock_irqrestore(&child_ctx->lock, flags); |
4782 | 5011 | ||
4783 | /* | 5012 | /* |
diff --git a/kernel/printk.c b/kernel/printk.c index f38b07f78a4e..b5ac4d99c667 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/syscalls.h> | 34 | #include <linux/syscalls.h> |
35 | #include <linux/kexec.h> | 35 | #include <linux/kexec.h> |
36 | #include <linux/ratelimit.h> | ||
36 | 37 | ||
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
38 | 39 | ||
@@ -1376,11 +1377,11 @@ late_initcall(disable_boot_consoles); | |||
1376 | */ | 1377 | */ |
1377 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); | 1378 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); |
1378 | 1379 | ||
1379 | int printk_ratelimit(void) | 1380 | int __printk_ratelimit(const char *func) |
1380 | { | 1381 | { |
1381 | return __ratelimit(&printk_ratelimit_state); | 1382 | return ___ratelimit(&printk_ratelimit_state, func); |
1382 | } | 1383 | } |
1383 | EXPORT_SYMBOL(printk_ratelimit); | 1384 | EXPORT_SYMBOL(__printk_ratelimit); |
1384 | 1385 | ||
1385 | /** | 1386 | /** |
1386 | * printk_timed_ratelimit - caller-controlled printk ratelimiting | 1387 | * printk_timed_ratelimit - caller-controlled printk ratelimiting |
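Call sites are unchanged by the rename; only the exported symbol gains the calling function's name for ratelimit diagnostics. Presumably printk_ratelimit() is now a macro along the lines of __printk_ratelimit(__func__); a typical user still looks like:

#include <linux/kernel.h>

static void my_handle_bad_packet(void)
{
        /* Suppressed messages are now attributed to the calling function
         * by name, which is what the new const char *func argument buys. */
        if (printk_ratelimit())
                printk(KERN_WARNING "mydrv: dropping malformed packet\n");
}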
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 400183346ad2..9b7fd4723878 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/kernel_stat.h> | ||
48 | 47 | ||
49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 48 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
50 | static struct lock_class_key rcu_lock_key; | 49 | static struct lock_class_key rcu_lock_key; |
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map = | |||
53 | EXPORT_SYMBOL_GPL(rcu_lock_map); | 52 | EXPORT_SYMBOL_GPL(rcu_lock_map); |
54 | #endif | 53 | #endif |
55 | 54 | ||
56 | int rcu_scheduler_active __read_mostly; | ||
57 | |||
58 | /* | 55 | /* |
59 | * Awaken the corresponding synchronize_rcu() instance now that a | 56 | * Awaken the corresponding synchronize_rcu() instance now that a |
60 | * grace period has elapsed. | 57 | * grace period has elapsed. |
@@ -66,122 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
66 | rcu = container_of(head, struct rcu_synchronize, head); | 63 | rcu = container_of(head, struct rcu_synchronize, head); |
67 | complete(&rcu->completion); | 64 | complete(&rcu->completion); |
68 | } | 65 | } |
69 | |||
70 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
71 | |||
72 | /** | ||
73 | * synchronize_rcu - wait until a grace period has elapsed. | ||
74 | * | ||
75 | * Control will return to the caller some time after a full grace | ||
76 | * period has elapsed, in other words after all currently executing RCU | ||
77 | * read-side critical sections have completed. RCU read-side critical | ||
78 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
79 | * and may be nested. | ||
80 | */ | ||
81 | void synchronize_rcu(void) | ||
82 | { | ||
83 | struct rcu_synchronize rcu; | ||
84 | |||
85 | if (!rcu_scheduler_active) | ||
86 | return; | ||
87 | |||
88 | init_completion(&rcu.completion); | ||
89 | /* Will wake me after RCU finished. */ | ||
90 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
91 | /* Wait for it. */ | ||
92 | wait_for_completion(&rcu.completion); | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(synchronize_rcu); | ||
95 | |||
96 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
97 | |||
98 | /** | ||
99 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
100 | * | ||
101 | * Control will return to the caller some time after a full rcu-sched | ||
102 | * grace period has elapsed, in other words after all currently executing | ||
103 | * rcu-sched read-side critical sections have completed. These read-side | ||
104 | * critical sections are delimited by rcu_read_lock_sched() and | ||
105 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
106 | * local_irq_disable(), and so on may be used in place of | ||
107 | * rcu_read_lock_sched(). | ||
108 | * | ||
109 | * This means that all preempt_disable code sequences, including NMI and | ||
110 | * hardware-interrupt handlers, in progress on entry will have completed | ||
111 | * before this primitive returns. However, this does not guarantee that | ||
112 | * softirq handlers will have completed, since in some kernels, these | ||
113 | * handlers can run in process context, and can block. | ||
114 | * | ||
115 | * This primitive provides the guarantees made by the (now removed) | ||
116 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
117 | * guarantees that rcu_read_lock() sections will have completed. | ||
118 | * In "classic RCU", these two guarantees happen to be one and | ||
119 | * the same, but can differ in realtime RCU implementations. | ||
120 | */ | ||
121 | void synchronize_sched(void) | ||
122 | { | ||
123 | struct rcu_synchronize rcu; | ||
124 | |||
125 | if (rcu_blocking_is_gp()) | ||
126 | return; | ||
127 | |||
128 | init_completion(&rcu.completion); | ||
129 | /* Will wake me after RCU finished. */ | ||
130 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
131 | /* Wait for it. */ | ||
132 | wait_for_completion(&rcu.completion); | ||
133 | } | ||
134 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
135 | |||
136 | /** | ||
137 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
138 | * | ||
139 | * Control will return to the caller some time after a full rcu_bh grace | ||
140 | * period has elapsed, in other words after all currently executing rcu_bh | ||
141 | * read-side critical sections have completed. RCU read-side critical | ||
142 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
143 | * and may be nested. | ||
144 | */ | ||
145 | void synchronize_rcu_bh(void) | ||
146 | { | ||
147 | struct rcu_synchronize rcu; | ||
148 | |||
149 | if (rcu_blocking_is_gp()) | ||
150 | return; | ||
151 | |||
152 | init_completion(&rcu.completion); | ||
153 | /* Will wake me after RCU finished. */ | ||
154 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
155 | /* Wait for it. */ | ||
156 | wait_for_completion(&rcu.completion); | ||
157 | } | ||
158 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
159 | |||
160 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | ||
161 | unsigned long action, void *hcpu) | ||
162 | { | ||
163 | return rcu_cpu_notify(self, action, hcpu); | ||
164 | } | ||
165 | |||
166 | void __init rcu_init(void) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | __rcu_init(); | ||
171 | cpu_notifier(rcu_barrier_cpu_hotplug, 0); | ||
172 | |||
173 | /* | ||
174 | * We don't need protection against CPU-hotplug here because | ||
175 | * this is called early in boot, before either interrupts | ||
176 | * or the scheduler are operational. | ||
177 | */ | ||
178 | for_each_online_cpu(i) | ||
179 | rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i); | ||
180 | } | ||
181 | |||
182 | void rcu_scheduler_starting(void) | ||
183 | { | ||
184 | WARN_ON(num_online_cpus() != 1); | ||
185 | WARN_ON(nr_context_switches() > 0); | ||
186 | rcu_scheduler_active = 1; | ||
187 | } | ||
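The grace-period primitives removed here move into the tree and tiny implementations; their job is unchanged. For reference, a minimal updater that relies on synchronize_rcu() (struct and globals are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_conf {
        int     threshold;
};

static struct my_conf *my_conf;         /* read under rcu_read_lock() */

/* Publish a new configuration, then free the old one only after every
 * pre-existing RCU read-side critical section has finished. */
static int my_conf_update(int threshold)
{
        struct my_conf *newc, *oldc;

        newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        if (!newc)
                return -ENOMEM;
        newc->threshold = threshold;

        oldc = my_conf;
        rcu_assign_pointer(my_conf, newc);
        synchronize_rcu();              /* wait out readers of oldc */
        kfree(oldc);
        return 0;
}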
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c new file mode 100644 index 000000000000..9f6d9ff2572c --- /dev/null +++ b/kernel/rcutiny.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2008 | ||
19 | * | ||
20 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
21 | * | ||
22 | * For detailed explanation of Read-Copy Update mechanism see - | ||
23 | * Documentation/RCU | ||
24 | */ | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/completion.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/notifier.h> | ||
29 | #include <linux/rcupdate.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/time.h> | ||
37 | #include <linux/cpu.h> | ||
38 | |||
39 | /* Global control variables for rcupdate callback mechanism. */ | ||
40 | struct rcu_ctrlblk { | ||
41 | struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ | ||
42 | struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ | ||
43 | struct rcu_head **curtail; /* ->next pointer of last CB. */ | ||
44 | }; | ||
45 | |||
46 | /* Definition for rcupdate control block. */ | ||
47 | static struct rcu_ctrlblk rcu_ctrlblk = { | ||
48 | .donetail = &rcu_ctrlblk.rcucblist, | ||
49 | .curtail = &rcu_ctrlblk.rcucblist, | ||
50 | }; | ||
51 | |||
52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | ||
53 | .donetail = &rcu_bh_ctrlblk.rcucblist, | ||
54 | .curtail = &rcu_bh_ctrlblk.rcucblist, | ||
55 | }; | ||
56 | |||
57 | #ifdef CONFIG_NO_HZ | ||
58 | |||
59 | static long rcu_dynticks_nesting = 1; | ||
60 | |||
61 | /* | ||
62 | * Enter dynticks-idle mode, which is an extended quiescent state | ||
63 | * if we have fully entered that mode (i.e., if the new value of | ||
64 | * dynticks_nesting is zero). | ||
65 | */ | ||
66 | void rcu_enter_nohz(void) | ||
67 | { | ||
68 | if (--rcu_dynticks_nesting == 0) | ||
69 | rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Exit dynticks-idle mode, so that we are no longer in an extended | ||
74 | * quiescent state. | ||
75 | */ | ||
76 | void rcu_exit_nohz(void) | ||
77 | { | ||
78 | rcu_dynticks_nesting++; | ||
79 | } | ||
80 | |||
81 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
82 | |||
83 | /* | ||
84 | * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc(). | ||
85 | * Also disable irqs to avoid confusion due to interrupt handlers | ||
86 | * invoking call_rcu(). | ||
87 | */ | ||
88 | static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) | ||
89 | { | ||
90 | unsigned long flags; | ||
91 | |||
92 | local_irq_save(flags); | ||
93 | if (rcp->rcucblist != NULL && | ||
94 | rcp->donetail != rcp->curtail) { | ||
95 | rcp->donetail = rcp->curtail; | ||
96 | local_irq_restore(flags); | ||
97 | return 1; | ||
98 | } | ||
99 | local_irq_restore(flags); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Record an rcu quiescent state. And an rcu_bh quiescent state while we | ||
106 | * are at it, given that any rcu quiescent state is also an rcu_bh | ||
107 | * quiescent state. Use "+" instead of "||" to defeat short circuiting. | ||
108 | */ | ||
109 | void rcu_sched_qs(int cpu) | ||
110 | { | ||
111 | if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) | ||
112 | raise_softirq(RCU_SOFTIRQ); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Record an rcu_bh quiescent state. | ||
117 | */ | ||
118 | void rcu_bh_qs(int cpu) | ||
119 | { | ||
120 | if (rcu_qsctr_help(&rcu_bh_ctrlblk)) | ||
121 | raise_softirq(RCU_SOFTIRQ); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Check to see if the scheduling-clock interrupt came from an extended | ||
126 | * quiescent state, and, if so, tell RCU about it. | ||
127 | */ | ||
128 | void rcu_check_callbacks(int cpu, int user) | ||
129 | { | ||
130 | if (user || | ||
131 | (idle_cpu(cpu) && | ||
132 | !in_softirq() && | ||
133 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) | ||
134 | rcu_sched_qs(cpu); | ||
135 | else if (!in_softirq()) | ||
136 | rcu_bh_qs(cpu); | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * Helper function for rcu_process_callbacks() that operates on the | ||
141 | * specified rcu_ctrlblk structure. | ||
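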
142 | */ | ||
143 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | ||
144 | { | ||
145 | struct rcu_head *next, *list; | ||
146 | unsigned long flags; | ||
147 | |||
148 | /* If no RCU callbacks ready to invoke, just return. */ | ||
149 | if (&rcp->rcucblist == rcp->donetail) | ||
150 | return; | ||
151 | |||
152 | /* Move the ready-to-invoke callbacks to a local list. */ | ||
153 | local_irq_save(flags); | ||
154 | list = rcp->rcucblist; | ||
155 | rcp->rcucblist = *rcp->donetail; | ||
156 | *rcp->donetail = NULL; | ||
157 | if (rcp->curtail == rcp->donetail) | ||
158 | rcp->curtail = &rcp->rcucblist; | ||
159 | rcp->donetail = &rcp->rcucblist; | ||
160 | local_irq_restore(flags); | ||
161 | |||
162 | /* Invoke the callbacks on the local list. */ | ||
163 | while (list) { | ||
164 | next = list->next; | ||
165 | prefetch(next); | ||
166 | list->func(list); | ||
167 | list = next; | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Invoke any callbacks whose grace period has completed. | ||
173 | */ | ||
174 | static void rcu_process_callbacks(struct softirq_action *unused) | ||
175 | { | ||
176 | __rcu_process_callbacks(&rcu_ctrlblk); | ||
177 | __rcu_process_callbacks(&rcu_bh_ctrlblk); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Wait for a grace period to elapse. But it is illegal to invoke | ||
182 | * synchronize_sched() from within an RCU read-side critical section. | ||
183 | * Therefore, any legal call to synchronize_sched() is a quiescent | ||
184 | * state, and so on a UP system, synchronize_sched() need do nothing. | ||
185 | * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the | ||
186 | * benefits of doing might_sleep() to reduce latency.) | ||
187 | * | ||
188 | * Cool, huh? (Due to Josh Triplett.) | ||
189 | * | ||
190 | * But we want to make this a static inline later. | ||
191 | */ | ||
192 | void synchronize_sched(void) | ||
193 | { | ||
194 | cond_resched(); | ||
195 | } | ||
196 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
197 | |||
198 | void synchronize_rcu_bh(void) | ||
199 | { | ||
200 | synchronize_sched(); | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
203 | |||
204 | /* | ||
205 | * Helper function for call_rcu() and call_rcu_bh(). | ||
206 | */ | ||
207 | static void __call_rcu(struct rcu_head *head, | ||
208 | void (*func)(struct rcu_head *rcu), | ||
209 | struct rcu_ctrlblk *rcp) | ||
210 | { | ||
211 | unsigned long flags; | ||
212 | |||
213 | head->func = func; | ||
214 | head->next = NULL; | ||
215 | |||
216 | local_irq_save(flags); | ||
217 | *rcp->curtail = head; | ||
218 | rcp->curtail = &head->next; | ||
219 | local_irq_restore(flags); | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Post an RCU callback to be invoked after the end of an RCU grace | ||
224 | * period. But since we have but one CPU, that would be after any | ||
225 | * quiescent state. | ||
226 | */ | ||
227 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
228 | { | ||
229 | __call_rcu(head, func, &rcu_ctrlblk); | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(call_rcu); | ||
232 | |||
233 | /* | ||
234 | * Post an RCU bottom-half callback to be invoked after any subsequent | ||
235 | * quiescent state. | ||
236 | */ | ||
237 | void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
238 | { | ||
239 | __call_rcu(head, func, &rcu_bh_ctrlblk); | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(call_rcu_bh); | ||
242 | |||
243 | void rcu_barrier(void) | ||
244 | { | ||
245 | struct rcu_synchronize rcu; | ||
246 | |||
247 | init_completion(&rcu.completion); | ||
248 | /* Will wake me after RCU finished. */ | ||
249 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
250 | /* Wait for it. */ | ||
251 | wait_for_completion(&rcu.completion); | ||
252 | } | ||
253 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
254 | |||
255 | void rcu_barrier_bh(void) | ||
256 | { | ||
257 | struct rcu_synchronize rcu; | ||
258 | |||
259 | init_completion(&rcu.completion); | ||
260 | /* Will wake me after RCU finished. */ | ||
261 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
262 | /* Wait for it. */ | ||
263 | wait_for_completion(&rcu.completion); | ||
264 | } | ||
265 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
266 | |||
267 | void rcu_barrier_sched(void) | ||
268 | { | ||
269 | struct rcu_synchronize rcu; | ||
270 | |||
271 | init_completion(&rcu.completion); | ||
272 | /* Will wake me after RCU finished. */ | ||
273 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
274 | /* Wait for it. */ | ||
275 | wait_for_completion(&rcu.completion); | ||
276 | } | ||
277 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
278 | |||
279 | void __init rcu_init(void) | ||
280 | { | ||
281 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
282 | } | ||
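Tiny RCU invokes callbacks exactly as the list walk in __rcu_process_callbacks() shows: each rcu_head is handed back to its func. The usual pattern embeds the rcu_head in the object being freed (struct is illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_node {
        int             key;
        struct rcu_head rcu;
};

/* Runs from the RCU softirq after a grace period; recover the object
 * from its embedded rcu_head and free it. */
static void my_node_free_rcu(struct rcu_head *head)
{
        struct my_node *node = container_of(head, struct my_node, rcu);

        kfree(node);
}

static void my_node_retire(struct my_node *node)
{
        /* On rcutiny this just appends to rcu_ctrlblk's singly linked list;
         * the next quiescent state makes the callback "done". */
        call_rcu(&node->rcu, my_node_free_rcu);
}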
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 697c0a0229d4..a621a67ef4e3 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p) | |||
327 | cur_ops->deferred_free(rp); | 327 | cur_ops->deferred_free(rp); |
328 | } | 328 | } |
329 | 329 | ||
330 | static int rcu_no_completed(void) | ||
331 | { | ||
332 | return 0; | ||
333 | } | ||
334 | |||
330 | static void rcu_torture_deferred_free(struct rcu_torture *p) | 335 | static void rcu_torture_deferred_free(struct rcu_torture *p) |
331 | { | 336 | { |
332 | call_rcu(&p->rtort_rcu, rcu_torture_cb); | 337 | call_rcu(&p->rtort_rcu, rcu_torture_cb); |
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = { | |||
388 | .name = "rcu_sync" | 393 | .name = "rcu_sync" |
389 | }; | 394 | }; |
390 | 395 | ||
396 | static struct rcu_torture_ops rcu_expedited_ops = { | ||
397 | .init = rcu_sync_torture_init, | ||
398 | .cleanup = NULL, | ||
399 | .readlock = rcu_torture_read_lock, | ||
400 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
401 | .readunlock = rcu_torture_read_unlock, | ||
402 | .completed = rcu_no_completed, | ||
403 | .deferred_free = rcu_sync_torture_deferred_free, | ||
404 | .sync = synchronize_rcu_expedited, | ||
405 | .cb_barrier = NULL, | ||
406 | .stats = NULL, | ||
407 | .irq_capable = 1, | ||
408 | .name = "rcu_expedited" | ||
409 | }; | ||
410 | |||
391 | /* | 411 | /* |
392 | * Definitions for rcu_bh torture testing. | 412 | * Definitions for rcu_bh torture testing. |
393 | */ | 413 | */ |
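The new rcu_expedited_ops entry exercises synchronize_rcu_expedited(), which makes the same guarantee as synchronize_rcu() but forces the grace period through quickly at the cost of extra work. An update path that cannot tolerate the normal grace-period latency can simply swap it in; a hedged sketch (slot/free_fn are illustrative):

#include <linux/rcupdate.h>

/* Same shape as an ordinary updater; only the wait primitive changes. */
static void my_urgent_replace(void **slot, void *new_ptr,
                              void (*free_fn)(void *))
{
        void *old_ptr = *slot;

        rcu_assign_pointer(*slot, new_ptr);
        synchronize_rcu_expedited();    /* low-latency grace period */
        free_fn(old_ptr);
}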
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = { | |||
547 | .name = "srcu" | 567 | .name = "srcu" |
548 | }; | 568 | }; |
549 | 569 | ||
570 | static void srcu_torture_synchronize_expedited(void) | ||
571 | { | ||
572 | synchronize_srcu_expedited(&srcu_ctl); | ||
573 | } | ||
574 | |||
575 | static struct rcu_torture_ops srcu_expedited_ops = { | ||
576 | .init = srcu_torture_init, | ||
577 | .cleanup = srcu_torture_cleanup, | ||
578 | .readlock = srcu_torture_read_lock, | ||
579 | .read_delay = srcu_read_delay, | ||
580 | .readunlock = srcu_torture_read_unlock, | ||
581 | .completed = srcu_torture_completed, | ||
582 | .deferred_free = rcu_sync_torture_deferred_free, | ||
583 | .sync = srcu_torture_synchronize_expedited, | ||
584 | .cb_barrier = NULL, | ||
585 | .stats = srcu_torture_stats, | ||
586 | .name = "srcu_expedited" | ||
587 | }; | ||
588 | |||
550 | /* | 589 | /* |
551 | * Definitions for sched torture testing. | 590 | * Definitions for sched torture testing. |
552 | */ | 591 | */ |
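srcu_expedited_ops drives synchronize_srcu_expedited() against the torture test's static srcu_ctl domain. Outside the test, a subsystem owns its own srcu_struct (initialized once with init_srcu_struct()) and pairs indexed read locks with the expedited or normal synchronize; a rough sketch with illustrative names:

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
        int val;
};

static struct srcu_struct my_srcu;      /* init_srcu_struct(&my_srcu) at setup */
static struct my_obj *my_obj_ptr;

static int my_obj_read(void)
{
        struct my_obj *p;
        int idx, val;

        idx = srcu_read_lock(&my_srcu);
        p = rcu_dereference(my_obj_ptr);
        val = p ? p->val : -1;
        srcu_read_unlock(&my_srcu, idx);
        return val;
}

static void my_obj_replace(struct my_obj *newp)
{
        struct my_obj *oldp = my_obj_ptr;

        rcu_assign_pointer(my_obj_ptr, newp);
        /* Same guarantee as synchronize_srcu(), traded for lower latency. */
        synchronize_srcu_expedited(&my_srcu);
        kfree(oldp);
}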
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx) | |||
562 | preempt_enable(); | 601 | preempt_enable(); |
563 | } | 602 | } |
564 | 603 | ||
565 | static int sched_torture_completed(void) | ||
566 | { | ||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static void rcu_sched_torture_deferred_free(struct rcu_torture *p) | 604 | static void rcu_sched_torture_deferred_free(struct rcu_torture *p) |
571 | { | 605 | { |
572 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); | 606 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); |
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = { | |||
583 | .readlock = sched_torture_read_lock, | 617 | .readlock = sched_torture_read_lock, |
584 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 618 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
585 | .readunlock = sched_torture_read_unlock, | 619 | .readunlock = sched_torture_read_unlock, |
586 | .completed = sched_torture_completed, | 620 | .completed = rcu_no_completed, |
587 | .deferred_free = rcu_sched_torture_deferred_free, | 621 | .deferred_free = rcu_sched_torture_deferred_free, |
588 | .sync = sched_torture_synchronize, | 622 | .sync = sched_torture_synchronize, |
589 | .cb_barrier = rcu_barrier_sched, | 623 | .cb_barrier = rcu_barrier_sched, |
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = { | |||
592 | .name = "sched" | 626 | .name = "sched" |
593 | }; | 627 | }; |
594 | 628 | ||
595 | static struct rcu_torture_ops sched_ops_sync = { | 629 | static struct rcu_torture_ops sched_sync_ops = { |
596 | .init = rcu_sync_torture_init, | 630 | .init = rcu_sync_torture_init, |
597 | .cleanup = NULL, | 631 | .cleanup = NULL, |
598 | .readlock = sched_torture_read_lock, | 632 | .readlock = sched_torture_read_lock, |
599 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 633 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
600 | .readunlock = sched_torture_read_unlock, | 634 | .readunlock = sched_torture_read_unlock, |
601 | .completed = sched_torture_completed, | 635 | .completed = rcu_no_completed, |
602 | .deferred_free = rcu_sync_torture_deferred_free, | 636 | .deferred_free = rcu_sync_torture_deferred_free, |
603 | .sync = sched_torture_synchronize, | 637 | .sync = sched_torture_synchronize, |
604 | .cb_barrier = NULL, | 638 | .cb_barrier = NULL, |
@@ -612,7 +646,7 @@ static struct rcu_torture_ops sched_expedited_ops = { | |||
612 | .readlock = sched_torture_read_lock, | 646 | .readlock = sched_torture_read_lock, |
613 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 647 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
614 | .readunlock = sched_torture_read_unlock, | 648 | .readunlock = sched_torture_read_unlock, |
615 | .completed = sched_torture_completed, | 649 | .completed = rcu_no_completed, |
616 | .deferred_free = rcu_sync_torture_deferred_free, | 650 | .deferred_free = rcu_sync_torture_deferred_free, |
617 | .sync = synchronize_sched_expedited, | 651 | .sync = synchronize_sched_expedited, |
618 | .cb_barrier = NULL, | 652 | .cb_barrier = NULL, |
@@ -1097,9 +1131,10 @@ rcu_torture_init(void) | |||
1097 | int cpu; | 1131 | int cpu; |
1098 | int firsterr = 0; | 1132 | int firsterr = 0; |
1099 | static struct rcu_torture_ops *torture_ops[] = | 1133 | static struct rcu_torture_ops *torture_ops[] = |
1100 | { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, | 1134 | { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, |
1101 | &sched_expedited_ops, | 1135 | &rcu_bh_ops, &rcu_bh_sync_ops, |
1102 | &srcu_ops, &sched_ops, &sched_ops_sync, }; | 1136 | &srcu_ops, &srcu_expedited_ops, |
1137 | &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; | ||
1103 | 1138 | ||
1104 | mutex_lock(&fullstop_mutex); | 1139 | mutex_lock(&fullstop_mutex); |
1105 | 1140 | ||
@@ -1110,8 +1145,12 @@ rcu_torture_init(void) | |||
1110 | break; | 1145 | break; |
1111 | } | 1146 | } |
1112 | if (i == ARRAY_SIZE(torture_ops)) { | 1147 | if (i == ARRAY_SIZE(torture_ops)) { |
1113 | printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", | 1148 | printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n", |
1114 | torture_type); | 1149 | torture_type); |
1150 | printk(KERN_ALERT "rcu-torture types:"); | ||
1151 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) | ||
1152 | printk(KERN_ALERT " %s", torture_ops[i]->name); | ||
1153 | printk(KERN_ALERT "\n"); | ||
1115 | mutex_unlock(&fullstop_mutex); | 1154 | mutex_unlock(&fullstop_mutex); |
1116 | return -EINVAL; | 1155 | return -EINVAL; |
1117 | } | 1156 | } |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f3077c0ab181..53ae9598f798 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -46,18 +46,22 @@ | |||
46 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | ||
49 | 50 | ||
50 | #include "rcutree.h" | 51 | #include "rcutree.h" |
51 | 52 | ||
52 | /* Data structures. */ | 53 | /* Data structures. */ |
53 | 54 | ||
55 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | ||
56 | |||
54 | #define RCU_STATE_INITIALIZER(name) { \ | 57 | #define RCU_STATE_INITIALIZER(name) { \ |
55 | .level = { &name.node[0] }, \ | 58 | .level = { &name.node[0] }, \ |
56 | .levelcnt = { \ | 59 | .levelcnt = { \ |
57 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | 60 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ |
58 | NUM_RCU_LVL_1, \ | 61 | NUM_RCU_LVL_1, \ |
59 | NUM_RCU_LVL_2, \ | 62 | NUM_RCU_LVL_2, \ |
60 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ | 63 | NUM_RCU_LVL_3, \ |
64 | NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \ | ||
61 | }, \ | 65 | }, \ |
62 | .signaled = RCU_GP_IDLE, \ | 66 | .signaled = RCU_GP_IDLE, \ |
63 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
@@ -77,6 +81,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
77 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
78 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
79 | 83 | ||
84 | static int rcu_scheduler_active __read_mostly; | ||
85 | |||
80 | 86 | ||
81 | /* | 87 | /* |
82 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 88 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
@@ -98,7 +104,7 @@ void rcu_sched_qs(int cpu) | |||
98 | struct rcu_data *rdp; | 104 | struct rcu_data *rdp; |
99 | 105 | ||
100 | rdp = &per_cpu(rcu_sched_data, cpu); | 106 | rdp = &per_cpu(rcu_sched_data, cpu); |
101 | rdp->passed_quiesc_completed = rdp->completed; | 107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
102 | barrier(); | 108 | barrier(); |
103 | rdp->passed_quiesc = 1; | 109 | rdp->passed_quiesc = 1; |
104 | rcu_preempt_note_context_switch(cpu); | 110 | rcu_preempt_note_context_switch(cpu); |
@@ -109,7 +115,7 @@ void rcu_bh_qs(int cpu) | |||
109 | struct rcu_data *rdp; | 115 | struct rcu_data *rdp; |
110 | 116 | ||
111 | rdp = &per_cpu(rcu_bh_data, cpu); | 117 | rdp = &per_cpu(rcu_bh_data, cpu); |
112 | rdp->passed_quiesc_completed = rdp->completed; | 118 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
113 | barrier(); | 119 | barrier(); |
114 | rdp->passed_quiesc = 1; | 120 | rdp->passed_quiesc = 1; |
115 | } | 121 | } |
@@ -335,28 +341,9 @@ void rcu_irq_exit(void) | |||
335 | set_need_resched(); | 341 | set_need_resched(); |
336 | } | 342 | } |
337 | 343 | ||
338 | /* | ||
339 | * Record the specified "completed" value, which is later used to validate | ||
340 | * dynticks counter manipulations. Specify "rsp->completed - 1" to | ||
341 | * unconditionally invalidate any future dynticks manipulations (which is | ||
342 | * useful at the beginning of a grace period). | ||
343 | */ | ||
344 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
345 | { | ||
346 | rsp->dynticks_completed = comp; | ||
347 | } | ||
348 | |||
349 | #ifdef CONFIG_SMP | 344 | #ifdef CONFIG_SMP |
350 | 345 | ||
351 | /* | 346 | /* |
352 | * Recall the previously recorded value of the completion for dynticks. | ||
353 | */ | ||
354 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
355 | { | ||
356 | return rsp->dynticks_completed; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Snapshot the specified CPU's dynticks counter so that we can later | 347 | * Snapshot the specified CPU's dynticks counter so that we can later |
361 | * credit them with an implicit quiescent state. Return 1 if this CPU | 348 | * credit them with an implicit quiescent state. Return 1 if this CPU |
362 | * is in dynticks idle mode, which is an extended quiescent state. | 349 | * is in dynticks idle mode, which is an extended quiescent state. |
@@ -419,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
419 | 406 | ||
420 | #else /* #ifdef CONFIG_NO_HZ */ | 407 | #else /* #ifdef CONFIG_NO_HZ */ |
421 | 408 | ||
422 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
423 | { | ||
424 | } | ||
425 | |||
426 | #ifdef CONFIG_SMP | 409 | #ifdef CONFIG_SMP |
427 | 410 | ||
428 | /* | ||
429 | * If there are no dynticks, then the only way that a CPU can passively | ||
430 | * be in a quiescent state is to be offline. Unlike dynticks idle, which | ||
431 | * is a point in time during the prior (already finished) grace period, | ||
432 | * an offline CPU is always in a quiescent state, and thus can be | ||
433 | * unconditionally applied. So just return the current value of completed. | ||
434 | */ | ||
435 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
436 | { | ||
437 | return rsp->completed; | ||
438 | } | ||
439 | |||
440 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 411 | static int dyntick_save_progress_counter(struct rcu_data *rdp) |
441 | { | 412 | { |
442 | return 0; | 413 | return 0; |
@@ -553,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
553 | /* | 524 | /* |
554 | * Update CPU-local rcu_data state to record the newly noticed grace period. | 525 | * Update CPU-local rcu_data state to record the newly noticed grace period. |
555 | * This is used both when we started the grace period and when we notice | 526 | * This is used both when we started the grace period and when we notice |
556 | * that someone else started the grace period. | 527 | * that someone else started the grace period. The caller must hold the |
528 | * ->lock of the leaf rcu_node structure corresponding to the current CPU, | ||
529 | * and must have irqs disabled. | ||
557 | */ | 530 | */ |
531 | static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
532 | { | ||
533 | if (rdp->gpnum != rnp->gpnum) { | ||
534 | rdp->qs_pending = 1; | ||
535 | rdp->passed_quiesc = 0; | ||
536 | rdp->gpnum = rnp->gpnum; | ||
537 | } | ||
538 | } | ||
539 | |||
558 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | 540 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) |
559 | { | 541 | { |
560 | rdp->qs_pending = 1; | 542 | unsigned long flags; |
561 | rdp->passed_quiesc = 0; | 543 | struct rcu_node *rnp; |
562 | rdp->gpnum = rsp->gpnum; | 544 | |
545 | local_irq_save(flags); | ||
546 | rnp = rdp->mynode; | ||
547 | if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ | ||
548 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
549 | local_irq_restore(flags); | ||
550 | return; | ||
551 | } | ||
552 | __note_new_gpnum(rsp, rnp, rdp); | ||
553 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
563 | } | 554 | } |
564 | 555 | ||
565 | /* | 556 | /* |
@@ -583,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) | |||
583 | } | 574 | } |
584 | 575 | ||
585 | /* | 576 | /* |
577 | * Advance this CPU's callbacks, but only if the current grace period | ||
578 | * has ended. This may be called only from the CPU to whom the rdp | ||
579 | * belongs. In addition, the corresponding leaf rcu_node structure's | ||
580 | * ->lock must be held by the caller, with irqs disabled. | ||
581 | */ | ||
582 | static void | ||
583 | __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
584 | { | ||
585 | /* Did another grace period end? */ | ||
586 | if (rdp->completed != rnp->completed) { | ||
587 | |||
588 | /* Advance callbacks. No harm if list empty. */ | ||
589 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
590 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
591 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
592 | |||
593 | /* Remember that we saw this grace-period completion. */ | ||
594 | rdp->completed = rnp->completed; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Advance this CPU's callbacks, but only if the current grace period | ||
600 | * has ended. This may be called only from the CPU to whom the rdp | ||
601 | * belongs. | ||
602 | */ | ||
603 | static void | ||
604 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
605 | { | ||
606 | unsigned long flags; | ||
607 | struct rcu_node *rnp; | ||
608 | |||
609 | local_irq_save(flags); | ||
610 | rnp = rdp->mynode; | ||
611 | if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ | ||
612 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
613 | local_irq_restore(flags); | ||
614 | return; | ||
615 | } | ||
616 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
617 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * Do per-CPU grace-period initialization for running CPU. The caller | ||
622 | * must hold the lock of the leaf rcu_node structure corresponding to | ||
623 | * this CPU. | ||
624 | */ | ||
625 | static void | ||
626 | rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
627 | { | ||
628 | /* Prior grace period ended, so advance callbacks for current CPU. */ | ||
629 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
630 | |||
631 | /* | ||
632 | * Because this CPU just now started the new grace period, we know | ||
633 | * that all of its callbacks will be covered by this upcoming grace | ||
634 | * period, even the ones that were registered arbitrarily recently. | ||
635 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
636 | * | ||
637 | * Other CPUs cannot be sure exactly when the grace period started. | ||
638 | * Therefore, their recently registered callbacks must pass through | ||
639 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
640 | * by the next RCU grace period. | ||
641 | */ | ||
642 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
643 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
644 | |||
645 | /* Set state so that this CPU will detect the next quiescent state. */ | ||
646 | __note_new_gpnum(rsp, rnp, rdp); | ||
647 | } | ||
648 | |||
649 | /* | ||
586 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | 650 | * Start a new RCU grace period if warranted, re-initializing the hierarchy |
587 | * in preparation for detecting the next grace period. The caller must hold | 651 | * in preparation for detecting the next grace period. The caller must hold |
588 | * the root node's ->lock, which is released before return. Hard irqs must | 652 | * the root node's ->lock, which is released before return. Hard irqs must |
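The new __rcu_process_gp_end()/rcu_start_gp_per_cpu() pair only moves tail pointers; no callback is ever copied. The standalone model below mirrors that pointer dance on a simplified four-segment list (segment names are illustrative, not the kernel's RCU_*_TAIL indices):

/* One singly linked list, four tail pointers:
 *   [head         .. *tail[DONE])   ready to invoke
 *   [*tail[DONE]  .. *tail[WAIT])   waiting for the current grace period
 *   [*tail[WAIT]  .. *tail[READY])  will wait for the next grace period
 *   [*tail[READY] .. *tail[NEXT])   newly queued                        */
#include <stdio.h>
#include <stddef.h>

struct cb { struct cb *next; };

enum { DONE, WAIT, READY, NEXT, NSEG };

struct cblist {
        struct cb  *head;
        struct cb **tail[NSEG];
};

static void cblist_init(struct cblist *cl)
{
        int i;

        cl->head = NULL;
        for (i = 0; i < NSEG; i++)
                cl->tail[i] = &cl->head;
}

static void cblist_enqueue(struct cblist *cl, struct cb *cb)
{
        cb->next = NULL;
        *cl->tail[NEXT] = cb;
        cl->tail[NEXT] = &cb->next;
}

/* Mirror of rcu_start_gp_per_cpu(): the CPU that starts the grace period
 * knows all of its queued callbacks are covered by it. */
static void cblist_gp_started_here(struct cblist *cl)
{
        cl->tail[READY] = cl->tail[NEXT];
        cl->tail[WAIT]  = cl->tail[NEXT];
}

/* Mirror of __rcu_process_gp_end(): when ->completed advances, each
 * segment slides down one stage; only the tails move. */
static void cblist_gp_ended(struct cblist *cl)
{
        cl->tail[DONE]  = cl->tail[WAIT];
        cl->tail[WAIT]  = cl->tail[READY];
        cl->tail[READY] = cl->tail[NEXT];
}

int main(void)
{
        struct cblist cl;
        struct cb a, b;
        struct cb *p;
        int ndone = 0;

        cblist_init(&cl);
        cblist_enqueue(&cl, &a);
        cblist_enqueue(&cl, &b);
        cblist_gp_started_here(&cl);    /* a and b wait on this GP   */
        cblist_gp_ended(&cl);           /* ...and become invocable   */

        if (cl.tail[DONE] != &cl.head) {
                for (p = cl.head; p; p = p->next) {
                        ndone++;
                        if (&p->next == cl.tail[DONE])
                                break;
                }
        }
        printf("%d callbacks ready to invoke\n", ndone);
        return 0;
}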
@@ -596,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
596 | struct rcu_node *rnp = rcu_get_root(rsp); | 660 | struct rcu_node *rnp = rcu_get_root(rsp); |
597 | 661 | ||
598 | if (!cpu_needs_another_gp(rsp, rdp)) { | 662 | if (!cpu_needs_another_gp(rsp, rdp)) { |
599 | spin_unlock_irqrestore(&rnp->lock, flags); | 663 | if (rnp->completed == rsp->completed) { |
664 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
665 | return; | ||
666 | } | ||
667 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
668 | |||
669 | /* | ||
670 | * Propagate new ->completed value to rcu_node structures | ||
671 | * so that other CPUs don't have to wait until the start | ||
672 | * of the next grace period to process their callbacks. | ||
673 | */ | ||
674 | rcu_for_each_node_breadth_first(rsp, rnp) { | ||
675 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
676 | rnp->completed = rsp->completed; | ||
677 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
678 | } | ||
679 | local_irq_restore(flags); | ||
600 | return; | 680 | return; |
601 | } | 681 | } |
602 | 682 | ||
@@ -606,29 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
606 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | 686 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ |
607 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 687 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
608 | record_gp_stall_check_time(rsp); | 688 | record_gp_stall_check_time(rsp); |
609 | dyntick_record_completed(rsp, rsp->completed - 1); | ||
610 | note_new_gpnum(rsp, rdp); | ||
611 | |||
612 | /* | ||
613 | * Because this CPU just now started the new grace period, we know | ||
614 | * that all of its callbacks will be covered by this upcoming grace | ||
615 | * period, even the ones that were registered arbitrarily recently. | ||
616 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
617 | * | ||
618 | * Other CPUs cannot be sure exactly when the grace period started. | ||
619 | * Therefore, their recently registered callbacks must pass through | ||
620 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
621 | * by the next RCU grace period. | ||
622 | */ | ||
623 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
624 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
625 | 689 | ||
626 | /* Special-case the common single-level case. */ | 690 | /* Special-case the common single-level case. */ |
627 | if (NUM_RCU_NODES == 1) { | 691 | if (NUM_RCU_NODES == 1) { |
628 | rcu_preempt_check_blocked_tasks(rnp); | 692 | rcu_preempt_check_blocked_tasks(rnp); |
629 | rnp->qsmask = rnp->qsmaskinit; | 693 | rnp->qsmask = rnp->qsmaskinit; |
630 | rnp->gpnum = rsp->gpnum; | 694 | rnp->gpnum = rsp->gpnum; |
695 | rnp->completed = rsp->completed; | ||
631 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | 696 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ |
697 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
632 | spin_unlock_irqrestore(&rnp->lock, flags); | 698 | spin_unlock_irqrestore(&rnp->lock, flags); |
633 | return; | 699 | return; |
634 | } | 700 | } |
@@ -661,6 +727,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
661 | rcu_preempt_check_blocked_tasks(rnp); | 727 | rcu_preempt_check_blocked_tasks(rnp); |
662 | rnp->qsmask = rnp->qsmaskinit; | 728 | rnp->qsmask = rnp->qsmaskinit; |
663 | rnp->gpnum = rsp->gpnum; | 729 | rnp->gpnum = rsp->gpnum; |
730 | rnp->completed = rsp->completed; | ||
731 | if (rnp == rdp->mynode) | ||
732 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
664 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 733 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
665 | } | 734 | } |
666 | 735 | ||
@@ -672,58 +741,32 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
672 | } | 741 | } |
673 | 742 | ||
674 | /* | 743 | /* |
675 | * Advance this CPU's callbacks, but only if the current grace period | 744 | * Report a full set of quiescent states to the specified rcu_state |
676 | * has ended. This may be called only from the CPU to whom the rdp | 745 | * data structure. This involves cleaning up after the prior grace |
677 | * belongs. | 746 | * period and letting rcu_start_gp() start up the next grace period |
747 | * if one is needed. Note that the caller must hold rnp->lock, as | ||
748 | * required by rcu_start_gp(), which will release it. | ||
678 | */ | 749 | */ |
679 | static void | 750 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) |
680 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
681 | { | ||
682 | long completed_snap; | ||
683 | unsigned long flags; | ||
684 | |||
685 | local_irq_save(flags); | ||
686 | completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ | ||
687 | |||
688 | /* Did another grace period end? */ | ||
689 | if (rdp->completed != completed_snap) { | ||
690 | |||
691 | /* Advance callbacks. No harm if list empty. */ | ||
692 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
693 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
694 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
695 | |||
696 | /* Remember that we saw this grace-period completion. */ | ||
697 | rdp->completed = completed_snap; | ||
698 | } | ||
699 | local_irq_restore(flags); | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * Clean up after the prior grace period and let rcu_start_gp() start up | ||
704 | * the next grace period if one is needed. Note that the caller must | ||
705 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. | ||
706 | */ | ||
707 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) | ||
708 | __releases(rcu_get_root(rsp)->lock) | 751 | __releases(rcu_get_root(rsp)->lock) |
709 | { | 752 | { |
710 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | 753 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
711 | rsp->completed = rsp->gpnum; | 754 | rsp->completed = rsp->gpnum; |
712 | rsp->signaled = RCU_GP_IDLE; | 755 | rsp->signaled = RCU_GP_IDLE; |
713 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | ||
714 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | 756 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ |
715 | } | 757 | } |
716 | 758 | ||
717 | /* | 759 | /* |
718 | * Similar to cpu_quiet(), for which it is a helper function. Allows | 760 | * Similar to rcu_report_qs_rdp(), for which it is a helper function. |
719 | * a group of CPUs to be quieted at one go, though all the CPUs in the | 761 | * Allows quiescent states for a group of CPUs to be reported at one go |
720 | * group must be represented by the same leaf rcu_node structure. | 762 | * to the specified rcu_node structure, though all the CPUs in the group |
721 | * That structure's lock must be held upon entry, and it is released | 763 | * must be represented by the same rcu_node structure (which need not be |
722 | * before return. | 764 | * a leaf rcu_node structure, though it often will be). That structure's |
765 | * lock must be held upon entry, and it is released before return. | ||
723 | */ | 766 | */ |
724 | static void | 767 | static void |
725 | cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | 768 | rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, |
726 | unsigned long flags) | 769 | struct rcu_node *rnp, unsigned long flags) |
727 | __releases(rnp->lock) | 770 | __releases(rnp->lock) |
728 | { | 771 | { |
729 | struct rcu_node *rnp_c; | 772 | struct rcu_node *rnp_c; |
@@ -759,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
759 | 802 | ||
760 | /* | 803 | /* |
761 | * Get here if we are the last CPU to pass through a quiescent | 804 | * Get here if we are the last CPU to pass through a quiescent |
762 | * state for this grace period. Invoke cpu_quiet_msk_finish() | 805 | * state for this grace period. Invoke rcu_report_qs_rsp() |
763 | * to clean up and start the next grace period if one is needed. | 806 | * to clean up and start the next grace period if one is needed. |
764 | */ | 807 | */ |
765 | cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ | 808 | rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ |
766 | } | 809 | } |
767 | 810 | ||
768 | /* | 811 | /* |
769 | * Record a quiescent state for the specified CPU, which must either be | 812 | * Record a quiescent state for the specified CPU to that CPU's rcu_data |
770 | * the current CPU. The lastcomp argument is used to make sure we are | 813 | * structure. This must be either called from the specified CPU, or |
771 | * still in the grace period of interest. We don't want to end the current | 814 | * called when the specified CPU is known to be offline (and when it is |
772 | * grace period based on quiescent states detected in an earlier grace | 815 | * also known that no other CPU is concurrently trying to help the offline |
773 | * period! | 816 | * CPU). The lastcomp argument is used to make sure we are still in the |
817 | * grace period of interest. We don't want to end the current grace period | ||
818 | * based on quiescent states detected in an earlier grace period! | ||
774 | */ | 819 | */ |
775 | static void | 820 | static void |
776 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | 821 | rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) |
777 | { | 822 | { |
778 | unsigned long flags; | 823 | unsigned long flags; |
779 | unsigned long mask; | 824 | unsigned long mask; |
@@ -781,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
781 | 826 | ||
782 | rnp = rdp->mynode; | 827 | rnp = rdp->mynode; |
783 | spin_lock_irqsave(&rnp->lock, flags); | 828 | spin_lock_irqsave(&rnp->lock, flags); |
784 | if (lastcomp != ACCESS_ONCE(rsp->completed)) { | 829 | if (lastcomp != rnp->completed) { |
785 | 830 | ||
786 | /* | 831 | /* |
787 | * Someone beat us to it for this grace period, so leave. | 832 | * Someone beat us to it for this grace period, so leave. |
788 | * The race with GP start is resolved by the fact that we | 833 | * The race with GP start is resolved by the fact that we |
789 | * hold the leaf rcu_node lock, so that the per-CPU bits | 834 | * hold the leaf rcu_node lock, so that the per-CPU bits |
790 | * cannot yet be initialized -- so we would simply find our | 835 | * cannot yet be initialized -- so we would simply find our |
791 | * CPU's bit already cleared in cpu_quiet_msk() if this race | 836 | * CPU's bit already cleared in rcu_report_qs_rnp() if this |
792 | * occurred. | 837 | * race occurred. |
793 | */ | 838 | */ |
794 | rdp->passed_quiesc = 0; /* try again later! */ | 839 | rdp->passed_quiesc = 0; /* try again later! */ |
795 | spin_unlock_irqrestore(&rnp->lock, flags); | 840 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -807,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
807 | */ | 852 | */ |
808 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 853 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
809 | 854 | ||
810 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ | 855 | rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */ |
811 | } | 856 | } |
812 | } | 857 | } |
813 | 858 | ||
@@ -838,8 +883,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
838 | if (!rdp->passed_quiesc) | 883 | if (!rdp->passed_quiesc) |
839 | return; | 884 | return; |
840 | 885 | ||
841 | /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ | 886 | /* |
842 | cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | 887 | * Tell RCU we are done (but rcu_report_qs_rdp() will be the |
888 | * judge of that). | ||
889 | */ | ||
890 | rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | ||
843 | } | 891 | } |
844 | 892 | ||
845 | #ifdef CONFIG_HOTPLUG_CPU | 893 | #ifdef CONFIG_HOTPLUG_CPU |
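
[Editor's note on the renames above: rcu_report_qs_rdp() records the quiescent state at the CPU's leaf rcu_node, rcu_report_qs_rnp() clears bits and walks toward the root, and rcu_report_qs_rsp() ends the grace period once the root empties. A stand-alone toy sketch of that upward walk, with invented types rather than the kernel's rcu_node and with all locking omitted:

#include <stdio.h>

struct toy_node {
	struct toy_node *parent;
	unsigned long qsmask;	/* children still owing a quiescent state */
	unsigned long grpmask;	/* this node's bit in parent->qsmask */
};

/* Analog of rcu_report_qs_rnp(): clear mask, propagate upward on empty. */
static void toy_report_qs(struct toy_node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask != 0 || rnp->parent == NULL)
			break;		/* siblings still pending, or reached root */
		mask = rnp->grpmask;
		rnp = rnp->parent;
	}
	if (rnp->parent == NULL && rnp->qsmask == 0)
		printf("root empty: grace period may end\n");	/* rsp analog */
}

int main(void)
{
	struct toy_node root  = { NULL,  0x3, 0x0 };
	struct toy_node leaf0 = { &root, 0x1, 0x1 };
	struct toy_node leaf1 = { &root, 0x1, 0x2 };

	toy_report_qs(&leaf0, 0x1);	/* clears root bit 0x1; root not yet empty */
	toy_report_qs(&leaf1, 0x1);	/* leaf1 then root empties; GP may end */
	return 0;
}
]
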
@@ -899,8 +947,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
899 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | 947 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) |
900 | { | 948 | { |
901 | unsigned long flags; | 949 | unsigned long flags; |
902 | long lastcomp; | ||
903 | unsigned long mask; | 950 | unsigned long mask; |
951 | int need_report = 0; | ||
904 | struct rcu_data *rdp = rsp->rda[cpu]; | 952 | struct rcu_data *rdp = rsp->rda[cpu]; |
905 | struct rcu_node *rnp; | 953 | struct rcu_node *rnp; |
906 | 954 | ||
@@ -914,30 +962,32 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
914 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 962 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
915 | rnp->qsmaskinit &= ~mask; | 963 | rnp->qsmaskinit &= ~mask; |
916 | if (rnp->qsmaskinit != 0) { | 964 | if (rnp->qsmaskinit != 0) { |
917 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 965 | if (rnp != rdp->mynode) |
966 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
918 | break; | 967 | break; |
919 | } | 968 | } |
920 | 969 | if (rnp == rdp->mynode) | |
921 | /* | 970 | need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); |
922 | * If there was a task blocking the current grace period, | 971 | else |
923 | * and if all CPUs have checked in, we need to propagate | 972 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
924 | * the quiescent state up the rcu_node hierarchy. But that | ||
925 | * is inconvenient at the moment due to deadlock issues if | ||
926 | * this should end the current grace period. So set the | ||
927 | * offlined CPU's bit in ->qsmask in order to force the | ||
928 | * next force_quiescent_state() invocation to clean up this | ||
929 | * mess in a deadlock-free manner. | ||
930 | */ | ||
931 | if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask) | ||
932 | rnp->qsmask |= mask; | ||
933 | |||
934 | mask = rnp->grpmask; | 973 | mask = rnp->grpmask; |
935 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
936 | rnp = rnp->parent; | 974 | rnp = rnp->parent; |
937 | } while (rnp != NULL); | 975 | } while (rnp != NULL); |
938 | lastcomp = rsp->completed; | ||
939 | 976 | ||
940 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 977 | /* |
978 | * We still hold the leaf rcu_node structure lock here, and | ||
979 | * irqs are still disabled. The reason for this subterfuge is | ||
980 | * that invoking rcu_report_unblock_qs_rnp() with ->onofflock | ||
981 | * held leads to deadlock. | ||
982 | */ | ||
983 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
984 | rnp = rdp->mynode; | ||
985 | if (need_report & RCU_OFL_TASKS_NORM_GP) | ||
986 | rcu_report_unblock_qs_rnp(rnp, flags); | ||
987 | else | ||
988 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
989 | if (need_report & RCU_OFL_TASKS_EXP_GP) | ||
990 | rcu_report_exp_rnp(rsp, rnp); | ||
941 | 991 | ||
942 | rcu_adopt_orphan_cbs(rsp); | 992 | rcu_adopt_orphan_cbs(rsp); |
943 | } | 993 | } |
@@ -1109,7 +1159,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | |||
1109 | rcu_for_each_leaf_node(rsp, rnp) { | 1159 | rcu_for_each_leaf_node(rsp, rnp) { |
1110 | mask = 0; | 1160 | mask = 0; |
1111 | spin_lock_irqsave(&rnp->lock, flags); | 1161 | spin_lock_irqsave(&rnp->lock, flags); |
1112 | if (rsp->completed != lastcomp) { | 1162 | if (rnp->completed != lastcomp) { |
1113 | spin_unlock_irqrestore(&rnp->lock, flags); | 1163 | spin_unlock_irqrestore(&rnp->lock, flags); |
1114 | return 1; | 1164 | return 1; |
1115 | } | 1165 | } |
@@ -1123,10 +1173,10 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | |||
1123 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) | 1173 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) |
1124 | mask |= bit; | 1174 | mask |= bit; |
1125 | } | 1175 | } |
1126 | if (mask != 0 && rsp->completed == lastcomp) { | 1176 | if (mask != 0 && rnp->completed == lastcomp) { |
1127 | 1177 | ||
1128 | /* cpu_quiet_msk() releases rnp->lock. */ | 1178 | /* rcu_report_qs_rnp() releases rnp->lock. */ |
1129 | cpu_quiet_msk(mask, rsp, rnp, flags); | 1179 | rcu_report_qs_rnp(mask, rsp, rnp, flags); |
1130 | continue; | 1180 | continue; |
1131 | } | 1181 | } |
1132 | spin_unlock_irqrestore(&rnp->lock, flags); | 1182 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -1144,6 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1144 | long lastcomp; | 1194 | long lastcomp; |
1145 | struct rcu_node *rnp = rcu_get_root(rsp); | 1195 | struct rcu_node *rnp = rcu_get_root(rsp); |
1146 | u8 signaled; | 1196 | u8 signaled; |
1197 | u8 forcenow; | ||
1147 | 1198 | ||
1148 | if (!rcu_gp_in_progress(rsp)) | 1199 | if (!rcu_gp_in_progress(rsp)) |
1149 | return; /* No grace period in progress, nothing to force. */ | 1200 | return; /* No grace period in progress, nothing to force. */ |
@@ -1156,10 +1207,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1156 | goto unlock_ret; /* no emergency and done recently. */ | 1207 | goto unlock_ret; /* no emergency and done recently. */ |
1157 | rsp->n_force_qs++; | 1208 | rsp->n_force_qs++; |
1158 | spin_lock(&rnp->lock); | 1209 | spin_lock(&rnp->lock); |
1159 | lastcomp = rsp->completed; | 1210 | lastcomp = rsp->gpnum - 1; |
1160 | signaled = rsp->signaled; | 1211 | signaled = rsp->signaled; |
1161 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 1212 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
1162 | if (lastcomp == rsp->gpnum) { | 1213 | if (!rcu_gp_in_progress(rsp)) { |
1163 | rsp->n_force_qs_ngp++; | 1214 | rsp->n_force_qs_ngp++; |
1164 | spin_unlock(&rnp->lock); | 1215 | spin_unlock(&rnp->lock); |
1165 | goto unlock_ret; /* no GP in progress, time updated. */ | 1216 | goto unlock_ret; /* no GP in progress, time updated. */ |
@@ -1180,21 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1180 | if (rcu_process_dyntick(rsp, lastcomp, | 1231 | if (rcu_process_dyntick(rsp, lastcomp, |
1181 | dyntick_save_progress_counter)) | 1232 | dyntick_save_progress_counter)) |
1182 | goto unlock_ret; | 1233 | goto unlock_ret; |
1234 | /* fall into next case. */ | ||
1235 | |||
1236 | case RCU_SAVE_COMPLETED: | ||
1183 | 1237 | ||
1184 | /* Update state, record completion counter. */ | 1238 | /* Update state, record completion counter. */ |
1239 | forcenow = 0; | ||
1185 | spin_lock(&rnp->lock); | 1240 | spin_lock(&rnp->lock); |
1186 | if (lastcomp == rsp->completed && | 1241 | if (lastcomp + 1 == rsp->gpnum && |
1187 | rsp->signaled == RCU_SAVE_DYNTICK) { | 1242 | lastcomp == rsp->completed && |
1243 | rsp->signaled == signaled) { | ||
1188 | rsp->signaled = RCU_FORCE_QS; | 1244 | rsp->signaled = RCU_FORCE_QS; |
1189 | dyntick_record_completed(rsp, lastcomp); | 1245 | rsp->completed_fqs = lastcomp; |
1246 | forcenow = signaled == RCU_SAVE_COMPLETED; | ||
1190 | } | 1247 | } |
1191 | spin_unlock(&rnp->lock); | 1248 | spin_unlock(&rnp->lock); |
1192 | break; | 1249 | if (!forcenow) |
1250 | break; | ||
1251 | /* fall into next case. */ | ||
1193 | 1252 | ||
1194 | case RCU_FORCE_QS: | 1253 | case RCU_FORCE_QS: |
1195 | 1254 | ||
1196 | /* Check dyntick-idle state, send IPI to laggards. */ | 1255 | /* Check dyntick-idle state, send IPI to laggards. */ |
1197 | if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), | 1256 | if (rcu_process_dyntick(rsp, rsp->completed_fqs, |
1198 | rcu_implicit_dynticks_qs)) | 1257 | rcu_implicit_dynticks_qs)) |
1199 | goto unlock_ret; | 1258 | goto unlock_ret; |
1200 | 1259 | ||
@@ -1351,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
1351 | } | 1410 | } |
1352 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 1411 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
1353 | 1412 | ||
1413 | /** | ||
1414 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
1415 | * | ||
1416 | * Control will return to the caller some time after a full rcu-sched | ||
1417 | * grace period has elapsed, in other words after all currently executing | ||
1418 | * rcu-sched read-side critical sections have completed. These read-side | ||
1419 | * critical sections are delimited by rcu_read_lock_sched() and | ||
1420 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
1421 | * local_irq_disable(), and so on may be used in place of | ||
1422 | * rcu_read_lock_sched(). | ||
1423 | * | ||
1424 | * This means that all preempt_disable code sequences, including NMI and | ||
1425 | * hardware-interrupt handlers, in progress on entry will have completed | ||
1426 | * before this primitive returns. However, this does not guarantee that | ||
1427 | * softirq handlers will have completed, since in some kernels, these | ||
1428 | * handlers can run in process context, and can block. | ||
1429 | * | ||
1430 | * This primitive provides the guarantees made by the (now removed) | ||
1431 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
1432 | * guarantees that rcu_read_lock() sections will have completed. | ||
1433 | * In "classic RCU", these two guarantees happen to be one and | ||
1434 | * the same, but can differ in realtime RCU implementations. | ||
1435 | */ | ||
1436 | void synchronize_sched(void) | ||
1437 | { | ||
1438 | struct rcu_synchronize rcu; | ||
1439 | |||
1440 | if (rcu_blocking_is_gp()) | ||
1441 | return; | ||
1442 | |||
1443 | init_completion(&rcu.completion); | ||
1444 | /* Will wake me after RCU finished. */ | ||
1445 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
1446 | /* Wait for it. */ | ||
1447 | wait_for_completion(&rcu.completion); | ||
1448 | } | ||
1449 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
1450 | |||
1451 | /** | ||
1452 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
1453 | * | ||
1454 | * Control will return to the caller some time after a full rcu_bh grace | ||
1455 | * period has elapsed, in other words after all currently executing rcu_bh | ||
1456 | * read-side critical sections have completed. RCU read-side critical | ||
1457 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
1458 | * and may be nested. | ||
1459 | */ | ||
1460 | void synchronize_rcu_bh(void) | ||
1461 | { | ||
1462 | struct rcu_synchronize rcu; | ||
1463 | |||
1464 | if (rcu_blocking_is_gp()) | ||
1465 | return; | ||
1466 | |||
1467 | init_completion(&rcu.completion); | ||
1468 | /* Will wake me after RCU finished. */ | ||
1469 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
1470 | /* Wait for it. */ | ||
1471 | wait_for_completion(&rcu.completion); | ||
1472 | } | ||
1473 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
1474 | |||
1354 | /* | 1475 | /* |
1355 | * Check to see if there is any immediate RCU-related work to be done | 1476 | * Check to see if there is any immediate RCU-related work to be done |
1356 | * by the current CPU, for the specified type of RCU, returning 1 if so. | 1477 | * by the current CPU, for the specified type of RCU, returning 1 if so. |
@@ -1360,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); | |||
1360 | */ | 1481 | */ |
1361 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | 1482 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) |
1362 | { | 1483 | { |
1484 | struct rcu_node *rnp = rdp->mynode; | ||
1485 | |||
1363 | rdp->n_rcu_pending++; | 1486 | rdp->n_rcu_pending++; |
1364 | 1487 | ||
1365 | /* Check for CPU stalls, if enabled. */ | 1488 | /* Check for CPU stalls, if enabled. */ |
@@ -1384,13 +1507,13 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1384 | } | 1507 | } |
1385 | 1508 | ||
1386 | /* Has another RCU grace period completed? */ | 1509 | /* Has another RCU grace period completed? */ |
1387 | if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ | 1510 | if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ |
1388 | rdp->n_rp_gp_completed++; | 1511 | rdp->n_rp_gp_completed++; |
1389 | return 1; | 1512 | return 1; |
1390 | } | 1513 | } |
1391 | 1514 | ||
1392 | /* Has a new RCU grace period started? */ | 1515 | /* Has a new RCU grace period started? */ |
1393 | if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ | 1516 | if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ |
1394 | rdp->n_rp_gp_started++; | 1517 | rdp->n_rp_gp_started++; |
1395 | return 1; | 1518 | return 1; |
1396 | } | 1519 | } |
@@ -1433,6 +1556,21 @@ int rcu_needs_cpu(int cpu) | |||
1433 | rcu_preempt_needs_cpu(cpu); | 1556 | rcu_preempt_needs_cpu(cpu); |
1434 | } | 1557 | } |
1435 | 1558 | ||
1559 | /* | ||
1560 | * This function is invoked towards the end of the scheduler's initialization | ||
1561 | * process. Before this is called, the idle task might contain | ||
1562 | * RCU read-side critical sections (during which time, this idle | ||
1563 | * task is booting the system). After this function is called, the | ||
1564 | * idle tasks are prohibited from containing RCU read-side critical | ||
1565 | * sections. | ||
1566 | */ | ||
1567 | void rcu_scheduler_starting(void) | ||
1568 | { | ||
1569 | WARN_ON(num_online_cpus() != 1); | ||
1570 | WARN_ON(nr_context_switches() > 0); | ||
1571 | rcu_scheduler_active = 1; | ||
1572 | } | ||
1573 | |||
1436 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | 1574 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
1437 | static atomic_t rcu_barrier_cpu_count; | 1575 | static atomic_t rcu_barrier_cpu_count; |
1438 | static DEFINE_MUTEX(rcu_barrier_mutex); | 1576 | static DEFINE_MUTEX(rcu_barrier_mutex); |
@@ -1544,21 +1682,16 @@ static void __cpuinit | |||
1544 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | 1682 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) |
1545 | { | 1683 | { |
1546 | unsigned long flags; | 1684 | unsigned long flags; |
1547 | long lastcomp; | ||
1548 | unsigned long mask; | 1685 | unsigned long mask; |
1549 | struct rcu_data *rdp = rsp->rda[cpu]; | 1686 | struct rcu_data *rdp = rsp->rda[cpu]; |
1550 | struct rcu_node *rnp = rcu_get_root(rsp); | 1687 | struct rcu_node *rnp = rcu_get_root(rsp); |
1551 | 1688 | ||
1552 | /* Set up local state, ensuring consistent view of global state. */ | 1689 | /* Set up local state, ensuring consistent view of global state. */ |
1553 | spin_lock_irqsave(&rnp->lock, flags); | 1690 | spin_lock_irqsave(&rnp->lock, flags); |
1554 | lastcomp = rsp->completed; | ||
1555 | rdp->completed = lastcomp; | ||
1556 | rdp->gpnum = lastcomp; | ||
1557 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | 1691 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ |
1558 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | 1692 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ |
1559 | rdp->beenonline = 1; /* We have now been online. */ | 1693 | rdp->beenonline = 1; /* We have now been online. */ |
1560 | rdp->preemptable = preemptable; | 1694 | rdp->preemptable = preemptable; |
1561 | rdp->passed_quiesc_completed = lastcomp - 1; | ||
1562 | rdp->qlen_last_fqs_check = 0; | 1695 | rdp->qlen_last_fqs_check = 0; |
1563 | rdp->n_force_qs_snap = rsp->n_force_qs; | 1696 | rdp->n_force_qs_snap = rsp->n_force_qs; |
1564 | rdp->blimit = blimit; | 1697 | rdp->blimit = blimit; |
@@ -1580,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1580 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 1713 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
1581 | rnp->qsmaskinit |= mask; | 1714 | rnp->qsmaskinit |= mask; |
1582 | mask = rnp->grpmask; | 1715 | mask = rnp->grpmask; |
1716 | if (rnp == rdp->mynode) { | ||
1717 | rdp->gpnum = rnp->completed; /* if GP in progress... */ | ||
1718 | rdp->completed = rnp->completed; | ||
1719 | rdp->passed_quiesc_completed = rnp->completed - 1; | ||
1720 | } | ||
1583 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 1721 | spin_unlock(&rnp->lock); /* irqs already disabled. */ |
1584 | rnp = rnp->parent; | 1722 | rnp = rnp->parent; |
1585 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | 1723 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); |
@@ -1597,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu) | |||
1597 | /* | 1735 | /* |
1598 | * Handle CPU online/offline notification events. | 1736 | * Handle CPU online/offline notification events. |
1599 | */ | 1737 | */ |
1600 | int __cpuinit rcu_cpu_notify(struct notifier_block *self, | 1738 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
1601 | unsigned long action, void *hcpu) | 1739 | unsigned long action, void *hcpu) |
1602 | { | 1740 | { |
1603 | long cpu = (long)hcpu; | 1741 | long cpu = (long)hcpu; |
1604 | 1742 | ||
@@ -1685,8 +1823,8 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1685 | cpustride *= rsp->levelspread[i]; | 1823 | cpustride *= rsp->levelspread[i]; |
1686 | rnp = rsp->level[i]; | 1824 | rnp = rsp->level[i]; |
1687 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | 1825 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1688 | if (rnp != rcu_get_root(rsp)) | 1826 | spin_lock_init(&rnp->lock); |
1689 | spin_lock_init(&rnp->lock); | 1827 | lockdep_set_class(&rnp->lock, &rcu_node_class[i]); |
1690 | rnp->gpnum = 0; | 1828 | rnp->gpnum = 0; |
1691 | rnp->qsmask = 0; | 1829 | rnp->qsmask = 0; |
1692 | rnp->qsmaskinit = 0; | 1830 | rnp->qsmaskinit = 0; |
@@ -1707,9 +1845,10 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1707 | rnp->level = i; | 1845 | rnp->level = i; |
1708 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | 1846 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); |
1709 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | 1847 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); |
1848 | INIT_LIST_HEAD(&rnp->blocked_tasks[2]); | ||
1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | ||
1710 | } | 1850 | } |
1711 | } | 1851 | } |
1712 | spin_lock_init(&rcu_get_root(rsp)->lock); | ||
1713 | } | 1852 | } |
1714 | 1853 | ||
1715 | /* | 1854 | /* |
@@ -1735,16 +1874,30 @@ do { \ | |||
1735 | } \ | 1874 | } \ |
1736 | } while (0) | 1875 | } while (0) |
1737 | 1876 | ||
1738 | void __init __rcu_init(void) | 1877 | void __init rcu_init(void) |
1739 | { | 1878 | { |
1879 | int i; | ||
1880 | |||
1740 | rcu_bootup_announce(); | 1881 | rcu_bootup_announce(); |
1741 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1742 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | 1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); |
1743 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
1885 | #if NUM_RCU_LVL_4 != 0 | ||
1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); | ||
1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ | ||
1744 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1745 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
1746 | __rcu_init_preempt(); | 1890 | __rcu_init_preempt(); |
1747 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1891 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1892 | |||
1893 | /* | ||
1894 | * We don't need protection against CPU-hotplug here because | ||
1895 | * this is called early in boot, before either interrupts | ||
1896 | * or the scheduler are operational. | ||
1897 | */ | ||
1898 | cpu_notifier(rcu_cpu_notify, 0); | ||
1899 | for_each_online_cpu(i) | ||
1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i); | ||
1748 | } | 1901 | } |
1749 | 1902 | ||
1750 | #include "rcutree_plugin.h" | 1903 | #include "rcutree_plugin.h" |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 1899023b0962..d2a0046f63b2 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -34,10 +34,11 @@ | |||
34 | * In practice, this has not been tested, so there is probably some | 34 | * In practice, this has not been tested, so there is probably some |
35 | * bug somewhere. | 35 | * bug somewhere. |
36 | */ | 36 | */ |
37 | #define MAX_RCU_LVLS 3 | 37 | #define MAX_RCU_LVLS 4 |
38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | 38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) |
39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | 39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) |
40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | 40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) |
41 | #define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT) | ||
41 | 42 | ||
42 | #if NR_CPUS <= RCU_FANOUT | 43 | #if NR_CPUS <= RCU_FANOUT |
43 | # define NUM_RCU_LVLS 1 | 44 | # define NUM_RCU_LVLS 1 |
@@ -45,23 +46,33 @@ | |||
45 | # define NUM_RCU_LVL_1 (NR_CPUS) | 46 | # define NUM_RCU_LVL_1 (NR_CPUS) |
46 | # define NUM_RCU_LVL_2 0 | 47 | # define NUM_RCU_LVL_2 0 |
47 | # define NUM_RCU_LVL_3 0 | 48 | # define NUM_RCU_LVL_3 0 |
49 | # define NUM_RCU_LVL_4 0 | ||
48 | #elif NR_CPUS <= RCU_FANOUT_SQ | 50 | #elif NR_CPUS <= RCU_FANOUT_SQ |
49 | # define NUM_RCU_LVLS 2 | 51 | # define NUM_RCU_LVLS 2 |
50 | # define NUM_RCU_LVL_0 1 | 52 | # define NUM_RCU_LVL_0 1 |
51 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 53 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) |
52 | # define NUM_RCU_LVL_2 (NR_CPUS) | 54 | # define NUM_RCU_LVL_2 (NR_CPUS) |
53 | # define NUM_RCU_LVL_3 0 | 55 | # define NUM_RCU_LVL_3 0 |
56 | # define NUM_RCU_LVL_4 0 | ||
54 | #elif NR_CPUS <= RCU_FANOUT_CUBE | 57 | #elif NR_CPUS <= RCU_FANOUT_CUBE |
55 | # define NUM_RCU_LVLS 3 | 58 | # define NUM_RCU_LVLS 3 |
56 | # define NUM_RCU_LVL_0 1 | 59 | # define NUM_RCU_LVL_0 1 |
57 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | 60 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) |
58 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 61 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) |
59 | # define NUM_RCU_LVL_3 NR_CPUS | 62 | # define NUM_RCU_LVL_3 NR_CPUS |
63 | # define NUM_RCU_LVL_4 0 | ||
64 | #elif NR_CPUS <= RCU_FANOUT_FOURTH | ||
65 | # define NUM_RCU_LVLS 4 | ||
66 | # define NUM_RCU_LVL_0 1 | ||
67 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE) | ||
68 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | ||
69 | # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | ||
70 | # define NUM_RCU_LVL_4 NR_CPUS | ||
60 | #else | 71 | #else |
61 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | 72 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" |
62 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | 73 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ |
63 | 74 | ||
64 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) | 75 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) |
65 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | 76 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) |
66 | 77 | ||
67 | /* | 78 | /* |
@@ -84,14 +95,21 @@ struct rcu_node { | |||
84 | long gpnum; /* Current grace period for this node. */ | 95 | long gpnum; /* Current grace period for this node. */ |
85 | /* This will either be equal to or one */ | 96 | /* This will either be equal to or one */ |
86 | /* behind the root rcu_node's gpnum. */ | 97 | /* behind the root rcu_node's gpnum. */ |
98 | long completed; /* Last grace period completed for this node. */ | ||
99 | /* This will either be equal to or one */ | ||
100 | /* behind the root rcu_node's completed. */ | ||
87 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | 101 | unsigned long qsmask; /* CPUs or groups that need to switch in */ |
88 | /* order for current grace period to proceed.*/ | 102 | /* order for current grace period to proceed.*/ |
89 | /* In leaf rcu_node, each bit corresponds to */ | 103 | /* In leaf rcu_node, each bit corresponds to */ |
90 | /* an rcu_data structure, otherwise, each */ | 104 | /* an rcu_data structure, otherwise, each */ |
91 | /* bit corresponds to a child rcu_node */ | 105 | /* bit corresponds to a child rcu_node */ |
92 | /* structure. */ | 106 | /* structure. */ |
107 | unsigned long expmask; /* Groups that have ->blocked_tasks[] */ | ||
108 | /* elements that need to drain to allow the */ | ||
109 | /* current expedited grace period to */ | ||
110 | /* complete (only for TREE_PREEMPT_RCU). */ | ||
93 | unsigned long qsmaskinit; | 111 | unsigned long qsmaskinit; |
94 | /* Per-GP initialization for qsmask. */ | 112 | /* Per-GP initial value for qsmask & expmask. */ |
95 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | 113 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ |
96 | /* Only one bit will be set in this mask. */ | 114 | /* Only one bit will be set in this mask. */ |
97 | int grplo; /* lowest-numbered CPU or group here. */ | 115 | int grplo; /* lowest-numbered CPU or group here. */ |
@@ -99,7 +117,7 @@ struct rcu_node { | |||
99 | u8 grpnum; /* CPU/group number for next level up. */ | 117 | u8 grpnum; /* CPU/group number for next level up. */ |
100 | u8 level; /* root is at level 0. */ | 118 | u8 level; /* root is at level 0. */ |
101 | struct rcu_node *parent; | 119 | struct rcu_node *parent; |
102 | struct list_head blocked_tasks[2]; | 120 | struct list_head blocked_tasks[4]; |
103 | /* Tasks blocked in RCU read-side critsect. */ | 121 | /* Tasks blocked in RCU read-side critsect. */ |
104 | /* Grace period number (->gpnum) x blocked */ | 122 | /* Grace period number (->gpnum) x blocked */ |
105 | /* by tasks on the (x & 0x1) element of the */ | 123 | /* by tasks on the (x & 0x1) element of the */ |
@@ -114,6 +132,21 @@ struct rcu_node { | |||
114 | for ((rnp) = &(rsp)->node[0]; \ | 132 | for ((rnp) = &(rsp)->node[0]; \ |
115 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) | 133 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) |
116 | 134 | ||
135 | /* | ||
136 | * Do a breadth-first scan of the non-leaf rcu_node structures for the | ||
137 | * specified rcu_state structure. Note that if there is a singleton | ||
138 | * rcu_node tree with but one rcu_node structure, this loop is a no-op. | ||
139 | */ | ||
140 | #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \ | ||
141 | for ((rnp) = &(rsp)->node[0]; \ | ||
142 | (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++) | ||
143 | |||
144 | /* | ||
145 | * Scan the leaves of the rcu_node hierarchy for the specified rcu_state | ||
146 | * structure. Note that if there is a singleton rcu_node tree with but | ||
147 | * one rcu_node structure, this loop -will- visit the rcu_node structure. | ||
148 | * It is still a leaf node, even if it is also the root node. | ||
149 | */ | ||
117 | #define rcu_for_each_leaf_node(rsp, rnp) \ | 150 | #define rcu_for_each_leaf_node(rsp, rnp) \ |
118 | for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ | 151 | for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ |
119 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) | 152 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) |
@@ -204,11 +237,12 @@ struct rcu_data { | |||
204 | #define RCU_GP_IDLE 0 /* No grace period in progress. */ | 237 | #define RCU_GP_IDLE 0 /* No grace period in progress. */ |
205 | #define RCU_GP_INIT 1 /* Grace period being initialized. */ | 238 | #define RCU_GP_INIT 1 /* Grace period being initialized. */ |
206 | #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ | 239 | #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ |
207 | #define RCU_FORCE_QS 3 /* Need to force quiescent state. */ | 240 | #define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */ |
241 | #define RCU_FORCE_QS 4 /* Need to force quiescent state. */ | ||
208 | #ifdef CONFIG_NO_HZ | 242 | #ifdef CONFIG_NO_HZ |
209 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | 243 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK |
210 | #else /* #ifdef CONFIG_NO_HZ */ | 244 | #else /* #ifdef CONFIG_NO_HZ */ |
211 | #define RCU_SIGNAL_INIT RCU_FORCE_QS | 245 | #define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED |
212 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 246 | #endif /* #else #ifdef CONFIG_NO_HZ */ |
213 | 247 | ||
214 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | 248 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ |
@@ -246,7 +280,7 @@ struct rcu_state { | |||
246 | long gpnum; /* Current gp number. */ | 280 | long gpnum; /* Current gp number. */ |
247 | long completed; /* # of last completed gp. */ | 281 | long completed; /* # of last completed gp. */ |
248 | 282 | ||
249 | /* End of fields guarded by root rcu_node's lock. */ | 283 | /* End of fields guarded by root rcu_node's lock. */ |
250 | 284 | ||
251 | spinlock_t onofflock; /* exclude on/offline and */ | 285 | spinlock_t onofflock; /* exclude on/offline and */ |
252 | /* starting new GP. Also */ | 286 | /* starting new GP. Also */ |
@@ -260,6 +294,8 @@ struct rcu_state { | |||
260 | long orphan_qlen; /* Number of orphaned cbs. */ | 294 | long orphan_qlen; /* Number of orphaned cbs. */ |
261 | spinlock_t fqslock; /* Only one task forcing */ | 295 | spinlock_t fqslock; /* Only one task forcing */ |
262 | /* quiescent states. */ | 296 | /* quiescent states. */ |
297 | long completed_fqs; /* Value of completed @ snap. */ | ||
298 | /* Protected by fqslock. */ | ||
263 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | 299 | unsigned long jiffies_force_qs; /* Time at which to invoke */ |
264 | /* force_quiescent_state(). */ | 300 | /* force_quiescent_state(). */ |
265 | unsigned long n_force_qs; /* Number of calls to */ | 301 | unsigned long n_force_qs; /* Number of calls to */ |
@@ -274,11 +310,15 @@ struct rcu_state { | |||
274 | unsigned long jiffies_stall; /* Time at which to check */ | 310 | unsigned long jiffies_stall; /* Time at which to check */ |
275 | /* for CPU stalls. */ | 311 | /* for CPU stalls. */ |
276 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 312 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
277 | #ifdef CONFIG_NO_HZ | ||
278 | long dynticks_completed; /* Value of completed @ snap. */ | ||
279 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
280 | }; | 313 | }; |
281 | 314 | ||
315 | /* Return values for rcu_preempt_offline_tasks(). */ | ||
316 | |||
317 | #define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */ | ||
318 | /* GP were moved to root. */ | ||
319 | #define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */ | ||
320 | /* GP were moved to root. */ | ||
321 | |||
282 | #ifdef RCU_TREE_NONCORE | 322 | #ifdef RCU_TREE_NONCORE |
283 | 323 | ||
284 | /* | 324 | /* |
@@ -298,10 +338,14 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); | |||
298 | #else /* #ifdef RCU_TREE_NONCORE */ | 338 | #else /* #ifdef RCU_TREE_NONCORE */ |
299 | 339 | ||
300 | /* Forward declarations for rcutree_plugin.h */ | 340 | /* Forward declarations for rcutree_plugin.h */ |
301 | static inline void rcu_bootup_announce(void); | 341 | static void rcu_bootup_announce(void); |
302 | long rcu_batches_completed(void); | 342 | long rcu_batches_completed(void); |
303 | static void rcu_preempt_note_context_switch(int cpu); | 343 | static void rcu_preempt_note_context_switch(int cpu); |
304 | static int rcu_preempted_readers(struct rcu_node *rnp); | 344 | static int rcu_preempted_readers(struct rcu_node *rnp); |
345 | #ifdef CONFIG_HOTPLUG_CPU | ||
346 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | ||
347 | unsigned long flags); | ||
348 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
305 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 349 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
306 | static void rcu_print_task_stall(struct rcu_node *rnp); | 350 | static void rcu_print_task_stall(struct rcu_node *rnp); |
307 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 351 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
@@ -315,6 +359,9 @@ static void rcu_preempt_offline_cpu(int cpu); | |||
315 | static void rcu_preempt_check_callbacks(int cpu); | 359 | static void rcu_preempt_check_callbacks(int cpu); |
316 | static void rcu_preempt_process_callbacks(void); | 360 | static void rcu_preempt_process_callbacks(void); |
317 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | 361 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); |
362 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) | ||
363 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp); | ||
364 | #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ | ||
318 | static int rcu_preempt_pending(int cpu); | 365 | static int rcu_preempt_pending(int cpu); |
319 | static int rcu_preempt_needs_cpu(int cpu); | 366 | static int rcu_preempt_needs_cpu(int cpu); |
320 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); | 367 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); |
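
[Editor's note: the four-level NUM_RCU_LVL_* ladder added above determines how many rcu_node structures the tree needs for a given NR_CPUS. A small user-space sketch of the same arithmetic, using made-up values (fanout 64, one million CPUs) large enough to require the new fourth level:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long fanout = 64, ncpus = 1000000;	/* example values only */
	long sq = fanout * fanout, cube = sq * fanout;
	long lvl1 = DIV_ROUND_UP(ncpus, cube);	/* children of the root */
	long lvl2 = DIV_ROUND_UP(ncpus, sq);
	long lvl3 = DIV_ROUND_UP(ncpus, fanout);/* leaf rcu_node structures */
	long nodes = 1 + lvl1 + lvl2 + lvl3;	/* RCU_SUM minus the CPUs */

	printf("levels=4 leaf_nodes=%ld total_rcu_nodes=%ld\n", lvl3, nodes);
	return 0;
}
]
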
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index ef2a58c2b9d5..37fbccdf41d5 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -24,16 +24,19 @@ | |||
24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/delay.h> | ||
27 | 28 | ||
28 | #ifdef CONFIG_TREE_PREEMPT_RCU | 29 | #ifdef CONFIG_TREE_PREEMPT_RCU |
29 | 30 | ||
30 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | 31 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); |
31 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | 32 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); |
32 | 33 | ||
34 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | ||
35 | |||
33 | /* | 36 | /* |
34 | * Tell them what RCU they are running. | 37 | * Tell them what RCU they are running. |
35 | */ | 38 | */ |
36 | static inline void rcu_bootup_announce(void) | 39 | static void __init rcu_bootup_announce(void) |
37 | { | 40 | { |
38 | printk(KERN_INFO | 41 | printk(KERN_INFO |
39 | "Experimental preemptable hierarchical RCU implementation.\n"); | 42 | "Experimental preemptable hierarchical RCU implementation.\n"); |
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
67 | static void rcu_preempt_qs(int cpu) | 70 | static void rcu_preempt_qs(int cpu) |
68 | { | 71 | { |
69 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 72 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
70 | rdp->passed_quiesc_completed = rdp->completed; | 73 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
71 | barrier(); | 74 | barrier(); |
72 | rdp->passed_quiesc = 1; | 75 | rdp->passed_quiesc = 1; |
73 | } | 76 | } |
@@ -157,14 +160,58 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock); | |||
157 | */ | 160 | */ |
158 | static int rcu_preempted_readers(struct rcu_node *rnp) | 161 | static int rcu_preempted_readers(struct rcu_node *rnp) |
159 | { | 162 | { |
160 | return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | 163 | int phase = rnp->gpnum & 0x1; |
164 | |||
165 | return !list_empty(&rnp->blocked_tasks[phase]) || | ||
166 | !list_empty(&rnp->blocked_tasks[phase + 2]); | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Record a quiescent state for all tasks that were previously queued | ||
171 | * on the specified rcu_node structure and that were blocking the current | ||
172 | * RCU grace period. The caller must hold the specified rnp->lock with | ||
173 | * irqs disabled, and this lock is released upon return, but irqs remain | ||
174 | * disabled. | ||
175 | */ | ||
176 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | ||
177 | __releases(rnp->lock) | ||
178 | { | ||
179 | unsigned long mask; | ||
180 | struct rcu_node *rnp_p; | ||
181 | |||
182 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { | ||
183 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
184 | return; /* Still need more quiescent states! */ | ||
185 | } | ||
186 | |||
187 | rnp_p = rnp->parent; | ||
188 | if (rnp_p == NULL) { | ||
189 | /* | ||
190 | * Either there is only one rcu_node in the tree, | ||
191 | * or tasks were kicked up to root rcu_node due to | ||
192 | * CPUs going offline. | ||
193 | */ | ||
194 | rcu_report_qs_rsp(&rcu_preempt_state, flags); | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | /* Report up the rest of the hierarchy. */ | ||
199 | mask = rnp->grpmask; | ||
200 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
201 | spin_lock(&rnp_p->lock); /* irqs already disabled. */ | ||
202 | rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); | ||
161 | } | 203 | } |
162 | 204 | ||
205 | /* | ||
206 | * Handle special cases during rcu_read_unlock(), such as needing to | ||
207 | * notify RCU core processing or task having blocked during the RCU | ||
208 | * read-side critical section. | ||
209 | */ | ||
163 | static void rcu_read_unlock_special(struct task_struct *t) | 210 | static void rcu_read_unlock_special(struct task_struct *t) |
164 | { | 211 | { |
165 | int empty; | 212 | int empty; |
213 | int empty_exp; | ||
166 | unsigned long flags; | 214 | unsigned long flags; |
167 | unsigned long mask; | ||
168 | struct rcu_node *rnp; | 215 | struct rcu_node *rnp; |
169 | int special; | 216 | int special; |
170 | 217 | ||
@@ -207,36 +254,30 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
207 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 254 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
208 | } | 255 | } |
209 | empty = !rcu_preempted_readers(rnp); | 256 | empty = !rcu_preempted_readers(rnp); |
257 | empty_exp = !rcu_preempted_readers_exp(rnp); | ||
258 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | ||
210 | list_del_init(&t->rcu_node_entry); | 259 | list_del_init(&t->rcu_node_entry); |
211 | t->rcu_blocked_node = NULL; | 260 | t->rcu_blocked_node = NULL; |
212 | 261 | ||
213 | /* | 262 | /* |
214 | * If this was the last task on the current list, and if | 263 | * If this was the last task on the current list, and if |
215 | * we aren't waiting on any CPUs, report the quiescent state. | 264 | * we aren't waiting on any CPUs, report the quiescent state. |
216 | * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() | 265 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. |
217 | * drop rnp->lock and restore irq. | ||
218 | */ | 266 | */ |
219 | if (!empty && rnp->qsmask == 0 && | 267 | if (empty) |
220 | !rcu_preempted_readers(rnp)) { | ||
221 | struct rcu_node *rnp_p; | ||
222 | |||
223 | if (rnp->parent == NULL) { | ||
224 | /* Only one rcu_node in the tree. */ | ||
225 | cpu_quiet_msk_finish(&rcu_preempt_state, flags); | ||
226 | return; | ||
227 | } | ||
228 | /* Report up the rest of the hierarchy. */ | ||
229 | mask = rnp->grpmask; | ||
230 | spin_unlock_irqrestore(&rnp->lock, flags); | 268 | spin_unlock_irqrestore(&rnp->lock, flags); |
231 | rnp_p = rnp->parent; | 269 | else |
232 | spin_lock_irqsave(&rnp_p->lock, flags); | 270 | rcu_report_unblock_qs_rnp(rnp, flags); |
233 | WARN_ON_ONCE(rnp->qsmask); | 271 | |
234 | cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); | 272 | /* |
235 | return; | 273 | * If this was the last task on the expedited lists, |
236 | } | 274 | * then we need to report up the rcu_node hierarchy. |
237 | spin_unlock(&rnp->lock); | 275 | */ |
276 | if (!empty_exp && !rcu_preempted_readers_exp(rnp)) | ||
277 | rcu_report_exp_rnp(&rcu_preempt_state, rnp); | ||
278 | } else { | ||
279 | local_irq_restore(flags); | ||
238 | } | 280 | } |
239 | local_irq_restore(flags); | ||
240 | } | 281 | } |
241 | 282 | ||
242 | /* | 283 | /* |
@@ -303,6 +344,8 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
303 | * rcu_node. The reason for not just moving them to the immediate | 344 | * rcu_node. The reason for not just moving them to the immediate |
304 | * parent is to remove the need for rcu_read_unlock_special() to | 345 | * parent is to remove the need for rcu_read_unlock_special() to |
305 | * make more than two attempts to acquire the target rcu_node's lock. | 346 | * make more than two attempts to acquire the target rcu_node's lock. |
347 | * Returns true if there were tasks blocking the current RCU grace | ||
348 | * period. | ||
306 | * | 349 | * |
307 | * Returns 1 if there was previously a task blocking the current grace | 350 | * Returns 1 if there was previously a task blocking the current grace |
308 | * period on the specified rcu_node structure. | 351 | * period on the specified rcu_node structure. |
@@ -316,7 +359,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
316 | int i; | 359 | int i; |
317 | struct list_head *lp; | 360 | struct list_head *lp; |
318 | struct list_head *lp_root; | 361 | struct list_head *lp_root; |
319 | int retval = rcu_preempted_readers(rnp); | 362 | int retval = 0; |
320 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 363 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
321 | struct task_struct *tp; | 364 | struct task_struct *tp; |
322 | 365 | ||
@@ -326,7 +369,9 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
326 | } | 369 | } |
327 | WARN_ON_ONCE(rnp != rdp->mynode && | 370 | WARN_ON_ONCE(rnp != rdp->mynode && |
328 | (!list_empty(&rnp->blocked_tasks[0]) || | 371 | (!list_empty(&rnp->blocked_tasks[0]) || |
329 | !list_empty(&rnp->blocked_tasks[1]))); | 372 | !list_empty(&rnp->blocked_tasks[1]) || |
373 | !list_empty(&rnp->blocked_tasks[2]) || | ||
374 | !list_empty(&rnp->blocked_tasks[3]))); | ||
330 | 375 | ||
331 | /* | 376 | /* |
332 | * Move tasks up to root rcu_node. Rely on the fact that the | 377 | * Move tasks up to root rcu_node. Rely on the fact that the |
@@ -334,7 +379,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
334 | * rcu_nodes in terms of gp_num value. This fact allows us to | 379 | * rcu_nodes in terms of gp_num value. This fact allows us to |
335 | * move the blocked_tasks[] array directly, element by element. | 380 | * move the blocked_tasks[] array directly, element by element. |
336 | */ | 381 | */ |
337 | for (i = 0; i < 2; i++) { | 382 | if (rcu_preempted_readers(rnp)) |
383 | retval |= RCU_OFL_TASKS_NORM_GP; | ||
384 | if (rcu_preempted_readers_exp(rnp)) | ||
385 | retval |= RCU_OFL_TASKS_EXP_GP; | ||
386 | for (i = 0; i < 4; i++) { | ||
338 | lp = &rnp->blocked_tasks[i]; | 387 | lp = &rnp->blocked_tasks[i]; |
339 | lp_root = &rnp_root->blocked_tasks[i]; | 388 | lp_root = &rnp_root->blocked_tasks[i]; |
340 | while (!list_empty(lp)) { | 389 | while (!list_empty(lp)) { |
@@ -346,7 +395,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
346 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 395 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ |
347 | } | 396 | } |
348 | } | 397 | } |
349 | |||
350 | return retval; | 398 | return retval; |
351 | } | 399 | } |
352 | 400 | ||
@@ -398,14 +446,183 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
398 | } | 446 | } |
399 | EXPORT_SYMBOL_GPL(call_rcu); | 447 | EXPORT_SYMBOL_GPL(call_rcu); |
400 | 448 | ||
449 | /** | ||
450 | * synchronize_rcu - wait until a grace period has elapsed. | ||
451 | * | ||
452 | * Control will return to the caller some time after a full grace | ||
453 | * period has elapsed, in other words after all currently executing RCU | ||
454 | * read-side critical sections have completed. RCU read-side critical | ||
455 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
456 | * and may be nested. | ||
457 | */ | ||
458 | void synchronize_rcu(void) | ||
459 | { | ||
460 | struct rcu_synchronize rcu; | ||
461 | |||
462 | if (!rcu_scheduler_active) | ||
463 | return; | ||
464 | |||
465 | init_completion(&rcu.completion); | ||
466 | /* Will wake me after RCU finished. */ | ||
467 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
468 | /* Wait for it. */ | ||
469 | wait_for_completion(&rcu.completion); | ||
470 | } | ||
471 | EXPORT_SYMBOL_GPL(synchronize_rcu); | ||
472 | |||
473 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); | ||
474 | static long sync_rcu_preempt_exp_count; | ||
475 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | ||
476 | |||
401 | /* | 477 | /* |
402 | * Wait for an rcu-preempt grace period. We are supposed to expedite the | 478 | * Return non-zero if there are any tasks in RCU read-side critical |
403 | * grace period, but this is the crude slow compatability hack, so just | 479 | * sections blocking the current preemptible-RCU expedited grace period. |
404 | * invoke synchronize_rcu(). | 480 | * If there is no preemptible-RCU expedited grace period currently in |
481 | * progress, returns zero unconditionally. | ||
482 | */ | ||
483 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) | ||
484 | { | ||
485 | return !list_empty(&rnp->blocked_tasks[2]) || | ||
486 | !list_empty(&rnp->blocked_tasks[3]); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Return non-zero if there is no RCU expedited grace period in progress | ||
491 | * for the specified rcu_node structure, in other words, if all CPUs and | ||
492 | * tasks covered by the specified rcu_node structure have done their bit | ||
493 | * for the current expedited grace period. Works only for preemptible | ||
494 | * RCU -- other RCU implementations use other means. | ||
495 | * | ||
496 | * Caller must hold sync_rcu_preempt_exp_mutex. | ||
497 | */ | ||
498 | static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) | ||
499 | { | ||
500 | return !rcu_preempted_readers_exp(rnp) && | ||
501 | ACCESS_ONCE(rnp->expmask) == 0; | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Report the exit from RCU read-side critical section for the last task | ||
506 | * that queued itself during or before the current expedited preemptible-RCU | ||
507 | * grace period. This event is reported either to the rcu_node structure on | ||
508 | * which the task was queued or to one of that rcu_node structure's ancestors, | ||
509 | * recursively up the tree. (Calm down, calm down, we do the recursion | ||
510 | * iteratively!) | ||
511 | * | ||
512 | * Caller must hold sync_rcu_preempt_exp_mutex. | ||
513 | */ | ||
514 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | ||
515 | { | ||
516 | unsigned long flags; | ||
517 | unsigned long mask; | ||
518 | |||
519 | spin_lock_irqsave(&rnp->lock, flags); | ||
520 | for (;;) { | ||
521 | if (!sync_rcu_preempt_exp_done(rnp)) | ||
522 | break; | ||
523 | if (rnp->parent == NULL) { | ||
524 | wake_up(&sync_rcu_preempt_exp_wq); | ||
525 | break; | ||
526 | } | ||
527 | mask = rnp->grpmask; | ||
528 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
529 | rnp = rnp->parent; | ||
530 | spin_lock(&rnp->lock); /* irqs already disabled */ | ||
531 | rnp->expmask &= ~mask; | ||
532 | } | ||
533 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
534 | } | ||
535 | |||
536 | /* | ||
537 | * Snapshot the tasks blocking the newly started preemptible-RCU expedited | ||
538 | * grace period for the specified rcu_node structure. If there are no such | ||
539 | * tasks, report it up the rcu_node hierarchy. | ||
540 | * | ||
541 | * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock. | ||
542 | */ | ||
543 | static void | ||
544 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | ||
545 | { | ||
546 | int must_wait; | ||
547 | |||
548 | spin_lock(&rnp->lock); /* irqs already disabled */ | ||
549 | list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]); | ||
550 | list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]); | ||
551 | must_wait = rcu_preempted_readers_exp(rnp); | ||
552 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
553 | if (!must_wait) | ||
554 | rcu_report_exp_rnp(rsp, rnp); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea | ||
559 | * is to invoke synchronize_sched_expedited() to push all the tasks to | ||
560 | * the ->blocked_tasks[] lists, move all entries from the first set of | ||
561 | * ->blocked_tasks[] lists to the second set, and finally wait for this | ||
562 | * second set to drain. | ||
405 | */ | 563 | */ |
406 | void synchronize_rcu_expedited(void) | 564 | void synchronize_rcu_expedited(void) |
407 | { | 565 | { |
408 | synchronize_rcu(); | 566 | unsigned long flags; |
567 | struct rcu_node *rnp; | ||
568 | struct rcu_state *rsp = &rcu_preempt_state; | ||
569 | long snap; | ||
570 | int trycount = 0; | ||
571 | |||
572 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ | ||
573 | snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; | ||
574 | smp_mb(); /* Above access cannot bleed into critical section. */ | ||
575 | |||
576 | /* | ||
577 | * Acquire lock, falling back to synchronize_rcu() if too many | ||
578 | * lock-acquisition failures. Of course, if someone does the | ||
579 | * expedited grace period for us, just leave. | ||
580 | */ | ||
581 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { | ||
582 | if (trycount++ < 10) | ||
583 | udelay(trycount * num_online_cpus()); | ||
584 | else { | ||
585 | synchronize_rcu(); | ||
586 | return; | ||
587 | } | ||
588 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
589 | goto mb_ret; /* Others did our work for us. */ | ||
590 | } | ||
591 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
592 | goto unlock_mb_ret; /* Others did our work for us. */ | ||
593 | |||
594 | /* force all RCU readers onto blocked_tasks[]. */ | ||
595 | synchronize_sched_expedited(); | ||
596 | |||
597 | spin_lock_irqsave(&rsp->onofflock, flags); | ||
598 | |||
599 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ | ||
600 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { | ||
601 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
602 | rnp->expmask = rnp->qsmaskinit; | ||
603 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
604 | } | ||
605 | |||
606 | /* Snapshot current state of ->blocked_tasks[] lists. */ | ||
607 | rcu_for_each_leaf_node(rsp, rnp) | ||
608 | sync_rcu_preempt_exp_init(rsp, rnp); | ||
609 | if (NUM_RCU_NODES > 1) | ||
610 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); | ||
611 | |||
612 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
613 | |||
614 | /* Wait for snapshotted ->blocked_tasks[] lists to drain. */ | ||
615 | rnp = rcu_get_root(rsp); | ||
616 | wait_event(sync_rcu_preempt_exp_wq, | ||
617 | sync_rcu_preempt_exp_done(rnp)); | ||
618 | |||
619 | /* Clean up and exit. */ | ||
620 | smp_mb(); /* ensure expedited GP seen before counter increment. */ | ||
621 | ACCESS_ONCE(sync_rcu_preempt_exp_count)++; | ||
622 | unlock_mb_ret: | ||
623 | mutex_unlock(&sync_rcu_preempt_exp_mutex); | ||
624 | mb_ret: | ||
625 | smp_mb(); /* ensure subsequent action seen after grace period. */ | ||
409 | } | 626 | } |
410 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | 627 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
411 | 628 | ||
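The rewritten synchronize_rcu_expedited() above is built around a snapshot of sync_rcu_preempt_exp_count plus a mutex_trylock() loop: if the counter advances past the snapshot while we wait for the mutex, another caller's expedited grace period already covers us and we can return early; too many failed trylocks fall back to the ordinary synchronize_rcu(). Below is a minimal userspace sketch of that snapshot/trylock pattern, with a pthread mutex and C11 atomics standing in for the kernel mutex, ACCESS_ONCE() and the explicit memory barriers (which are omitted here); it is an illustration of the idea, not the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_long exp_count;                  /* completed expedited passes */

/* Returns true once an expedited pass covering the caller has completed. */
static bool expedited_pass(void (*force_readers_to_complete)(void))
{
    long snap = atomic_load(&exp_count) + 1;   /* snapshot of the completion counter */
    int trycount = 0;

    while (pthread_mutex_trylock(&exp_mutex) != 0) {
        if (trycount++ >= 10)
            return false;                      /* caller falls back to the slow path */
        if (atomic_load(&exp_count) - snap > 0)
            return true;                       /* counter advanced past our snapshot: others did our work */
    }
    if (atomic_load(&exp_count) - snap > 0) {  /* recheck now that we hold the lock */
        pthread_mutex_unlock(&exp_mutex);
        return true;
    }
    force_readers_to_complete();               /* the expensive part of the pass */
    atomic_fetch_add(&exp_count, 1);           /* publish the completed pass */
    pthread_mutex_unlock(&exp_mutex);
    return true;
}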
@@ -481,7 +698,7 @@ void exit_rcu(void) | |||
481 | /* | 698 | /* |
482 | * Tell them what RCU they are running. | 699 | * Tell them what RCU they are running. |
483 | */ | 700 | */ |
484 | static inline void rcu_bootup_announce(void) | 701 | static void __init rcu_bootup_announce(void) |
485 | { | 702 | { |
486 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | 703 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); |
487 | } | 704 | } |
@@ -512,6 +729,16 @@ static int rcu_preempted_readers(struct rcu_node *rnp) | |||
512 | return 0; | 729 | return 0; |
513 | } | 730 | } |
514 | 731 | ||
732 | #ifdef CONFIG_HOTPLUG_CPU | ||
733 | |||
734 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | ||
735 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | ||
736 | { | ||
737 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
738 | } | ||
739 | |||
740 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
741 | |||
515 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 742 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
516 | 743 | ||
517 | /* | 744 | /* |
@@ -594,6 +821,20 @@ void synchronize_rcu_expedited(void) | |||
594 | } | 821 | } |
595 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | 822 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
596 | 823 | ||
824 | #ifdef CONFIG_HOTPLUG_CPU | ||
825 | |||
826 | /* | ||
827 | * Because preemptable RCU does not exist, there is never any need to | ||
828 | * report on tasks preempted in RCU read-side critical sections during | ||
829 | * expedited RCU grace periods. | ||
830 | */ | ||
831 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | ||
832 | { | ||
833 | return; | ||
834 | } | ||
835 | |||
836 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
837 | |||
597 | /* | 838 | /* |
598 | * Because preemptable RCU does not exist, it never has any work to do. | 839 | * Because preemptable RCU does not exist, it never has any work to do. |
599 | */ | 840 | */ |
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 4b31c779e62e..9d2c88423b31 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -155,12 +155,15 @@ static const struct file_operations rcudata_csv_fops = { | |||
155 | 155 | ||
156 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | 156 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) |
157 | { | 157 | { |
158 | long gpnum; | ||
158 | int level = 0; | 159 | int level = 0; |
160 | int phase; | ||
159 | struct rcu_node *rnp; | 161 | struct rcu_node *rnp; |
160 | 162 | ||
163 | gpnum = rsp->gpnum; | ||
161 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " | 164 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " |
162 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", | 165 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", |
163 | rsp->completed, rsp->gpnum, rsp->signaled, | 166 | rsp->completed, gpnum, rsp->signaled, |
164 | (long)(rsp->jiffies_force_qs - jiffies), | 167 | (long)(rsp->jiffies_force_qs - jiffies), |
165 | (int)(jiffies & 0xffff), | 168 | (int)(jiffies & 0xffff), |
166 | rsp->n_force_qs, rsp->n_force_qs_ngp, | 169 | rsp->n_force_qs, rsp->n_force_qs_ngp, |
@@ -171,8 +174,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
171 | seq_puts(m, "\n"); | 174 | seq_puts(m, "\n"); |
172 | level = rnp->level; | 175 | level = rnp->level; |
173 | } | 176 | } |
174 | seq_printf(m, "%lx/%lx %d:%d ^%d ", | 177 | phase = gpnum & 0x1; |
178 | seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ", | ||
175 | rnp->qsmask, rnp->qsmaskinit, | 179 | rnp->qsmask, rnp->qsmaskinit, |
180 | "T."[list_empty(&rnp->blocked_tasks[phase])], | ||
181 | "E."[list_empty(&rnp->blocked_tasks[phase + 2])], | ||
182 | "T."[list_empty(&rnp->blocked_tasks[!phase])], | ||
183 | "E."[list_empty(&rnp->blocked_tasks[!phase + 2])], | ||
176 | rnp->grplo, rnp->grphi, rnp->grpnum); | 184 | rnp->grplo, rnp->grphi, rnp->grpnum); |
177 | } | 185 | } |
178 | seq_puts(m, "\n"); | 186 | seq_puts(m, "\n"); |
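The new per-node line in print_one_rcu_state() prints four one-character flags with the "T."[cond] idiom: a two-character string literal indexed by the result of list_empty(), so a non-empty blocked-tasks list shows the letter and an empty one shows a dot. A small stand-alone sketch of the idiom, with hypothetical flag names:

#include <stdbool.h>
#include <stdio.h>

/* Print 'T'/'E' when the corresponding list is non-empty, '.' when it is empty. */
static void print_blocked_flags(bool gp_list_empty, bool exp_list_empty)
{
    printf("%c%c\n", "T."[gp_list_empty], "E."[exp_list_empty]);
}

int main(void)
{
    print_blocked_flags(false, true);   /* non-empty GP list, empty expedited list: prints "T." */
    return 0;
}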
diff --git a/kernel/sched.c b/kernel/sched.c index 3c11ae0a948d..aa31244caa9f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -535,14 +535,12 @@ struct rq { | |||
535 | #define CPU_LOAD_IDX_MAX 5 | 535 | #define CPU_LOAD_IDX_MAX 5 |
536 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; | 536 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; |
537 | #ifdef CONFIG_NO_HZ | 537 | #ifdef CONFIG_NO_HZ |
538 | unsigned long last_tick_seen; | ||
539 | unsigned char in_nohz_recently; | 538 | unsigned char in_nohz_recently; |
540 | #endif | 539 | #endif |
541 | /* capture load from *all* tasks on this cpu: */ | 540 | /* capture load from *all* tasks on this cpu: */ |
542 | struct load_weight load; | 541 | struct load_weight load; |
543 | unsigned long nr_load_updates; | 542 | unsigned long nr_load_updates; |
544 | u64 nr_switches; | 543 | u64 nr_switches; |
545 | u64 nr_migrations_in; | ||
546 | 544 | ||
547 | struct cfs_rq cfs; | 545 | struct cfs_rq cfs; |
548 | struct rt_rq rt; | 546 | struct rt_rq rt; |
@@ -591,6 +589,8 @@ struct rq { | |||
591 | 589 | ||
592 | u64 rt_avg; | 590 | u64 rt_avg; |
593 | u64 age_stamp; | 591 | u64 age_stamp; |
592 | u64 idle_stamp; | ||
593 | u64 avg_idle; | ||
594 | #endif | 594 | #endif |
595 | 595 | ||
596 | /* calc_load related fields */ | 596 | /* calc_load related fields */ |
@@ -772,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
772 | if (!sched_feat_names[i]) | 772 | if (!sched_feat_names[i]) |
773 | return -EINVAL; | 773 | return -EINVAL; |
774 | 774 | ||
775 | filp->f_pos += cnt; | 775 | *ppos += cnt; |
776 | 776 | ||
777 | return cnt; | 777 | return cnt; |
778 | } | 778 | } |
@@ -2017,6 +2017,7 @@ void kthread_bind(struct task_struct *p, unsigned int cpu) | |||
2017 | } | 2017 | } |
2018 | 2018 | ||
2019 | spin_lock_irqsave(&rq->lock, flags); | 2019 | spin_lock_irqsave(&rq->lock, flags); |
2020 | update_rq_clock(rq); | ||
2020 | set_task_cpu(p, cpu); | 2021 | set_task_cpu(p, cpu); |
2021 | p->cpus_allowed = cpumask_of_cpu(cpu); | 2022 | p->cpus_allowed = cpumask_of_cpu(cpu); |
2022 | p->rt.nr_cpus_allowed = 1; | 2023 | p->rt.nr_cpus_allowed = 1; |
@@ -2078,7 +2079,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
2078 | #endif | 2079 | #endif |
2079 | if (old_cpu != new_cpu) { | 2080 | if (old_cpu != new_cpu) { |
2080 | p->se.nr_migrations++; | 2081 | p->se.nr_migrations++; |
2081 | new_rq->nr_migrations_in++; | ||
2082 | #ifdef CONFIG_SCHEDSTATS | 2082 | #ifdef CONFIG_SCHEDSTATS |
2083 | if (task_hot(p, old_rq->clock, NULL)) | 2083 | if (task_hot(p, old_rq->clock, NULL)) |
2084 | schedstat_inc(p, se.nr_forced2_migrations); | 2084 | schedstat_inc(p, se.nr_forced2_migrations); |
@@ -2115,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) | |||
2115 | * it is sufficient to simply update the task's cpu field. | 2115 | * it is sufficient to simply update the task's cpu field. |
2116 | */ | 2116 | */ |
2117 | if (!p->se.on_rq && !task_running(rq, p)) { | 2117 | if (!p->se.on_rq && !task_running(rq, p)) { |
2118 | update_rq_clock(rq); | ||
2118 | set_task_cpu(p, dest_cpu); | 2119 | set_task_cpu(p, dest_cpu); |
2119 | return 0; | 2120 | return 0; |
2120 | } | 2121 | } |
@@ -2376,14 +2377,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2376 | task_rq_unlock(rq, &flags); | 2377 | task_rq_unlock(rq, &flags); |
2377 | 2378 | ||
2378 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); | 2379 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); |
2379 | if (cpu != orig_cpu) | 2380 | if (cpu != orig_cpu) { |
2381 | local_irq_save(flags); | ||
2382 | rq = cpu_rq(cpu); | ||
2383 | update_rq_clock(rq); | ||
2380 | set_task_cpu(p, cpu); | 2384 | set_task_cpu(p, cpu); |
2381 | 2385 | local_irq_restore(flags); | |
2386 | } | ||
2382 | rq = task_rq_lock(p, &flags); | 2387 | rq = task_rq_lock(p, &flags); |
2383 | 2388 | ||
2384 | if (rq != orig_rq) | ||
2385 | update_rq_clock(rq); | ||
2386 | |||
2387 | WARN_ON(p->state != TASK_WAKING); | 2389 | WARN_ON(p->state != TASK_WAKING); |
2388 | cpu = task_cpu(p); | 2390 | cpu = task_cpu(p); |
2389 | 2391 | ||
@@ -2440,6 +2442,17 @@ out_running: | |||
2440 | #ifdef CONFIG_SMP | 2442 | #ifdef CONFIG_SMP |
2441 | if (p->sched_class->task_wake_up) | 2443 | if (p->sched_class->task_wake_up) |
2442 | p->sched_class->task_wake_up(rq, p); | 2444 | p->sched_class->task_wake_up(rq, p); |
2445 | |||
2446 | if (unlikely(rq->idle_stamp)) { | ||
2447 | u64 delta = rq->clock - rq->idle_stamp; | ||
2448 | u64 max = 2*sysctl_sched_migration_cost; | ||
2449 | |||
2450 | if (delta > max) | ||
2451 | rq->avg_idle = max; | ||
2452 | else | ||
2453 | update_avg(&rq->avg_idle, delta); | ||
2454 | rq->idle_stamp = 0; | ||
2455 | } | ||
2443 | #endif | 2456 | #endif |
2444 | out: | 2457 | out: |
2445 | task_rq_unlock(rq, &flags); | 2458 | task_rq_unlock(rq, &flags); |
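The idle_stamp/avg_idle pair added above keeps a clamped running average of how long this runqueue stays idle: try_to_wake_up() folds each idle period into avg_idle, capping it at twice sysctl_sched_migration_cost, and idle_balance() later bails out when the average idle time is too short for a newidle balance to pay off. A userspace sketch of the update rule follows, under the assumption that update_avg() is the usual (sample - avg)/8 running average used elsewhere in sched.c:

#include <stdint.h>
#include <stdio.h>

static void update_avg(uint64_t *avg, uint64_t sample)
{
    int64_t diff = (int64_t)sample - (int64_t)*avg;
    *avg += diff / 8;                        /* 1/8-weight running average */
}

static void note_idle_end(uint64_t *avg_idle, uint64_t idle_ns,
                          uint64_t migration_cost_ns)
{
    uint64_t max = 2 * migration_cost_ns;

    if (idle_ns > max)
        *avg_idle = max;                     /* clamp: long idle periods only saturate the average */
    else
        update_avg(avg_idle, idle_ns);
}

int main(void)
{
    uint64_t avg_idle = 2 * 500000;          /* boot value: twice the migration cost */

    note_idle_end(&avg_idle, 100000, 500000);
    printf("avg_idle = %llu ns\n", (unsigned long long)avg_idle);
    /* idle_balance() would skip newidle balancing once avg_idle < migration cost */
    return 0;
}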
@@ -2545,6 +2558,7 @@ static void __sched_fork(struct task_struct *p) | |||
2545 | void sched_fork(struct task_struct *p, int clone_flags) | 2558 | void sched_fork(struct task_struct *p, int clone_flags) |
2546 | { | 2559 | { |
2547 | int cpu = get_cpu(); | 2560 | int cpu = get_cpu(); |
2561 | unsigned long flags; | ||
2548 | 2562 | ||
2549 | __sched_fork(p); | 2563 | __sched_fork(p); |
2550 | 2564 | ||
@@ -2581,7 +2595,10 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2581 | #ifdef CONFIG_SMP | 2595 | #ifdef CONFIG_SMP |
2582 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); | 2596 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); |
2583 | #endif | 2597 | #endif |
2598 | local_irq_save(flags); | ||
2599 | update_rq_clock(cpu_rq(cpu)); | ||
2584 | set_task_cpu(p, cpu); | 2600 | set_task_cpu(p, cpu); |
2601 | local_irq_restore(flags); | ||
2585 | 2602 | ||
2586 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 2603 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
2587 | if (likely(sched_info_on())) | 2604 | if (likely(sched_info_on())) |
@@ -2848,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
2848 | */ | 2865 | */ |
2849 | arch_start_context_switch(prev); | 2866 | arch_start_context_switch(prev); |
2850 | 2867 | ||
2851 | if (unlikely(!mm)) { | 2868 | if (likely(!mm)) { |
2852 | next->active_mm = oldmm; | 2869 | next->active_mm = oldmm; |
2853 | atomic_inc(&oldmm->mm_count); | 2870 | atomic_inc(&oldmm->mm_count); |
2854 | enter_lazy_tlb(oldmm, next); | 2871 | enter_lazy_tlb(oldmm, next); |
2855 | } else | 2872 | } else |
2856 | switch_mm(oldmm, mm, next); | 2873 | switch_mm(oldmm, mm, next); |
2857 | 2874 | ||
2858 | if (unlikely(!prev->mm)) { | 2875 | if (likely(!prev->mm)) { |
2859 | prev->active_mm = NULL; | 2876 | prev->active_mm = NULL; |
2860 | rq->prev_mm = oldmm; | 2877 | rq->prev_mm = oldmm; |
2861 | } | 2878 | } |
@@ -3018,15 +3035,6 @@ static void calc_load_account_active(struct rq *this_rq) | |||
3018 | } | 3035 | } |
3019 | 3036 | ||
3020 | /* | 3037 | /* |
3021 | * Externally visible per-cpu scheduler statistics: | ||
3022 | * cpu_nr_migrations(cpu) - number of migrations into that cpu | ||
3023 | */ | ||
3024 | u64 cpu_nr_migrations(int cpu) | ||
3025 | { | ||
3026 | return cpu_rq(cpu)->nr_migrations_in; | ||
3027 | } | ||
3028 | |||
3029 | /* | ||
3030 | * Update rq->cpu_load[] statistics. This function is usually called every | 3038 | * Update rq->cpu_load[] statistics. This function is usually called every |
3031 | * scheduler tick (TICK_NSEC). | 3039 | * scheduler tick (TICK_NSEC). |
3032 | */ | 3040 | */ |
@@ -4126,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
4126 | unsigned long flags; | 4134 | unsigned long flags; |
4127 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 4135 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
4128 | 4136 | ||
4129 | cpumask_setall(cpus); | 4137 | cpumask_copy(cpus, cpu_online_mask); |
4130 | 4138 | ||
4131 | /* | 4139 | /* |
4132 | * When power savings policy is enabled for the parent domain, idle | 4140 | * When power savings policy is enabled for the parent domain, idle |
@@ -4289,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | |||
4289 | int all_pinned = 0; | 4297 | int all_pinned = 0; |
4290 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | 4298 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); |
4291 | 4299 | ||
4292 | cpumask_setall(cpus); | 4300 | cpumask_copy(cpus, cpu_online_mask); |
4293 | 4301 | ||
4294 | /* | 4302 | /* |
4295 | * When power savings policy is enabled for the parent domain, idle | 4303 | * When power savings policy is enabled for the parent domain, idle |
@@ -4429,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
4429 | int pulled_task = 0; | 4437 | int pulled_task = 0; |
4430 | unsigned long next_balance = jiffies + HZ; | 4438 | unsigned long next_balance = jiffies + HZ; |
4431 | 4439 | ||
4440 | this_rq->idle_stamp = this_rq->clock; | ||
4441 | |||
4442 | if (this_rq->avg_idle < sysctl_sched_migration_cost) | ||
4443 | return; | ||
4444 | |||
4432 | for_each_domain(this_cpu, sd) { | 4445 | for_each_domain(this_cpu, sd) { |
4433 | unsigned long interval; | 4446 | unsigned long interval; |
4434 | 4447 | ||
@@ -4443,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
4443 | interval = msecs_to_jiffies(sd->balance_interval); | 4456 | interval = msecs_to_jiffies(sd->balance_interval); |
4444 | if (time_after(next_balance, sd->last_balance + interval)) | 4457 | if (time_after(next_balance, sd->last_balance + interval)) |
4445 | next_balance = sd->last_balance + interval; | 4458 | next_balance = sd->last_balance + interval; |
4446 | if (pulled_task) | 4459 | if (pulled_task) { |
4460 | this_rq->idle_stamp = 0; | ||
4447 | break; | 4461 | break; |
4462 | } | ||
4448 | } | 4463 | } |
4449 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | 4464 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { |
4450 | /* | 4465 | /* |
@@ -5046,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, | |||
5046 | p->gtime = cputime_add(p->gtime, cputime); | 5061 | p->gtime = cputime_add(p->gtime, cputime); |
5047 | 5062 | ||
5048 | /* Add guest time to cpustat. */ | 5063 | /* Add guest time to cpustat. */ |
5049 | cpustat->user = cputime64_add(cpustat->user, tmp); | 5064 | if (TASK_NICE(p) > 0) { |
5050 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 5065 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
5066 | cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); | ||
5067 | } else { | ||
5068 | cpustat->user = cputime64_add(cpustat->user, tmp); | ||
5069 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | ||
5070 | } | ||
5051 | } | 5071 | } |
5052 | 5072 | ||
5053 | /* | 5073 | /* |
@@ -5162,60 +5182,86 @@ void account_idle_ticks(unsigned long ticks) | |||
5162 | * Use precise platform statistics if available: | 5182 | * Use precise platform statistics if available: |
5163 | */ | 5183 | */ |
5164 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 5184 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
5165 | cputime_t task_utime(struct task_struct *p) | 5185 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
5166 | { | 5186 | { |
5167 | return p->utime; | 5187 | *ut = p->utime; |
5188 | *st = p->stime; | ||
5168 | } | 5189 | } |
5169 | 5190 | ||
5170 | cputime_t task_stime(struct task_struct *p) | 5191 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
5171 | { | 5192 | { |
5172 | return p->stime; | 5193 | struct task_cputime cputime; |
5194 | |||
5195 | thread_group_cputime(p, &cputime); | ||
5196 | |||
5197 | *ut = cputime.utime; | ||
5198 | *st = cputime.stime; | ||
5173 | } | 5199 | } |
5174 | #else | 5200 | #else |
5175 | cputime_t task_utime(struct task_struct *p) | 5201 | |
5202 | #ifndef nsecs_to_cputime | ||
5203 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) | ||
5204 | #endif | ||
5205 | |||
5206 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
5176 | { | 5207 | { |
5177 | clock_t utime = cputime_to_clock_t(p->utime), | 5208 | cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); |
5178 | total = utime + cputime_to_clock_t(p->stime); | ||
5179 | u64 temp; | ||
5180 | 5209 | ||
5181 | /* | 5210 | /* |
5182 | * Use CFS's precise accounting: | 5211 | * Use CFS's precise accounting: |
5183 | */ | 5212 | */ |
5184 | temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime); | 5213 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
5185 | 5214 | ||
5186 | if (total) { | 5215 | if (total) { |
5187 | temp *= utime; | 5216 | u64 temp; |
5217 | |||
5218 | temp = (u64)(rtime * utime); | ||
5188 | do_div(temp, total); | 5219 | do_div(temp, total); |
5189 | } | 5220 | utime = (cputime_t)temp; |
5190 | utime = (clock_t)temp; | 5221 | } else |
5222 | utime = rtime; | ||
5223 | |||
5224 | /* | ||
5225 | * Compare with previous values, to keep monotonicity: | ||
5226 | */ | ||
5227 | p->prev_utime = max(p->prev_utime, utime); | ||
5228 | p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); | ||
5191 | 5229 | ||
5192 | p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime)); | 5230 | *ut = p->prev_utime; |
5193 | return p->prev_utime; | 5231 | *st = p->prev_stime; |
5194 | } | 5232 | } |
5195 | 5233 | ||
5196 | cputime_t task_stime(struct task_struct *p) | 5234 | /* |
5235 | * Must be called with siglock held. | ||
5236 | */ | ||
5237 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
5197 | { | 5238 | { |
5198 | clock_t stime; | 5239 | struct signal_struct *sig = p->signal; |
5240 | struct task_cputime cputime; | ||
5241 | cputime_t rtime, utime, total; | ||
5199 | 5242 | ||
5200 | /* | 5243 | thread_group_cputime(p, &cputime); |
5201 | * Use CFS's precise accounting. (we subtract utime from | ||
5202 | * the total, to make sure the total observed by userspace | ||
5203 | * grows monotonically - apps rely on that): | ||
5204 | */ | ||
5205 | stime = nsec_to_clock_t(p->se.sum_exec_runtime) - | ||
5206 | cputime_to_clock_t(task_utime(p)); | ||
5207 | 5244 | ||
5208 | if (stime >= 0) | 5245 | total = cputime_add(cputime.utime, cputime.stime); |
5209 | p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime)); | 5246 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
5210 | 5247 | ||
5211 | return p->prev_stime; | 5248 | if (total) { |
5212 | } | 5249 | u64 temp; |
5213 | #endif | ||
5214 | 5250 | ||
5215 | inline cputime_t task_gtime(struct task_struct *p) | 5251 | temp = (u64)(rtime * cputime.utime); |
5216 | { | 5252 | do_div(temp, total); |
5217 | return p->gtime; | 5253 | utime = (cputime_t)temp; |
5254 | } else | ||
5255 | utime = rtime; | ||
5256 | |||
5257 | sig->prev_utime = max(sig->prev_utime, utime); | ||
5258 | sig->prev_stime = max(sig->prev_stime, | ||
5259 | cputime_sub(rtime, sig->prev_utime)); | ||
5260 | |||
5261 | *ut = sig->prev_utime; | ||
5262 | *st = sig->prev_stime; | ||
5218 | } | 5263 | } |
5264 | #endif | ||
5219 | 5265 | ||
5220 | /* | 5266 | /* |
5221 | * This function gets called by the timer code, with HZ frequency. | 5267 | * This function gets called by the timer code, with HZ frequency. |
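The rewritten task_times() above splits the precise CFS runtime (rtime) between user and system time in the same ratio as the tick-sampled utime/stime, then clamps against prev_utime/prev_stime so the values reported to userspace never move backwards. A stand-alone sketch of that scaling and monotonicity logic, with plain integers in place of cputime_t:

#include <stdint.h>
#include <stdio.h>

struct times { uint64_t prev_utime, prev_stime; };

static void task_times_sketch(struct times *t, uint64_t utime, uint64_t stime,
                              uint64_t rtime, uint64_t *ut, uint64_t *st)
{
    uint64_t total = utime + stime;

    if (total)
        utime = rtime * utime / total;   /* user share of the precise runtime */
    else
        utime = rtime;                   /* no tick samples yet: charge it all to user */

    if (utime > t->prev_utime)
        t->prev_utime = utime;           /* monotonic user time */
    if (rtime > t->prev_utime &&
        rtime - t->prev_utime > t->prev_stime)
        t->prev_stime = rtime - t->prev_utime;   /* monotonic system time */

    *ut = t->prev_utime;
    *st = t->prev_stime;
}

int main(void)
{
    struct times t = { 0, 0 };
    uint64_t ut, st;

    task_times_sketch(&t, 30, 10, 48, &ut, &st);   /* 3:1 split of 48 -> 36/12 */
    printf("ut=%llu st=%llu\n", (unsigned long long)ut, (unsigned long long)st);
    return 0;
}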
@@ -5481,7 +5527,7 @@ need_resched_nonpreemptible: | |||
5481 | } | 5527 | } |
5482 | EXPORT_SYMBOL(schedule); | 5528 | EXPORT_SYMBOL(schedule); |
5483 | 5529 | ||
5484 | #ifdef CONFIG_SMP | 5530 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
5485 | /* | 5531 | /* |
5486 | * Look out! "owner" is an entirely speculative pointer | 5532 | * Look out! "owner" is an entirely speculative pointer |
5487 | * access and not reliable. | 5533 | * access and not reliable. |
@@ -6175,22 +6221,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | |||
6175 | BUG_ON(p->se.on_rq); | 6221 | BUG_ON(p->se.on_rq); |
6176 | 6222 | ||
6177 | p->policy = policy; | 6223 | p->policy = policy; |
6178 | switch (p->policy) { | ||
6179 | case SCHED_NORMAL: | ||
6180 | case SCHED_BATCH: | ||
6181 | case SCHED_IDLE: | ||
6182 | p->sched_class = &fair_sched_class; | ||
6183 | break; | ||
6184 | case SCHED_FIFO: | ||
6185 | case SCHED_RR: | ||
6186 | p->sched_class = &rt_sched_class; | ||
6187 | break; | ||
6188 | } | ||
6189 | |||
6190 | p->rt_priority = prio; | 6224 | p->rt_priority = prio; |
6191 | p->normal_prio = normal_prio(p); | 6225 | p->normal_prio = normal_prio(p); |
6192 | /* we are holding p->pi_lock already */ | 6226 | /* we are holding p->pi_lock already */ |
6193 | p->prio = rt_mutex_getprio(p); | 6227 | p->prio = rt_mutex_getprio(p); |
6228 | if (rt_prio(p->prio)) | ||
6229 | p->sched_class = &rt_sched_class; | ||
6230 | else | ||
6231 | p->sched_class = &fair_sched_class; | ||
6194 | set_load_weight(p); | 6232 | set_load_weight(p); |
6195 | } | 6233 | } |
6196 | 6234 | ||
@@ -6935,7 +6973,7 @@ void show_state_filter(unsigned long state_filter) | |||
6935 | /* | 6973 | /* |
6936 | * Only show locks if all tasks are dumped: | 6974 | * Only show locks if all tasks are dumped: |
6937 | */ | 6975 | */ |
6938 | if (state_filter == -1) | 6976 | if (!state_filter) |
6939 | debug_show_all_locks(); | 6977 | debug_show_all_locks(); |
6940 | } | 6978 | } |
6941 | 6979 | ||
@@ -7740,6 +7778,16 @@ early_initcall(migration_init); | |||
7740 | 7778 | ||
7741 | #ifdef CONFIG_SCHED_DEBUG | 7779 | #ifdef CONFIG_SCHED_DEBUG |
7742 | 7780 | ||
7781 | static __read_mostly int sched_domain_debug_enabled; | ||
7782 | |||
7783 | static int __init sched_domain_debug_setup(char *str) | ||
7784 | { | ||
7785 | sched_domain_debug_enabled = 1; | ||
7786 | |||
7787 | return 0; | ||
7788 | } | ||
7789 | early_param("sched_debug", sched_domain_debug_setup); | ||
7790 | |||
7743 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 7791 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
7744 | struct cpumask *groupmask) | 7792 | struct cpumask *groupmask) |
7745 | { | 7793 | { |
@@ -7826,6 +7874,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
7826 | cpumask_var_t groupmask; | 7874 | cpumask_var_t groupmask; |
7827 | int level = 0; | 7875 | int level = 0; |
7828 | 7876 | ||
7877 | if (!sched_domain_debug_enabled) | ||
7878 | return; | ||
7879 | |||
7829 | if (!sd) { | 7880 | if (!sd) { |
7830 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); | 7881 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
7831 | return; | 7882 | return; |
@@ -7905,6 +7956,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
7905 | 7956 | ||
7906 | static void free_rootdomain(struct root_domain *rd) | 7957 | static void free_rootdomain(struct root_domain *rd) |
7907 | { | 7958 | { |
7959 | synchronize_sched(); | ||
7960 | |||
7908 | cpupri_cleanup(&rd->cpupri); | 7961 | cpupri_cleanup(&rd->cpupri); |
7909 | 7962 | ||
7910 | free_cpumask_var(rd->rto_mask); | 7963 | free_cpumask_var(rd->rto_mask); |
@@ -8045,6 +8098,7 @@ static cpumask_var_t cpu_isolated_map; | |||
8045 | /* Setup the mask of cpus configured for isolated domains */ | 8098 | /* Setup the mask of cpus configured for isolated domains */ |
8046 | static int __init isolated_cpu_setup(char *str) | 8099 | static int __init isolated_cpu_setup(char *str) |
8047 | { | 8100 | { |
8101 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8048 | cpulist_parse(str, cpu_isolated_map); | 8102 | cpulist_parse(str, cpu_isolated_map); |
8049 | return 1; | 8103 | return 1; |
8050 | } | 8104 | } |
@@ -8881,7 +8935,7 @@ static int build_sched_domains(const struct cpumask *cpu_map) | |||
8881 | return __build_sched_domains(cpu_map, NULL); | 8935 | return __build_sched_domains(cpu_map, NULL); |
8882 | } | 8936 | } |
8883 | 8937 | ||
8884 | static struct cpumask *doms_cur; /* current sched domains */ | 8938 | static cpumask_var_t *doms_cur; /* current sched domains */ |
8885 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 8939 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
8886 | static struct sched_domain_attr *dattr_cur; | 8940 | static struct sched_domain_attr *dattr_cur; |
8887 | /* attributes of custom domains in 'doms_cur' */ | 8941 | /* attributes of custom domains in 'doms_cur' */ |
@@ -8903,6 +8957,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void) | |||
8903 | return 0; | 8957 | return 0; |
8904 | } | 8958 | } |
8905 | 8959 | ||
8960 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) | ||
8961 | { | ||
8962 | int i; | ||
8963 | cpumask_var_t *doms; | ||
8964 | |||
8965 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); | ||
8966 | if (!doms) | ||
8967 | return NULL; | ||
8968 | for (i = 0; i < ndoms; i++) { | ||
8969 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { | ||
8970 | free_sched_domains(doms, i); | ||
8971 | return NULL; | ||
8972 | } | ||
8973 | } | ||
8974 | return doms; | ||
8975 | } | ||
8976 | |||
8977 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) | ||
8978 | { | ||
8979 | unsigned int i; | ||
8980 | for (i = 0; i < ndoms; i++) | ||
8981 | free_cpumask_var(doms[i]); | ||
8982 | kfree(doms); | ||
8983 | } | ||
8984 | |||
8906 | /* | 8985 | /* |
8907 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 8986 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
8908 | * For now this just excludes isolated cpus, but could be used to | 8987 | * For now this just excludes isolated cpus, but could be used to |
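alloc_sched_domains() above allocates an array of cpumask_var_t and, if any per-entry allocation fails, rolls back only the entries allocated so far before returning NULL; free_sched_domains() is its exact inverse. A generic userspace analogue of that allocate-with-partial-rollback pattern, using a hypothetical mask type and plain malloc/calloc in place of the cpumask allocators:

#include <stdlib.h>

typedef unsigned long *mask_t;               /* stand-in for cpumask_var_t */

static void free_masks(mask_t *doms, unsigned int ndoms)
{
    unsigned int i;

    for (i = 0; i < ndoms; i++)
        free(doms[i]);
    free(doms);
}

static mask_t *alloc_masks(unsigned int ndoms, size_t mask_words)
{
    mask_t *doms = malloc(sizeof(*doms) * ndoms);
    unsigned int i;

    if (!doms)
        return NULL;
    for (i = 0; i < ndoms; i++) {
        doms[i] = calloc(mask_words, sizeof(unsigned long));
        if (!doms[i]) {
            free_masks(doms, i);             /* free only the i entries already allocated */
            return NULL;
        }
    }
    return doms;
}

int main(void)
{
    mask_t *doms = alloc_masks(2, 4);

    if (doms)
        free_masks(doms, 2);
    return 0;
}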
@@ -8914,12 +8993,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map) | |||
8914 | 8993 | ||
8915 | arch_update_cpu_topology(); | 8994 | arch_update_cpu_topology(); |
8916 | ndoms_cur = 1; | 8995 | ndoms_cur = 1; |
8917 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); | 8996 | doms_cur = alloc_sched_domains(ndoms_cur); |
8918 | if (!doms_cur) | 8997 | if (!doms_cur) |
8919 | doms_cur = fallback_doms; | 8998 | doms_cur = &fallback_doms; |
8920 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); | 8999 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
8921 | dattr_cur = NULL; | 9000 | dattr_cur = NULL; |
8922 | err = build_sched_domains(doms_cur); | 9001 | err = build_sched_domains(doms_cur[0]); |
8923 | register_sched_domain_sysctl(); | 9002 | register_sched_domain_sysctl(); |
8924 | 9003 | ||
8925 | return err; | 9004 | return err; |
@@ -8969,19 +9048,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
8969 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 9048 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
8970 | * It destroys each deleted domain and builds each new domain. | 9049 | * It destroys each deleted domain and builds each new domain. |
8971 | * | 9050 | * |
8972 | * 'doms_new' is an array of cpumask's of length 'ndoms_new'. | 9051 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
8973 | * The masks don't intersect (don't overlap.) We should setup one | 9052 | * The masks don't intersect (don't overlap.) We should setup one |
8974 | * sched domain for each mask. CPUs not in any of the cpumasks will | 9053 | * sched domain for each mask. CPUs not in any of the cpumasks will |
8975 | * not be load balanced. If the same cpumask appears both in the | 9054 | * not be load balanced. If the same cpumask appears both in the |
8976 | * current 'doms_cur' domains and in the new 'doms_new', we can leave | 9055 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
8977 | * it as it is. | 9056 | * it as it is. |
8978 | * | 9057 | * |
8979 | * The passed in 'doms_new' should be kmalloc'd. This routine takes | 9058 | * The passed in 'doms_new' should be allocated using |
8980 | * ownership of it and will kfree it when done with it. If the caller | 9059 | * alloc_sched_domains. This routine takes ownership of it and will |
8981 | * failed the kmalloc call, then it can pass in doms_new == NULL && | 9060 | * free_sched_domains it when done with it. If the caller failed the |
8982 | * ndoms_new == 1, and partition_sched_domains() will fallback to | 9061 | * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, |
8983 | * the single partition 'fallback_doms', it also forces the domains | 9062 | * and partition_sched_domains() will fallback to the single partition |
8984 | * to be rebuilt. | 9063 | * 'fallback_doms', it also forces the domains to be rebuilt. |
8985 | * | 9064 | * |
8986 | * If doms_new == NULL it will be replaced with cpu_online_mask. | 9065 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
8987 | * ndoms_new == 0 is a special case for destroying existing domains, | 9066 | * ndoms_new == 0 is a special case for destroying existing domains, |
@@ -8989,8 +9068,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
8989 | * | 9068 | * |
8990 | * Call with hotplug lock held | 9069 | * Call with hotplug lock held |
8991 | */ | 9070 | */ |
8992 | /* FIXME: Change to struct cpumask *doms_new[] */ | 9071 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
8993 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
8994 | struct sched_domain_attr *dattr_new) | 9072 | struct sched_domain_attr *dattr_new) |
8995 | { | 9073 | { |
8996 | int i, j, n; | 9074 | int i, j, n; |
@@ -9009,40 +9087,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | |||
9009 | /* Destroy deleted domains */ | 9087 | /* Destroy deleted domains */ |
9010 | for (i = 0; i < ndoms_cur; i++) { | 9088 | for (i = 0; i < ndoms_cur; i++) { |
9011 | for (j = 0; j < n && !new_topology; j++) { | 9089 | for (j = 0; j < n && !new_topology; j++) { |
9012 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) | 9090 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
9013 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 9091 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
9014 | goto match1; | 9092 | goto match1; |
9015 | } | 9093 | } |
9016 | /* no match - a current sched domain not in new doms_new[] */ | 9094 | /* no match - a current sched domain not in new doms_new[] */ |
9017 | detach_destroy_domains(doms_cur + i); | 9095 | detach_destroy_domains(doms_cur[i]); |
9018 | match1: | 9096 | match1: |
9019 | ; | 9097 | ; |
9020 | } | 9098 | } |
9021 | 9099 | ||
9022 | if (doms_new == NULL) { | 9100 | if (doms_new == NULL) { |
9023 | ndoms_cur = 0; | 9101 | ndoms_cur = 0; |
9024 | doms_new = fallback_doms; | 9102 | doms_new = &fallback_doms; |
9025 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); | 9103 | cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map); |
9026 | WARN_ON_ONCE(dattr_new); | 9104 | WARN_ON_ONCE(dattr_new); |
9027 | } | 9105 | } |
9028 | 9106 | ||
9029 | /* Build new domains */ | 9107 | /* Build new domains */ |
9030 | for (i = 0; i < ndoms_new; i++) { | 9108 | for (i = 0; i < ndoms_new; i++) { |
9031 | for (j = 0; j < ndoms_cur && !new_topology; j++) { | 9109 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
9032 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) | 9110 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
9033 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 9111 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
9034 | goto match2; | 9112 | goto match2; |
9035 | } | 9113 | } |
9036 | /* no match - add a new doms_new */ | 9114 | /* no match - add a new doms_new */ |
9037 | __build_sched_domains(doms_new + i, | 9115 | __build_sched_domains(doms_new[i], |
9038 | dattr_new ? dattr_new + i : NULL); | 9116 | dattr_new ? dattr_new + i : NULL); |
9039 | match2: | 9117 | match2: |
9040 | ; | 9118 | ; |
9041 | } | 9119 | } |
9042 | 9120 | ||
9043 | /* Remember the new sched domains */ | 9121 | /* Remember the new sched domains */ |
9044 | if (doms_cur != fallback_doms) | 9122 | if (doms_cur != &fallback_doms) |
9045 | kfree(doms_cur); | 9123 | free_sched_domains(doms_cur, ndoms_cur); |
9046 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 9124 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
9047 | doms_cur = doms_new; | 9125 | doms_cur = doms_new; |
9048 | dattr_cur = dattr_new; | 9126 | dattr_cur = dattr_new; |
@@ -9364,10 +9442,6 @@ void __init sched_init(void) | |||
9364 | #ifdef CONFIG_CPUMASK_OFFSTACK | 9442 | #ifdef CONFIG_CPUMASK_OFFSTACK |
9365 | alloc_size += num_possible_cpus() * cpumask_size(); | 9443 | alloc_size += num_possible_cpus() * cpumask_size(); |
9366 | #endif | 9444 | #endif |
9367 | /* | ||
9368 | * As sched_init() is called before page_alloc is setup, | ||
9369 | * we use alloc_bootmem(). | ||
9370 | */ | ||
9371 | if (alloc_size) { | 9445 | if (alloc_size) { |
9372 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); | 9446 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
9373 | 9447 | ||
@@ -9522,6 +9596,8 @@ void __init sched_init(void) | |||
9522 | rq->cpu = i; | 9596 | rq->cpu = i; |
9523 | rq->online = 0; | 9597 | rq->online = 0; |
9524 | rq->migration_thread = NULL; | 9598 | rq->migration_thread = NULL; |
9599 | rq->idle_stamp = 0; | ||
9600 | rq->avg_idle = 2*sysctl_sched_migration_cost; | ||
9525 | INIT_LIST_HEAD(&rq->migration_queue); | 9601 | INIT_LIST_HEAD(&rq->migration_queue); |
9526 | rq_attach_root(rq, &def_root_domain); | 9602 | rq_attach_root(rq, &def_root_domain); |
9527 | #endif | 9603 | #endif |
@@ -9571,7 +9647,9 @@ void __init sched_init(void) | |||
9571 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); | 9647 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); |
9572 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); | 9648 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); |
9573 | #endif | 9649 | #endif |
9574 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | 9650 | /* May be allocated at isolcpus cmdline parse time */ |
9651 | if (cpu_isolated_map == NULL) | ||
9652 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | ||
9575 | #endif /* SMP */ | 9653 | #endif /* SMP */ |
9576 | 9654 | ||
9577 | perf_event_init(); | 9655 | perf_event_init(); |
@@ -10901,6 +10979,7 @@ void synchronize_sched_expedited(void) | |||
10901 | spin_unlock_irqrestore(&rq->lock, flags); | 10979 | spin_unlock_irqrestore(&rq->lock, flags); |
10902 | } | 10980 | } |
10903 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | 10981 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; |
10982 | synchronize_sched_expedited_count++; | ||
10904 | mutex_unlock(&rcu_sched_expedited_mutex); | 10983 | mutex_unlock(&rcu_sched_expedited_mutex); |
10905 | put_online_cpus(); | 10984 | put_online_cpus(); |
10906 | if (need_full_sync) | 10985 | if (need_full_sync) |
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index efb84409bc43..6988cf08f705 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -285,12 +285,16 @@ static void print_cpu(struct seq_file *m, int cpu) | |||
285 | 285 | ||
286 | #ifdef CONFIG_SCHEDSTATS | 286 | #ifdef CONFIG_SCHEDSTATS |
287 | #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); | 287 | #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); |
288 | #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); | ||
288 | 289 | ||
289 | P(yld_count); | 290 | P(yld_count); |
290 | 291 | ||
291 | P(sched_switch); | 292 | P(sched_switch); |
292 | P(sched_count); | 293 | P(sched_count); |
293 | P(sched_goidle); | 294 | P(sched_goidle); |
295 | #ifdef CONFIG_SMP | ||
296 | P64(avg_idle); | ||
297 | #endif | ||
294 | 298 | ||
295 | P(ttwu_count); | 299 | P(ttwu_count); |
296 | P(ttwu_local); | 300 | P(ttwu_local); |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 37087a7fac22..f61837ad336d 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1345,6 +1345,37 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | |||
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | /* | 1347 | /* |
1348 | * Try and locate an idle CPU in the sched_domain. | ||
1349 | */ | ||
1350 | static int | ||
1351 | select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target) | ||
1352 | { | ||
1353 | int cpu = smp_processor_id(); | ||
1354 | int prev_cpu = task_cpu(p); | ||
1355 | int i; | ||
1356 | |||
1357 | /* | ||
1358 | * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE | ||
1359 | * test in select_task_rq_fair) and the prev_cpu is idle then that's | ||
1360 | * always a better target than the current cpu. | ||
1361 | */ | ||
1362 | if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running) | ||
1363 | return prev_cpu; | ||
1364 | |||
1365 | /* | ||
1366 | * Otherwise, iterate the domain and find an eligible idle cpu. | ||
1367 | */ | ||
1368 | for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) { | ||
1369 | if (!cpu_rq(i)->cfs.nr_running) { | ||
1370 | target = i; | ||
1371 | break; | ||
1372 | } | ||
1373 | } | ||
1374 | |||
1375 | return target; | ||
1376 | } | ||
1377 | |||
1378 | /* | ||
1348 | * sched_balance_self: balance the current task (running on cpu) in domains | 1379 | * sched_balance_self: balance the current task (running on cpu) in domains |
1349 | * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and | 1380 | * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and |
1350 | * SD_BALANCE_EXEC. | 1381 | * SD_BALANCE_EXEC. |
@@ -1398,11 +1429,35 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag | |||
1398 | want_sd = 0; | 1429 | want_sd = 0; |
1399 | } | 1430 | } |
1400 | 1431 | ||
1401 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 1432 | /* |
1402 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 1433 | * While iterating the domains looking for a spanning |
1434 | * WAKE_AFFINE domain, adjust the affine target to any idle cpu | ||
1435 | * in cache sharing domains along the way. | ||
1436 | */ | ||
1437 | if (want_affine) { | ||
1438 | int target = -1; | ||
1403 | 1439 | ||
1404 | affine_sd = tmp; | 1440 | /* |
1405 | want_affine = 0; | 1441 | * If both cpu and prev_cpu are part of this domain, |
1442 | * cpu is a valid SD_WAKE_AFFINE target. | ||
1443 | */ | ||
1444 | if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) | ||
1445 | target = cpu; | ||
1446 | |||
1447 | /* | ||
1448 | * If there's an idle sibling in this domain, make that | ||
1449 | * the wake_affine target instead of the current cpu. | ||
1450 | */ | ||
1451 | if (tmp->flags & SD_PREFER_SIBLING) | ||
1452 | target = select_idle_sibling(p, tmp, target); | ||
1453 | |||
1454 | if (target >= 0) { | ||
1455 | if (tmp->flags & SD_WAKE_AFFINE) { | ||
1456 | affine_sd = tmp; | ||
1457 | want_affine = 0; | ||
1458 | } | ||
1459 | cpu = target; | ||
1460 | } | ||
1406 | } | 1461 | } |
1407 | 1462 | ||
1408 | if (!want_sd && !want_affine) | 1463 | if (!want_sd && !want_affine) |
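The reworked want_affine block above separates two decisions: whether cpu is a valid SD_WAKE_AFFINE target at all (both cpu and prev_cpu lie in the domain), and whether an idle sibling in a cache-sharing SD_PREFER_SIBLING domain should replace it. select_idle_sibling() prefers an idle prev_cpu and otherwise scans the domain for any CPU with an empty cfs runqueue. A small userspace sketch of that target selection, with plain arrays standing in for runqueues and the domain span (the cpus_allowed filtering is omitted):

#include <stddef.h>
#include <stdio.h>

/* nr_running[i] stands in for cpu_rq(i)->cfs.nr_running; span[] for the domain span. */
static int pick_idle_sibling(const int *nr_running, const int *span, size_t span_len,
                             int cpu, int prev_cpu, int target)
{
    size_t i;

    /* An idle prev_cpu beats the waking cpu when both are in the domain. */
    if (target == cpu && nr_running[prev_cpu] == 0)
        return prev_cpu;

    /* Otherwise take the first idle cpu in the domain, if any. */
    for (i = 0; i < span_len; i++)
        if (nr_running[span[i]] == 0)
            return span[i];

    return target;
}

int main(void)
{
    int nr_running[4] = { 2, 0, 1, 3 };
    int span[4] = { 0, 1, 2, 3 };

    printf("target = %d\n", pick_idle_sibling(nr_running, span, 4, 0, 2, 0));
    return 0;
}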
@@ -1679,7 +1734,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) | |||
1679 | struct cfs_rq *cfs_rq = &rq->cfs; | 1734 | struct cfs_rq *cfs_rq = &rq->cfs; |
1680 | struct sched_entity *se; | 1735 | struct sched_entity *se; |
1681 | 1736 | ||
1682 | if (unlikely(!cfs_rq->nr_running)) | 1737 | if (!cfs_rq->nr_running) |
1683 | return NULL; | 1738 | return NULL; |
1684 | 1739 | ||
1685 | do { | 1740 | do { |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index a4d790cddb19..5c5fef378415 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -1153,29 +1153,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) | |||
1153 | 1153 | ||
1154 | static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); | 1154 | static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); |
1155 | 1155 | ||
1156 | static inline int pick_optimal_cpu(int this_cpu, | ||
1157 | const struct cpumask *mask) | ||
1158 | { | ||
1159 | int first; | ||
1160 | |||
1161 | /* "this_cpu" is cheaper to preempt than a remote processor */ | ||
1162 | if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask)) | ||
1163 | return this_cpu; | ||
1164 | |||
1165 | first = cpumask_first(mask); | ||
1166 | if (first < nr_cpu_ids) | ||
1167 | return first; | ||
1168 | |||
1169 | return -1; | ||
1170 | } | ||
1171 | |||
1172 | static int find_lowest_rq(struct task_struct *task) | 1156 | static int find_lowest_rq(struct task_struct *task) |
1173 | { | 1157 | { |
1174 | struct sched_domain *sd; | 1158 | struct sched_domain *sd; |
1175 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); | 1159 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); |
1176 | int this_cpu = smp_processor_id(); | 1160 | int this_cpu = smp_processor_id(); |
1177 | int cpu = task_cpu(task); | 1161 | int cpu = task_cpu(task); |
1178 | cpumask_var_t domain_mask; | ||
1179 | 1162 | ||
1180 | if (task->rt.nr_cpus_allowed == 1) | 1163 | if (task->rt.nr_cpus_allowed == 1) |
1181 | return -1; /* No other targets possible */ | 1164 | return -1; /* No other targets possible */ |
@@ -1198,28 +1181,26 @@ static int find_lowest_rq(struct task_struct *task) | |||
1198 | * Otherwise, we consult the sched_domains span maps to figure | 1181 | * Otherwise, we consult the sched_domains span maps to figure |
1199 | * out which cpu is logically closest to our hot cache data. | 1182 | * out which cpu is logically closest to our hot cache data. |
1200 | */ | 1183 | */ |
1201 | if (this_cpu == cpu) | 1184 | if (!cpumask_test_cpu(this_cpu, lowest_mask)) |
1202 | this_cpu = -1; /* Skip this_cpu opt if the same */ | 1185 | this_cpu = -1; /* Skip this_cpu opt if not among lowest */ |
1203 | |||
1204 | if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) { | ||
1205 | for_each_domain(cpu, sd) { | ||
1206 | if (sd->flags & SD_WAKE_AFFINE) { | ||
1207 | int best_cpu; | ||
1208 | 1186 | ||
1209 | cpumask_and(domain_mask, | 1187 | for_each_domain(cpu, sd) { |
1210 | sched_domain_span(sd), | 1188 | if (sd->flags & SD_WAKE_AFFINE) { |
1211 | lowest_mask); | 1189 | int best_cpu; |
1212 | 1190 | ||
1213 | best_cpu = pick_optimal_cpu(this_cpu, | 1191 | /* |
1214 | domain_mask); | 1192 | * "this_cpu" is cheaper to preempt than a |
1215 | 1193 | * remote processor. | |
1216 | if (best_cpu != -1) { | 1194 | */ |
1217 | free_cpumask_var(domain_mask); | 1195 | if (this_cpu != -1 && |
1218 | return best_cpu; | 1196 | cpumask_test_cpu(this_cpu, sched_domain_span(sd))) |
1219 | } | 1197 | return this_cpu; |
1220 | } | 1198 | |
1199 | best_cpu = cpumask_first_and(lowest_mask, | ||
1200 | sched_domain_span(sd)); | ||
1201 | if (best_cpu < nr_cpu_ids) | ||
1202 | return best_cpu; | ||
1221 | } | 1203 | } |
1222 | free_cpumask_var(domain_mask); | ||
1223 | } | 1204 | } |
1224 | 1205 | ||
1225 | /* | 1206 | /* |
@@ -1227,7 +1208,13 @@ static int find_lowest_rq(struct task_struct *task) | |||
1227 | * just give the caller *something* to work with from the compatible | 1208 | * just give the caller *something* to work with from the compatible |
1228 | * locations. | 1209 | * locations. |
1229 | */ | 1210 | */ |
1230 | return pick_optimal_cpu(this_cpu, lowest_mask); | 1211 | if (this_cpu != -1) |
1212 | return this_cpu; | ||
1213 | |||
1214 | cpu = cpumask_any(lowest_mask); | ||
1215 | if (cpu < nr_cpu_ids) | ||
1216 | return cpu; | ||
1217 | return -1; | ||
1231 | } | 1218 | } |
1232 | 1219 | ||
1233 | /* Will lock the rq it finds */ | 1220 | /* Will lock the rq it finds */ |
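The simplified find_lowest_rq() above drops the temporary domain_mask allocation: this_cpu is treated as a candidate only when it is itself in lowest_mask, each domain span is then searched with cpumask_first_and() for the closest lowest-priority CPU, and any CPU in lowest_mask serves as the final fallback. A single-word bitmask sketch of that preference order (the SD_WAKE_AFFINE flag test is omitted):

#include <stdio.h>

/* Single-word stand-ins for lowest_mask and sched_domain_span(sd). */
static int first_set_bit(unsigned long mask)
{
    int cpu;

    for (cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++)
        if (mask & (1UL << cpu))
            return cpu;
    return -1;
}

static int pick_lowest_cpu(unsigned long lowest_mask, const unsigned long *spans,
                           int nr_domains, int this_cpu)
{
    int d, best;

    if (!(lowest_mask & (1UL << this_cpu)))
        this_cpu = -1;                       /* this_cpu is not among the lowest: skip the shortcut */

    for (d = 0; d < nr_domains; d++) {
        if (this_cpu != -1 && (spans[d] & (1UL << this_cpu)))
            return this_cpu;                 /* cheapest: preempt ourselves */
        best = first_set_bit(lowest_mask & spans[d]);
        if (best != -1)
            return best;                     /* closest lowest-priority cpu */
    }
    return this_cpu != -1 ? this_cpu : first_set_bit(lowest_mask);
}

int main(void)
{
    unsigned long spans[2] = { 0x3UL, 0xfUL };     /* cpus {0,1}, then {0-3} */

    printf("cpu = %d\n", pick_lowest_cpu(0x8UL /* cpu 3 */, spans, 2, 0));
    return 0;
}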
diff --git a/kernel/signal.c b/kernel/signal.c index 6705320784fd..6b982f2cf524 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -22,12 +22,14 @@ | |||
22 | #include <linux/ptrace.h> | 22 | #include <linux/ptrace.h> |
23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
24 | #include <linux/signalfd.h> | 24 | #include <linux/signalfd.h> |
25 | #include <linux/ratelimit.h> | ||
25 | #include <linux/tracehook.h> | 26 | #include <linux/tracehook.h> |
26 | #include <linux/capability.h> | 27 | #include <linux/capability.h> |
27 | #include <linux/freezer.h> | 28 | #include <linux/freezer.h> |
28 | #include <linux/pid_namespace.h> | 29 | #include <linux/pid_namespace.h> |
29 | #include <linux/nsproxy.h> | 30 | #include <linux/nsproxy.h> |
30 | #include <trace/events/sched.h> | 31 | #define CREATE_TRACE_POINTS |
32 | #include <trace/events/signal.h> | ||
31 | 33 | ||
32 | #include <asm/param.h> | 34 | #include <asm/param.h> |
33 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
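The #define CREATE_TRACE_POINTS before including trace/events/signal.h follows the usual tracepoint convention: every other includer of the header only sees declarations, while the one file defining CREATE_TRACE_POINTS also instantiates the tracepoint bodies. A minimal single-file illustration of that declare-once/define-once trick, with a hypothetical function name (in real use the two branches would live in a shared header):

#define CREATE_TRACE_POINTS   /* only the instantiating "file" defines this */
#include <stdio.h>

#ifndef CREATE_TRACE_POINTS
void trace_signal_generate(int sig);                     /* other includers: declaration only */
#else
void trace_signal_generate(int sig)                      /* this file: the one definition */
{
    printf("signal_generate: sig=%d\n", sig);
}
#endif

int main(void)
{
    trace_signal_generate(11);
    return 0;
}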
@@ -41,6 +43,8 @@ | |||
41 | 43 | ||
42 | static struct kmem_cache *sigqueue_cachep; | 44 | static struct kmem_cache *sigqueue_cachep; |
43 | 45 | ||
46 | int print_fatal_signals __read_mostly; | ||
47 | |||
44 | static void __user *sig_handler(struct task_struct *t, int sig) | 48 | static void __user *sig_handler(struct task_struct *t, int sig) |
45 | { | 49 | { |
46 | return t->sighand->action[sig - 1].sa.sa_handler; | 50 | return t->sighand->action[sig - 1].sa.sa_handler; |
@@ -159,7 +163,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask) | |||
159 | { | 163 | { |
160 | unsigned long i, *s, *m, x; | 164 | unsigned long i, *s, *m, x; |
161 | int sig = 0; | 165 | int sig = 0; |
162 | 166 | ||
163 | s = pending->signal.sig; | 167 | s = pending->signal.sig; |
164 | m = mask->sig; | 168 | m = mask->sig; |
165 | switch (_NSIG_WORDS) { | 169 | switch (_NSIG_WORDS) { |
@@ -184,17 +188,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask) | |||
184 | sig = ffz(~x) + 1; | 188 | sig = ffz(~x) + 1; |
185 | break; | 189 | break; |
186 | } | 190 | } |
187 | 191 | ||
188 | return sig; | 192 | return sig; |
189 | } | 193 | } |
190 | 194 | ||
195 | static inline void print_dropped_signal(int sig) | ||
196 | { | ||
197 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | ||
198 | |||
199 | if (!print_fatal_signals) | ||
200 | return; | ||
201 | |||
202 | if (!__ratelimit(&ratelimit_state)) | ||
203 | return; | ||
204 | |||
205 | printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", | ||
206 | current->comm, current->pid, sig); | ||
207 | } | ||
208 | |||
191 | /* | 209 | /* |
192 | * allocate a new signal queue record | 210 | * allocate a new signal queue record |
193 | * - this may be called without locks if and only if t == current, otherwise an | 211 | * - this may be called without locks if and only if t == current, otherwise an |
194 | * appropriate lock must be held to stop the target task from exiting | 212 | * appropriate lock must be held to stop the target task from exiting |
195 | */ | 213 | */ |
196 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | 214 | static struct sigqueue * |
197 | int override_rlimit) | 215 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
198 | { | 216 | { |
199 | struct sigqueue *q = NULL; | 217 | struct sigqueue *q = NULL; |
200 | struct user_struct *user; | 218 | struct user_struct *user; |
@@ -207,10 +225,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
207 | */ | 225 | */ |
208 | user = get_uid(__task_cred(t)->user); | 226 | user = get_uid(__task_cred(t)->user); |
209 | atomic_inc(&user->sigpending); | 227 | atomic_inc(&user->sigpending); |
228 | |||
210 | if (override_rlimit || | 229 | if (override_rlimit || |
211 | atomic_read(&user->sigpending) <= | 230 | atomic_read(&user->sigpending) <= |
212 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) | 231 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) { |
213 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 232 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
233 | } else { | ||
234 | print_dropped_signal(sig); | ||
235 | } | ||
236 | |||
214 | if (unlikely(q == NULL)) { | 237 | if (unlikely(q == NULL)) { |
215 | atomic_dec(&user->sigpending); | 238 | atomic_dec(&user->sigpending); |
216 | free_uid(user); | 239 | free_uid(user); |
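print_dropped_signal() above pairs the existing print_fatal_signals switch with a DEFINE_RATELIMIT_STATE(..., 5 * HZ, 10) throttle, so a task that keeps hitting RLIMIT_SIGPENDING logs at most a burst of messages per window instead of flooding the console. A userspace sketch of that interval-plus-burst rate limiting, using time() seconds instead of jiffies and without the kernel's suppressed-message accounting:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
    time_t interval;    /* window length in seconds */
    int    burst;       /* messages allowed per window */
    time_t begin;
    int    printed;
};

static bool ratelimit_ok(struct ratelimit *rs)
{
    time_t now = time(NULL);

    if (rs->begin == 0 || now - rs->begin >= rs->interval) {
        rs->begin = now;              /* start a new window */
        rs->printed = 0;
    }
    if (rs->printed >= rs->burst)
        return false;                 /* suppressed */
    rs->printed++;
    return true;
}

int main(void)
{
    struct ratelimit rs = { .interval = 5, .burst = 10 };
    int sig;

    for (sig = 0; sig < 20; sig++)
        if (ratelimit_ok(&rs))
            printf("dropped signal %d\n", sig);   /* only the first 10 in this window print */
    return 0;
}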
@@ -834,7 +857,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
834 | struct sigqueue *q; | 857 | struct sigqueue *q; |
835 | int override_rlimit; | 858 | int override_rlimit; |
836 | 859 | ||
837 | trace_sched_signal_send(sig, t); | 860 | trace_signal_generate(sig, info, t); |
838 | 861 | ||
839 | assert_spin_locked(&t->sighand->siglock); | 862 | assert_spin_locked(&t->sighand->siglock); |
840 | 863 | ||
@@ -869,7 +892,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
869 | else | 892 | else |
870 | override_rlimit = 0; | 893 | override_rlimit = 0; |
871 | 894 | ||
872 | q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, | 895 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, |
873 | override_rlimit); | 896 | override_rlimit); |
874 | if (q) { | 897 | if (q) { |
875 | list_add_tail(&q->list, &pending->list); | 898 | list_add_tail(&q->list, &pending->list); |
@@ -896,12 +919,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
896 | break; | 919 | break; |
897 | } | 920 | } |
898 | } else if (!is_si_special(info)) { | 921 | } else if (!is_si_special(info)) { |
899 | if (sig >= SIGRTMIN && info->si_code != SI_USER) | 922 | if (sig >= SIGRTMIN && info->si_code != SI_USER) { |
900 | /* | 923 | /* |
901 | * Queue overflow, abort. We may abort if the signal was rt | 924 | * Queue overflow, abort. We may abort if the |
902 | * and sent by user using something other than kill(). | 925 | * signal was rt and sent by user using something |
903 | */ | 926 | * other than kill(). |
927 | */ | ||
928 | trace_signal_overflow_fail(sig, group, info); | ||
904 | return -EAGAIN; | 929 | return -EAGAIN; |
930 | } else { | ||
931 | /* | ||
932 | * This is a silent loss of information. We still | ||
933 | * send the signal, but the *info bits are lost. | ||
934 | */ | ||
935 | trace_signal_lose_info(sig, group, info); | ||
936 | } | ||
905 | } | 937 | } |
906 | 938 | ||
907 | out_set: | 939 | out_set: |
@@ -925,8 +957,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
925 | return __send_signal(sig, info, t, group, from_ancestor_ns); | 957 | return __send_signal(sig, info, t, group, from_ancestor_ns); |
926 | } | 958 | } |
927 | 959 | ||
928 | int print_fatal_signals; | ||
929 | |||
930 | static void print_fatal_signal(struct pt_regs *regs, int signr) | 960 | static void print_fatal_signal(struct pt_regs *regs, int signr) |
931 | { | 961 | { |
932 | printk("%s/%d: potentially unexpected fatal signal %d.\n", | 962 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
@@ -1293,19 +1323,19 @@ EXPORT_SYMBOL(kill_pid); | |||
1293 | * These functions support sending signals using preallocated sigqueue | 1323 | * These functions support sending signals using preallocated sigqueue |
1294 | * structures. This is needed "because realtime applications cannot | 1324 | * structures. This is needed "because realtime applications cannot |
1295 | * afford to lose notifications of asynchronous events, like timer | 1325 | * afford to lose notifications of asynchronous events, like timer |
1296 | * expirations or I/O completions". In the case of Posix Timers | 1326 | * expirations or I/O completions". In the case of Posix Timers |
1297 | * we allocate the sigqueue structure from the timer_create. If this | 1327 | * we allocate the sigqueue structure from the timer_create. If this |
1298 | * allocation fails we are able to report the failure to the application | 1328 | * allocation fails we are able to report the failure to the application |
1299 | * with an EAGAIN error. | 1329 | * with an EAGAIN error. |
1300 | */ | 1330 | */ |
1301 | |||
1302 | struct sigqueue *sigqueue_alloc(void) | 1331 | struct sigqueue *sigqueue_alloc(void) |
1303 | { | 1332 | { |
1304 | struct sigqueue *q; | 1333 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
1305 | 1334 | ||
1306 | if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) | 1335 | if (q) |
1307 | q->flags |= SIGQUEUE_PREALLOC; | 1336 | q->flags |= SIGQUEUE_PREALLOC; |
1308 | return(q); | 1337 | |
1338 | return q; | ||
1309 | } | 1339 | } |
1310 | 1340 | ||
1311 | void sigqueue_free(struct sigqueue *q) | 1341 | void sigqueue_free(struct sigqueue *q) |
@@ -1839,6 +1869,9 @@ relock: | |||
1839 | ka = &sighand->action[signr-1]; | 1869 | ka = &sighand->action[signr-1]; |
1840 | } | 1870 | } |
1841 | 1871 | ||
1872 | /* Trace actually delivered signals. */ | ||
1873 | trace_signal_deliver(signr, info, ka); | ||
1874 | |||
1842 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | 1875 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
1843 | continue; | 1876 | continue; |
1844 | if (ka->sa.sa_handler != SIG_DFL) { | 1877 | if (ka->sa.sa_handler != SIG_DFL) { |
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-debugfs.c index 3988032571f5..e45c43645298 100644 --- a/kernel/slow-work-proc.c +++ b/kernel/slow-work-debugfs.c | |||
@@ -57,7 +57,7 @@ static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Describe a slow work item for /proc | 60 | * Describe a slow work item for debugfs |
61 | */ | 61 | */ |
62 | static int slow_work_runqueue_show(struct seq_file *m, void *v) | 62 | static int slow_work_runqueue_show(struct seq_file *m, void *v) |
63 | { | 63 | { |
@@ -211,7 +211,7 @@ static const struct seq_operations slow_work_runqueue_ops = { | |||
211 | }; | 211 | }; |
212 | 212 | ||
213 | /* | 213 | /* |
214 | * open "/proc/slow_work_rq" to list queue contents | 214 | * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents |
215 | */ | 215 | */ |
216 | static int slow_work_runqueue_open(struct inode *inode, struct file *file) | 216 | static int slow_work_runqueue_open(struct inode *inode, struct file *file) |
217 | { | 217 | { |
diff --git a/kernel/slow-work.c b/kernel/slow-work.c index da94f3c101af..00889bd3c590 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/kthread.h> | 16 | #include <linux/kthread.h> |
17 | #include <linux/freezer.h> | 17 | #include <linux/freezer.h> |
18 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/debugfs.h> |
20 | #include "slow-work.h" | 20 | #include "slow-work.h" |
21 | 21 | ||
22 | static void slow_work_cull_timeout(unsigned long); | 22 | static void slow_work_cull_timeout(unsigned long); |
@@ -109,12 +109,36 @@ static struct module *slow_work_unreg_module; | |||
109 | static struct slow_work *slow_work_unreg_work_item; | 109 | static struct slow_work *slow_work_unreg_work_item; |
110 | static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); | 110 | static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); |
111 | static DEFINE_MUTEX(slow_work_unreg_sync_lock); | 111 | static DEFINE_MUTEX(slow_work_unreg_sync_lock); |
112 | |||
113 | static void slow_work_set_thread_processing(int id, struct slow_work *work) | ||
114 | { | ||
115 | if (work) | ||
116 | slow_work_thread_processing[id] = work->owner; | ||
117 | } | ||
118 | static void slow_work_done_thread_processing(int id, struct slow_work *work) | ||
119 | { | ||
120 | struct module *module = slow_work_thread_processing[id]; | ||
121 | |||
122 | slow_work_thread_processing[id] = NULL; | ||
123 | smp_mb(); | ||
124 | if (slow_work_unreg_work_item == work || | ||
125 | slow_work_unreg_module == module) | ||
126 | wake_up_all(&slow_work_unreg_wq); | ||
127 | } | ||
128 | static void slow_work_clear_thread_processing(int id) | ||
129 | { | ||
130 | slow_work_thread_processing[id] = NULL; | ||
131 | } | ||
132 | #else | ||
133 | static void slow_work_set_thread_processing(int id, struct slow_work *work) {} | ||
134 | static void slow_work_done_thread_processing(int id, struct slow_work *work) {} | ||
135 | static void slow_work_clear_thread_processing(int id) {} | ||
112 | #endif | 136 | #endif |
113 | 137 | ||
114 | /* | 138 | /* |
115 | * Data for tracking currently executing items for indication through /proc | 139 | * Data for tracking currently executing items for indication through /proc |
116 | */ | 140 | */ |
117 | #ifdef CONFIG_SLOW_WORK_PROC | 141 | #ifdef CONFIG_SLOW_WORK_DEBUG |
118 | struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; | 142 | struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; |
119 | pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; | 143 | pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; |
120 | DEFINE_RWLOCK(slow_work_execs_lock); | 144 | DEFINE_RWLOCK(slow_work_execs_lock); |
@@ -197,9 +221,6 @@ static unsigned slow_work_calc_vsmax(void) | |||
197 | */ | 221 | */ |
198 | static noinline bool slow_work_execute(int id) | 222 | static noinline bool slow_work_execute(int id) |
199 | { | 223 | { |
200 | #ifdef CONFIG_MODULES | ||
201 | struct module *module; | ||
202 | #endif | ||
203 | struct slow_work *work = NULL; | 224 | struct slow_work *work = NULL; |
204 | unsigned vsmax; | 225 | unsigned vsmax; |
205 | bool very_slow; | 226 | bool very_slow; |
@@ -236,10 +257,7 @@ static noinline bool slow_work_execute(int id) | |||
236 | very_slow = false; /* avoid the compiler warning */ | 257 | very_slow = false; /* avoid the compiler warning */ |
237 | } | 258 | } |
238 | 259 | ||
239 | #ifdef CONFIG_MODULES | 260 | slow_work_set_thread_processing(id, work); |
240 | if (work) | ||
241 | slow_work_thread_processing[id] = work->owner; | ||
242 | #endif | ||
243 | if (work) { | 261 | if (work) { |
244 | slow_work_mark_time(work); | 262 | slow_work_mark_time(work); |
245 | slow_work_begin_exec(id, work); | 263 | slow_work_begin_exec(id, work); |
@@ -287,15 +305,7 @@ static noinline bool slow_work_execute(int id) | |||
287 | 305 | ||
288 | /* sort out the race between module unloading and put_ref() */ | 306 | /* sort out the race between module unloading and put_ref() */ |
289 | slow_work_put_ref(work); | 307 | slow_work_put_ref(work); |
290 | 308 | slow_work_done_thread_processing(id, work); | |
291 | #ifdef CONFIG_MODULES | ||
292 | module = slow_work_thread_processing[id]; | ||
293 | slow_work_thread_processing[id] = NULL; | ||
294 | smp_mb(); | ||
295 | if (slow_work_unreg_work_item == work || | ||
296 | slow_work_unreg_module == module) | ||
297 | wake_up_all(&slow_work_unreg_wq); | ||
298 | #endif | ||
299 | 309 | ||
300 | return true; | 310 | return true; |
301 | 311 | ||
@@ -310,7 +320,7 @@ auto_requeue: | |||
310 | else | 320 | else |
311 | list_add_tail(&work->link, &slow_work_queue); | 321 | list_add_tail(&work->link, &slow_work_queue); |
312 | spin_unlock_irq(&slow_work_queue_lock); | 322 | spin_unlock_irq(&slow_work_queue_lock); |
313 | slow_work_thread_processing[id] = NULL; | 323 | slow_work_clear_thread_processing(id); |
314 | return true; | 324 | return true; |
315 | } | 325 | } |
316 | 326 | ||
@@ -813,7 +823,7 @@ static void slow_work_new_thread_execute(struct slow_work *work) | |||
813 | static const struct slow_work_ops slow_work_new_thread_ops = { | 823 | static const struct slow_work_ops slow_work_new_thread_ops = { |
814 | .owner = THIS_MODULE, | 824 | .owner = THIS_MODULE, |
815 | .execute = slow_work_new_thread_execute, | 825 | .execute = slow_work_new_thread_execute, |
816 | #ifdef CONFIG_SLOW_WORK_PROC | 826 | #ifdef CONFIG_SLOW_WORK_DEBUG |
817 | .desc = slow_work_new_thread_desc, | 827 | .desc = slow_work_new_thread_desc, |
818 | #endif | 828 | #endif |
819 | }; | 829 | }; |
@@ -943,6 +953,7 @@ EXPORT_SYMBOL(slow_work_register_user); | |||
943 | */ | 953 | */ |
944 | static void slow_work_wait_for_items(struct module *module) | 954 | static void slow_work_wait_for_items(struct module *module) |
945 | { | 955 | { |
956 | #ifdef CONFIG_MODULES | ||
946 | DECLARE_WAITQUEUE(myself, current); | 957 | DECLARE_WAITQUEUE(myself, current); |
947 | struct slow_work *work; | 958 | struct slow_work *work; |
948 | int loop; | 959 | int loop; |
@@ -989,6 +1000,7 @@ static void slow_work_wait_for_items(struct module *module) | |||
989 | 1000 | ||
990 | remove_wait_queue(&slow_work_unreg_wq, &myself); | 1001 | remove_wait_queue(&slow_work_unreg_wq, &myself); |
991 | mutex_unlock(&slow_work_unreg_sync_lock); | 1002 | mutex_unlock(&slow_work_unreg_sync_lock); |
1003 | #endif /* CONFIG_MODULES */ | ||
992 | } | 1004 | } |
993 | 1005 | ||
994 | /** | 1006 | /** |
@@ -1043,9 +1055,15 @@ static int __init init_slow_work(void) | |||
1043 | if (slow_work_max_max_threads < nr_cpus * 2) | 1055 | if (slow_work_max_max_threads < nr_cpus * 2) |
1044 | slow_work_max_max_threads = nr_cpus * 2; | 1056 | slow_work_max_max_threads = nr_cpus * 2; |
1045 | #endif | 1057 | #endif |
1046 | #ifdef CONFIG_SLOW_WORK_PROC | 1058 | #ifdef CONFIG_SLOW_WORK_DEBUG |
1047 | proc_create("slow_work_rq", S_IFREG | 0400, NULL, | 1059 | { |
1048 | &slow_work_runqueue_fops); | 1060 | struct dentry *dbdir; |
1061 | |||
1062 | dbdir = debugfs_create_dir("slow_work", NULL); | ||
1063 | if (dbdir && !IS_ERR(dbdir)) | ||
1064 | debugfs_create_file("runqueue", S_IFREG | 0400, dbdir, | ||
1065 | NULL, &slow_work_runqueue_fops); | ||
1066 | } | ||
1049 | #endif | 1067 | #endif |
1050 | return 0; | 1068 | return 0; |
1051 | } | 1069 | } |
diff --git a/kernel/slow-work.h b/kernel/slow-work.h index 3c2f007f3ad6..321f3c59d732 100644 --- a/kernel/slow-work.h +++ b/kernel/slow-work.h | |||
@@ -19,7 +19,7 @@ | |||
19 | /* | 19 | /* |
20 | * slow-work.c | 20 | * slow-work.c |
21 | */ | 21 | */ |
22 | #ifdef CONFIG_SLOW_WORK_PROC | 22 | #ifdef CONFIG_SLOW_WORK_DEBUG |
23 | extern struct slow_work *slow_work_execs[]; | 23 | extern struct slow_work *slow_work_execs[]; |
24 | extern pid_t slow_work_pids[]; | 24 | extern pid_t slow_work_pids[]; |
25 | extern rwlock_t slow_work_execs_lock; | 25 | extern rwlock_t slow_work_execs_lock; |
@@ -30,9 +30,9 @@ extern struct list_head vslow_work_queue; | |||
30 | extern spinlock_t slow_work_queue_lock; | 30 | extern spinlock_t slow_work_queue_lock; |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * slow-work-proc.c | 33 | * slow-work-debugfs.c |
34 | */ | 34 | */ |
35 | #ifdef CONFIG_SLOW_WORK_PROC | 35 | #ifdef CONFIG_SLOW_WORK_DEBUG |
36 | extern const struct file_operations slow_work_runqueue_fops; | 36 | extern const struct file_operations slow_work_runqueue_fops; |
37 | 37 | ||
38 | extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); | 38 | extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); |
diff --git a/kernel/smp.c b/kernel/smp.c index c9d1c7835c2f..a8c76069cf50 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data); | |||
265 | * @info: An arbitrary pointer to pass to the function. | 265 | * @info: An arbitrary pointer to pass to the function. |
266 | * @wait: If true, wait until function has completed on other CPUs. | 266 | * @wait: If true, wait until function has completed on other CPUs. |
267 | * | 267 | * |
268 | * Returns 0 on success, else a negative status code. Note that @wait | 268 | * Returns 0 on success, else a negative status code. |
269 | * will be implicitly turned on in case of allocation failures, since | ||
270 | * we fall back to on-stack allocation. | ||
271 | */ | 269 | */ |
272 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 270 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
273 | int wait) | 271 | int wait) |
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
321 | } | 319 | } |
322 | EXPORT_SYMBOL(smp_call_function_single); | 320 | EXPORT_SYMBOL(smp_call_function_single); |
323 | 321 | ||
322 | /** | ||
323 | * smp_call_function_any - Run a function on any of the given cpus | ||
324 | * @mask: The mask of cpus it can run on. | ||
325 | * @func: The function to run. This must be fast and non-blocking. | ||
326 | * @info: An arbitrary pointer to pass to the function. | ||
327 | * @wait: If true, wait until function has completed. | ||
328 | * | ||
329 | * Returns 0 on success, else a negative status code (if no cpus were online). | ||
330 | * Note that @wait will be implicitly turned on in case of allocation failures, | ||
331 | * since we fall back to on-stack allocation. | ||
332 | * | ||
333 | * Selection preference: | ||
334 | * 1) current cpu if in @mask | ||
335 | * 2) any cpu of current node if in @mask | ||
336 | * 3) any other online cpu in @mask | ||
337 | */ | ||
338 | int smp_call_function_any(const struct cpumask *mask, | ||
339 | void (*func)(void *info), void *info, int wait) | ||
340 | { | ||
341 | unsigned int cpu; | ||
342 | const struct cpumask *nodemask; | ||
343 | int ret; | ||
344 | |||
345 | /* Try for same CPU (cheapest) */ | ||
346 | cpu = get_cpu(); | ||
347 | if (cpumask_test_cpu(cpu, mask)) | ||
348 | goto call; | ||
349 | |||
350 | /* Try for same node. */ | ||
351 | nodemask = cpumask_of_node(cpu); | ||
352 | for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; | ||
353 | cpu = cpumask_next_and(cpu, nodemask, mask)) { | ||
354 | if (cpu_online(cpu)) | ||
355 | goto call; | ||
356 | } | ||
357 | |||
358 | /* Any online CPU will do: smp_call_function_single handles nr_cpu_ids. */ | ||
359 | cpu = cpumask_any_and(mask, cpu_online_mask); | ||
360 | call: | ||
361 | ret = smp_call_function_single(cpu, func, info, wait); | ||
362 | put_cpu(); | ||
363 | return ret; | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(smp_call_function_any); | ||
366 | |||
324 | /** | 367 | /** |
325 | * __smp_call_function_single(): Run a function on another CPU | 368 | * __smp_call_function_single(): Run a function on another CPU |
326 | * @cpu: The CPU to run on. | 369 | * @cpu: The CPU to run on. |
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
355 | * @wait: If true, wait (atomically) until function has completed | 398 | * @wait: If true, wait (atomically) until function has completed |
356 | * on other CPUs. | 399 | * on other CPUs. |
357 | * | 400 | * |
358 | * If @wait is true, then returns once @func has returned. Note that @wait | 401 | * If @wait is true, then returns once @func has returned. |
359 | * will be implicitly turned on in case of allocation failures, since | ||
360 | * we fall back to on-stack allocation. | ||
361 | * | 402 | * |
362 | * You must not call this function with disabled interrupts or from a | 403 | * You must not call this function with disabled interrupts or from a |
363 | * hardware interrupt handler or from a bottom half handler. Preemption | 404 | * hardware interrupt handler or from a bottom half handler. Preemption |
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many); | |||
443 | * Returns 0. | 484 | * Returns 0. |
444 | * | 485 | * |
445 | * If @wait is true, then returns once @func has returned; otherwise | 486 | * If @wait is true, then returns once @func has returned; otherwise |
446 | * it returns just before the target cpu calls @func. In case of allocation | 487 | * it returns just before the target cpu calls @func. |
447 | * failure, @wait will be implicitly turned on. | ||
448 | * | 488 | * |
449 | * You must not call this function with disabled interrupts or from a | 489 | * You must not call this function with disabled interrupts or from a |
450 | * hardware interrupt handler or from a bottom half handler. | 490 | * hardware interrupt handler or from a bottom half handler. |
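The new smp_call_function_any() above picks the cheapest suitable CPU: the current one if it is in @mask, otherwise a CPU on the same node, otherwise any online CPU in the mask. A minimal usage sketch follows; the do_flush() callback and its surrounding code are illustrative only and not part of this patch:

	static void do_flush(void *info)
	{
		/* runs on whichever CPU smp_call_function_any() picked */
	}

	int err;

	/* Prefer a cheap (local or same-node) CPU and wait for completion. */
	err = smp_call_function_any(cpu_online_mask, do_flush, NULL, 1);
	if (err)
		printk(KERN_WARNING "do_flush: no online CPU in mask (%d)\n", err);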
diff --git a/kernel/softirq.c b/kernel/softirq.c index f8749e5216e0..21939d9e830e 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -302,9 +302,9 @@ void irq_exit(void) | |||
302 | if (!in_interrupt() && local_softirq_pending()) | 302 | if (!in_interrupt() && local_softirq_pending()) |
303 | invoke_softirq(); | 303 | invoke_softirq(); |
304 | 304 | ||
305 | rcu_irq_exit(); | ||
305 | #ifdef CONFIG_NO_HZ | 306 | #ifdef CONFIG_NO_HZ |
306 | /* Make sure that timer wheel updates are propagated */ | 307 | /* Make sure that timer wheel updates are propagated */ |
307 | rcu_irq_exit(); | ||
308 | if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) | 308 | if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) |
309 | tick_nohz_stop_sched_tick(0); | 309 | tick_nohz_stop_sched_tick(0); |
310 | #endif | 310 | #endif |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 5ddab730cb2f..41e042219ff6 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -21,145 +21,28 @@ | |||
21 | #include <linux/debug_locks.h> | 21 | #include <linux/debug_locks.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
24 | #ifndef _spin_trylock | ||
25 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
26 | { | ||
27 | return __spin_trylock(lock); | ||
28 | } | ||
29 | EXPORT_SYMBOL(_spin_trylock); | ||
30 | #endif | ||
31 | |||
32 | #ifndef _read_trylock | ||
33 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
34 | { | ||
35 | return __read_trylock(lock); | ||
36 | } | ||
37 | EXPORT_SYMBOL(_read_trylock); | ||
38 | #endif | ||
39 | |||
40 | #ifndef _write_trylock | ||
41 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
42 | { | ||
43 | return __write_trylock(lock); | ||
44 | } | ||
45 | EXPORT_SYMBOL(_write_trylock); | ||
46 | #endif | ||
47 | |||
48 | /* | 24 | /* |
49 | * If lockdep is enabled then we use the non-preemption spin-ops | 25 | * If lockdep is enabled then we use the non-preemption spin-ops |
50 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | 26 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
51 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | 27 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): |
52 | */ | 28 | */ |
53 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 29 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
54 | |||
55 | #ifndef _read_lock | ||
56 | void __lockfunc _read_lock(rwlock_t *lock) | ||
57 | { | ||
58 | __read_lock(lock); | ||
59 | } | ||
60 | EXPORT_SYMBOL(_read_lock); | ||
61 | #endif | ||
62 | |||
63 | #ifndef _spin_lock_irqsave | ||
64 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
65 | { | ||
66 | return __spin_lock_irqsave(lock); | ||
67 | } | ||
68 | EXPORT_SYMBOL(_spin_lock_irqsave); | ||
69 | #endif | ||
70 | |||
71 | #ifndef _spin_lock_irq | ||
72 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | ||
73 | { | ||
74 | __spin_lock_irq(lock); | ||
75 | } | ||
76 | EXPORT_SYMBOL(_spin_lock_irq); | ||
77 | #endif | ||
78 | |||
79 | #ifndef _spin_lock_bh | ||
80 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | ||
81 | { | ||
82 | __spin_lock_bh(lock); | ||
83 | } | ||
84 | EXPORT_SYMBOL(_spin_lock_bh); | ||
85 | #endif | ||
86 | |||
87 | #ifndef _read_lock_irqsave | ||
88 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
89 | { | ||
90 | return __read_lock_irqsave(lock); | ||
91 | } | ||
92 | EXPORT_SYMBOL(_read_lock_irqsave); | ||
93 | #endif | ||
94 | |||
95 | #ifndef _read_lock_irq | ||
96 | void __lockfunc _read_lock_irq(rwlock_t *lock) | ||
97 | { | ||
98 | __read_lock_irq(lock); | ||
99 | } | ||
100 | EXPORT_SYMBOL(_read_lock_irq); | ||
101 | #endif | ||
102 | |||
103 | #ifndef _read_lock_bh | ||
104 | void __lockfunc _read_lock_bh(rwlock_t *lock) | ||
105 | { | ||
106 | __read_lock_bh(lock); | ||
107 | } | ||
108 | EXPORT_SYMBOL(_read_lock_bh); | ||
109 | #endif | ||
110 | |||
111 | #ifndef _write_lock_irqsave | ||
112 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
113 | { | ||
114 | return __write_lock_irqsave(lock); | ||
115 | } | ||
116 | EXPORT_SYMBOL(_write_lock_irqsave); | ||
117 | #endif | ||
118 | |||
119 | #ifndef _write_lock_irq | ||
120 | void __lockfunc _write_lock_irq(rwlock_t *lock) | ||
121 | { | ||
122 | __write_lock_irq(lock); | ||
123 | } | ||
124 | EXPORT_SYMBOL(_write_lock_irq); | ||
125 | #endif | ||
126 | |||
127 | #ifndef _write_lock_bh | ||
128 | void __lockfunc _write_lock_bh(rwlock_t *lock) | ||
129 | { | ||
130 | __write_lock_bh(lock); | ||
131 | } | ||
132 | EXPORT_SYMBOL(_write_lock_bh); | ||
133 | #endif | ||
134 | |||
135 | #ifndef _spin_lock | ||
136 | void __lockfunc _spin_lock(spinlock_t *lock) | ||
137 | { | ||
138 | __spin_lock(lock); | ||
139 | } | ||
140 | EXPORT_SYMBOL(_spin_lock); | ||
141 | #endif | ||
142 | |||
143 | #ifndef _write_lock | ||
144 | void __lockfunc _write_lock(rwlock_t *lock) | ||
145 | { | ||
146 | __write_lock(lock); | ||
147 | } | ||
148 | EXPORT_SYMBOL(_write_lock); | ||
149 | #endif | ||
150 | |||
151 | #else /* CONFIG_PREEMPT: */ | ||
152 | |||
153 | /* | 30 | /* |
31 | * The __lock_function inlines are taken from | ||
32 | * include/linux/spinlock_api_smp.h | ||
33 | */ | ||
34 | #else | ||
35 | /* | ||
36 | * We build the __lock_function inlines here. They are too large for | ||
37 | * inlining all over the place, but here there is only one user per function, | ||
38 | * which embeds them into the calling _lock_function below. | ||
39 | * | ||
154 | * This could be a long-held lock. We both prepare to spin for a long | 40 | * This could be a long-held lock. We both prepare to spin for a long |
155 | * time (making _this_ CPU preemptable if possible), and we also signal | 41 | * time (making _this_ CPU preemptable if possible), and we also signal |
156 | * towards that other CPU that it should break the lock ASAP. | 42 | * towards that other CPU that it should break the lock ASAP. |
157 | * | ||
158 | * (We do this in a function because inlining it would be excessive.) | ||
159 | */ | 43 | */ |
160 | |||
161 | #define BUILD_LOCK_OPS(op, locktype) \ | 44 | #define BUILD_LOCK_OPS(op, locktype) \ |
162 | void __lockfunc _##op##_lock(locktype##_t *lock) \ | 45 | void __lockfunc __##op##_lock(locktype##_t *lock) \ |
163 | { \ | 46 | { \ |
164 | for (;;) { \ | 47 | for (;;) { \ |
165 | preempt_disable(); \ | 48 | preempt_disable(); \ |
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \ | |||
175 | (lock)->break_lock = 0; \ | 58 | (lock)->break_lock = 0; \ |
176 | } \ | 59 | } \ |
177 | \ | 60 | \ |
178 | EXPORT_SYMBOL(_##op##_lock); \ | 61 | unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ |
179 | \ | ||
180 | unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ | ||
181 | { \ | 62 | { \ |
182 | unsigned long flags; \ | 63 | unsigned long flags; \ |
183 | \ | 64 | \ |
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ | |||
198 | return flags; \ | 79 | return flags; \ |
199 | } \ | 80 | } \ |
200 | \ | 81 | \ |
201 | EXPORT_SYMBOL(_##op##_lock_irqsave); \ | 82 | void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ |
202 | \ | ||
203 | void __lockfunc _##op##_lock_irq(locktype##_t *lock) \ | ||
204 | { \ | 83 | { \ |
205 | _##op##_lock_irqsave(lock); \ | 84 | _##op##_lock_irqsave(lock); \ |
206 | } \ | 85 | } \ |
207 | \ | 86 | \ |
208 | EXPORT_SYMBOL(_##op##_lock_irq); \ | 87 | void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ |
209 | \ | ||
210 | void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ | ||
211 | { \ | 88 | { \ |
212 | unsigned long flags; \ | 89 | unsigned long flags; \ |
213 | \ | 90 | \ |
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ | |||
220 | local_bh_disable(); \ | 97 | local_bh_disable(); \ |
221 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
222 | } \ | 99 | } \ |
223 | \ | ||
224 | EXPORT_SYMBOL(_##op##_lock_bh) | ||
225 | 100 | ||
226 | /* | 101 | /* |
227 | * Build preemption-friendly versions of the following | 102 | * Build preemption-friendly versions of the following |
228 | * lock-spinning functions: | 103 | * lock-spinning functions: |
229 | * | 104 | * |
230 | * _[spin|read|write]_lock() | 105 | * __[spin|read|write]_lock() |
231 | * _[spin|read|write]_lock_irq() | 106 | * __[spin|read|write]_lock_irq() |
232 | * _[spin|read|write]_lock_irqsave() | 107 | * __[spin|read|write]_lock_irqsave() |
233 | * _[spin|read|write]_lock_bh() | 108 | * __[spin|read|write]_lock_bh() |
234 | */ | 109 | */ |
235 | BUILD_LOCK_OPS(spin, spinlock); | 110 | BUILD_LOCK_OPS(spin, spinlock); |
236 | BUILD_LOCK_OPS(read, rwlock); | 111 | BUILD_LOCK_OPS(read, rwlock); |
237 | BUILD_LOCK_OPS(write, rwlock); | 112 | BUILD_LOCK_OPS(write, rwlock); |
238 | 113 | ||
239 | #endif /* CONFIG_PREEMPT */ | 114 | #endif |
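For reference, a rough sketch of what BUILD_LOCK_OPS(spin, spinlock) generates in the preemptible case: the middle of the loop is not visible in the hunk above, so the trylock/relax details are reconstructed and simplified here (spin_can_lock() and _raw_spin_trylock() are assumed helpers), and this should be read as illustrative rather than the exact expansion:

	void __lockfunc __spin_lock(spinlock_t *lock)
	{
		for (;;) {
			preempt_disable();
			if (_raw_spin_trylock(lock))	/* got the lock with preemption off */
				break;
			preempt_enable();		/* stay preemptible while spinning */

			if (!(lock)->break_lock)
				(lock)->break_lock = 1;	/* ask the holder to release ASAP */
			while (!spin_can_lock(lock) && (lock)->break_lock)
				cpu_relax();
		}
		(lock)->break_lock = 0;
	}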
240 | 115 | ||
241 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 116 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
242 | 117 | ||
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
248 | } | 123 | } |
249 | EXPORT_SYMBOL(_spin_lock_nested); | 124 | EXPORT_SYMBOL(_spin_lock_nested); |
250 | 125 | ||
251 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 126 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, |
127 | int subclass) | ||
252 | { | 128 | { |
253 | unsigned long flags; | 129 | unsigned long flags; |
254 | 130 | ||
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock); | |||
272 | 148 | ||
273 | #endif | 149 | #endif |
274 | 150 | ||
275 | #ifndef _spin_unlock | 151 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK |
152 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
153 | { | ||
154 | return __spin_trylock(lock); | ||
155 | } | ||
156 | EXPORT_SYMBOL(_spin_trylock); | ||
157 | #endif | ||
158 | |||
159 | #ifndef CONFIG_INLINE_READ_TRYLOCK | ||
160 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
161 | { | ||
162 | return __read_trylock(lock); | ||
163 | } | ||
164 | EXPORT_SYMBOL(_read_trylock); | ||
165 | #endif | ||
166 | |||
167 | #ifndef CONFIG_INLINE_WRITE_TRYLOCK | ||
168 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
169 | { | ||
170 | return __write_trylock(lock); | ||
171 | } | ||
172 | EXPORT_SYMBOL(_write_trylock); | ||
173 | #endif | ||
174 | |||
175 | #ifndef CONFIG_INLINE_READ_LOCK | ||
176 | void __lockfunc _read_lock(rwlock_t *lock) | ||
177 | { | ||
178 | __read_lock(lock); | ||
179 | } | ||
180 | EXPORT_SYMBOL(_read_lock); | ||
181 | #endif | ||
182 | |||
183 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | ||
184 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
185 | { | ||
186 | return __spin_lock_irqsave(lock); | ||
187 | } | ||
188 | EXPORT_SYMBOL(_spin_lock_irqsave); | ||
189 | #endif | ||
190 | |||
191 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ | ||
192 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | ||
193 | { | ||
194 | __spin_lock_irq(lock); | ||
195 | } | ||
196 | EXPORT_SYMBOL(_spin_lock_irq); | ||
197 | #endif | ||
198 | |||
199 | #ifndef CONFIG_INLINE_SPIN_LOCK_BH | ||
200 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | ||
201 | { | ||
202 | __spin_lock_bh(lock); | ||
203 | } | ||
204 | EXPORT_SYMBOL(_spin_lock_bh); | ||
205 | #endif | ||
206 | |||
207 | #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE | ||
208 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
209 | { | ||
210 | return __read_lock_irqsave(lock); | ||
211 | } | ||
212 | EXPORT_SYMBOL(_read_lock_irqsave); | ||
213 | #endif | ||
214 | |||
215 | #ifndef CONFIG_INLINE_READ_LOCK_IRQ | ||
216 | void __lockfunc _read_lock_irq(rwlock_t *lock) | ||
217 | { | ||
218 | __read_lock_irq(lock); | ||
219 | } | ||
220 | EXPORT_SYMBOL(_read_lock_irq); | ||
221 | #endif | ||
222 | |||
223 | #ifndef CONFIG_INLINE_READ_LOCK_BH | ||
224 | void __lockfunc _read_lock_bh(rwlock_t *lock) | ||
225 | { | ||
226 | __read_lock_bh(lock); | ||
227 | } | ||
228 | EXPORT_SYMBOL(_read_lock_bh); | ||
229 | #endif | ||
230 | |||
231 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | ||
232 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
233 | { | ||
234 | return __write_lock_irqsave(lock); | ||
235 | } | ||
236 | EXPORT_SYMBOL(_write_lock_irqsave); | ||
237 | #endif | ||
238 | |||
239 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ | ||
240 | void __lockfunc _write_lock_irq(rwlock_t *lock) | ||
241 | { | ||
242 | __write_lock_irq(lock); | ||
243 | } | ||
244 | EXPORT_SYMBOL(_write_lock_irq); | ||
245 | #endif | ||
246 | |||
247 | #ifndef CONFIG_INLINE_WRITE_LOCK_BH | ||
248 | void __lockfunc _write_lock_bh(rwlock_t *lock) | ||
249 | { | ||
250 | __write_lock_bh(lock); | ||
251 | } | ||
252 | EXPORT_SYMBOL(_write_lock_bh); | ||
253 | #endif | ||
254 | |||
255 | #ifndef CONFIG_INLINE_SPIN_LOCK | ||
256 | void __lockfunc _spin_lock(spinlock_t *lock) | ||
257 | { | ||
258 | __spin_lock(lock); | ||
259 | } | ||
260 | EXPORT_SYMBOL(_spin_lock); | ||
261 | #endif | ||
262 | |||
263 | #ifndef CONFIG_INLINE_WRITE_LOCK | ||
264 | void __lockfunc _write_lock(rwlock_t *lock) | ||
265 | { | ||
266 | __write_lock(lock); | ||
267 | } | ||
268 | EXPORT_SYMBOL(_write_lock); | ||
269 | #endif | ||
270 | |||
271 | #ifndef CONFIG_INLINE_SPIN_UNLOCK | ||
276 | void __lockfunc _spin_unlock(spinlock_t *lock) | 272 | void __lockfunc _spin_unlock(spinlock_t *lock) |
277 | { | 273 | { |
278 | __spin_unlock(lock); | 274 | __spin_unlock(lock); |
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock) | |||
280 | EXPORT_SYMBOL(_spin_unlock); | 276 | EXPORT_SYMBOL(_spin_unlock); |
281 | #endif | 277 | #endif |
282 | 278 | ||
283 | #ifndef _write_unlock | 279 | #ifndef CONFIG_INLINE_WRITE_UNLOCK |
284 | void __lockfunc _write_unlock(rwlock_t *lock) | 280 | void __lockfunc _write_unlock(rwlock_t *lock) |
285 | { | 281 | { |
286 | __write_unlock(lock); | 282 | __write_unlock(lock); |
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock) | |||
288 | EXPORT_SYMBOL(_write_unlock); | 284 | EXPORT_SYMBOL(_write_unlock); |
289 | #endif | 285 | #endif |
290 | 286 | ||
291 | #ifndef _read_unlock | 287 | #ifndef CONFIG_INLINE_READ_UNLOCK |
292 | void __lockfunc _read_unlock(rwlock_t *lock) | 288 | void __lockfunc _read_unlock(rwlock_t *lock) |
293 | { | 289 | { |
294 | __read_unlock(lock); | 290 | __read_unlock(lock); |
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock) | |||
296 | EXPORT_SYMBOL(_read_unlock); | 292 | EXPORT_SYMBOL(_read_unlock); |
297 | #endif | 293 | #endif |
298 | 294 | ||
299 | #ifndef _spin_unlock_irqrestore | 295 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
300 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 296 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
301 | { | 297 | { |
302 | __spin_unlock_irqrestore(lock, flags); | 298 | __spin_unlock_irqrestore(lock, flags); |
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | |||
304 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 300 | EXPORT_SYMBOL(_spin_unlock_irqrestore); |
305 | #endif | 301 | #endif |
306 | 302 | ||
307 | #ifndef _spin_unlock_irq | 303 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
308 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) | 304 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) |
309 | { | 305 | { |
310 | __spin_unlock_irq(lock); | 306 | __spin_unlock_irq(lock); |
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock) | |||
312 | EXPORT_SYMBOL(_spin_unlock_irq); | 308 | EXPORT_SYMBOL(_spin_unlock_irq); |
313 | #endif | 309 | #endif |
314 | 310 | ||
315 | #ifndef _spin_unlock_bh | 311 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH |
316 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) | 312 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) |
317 | { | 313 | { |
318 | __spin_unlock_bh(lock); | 314 | __spin_unlock_bh(lock); |
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock) | |||
320 | EXPORT_SYMBOL(_spin_unlock_bh); | 316 | EXPORT_SYMBOL(_spin_unlock_bh); |
321 | #endif | 317 | #endif |
322 | 318 | ||
323 | #ifndef _read_unlock_irqrestore | 319 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
324 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 320 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
325 | { | 321 | { |
326 | __read_unlock_irqrestore(lock, flags); | 322 | __read_unlock_irqrestore(lock, flags); |
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
328 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 324 | EXPORT_SYMBOL(_read_unlock_irqrestore); |
329 | #endif | 325 | #endif |
330 | 326 | ||
331 | #ifndef _read_unlock_irq | 327 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ |
332 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 328 | void __lockfunc _read_unlock_irq(rwlock_t *lock) |
333 | { | 329 | { |
334 | __read_unlock_irq(lock); | 330 | __read_unlock_irq(lock); |
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock) | |||
336 | EXPORT_SYMBOL(_read_unlock_irq); | 332 | EXPORT_SYMBOL(_read_unlock_irq); |
337 | #endif | 333 | #endif |
338 | 334 | ||
339 | #ifndef _read_unlock_bh | 335 | #ifndef CONFIG_INLINE_READ_UNLOCK_BH |
340 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 336 | void __lockfunc _read_unlock_bh(rwlock_t *lock) |
341 | { | 337 | { |
342 | __read_unlock_bh(lock); | 338 | __read_unlock_bh(lock); |
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock) | |||
344 | EXPORT_SYMBOL(_read_unlock_bh); | 340 | EXPORT_SYMBOL(_read_unlock_bh); |
345 | #endif | 341 | #endif |
346 | 342 | ||
347 | #ifndef _write_unlock_irqrestore | 343 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
348 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 344 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
349 | { | 345 | { |
350 | __write_unlock_irqrestore(lock, flags); | 346 | __write_unlock_irqrestore(lock, flags); |
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
352 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 348 | EXPORT_SYMBOL(_write_unlock_irqrestore); |
353 | #endif | 349 | #endif |
354 | 350 | ||
355 | #ifndef _write_unlock_irq | 351 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
356 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 352 | void __lockfunc _write_unlock_irq(rwlock_t *lock) |
357 | { | 353 | { |
358 | __write_unlock_irq(lock); | 354 | __write_unlock_irq(lock); |
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock) | |||
360 | EXPORT_SYMBOL(_write_unlock_irq); | 356 | EXPORT_SYMBOL(_write_unlock_irq); |
361 | #endif | 357 | #endif |
362 | 358 | ||
363 | #ifndef _write_unlock_bh | 359 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH |
364 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 360 | void __lockfunc _write_unlock_bh(rwlock_t *lock) |
365 | { | 361 | { |
366 | __write_unlock_bh(lock); | 362 | __write_unlock_bh(lock); |
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock) | |||
368 | EXPORT_SYMBOL(_write_unlock_bh); | 364 | EXPORT_SYMBOL(_write_unlock_bh); |
369 | #endif | 365 | #endif |
370 | 366 | ||
371 | #ifndef _spin_trylock_bh | 367 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH |
372 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | 368 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) |
373 | { | 369 | { |
374 | return __spin_trylock_bh(lock); | 370 | return __spin_trylock_bh(lock); |
diff --git a/kernel/srcu.c b/kernel/srcu.c index b0aeeaf22ce4..818d7d9aa03c 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp) | |||
49 | sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); | 49 | sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); |
50 | return (sp->per_cpu_ref ? 0 : -ENOMEM); | 50 | return (sp->per_cpu_ref ? 0 : -ENOMEM); |
51 | } | 51 | } |
52 | EXPORT_SYMBOL_GPL(init_srcu_struct); | ||
52 | 53 | ||
53 | /* | 54 | /* |
54 | * srcu_readers_active_idx -- returns approximate number of readers | 55 | * srcu_readers_active_idx -- returns approximate number of readers |
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp) | |||
97 | free_percpu(sp->per_cpu_ref); | 98 | free_percpu(sp->per_cpu_ref); |
98 | sp->per_cpu_ref = NULL; | 99 | sp->per_cpu_ref = NULL; |
99 | } | 100 | } |
101 | EXPORT_SYMBOL_GPL(cleanup_srcu_struct); | ||
100 | 102 | ||
101 | /** | 103 | /** |
102 | * srcu_read_lock - register a new reader for an SRCU-protected structure. | 104 | * srcu_read_lock - register a new reader for an SRCU-protected structure. |
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp) | |||
118 | preempt_enable(); | 120 | preempt_enable(); |
119 | return idx; | 121 | return idx; |
120 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(srcu_read_lock); | ||
121 | 124 | ||
122 | /** | 125 | /** |
123 | * srcu_read_unlock - unregister an old reader from an SRCU-protected structure. | 126 | * srcu_read_unlock - unregister an old reader from an SRCU-protected structure. |
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx) | |||
136 | per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; | 139 | per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; |
137 | preempt_enable(); | 140 | preempt_enable(); |
138 | } | 141 | } |
142 | EXPORT_SYMBOL_GPL(srcu_read_unlock); | ||
139 | 143 | ||
140 | /** | 144 | /* |
141 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | 145 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
142 | * @sp: srcu_struct with which to synchronize. | ||
143 | * | ||
144 | * Flip the completed counter, and wait for the old count to drain to zero. | ||
145 | * As with classic RCU, the updater must use some separate means of | ||
146 | * synchronizing concurrent updates. Can block; must be called from | ||
147 | * process context. | ||
148 | * | ||
149 | * Note that it is illegal to call synchornize_srcu() from the corresponding | ||
150 | * SRCU read-side critical section; doing so will result in deadlock. | ||
151 | * However, it is perfectly legal to call synchronize_srcu() on one | ||
152 | * srcu_struct from some other srcu_struct's read-side critical section. | ||
153 | */ | 146 | */ |
154 | void synchronize_srcu(struct srcu_struct *sp) | 147 | void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) |
155 | { | 148 | { |
156 | int idx; | 149 | int idx; |
157 | 150 | ||
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
173 | return; | 166 | return; |
174 | } | 167 | } |
175 | 168 | ||
176 | synchronize_sched(); /* Force memory barrier on all CPUs. */ | 169 | sync_func(); /* Force memory barrier on all CPUs. */ |
177 | 170 | ||
178 | /* | 171 | /* |
179 | * The preceding synchronize_sched() ensures that any CPU that | 172 | * The preceding synchronize_sched() ensures that any CPU that |
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
190 | idx = sp->completed & 0x1; | 183 | idx = sp->completed & 0x1; |
191 | sp->completed++; | 184 | sp->completed++; |
192 | 185 | ||
193 | synchronize_sched(); /* Force memory barrier on all CPUs. */ | 186 | sync_func(); /* Force memory barrier on all CPUs. */ |
194 | 187 | ||
195 | /* | 188 | /* |
196 | * At this point, because of the preceding synchronize_sched(), | 189 | * At this point, because of the preceding synchronize_sched(), |
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
203 | while (srcu_readers_active_idx(sp, idx)) | 196 | while (srcu_readers_active_idx(sp, idx)) |
204 | schedule_timeout_interruptible(1); | 197 | schedule_timeout_interruptible(1); |
205 | 198 | ||
206 | synchronize_sched(); /* Force memory barrier on all CPUs. */ | 199 | sync_func(); /* Force memory barrier on all CPUs. */ |
207 | 200 | ||
208 | /* | 201 | /* |
209 | * The preceding synchronize_sched() forces all srcu_read_unlock() | 202 | * The preceding synchronize_sched() forces all srcu_read_unlock() |
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
237 | } | 230 | } |
238 | 231 | ||
239 | /** | 232 | /** |
233 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | ||
234 | * @sp: srcu_struct with which to synchronize. | ||
235 | * | ||
236 | * Flip the completed counter, and wait for the old count to drain to zero. | ||
237 | * As with classic RCU, the updater must use some separate means of | ||
238 | * synchronizing concurrent updates. Can block; must be called from | ||
239 | * process context. | ||
240 | * | ||
241 | * Note that it is illegal to call synchronize_srcu() from the corresponding | ||
242 | * SRCU read-side critical section; doing so will result in deadlock. | ||
243 | * However, it is perfectly legal to call synchronize_srcu() on one | ||
244 | * srcu_struct from some other srcu_struct's read-side critical section. | ||
245 | */ | ||
246 | void synchronize_srcu(struct srcu_struct *sp) | ||
247 | { | ||
248 | __synchronize_srcu(sp, synchronize_sched); | ||
249 | } | ||
250 | EXPORT_SYMBOL_GPL(synchronize_srcu); | ||
251 | |||
252 | /** | ||
253 | * synchronize_srcu_expedited - like synchronize_srcu, but less patient | ||
254 | * @sp: srcu_struct with which to synchronize. | ||
255 | * | ||
256 | * Flip the completed counter, and wait for the old count to drain to zero. | ||
257 | * As with classic RCU, the updater must use some separate means of | ||
258 | * synchronizing concurrent updates. Can block; must be called from | ||
259 | * process context. | ||
260 | * | ||
261 | * Note that it is illegal to call synchronize_srcu_expedited() | ||
262 | * from the corresponding SRCU read-side critical section; doing so | ||
263 | * will result in deadlock. However, it is perfectly legal to call | ||
264 | * synchronize_srcu_expedited() on one srcu_struct from some other | ||
265 | * srcu_struct's read-side critical section. | ||
266 | */ | ||
267 | void synchronize_srcu_expedited(struct srcu_struct *sp) | ||
268 | { | ||
269 | __synchronize_srcu(sp, synchronize_sched_expedited); | ||
270 | } | ||
271 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); | ||
272 | |||
273 | /** | ||
240 | * srcu_batches_completed - return batches completed. | 274 | * srcu_batches_completed - return batches completed. |
241 | * @sp: srcu_struct on which to report batch completion. | 275 | * @sp: srcu_struct on which to report batch completion. |
242 | * | 276 | * |
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp) | |||
248 | { | 282 | { |
249 | return sp->completed; | 283 | return sp->completed; |
250 | } | 284 | } |
251 | |||
252 | EXPORT_SYMBOL_GPL(init_srcu_struct); | ||
253 | EXPORT_SYMBOL_GPL(cleanup_srcu_struct); | ||
254 | EXPORT_SYMBOL_GPL(srcu_read_lock); | ||
255 | EXPORT_SYMBOL_GPL(srcu_read_unlock); | ||
256 | EXPORT_SYMBOL_GPL(synchronize_srcu); | ||
257 | EXPORT_SYMBOL_GPL(srcu_batches_completed); | 285 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
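With the helper factored out, synchronize_srcu() and synchronize_srcu_expedited() differ only in the grace-period primitive they hand to __synchronize_srcu(). A minimal update-side sketch, using an illustrative my_srcu instance that is not part of this patch:

	struct srcu_struct my_srcu;	/* illustrative name */
	int idx;

	init_srcu_struct(&my_srcu);

	/* Read side: usable where readers may block. */
	idx = srcu_read_lock(&my_srcu);
	/* ... dereference data protected by my_srcu ... */
	srcu_read_unlock(&my_srcu, idx);

	/* Update side (process context): unlink the old version, then wait. */
	synchronize_srcu(&my_srcu);		/* ordinary grace period */
	synchronize_srcu_expedited(&my_srcu);	/* same semantics, lower latency, more CPU */

	cleanup_srcu_struct(&my_srcu);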
diff --git a/kernel/sys.c b/kernel/sys.c index ce17760d9c51..9968c5fb55b9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -911,16 +911,15 @@ change_okay: | |||
911 | 911 | ||
912 | void do_sys_times(struct tms *tms) | 912 | void do_sys_times(struct tms *tms) |
913 | { | 913 | { |
914 | struct task_cputime cputime; | 914 | cputime_t tgutime, tgstime, cutime, cstime; |
915 | cputime_t cutime, cstime; | ||
916 | 915 | ||
917 | thread_group_cputime(current, &cputime); | ||
918 | spin_lock_irq(¤t->sighand->siglock); | 916 | spin_lock_irq(¤t->sighand->siglock); |
917 | thread_group_times(current, &tgutime, &tgstime); | ||
919 | cutime = current->signal->cutime; | 918 | cutime = current->signal->cutime; |
920 | cstime = current->signal->cstime; | 919 | cstime = current->signal->cstime; |
921 | spin_unlock_irq(¤t->sighand->siglock); | 920 | spin_unlock_irq(¤t->sighand->siglock); |
922 | tms->tms_utime = cputime_to_clock_t(cputime.utime); | 921 | tms->tms_utime = cputime_to_clock_t(tgutime); |
923 | tms->tms_stime = cputime_to_clock_t(cputime.stime); | 922 | tms->tms_stime = cputime_to_clock_t(tgstime); |
924 | tms->tms_cutime = cputime_to_clock_t(cutime); | 923 | tms->tms_cutime = cputime_to_clock_t(cutime); |
925 | tms->tms_cstime = cputime_to_clock_t(cstime); | 924 | tms->tms_cstime = cputime_to_clock_t(cstime); |
926 | } | 925 | } |
@@ -1338,16 +1337,14 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1338 | { | 1337 | { |
1339 | struct task_struct *t; | 1338 | struct task_struct *t; |
1340 | unsigned long flags; | 1339 | unsigned long flags; |
1341 | cputime_t utime, stime; | 1340 | cputime_t tgutime, tgstime, utime, stime; |
1342 | struct task_cputime cputime; | ||
1343 | unsigned long maxrss = 0; | 1341 | unsigned long maxrss = 0; |
1344 | 1342 | ||
1345 | memset((char *) r, 0, sizeof *r); | 1343 | memset((char *) r, 0, sizeof *r); |
1346 | utime = stime = cputime_zero; | 1344 | utime = stime = cputime_zero; |
1347 | 1345 | ||
1348 | if (who == RUSAGE_THREAD) { | 1346 | if (who == RUSAGE_THREAD) { |
1349 | utime = task_utime(current); | 1347 | task_times(current, &utime, &stime); |
1350 | stime = task_stime(current); | ||
1351 | accumulate_thread_rusage(p, r); | 1348 | accumulate_thread_rusage(p, r); |
1352 | maxrss = p->signal->maxrss; | 1349 | maxrss = p->signal->maxrss; |
1353 | goto out; | 1350 | goto out; |
@@ -1373,9 +1370,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1373 | break; | 1370 | break; |
1374 | 1371 | ||
1375 | case RUSAGE_SELF: | 1372 | case RUSAGE_SELF: |
1376 | thread_group_cputime(p, &cputime); | 1373 | thread_group_times(p, &tgutime, &tgstime); |
1377 | utime = cputime_add(utime, cputime.utime); | 1374 | utime = cputime_add(utime, tgutime); |
1378 | stime = cputime_add(stime, cputime.stime); | 1375 | stime = cputime_add(stime, tgstime); |
1379 | r->ru_nvcsw += p->signal->nvcsw; | 1376 | r->ru_nvcsw += p->signal->nvcsw; |
1380 | r->ru_nivcsw += p->signal->nivcsw; | 1377 | r->ru_nivcsw += p->signal->nivcsw; |
1381 | r->ru_minflt += p->signal->min_flt; | 1378 | r->ru_minflt += p->signal->min_flt; |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0d949c517412..4dbf93a52ee9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/sysrq.h> | 36 | #include <linux/sysrq.h> |
37 | #include <linux/highuid.h> | 37 | #include <linux/highuid.h> |
38 | #include <linux/writeback.h> | 38 | #include <linux/writeback.h> |
39 | #include <linux/ratelimit.h> | ||
39 | #include <linux/hugetlb.h> | 40 | #include <linux/hugetlb.h> |
40 | #include <linux/initrd.h> | 41 | #include <linux/initrd.h> |
41 | #include <linux/key.h> | 42 | #include <linux/key.h> |
@@ -158,6 +159,8 @@ extern int no_unaligned_warning; | |||
158 | extern int unaligned_dump_stack; | 159 | extern int unaligned_dump_stack; |
159 | #endif | 160 | #endif |
160 | 161 | ||
162 | extern struct ratelimit_state printk_ratelimit_state; | ||
163 | |||
161 | #ifdef CONFIG_RT_MUTEXES | 164 | #ifdef CONFIG_RT_MUTEXES |
162 | extern int max_lock_depth; | 165 | extern int max_lock_depth; |
163 | #endif | 166 | #endif |
diff --git a/kernel/time.c b/kernel/time.c index 2e2e469a7fec..804798005d19 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
@@ -662,6 +662,36 @@ u64 nsec_to_clock_t(u64 x) | |||
662 | #endif | 662 | #endif |
663 | } | 663 | } |
664 | 664 | ||
665 | /** | ||
666 | * nsecs_to_jiffies - Convert nsecs in u64 to jiffies | ||
667 | * | ||
668 | * @n: nsecs in u64 | ||
669 | * | ||
670 | * Unlike {m,u}secs_to_jiffies, the type of the input is not unsigned int but u64. | ||
671 | * Also, this doesn't return MAX_JIFFY_OFFSET, since this function is designed | ||
672 | * for the scheduler, not for use in device drivers to calculate timeout values. | ||
673 | * | ||
674 | * note: | ||
675 | * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) | ||
676 | * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years | ||
677 | */ | ||
678 | unsigned long nsecs_to_jiffies(u64 n) | ||
679 | { | ||
680 | #if (NSEC_PER_SEC % HZ) == 0 | ||
681 | /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ | ||
682 | return div_u64(n, NSEC_PER_SEC / HZ); | ||
683 | #elif (HZ % 512) == 0 | ||
684 | /* overflow after 292 years if HZ = 1024 */ | ||
685 | return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); | ||
686 | #else | ||
687 | /* | ||
688 | * Generic case - optimized for cases where HZ is a multiple of 3. | ||
689 | * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. | ||
690 | */ | ||
691 | return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); | ||
692 | #endif | ||
693 | } | ||
694 | |||
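A short, hypothetical use of the new helper (delta_ns is an invented value): it converts a scheduler-style u64 nanosecond quantity into jiffies, which is the kind of conversion the function is intended for rather than a driver timeout calculation:

	u64 delta_ns = 1500ULL * NSEC_PER_MSEC;		/* 1.5 s of accumulated runtime */
	unsigned long ticks = nsecs_to_jiffies(delta_ns);

	/* With HZ=1000 this yields 1500 jiffies; with HZ=100 it yields 150. */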
665 | #if (BITS_PER_LONG < 64) | 695 | #if (BITS_PER_LONG < 64) |
666 | u64 get_jiffies_64(void) | 696 | u64 get_jiffies_64(void) |
667 | { | 697 | { |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index b416512ad17f..d006554888dc 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -339,6 +339,27 @@ config POWER_TRACER | |||
339 | power management decisions, specifically the C-state and P-state | 339 | power management decisions, specifically the C-state and P-state |
340 | behavior. | 340 | behavior. |
341 | 341 | ||
342 | config KSYM_TRACER | ||
343 | bool "Trace read and write access on kernel memory locations" | ||
344 | depends on HAVE_HW_BREAKPOINT | ||
345 | select TRACING | ||
346 | help | ||
347 | This tracer helps find read and write operations on any given kernel | ||
348 | symbol, i.e. one listed in /proc/kallsyms. | ||
349 | |||
350 | config PROFILE_KSYM_TRACER | ||
351 | bool "Profile all kernel memory accesses on 'watched' variables" | ||
352 | depends on KSYM_TRACER | ||
353 | help | ||
354 | This tracer profiles kernel accesses on variables watched through the | ||
355 | ksym tracer ftrace plugin. Depending upon the hardware, all read | ||
356 | and write operations on kernel variables can be monitored for | ||
357 | accesses. | ||
358 | |||
359 | The results will be displayed in: | ||
360 | /sys/kernel/debug/tracing/profile_ksym | ||
361 | |||
362 | Say N if unsure. | ||
342 | 363 | ||
343 | config STACK_TRACER | 364 | config STACK_TRACER |
344 | bool "Trace max stack" | 365 | bool "Trace max stack" |
@@ -428,6 +449,23 @@ config BLK_DEV_IO_TRACE | |||
428 | 449 | ||
429 | If unsure, say N. | 450 | If unsure, say N. |
430 | 451 | ||
452 | config KPROBE_EVENT | ||
453 | depends on KPROBES | ||
454 | depends on X86 | ||
455 | bool "Enable kprobes-based dynamic events" | ||
456 | select TRACING | ||
457 | default y | ||
458 | help | ||
459 | This allows the user to add tracing events (similar to tracepoints) on the fly | ||
460 | via the ftrace interface. See Documentation/trace/kprobetrace.txt | ||
461 | for more details. | ||
462 | |||
463 | Those events can be inserted wherever kprobes can probe, and record | ||
464 | various register and memory values. | ||
465 | |||
466 | This option is also required by the perf-probe subcommand of perf tools. If | ||
467 | you want to use perf tools, this option is strongly recommended. | ||
468 | |||
431 | config DYNAMIC_FTRACE | 469 | config DYNAMIC_FTRACE |
432 | bool "enable/disable ftrace tracepoints dynamically" | 470 | bool "enable/disable ftrace tracepoints dynamically" |
433 | depends on FUNCTION_TRACER | 471 | depends on FUNCTION_TRACER |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 26f03ac07c2b..cd9ecd89ec77 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -53,6 +53,8 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o | |||
53 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o | 53 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
54 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o | 54 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o |
55 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 55 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
56 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | ||
57 | obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o | ||
56 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o | 58 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o |
57 | 59 | ||
58 | libftrace-y := ftrace.o | 60 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6dc4e5ef7a01..e51a1bcb7bed 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -60,6 +60,13 @@ static int last_ftrace_enabled; | |||
60 | /* Quick disabling of function tracer. */ | 60 | /* Quick disabling of function tracer. */ |
61 | int function_trace_stop; | 61 | int function_trace_stop; |
62 | 62 | ||
63 | /* List for set_ftrace_pid's pids. */ | ||
64 | LIST_HEAD(ftrace_pids); | ||
65 | struct ftrace_pid { | ||
66 | struct list_head list; | ||
67 | struct pid *pid; | ||
68 | }; | ||
69 | |||
63 | /* | 70 | /* |
64 | * ftrace_disabled is set when an anomaly is discovered. | 71 | * ftrace_disabled is set when an anomaly is discovered. |
65 | * ftrace_disabled is much stronger than ftrace_enabled. | 72 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | |||
78 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 85 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
79 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | 86 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; |
80 | 87 | ||
88 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
89 | static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); | ||
90 | #endif | ||
91 | |||
81 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 92 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) |
82 | { | 93 | { |
83 | struct ftrace_ops *op = ftrace_list; | 94 | struct ftrace_ops *op = ftrace_list; |
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
155 | else | 166 | else |
156 | func = ftrace_list_func; | 167 | func = ftrace_list_func; |
157 | 168 | ||
158 | if (ftrace_pid_trace) { | 169 | if (!list_empty(&ftrace_pids)) { |
159 | set_ftrace_pid_function(func); | 170 | set_ftrace_pid_function(func); |
160 | func = ftrace_pid_func; | 171 | func = ftrace_pid_func; |
161 | } | 172 | } |
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
203 | if (ftrace_list->next == &ftrace_list_end) { | 214 | if (ftrace_list->next == &ftrace_list_end) { |
204 | ftrace_func_t func = ftrace_list->func; | 215 | ftrace_func_t func = ftrace_list->func; |
205 | 216 | ||
206 | if (ftrace_pid_trace) { | 217 | if (!list_empty(&ftrace_pids)) { |
207 | set_ftrace_pid_function(func); | 218 | set_ftrace_pid_function(func); |
208 | func = ftrace_pid_func; | 219 | func = ftrace_pid_func; |
209 | } | 220 | } |
@@ -231,7 +242,7 @@ static void ftrace_update_pid_func(void) | |||
231 | func = __ftrace_trace_function; | 242 | func = __ftrace_trace_function; |
232 | #endif | 243 | #endif |
233 | 244 | ||
234 | if (ftrace_pid_trace) { | 245 | if (!list_empty(&ftrace_pids)) { |
235 | set_ftrace_pid_function(func); | 246 | set_ftrace_pid_function(func); |
236 | func = ftrace_pid_func; | 247 | func = ftrace_pid_func; |
237 | } else { | 248 | } else { |
@@ -821,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
821 | } | 832 | } |
822 | #endif /* CONFIG_FUNCTION_PROFILER */ | 833 | #endif /* CONFIG_FUNCTION_PROFILER */ |
823 | 834 | ||
824 | /* set when tracing only a pid */ | ||
825 | struct pid *ftrace_pid_trace; | ||
826 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 835 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
827 | 836 | ||
828 | #ifdef CONFIG_DYNAMIC_FTRACE | 837 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -1261,12 +1270,34 @@ static int ftrace_update_code(struct module *mod) | |||
1261 | ftrace_new_addrs = p->newlist; | 1270 | ftrace_new_addrs = p->newlist; |
1262 | p->flags = 0L; | 1271 | p->flags = 0L; |
1263 | 1272 | ||
1264 | /* convert record (i.e, patch mcount-call with NOP) */ | 1273 | /* |
1265 | if (ftrace_code_disable(mod, p)) { | 1274 | * Do the initial record conversion from mcount jump |
1266 | p->flags |= FTRACE_FL_CONVERTED; | 1275 | * to the NOP instructions. |
1267 | ftrace_update_cnt++; | 1276 | */ |
1268 | } else | 1277 | if (!ftrace_code_disable(mod, p)) { |
1269 | ftrace_free_rec(p); | 1278 | ftrace_free_rec(p); |
1279 | continue; | ||
1280 | } | ||
1281 | |||
1282 | p->flags |= FTRACE_FL_CONVERTED; | ||
1283 | ftrace_update_cnt++; | ||
1284 | |||
1285 | /* | ||
1286 | * If the tracing is enabled, go ahead and enable the record. | ||
1287 | * | ||
1288 | * The reason not to enable the record immediately is the | ||
1289 | * inherent check of ftrace_make_nop/ftrace_make_call for | ||
1290 | * correct previous instructions. Doing the NOP conversion | ||
1291 | * first puts the module into the correct state, thus | ||
1292 | * passing the ftrace_make_call check. | ||
1293 | */ | ||
1294 | if (ftrace_start_up) { | ||
1295 | int failed = __ftrace_replace_code(p, 1); | ||
1296 | if (failed) { | ||
1297 | ftrace_bug(failed, p->ip); | ||
1298 | ftrace_free_rec(p); | ||
1299 | } | ||
1300 | } | ||
1270 | } | 1301 | } |
1271 | 1302 | ||
1272 | stop = ftrace_now(raw_smp_processor_id()); | 1303 | stop = ftrace_now(raw_smp_processor_id()); |
@@ -1656,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin) | |||
1656 | return ret; | 1687 | return ret; |
1657 | } | 1688 | } |
1658 | 1689 | ||
1659 | enum { | ||
1660 | MATCH_FULL, | ||
1661 | MATCH_FRONT_ONLY, | ||
1662 | MATCH_MIDDLE_ONLY, | ||
1663 | MATCH_END_ONLY, | ||
1664 | }; | ||
1665 | |||
1666 | /* | ||
1667 | * (static function - no need for kernel doc) | ||
1668 | * | ||
1669 | * Pass in a buffer containing a glob and this function will | ||
1670 | * set search to point to the search part of the buffer and | ||
1671 | * return the type of search it is (see enum above). | ||
1672 | * This does modify buff. | ||
1673 | * | ||
1674 | * Returns enum type. | ||
1675 | * search returns the pointer to use for comparison. | ||
1676 | * not returns 1 if buff started with a '!' | ||
1677 | * 0 otherwise. | ||
1678 | */ | ||
1679 | static int | ||
1680 | ftrace_setup_glob(char *buff, int len, char **search, int *not) | ||
1681 | { | ||
1682 | int type = MATCH_FULL; | ||
1683 | int i; | ||
1684 | |||
1685 | if (buff[0] == '!') { | ||
1686 | *not = 1; | ||
1687 | buff++; | ||
1688 | len--; | ||
1689 | } else | ||
1690 | *not = 0; | ||
1691 | |||
1692 | *search = buff; | ||
1693 | |||
1694 | for (i = 0; i < len; i++) { | ||
1695 | if (buff[i] == '*') { | ||
1696 | if (!i) { | ||
1697 | *search = buff + 1; | ||
1698 | type = MATCH_END_ONLY; | ||
1699 | } else { | ||
1700 | if (type == MATCH_END_ONLY) | ||
1701 | type = MATCH_MIDDLE_ONLY; | ||
1702 | else | ||
1703 | type = MATCH_FRONT_ONLY; | ||
1704 | buff[i] = 0; | ||
1705 | break; | ||
1706 | } | ||
1707 | } | ||
1708 | } | ||
1709 | |||
1710 | return type; | ||
1711 | } | ||
1712 | |||
1713 | static int ftrace_match(char *str, char *regex, int len, int type) | 1690 | static int ftrace_match(char *str, char *regex, int len, int type) |
1714 | { | 1691 | { |
1715 | int matched = 0; | 1692 | int matched = 0; |
@@ -1758,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable) | |||
1758 | int not; | 1735 | int not; |
1759 | 1736 | ||
1760 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1737 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
1761 | type = ftrace_setup_glob(buff, len, &search, ¬); | 1738 | type = filter_parse_regex(buff, len, &search, ¬); |
1762 | 1739 | ||
1763 | search_len = strlen(search); | 1740 | search_len = strlen(search); |
1764 | 1741 | ||
@@ -1826,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) | |||
1826 | } | 1803 | } |
1827 | 1804 | ||
1828 | if (strlen(buff)) { | 1805 | if (strlen(buff)) { |
1829 | type = ftrace_setup_glob(buff, strlen(buff), &search, ¬); | 1806 | type = filter_parse_regex(buff, strlen(buff), &search, ¬); |
1830 | search_len = strlen(search); | 1807 | search_len = strlen(search); |
1831 | } | 1808 | } |
1832 | 1809 | ||
@@ -1991,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
1991 | int count = 0; | 1968 | int count = 0; |
1992 | char *search; | 1969 | char *search; |
1993 | 1970 | ||
1994 | type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); | 1971 | type = filter_parse_regex(glob, strlen(glob), &search, ¬); |
1995 | len = strlen(search); | 1972 | len = strlen(search); |
1996 | 1973 | ||
1997 | /* we do not support '!' for function probes */ | 1974 | /* we do not support '!' for function probes */ |
@@ -2068,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
2068 | else if (glob) { | 2045 | else if (glob) { |
2069 | int not; | 2046 | int not; |
2070 | 2047 | ||
2071 | type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); | 2048 | type = filter_parse_regex(glob, strlen(glob), &search, ¬); |
2072 | len = strlen(search); | 2049 | len = strlen(search); |
2073 | 2050 | ||
2074 | /* we do not support '!' for function probes */ | 2051 | /* we do not support '!' for function probes */ |
@@ -2312,6 +2289,32 @@ static int __init set_ftrace_filter(char *str) | |||
2312 | } | 2289 | } |
2313 | __setup("ftrace_filter=", set_ftrace_filter); | 2290 | __setup("ftrace_filter=", set_ftrace_filter); |
2314 | 2291 | ||
2292 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
2293 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | ||
2294 | static int __init set_graph_function(char *str) | ||
2295 | { | ||
2296 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); | ||
2297 | return 1; | ||
2298 | } | ||
2299 | __setup("ftrace_graph_filter=", set_graph_function); | ||
2300 | |||
2301 | static void __init set_ftrace_early_graph(char *buf) | ||
2302 | { | ||
2303 | int ret; | ||
2304 | char *func; | ||
2305 | |||
2306 | while (buf) { | ||
2307 | func = strsep(&buf, ","); | ||
2308 | /* we allow only one expression at a time */ | ||
2309 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, | ||
2310 | func); | ||
2311 | if (ret) | ||
2312 | printk(KERN_DEBUG "ftrace: function %s not " | ||
2313 | "traceable\n", func); | ||
2314 | } | ||
2315 | } | ||
2316 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
2317 | |||
2315 | static void __init set_ftrace_early_filter(char *buf, int enable) | 2318 | static void __init set_ftrace_early_filter(char *buf, int enable) |
2316 | { | 2319 | { |
2317 | char *func; | 2320 | char *func; |
@@ -2328,6 +2331,10 @@ static void __init set_ftrace_early_filters(void) | |||
2328 | set_ftrace_early_filter(ftrace_filter_buf, 1); | 2331 | set_ftrace_early_filter(ftrace_filter_buf, 1); |
2329 | if (ftrace_notrace_buf[0]) | 2332 | if (ftrace_notrace_buf[0]) |
2330 | set_ftrace_early_filter(ftrace_notrace_buf, 0); | 2333 | set_ftrace_early_filter(ftrace_notrace_buf, 0); |
2334 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
2335 | if (ftrace_graph_buf[0]) | ||
2336 | set_ftrace_early_graph(ftrace_graph_buf); | ||
2337 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
2331 | } | 2338 | } |
2332 | 2339 | ||
2333 | static int | 2340 | static int |
@@ -2513,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) | |||
2513 | return -ENODEV; | 2520 | return -ENODEV; |
2514 | 2521 | ||
2515 | /* decode regex */ | 2522 | /* decode regex */ |
2516 | type = ftrace_setup_glob(buffer, strlen(buffer), &search, ¬); | 2523 | type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); |
2517 | if (not) | 2524 | if (not) |
2518 | return -EINVAL; | 2525 | return -EINVAL; |
2519 | 2526 | ||
@@ -2624,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |||
2624 | return 0; | 2631 | return 0; |
2625 | } | 2632 | } |
2626 | 2633 | ||
2627 | static int ftrace_convert_nops(struct module *mod, | 2634 | static int ftrace_process_locs(struct module *mod, |
2628 | unsigned long *start, | 2635 | unsigned long *start, |
2629 | unsigned long *end) | 2636 | unsigned long *end) |
2630 | { | 2637 | { |
@@ -2684,7 +2691,7 @@ static void ftrace_init_module(struct module *mod, | |||
2684 | { | 2691 | { |
2685 | if (ftrace_disabled || start == end) | 2692 | if (ftrace_disabled || start == end) |
2686 | return; | 2693 | return; |
2687 | ftrace_convert_nops(mod, start, end); | 2694 | ftrace_process_locs(mod, start, end); |
2688 | } | 2695 | } |
2689 | 2696 | ||
2690 | static int ftrace_module_notify(struct notifier_block *self, | 2697 | static int ftrace_module_notify(struct notifier_block *self, |
@@ -2745,7 +2752,7 @@ void __init ftrace_init(void) | |||
2745 | 2752 | ||
2746 | last_ftrace_enabled = ftrace_enabled = 1; | 2753 | last_ftrace_enabled = ftrace_enabled = 1; |
2747 | 2754 | ||
2748 | ret = ftrace_convert_nops(NULL, | 2755 | ret = ftrace_process_locs(NULL, |
2749 | __start_mcount_loc, | 2756 | __start_mcount_loc, |
2750 | __stop_mcount_loc); | 2757 | __stop_mcount_loc); |
2751 | 2758 | ||
@@ -2778,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { } | |||
2778 | # define ftrace_shutdown_sysctl() do { } while (0) | 2785 | # define ftrace_shutdown_sysctl() do { } while (0) |
2779 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 2786 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
2780 | 2787 | ||
2781 | static ssize_t | ||
2782 | ftrace_pid_read(struct file *file, char __user *ubuf, | ||
2783 | size_t cnt, loff_t *ppos) | ||
2784 | { | ||
2785 | char buf[64]; | ||
2786 | int r; | ||
2787 | |||
2788 | if (ftrace_pid_trace == ftrace_swapper_pid) | ||
2789 | r = sprintf(buf, "swapper tasks\n"); | ||
2790 | else if (ftrace_pid_trace) | ||
2791 | r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace)); | ||
2792 | else | ||
2793 | r = sprintf(buf, "no pid\n"); | ||
2794 | |||
2795 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
2796 | } | ||
2797 | |||
2798 | static void clear_ftrace_swapper(void) | 2788 | static void clear_ftrace_swapper(void) |
2799 | { | 2789 | { |
2800 | struct task_struct *p; | 2790 | struct task_struct *p; |
@@ -2845,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid) | |||
2845 | rcu_read_unlock(); | 2835 | rcu_read_unlock(); |
2846 | } | 2836 | } |
2847 | 2837 | ||
2848 | static void clear_ftrace_pid_task(struct pid **pid) | 2838 | static void clear_ftrace_pid_task(struct pid *pid) |
2849 | { | 2839 | { |
2850 | if (*pid == ftrace_swapper_pid) | 2840 | if (pid == ftrace_swapper_pid) |
2851 | clear_ftrace_swapper(); | 2841 | clear_ftrace_swapper(); |
2852 | else | 2842 | else |
2853 | clear_ftrace_pid(*pid); | 2843 | clear_ftrace_pid(pid); |
2854 | |||
2855 | *pid = NULL; | ||
2856 | } | 2844 | } |
2857 | 2845 | ||
2858 | static void set_ftrace_pid_task(struct pid *pid) | 2846 | static void set_ftrace_pid_task(struct pid *pid) |
@@ -2863,74 +2851,184 @@ static void set_ftrace_pid_task(struct pid *pid) | |||
2863 | set_ftrace_pid(pid); | 2851 | set_ftrace_pid(pid); |
2864 | } | 2852 | } |
2865 | 2853 | ||
2866 | static ssize_t | 2854 | static int ftrace_pid_add(int p) |
2867 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | ||
2868 | size_t cnt, loff_t *ppos) | ||
2869 | { | 2855 | { |
2870 | struct pid *pid; | 2856 | struct pid *pid; |
2871 | char buf[64]; | 2857 | struct ftrace_pid *fpid; |
2872 | long val; | 2858 | int ret = -EINVAL; |
2873 | int ret; | ||
2874 | 2859 | ||
2875 | if (cnt >= sizeof(buf)) | 2860 | mutex_lock(&ftrace_lock); |
2876 | return -EINVAL; | ||
2877 | 2861 | ||
2878 | if (copy_from_user(&buf, ubuf, cnt)) | 2862 | if (!p) |
2879 | return -EFAULT; | 2863 | pid = ftrace_swapper_pid; |
2864 | else | ||
2865 | pid = find_get_pid(p); | ||
2880 | 2866 | ||
2881 | buf[cnt] = 0; | 2867 | if (!pid) |
2868 | goto out; | ||
2882 | 2869 | ||
2883 | ret = strict_strtol(buf, 10, &val); | 2870 | ret = 0; |
2884 | if (ret < 0) | ||
2885 | return ret; | ||
2886 | 2871 | ||
2887 | mutex_lock(&ftrace_lock); | 2872 | list_for_each_entry(fpid, &ftrace_pids, list) |
2888 | if (val < 0) { | 2873 | if (fpid->pid == pid) |
2889 | /* disable pid tracing */ | 2874 | goto out_put; |
2890 | if (!ftrace_pid_trace) | ||
2891 | goto out; | ||
2892 | 2875 | ||
2893 | clear_ftrace_pid_task(&ftrace_pid_trace); | 2876 | ret = -ENOMEM; |
2894 | 2877 | ||
2895 | } else { | 2878 | fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); |
2896 | /* swapper task is special */ | 2879 | if (!fpid) |
2897 | if (!val) { | 2880 | goto out_put; |
2898 | pid = ftrace_swapper_pid; | ||
2899 | if (pid == ftrace_pid_trace) | ||
2900 | goto out; | ||
2901 | } else { | ||
2902 | pid = find_get_pid(val); | ||
2903 | 2881 | ||
2904 | if (pid == ftrace_pid_trace) { | 2882 | list_add(&fpid->list, &ftrace_pids); |
2905 | put_pid(pid); | 2883 | fpid->pid = pid; |
2906 | goto out; | ||
2907 | } | ||
2908 | } | ||
2909 | 2884 | ||
2910 | if (ftrace_pid_trace) | 2885 | set_ftrace_pid_task(pid); |
2911 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
2912 | 2886 | ||
2913 | if (!pid) | 2887 | ftrace_update_pid_func(); |
2914 | goto out; | 2888 | ftrace_startup_enable(0); |
2889 | |||
2890 | mutex_unlock(&ftrace_lock); | ||
2891 | return 0; | ||
2892 | |||
2893 | out_put: | ||
2894 | if (pid != ftrace_swapper_pid) | ||
2895 | put_pid(pid); | ||
2915 | 2896 | ||
2916 | ftrace_pid_trace = pid; | 2897 | out: |
2898 | mutex_unlock(&ftrace_lock); | ||
2899 | return ret; | ||
2900 | } | ||
2901 | |||
2902 | static void ftrace_pid_reset(void) | ||
2903 | { | ||
2904 | struct ftrace_pid *fpid, *safe; | ||
2917 | 2905 | ||
2918 | set_ftrace_pid_task(ftrace_pid_trace); | 2906 | mutex_lock(&ftrace_lock); |
2907 | list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { | ||
2908 | struct pid *pid = fpid->pid; | ||
2909 | |||
2910 | clear_ftrace_pid_task(pid); | ||
2911 | |||
2912 | list_del(&fpid->list); | ||
2913 | kfree(fpid); | ||
2919 | } | 2914 | } |
2920 | 2915 | ||
2921 | /* update the function call */ | ||
2922 | ftrace_update_pid_func(); | 2916 | ftrace_update_pid_func(); |
2923 | ftrace_startup_enable(0); | 2917 | ftrace_startup_enable(0); |
2924 | 2918 | ||
2925 | out: | ||
2926 | mutex_unlock(&ftrace_lock); | 2919 | mutex_unlock(&ftrace_lock); |
2920 | } | ||
2927 | 2921 | ||
2928 | return cnt; | 2922 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
2923 | { | ||
2924 | mutex_lock(&ftrace_lock); | ||
2925 | |||
2926 | if (list_empty(&ftrace_pids) && (!*pos)) | ||
2927 | return (void *) 1; | ||
2928 | |||
2929 | return seq_list_start(&ftrace_pids, *pos); | ||
2930 | } | ||
2931 | |||
2932 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) | ||
2933 | { | ||
2934 | if (v == (void *)1) | ||
2935 | return NULL; | ||
2936 | |||
2937 | return seq_list_next(v, &ftrace_pids, pos); | ||
2938 | } | ||
2939 | |||
2940 | static void fpid_stop(struct seq_file *m, void *p) | ||
2941 | { | ||
2942 | mutex_unlock(&ftrace_lock); | ||
2943 | } | ||
2944 | |||
2945 | static int fpid_show(struct seq_file *m, void *v) | ||
2946 | { | ||
2947 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); | ||
2948 | |||
2949 | if (v == (void *)1) { | ||
2950 | seq_printf(m, "no pid\n"); | ||
2951 | return 0; | ||
2952 | } | ||
2953 | |||
2954 | if (fpid->pid == ftrace_swapper_pid) | ||
2955 | seq_printf(m, "swapper tasks\n"); | ||
2956 | else | ||
2957 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); | ||
2958 | |||
2959 | return 0; | ||
2960 | } | ||
2961 | |||
2962 | static const struct seq_operations ftrace_pid_sops = { | ||
2963 | .start = fpid_start, | ||
2964 | .next = fpid_next, | ||
2965 | .stop = fpid_stop, | ||
2966 | .show = fpid_show, | ||
2967 | }; | ||
2968 | |||
2969 | static int | ||
2970 | ftrace_pid_open(struct inode *inode, struct file *file) | ||
2971 | { | ||
2972 | int ret = 0; | ||
2973 | |||
2974 | if ((file->f_mode & FMODE_WRITE) && | ||
2975 | (file->f_flags & O_TRUNC)) | ||
2976 | ftrace_pid_reset(); | ||
2977 | |||
2978 | if (file->f_mode & FMODE_READ) | ||
2979 | ret = seq_open(file, &ftrace_pid_sops); | ||
2980 | |||
2981 | return ret; | ||
2982 | } | ||
2983 | |||
2984 | static ssize_t | ||
2985 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | ||
2986 | size_t cnt, loff_t *ppos) | ||
2987 | { | ||
2988 | char buf[64], *tmp; | ||
2989 | long val; | ||
2990 | int ret; | ||
2991 | |||
2992 | if (cnt >= sizeof(buf)) | ||
2993 | return -EINVAL; | ||
2994 | |||
2995 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2996 | return -EFAULT; | ||
2997 | |||
2998 | buf[cnt] = 0; | ||
2999 | |||
3000 | /* | ||
3001 | * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" | ||
3002 | * to clean the filter quietly. | ||
3003 | */ | ||
3004 | tmp = strstrip(buf); | ||
3005 | if (strlen(tmp) == 0) | ||
3006 | return 1; | ||
3007 | |||
3008 | ret = strict_strtol(tmp, 10, &val); | ||
3009 | if (ret < 0) | ||
3010 | return ret; | ||
3011 | |||
3012 | ret = ftrace_pid_add(val); | ||
3013 | |||
3014 | return ret ? ret : cnt; | ||
3015 | } | ||
3016 | |||
3017 | static int | ||
3018 | ftrace_pid_release(struct inode *inode, struct file *file) | ||
3019 | { | ||
3020 | if (file->f_mode & FMODE_READ) | ||
3021 | seq_release(inode, file); | ||
3022 | |||
3023 | return 0; | ||
2929 | } | 3024 | } |
2930 | 3025 | ||
2931 | static const struct file_operations ftrace_pid_fops = { | 3026 | static const struct file_operations ftrace_pid_fops = { |
2932 | .read = ftrace_pid_read, | 3027 | .open = ftrace_pid_open, |
2933 | .write = ftrace_pid_write, | 3028 | .write = ftrace_pid_write, |
3029 | .read = seq_read, | ||
3030 | .llseek = seq_lseek, | ||
3031 | .release = ftrace_pid_release, | ||
2934 | }; | 3032 | }; |
2935 | 3033 | ||
2936 | static __init int ftrace_init_debugfs(void) | 3034 | static __init int ftrace_init_debugfs(void) |
@@ -3293,4 +3391,3 @@ void ftrace_graph_stop(void) | |||
3293 | ftrace_stop(); | 3391 | ftrace_stop(); |
3294 | } | 3392 | } |
3295 | #endif | 3393 | #endif |
3296 | |||
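The ftrace.c changes above replace the private ftrace_setup_glob() helper with the shared filter_parse_regex() (defined further down in trace_events_filter.c) and turn set_ftrace_pid into a seq_file backed list of PIDs. As a rough user-space sketch of the glob classification the shared parser performs, assuming nothing beyond the algorithm visible in this patch:

#include <stdio.h>
#include <string.h>

enum regex_type { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

/* Same classification as filter_parse_regex(), reimplemented for illustration. */
static enum regex_type parse_glob(char *buff, char **search, int *not)
{
	enum regex_type type = MATCH_FULL;
	int len = (int)strlen(buff);
	int i;

	if (buff[0] == '!') {			/* leading '!' inverts the match */
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] != '*')
			continue;
		if (!i) {			/* leading '*': match at end of string */
			*search = buff + 1;
			type = MATCH_END_ONLY;
		} else {			/* later '*': front or middle match */
			type = (type == MATCH_END_ONLY) ?
				MATCH_MIDDLE_ONLY : MATCH_FRONT_ONLY;
			buff[i] = 0;		/* terminate the comparison string */
			break;
		}
	}
	return type;
}

int main(void)
{
	char pat[] = "!*sched*";
	char *search;
	int not;
	enum regex_type type = parse_glob(pat, &search, &not);

	printf("type=%d not=%d search=%s\n", type, not, search);
	return 0;
}

A pattern such as "!*sched*" therefore ends up as MATCH_MIDDLE_ONLY with the inversion flag set and "sched" as the comparison string.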
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 5dd017fea6f5..a1ca4956ab5e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s) | |||
397 | int ret; | 397 | int ret; |
398 | 398 | ||
399 | ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" | 399 | ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" |
400 | "offset:0;\tsize:%u;\n", | 400 | "offset:0;\tsize:%u;\tsigned:%u;\n", |
401 | (unsigned int)sizeof(field.time_stamp)); | 401 | (unsigned int)sizeof(field.time_stamp), |
402 | (unsigned int)is_signed_type(u64)); | ||
402 | 403 | ||
403 | ret = trace_seq_printf(s, "\tfield: local_t commit;\t" | 404 | ret = trace_seq_printf(s, "\tfield: local_t commit;\t" |
404 | "offset:%u;\tsize:%u;\n", | 405 | "offset:%u;\tsize:%u;\tsigned:%u;\n", |
405 | (unsigned int)offsetof(typeof(field), commit), | 406 | (unsigned int)offsetof(typeof(field), commit), |
406 | (unsigned int)sizeof(field.commit)); | 407 | (unsigned int)sizeof(field.commit), |
408 | (unsigned int)is_signed_type(long)); | ||
407 | 409 | ||
408 | ret = trace_seq_printf(s, "\tfield: char data;\t" | 410 | ret = trace_seq_printf(s, "\tfield: char data;\t" |
409 | "offset:%u;\tsize:%u;\n", | 411 | "offset:%u;\tsize:%u;\tsigned:%u;\n", |
410 | (unsigned int)offsetof(typeof(field), data), | 412 | (unsigned int)offsetof(typeof(field), data), |
411 | (unsigned int)BUF_PAGE_SIZE); | 413 | (unsigned int)BUF_PAGE_SIZE, |
414 | (unsigned int)is_signed_type(char)); | ||
412 | 415 | ||
413 | return ret; | 416 | return ret; |
414 | } | 417 | } |
@@ -1787,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
1787 | static struct ring_buffer_event * | 1790 | static struct ring_buffer_event * |
1788 | rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | 1791 | rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, |
1789 | unsigned long length, unsigned long tail, | 1792 | unsigned long length, unsigned long tail, |
1790 | struct buffer_page *commit_page, | ||
1791 | struct buffer_page *tail_page, u64 *ts) | 1793 | struct buffer_page *tail_page, u64 *ts) |
1792 | { | 1794 | { |
1795 | struct buffer_page *commit_page = cpu_buffer->commit_page; | ||
1793 | struct ring_buffer *buffer = cpu_buffer->buffer; | 1796 | struct ring_buffer *buffer = cpu_buffer->buffer; |
1794 | struct buffer_page *next_page; | 1797 | struct buffer_page *next_page; |
1795 | int ret; | 1798 | int ret; |
@@ -1892,13 +1895,10 @@ static struct ring_buffer_event * | |||
1892 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | 1895 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, |
1893 | unsigned type, unsigned long length, u64 *ts) | 1896 | unsigned type, unsigned long length, u64 *ts) |
1894 | { | 1897 | { |
1895 | struct buffer_page *tail_page, *commit_page; | 1898 | struct buffer_page *tail_page; |
1896 | struct ring_buffer_event *event; | 1899 | struct ring_buffer_event *event; |
1897 | unsigned long tail, write; | 1900 | unsigned long tail, write; |
1898 | 1901 | ||
1899 | commit_page = cpu_buffer->commit_page; | ||
1900 | /* we just need to protect against interrupts */ | ||
1901 | barrier(); | ||
1902 | tail_page = cpu_buffer->tail_page; | 1902 | tail_page = cpu_buffer->tail_page; |
1903 | write = local_add_return(length, &tail_page->write); | 1903 | write = local_add_return(length, &tail_page->write); |
1904 | 1904 | ||
@@ -1909,7 +1909,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1909 | /* See if we shot past the end of this buffer page */ | 1909 | /* See if we shot past the end of this buffer page */ |
1910 | if (write > BUF_PAGE_SIZE) | 1910 | if (write > BUF_PAGE_SIZE) |
1911 | return rb_move_tail(cpu_buffer, length, tail, | 1911 | return rb_move_tail(cpu_buffer, length, tail, |
1912 | commit_page, tail_page, ts); | 1912 | tail_page, ts); |
1913 | 1913 | ||
1914 | /* We reserved something on the buffer */ | 1914 | /* We reserved something on the buffer */ |
1915 | 1915 | ||
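The page-header format above now reports a "signed:" attribute for each field through is_signed_type(). The macro itself is not part of this hunk; a common way to implement such a compile-time check is the cast-of-minus-one sketch below, shown only as an illustration of what the reported value means:

#include <stdio.h>

/* Illustrative definition; the kernel's is_signed_type() may be spelled differently. */
#define is_signed_type(type)	(((type)(-1)) < (type)0)

int main(void)
{
	printf("u64:  signed=%u\n", (unsigned int)is_signed_type(unsigned long long));
	printf("long: signed=%u\n", (unsigned int)is_signed_type(long));
	printf("char: signed=%u\n", (unsigned int)is_signed_type(char));	/* arch dependent */
	return 0;
}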
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 573d3cc762c3..b2477caf09c2 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
@@ -35,6 +35,28 @@ static int disable_reader; | |||
35 | module_param(disable_reader, uint, 0644); | 35 | module_param(disable_reader, uint, 0644); |
36 | MODULE_PARM_DESC(disable_reader, "only run producer"); | 36 | MODULE_PARM_DESC(disable_reader, "only run producer"); |
37 | 37 | ||
38 | static int write_iteration = 50; | ||
39 | module_param(write_iteration, uint, 0644); | ||
40 | MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); | ||
41 | |||
42 | static int producer_nice = 19; | ||
43 | static int consumer_nice = 19; | ||
44 | |||
45 | static int producer_fifo = -1; | ||
46 | static int consumer_fifo = -1; | ||
47 | |||
48 | module_param(producer_nice, uint, 0644); | ||
49 | MODULE_PARM_DESC(producer_nice, "nice prio for producer"); | ||
50 | |||
51 | module_param(consumer_nice, uint, 0644); | ||
52 | MODULE_PARM_DESC(consumer_nice, "nice prio for consumer"); | ||
53 | |||
54 | module_param(producer_fifo, uint, 0644); | ||
55 | MODULE_PARM_DESC(producer_fifo, "fifo prio for producer"); | ||
56 | |||
57 | module_param(consumer_fifo, uint, 0644); | ||
58 | MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer"); | ||
59 | |||
38 | static int read_events; | 60 | static int read_events; |
39 | 61 | ||
40 | static int kill_test; | 62 | static int kill_test; |
@@ -208,15 +230,18 @@ static void ring_buffer_producer(void) | |||
208 | do { | 230 | do { |
209 | struct ring_buffer_event *event; | 231 | struct ring_buffer_event *event; |
210 | int *entry; | 232 | int *entry; |
211 | 233 | int i; | |
212 | event = ring_buffer_lock_reserve(buffer, 10); | 234 | |
213 | if (!event) { | 235 | for (i = 0; i < write_iteration; i++) { |
214 | missed++; | 236 | event = ring_buffer_lock_reserve(buffer, 10); |
215 | } else { | 237 | if (!event) { |
216 | hit++; | 238 | missed++; |
217 | entry = ring_buffer_event_data(event); | 239 | } else { |
218 | *entry = smp_processor_id(); | 240 | hit++; |
219 | ring_buffer_unlock_commit(buffer, event); | 241 | entry = ring_buffer_event_data(event); |
242 | *entry = smp_processor_id(); | ||
243 | ring_buffer_unlock_commit(buffer, event); | ||
244 | } | ||
220 | } | 245 | } |
221 | do_gettimeofday(&end_tv); | 246 | do_gettimeofday(&end_tv); |
222 | 247 | ||
@@ -263,6 +288,27 @@ static void ring_buffer_producer(void) | |||
263 | 288 | ||
264 | if (kill_test) | 289 | if (kill_test) |
265 | trace_printk("ERROR!\n"); | 290 | trace_printk("ERROR!\n"); |
291 | |||
292 | if (!disable_reader) { | ||
293 | if (consumer_fifo < 0) | ||
294 | trace_printk("Running Consumer at nice: %d\n", | ||
295 | consumer_nice); | ||
296 | else | ||
297 | trace_printk("Running Consumer at SCHED_FIFO %d\n", | ||
298 | consumer_fifo); | ||
299 | } | ||
300 | if (producer_fifo < 0) | ||
301 | trace_printk("Running Producer at nice: %d\n", | ||
302 | producer_nice); | ||
303 | else | ||
304 | trace_printk("Running Producer at SCHED_FIFO %d\n", | ||
305 | producer_fifo); | ||
306 | |||
307 | /* Let the user know that the test is running at low priority */ | ||
308 | if (producer_fifo < 0 && consumer_fifo < 0 && | ||
309 | producer_nice == 19 && consumer_nice == 19) | ||
310 | trace_printk("WARNING!!! This test is running at lowest priority.\n"); | ||
311 | |||
266 | trace_printk("Time: %lld (usecs)\n", time); | 312 | trace_printk("Time: %lld (usecs)\n", time); |
267 | trace_printk("Overruns: %lld\n", overruns); | 313 | trace_printk("Overruns: %lld\n", overruns); |
268 | if (disable_reader) | 314 | if (disable_reader) |
@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void) | |||
392 | if (IS_ERR(producer)) | 438 | if (IS_ERR(producer)) |
393 | goto out_kill; | 439 | goto out_kill; |
394 | 440 | ||
441 | /* | ||
442 | * Run them as low-prio background tasks by default: | ||
443 | */ | ||
444 | if (!disable_reader) { | ||
445 | if (consumer_fifo >= 0) { | ||
446 | struct sched_param param = { | ||
447 | .sched_priority = consumer_fifo | ||
448 | }; | ||
449 | sched_setscheduler(consumer, SCHED_FIFO, ¶m); | ||
450 | } else | ||
451 | set_user_nice(consumer, consumer_nice); | ||
452 | } | ||
453 | |||
454 | if (producer_fifo >= 0) { | ||
455 | struct sched_param param = { | ||
456 | .sched_priority = producer_fifo | ||
457 | }; | ||
458 | sched_setscheduler(producer, SCHED_FIFO, ¶m); | ||
459 | } else | ||
460 | set_user_nice(producer, producer_nice); | ||
461 | |||
395 | return 0; | 462 | return 0; |
396 | 463 | ||
397 | out_kill: | 464 | out_kill: |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b20d3ec75de9..874f2893cff0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf); | |||
129 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 129 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
130 | static char *default_bootup_tracer; | 130 | static char *default_bootup_tracer; |
131 | 131 | ||
132 | static int __init set_ftrace(char *str) | 132 | static int __init set_cmdline_ftrace(char *str) |
133 | { | 133 | { |
134 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); | 134 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); |
135 | default_bootup_tracer = bootup_tracer_buf; | 135 | default_bootup_tracer = bootup_tracer_buf; |
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str) | |||
137 | ring_buffer_expanded = 1; | 137 | ring_buffer_expanded = 1; |
138 | return 1; | 138 | return 1; |
139 | } | 139 | } |
140 | __setup("ftrace=", set_ftrace); | 140 | __setup("ftrace=", set_cmdline_ftrace); |
141 | 141 | ||
142 | static int __init set_ftrace_dump_on_oops(char *str) | 142 | static int __init set_ftrace_dump_on_oops(char *str) |
143 | { | 143 | { |
@@ -1361,10 +1361,11 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1361 | pause_graph_tracing(); | 1361 | pause_graph_tracing(); |
1362 | raw_local_irq_save(irq_flags); | 1362 | raw_local_irq_save(irq_flags); |
1363 | __raw_spin_lock(&trace_buf_lock); | 1363 | __raw_spin_lock(&trace_buf_lock); |
1364 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1364 | if (args == NULL) { |
1365 | 1365 | strncpy(trace_buf, fmt, TRACE_BUF_SIZE); | |
1366 | len = min(len, TRACE_BUF_SIZE-1); | 1366 | len = strlen(trace_buf); |
1367 | trace_buf[len] = 0; | 1367 | } else |
1368 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | ||
1368 | 1369 | ||
1369 | size = sizeof(*entry) + len + 1; | 1370 | size = sizeof(*entry) + len + 1; |
1370 | buffer = tr->buffer; | 1371 | buffer = tr->buffer; |
@@ -1373,10 +1374,10 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1373 | if (!event) | 1374 | if (!event) |
1374 | goto out_unlock; | 1375 | goto out_unlock; |
1375 | entry = ring_buffer_event_data(event); | 1376 | entry = ring_buffer_event_data(event); |
1376 | entry->ip = ip; | 1377 | entry->ip = ip; |
1377 | 1378 | ||
1378 | memcpy(&entry->buf, trace_buf, len); | 1379 | memcpy(&entry->buf, trace_buf, len); |
1379 | entry->buf[len] = 0; | 1380 | entry->buf[len] = '\0'; |
1380 | if (!filter_check_discard(call, entry, buffer, event)) | 1381 | if (!filter_check_discard(call, entry, buffer, event)) |
1381 | ring_buffer_unlock_commit(buffer, event); | 1382 | ring_buffer_unlock_commit(buffer, event); |
1382 | 1383 | ||
@@ -3319,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3319 | return cnt; | 3320 | return cnt; |
3320 | } | 3321 | } |
3321 | 3322 | ||
3322 | static int mark_printk(const char *fmt, ...) | ||
3323 | { | ||
3324 | int ret; | ||
3325 | va_list args; | ||
3326 | va_start(args, fmt); | ||
3327 | ret = trace_vprintk(0, fmt, args); | ||
3328 | va_end(args); | ||
3329 | return ret; | ||
3330 | } | ||
3331 | |||
3332 | static ssize_t | 3323 | static ssize_t |
3333 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 3324 | tracing_mark_write(struct file *filp, const char __user *ubuf, |
3334 | size_t cnt, loff_t *fpos) | 3325 | size_t cnt, loff_t *fpos) |
3335 | { | 3326 | { |
3336 | char *buf; | 3327 | char *buf; |
3337 | char *end; | ||
3338 | 3328 | ||
3339 | if (tracing_disabled) | 3329 | if (tracing_disabled) |
3340 | return -EINVAL; | 3330 | return -EINVAL; |
@@ -3342,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3342 | if (cnt > TRACE_BUF_SIZE) | 3332 | if (cnt > TRACE_BUF_SIZE) |
3343 | cnt = TRACE_BUF_SIZE; | 3333 | cnt = TRACE_BUF_SIZE; |
3344 | 3334 | ||
3345 | buf = kmalloc(cnt + 1, GFP_KERNEL); | 3335 | buf = kmalloc(cnt + 2, GFP_KERNEL); |
3346 | if (buf == NULL) | 3336 | if (buf == NULL) |
3347 | return -ENOMEM; | 3337 | return -ENOMEM; |
3348 | 3338 | ||
@@ -3350,14 +3340,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3350 | kfree(buf); | 3340 | kfree(buf); |
3351 | return -EFAULT; | 3341 | return -EFAULT; |
3352 | } | 3342 | } |
3343 | if (buf[cnt-1] != '\n') { | ||
3344 | buf[cnt] = '\n'; | ||
3345 | buf[cnt+1] = '\0'; | ||
3346 | } else | ||
3347 | buf[cnt] = '\0'; | ||
3353 | 3348 | ||
3354 | /* Cut from the first nil or newline. */ | 3349 | cnt = trace_vprintk(0, buf, NULL); |
3355 | buf[cnt] = '\0'; | ||
3356 | end = strchr(buf, '\n'); | ||
3357 | if (end) | ||
3358 | *end = '\0'; | ||
3359 | |||
3360 | cnt = mark_printk("%s\n", buf); | ||
3361 | kfree(buf); | 3350 | kfree(buf); |
3362 | *fpos += cnt; | 3351 | *fpos += cnt; |
3363 | 3352 | ||
@@ -3730,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
3730 | 3719 | ||
3731 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 3720 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
3732 | if (!s) | 3721 | if (!s) |
3733 | return ENOMEM; | 3722 | return -ENOMEM; |
3734 | 3723 | ||
3735 | trace_seq_init(s); | 3724 | trace_seq_init(s); |
3736 | 3725 | ||
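Two related changes in trace.c above: trace_array_vprintk() copies the format string verbatim when handed a NULL va_list, and tracing_mark_write() relies on that to pass the user buffer straight to trace_vprintk(0, buf, NULL), so '%' characters written by user space are no longer interpreted as conversions. The buffer is allocated with two spare bytes so a missing trailing newline can be appended; a small user-space sketch of that termination step, assuming only what the hunk shows:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirror of the newline/NUL termination done in tracing_mark_write(). */
static char *terminate_mark(const char *user_data, size_t cnt)
{
	char *buf = malloc(cnt + 2);		/* room for '\n' and '\0' */

	if (!buf)
		return NULL;
	if (!cnt) {
		buf[0] = '\0';
		return buf;
	}
	memcpy(buf, user_data, cnt);
	if (buf[cnt - 1] != '\n') {
		buf[cnt] = '\n';
		buf[cnt + 1] = '\0';
	} else
		buf[cnt] = '\0';
	return buf;
}

int main(void)
{
	char *s = terminate_mark("hello", 5);

	fputs(s, stdout);			/* prints "hello" plus the appended newline */
	free(s);
	return 0;
}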
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 405cb850b75d..1d7f4830a80d 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <trace/boot.h> | 12 | #include <trace/boot.h> |
13 | #include <linux/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
14 | #include <linux/hw_breakpoint.h> | ||
14 | 15 | ||
15 | #include <linux/trace_seq.h> | 16 | #include <linux/trace_seq.h> |
16 | #include <linux/ftrace_event.h> | 17 | #include <linux/ftrace_event.h> |
@@ -37,6 +38,7 @@ enum trace_type { | |||
37 | TRACE_KMEM_ALLOC, | 38 | TRACE_KMEM_ALLOC, |
38 | TRACE_KMEM_FREE, | 39 | TRACE_KMEM_FREE, |
39 | TRACE_BLK, | 40 | TRACE_BLK, |
41 | TRACE_KSYM, | ||
40 | 42 | ||
41 | __TRACE_LAST_TYPE, | 43 | __TRACE_LAST_TYPE, |
42 | }; | 44 | }; |
@@ -98,9 +100,32 @@ struct syscall_trace_enter { | |||
98 | struct syscall_trace_exit { | 100 | struct syscall_trace_exit { |
99 | struct trace_entry ent; | 101 | struct trace_entry ent; |
100 | int nr; | 102 | int nr; |
101 | unsigned long ret; | 103 | long ret; |
102 | }; | 104 | }; |
103 | 105 | ||
106 | struct kprobe_trace_entry { | ||
107 | struct trace_entry ent; | ||
108 | unsigned long ip; | ||
109 | int nargs; | ||
110 | unsigned long args[]; | ||
111 | }; | ||
112 | |||
113 | #define SIZEOF_KPROBE_TRACE_ENTRY(n) \ | ||
114 | (offsetof(struct kprobe_trace_entry, args) + \ | ||
115 | (sizeof(unsigned long) * (n))) | ||
116 | |||
117 | struct kretprobe_trace_entry { | ||
118 | struct trace_entry ent; | ||
119 | unsigned long func; | ||
120 | unsigned long ret_ip; | ||
121 | int nargs; | ||
122 | unsigned long args[]; | ||
123 | }; | ||
124 | |||
125 | #define SIZEOF_KRETPROBE_TRACE_ENTRY(n) \ | ||
126 | (offsetof(struct kretprobe_trace_entry, args) + \ | ||
127 | (sizeof(unsigned long) * (n))) | ||
128 | |||
104 | /* | 129 | /* |
105 | * trace_flag_type is an enumeration that holds different | 130 | * trace_flag_type is an enumeration that holds different |
106 | * states when a trace occurs. These are: | 131 | * states when a trace occurs. These are: |
@@ -209,6 +234,7 @@ extern void __ftrace_bad_type(void); | |||
209 | TRACE_KMEM_ALLOC); \ | 234 | TRACE_KMEM_ALLOC); \ |
210 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | 235 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ |
211 | TRACE_KMEM_FREE); \ | 236 | TRACE_KMEM_FREE); \ |
237 | IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ | ||
212 | __ftrace_bad_type(); \ | 238 | __ftrace_bad_type(); \ |
213 | } while (0) | 239 | } while (0) |
214 | 240 | ||
@@ -364,6 +390,8 @@ int register_tracer(struct tracer *type); | |||
364 | void unregister_tracer(struct tracer *type); | 390 | void unregister_tracer(struct tracer *type); |
365 | int is_tracing_stopped(void); | 391 | int is_tracing_stopped(void); |
366 | 392 | ||
393 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | ||
394 | |||
367 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 395 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
368 | 396 | ||
369 | #ifdef CONFIG_TRACER_MAX_TRACE | 397 | #ifdef CONFIG_TRACER_MAX_TRACE |
@@ -438,6 +466,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace, | |||
438 | struct trace_array *tr); | 466 | struct trace_array *tr); |
439 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, | 467 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, |
440 | struct trace_array *tr); | 468 | struct trace_array *tr); |
469 | extern int trace_selftest_startup_ksym(struct tracer *trace, | ||
470 | struct trace_array *tr); | ||
441 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 471 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
442 | 472 | ||
443 | extern void *head_page(struct trace_array_cpu *data); | 473 | extern void *head_page(struct trace_array_cpu *data); |
@@ -483,10 +513,6 @@ static inline int ftrace_graph_addr(unsigned long addr) | |||
483 | return 0; | 513 | return 0; |
484 | } | 514 | } |
485 | #else | 515 | #else |
486 | static inline int ftrace_trace_addr(unsigned long addr) | ||
487 | { | ||
488 | return 1; | ||
489 | } | ||
490 | static inline int ftrace_graph_addr(unsigned long addr) | 516 | static inline int ftrace_graph_addr(unsigned long addr) |
491 | { | 517 | { |
492 | return 1; | 518 | return 1; |
@@ -500,12 +526,12 @@ print_graph_function(struct trace_iterator *iter) | |||
500 | } | 526 | } |
501 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 527 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
502 | 528 | ||
503 | extern struct pid *ftrace_pid_trace; | 529 | extern struct list_head ftrace_pids; |
504 | 530 | ||
505 | #ifdef CONFIG_FUNCTION_TRACER | 531 | #ifdef CONFIG_FUNCTION_TRACER |
506 | static inline int ftrace_trace_task(struct task_struct *task) | 532 | static inline int ftrace_trace_task(struct task_struct *task) |
507 | { | 533 | { |
508 | if (!ftrace_pid_trace) | 534 | if (list_empty(&ftrace_pids)) |
509 | return 1; | 535 | return 1; |
510 | 536 | ||
511 | return test_tsk_trace_trace(task); | 537 | return test_tsk_trace_trace(task); |
@@ -687,7 +713,6 @@ struct event_filter { | |||
687 | int n_preds; | 713 | int n_preds; |
688 | struct filter_pred **preds; | 714 | struct filter_pred **preds; |
689 | char *filter_string; | 715 | char *filter_string; |
690 | bool no_reset; | ||
691 | }; | 716 | }; |
692 | 717 | ||
693 | struct event_subsystem { | 718 | struct event_subsystem { |
@@ -699,22 +724,40 @@ struct event_subsystem { | |||
699 | }; | 724 | }; |
700 | 725 | ||
701 | struct filter_pred; | 726 | struct filter_pred; |
727 | struct regex; | ||
702 | 728 | ||
703 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, | 729 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, |
704 | int val1, int val2); | 730 | int val1, int val2); |
705 | 731 | ||
732 | typedef int (*regex_match_func)(char *str, struct regex *r, int len); | ||
733 | |||
734 | enum regex_type { | ||
735 | MATCH_FULL = 0, | ||
736 | MATCH_FRONT_ONLY, | ||
737 | MATCH_MIDDLE_ONLY, | ||
738 | MATCH_END_ONLY, | ||
739 | }; | ||
740 | |||
741 | struct regex { | ||
742 | char pattern[MAX_FILTER_STR_VAL]; | ||
743 | int len; | ||
744 | int field_len; | ||
745 | regex_match_func match; | ||
746 | }; | ||
747 | |||
706 | struct filter_pred { | 748 | struct filter_pred { |
707 | filter_pred_fn_t fn; | 749 | filter_pred_fn_t fn; |
708 | u64 val; | 750 | u64 val; |
709 | char str_val[MAX_FILTER_STR_VAL]; | 751 | struct regex regex; |
710 | int str_len; | 752 | char *field_name; |
711 | char *field_name; | 753 | int offset; |
712 | int offset; | 754 | int not; |
713 | int not; | 755 | int op; |
714 | int op; | 756 | int pop_n; |
715 | int pop_n; | ||
716 | }; | 757 | }; |
717 | 758 | ||
759 | extern enum regex_type | ||
760 | filter_parse_regex(char *buff, int len, char **search, int *not); | ||
718 | extern void print_event_filter(struct ftrace_event_call *call, | 761 | extern void print_event_filter(struct ftrace_event_call *call, |
719 | struct trace_seq *s); | 762 | struct trace_seq *s); |
720 | extern int apply_event_filter(struct ftrace_event_call *call, | 763 | extern int apply_event_filter(struct ftrace_event_call *call, |
@@ -730,7 +773,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, | |||
730 | struct ring_buffer *buffer, | 773 | struct ring_buffer *buffer, |
731 | struct ring_buffer_event *event) | 774 | struct ring_buffer_event *event) |
732 | { | 775 | { |
733 | if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { | 776 | if (unlikely(call->filter_active) && |
777 | !filter_match_preds(call->filter, rec)) { | ||
734 | ring_buffer_discard_commit(buffer, event); | 778 | ring_buffer_discard_commit(buffer, event); |
735 | return 1; | 779 | return 1; |
736 | } | 780 | } |
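The new kprobe entry types above end in a flexible array member, so a record's size is the offset of args plus one unsigned long per traced argument. A standalone illustration of the SIZEOF_KPROBE_TRACE_ENTRY() arithmetic, with a deliberately simplified stand-in for struct trace_entry:

#include <stdio.h>
#include <stddef.h>

struct trace_entry { unsigned short type; unsigned char flags; };	/* simplified stand-in */

struct kprobe_trace_entry {
	struct trace_entry ent;
	unsigned long ip;
	int nargs;
	unsigned long args[];		/* flexible array member: adds no size of its own */
};

#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kprobe_trace_entry, args) +	\
	 (sizeof(unsigned long) * (n)))

int main(void)
{
	/* e.g. a probe recording three arguments */
	printf("record size for 3 args: %zu bytes\n",
	       (size_t)SIZEOF_KPROBE_TRACE_ENTRY(3));
	return 0;
}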
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 20c5f92e28a8..878c03f386ba 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/ktime.h> | 20 | #include <linux/ktime.h> |
21 | #include <linux/trace_clock.h> | 21 | #include <linux/trace_clock.h> |
22 | 22 | ||
23 | #include "trace.h" | ||
24 | |||
23 | /* | 25 | /* |
24 | * trace_clock_local(): the simplest and least coherent tracing clock. | 26 | * trace_clock_local(): the simplest and least coherent tracing clock. |
25 | * | 27 | * |
@@ -28,17 +30,17 @@ | |||
28 | */ | 30 | */ |
29 | u64 notrace trace_clock_local(void) | 31 | u64 notrace trace_clock_local(void) |
30 | { | 32 | { |
31 | unsigned long flags; | ||
32 | u64 clock; | 33 | u64 clock; |
34 | int resched; | ||
33 | 35 | ||
34 | /* | 36 | /* |
35 | * sched_clock() is an architecture implemented, fast, scalable, | 37 | * sched_clock() is an architecture implemented, fast, scalable, |
36 | * lockless clock. It is not guaranteed to be coherent across | 38 | * lockless clock. It is not guaranteed to be coherent across |
37 | * CPUs, nor across CPU idle events. | 39 | * CPUs, nor across CPU idle events. |
38 | */ | 40 | */ |
39 | raw_local_irq_save(flags); | 41 | resched = ftrace_preempt_disable(); |
40 | clock = sched_clock(); | 42 | clock = sched_clock(); |
41 | raw_local_irq_restore(flags); | 43 | ftrace_preempt_enable(resched); |
42 | 44 | ||
43 | return clock; | 45 | return clock; |
44 | } | 46 | } |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index ead3d724599d..c16a08f399df 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
@@ -364,3 +364,19 @@ FTRACE_ENTRY(kmem_free, kmemtrace_free_entry, | |||
364 | F_printk("type:%u call_site:%lx ptr:%p", | 364 | F_printk("type:%u call_site:%lx ptr:%p", |
365 | __entry->type_id, __entry->call_site, __entry->ptr) | 365 | __entry->type_id, __entry->call_site, __entry->ptr) |
366 | ); | 366 | ); |
367 | |||
368 | FTRACE_ENTRY(ksym_trace, ksym_trace_entry, | ||
369 | |||
370 | TRACE_KSYM, | ||
371 | |||
372 | F_STRUCT( | ||
373 | __field( unsigned long, ip ) | ||
374 | __field( unsigned char, type ) | ||
375 | __array( char , cmd, TASK_COMM_LEN ) | ||
376 | __field( unsigned long, addr ) | ||
377 | ), | ||
378 | |||
379 | F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s", | ||
380 | (void *)__entry->ip, (unsigned int)__entry->type, | ||
381 | (void *)__entry->addr, __entry->cmd) | ||
382 | ); | ||
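For readability, the F_STRUCT() description above corresponds roughly to the plain C layout below (an assumed expansion for illustration only; the real FTRACE_ENTRY() macro also generates the registration boilerplate, and struct trace_entry is simplified here):

#include <stdio.h>

#define TASK_COMM_LEN 16			/* value used by linux/sched.h */

struct trace_entry { unsigned short type; unsigned char flags; };	/* simplified stand-in */

struct ksym_trace_entry {
	struct trace_entry ent;
	unsigned long	ip;			/* instruction pointer when the watchpoint fired */
	unsigned char	type;			/* access type */
	char		cmd[TASK_COMM_LEN];	/* command of the task that hit it */
	unsigned long	addr;			/* address of the watched symbol */
};

int main(void)
{
	printf("ksym_trace_entry: %zu bytes\n", sizeof(struct ksym_trace_entry));
	return 0;
}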
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 8d5c171cc998..d9c60f80aa0d 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -8,17 +8,14 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include "trace.h" | 9 | #include "trace.h" |
10 | 10 | ||
11 | /* | ||
12 | * We can't use a size but a type in alloc_percpu() | ||
13 | * So let's create a dummy type that matches the desired size | ||
14 | */ | ||
15 | typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t; | ||
16 | 11 | ||
17 | char *trace_profile_buf; | 12 | char *perf_trace_buf; |
18 | EXPORT_SYMBOL_GPL(trace_profile_buf); | 13 | EXPORT_SYMBOL_GPL(perf_trace_buf); |
14 | |||
15 | char *perf_trace_buf_nmi; | ||
16 | EXPORT_SYMBOL_GPL(perf_trace_buf_nmi); | ||
19 | 17 | ||
20 | char *trace_profile_buf_nmi; | 18 | typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; |
21 | EXPORT_SYMBOL_GPL(trace_profile_buf_nmi); | ||
22 | 19 | ||
23 | /* Count the events in use (per event id, not per instance) */ | 20 | /* Count the events in use (per event id, not per instance) */ |
24 | static int total_profile_count; | 21 | static int total_profile_count; |
@@ -32,20 +29,20 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) | |||
32 | return 0; | 29 | return 0; |
33 | 30 | ||
34 | if (!total_profile_count) { | 31 | if (!total_profile_count) { |
35 | buf = (char *)alloc_percpu(profile_buf_t); | 32 | buf = (char *)alloc_percpu(perf_trace_t); |
36 | if (!buf) | 33 | if (!buf) |
37 | goto fail_buf; | 34 | goto fail_buf; |
38 | 35 | ||
39 | rcu_assign_pointer(trace_profile_buf, buf); | 36 | rcu_assign_pointer(perf_trace_buf, buf); |
40 | 37 | ||
41 | buf = (char *)alloc_percpu(profile_buf_t); | 38 | buf = (char *)alloc_percpu(perf_trace_t); |
42 | if (!buf) | 39 | if (!buf) |
43 | goto fail_buf_nmi; | 40 | goto fail_buf_nmi; |
44 | 41 | ||
45 | rcu_assign_pointer(trace_profile_buf_nmi, buf); | 42 | rcu_assign_pointer(perf_trace_buf_nmi, buf); |
46 | } | 43 | } |
47 | 44 | ||
48 | ret = event->profile_enable(); | 45 | ret = event->profile_enable(event); |
49 | if (!ret) { | 46 | if (!ret) { |
50 | total_profile_count++; | 47 | total_profile_count++; |
51 | return 0; | 48 | return 0; |
@@ -53,10 +50,10 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) | |||
53 | 50 | ||
54 | fail_buf_nmi: | 51 | fail_buf_nmi: |
55 | if (!total_profile_count) { | 52 | if (!total_profile_count) { |
56 | free_percpu(trace_profile_buf_nmi); | 53 | free_percpu(perf_trace_buf_nmi); |
57 | free_percpu(trace_profile_buf); | 54 | free_percpu(perf_trace_buf); |
58 | trace_profile_buf_nmi = NULL; | 55 | perf_trace_buf_nmi = NULL; |
59 | trace_profile_buf = NULL; | 56 | perf_trace_buf = NULL; |
60 | } | 57 | } |
61 | fail_buf: | 58 | fail_buf: |
62 | atomic_dec(&event->profile_count); | 59 | atomic_dec(&event->profile_count); |
@@ -89,14 +86,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event) | |||
89 | if (!atomic_add_negative(-1, &event->profile_count)) | 86 | if (!atomic_add_negative(-1, &event->profile_count)) |
90 | return; | 87 | return; |
91 | 88 | ||
92 | event->profile_disable(); | 89 | event->profile_disable(event); |
93 | 90 | ||
94 | if (!--total_profile_count) { | 91 | if (!--total_profile_count) { |
95 | buf = trace_profile_buf; | 92 | buf = perf_trace_buf; |
96 | rcu_assign_pointer(trace_profile_buf, NULL); | 93 | rcu_assign_pointer(perf_trace_buf, NULL); |
97 | 94 | ||
98 | nmi_buf = trace_profile_buf_nmi; | 95 | nmi_buf = perf_trace_buf_nmi; |
99 | rcu_assign_pointer(trace_profile_buf_nmi, NULL); | 96 | rcu_assign_pointer(perf_trace_buf_nmi, NULL); |
100 | 97 | ||
101 | /* | 98 | /* |
102 | * Ensure every events in profiling have finished before | 99 | * Ensure every events in profiling have finished before |
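The perf_trace_t typedef above exists because alloc_percpu() takes a type rather than a byte count, so a char-array type of the wanted size is declared and handed to the allocator. A minimal user-space sketch of the same trick; the FTRACE_MAX_PROFILE_SIZE value here is illustrative only:

#include <stdio.h>

#define FTRACE_MAX_PROFILE_SIZE	2048		/* illustrative value, not the kernel's */

/* A named type whose size is the whole buffer, usable where a type is required. */
typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

int main(void)
{
	/* the kernel does: buf = (char *)alloc_percpu(perf_trace_t); */
	printf("per-cpu profile buffer: %zu bytes\n", sizeof(perf_trace_t));
	return 0;
}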
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index d128f65778e6..1d18315dc836 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -93,9 +93,7 @@ int trace_define_common_fields(struct ftrace_event_call *call) | |||
93 | } | 93 | } |
94 | EXPORT_SYMBOL_GPL(trace_define_common_fields); | 94 | EXPORT_SYMBOL_GPL(trace_define_common_fields); |
95 | 95 | ||
96 | #ifdef CONFIG_MODULES | 96 | void trace_destroy_fields(struct ftrace_event_call *call) |
97 | |||
98 | static void trace_destroy_fields(struct ftrace_event_call *call) | ||
99 | { | 97 | { |
100 | struct ftrace_event_field *field, *next; | 98 | struct ftrace_event_field *field, *next; |
101 | 99 | ||
@@ -107,8 +105,6 @@ static void trace_destroy_fields(struct ftrace_event_call *call) | |||
107 | } | 105 | } |
108 | } | 106 | } |
109 | 107 | ||
110 | #endif /* CONFIG_MODULES */ | ||
111 | |||
112 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, | 108 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, |
113 | int enable) | 109 | int enable) |
114 | { | 110 | { |
@@ -117,14 +113,14 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call, | |||
117 | if (call->enabled) { | 113 | if (call->enabled) { |
118 | call->enabled = 0; | 114 | call->enabled = 0; |
119 | tracing_stop_cmdline_record(); | 115 | tracing_stop_cmdline_record(); |
120 | call->unregfunc(call->data); | 116 | call->unregfunc(call); |
121 | } | 117 | } |
122 | break; | 118 | break; |
123 | case 1: | 119 | case 1: |
124 | if (!call->enabled) { | 120 | if (!call->enabled) { |
125 | call->enabled = 1; | 121 | call->enabled = 1; |
126 | tracing_start_cmdline_record(); | 122 | tracing_start_cmdline_record(); |
127 | call->regfunc(call->data); | 123 | call->regfunc(call); |
128 | } | 124 | } |
129 | break; | 125 | break; |
130 | } | 126 | } |
@@ -507,7 +503,7 @@ extern char *__bad_type_size(void); | |||
507 | #define FIELD(type, name) \ | 503 | #define FIELD(type, name) \ |
508 | sizeof(type) != sizeof(field.name) ? __bad_type_size() : \ | 504 | sizeof(type) != sizeof(field.name) ? __bad_type_size() : \ |
509 | #type, "common_" #name, offsetof(typeof(field), name), \ | 505 | #type, "common_" #name, offsetof(typeof(field), name), \ |
510 | sizeof(field.name) | 506 | sizeof(field.name), is_signed_type(type) |
511 | 507 | ||
512 | static int trace_write_header(struct trace_seq *s) | 508 | static int trace_write_header(struct trace_seq *s) |
513 | { | 509 | { |
@@ -515,17 +511,17 @@ static int trace_write_header(struct trace_seq *s) | |||
515 | 511 | ||
516 | /* struct trace_entry */ | 512 | /* struct trace_entry */ |
517 | return trace_seq_printf(s, | 513 | return trace_seq_printf(s, |
518 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 514 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
519 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 515 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
520 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 516 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
521 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 517 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
522 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 518 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
523 | "\n", | 519 | "\n", |
524 | FIELD(unsigned short, type), | 520 | FIELD(unsigned short, type), |
525 | FIELD(unsigned char, flags), | 521 | FIELD(unsigned char, flags), |
526 | FIELD(unsigned char, preempt_count), | 522 | FIELD(unsigned char, preempt_count), |
527 | FIELD(int, pid), | 523 | FIELD(int, pid), |
528 | FIELD(int, lock_depth)); | 524 | FIELD(int, lock_depth)); |
529 | } | 525 | } |
530 | 526 | ||
531 | static ssize_t | 527 | static ssize_t |
@@ -878,9 +874,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events) | |||
878 | "'%s/filter' entry\n", name); | 874 | "'%s/filter' entry\n", name); |
879 | } | 875 | } |
880 | 876 | ||
881 | entry = trace_create_file("enable", 0644, system->entry, | 877 | trace_create_file("enable", 0644, system->entry, |
882 | (void *)system->name, | 878 | (void *)system->name, |
883 | &ftrace_system_enable_fops); | 879 | &ftrace_system_enable_fops); |
884 | 880 | ||
885 | return system->entry; | 881 | return system->entry; |
886 | } | 882 | } |
@@ -892,7 +888,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
892 | const struct file_operations *filter, | 888 | const struct file_operations *filter, |
893 | const struct file_operations *format) | 889 | const struct file_operations *format) |
894 | { | 890 | { |
895 | struct dentry *entry; | ||
896 | int ret; | 891 | int ret; |
897 | 892 | ||
898 | /* | 893 | /* |
@@ -910,12 +905,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
910 | } | 905 | } |
911 | 906 | ||
912 | if (call->regfunc) | 907 | if (call->regfunc) |
913 | entry = trace_create_file("enable", 0644, call->dir, call, | 908 | trace_create_file("enable", 0644, call->dir, call, |
914 | enable); | 909 | enable); |
915 | 910 | ||
916 | if (call->id && call->profile_enable) | 911 | if (call->id && call->profile_enable) |
917 | entry = trace_create_file("id", 0444, call->dir, call, | 912 | trace_create_file("id", 0444, call->dir, call, |
918 | id); | 913 | id); |
919 | 914 | ||
920 | if (call->define_fields) { | 915 | if (call->define_fields) { |
921 | ret = call->define_fields(call); | 916 | ret = call->define_fields(call); |
@@ -924,41 +919,60 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
924 | " events/%s\n", call->name); | 919 | " events/%s\n", call->name); |
925 | return ret; | 920 | return ret; |
926 | } | 921 | } |
927 | entry = trace_create_file("filter", 0644, call->dir, call, | 922 | trace_create_file("filter", 0644, call->dir, call, |
928 | filter); | 923 | filter); |
929 | } | 924 | } |
930 | 925 | ||
931 | /* A trace may not want to export its format */ | 926 | /* A trace may not want to export its format */ |
932 | if (!call->show_format) | 927 | if (!call->show_format) |
933 | return 0; | 928 | return 0; |
934 | 929 | ||
935 | entry = trace_create_file("format", 0444, call->dir, call, | 930 | trace_create_file("format", 0444, call->dir, call, |
936 | format); | 931 | format); |
937 | 932 | ||
938 | return 0; | 933 | return 0; |
939 | } | 934 | } |
940 | 935 | ||
941 | #define for_each_event(event, start, end) \ | 936 | static int __trace_add_event_call(struct ftrace_event_call *call) |
942 | for (event = start; \ | 937 | { |
943 | (unsigned long)event < (unsigned long)end; \ | 938 | struct dentry *d_events; |
944 | event++) | 939 | int ret; |
945 | 940 | ||
946 | #ifdef CONFIG_MODULES | 941 | if (!call->name) |
942 | return -EINVAL; | ||
947 | 943 | ||
948 | static LIST_HEAD(ftrace_module_file_list); | 944 | if (call->raw_init) { |
945 | ret = call->raw_init(call); | ||
946 | if (ret < 0) { | ||
947 | if (ret != -ENOSYS) | ||
948 | pr_warning("Could not initialize trace " | ||
949 | "events/%s\n", call->name); | ||
950 | return ret; | ||
951 | } | ||
952 | } | ||
949 | 953 | ||
950 | /* | 954 | d_events = event_trace_events_dir(); |
951 | * Modules must own their file_operations to keep up with | 955 | if (!d_events) |
952 | * reference counting. | 956 | return -ENOENT; |
953 | */ | 957 | |
954 | struct ftrace_module_file_ops { | 958 | ret = event_create_dir(call, d_events, &ftrace_event_id_fops, |
955 | struct list_head list; | 959 | &ftrace_enable_fops, &ftrace_event_filter_fops, |
956 | struct module *mod; | 960 | &ftrace_event_format_fops); |
957 | struct file_operations id; | 961 | if (!ret) |
958 | struct file_operations enable; | 962 | list_add(&call->list, &ftrace_events); |
959 | struct file_operations format; | 963 | |
960 | struct file_operations filter; | 964 | return ret; |
961 | }; | 965 | } |
966 | |||
967 | /* Add an additional event_call dynamically */ | ||
968 | int trace_add_event_call(struct ftrace_event_call *call) | ||
969 | { | ||
970 | int ret; | ||
971 | mutex_lock(&event_mutex); | ||
972 | ret = __trace_add_event_call(call); | ||
973 | mutex_unlock(&event_mutex); | ||
974 | return ret; | ||
975 | } | ||
962 | 976 | ||
963 | static void remove_subsystem_dir(const char *name) | 977 | static void remove_subsystem_dir(const char *name) |
964 | { | 978 | { |
@@ -986,6 +1000,53 @@ static void remove_subsystem_dir(const char *name) | |||
986 | } | 1000 | } |
987 | } | 1001 | } |
988 | 1002 | ||
1003 | /* | ||
1004 | * Must be called under locking both of event_mutex and trace_event_mutex. | ||
1005 | */ | ||
1006 | static void __trace_remove_event_call(struct ftrace_event_call *call) | ||
1007 | { | ||
1008 | ftrace_event_enable_disable(call, 0); | ||
1009 | if (call->event) | ||
1010 | __unregister_ftrace_event(call->event); | ||
1011 | debugfs_remove_recursive(call->dir); | ||
1012 | list_del(&call->list); | ||
1013 | trace_destroy_fields(call); | ||
1014 | destroy_preds(call); | ||
1015 | remove_subsystem_dir(call->system); | ||
1016 | } | ||
1017 | |||
1018 | /* Remove an event_call */ | ||
1019 | void trace_remove_event_call(struct ftrace_event_call *call) | ||
1020 | { | ||
1021 | mutex_lock(&event_mutex); | ||
1022 | down_write(&trace_event_mutex); | ||
1023 | __trace_remove_event_call(call); | ||
1024 | up_write(&trace_event_mutex); | ||
1025 | mutex_unlock(&event_mutex); | ||
1026 | } | ||
1027 | |||
1028 | #define for_each_event(event, start, end) \ | ||
1029 | for (event = start; \ | ||
1030 | (unsigned long)event < (unsigned long)end; \ | ||
1031 | event++) | ||
1032 | |||
1033 | #ifdef CONFIG_MODULES | ||
1034 | |||
1035 | static LIST_HEAD(ftrace_module_file_list); | ||
1036 | |||
1037 | /* | ||
1038 | * Modules must own their file_operations to keep up with | ||
1039 | * reference counting. | ||
1040 | */ | ||
1041 | struct ftrace_module_file_ops { | ||
1042 | struct list_head list; | ||
1043 | struct module *mod; | ||
1044 | struct file_operations id; | ||
1045 | struct file_operations enable; | ||
1046 | struct file_operations format; | ||
1047 | struct file_operations filter; | ||
1048 | }; | ||
1049 | |||
989 | static struct ftrace_module_file_ops * | 1050 | static struct ftrace_module_file_ops * |
990 | trace_create_file_ops(struct module *mod) | 1051 | trace_create_file_ops(struct module *mod) |
991 | { | 1052 | { |
@@ -1043,7 +1104,7 @@ static void trace_module_add_events(struct module *mod) | |||
1043 | if (!call->name) | 1104 | if (!call->name) |
1044 | continue; | 1105 | continue; |
1045 | if (call->raw_init) { | 1106 | if (call->raw_init) { |
1046 | ret = call->raw_init(); | 1107 | ret = call->raw_init(call); |
1047 | if (ret < 0) { | 1108 | if (ret < 0) { |
1048 | if (ret != -ENOSYS) | 1109 | if (ret != -ENOSYS) |
1049 | pr_warning("Could not initialize trace " | 1110 | pr_warning("Could not initialize trace " |
@@ -1061,10 +1122,11 @@ static void trace_module_add_events(struct module *mod) | |||
1061 | return; | 1122 | return; |
1062 | } | 1123 | } |
1063 | call->mod = mod; | 1124 | call->mod = mod; |
1064 | list_add(&call->list, &ftrace_events); | 1125 | ret = event_create_dir(call, d_events, |
1065 | event_create_dir(call, d_events, | 1126 | &file_ops->id, &file_ops->enable, |
1066 | &file_ops->id, &file_ops->enable, | 1127 | &file_ops->filter, &file_ops->format); |
1067 | &file_ops->filter, &file_ops->format); | 1128 | if (!ret) |
1129 | list_add(&call->list, &ftrace_events); | ||
1068 | } | 1130 | } |
1069 | } | 1131 | } |
1070 | 1132 | ||
@@ -1078,14 +1140,7 @@ static void trace_module_remove_events(struct module *mod) | |||
1078 | list_for_each_entry_safe(call, p, &ftrace_events, list) { | 1140 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
1079 | if (call->mod == mod) { | 1141 | if (call->mod == mod) { |
1080 | found = true; | 1142 | found = true; |
1081 | ftrace_event_enable_disable(call, 0); | 1143 | __trace_remove_event_call(call); |
1082 | if (call->event) | ||
1083 | __unregister_ftrace_event(call->event); | ||
1084 | debugfs_remove_recursive(call->dir); | ||
1085 | list_del(&call->list); | ||
1086 | trace_destroy_fields(call); | ||
1087 | destroy_preds(call); | ||
1088 | remove_subsystem_dir(call->system); | ||
1089 | } | 1144 | } |
1090 | } | 1145 | } |
1091 | 1146 | ||
@@ -1203,7 +1258,7 @@ static __init int event_trace_init(void) | |||
1203 | if (!call->name) | 1258 | if (!call->name) |
1204 | continue; | 1259 | continue; |
1205 | if (call->raw_init) { | 1260 | if (call->raw_init) { |
1206 | ret = call->raw_init(); | 1261 | ret = call->raw_init(call); |
1207 | if (ret < 0) { | 1262 | if (ret < 0) { |
1208 | if (ret != -ENOSYS) | 1263 | if (ret != -ENOSYS) |
1209 | pr_warning("Could not initialize trace " | 1264 | pr_warning("Could not initialize trace " |
@@ -1211,10 +1266,12 @@ static __init int event_trace_init(void) | |||
1211 | continue; | 1266 | continue; |
1212 | } | 1267 | } |
1213 | } | 1268 | } |
1214 | list_add(&call->list, &ftrace_events); | 1269 | ret = event_create_dir(call, d_events, &ftrace_event_id_fops, |
1215 | event_create_dir(call, d_events, &ftrace_event_id_fops, | 1270 | &ftrace_enable_fops, |
1216 | &ftrace_enable_fops, &ftrace_event_filter_fops, | 1271 | &ftrace_event_filter_fops, |
1217 | &ftrace_event_format_fops); | 1272 | &ftrace_event_format_fops); |
1273 | if (!ret) | ||
1274 | list_add(&call->list, &ftrace_events); | ||
1218 | } | 1275 | } |
1219 | 1276 | ||
1220 | while (true) { | 1277 | while (true) { |
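trace_add_event_call() and trace_remove_event_call() above give dynamically created events (such as the kprobe events added elsewhere in this series) a registration path that takes event_mutex and, on removal, trace_event_mutex as well. A hedged usage sketch; the ftrace_event_call initialization and the header providing the prototypes are assumptions, not shown in this hunk:

#include <linux/ftrace_event.h>		/* assumed location of the prototypes */

static struct ftrace_event_call my_event_call;	/* name, system, raw_init etc. filled in elsewhere */

static int my_provider_register(void)
{
	/* creates events/<system>/<name>/ and links the call into ftrace_events */
	return trace_add_event_call(&my_event_call);
}

static void my_provider_unregister(void)
{
	/* disables the event, then removes its dir, fields and predicates */
	trace_remove_event_call(&my_event_call);
}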
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 98a6cc5c64ed..50504cb228de 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -18,11 +18,10 @@ | |||
18 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | 18 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/module.h> | 21 | #include <linux/module.h> |
24 | #include <linux/ctype.h> | 22 | #include <linux/ctype.h> |
25 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/perf_event.h> | ||
26 | 25 | ||
27 | #include "trace.h" | 26 | #include "trace.h" |
28 | #include "trace_output.h" | 27 | #include "trace_output.h" |
@@ -31,6 +30,7 @@ enum filter_op_ids | |||
31 | { | 30 | { |
32 | OP_OR, | 31 | OP_OR, |
33 | OP_AND, | 32 | OP_AND, |
33 | OP_GLOB, | ||
34 | OP_NE, | 34 | OP_NE, |
35 | OP_EQ, | 35 | OP_EQ, |
36 | OP_LT, | 36 | OP_LT, |
@@ -48,16 +48,17 @@ struct filter_op { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct filter_op filter_ops[] = { | 50 | static struct filter_op filter_ops[] = { |
51 | { OP_OR, "||", 1 }, | 51 | { OP_OR, "||", 1 }, |
52 | { OP_AND, "&&", 2 }, | 52 | { OP_AND, "&&", 2 }, |
53 | { OP_NE, "!=", 4 }, | 53 | { OP_GLOB, "~", 4 }, |
54 | { OP_EQ, "==", 4 }, | 54 | { OP_NE, "!=", 4 }, |
55 | { OP_LT, "<", 5 }, | 55 | { OP_EQ, "==", 4 }, |
56 | { OP_LE, "<=", 5 }, | 56 | { OP_LT, "<", 5 }, |
57 | { OP_GT, ">", 5 }, | 57 | { OP_LE, "<=", 5 }, |
58 | { OP_GE, ">=", 5 }, | 58 | { OP_GT, ">", 5 }, |
59 | { OP_NONE, "OP_NONE", 0 }, | 59 | { OP_GE, ">=", 5 }, |
60 | { OP_OPEN_PAREN, "(", 0 }, | 60 | { OP_NONE, "OP_NONE", 0 }, |
61 | { OP_OPEN_PAREN, "(", 0 }, | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | enum { | 64 | enum { |
@@ -197,9 +198,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event, | |||
197 | char *addr = (char *)(event + pred->offset); | 198 | char *addr = (char *)(event + pred->offset); |
198 | int cmp, match; | 199 | int cmp, match; |
199 | 200 | ||
200 | cmp = strncmp(addr, pred->str_val, pred->str_len); | 201 | cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); |
201 | 202 | ||
202 | match = (!cmp) ^ pred->not; | 203 | match = cmp ^ pred->not; |
203 | 204 | ||
204 | return match; | 205 | return match; |
205 | } | 206 | } |
@@ -211,9 +212,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, | |||
211 | char **addr = (char **)(event + pred->offset); | 212 | char **addr = (char **)(event + pred->offset); |
212 | int cmp, match; | 213 | int cmp, match; |
213 | 214 | ||
214 | cmp = strncmp(*addr, pred->str_val, pred->str_len); | 215 | cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); |
215 | 216 | ||
216 | match = (!cmp) ^ pred->not; | 217 | match = cmp ^ pred->not; |
217 | 218 | ||
218 | return match; | 219 | return match; |
219 | } | 220 | } |
@@ -237,9 +238,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event, | |||
237 | char *addr = (char *)(event + str_loc); | 238 | char *addr = (char *)(event + str_loc); |
238 | int cmp, match; | 239 | int cmp, match; |
239 | 240 | ||
240 | cmp = strncmp(addr, pred->str_val, str_len); | 241 | cmp = pred->regex.match(addr, &pred->regex, str_len); |
241 | 242 | ||
242 | match = (!cmp) ^ pred->not; | 243 | match = cmp ^ pred->not; |
243 | 244 | ||
244 | return match; | 245 | return match; |
245 | } | 246 | } |
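All three string predicates change comparison convention here: strncmp() reported a match with 0, while the regex.match() callbacks report a match with 1, which is why the inversion collapses from (!cmp) ^ pred->not to a plain cmp ^ pred->not. A throwaway check of the equivalence (userspace sketch, not kernel code):

	#include <assert.h>

	int main(void)
	{
		int not = 1;		/* predicate was negated               */
		int strncmp_ret = 0;	/* old convention: 0 means "matched"   */
		int hit = 1;		/* new convention: 1 means "matched"   */

		/* Both forms keep or discard the same events. */
		assert(((!strncmp_ret) ^ not) == (hit ^ not));
		return 0;
	}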
@@ -250,10 +251,121 @@ static int filter_pred_none(struct filter_pred *pred, void *event, | |||
250 | return 0; | 251 | return 0; |
251 | } | 252 | } |
252 | 253 | ||
254 | /* Basic regex callbacks */ | ||
255 | static int regex_match_full(char *str, struct regex *r, int len) | ||
256 | { | ||
257 | if (strncmp(str, r->pattern, len) == 0) | ||
258 | return 1; | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | static int regex_match_front(char *str, struct regex *r, int len) | ||
263 | { | ||
264 | if (strncmp(str, r->pattern, len) == 0) | ||
265 | return 1; | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int regex_match_middle(char *str, struct regex *r, int len) | ||
270 | { | ||
271 | if (strstr(str, r->pattern)) | ||
272 | return 1; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int regex_match_end(char *str, struct regex *r, int len) | ||
277 | { | ||
278 | char *ptr = strstr(str, r->pattern); | ||
279 | |||
280 | if (ptr && (ptr[r->len] == 0)) | ||
281 | return 1; | ||
282 | return 0; | ||
283 | } | ||
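These four callbacks implement the glob flavours chosen by filter_parse_regex() below: full and front matches go through strncmp(), a middle match only needs strstr(), and an end match additionally requires the pattern to stop exactly where the string does (ptr[r->len] == 0). A standalone sketch of the end-only test, for illustration:

	#include <stdio.h>
	#include <string.h>

	/* Mirrors regex_match_end(): pattern must occur and must end with the string. */
	static int match_end(const char *str, const char *pattern)
	{
		const char *ptr = strstr(str, pattern);

		return ptr && ptr[strlen(pattern)] == '\0';
	}

	int main(void)
	{
		printf("%d\n", match_end("kmem_cache_free", "_free"));	/* 1 */
		printf("%d\n", match_end("kmem_cache_free", "cache"));	/* 0 */
		return 0;
	}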
284 | |||
285 | /** | ||
286 | * filter_parse_regex - parse a basic regex | ||
287 | * @buff: the raw regex | ||
288 | * @len: length of the regex | ||
289 | * @search: will point to the beginning of the string to compare | ||
290 | * @not: tell whether the match will have to be inverted | ||
291 | * | ||
292 | * The caller passes in a buffer containing a regex; this function sets | ||
293 | * @search to point at the portion of the buffer to compare against and | ||
294 | * returns the type of search it is (see enum above). | ||
295 | * This does modify buff. | ||
296 | * | ||
297 | * Returns enum type. | ||
298 | * search returns the pointer to use for comparison. | ||
299 | * not returns 1 if buff started with a '!' | ||
300 | * 0 otherwise. | ||
301 | */ | ||
302 | enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) | ||
303 | { | ||
304 | int type = MATCH_FULL; | ||
305 | int i; | ||
306 | |||
307 | if (buff[0] == '!') { | ||
308 | *not = 1; | ||
309 | buff++; | ||
310 | len--; | ||
311 | } else | ||
312 | *not = 0; | ||
313 | |||
314 | *search = buff; | ||
315 | |||
316 | for (i = 0; i < len; i++) { | ||
317 | if (buff[i] == '*') { | ||
318 | if (!i) { | ||
319 | *search = buff + 1; | ||
320 | type = MATCH_END_ONLY; | ||
321 | } else { | ||
322 | if (type == MATCH_END_ONLY) | ||
323 | type = MATCH_MIDDLE_ONLY; | ||
324 | else | ||
325 | type = MATCH_FRONT_ONLY; | ||
326 | buff[i] = 0; | ||
327 | break; | ||
328 | } | ||
329 | } | ||
330 | } | ||
331 | |||
332 | return type; | ||
333 | } | ||
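Worked example of the classification: a '*' in the first position selects MATCH_END_ONLY, a later '*' truncates the pattern and selects MATCH_FRONT_ONLY, both together give MATCH_MIDDLE_ONLY, and a leading '!' only flips the reported not flag. A userspace copy of the same algorithm (illustration only, not the kernel build):

	#include <stdio.h>
	#include <string.h>

	enum regex_type { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

	/* Same algorithm as filter_parse_regex() above, outside the kernel. */
	static enum regex_type parse(char *buff, int len, char **search, int *not)
	{
		enum regex_type type = MATCH_FULL;
		int i;

		if (buff[0] == '!') {
			*not = 1;
			buff++;
			len--;
		} else
			*not = 0;

		*search = buff;

		for (i = 0; i < len; i++) {
			if (buff[i] == '*') {
				if (!i) {
					*search = buff + 1;
					type = MATCH_END_ONLY;
				} else {
					if (type == MATCH_END_ONLY)
						type = MATCH_MIDDLE_ONLY;
					else
						type = MATCH_FRONT_ONLY;
					buff[i] = 0;
					break;
				}
			}
		}
		return type;
	}

	int main(void)
	{
		char glob[] = "!*sched*";
		char *search;
		int not;
		enum regex_type t = parse(glob, strlen(glob), &search, &not);

		/* Prints: type=2 search=sched not=1 (MATCH_MIDDLE_ONLY, negated) */
		printf("type=%d search=%s not=%d\n", t, search, not);
		return 0;
	}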
334 | |||
335 | static void filter_build_regex(struct filter_pred *pred) | ||
336 | { | ||
337 | struct regex *r = &pred->regex; | ||
338 | char *search; | ||
339 | enum regex_type type = MATCH_FULL; | ||
340 | int not = 0; | ||
341 | |||
342 | if (pred->op == OP_GLOB) { | ||
343 | type = filter_parse_regex(r->pattern, r->len, &search, ¬); | ||
344 | r->len = strlen(search); | ||
345 | memmove(r->pattern, search, r->len+1); | ||
346 | } | ||
347 | |||
348 | switch (type) { | ||
349 | case MATCH_FULL: | ||
350 | r->match = regex_match_full; | ||
351 | break; | ||
352 | case MATCH_FRONT_ONLY: | ||
353 | r->match = regex_match_front; | ||
354 | break; | ||
355 | case MATCH_MIDDLE_ONLY: | ||
356 | r->match = regex_match_middle; | ||
357 | break; | ||
358 | case MATCH_END_ONLY: | ||
359 | r->match = regex_match_end; | ||
360 | break; | ||
361 | } | ||
362 | |||
363 | pred->not ^= not; | ||
364 | } | ||
365 | |||
253 | /* return 1 if event matches, 0 otherwise (discard) */ | 366 | /* return 1 if event matches, 0 otherwise (discard) */ |
254 | int filter_match_preds(struct ftrace_event_call *call, void *rec) | 367 | int filter_match_preds(struct event_filter *filter, void *rec) |
255 | { | 368 | { |
256 | struct event_filter *filter = call->filter; | ||
257 | int match, top = 0, val1 = 0, val2 = 0; | 369 | int match, top = 0, val1 = 0, val2 = 0; |
258 | int stack[MAX_FILTER_PRED]; | 370 | int stack[MAX_FILTER_PRED]; |
259 | struct filter_pred *pred; | 371 | struct filter_pred *pred; |
@@ -396,7 +508,7 @@ static void filter_clear_pred(struct filter_pred *pred) | |||
396 | { | 508 | { |
397 | kfree(pred->field_name); | 509 | kfree(pred->field_name); |
398 | pred->field_name = NULL; | 510 | pred->field_name = NULL; |
399 | pred->str_len = 0; | 511 | pred->regex.len = 0; |
400 | } | 512 | } |
401 | 513 | ||
402 | static int filter_set_pred(struct filter_pred *dest, | 514 | static int filter_set_pred(struct filter_pred *dest, |
@@ -426,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call) | |||
426 | filter->preds[i]->fn = filter_pred_none; | 538 | filter->preds[i]->fn = filter_pred_none; |
427 | } | 539 | } |
428 | 540 | ||
429 | void destroy_preds(struct ftrace_event_call *call) | 541 | static void __free_preds(struct event_filter *filter) |
430 | { | 542 | { |
431 | struct event_filter *filter = call->filter; | ||
432 | int i; | 543 | int i; |
433 | 544 | ||
434 | if (!filter) | 545 | if (!filter) |
@@ -441,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call) | |||
441 | kfree(filter->preds); | 552 | kfree(filter->preds); |
442 | kfree(filter->filter_string); | 553 | kfree(filter->filter_string); |
443 | kfree(filter); | 554 | kfree(filter); |
555 | } | ||
556 | |||
557 | void destroy_preds(struct ftrace_event_call *call) | ||
558 | { | ||
559 | __free_preds(call->filter); | ||
444 | call->filter = NULL; | 560 | call->filter = NULL; |
561 | call->filter_active = 0; | ||
445 | } | 562 | } |
446 | 563 | ||
447 | static int init_preds(struct ftrace_event_call *call) | 564 | static struct event_filter *__alloc_preds(void) |
448 | { | 565 | { |
449 | struct event_filter *filter; | 566 | struct event_filter *filter; |
450 | struct filter_pred *pred; | 567 | struct filter_pred *pred; |
451 | int i; | 568 | int i; |
452 | 569 | ||
453 | if (call->filter) | 570 | filter = kzalloc(sizeof(*filter), GFP_KERNEL); |
454 | return 0; | 571 | if (!filter) |
455 | 572 | return ERR_PTR(-ENOMEM); | |
456 | filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); | ||
457 | if (!call->filter) | ||
458 | return -ENOMEM; | ||
459 | 573 | ||
460 | filter->n_preds = 0; | 574 | filter->n_preds = 0; |
461 | 575 | ||
@@ -471,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call) | |||
471 | filter->preds[i] = pred; | 585 | filter->preds[i] = pred; |
472 | } | 586 | } |
473 | 587 | ||
474 | return 0; | 588 | return filter; |
475 | 589 | ||
476 | oom: | 590 | oom: |
477 | destroy_preds(call); | 591 | __free_preds(filter); |
592 | return ERR_PTR(-ENOMEM); | ||
593 | } | ||
478 | 594 | ||
479 | return -ENOMEM; | 595 | static int init_preds(struct ftrace_event_call *call) |
596 | { | ||
597 | if (call->filter) | ||
598 | return 0; | ||
599 | |||
600 | call->filter_active = 0; | ||
601 | call->filter = __alloc_preds(); | ||
602 | if (IS_ERR(call->filter)) | ||
603 | return PTR_ERR(call->filter); | ||
604 | |||
605 | return 0; | ||
480 | } | 606 | } |
481 | 607 | ||
482 | static int init_subsystem_preds(struct event_subsystem *system) | 608 | static int init_subsystem_preds(struct event_subsystem *system) |
@@ -499,14 +625,7 @@ static int init_subsystem_preds(struct event_subsystem *system) | |||
499 | return 0; | 625 | return 0; |
500 | } | 626 | } |
501 | 627 | ||
502 | enum { | 628 | static void filter_free_subsystem_preds(struct event_subsystem *system) |
503 | FILTER_DISABLE_ALL, | ||
504 | FILTER_INIT_NO_RESET, | ||
505 | FILTER_SKIP_NO_RESET, | ||
506 | }; | ||
507 | |||
508 | static void filter_free_subsystem_preds(struct event_subsystem *system, | ||
509 | int flag) | ||
510 | { | 629 | { |
511 | struct ftrace_event_call *call; | 630 | struct ftrace_event_call *call; |
512 | 631 | ||
@@ -517,14 +636,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, | |||
517 | if (strcmp(call->system, system->name) != 0) | 636 | if (strcmp(call->system, system->name) != 0) |
518 | continue; | 637 | continue; |
519 | 638 | ||
520 | if (flag == FILTER_INIT_NO_RESET) { | ||
521 | call->filter->no_reset = false; | ||
522 | continue; | ||
523 | } | ||
524 | |||
525 | if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) | ||
526 | continue; | ||
527 | |||
528 | filter_disable_preds(call); | 639 | filter_disable_preds(call); |
529 | remove_filter_string(call->filter); | 640 | remove_filter_string(call->filter); |
530 | } | 641 | } |
@@ -532,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, | |||
532 | 643 | ||
533 | static int filter_add_pred_fn(struct filter_parse_state *ps, | 644 | static int filter_add_pred_fn(struct filter_parse_state *ps, |
534 | struct ftrace_event_call *call, | 645 | struct ftrace_event_call *call, |
646 | struct event_filter *filter, | ||
535 | struct filter_pred *pred, | 647 | struct filter_pred *pred, |
536 | filter_pred_fn_t fn) | 648 | filter_pred_fn_t fn) |
537 | { | 649 | { |
538 | struct event_filter *filter = call->filter; | ||
539 | int idx, err; | 650 | int idx, err; |
540 | 651 | ||
541 | if (filter->n_preds == MAX_FILTER_PRED) { | 652 | if (filter->n_preds == MAX_FILTER_PRED) { |
@@ -550,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps, | |||
550 | return err; | 661 | return err; |
551 | 662 | ||
552 | filter->n_preds++; | 663 | filter->n_preds++; |
553 | call->filter_active = 1; | ||
554 | 664 | ||
555 | return 0; | 665 | return 0; |
556 | } | 666 | } |
@@ -575,7 +685,10 @@ static bool is_string_field(struct ftrace_event_field *field) | |||
575 | 685 | ||
576 | static int is_legal_op(struct ftrace_event_field *field, int op) | 686 | static int is_legal_op(struct ftrace_event_field *field, int op) |
577 | { | 687 | { |
578 | if (is_string_field(field) && (op != OP_EQ && op != OP_NE)) | 688 | if (is_string_field(field) && |
689 | (op != OP_EQ && op != OP_NE && op != OP_GLOB)) | ||
690 | return 0; | ||
691 | if (!is_string_field(field) && op == OP_GLOB) | ||
579 | return 0; | 692 | return 0; |
580 | 693 | ||
581 | return 1; | 694 | return 1; |
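The legality check now cuts both ways: string fields accept only "==", "!=" and "~", and non-string fields reject "~" outright. With hypothetical fields, comm ~ "ls*" parses, while pid ~ "1*" or comm > "a" are refused as illegal field/operator combinations.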
@@ -626,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size, | |||
626 | 739 | ||
627 | static int filter_add_pred(struct filter_parse_state *ps, | 740 | static int filter_add_pred(struct filter_parse_state *ps, |
628 | struct ftrace_event_call *call, | 741 | struct ftrace_event_call *call, |
742 | struct event_filter *filter, | ||
629 | struct filter_pred *pred, | 743 | struct filter_pred *pred, |
630 | bool dry_run) | 744 | bool dry_run) |
631 | { | 745 | { |
@@ -660,21 +774,22 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
660 | } | 774 | } |
661 | 775 | ||
662 | if (is_string_field(field)) { | 776 | if (is_string_field(field)) { |
663 | pred->str_len = field->size; | 777 | filter_build_regex(pred); |
664 | 778 | ||
665 | if (field->filter_type == FILTER_STATIC_STRING) | 779 | if (field->filter_type == FILTER_STATIC_STRING) { |
666 | fn = filter_pred_string; | 780 | fn = filter_pred_string; |
667 | else if (field->filter_type == FILTER_DYN_STRING) | 781 | pred->regex.field_len = field->size; |
782 | } else if (field->filter_type == FILTER_DYN_STRING) | ||
668 | fn = filter_pred_strloc; | 783 | fn = filter_pred_strloc; |
669 | else { | 784 | else { |
670 | fn = filter_pred_pchar; | 785 | fn = filter_pred_pchar; |
671 | pred->str_len = strlen(pred->str_val); | 786 | pred->regex.field_len = strlen(pred->regex.pattern); |
672 | } | 787 | } |
673 | } else { | 788 | } else { |
674 | if (field->is_signed) | 789 | if (field->is_signed) |
675 | ret = strict_strtoll(pred->str_val, 0, &val); | 790 | ret = strict_strtoll(pred->regex.pattern, 0, &val); |
676 | else | 791 | else |
677 | ret = strict_strtoull(pred->str_val, 0, &val); | 792 | ret = strict_strtoull(pred->regex.pattern, 0, &val); |
678 | if (ret) { | 793 | if (ret) { |
679 | parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); | 794 | parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); |
680 | return -EINVAL; | 795 | return -EINVAL; |
@@ -694,45 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
694 | 809 | ||
695 | add_pred_fn: | 810 | add_pred_fn: |
696 | if (!dry_run) | 811 | if (!dry_run) |
697 | return filter_add_pred_fn(ps, call, pred, fn); | 812 | return filter_add_pred_fn(ps, call, filter, pred, fn); |
698 | return 0; | ||
699 | } | ||
700 | |||
701 | static int filter_add_subsystem_pred(struct filter_parse_state *ps, | ||
702 | struct event_subsystem *system, | ||
703 | struct filter_pred *pred, | ||
704 | char *filter_string, | ||
705 | bool dry_run) | ||
706 | { | ||
707 | struct ftrace_event_call *call; | ||
708 | int err = 0; | ||
709 | bool fail = true; | ||
710 | |||
711 | list_for_each_entry(call, &ftrace_events, list) { | ||
712 | |||
713 | if (!call->define_fields) | ||
714 | continue; | ||
715 | |||
716 | if (strcmp(call->system, system->name)) | ||
717 | continue; | ||
718 | |||
719 | if (call->filter->no_reset) | ||
720 | continue; | ||
721 | |||
722 | err = filter_add_pred(ps, call, pred, dry_run); | ||
723 | if (err) | ||
724 | call->filter->no_reset = true; | ||
725 | else | ||
726 | fail = false; | ||
727 | |||
728 | if (!dry_run) | ||
729 | replace_filter_string(call->filter, filter_string); | ||
730 | } | ||
731 | |||
732 | if (fail) { | ||
733 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); | ||
734 | return err; | ||
735 | } | ||
736 | return 0; | 813 | return 0; |
737 | } | 814 | } |
738 | 815 | ||
@@ -1045,8 +1122,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2) | |||
1045 | return NULL; | 1122 | return NULL; |
1046 | } | 1123 | } |
1047 | 1124 | ||
1048 | strcpy(pred->str_val, operand2); | 1125 | strcpy(pred->regex.pattern, operand2); |
1049 | pred->str_len = strlen(operand2); | 1126 | pred->regex.len = strlen(pred->regex.pattern); |
1050 | 1127 | ||
1051 | pred->op = op; | 1128 | pred->op = op; |
1052 | 1129 | ||
@@ -1090,8 +1167,8 @@ static int check_preds(struct filter_parse_state *ps) | |||
1090 | return 0; | 1167 | return 0; |
1091 | } | 1168 | } |
1092 | 1169 | ||
1093 | static int replace_preds(struct event_subsystem *system, | 1170 | static int replace_preds(struct ftrace_event_call *call, |
1094 | struct ftrace_event_call *call, | 1171 | struct event_filter *filter, |
1095 | struct filter_parse_state *ps, | 1172 | struct filter_parse_state *ps, |
1096 | char *filter_string, | 1173 | char *filter_string, |
1097 | bool dry_run) | 1174 | bool dry_run) |
@@ -1138,11 +1215,7 @@ static int replace_preds(struct event_subsystem *system, | |||
1138 | add_pred: | 1215 | add_pred: |
1139 | if (!pred) | 1216 | if (!pred) |
1140 | return -ENOMEM; | 1217 | return -ENOMEM; |
1141 | if (call) | 1218 | err = filter_add_pred(ps, call, filter, pred, dry_run); |
1142 | err = filter_add_pred(ps, call, pred, false); | ||
1143 | else | ||
1144 | err = filter_add_subsystem_pred(ps, system, pred, | ||
1145 | filter_string, dry_run); | ||
1146 | filter_free_pred(pred); | 1219 | filter_free_pred(pred); |
1147 | if (err) | 1220 | if (err) |
1148 | return err; | 1221 | return err; |
@@ -1153,10 +1226,50 @@ add_pred: | |||
1153 | return 0; | 1226 | return 0; |
1154 | } | 1227 | } |
1155 | 1228 | ||
1156 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | 1229 | static int replace_system_preds(struct event_subsystem *system, |
1230 | struct filter_parse_state *ps, | ||
1231 | char *filter_string) | ||
1157 | { | 1232 | { |
1233 | struct ftrace_event_call *call; | ||
1234 | bool fail = true; | ||
1158 | int err; | 1235 | int err; |
1159 | 1236 | ||
1237 | list_for_each_entry(call, &ftrace_events, list) { | ||
1238 | struct event_filter *filter = call->filter; | ||
1239 | |||
1240 | if (!call->define_fields) | ||
1241 | continue; | ||
1242 | |||
1243 | if (strcmp(call->system, system->name) != 0) | ||
1244 | continue; | ||
1245 | |||
1246 | /* try to see if the filter can be applied */ | ||
1247 | err = replace_preds(call, filter, ps, filter_string, true); | ||
1248 | if (err) | ||
1249 | continue; | ||
1250 | |||
1251 | /* really apply the filter */ | ||
1252 | filter_disable_preds(call); | ||
1253 | err = replace_preds(call, filter, ps, filter_string, false); | ||
1254 | if (err) | ||
1255 | filter_disable_preds(call); | ||
1256 | else { | ||
1257 | call->filter_active = 1; | ||
1258 | replace_filter_string(filter, filter_string); | ||
1259 | } | ||
1260 | fail = false; | ||
1261 | } | ||
1262 | |||
1263 | if (fail) { | ||
1264 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); | ||
1265 | return -EINVAL; | ||
1266 | } | ||
1267 | return 0; | ||
1268 | } | ||
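Design note: replace_system_preds() replaces the old no_reset flag dance with a per-event dry run. Each event in the subsystem is first checked with dry_run set; only when that pass succeeds are its existing predicates cleared, the filter applied for real, filter_active raised and the filter string recorded. Events whose fields cannot carry the expression are simply skipped, and only if every event fails does the write fail with FILT_ERR_BAD_SUBSYS_FILTER and -EINVAL.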
1269 | |||
1270 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | ||
1271 | { | ||
1272 | int err; | ||
1160 | struct filter_parse_state *ps; | 1273 | struct filter_parse_state *ps; |
1161 | 1274 | ||
1162 | mutex_lock(&event_mutex); | 1275 | mutex_lock(&event_mutex); |
@@ -1168,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1168 | if (!strcmp(strstrip(filter_string), "0")) { | 1281 | if (!strcmp(strstrip(filter_string), "0")) { |
1169 | filter_disable_preds(call); | 1282 | filter_disable_preds(call); |
1170 | remove_filter_string(call->filter); | 1283 | remove_filter_string(call->filter); |
1171 | mutex_unlock(&event_mutex); | 1284 | goto out_unlock; |
1172 | return 0; | ||
1173 | } | 1285 | } |
1174 | 1286 | ||
1175 | err = -ENOMEM; | 1287 | err = -ENOMEM; |
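Both entry points keep the established clear idiom: writing the literal "0" to an event's (or, below, a subsystem's) filter file drops the current filter, and the early return is now routed through the shared out_unlock label instead of a separate mutex_unlock()/return pair. In practice that is the usual echo 0 > tracing/events/<subsys>/<event>/filter on the debugfs mount (path illustrative).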
@@ -1187,10 +1299,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1187 | goto out; | 1299 | goto out; |
1188 | } | 1300 | } |
1189 | 1301 | ||
1190 | err = replace_preds(NULL, call, ps, filter_string, false); | 1302 | err = replace_preds(call, call->filter, ps, filter_string, false); |
1191 | if (err) | 1303 | if (err) |
1192 | append_filter_err(ps, call->filter); | 1304 | append_filter_err(ps, call->filter); |
1193 | 1305 | else | |
1306 | call->filter_active = 1; | ||
1194 | out: | 1307 | out: |
1195 | filter_opstack_clear(ps); | 1308 | filter_opstack_clear(ps); |
1196 | postfix_clear(ps); | 1309 | postfix_clear(ps); |
@@ -1205,7 +1318,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1205 | char *filter_string) | 1318 | char *filter_string) |
1206 | { | 1319 | { |
1207 | int err; | 1320 | int err; |
1208 | |||
1209 | struct filter_parse_state *ps; | 1321 | struct filter_parse_state *ps; |
1210 | 1322 | ||
1211 | mutex_lock(&event_mutex); | 1323 | mutex_lock(&event_mutex); |
@@ -1215,10 +1327,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1215 | goto out_unlock; | 1327 | goto out_unlock; |
1216 | 1328 | ||
1217 | if (!strcmp(strstrip(filter_string), "0")) { | 1329 | if (!strcmp(strstrip(filter_string), "0")) { |
1218 | filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); | 1330 | filter_free_subsystem_preds(system); |
1219 | remove_filter_string(system->filter); | 1331 | remove_filter_string(system->filter); |
1220 | mutex_unlock(&event_mutex); | 1332 | goto out_unlock; |
1221 | return 0; | ||
1222 | } | 1333 | } |
1223 | 1334 | ||
1224 | err = -ENOMEM; | 1335 | err = -ENOMEM; |
@@ -1235,31 +1346,87 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1235 | goto out; | 1346 | goto out; |
1236 | } | 1347 | } |
1237 | 1348 | ||
1238 | filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET); | 1349 | err = replace_system_preds(system, ps, filter_string); |
1239 | 1350 | if (err) | |
1240 | /* try to see the filter can be applied to which events */ | ||
1241 | err = replace_preds(system, NULL, ps, filter_string, true); | ||
1242 | if (err) { | ||
1243 | append_filter_err(ps, system->filter); | 1351 | append_filter_err(ps, system->filter); |
1244 | goto out; | 1352 | |
1353 | out: | ||
1354 | filter_opstack_clear(ps); | ||
1355 | postfix_clear(ps); | ||
1356 | kfree(ps); | ||
1357 | out_unlock: | ||
1358 | mutex_unlock(&event_mutex); | ||
1359 | |||
1360 | return err; | ||
1361 | } | ||
1362 | |||
1363 | #ifdef CONFIG_EVENT_PROFILE | ||
1364 | |||
1365 | void ftrace_profile_free_filter(struct perf_event *event) | ||
1366 | { | ||
1367 | struct event_filter *filter = event->filter; | ||
1368 | |||
1369 | event->filter = NULL; | ||
1370 | __free_preds(filter); | ||
1371 | } | ||
1372 | |||
1373 | int ftrace_profile_set_filter(struct perf_event *event, int event_id, | ||
1374 | char *filter_str) | ||
1375 | { | ||
1376 | int err; | ||
1377 | struct event_filter *filter; | ||
1378 | struct filter_parse_state *ps; | ||
1379 | struct ftrace_event_call *call = NULL; | ||
1380 | |||
1381 | mutex_lock(&event_mutex); | ||
1382 | |||
1383 | list_for_each_entry(call, &ftrace_events, list) { | ||
1384 | if (call->id == event_id) | ||
1385 | break; | ||
1245 | } | 1386 | } |
1246 | 1387 | ||
1247 | filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET); | 1388 | err = -EINVAL; |
1389 | if (!call) | ||
1390 | goto out_unlock; | ||
1248 | 1391 | ||
1249 | /* really apply the filter to the events */ | 1392 | err = -EEXIST; |
1250 | err = replace_preds(system, NULL, ps, filter_string, false); | 1393 | if (event->filter) |
1251 | if (err) { | 1394 | goto out_unlock; |
1252 | append_filter_err(ps, system->filter); | 1395 | |
1253 | filter_free_subsystem_preds(system, 2); | 1396 | filter = __alloc_preds(); |
1397 | if (IS_ERR(filter)) { | ||
1398 | err = PTR_ERR(filter); | ||
1399 | goto out_unlock; | ||
1254 | } | 1400 | } |
1255 | 1401 | ||
1256 | out: | 1402 | err = -ENOMEM; |
1403 | ps = kzalloc(sizeof(*ps), GFP_KERNEL); | ||
1404 | if (!ps) | ||
1405 | goto free_preds; | ||
1406 | |||
1407 | parse_init(ps, filter_ops, filter_str); | ||
1408 | err = filter_parse(ps); | ||
1409 | if (err) | ||
1410 | goto free_ps; | ||
1411 | |||
1412 | err = replace_preds(call, filter, ps, filter_str, false); | ||
1413 | if (!err) | ||
1414 | event->filter = filter; | ||
1415 | |||
1416 | free_ps: | ||
1257 | filter_opstack_clear(ps); | 1417 | filter_opstack_clear(ps); |
1258 | postfix_clear(ps); | 1418 | postfix_clear(ps); |
1259 | kfree(ps); | 1419 | kfree(ps); |
1420 | |||
1421 | free_preds: | ||
1422 | if (err) | ||
1423 | __free_preds(filter); | ||
1424 | |||
1260 | out_unlock: | 1425 | out_unlock: |
1261 | mutex_unlock(&event_mutex); | 1426 | mutex_unlock(&event_mutex); |
1262 | 1427 | ||
1263 | return err; | 1428 | return err; |
1264 | } | 1429 | } |
1265 | 1430 | ||
1431 | #endif /* CONFIG_EVENT_PROFILE */ | ||
1432 | |||
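The new CONFIG_EVENT_PROFILE block gives each perf event its own private event_filter, parsed once with the same machinery and hung off event->filter instead of the trace event's shared filter. A minimal userspace sketch of how such a filter is typically attached, assuming the PERF_EVENT_IOC_SET_FILTER ioctl and a tracepoint id that would normally be read from debugfs (error handling trimmed):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_TRACEPOINT;
		attr.size = sizeof(attr);
		attr.config = 42;	/* hypothetical tracepoint id, normally read
					 * from debugfs .../events/<sys>/<event>/id */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		/* Ends up in ftrace_profile_set_filter() for this event only. */
		if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
			perror("PERF_EVENT_IOC_SET_FILTER");

		close(fd);
		return 0;
	}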
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 9753fcc61bc5..dff8c84ddf17 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -48,11 +48,11 @@ | |||
48 | struct ____ftrace_##name { \ | 48 | struct ____ftrace_##name { \ |
49 | tstruct \ | 49 | tstruct \ |
50 | }; \ | 50 | }; \ |
51 | static void __used ____ftrace_check_##name(void) \ | 51 | static void __always_unused ____ftrace_check_##name(void) \ |
52 | { \ | 52 | { \ |
53 | struct ____ftrace_##name *__entry = NULL; \ | 53 | struct ____ftrace_##name *__entry = NULL; \ |
54 | \ | 54 | \ |
55 | /* force cmpile-time check on F_printk() */ \ | 55 | /* force compile-time check on F_printk() */ \ |
56 | printk(print); \ | 56 | printk(print); \ |
57 | } | 57 | } |
58 | 58 | ||
@@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \ | |||
66 | #undef __field | 66 | #undef __field |
67 | #define __field(type, item) \ | 67 | #define __field(type, item) \ |
68 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | 68 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ |
69 | "offset:%zu;\tsize:%zu;\n", \ | 69 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
70 | offsetof(typeof(field), item), \ | 70 | offsetof(typeof(field), item), \ |
71 | sizeof(field.item)); \ | 71 | sizeof(field.item), is_signed_type(type)); \ |
72 | if (!ret) \ | 72 | if (!ret) \ |
73 | return 0; | 73 | return 0; |
74 | 74 | ||
75 | #undef __field_desc | 75 | #undef __field_desc |
76 | #define __field_desc(type, container, item) \ | 76 | #define __field_desc(type, container, item) \ |
77 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | 77 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ |
78 | "offset:%zu;\tsize:%zu;\n", \ | 78 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
79 | offsetof(typeof(field), container.item), \ | 79 | offsetof(typeof(field), container.item), \ |
80 | sizeof(field.container.item)); \ | 80 | sizeof(field.container.item), \ |
81 | is_signed_type(type)); \ | ||
81 | if (!ret) \ | 82 | if (!ret) \ |
82 | return 0; | 83 | return 0; |
83 | 84 | ||
84 | #undef __array | 85 | #undef __array |
85 | #define __array(type, item, len) \ | 86 | #define __array(type, item, len) \ |
86 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | 87 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ |
87 | "offset:%zu;\tsize:%zu;\n", \ | 88 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
88 | offsetof(typeof(field), item), \ | 89 | offsetof(typeof(field), item), \ |
89 | sizeof(field.item)); \ | 90 | sizeof(field.item), is_signed_type(type)); \ |
90 | if (!ret) \ | 91 | if (!ret) \ |
91 | return 0; | 92 | return 0; |
92 | 93 | ||
93 | #undef __array_desc | 94 | #undef __array_desc |
94 | #define __array_desc(type, container, item, len) \ | 95 | #define __array_desc(type, container, item, len) \ |
95 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | 96 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ |
96 | "offset:%zu;\tsize:%zu;\n", \ | 97 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
97 | offsetof(typeof(field), container.item), \ | 98 | offsetof(typeof(field), container.item), \ |
98 | sizeof(field.container.item)); \ | 99 | sizeof(field.container.item), \ |
100 | is_signed_type(type)); \ | ||
99 | if (!ret) \ | 101 | if (!ret) \ |
100 | return 0; | 102 | return 0; |
101 | 103 | ||
102 | #undef __dynamic_array | 104 | #undef __dynamic_array |
103 | #define __dynamic_array(type, item) \ | 105 | #define __dynamic_array(type, item) \ |
104 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | 106 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ |
105 | "offset:%zu;\tsize:0;\n", \ | 107 | "offset:%zu;\tsize:0;\tsigned:%u;\n", \ |
106 | offsetof(typeof(field), item)); \ | 108 | offsetof(typeof(field), item), \ |
109 | is_signed_type(type)); \ | ||
107 | if (!ret) \ | 110 | if (!ret) \ |
108 | return 0; | 111 | return 0; |
109 | 112 | ||
@@ -131,7 +134,6 @@ ftrace_format_##name(struct ftrace_event_call *unused, \ | |||
131 | 134 | ||
132 | #include "trace_entries.h" | 135 | #include "trace_entries.h" |
133 | 136 | ||
134 | |||
135 | #undef __field | 137 | #undef __field |
136 | #define __field(type, item) \ | 138 | #define __field(type, item) \ |
137 | ret = trace_define_field(event_call, #type, #item, \ | 139 | ret = trace_define_field(event_call, #type, #item, \ |
@@ -193,6 +195,11 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
193 | 195 | ||
194 | #include "trace_entries.h" | 196 | #include "trace_entries.h" |
195 | 197 | ||
198 | static int ftrace_raw_init_event(struct ftrace_event_call *call) | ||
199 | { | ||
200 | INIT_LIST_HEAD(&call->fields); | ||
201 | return 0; | ||
202 | } | ||
196 | 203 | ||
197 | #undef __field | 204 | #undef __field |
198 | #define __field(type, item) | 205 | #define __field(type, item) |
@@ -211,7 +218,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
211 | 218 | ||
212 | #undef FTRACE_ENTRY | 219 | #undef FTRACE_ENTRY |
213 | #define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \ | 220 | #define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \ |
214 | static int ftrace_raw_init_event_##call(void); \ | ||
215 | \ | 221 | \ |
216 | struct ftrace_event_call __used \ | 222 | struct ftrace_event_call __used \ |
217 | __attribute__((__aligned__(4))) \ | 223 | __attribute__((__aligned__(4))) \ |
@@ -219,14 +225,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
219 | .name = #call, \ | 225 | .name = #call, \ |
220 | .id = type, \ | 226 | .id = type, \ |
221 | .system = __stringify(TRACE_SYSTEM), \ | 227 | .system = __stringify(TRACE_SYSTEM), \ |
222 | .raw_init = ftrace_raw_init_event_##call, \ | 228 | .raw_init = ftrace_raw_init_event, \ |
223 | .show_format = ftrace_format_##call, \ | 229 | .show_format = ftrace_format_##call, \ |
224 | .define_fields = ftrace_define_fields_##call, \ | 230 | .define_fields = ftrace_define_fields_##call, \ |
225 | }; \ | 231 | }; \ |
226 | static int ftrace_raw_init_event_##call(void) \ | ||
227 | { \ | ||
228 | INIT_LIST_HEAD(&event_##call.fields); \ | ||
229 | return 0; \ | ||
230 | } \ | ||
231 | 232 | ||
232 | #include "trace_entries.h" | 233 | #include "trace_entries.h" |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c new file mode 100644 index 000000000000..aff5f80b59b8 --- /dev/null +++ b/kernel/trace/trace_kprobe.c | |||
@@ -0,0 +1,1523 @@ | |||
1 | /* | ||
2 | * Kprobes-based tracing events | ||
3 | * | ||
4 | * Created by Masami Hiramatsu <mhiramat@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/kprobes.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/debugfs.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/ctype.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | #include <linux/perf_event.h> | ||
32 | |||
33 | #include "trace.h" | ||
34 | #include "trace_output.h" | ||
35 | |||
36 | #define MAX_TRACE_ARGS 128 | ||
37 | #define MAX_ARGSTR_LEN 63 | ||
38 | #define MAX_EVENT_NAME_LEN 64 | ||
39 | #define KPROBE_EVENT_SYSTEM "kprobes" | ||
40 | |||
41 | /* Reserved field names */ | ||
42 | #define FIELD_STRING_IP "__probe_ip" | ||
43 | #define FIELD_STRING_NARGS "__probe_nargs" | ||
44 | #define FIELD_STRING_RETIP "__probe_ret_ip" | ||
45 | #define FIELD_STRING_FUNC "__probe_func" | ||
46 | |||
47 | const char *reserved_field_names[] = { | ||
48 | "common_type", | ||
49 | "common_flags", | ||
50 | "common_preempt_count", | ||
51 | "common_pid", | ||
52 | "common_tgid", | ||
53 | "common_lock_depth", | ||
54 | FIELD_STRING_IP, | ||
55 | FIELD_STRING_NARGS, | ||
56 | FIELD_STRING_RETIP, | ||
57 | FIELD_STRING_FUNC, | ||
58 | }; | ||
59 | |||
60 | struct fetch_func { | ||
61 | unsigned long (*func)(struct pt_regs *, void *); | ||
62 | void *data; | ||
63 | }; | ||
64 | |||
65 | static __kprobes unsigned long call_fetch(struct fetch_func *f, | ||
66 | struct pt_regs *regs) | ||
67 | { | ||
68 | return f->func(regs, f->data); | ||
69 | } | ||
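struct fetch_func is a one-slot closure: a handler plus an opaque data word, so each probe argument records both how to fetch and what to fetch, and call_fetch() is the single dispatch point used from the probe handlers. A standalone sketch of the pattern (illustration only; the kernel handlers take a struct pt_regs context instead of void *):

	#include <stdio.h>

	struct fetch_func {
		unsigned long (*func)(void *ctx, void *data);
		void *data;
	};

	/* "data" carries the value itself, the way fetch_stack()/fetch_argument()
	 * carry an index and fetch_memory() carries an address. */
	static unsigned long fetch_const(void *ctx, void *data)
	{
		(void)ctx;
		return (unsigned long)data;
	}

	static unsigned long call_fetch(struct fetch_func *f, void *ctx)
	{
		return f->func(ctx, f->data);
	}

	int main(void)
	{
		struct fetch_func f = { fetch_const, (void *)42UL };

		printf("%lu\n", call_fetch(&f, NULL));	/* -> 42 */
		return 0;
	}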
70 | |||
71 | /* fetch handlers */ | ||
72 | static __kprobes unsigned long fetch_register(struct pt_regs *regs, | ||
73 | void *offset) | ||
74 | { | ||
75 | return regs_get_register(regs, (unsigned int)((unsigned long)offset)); | ||
76 | } | ||
77 | |||
78 | static __kprobes unsigned long fetch_stack(struct pt_regs *regs, | ||
79 | void *num) | ||
80 | { | ||
81 | return regs_get_kernel_stack_nth(regs, | ||
82 | (unsigned int)((unsigned long)num)); | ||
83 | } | ||
84 | |||
85 | static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr) | ||
86 | { | ||
87 | unsigned long retval; | ||
88 | |||
89 | if (probe_kernel_address(addr, retval)) | ||
90 | return 0; | ||
91 | return retval; | ||
92 | } | ||
93 | |||
94 | static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num) | ||
95 | { | ||
96 | return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num)); | ||
97 | } | ||
98 | |||
99 | static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, | ||
100 | void *dummy) | ||
101 | { | ||
102 | return regs_return_value(regs); | ||
103 | } | ||
104 | |||
105 | static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs, | ||
106 | void *dummy) | ||
107 | { | ||
108 | return kernel_stack_pointer(regs); | ||
109 | } | ||
110 | |||
111 | /* Memory fetching by symbol */ | ||
112 | struct symbol_cache { | ||
113 | char *symbol; | ||
114 | long offset; | ||
115 | unsigned long addr; | ||
116 | }; | ||
117 | |||
118 | static unsigned long update_symbol_cache(struct symbol_cache *sc) | ||
119 | { | ||
120 | sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); | ||
121 | if (sc->addr) | ||
122 | sc->addr += sc->offset; | ||
123 | return sc->addr; | ||
124 | } | ||
125 | |||
126 | static void free_symbol_cache(struct symbol_cache *sc) | ||
127 | { | ||
128 | kfree(sc->symbol); | ||
129 | kfree(sc); | ||
130 | } | ||
131 | |||
132 | static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) | ||
133 | { | ||
134 | struct symbol_cache *sc; | ||
135 | |||
136 | if (!sym || strlen(sym) == 0) | ||
137 | return NULL; | ||
138 | sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); | ||
139 | if (!sc) | ||
140 | return NULL; | ||
141 | |||
142 | sc->symbol = kstrdup(sym, GFP_KERNEL); | ||
143 | if (!sc->symbol) { | ||
144 | kfree(sc); | ||
145 | return NULL; | ||
146 | } | ||
147 | sc->offset = offset; | ||
148 | |||
149 | update_symbol_cache(sc); | ||
150 | return sc; | ||
151 | } | ||
152 | |||
153 | static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data) | ||
154 | { | ||
155 | struct symbol_cache *sc = data; | ||
156 | |||
157 | if (sc->addr) | ||
158 | return fetch_memory(regs, (void *)sc->addr); | ||
159 | else | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | /* Special indirect memory access interface */ | ||
164 | struct indirect_fetch_data { | ||
165 | struct fetch_func orig; | ||
166 | long offset; | ||
167 | }; | ||
168 | |||
169 | static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data) | ||
170 | { | ||
171 | struct indirect_fetch_data *ind = data; | ||
172 | unsigned long addr; | ||
173 | |||
174 | addr = call_fetch(&ind->orig, regs); | ||
175 | if (addr) { | ||
176 | addr += ind->offset; | ||
177 | return fetch_memory(regs, (void *)addr); | ||
178 | } else | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) | ||
183 | { | ||
184 | if (data->orig.func == fetch_indirect) | ||
185 | free_indirect_fetch_data(data->orig.data); | ||
186 | else if (data->orig.func == fetch_symbol) | ||
187 | free_symbol_cache(data->orig.data); | ||
188 | kfree(data); | ||
189 | } | ||
190 | |||
191 | /** | ||
192 | * Kprobe event core functions | ||
193 | */ | ||
194 | |||
195 | struct probe_arg { | ||
196 | struct fetch_func fetch; | ||
197 | const char *name; | ||
198 | }; | ||
199 | |||
200 | /* Flags for trace_probe */ | ||
201 | #define TP_FLAG_TRACE 1 | ||
202 | #define TP_FLAG_PROFILE 2 | ||
203 | |||
204 | struct trace_probe { | ||
205 | struct list_head list; | ||
206 | struct kretprobe rp; /* Use rp.kp for kprobe use */ | ||
207 | unsigned long nhit; | ||
208 | unsigned int flags; /* For TP_FLAG_* */ | ||
209 | const char *symbol; /* symbol name */ | ||
210 | struct ftrace_event_call call; | ||
211 | struct trace_event event; | ||
212 | unsigned int nr_args; | ||
213 | struct probe_arg args[]; | ||
214 | }; | ||
215 | |||
216 | #define SIZEOF_TRACE_PROBE(n) \ | ||
217 | (offsetof(struct trace_probe, args) + \ | ||
218 | (sizeof(struct probe_arg) * (n))) | ||
219 | |||
220 | static __kprobes int probe_is_return(struct trace_probe *tp) | ||
221 | { | ||
222 | return tp->rp.handler != NULL; | ||
223 | } | ||
224 | |||
225 | static __kprobes const char *probe_symbol(struct trace_probe *tp) | ||
226 | { | ||
227 | return tp->symbol ? tp->symbol : "unknown"; | ||
228 | } | ||
229 | |||
230 | static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) | ||
231 | { | ||
232 | int ret = -EINVAL; | ||
233 | |||
234 | if (ff->func == fetch_argument) | ||
235 | ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data); | ||
236 | else if (ff->func == fetch_register) { | ||
237 | const char *name; | ||
238 | name = regs_query_register_name((unsigned int)((long)ff->data)); | ||
239 | ret = snprintf(buf, n, "%%%s", name); | ||
240 | } else if (ff->func == fetch_stack) | ||
241 | ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data); | ||
242 | else if (ff->func == fetch_memory) | ||
243 | ret = snprintf(buf, n, "@0x%p", ff->data); | ||
244 | else if (ff->func == fetch_symbol) { | ||
245 | struct symbol_cache *sc = ff->data; | ||
246 | if (sc->offset) | ||
247 | ret = snprintf(buf, n, "@%s%+ld", sc->symbol, | ||
248 | sc->offset); | ||
249 | else | ||
250 | ret = snprintf(buf, n, "@%s", sc->symbol); | ||
251 | } else if (ff->func == fetch_retvalue) | ||
252 | ret = snprintf(buf, n, "$retval"); | ||
253 | else if (ff->func == fetch_stack_address) | ||
254 | ret = snprintf(buf, n, "$stack"); | ||
255 | else if (ff->func == fetch_indirect) { | ||
256 | struct indirect_fetch_data *id = ff->data; | ||
257 | size_t l = 0; | ||
258 | ret = snprintf(buf, n, "%+ld(", id->offset); | ||
259 | if (ret >= n) | ||
260 | goto end; | ||
261 | l += ret; | ||
262 | ret = probe_arg_string(buf + l, n - l, &id->orig); | ||
263 | if (ret < 0) | ||
264 | goto end; | ||
265 | l += ret; | ||
266 | ret = snprintf(buf + l, n - l, ")"); | ||
267 | ret += l; | ||
268 | } | ||
269 | end: | ||
270 | if (ret >= n) | ||
271 | return -ENOSPC; | ||
272 | return ret; | ||
273 | } | ||
274 | |||
275 | static int register_probe_event(struct trace_probe *tp); | ||
276 | static void unregister_probe_event(struct trace_probe *tp); | ||
277 | |||
278 | static DEFINE_MUTEX(probe_lock); | ||
279 | static LIST_HEAD(probe_list); | ||
280 | |||
281 | static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | ||
282 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | ||
283 | struct pt_regs *regs); | ||
284 | |||
285 | /* | ||
286 | * Allocate new trace_probe and initialize it (including kprobes). | ||
287 | */ | ||
288 | static struct trace_probe *alloc_trace_probe(const char *group, | ||
289 | const char *event, | ||
290 | void *addr, | ||
291 | const char *symbol, | ||
292 | unsigned long offs, | ||
293 | int nargs, int is_return) | ||
294 | { | ||
295 | struct trace_probe *tp; | ||
296 | |||
297 | tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); | ||
298 | if (!tp) | ||
299 | return ERR_PTR(-ENOMEM); | ||
300 | |||
301 | if (symbol) { | ||
302 | tp->symbol = kstrdup(symbol, GFP_KERNEL); | ||
303 | if (!tp->symbol) | ||
304 | goto error; | ||
305 | tp->rp.kp.symbol_name = tp->symbol; | ||
306 | tp->rp.kp.offset = offs; | ||
307 | } else | ||
308 | tp->rp.kp.addr = addr; | ||
309 | |||
310 | if (is_return) | ||
311 | tp->rp.handler = kretprobe_dispatcher; | ||
312 | else | ||
313 | tp->rp.kp.pre_handler = kprobe_dispatcher; | ||
314 | |||
315 | if (!event) | ||
316 | goto error; | ||
317 | tp->call.name = kstrdup(event, GFP_KERNEL); | ||
318 | if (!tp->call.name) | ||
319 | goto error; | ||
320 | |||
321 | if (!group) | ||
322 | goto error; | ||
323 | tp->call.system = kstrdup(group, GFP_KERNEL); | ||
324 | if (!tp->call.system) | ||
325 | goto error; | ||
326 | |||
327 | INIT_LIST_HEAD(&tp->list); | ||
328 | return tp; | ||
329 | error: | ||
330 | kfree(tp->call.name); | ||
331 | kfree(tp->symbol); | ||
332 | kfree(tp); | ||
333 | return ERR_PTR(-ENOMEM); | ||
334 | } | ||
335 | |||
336 | static void free_probe_arg(struct probe_arg *arg) | ||
337 | { | ||
338 | if (arg->fetch.func == fetch_symbol) | ||
339 | free_symbol_cache(arg->fetch.data); | ||
340 | else if (arg->fetch.func == fetch_indirect) | ||
341 | free_indirect_fetch_data(arg->fetch.data); | ||
342 | kfree(arg->name); | ||
343 | } | ||
344 | |||
345 | static void free_trace_probe(struct trace_probe *tp) | ||
346 | { | ||
347 | int i; | ||
348 | |||
349 | for (i = 0; i < tp->nr_args; i++) | ||
350 | free_probe_arg(&tp->args[i]); | ||
351 | |||
352 | kfree(tp->call.system); | ||
353 | kfree(tp->call.name); | ||
354 | kfree(tp->symbol); | ||
355 | kfree(tp); | ||
356 | } | ||
357 | |||
358 | static struct trace_probe *find_probe_event(const char *event, | ||
359 | const char *group) | ||
360 | { | ||
361 | struct trace_probe *tp; | ||
362 | |||
363 | list_for_each_entry(tp, &probe_list, list) | ||
364 | if (strcmp(tp->call.name, event) == 0 && | ||
365 | strcmp(tp->call.system, group) == 0) | ||
366 | return tp; | ||
367 | return NULL; | ||
368 | } | ||
369 | |||
370 | /* Unregister a trace_probe and probe_event: call with locking probe_lock */ | ||
371 | static void unregister_trace_probe(struct trace_probe *tp) | ||
372 | { | ||
373 | if (probe_is_return(tp)) | ||
374 | unregister_kretprobe(&tp->rp); | ||
375 | else | ||
376 | unregister_kprobe(&tp->rp.kp); | ||
377 | list_del(&tp->list); | ||
378 | unregister_probe_event(tp); | ||
379 | } | ||
380 | |||
381 | /* Register a trace_probe and probe_event */ | ||
382 | static int register_trace_probe(struct trace_probe *tp) | ||
383 | { | ||
384 | struct trace_probe *old_tp; | ||
385 | int ret; | ||
386 | |||
387 | mutex_lock(&probe_lock); | ||
388 | |||
389 | /* register as an event */ | ||
390 | old_tp = find_probe_event(tp->call.name, tp->call.system); | ||
391 | if (old_tp) { | ||
392 | /* delete old event */ | ||
393 | unregister_trace_probe(old_tp); | ||
394 | free_trace_probe(old_tp); | ||
395 | } | ||
396 | ret = register_probe_event(tp); | ||
397 | if (ret) { | ||
398 | pr_warning("Faild to register probe event(%d)\n", ret); | ||
399 | goto end; | ||
400 | } | ||
401 | |||
402 | tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; | ||
403 | if (probe_is_return(tp)) | ||
404 | ret = register_kretprobe(&tp->rp); | ||
405 | else | ||
406 | ret = register_kprobe(&tp->rp.kp); | ||
407 | |||
408 | if (ret) { | ||
409 | pr_warning("Could not insert probe(%d)\n", ret); | ||
410 | if (ret == -EILSEQ) { | ||
411 | pr_warning("Probing address(0x%p) is not an " | ||
412 | "instruction boundary.\n", | ||
413 | tp->rp.kp.addr); | ||
414 | ret = -EINVAL; | ||
415 | } | ||
416 | unregister_probe_event(tp); | ||
417 | } else | ||
418 | list_add_tail(&tp->list, &probe_list); | ||
419 | end: | ||
420 | mutex_unlock(&probe_lock); | ||
421 | return ret; | ||
422 | } | ||
423 | |||
424 | /* Split symbol and offset. */ | ||
425 | static int split_symbol_offset(char *symbol, unsigned long *offset) | ||
426 | { | ||
427 | char *tmp; | ||
428 | int ret; | ||
429 | |||
430 | if (!offset) | ||
431 | return -EINVAL; | ||
432 | |||
433 | tmp = strchr(symbol, '+'); | ||
434 | if (tmp) { | ||
435 | /* skip sign because strict_strtoul doesn't accept '+' */ | ||
436 | ret = strict_strtoul(tmp + 1, 0, offset); | ||
437 | if (ret) | ||
438 | return ret; | ||
439 | *tmp = '\0'; | ||
440 | } else | ||
441 | *offset = 0; | ||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | #define PARAM_MAX_ARGS 16 | ||
446 | #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) | ||
447 | |||
448 | static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) | ||
449 | { | ||
450 | int ret = 0; | ||
451 | unsigned long param; | ||
452 | |||
453 | if (strcmp(arg, "retval") == 0) { | ||
454 | if (is_return) { | ||
455 | ff->func = fetch_retvalue; | ||
456 | ff->data = NULL; | ||
457 | } else | ||
458 | ret = -EINVAL; | ||
459 | } else if (strncmp(arg, "stack", 5) == 0) { | ||
460 | if (arg[5] == '\0') { | ||
461 | ff->func = fetch_stack_address; | ||
462 | ff->data = NULL; | ||
463 | } else if (isdigit(arg[5])) { | ||
464 | ret = strict_strtoul(arg + 5, 10, ¶m); | ||
465 | if (ret || param > PARAM_MAX_STACK) | ||
466 | ret = -EINVAL; | ||
467 | else { | ||
468 | ff->func = fetch_stack; | ||
469 | ff->data = (void *)param; | ||
470 | } | ||
471 | } else | ||
472 | ret = -EINVAL; | ||
473 | } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) { | ||
474 | ret = strict_strtoul(arg + 3, 10, ¶m); | ||
475 | if (ret || param > PARAM_MAX_ARGS) | ||
476 | ret = -EINVAL; | ||
477 | else { | ||
478 | ff->func = fetch_argument; | ||
479 | ff->data = (void *)param; | ||
480 | } | ||
481 | } else | ||
482 | ret = -EINVAL; | ||
483 | return ret; | ||
484 | } | ||
485 | |||
486 | /* Recursive argument parser */ | ||
487 | static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | ||
488 | { | ||
489 | int ret = 0; | ||
490 | unsigned long param; | ||
491 | long offset; | ||
492 | char *tmp; | ||
493 | |||
494 | switch (arg[0]) { | ||
495 | case '$': | ||
496 | ret = parse_probe_vars(arg + 1, ff, is_return); | ||
497 | break; | ||
498 | case '%': /* named register */ | ||
499 | ret = regs_query_register_offset(arg + 1); | ||
500 | if (ret >= 0) { | ||
501 | ff->func = fetch_register; | ||
502 | ff->data = (void *)(unsigned long)ret; | ||
503 | ret = 0; | ||
504 | } | ||
505 | break; | ||
506 | case '@': /* memory or symbol */ | ||
507 | if (isdigit(arg[1])) { | ||
508 | ret = strict_strtoul(arg + 1, 0, ¶m); | ||
509 | if (ret) | ||
510 | break; | ||
511 | ff->func = fetch_memory; | ||
512 | ff->data = (void *)param; | ||
513 | } else { | ||
514 | ret = split_symbol_offset(arg + 1, &offset); | ||
515 | if (ret) | ||
516 | break; | ||
517 | ff->data = alloc_symbol_cache(arg + 1, offset); | ||
518 | if (ff->data) | ||
519 | ff->func = fetch_symbol; | ||
520 | else | ||
521 | ret = -EINVAL; | ||
522 | } | ||
523 | break; | ||
524 | case '+': /* indirect memory */ | ||
525 | case '-': | ||
526 | tmp = strchr(arg, '('); | ||
527 | if (!tmp) { | ||
528 | ret = -EINVAL; | ||
529 | break; | ||
530 | } | ||
531 | *tmp = '\0'; | ||
532 | ret = strict_strtol(arg + 1, 0, &offset); | ||
533 | if (ret) | ||
534 | break; | ||
535 | if (arg[0] == '-') | ||
536 | offset = -offset; | ||
537 | arg = tmp + 1; | ||
538 | tmp = strrchr(arg, ')'); | ||
539 | if (tmp) { | ||
540 | struct indirect_fetch_data *id; | ||
541 | *tmp = '\0'; | ||
542 | id = kzalloc(sizeof(struct indirect_fetch_data), | ||
543 | GFP_KERNEL); | ||
544 | if (!id) | ||
545 | return -ENOMEM; | ||
546 | id->offset = offset; | ||
547 | ret = __parse_probe_arg(arg, &id->orig, is_return); | ||
548 | if (ret) | ||
549 | kfree(id); | ||
550 | else { | ||
551 | ff->func = fetch_indirect; | ||
552 | ff->data = (void *)id; | ||
553 | } | ||
554 | } else | ||
555 | ret = -EINVAL; | ||
556 | break; | ||
557 | default: | ||
558 | /* TODO: support custom handler */ | ||
559 | ret = -EINVAL; | ||
560 | } | ||
561 | return ret; | ||
562 | } | ||
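Since the '+'/'-' case re-enters __parse_probe_arg() for whatever is inside the parentheses, indirect fetches nest: an argument such as +8(+0(%sp)) (illustrative; register names are per-architecture) first reads the word at the stack pointer and then reads the word 8 bytes past the fetched value, each level becoming one indirect_fetch_data chained through ->orig and later unwound recursively by free_indirect_fetch_data().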
563 | |||
564 | /* String length checking wrapper */ | ||
565 | static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) | ||
566 | { | ||
567 | if (strlen(arg) > MAX_ARGSTR_LEN) { | ||
568 | pr_info("Argument is too long.: %s\n", arg); | ||
569 | return -ENOSPC; | ||
570 | } | ||
571 | return __parse_probe_arg(arg, ff, is_return); | ||
572 | } | ||
573 | |||
574 | /* Return 1 if name is reserved or already used by another argument */ | ||
575 | static int conflict_field_name(const char *name, | ||
576 | struct probe_arg *args, int narg) | ||
577 | { | ||
578 | int i; | ||
579 | for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++) | ||
580 | if (strcmp(reserved_field_names[i], name) == 0) | ||
581 | return 1; | ||
582 | for (i = 0; i < narg; i++) | ||
583 | if (strcmp(args[i].name, name) == 0) | ||
584 | return 1; | ||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | static int create_trace_probe(int argc, char **argv) | ||
589 | { | ||
590 | /* | ||
591 | * Argument syntax: | ||
592 | * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] | ||
593 | * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] | ||
594 | * Fetch args: | ||
595 | * $argN : fetch Nth of function argument. (N:0-) | ||
596 | * $retval : fetch return value | ||
597 | * $stack : fetch stack address | ||
598 | * $stackN : fetch Nth of stack (N:0-) | ||
599 | * @ADDR : fetch memory at ADDR (ADDR should be in kernel) | ||
600 | * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) | ||
601 | * %REG : fetch register REG | ||
602 | * Indirect memory fetch: | ||
603 | * +|-offs(ARG) : fetch memory at ARG +|- offs address. | ||
604 | * Alias name of args: | ||
605 | * NAME=FETCHARG : set NAME as alias of FETCHARG. | ||
606 | */ | ||
607 | struct trace_probe *tp; | ||
608 | int i, ret = 0; | ||
609 | int is_return = 0; | ||
610 | char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL; | ||
611 | unsigned long offset = 0; | ||
612 | void *addr = NULL; | ||
613 | char buf[MAX_EVENT_NAME_LEN]; | ||
614 | |||
615 | if (argc < 2) { | ||
616 | pr_info("Probe point is not specified.\n"); | ||
617 | return -EINVAL; | ||
618 | } | ||
619 | |||
620 | if (argv[0][0] == 'p') | ||
621 | is_return = 0; | ||
622 | else if (argv[0][0] == 'r') | ||
623 | is_return = 1; | ||
624 | else { | ||
625 | pr_info("Probe definition must be started with 'p' or 'r'.\n"); | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | |||
629 | if (argv[0][1] == ':') { | ||
630 | event = &argv[0][2]; | ||
631 | if (strchr(event, '/')) { | ||
632 | group = event; | ||
633 | event = strchr(group, '/') + 1; | ||
634 | event[-1] = '\0'; | ||
635 | if (strlen(group) == 0) { | ||
636 | pr_info("Group name is not specifiled\n"); | ||
637 | return -EINVAL; | ||
638 | } | ||
639 | } | ||
640 | if (strlen(event) == 0) { | ||
641 | pr_info("Event name is not specifiled\n"); | ||
642 | return -EINVAL; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | if (isdigit(argv[1][0])) { | ||
647 | if (is_return) { | ||
648 | pr_info("Return probe point must be a symbol.\n"); | ||
649 | return -EINVAL; | ||
650 | } | ||
651 | /* an address specified */ | ||
652 | ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); | ||
653 | if (ret) { | ||
654 | pr_info("Failed to parse address.\n"); | ||
655 | return ret; | ||
656 | } | ||
657 | } else { | ||
658 | /* a symbol specified */ | ||
659 | symbol = argv[1]; | ||
660 | /* TODO: support .init module functions */ | ||
661 | ret = split_symbol_offset(symbol, &offset); | ||
662 | if (ret) { | ||
663 | pr_info("Failed to parse symbol.\n"); | ||
664 | return ret; | ||
665 | } | ||
666 | if (offset && is_return) { | ||
667 | pr_info("Return probe must be used without offset.\n"); | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | } | ||
671 | argc -= 2; argv += 2; | ||
672 | |||
673 | /* setup a probe */ | ||
674 | if (!group) | ||
675 | group = KPROBE_EVENT_SYSTEM; | ||
676 | if (!event) { | ||
677 | /* Make a new event name */ | ||
678 | if (symbol) | ||
679 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld", | ||
680 | is_return ? 'r' : 'p', symbol, offset); | ||
681 | else | ||
682 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p", | ||
683 | is_return ? 'r' : 'p', addr); | ||
684 | event = buf; | ||
685 | } | ||
686 | tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, | ||
687 | is_return); | ||
688 | if (IS_ERR(tp)) { | ||
689 | pr_info("Failed to allocate trace_probe.(%d)\n", | ||
690 | (int)PTR_ERR(tp)); | ||
691 | return PTR_ERR(tp); | ||
692 | } | ||
693 | |||
694 | /* parse arguments */ | ||
695 | ret = 0; | ||
696 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | ||
697 | /* Parse argument name */ | ||
698 | arg = strchr(argv[i], '='); | ||
699 | if (arg) | ||
700 | *arg++ = '\0'; | ||
701 | else | ||
702 | arg = argv[i]; | ||
703 | |||
704 | if (conflict_field_name(argv[i], tp->args, i)) { | ||
705 | pr_info("Argument%d name '%s' conflicts with " | ||
706 | "another field.\n", i, argv[i]); | ||
707 | ret = -EINVAL; | ||
708 | goto error; | ||
709 | } | ||
710 | |||
711 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | ||
712 | if (!tp->args[i].name) { | ||
713 | pr_info("Failed to allocate argument%d name '%s'.\n", | ||
714 | i, argv[i]); | ||
715 | ret = -ENOMEM; | ||
716 | goto error; | ||
717 | } | ||
718 | |||
719 | /* Parse fetch argument */ | ||
720 | ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); | ||
721 | if (ret) { | ||
722 | pr_info("Parse error at argument%d. (%d)\n", i, ret); | ||
723 | kfree(tp->args[i].name); | ||
724 | goto error; | ||
725 | } | ||
726 | |||
727 | tp->nr_args++; | ||
728 | } | ||
729 | |||
730 | ret = register_trace_probe(tp); | ||
731 | if (ret) | ||
732 | goto error; | ||
733 | return 0; | ||
734 | |||
735 | error: | ||
736 | free_trace_probe(tp); | ||
737 | return ret; | ||
738 | } | ||
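Putting the syntax together, a probe is defined with one line written to the kprobe_events control file (registered further down with kprobe_events_ops): for instance "p:myprobe do_sys_open dfd=$arg0 flags=$arg1" adds a kprobe with two named fetch arguments, and "r:myretprobe do_sys_open $retval" its return-probe counterpart (event names and symbol illustrative). As probes_open() below shows, opening the file for writing with O_TRUNC clears every existing probe first.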
739 | |||
740 | static void cleanup_all_probes(void) | ||
741 | { | ||
742 | struct trace_probe *tp; | ||
743 | |||
744 | mutex_lock(&probe_lock); | ||
745 | /* TODO: Use batch unregistration */ | ||
746 | while (!list_empty(&probe_list)) { | ||
747 | tp = list_entry(probe_list.next, struct trace_probe, list); | ||
748 | unregister_trace_probe(tp); | ||
749 | free_trace_probe(tp); | ||
750 | } | ||
751 | mutex_unlock(&probe_lock); | ||
752 | } | ||
753 | |||
754 | |||
755 | /* Probes listing interfaces */ | ||
756 | static void *probes_seq_start(struct seq_file *m, loff_t *pos) | ||
757 | { | ||
758 | mutex_lock(&probe_lock); | ||
759 | return seq_list_start(&probe_list, *pos); | ||
760 | } | ||
761 | |||
762 | static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
763 | { | ||
764 | return seq_list_next(v, &probe_list, pos); | ||
765 | } | ||
766 | |||
767 | static void probes_seq_stop(struct seq_file *m, void *v) | ||
768 | { | ||
769 | mutex_unlock(&probe_lock); | ||
770 | } | ||
771 | |||
772 | static int probes_seq_show(struct seq_file *m, void *v) | ||
773 | { | ||
774 | struct trace_probe *tp = v; | ||
775 | int i, ret; | ||
776 | char buf[MAX_ARGSTR_LEN + 1]; | ||
777 | |||
778 | seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); | ||
779 | seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); | ||
780 | |||
781 | if (!tp->symbol) | ||
782 | seq_printf(m, " 0x%p", tp->rp.kp.addr); | ||
783 | else if (tp->rp.kp.offset) | ||
784 | seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); | ||
785 | else | ||
786 | seq_printf(m, " %s", probe_symbol(tp)); | ||
787 | |||
788 | for (i = 0; i < tp->nr_args; i++) { | ||
789 | ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch); | ||
790 | if (ret < 0) { | ||
791 | pr_warning("Argument%d decoding error(%d).\n", i, ret); | ||
792 | return ret; | ||
793 | } | ||
794 | seq_printf(m, " %s=%s", tp->args[i].name, buf); | ||
795 | } | ||
796 | seq_printf(m, "\n"); | ||
797 | return 0; | ||
798 | } | ||
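The show routine emits each probe in the same syntax probes_write() accepts, so the listing round-trips; an entry looks roughly like (values illustrative):

	p:kprobes/myprobe do_sys_open dfd=$arg0 flags=$arg1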
799 | |||
800 | static const struct seq_operations probes_seq_op = { | ||
801 | .start = probes_seq_start, | ||
802 | .next = probes_seq_next, | ||
803 | .stop = probes_seq_stop, | ||
804 | .show = probes_seq_show | ||
805 | }; | ||
806 | |||
807 | static int probes_open(struct inode *inode, struct file *file) | ||
808 | { | ||
809 | if ((file->f_mode & FMODE_WRITE) && | ||
810 | (file->f_flags & O_TRUNC)) | ||
811 | cleanup_all_probes(); | ||
812 | |||
813 | return seq_open(file, &probes_seq_op); | ||
814 | } | ||
815 | |||
816 | static int command_trace_probe(const char *buf) | ||
817 | { | ||
818 | char **argv; | ||
819 | int argc = 0, ret = 0; | ||
820 | |||
821 | argv = argv_split(GFP_KERNEL, buf, &argc); | ||
822 | if (!argv) | ||
823 | return -ENOMEM; | ||
824 | |||
825 | if (argc) | ||
826 | ret = create_trace_probe(argc, argv); | ||
827 | |||
828 | argv_free(argv); | ||
829 | return ret; | ||
830 | } | ||
831 | |||
832 | #define WRITE_BUFSIZE 128 | ||
833 | |||
834 | static ssize_t probes_write(struct file *file, const char __user *buffer, | ||
835 | size_t count, loff_t *ppos) | ||
836 | { | ||
837 | char *kbuf, *tmp; | ||
838 | int ret; | ||
839 | size_t done; | ||
840 | size_t size; | ||
841 | |||
842 | kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); | ||
843 | if (!kbuf) | ||
844 | return -ENOMEM; | ||
845 | |||
846 | ret = done = 0; | ||
847 | while (done < count) { | ||
848 | size = count - done; | ||
849 | if (size >= WRITE_BUFSIZE) | ||
850 | size = WRITE_BUFSIZE - 1; | ||
851 | if (copy_from_user(kbuf, buffer + done, size)) { | ||
852 | ret = -EFAULT; | ||
853 | goto out; | ||
854 | } | ||
855 | kbuf[size] = '\0'; | ||
856 | tmp = strchr(kbuf, '\n'); | ||
857 | if (tmp) { | ||
858 | *tmp = '\0'; | ||
859 | size = tmp - kbuf + 1; | ||
860 | } else if (done + size < count) { | ||
861 | pr_warning("Line is too long: " | ||
862 | "should be less than %d.", WRITE_BUFSIZE); | ||
863 | ret = -EINVAL; | ||
864 | goto out; | ||
865 | } | ||
866 | done += size; | ||
867 | /* Remove comments */ | ||
868 | tmp = strchr(kbuf, '#'); | ||
869 | if (tmp) | ||
870 | *tmp = '\0'; | ||
871 | |||
872 | ret = command_trace_probe(kbuf); | ||
873 | if (ret) | ||
874 | goto out; | ||
875 | } | ||
876 | ret = done; | ||
877 | out: | ||
878 | kfree(kbuf); | ||
879 | return ret; | ||
880 | } | ||
881 | |||
882 | static const struct file_operations kprobe_events_ops = { | ||
883 | .owner = THIS_MODULE, | ||
884 | .open = probes_open, | ||
885 | .read = seq_read, | ||
886 | .llseek = seq_lseek, | ||
887 | .release = seq_release, | ||
888 | .write = probes_write, | ||
889 | }; | ||
890 | |||
891 | /* Probes profiling interfaces */ | ||
892 | static int probes_profile_seq_show(struct seq_file *m, void *v) | ||
893 | { | ||
894 | struct trace_probe *tp = v; | ||
895 | |||
896 | seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, | ||
897 | tp->rp.kp.nmissed); | ||
898 | |||
899 | return 0; | ||
900 | } | ||
901 | |||
902 | static const struct seq_operations profile_seq_op = { | ||
903 | .start = probes_seq_start, | ||
904 | .next = probes_seq_next, | ||
905 | .stop = probes_seq_stop, | ||
906 | .show = probes_profile_seq_show | ||
907 | }; | ||
908 | |||
909 | static int profile_open(struct inode *inode, struct file *file) | ||
910 | { | ||
911 | return seq_open(file, &profile_seq_op); | ||
912 | } | ||
913 | |||
914 | static const struct file_operations kprobe_profile_ops = { | ||
915 | .owner = THIS_MODULE, | ||
916 | .open = profile_open, | ||
917 | .read = seq_read, | ||
918 | .llseek = seq_lseek, | ||
919 | .release = seq_release, | ||
920 | }; | ||
921 | |||
922 | /* Kprobe handler */ | ||
923 | static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | ||
924 | { | ||
925 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | ||
926 | struct kprobe_trace_entry *entry; | ||
927 | struct ring_buffer_event *event; | ||
928 | struct ring_buffer *buffer; | ||
929 | int size, i, pc; | ||
930 | unsigned long irq_flags; | ||
931 | struct ftrace_event_call *call = &tp->call; | ||
932 | |||
933 | tp->nhit++; | ||
934 | |||
935 | local_save_flags(irq_flags); | ||
936 | pc = preempt_count(); | ||
937 | |||
938 | size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | ||
939 | |||
940 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, | ||
941 | irq_flags, pc); | ||
942 | if (!event) | ||
943 | return 0; | ||
944 | |||
945 | entry = ring_buffer_event_data(event); | ||
946 | entry->nargs = tp->nr_args; | ||
947 | entry->ip = (unsigned long)kp->addr; | ||
948 | for (i = 0; i < tp->nr_args; i++) | ||
949 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | ||
950 | |||
951 | if (!filter_current_check_discard(buffer, call, entry, event)) | ||
952 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | ||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | /* Kretprobe handler */ | ||
957 | static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, | ||
958 | struct pt_regs *regs) | ||
959 | { | ||
960 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | ||
961 | struct kretprobe_trace_entry *entry; | ||
962 | struct ring_buffer_event *event; | ||
963 | struct ring_buffer *buffer; | ||
964 | int size, i, pc; | ||
965 | unsigned long irq_flags; | ||
966 | struct ftrace_event_call *call = &tp->call; | ||
967 | |||
968 | local_save_flags(irq_flags); | ||
969 | pc = preempt_count(); | ||
970 | |||
971 | size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | ||
972 | |||
973 | event = trace_current_buffer_lock_reserve(&buffer, call->id, size, | ||
974 | irq_flags, pc); | ||
975 | if (!event) | ||
976 | return 0; | ||
977 | |||
978 | entry = ring_buffer_event_data(event); | ||
979 | entry->nargs = tp->nr_args; | ||
980 | entry->func = (unsigned long)tp->rp.kp.addr; | ||
981 | entry->ret_ip = (unsigned long)ri->ret_addr; | ||
982 | for (i = 0; i < tp->nr_args; i++) | ||
983 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | ||
984 | |||
985 | if (!filter_current_check_discard(buffer, call, entry, event)) | ||
986 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | ||
987 | |||
988 | return 0; | ||
989 | } | ||
990 | |||
991 | /* Event entry printers */ | ||
992 | enum print_line_t | ||
993 | print_kprobe_event(struct trace_iterator *iter, int flags) | ||
994 | { | ||
995 | struct kprobe_trace_entry *field; | ||
996 | struct trace_seq *s = &iter->seq; | ||
997 | struct trace_event *event; | ||
998 | struct trace_probe *tp; | ||
999 | int i; | ||
1000 | |||
1001 | field = (struct kprobe_trace_entry *)iter->ent; | ||
1002 | event = ftrace_find_event(field->ent.type); | ||
1003 | tp = container_of(event, struct trace_probe, event); | ||
1004 | |||
1005 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | ||
1006 | goto partial; | ||
1007 | |||
1008 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | ||
1009 | goto partial; | ||
1010 | |||
1011 | if (!trace_seq_puts(s, ")")) | ||
1012 | goto partial; | ||
1013 | |||
1014 | for (i = 0; i < field->nargs; i++) | ||
1015 | if (!trace_seq_printf(s, " %s=%lx", | ||
1016 | tp->args[i].name, field->args[i])) | ||
1017 | goto partial; | ||
1018 | |||
1019 | if (!trace_seq_puts(s, "\n")) | ||
1020 | goto partial; | ||
1021 | |||
1022 | return TRACE_TYPE_HANDLED; | ||
1023 | partial: | ||
1024 | return TRACE_TYPE_PARTIAL_LINE; | ||
1025 | } | ||
1026 | |||
1027 | enum print_line_t | ||
1028 | print_kretprobe_event(struct trace_iterator *iter, int flags) | ||
1029 | { | ||
1030 | struct kretprobe_trace_entry *field; | ||
1031 | struct trace_seq *s = &iter->seq; | ||
1032 | struct trace_event *event; | ||
1033 | struct trace_probe *tp; | ||
1034 | int i; | ||
1035 | |||
1036 | field = (struct kretprobe_trace_entry *)iter->ent; | ||
1037 | event = ftrace_find_event(field->ent.type); | ||
1038 | tp = container_of(event, struct trace_probe, event); | ||
1039 | |||
1040 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | ||
1041 | goto partial; | ||
1042 | |||
1043 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | ||
1044 | goto partial; | ||
1045 | |||
1046 | if (!trace_seq_puts(s, " <- ")) | ||
1047 | goto partial; | ||
1048 | |||
1049 | if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) | ||
1050 | goto partial; | ||
1051 | |||
1052 | if (!trace_seq_puts(s, ")")) | ||
1053 | goto partial; | ||
1054 | |||
1055 | for (i = 0; i < field->nargs; i++) | ||
1056 | if (!trace_seq_printf(s, " %s=%lx", | ||
1057 | tp->args[i].name, field->args[i])) | ||
1058 | goto partial; | ||
1059 | |||
1060 | if (!trace_seq_puts(s, "\n")) | ||
1061 | goto partial; | ||
1062 | |||
1063 | return TRACE_TYPE_HANDLED; | ||
1064 | partial: | ||
1065 | return TRACE_TYPE_PARTIAL_LINE; | ||
1066 | } | ||
1067 | |||
1068 | static int probe_event_enable(struct ftrace_event_call *call) | ||
1069 | { | ||
1070 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1071 | |||
1072 | tp->flags |= TP_FLAG_TRACE; | ||
1073 | if (probe_is_return(tp)) | ||
1074 | return enable_kretprobe(&tp->rp); | ||
1075 | else | ||
1076 | return enable_kprobe(&tp->rp.kp); | ||
1077 | } | ||
1078 | |||
1079 | static void probe_event_disable(struct ftrace_event_call *call) | ||
1080 | { | ||
1081 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1082 | |||
1083 | tp->flags &= ~TP_FLAG_TRACE; | ||
1084 | if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) { | ||
1085 | if (probe_is_return(tp)) | ||
1086 | disable_kretprobe(&tp->rp); | ||
1087 | else | ||
1088 | disable_kprobe(&tp->rp.kp); | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | static int probe_event_raw_init(struct ftrace_event_call *event_call) | ||
1093 | { | ||
1094 | INIT_LIST_HEAD(&event_call->fields); | ||
1095 | |||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | #undef DEFINE_FIELD | ||
1100 | #define DEFINE_FIELD(type, item, name, is_signed) \ | ||
1101 | do { \ | ||
1102 | ret = trace_define_field(event_call, #type, name, \ | ||
1103 | offsetof(typeof(field), item), \ | ||
1104 | sizeof(field.item), is_signed, \ | ||
1105 | FILTER_OTHER); \ | ||
1106 | if (ret) \ | ||
1107 | return ret; \ | ||
1108 | } while (0) | ||
1109 | |||
1110 | static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | ||
1111 | { | ||
1112 | int ret, i; | ||
1113 | struct kprobe_trace_entry field; | ||
1114 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | ||
1115 | |||
1116 | ret = trace_define_common_fields(event_call); | ||
1117 | if (ret) | ||
1118 | return ret; | ||
1119 | |||
1120 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | ||
1121 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | ||
1122 | /* Set argument names as fields */ | ||
1123 | for (i = 0; i < tp->nr_args; i++) | ||
1124 | DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); | ||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | ||
1129 | { | ||
1130 | int ret, i; | ||
1131 | struct kretprobe_trace_entry field; | ||
1132 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | ||
1133 | |||
1134 | ret = trace_define_common_fields(event_call); | ||
1135 | if (ret) | ||
1136 | return ret; | ||
1137 | |||
1138 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | ||
1139 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | ||
1140 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | ||
1141 | /* Set argument names as fields */ | ||
1142 | for (i = 0; i < tp->nr_args; i++) | ||
1143 | DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); | ||
1144 | return 0; | ||
1145 | } | ||
1146 | |||
1147 | static int __probe_event_show_format(struct trace_seq *s, | ||
1148 | struct trace_probe *tp, const char *fmt, | ||
1149 | const char *arg) | ||
1150 | { | ||
1151 | int i; | ||
1152 | |||
1153 | /* Show format */ | ||
1154 | if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt)) | ||
1155 | return 0; | ||
1156 | |||
1157 | for (i = 0; i < tp->nr_args; i++) | ||
1158 | if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name)) | ||
1159 | return 0; | ||
1160 | |||
1161 | if (!trace_seq_printf(s, "\", %s", arg)) | ||
1162 | return 0; | ||
1163 | |||
1164 | for (i = 0; i < tp->nr_args; i++) | ||
1165 | if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name)) | ||
1166 | return 0; | ||
1167 | |||
1168 | return trace_seq_puts(s, "\n"); | ||
1169 | } | ||
1170 | |||
1171 | #undef SHOW_FIELD | ||
1172 | #define SHOW_FIELD(type, item, name) \ | ||
1173 | do { \ | ||
1174 | ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \ | ||
1175 | "offset:%u;\tsize:%u;\n", name, \ | ||
1176 | (unsigned int)offsetof(typeof(field), item),\ | ||
1177 | (unsigned int)sizeof(type)); \ | ||
1178 | if (!ret) \ | ||
1179 | return 0; \ | ||
1180 | } while (0) | ||
1181 | |||
1182 | static int kprobe_event_show_format(struct ftrace_event_call *call, | ||
1183 | struct trace_seq *s) | ||
1184 | { | ||
1185 | struct kprobe_trace_entry field __attribute__((unused)); | ||
1186 | int ret, i; | ||
1187 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1188 | |||
1189 | SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP); | ||
1190 | SHOW_FIELD(int, nargs, FIELD_STRING_NARGS); | ||
1191 | |||
1192 | /* Show fields */ | ||
1193 | for (i = 0; i < tp->nr_args; i++) | ||
1194 | SHOW_FIELD(unsigned long, args[i], tp->args[i].name); | ||
1195 | trace_seq_puts(s, "\n"); | ||
1196 | |||
1197 | return __probe_event_show_format(s, tp, "(%lx)", | ||
1198 | "REC->" FIELD_STRING_IP); | ||
1199 | } | ||
1200 | |||
1201 | static int kretprobe_event_show_format(struct ftrace_event_call *call, | ||
1202 | struct trace_seq *s) | ||
1203 | { | ||
1204 | struct kretprobe_trace_entry field __attribute__((unused)); | ||
1205 | int ret, i; | ||
1206 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1207 | |||
1208 | SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC); | ||
1209 | SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP); | ||
1210 | SHOW_FIELD(int, nargs, FIELD_STRING_NARGS); | ||
1211 | |||
1212 | /* Show fields */ | ||
1213 | for (i = 0; i < tp->nr_args; i++) | ||
1214 | SHOW_FIELD(unsigned long, args[i], tp->args[i].name); | ||
1215 | trace_seq_puts(s, "\n"); | ||
1216 | |||
1217 | return __probe_event_show_format(s, tp, "(%lx <- %lx)", | ||
1218 | "REC->" FIELD_STRING_FUNC | ||
1219 | ", REC->" FIELD_STRING_RETIP); | ||
1220 | } | ||
1221 | |||
1222 | #ifdef CONFIG_EVENT_PROFILE | ||
1223 | |||
1224 | /* Kprobe profile handler */ | ||
1225 | static __kprobes int kprobe_profile_func(struct kprobe *kp, | ||
1226 | struct pt_regs *regs) | ||
1227 | { | ||
1228 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | ||
1229 | struct ftrace_event_call *call = &tp->call; | ||
1230 | struct kprobe_trace_entry *entry; | ||
1231 | struct trace_entry *ent; | ||
1232 | int size, __size, i, pc, __cpu; | ||
1233 | unsigned long irq_flags; | ||
1234 | char *trace_buf; | ||
1235 | char *raw_data; | ||
1236 | int rctx; | ||
1237 | |||
1238 | pc = preempt_count(); | ||
1239 | __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | ||
1240 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | ||
1241 | size -= sizeof(u32); | ||
1242 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | ||
1243 | "profile buffer not large enough")) | ||
1244 | return 0; | ||
1245 | |||
1246 | /* | ||
1247 | * Protect the non-NMI buffer. | ||
1248 | * This also protects the RCU read side. | ||
1249 | */ | ||
1250 | local_irq_save(irq_flags); | ||
1251 | |||
1252 | rctx = perf_swevent_get_recursion_context(); | ||
1253 | if (rctx < 0) | ||
1254 | goto end_recursion; | ||
1255 | |||
1256 | __cpu = smp_processor_id(); | ||
1257 | |||
1258 | if (in_nmi()) | ||
1259 | trace_buf = rcu_dereference(perf_trace_buf_nmi); | ||
1260 | else | ||
1261 | trace_buf = rcu_dereference(perf_trace_buf); | ||
1262 | |||
1263 | if (!trace_buf) | ||
1264 | goto end; | ||
1265 | |||
1266 | raw_data = per_cpu_ptr(trace_buf, __cpu); | ||
1267 | |||
1268 | /* Zero dead bytes from alignment to avoid buffer leak to userspace */ | ||
1269 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | ||
1270 | entry = (struct kprobe_trace_entry *)raw_data; | ||
1271 | ent = &entry->ent; | ||
1272 | |||
1273 | tracing_generic_entry_update(ent, irq_flags, pc); | ||
1274 | ent->type = call->id; | ||
1275 | entry->nargs = tp->nr_args; | ||
1276 | entry->ip = (unsigned long)kp->addr; | ||
1277 | for (i = 0; i < tp->nr_args; i++) | ||
1278 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | ||
1279 | perf_tp_event(call->id, entry->ip, 1, entry, size); | ||
1280 | |||
1281 | end: | ||
1282 | perf_swevent_put_recursion_context(rctx); | ||
1283 | end_recursion: | ||
1284 | local_irq_restore(irq_flags); | ||
1285 | |||
1286 | return 0; | ||
1287 | } | ||
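
The size arithmetic in the profile handlers above is easy to misread: perf prepends a u32 size header to each raw record, so the record length is rounded up so that header plus record stays u64-aligned, and the handler then zeroes the trailing u64 of the buffer because the pad bytes would otherwise leak kernel stack to user space. A minimal user-space sketch of the same rounding, using a hypothetical raw entry size (the real __size depends on the probe's argument count; a typical LP64 ABI is assumed):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* hypothetical SIZEOF_KPROBE_TRACE_ENTRY() result for some nr_args */
	unsigned int entry_size = 42;
	/* same rounding as kprobe_profile_func(): keep (u32 header + record)
	 * u64-aligned, then drop the header size again */
	unsigned int size = ALIGN(entry_size + sizeof(unsigned int),
				  sizeof(unsigned long long)) - sizeof(unsigned int);

	/* prints: entry_size=42 size=44 pad=2 -- those two pad bytes are why
	 * the handlers zero the last u64 of raw_data before filling it */
	printf("entry_size=%u size=%u pad=%u\n", entry_size, size, size - entry_size);
	return 0;
}
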
1288 | |||
1289 | /* Kretprobe profile handler */ | ||
1290 | static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, | ||
1291 | struct pt_regs *regs) | ||
1292 | { | ||
1293 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | ||
1294 | struct ftrace_event_call *call = &tp->call; | ||
1295 | struct kretprobe_trace_entry *entry; | ||
1296 | struct trace_entry *ent; | ||
1297 | int size, __size, i, pc, __cpu; | ||
1298 | unsigned long irq_flags; | ||
1299 | char *trace_buf; | ||
1300 | char *raw_data; | ||
1301 | int rctx; | ||
1302 | |||
1303 | pc = preempt_count(); | ||
1304 | __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | ||
1305 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | ||
1306 | size -= sizeof(u32); | ||
1307 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | ||
1308 | "profile buffer not large enough")) | ||
1309 | return 0; | ||
1310 | |||
1311 | /* | ||
1312 | * Protect the non-NMI buffer. | ||
1313 | * This also protects the RCU read side. | ||
1314 | */ | ||
1315 | local_irq_save(irq_flags); | ||
1316 | |||
1317 | rctx = perf_swevent_get_recursion_context(); | ||
1318 | if (rctx < 0) | ||
1319 | goto end_recursion; | ||
1320 | |||
1321 | __cpu = smp_processor_id(); | ||
1322 | |||
1323 | if (in_nmi()) | ||
1324 | trace_buf = rcu_dereference(perf_trace_buf_nmi); | ||
1325 | else | ||
1326 | trace_buf = rcu_dereference(perf_trace_buf); | ||
1327 | |||
1328 | if (!trace_buf) | ||
1329 | goto end; | ||
1330 | |||
1331 | raw_data = per_cpu_ptr(trace_buf, __cpu); | ||
1332 | |||
1333 | /* Zero dead bytes from alignment to avoid buffer leak to userspace */ | ||
1334 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | ||
1335 | entry = (struct kretprobe_trace_entry *)raw_data; | ||
1336 | ent = &entry->ent; | ||
1337 | |||
1338 | tracing_generic_entry_update(ent, irq_flags, pc); | ||
1339 | ent->type = call->id; | ||
1340 | entry->nargs = tp->nr_args; | ||
1341 | entry->func = (unsigned long)tp->rp.kp.addr; | ||
1342 | entry->ret_ip = (unsigned long)ri->ret_addr; | ||
1343 | for (i = 0; i < tp->nr_args; i++) | ||
1344 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | ||
1345 | perf_tp_event(call->id, entry->ret_ip, 1, entry, size); | ||
1346 | |||
1347 | end: | ||
1348 | perf_swevent_put_recursion_context(rctx); | ||
1349 | end_recursion: | ||
1350 | local_irq_restore(irq_flags); | ||
1351 | |||
1352 | return 0; | ||
1353 | } | ||
1354 | |||
1355 | static int probe_profile_enable(struct ftrace_event_call *call) | ||
1356 | { | ||
1357 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1358 | |||
1359 | tp->flags |= TP_FLAG_PROFILE; | ||
1360 | |||
1361 | if (probe_is_return(tp)) | ||
1362 | return enable_kretprobe(&tp->rp); | ||
1363 | else | ||
1364 | return enable_kprobe(&tp->rp.kp); | ||
1365 | } | ||
1366 | |||
1367 | static void probe_profile_disable(struct ftrace_event_call *call) | ||
1368 | { | ||
1369 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1370 | |||
1371 | tp->flags &= ~TP_FLAG_PROFILE; | ||
1372 | |||
1373 | if (!(tp->flags & TP_FLAG_TRACE)) { | ||
1374 | if (probe_is_return(tp)) | ||
1375 | disable_kretprobe(&tp->rp); | ||
1376 | else | ||
1377 | disable_kprobe(&tp->rp.kp); | ||
1378 | } | ||
1379 | } | ||
1380 | #endif /* CONFIG_EVENT_PROFILE */ | ||
1381 | |||
1382 | |||
1383 | static __kprobes | ||
1384 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | ||
1385 | { | ||
1386 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | ||
1387 | |||
1388 | if (tp->flags & TP_FLAG_TRACE) | ||
1389 | kprobe_trace_func(kp, regs); | ||
1390 | #ifdef CONFIG_EVENT_PROFILE | ||
1391 | if (tp->flags & TP_FLAG_PROFILE) | ||
1392 | kprobe_profile_func(kp, regs); | ||
1393 | #endif /* CONFIG_EVENT_PROFILE */ | ||
1394 | return 0; /* We don't tweak the kernel, so just return 0 */ | ||
1395 | } | ||
1396 | |||
1397 | static __kprobes | ||
1398 | int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) | ||
1399 | { | ||
1400 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | ||
1401 | |||
1402 | if (tp->flags & TP_FLAG_TRACE) | ||
1403 | kretprobe_trace_func(ri, regs); | ||
1404 | #ifdef CONFIG_EVENT_PROFILE | ||
1405 | if (tp->flags & TP_FLAG_PROFILE) | ||
1406 | kretprobe_profile_func(ri, regs); | ||
1407 | #endif /* CONFIG_EVENT_PROFILE */ | ||
1408 | return 0; /* We don't tweak the kernel, so just return 0 */ | ||
1409 | } | ||
1410 | |||
1411 | static int register_probe_event(struct trace_probe *tp) | ||
1412 | { | ||
1413 | struct ftrace_event_call *call = &tp->call; | ||
1414 | int ret; | ||
1415 | |||
1416 | /* Initialize ftrace_event_call */ | ||
1417 | if (probe_is_return(tp)) { | ||
1418 | tp->event.trace = print_kretprobe_event; | ||
1419 | call->raw_init = probe_event_raw_init; | ||
1420 | call->show_format = kretprobe_event_show_format; | ||
1421 | call->define_fields = kretprobe_event_define_fields; | ||
1422 | } else { | ||
1423 | tp->event.trace = print_kprobe_event; | ||
1424 | call->raw_init = probe_event_raw_init; | ||
1425 | call->show_format = kprobe_event_show_format; | ||
1426 | call->define_fields = kprobe_event_define_fields; | ||
1427 | } | ||
1428 | call->event = &tp->event; | ||
1429 | call->id = register_ftrace_event(&tp->event); | ||
1430 | if (!call->id) | ||
1431 | return -ENODEV; | ||
1432 | call->enabled = 0; | ||
1433 | call->regfunc = probe_event_enable; | ||
1434 | call->unregfunc = probe_event_disable; | ||
1435 | |||
1436 | #ifdef CONFIG_EVENT_PROFILE | ||
1437 | atomic_set(&call->profile_count, -1); | ||
1438 | call->profile_enable = probe_profile_enable; | ||
1439 | call->profile_disable = probe_profile_disable; | ||
1440 | #endif | ||
1441 | call->data = tp; | ||
1442 | ret = trace_add_event_call(call); | ||
1443 | if (ret) { | ||
1444 | pr_info("Failed to register kprobe event: %s\n", call->name); | ||
1445 | unregister_ftrace_event(&tp->event); | ||
1446 | } | ||
1447 | return ret; | ||
1448 | } | ||
1449 | |||
1450 | static void unregister_probe_event(struct trace_probe *tp) | ||
1451 | { | ||
1452 | /* tp->event is unregistered in trace_remove_event_call() */ | ||
1453 | trace_remove_event_call(&tp->call); | ||
1454 | } | ||
1455 | |||
1456 | /* Make a debugfs interface for controlling probe points */ | ||
1457 | static __init int init_kprobe_trace(void) | ||
1458 | { | ||
1459 | struct dentry *d_tracer; | ||
1460 | struct dentry *entry; | ||
1461 | |||
1462 | d_tracer = tracing_init_dentry(); | ||
1463 | if (!d_tracer) | ||
1464 | return 0; | ||
1465 | |||
1466 | entry = debugfs_create_file("kprobe_events", 0644, d_tracer, | ||
1467 | NULL, &kprobe_events_ops); | ||
1468 | |||
1469 | /* Event list interface */ | ||
1470 | if (!entry) | ||
1471 | pr_warning("Could not create debugfs " | ||
1472 | "'kprobe_events' entry\n"); | ||
1473 | |||
1474 | /* Profile interface */ | ||
1475 | entry = debugfs_create_file("kprobe_profile", 0444, d_tracer, | ||
1476 | NULL, &kprobe_profile_ops); | ||
1477 | |||
1478 | if (!entry) | ||
1479 | pr_warning("Could not create debugfs " | ||
1480 | "'kprobe_profile' entry\n"); | ||
1481 | return 0; | ||
1482 | } | ||
1483 | fs_initcall(init_kprobe_trace); | ||
1484 | |||
1485 | |||
1486 | #ifdef CONFIG_FTRACE_STARTUP_TEST | ||
1487 | |||
1488 | static int kprobe_trace_selftest_target(int a1, int a2, int a3, | ||
1489 | int a4, int a5, int a6) | ||
1490 | { | ||
1491 | return a1 + a2 + a3 + a4 + a5 + a6; | ||
1492 | } | ||
1493 | |||
1494 | static __init int kprobe_trace_self_tests_init(void) | ||
1495 | { | ||
1496 | int ret; | ||
1497 | int (*target)(int, int, int, int, int, int); | ||
1498 | |||
1499 | target = kprobe_trace_selftest_target; | ||
1500 | |||
1501 | pr_info("Testing kprobe tracing: "); | ||
1502 | |||
1503 | ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " | ||
1504 | "$arg1 $arg2 $arg3 $arg4 $stack $stack0"); | ||
1505 | if (WARN_ON_ONCE(ret)) | ||
1506 | pr_warning("error enabling function entry\n"); | ||
1507 | |||
1508 | ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " | ||
1509 | "$retval"); | ||
1510 | if (WARN_ON_ONCE(ret)) | ||
1511 | pr_warning("error enabling function return\n"); | ||
1512 | |||
1513 | ret = target(1, 2, 3, 4, 5, 6); | ||
1514 | |||
1515 | cleanup_all_probes(); | ||
1516 | |||
1517 | pr_cont("OK\n"); | ||
1518 | return 0; | ||
1519 | } | ||
1520 | |||
1521 | late_initcall(kprobe_trace_self_tests_init); | ||
1522 | |||
1523 | #endif | ||
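
For context, the sketch below shows how the new kprobe_events control file is intended to be driven once this code is in place. It is illustrative only and not part of the patch: it assumes debugfs is mounted at /sys/kernel/debug, and the probe names and probed symbol (myprobe, myretprobe, do_sys_open) are example choices; the fetch arguments use the same $argN/$retval syntax the selftest above exercises.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/kprobe_events";
	/* one definition per line; '#' starts a comment, matching the way
	 * probes_write() splits and strips its input */
	const char *cmd =
		"p:myprobe do_sys_open $arg1 $arg3\n"
		"r:myretprobe do_sys_open $retval   # matching return probe\n";
	int fd = open(path, O_WRONLY | O_APPEND);

	if (fd < 0) {
		perror("open kprobe_events");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write kprobe_events");
	close(fd);

	/* Opening the file for writing with O_TRUNC (shell: > kprobe_events)
	 * clears every probe via cleanup_all_probes() in probes_open(). */
	return 0;
}
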
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c new file mode 100644 index 000000000000..ddfa0fd43bc0 --- /dev/null +++ b/kernel/trace/trace_ksym.c | |||
@@ -0,0 +1,550 @@ | |||
1 | /* | ||
2 | * trace_ksym.c - Kernel Symbol Tracer | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2009 | ||
19 | */ | ||
20 | |||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/ftrace.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/fs.h> | ||
27 | |||
28 | #include "trace_output.h" | ||
29 | #include "trace_stat.h" | ||
30 | #include "trace.h" | ||
31 | |||
32 | #include <linux/hw_breakpoint.h> | ||
33 | #include <asm/hw_breakpoint.h> | ||
34 | |||
35 | /* | ||
36 | * For now, restrict the number of symbols traced simultaneously to the | ||
37 | * number of available hardware breakpoint registers. | ||
38 | */ | ||
39 | #define KSYM_TRACER_MAX HBP_NUM | ||
40 | |||
41 | #define KSYM_TRACER_OP_LEN 3 /* rw- */ | ||
42 | |||
43 | struct trace_ksym { | ||
44 | struct perf_event **ksym_hbp; | ||
45 | struct perf_event_attr attr; | ||
46 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
47 | unsigned long counter; | ||
48 | #endif | ||
49 | struct hlist_node ksym_hlist; | ||
50 | }; | ||
51 | |||
52 | static struct trace_array *ksym_trace_array; | ||
53 | |||
54 | static unsigned int ksym_filter_entry_count; | ||
55 | static unsigned int ksym_tracing_enabled; | ||
56 | |||
57 | static HLIST_HEAD(ksym_filter_head); | ||
58 | |||
59 | static DEFINE_MUTEX(ksym_tracer_mutex); | ||
60 | |||
61 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
62 | |||
63 | #define MAX_UL_INT 0xffffffff | ||
64 | |||
65 | void ksym_collect_stats(unsigned long hbp_hit_addr) | ||
66 | { | ||
67 | struct hlist_node *node; | ||
68 | struct trace_ksym *entry; | ||
69 | |||
70 | rcu_read_lock(); | ||
71 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | ||
72 | if ((entry->attr.bp_addr == hbp_hit_addr) && | ||
73 | (entry->counter <= MAX_UL_INT)) { | ||
74 | entry->counter++; | ||
75 | break; | ||
76 | } | ||
77 | } | ||
78 | rcu_read_unlock(); | ||
79 | } | ||
80 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
81 | |||
82 | void ksym_hbp_handler(struct perf_event *hbp, void *data) | ||
83 | { | ||
84 | struct ring_buffer_event *event; | ||
85 | struct ksym_trace_entry *entry; | ||
86 | struct pt_regs *regs = data; | ||
87 | struct ring_buffer *buffer; | ||
88 | int pc; | ||
89 | |||
90 | if (!ksym_tracing_enabled) | ||
91 | return; | ||
92 | |||
93 | buffer = ksym_trace_array->buffer; | ||
94 | |||
95 | pc = preempt_count(); | ||
96 | |||
97 | event = trace_buffer_lock_reserve(buffer, TRACE_KSYM, | ||
98 | sizeof(*entry), 0, pc); | ||
99 | if (!event) | ||
100 | return; | ||
101 | |||
102 | entry = ring_buffer_event_data(event); | ||
103 | entry->ip = instruction_pointer(regs); | ||
104 | entry->type = hw_breakpoint_type(hbp); | ||
105 | entry->addr = hw_breakpoint_addr(hbp); | ||
106 | strlcpy(entry->cmd, current->comm, TASK_COMM_LEN); | ||
107 | |||
108 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
109 | ksym_collect_stats(hw_breakpoint_addr(hbp)); | ||
110 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
111 | |||
112 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
113 | } | ||
114 | |||
115 | /* Valid access types are represented as | ||
116 | * | ||
117 | * rw- : Set Read/Write Access Breakpoint | ||
118 | * -w- : Set Write Access Breakpoint | ||
119 | * --- : Clear Breakpoints | ||
120 | * --x : Set Execution Breakpoint (not available yet) | ||
121 | * | ||
122 | */ | ||
123 | static int ksym_trace_get_access_type(char *str) | ||
124 | { | ||
125 | int access = 0; | ||
126 | |||
127 | if (str[0] == 'r') | ||
128 | access |= HW_BREAKPOINT_R; | ||
129 | |||
130 | if (str[1] == 'w') | ||
131 | access |= HW_BREAKPOINT_W; | ||
132 | |||
133 | if (str[2] == 'x') | ||
134 | access |= HW_BREAKPOINT_X; | ||
135 | |||
136 | switch (access) { | ||
137 | case HW_BREAKPOINT_R: | ||
138 | case HW_BREAKPOINT_W: | ||
139 | case HW_BREAKPOINT_W | HW_BREAKPOINT_R: | ||
140 | return access; | ||
141 | default: | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * There can be several possible malformed requests and we attempt to capture | ||
148 | * all of them. We enumerate some of the rules: | ||
149 | * 1. We will not allow kernel symbols with ':' since it is used as a delimiter. | ||
150 | * i.e. multiple ':' symbols disallowed. Possible uses are of the form | ||
151 | * <module>:<ksym_name>:<op>. | ||
152 | * 2. No delimiter symbol ':' in the input string | ||
153 | * 3. Spurious operator symbols or symbols not in their respective positions | ||
154 | * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file | ||
155 | * 5. Kernel symbol not a part of /proc/kallsyms | ||
156 | * 6. Duplicate requests | ||
157 | */ | ||
158 | static int parse_ksym_trace_str(char *input_string, char **ksymname, | ||
159 | unsigned long *addr) | ||
160 | { | ||
161 | int ret; | ||
162 | |||
163 | *ksymname = strsep(&input_string, ":"); | ||
164 | *addr = kallsyms_lookup_name(*ksymname); | ||
165 | |||
166 | /* Check for malformed request: (2), (1) and (5) */ | ||
167 | if ((!input_string) || | ||
168 | (strlen(input_string) != KSYM_TRACER_OP_LEN) || | ||
169 | (*addr == 0)) | ||
170 | return -EINVAL; | ||
171 | |||
172 | ret = ksym_trace_get_access_type(input_string); | ||
173 | |||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | ||
178 | { | ||
179 | struct trace_ksym *entry; | ||
180 | int ret = -ENOMEM; | ||
181 | |||
182 | if (ksym_filter_entry_count >= KSYM_TRACER_MAX) { | ||
183 | printk(KERN_ERR "ksym_tracer: Limit of %d entries reached. No" | ||
184 | " new tracing requests can be accepted now.\n", | ||
185 | KSYM_TRACER_MAX); | ||
186 | return -ENOSPC; | ||
187 | } | ||
188 | |||
189 | entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); | ||
190 | if (!entry) | ||
191 | return -ENOMEM; | ||
192 | |||
193 | hw_breakpoint_init(&entry->attr); | ||
194 | |||
195 | entry->attr.bp_type = op; | ||
196 | entry->attr.bp_addr = addr; | ||
197 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; | ||
198 | |||
199 | ret = -EAGAIN; | ||
200 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, | ||
201 | ksym_hbp_handler); | ||
202 | |||
203 | if (IS_ERR(entry->ksym_hbp)) { | ||
204 | ret = PTR_ERR(entry->ksym_hbp); | ||
205 | printk(KERN_INFO "ksym_tracer request failed. Try again" | ||
206 | " later\n"); | ||
207 | goto err; | ||
208 | } | ||
209 | |||
210 | hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); | ||
211 | ksym_filter_entry_count++; | ||
212 | |||
213 | return 0; | ||
214 | |||
215 | err: | ||
216 | kfree(entry); | ||
217 | |||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, | ||
222 | size_t count, loff_t *ppos) | ||
223 | { | ||
224 | struct trace_ksym *entry; | ||
225 | struct hlist_node *node; | ||
226 | struct trace_seq *s; | ||
227 | ssize_t cnt = 0; | ||
228 | int ret; | ||
229 | |||
230 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
231 | if (!s) | ||
232 | return -ENOMEM; | ||
233 | trace_seq_init(s); | ||
234 | |||
235 | mutex_lock(&ksym_tracer_mutex); | ||
236 | |||
237 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
238 | ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr); | ||
239 | if (entry->attr.bp_type == HW_BREAKPOINT_R) | ||
240 | ret = trace_seq_puts(s, "r--\n"); | ||
241 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) | ||
242 | ret = trace_seq_puts(s, "-w-\n"); | ||
243 | else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) | ||
244 | ret = trace_seq_puts(s, "rw-\n"); | ||
245 | WARN_ON_ONCE(!ret); | ||
246 | } | ||
247 | |||
248 | cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | ||
249 | |||
250 | mutex_unlock(&ksym_tracer_mutex); | ||
251 | |||
252 | kfree(s); | ||
253 | |||
254 | return cnt; | ||
255 | } | ||
256 | |||
257 | static void __ksym_trace_reset(void) | ||
258 | { | ||
259 | struct trace_ksym *entry; | ||
260 | struct hlist_node *node, *node1; | ||
261 | |||
262 | mutex_lock(&ksym_tracer_mutex); | ||
263 | hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, | ||
264 | ksym_hlist) { | ||
265 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | ||
266 | ksym_filter_entry_count--; | ||
267 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
268 | synchronize_rcu(); | ||
269 | kfree(entry); | ||
270 | } | ||
271 | mutex_unlock(&ksym_tracer_mutex); | ||
272 | } | ||
273 | |||
274 | static ssize_t ksym_trace_filter_write(struct file *file, | ||
275 | const char __user *buffer, | ||
276 | size_t count, loff_t *ppos) | ||
277 | { | ||
278 | struct trace_ksym *entry; | ||
279 | struct hlist_node *node; | ||
280 | char *input_string, *ksymname = NULL; | ||
281 | unsigned long ksym_addr = 0; | ||
282 | int ret, op, changed = 0; | ||
283 | |||
284 | input_string = kzalloc(count + 1, GFP_KERNEL); | ||
285 | if (!input_string) | ||
286 | return -ENOMEM; | ||
287 | |||
288 | if (copy_from_user(input_string, buffer, count)) { | ||
289 | kfree(input_string); | ||
290 | return -EFAULT; | ||
291 | } | ||
292 | input_string[count] = '\0'; | ||
293 | |||
294 | strstrip(input_string); | ||
295 | |||
296 | /* | ||
297 | * Clear all breakpoints if: | ||
298 | * 1: echo > ksym_trace_filter | ||
299 | * 2: echo 0 > ksym_trace_filter | ||
300 | * 3: echo "*:---" > ksym_trace_filter | ||
301 | */ | ||
302 | if (!input_string[0] || !strcmp(input_string, "0") || | ||
303 | !strcmp(input_string, "*:---")) { | ||
304 | __ksym_trace_reset(); | ||
305 | kfree(input_string); | ||
306 | return count; | ||
307 | } | ||
308 | |||
309 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); | ||
310 | if (ret < 0) { | ||
311 | kfree(input_string); | ||
312 | return ret; | ||
313 | } | ||
314 | |||
315 | mutex_lock(&ksym_tracer_mutex); | ||
316 | |||
317 | ret = -EINVAL; | ||
318 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
319 | if (entry->attr.bp_addr == ksym_addr) { | ||
320 | /* Check for malformed request: (6) */ | ||
321 | if (entry->attr.bp_type != op) | ||
322 | changed = 1; | ||
323 | else | ||
324 | goto out; | ||
325 | break; | ||
326 | } | ||
327 | } | ||
328 | if (changed) { | ||
329 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | ||
330 | entry->attr.bp_type = op; | ||
331 | ret = 0; | ||
332 | if (op > 0) { | ||
333 | entry->ksym_hbp = | ||
334 | register_wide_hw_breakpoint(&entry->attr, | ||
335 | ksym_hbp_handler); | ||
336 | if (IS_ERR(entry->ksym_hbp)) | ||
337 | ret = PTR_ERR(entry->ksym_hbp); | ||
338 | else | ||
339 | goto out; | ||
340 | } | ||
341 | /* Error or "symbol:---" case: drop it */ | ||
342 | ksym_filter_entry_count--; | ||
343 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
344 | synchronize_rcu(); | ||
345 | kfree(entry); | ||
346 | goto out; | ||
347 | } else { | ||
348 | /* Check for malformed request: (4) */ | ||
349 | if (op == 0) | ||
350 | goto out; | ||
351 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); | ||
352 | } | ||
353 | out: | ||
354 | mutex_unlock(&ksym_tracer_mutex); | ||
355 | |||
356 | kfree(input_string); | ||
357 | |||
358 | if (!ret) | ||
359 | ret = count; | ||
360 | return ret; | ||
361 | } | ||
362 | |||
363 | static const struct file_operations ksym_tracing_fops = { | ||
364 | .open = tracing_open_generic, | ||
365 | .read = ksym_trace_filter_read, | ||
366 | .write = ksym_trace_filter_write, | ||
367 | }; | ||
368 | |||
369 | static void ksym_trace_reset(struct trace_array *tr) | ||
370 | { | ||
371 | ksym_tracing_enabled = 0; | ||
372 | __ksym_trace_reset(); | ||
373 | } | ||
374 | |||
375 | static int ksym_trace_init(struct trace_array *tr) | ||
376 | { | ||
377 | int cpu, ret = 0; | ||
378 | |||
379 | for_each_online_cpu(cpu) | ||
380 | tracing_reset(tr, cpu); | ||
381 | ksym_tracing_enabled = 1; | ||
382 | ksym_trace_array = tr; | ||
383 | |||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | static void ksym_trace_print_header(struct seq_file *m) | ||
388 | { | ||
389 | seq_puts(m, | ||
390 | "# TASK-PID CPU# Symbol " | ||
391 | "Type Function\n"); | ||
392 | seq_puts(m, | ||
393 | "# | | | " | ||
394 | " | |\n"); | ||
395 | } | ||
396 | |||
397 | static enum print_line_t ksym_trace_output(struct trace_iterator *iter) | ||
398 | { | ||
399 | struct trace_entry *entry = iter->ent; | ||
400 | struct trace_seq *s = &iter->seq; | ||
401 | struct ksym_trace_entry *field; | ||
402 | char str[KSYM_SYMBOL_LEN]; | ||
403 | int ret; | ||
404 | |||
405 | if (entry->type != TRACE_KSYM) | ||
406 | return TRACE_TYPE_UNHANDLED; | ||
407 | |||
408 | trace_assign_type(field, entry); | ||
409 | |||
410 | ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd, | ||
411 | entry->pid, iter->cpu, (char *)field->addr); | ||
412 | if (!ret) | ||
413 | return TRACE_TYPE_PARTIAL_LINE; | ||
414 | |||
415 | switch (field->type) { | ||
416 | case HW_BREAKPOINT_R: | ||
417 | ret = trace_seq_printf(s, " R "); | ||
418 | break; | ||
419 | case HW_BREAKPOINT_W: | ||
420 | ret = trace_seq_printf(s, " W "); | ||
421 | break; | ||
422 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
423 | ret = trace_seq_printf(s, " RW "); | ||
424 | break; | ||
425 | default: | ||
426 | return TRACE_TYPE_PARTIAL_LINE; | ||
427 | } | ||
428 | |||
429 | if (!ret) | ||
430 | return TRACE_TYPE_PARTIAL_LINE; | ||
431 | |||
432 | sprint_symbol(str, field->ip); | ||
433 | ret = trace_seq_printf(s, "%s\n", str); | ||
434 | if (!ret) | ||
435 | return TRACE_TYPE_PARTIAL_LINE; | ||
436 | |||
437 | return TRACE_TYPE_HANDLED; | ||
438 | } | ||
439 | |||
440 | struct tracer ksym_tracer __read_mostly = | ||
441 | { | ||
442 | .name = "ksym_tracer", | ||
443 | .init = ksym_trace_init, | ||
444 | .reset = ksym_trace_reset, | ||
445 | #ifdef CONFIG_FTRACE_SELFTEST | ||
446 | .selftest = trace_selftest_startup_ksym, | ||
447 | #endif | ||
448 | .print_header = ksym_trace_print_header, | ||
449 | .print_line = ksym_trace_output | ||
450 | }; | ||
451 | |||
452 | __init static int init_ksym_trace(void) | ||
453 | { | ||
454 | struct dentry *d_tracer; | ||
455 | struct dentry *entry; | ||
456 | |||
457 | d_tracer = tracing_init_dentry(); | ||
458 | ksym_filter_entry_count = 0; | ||
459 | |||
460 | entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer, | ||
461 | NULL, &ksym_tracing_fops); | ||
462 | if (!entry) | ||
463 | pr_warning("Could not create debugfs " | ||
464 | "'ksym_trace_filter' file\n"); | ||
465 | |||
466 | return register_tracer(&ksym_tracer); | ||
467 | } | ||
468 | device_initcall(init_ksym_trace); | ||
469 | |||
470 | |||
471 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
472 | static int ksym_tracer_stat_headers(struct seq_file *m) | ||
473 | { | ||
474 | seq_puts(m, " Access Type "); | ||
475 | seq_puts(m, " Symbol Counter\n"); | ||
476 | seq_puts(m, " ----------- "); | ||
477 | seq_puts(m, " ------ -------\n"); | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | static int ksym_tracer_stat_show(struct seq_file *m, void *v) | ||
482 | { | ||
483 | struct hlist_node *stat = v; | ||
484 | struct trace_ksym *entry; | ||
485 | int access_type = 0; | ||
486 | char fn_name[KSYM_NAME_LEN]; | ||
487 | |||
488 | entry = hlist_entry(stat, struct trace_ksym, ksym_hlist); | ||
489 | |||
490 | access_type = entry->attr.bp_type; | ||
491 | |||
492 | switch (access_type) { | ||
493 | case HW_BREAKPOINT_R: | ||
494 | seq_puts(m, " R "); | ||
495 | break; | ||
496 | case HW_BREAKPOINT_W: | ||
497 | seq_puts(m, " W "); | ||
498 | break; | ||
499 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
500 | seq_puts(m, " RW "); | ||
501 | break; | ||
502 | default: | ||
503 | seq_puts(m, " NA "); | ||
504 | } | ||
505 | |||
506 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) | ||
507 | seq_printf(m, " %-36s", fn_name); | ||
508 | else | ||
509 | seq_printf(m, " %-36s", "<NA>"); | ||
510 | seq_printf(m, " %15lu\n", entry->counter); | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static void *ksym_tracer_stat_start(struct tracer_stat *trace) | ||
516 | { | ||
517 | return ksym_filter_head.first; | ||
518 | } | ||
519 | |||
520 | static void * | ||
521 | ksym_tracer_stat_next(void *v, int idx) | ||
522 | { | ||
523 | struct hlist_node *stat = v; | ||
524 | |||
525 | return stat->next; | ||
526 | } | ||
527 | |||
528 | static struct tracer_stat ksym_tracer_stats = { | ||
529 | .name = "ksym_tracer", | ||
530 | .stat_start = ksym_tracer_stat_start, | ||
531 | .stat_next = ksym_tracer_stat_next, | ||
532 | .stat_headers = ksym_tracer_stat_headers, | ||
533 | .stat_show = ksym_tracer_stat_show | ||
534 | }; | ||
535 | |||
536 | __init static int ksym_tracer_stat_init(void) | ||
537 | { | ||
538 | int ret; | ||
539 | |||
540 | ret = register_stat_tracer(&ksym_tracer_stats); | ||
541 | if (ret) { | ||
542 | printk(KERN_WARNING "Warning: could not register " | ||
543 | "ksym tracer stats\n"); | ||
544 | return 1; | ||
545 | } | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | fs_initcall(ksym_tracer_stat_init); | ||
550 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
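
The ksym_trace_filter file added above is driven the same way, with plain text writes. The sketch below is illustrative only and not part of the patch: it assumes debugfs is mounted at /sys/kernel/debug and uses pid_max merely as an example of a data symbol resolvable through kallsyms. Requests take the form <symbol>:<op> with op being rw- or -w-, and an empty write, "0", or "*:---" clears every watchpoint, as handled in ksym_trace_filter_write().

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int write_filter(const char *req)
{
	int fd = open("/sys/kernel/debug/tracing/ksym_trace_filter", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = write(fd, req, strlen(req)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}

int main(void)
{
	/* arm a read/write hardware watchpoint on an example symbol */
	if (write_filter("pid_max:rw-\n"))
		perror("arm watchpoint");

	/* ... run a workload, then inspect the tracer output ... */

	/* clear all watchpoints; "0" and "*:---" are both accepted */
	if (write_filter("0\n"))
		perror("clear watchpoints");
	return 0;
}
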
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index d2cdbabb4ead..dc98309e839a 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -17,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
17 | case TRACE_GRAPH_ENT: | 17 | case TRACE_GRAPH_ENT: |
18 | case TRACE_GRAPH_RET: | 18 | case TRACE_GRAPH_RET: |
19 | case TRACE_HW_BRANCHES: | 19 | case TRACE_HW_BRANCHES: |
20 | case TRACE_KSYM: | ||
20 | return 1; | 21 | return 1; |
21 | } | 22 | } |
22 | return 0; | 23 | return 0; |
@@ -808,3 +809,57 @@ trace_selftest_startup_hw_branches(struct tracer *trace, | |||
808 | return ret; | 809 | return ret; |
809 | } | 810 | } |
810 | #endif /* CONFIG_HW_BRANCH_TRACER */ | 811 | #endif /* CONFIG_HW_BRANCH_TRACER */ |
812 | |||
813 | #ifdef CONFIG_KSYM_TRACER | ||
814 | static int ksym_selftest_dummy; | ||
815 | |||
816 | int | ||
817 | trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) | ||
818 | { | ||
819 | unsigned long count; | ||
820 | int ret; | ||
821 | |||
822 | /* start the tracing */ | ||
823 | ret = tracer_init(trace, tr); | ||
824 | if (ret) { | ||
825 | warn_failed_init_tracer(trace, ret); | ||
826 | return ret; | ||
827 | } | ||
828 | |||
829 | ksym_selftest_dummy = 0; | ||
830 | /* Register the read-write tracing request */ | ||
831 | |||
832 | ret = process_new_ksym_entry("ksym_selftest_dummy", | ||
833 | HW_BREAKPOINT_R | HW_BREAKPOINT_W, | ||
834 | (unsigned long)(&ksym_selftest_dummy)); | ||
835 | |||
836 | if (ret < 0) { | ||
837 | printk(KERN_CONT "ksym_trace read-write startup test failed\n"); | ||
838 | goto ret_path; | ||
839 | } | ||
840 | /* Perform a read and a write operation over the dummy variable to | ||
841 | * trigger the tracer | ||
842 | */ | ||
843 | if (ksym_selftest_dummy == 0) | ||
844 | ksym_selftest_dummy++; | ||
845 | |||
846 | /* stop the tracing. */ | ||
847 | tracing_stop(); | ||
848 | /* check the trace buffer */ | ||
849 | ret = trace_test_buffer(tr, &count); | ||
850 | trace->reset(tr); | ||
851 | tracing_start(); | ||
852 | |||
853 | /* read & write operations - one each is performed on the dummy variable | ||
854 | * triggering two entries in the trace buffer | ||
855 | */ | ||
856 | if (!ret && count != 2) { | ||
857 | printk(KERN_CONT "Ksym tracer startup test failed"); | ||
858 | ret = -1; | ||
859 | } | ||
860 | |||
861 | ret_path: | ||
862 | return ret; | ||
863 | } | ||
864 | #endif /* CONFIG_KSYM_TRACER */ | ||
865 | |||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 527e17eae575..57501d90096a 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -14,6 +14,43 @@ static int sys_refcount_exit; | |||
14 | static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); | 14 | static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); |
15 | static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); | 15 | static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); |
16 | 16 | ||
17 | extern unsigned long __start_syscalls_metadata[]; | ||
18 | extern unsigned long __stop_syscalls_metadata[]; | ||
19 | |||
20 | static struct syscall_metadata **syscalls_metadata; | ||
21 | |||
22 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | ||
23 | { | ||
24 | struct syscall_metadata *start; | ||
25 | struct syscall_metadata *stop; | ||
26 | char str[KSYM_SYMBOL_LEN]; | ||
27 | |||
28 | |||
29 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
30 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
31 | kallsyms_lookup(syscall, NULL, NULL, NULL, str); | ||
32 | |||
33 | for ( ; start < stop; start++) { | ||
34 | /* | ||
35 | * Only compare after the "sys" prefix. Archs that use | ||
36 | * syscall wrappers may have syscalls symbols aliases prefixed | ||
37 | * with "SyS" instead of "sys", leading to an unwanted | ||
38 | * mismatch. | ||
39 | */ | ||
40 | if (start->name && !strcmp(start->name + 3, str + 3)) | ||
41 | return start; | ||
42 | } | ||
43 | return NULL; | ||
44 | } | ||
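
The prefix-skipping comparison is the subtle part of find_syscall_meta(): on architectures that use syscall wrappers, the kallsyms name carries a "SyS" prefix while the generated metadata records "sys", so only the tail of the name is compared. A trivial user-space illustration with a hypothetical wrapper alias:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *kallsyms_name = "SyS_open";	/* hypothetical wrapper alias */
	const char *metadata_name = "sys_open";

	/* full compare fails on the prefix case difference */
	printf("full: %d\n", strcmp(metadata_name, kallsyms_name));
	/* comparing past the 3-byte "sys"/"SyS" prefix matches */
	printf("tail: %d\n", strcmp(metadata_name + 3, kallsyms_name + 3));
	return 0;
}
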
45 | |||
46 | static struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
47 | { | ||
48 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) | ||
49 | return NULL; | ||
50 | |||
51 | return syscalls_metadata[nr]; | ||
52 | } | ||
53 | |||
17 | enum print_line_t | 54 | enum print_line_t |
18 | print_syscall_enter(struct trace_iterator *iter, int flags) | 55 | print_syscall_enter(struct trace_iterator *iter, int flags) |
19 | { | 56 | { |
@@ -30,7 +67,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags) | |||
30 | if (!entry) | 67 | if (!entry) |
31 | goto end; | 68 | goto end; |
32 | 69 | ||
33 | if (entry->enter_id != ent->type) { | 70 | if (entry->enter_event->id != ent->type) { |
34 | WARN_ON_ONCE(1); | 71 | WARN_ON_ONCE(1); |
35 | goto end; | 72 | goto end; |
36 | } | 73 | } |
@@ -85,7 +122,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags) | |||
85 | return TRACE_TYPE_HANDLED; | 122 | return TRACE_TYPE_HANDLED; |
86 | } | 123 | } |
87 | 124 | ||
88 | if (entry->exit_id != ent->type) { | 125 | if (entry->exit_event->id != ent->type) { |
89 | WARN_ON_ONCE(1); | 126 | WARN_ON_ONCE(1); |
90 | return TRACE_TYPE_UNHANDLED; | 127 | return TRACE_TYPE_UNHANDLED; |
91 | } | 128 | } |
@@ -103,24 +140,19 @@ extern char *__bad_type_size(void); | |||
103 | #define SYSCALL_FIELD(type, name) \ | 140 | #define SYSCALL_FIELD(type, name) \ |
104 | sizeof(type) != sizeof(trace.name) ? \ | 141 | sizeof(type) != sizeof(trace.name) ? \ |
105 | __bad_type_size() : \ | 142 | __bad_type_size() : \ |
106 | #type, #name, offsetof(typeof(trace), name), sizeof(trace.name) | 143 | #type, #name, offsetof(typeof(trace), name), \ |
144 | sizeof(trace.name), is_signed_type(type) | ||
107 | 145 | ||
108 | int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) | 146 | int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) |
109 | { | 147 | { |
110 | int i; | 148 | int i; |
111 | int nr; | ||
112 | int ret; | 149 | int ret; |
113 | struct syscall_metadata *entry; | 150 | struct syscall_metadata *entry = call->data; |
114 | struct syscall_trace_enter trace; | 151 | struct syscall_trace_enter trace; |
115 | int offset = offsetof(struct syscall_trace_enter, args); | 152 | int offset = offsetof(struct syscall_trace_enter, args); |
116 | 153 | ||
117 | nr = syscall_name_to_nr(call->data); | 154 | ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" |
118 | entry = syscall_nr_to_meta(nr); | 155 | "\tsigned:%u;\n", |
119 | |||
120 | if (!entry) | ||
121 | return 0; | ||
122 | |||
123 | ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", | ||
124 | SYSCALL_FIELD(int, nr)); | 156 | SYSCALL_FIELD(int, nr)); |
125 | if (!ret) | 157 | if (!ret) |
126 | return 0; | 158 | return 0; |
@@ -130,8 +162,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
130 | entry->args[i]); | 162 | entry->args[i]); |
131 | if (!ret) | 163 | if (!ret) |
132 | return 0; | 164 | return 0; |
133 | ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset, | 165 | ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;" |
134 | sizeof(unsigned long)); | 166 | "\tsigned:%u;\n", offset, |
167 | sizeof(unsigned long), | ||
168 | is_signed_type(unsigned long)); | ||
135 | if (!ret) | 169 | if (!ret) |
136 | return 0; | 170 | return 0; |
137 | offset += sizeof(unsigned long); | 171 | offset += sizeof(unsigned long); |
@@ -163,8 +197,10 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
163 | struct syscall_trace_exit trace; | 197 | struct syscall_trace_exit trace; |
164 | 198 | ||
165 | ret = trace_seq_printf(s, | 199 | ret = trace_seq_printf(s, |
166 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 200 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" |
167 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", | 201 | "\tsigned:%u;\n" |
202 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" | ||
203 | "\tsigned:%u;\n", | ||
168 | SYSCALL_FIELD(int, nr), | 204 | SYSCALL_FIELD(int, nr), |
169 | SYSCALL_FIELD(long, ret)); | 205 | SYSCALL_FIELD(long, ret)); |
170 | if (!ret) | 206 | if (!ret) |
@@ -176,22 +212,19 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
176 | int syscall_enter_define_fields(struct ftrace_event_call *call) | 212 | int syscall_enter_define_fields(struct ftrace_event_call *call) |
177 | { | 213 | { |
178 | struct syscall_trace_enter trace; | 214 | struct syscall_trace_enter trace; |
179 | struct syscall_metadata *meta; | 215 | struct syscall_metadata *meta = call->data; |
180 | int ret; | 216 | int ret; |
181 | int nr; | ||
182 | int i; | 217 | int i; |
183 | int offset = offsetof(typeof(trace), args); | 218 | int offset = offsetof(typeof(trace), args); |
184 | 219 | ||
185 | nr = syscall_name_to_nr(call->data); | ||
186 | meta = syscall_nr_to_meta(nr); | ||
187 | |||
188 | if (!meta) | ||
189 | return 0; | ||
190 | |||
191 | ret = trace_define_common_fields(call); | 220 | ret = trace_define_common_fields(call); |
192 | if (ret) | 221 | if (ret) |
193 | return ret; | 222 | return ret; |
194 | 223 | ||
224 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); | ||
225 | if (ret) | ||
226 | return ret; | ||
227 | |||
195 | for (i = 0; i < meta->nb_args; i++) { | 228 | for (i = 0; i < meta->nb_args; i++) { |
196 | ret = trace_define_field(call, meta->types[i], | 229 | ret = trace_define_field(call, meta->types[i], |
197 | meta->args[i], offset, | 230 | meta->args[i], offset, |
@@ -212,7 +245,11 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
212 | if (ret) | 245 | if (ret) |
213 | return ret; | 246 | return ret; |
214 | 247 | ||
215 | ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0, | 248 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); |
249 | if (ret) | ||
250 | return ret; | ||
251 | |||
252 | ret = trace_define_field(call, SYSCALL_FIELD(long, ret), | ||
216 | FILTER_OTHER); | 253 | FILTER_OTHER); |
217 | 254 | ||
218 | return ret; | 255 | return ret; |
@@ -239,8 +276,8 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) | |||
239 | 276 | ||
240 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; | 277 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; |
241 | 278 | ||
242 | event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id, | 279 | event = trace_current_buffer_lock_reserve(&buffer, |
243 | size, 0, 0); | 280 | sys_data->enter_event->id, size, 0, 0); |
244 | if (!event) | 281 | if (!event) |
245 | return; | 282 | return; |
246 | 283 | ||
@@ -271,8 +308,8 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) | |||
271 | if (!sys_data) | 308 | if (!sys_data) |
272 | return; | 309 | return; |
273 | 310 | ||
274 | event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id, | 311 | event = trace_current_buffer_lock_reserve(&buffer, |
275 | sizeof(*entry), 0, 0); | 312 | sys_data->exit_event->id, sizeof(*entry), 0, 0); |
276 | if (!event) | 313 | if (!event) |
277 | return; | 314 | return; |
278 | 315 | ||
@@ -285,14 +322,12 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) | |||
285 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 322 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); |
286 | } | 323 | } |
287 | 324 | ||
288 | int reg_event_syscall_enter(void *ptr) | 325 | int reg_event_syscall_enter(struct ftrace_event_call *call) |
289 | { | 326 | { |
290 | int ret = 0; | 327 | int ret = 0; |
291 | int num; | 328 | int num; |
292 | char *name; | ||
293 | 329 | ||
294 | name = (char *)ptr; | 330 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
295 | num = syscall_name_to_nr(name); | ||
296 | if (num < 0 || num >= NR_syscalls) | 331 | if (num < 0 || num >= NR_syscalls) |
297 | return -ENOSYS; | 332 | return -ENOSYS; |
298 | mutex_lock(&syscall_trace_lock); | 333 | mutex_lock(&syscall_trace_lock); |
@@ -309,13 +344,11 @@ int reg_event_syscall_enter(void *ptr) | |||
309 | return ret; | 344 | return ret; |
310 | } | 345 | } |
311 | 346 | ||
312 | void unreg_event_syscall_enter(void *ptr) | 347 | void unreg_event_syscall_enter(struct ftrace_event_call *call) |
313 | { | 348 | { |
314 | int num; | 349 | int num; |
315 | char *name; | ||
316 | 350 | ||
317 | name = (char *)ptr; | 351 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
318 | num = syscall_name_to_nr(name); | ||
319 | if (num < 0 || num >= NR_syscalls) | 352 | if (num < 0 || num >= NR_syscalls) |
320 | return; | 353 | return; |
321 | mutex_lock(&syscall_trace_lock); | 354 | mutex_lock(&syscall_trace_lock); |
@@ -326,14 +359,12 @@ void unreg_event_syscall_enter(void *ptr) | |||
326 | mutex_unlock(&syscall_trace_lock); | 359 | mutex_unlock(&syscall_trace_lock); |
327 | } | 360 | } |
328 | 361 | ||
329 | int reg_event_syscall_exit(void *ptr) | 362 | int reg_event_syscall_exit(struct ftrace_event_call *call) |
330 | { | 363 | { |
331 | int ret = 0; | 364 | int ret = 0; |
332 | int num; | 365 | int num; |
333 | char *name; | ||
334 | 366 | ||
335 | name = (char *)ptr; | 367 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
336 | num = syscall_name_to_nr(name); | ||
337 | if (num < 0 || num >= NR_syscalls) | 368 | if (num < 0 || num >= NR_syscalls) |
338 | return -ENOSYS; | 369 | return -ENOSYS; |
339 | mutex_lock(&syscall_trace_lock); | 370 | mutex_lock(&syscall_trace_lock); |
@@ -350,13 +381,11 @@ int reg_event_syscall_exit(void *ptr) | |||
350 | return ret; | 381 | return ret; |
351 | } | 382 | } |
352 | 383 | ||
353 | void unreg_event_syscall_exit(void *ptr) | 384 | void unreg_event_syscall_exit(struct ftrace_event_call *call) |
354 | { | 385 | { |
355 | int num; | 386 | int num; |
356 | char *name; | ||
357 | 387 | ||
358 | name = (char *)ptr; | 388 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
359 | num = syscall_name_to_nr(name); | ||
360 | if (num < 0 || num >= NR_syscalls) | 389 | if (num < 0 || num >= NR_syscalls) |
361 | return; | 390 | return; |
362 | mutex_lock(&syscall_trace_lock); | 391 | mutex_lock(&syscall_trace_lock); |
@@ -367,13 +396,44 @@ void unreg_event_syscall_exit(void *ptr) | |||
367 | mutex_unlock(&syscall_trace_lock); | 396 | mutex_unlock(&syscall_trace_lock); |
368 | } | 397 | } |
369 | 398 | ||
370 | struct trace_event event_syscall_enter = { | 399 | int init_syscall_trace(struct ftrace_event_call *call) |
371 | .trace = print_syscall_enter, | 400 | { |
372 | }; | 401 | int id; |
402 | |||
403 | id = register_ftrace_event(call->event); | ||
404 | if (!id) | ||
405 | return -ENODEV; | ||
406 | call->id = id; | ||
407 | INIT_LIST_HEAD(&call->fields); | ||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | int __init init_ftrace_syscalls(void) | ||
412 | { | ||
413 | struct syscall_metadata *meta; | ||
414 | unsigned long addr; | ||
415 | int i; | ||
416 | |||
417 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | ||
418 | NR_syscalls, GFP_KERNEL); | ||
419 | if (!syscalls_metadata) { | ||
420 | WARN_ON(1); | ||
421 | return -ENOMEM; | ||
422 | } | ||
423 | |||
424 | for (i = 0; i < NR_syscalls; i++) { | ||
425 | addr = arch_syscall_addr(i); | ||
426 | meta = find_syscall_meta(addr); | ||
427 | if (!meta) | ||
428 | continue; | ||
429 | |||
430 | meta->syscall_nr = i; | ||
431 | syscalls_metadata[i] = meta; | ||
432 | } | ||
373 | 433 | ||
374 | struct trace_event event_syscall_exit = { | 434 | return 0; |
375 | .trace = print_syscall_exit, | 435 | } |
376 | }; | 436 | core_initcall(init_ftrace_syscalls); |
377 | 437 | ||
378 | #ifdef CONFIG_EVENT_PROFILE | 438 | #ifdef CONFIG_EVENT_PROFILE |
379 | 439 | ||
@@ -387,8 +447,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
387 | struct syscall_metadata *sys_data; | 447 | struct syscall_metadata *sys_data; |
388 | struct syscall_trace_enter *rec; | 448 | struct syscall_trace_enter *rec; |
389 | unsigned long flags; | 449 | unsigned long flags; |
450 | char *trace_buf; | ||
390 | char *raw_data; | 451 | char *raw_data; |
391 | int syscall_nr; | 452 | int syscall_nr; |
453 | int rctx; | ||
392 | int size; | 454 | int size; |
393 | int cpu; | 455 | int cpu; |
394 | 456 | ||
@@ -412,41 +474,42 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
412 | /* Protect the per cpu buffer, begin the rcu read side */ | 474 | /* Protect the per cpu buffer, begin the rcu read side */ |
413 | local_irq_save(flags); | 475 | local_irq_save(flags); |
414 | 476 | ||
477 | rctx = perf_swevent_get_recursion_context(); | ||
478 | if (rctx < 0) | ||
479 | goto end_recursion; | ||
480 | |||
415 | cpu = smp_processor_id(); | 481 | cpu = smp_processor_id(); |
416 | 482 | ||
417 | if (in_nmi()) | 483 | trace_buf = rcu_dereference(perf_trace_buf); |
418 | raw_data = rcu_dereference(trace_profile_buf_nmi); | ||
419 | else | ||
420 | raw_data = rcu_dereference(trace_profile_buf); | ||
421 | 484 | ||
422 | if (!raw_data) | 485 | if (!trace_buf) |
423 | goto end; | 486 | goto end; |
424 | 487 | ||
425 | raw_data = per_cpu_ptr(raw_data, cpu); | 488 | raw_data = per_cpu_ptr(trace_buf, cpu); |
426 | 489 | ||
427 | /* zero the dead bytes from align to not leak stack to user */ | 490 | /* zero the dead bytes from align to not leak stack to user */ |
428 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | 491 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; |
429 | 492 | ||
430 | rec = (struct syscall_trace_enter *) raw_data; | 493 | rec = (struct syscall_trace_enter *) raw_data; |
431 | tracing_generic_entry_update(&rec->ent, 0, 0); | 494 | tracing_generic_entry_update(&rec->ent, 0, 0); |
432 | rec->ent.type = sys_data->enter_id; | 495 | rec->ent.type = sys_data->enter_event->id; |
433 | rec->nr = syscall_nr; | 496 | rec->nr = syscall_nr; |
434 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 497 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, |
435 | (unsigned long *)&rec->args); | 498 | (unsigned long *)&rec->args); |
436 | perf_tp_event(sys_data->enter_id, 0, 1, rec, size); | 499 | perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size); |
437 | 500 | ||
438 | end: | 501 | end: |
502 | perf_swevent_put_recursion_context(rctx); | ||
503 | end_recursion: | ||
439 | local_irq_restore(flags); | 504 | local_irq_restore(flags); |
440 | } | 505 | } |
441 | 506 | ||
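The "zero the dead bytes from align" step above matters because the record length is rounded up to a u64 boundary before the buffer is handed to perf_tp_event(), so the padding bytes would otherwise carry whatever was left in the per-cpu buffer. A minimal user-space sketch of that round-up-and-scrub idea (hypothetical helper names, a plain stack buffer instead of the per-cpu one):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Round len up to the next multiple of sizeof(uint64_t). */
static size_t round_up_u64(size_t len)
{
	return (len + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
}

int main(void)
{
	uint64_t backing[8];                   /* 64 aligned bytes             */
	char *raw_data = (char *)backing;      /* stand-in for the cpu buffer  */
	size_t payload = 13;                   /* e.g. a 13-byte trace record  */
	size_t size = round_up_u64(payload);   /* 16 bytes after alignment     */

	/* Scrub the last u64 first: anything the real record does not
	 * overwrite below stays zero, so no stale bytes reach user space. */
	*(uint64_t *)(&raw_data[size - sizeof(uint64_t)]) = 0ULL;
	memcpy(raw_data, "example-payload", payload);

	printf("payload=%zu bytes, padded record=%zu bytes\n", payload, size);
	return 0;
}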
442 | int reg_prof_syscall_enter(char *name) | 507 | int prof_sysenter_enable(struct ftrace_event_call *call) |
443 | { | 508 | { |
444 | int ret = 0; | 509 | int ret = 0; |
445 | int num; | 510 | int num; |
446 | 511 | ||
447 | num = syscall_name_to_nr(name); | 512 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
448 | if (num < 0 || num >= NR_syscalls) | ||
449 | return -ENOSYS; | ||
450 | 513 | ||
451 | mutex_lock(&syscall_trace_lock); | 514 | mutex_lock(&syscall_trace_lock); |
452 | if (!sys_prof_refcount_enter) | 515 | if (!sys_prof_refcount_enter) |
@@ -462,13 +525,11 @@ int reg_prof_syscall_enter(char *name) | |||
462 | return ret; | 525 | return ret; |
463 | } | 526 | } |
464 | 527 | ||
465 | void unreg_prof_syscall_enter(char *name) | 528 | void prof_sysenter_disable(struct ftrace_event_call *call) |
466 | { | 529 | { |
467 | int num; | 530 | int num; |
468 | 531 | ||
469 | num = syscall_name_to_nr(name); | 532 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
470 | if (num < 0 || num >= NR_syscalls) | ||
471 | return; | ||
472 | 533 | ||
473 | mutex_lock(&syscall_trace_lock); | 534 | mutex_lock(&syscall_trace_lock); |
474 | sys_prof_refcount_enter--; | 535 | sys_prof_refcount_enter--; |
@@ -484,7 +545,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
484 | struct syscall_trace_exit *rec; | 545 | struct syscall_trace_exit *rec; |
485 | unsigned long flags; | 546 | unsigned long flags; |
486 | int syscall_nr; | 547 | int syscall_nr; |
548 | char *trace_buf; | ||
487 | char *raw_data; | 549 | char *raw_data; |
550 | int rctx; | ||
488 | int size; | 551 | int size; |
489 | int cpu; | 552 | int cpu; |
490 | 553 | ||
@@ -510,17 +573,19 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
510 | 573 | ||
511 | /* Protect the per cpu buffer, begin the rcu read side */ | 574 | /* Protect the per cpu buffer, begin the rcu read side */ |
512 | local_irq_save(flags); | 575 | local_irq_save(flags); |
576 | |||
577 | rctx = perf_swevent_get_recursion_context(); | ||
578 | if (rctx < 0) | ||
579 | goto end_recursion; | ||
580 | |||
513 | cpu = smp_processor_id(); | 581 | cpu = smp_processor_id(); |
514 | 582 | ||
515 | if (in_nmi()) | 583 | trace_buf = rcu_dereference(perf_trace_buf); |
516 | raw_data = rcu_dereference(trace_profile_buf_nmi); | ||
517 | else | ||
518 | raw_data = rcu_dereference(trace_profile_buf); | ||
519 | 584 | ||
520 | if (!raw_data) | 585 | if (!trace_buf) |
521 | goto end; | 586 | goto end; |
522 | 587 | ||
523 | raw_data = per_cpu_ptr(raw_data, cpu); | 588 | raw_data = per_cpu_ptr(trace_buf, cpu); |
524 | 589 | ||
525 | /* zero the dead bytes from align to not leak stack to user */ | 590 | /* zero the dead bytes from align to not leak stack to user */ |
526 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | 591 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; |
@@ -528,24 +593,24 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
528 | rec = (struct syscall_trace_exit *)raw_data; | 593 | rec = (struct syscall_trace_exit *)raw_data; |
529 | 594 | ||
530 | tracing_generic_entry_update(&rec->ent, 0, 0); | 595 | tracing_generic_entry_update(&rec->ent, 0, 0); |
531 | rec->ent.type = sys_data->exit_id; | 596 | rec->ent.type = sys_data->exit_event->id; |
532 | rec->nr = syscall_nr; | 597 | rec->nr = syscall_nr; |
533 | rec->ret = syscall_get_return_value(current, regs); | 598 | rec->ret = syscall_get_return_value(current, regs); |
534 | 599 | ||
535 | perf_tp_event(sys_data->exit_id, 0, 1, rec, size); | 600 | perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size); |
536 | 601 | ||
537 | end: | 602 | end: |
603 | perf_swevent_put_recursion_context(rctx); | ||
604 | end_recursion: | ||
538 | local_irq_restore(flags); | 605 | local_irq_restore(flags); |
539 | } | 606 | } |
540 | 607 | ||
541 | int reg_prof_syscall_exit(char *name) | 608 | int prof_sysexit_enable(struct ftrace_event_call *call) |
542 | { | 609 | { |
543 | int ret = 0; | 610 | int ret = 0; |
544 | int num; | 611 | int num; |
545 | 612 | ||
546 | num = syscall_name_to_nr(name); | 613 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
547 | if (num < 0 || num >= NR_syscalls) | ||
548 | return -ENOSYS; | ||
549 | 614 | ||
550 | mutex_lock(&syscall_trace_lock); | 615 | mutex_lock(&syscall_trace_lock); |
551 | if (!sys_prof_refcount_exit) | 616 | if (!sys_prof_refcount_exit) |
@@ -561,13 +626,11 @@ int reg_prof_syscall_exit(char *name) | |||
561 | return ret; | 626 | return ret; |
562 | } | 627 | } |
563 | 628 | ||
564 | void unreg_prof_syscall_exit(char *name) | 629 | void prof_sysexit_disable(struct ftrace_event_call *call) |
565 | { | 630 | { |
566 | int num; | 631 | int num; |
567 | 632 | ||
568 | num = syscall_name_to_nr(name); | 633 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
569 | if (num < 0 || num >= NR_syscalls) | ||
570 | return; | ||
571 | 634 | ||
572 | mutex_lock(&syscall_trace_lock); | 635 | mutex_lock(&syscall_trace_lock); |
573 | sys_prof_refcount_exit--; | 636 | sys_prof_refcount_exit--; |
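The reg/unreg helpers above no longer resolve the syscall by name; the syscall number now comes straight from the metadata hung off call->data, and the tracepoint is only hooked for the first user and unhooked for the last. A small user-space sketch of that first-user/last-user pattern (all names hypothetical, a pthread mutex standing in for syscall_trace_lock):

#include <pthread.h>
#include <stdio.h>

struct syscall_metadata { int syscall_nr; };
struct event_call       { void *data; };   /* ->data points at the metadata */

static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;
static int enter_refcount;
static int enabled[512];                   /* stand-in for the enabled bitmap */

static void hook_tracepoint(void)   { puts("tracepoint hooked");   }
static void unhook_tracepoint(void) { puts("tracepoint unhooked"); }

static int reg_event_enter(struct event_call *call)
{
	int num = ((struct syscall_metadata *)call->data)->syscall_nr;

	if (num < 0 || num >= 512)
		return -1;
	pthread_mutex_lock(&trace_lock);
	if (!enter_refcount)               /* first user: install the probe */
		hook_tracepoint();
	enter_refcount++;
	enabled[num] = 1;
	pthread_mutex_unlock(&trace_lock);
	return 0;
}

static void unreg_event_enter(struct event_call *call)
{
	int num = ((struct syscall_metadata *)call->data)->syscall_nr;

	if (num < 0 || num >= 512)
		return;
	pthread_mutex_lock(&trace_lock);
	enabled[num] = 0;
	if (!--enter_refcount)             /* last user: remove the probe */
		unhook_tracepoint();
	pthread_mutex_unlock(&trace_lock);
}

int main(void)
{
	struct syscall_metadata meta = { .syscall_nr = 42 };
	struct event_call call = { .data = &meta };

	reg_event_enter(&call);
	unreg_event_enter(&call);
	return 0;
}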
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 234ceb10861f..a79c4d0407ab 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -750,7 +750,7 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
750 | config RCU_CPU_STALL_DETECTOR | 750 | config RCU_CPU_STALL_DETECTOR |
751 | bool "Check for stalled CPUs delaying RCU grace periods" | 751 | bool "Check for stalled CPUs delaying RCU grace periods" |
752 | depends on TREE_RCU || TREE_PREEMPT_RCU | 752 | depends on TREE_RCU || TREE_PREEMPT_RCU |
753 | default n | 753 | default y |
754 | help | 754 | help |
755 | This option causes RCU to printk information on which | 755 | This option causes RCU to printk information on which |
756 | CPUs are delaying the current grace period, but only when | 756 | CPUs are delaying the current grace period, but only when |
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 39f1029e3525..4ebfa5a164d7 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c | |||
@@ -5,10 +5,13 @@ | |||
5 | * relegated to obsolescence, but used by various less | 5 | * relegated to obsolescence, but used by various less |
6 | * important (or lazy) subsystems. | 6 | * important (or lazy) subsystems. |
7 | */ | 7 | */ |
8 | #include <linux/smp_lock.h> | ||
9 | #include <linux/module.h> | 8 | #include <linux/module.h> |
10 | #include <linux/kallsyms.h> | 9 | #include <linux/kallsyms.h> |
11 | #include <linux/semaphore.h> | 10 | #include <linux/semaphore.h> |
11 | #include <linux/smp_lock.h> | ||
12 | |||
13 | #define CREATE_TRACE_POINTS | ||
14 | #include <trace/events/bkl.h> | ||
12 | 15 | ||
13 | /* | 16 | /* |
14 | * The 'big kernel lock' | 17 | * The 'big kernel lock' |
@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void) | |||
113 | * This cannot happen asynchronously, so we only need to | 116 | * This cannot happen asynchronously, so we only need to |
114 | * worry about other CPU's. | 117 | * worry about other CPU's. |
115 | */ | 118 | */ |
116 | void __lockfunc lock_kernel(void) | 119 | void __lockfunc _lock_kernel(const char *func, const char *file, int line) |
117 | { | 120 | { |
118 | int depth = current->lock_depth+1; | 121 | int depth = current->lock_depth + 1; |
122 | |||
123 | trace_lock_kernel(func, file, line); | ||
124 | |||
119 | if (likely(!depth)) | 125 | if (likely(!depth)) |
120 | __lock_kernel(); | 126 | __lock_kernel(); |
121 | current->lock_depth = depth; | 127 | current->lock_depth = depth; |
122 | } | 128 | } |
123 | 129 | ||
124 | void __lockfunc unlock_kernel(void) | 130 | void __lockfunc _unlock_kernel(const char *func, const char *file, int line) |
125 | { | 131 | { |
126 | BUG_ON(current->lock_depth < 0); | 132 | BUG_ON(current->lock_depth < 0); |
127 | if (likely(--current->lock_depth < 0)) | 133 | if (likely(--current->lock_depth < 0)) |
128 | __unlock_kernel(); | 134 | __unlock_kernel(); |
135 | |||
136 | trace_unlock_kernel(func, file, line); | ||
129 | } | 137 | } |
130 | 138 | ||
131 | EXPORT_SYMBOL(lock_kernel); | 139 | EXPORT_SYMBOL(_lock_kernel); |
132 | EXPORT_SYMBOL(unlock_kernel); | 140 | EXPORT_SYMBOL(_unlock_kernel); |
133 | 141 | ||
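The renamed _lock_kernel()/_unlock_kernel() keep the BKL's re-entrant behaviour: the underlying lock is only taken when the per-task depth goes from -1 to 0 and only released when it drops back below 0, with the new tracepoints firing around that. A user-space sketch of the depth-counting idea, using a thread-local counter in place of current->lock_depth (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;   /* mirrors task->lock_depth */

static void my_lock_kernel(void)
{
	int depth = lock_depth + 1;

	if (!depth)                    /* outermost acquisition only */
		pthread_mutex_lock(&big_lock);
	lock_depth = depth;
}

static void my_unlock_kernel(void)
{
	if (--lock_depth < 0)          /* matching outermost release */
		pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	my_lock_kernel();
	my_lock_kernel();              /* nested call: no second lock taken */
	my_unlock_kernel();
	my_unlock_kernel();            /* real lock is dropped here */
	puts("nested lock/unlock balanced");
	return 0;
}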
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 26187edcc7ea..09f5ce1810dc 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -7,15 +7,12 @@ | |||
7 | * parameter. Now every user can use their own standalone ratelimit_state. | 7 | * parameter. Now every user can use their own standalone ratelimit_state. |
8 | * | 8 | * |
9 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
10 | * | ||
11 | */ | 10 | */ |
12 | 11 | ||
13 | #include <linux/kernel.h> | 12 | #include <linux/ratelimit.h> |
14 | #include <linux/jiffies.h> | 13 | #include <linux/jiffies.h> |
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | 15 | ||
17 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
18 | |||
19 | /* | 16 | /* |
20 | * __ratelimit - rate limiting | 17 | * __ratelimit - rate limiting |
21 | * @rs: ratelimit_state data | 18 | * @rs: ratelimit_state data |
@@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock); | |||
23 | * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks | 20 | * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks |
24 | * in every @rs->ratelimit_jiffies | 21 | * in every @rs->ratelimit_jiffies |
25 | */ | 22 | */ |
26 | int __ratelimit(struct ratelimit_state *rs) | 23 | int ___ratelimit(struct ratelimit_state *rs, const char *func) |
27 | { | 24 | { |
28 | unsigned long flags; | 25 | unsigned long flags; |
26 | int ret; | ||
29 | 27 | ||
30 | if (!rs->interval) | 28 | if (!rs->interval) |
31 | return 1; | 29 | return 1; |
32 | 30 | ||
33 | spin_lock_irqsave(&ratelimit_lock, flags); | 31 | /* |
32 | * If we contend on this state's lock then almost | ||
33 | * by definition we are too busy to print a message, | ||
34 | * in addition to the one that will be printed by | ||
35 | * the entity that is holding the lock already: | ||
36 | */ | ||
37 | if (!spin_trylock_irqsave(&rs->lock, flags)) | ||
38 | return 1; | ||
39 | |||
34 | if (!rs->begin) | 40 | if (!rs->begin) |
35 | rs->begin = jiffies; | 41 | rs->begin = jiffies; |
36 | 42 | ||
37 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | 43 | if (time_is_before_jiffies(rs->begin + rs->interval)) { |
38 | if (rs->missed) | 44 | if (rs->missed) |
39 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | 45 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", |
40 | __func__, rs->missed); | 46 | func, rs->missed); |
41 | rs->begin = 0; | 47 | rs->begin = 0; |
42 | rs->printed = 0; | 48 | rs->printed = 0; |
43 | rs->missed = 0; | 49 | rs->missed = 0; |
44 | } | 50 | } |
45 | if (rs->burst && rs->burst > rs->printed) | 51 | if (rs->burst && rs->burst > rs->printed) { |
46 | goto print; | 52 | rs->printed++; |
47 | 53 | ret = 1; | |
48 | rs->missed++; | 54 | } else { |
49 | spin_unlock_irqrestore(&ratelimit_lock, flags); | 55 | rs->missed++; |
50 | return 0; | 56 | ret = 0; |
57 | } | ||
58 | spin_unlock_irqrestore(&rs->lock, flags); | ||
51 | 59 | ||
52 | print: | 60 | return ret; |
53 | rs->printed++; | ||
54 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
55 | return 1; | ||
56 | } | 61 | } |
57 | EXPORT_SYMBOL(__ratelimit); | 62 | EXPORT_SYMBOL(___ratelimit); |
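___ratelimit() now carries its lock inside the ratelimit_state and takes it with a trylock, so callers never spin on a shared global lock. A rough user-space translation of the same policy (hypothetical struct and helper names, time(NULL) standing in for jiffies):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct my_ratelimit_state {
	pthread_mutex_t lock;
	int interval;   /* seconds per window          */
	int burst;      /* messages allowed per window */
	int printed;
	int missed;
	time_t begin;
};

/* Return 1 if the caller may print, 0 if the message should be dropped. */
static int my_ratelimit(struct my_ratelimit_state *rs, const char *func)
{
	int ret;

	if (!rs->interval)
		return 1;

	/* Don't spin on contention; skip the bookkeeping, as the lock
	 * holder is already busy producing a message of its own. */
	if (pthread_mutex_trylock(&rs->lock))
		return 1;

	if (!rs->begin)
		rs->begin = time(NULL);

	if (time(NULL) > rs->begin + rs->interval) {
		if (rs->missed)
			fprintf(stderr, "%s: %d callbacks suppressed\n",
				func, rs->missed);
		rs->begin = 0;
		rs->printed = 0;
		rs->missed = 0;
	}
	if (rs->burst > rs->printed) {
		rs->printed++;
		ret = 1;
	} else {
		rs->missed++;
		ret = 0;
	}
	pthread_mutex_unlock(&rs->lock);
	return ret;
}

int main(void)
{
	struct my_ratelimit_state rs = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .interval = 5, .burst = 3,
	};

	/* Only the first three messages get through inside one window. */
	for (int i = 0; i < 10; i++)
		if (my_ratelimit(&rs, __func__))
			printf("message %d\n", i);
	return 0;
}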
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ac25cd28e807..795472d8ae24 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr; | |||
97 | */ | 97 | */ |
98 | static DEFINE_SPINLOCK(io_tlb_lock); | 98 | static DEFINE_SPINLOCK(io_tlb_lock); |
99 | 99 | ||
100 | static int late_alloc; | ||
101 | |||
100 | static int __init | 102 | static int __init |
101 | setup_io_tlb_npages(char *str) | 103 | setup_io_tlb_npages(char *str) |
102 | { | 104 | { |
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str) | |||
109 | ++str; | 111 | ++str; |
110 | if (!strcmp(str, "force")) | 112 | if (!strcmp(str, "force")) |
111 | swiotlb_force = 1; | 113 | swiotlb_force = 1; |
114 | |||
112 | return 1; | 115 | return 1; |
113 | } | 116 | } |
114 | __setup("swiotlb=", setup_io_tlb_npages); | 117 | __setup("swiotlb=", setup_io_tlb_npages); |
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | |||
121 | return phys_to_dma(hwdev, virt_to_phys(address)); | 124 | return phys_to_dma(hwdev, virt_to_phys(address)); |
122 | } | 125 | } |
123 | 126 | ||
124 | static void swiotlb_print_info(unsigned long bytes) | 127 | void swiotlb_print_info(void) |
125 | { | 128 | { |
129 | unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
126 | phys_addr_t pstart, pend; | 130 | phys_addr_t pstart, pend; |
127 | 131 | ||
128 | pstart = virt_to_phys(io_tlb_start); | 132 | pstart = virt_to_phys(io_tlb_start); |
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes) | |||
140 | * structures for the software IO TLB used to implement the DMA API. | 144 | * structures for the software IO TLB used to implement the DMA API. |
141 | */ | 145 | */ |
142 | void __init | 146 | void __init |
143 | swiotlb_init_with_default_size(size_t default_size) | 147 | swiotlb_init_with_default_size(size_t default_size, int verbose) |
144 | { | 148 | { |
145 | unsigned long i, bytes; | 149 | unsigned long i, bytes; |
146 | 150 | ||
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size) | |||
176 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); | 180 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); |
177 | if (!io_tlb_overflow_buffer) | 181 | if (!io_tlb_overflow_buffer) |
178 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); | 182 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); |
179 | 183 | if (verbose) | |
180 | swiotlb_print_info(bytes); | 184 | swiotlb_print_info(); |
181 | } | 185 | } |
182 | 186 | ||
183 | void __init | 187 | void __init |
184 | swiotlb_init(void) | 188 | swiotlb_init(int verbose) |
185 | { | 189 | { |
186 | swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ | 190 | swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ |
187 | } | 191 | } |
188 | 192 | ||
189 | /* | 193 | /* |
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size) | |||
260 | if (!io_tlb_overflow_buffer) | 264 | if (!io_tlb_overflow_buffer) |
261 | goto cleanup4; | 265 | goto cleanup4; |
262 | 266 | ||
263 | swiotlb_print_info(bytes); | 267 | swiotlb_print_info(); |
268 | |||
269 | late_alloc = 1; | ||
264 | 270 | ||
265 | return 0; | 271 | return 0; |
266 | 272 | ||
@@ -281,6 +287,32 @@ cleanup1: | |||
281 | return -ENOMEM; | 287 | return -ENOMEM; |
282 | } | 288 | } |
283 | 289 | ||
290 | void __init swiotlb_free(void) | ||
291 | { | ||
292 | if (!io_tlb_overflow_buffer) | ||
293 | return; | ||
294 | |||
295 | if (late_alloc) { | ||
296 | free_pages((unsigned long)io_tlb_overflow_buffer, | ||
297 | get_order(io_tlb_overflow)); | ||
298 | free_pages((unsigned long)io_tlb_orig_addr, | ||
299 | get_order(io_tlb_nslabs * sizeof(phys_addr_t))); | ||
300 | free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * | ||
301 | sizeof(int))); | ||
302 | free_pages((unsigned long)io_tlb_start, | ||
303 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | ||
304 | } else { | ||
305 | free_bootmem_late(__pa(io_tlb_overflow_buffer), | ||
306 | io_tlb_overflow); | ||
307 | free_bootmem_late(__pa(io_tlb_orig_addr), | ||
308 | io_tlb_nslabs * sizeof(phys_addr_t)); | ||
309 | free_bootmem_late(__pa(io_tlb_list), | ||
310 | io_tlb_nslabs * sizeof(int)); | ||
311 | free_bootmem_late(__pa(io_tlb_start), | ||
312 | io_tlb_nslabs << IO_TLB_SHIFT); | ||
313 | } | ||
314 | } | ||
315 | |||
284 | static int is_swiotlb_buffer(phys_addr_t paddr) | 316 | static int is_swiotlb_buffer(phys_addr_t paddr) |
285 | { | 317 | { |
286 | return paddr >= virt_to_phys(io_tlb_start) && | 318 | return paddr >= virt_to_phys(io_tlb_start) && |
diff --git a/mm/bootmem.c b/mm/bootmem.c index 555d5d2731c6..d1dc23cc7f10 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -143,6 +143,30 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages) | |||
143 | return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); | 143 | return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); |
144 | } | 144 | } |
145 | 145 | ||
146 | /* | ||
147 | * free_bootmem_late - free bootmem pages directly to page allocator | ||
148 | * @addr: starting address of the range | ||
149 | * @size: size of the range in bytes | ||
150 | * | ||
151 | * This is only useful when the bootmem allocator has already been torn | ||
152 | * down, but we are still initializing the system. Pages are given directly | ||
153 | * to the page allocator, no bootmem metadata is updated because it is gone. | ||
154 | */ | ||
155 | void __init free_bootmem_late(unsigned long addr, unsigned long size) | ||
156 | { | ||
157 | unsigned long cursor, end; | ||
158 | |||
159 | kmemleak_free_part(__va(addr), size); | ||
160 | |||
161 | cursor = PFN_UP(addr); | ||
162 | end = PFN_DOWN(addr + size); | ||
163 | |||
164 | for (; cursor < end; cursor++) { | ||
165 | __free_pages_bootmem(pfn_to_page(cursor), 0); | ||
166 | totalram_pages++; | ||
167 | } | ||
168 | } | ||
169 | |||
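The cursor/end computation above means only pages that lie entirely inside the range are handed to the page allocator: the start address is rounded up to the next page frame and the end rounded down. A tiny sketch of that rounding, assuming the usual PFN_UP()/PFN_DOWN() definitions and 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Assumed to match the definitions in include/linux/pfn.h */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x10800;            /* range starts mid-page      */
	unsigned long size = 3 * PAGE_SIZE;      /* 12 KiB                     */
	unsigned long cursor = PFN_UP(addr);     /* first fully covered frame  */
	unsigned long end    = PFN_DOWN(addr + size);

	/* A 12 KiB range that straddles page boundaries frees only 2 pages. */
	printf("freeing pfns [%lu, %lu): %lu whole page(s)\n",
	       cursor, end, end - cursor);
	return 0;
}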
146 | static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) | 170 | static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) |
147 | { | 171 | { |
148 | int aligned; | 172 | int aligned; |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
23 | #include <linux/ima.h> | ||
24 | #include <linux/hugetlb.h> | 23 | #include <linux/hugetlb.h> |
25 | #include <linux/profile.h> | 24 | #include <linux/profile.h> |
26 | #include <linux/module.h> | 25 | #include <linux/module.h> |
@@ -1061,9 +1060,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, | |||
1061 | error = security_file_mmap(file, reqprot, prot, flags, addr, 0); | 1060 | error = security_file_mmap(file, reqprot, prot, flags, addr, 0); |
1062 | if (error) | 1061 | if (error) |
1063 | return error; | 1062 | return error; |
1064 | error = ima_file_mmap(file, prot); | ||
1065 | if (error) | ||
1066 | return error; | ||
1067 | 1063 | ||
1068 | return mmap_region(file, addr, len, flags, vm_flags, pgoff); | 1064 | return mmap_region(file, addr, len, flags, vm_flags, pgoff); |
1069 | } | 1065 | } |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 7db1de0497c6..887c03c4e3c6 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -10,7 +10,9 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/ratelimit.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | |||
14 | #include <net/ip.h> | 16 | #include <net/ip.h> |
15 | #include <net/sock.h> | 17 | #include <net/sock.h> |
16 | 18 | ||
diff --git a/net/core/utils.c b/net/core/utils.c index 83221aee7084..838250241d26 100644 --- a/net/core/utils.c +++ b/net/core/utils.c | |||
@@ -24,6 +24,8 @@ | |||
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | #include <linux/percpu.h> | 25 | #include <linux/percpu.h> |
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/ratelimit.h> | ||
28 | |||
27 | #include <net/sock.h> | 29 | #include <net/sock.h> |
28 | 30 | ||
29 | #include <asm/byteorder.h> | 31 | #include <asm/byteorder.h> |
diff --git a/samples/Kconfig b/samples/Kconfig index b92bde3c6a89..e4be84ac3d38 100644 --- a/samples/Kconfig +++ b/samples/Kconfig | |||
@@ -40,5 +40,11 @@ config SAMPLE_KRETPROBES | |||
40 | default m | 40 | default m |
41 | depends on SAMPLE_KPROBES && KRETPROBES | 41 | depends on SAMPLE_KPROBES && KRETPROBES |
42 | 42 | ||
43 | config SAMPLE_HW_BREAKPOINT | ||
44 | tristate "Build kernel hardware breakpoint examples -- loadable module only" | ||
45 | depends on HAVE_HW_BREAKPOINT && m | ||
46 | help | ||
47 | This builds kernel hardware breakpoint example modules. | ||
48 | |||
43 | endif # SAMPLES | 49 | endif # SAMPLES |
44 | 50 | ||
diff --git a/samples/Makefile b/samples/Makefile index 43343a03b1f4..0f15e6d77fd6 100644 --- a/samples/Makefile +++ b/samples/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # Makefile for Linux samples code | 1 | # Makefile for Linux samples code |
2 | 2 | ||
3 | obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ | 3 | obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ \ |
4 | hw_breakpoint/ | ||
diff --git a/samples/hw_breakpoint/Makefile b/samples/hw_breakpoint/Makefile new file mode 100644 index 000000000000..0f5c31c2fc47 --- /dev/null +++ b/samples/hw_breakpoint/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_SAMPLE_HW_BREAKPOINT) += data_breakpoint.o | |||
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c new file mode 100644 index 000000000000..29525500df00 --- /dev/null +++ b/samples/hw_breakpoint/data_breakpoint.c | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * data_breakpoint.c - Sample HW Breakpoint file to watch kernel data address | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * usage: insmod data_breakpoint.ko ksym=<ksym_name> | ||
19 | * | ||
20 | * This file is a kernel module that places a breakpoint over ksym_name kernel | ||
21 | * variable using Hardware Breakpoint register. The corresponding handler which | ||
22 | * prints a backtrace is invoked every time a write operation is performed on | ||
23 | * that variable. | ||
24 | * | ||
25 | * Copyright (C) IBM Corporation, 2009 | ||
26 | * | ||
27 | * Author: K.Prasad <prasad@linux.vnet.ibm.com> | ||
28 | */ | ||
29 | #include <linux/module.h> /* Needed by all modules */ | ||
30 | #include <linux/kernel.h> /* Needed for KERN_INFO */ | ||
31 | #include <linux/init.h> /* Needed for the macros */ | ||
32 | #include <linux/kallsyms.h> | ||
33 | |||
34 | #include <linux/perf_event.h> | ||
35 | #include <linux/hw_breakpoint.h> | ||
36 | |||
37 | struct perf_event **sample_hbp; | ||
38 | |||
39 | static char ksym_name[KSYM_NAME_LEN] = "pid_max"; | ||
40 | module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO); | ||
41 | MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" | ||
42 | " write operations on the kernel symbol"); | ||
43 | |||
44 | static void sample_hbp_handler(struct perf_event *temp, void *data) | ||
45 | { | ||
46 | printk(KERN_INFO "%s value is changed\n", ksym_name); | ||
47 | dump_stack(); | ||
48 | printk(KERN_INFO "Dump stack from sample_hbp_handler\n"); | ||
49 | } | ||
50 | |||
51 | static int __init hw_break_module_init(void) | ||
52 | { | ||
53 | int ret; | ||
54 | DEFINE_BREAKPOINT_ATTR(attr); | ||
55 | |||
56 | attr.bp_addr = kallsyms_lookup_name(ksym_name); | ||
57 | attr.bp_len = HW_BREAKPOINT_LEN_4; | ||
58 | attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | ||
59 | |||
60 | sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler); | ||
61 | if (IS_ERR(sample_hbp)) { | ||
62 | ret = PTR_ERR(sample_hbp); | ||
63 | goto fail; | ||
64 | } | ||
65 | |||
66 | printk(KERN_INFO "HW Breakpoint for %s write installed\n", ksym_name); | ||
67 | |||
68 | return 0; | ||
69 | |||
70 | fail: | ||
71 | printk(KERN_INFO "Breakpoint registration failed\n"); | ||
72 | |||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | static void __exit hw_break_module_exit(void) | ||
77 | { | ||
78 | unregister_wide_hw_breakpoint(sample_hbp); | ||
79 | printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name); | ||
80 | } | ||
81 | |||
82 | module_init(hw_break_module_init); | ||
83 | module_exit(hw_break_module_exit); | ||
84 | |||
85 | MODULE_LICENSE("GPL"); | ||
86 | MODULE_AUTHOR("K.Prasad"); | ||
87 | MODULE_DESCRIPTION("ksym breakpoint"); | ||
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index ea9f8a58678f..241310e59cd6 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
@@ -1852,10 +1852,17 @@ sub tracepoint_munge($) { | |||
1852 | my $tracepointname = 0; | 1852 | my $tracepointname = 0; |
1853 | my $tracepointargs = 0; | 1853 | my $tracepointargs = 0; |
1854 | 1854 | ||
1855 | if($prototype =~ m/TRACE_EVENT\((.*?),/) { | 1855 | if ($prototype =~ m/TRACE_EVENT\((.*?),/) { |
1856 | $tracepointname = $1; | 1856 | $tracepointname = $1; |
1857 | } | 1857 | } |
1858 | if($prototype =~ m/TP_PROTO\((.*?)\)/) { | 1858 | if ($prototype =~ m/DEFINE_SINGLE_EVENT\((.*?),/) { |
1859 | $tracepointname = $1; | ||
1860 | } | ||
1861 | if ($prototype =~ m/DEFINE_EVENT\((.*?),(.*?),/) { | ||
1862 | $tracepointname = $2; | ||
1863 | } | ||
1864 | $tracepointname =~ s/^\s+//; #strip leading whitespace | ||
1865 | if ($prototype =~ m/TP_PROTO\((.*?)\)/) { | ||
1859 | $tracepointargs = $1; | 1866 | $tracepointargs = $1; |
1860 | } | 1867 | } |
1861 | if (($tracepointname eq 0) || ($tracepointargs eq 0)) { | 1868 | if (($tracepointname eq 0) || ($tracepointargs eq 0)) { |
@@ -1920,7 +1927,9 @@ sub process_state3_function($$) { | |||
1920 | if ($prototype =~ /SYSCALL_DEFINE/) { | 1927 | if ($prototype =~ /SYSCALL_DEFINE/) { |
1921 | syscall_munge(); | 1928 | syscall_munge(); |
1922 | } | 1929 | } |
1923 | if ($prototype =~ /TRACE_EVENT/) { | 1930 | if ($prototype =~ /TRACE_EVENT/ || $prototype =~ /DEFINE_EVENT/ || |
1931 | $prototype =~ /DEFINE_SINGLE_EVENT/) | ||
1932 | { | ||
1924 | tracepoint_munge($file); | 1933 | tracepoint_munge($file); |
1925 | } | 1934 | } |
1926 | dump_function($prototype, $file); | 1935 | dump_function($prototype, $file); |
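tracepoint_munge() takes the first capture for TRACE_EVENT() and DEFINE_SINGLE_EVENT() but the second for DEFINE_EVENT(), because DEFINE_EVENT()'s first argument names the event class it reuses. A schematic trace-header fragment showing the two shapes the regexes are aimed at (event and class names are made up, and the class declaration is omitted, so this is not a drop-in header):

/* Stand-alone event: the event name is the first macro argument. */
TRACE_EVENT(sample_single,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

/* Class-based event: argument one is the class, argument two the name. */
DEFINE_EVENT(sample_class, sample_event,
	TP_PROTO(int value),
	TP_ARGS(value)
);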
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 090d300d7394..f0d14452632b 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
@@ -6,77 +6,93 @@ | |||
6 | # all the offsets to the calls to mcount. | 6 | # all the offsets to the calls to mcount. |
7 | # | 7 | # |
8 | # | 8 | # |
9 | # What we want to end up with is a section in vmlinux called | 9 | # What we want to end up with is that each object file will have a |
10 | # __mcount_loc that contains a list of pointers to all the | 10 | # section called __mcount_loc that will hold the list of pointers to mcount |
11 | # call sites in the kernel that call mcount. Later on boot up, the kernel | 11 | # callers. After final linking, the vmlinux will have within .init.data the |
12 | # will read this list, save the locations and turn them into nops. | 12 | # list of all callers to mcount between __start_mcount_loc and __stop_mcount_loc. |
13 | # When tracing or profiling is later enabled, these locations will then | 13 | # Later on boot up, the kernel will read this list, save the locations and turn |
14 | # be converted back to pointers to some function. | 14 | # them into nops. When tracing or profiling is later enabled, these locations |
15 | # will then be converted back to pointers to some function. | ||
15 | # | 16 | # |
16 | # This is no easy feat. This script is called just after the original | 17 | # This is no easy feat. This script is called just after the original |
17 | # object is compiled and before it is linked. | 18 | # object is compiled and before it is linked. |
18 | # | 19 | # |
19 | # The references to the call sites are offsets from the section of text | 20 | # When parsing this object file using 'objdump', the references to the call |
20 | # that the call site is in. Hence, all functions in a section that | 21 | # sites are offsets from the section that the call site is in. Hence, all |
21 | # has a call site to mcount, will have the offset from the beginning of | 22 | # functions in a section that has a call site to mcount, will have the |
22 | # the section and not the beginning of the function. | 23 | # offset from the beginning of the section and not the beginning of the |
24 | # function. | ||
25 | # | ||
26 | # But where this section will finally reside in vmlinux is undetermined at | ||
27 | # this point. So we can't use this kind of offset to record the final | ||
28 | # address of this call site. | ||
29 | # | ||
30 | # The trick is to change the call offset so it refers to a function symbol in | ||
31 | # this section rather than the section start. During the link step, 'ld' will | ||
32 | # compute the final address according to the information we record. | ||
23 | # | 33 | # |
24 | # The trick is to find a way to record the beginning of the section. | ||
25 | # The way we do this is to look at the first function in the section | ||
26 | # which will also be the location of that section after final link. | ||
27 | # e.g. | 34 | # e.g. |
28 | # | 35 | # |
29 | # .section ".sched.text", "ax" | 36 | # .section ".sched.text", "ax" |
30 | # .globl my_func | ||
31 | # my_func: | ||
32 | # [...] | 37 | # [...] |
33 | # call mcount (offset: 0x5) | 38 | # func1: |
39 | # [...] | ||
40 | # call mcount (offset: 0x10) | ||
34 | # [...] | 41 | # [...] |
35 | # ret | 42 | # ret |
36 | # other_func: | 43 | # .globl fun2 |
44 | # func2: (offset: 0x20) | ||
37 | # [...] | 45 | # [...] |
38 | # call mcount (offset: 0x1b) | 46 | # [...] |
47 | # ret | ||
48 | # func3: | ||
49 | # [...] | ||
50 | # call mcount (offset: 0x30) | ||
39 | # [...] | 51 | # [...] |
40 | # | 52 | # |
41 | # Both relocation offsets for the mcounts in the above example will be | 53 | # Both relocation offsets for the mcounts in the above example will be |
42 | # offset from .sched.text. If we make another file called tmp.s with: | 54 | # offset from .sched.text. If we choose global symbol func2 as a reference and |
55 | # make another file called tmp.s with the new offsets: | ||
43 | # | 56 | # |
44 | # .section __mcount_loc | 57 | # .section __mcount_loc |
45 | # .quad my_func + 0x5 | 58 | # .quad func2 - 0x10 |
46 | # .quad my_func + 0x1b | 59 | # .quad func2 + 0x10 |
47 | # | 60 | # |
48 | # We can then compile this tmp.s into tmp.o, and link it to the original | 61 | # We can then compile this tmp.s into tmp.o, and link it back to the original |
49 | # object. | 62 | # object. |
50 | # | 63 | # |
51 | # But this gets hard if my_func is not globl (a static function). | 64 | # In our algorithm, we will choose the first global function we meet in this |
52 | # In such a case we have: | 65 | # section as the reference. But this gets hard if there is no global function |
66 | # in this section. In such a case we have to select a local one. E.g. func1: | ||
53 | # | 67 | # |
54 | # .section ".sched.text", "ax" | 68 | # .section ".sched.text", "ax" |
55 | # my_func: | 69 | # func1: |
56 | # [...] | 70 | # [...] |
57 | # call mcount (offset: 0x5) | 71 | # call mcount (offset: 0x10) |
58 | # [...] | 72 | # [...] |
59 | # ret | 73 | # ret |
60 | # other_func: | 74 | # func2: |
61 | # [...] | 75 | # [...] |
62 | # call mcount (offset: 0x1b) | 76 | # call mcount (offset: 0x20) |
63 | # [...] | 77 | # [...] |
78 | # .section "other.section" | ||
64 | # | 79 | # |
65 | # If we make the tmp.s the same as above, when we link together with | 80 | # If we make the tmp.s the same as above, when we link together with |
66 | # the original object, we will end up with two symbols for my_func: | 81 | # the original object, we will end up with two symbols for func1: |
67 | # one local, one global. After final compile, we will end up with | 82 | # one local, one global. After final compile, we will end up with |
68 | # an undefined reference to my_func. | 83 | # an undefined reference to func1 or a wrong reference to another global |
84 | # func1 in other files. | ||
69 | # | 85 | # |
70 | # Since local objects can reference local variables, we need to find | 86 | # Since local objects can reference local variables, we need to find |
71 | # a way to make tmp.o reference the local objects of the original object | 87 | # a way to make tmp.o reference the local objects of the original object |
72 | # file after it is linked together. To do this, we convert the my_func | 88 | # file after it is linked together. To do this, we convert func1 |
73 | # into a global symbol before linking tmp.o. Then after we link tmp.o | 89 | # into a global symbol before linking tmp.o. Then after we link tmp.o |
74 | # we will only have a single symbol for my_func that is global. | 90 | # we will only have a single symbol for func1 that is global. |
75 | # We can convert my_func back into a local symbol and we are done. | 91 | # We can convert func1 back into a local symbol and we are done. |
76 | # | 92 | # |
77 | # Here are the steps we take: | 93 | # Here are the steps we take: |
78 | # | 94 | # |
79 | # 1) Record all the local symbols by using 'nm' | 95 | # 1) Record all the local and weak symbols by using 'nm' |
80 | # 2) Use objdump to find all the call site offsets and sections for | 96 | # 2) Use objdump to find all the call site offsets and sections for |
81 | # mcount. | 97 | # mcount. |
82 | # 3) Compile the list into its own object. | 98 | # 3) Compile the list into its own object. |
@@ -86,10 +102,8 @@ | |||
86 | # 6) Link together this new object with the list object. | 102 | # 6) Link together this new object with the list object. |
87 | # 7) Convert the local functions back to local symbols and rename | 103 | # 7) Convert the local functions back to local symbols and rename |
88 | # the result as the original object. | 104 | # the result as the original object. |
89 | # End. | ||
90 | # 8) Link the object with the list object. | 105 | # 8) Link the object with the list object. |
91 | # 9) Move the result back to the original object. | 106 | # 9) Move the result back to the original object. |
92 | # End. | ||
93 | # | 107 | # |
94 | 108 | ||
95 | use strict; | 109 | use strict; |
@@ -99,7 +113,7 @@ $P =~ s@.*/@@g; | |||
99 | 113 | ||
100 | my $V = '0.1'; | 114 | my $V = '0.1'; |
101 | 115 | ||
102 | if ($#ARGV < 7) { | 116 | if ($#ARGV != 10) { |
103 | print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n"; | 117 | print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n"; |
104 | print "version: $V\n"; | 118 | print "version: $V\n"; |
105 | exit(1); | 119 | exit(1); |
@@ -109,7 +123,7 @@ my ($arch, $bits, $objdump, $objcopy, $cc, | |||
109 | $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV; | 123 | $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV; |
110 | 124 | ||
111 | # This file refers to mcount and shouldn't be ftraced, so let's ignore it | 125 | # This file refers to mcount and shouldn't be ftraced, so let's ignore it |
112 | if ($inputfile eq "kernel/trace/ftrace.o") { | 126 | if ($inputfile =~ m,kernel/trace/ftrace\.o$,) { |
113 | exit(0); | 127 | exit(0); |
114 | } | 128 | } |
115 | 129 | ||
@@ -119,6 +133,7 @@ my %text_sections = ( | |||
119 | ".sched.text" => 1, | 133 | ".sched.text" => 1, |
120 | ".spinlock.text" => 1, | 134 | ".spinlock.text" => 1, |
121 | ".irqentry.text" => 1, | 135 | ".irqentry.text" => 1, |
136 | ".text.unlikely" => 1, | ||
122 | ); | 137 | ); |
123 | 138 | ||
124 | $objdump = "objdump" if ((length $objdump) == 0); | 139 | $objdump = "objdump" if ((length $objdump) == 0); |
@@ -137,13 +152,47 @@ my %weak; # List of weak functions | |||
137 | my %convert; # List of local functions used that needs conversion | 152 | my %convert; # List of local functions used that needs conversion |
138 | 153 | ||
139 | my $type; | 154 | my $type; |
140 | my $nm_regex; # Find the local functions (return function) | 155 | my $local_regex; # Match a local function (return function) |
156 | my $weak_regex; # Match a weak function (return function) | ||
141 | my $section_regex; # Find the start of a section | 157 | my $section_regex; # Find the start of a section |
142 | my $function_regex; # Find the name of a function | 158 | my $function_regex; # Find the name of a function |
143 | # (return offset and func name) | 159 | # (return offset and func name) |
144 | my $mcount_regex; # Find the call site to mcount (return offset) | 160 | my $mcount_regex; # Find the call site to mcount (return offset) |
145 | my $alignment; # The .align value to use for $mcount_section | 161 | my $alignment; # The .align value to use for $mcount_section |
146 | my $section_type; # Section header plus possible alignment command | 162 | my $section_type; # Section header plus possible alignment command |
163 | my $can_use_local = 0; # If we can use local function references | ||
164 | |||
165 | # Shut up recordmcount if user has older objcopy | ||
166 | my $quiet_recordmcount = ".tmp_quiet_recordmcount"; | ||
167 | my $print_warning = 1; | ||
168 | $print_warning = 0 if ( -f $quiet_recordmcount); | ||
169 | |||
170 | ## | ||
171 | # check_objcopy - whether objcopy supports --globalize-symbols | ||
172 | # | ||
173 | # --globalize-symbols came out in 2.17, we must test the version | ||
174 | # of objcopy, and if it is less than 2.17, then we can not | ||
175 | # record local functions. | ||
176 | sub check_objcopy | ||
177 | { | ||
178 | open (IN, "$objcopy --version |") or die "error running $objcopy"; | ||
179 | while (<IN>) { | ||
180 | if (/objcopy.*\s(\d+)\.(\d+)/) { | ||
181 | $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17)); | ||
182 | last; | ||
183 | } | ||
184 | } | ||
185 | close (IN); | ||
186 | |||
187 | if (!$can_use_local && $print_warning) { | ||
188 | print STDERR "WARNING: could not find objcopy version or version " . | ||
189 | "is less than 2.17.\n" . | ||
190 | "\tLocal function references are disabled.\n"; | ||
191 | open (QUIET, ">$quiet_recordmcount"); | ||
192 | printf QUIET "Disables the warning from recordmcount.pl\n"; | ||
193 | close QUIET; | ||
194 | } | ||
195 | } | ||
147 | 196 | ||
148 | if ($arch eq "x86") { | 197 | if ($arch eq "x86") { |
149 | if ($bits == 64) { | 198 | if ($bits == 64) { |
@@ -157,7 +206,8 @@ if ($arch eq "x86") { | |||
157 | # We base the defaults off of i386, the other archs may | 206 | # We base the defaults off of i386, the other archs may |
158 | # feel free to change them in the below if statements. | 207 | # feel free to change them in the below if statements. |
159 | # | 208 | # |
160 | $nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)"; | 209 | $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)"; |
210 | $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)"; | ||
161 | $section_regex = "Disassembly of section\\s+(\\S+):"; | 211 | $section_regex = "Disassembly of section\\s+(\\S+):"; |
162 | $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; | 212 | $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; |
163 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; | 213 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; |
@@ -206,7 +256,7 @@ if ($arch eq "x86_64") { | |||
206 | $cc .= " -m32"; | 256 | $cc .= " -m32"; |
207 | 257 | ||
208 | } elsif ($arch eq "powerpc") { | 258 | } elsif ($arch eq "powerpc") { |
209 | $nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; | 259 | $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; |
210 | $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:"; | 260 | $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:"; |
211 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$"; | 261 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$"; |
212 | 262 | ||
@@ -278,44 +328,17 @@ if ($filename =~ m,^(.*)(\.\S),) { | |||
278 | my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; | 328 | my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; |
279 | my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; | 329 | my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; |
280 | 330 | ||
281 | # | 331 | check_objcopy(); |
282 | # --globalize-symbols came out in 2.17, we must test the version | ||
283 | # of objcopy, and if it is less than 2.17, then we can not | ||
284 | # record local functions. | ||
285 | my $use_locals = 01; | ||
286 | my $local_warn_once = 0; | ||
287 | my $found_version = 0; | ||
288 | |||
289 | open (IN, "$objcopy --version |") || die "error running $objcopy"; | ||
290 | while (<IN>) { | ||
291 | if (/objcopy.*\s(\d+)\.(\d+)/) { | ||
292 | my $major = $1; | ||
293 | my $minor = $2; | ||
294 | |||
295 | $found_version = 1; | ||
296 | if ($major < 2 || | ||
297 | ($major == 2 && $minor < 17)) { | ||
298 | $use_locals = 0; | ||
299 | } | ||
300 | last; | ||
301 | } | ||
302 | } | ||
303 | close (IN); | ||
304 | |||
305 | if (!$found_version) { | ||
306 | print STDERR "WARNING: could not find objcopy version.\n" . | ||
307 | "\tDisabling local function references.\n"; | ||
308 | } | ||
309 | 332 | ||
310 | # | 333 | # |
311 | # Step 1: find all the local (static functions) and weak symbols. | 334 | # Step 1: find all the local (static functions) and weak symbols. |
312 | # 't' is local, 'w/W' is weak (we never use a weak function) | 335 | # 't' is local, 'w/W' is weak |
313 | # | 336 | # |
314 | open (IN, "$nm $inputfile|") || die "error running $nm"; | 337 | open (IN, "$nm $inputfile|") || die "error running $nm"; |
315 | while (<IN>) { | 338 | while (<IN>) { |
316 | if (/$nm_regex/) { | 339 | if (/$local_regex/) { |
317 | $locals{$1} = 1; | 340 | $locals{$1} = 1; |
318 | } elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) { | 341 | } elsif (/$weak_regex/) { |
319 | $weak{$2} = $1; | 342 | $weak{$2} = $1; |
320 | } | 343 | } |
321 | } | 344 | } |
@@ -333,26 +356,20 @@ my $offset = 0; # offset of ref_func to section beginning | |||
333 | # | 356 | # |
334 | sub update_funcs | 357 | sub update_funcs |
335 | { | 358 | { |
336 | return if ($#offsets < 0); | 359 | return unless ($ref_func and @offsets); |
337 | |||
338 | defined($ref_func) || die "No function to reference"; | ||
339 | 360 | ||
340 | # A section only had a weak function, to represent it. | 361 | # Sanity check on weak function. A weak function may be overwritten by |
341 | # Unfortunately, a weak function may be overwritten by another | 362 | # another function of the same name, making all these offsets incorrect. |
342 | # function of the same name, making all these offsets incorrect. | ||
343 | # To be safe, we simply print a warning and bail. | ||
344 | if (defined $weak{$ref_func}) { | 363 | if (defined $weak{$ref_func}) { |
345 | print STDERR | 364 | die "$inputfile: ERROR: referencing weak function" . |
346 | "$inputfile: WARNING: referencing weak function" . | ||
347 | " $ref_func for mcount\n"; | 365 | " $ref_func for mcount\n"; |
348 | return; | ||
349 | } | 366 | } |
350 | 367 | ||
351 | # is this function static? If so, note this fact. | 368 | # is this function static? If so, note this fact. |
352 | if (defined $locals{$ref_func}) { | 369 | if (defined $locals{$ref_func}) { |
353 | 370 | ||
354 | # only use locals if objcopy supports globalize-symbols | 371 | # only use locals if objcopy supports globalize-symbols |
355 | if (!$use_locals) { | 372 | if (!$can_use_local) { |
356 | return; | 373 | return; |
357 | } | 374 | } |
358 | $convert{$ref_func} = 1; | 375 | $convert{$ref_func} = 1; |
@@ -378,9 +395,27 @@ open(IN, "$objdump -hdr $inputfile|") || die "error running $objdump"; | |||
378 | 395 | ||
379 | my $text; | 396 | my $text; |
380 | 397 | ||
398 | |||
399 | # read headers first | ||
381 | my $read_headers = 1; | 400 | my $read_headers = 1; |
382 | 401 | ||
383 | while (<IN>) { | 402 | while (<IN>) { |
403 | |||
404 | if ($read_headers && /$mcount_section/) { | ||
405 | # | ||
406 | # Somehow the make process can execute this script on an | ||
407 | # object twice. If it does, we would duplicate the mcount | ||
408 | # section and it will cause the function tracer self test | ||
409 | # to fail. Check if the mcount section exists, and if it does, | ||
410 | # warn and exit. | ||
411 | # | ||
412 | print STDERR "ERROR: $mcount_section already in $inputfile\n" . | ||
413 | "\tThis may be an indication that your build is corrupted.\n" . | ||
414 | "\tDelete $inputfile and try again. If the same object file\n" . | ||
415 | "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n"; | ||
416 | exit(-1); | ||
417 | } | ||
418 | |||
384 | # is it a section? | 419 | # is it a section? |
385 | if (/$section_regex/) { | 420 | if (/$section_regex/) { |
386 | $read_headers = 0; | 421 | $read_headers = 0; |
@@ -392,7 +427,7 @@ while (<IN>) { | |||
392 | $read_function = 0; | 427 | $read_function = 0; |
393 | } | 428 | } |
394 | # print out any recorded offsets | 429 | # print out any recorded offsets |
395 | update_funcs() if (defined($ref_func)); | 430 | update_funcs(); |
396 | 431 | ||
397 | # reset all markers and arrays | 432 | # reset all markers and arrays |
398 | $text_found = 0; | 433 | $text_found = 0; |
@@ -421,21 +456,7 @@ while (<IN>) { | |||
421 | $offset = hex $1; | 456 | $offset = hex $1; |
422 | } | 457 | } |
423 | } | 458 | } |
424 | } elsif ($read_headers && /$mcount_section/) { | ||
425 | # | ||
426 | # Somehow the make process can execute this script on an | ||
427 | # object twice. If it does, we would duplicate the mcount | ||
428 | # section and it will cause the function tracer self test | ||
429 | # to fail. Check if the mcount section exists, and if it does, | ||
430 | # warn and exit. | ||
431 | # | ||
432 | print STDERR "ERROR: $mcount_section already in $inputfile\n" . | ||
433 | "\tThis may be an indication that your build is corrupted.\n" . | ||
434 | "\tDelete $inputfile and try again. If the same object file\n" . | ||
435 | "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n"; | ||
436 | exit(-1); | ||
437 | } | 459 | } |
438 | |||
439 | # is this a call site to mcount? If so, record it to print later | 460 | # is this a call site to mcount? If so, record it to print later |
440 | if ($text_found && /$mcount_regex/) { | 461 | if ($text_found && /$mcount_regex/) { |
441 | $offsets[$#offsets + 1] = hex $1; | 462 | $offsets[$#offsets + 1] = hex $1; |
@@ -443,7 +464,7 @@ while (<IN>) { | |||
443 | } | 464 | } |
444 | 465 | ||
445 | # dump out any more offsets that may have been found | 466 | # dump out any more offsets that may have been found |
446 | update_funcs() if (defined($ref_func)); | 467 | update_funcs(); |
447 | 468 | ||
448 | # If we did not find any mcount callers, we are done (do nothing). | 469 | # If we did not find any mcount callers, we are done (do nothing). |
449 | if (!$opened) { | 470 | if (!$opened) { |
diff --git a/scripts/selinux/Makefile b/scripts/selinux/Makefile index ca4b1ec01822..e8049da1831f 100644 --- a/scripts/selinux/Makefile +++ b/scripts/selinux/Makefile | |||
@@ -1,2 +1,2 @@ | |||
1 | subdir-y := mdp | 1 | subdir-y := mdp genheaders |
2 | subdir- += mdp | 2 | subdir- += mdp genheaders |
diff --git a/scripts/selinux/genheaders/.gitignore b/scripts/selinux/genheaders/.gitignore new file mode 100644 index 000000000000..4c0b646ff8d5 --- /dev/null +++ b/scripts/selinux/genheaders/.gitignore | |||
@@ -0,0 +1 @@ | |||
genheaders | |||
diff --git a/scripts/selinux/genheaders/Makefile b/scripts/selinux/genheaders/Makefile new file mode 100644 index 000000000000..417b165008ee --- /dev/null +++ b/scripts/selinux/genheaders/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | hostprogs-y := genheaders | ||
2 | HOST_EXTRACFLAGS += -Isecurity/selinux/include | ||
3 | |||
4 | always := $(hostprogs-y) | ||
5 | clean-files := $(hostprogs-y) | ||
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c new file mode 100644 index 000000000000..24626968055d --- /dev/null +++ b/scripts/selinux/genheaders/genheaders.c | |||
@@ -0,0 +1,118 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <stdlib.h> | ||
3 | #include <unistd.h> | ||
4 | #include <string.h> | ||
5 | #include <errno.h> | ||
6 | #include <ctype.h> | ||
7 | |||
8 | struct security_class_mapping { | ||
9 | const char *name; | ||
10 | const char *perms[sizeof(unsigned) * 8 + 1]; | ||
11 | }; | ||
12 | |||
13 | #include "classmap.h" | ||
14 | #include "initial_sid_to_string.h" | ||
15 | |||
16 | #define max(x, y) (((int)(x) > (int)(y)) ? x : y) | ||
17 | |||
18 | const char *progname; | ||
19 | |||
20 | static void usage(void) | ||
21 | { | ||
22 | printf("usage: %s flask.h av_permissions.h\n", progname); | ||
23 | exit(1); | ||
24 | } | ||
25 | |||
26 | static char *stoupperx(const char *s) | ||
27 | { | ||
28 | char *s2 = strdup(s); | ||
29 | char *p; | ||
30 | |||
31 | if (!s2) { | ||
32 | fprintf(stderr, "%s: out of memory\n", progname); | ||
33 | exit(3); | ||
34 | } | ||
35 | |||
36 | for (p = s2; *p; p++) | ||
37 | *p = toupper(*p); | ||
38 | return s2; | ||
39 | } | ||
40 | |||
41 | int main(int argc, char *argv[]) | ||
42 | { | ||
43 | int i, j, k; | ||
44 | int isids_len; | ||
45 | FILE *fout; | ||
46 | |||
47 | progname = argv[0]; | ||
48 | |||
49 | if (argc < 3) | ||
50 | usage(); | ||
51 | |||
52 | fout = fopen(argv[1], "w"); | ||
53 | if (!fout) { | ||
54 | fprintf(stderr, "Could not open %s for writing: %s\n", | ||
55 | argv[1], strerror(errno)); | ||
56 | exit(2); | ||
57 | } | ||
58 | |||
59 | for (i = 0; secclass_map[i].name; i++) { | ||
60 | struct security_class_mapping *map = &secclass_map[i]; | ||
61 | map->name = stoupperx(map->name); | ||
62 | for (j = 0; map->perms[j]; j++) | ||
63 | map->perms[j] = stoupperx(map->perms[j]); | ||
64 | } | ||
65 | |||
66 | isids_len = sizeof(initial_sid_to_string) / sizeof (char *); | ||
67 | for (i = 1; i < isids_len; i++) | ||
68 | initial_sid_to_string[i] = stoupperx(initial_sid_to_string[i]); | ||
69 | |||
70 | fprintf(fout, "/* This file is automatically generated. Do not edit. */\n"); | ||
71 | fprintf(fout, "#ifndef _SELINUX_FLASK_H_\n#define _SELINUX_FLASK_H_\n\n"); | ||
72 | |||
73 | for (i = 0; secclass_map[i].name; i++) { | ||
74 | struct security_class_mapping *map = &secclass_map[i]; | ||
75 | fprintf(fout, "#define SECCLASS_%s", map->name); | ||
76 | for (j = 0; j < max(1, 40 - strlen(map->name)); j++) | ||
77 | fprintf(fout, " "); | ||
78 | fprintf(fout, "%2d\n", i+1); | ||
79 | } | ||
80 | |||
81 | fprintf(fout, "\n"); | ||
82 | |||
83 | for (i = 1; i < isids_len; i++) { | ||
84 | char *s = initial_sid_to_string[i]; | ||
85 | fprintf(fout, "#define SECINITSID_%s", s); | ||
86 | for (j = 0; j < max(1, 40 - strlen(s)); j++) | ||
87 | fprintf(fout, " "); | ||
88 | fprintf(fout, "%2d\n", i); | ||
89 | } | ||
90 | fprintf(fout, "\n#define SECINITSID_NUM %d\n", i-1); | ||
91 | fprintf(fout, "\n#endif\n"); | ||
92 | fclose(fout); | ||
93 | |||
94 | fout = fopen(argv[2], "w"); | ||
95 | if (!fout) { | ||
96 | fprintf(stderr, "Could not open %s for writing: %s\n", | ||
97 | argv[2], strerror(errno)); | ||
98 | exit(4); | ||
99 | } | ||
100 | |||
101 | fprintf(fout, "/* This file is automatically generated. Do not edit. */\n"); | ||
102 | fprintf(fout, "#ifndef _SELINUX_AV_PERMISSIONS_H_\n#define _SELINUX_AV_PERMISSIONS_H_\n\n"); | ||
103 | |||
104 | for (i = 0; secclass_map[i].name; i++) { | ||
105 | struct security_class_mapping *map = &secclass_map[i]; | ||
106 | for (j = 0; map->perms[j]; j++) { | ||
107 | fprintf(fout, "#define %s__%s", map->name, | ||
108 | map->perms[j]); | ||
109 | for (k = 0; k < max(1, 40 - strlen(map->name) - strlen(map->perms[j])); k++) | ||
110 | fprintf(fout, " "); | ||
111 | fprintf(fout, "0x%08xUL\n", (1<<j)); | ||
112 | } | ||
113 | } | ||
114 | |||
115 | fprintf(fout, "\n#endif\n"); | ||
116 | fclose(fout); | ||
117 | exit(0); | ||
118 | } | ||
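For a made-up class "memfile" with permissions "read" and "write", the two loops above would emit roughly the following into flask.h and av_permissions.h: the class value is the map index plus one, and each permission gets one bit (1 << j):

/* flask.h (generated) */
#define SECCLASS_MEMFILE                          1

/* av_permissions.h (generated) */
#define MEMFILE__READ                             0x00000001UL
#define MEMFILE__WRITE                            0x00000002UL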
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c index b4ced8562587..62b34ce1f50d 100644 --- a/scripts/selinux/mdp/mdp.c +++ b/scripts/selinux/mdp/mdp.c | |||
@@ -29,86 +29,27 @@ | |||
29 | #include <unistd.h> | 29 | #include <unistd.h> |
30 | #include <string.h> | 30 | #include <string.h> |
31 | 31 | ||
32 | #include "flask.h" | ||
33 | |||
34 | static void usage(char *name) | 32 | static void usage(char *name) |
35 | { | 33 | { |
36 | printf("usage: %s [-m] policy_file context_file\n", name); | 34 | printf("usage: %s [-m] policy_file context_file\n", name); |
37 | exit(1); | 35 | exit(1); |
38 | } | 36 | } |
39 | 37 | ||
40 | static void find_common_name(char *cname, char *dest, int len) | 38 | /* Class/perm mapping support */ |
41 | { | 39 | struct security_class_mapping { |
42 | char *start, *end; | 40 | const char *name; |
43 | 41 | const char *perms[sizeof(unsigned) * 8 + 1]; | |
44 | start = strchr(cname, '_')+1; | ||
45 | end = strchr(start, '_'); | ||
46 | if (!start || !end || start-cname > len || end-start > len) { | ||
47 | printf("Error with commons defines\n"); | ||
48 | exit(1); | ||
49 | } | ||
50 | strncpy(dest, start, end-start); | ||
51 | dest[end-start] = '\0'; | ||
52 | } | ||
53 | |||
54 | #define S_(x) x, | ||
55 | static char *classlist[] = { | ||
56 | #include "class_to_string.h" | ||
57 | NULL | ||
58 | }; | 42 | }; |
59 | #undef S_ | ||
60 | 43 | ||
44 | #include "classmap.h" | ||
61 | #include "initial_sid_to_string.h" | 45 | #include "initial_sid_to_string.h" |
62 | 46 | ||
63 | #define TB_(x) char *x[] = { | ||
64 | #define TE_(x) NULL }; | ||
65 | #define S_(x) x, | ||
66 | #include "common_perm_to_string.h" | ||
67 | #undef TB_ | ||
68 | #undef TE_ | ||
69 | #undef S_ | ||
70 | |||
71 | struct common { | ||
72 | char *cname; | ||
73 | char **perms; | ||
74 | }; | ||
75 | struct common common[] = { | ||
76 | #define TB_(x) { #x, x }, | ||
77 | #define S_(x) | ||
78 | #define TE_(x) | ||
79 | #include "common_perm_to_string.h" | ||
80 | #undef TB_ | ||
81 | #undef TE_ | ||
82 | #undef S_ | ||
83 | }; | ||
84 | |||
85 | #define S_(x, y, z) {x, #y}, | ||
86 | struct av_inherit { | ||
87 | int class; | ||
88 | char *common; | ||
89 | }; | ||
90 | struct av_inherit av_inherit[] = { | ||
91 | #include "av_inherit.h" | ||
92 | }; | ||
93 | #undef S_ | ||
94 | |||
95 | #include "av_permissions.h" | ||
96 | #define S_(x, y, z) {x, y, z}, | ||
97 | struct av_perms { | ||
98 | int class; | ||
99 | int perm_i; | ||
100 | char *perm_s; | ||
101 | }; | ||
102 | struct av_perms av_perms[] = { | ||
103 | #include "av_perm_to_string.h" | ||
104 | }; | ||
105 | #undef S_ | ||
106 | |||
107 | int main(int argc, char *argv[]) | 47 | int main(int argc, char *argv[]) |
108 | { | 48 | { |
109 | int i, j, mls = 0; | 49 | int i, j, mls = 0; |
50 | int initial_sid_to_string_len; | ||
110 | char **arg, *polout, *ctxout; | 51 | char **arg, *polout, *ctxout; |
111 | int classlist_len, initial_sid_to_string_len; | 52 | |
112 | FILE *fout; | 53 | FILE *fout; |
113 | 54 | ||
114 | if (argc < 3) | 55 | if (argc < 3) |
@@ -127,64 +68,25 @@ int main(int argc, char *argv[]) | |||
127 | usage(argv[0]); | 68 | usage(argv[0]); |
128 | } | 69 | } |
129 | 70 | ||
130 | classlist_len = sizeof(classlist) / sizeof(char *); | ||
131 | /* print out the classes */ | 71 | /* print out the classes */ |
132 | for (i=1; i < classlist_len; i++) { | 72 | for (i = 0; secclass_map[i].name; i++) |
133 | if(classlist[i]) | 73 | fprintf(fout, "class %s\n", secclass_map[i].name); |
134 | fprintf(fout, "class %s\n", classlist[i]); | ||
135 | else | ||
136 | fprintf(fout, "class user%d\n", i); | ||
137 | } | ||
138 | fprintf(fout, "\n"); | 74 | fprintf(fout, "\n"); |
139 | 75 | ||
140 | initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *); | 76 | initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *); |
141 | /* print out the sids */ | 77 | /* print out the sids */ |
142 | for (i=1; i < initial_sid_to_string_len; i++) | 78 | for (i = 1; i < initial_sid_to_string_len; i++) |
143 | fprintf(fout, "sid %s\n", initial_sid_to_string[i]); | 79 | fprintf(fout, "sid %s\n", initial_sid_to_string[i]); |
144 | fprintf(fout, "\n"); | 80 | fprintf(fout, "\n"); |
145 | 81 | ||
146 | /* print out the commons */ | ||
147 | for (i=0; i< sizeof(common)/sizeof(struct common); i++) { | ||
148 | char cname[101]; | ||
149 | find_common_name(common[i].cname, cname, 100); | ||
150 | cname[100] = '\0'; | ||
151 | fprintf(fout, "common %s\n{\n", cname); | ||
152 | for (j=0; common[i].perms[j]; j++) | ||
153 | fprintf(fout, "\t%s\n", common[i].perms[j]); | ||
154 | fprintf(fout, "}\n\n"); | ||
155 | } | ||
156 | fprintf(fout, "\n"); | ||
157 | |||
158 | /* print out the class permissions */ | 82 | /* print out the class permissions */ |
159 | for (i=1; i < classlist_len; i++) { | 83 | for (i = 0; secclass_map[i].name; i++) { |
160 | if (classlist[i]) { | 84 | struct security_class_mapping *map = &secclass_map[i]; |
161 | int firstperm = -1, numperms = 0; | 85 | fprintf(fout, "class %s\n", map->name); |
162 | 86 | fprintf(fout, "{\n"); | |
163 | fprintf(fout, "class %s\n", classlist[i]); | 87 | for (j = 0; map->perms[j]; j++) |
164 | /* does it inherit from a common? */ | 88 | fprintf(fout, "\t%s\n", map->perms[j]); |
165 | for (j=0; j < sizeof(av_inherit)/sizeof(struct av_inherit); j++) | 89 | fprintf(fout, "}\n\n"); |
166 | if (av_inherit[j].class == i) | ||
167 | fprintf(fout, "inherits %s\n", av_inherit[j].common); | ||
168 | |||
169 | for (j=0; j < sizeof(av_perms)/sizeof(struct av_perms); j++) { | ||
170 | if (av_perms[j].class == i) { | ||
171 | if (firstperm == -1) | ||
172 | firstperm = j; | ||
173 | numperms++; | ||
174 | } | ||
175 | } | ||
176 | if (!numperms) { | ||
177 | fprintf(fout, "\n"); | ||
178 | continue; | ||
179 | } | ||
180 | |||
181 | fprintf(fout, "{\n"); | ||
182 | /* print out the av_perms */ | ||
183 | for (j=0; j < numperms; j++) { | ||
184 | fprintf(fout, "\t%s\n", av_perms[firstperm+j].perm_s); | ||
185 | } | ||
186 | fprintf(fout, "}\n\n"); | ||
187 | } | ||
188 | } | 90 | } |
189 | fprintf(fout, "\n"); | 91 | fprintf(fout, "\n"); |
190 | 92 | ||
@@ -197,31 +99,34 @@ int main(int argc, char *argv[]) | |||
197 | /* types, roles, and allows */ | 99 | /* types, roles, and allows */ |
198 | fprintf(fout, "type base_t;\n"); | 100 | fprintf(fout, "type base_t;\n"); |
199 | fprintf(fout, "role base_r types { base_t };\n"); | 101 | fprintf(fout, "role base_r types { base_t };\n"); |
200 | for (i=1; i < classlist_len; i++) { | 102 | for (i = 0; secclass_map[i].name; i++) |
201 | if (classlist[i]) | 103 | fprintf(fout, "allow base_t base_t:%s *;\n", |
202 | fprintf(fout, "allow base_t base_t:%s *;\n", classlist[i]); | 104 | secclass_map[i].name); |
203 | else | ||
204 | fprintf(fout, "allow base_t base_t:user%d *;\n", i); | ||
205 | } | ||
206 | fprintf(fout, "user user_u roles { base_r };\n"); | 105 | fprintf(fout, "user user_u roles { base_r };\n"); |
207 | fprintf(fout, "\n"); | 106 | fprintf(fout, "\n"); |
208 | 107 | ||
209 | /* default sids */ | 108 | /* default sids */ |
210 | for (i=1; i < initial_sid_to_string_len; i++) | 109 | for (i = 1; i < initial_sid_to_string_len; i++) |
211 | fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]); | 110 | fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]); |
212 | fprintf(fout, "\n"); | 111 | fprintf(fout, "\n"); |
213 | 112 | ||
214 | |||
215 | fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n"); | 113 | fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n"); |
216 | fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n"); | 114 | fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n"); |
115 | fprintf(fout, "fs_use_xattr ext4 user_u:base_r:base_t;\n"); | ||
217 | fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n"); | 116 | fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n"); |
218 | fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n"); | 117 | fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n"); |
219 | fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n"); | 118 | fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n"); |
119 | fprintf(fout, "fs_use_xattr jffs2 user_u:base_r:base_t;\n"); | ||
120 | fprintf(fout, "fs_use_xattr gfs2 user_u:base_r:base_t;\n"); | ||
121 | fprintf(fout, "fs_use_xattr lustre user_u:base_r:base_t;\n"); | ||
220 | 122 | ||
123 | fprintf(fout, "fs_use_task eventpollfs user_u:base_r:base_t;\n"); | ||
221 | fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n"); | 124 | fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n"); |
222 | fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n"); | 125 | fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n"); |
223 | 126 | ||
127 | fprintf(fout, "fs_use_trans mqueue user_u:base_r:base_t;\n"); | ||
224 | fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n"); | 128 | fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n"); |
129 | fprintf(fout, "fs_use_trans hugetlbfs user_u:base_r:base_t;\n"); | ||
225 | fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n"); | 130 | fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n"); |
226 | fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n"); | 131 | fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n"); |
227 | 132 | ||
diff --git a/security/Kconfig b/security/Kconfig index fb363cd81cf6..226b9556b25f 100644 --- a/security/Kconfig +++ b/security/Kconfig | |||
@@ -91,28 +91,6 @@ config SECURITY_PATH | |||
91 | implement pathname based access controls. | 91 | implement pathname based access controls. |
92 | If you are unsure how to answer this question, answer N. | 92 | If you are unsure how to answer this question, answer N. |
93 | 93 | ||
94 | config SECURITY_FILE_CAPABILITIES | ||
95 | bool "File POSIX Capabilities" | ||
96 | default n | ||
97 | help | ||
98 | This enables filesystem capabilities, allowing you to give | ||
99 | binaries a subset of root's powers without using setuid 0. | ||
100 | |||
101 | If in doubt, answer N. | ||
102 | |||
103 | config SECURITY_ROOTPLUG | ||
104 | bool "Root Plug Support" | ||
105 | depends on USB=y && SECURITY | ||
106 | help | ||
107 | This is a sample LSM module that should only be used as such. | ||
108 | It prevents any programs running with egid == 0 if a specific | ||
109 | USB device is not present in the system. | ||
110 | |||
111 | See <http://www.linuxjournal.com/article.php?sid=6279> for | ||
112 | more information about this module. | ||
113 | |||
114 | If you are unsure how to answer this question, answer N. | ||
115 | |||
116 | config INTEL_TXT | 94 | config INTEL_TXT |
117 | bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)" | 95 | bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)" |
118 | depends on HAVE_INTEL_TXT | 96 | depends on HAVE_INTEL_TXT |
@@ -165,5 +143,37 @@ source security/tomoyo/Kconfig | |||
165 | 143 | ||
166 | source security/integrity/ima/Kconfig | 144 | source security/integrity/ima/Kconfig |
167 | 145 | ||
146 | choice | ||
147 | prompt "Default security module" | ||
148 | default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX | ||
149 | default DEFAULT_SECURITY_SMACK if SECURITY_SMACK | ||
150 | default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO | ||
151 | default DEFAULT_SECURITY_DAC | ||
152 | |||
153 | help | ||
154 | Select the security module that will be used by default if the | ||
155 | kernel parameter security= is not specified. | ||
156 | |||
157 | config DEFAULT_SECURITY_SELINUX | ||
158 | bool "SELinux" if SECURITY_SELINUX=y | ||
159 | |||
160 | config DEFAULT_SECURITY_SMACK | ||
161 | bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y | ||
162 | |||
163 | config DEFAULT_SECURITY_TOMOYO | ||
164 | bool "TOMOYO" if SECURITY_TOMOYO=y | ||
165 | |||
166 | config DEFAULT_SECURITY_DAC | ||
167 | bool "Unix Discretionary Access Controls" | ||
168 | |||
169 | endchoice | ||
170 | |||
171 | config DEFAULT_SECURITY | ||
172 | string | ||
173 | default "selinux" if DEFAULT_SECURITY_SELINUX | ||
174 | default "smack" if DEFAULT_SECURITY_SMACK | ||
175 | default "tomoyo" if DEFAULT_SECURITY_TOMOYO | ||
176 | default "" if DEFAULT_SECURITY_DAC | ||
177 | |||
168 | endmenu | 178 | endmenu |
169 | 179 | ||
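The new choice block only selects which built-in LSM is active when the administrator does not say otherwise; it does not remove any module. For instance, a kernel built with SECURITY_SELINUX=y that leaves the choice at its default ends up with something like:

	CONFIG_DEFAULT_SECURITY_SELINUX=y
	CONFIG_DEFAULT_SECURITY="selinux"

and that default can still be overridden at boot with the existing security= parameter (e.g. security=tomoyo, provided TOMOYO is also built in), since the string is only used to pre-seed chosen_lsm in security/security.c below.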
diff --git a/security/Makefile b/security/Makefile index 95ecc06392d7..bb44e350c618 100644 --- a/security/Makefile +++ b/security/Makefile | |||
@@ -18,7 +18,6 @@ obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o | |||
18 | obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o | 18 | obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o |
19 | obj-$(CONFIG_AUDIT) += lsm_audit.o | 19 | obj-$(CONFIG_AUDIT) += lsm_audit.o |
20 | obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o | 20 | obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o |
21 | obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o | ||
22 | obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o | 21 | obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o |
23 | 22 | ||
24 | # Object integrity file lists | 23 | # Object integrity file lists |
diff --git a/security/capability.c b/security/capability.c index fce07a7bc825..5c700e1a4fd3 100644 --- a/security/capability.c +++ b/security/capability.c | |||
@@ -308,6 +308,22 @@ static int cap_path_truncate(struct path *path, loff_t length, | |||
308 | { | 308 | { |
309 | return 0; | 309 | return 0; |
310 | } | 310 | } |
311 | |||
312 | static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt, | ||
313 | mode_t mode) | ||
314 | { | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int cap_path_chown(struct path *path, uid_t uid, gid_t gid) | ||
319 | { | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | static int cap_path_chroot(struct path *root) | ||
324 | { | ||
325 | return 0; | ||
326 | } | ||
311 | #endif | 327 | #endif |
312 | 328 | ||
313 | static int cap_file_permission(struct file *file, int mask) | 329 | static int cap_file_permission(struct file *file, int mask) |
@@ -405,7 +421,7 @@ static int cap_kernel_create_files_as(struct cred *new, struct inode *inode) | |||
405 | return 0; | 421 | return 0; |
406 | } | 422 | } |
407 | 423 | ||
408 | static int cap_kernel_module_request(void) | 424 | static int cap_kernel_module_request(char *kmod_name) |
409 | { | 425 | { |
410 | return 0; | 426 | return 0; |
411 | } | 427 | } |
@@ -977,6 +993,9 @@ void security_fixup_ops(struct security_operations *ops) | |||
977 | set_to_cap_if_null(ops, path_link); | 993 | set_to_cap_if_null(ops, path_link); |
978 | set_to_cap_if_null(ops, path_rename); | 994 | set_to_cap_if_null(ops, path_rename); |
979 | set_to_cap_if_null(ops, path_truncate); | 995 | set_to_cap_if_null(ops, path_truncate); |
996 | set_to_cap_if_null(ops, path_chmod); | ||
997 | set_to_cap_if_null(ops, path_chown); | ||
998 | set_to_cap_if_null(ops, path_chroot); | ||
980 | #endif | 999 | #endif |
981 | set_to_cap_if_null(ops, file_permission); | 1000 | set_to_cap_if_null(ops, file_permission); |
982 | set_to_cap_if_null(ops, file_alloc_security); | 1001 | set_to_cap_if_null(ops, file_alloc_security); |
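capability.c only needs to supply no-op defaults for the three new pathname hooks; an LSM that actually wants them provides functions with the same prototypes and lets security_fixup_ops() fill in whatever it leaves NULL. A minimal sketch, assuming CONFIG_SECURITY_PATH and the cap_path_chmod() signature above (example_path_chmod and example_ops are hypothetical names, not part of this patch):

	static int example_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
				      mode_t mode)
	{
		/* Illustrative policy only: never allow world-writable modes. */
		if (mode & S_IWOTH)
			return -EPERM;
		return 0;
	}

	static struct security_operations example_ops = {
		.name		= "example",
		.path_chmod	= example_path_chmod,
		/* remaining hooks left NULL; security_fixup_ops() fills them
		 * in with the capability defaults */
	};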
diff --git a/security/commoncap.c b/security/commoncap.c index fe30751a6cd9..f800fdb3de94 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Common capabilities, needed by capability.o and root_plug.o | 1 | /* Common capabilities, needed by capability.o. |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or modify | 3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License as published by | 4 | * it under the terms of the GNU General Public License as published by |
@@ -173,7 +173,6 @@ int cap_capget(struct task_struct *target, kernel_cap_t *effective, | |||
173 | */ | 173 | */ |
174 | static inline int cap_inh_is_capped(void) | 174 | static inline int cap_inh_is_capped(void) |
175 | { | 175 | { |
176 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
177 | 176 | ||
178 | /* they are so limited unless the current task has the CAP_SETPCAP | 177 | /* they are so limited unless the current task has the CAP_SETPCAP |
179 | * capability | 178 | * capability |
@@ -181,7 +180,6 @@ static inline int cap_inh_is_capped(void) | |||
181 | if (cap_capable(current, current_cred(), CAP_SETPCAP, | 180 | if (cap_capable(current, current_cred(), CAP_SETPCAP, |
182 | SECURITY_CAP_AUDIT) == 0) | 181 | SECURITY_CAP_AUDIT) == 0) |
183 | return 0; | 182 | return 0; |
184 | #endif | ||
185 | return 1; | 183 | return 1; |
186 | } | 184 | } |
187 | 185 | ||
@@ -239,8 +237,6 @@ static inline void bprm_clear_caps(struct linux_binprm *bprm) | |||
239 | bprm->cap_effective = false; | 237 | bprm->cap_effective = false; |
240 | } | 238 | } |
241 | 239 | ||
242 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
243 | |||
244 | /** | 240 | /** |
245 | * cap_inode_need_killpriv - Determine if inode change affects privileges | 241 | * cap_inode_need_killpriv - Determine if inode change affects privileges |
246 | * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV | 242 | * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV |
@@ -421,49 +417,6 @@ out: | |||
421 | return rc; | 417 | return rc; |
422 | } | 418 | } |
423 | 419 | ||
424 | #else | ||
425 | int cap_inode_need_killpriv(struct dentry *dentry) | ||
426 | { | ||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | int cap_inode_killpriv(struct dentry *dentry) | ||
431 | { | ||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps) | ||
436 | { | ||
437 | memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data)); | ||
438 | return -ENODATA; | ||
439 | } | ||
440 | |||
441 | static inline int get_file_caps(struct linux_binprm *bprm, bool *effective) | ||
442 | { | ||
443 | bprm_clear_caps(bprm); | ||
444 | return 0; | ||
445 | } | ||
446 | #endif | ||
447 | |||
448 | /* | ||
449 | * Determine whether a exec'ing process's new permitted capabilities should be | ||
450 | * limited to just what it already has. | ||
451 | * | ||
452 | * This prevents processes that are being ptraced from gaining access to | ||
453 | * CAP_SETPCAP, unless the process they're tracing already has it, and the | ||
454 | * binary they're executing has filecaps that elevate it. | ||
455 | * | ||
456 | * Returns 1 if they should be limited, 0 if they are not. | ||
457 | */ | ||
458 | static inline int cap_limit_ptraced_target(void) | ||
459 | { | ||
460 | #ifndef CONFIG_SECURITY_FILE_CAPABILITIES | ||
461 | if (capable(CAP_SETPCAP)) | ||
462 | return 0; | ||
463 | #endif | ||
464 | return 1; | ||
465 | } | ||
466 | |||
467 | /** | 420 | /** |
468 | * cap_bprm_set_creds - Set up the proposed credentials for execve(). | 421 | * cap_bprm_set_creds - Set up the proposed credentials for execve(). |
469 | * @bprm: The execution parameters, including the proposed creds | 422 | * @bprm: The execution parameters, including the proposed creds |
@@ -523,9 +476,8 @@ skip: | |||
523 | new->euid = new->uid; | 476 | new->euid = new->uid; |
524 | new->egid = new->gid; | 477 | new->egid = new->gid; |
525 | } | 478 | } |
526 | if (cap_limit_ptraced_target()) | 479 | new->cap_permitted = cap_intersect(new->cap_permitted, |
527 | new->cap_permitted = cap_intersect(new->cap_permitted, | 480 | old->cap_permitted); |
528 | old->cap_permitted); | ||
529 | } | 481 | } |
530 | 482 | ||
531 | new->suid = new->fsuid = new->euid; | 483 | new->suid = new->fsuid = new->euid; |
@@ -739,7 +691,6 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags) | |||
739 | return 0; | 691 | return 0; |
740 | } | 692 | } |
741 | 693 | ||
742 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
743 | /* | 694 | /* |
744 | * Rationale: code calling task_setscheduler, task_setioprio, and | 695 | * Rationale: code calling task_setscheduler, task_setioprio, and |
745 | * task_setnice, assumes that | 696 | * task_setnice, assumes that |
@@ -820,22 +771,6 @@ static long cap_prctl_drop(struct cred *new, unsigned long cap) | |||
820 | return 0; | 771 | return 0; |
821 | } | 772 | } |
822 | 773 | ||
823 | #else | ||
824 | int cap_task_setscheduler (struct task_struct *p, int policy, | ||
825 | struct sched_param *lp) | ||
826 | { | ||
827 | return 0; | ||
828 | } | ||
829 | int cap_task_setioprio (struct task_struct *p, int ioprio) | ||
830 | { | ||
831 | return 0; | ||
832 | } | ||
833 | int cap_task_setnice (struct task_struct *p, int nice) | ||
834 | { | ||
835 | return 0; | ||
836 | } | ||
837 | #endif | ||
838 | |||
839 | /** | 774 | /** |
840 | * cap_task_prctl - Implement process control functions for this security module | 775 | * cap_task_prctl - Implement process control functions for this security module |
841 | * @option: The process control function requested | 776 | * @option: The process control function requested |
@@ -866,7 +801,6 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
866 | error = !!cap_raised(new->cap_bset, arg2); | 801 | error = !!cap_raised(new->cap_bset, arg2); |
867 | goto no_change; | 802 | goto no_change; |
868 | 803 | ||
869 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
870 | case PR_CAPBSET_DROP: | 804 | case PR_CAPBSET_DROP: |
871 | error = cap_prctl_drop(new, arg2); | 805 | error = cap_prctl_drop(new, arg2); |
872 | if (error < 0) | 806 | if (error < 0) |
@@ -917,8 +851,6 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
917 | error = new->securebits; | 851 | error = new->securebits; |
918 | goto no_change; | 852 | goto no_change; |
919 | 853 | ||
920 | #endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */ | ||
921 | |||
922 | case PR_GET_KEEPCAPS: | 854 | case PR_GET_KEEPCAPS: |
923 | if (issecure(SECURE_KEEP_CAPS)) | 855 | if (issecure(SECURE_KEEP_CAPS)) |
924 | error = 1; | 856 | error = 1; |
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 53d9764e8f09..3d7846de8069 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig | |||
@@ -3,6 +3,7 @@ | |||
3 | config IMA | 3 | config IMA |
4 | bool "Integrity Measurement Architecture(IMA)" | 4 | bool "Integrity Measurement Architecture(IMA)" |
5 | depends on ACPI | 5 | depends on ACPI |
6 | depends on SECURITY | ||
6 | select SECURITYFS | 7 | select SECURITYFS |
7 | select CRYPTO | 8 | select CRYPTO |
8 | select CRYPTO_HMAC | 9 | select CRYPTO_HMAC |
diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 3bb90b6f1dd3..51bd0fd9c9f0 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c | |||
@@ -354,6 +354,10 @@ static void dump_common_audit_data(struct audit_buffer *ab, | |||
354 | } | 354 | } |
355 | break; | 355 | break; |
356 | #endif | 356 | #endif |
357 | case LSM_AUDIT_DATA_KMOD: | ||
358 | audit_log_format(ab, " kmod="); | ||
359 | audit_log_untrustedstring(ab, a->u.kmod_name); | ||
360 | break; | ||
357 | } /* switch (a->type) */ | 361 | } /* switch (a->type) */ |
358 | } | 362 | } |
359 | 363 | ||
diff --git a/security/min_addr.c b/security/min_addr.c index c844eed7915d..fc43c9d37084 100644 --- a/security/min_addr.c +++ b/security/min_addr.c | |||
@@ -33,6 +33,9 @@ int mmap_min_addr_handler(struct ctl_table *table, int write, | |||
33 | { | 33 | { |
34 | int ret; | 34 | int ret; |
35 | 35 | ||
36 | if (!capable(CAP_SYS_RAWIO)) | ||
37 | return -EPERM; | ||
38 | |||
36 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | 39 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
37 | 40 | ||
38 | update_mmap_min_addr(); | 41 | update_mmap_min_addr(); |
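The effect is that mmap_min_addr can no longer be lowered by a process that merely has write permission on the proc file: with this change, something like

	sysctl -w vm.mmap_min_addr=0

(i.e. a write to /proc/sys/vm/mmap_min_addr) returns EPERM unless the writer has CAP_SYS_RAWIO, the same capability that already allows mapping below the limit in the first place.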
diff --git a/security/root_plug.c b/security/root_plug.c deleted file mode 100644 index 2f7ffa67c4d2..000000000000 --- a/security/root_plug.c +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | /* | ||
2 | * Root Plug sample LSM module | ||
3 | * | ||
4 | * Originally written for a Linux Journal. | ||
5 | * | ||
6 | * Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com> | ||
7 | * | ||
8 | * Prevents any programs running with egid == 0 if a specific USB device | ||
9 | * is not present in the system. Yes, it can be gotten around, but is a | ||
10 | * nice starting point for people to play with, and learn the LSM | ||
11 | * interface. | ||
12 | * | ||
13 | * If you want to turn this into something with a semblance of security, | ||
14 | * you need to hook the task_* functions also. | ||
15 | * | ||
16 | * See http://www.linuxjournal.com/article.php?sid=6279 for more information | ||
17 | * about this code. | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License as | ||
21 | * published by the Free Software Foundation, version 2 of the | ||
22 | * License. | ||
23 | */ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/security.h> | ||
28 | #include <linux/usb.h> | ||
29 | #include <linux/moduleparam.h> | ||
30 | |||
31 | /* default is a generic type of usb to serial converter */ | ||
32 | static int vendor_id = 0x0557; | ||
33 | static int product_id = 0x2008; | ||
34 | |||
35 | module_param(vendor_id, uint, 0400); | ||
36 | module_param(product_id, uint, 0400); | ||
37 | |||
38 | /* should we print out debug messages */ | ||
39 | static int debug = 0; | ||
40 | |||
41 | module_param(debug, bool, 0600); | ||
42 | |||
43 | #define MY_NAME "root_plug" | ||
44 | |||
45 | #define root_dbg(fmt, arg...) \ | ||
46 | do { \ | ||
47 | if (debug) \ | ||
48 | printk(KERN_DEBUG "%s: %s: " fmt , \ | ||
49 | MY_NAME , __func__ , \ | ||
50 | ## arg); \ | ||
51 | } while (0) | ||
52 | |||
53 | static int rootplug_bprm_check_security (struct linux_binprm *bprm) | ||
54 | { | ||
55 | struct usb_device *dev; | ||
56 | |||
57 | root_dbg("file %s, e_uid = %d, e_gid = %d\n", | ||
58 | bprm->filename, bprm->cred->euid, bprm->cred->egid); | ||
59 | |||
60 | if (bprm->cred->egid == 0) { | ||
61 | dev = usb_find_device(vendor_id, product_id); | ||
62 | if (!dev) { | ||
63 | root_dbg("e_gid = 0, and device not found, " | ||
64 | "task not allowed to run...\n"); | ||
65 | return -EPERM; | ||
66 | } | ||
67 | usb_put_dev(dev); | ||
68 | } | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static struct security_operations rootplug_security_ops = { | ||
74 | .bprm_check_security = rootplug_bprm_check_security, | ||
75 | }; | ||
76 | |||
77 | static int __init rootplug_init (void) | ||
78 | { | ||
79 | /* register ourselves with the security framework */ | ||
80 | if (register_security (&rootplug_security_ops)) { | ||
81 | printk (KERN_INFO | ||
82 | "Failure registering Root Plug module with the kernel\n"); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | printk (KERN_INFO "Root Plug module initialized, " | ||
86 | "vendor_id = %4.4x, product id = %4.4x\n", vendor_id, product_id); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | security_initcall (rootplug_init); | ||
diff --git a/security/security.c b/security/security.c index c4c673240c1c..24e060be9fa5 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -16,9 +16,11 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/security.h> | 18 | #include <linux/security.h> |
19 | #include <linux/ima.h> | ||
19 | 20 | ||
20 | /* Boot-time LSM user choice */ | 21 | /* Boot-time LSM user choice */ |
21 | static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1]; | 22 | static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = |
23 | CONFIG_DEFAULT_SECURITY; | ||
22 | 24 | ||
23 | /* things that live in capability.c */ | 25 | /* things that live in capability.c */ |
24 | extern struct security_operations default_security_ops; | 26 | extern struct security_operations default_security_ops; |
@@ -79,8 +81,10 @@ __setup("security=", choose_lsm); | |||
79 | * | 81 | * |
80 | * Return true if: | 82 | * Return true if: |
81 | * -The passed LSM is the one chosen by user at boot time, | 83 | * -The passed LSM is the one chosen by user at boot time, |
82 | * -or user didn't specify a specific LSM and we're the first to ask | 84 | * -or the passed LSM is configured as the default and the user did not |
83 | * for registration permission, | 85 | * choose an alternate LSM at boot time, |
86 | * -or there is no default LSM set and the user didn't specify a | ||
87 | * specific LSM and we're the first to ask for registration permission, | ||
84 | * -or the passed LSM is currently loaded. | 88 | * -or the passed LSM is currently loaded. |
85 | * Otherwise, return false. | 89 | * Otherwise, return false. |
86 | */ | 90 | */ |
@@ -235,7 +239,12 @@ int security_bprm_set_creds(struct linux_binprm *bprm) | |||
235 | 239 | ||
236 | int security_bprm_check(struct linux_binprm *bprm) | 240 | int security_bprm_check(struct linux_binprm *bprm) |
237 | { | 241 | { |
238 | return security_ops->bprm_check_security(bprm); | 242 | int ret; |
243 | |||
244 | ret = security_ops->bprm_check_security(bprm); | ||
245 | if (ret) | ||
246 | return ret; | ||
247 | return ima_bprm_check(bprm); | ||
239 | } | 248 | } |
240 | 249 | ||
241 | void security_bprm_committing_creds(struct linux_binprm *bprm) | 250 | void security_bprm_committing_creds(struct linux_binprm *bprm) |
@@ -352,12 +361,21 @@ EXPORT_SYMBOL(security_sb_parse_opts_str); | |||
352 | 361 | ||
353 | int security_inode_alloc(struct inode *inode) | 362 | int security_inode_alloc(struct inode *inode) |
354 | { | 363 | { |
364 | int ret; | ||
365 | |||
355 | inode->i_security = NULL; | 366 | inode->i_security = NULL; |
356 | return security_ops->inode_alloc_security(inode); | 367 | ret = security_ops->inode_alloc_security(inode); |
368 | if (ret) | ||
369 | return ret; | ||
370 | ret = ima_inode_alloc(inode); | ||
371 | if (ret) | ||
372 | security_inode_free(inode); | ||
373 | return ret; | ||
357 | } | 374 | } |
358 | 375 | ||
359 | void security_inode_free(struct inode *inode) | 376 | void security_inode_free(struct inode *inode) |
360 | { | 377 | { |
378 | ima_inode_free(inode); | ||
361 | security_ops->inode_free_security(inode); | 379 | security_ops->inode_free_security(inode); |
362 | } | 380 | } |
363 | 381 | ||
@@ -434,6 +452,26 @@ int security_path_truncate(struct path *path, loff_t length, | |||
434 | return 0; | 452 | return 0; |
435 | return security_ops->path_truncate(path, length, time_attrs); | 453 | return security_ops->path_truncate(path, length, time_attrs); |
436 | } | 454 | } |
455 | |||
456 | int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, | ||
457 | mode_t mode) | ||
458 | { | ||
459 | if (unlikely(IS_PRIVATE(dentry->d_inode))) | ||
460 | return 0; | ||
461 | return security_ops->path_chmod(dentry, mnt, mode); | ||
462 | } | ||
463 | |||
464 | int security_path_chown(struct path *path, uid_t uid, gid_t gid) | ||
465 | { | ||
466 | if (unlikely(IS_PRIVATE(path->dentry->d_inode))) | ||
467 | return 0; | ||
468 | return security_ops->path_chown(path, uid, gid); | ||
469 | } | ||
470 | |||
471 | int security_path_chroot(struct path *path) | ||
472 | { | ||
473 | return security_ops->path_chroot(path); | ||
474 | } | ||
437 | #endif | 475 | #endif |
438 | 476 | ||
439 | int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) | 477 | int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) |
@@ -628,6 +666,8 @@ int security_file_alloc(struct file *file) | |||
628 | void security_file_free(struct file *file) | 666 | void security_file_free(struct file *file) |
629 | { | 667 | { |
630 | security_ops->file_free_security(file); | 668 | security_ops->file_free_security(file); |
669 | if (file->f_dentry) | ||
670 | ima_file_free(file); | ||
631 | } | 671 | } |
632 | 672 | ||
633 | int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 673 | int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
@@ -639,7 +679,12 @@ int security_file_mmap(struct file *file, unsigned long reqprot, | |||
639 | unsigned long prot, unsigned long flags, | 679 | unsigned long prot, unsigned long flags, |
640 | unsigned long addr, unsigned long addr_only) | 680 | unsigned long addr, unsigned long addr_only) |
641 | { | 681 | { |
642 | return security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); | 682 | int ret; |
683 | |||
684 | ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); | ||
685 | if (ret) | ||
686 | return ret; | ||
687 | return ima_file_mmap(file, prot); | ||
643 | } | 688 | } |
644 | 689 | ||
645 | int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, | 690 | int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, |
@@ -719,9 +764,9 @@ int security_kernel_create_files_as(struct cred *new, struct inode *inode) | |||
719 | return security_ops->kernel_create_files_as(new, inode); | 764 | return security_ops->kernel_create_files_as(new, inode); |
720 | } | 765 | } |
721 | 766 | ||
722 | int security_kernel_module_request(void) | 767 | int security_kernel_module_request(char *kmod_name) |
723 | { | 768 | { |
724 | return security_ops->kernel_module_request(); | 769 | return security_ops->kernel_module_request(kmod_name); |
725 | } | 770 | } |
726 | 771 | ||
727 | int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) | 772 | int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) |
diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore new file mode 100644 index 000000000000..2e5040a3d48b --- /dev/null +++ b/security/selinux/.gitignore | |||
@@ -0,0 +1,2 @@ | |||
1 | av_permissions.h | ||
2 | flask.h | ||
diff --git a/security/selinux/Makefile b/security/selinux/Makefile index d47fc5e545e0..f013982df417 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile | |||
@@ -18,5 +18,13 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o | |||
18 | 18 | ||
19 | selinux-$(CONFIG_NETLABEL) += netlabel.o | 19 | selinux-$(CONFIG_NETLABEL) += netlabel.o |
20 | 20 | ||
21 | EXTRA_CFLAGS += -Isecurity/selinux/include | 21 | EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include |
22 | 22 | ||
23 | $(obj)/avc.o: $(obj)/flask.h | ||
24 | |||
25 | quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h | ||
26 | cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h | ||
27 | |||
28 | targets += flask.h | ||
29 | $(obj)/flask.h: $(src)/include/classmap.h FORCE | ||
30 | $(call if_changed,flask) | ||
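flask.h and av_permissions.h are now build products rather than files kept in the tree, which is why they appear in the new .gitignore above and why the include path gains -Isecurity/selinux (the generated headers land in the object directory). For an in-tree build the generator is invoked essentially as its usage() message suggests:

	scripts/selinux/genheaders/genheaders security/selinux/flask.h security/selinux/av_permissions.h

with the two arguments naming the output files; under a separate output directory (O=) the paths are the corresponding $(obj) locations instead.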
diff --git a/security/selinux/avc.c b/security/selinux/avc.c index b4b5da1c0a42..f2dde268165a 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c | |||
@@ -31,43 +31,7 @@ | |||
31 | #include <net/ipv6.h> | 31 | #include <net/ipv6.h> |
32 | #include "avc.h" | 32 | #include "avc.h" |
33 | #include "avc_ss.h" | 33 | #include "avc_ss.h" |
34 | 34 | #include "classmap.h" | |
35 | static const struct av_perm_to_string av_perm_to_string[] = { | ||
36 | #define S_(c, v, s) { c, v, s }, | ||
37 | #include "av_perm_to_string.h" | ||
38 | #undef S_ | ||
39 | }; | ||
40 | |||
41 | static const char *class_to_string[] = { | ||
42 | #define S_(s) s, | ||
43 | #include "class_to_string.h" | ||
44 | #undef S_ | ||
45 | }; | ||
46 | |||
47 | #define TB_(s) static const char *s[] = { | ||
48 | #define TE_(s) }; | ||
49 | #define S_(s) s, | ||
50 | #include "common_perm_to_string.h" | ||
51 | #undef TB_ | ||
52 | #undef TE_ | ||
53 | #undef S_ | ||
54 | |||
55 | static const struct av_inherit av_inherit[] = { | ||
56 | #define S_(c, i, b) { .tclass = c,\ | ||
57 | .common_pts = common_##i##_perm_to_string,\ | ||
58 | .common_base = b }, | ||
59 | #include "av_inherit.h" | ||
60 | #undef S_ | ||
61 | }; | ||
62 | |||
63 | const struct selinux_class_perm selinux_class_perm = { | ||
64 | .av_perm_to_string = av_perm_to_string, | ||
65 | .av_pts_len = ARRAY_SIZE(av_perm_to_string), | ||
66 | .class_to_string = class_to_string, | ||
67 | .cts_len = ARRAY_SIZE(class_to_string), | ||
68 | .av_inherit = av_inherit, | ||
69 | .av_inherit_len = ARRAY_SIZE(av_inherit) | ||
70 | }; | ||
71 | 35 | ||
72 | #define AVC_CACHE_SLOTS 512 | 36 | #define AVC_CACHE_SLOTS 512 |
73 | #define AVC_DEF_CACHE_THRESHOLD 512 | 37 | #define AVC_DEF_CACHE_THRESHOLD 512 |
@@ -139,52 +103,28 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) | |||
139 | */ | 103 | */ |
140 | static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) | 104 | static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) |
141 | { | 105 | { |
142 | const char **common_pts = NULL; | 106 | const char **perms; |
143 | u32 common_base = 0; | 107 | int i, perm; |
144 | int i, i2, perm; | ||
145 | 108 | ||
146 | if (av == 0) { | 109 | if (av == 0) { |
147 | audit_log_format(ab, " null"); | 110 | audit_log_format(ab, " null"); |
148 | return; | 111 | return; |
149 | } | 112 | } |
150 | 113 | ||
151 | for (i = 0; i < ARRAY_SIZE(av_inherit); i++) { | 114 | perms = secclass_map[tclass-1].perms; |
152 | if (av_inherit[i].tclass == tclass) { | ||
153 | common_pts = av_inherit[i].common_pts; | ||
154 | common_base = av_inherit[i].common_base; | ||
155 | break; | ||
156 | } | ||
157 | } | ||
158 | 115 | ||
159 | audit_log_format(ab, " {"); | 116 | audit_log_format(ab, " {"); |
160 | i = 0; | 117 | i = 0; |
161 | perm = 1; | 118 | perm = 1; |
162 | while (perm < common_base) { | 119 | while (i < (sizeof(av) * 8)) { |
163 | if (perm & av) { | 120 | if ((perm & av) && perms[i]) { |
164 | audit_log_format(ab, " %s", common_pts[i]); | 121 | audit_log_format(ab, " %s", perms[i]); |
165 | av &= ~perm; | 122 | av &= ~perm; |
166 | } | 123 | } |
167 | i++; | 124 | i++; |
168 | perm <<= 1; | 125 | perm <<= 1; |
169 | } | 126 | } |
170 | 127 | ||
171 | while (i < sizeof(av) * 8) { | ||
172 | if (perm & av) { | ||
173 | for (i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++) { | ||
174 | if ((av_perm_to_string[i2].tclass == tclass) && | ||
175 | (av_perm_to_string[i2].value == perm)) | ||
176 | break; | ||
177 | } | ||
178 | if (i2 < ARRAY_SIZE(av_perm_to_string)) { | ||
179 | audit_log_format(ab, " %s", | ||
180 | av_perm_to_string[i2].name); | ||
181 | av &= ~perm; | ||
182 | } | ||
183 | } | ||
184 | i++; | ||
185 | perm <<= 1; | ||
186 | } | ||
187 | |||
188 | if (av) | 128 | if (av) |
189 | audit_log_format(ab, " 0x%x", av); | 129 | audit_log_format(ab, " 0x%x", av); |
190 | 130 | ||
@@ -219,8 +159,8 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla | |||
219 | kfree(scontext); | 159 | kfree(scontext); |
220 | } | 160 | } |
221 | 161 | ||
222 | BUG_ON(tclass >= ARRAY_SIZE(class_to_string) || !class_to_string[tclass]); | 162 | BUG_ON(tclass >= ARRAY_SIZE(secclass_map)); |
223 | audit_log_format(ab, " tclass=%s", class_to_string[tclass]); | 163 | audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); |
224 | } | 164 | } |
225 | 165 | ||
226 | /** | 166 | /** |
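avc_dump_av() no longer needs the av_inherit/common tables: for a given class it walks the access-vector bits and looks the names up directly in that class's perms[] array from classmap.h. The same decoding idea, as a self-contained userspace sketch with a made-up permission table (illustrative only, not kernel code):

	#include <stdio.h>

	/* Hypothetical table in classmap.h style: bit i corresponds to perms[i],
	 * and the array is sized so every possible bit has a (possibly NULL) slot. */
	static const char *file_perms[sizeof(unsigned) * 8 + 1] = {
		"ioctl", "read", "write", "create", "getattr",
	};

	static void dump_av(const char **perms, unsigned av)
	{
		unsigned bit = 1;
		int i = 0;

		printf("{");
		while (i < (int)(sizeof(av) * 8)) {
			if ((av & bit) && perms[i]) {
				printf(" %s", perms[i]);
				av &= ~bit;
			}
			i++;
			bit <<= 1;
		}
		if (av)
			printf(" 0x%x", av);	/* any bits without a name */
		printf(" }\n");
	}

	int main(void)
	{
		dump_av(file_perms, 0x16);	/* prints: { read write getattr } */
		return 0;
	}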
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index bb230d5d7085..c96d63ec4753 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -91,7 +91,6 @@ | |||
91 | 91 | ||
92 | #define NUM_SEL_MNT_OPTS 5 | 92 | #define NUM_SEL_MNT_OPTS 5 |
93 | 93 | ||
94 | extern unsigned int policydb_loaded_version; | ||
95 | extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); | 94 | extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); |
96 | extern struct security_operations *security_ops; | 95 | extern struct security_operations *security_ops; |
97 | 96 | ||
@@ -3338,9 +3337,18 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode) | |||
3338 | return 0; | 3337 | return 0; |
3339 | } | 3338 | } |
3340 | 3339 | ||
3341 | static int selinux_kernel_module_request(void) | 3340 | static int selinux_kernel_module_request(char *kmod_name) |
3342 | { | 3341 | { |
3343 | return task_has_system(current, SYSTEM__MODULE_REQUEST); | 3342 | u32 sid; |
3343 | struct common_audit_data ad; | ||
3344 | |||
3345 | sid = task_sid(current); | ||
3346 | |||
3347 | COMMON_AUDIT_DATA_INIT(&ad, KMOD); | ||
3348 | ad.u.kmod_name = kmod_name; | ||
3349 | |||
3350 | return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM, | ||
3351 | SYSTEM__MODULE_REQUEST, &ad); | ||
3344 | } | 3352 | } |
3345 | 3353 | ||
3346 | static int selinux_task_setpgid(struct task_struct *p, pid_t pgid) | 3354 | static int selinux_task_setpgid(struct task_struct *p, pid_t pgid) |
@@ -4714,10 +4722,7 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) | |||
4714 | if (err) | 4722 | if (err) |
4715 | return err; | 4723 | return err; |
4716 | 4724 | ||
4717 | if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS) | 4725 | return selinux_nlmsg_perm(sk, skb); |
4718 | err = selinux_nlmsg_perm(sk, skb); | ||
4719 | |||
4720 | return err; | ||
4721 | } | 4726 | } |
4722 | 4727 | ||
4723 | static int selinux_netlink_recv(struct sk_buff *skb, int capability) | 4728 | static int selinux_netlink_recv(struct sk_buff *skb, int capability) |
@@ -5830,12 +5835,12 @@ int selinux_disable(void) | |||
5830 | selinux_disabled = 1; | 5835 | selinux_disabled = 1; |
5831 | selinux_enabled = 0; | 5836 | selinux_enabled = 0; |
5832 | 5837 | ||
5833 | /* Try to destroy the avc node cache */ | ||
5834 | avc_disable(); | ||
5835 | |||
5836 | /* Reset security_ops to the secondary module, dummy or capability. */ | 5838 | /* Reset security_ops to the secondary module, dummy or capability. */ |
5837 | security_ops = secondary_ops; | 5839 | security_ops = secondary_ops; |
5838 | 5840 | ||
5841 | /* Try to destroy the avc node cache */ | ||
5842 | avc_disable(); | ||
5843 | |||
5839 | /* Unregister netfilter hooks. */ | 5844 | /* Unregister netfilter hooks. */ |
5840 | selinux_nf_ip_exit(); | 5845 | selinux_nf_ip_exit(); |
5841 | 5846 | ||
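selinux_kernel_module_request() now receives the string passed to request_module() and attaches it to the audit data through the LSM_AUDIT_DATA_KMOD case added to lsm_audit.c above, so a denial of the module_request permission records which module alias the kernel was trying to auto-load. An illustrative (not verbatim) denial might read roughly:

	avc:  denied  { module_request } for  pid=1234 comm="ping6" kmod="net-pf-10" scontext=... tcontext=... tclass=system

where kmod= is the new field and the target is the kernel SID, per the avc_has_perm() call above.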
diff --git a/security/selinux/include/av_inherit.h b/security/selinux/include/av_inherit.h deleted file mode 100644 index abedcd704dae..000000000000 --- a/security/selinux/include/av_inherit.h +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /* This file is automatically generated. Do not edit. */ | ||
2 | S_(SECCLASS_DIR, file, 0x00020000UL) | ||
3 | S_(SECCLASS_FILE, file, 0x00020000UL) | ||
4 | S_(SECCLASS_LNK_FILE, file, 0x00020000UL) | ||
5 | S_(SECCLASS_CHR_FILE, file, 0x00020000UL) | ||
6 | S_(SECCLASS_BLK_FILE, file, 0x00020000UL) | ||
7 | S_(SECCLASS_SOCK_FILE, file, 0x00020000UL) | ||
8 | S_(SECCLASS_FIFO_FILE, file, 0x00020000UL) | ||
9 | S_(SECCLASS_SOCKET, socket, 0x00400000UL) | ||
10 | S_(SECCLASS_TCP_SOCKET, socket, 0x00400000UL) | ||
11 | S_(SECCLASS_UDP_SOCKET, socket, 0x00400000UL) | ||
12 | S_(SECCLASS_RAWIP_SOCKET, socket, 0x00400000UL) | ||
13 | S_(SECCLASS_NETLINK_SOCKET, socket, 0x00400000UL) | ||
14 | S_(SECCLASS_PACKET_SOCKET, socket, 0x00400000UL) | ||
15 | S_(SECCLASS_KEY_SOCKET, socket, 0x00400000UL) | ||
16 | S_(SECCLASS_UNIX_STREAM_SOCKET, socket, 0x00400000UL) | ||
17 | S_(SECCLASS_UNIX_DGRAM_SOCKET, socket, 0x00400000UL) | ||
18 | S_(SECCLASS_TUN_SOCKET, socket, 0x00400000UL) | ||
19 | S_(SECCLASS_IPC, ipc, 0x00000200UL) | ||
20 | S_(SECCLASS_SEM, ipc, 0x00000200UL) | ||
21 | S_(SECCLASS_MSGQ, ipc, 0x00000200UL) | ||
22 | S_(SECCLASS_SHM, ipc, 0x00000200UL) | ||
23 | S_(SECCLASS_NETLINK_ROUTE_SOCKET, socket, 0x00400000UL) | ||
24 | S_(SECCLASS_NETLINK_FIREWALL_SOCKET, socket, 0x00400000UL) | ||
25 | S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, socket, 0x00400000UL) | ||
26 | S_(SECCLASS_NETLINK_NFLOG_SOCKET, socket, 0x00400000UL) | ||
27 | S_(SECCLASS_NETLINK_XFRM_SOCKET, socket, 0x00400000UL) | ||
28 | S_(SECCLASS_NETLINK_SELINUX_SOCKET, socket, 0x00400000UL) | ||
29 | S_(SECCLASS_NETLINK_AUDIT_SOCKET, socket, 0x00400000UL) | ||
30 | S_(SECCLASS_NETLINK_IP6FW_SOCKET, socket, 0x00400000UL) | ||
31 | S_(SECCLASS_NETLINK_DNRT_SOCKET, socket, 0x00400000UL) | ||
32 | S_(SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET, socket, 0x00400000UL) | ||
33 | S_(SECCLASS_APPLETALK_SOCKET, socket, 0x00400000UL) | ||
34 | S_(SECCLASS_DCCP_SOCKET, socket, 0x00400000UL) | ||
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h deleted file mode 100644 index 2b683ad83d21..000000000000 --- a/security/selinux/include/av_perm_to_string.h +++ /dev/null | |||
@@ -1,183 +0,0 @@ | |||
1 | /* This file is automatically generated. Do not edit. */ | ||
2 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__MOUNT, "mount") | ||
3 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__REMOUNT, "remount") | ||
4 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__UNMOUNT, "unmount") | ||
5 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__GETATTR, "getattr") | ||
6 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__RELABELFROM, "relabelfrom") | ||
7 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__RELABELTO, "relabelto") | ||
8 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__TRANSITION, "transition") | ||
9 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__ASSOCIATE, "associate") | ||
10 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__QUOTAMOD, "quotamod") | ||
11 | S_(SECCLASS_FILESYSTEM, FILESYSTEM__QUOTAGET, "quotaget") | ||
12 | S_(SECCLASS_DIR, DIR__ADD_NAME, "add_name") | ||
13 | S_(SECCLASS_DIR, DIR__REMOVE_NAME, "remove_name") | ||
14 | S_(SECCLASS_DIR, DIR__REPARENT, "reparent") | ||
15 | S_(SECCLASS_DIR, DIR__SEARCH, "search") | ||
16 | S_(SECCLASS_DIR, DIR__RMDIR, "rmdir") | ||
17 | S_(SECCLASS_DIR, DIR__OPEN, "open") | ||
18 | S_(SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, "execute_no_trans") | ||
19 | S_(SECCLASS_FILE, FILE__ENTRYPOINT, "entrypoint") | ||
20 | S_(SECCLASS_FILE, FILE__EXECMOD, "execmod") | ||
21 | S_(SECCLASS_FILE, FILE__OPEN, "open") | ||
22 | S_(SECCLASS_CHR_FILE, CHR_FILE__EXECUTE_NO_TRANS, "execute_no_trans") | ||
23 | S_(SECCLASS_CHR_FILE, CHR_FILE__ENTRYPOINT, "entrypoint") | ||
24 | S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod") | ||
25 | S_(SECCLASS_CHR_FILE, CHR_FILE__OPEN, "open") | ||
26 | S_(SECCLASS_BLK_FILE, BLK_FILE__OPEN, "open") | ||
27 | S_(SECCLASS_SOCK_FILE, SOCK_FILE__OPEN, "open") | ||
28 | S_(SECCLASS_FIFO_FILE, FIFO_FILE__OPEN, "open") | ||
29 | S_(SECCLASS_FD, FD__USE, "use") | ||
30 | S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto") | ||
31 | S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NEWCONN, "newconn") | ||
32 | S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__ACCEPTFROM, "acceptfrom") | ||
33 | S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NODE_BIND, "node_bind") | ||
34 | S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NAME_CONNECT, "name_connect") | ||
35 | S_(SECCLASS_UDP_SOCKET, UDP_SOCKET__NODE_BIND, "node_bind") | ||
36 | S_(SECCLASS_RAWIP_SOCKET, RAWIP_SOCKET__NODE_BIND, "node_bind") | ||
37 | S_(SECCLASS_NODE, NODE__TCP_RECV, "tcp_recv") | ||
38 | S_(SECCLASS_NODE, NODE__TCP_SEND, "tcp_send") | ||
39 | S_(SECCLASS_NODE, NODE__UDP_RECV, "udp_recv") | ||
40 | S_(SECCLASS_NODE, NODE__UDP_SEND, "udp_send") | ||
41 | S_(SECCLASS_NODE, NODE__RAWIP_RECV, "rawip_recv") | ||
42 | S_(SECCLASS_NODE, NODE__RAWIP_SEND, "rawip_send") | ||
43 | S_(SECCLASS_NODE, NODE__ENFORCE_DEST, "enforce_dest") | ||
44 | S_(SECCLASS_NODE, NODE__DCCP_RECV, "dccp_recv") | ||
45 | S_(SECCLASS_NODE, NODE__DCCP_SEND, "dccp_send") | ||
46 | S_(SECCLASS_NODE, NODE__RECVFROM, "recvfrom") | ||
47 | S_(SECCLASS_NODE, NODE__SENDTO, "sendto") | ||
48 | S_(SECCLASS_NETIF, NETIF__TCP_RECV, "tcp_recv") | ||
49 | S_(SECCLASS_NETIF, NETIF__TCP_SEND, "tcp_send") | ||
50 | S_(SECCLASS_NETIF, NETIF__UDP_RECV, "udp_recv") | ||
51 | S_(SECCLASS_NETIF, NETIF__UDP_SEND, "udp_send") | ||
52 | S_(SECCLASS_NETIF, NETIF__RAWIP_RECV, "rawip_recv") | ||
53 | S_(SECCLASS_NETIF, NETIF__RAWIP_SEND, "rawip_send") | ||
54 | S_(SECCLASS_NETIF, NETIF__DCCP_RECV, "dccp_recv") | ||
55 | S_(SECCLASS_NETIF, NETIF__DCCP_SEND, "dccp_send") | ||
56 | S_(SECCLASS_NETIF, NETIF__INGRESS, "ingress") | ||
57 | S_(SECCLASS_NETIF, NETIF__EGRESS, "egress") | ||
58 | S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__CONNECTTO, "connectto") | ||
59 | S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__NEWCONN, "newconn") | ||
60 | S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__ACCEPTFROM, "acceptfrom") | ||
61 | S_(SECCLASS_PROCESS, PROCESS__FORK, "fork") | ||
62 | S_(SECCLASS_PROCESS, PROCESS__TRANSITION, "transition") | ||
63 | S_(SECCLASS_PROCESS, PROCESS__SIGCHLD, "sigchld") | ||
64 | S_(SECCLASS_PROCESS, PROCESS__SIGKILL, "sigkill") | ||
65 | S_(SECCLASS_PROCESS, PROCESS__SIGSTOP, "sigstop") | ||
66 | S_(SECCLASS_PROCESS, PROCESS__SIGNULL, "signull") | ||
67 | S_(SECCLASS_PROCESS, PROCESS__SIGNAL, "signal") | ||
68 | S_(SECCLASS_PROCESS, PROCESS__PTRACE, "ptrace") | ||
69 | S_(SECCLASS_PROCESS, PROCESS__GETSCHED, "getsched") | ||
70 | S_(SECCLASS_PROCESS, PROCESS__SETSCHED, "setsched") | ||
71 | S_(SECCLASS_PROCESS, PROCESS__GETSESSION, "getsession") | ||
72 | S_(SECCLASS_PROCESS, PROCESS__GETPGID, "getpgid") | ||
73 | S_(SECCLASS_PROCESS, PROCESS__SETPGID, "setpgid") | ||
74 | S_(SECCLASS_PROCESS, PROCESS__GETCAP, "getcap") | ||
75 | S_(SECCLASS_PROCESS, PROCESS__SETCAP, "setcap") | ||
76 | S_(SECCLASS_PROCESS, PROCESS__SHARE, "share") | ||
77 | S_(SECCLASS_PROCESS, PROCESS__GETATTR, "getattr") | ||
78 | S_(SECCLASS_PROCESS, PROCESS__SETEXEC, "setexec") | ||
79 | S_(SECCLASS_PROCESS, PROCESS__SETFSCREATE, "setfscreate") | ||
80 | S_(SECCLASS_PROCESS, PROCESS__NOATSECURE, "noatsecure") | ||
81 | S_(SECCLASS_PROCESS, PROCESS__SIGINH, "siginh") | ||
82 | S_(SECCLASS_PROCESS, PROCESS__SETRLIMIT, "setrlimit") | ||
83 | S_(SECCLASS_PROCESS, PROCESS__RLIMITINH, "rlimitinh") | ||
84 | S_(SECCLASS_PROCESS, PROCESS__DYNTRANSITION, "dyntransition") | ||
85 | S_(SECCLASS_PROCESS, PROCESS__SETCURRENT, "setcurrent") | ||
86 | S_(SECCLASS_PROCESS, PROCESS__EXECMEM, "execmem") | ||
87 | S_(SECCLASS_PROCESS, PROCESS__EXECSTACK, "execstack") | ||
88 | S_(SECCLASS_PROCESS, PROCESS__EXECHEAP, "execheap") | ||
89 | S_(SECCLASS_PROCESS, PROCESS__SETKEYCREATE, "setkeycreate") | ||
90 | S_(SECCLASS_PROCESS, PROCESS__SETSOCKCREATE, "setsockcreate") | ||
91 | S_(SECCLASS_MSGQ, MSGQ__ENQUEUE, "enqueue") | ||
92 | S_(SECCLASS_MSG, MSG__SEND, "send") | ||
93 | S_(SECCLASS_MSG, MSG__RECEIVE, "receive") | ||
94 | S_(SECCLASS_SHM, SHM__LOCK, "lock") | ||
95 | S_(SECCLASS_SECURITY, SECURITY__COMPUTE_AV, "compute_av") | ||
96 | S_(SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE, "compute_create") | ||
97 | S_(SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER, "compute_member") | ||
98 | S_(SECCLASS_SECURITY, SECURITY__CHECK_CONTEXT, "check_context") | ||
99 | S_(SECCLASS_SECURITY, SECURITY__LOAD_POLICY, "load_policy") | ||
100 | S_(SECCLASS_SECURITY, SECURITY__COMPUTE_RELABEL, "compute_relabel") | ||
101 | S_(SECCLASS_SECURITY, SECURITY__COMPUTE_USER, "compute_user") | ||
102 | S_(SECCLASS_SECURITY, SECURITY__SETENFORCE, "setenforce") | ||
103 | S_(SECCLASS_SECURITY, SECURITY__SETBOOL, "setbool") | ||
104 | S_(SECCLASS_SECURITY, SECURITY__SETSECPARAM, "setsecparam") | ||
105 | S_(SECCLASS_SECURITY, SECURITY__SETCHECKREQPROT, "setcheckreqprot") | ||
106 | S_(SECCLASS_SYSTEM, SYSTEM__IPC_INFO, "ipc_info") | ||
107 | S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, "syslog_read") | ||
108 | S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, "syslog_mod") | ||
109 | S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE, "syslog_console") | ||
110 | S_(SECCLASS_SYSTEM, SYSTEM__MODULE_REQUEST, "module_request") | ||
111 | S_(SECCLASS_CAPABILITY, CAPABILITY__CHOWN, "chown") | ||
112 | S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_OVERRIDE, "dac_override") | ||
113 | S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_READ_SEARCH, "dac_read_search") | ||
114 | S_(SECCLASS_CAPABILITY, CAPABILITY__FOWNER, "fowner") | ||
115 | S_(SECCLASS_CAPABILITY, CAPABILITY__FSETID, "fsetid") | ||
116 | S_(SECCLASS_CAPABILITY, CAPABILITY__KILL, "kill") | ||
117 | S_(SECCLASS_CAPABILITY, CAPABILITY__SETGID, "setgid") | ||
118 | S_(SECCLASS_CAPABILITY, CAPABILITY__SETUID, "setuid") | ||
119 | S_(SECCLASS_CAPABILITY, CAPABILITY__SETPCAP, "setpcap") | ||
120 | S_(SECCLASS_CAPABILITY, CAPABILITY__LINUX_IMMUTABLE, "linux_immutable") | ||
121 | S_(SECCLASS_CAPABILITY, CAPABILITY__NET_BIND_SERVICE, "net_bind_service") | ||
122 | S_(SECCLASS_CAPABILITY, CAPABILITY__NET_BROADCAST, "net_broadcast") | ||
123 | S_(SECCLASS_CAPABILITY, CAPABILITY__NET_ADMIN, "net_admin") | ||
124 | S_(SECCLASS_CAPABILITY, CAPABILITY__NET_RAW, "net_raw") | ||
125 | S_(SECCLASS_CAPABILITY, CAPABILITY__IPC_LOCK, "ipc_lock") | ||
126 | S_(SECCLASS_CAPABILITY, CAPABILITY__IPC_OWNER, "ipc_owner") | ||
127 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_MODULE, "sys_module") | ||
128 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_RAWIO, "sys_rawio") | ||
129 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_CHROOT, "sys_chroot") | ||
130 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_PTRACE, "sys_ptrace") | ||
131 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_PACCT, "sys_pacct") | ||
132 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_ADMIN, "sys_admin") | ||
133 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_BOOT, "sys_boot") | ||
134 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_NICE, "sys_nice") | ||
135 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_RESOURCE, "sys_resource") | ||
136 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_TIME, "sys_time") | ||
137 | S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_TTY_CONFIG, "sys_tty_config") | ||
138 | S_(SECCLASS_CAPABILITY, CAPABILITY__MKNOD, "mknod") | ||
139 | S_(SECCLASS_CAPABILITY, CAPABILITY__LEASE, "lease") | ||
140 | S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_WRITE, "audit_write") | ||
141 | S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_CONTROL, "audit_control") | ||
142 | S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap") | ||
143 | S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override") | ||
144 | S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin") | ||
145 | S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read") | ||
146 | S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write") | ||
147 | S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read") | ||
148 | S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE, "nlmsg_write") | ||
149 | S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, NETLINK_TCPDIAG_SOCKET__NLMSG_READ, "nlmsg_read") | ||
150 | S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE, "nlmsg_write") | ||
151 | S_(SECCLASS_NETLINK_XFRM_SOCKET, NETLINK_XFRM_SOCKET__NLMSG_READ, "nlmsg_read") | ||
152 | S_(SECCLASS_NETLINK_XFRM_SOCKET, NETLINK_XFRM_SOCKET__NLMSG_WRITE, "nlmsg_write") | ||
153 | S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READ, "nlmsg_read") | ||
154 | S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_WRITE, "nlmsg_write") | ||
155 | S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_RELAY, "nlmsg_relay") | ||
156 | S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READPRIV, "nlmsg_readpriv") | ||
157 | S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT, "nlmsg_tty_audit") | ||
158 | S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_READ, "nlmsg_read") | ||
159 | S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_WRITE, "nlmsg_write") | ||
160 | S_(SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, "sendto") | ||
161 | S_(SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, "recvfrom") | ||
162 | S_(SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, "setcontext") | ||
163 | S_(SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, "polmatch") | ||
164 | S_(SECCLASS_PACKET, PACKET__SEND, "send") | ||
165 | S_(SECCLASS_PACKET, PACKET__RECV, "recv") | ||
166 | S_(SECCLASS_PACKET, PACKET__RELABELTO, "relabelto") | ||
167 | S_(SECCLASS_PACKET, PACKET__FLOW_IN, "flow_in") | ||
168 | S_(SECCLASS_PACKET, PACKET__FLOW_OUT, "flow_out") | ||
169 | S_(SECCLASS_PACKET, PACKET__FORWARD_IN, "forward_in") | ||
170 | S_(SECCLASS_PACKET, PACKET__FORWARD_OUT, "forward_out") | ||
171 | S_(SECCLASS_KEY, KEY__VIEW, "view") | ||
172 | S_(SECCLASS_KEY, KEY__READ, "read") | ||
173 | S_(SECCLASS_KEY, KEY__WRITE, "write") | ||
174 | S_(SECCLASS_KEY, KEY__SEARCH, "search") | ||
175 | S_(SECCLASS_KEY, KEY__LINK, "link") | ||
176 | S_(SECCLASS_KEY, KEY__SETATTR, "setattr") | ||
177 | S_(SECCLASS_KEY, KEY__CREATE, "create") | ||
178 | S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NODE_BIND, "node_bind") | ||
179 | S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NAME_CONNECT, "name_connect") | ||
180 | S_(SECCLASS_MEMPROTECT, MEMPROTECT__MMAP_ZERO, "mmap_zero") | ||
181 | S_(SECCLASS_PEER, PEER__RECV, "recv") | ||
182 | S_(SECCLASS_KERNEL_SERVICE, KERNEL_SERVICE__USE_AS_OVERRIDE, "use_as_override") | ||
183 | S_(SECCLASS_KERNEL_SERVICE, KERNEL_SERVICE__CREATE_FILES_AS, "create_files_as") | ||
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h deleted file mode 100644 index 0546d616ccac..000000000000 --- a/security/selinux/include/av_permissions.h +++ /dev/null | |||
@@ -1,870 +0,0 @@ | |||
1 | /* This file is automatically generated. Do not edit. */ | ||
2 | #define COMMON_FILE__IOCTL 0x00000001UL | ||
3 | #define COMMON_FILE__READ 0x00000002UL | ||
4 | #define COMMON_FILE__WRITE 0x00000004UL | ||
5 | #define COMMON_FILE__CREATE 0x00000008UL | ||
6 | #define COMMON_FILE__GETATTR 0x00000010UL | ||
7 | #define COMMON_FILE__SETATTR 0x00000020UL | ||
8 | #define COMMON_FILE__LOCK 0x00000040UL | ||
9 | #define COMMON_FILE__RELABELFROM 0x00000080UL | ||
10 | #define COMMON_FILE__RELABELTO 0x00000100UL | ||
11 | #define COMMON_FILE__APPEND 0x00000200UL | ||
12 | #define COMMON_FILE__UNLINK 0x00000400UL | ||
13 | #define COMMON_FILE__LINK 0x00000800UL | ||
14 | #define COMMON_FILE__RENAME 0x00001000UL | ||
15 | #define COMMON_FILE__EXECUTE 0x00002000UL | ||
16 | #define COMMON_FILE__SWAPON 0x00004000UL | ||
17 | #define COMMON_FILE__QUOTAON 0x00008000UL | ||
18 | #define COMMON_FILE__MOUNTON 0x00010000UL | ||
19 | #define COMMON_SOCKET__IOCTL 0x00000001UL | ||
20 | #define COMMON_SOCKET__READ 0x00000002UL | ||
21 | #define COMMON_SOCKET__WRITE 0x00000004UL | ||
22 | #define COMMON_SOCKET__CREATE 0x00000008UL | ||
23 | #define COMMON_SOCKET__GETATTR 0x00000010UL | ||
24 | #define COMMON_SOCKET__SETATTR 0x00000020UL | ||
25 | #define COMMON_SOCKET__LOCK 0x00000040UL | ||
26 | #define COMMON_SOCKET__RELABELFROM 0x00000080UL | ||
27 | #define COMMON_SOCKET__RELABELTO 0x00000100UL | ||
28 | #define COMMON_SOCKET__APPEND 0x00000200UL | ||
29 | #define COMMON_SOCKET__BIND 0x00000400UL | ||
30 | #define COMMON_SOCKET__CONNECT 0x00000800UL | ||
31 | #define COMMON_SOCKET__LISTEN 0x00001000UL | ||
32 | #define COMMON_SOCKET__ACCEPT 0x00002000UL | ||
33 | #define COMMON_SOCKET__GETOPT 0x00004000UL | ||
34 | #define COMMON_SOCKET__SETOPT 0x00008000UL | ||
35 | #define COMMON_SOCKET__SHUTDOWN 0x00010000UL | ||
36 | #define COMMON_SOCKET__RECVFROM 0x00020000UL | ||
37 | #define COMMON_SOCKET__SENDTO 0x00040000UL | ||
38 | #define COMMON_SOCKET__RECV_MSG 0x00080000UL | ||
39 | #define COMMON_SOCKET__SEND_MSG 0x00100000UL | ||
40 | #define COMMON_SOCKET__NAME_BIND 0x00200000UL | ||
41 | #define COMMON_IPC__CREATE 0x00000001UL | ||
42 | #define COMMON_IPC__DESTROY 0x00000002UL | ||
43 | #define COMMON_IPC__GETATTR 0x00000004UL | ||
44 | #define COMMON_IPC__SETATTR 0x00000008UL | ||
45 | #define COMMON_IPC__READ 0x00000010UL | ||
46 | #define COMMON_IPC__WRITE 0x00000020UL | ||
47 | #define COMMON_IPC__ASSOCIATE 0x00000040UL | ||
48 | #define COMMON_IPC__UNIX_READ 0x00000080UL | ||
49 | #define COMMON_IPC__UNIX_WRITE 0x00000100UL | ||
50 | #define FILESYSTEM__MOUNT 0x00000001UL | ||
51 | #define FILESYSTEM__REMOUNT 0x00000002UL | ||
52 | #define FILESYSTEM__UNMOUNT 0x00000004UL | ||
53 | #define FILESYSTEM__GETATTR 0x00000008UL | ||
54 | #define FILESYSTEM__RELABELFROM 0x00000010UL | ||
55 | #define FILESYSTEM__RELABELTO 0x00000020UL | ||
56 | #define FILESYSTEM__TRANSITION 0x00000040UL | ||
57 | #define FILESYSTEM__ASSOCIATE 0x00000080UL | ||
58 | #define FILESYSTEM__QUOTAMOD 0x00000100UL | ||
59 | #define FILESYSTEM__QUOTAGET 0x00000200UL | ||
60 | #define DIR__IOCTL 0x00000001UL | ||
61 | #define DIR__READ 0x00000002UL | ||
62 | #define DIR__WRITE 0x00000004UL | ||
63 | #define DIR__CREATE 0x00000008UL | ||
64 | #define DIR__GETATTR 0x00000010UL | ||
65 | #define DIR__SETATTR 0x00000020UL | ||
66 | #define DIR__LOCK 0x00000040UL | ||
67 | #define DIR__RELABELFROM 0x00000080UL | ||
68 | #define DIR__RELABELTO 0x00000100UL | ||
69 | #define DIR__APPEND 0x00000200UL | ||
70 | #define DIR__UNLINK 0x00000400UL | ||
71 | #define DIR__LINK 0x00000800UL | ||
72 | #define DIR__RENAME 0x00001000UL | ||
73 | #define DIR__EXECUTE 0x00002000UL | ||
74 | #define DIR__SWAPON 0x00004000UL | ||
75 | #define DIR__QUOTAON 0x00008000UL | ||
76 | #define DIR__MOUNTON 0x00010000UL | ||
77 | #define DIR__ADD_NAME 0x00020000UL | ||
78 | #define DIR__REMOVE_NAME 0x00040000UL | ||
79 | #define DIR__REPARENT 0x00080000UL | ||
80 | #define DIR__SEARCH 0x00100000UL | ||
81 | #define DIR__RMDIR 0x00200000UL | ||
82 | #define DIR__OPEN 0x00400000UL | ||
83 | #define FILE__IOCTL 0x00000001UL | ||
84 | #define FILE__READ 0x00000002UL | ||
85 | #define FILE__WRITE 0x00000004UL | ||
86 | #define FILE__CREATE 0x00000008UL | ||
87 | #define FILE__GETATTR 0x00000010UL | ||
88 | #define FILE__SETATTR 0x00000020UL | ||
89 | #define FILE__LOCK 0x00000040UL | ||
90 | #define FILE__RELABELFROM 0x00000080UL | ||
91 | #define FILE__RELABELTO 0x00000100UL | ||
92 | #define FILE__APPEND 0x00000200UL | ||
93 | #define FILE__UNLINK 0x00000400UL | ||
94 | #define FILE__LINK 0x00000800UL | ||
95 | #define FILE__RENAME 0x00001000UL | ||
96 | #define FILE__EXECUTE 0x00002000UL | ||
97 | #define FILE__SWAPON 0x00004000UL | ||
98 | #define FILE__QUOTAON 0x00008000UL | ||
99 | #define FILE__MOUNTON 0x00010000UL | ||
100 | #define FILE__EXECUTE_NO_TRANS 0x00020000UL | ||
101 | #define FILE__ENTRYPOINT 0x00040000UL | ||
102 | #define FILE__EXECMOD 0x00080000UL | ||
103 | #define FILE__OPEN 0x00100000UL | ||
104 | #define LNK_FILE__IOCTL 0x00000001UL | ||
105 | #define LNK_FILE__READ 0x00000002UL | ||
106 | #define LNK_FILE__WRITE 0x00000004UL | ||
107 | #define LNK_FILE__CREATE 0x00000008UL | ||
108 | #define LNK_FILE__GETATTR 0x00000010UL | ||
109 | #define LNK_FILE__SETATTR 0x00000020UL | ||
110 | #define LNK_FILE__LOCK 0x00000040UL | ||
111 | #define LNK_FILE__RELABELFROM 0x00000080UL | ||
112 | #define LNK_FILE__RELABELTO 0x00000100UL | ||
113 | #define LNK_FILE__APPEND 0x00000200UL | ||
114 | #define LNK_FILE__UNLINK 0x00000400UL | ||
115 | #define LNK_FILE__LINK 0x00000800UL | ||
116 | #define LNK_FILE__RENAME 0x00001000UL | ||
117 | #define LNK_FILE__EXECUTE 0x00002000UL | ||
118 | #define LNK_FILE__SWAPON 0x00004000UL | ||
119 | #define LNK_FILE__QUOTAON 0x00008000UL | ||
120 | #define LNK_FILE__MOUNTON 0x00010000UL | ||
121 | #define CHR_FILE__IOCTL 0x00000001UL | ||
122 | #define CHR_FILE__READ 0x00000002UL | ||
123 | #define CHR_FILE__WRITE 0x00000004UL | ||
124 | #define CHR_FILE__CREATE 0x00000008UL | ||
125 | #define CHR_FILE__GETATTR 0x00000010UL | ||
126 | #define CHR_FILE__SETATTR 0x00000020UL | ||
127 | #define CHR_FILE__LOCK 0x00000040UL | ||
128 | #define CHR_FILE__RELABELFROM 0x00000080UL | ||
129 | #define CHR_FILE__RELABELTO 0x00000100UL | ||
130 | #define CHR_FILE__APPEND 0x00000200UL | ||
131 | #define CHR_FILE__UNLINK 0x00000400UL | ||
132 | #define CHR_FILE__LINK 0x00000800UL | ||
133 | #define CHR_FILE__RENAME 0x00001000UL | ||
134 | #define CHR_FILE__EXECUTE 0x00002000UL | ||
135 | #define CHR_FILE__SWAPON 0x00004000UL | ||
136 | #define CHR_FILE__QUOTAON 0x00008000UL | ||
137 | #define CHR_FILE__MOUNTON 0x00010000UL | ||
138 | #define CHR_FILE__EXECUTE_NO_TRANS 0x00020000UL | ||
139 | #define CHR_FILE__ENTRYPOINT 0x00040000UL | ||
140 | #define CHR_FILE__EXECMOD 0x00080000UL | ||
141 | #define CHR_FILE__OPEN 0x00100000UL | ||
142 | #define BLK_FILE__IOCTL 0x00000001UL | ||
143 | #define BLK_FILE__READ 0x00000002UL | ||
144 | #define BLK_FILE__WRITE 0x00000004UL | ||
145 | #define BLK_FILE__CREATE 0x00000008UL | ||
146 | #define BLK_FILE__GETATTR 0x00000010UL | ||
147 | #define BLK_FILE__SETATTR 0x00000020UL | ||
148 | #define BLK_FILE__LOCK 0x00000040UL | ||
149 | #define BLK_FILE__RELABELFROM 0x00000080UL | ||
150 | #define BLK_FILE__RELABELTO 0x00000100UL | ||
151 | #define BLK_FILE__APPEND 0x00000200UL | ||
152 | #define BLK_FILE__UNLINK 0x00000400UL | ||
153 | #define BLK_FILE__LINK 0x00000800UL | ||
154 | #define BLK_FILE__RENAME 0x00001000UL | ||
155 | #define BLK_FILE__EXECUTE 0x00002000UL | ||
156 | #define BLK_FILE__SWAPON 0x00004000UL | ||
157 | #define BLK_FILE__QUOTAON 0x00008000UL | ||
158 | #define BLK_FILE__MOUNTON 0x00010000UL | ||
159 | #define BLK_FILE__OPEN 0x00020000UL | ||
160 | #define SOCK_FILE__IOCTL 0x00000001UL | ||
161 | #define SOCK_FILE__READ 0x00000002UL | ||
162 | #define SOCK_FILE__WRITE 0x00000004UL | ||
163 | #define SOCK_FILE__CREATE 0x00000008UL | ||
164 | #define SOCK_FILE__GETATTR 0x00000010UL | ||
165 | #define SOCK_FILE__SETATTR 0x00000020UL | ||
166 | #define SOCK_FILE__LOCK 0x00000040UL | ||
167 | #define SOCK_FILE__RELABELFROM 0x00000080UL | ||
168 | #define SOCK_FILE__RELABELTO 0x00000100UL | ||
169 | #define SOCK_FILE__APPEND 0x00000200UL | ||
170 | #define SOCK_FILE__UNLINK 0x00000400UL | ||
171 | #define SOCK_FILE__LINK 0x00000800UL | ||
172 | #define SOCK_FILE__RENAME 0x00001000UL | ||
173 | #define SOCK_FILE__EXECUTE 0x00002000UL | ||
174 | #define SOCK_FILE__SWAPON 0x00004000UL | ||
175 | #define SOCK_FILE__QUOTAON 0x00008000UL | ||
176 | #define SOCK_FILE__MOUNTON 0x00010000UL | ||
177 | #define SOCK_FILE__OPEN 0x00020000UL | ||
178 | #define FIFO_FILE__IOCTL 0x00000001UL | ||
179 | #define FIFO_FILE__READ 0x00000002UL | ||
180 | #define FIFO_FILE__WRITE 0x00000004UL | ||
181 | #define FIFO_FILE__CREATE 0x00000008UL | ||
182 | #define FIFO_FILE__GETATTR 0x00000010UL | ||
183 | #define FIFO_FILE__SETATTR 0x00000020UL | ||
184 | #define FIFO_FILE__LOCK 0x00000040UL | ||
185 | #define FIFO_FILE__RELABELFROM 0x00000080UL | ||
186 | #define FIFO_FILE__RELABELTO 0x00000100UL | ||
187 | #define FIFO_FILE__APPEND 0x00000200UL | ||
188 | #define FIFO_FILE__UNLINK 0x00000400UL | ||
189 | #define FIFO_FILE__LINK 0x00000800UL | ||
190 | #define FIFO_FILE__RENAME 0x00001000UL | ||
191 | #define FIFO_FILE__EXECUTE 0x00002000UL | ||
192 | #define FIFO_FILE__SWAPON 0x00004000UL | ||
193 | #define FIFO_FILE__QUOTAON 0x00008000UL | ||
194 | #define FIFO_FILE__MOUNTON 0x00010000UL | ||
195 | #define FIFO_FILE__OPEN 0x00020000UL | ||
196 | #define FD__USE 0x00000001UL | ||
197 | #define SOCKET__IOCTL 0x00000001UL | ||
198 | #define SOCKET__READ 0x00000002UL | ||
199 | #define SOCKET__WRITE 0x00000004UL | ||
200 | #define SOCKET__CREATE 0x00000008UL | ||
201 | #define SOCKET__GETATTR 0x00000010UL | ||
202 | #define SOCKET__SETATTR 0x00000020UL | ||
203 | #define SOCKET__LOCK 0x00000040UL | ||
204 | #define SOCKET__RELABELFROM 0x00000080UL | ||
205 | #define SOCKET__RELABELTO 0x00000100UL | ||
206 | #define SOCKET__APPEND 0x00000200UL | ||
207 | #define SOCKET__BIND 0x00000400UL | ||
208 | #define SOCKET__CONNECT 0x00000800UL | ||
209 | #define SOCKET__LISTEN 0x00001000UL | ||
210 | #define SOCKET__ACCEPT 0x00002000UL | ||
211 | #define SOCKET__GETOPT 0x00004000UL | ||
212 | #define SOCKET__SETOPT 0x00008000UL | ||
213 | #define SOCKET__SHUTDOWN 0x00010000UL | ||
214 | #define SOCKET__RECVFROM 0x00020000UL | ||
215 | #define SOCKET__SENDTO 0x00040000UL | ||
216 | #define SOCKET__RECV_MSG 0x00080000UL | ||
217 | #define SOCKET__SEND_MSG 0x00100000UL | ||
218 | #define SOCKET__NAME_BIND 0x00200000UL | ||
219 | #define TCP_SOCKET__IOCTL 0x00000001UL | ||
220 | #define TCP_SOCKET__READ 0x00000002UL | ||
221 | #define TCP_SOCKET__WRITE 0x00000004UL | ||
222 | #define TCP_SOCKET__CREATE 0x00000008UL | ||
223 | #define TCP_SOCKET__GETATTR 0x00000010UL | ||
224 | #define TCP_SOCKET__SETATTR 0x00000020UL | ||
225 | #define TCP_SOCKET__LOCK 0x00000040UL | ||
226 | #define TCP_SOCKET__RELABELFROM 0x00000080UL | ||
227 | #define TCP_SOCKET__RELABELTO 0x00000100UL | ||
228 | #define TCP_SOCKET__APPEND 0x00000200UL | ||
229 | #define TCP_SOCKET__BIND 0x00000400UL | ||
230 | #define TCP_SOCKET__CONNECT 0x00000800UL | ||
231 | #define TCP_SOCKET__LISTEN 0x00001000UL | ||
232 | #define TCP_SOCKET__ACCEPT 0x00002000UL | ||
233 | #define TCP_SOCKET__GETOPT 0x00004000UL | ||
234 | #define TCP_SOCKET__SETOPT 0x00008000UL | ||
235 | #define TCP_SOCKET__SHUTDOWN 0x00010000UL | ||
236 | #define TCP_SOCKET__RECVFROM 0x00020000UL | ||
237 | #define TCP_SOCKET__SENDTO 0x00040000UL | ||
238 | #define TCP_SOCKET__RECV_MSG 0x00080000UL | ||
239 | #define TCP_SOCKET__SEND_MSG 0x00100000UL | ||
240 | #define TCP_SOCKET__NAME_BIND 0x00200000UL | ||
241 | #define TCP_SOCKET__CONNECTTO 0x00400000UL | ||
242 | #define TCP_SOCKET__NEWCONN 0x00800000UL | ||
243 | #define TCP_SOCKET__ACCEPTFROM 0x01000000UL | ||
244 | #define TCP_SOCKET__NODE_BIND 0x02000000UL | ||
245 | #define TCP_SOCKET__NAME_CONNECT 0x04000000UL | ||
246 | #define UDP_SOCKET__IOCTL 0x00000001UL | ||
247 | #define UDP_SOCKET__READ 0x00000002UL | ||
248 | #define UDP_SOCKET__WRITE 0x00000004UL | ||
249 | #define UDP_SOCKET__CREATE 0x00000008UL | ||
250 | #define UDP_SOCKET__GETATTR 0x00000010UL | ||
251 | #define UDP_SOCKET__SETATTR 0x00000020UL | ||
252 | #define UDP_SOCKET__LOCK 0x00000040UL | ||
253 | #define UDP_SOCKET__RELABELFROM 0x00000080UL | ||
254 | #define UDP_SOCKET__RELABELTO 0x00000100UL | ||
255 | #define UDP_SOCKET__APPEND 0x00000200UL | ||
256 | #define UDP_SOCKET__BIND 0x00000400UL | ||
257 | #define UDP_SOCKET__CONNECT 0x00000800UL | ||
258 | #define UDP_SOCKET__LISTEN 0x00001000UL | ||
259 | #define UDP_SOCKET__ACCEPT 0x00002000UL | ||
260 | #define UDP_SOCKET__GETOPT 0x00004000UL | ||
261 | #define UDP_SOCKET__SETOPT 0x00008000UL | ||
262 | #define UDP_SOCKET__SHUTDOWN 0x00010000UL | ||
263 | #define UDP_SOCKET__RECVFROM 0x00020000UL | ||
264 | #define UDP_SOCKET__SENDTO 0x00040000UL | ||
265 | #define UDP_SOCKET__RECV_MSG 0x00080000UL | ||
266 | #define UDP_SOCKET__SEND_MSG 0x00100000UL | ||
267 | #define UDP_SOCKET__NAME_BIND 0x00200000UL | ||
268 | #define UDP_SOCKET__NODE_BIND 0x00400000UL | ||
269 | #define RAWIP_SOCKET__IOCTL 0x00000001UL | ||
270 | #define RAWIP_SOCKET__READ 0x00000002UL | ||
271 | #define RAWIP_SOCKET__WRITE 0x00000004UL | ||
272 | #define RAWIP_SOCKET__CREATE 0x00000008UL | ||
273 | #define RAWIP_SOCKET__GETATTR 0x00000010UL | ||
274 | #define RAWIP_SOCKET__SETATTR 0x00000020UL | ||
275 | #define RAWIP_SOCKET__LOCK 0x00000040UL | ||
276 | #define RAWIP_SOCKET__RELABELFROM 0x00000080UL | ||
277 | #define RAWIP_SOCKET__RELABELTO 0x00000100UL | ||
278 | #define RAWIP_SOCKET__APPEND 0x00000200UL | ||
279 | #define RAWIP_SOCKET__BIND 0x00000400UL | ||
280 | #define RAWIP_SOCKET__CONNECT 0x00000800UL | ||
281 | #define RAWIP_SOCKET__LISTEN 0x00001000UL | ||
282 | #define RAWIP_SOCKET__ACCEPT 0x00002000UL | ||
283 | #define RAWIP_SOCKET__GETOPT 0x00004000UL | ||
284 | #define RAWIP_SOCKET__SETOPT 0x00008000UL | ||
285 | #define RAWIP_SOCKET__SHUTDOWN 0x00010000UL | ||
286 | #define RAWIP_SOCKET__RECVFROM 0x00020000UL | ||
287 | #define RAWIP_SOCKET__SENDTO 0x00040000UL | ||
288 | #define RAWIP_SOCKET__RECV_MSG 0x00080000UL | ||
289 | #define RAWIP_SOCKET__SEND_MSG 0x00100000UL | ||
290 | #define RAWIP_SOCKET__NAME_BIND 0x00200000UL | ||
291 | #define RAWIP_SOCKET__NODE_BIND 0x00400000UL | ||
292 | #define NODE__TCP_RECV 0x00000001UL | ||
293 | #define NODE__TCP_SEND 0x00000002UL | ||
294 | #define NODE__UDP_RECV 0x00000004UL | ||
295 | #define NODE__UDP_SEND 0x00000008UL | ||
296 | #define NODE__RAWIP_RECV 0x00000010UL | ||
297 | #define NODE__RAWIP_SEND 0x00000020UL | ||
298 | #define NODE__ENFORCE_DEST 0x00000040UL | ||
299 | #define NODE__DCCP_RECV 0x00000080UL | ||
300 | #define NODE__DCCP_SEND 0x00000100UL | ||
301 | #define NODE__RECVFROM 0x00000200UL | ||
302 | #define NODE__SENDTO 0x00000400UL | ||
303 | #define NETIF__TCP_RECV 0x00000001UL | ||
304 | #define NETIF__TCP_SEND 0x00000002UL | ||
305 | #define NETIF__UDP_RECV 0x00000004UL | ||
306 | #define NETIF__UDP_SEND 0x00000008UL | ||
307 | #define NETIF__RAWIP_RECV 0x00000010UL | ||
308 | #define NETIF__RAWIP_SEND 0x00000020UL | ||
309 | #define NETIF__DCCP_RECV 0x00000040UL | ||
310 | #define NETIF__DCCP_SEND 0x00000080UL | ||
311 | #define NETIF__INGRESS 0x00000100UL | ||
312 | #define NETIF__EGRESS 0x00000200UL | ||
313 | #define NETLINK_SOCKET__IOCTL 0x00000001UL | ||
314 | #define NETLINK_SOCKET__READ 0x00000002UL | ||
315 | #define NETLINK_SOCKET__WRITE 0x00000004UL | ||
316 | #define NETLINK_SOCKET__CREATE 0x00000008UL | ||
317 | #define NETLINK_SOCKET__GETATTR 0x00000010UL | ||
318 | #define NETLINK_SOCKET__SETATTR 0x00000020UL | ||
319 | #define NETLINK_SOCKET__LOCK 0x00000040UL | ||
320 | #define NETLINK_SOCKET__RELABELFROM 0x00000080UL | ||
321 | #define NETLINK_SOCKET__RELABELTO 0x00000100UL | ||
322 | #define NETLINK_SOCKET__APPEND 0x00000200UL | ||
323 | #define NETLINK_SOCKET__BIND 0x00000400UL | ||
324 | #define NETLINK_SOCKET__CONNECT 0x00000800UL | ||
325 | #define NETLINK_SOCKET__LISTEN 0x00001000UL | ||
326 | #define NETLINK_SOCKET__ACCEPT 0x00002000UL | ||
327 | #define NETLINK_SOCKET__GETOPT 0x00004000UL | ||
328 | #define NETLINK_SOCKET__SETOPT 0x00008000UL | ||
329 | #define NETLINK_SOCKET__SHUTDOWN 0x00010000UL | ||
330 | #define NETLINK_SOCKET__RECVFROM 0x00020000UL | ||
331 | #define NETLINK_SOCKET__SENDTO 0x00040000UL | ||
332 | #define NETLINK_SOCKET__RECV_MSG 0x00080000UL | ||
333 | #define NETLINK_SOCKET__SEND_MSG 0x00100000UL | ||
334 | #define NETLINK_SOCKET__NAME_BIND 0x00200000UL | ||
335 | #define PACKET_SOCKET__IOCTL 0x00000001UL | ||
336 | #define PACKET_SOCKET__READ 0x00000002UL | ||
337 | #define PACKET_SOCKET__WRITE 0x00000004UL | ||
338 | #define PACKET_SOCKET__CREATE 0x00000008UL | ||
339 | #define PACKET_SOCKET__GETATTR 0x00000010UL | ||
340 | #define PACKET_SOCKET__SETATTR 0x00000020UL | ||
341 | #define PACKET_SOCKET__LOCK 0x00000040UL | ||
342 | #define PACKET_SOCKET__RELABELFROM 0x00000080UL | ||
343 | #define PACKET_SOCKET__RELABELTO 0x00000100UL | ||
344 | #define PACKET_SOCKET__APPEND 0x00000200UL | ||
345 | #define PACKET_SOCKET__BIND 0x00000400UL | ||
346 | #define PACKET_SOCKET__CONNECT 0x00000800UL | ||
347 | #define PACKET_SOCKET__LISTEN 0x00001000UL | ||
348 | #define PACKET_SOCKET__ACCEPT 0x00002000UL | ||
349 | #define PACKET_SOCKET__GETOPT 0x00004000UL | ||
350 | #define PACKET_SOCKET__SETOPT 0x00008000UL | ||
351 | #define PACKET_SOCKET__SHUTDOWN 0x00010000UL | ||
352 | #define PACKET_SOCKET__RECVFROM 0x00020000UL | ||
353 | #define PACKET_SOCKET__SENDTO 0x00040000UL | ||
354 | #define PACKET_SOCKET__RECV_MSG 0x00080000UL | ||
355 | #define PACKET_SOCKET__SEND_MSG 0x00100000UL | ||
356 | #define PACKET_SOCKET__NAME_BIND 0x00200000UL | ||
357 | #define KEY_SOCKET__IOCTL 0x00000001UL | ||
358 | #define KEY_SOCKET__READ 0x00000002UL | ||
359 | #define KEY_SOCKET__WRITE 0x00000004UL | ||
360 | #define KEY_SOCKET__CREATE 0x00000008UL | ||
361 | #define KEY_SOCKET__GETATTR 0x00000010UL | ||
362 | #define KEY_SOCKET__SETATTR 0x00000020UL | ||
363 | #define KEY_SOCKET__LOCK 0x00000040UL | ||
364 | #define KEY_SOCKET__RELABELFROM 0x00000080UL | ||
365 | #define KEY_SOCKET__RELABELTO 0x00000100UL | ||
366 | #define KEY_SOCKET__APPEND 0x00000200UL | ||
367 | #define KEY_SOCKET__BIND 0x00000400UL | ||
368 | #define KEY_SOCKET__CONNECT 0x00000800UL | ||
369 | #define KEY_SOCKET__LISTEN 0x00001000UL | ||
370 | #define KEY_SOCKET__ACCEPT 0x00002000UL | ||
371 | #define KEY_SOCKET__GETOPT 0x00004000UL | ||
372 | #define KEY_SOCKET__SETOPT 0x00008000UL | ||
373 | #define KEY_SOCKET__SHUTDOWN 0x00010000UL | ||
374 | #define KEY_SOCKET__RECVFROM 0x00020000UL | ||
375 | #define KEY_SOCKET__SENDTO 0x00040000UL | ||
376 | #define KEY_SOCKET__RECV_MSG 0x00080000UL | ||
377 | #define KEY_SOCKET__SEND_MSG 0x00100000UL | ||
378 | #define KEY_SOCKET__NAME_BIND 0x00200000UL | ||
379 | #define UNIX_STREAM_SOCKET__IOCTL 0x00000001UL | ||
380 | #define UNIX_STREAM_SOCKET__READ 0x00000002UL | ||
381 | #define UNIX_STREAM_SOCKET__WRITE 0x00000004UL | ||
382 | #define UNIX_STREAM_SOCKET__CREATE 0x00000008UL | ||
383 | #define UNIX_STREAM_SOCKET__GETATTR 0x00000010UL | ||
384 | #define UNIX_STREAM_SOCKET__SETATTR 0x00000020UL | ||
385 | #define UNIX_STREAM_SOCKET__LOCK 0x00000040UL | ||
386 | #define UNIX_STREAM_SOCKET__RELABELFROM 0x00000080UL | ||
387 | #define UNIX_STREAM_SOCKET__RELABELTO 0x00000100UL | ||
388 | #define UNIX_STREAM_SOCKET__APPEND 0x00000200UL | ||
389 | #define UNIX_STREAM_SOCKET__BIND 0x00000400UL | ||
390 | #define UNIX_STREAM_SOCKET__CONNECT 0x00000800UL | ||
391 | #define UNIX_STREAM_SOCKET__LISTEN 0x00001000UL | ||
392 | #define UNIX_STREAM_SOCKET__ACCEPT 0x00002000UL | ||
393 | #define UNIX_STREAM_SOCKET__GETOPT 0x00004000UL | ||
394 | #define UNIX_STREAM_SOCKET__SETOPT 0x00008000UL | ||
395 | #define UNIX_STREAM_SOCKET__SHUTDOWN 0x00010000UL | ||
396 | #define UNIX_STREAM_SOCKET__RECVFROM 0x00020000UL | ||
397 | #define UNIX_STREAM_SOCKET__SENDTO 0x00040000UL | ||
398 | #define UNIX_STREAM_SOCKET__RECV_MSG 0x00080000UL | ||
399 | #define UNIX_STREAM_SOCKET__SEND_MSG 0x00100000UL | ||
400 | #define UNIX_STREAM_SOCKET__NAME_BIND 0x00200000UL | ||
401 | #define UNIX_STREAM_SOCKET__CONNECTTO 0x00400000UL | ||
402 | #define UNIX_STREAM_SOCKET__NEWCONN 0x00800000UL | ||
403 | #define UNIX_STREAM_SOCKET__ACCEPTFROM 0x01000000UL | ||
404 | #define UNIX_DGRAM_SOCKET__IOCTL 0x00000001UL | ||
405 | #define UNIX_DGRAM_SOCKET__READ 0x00000002UL | ||
406 | #define UNIX_DGRAM_SOCKET__WRITE 0x00000004UL | ||
407 | #define UNIX_DGRAM_SOCKET__CREATE 0x00000008UL | ||
408 | #define UNIX_DGRAM_SOCKET__GETATTR 0x00000010UL | ||
409 | #define UNIX_DGRAM_SOCKET__SETATTR 0x00000020UL | ||
410 | #define UNIX_DGRAM_SOCKET__LOCK 0x00000040UL | ||
411 | #define UNIX_DGRAM_SOCKET__RELABELFROM 0x00000080UL | ||
412 | #define UNIX_DGRAM_SOCKET__RELABELTO 0x00000100UL | ||
413 | #define UNIX_DGRAM_SOCKET__APPEND 0x00000200UL | ||
414 | #define UNIX_DGRAM_SOCKET__BIND 0x00000400UL | ||
415 | #define UNIX_DGRAM_SOCKET__CONNECT 0x00000800UL | ||
416 | #define UNIX_DGRAM_SOCKET__LISTEN 0x00001000UL | ||
417 | #define UNIX_DGRAM_SOCKET__ACCEPT 0x00002000UL | ||
418 | #define UNIX_DGRAM_SOCKET__GETOPT 0x00004000UL | ||
419 | #define UNIX_DGRAM_SOCKET__SETOPT 0x00008000UL | ||
420 | #define UNIX_DGRAM_SOCKET__SHUTDOWN 0x00010000UL | ||
421 | #define UNIX_DGRAM_SOCKET__RECVFROM 0x00020000UL | ||
422 | #define UNIX_DGRAM_SOCKET__SENDTO 0x00040000UL | ||
423 | #define UNIX_DGRAM_SOCKET__RECV_MSG 0x00080000UL | ||
424 | #define UNIX_DGRAM_SOCKET__SEND_MSG 0x00100000UL | ||
425 | #define UNIX_DGRAM_SOCKET__NAME_BIND 0x00200000UL | ||
426 | #define TUN_SOCKET__IOCTL 0x00000001UL | ||
427 | #define TUN_SOCKET__READ 0x00000002UL | ||
428 | #define TUN_SOCKET__WRITE 0x00000004UL | ||
429 | #define TUN_SOCKET__CREATE 0x00000008UL | ||
430 | #define TUN_SOCKET__GETATTR 0x00000010UL | ||
431 | #define TUN_SOCKET__SETATTR 0x00000020UL | ||
432 | #define TUN_SOCKET__LOCK 0x00000040UL | ||
433 | #define TUN_SOCKET__RELABELFROM 0x00000080UL | ||
434 | #define TUN_SOCKET__RELABELTO 0x00000100UL | ||
435 | #define TUN_SOCKET__APPEND 0x00000200UL | ||
436 | #define TUN_SOCKET__BIND 0x00000400UL | ||
437 | #define TUN_SOCKET__CONNECT 0x00000800UL | ||
438 | #define TUN_SOCKET__LISTEN 0x00001000UL | ||
439 | #define TUN_SOCKET__ACCEPT 0x00002000UL | ||
440 | #define TUN_SOCKET__GETOPT 0x00004000UL | ||
441 | #define TUN_SOCKET__SETOPT 0x00008000UL | ||
442 | #define TUN_SOCKET__SHUTDOWN 0x00010000UL | ||
443 | #define TUN_SOCKET__RECVFROM 0x00020000UL | ||
444 | #define TUN_SOCKET__SENDTO 0x00040000UL | ||
445 | #define TUN_SOCKET__RECV_MSG 0x00080000UL | ||
446 | #define TUN_SOCKET__SEND_MSG 0x00100000UL | ||
447 | #define TUN_SOCKET__NAME_BIND 0x00200000UL | ||
448 | #define PROCESS__FORK 0x00000001UL | ||
449 | #define PROCESS__TRANSITION 0x00000002UL | ||
450 | #define PROCESS__SIGCHLD 0x00000004UL | ||
451 | #define PROCESS__SIGKILL 0x00000008UL | ||
452 | #define PROCESS__SIGSTOP 0x00000010UL | ||
453 | #define PROCESS__SIGNULL 0x00000020UL | ||
454 | #define PROCESS__SIGNAL 0x00000040UL | ||
455 | #define PROCESS__PTRACE 0x00000080UL | ||
456 | #define PROCESS__GETSCHED 0x00000100UL | ||
457 | #define PROCESS__SETSCHED 0x00000200UL | ||
458 | #define PROCESS__GETSESSION 0x00000400UL | ||
459 | #define PROCESS__GETPGID 0x00000800UL | ||
460 | #define PROCESS__SETPGID 0x00001000UL | ||
461 | #define PROCESS__GETCAP 0x00002000UL | ||
462 | #define PROCESS__SETCAP 0x00004000UL | ||
463 | #define PROCESS__SHARE 0x00008000UL | ||
464 | #define PROCESS__GETATTR 0x00010000UL | ||
465 | #define PROCESS__SETEXEC 0x00020000UL | ||
466 | #define PROCESS__SETFSCREATE 0x00040000UL | ||
467 | #define PROCESS__NOATSECURE 0x00080000UL | ||
468 | #define PROCESS__SIGINH 0x00100000UL | ||
469 | #define PROCESS__SETRLIMIT 0x00200000UL | ||
470 | #define PROCESS__RLIMITINH 0x00400000UL | ||
471 | #define PROCESS__DYNTRANSITION 0x00800000UL | ||
472 | #define PROCESS__SETCURRENT 0x01000000UL | ||
473 | #define PROCESS__EXECMEM 0x02000000UL | ||
474 | #define PROCESS__EXECSTACK 0x04000000UL | ||
475 | #define PROCESS__EXECHEAP 0x08000000UL | ||
476 | #define PROCESS__SETKEYCREATE 0x10000000UL | ||
477 | #define PROCESS__SETSOCKCREATE 0x20000000UL | ||
478 | #define IPC__CREATE 0x00000001UL | ||
479 | #define IPC__DESTROY 0x00000002UL | ||
480 | #define IPC__GETATTR 0x00000004UL | ||
481 | #define IPC__SETATTR 0x00000008UL | ||
482 | #define IPC__READ 0x00000010UL | ||
483 | #define IPC__WRITE 0x00000020UL | ||
484 | #define IPC__ASSOCIATE 0x00000040UL | ||
485 | #define IPC__UNIX_READ 0x00000080UL | ||
486 | #define IPC__UNIX_WRITE 0x00000100UL | ||
487 | #define SEM__CREATE 0x00000001UL | ||
488 | #define SEM__DESTROY 0x00000002UL | ||
489 | #define SEM__GETATTR 0x00000004UL | ||
490 | #define SEM__SETATTR 0x00000008UL | ||
491 | #define SEM__READ 0x00000010UL | ||
492 | #define SEM__WRITE 0x00000020UL | ||
493 | #define SEM__ASSOCIATE 0x00000040UL | ||
494 | #define SEM__UNIX_READ 0x00000080UL | ||
495 | #define SEM__UNIX_WRITE 0x00000100UL | ||
496 | #define MSGQ__CREATE 0x00000001UL | ||
497 | #define MSGQ__DESTROY 0x00000002UL | ||
498 | #define MSGQ__GETATTR 0x00000004UL | ||
499 | #define MSGQ__SETATTR 0x00000008UL | ||
500 | #define MSGQ__READ 0x00000010UL | ||
501 | #define MSGQ__WRITE 0x00000020UL | ||
502 | #define MSGQ__ASSOCIATE 0x00000040UL | ||
503 | #define MSGQ__UNIX_READ 0x00000080UL | ||
504 | #define MSGQ__UNIX_WRITE 0x00000100UL | ||
505 | #define MSGQ__ENQUEUE 0x00000200UL | ||
506 | #define MSG__SEND 0x00000001UL | ||
507 | #define MSG__RECEIVE 0x00000002UL | ||
508 | #define SHM__CREATE 0x00000001UL | ||
509 | #define SHM__DESTROY 0x00000002UL | ||
510 | #define SHM__GETATTR 0x00000004UL | ||
511 | #define SHM__SETATTR 0x00000008UL | ||
512 | #define SHM__READ 0x00000010UL | ||
513 | #define SHM__WRITE 0x00000020UL | ||
514 | #define SHM__ASSOCIATE 0x00000040UL | ||
515 | #define SHM__UNIX_READ 0x00000080UL | ||
516 | #define SHM__UNIX_WRITE 0x00000100UL | ||
517 | #define SHM__LOCK 0x00000200UL | ||
518 | #define SECURITY__COMPUTE_AV 0x00000001UL | ||
519 | #define SECURITY__COMPUTE_CREATE 0x00000002UL | ||
520 | #define SECURITY__COMPUTE_MEMBER 0x00000004UL | ||
521 | #define SECURITY__CHECK_CONTEXT 0x00000008UL | ||
522 | #define SECURITY__LOAD_POLICY 0x00000010UL | ||
523 | #define SECURITY__COMPUTE_RELABEL 0x00000020UL | ||
524 | #define SECURITY__COMPUTE_USER 0x00000040UL | ||
525 | #define SECURITY__SETENFORCE 0x00000080UL | ||
526 | #define SECURITY__SETBOOL 0x00000100UL | ||
527 | #define SECURITY__SETSECPARAM 0x00000200UL | ||
528 | #define SECURITY__SETCHECKREQPROT 0x00000400UL | ||
529 | #define SYSTEM__IPC_INFO 0x00000001UL | ||
530 | #define SYSTEM__SYSLOG_READ 0x00000002UL | ||
531 | #define SYSTEM__SYSLOG_MOD 0x00000004UL | ||
532 | #define SYSTEM__SYSLOG_CONSOLE 0x00000008UL | ||
533 | #define SYSTEM__MODULE_REQUEST 0x00000010UL | ||
534 | #define CAPABILITY__CHOWN 0x00000001UL | ||
535 | #define CAPABILITY__DAC_OVERRIDE 0x00000002UL | ||
536 | #define CAPABILITY__DAC_READ_SEARCH 0x00000004UL | ||
537 | #define CAPABILITY__FOWNER 0x00000008UL | ||
538 | #define CAPABILITY__FSETID 0x00000010UL | ||
539 | #define CAPABILITY__KILL 0x00000020UL | ||
540 | #define CAPABILITY__SETGID 0x00000040UL | ||
541 | #define CAPABILITY__SETUID 0x00000080UL | ||
542 | #define CAPABILITY__SETPCAP 0x00000100UL | ||
543 | #define CAPABILITY__LINUX_IMMUTABLE 0x00000200UL | ||
544 | #define CAPABILITY__NET_BIND_SERVICE 0x00000400UL | ||
545 | #define CAPABILITY__NET_BROADCAST 0x00000800UL | ||
546 | #define CAPABILITY__NET_ADMIN 0x00001000UL | ||
547 | #define CAPABILITY__NET_RAW 0x00002000UL | ||
548 | #define CAPABILITY__IPC_LOCK 0x00004000UL | ||
549 | #define CAPABILITY__IPC_OWNER 0x00008000UL | ||
550 | #define CAPABILITY__SYS_MODULE 0x00010000UL | ||
551 | #define CAPABILITY__SYS_RAWIO 0x00020000UL | ||
552 | #define CAPABILITY__SYS_CHROOT 0x00040000UL | ||
553 | #define CAPABILITY__SYS_PTRACE 0x00080000UL | ||
554 | #define CAPABILITY__SYS_PACCT 0x00100000UL | ||
555 | #define CAPABILITY__SYS_ADMIN 0x00200000UL | ||
556 | #define CAPABILITY__SYS_BOOT 0x00400000UL | ||
557 | #define CAPABILITY__SYS_NICE 0x00800000UL | ||
558 | #define CAPABILITY__SYS_RESOURCE 0x01000000UL | ||
559 | #define CAPABILITY__SYS_TIME 0x02000000UL | ||
560 | #define CAPABILITY__SYS_TTY_CONFIG 0x04000000UL | ||
561 | #define CAPABILITY__MKNOD 0x08000000UL | ||
562 | #define CAPABILITY__LEASE 0x10000000UL | ||
563 | #define CAPABILITY__AUDIT_WRITE 0x20000000UL | ||
564 | #define CAPABILITY__AUDIT_CONTROL 0x40000000UL | ||
565 | #define CAPABILITY__SETFCAP 0x80000000UL | ||
566 | #define CAPABILITY2__MAC_OVERRIDE 0x00000001UL | ||
567 | #define CAPABILITY2__MAC_ADMIN 0x00000002UL | ||
568 | #define NETLINK_ROUTE_SOCKET__IOCTL 0x00000001UL | ||
569 | #define NETLINK_ROUTE_SOCKET__READ 0x00000002UL | ||
570 | #define NETLINK_ROUTE_SOCKET__WRITE 0x00000004UL | ||
571 | #define NETLINK_ROUTE_SOCKET__CREATE 0x00000008UL | ||
572 | #define NETLINK_ROUTE_SOCKET__GETATTR 0x00000010UL | ||
573 | #define NETLINK_ROUTE_SOCKET__SETATTR 0x00000020UL | ||
574 | #define NETLINK_ROUTE_SOCKET__LOCK 0x00000040UL | ||
575 | #define NETLINK_ROUTE_SOCKET__RELABELFROM 0x00000080UL | ||
576 | #define NETLINK_ROUTE_SOCKET__RELABELTO 0x00000100UL | ||
577 | #define NETLINK_ROUTE_SOCKET__APPEND 0x00000200UL | ||
578 | #define NETLINK_ROUTE_SOCKET__BIND 0x00000400UL | ||
579 | #define NETLINK_ROUTE_SOCKET__CONNECT 0x00000800UL | ||
580 | #define NETLINK_ROUTE_SOCKET__LISTEN 0x00001000UL | ||
581 | #define NETLINK_ROUTE_SOCKET__ACCEPT 0x00002000UL | ||
582 | #define NETLINK_ROUTE_SOCKET__GETOPT 0x00004000UL | ||
583 | #define NETLINK_ROUTE_SOCKET__SETOPT 0x00008000UL | ||
584 | #define NETLINK_ROUTE_SOCKET__SHUTDOWN 0x00010000UL | ||
585 | #define NETLINK_ROUTE_SOCKET__RECVFROM 0x00020000UL | ||
586 | #define NETLINK_ROUTE_SOCKET__SENDTO 0x00040000UL | ||
587 | #define NETLINK_ROUTE_SOCKET__RECV_MSG 0x00080000UL | ||
588 | #define NETLINK_ROUTE_SOCKET__SEND_MSG 0x00100000UL | ||
589 | #define NETLINK_ROUTE_SOCKET__NAME_BIND 0x00200000UL | ||
590 | #define NETLINK_ROUTE_SOCKET__NLMSG_READ 0x00400000UL | ||
591 | #define NETLINK_ROUTE_SOCKET__NLMSG_WRITE 0x00800000UL | ||
592 | #define NETLINK_FIREWALL_SOCKET__IOCTL 0x00000001UL | ||
593 | #define NETLINK_FIREWALL_SOCKET__READ 0x00000002UL | ||
594 | #define NETLINK_FIREWALL_SOCKET__WRITE 0x00000004UL | ||
595 | #define NETLINK_FIREWALL_SOCKET__CREATE 0x00000008UL | ||
596 | #define NETLINK_FIREWALL_SOCKET__GETATTR 0x00000010UL | ||
597 | #define NETLINK_FIREWALL_SOCKET__SETATTR 0x00000020UL | ||
598 | #define NETLINK_FIREWALL_SOCKET__LOCK 0x00000040UL | ||
599 | #define NETLINK_FIREWALL_SOCKET__RELABELFROM 0x00000080UL | ||
600 | #define NETLINK_FIREWALL_SOCKET__RELABELTO 0x00000100UL | ||
601 | #define NETLINK_FIREWALL_SOCKET__APPEND 0x00000200UL | ||
602 | #define NETLINK_FIREWALL_SOCKET__BIND 0x00000400UL | ||
603 | #define NETLINK_FIREWALL_SOCKET__CONNECT 0x00000800UL | ||
604 | #define NETLINK_FIREWALL_SOCKET__LISTEN 0x00001000UL | ||
605 | #define NETLINK_FIREWALL_SOCKET__ACCEPT 0x00002000UL | ||
606 | #define NETLINK_FIREWALL_SOCKET__GETOPT 0x00004000UL | ||
607 | #define NETLINK_FIREWALL_SOCKET__SETOPT 0x00008000UL | ||
608 | #define NETLINK_FIREWALL_SOCKET__SHUTDOWN 0x00010000UL | ||
609 | #define NETLINK_FIREWALL_SOCKET__RECVFROM 0x00020000UL | ||
610 | #define NETLINK_FIREWALL_SOCKET__SENDTO 0x00040000UL | ||
611 | #define NETLINK_FIREWALL_SOCKET__RECV_MSG 0x00080000UL | ||
612 | #define NETLINK_FIREWALL_SOCKET__SEND_MSG 0x00100000UL | ||
613 | #define NETLINK_FIREWALL_SOCKET__NAME_BIND 0x00200000UL | ||
614 | #define NETLINK_FIREWALL_SOCKET__NLMSG_READ 0x00400000UL | ||
615 | #define NETLINK_FIREWALL_SOCKET__NLMSG_WRITE 0x00800000UL | ||
616 | #define NETLINK_TCPDIAG_SOCKET__IOCTL 0x00000001UL | ||
617 | #define NETLINK_TCPDIAG_SOCKET__READ 0x00000002UL | ||
618 | #define NETLINK_TCPDIAG_SOCKET__WRITE 0x00000004UL | ||
619 | #define NETLINK_TCPDIAG_SOCKET__CREATE 0x00000008UL | ||
620 | #define NETLINK_TCPDIAG_SOCKET__GETATTR 0x00000010UL | ||
621 | #define NETLINK_TCPDIAG_SOCKET__SETATTR 0x00000020UL | ||
622 | #define NETLINK_TCPDIAG_SOCKET__LOCK 0x00000040UL | ||
623 | #define NETLINK_TCPDIAG_SOCKET__RELABELFROM 0x00000080UL | ||
624 | #define NETLINK_TCPDIAG_SOCKET__RELABELTO 0x00000100UL | ||
625 | #define NETLINK_TCPDIAG_SOCKET__APPEND 0x00000200UL | ||
626 | #define NETLINK_TCPDIAG_SOCKET__BIND 0x00000400UL | ||
627 | #define NETLINK_TCPDIAG_SOCKET__CONNECT 0x00000800UL | ||
628 | #define NETLINK_TCPDIAG_SOCKET__LISTEN 0x00001000UL | ||
629 | #define NETLINK_TCPDIAG_SOCKET__ACCEPT 0x00002000UL | ||
630 | #define NETLINK_TCPDIAG_SOCKET__GETOPT 0x00004000UL | ||
631 | #define NETLINK_TCPDIAG_SOCKET__SETOPT 0x00008000UL | ||
632 | #define NETLINK_TCPDIAG_SOCKET__SHUTDOWN 0x00010000UL | ||
633 | #define NETLINK_TCPDIAG_SOCKET__RECVFROM 0x00020000UL | ||
634 | #define NETLINK_TCPDIAG_SOCKET__SENDTO 0x00040000UL | ||
635 | #define NETLINK_TCPDIAG_SOCKET__RECV_MSG 0x00080000UL | ||
636 | #define NETLINK_TCPDIAG_SOCKET__SEND_MSG 0x00100000UL | ||
637 | #define NETLINK_TCPDIAG_SOCKET__NAME_BIND 0x00200000UL | ||
638 | #define NETLINK_TCPDIAG_SOCKET__NLMSG_READ 0x00400000UL | ||
639 | #define NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE 0x00800000UL | ||
640 | #define NETLINK_NFLOG_SOCKET__IOCTL 0x00000001UL | ||
641 | #define NETLINK_NFLOG_SOCKET__READ 0x00000002UL | ||
642 | #define NETLINK_NFLOG_SOCKET__WRITE 0x00000004UL | ||
643 | #define NETLINK_NFLOG_SOCKET__CREATE 0x00000008UL | ||
644 | #define NETLINK_NFLOG_SOCKET__GETATTR 0x00000010UL | ||
645 | #define NETLINK_NFLOG_SOCKET__SETATTR 0x00000020UL | ||
646 | #define NETLINK_NFLOG_SOCKET__LOCK 0x00000040UL | ||
647 | #define NETLINK_NFLOG_SOCKET__RELABELFROM 0x00000080UL | ||
648 | #define NETLINK_NFLOG_SOCKET__RELABELTO 0x00000100UL | ||
649 | #define NETLINK_NFLOG_SOCKET__APPEND 0x00000200UL | ||
650 | #define NETLINK_NFLOG_SOCKET__BIND 0x00000400UL | ||
651 | #define NETLINK_NFLOG_SOCKET__CONNECT 0x00000800UL | ||
652 | #define NETLINK_NFLOG_SOCKET__LISTEN 0x00001000UL | ||
653 | #define NETLINK_NFLOG_SOCKET__ACCEPT 0x00002000UL | ||
654 | #define NETLINK_NFLOG_SOCKET__GETOPT 0x00004000UL | ||
655 | #define NETLINK_NFLOG_SOCKET__SETOPT 0x00008000UL | ||
656 | #define NETLINK_NFLOG_SOCKET__SHUTDOWN 0x00010000UL | ||
657 | #define NETLINK_NFLOG_SOCKET__RECVFROM 0x00020000UL | ||
658 | #define NETLINK_NFLOG_SOCKET__SENDTO 0x00040000UL | ||
659 | #define NETLINK_NFLOG_SOCKET__RECV_MSG 0x00080000UL | ||
660 | #define NETLINK_NFLOG_SOCKET__SEND_MSG 0x00100000UL | ||
661 | #define NETLINK_NFLOG_SOCKET__NAME_BIND 0x00200000UL | ||
662 | #define NETLINK_XFRM_SOCKET__IOCTL 0x00000001UL | ||
663 | #define NETLINK_XFRM_SOCKET__READ 0x00000002UL | ||
664 | #define NETLINK_XFRM_SOCKET__WRITE 0x00000004UL | ||
665 | #define NETLINK_XFRM_SOCKET__CREATE 0x00000008UL | ||
666 | #define NETLINK_XFRM_SOCKET__GETATTR 0x00000010UL | ||
667 | #define NETLINK_XFRM_SOCKET__SETATTR 0x00000020UL | ||
668 | #define NETLINK_XFRM_SOCKET__LOCK 0x00000040UL | ||
669 | #define NETLINK_XFRM_SOCKET__RELABELFROM 0x00000080UL | ||
670 | #define NETLINK_XFRM_SOCKET__RELABELTO 0x00000100UL | ||
671 | #define NETLINK_XFRM_SOCKET__APPEND 0x00000200UL | ||
672 | #define NETLINK_XFRM_SOCKET__BIND 0x00000400UL | ||
673 | #define NETLINK_XFRM_SOCKET__CONNECT 0x00000800UL | ||
674 | #define NETLINK_XFRM_SOCKET__LISTEN 0x00001000UL | ||
675 | #define NETLINK_XFRM_SOCKET__ACCEPT 0x00002000UL | ||
676 | #define NETLINK_XFRM_SOCKET__GETOPT 0x00004000UL | ||
677 | #define NETLINK_XFRM_SOCKET__SETOPT 0x00008000UL | ||
678 | #define NETLINK_XFRM_SOCKET__SHUTDOWN 0x00010000UL | ||
679 | #define NETLINK_XFRM_SOCKET__RECVFROM 0x00020000UL | ||
680 | #define NETLINK_XFRM_SOCKET__SENDTO 0x00040000UL | ||
681 | #define NETLINK_XFRM_SOCKET__RECV_MSG 0x00080000UL | ||
682 | #define NETLINK_XFRM_SOCKET__SEND_MSG 0x00100000UL | ||
683 | #define NETLINK_XFRM_SOCKET__NAME_BIND 0x00200000UL | ||
684 | #define NETLINK_XFRM_SOCKET__NLMSG_READ 0x00400000UL | ||
685 | #define NETLINK_XFRM_SOCKET__NLMSG_WRITE 0x00800000UL | ||
686 | #define NETLINK_SELINUX_SOCKET__IOCTL 0x00000001UL | ||
687 | #define NETLINK_SELINUX_SOCKET__READ 0x00000002UL | ||
688 | #define NETLINK_SELINUX_SOCKET__WRITE 0x00000004UL | ||
689 | #define NETLINK_SELINUX_SOCKET__CREATE 0x00000008UL | ||
690 | #define NETLINK_SELINUX_SOCKET__GETATTR 0x00000010UL | ||
691 | #define NETLINK_SELINUX_SOCKET__SETATTR 0x00000020UL | ||
692 | #define NETLINK_SELINUX_SOCKET__LOCK 0x00000040UL | ||
693 | #define NETLINK_SELINUX_SOCKET__RELABELFROM 0x00000080UL | ||
694 | #define NETLINK_SELINUX_SOCKET__RELABELTO 0x00000100UL | ||
695 | #define NETLINK_SELINUX_SOCKET__APPEND 0x00000200UL | ||
696 | #define NETLINK_SELINUX_SOCKET__BIND 0x00000400UL | ||
697 | #define NETLINK_SELINUX_SOCKET__CONNECT 0x00000800UL | ||
698 | #define NETLINK_SELINUX_SOCKET__LISTEN 0x00001000UL | ||
699 | #define NETLINK_SELINUX_SOCKET__ACCEPT 0x00002000UL | ||
700 | #define NETLINK_SELINUX_SOCKET__GETOPT 0x00004000UL | ||
701 | #define NETLINK_SELINUX_SOCKET__SETOPT 0x00008000UL | ||
702 | #define NETLINK_SELINUX_SOCKET__SHUTDOWN 0x00010000UL | ||
703 | #define NETLINK_SELINUX_SOCKET__RECVFROM 0x00020000UL | ||
704 | #define NETLINK_SELINUX_SOCKET__SENDTO 0x00040000UL | ||
705 | #define NETLINK_SELINUX_SOCKET__RECV_MSG 0x00080000UL | ||
706 | #define NETLINK_SELINUX_SOCKET__SEND_MSG 0x00100000UL | ||
707 | #define NETLINK_SELINUX_SOCKET__NAME_BIND 0x00200000UL | ||
708 | #define NETLINK_AUDIT_SOCKET__IOCTL 0x00000001UL | ||
709 | #define NETLINK_AUDIT_SOCKET__READ 0x00000002UL | ||
710 | #define NETLINK_AUDIT_SOCKET__WRITE 0x00000004UL | ||
711 | #define NETLINK_AUDIT_SOCKET__CREATE 0x00000008UL | ||
712 | #define NETLINK_AUDIT_SOCKET__GETATTR 0x00000010UL | ||
713 | #define NETLINK_AUDIT_SOCKET__SETATTR 0x00000020UL | ||
714 | #define NETLINK_AUDIT_SOCKET__LOCK 0x00000040UL | ||
715 | #define NETLINK_AUDIT_SOCKET__RELABELFROM 0x00000080UL | ||
716 | #define NETLINK_AUDIT_SOCKET__RELABELTO 0x00000100UL | ||
717 | #define NETLINK_AUDIT_SOCKET__APPEND 0x00000200UL | ||
718 | #define NETLINK_AUDIT_SOCKET__BIND 0x00000400UL | ||
719 | #define NETLINK_AUDIT_SOCKET__CONNECT 0x00000800UL | ||
720 | #define NETLINK_AUDIT_SOCKET__LISTEN 0x00001000UL | ||
721 | #define NETLINK_AUDIT_SOCKET__ACCEPT 0x00002000UL | ||
722 | #define NETLINK_AUDIT_SOCKET__GETOPT 0x00004000UL | ||
723 | #define NETLINK_AUDIT_SOCKET__SETOPT 0x00008000UL | ||
724 | #define NETLINK_AUDIT_SOCKET__SHUTDOWN 0x00010000UL | ||
725 | #define NETLINK_AUDIT_SOCKET__RECVFROM 0x00020000UL | ||
726 | #define NETLINK_AUDIT_SOCKET__SENDTO 0x00040000UL | ||
727 | #define NETLINK_AUDIT_SOCKET__RECV_MSG 0x00080000UL | ||
728 | #define NETLINK_AUDIT_SOCKET__SEND_MSG 0x00100000UL | ||
729 | #define NETLINK_AUDIT_SOCKET__NAME_BIND 0x00200000UL | ||
730 | #define NETLINK_AUDIT_SOCKET__NLMSG_READ 0x00400000UL | ||
731 | #define NETLINK_AUDIT_SOCKET__NLMSG_WRITE 0x00800000UL | ||
732 | #define NETLINK_AUDIT_SOCKET__NLMSG_RELAY 0x01000000UL | ||
733 | #define NETLINK_AUDIT_SOCKET__NLMSG_READPRIV 0x02000000UL | ||
734 | #define NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT 0x04000000UL | ||
735 | #define NETLINK_IP6FW_SOCKET__IOCTL 0x00000001UL | ||
736 | #define NETLINK_IP6FW_SOCKET__READ 0x00000002UL | ||
737 | #define NETLINK_IP6FW_SOCKET__WRITE 0x00000004UL | ||
738 | #define NETLINK_IP6FW_SOCKET__CREATE 0x00000008UL | ||
739 | #define NETLINK_IP6FW_SOCKET__GETATTR 0x00000010UL | ||
740 | #define NETLINK_IP6FW_SOCKET__SETATTR 0x00000020UL | ||
741 | #define NETLINK_IP6FW_SOCKET__LOCK 0x00000040UL | ||
742 | #define NETLINK_IP6FW_SOCKET__RELABELFROM 0x00000080UL | ||
743 | #define NETLINK_IP6FW_SOCKET__RELABELTO 0x00000100UL | ||
744 | #define NETLINK_IP6FW_SOCKET__APPEND 0x00000200UL | ||
745 | #define NETLINK_IP6FW_SOCKET__BIND 0x00000400UL | ||
746 | #define NETLINK_IP6FW_SOCKET__CONNECT 0x00000800UL | ||
747 | #define NETLINK_IP6FW_SOCKET__LISTEN 0x00001000UL | ||
748 | #define NETLINK_IP6FW_SOCKET__ACCEPT 0x00002000UL | ||
749 | #define NETLINK_IP6FW_SOCKET__GETOPT 0x00004000UL | ||
750 | #define NETLINK_IP6FW_SOCKET__SETOPT 0x00008000UL | ||
751 | #define NETLINK_IP6FW_SOCKET__SHUTDOWN 0x00010000UL | ||
752 | #define NETLINK_IP6FW_SOCKET__RECVFROM 0x00020000UL | ||
753 | #define NETLINK_IP6FW_SOCKET__SENDTO 0x00040000UL | ||
754 | #define NETLINK_IP6FW_SOCKET__RECV_MSG 0x00080000UL | ||
755 | #define NETLINK_IP6FW_SOCKET__SEND_MSG 0x00100000UL | ||
756 | #define NETLINK_IP6FW_SOCKET__NAME_BIND 0x00200000UL | ||
757 | #define NETLINK_IP6FW_SOCKET__NLMSG_READ 0x00400000UL | ||
758 | #define NETLINK_IP6FW_SOCKET__NLMSG_WRITE 0x00800000UL | ||
759 | #define NETLINK_DNRT_SOCKET__IOCTL 0x00000001UL | ||
760 | #define NETLINK_DNRT_SOCKET__READ 0x00000002UL | ||
761 | #define NETLINK_DNRT_SOCKET__WRITE 0x00000004UL | ||
762 | #define NETLINK_DNRT_SOCKET__CREATE 0x00000008UL | ||
763 | #define NETLINK_DNRT_SOCKET__GETATTR 0x00000010UL | ||
764 | #define NETLINK_DNRT_SOCKET__SETATTR 0x00000020UL | ||
765 | #define NETLINK_DNRT_SOCKET__LOCK 0x00000040UL | ||
766 | #define NETLINK_DNRT_SOCKET__RELABELFROM 0x00000080UL | ||
767 | #define NETLINK_DNRT_SOCKET__RELABELTO 0x00000100UL | ||
768 | #define NETLINK_DNRT_SOCKET__APPEND 0x00000200UL | ||
769 | #define NETLINK_DNRT_SOCKET__BIND 0x00000400UL | ||
770 | #define NETLINK_DNRT_SOCKET__CONNECT 0x00000800UL | ||
771 | #define NETLINK_DNRT_SOCKET__LISTEN 0x00001000UL | ||
772 | #define NETLINK_DNRT_SOCKET__ACCEPT 0x00002000UL | ||
773 | #define NETLINK_DNRT_SOCKET__GETOPT 0x00004000UL | ||
774 | #define NETLINK_DNRT_SOCKET__SETOPT 0x00008000UL | ||
775 | #define NETLINK_DNRT_SOCKET__SHUTDOWN 0x00010000UL | ||
776 | #define NETLINK_DNRT_SOCKET__RECVFROM 0x00020000UL | ||
777 | #define NETLINK_DNRT_SOCKET__SENDTO 0x00040000UL | ||
778 | #define NETLINK_DNRT_SOCKET__RECV_MSG 0x00080000UL | ||
779 | #define NETLINK_DNRT_SOCKET__SEND_MSG 0x00100000UL | ||
780 | #define NETLINK_DNRT_SOCKET__NAME_BIND 0x00200000UL | ||
781 | #define ASSOCIATION__SENDTO 0x00000001UL | ||
782 | #define ASSOCIATION__RECVFROM 0x00000002UL | ||
783 | #define ASSOCIATION__SETCONTEXT 0x00000004UL | ||
784 | #define ASSOCIATION__POLMATCH 0x00000008UL | ||
785 | #define NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL 0x00000001UL | ||
786 | #define NETLINK_KOBJECT_UEVENT_SOCKET__READ 0x00000002UL | ||
787 | #define NETLINK_KOBJECT_UEVENT_SOCKET__WRITE 0x00000004UL | ||
788 | #define NETLINK_KOBJECT_UEVENT_SOCKET__CREATE 0x00000008UL | ||
789 | #define NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR 0x00000010UL | ||
790 | #define NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR 0x00000020UL | ||
791 | #define NETLINK_KOBJECT_UEVENT_SOCKET__LOCK 0x00000040UL | ||
792 | #define NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM 0x00000080UL | ||
793 | #define NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO 0x00000100UL | ||
794 | #define NETLINK_KOBJECT_UEVENT_SOCKET__APPEND 0x00000200UL | ||
795 | #define NETLINK_KOBJECT_UEVENT_SOCKET__BIND 0x00000400UL | ||
796 | #define NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT 0x00000800UL | ||
797 | #define NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN 0x00001000UL | ||
798 | #define NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT 0x00002000UL | ||
799 | #define NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT 0x00004000UL | ||
800 | #define NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT 0x00008000UL | ||
801 | #define NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN 0x00010000UL | ||
802 | #define NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM 0x00020000UL | ||
803 | #define NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO 0x00040000UL | ||
804 | #define NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG 0x00080000UL | ||
805 | #define NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG 0x00100000UL | ||
806 | #define NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND 0x00200000UL | ||
807 | #define APPLETALK_SOCKET__IOCTL 0x00000001UL | ||
808 | #define APPLETALK_SOCKET__READ 0x00000002UL | ||
809 | #define APPLETALK_SOCKET__WRITE 0x00000004UL | ||
810 | #define APPLETALK_SOCKET__CREATE 0x00000008UL | ||
811 | #define APPLETALK_SOCKET__GETATTR 0x00000010UL | ||
812 | #define APPLETALK_SOCKET__SETATTR 0x00000020UL | ||
813 | #define APPLETALK_SOCKET__LOCK 0x00000040UL | ||
814 | #define APPLETALK_SOCKET__RELABELFROM 0x00000080UL | ||
815 | #define APPLETALK_SOCKET__RELABELTO 0x00000100UL | ||
816 | #define APPLETALK_SOCKET__APPEND 0x00000200UL | ||
817 | #define APPLETALK_SOCKET__BIND 0x00000400UL | ||
818 | #define APPLETALK_SOCKET__CONNECT 0x00000800UL | ||
819 | #define APPLETALK_SOCKET__LISTEN 0x00001000UL | ||
820 | #define APPLETALK_SOCKET__ACCEPT 0x00002000UL | ||
821 | #define APPLETALK_SOCKET__GETOPT 0x00004000UL | ||
822 | #define APPLETALK_SOCKET__SETOPT 0x00008000UL | ||
823 | #define APPLETALK_SOCKET__SHUTDOWN 0x00010000UL | ||
824 | #define APPLETALK_SOCKET__RECVFROM 0x00020000UL | ||
825 | #define APPLETALK_SOCKET__SENDTO 0x00040000UL | ||
826 | #define APPLETALK_SOCKET__RECV_MSG 0x00080000UL | ||
827 | #define APPLETALK_SOCKET__SEND_MSG 0x00100000UL | ||
828 | #define APPLETALK_SOCKET__NAME_BIND 0x00200000UL | ||
829 | #define PACKET__SEND 0x00000001UL | ||
830 | #define PACKET__RECV 0x00000002UL | ||
831 | #define PACKET__RELABELTO 0x00000004UL | ||
832 | #define PACKET__FLOW_IN 0x00000008UL | ||
833 | #define PACKET__FLOW_OUT 0x00000010UL | ||
834 | #define PACKET__FORWARD_IN 0x00000020UL | ||
835 | #define PACKET__FORWARD_OUT 0x00000040UL | ||
836 | #define KEY__VIEW 0x00000001UL | ||
837 | #define KEY__READ 0x00000002UL | ||
838 | #define KEY__WRITE 0x00000004UL | ||
839 | #define KEY__SEARCH 0x00000008UL | ||
840 | #define KEY__LINK 0x00000010UL | ||
841 | #define KEY__SETATTR 0x00000020UL | ||
842 | #define KEY__CREATE 0x00000040UL | ||
843 | #define DCCP_SOCKET__IOCTL 0x00000001UL | ||
844 | #define DCCP_SOCKET__READ 0x00000002UL | ||
845 | #define DCCP_SOCKET__WRITE 0x00000004UL | ||
846 | #define DCCP_SOCKET__CREATE 0x00000008UL | ||
847 | #define DCCP_SOCKET__GETATTR 0x00000010UL | ||
848 | #define DCCP_SOCKET__SETATTR 0x00000020UL | ||
849 | #define DCCP_SOCKET__LOCK 0x00000040UL | ||
850 | #define DCCP_SOCKET__RELABELFROM 0x00000080UL | ||
851 | #define DCCP_SOCKET__RELABELTO 0x00000100UL | ||
852 | #define DCCP_SOCKET__APPEND 0x00000200UL | ||
853 | #define DCCP_SOCKET__BIND 0x00000400UL | ||
854 | #define DCCP_SOCKET__CONNECT 0x00000800UL | ||
855 | #define DCCP_SOCKET__LISTEN 0x00001000UL | ||
856 | #define DCCP_SOCKET__ACCEPT 0x00002000UL | ||
857 | #define DCCP_SOCKET__GETOPT 0x00004000UL | ||
858 | #define DCCP_SOCKET__SETOPT 0x00008000UL | ||
859 | #define DCCP_SOCKET__SHUTDOWN 0x00010000UL | ||
860 | #define DCCP_SOCKET__RECVFROM 0x00020000UL | ||
861 | #define DCCP_SOCKET__SENDTO 0x00040000UL | ||
862 | #define DCCP_SOCKET__RECV_MSG 0x00080000UL | ||
863 | #define DCCP_SOCKET__SEND_MSG 0x00100000UL | ||
864 | #define DCCP_SOCKET__NAME_BIND 0x00200000UL | ||
865 | #define DCCP_SOCKET__NODE_BIND 0x00400000UL | ||
866 | #define DCCP_SOCKET__NAME_CONNECT 0x00800000UL | ||
867 | #define MEMPROTECT__MMAP_ZERO 0x00000001UL | ||
868 | #define PEER__RECV 0x00000001UL | ||
869 | #define KERNEL_SERVICE__USE_AS_OVERRIDE 0x00000001UL | ||
870 | #define KERNEL_SERVICE__CREATE_FILES_AS 0x00000002UL | ||
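Every value in the av_permissions.h deletion above is a single bit of a 32-bit access vector: the first permission listed for a class is 0x00000001UL, the next 0x00000002UL, and so on, which is also why a class can carry at most 32 permissions. A hedged sketch of how such a vector is tested, using invented stand-in names rather than the deleted constants:

#include <stdio.h>

/* Stand-ins for the deleted #defines: permission n is the mask 1UL << n. */
#define EXAMPLE_FILE__READ      0x00000002UL
#define EXAMPLE_FILE__WRITE     0x00000004UL

int main(void)
{
        /* An access vector of granted permissions, as a policy decision
         * might return it. */
        unsigned long granted = EXAMPLE_FILE__READ;

        if (granted & EXAMPLE_FILE__READ)
                printf("read permitted\n");
        if (!(granted & EXAMPLE_FILE__WRITE))
                printf("write denied\n");
        return 0;
}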
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h index bb1ec801bdfe..4677aa519b04 100644 --- a/security/selinux/include/avc_ss.h +++ b/security/selinux/include/avc_ss.h | |||
@@ -10,26 +10,13 @@ | |||
10 | 10 | ||
11 | int avc_ss_reset(u32 seqno); | 11 | int avc_ss_reset(u32 seqno); |
12 | 12 | ||
13 | struct av_perm_to_string { | 13 | /* Class/perm mapping support */ |
14 | u16 tclass; | 14 | struct security_class_mapping { |
15 | u32 value; | ||
16 | const char *name; | 15 | const char *name; |
16 | const char *perms[sizeof(u32) * 8 + 1]; | ||
17 | }; | 17 | }; |
18 | 18 | ||
19 | struct av_inherit { | 19 | extern struct security_class_mapping secclass_map[]; |
20 | const char **common_pts; | ||
21 | u32 common_base; | ||
22 | u16 tclass; | ||
23 | }; | ||
24 | |||
25 | struct selinux_class_perm { | ||
26 | const struct av_perm_to_string *av_perm_to_string; | ||
27 | u32 av_pts_len; | ||
28 | u32 cts_len; | ||
29 | const char **class_to_string; | ||
30 | const struct av_inherit *av_inherit; | ||
31 | u32 av_inherit_len; | ||
32 | }; | ||
33 | 20 | ||
34 | #endif /* _SELINUX_AVC_SS_H_ */ | 21 | #endif /* _SELINUX_AVC_SS_H_ */ |
35 | 22 | ||
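The struct security_class_mapping introduced in this avc_ss.h hunk pairs a class name with a NULL-terminated list of permission names; the array is dimensioned sizeof(u32) * 8 + 1 because an access vector is a u32, so at most 32 names fit plus the terminating NULL. Below is a sketch of how a permission name could be resolved to its bit by scanning such an entry; the helper name is an assumption, not kernel code, and the "fd" entry mirrors the one in the new classmap.h further down:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct security_class_mapping {
        const char *name;
        const char *perms[sizeof(uint32_t) * 8 + 1];    /* 32 names + NULL */
};

/* Illustrative: the nth listed permission occupies bit n of the vector. */
static uint32_t find_perm_bit(const struct security_class_mapping *map,
                              const char *perm)
{
        for (unsigned int i = 0; map->perms[i]; i++)
                if (strcmp(map->perms[i], perm) == 0)
                        return (uint32_t)1 << i;
        return 0;       /* name not defined for this class */
}

int main(void)
{
        struct security_class_mapping fd = { "fd", { "use", NULL } };

        /* Prints 0x00000001, the same value the deleted FD__USE had. */
        printf("fd:use -> 0x%08x\n", (unsigned int)find_perm_bit(&fd, "use"));
        return 0;
}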
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h deleted file mode 100644 index 7ab9299bfb6b..000000000000 --- a/security/selinux/include/class_to_string.h +++ /dev/null | |||
@@ -1,80 +0,0 @@ | |||
1 | /* This file is automatically generated. Do not edit. */ | ||
2 | /* | ||
3 | * Security object class definitions | ||
4 | */ | ||
5 | S_(NULL) | ||
6 | S_("security") | ||
7 | S_("process") | ||
8 | S_("system") | ||
9 | S_("capability") | ||
10 | S_("filesystem") | ||
11 | S_("file") | ||
12 | S_("dir") | ||
13 | S_("fd") | ||
14 | S_("lnk_file") | ||
15 | S_("chr_file") | ||
16 | S_("blk_file") | ||
17 | S_("sock_file") | ||
18 | S_("fifo_file") | ||
19 | S_("socket") | ||
20 | S_("tcp_socket") | ||
21 | S_("udp_socket") | ||
22 | S_("rawip_socket") | ||
23 | S_("node") | ||
24 | S_("netif") | ||
25 | S_("netlink_socket") | ||
26 | S_("packet_socket") | ||
27 | S_("key_socket") | ||
28 | S_("unix_stream_socket") | ||
29 | S_("unix_dgram_socket") | ||
30 | S_("sem") | ||
31 | S_("msg") | ||
32 | S_("msgq") | ||
33 | S_("shm") | ||
34 | S_("ipc") | ||
35 | S_(NULL) | ||
36 | S_(NULL) | ||
37 | S_(NULL) | ||
38 | S_(NULL) | ||
39 | S_(NULL) | ||
40 | S_(NULL) | ||
41 | S_(NULL) | ||
42 | S_(NULL) | ||
43 | S_(NULL) | ||
44 | S_(NULL) | ||
45 | S_(NULL) | ||
46 | S_(NULL) | ||
47 | S_(NULL) | ||
48 | S_("netlink_route_socket") | ||
49 | S_("netlink_firewall_socket") | ||
50 | S_("netlink_tcpdiag_socket") | ||
51 | S_("netlink_nflog_socket") | ||
52 | S_("netlink_xfrm_socket") | ||
53 | S_("netlink_selinux_socket") | ||
54 | S_("netlink_audit_socket") | ||
55 | S_("netlink_ip6fw_socket") | ||
56 | S_("netlink_dnrt_socket") | ||
57 | S_(NULL) | ||
58 | S_(NULL) | ||
59 | S_("association") | ||
60 | S_("netlink_kobject_uevent_socket") | ||
61 | S_("appletalk_socket") | ||
62 | S_("packet") | ||
63 | S_("key") | ||
64 | S_(NULL) | ||
65 | S_("dccp_socket") | ||
66 | S_("memprotect") | ||
67 | S_(NULL) | ||
68 | S_(NULL) | ||
69 | S_(NULL) | ||
70 | S_(NULL) | ||
71 | S_(NULL) | ||
72 | S_(NULL) | ||
73 | S_("peer") | ||
74 | S_("capability2") | ||
75 | S_(NULL) | ||
76 | S_(NULL) | ||
77 | S_(NULL) | ||
78 | S_(NULL) | ||
79 | S_("kernel_service") | ||
80 | S_("tun_socket") | ||
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h new file mode 100644 index 000000000000..8b32e959bb2e --- /dev/null +++ b/security/selinux/include/classmap.h | |||
@@ -0,0 +1,150 @@ | |||
1 | #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ | ||
2 | "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append" | ||
3 | |||
4 | #define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \ | ||
5 | "rename", "execute", "swapon", "quotaon", "mounton" | ||
6 | |||
7 | #define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \ | ||
8 | "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \ | ||
9 | "sendto", "recv_msg", "send_msg", "name_bind" | ||
10 | |||
11 | #define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \ | ||
12 | "write", "associate", "unix_read", "unix_write" | ||
13 | |||
14 | struct security_class_mapping secclass_map[] = { | ||
15 | { "security", | ||
16 | { "compute_av", "compute_create", "compute_member", | ||
17 | "check_context", "load_policy", "compute_relabel", | ||
18 | "compute_user", "setenforce", "setbool", "setsecparam", | ||
19 | "setcheckreqprot", NULL } }, | ||
20 | { "process", | ||
21 | { "fork", "transition", "sigchld", "sigkill", | ||
22 | "sigstop", "signull", "signal", "ptrace", "getsched", "setsched", | ||
23 | "getsession", "getpgid", "setpgid", "getcap", "setcap", "share", | ||
24 | "getattr", "setexec", "setfscreate", "noatsecure", "siginh", | ||
25 | "setrlimit", "rlimitinh", "dyntransition", "setcurrent", | ||
26 | "execmem", "execstack", "execheap", "setkeycreate", | ||
27 | "setsockcreate", NULL } }, | ||
28 | { "system", | ||
29 | { "ipc_info", "syslog_read", "syslog_mod", | ||
30 | "syslog_console", "module_request", NULL } }, | ||
31 | { "capability", | ||
32 | { "chown", "dac_override", "dac_read_search", | ||
33 | "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap", | ||
34 | "linux_immutable", "net_bind_service", "net_broadcast", | ||
35 | "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module", | ||
36 | "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin", | ||
37 | "sys_boot", "sys_nice", "sys_resource", "sys_time", | ||
38 | "sys_tty_config", "mknod", "lease", "audit_write", | ||
39 | "audit_control", "setfcap", NULL } }, | ||
40 | { "filesystem", | ||
41 | { "mount", "remount", "unmount", "getattr", | ||
42 | "relabelfrom", "relabelto", "transition", "associate", "quotamod", | ||
43 | "quotaget", NULL } }, | ||
44 | { "file", | ||
45 | { COMMON_FILE_PERMS, | ||
46 | "execute_no_trans", "entrypoint", "execmod", "open", NULL } }, | ||
47 | { "dir", | ||
48 | { COMMON_FILE_PERMS, "add_name", "remove_name", | ||
49 | "reparent", "search", "rmdir", "open", NULL } }, | ||
50 | { "fd", { "use", NULL } }, | ||
51 | { "lnk_file", | ||
52 | { COMMON_FILE_PERMS, NULL } }, | ||
53 | { "chr_file", | ||
54 | { COMMON_FILE_PERMS, | ||
55 | "execute_no_trans", "entrypoint", "execmod", "open", NULL } }, | ||
56 | { "blk_file", | ||
57 | { COMMON_FILE_PERMS, "open", NULL } }, | ||
58 | { "sock_file", | ||
59 | { COMMON_FILE_PERMS, "open", NULL } }, | ||
60 | { "fifo_file", | ||
61 | { COMMON_FILE_PERMS, "open", NULL } }, | ||
62 | { "socket", | ||
63 | { COMMON_SOCK_PERMS, NULL } }, | ||
64 | { "tcp_socket", | ||
65 | { COMMON_SOCK_PERMS, | ||
66 | "connectto", "newconn", "acceptfrom", "node_bind", "name_connect", | ||
67 | NULL } }, | ||
68 | { "udp_socket", | ||
69 | { COMMON_SOCK_PERMS, | ||
70 | "node_bind", NULL } }, | ||
71 | { "rawip_socket", | ||
72 | { COMMON_SOCK_PERMS, | ||
73 | "node_bind", NULL } }, | ||
74 | { "node", | ||
75 | { "tcp_recv", "tcp_send", "udp_recv", "udp_send", | ||
76 | "rawip_recv", "rawip_send", "enforce_dest", | ||
77 | "dccp_recv", "dccp_send", "recvfrom", "sendto", NULL } }, | ||
78 | { "netif", | ||
79 | { "tcp_recv", "tcp_send", "udp_recv", "udp_send", | ||
80 | "rawip_recv", "rawip_send", "dccp_recv", "dccp_send", | ||
81 | "ingress", "egress", NULL } }, | ||
82 | { "netlink_socket", | ||
83 | { COMMON_SOCK_PERMS, NULL } }, | ||
84 | { "packet_socket", | ||
85 | { COMMON_SOCK_PERMS, NULL } }, | ||
86 | { "key_socket", | ||
87 | { COMMON_SOCK_PERMS, NULL } }, | ||
88 | { "unix_stream_socket", | ||
89 | { COMMON_SOCK_PERMS, "connectto", "newconn", "acceptfrom", NULL | ||
90 | } }, | ||
91 | { "unix_dgram_socket", | ||
92 | { COMMON_SOCK_PERMS, NULL | ||
93 | } }, | ||
94 | { "sem", | ||
95 | { COMMON_IPC_PERMS, NULL } }, | ||
96 | { "msg", { "send", "receive", NULL } }, | ||
97 | { "msgq", | ||
98 | { COMMON_IPC_PERMS, "enqueue", NULL } }, | ||
99 | { "shm", | ||
100 | { COMMON_IPC_PERMS, "lock", NULL } }, | ||
101 | { "ipc", | ||
102 | { COMMON_IPC_PERMS, NULL } }, | ||
103 | { "netlink_route_socket", | ||
104 | { COMMON_SOCK_PERMS, | ||
105 | "nlmsg_read", "nlmsg_write", NULL } }, | ||
106 | { "netlink_firewall_socket", | ||
107 | { COMMON_SOCK_PERMS, | ||
108 | "nlmsg_read", "nlmsg_write", NULL } }, | ||
109 | { "netlink_tcpdiag_socket", | ||
110 | { COMMON_SOCK_PERMS, | ||
111 | "nlmsg_read", "nlmsg_write", NULL } }, | ||
112 | { "netlink_nflog_socket", | ||
113 | { COMMON_SOCK_PERMS, NULL } }, | ||
114 | { "netlink_xfrm_socket", | ||
115 | { COMMON_SOCK_PERMS, | ||
116 | "nlmsg_read", "nlmsg_write", NULL } }, | ||
117 | { "netlink_selinux_socket", | ||
118 | { COMMON_SOCK_PERMS, NULL } }, | ||
119 | { "netlink_audit_socket", | ||
120 | { COMMON_SOCK_PERMS, | ||
121 | "nlmsg_read", "nlmsg_write", "nlmsg_relay", "nlmsg_readpriv", | ||
122 | "nlmsg_tty_audit", NULL } }, | ||
123 | { "netlink_ip6fw_socket", | ||
124 | { COMMON_SOCK_PERMS, | ||
125 | "nlmsg_read", "nlmsg_write", NULL } }, | ||
126 | { "netlink_dnrt_socket", | ||
127 | { COMMON_SOCK_PERMS, NULL } }, | ||
128 | { "association", | ||
129 | { "sendto", "recvfrom", "setcontext", "polmatch", NULL } }, | ||
130 | { "netlink_kobject_uevent_socket", | ||
131 | { COMMON_SOCK_PERMS, NULL } }, | ||
132 | { "appletalk_socket", | ||
133 | { COMMON_SOCK_PERMS, NULL } }, | ||
134 | { "packet", | ||
135 | { "send", "recv", "relabelto", "flow_in", "flow_out", | ||
136 | "forward_in", "forward_out", NULL } }, | ||
137 | { "key", | ||
138 | { "view", "read", "write", "search", "link", "setattr", "create", | ||
139 | NULL } }, | ||
140 | { "dccp_socket", | ||
141 | { COMMON_SOCK_PERMS, | ||
142 | "node_bind", "name_connect", NULL } }, | ||
143 | { "memprotect", { "mmap_zero", NULL } }, | ||
144 | { "peer", { "recv", NULL } }, | ||
145 | { "capability2", { "mac_override", "mac_admin", NULL } }, | ||
146 | { "kernel_service", { "use_as_override", "create_files_as", NULL } }, | ||
147 | { "tun_socket", | ||
148 | { COMMON_SOCK_PERMS, NULL } }, | ||
149 | { NULL } | ||
150 | }; | ||
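For orientation, the table that ends above pairs each class name with a NULL-terminated list of permission names, and the table itself is closed by a NULL entry, so a loader can walk it without a separate length field. A minimal user-space sketch of that convention follows (the struct layout and the two demo entries are assumptions for illustration, not the kernel definition):

#include <stdio.h>

struct class_map {
	const char *name;           /* class name, NULL terminates the table */
	const char *perms[33];      /* permission names, NULL terminated */
};

static const struct class_map demo_map[] = {
	{ "file", { "ioctl", "read", "write", "open", NULL } },
	{ "dir",  { "read", "search", "rmdir", NULL } },
	{ NULL }
};

int main(void)
{
	for (int i = 0; demo_map[i].name; i++) {
		int n = 0;
		while (demo_map[i].perms[n])
			n++;
		printf("%s: %d permissions\n", demo_map[i].name, n);
	}
	return 0;
}

In the patch itself this walk is done by selinux_set_mapping() in services.c, shown later in the diff.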
diff --git a/security/selinux/include/common_perm_to_string.h b/security/selinux/include/common_perm_to_string.h deleted file mode 100644 index ce5b6e2fe9dd..000000000000 --- a/security/selinux/include/common_perm_to_string.h +++ /dev/null | |||
@@ -1,58 +0,0 @@ | |||
1 | /* This file is automatically generated. Do not edit. */ | ||
2 | TB_(common_file_perm_to_string) | ||
3 | S_("ioctl") | ||
4 | S_("read") | ||
5 | S_("write") | ||
6 | S_("create") | ||
7 | S_("getattr") | ||
8 | S_("setattr") | ||
9 | S_("lock") | ||
10 | S_("relabelfrom") | ||
11 | S_("relabelto") | ||
12 | S_("append") | ||
13 | S_("unlink") | ||
14 | S_("link") | ||
15 | S_("rename") | ||
16 | S_("execute") | ||
17 | S_("swapon") | ||
18 | S_("quotaon") | ||
19 | S_("mounton") | ||
20 | TE_(common_file_perm_to_string) | ||
21 | |||
22 | TB_(common_socket_perm_to_string) | ||
23 | S_("ioctl") | ||
24 | S_("read") | ||
25 | S_("write") | ||
26 | S_("create") | ||
27 | S_("getattr") | ||
28 | S_("setattr") | ||
29 | S_("lock") | ||
30 | S_("relabelfrom") | ||
31 | S_("relabelto") | ||
32 | S_("append") | ||
33 | S_("bind") | ||
34 | S_("connect") | ||
35 | S_("listen") | ||
36 | S_("accept") | ||
37 | S_("getopt") | ||
38 | S_("setopt") | ||
39 | S_("shutdown") | ||
40 | S_("recvfrom") | ||
41 | S_("sendto") | ||
42 | S_("recv_msg") | ||
43 | S_("send_msg") | ||
44 | S_("name_bind") | ||
45 | TE_(common_socket_perm_to_string) | ||
46 | |||
47 | TB_(common_ipc_perm_to_string) | ||
48 | S_("create") | ||
49 | S_("destroy") | ||
50 | S_("getattr") | ||
51 | S_("setattr") | ||
52 | S_("read") | ||
53 | S_("write") | ||
54 | S_("associate") | ||
55 | S_("unix_read") | ||
56 | S_("unix_write") | ||
57 | TE_(common_ipc_perm_to_string) | ||
58 | |||
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h deleted file mode 100644 index f248500a1e3c..000000000000 --- a/security/selinux/include/flask.h +++ /dev/null | |||
@@ -1,91 +0,0 @@ | |||
1 | /* This file is automatically generated. Do not edit. */ | ||
2 | #ifndef _SELINUX_FLASK_H_ | ||
3 | #define _SELINUX_FLASK_H_ | ||
4 | |||
5 | /* | ||
6 | * Security object class definitions | ||
7 | */ | ||
8 | #define SECCLASS_SECURITY 1 | ||
9 | #define SECCLASS_PROCESS 2 | ||
10 | #define SECCLASS_SYSTEM 3 | ||
11 | #define SECCLASS_CAPABILITY 4 | ||
12 | #define SECCLASS_FILESYSTEM 5 | ||
13 | #define SECCLASS_FILE 6 | ||
14 | #define SECCLASS_DIR 7 | ||
15 | #define SECCLASS_FD 8 | ||
16 | #define SECCLASS_LNK_FILE 9 | ||
17 | #define SECCLASS_CHR_FILE 10 | ||
18 | #define SECCLASS_BLK_FILE 11 | ||
19 | #define SECCLASS_SOCK_FILE 12 | ||
20 | #define SECCLASS_FIFO_FILE 13 | ||
21 | #define SECCLASS_SOCKET 14 | ||
22 | #define SECCLASS_TCP_SOCKET 15 | ||
23 | #define SECCLASS_UDP_SOCKET 16 | ||
24 | #define SECCLASS_RAWIP_SOCKET 17 | ||
25 | #define SECCLASS_NODE 18 | ||
26 | #define SECCLASS_NETIF 19 | ||
27 | #define SECCLASS_NETLINK_SOCKET 20 | ||
28 | #define SECCLASS_PACKET_SOCKET 21 | ||
29 | #define SECCLASS_KEY_SOCKET 22 | ||
30 | #define SECCLASS_UNIX_STREAM_SOCKET 23 | ||
31 | #define SECCLASS_UNIX_DGRAM_SOCKET 24 | ||
32 | #define SECCLASS_SEM 25 | ||
33 | #define SECCLASS_MSG 26 | ||
34 | #define SECCLASS_MSGQ 27 | ||
35 | #define SECCLASS_SHM 28 | ||
36 | #define SECCLASS_IPC 29 | ||
37 | #define SECCLASS_NETLINK_ROUTE_SOCKET 43 | ||
38 | #define SECCLASS_NETLINK_FIREWALL_SOCKET 44 | ||
39 | #define SECCLASS_NETLINK_TCPDIAG_SOCKET 45 | ||
40 | #define SECCLASS_NETLINK_NFLOG_SOCKET 46 | ||
41 | #define SECCLASS_NETLINK_XFRM_SOCKET 47 | ||
42 | #define SECCLASS_NETLINK_SELINUX_SOCKET 48 | ||
43 | #define SECCLASS_NETLINK_AUDIT_SOCKET 49 | ||
44 | #define SECCLASS_NETLINK_IP6FW_SOCKET 50 | ||
45 | #define SECCLASS_NETLINK_DNRT_SOCKET 51 | ||
46 | #define SECCLASS_ASSOCIATION 54 | ||
47 | #define SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET 55 | ||
48 | #define SECCLASS_APPLETALK_SOCKET 56 | ||
49 | #define SECCLASS_PACKET 57 | ||
50 | #define SECCLASS_KEY 58 | ||
51 | #define SECCLASS_DCCP_SOCKET 60 | ||
52 | #define SECCLASS_MEMPROTECT 61 | ||
53 | #define SECCLASS_PEER 68 | ||
54 | #define SECCLASS_CAPABILITY2 69 | ||
55 | #define SECCLASS_KERNEL_SERVICE 74 | ||
56 | #define SECCLASS_TUN_SOCKET 75 | ||
57 | |||
58 | /* | ||
59 | * Security identifier indices for initial entities | ||
60 | */ | ||
61 | #define SECINITSID_KERNEL 1 | ||
62 | #define SECINITSID_SECURITY 2 | ||
63 | #define SECINITSID_UNLABELED 3 | ||
64 | #define SECINITSID_FS 4 | ||
65 | #define SECINITSID_FILE 5 | ||
66 | #define SECINITSID_FILE_LABELS 6 | ||
67 | #define SECINITSID_INIT 7 | ||
68 | #define SECINITSID_ANY_SOCKET 8 | ||
69 | #define SECINITSID_PORT 9 | ||
70 | #define SECINITSID_NETIF 10 | ||
71 | #define SECINITSID_NETMSG 11 | ||
72 | #define SECINITSID_NODE 12 | ||
73 | #define SECINITSID_IGMP_PACKET 13 | ||
74 | #define SECINITSID_ICMP_SOCKET 14 | ||
75 | #define SECINITSID_TCP_SOCKET 15 | ||
76 | #define SECINITSID_SYSCTL_MODPROBE 16 | ||
77 | #define SECINITSID_SYSCTL 17 | ||
78 | #define SECINITSID_SYSCTL_FS 18 | ||
79 | #define SECINITSID_SYSCTL_KERNEL 19 | ||
80 | #define SECINITSID_SYSCTL_NET 20 | ||
81 | #define SECINITSID_SYSCTL_NET_UNIX 21 | ||
82 | #define SECINITSID_SYSCTL_VM 22 | ||
83 | #define SECINITSID_SYSCTL_DEV 23 | ||
84 | #define SECINITSID_KMOD 24 | ||
85 | #define SECINITSID_POLICY 25 | ||
86 | #define SECINITSID_SCMP_PACKET 26 | ||
87 | #define SECINITSID_DEVNULL 27 | ||
88 | |||
89 | #define SECINITSID_NUM 27 | ||
90 | |||
91 | #endif | ||
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index ca835795a8b3..2553266ad793 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h | |||
@@ -97,11 +97,18 @@ struct av_decision { | |||
97 | #define AVD_FLAGS_PERMISSIVE 0x0001 | 97 | #define AVD_FLAGS_PERMISSIVE 0x0001 |
98 | 98 | ||
99 | int security_compute_av(u32 ssid, u32 tsid, | 99 | int security_compute_av(u32 ssid, u32 tsid, |
100 | u16 tclass, u32 requested, | 100 | u16 tclass, u32 requested, |
101 | struct av_decision *avd); | 101 | struct av_decision *avd); |
102 | |||
103 | int security_compute_av_user(u32 ssid, u32 tsid, | ||
104 | u16 tclass, u32 requested, | ||
105 | struct av_decision *avd); | ||
102 | 106 | ||
103 | int security_transition_sid(u32 ssid, u32 tsid, | 107 | int security_transition_sid(u32 ssid, u32 tsid, |
104 | u16 tclass, u32 *out_sid); | 108 | u16 tclass, u32 *out_sid); |
109 | |||
110 | int security_transition_sid_user(u32 ssid, u32 tsid, | ||
111 | u16 tclass, u32 *out_sid); | ||
105 | 112 | ||
106 | int security_member_sid(u32 ssid, u32 tsid, | 113 | int security_member_sid(u32 ssid, u32 tsid, |
107 | u16 tclass, u32 *out_sid); | 114 | u16 tclass, u32 *out_sid); |
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index b4fc506e7a87..fab36fdf2769 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -522,7 +522,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) | |||
522 | if (length < 0) | 522 | if (length < 0) |
523 | goto out2; | 523 | goto out2; |
524 | 524 | ||
525 | length = security_compute_av(ssid, tsid, tclass, req, &avd); | 525 | length = security_compute_av_user(ssid, tsid, tclass, req, &avd); |
526 | if (length < 0) | 526 | if (length < 0) |
527 | goto out2; | 527 | goto out2; |
528 | 528 | ||
@@ -571,7 +571,7 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size) | |||
571 | if (length < 0) | 571 | if (length < 0) |
572 | goto out2; | 572 | goto out2; |
573 | 573 | ||
574 | length = security_transition_sid(ssid, tsid, tclass, &newsid); | 574 | length = security_transition_sid_user(ssid, tsid, tclass, &newsid); |
575 | if (length < 0) | 575 | if (length < 0) |
576 | goto out2; | 576 | goto out2; |
577 | 577 | ||
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile index bad78779b9b0..15d4e62917de 100644 --- a/security/selinux/ss/Makefile +++ b/security/selinux/ss/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for building the SELinux security server as part of the kernel tree. | 2 | # Makefile for building the SELinux security server as part of the kernel tree. |
3 | # | 3 | # |
4 | 4 | ||
5 | EXTRA_CFLAGS += -Isecurity/selinux/include | 5 | EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include |
6 | obj-y := ss.o | 6 | obj-y := ss.o |
7 | 7 | ||
8 | ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o | 8 | ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o |
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index b5407f16c2a4..3f2b2706b5bb 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c | |||
@@ -532,7 +532,7 @@ int mls_compute_sid(struct context *scontext, | |||
532 | } | 532 | } |
533 | /* Fallthrough */ | 533 | /* Fallthrough */ |
534 | case AVTAB_CHANGE: | 534 | case AVTAB_CHANGE: |
535 | if (tclass == SECCLASS_PROCESS) | 535 | if (tclass == policydb.process_class) |
536 | /* Use the process MLS attributes. */ | 536 | /* Use the process MLS attributes. */ |
537 | return mls_context_cpy(newcontext, scontext); | 537 | return mls_context_cpy(newcontext, scontext); |
538 | else | 538 | else |
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 72e4a54973aa..f03667213ea8 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
@@ -713,7 +713,6 @@ void policydb_destroy(struct policydb *p) | |||
713 | ebitmap_destroy(&p->type_attr_map[i]); | 713 | ebitmap_destroy(&p->type_attr_map[i]); |
714 | } | 714 | } |
715 | kfree(p->type_attr_map); | 715 | kfree(p->type_attr_map); |
716 | kfree(p->undefined_perms); | ||
717 | ebitmap_destroy(&p->policycaps); | 716 | ebitmap_destroy(&p->policycaps); |
718 | ebitmap_destroy(&p->permissive_map); | 717 | ebitmap_destroy(&p->permissive_map); |
719 | 718 | ||
@@ -1640,6 +1639,40 @@ static int policydb_bounds_sanity_check(struct policydb *p) | |||
1640 | 1639 | ||
1641 | extern int ss_initialized; | 1640 | extern int ss_initialized; |
1642 | 1641 | ||
1642 | u16 string_to_security_class(struct policydb *p, const char *name) | ||
1643 | { | ||
1644 | struct class_datum *cladatum; | ||
1645 | |||
1646 | cladatum = hashtab_search(p->p_classes.table, name); | ||
1647 | if (!cladatum) | ||
1648 | return 0; | ||
1649 | |||
1650 | return cladatum->value; | ||
1651 | } | ||
1652 | |||
1653 | u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name) | ||
1654 | { | ||
1655 | struct class_datum *cladatum; | ||
1656 | struct perm_datum *perdatum = NULL; | ||
1657 | struct common_datum *comdatum; | ||
1658 | |||
1659 | if (!tclass || tclass > p->p_classes.nprim) | ||
1660 | return 0; | ||
1661 | |||
1662 | cladatum = p->class_val_to_struct[tclass-1]; | ||
1663 | comdatum = cladatum->comdatum; | ||
1664 | if (comdatum) | ||
1665 | perdatum = hashtab_search(comdatum->permissions.table, | ||
1666 | name); | ||
1667 | if (!perdatum) | ||
1668 | perdatum = hashtab_search(cladatum->permissions.table, | ||
1669 | name); | ||
1670 | if (!perdatum) | ||
1671 | return 0; | ||
1672 | |||
1673 | return 1U << (perdatum->value-1); | ||
1674 | } | ||
1675 | |||
1643 | /* | 1676 | /* |
1644 | * Read the configuration data from a policy database binary | 1677 | * Read the configuration data from a policy database binary |
1645 | * representation file into a policy database structure. | 1678 | * representation file into a policy database structure. |
@@ -1861,6 +1894,16 @@ int policydb_read(struct policydb *p, void *fp) | |||
1861 | if (rc) | 1894 | if (rc) |
1862 | goto bad; | 1895 | goto bad; |
1863 | 1896 | ||
1897 | p->process_class = string_to_security_class(p, "process"); | ||
1898 | if (!p->process_class) | ||
1899 | goto bad; | ||
1900 | p->process_trans_perms = string_to_av_perm(p, p->process_class, | ||
1901 | "transition"); | ||
1902 | p->process_trans_perms |= string_to_av_perm(p, p->process_class, | ||
1903 | "dyntransition"); | ||
1904 | if (!p->process_trans_perms) | ||
1905 | goto bad; | ||
1906 | |||
1864 | for (i = 0; i < info->ocon_num; i++) { | 1907 | for (i = 0; i < info->ocon_num; i++) { |
1865 | rc = next_entry(buf, fp, sizeof(u32)); | 1908 | rc = next_entry(buf, fp, sizeof(u32)); |
1866 | if (rc < 0) | 1909 | if (rc < 0) |
@@ -2101,7 +2144,7 @@ int policydb_read(struct policydb *p, void *fp) | |||
2101 | goto bad; | 2144 | goto bad; |
2102 | rt->target_class = le32_to_cpu(buf[0]); | 2145 | rt->target_class = le32_to_cpu(buf[0]); |
2103 | } else | 2146 | } else |
2104 | rt->target_class = SECCLASS_PROCESS; | 2147 | rt->target_class = p->process_class; |
2105 | if (!policydb_type_isvalid(p, rt->source_type) || | 2148 | if (!policydb_type_isvalid(p, rt->source_type) || |
2106 | !policydb_type_isvalid(p, rt->target_type) || | 2149 | !policydb_type_isvalid(p, rt->target_type) || |
2107 | !policydb_class_isvalid(p, rt->target_class)) { | 2150 | !policydb_class_isvalid(p, rt->target_class)) { |
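The two helpers added above resolve class and permission names against whatever values the loaded policy assigned, with a permission encoded as the single bit 1U << (value - 1); policydb_read() then uses them so that the "process" class and its "transition"/"dyntransition" permissions no longer depend on fixed kernel constants. A rough user-space sketch of the same lookup pattern (the table contents, names and values below are invented for illustration; the real lookups go through policydb hash tables):

#include <stdio.h>
#include <string.h>

struct perm { const char *name; unsigned int value; };          /* 1-based */
struct cls  { const char *name; unsigned short value; struct perm perms[4]; };

static const struct cls classes[] = {
	{ "process", 2, { { "transition", 1 }, { "dyntransition", 2 } } },
	{ "file",    6, { { "read", 1 }, { "write", 2 }, { "open", 3 } } },
	{ NULL }
};

/* Like string_to_security_class(): 0 means "not defined in this policy". */
static unsigned short class_by_name(const char *name)
{
	for (int i = 0; classes[i].name; i++)
		if (!strcmp(classes[i].name, name))
			return classes[i].value;
	return 0;
}

/* Like string_to_av_perm(): a permission becomes the bit 1U << (value - 1). */
static unsigned int perm_bit(const char *cls, const char *perm)
{
	for (int i = 0; classes[i].name; i++) {
		if (strcmp(classes[i].name, cls))
			continue;
		for (int j = 0; j < 4 && classes[i].perms[j].name; j++)
			if (!strcmp(classes[i].perms[j].name, perm))
				return 1u << (classes[i].perms[j].value - 1);
	}
	return 0;
}

int main(void)
{
	unsigned int trans = perm_bit("process", "transition") |
			     perm_bit("process", "dyntransition");

	printf("process class = %u\n", class_by_name("process")); /* 2 */
	printf("trans perms   = 0x%x\n", trans);                   /* 0x3 */
	return 0;
}

In this sketch, the equivalent of process_trans_perms is simply the OR of the two permission bits, which is what the policydb_read() hunk above computes.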
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index 55152d498b53..cdcc5700946f 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h | |||
@@ -254,7 +254,9 @@ struct policydb { | |||
254 | 254 | ||
255 | unsigned int reject_unknown : 1; | 255 | unsigned int reject_unknown : 1; |
256 | unsigned int allow_unknown : 1; | 256 | unsigned int allow_unknown : 1; |
257 | u32 *undefined_perms; | 257 | |
258 | u16 process_class; | ||
259 | u32 process_trans_perms; | ||
258 | }; | 260 | }; |
259 | 261 | ||
260 | extern void policydb_destroy(struct policydb *p); | 262 | extern void policydb_destroy(struct policydb *p); |
@@ -295,5 +297,8 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) | |||
295 | return 0; | 297 | return 0; |
296 | } | 298 | } |
297 | 299 | ||
300 | extern u16 string_to_security_class(struct policydb *p, const char *name); | ||
301 | extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); | ||
302 | |||
298 | #endif /* _SS_POLICYDB_H_ */ | 303 | #endif /* _SS_POLICYDB_H_ */ |
299 | 304 | ||
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index ff17820d35ec..d6bb20cbad62 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -65,16 +65,10 @@ | |||
65 | #include "audit.h" | 65 | #include "audit.h" |
66 | 66 | ||
67 | extern void selnl_notify_policyload(u32 seqno); | 67 | extern void selnl_notify_policyload(u32 seqno); |
68 | unsigned int policydb_loaded_version; | ||
69 | 68 | ||
70 | int selinux_policycap_netpeer; | 69 | int selinux_policycap_netpeer; |
71 | int selinux_policycap_openperm; | 70 | int selinux_policycap_openperm; |
72 | 71 | ||
73 | /* | ||
74 | * This is declared in avc.c | ||
75 | */ | ||
76 | extern const struct selinux_class_perm selinux_class_perm; | ||
77 | |||
78 | static DEFINE_RWLOCK(policy_rwlock); | 72 | static DEFINE_RWLOCK(policy_rwlock); |
79 | 73 | ||
80 | static struct sidtab sidtab; | 74 | static struct sidtab sidtab; |
@@ -98,6 +92,165 @@ static int context_struct_compute_av(struct context *scontext, | |||
98 | u16 tclass, | 92 | u16 tclass, |
99 | u32 requested, | 93 | u32 requested, |
100 | struct av_decision *avd); | 94 | struct av_decision *avd); |
95 | |||
96 | struct selinux_mapping { | ||
97 | u16 value; /* policy value */ | ||
98 | unsigned num_perms; | ||
99 | u32 perms[sizeof(u32) * 8]; | ||
100 | }; | ||
101 | |||
102 | static struct selinux_mapping *current_mapping; | ||
103 | static u16 current_mapping_size; | ||
104 | |||
105 | static int selinux_set_mapping(struct policydb *pol, | ||
106 | struct security_class_mapping *map, | ||
107 | struct selinux_mapping **out_map_p, | ||
108 | u16 *out_map_size) | ||
109 | { | ||
110 | struct selinux_mapping *out_map = NULL; | ||
111 | size_t size = sizeof(struct selinux_mapping); | ||
112 | u16 i, j; | ||
113 | unsigned k; | ||
114 | bool print_unknown_handle = false; | ||
115 | |||
116 | /* Find number of classes in the input mapping */ | ||
117 | if (!map) | ||
118 | return -EINVAL; | ||
119 | i = 0; | ||
120 | while (map[i].name) | ||
121 | i++; | ||
122 | |||
123 | /* Allocate space for the class records, plus one for class zero */ | ||
124 | out_map = kcalloc(++i, size, GFP_ATOMIC); | ||
125 | if (!out_map) | ||
126 | return -ENOMEM; | ||
127 | |||
128 | /* Store the raw class and permission values */ | ||
129 | j = 0; | ||
130 | while (map[j].name) { | ||
131 | struct security_class_mapping *p_in = map + (j++); | ||
132 | struct selinux_mapping *p_out = out_map + j; | ||
133 | |||
134 | /* An empty class string skips ahead */ | ||
135 | if (!strcmp(p_in->name, "")) { | ||
136 | p_out->num_perms = 0; | ||
137 | continue; | ||
138 | } | ||
139 | |||
140 | p_out->value = string_to_security_class(pol, p_in->name); | ||
141 | if (!p_out->value) { | ||
142 | printk(KERN_INFO | ||
143 | "SELinux: Class %s not defined in policy.\n", | ||
144 | p_in->name); | ||
145 | if (pol->reject_unknown) | ||
146 | goto err; | ||
147 | p_out->num_perms = 0; | ||
148 | print_unknown_handle = true; | ||
149 | continue; | ||
150 | } | ||
151 | |||
152 | k = 0; | ||
153 | while (p_in->perms && p_in->perms[k]) { | ||
154 | /* An empty permission string skips ahead */ | ||
155 | if (!*p_in->perms[k]) { | ||
156 | k++; | ||
157 | continue; | ||
158 | } | ||
159 | p_out->perms[k] = string_to_av_perm(pol, p_out->value, | ||
160 | p_in->perms[k]); | ||
161 | if (!p_out->perms[k]) { | ||
162 | printk(KERN_INFO | ||
163 | "SELinux: Permission %s in class %s not defined in policy.\n", | ||
164 | p_in->perms[k], p_in->name); | ||
165 | if (pol->reject_unknown) | ||
166 | goto err; | ||
167 | print_unknown_handle = true; | ||
168 | } | ||
169 | |||
170 | k++; | ||
171 | } | ||
172 | p_out->num_perms = k; | ||
173 | } | ||
174 | |||
175 | if (print_unknown_handle) | ||
176 | printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n", | ||
177 | pol->allow_unknown ? "allowed" : "denied"); | ||
178 | |||
179 | *out_map_p = out_map; | ||
180 | *out_map_size = i; | ||
181 | return 0; | ||
182 | err: | ||
183 | kfree(out_map); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * Get real, policy values from mapped values | ||
189 | */ | ||
190 | |||
191 | static u16 unmap_class(u16 tclass) | ||
192 | { | ||
193 | if (tclass < current_mapping_size) | ||
194 | return current_mapping[tclass].value; | ||
195 | |||
196 | return tclass; | ||
197 | } | ||
198 | |||
199 | static u32 unmap_perm(u16 tclass, u32 tperm) | ||
200 | { | ||
201 | if (tclass < current_mapping_size) { | ||
202 | unsigned i; | ||
203 | u32 kperm = 0; | ||
204 | |||
205 | for (i = 0; i < current_mapping[tclass].num_perms; i++) | ||
206 | if (tperm & (1<<i)) { | ||
207 | kperm |= current_mapping[tclass].perms[i]; | ||
208 | tperm &= ~(1<<i); | ||
209 | } | ||
210 | return kperm; | ||
211 | } | ||
212 | |||
213 | return tperm; | ||
214 | } | ||
215 | |||
216 | static void map_decision(u16 tclass, struct av_decision *avd, | ||
217 | int allow_unknown) | ||
218 | { | ||
219 | if (tclass < current_mapping_size) { | ||
220 | unsigned i, n = current_mapping[tclass].num_perms; | ||
221 | u32 result; | ||
222 | |||
223 | for (i = 0, result = 0; i < n; i++) { | ||
224 | if (avd->allowed & current_mapping[tclass].perms[i]) | ||
225 | result |= 1<<i; | ||
226 | if (allow_unknown && !current_mapping[tclass].perms[i]) | ||
227 | result |= 1<<i; | ||
228 | } | ||
229 | avd->allowed = result; | ||
230 | |||
231 | for (i = 0, result = 0; i < n; i++) | ||
232 | if (avd->auditallow & current_mapping[tclass].perms[i]) | ||
233 | result |= 1<<i; | ||
234 | avd->auditallow = result; | ||
235 | |||
236 | for (i = 0, result = 0; i < n; i++) { | ||
237 | if (avd->auditdeny & current_mapping[tclass].perms[i]) | ||
238 | result |= 1<<i; | ||
239 | if (!allow_unknown && !current_mapping[tclass].perms[i]) | ||
240 | result |= 1<<i; | ||
241 | } | ||
242 | /* | ||
243 | * In case the kernel has a bug and requests a permission | ||
244 | * between num_perms and the maximum permission number, we | ||
245 | * should audit that denial | ||
246 | */ | ||
247 | for (; i < (sizeof(u32)*8); i++) | ||
248 | result |= 1<<i; | ||
249 | avd->auditdeny = result; | ||
250 | } | ||
251 | } | ||
252 | |||
253 | |||
101 | /* | 254 | /* |
102 | * Return the boolean value of a constraint expression | 255 | * Return the boolean value of a constraint expression |
103 | * when it is applied to the specified source and target | 256 | * when it is applied to the specified source and target |
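The mapping machinery added above is the core of the change: each kernel-visible class keeps a small fixed index, and current_mapping records, per index, the class value and per-permission bit values that the loaded policy actually assigned. unmap_class() and unmap_perm() translate kernel requests into policy terms, and map_decision() translates the resulting access vectors back. A self-contained user-space sketch of that round trip (the struct, names and example bit values are invented for illustration, this is not the kernel API):

#include <stdio.h>

struct mapping {
	unsigned short policy_class;   /* class value found in the policy */
	unsigned int   num_perms;      /* how many kernel permissions mapped */
	unsigned int   perms[32];      /* policy bit for each kernel bit index */
};

/* Pretend the policy assigned class 7, "read" = 0x2 and "write" = 0x8. */
static const struct mapping file_map = {
	.policy_class = 7,
	.num_perms = 2,
	.perms = { 0x2, 0x8 },
};

/* Kernel-side bitmask -> policy bitmask (the idea behind unmap_perm()). */
static unsigned int to_policy(const struct mapping *m, unsigned int kperms)
{
	unsigned int out = 0;

	for (unsigned int i = 0; i < m->num_perms; i++)
		if (kperms & (1u << i))
			out |= m->perms[i];
	return out;
}

/* Policy bitmask -> kernel-side bitmask (the "allowed" pass of map_decision()). */
static unsigned int to_kernel(const struct mapping *m, unsigned int pperms)
{
	unsigned int out = 0;

	for (unsigned int i = 0; i < m->num_perms; i++)
		if (pperms & m->perms[i])
			out |= 1u << i;
	return out;
}

int main(void)
{
	unsigned int requested = 0x3;   /* kernel bit indices 0 and 1 */
	unsigned int policy_view = to_policy(&file_map, requested);

	printf("policy bits: 0x%x\n", policy_view);                       /* 0xa */
	printf("kernel bits: 0x%x\n", to_kernel(&file_map, policy_view)); /* 0x3 */
	return 0;
}

map_decision() additionally handles permissions that have no policy mapping at all, allowing or auditing them according to allow_unknown; the sketch omits that part.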
@@ -467,21 +620,9 @@ static int context_struct_compute_av(struct context *scontext, | |||
467 | struct class_datum *tclass_datum; | 620 | struct class_datum *tclass_datum; |
468 | struct ebitmap *sattr, *tattr; | 621 | struct ebitmap *sattr, *tattr; |
469 | struct ebitmap_node *snode, *tnode; | 622 | struct ebitmap_node *snode, *tnode; |
470 | const struct selinux_class_perm *kdefs = &selinux_class_perm; | ||
471 | unsigned int i, j; | 623 | unsigned int i, j; |
472 | 624 | ||
473 | /* | 625 | /* |
474 | * Remap extended Netlink classes for old policy versions. | ||
475 | * Do this here rather than socket_type_to_security_class() | ||
476 | * in case a newer policy version is loaded, allowing sockets | ||
477 | * to remain in the correct class. | ||
478 | */ | ||
479 | if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS) | ||
480 | if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET && | ||
481 | tclass <= SECCLASS_NETLINK_DNRT_SOCKET) | ||
482 | tclass = SECCLASS_NETLINK_SOCKET; | ||
483 | |||
484 | /* | ||
485 | * Initialize the access vectors to the default values. | 626 | * Initialize the access vectors to the default values. |
486 | */ | 627 | */ |
487 | avd->allowed = 0; | 628 | avd->allowed = 0; |
@@ -490,33 +631,11 @@ static int context_struct_compute_av(struct context *scontext, | |||
490 | avd->seqno = latest_granting; | 631 | avd->seqno = latest_granting; |
491 | avd->flags = 0; | 632 | avd->flags = 0; |
492 | 633 | ||
493 | /* | 634 | if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) { |
494 | * Check for all the invalid cases. | 635 | if (printk_ratelimit()) |
495 | * - tclass 0 | 636 | printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass); |
496 | * - tclass > policy and > kernel | 637 | return -EINVAL; |
497 | * - tclass > policy but is a userspace class | 638 | } |
498 | * - tclass > policy but we do not allow unknowns | ||
499 | */ | ||
500 | if (unlikely(!tclass)) | ||
501 | goto inval_class; | ||
502 | if (unlikely(tclass > policydb.p_classes.nprim)) | ||
503 | if (tclass > kdefs->cts_len || | ||
504 | !kdefs->class_to_string[tclass] || | ||
505 | !policydb.allow_unknown) | ||
506 | goto inval_class; | ||
507 | |||
508 | /* | ||
509 | * Kernel class and we allow unknown so pad the allow decision | ||
510 | * the pad will be all 1 for unknown classes. | ||
511 | */ | ||
512 | if (tclass <= kdefs->cts_len && policydb.allow_unknown) | ||
513 | avd->allowed = policydb.undefined_perms[tclass - 1]; | ||
514 | |||
515 | /* | ||
516 | * Not in policy. Since decision is completed (all 1 or all 0) return. | ||
517 | */ | ||
518 | if (unlikely(tclass > policydb.p_classes.nprim)) | ||
519 | return 0; | ||
520 | 639 | ||
521 | tclass_datum = policydb.class_val_to_struct[tclass - 1]; | 640 | tclass_datum = policydb.class_val_to_struct[tclass - 1]; |
522 | 641 | ||
@@ -568,8 +687,8 @@ static int context_struct_compute_av(struct context *scontext, | |||
568 | * role is changing, then check the (current_role, new_role) | 687 | * role is changing, then check the (current_role, new_role) |
569 | * pair. | 688 | * pair. |
570 | */ | 689 | */ |
571 | if (tclass == SECCLASS_PROCESS && | 690 | if (tclass == policydb.process_class && |
572 | (avd->allowed & (PROCESS__TRANSITION | PROCESS__DYNTRANSITION)) && | 691 | (avd->allowed & policydb.process_trans_perms) && |
573 | scontext->role != tcontext->role) { | 692 | scontext->role != tcontext->role) { |
574 | for (ra = policydb.role_allow; ra; ra = ra->next) { | 693 | for (ra = policydb.role_allow; ra; ra = ra->next) { |
575 | if (scontext->role == ra->role && | 694 | if (scontext->role == ra->role && |
@@ -577,8 +696,7 @@ static int context_struct_compute_av(struct context *scontext, | |||
577 | break; | 696 | break; |
578 | } | 697 | } |
579 | if (!ra) | 698 | if (!ra) |
580 | avd->allowed &= ~(PROCESS__TRANSITION | | 699 | avd->allowed &= ~policydb.process_trans_perms; |
581 | PROCESS__DYNTRANSITION); | ||
582 | } | 700 | } |
583 | 701 | ||
584 | /* | 702 | /* |
@@ -590,21 +708,6 @@ static int context_struct_compute_av(struct context *scontext, | |||
590 | tclass, requested, avd); | 708 | tclass, requested, avd); |
591 | 709 | ||
592 | return 0; | 710 | return 0; |
593 | |||
594 | inval_class: | ||
595 | if (!tclass || tclass > kdefs->cts_len || | ||
596 | !kdefs->class_to_string[tclass]) { | ||
597 | if (printk_ratelimit()) | ||
598 | printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", | ||
599 | __func__, tclass); | ||
600 | return -EINVAL; | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * Known to the kernel, but not to the policy. | ||
605 | * Handle as a denial (allowed is 0). | ||
606 | */ | ||
607 | return 0; | ||
608 | } | 711 | } |
609 | 712 | ||
610 | static int security_validtrans_handle_fail(struct context *ocontext, | 713 | static int security_validtrans_handle_fail(struct context *ocontext, |
@@ -636,13 +739,14 @@ out: | |||
636 | } | 739 | } |
637 | 740 | ||
638 | int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, | 741 | int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, |
639 | u16 tclass) | 742 | u16 orig_tclass) |
640 | { | 743 | { |
641 | struct context *ocontext; | 744 | struct context *ocontext; |
642 | struct context *ncontext; | 745 | struct context *ncontext; |
643 | struct context *tcontext; | 746 | struct context *tcontext; |
644 | struct class_datum *tclass_datum; | 747 | struct class_datum *tclass_datum; |
645 | struct constraint_node *constraint; | 748 | struct constraint_node *constraint; |
749 | u16 tclass; | ||
646 | int rc = 0; | 750 | int rc = 0; |
647 | 751 | ||
648 | if (!ss_initialized) | 752 | if (!ss_initialized) |
@@ -650,16 +754,7 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, | |||
650 | 754 | ||
651 | read_lock(&policy_rwlock); | 755 | read_lock(&policy_rwlock); |
652 | 756 | ||
653 | /* | 757 | tclass = unmap_class(orig_tclass); |
654 | * Remap extended Netlink classes for old policy versions. | ||
655 | * Do this here rather than socket_type_to_security_class() | ||
656 | * in case a newer policy version is loaded, allowing sockets | ||
657 | * to remain in the correct class. | ||
658 | */ | ||
659 | if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS) | ||
660 | if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET && | ||
661 | tclass <= SECCLASS_NETLINK_DNRT_SOCKET) | ||
662 | tclass = SECCLASS_NETLINK_SOCKET; | ||
663 | 758 | ||
664 | if (!tclass || tclass > policydb.p_classes.nprim) { | 759 | if (!tclass || tclass > policydb.p_classes.nprim) { |
665 | printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", | 760 | printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", |
@@ -792,6 +887,38 @@ out: | |||
792 | } | 887 | } |
793 | 888 | ||
794 | 889 | ||
890 | static int security_compute_av_core(u32 ssid, | ||
891 | u32 tsid, | ||
892 | u16 tclass, | ||
893 | u32 requested, | ||
894 | struct av_decision *avd) | ||
895 | { | ||
896 | struct context *scontext = NULL, *tcontext = NULL; | ||
897 | int rc = 0; | ||
898 | |||
899 | scontext = sidtab_search(&sidtab, ssid); | ||
900 | if (!scontext) { | ||
901 | printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", | ||
902 | __func__, ssid); | ||
903 | return -EINVAL; | ||
904 | } | ||
905 | tcontext = sidtab_search(&sidtab, tsid); | ||
906 | if (!tcontext) { | ||
907 | printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", | ||
908 | __func__, tsid); | ||
909 | return -EINVAL; | ||
910 | } | ||
911 | |||
912 | rc = context_struct_compute_av(scontext, tcontext, tclass, | ||
913 | requested, avd); | ||
914 | |||
915 | /* permissive domain? */ | ||
916 | if (ebitmap_get_bit(&policydb.permissive_map, scontext->type)) | ||
917 | avd->flags |= AVD_FLAGS_PERMISSIVE; | ||
918 | |||
919 | return rc; | ||
920 | } | ||
921 | |||
795 | /** | 922 | /** |
796 | * security_compute_av - Compute access vector decisions. | 923 | * security_compute_av - Compute access vector decisions. |
797 | * @ssid: source security identifier | 924 | * @ssid: source security identifier |
@@ -807,12 +934,49 @@ out: | |||
807 | */ | 934 | */ |
808 | int security_compute_av(u32 ssid, | 935 | int security_compute_av(u32 ssid, |
809 | u32 tsid, | 936 | u32 tsid, |
810 | u16 tclass, | 937 | u16 orig_tclass, |
811 | u32 requested, | 938 | u32 orig_requested, |
812 | struct av_decision *avd) | 939 | struct av_decision *avd) |
813 | { | 940 | { |
814 | struct context *scontext = NULL, *tcontext = NULL; | 941 | u16 tclass; |
815 | int rc = 0; | 942 | u32 requested; |
943 | int rc; | ||
944 | |||
945 | read_lock(&policy_rwlock); | ||
946 | |||
947 | if (!ss_initialized) | ||
948 | goto allow; | ||
949 | |||
950 | requested = unmap_perm(orig_tclass, orig_requested); | ||
951 | tclass = unmap_class(orig_tclass); | ||
952 | if (unlikely(orig_tclass && !tclass)) { | ||
953 | if (policydb.allow_unknown) | ||
954 | goto allow; | ||
955 | rc = -EINVAL; | ||
956 | goto out; | ||
957 | } | ||
958 | rc = security_compute_av_core(ssid, tsid, tclass, requested, avd); | ||
959 | map_decision(orig_tclass, avd, policydb.allow_unknown); | ||
960 | out: | ||
961 | read_unlock(&policy_rwlock); | ||
962 | return rc; | ||
963 | allow: | ||
964 | avd->allowed = 0xffffffff; | ||
965 | avd->auditallow = 0; | ||
966 | avd->auditdeny = 0xffffffff; | ||
967 | avd->seqno = latest_granting; | ||
968 | avd->flags = 0; | ||
969 | rc = 0; | ||
970 | goto out; | ||
971 | } | ||
972 | |||
973 | int security_compute_av_user(u32 ssid, | ||
974 | u32 tsid, | ||
975 | u16 tclass, | ||
976 | u32 requested, | ||
977 | struct av_decision *avd) | ||
978 | { | ||
979 | int rc; | ||
816 | 980 | ||
817 | if (!ss_initialized) { | 981 | if (!ss_initialized) { |
818 | avd->allowed = 0xffffffff; | 982 | avd->allowed = 0xffffffff; |
@@ -823,29 +987,7 @@ int security_compute_av(u32 ssid, | |||
823 | } | 987 | } |
824 | 988 | ||
825 | read_lock(&policy_rwlock); | 989 | read_lock(&policy_rwlock); |
826 | 990 | rc = security_compute_av_core(ssid, tsid, tclass, requested, avd); | |
827 | scontext = sidtab_search(&sidtab, ssid); | ||
828 | if (!scontext) { | ||
829 | printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", | ||
830 | __func__, ssid); | ||
831 | rc = -EINVAL; | ||
832 | goto out; | ||
833 | } | ||
834 | tcontext = sidtab_search(&sidtab, tsid); | ||
835 | if (!tcontext) { | ||
836 | printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", | ||
837 | __func__, tsid); | ||
838 | rc = -EINVAL; | ||
839 | goto out; | ||
840 | } | ||
841 | |||
842 | rc = context_struct_compute_av(scontext, tcontext, tclass, | ||
843 | requested, avd); | ||
844 | |||
845 | /* permissive domain? */ | ||
846 | if (ebitmap_get_bit(&policydb.permissive_map, scontext->type)) | ||
847 | avd->flags |= AVD_FLAGS_PERMISSIVE; | ||
848 | out: | ||
849 | read_unlock(&policy_rwlock); | 991 | read_unlock(&policy_rwlock); |
850 | return rc; | 992 | return rc; |
851 | } | 993 | } |
@@ -1204,20 +1346,22 @@ out: | |||
1204 | 1346 | ||
1205 | static int security_compute_sid(u32 ssid, | 1347 | static int security_compute_sid(u32 ssid, |
1206 | u32 tsid, | 1348 | u32 tsid, |
1207 | u16 tclass, | 1349 | u16 orig_tclass, |
1208 | u32 specified, | 1350 | u32 specified, |
1209 | u32 *out_sid) | 1351 | u32 *out_sid, |
1352 | bool kern) | ||
1210 | { | 1353 | { |
1211 | struct context *scontext = NULL, *tcontext = NULL, newcontext; | 1354 | struct context *scontext = NULL, *tcontext = NULL, newcontext; |
1212 | struct role_trans *roletr = NULL; | 1355 | struct role_trans *roletr = NULL; |
1213 | struct avtab_key avkey; | 1356 | struct avtab_key avkey; |
1214 | struct avtab_datum *avdatum; | 1357 | struct avtab_datum *avdatum; |
1215 | struct avtab_node *node; | 1358 | struct avtab_node *node; |
1359 | u16 tclass; | ||
1216 | int rc = 0; | 1360 | int rc = 0; |
1217 | 1361 | ||
1218 | if (!ss_initialized) { | 1362 | if (!ss_initialized) { |
1219 | switch (tclass) { | 1363 | switch (orig_tclass) { |
1220 | case SECCLASS_PROCESS: | 1364 | case SECCLASS_PROCESS: /* kernel value */ |
1221 | *out_sid = ssid; | 1365 | *out_sid = ssid; |
1222 | break; | 1366 | break; |
1223 | default: | 1367 | default: |
@@ -1231,6 +1375,11 @@ static int security_compute_sid(u32 ssid, | |||
1231 | 1375 | ||
1232 | read_lock(&policy_rwlock); | 1376 | read_lock(&policy_rwlock); |
1233 | 1377 | ||
1378 | if (kern) | ||
1379 | tclass = unmap_class(orig_tclass); | ||
1380 | else | ||
1381 | tclass = orig_tclass; | ||
1382 | |||
1234 | scontext = sidtab_search(&sidtab, ssid); | 1383 | scontext = sidtab_search(&sidtab, ssid); |
1235 | if (!scontext) { | 1384 | if (!scontext) { |
1236 | printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", | 1385 | printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", |
@@ -1260,13 +1409,11 @@ static int security_compute_sid(u32 ssid, | |||
1260 | } | 1409 | } |
1261 | 1410 | ||
1262 | /* Set the role and type to default values. */ | 1411 | /* Set the role and type to default values. */ |
1263 | switch (tclass) { | 1412 | if (tclass == policydb.process_class) { |
1264 | case SECCLASS_PROCESS: | ||
1265 | /* Use the current role and type of process. */ | 1413 | /* Use the current role and type of process. */ |
1266 | newcontext.role = scontext->role; | 1414 | newcontext.role = scontext->role; |
1267 | newcontext.type = scontext->type; | 1415 | newcontext.type = scontext->type; |
1268 | break; | 1416 | } else { |
1269 | default: | ||
1270 | /* Use the well-defined object role. */ | 1417 | /* Use the well-defined object role. */ |
1271 | newcontext.role = OBJECT_R_VAL; | 1418 | newcontext.role = OBJECT_R_VAL; |
1272 | /* Use the type of the related object. */ | 1419 | /* Use the type of the related object. */ |
@@ -1297,8 +1444,7 @@ static int security_compute_sid(u32 ssid, | |||
1297 | } | 1444 | } |
1298 | 1445 | ||
1299 | /* Check for class-specific changes. */ | 1446 | /* Check for class-specific changes. */ |
1300 | switch (tclass) { | 1447 | if (tclass == policydb.process_class) { |
1301 | case SECCLASS_PROCESS: | ||
1302 | if (specified & AVTAB_TRANSITION) { | 1448 | if (specified & AVTAB_TRANSITION) { |
1303 | /* Look for a role transition rule. */ | 1449 | /* Look for a role transition rule. */ |
1304 | for (roletr = policydb.role_tr; roletr; | 1450 | for (roletr = policydb.role_tr; roletr; |
@@ -1311,9 +1457,6 @@ static int security_compute_sid(u32 ssid, | |||
1311 | } | 1457 | } |
1312 | } | 1458 | } |
1313 | } | 1459 | } |
1314 | break; | ||
1315 | default: | ||
1316 | break; | ||
1317 | } | 1460 | } |
1318 | 1461 | ||
1319 | /* Set the MLS attributes. | 1462 | /* Set the MLS attributes. |
@@ -1358,7 +1501,17 @@ int security_transition_sid(u32 ssid, | |||
1358 | u16 tclass, | 1501 | u16 tclass, |
1359 | u32 *out_sid) | 1502 | u32 *out_sid) |
1360 | { | 1503 | { |
1361 | return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, out_sid); | 1504 | return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, |
1505 | out_sid, true); | ||
1506 | } | ||
1507 | |||
1508 | int security_transition_sid_user(u32 ssid, | ||
1509 | u32 tsid, | ||
1510 | u16 tclass, | ||
1511 | u32 *out_sid) | ||
1512 | { | ||
1513 | return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, | ||
1514 | out_sid, false); | ||
1362 | } | 1515 | } |
1363 | 1516 | ||
1364 | /** | 1517 | /** |
@@ -1379,7 +1532,8 @@ int security_member_sid(u32 ssid, | |||
1379 | u16 tclass, | 1532 | u16 tclass, |
1380 | u32 *out_sid) | 1533 | u32 *out_sid) |
1381 | { | 1534 | { |
1382 | return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid); | 1535 | return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid, |
1536 | false); | ||
1383 | } | 1537 | } |
1384 | 1538 | ||
1385 | /** | 1539 | /** |
@@ -1400,144 +1554,8 @@ int security_change_sid(u32 ssid, | |||
1400 | u16 tclass, | 1554 | u16 tclass, |
1401 | u32 *out_sid) | 1555 | u32 *out_sid) |
1402 | { | 1556 | { |
1403 | return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid); | 1557 | return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid, |
1404 | } | 1558 | false); |
1405 | |||
1406 | /* | ||
1407 | * Verify that each kernel class that is defined in the | ||
1408 | * policy is correct | ||
1409 | */ | ||
1410 | static int validate_classes(struct policydb *p) | ||
1411 | { | ||
1412 | int i, j; | ||
1413 | struct class_datum *cladatum; | ||
1414 | struct perm_datum *perdatum; | ||
1415 | u32 nprim, tmp, common_pts_len, perm_val, pol_val; | ||
1416 | u16 class_val; | ||
1417 | const struct selinux_class_perm *kdefs = &selinux_class_perm; | ||
1418 | const char *def_class, *def_perm, *pol_class; | ||
1419 | struct symtab *perms; | ||
1420 | bool print_unknown_handle = 0; | ||
1421 | |||
1422 | if (p->allow_unknown) { | ||
1423 | u32 num_classes = kdefs->cts_len; | ||
1424 | p->undefined_perms = kcalloc(num_classes, sizeof(u32), GFP_KERNEL); | ||
1425 | if (!p->undefined_perms) | ||
1426 | return -ENOMEM; | ||
1427 | } | ||
1428 | |||
1429 | for (i = 1; i < kdefs->cts_len; i++) { | ||
1430 | def_class = kdefs->class_to_string[i]; | ||
1431 | if (!def_class) | ||
1432 | continue; | ||
1433 | if (i > p->p_classes.nprim) { | ||
1434 | printk(KERN_INFO | ||
1435 | "SELinux: class %s not defined in policy\n", | ||
1436 | def_class); | ||
1437 | if (p->reject_unknown) | ||
1438 | return -EINVAL; | ||
1439 | if (p->allow_unknown) | ||
1440 | p->undefined_perms[i-1] = ~0U; | ||
1441 | print_unknown_handle = 1; | ||
1442 | continue; | ||
1443 | } | ||
1444 | pol_class = p->p_class_val_to_name[i-1]; | ||
1445 | if (strcmp(pol_class, def_class)) { | ||
1446 | printk(KERN_ERR | ||
1447 | "SELinux: class %d is incorrect, found %s but should be %s\n", | ||
1448 | i, pol_class, def_class); | ||
1449 | return -EINVAL; | ||
1450 | } | ||
1451 | } | ||
1452 | for (i = 0; i < kdefs->av_pts_len; i++) { | ||
1453 | class_val = kdefs->av_perm_to_string[i].tclass; | ||
1454 | perm_val = kdefs->av_perm_to_string[i].value; | ||
1455 | def_perm = kdefs->av_perm_to_string[i].name; | ||
1456 | if (class_val > p->p_classes.nprim) | ||
1457 | continue; | ||
1458 | pol_class = p->p_class_val_to_name[class_val-1]; | ||
1459 | cladatum = hashtab_search(p->p_classes.table, pol_class); | ||
1460 | BUG_ON(!cladatum); | ||
1461 | perms = &cladatum->permissions; | ||
1462 | nprim = 1 << (perms->nprim - 1); | ||
1463 | if (perm_val > nprim) { | ||
1464 | printk(KERN_INFO | ||
1465 | "SELinux: permission %s in class %s not defined in policy\n", | ||
1466 | def_perm, pol_class); | ||
1467 | if (p->reject_unknown) | ||
1468 | return -EINVAL; | ||
1469 | if (p->allow_unknown) | ||
1470 | p->undefined_perms[class_val-1] |= perm_val; | ||
1471 | print_unknown_handle = 1; | ||
1472 | continue; | ||
1473 | } | ||
1474 | perdatum = hashtab_search(perms->table, def_perm); | ||
1475 | if (perdatum == NULL) { | ||
1476 | printk(KERN_ERR | ||
1477 | "SELinux: permission %s in class %s not found in policy, bad policy\n", | ||
1478 | def_perm, pol_class); | ||
1479 | return -EINVAL; | ||
1480 | } | ||
1481 | pol_val = 1 << (perdatum->value - 1); | ||
1482 | if (pol_val != perm_val) { | ||
1483 | printk(KERN_ERR | ||
1484 | "SELinux: permission %s in class %s has incorrect value\n", | ||
1485 | def_perm, pol_class); | ||
1486 | return -EINVAL; | ||
1487 | } | ||
1488 | } | ||
1489 | for (i = 0; i < kdefs->av_inherit_len; i++) { | ||
1490 | class_val = kdefs->av_inherit[i].tclass; | ||
1491 | if (class_val > p->p_classes.nprim) | ||
1492 | continue; | ||
1493 | pol_class = p->p_class_val_to_name[class_val-1]; | ||
1494 | cladatum = hashtab_search(p->p_classes.table, pol_class); | ||
1495 | BUG_ON(!cladatum); | ||
1496 | if (!cladatum->comdatum) { | ||
1497 | printk(KERN_ERR | ||
1498 | "SELinux: class %s should have an inherits clause but does not\n", | ||
1499 | pol_class); | ||
1500 | return -EINVAL; | ||
1501 | } | ||
1502 | tmp = kdefs->av_inherit[i].common_base; | ||
1503 | common_pts_len = 0; | ||
1504 | while (!(tmp & 0x01)) { | ||
1505 | common_pts_len++; | ||
1506 | tmp >>= 1; | ||
1507 | } | ||
1508 | perms = &cladatum->comdatum->permissions; | ||
1509 | for (j = 0; j < common_pts_len; j++) { | ||
1510 | def_perm = kdefs->av_inherit[i].common_pts[j]; | ||
1511 | if (j >= perms->nprim) { | ||
1512 | printk(KERN_INFO | ||
1513 | "SELinux: permission %s in class %s not defined in policy\n", | ||
1514 | def_perm, pol_class); | ||
1515 | if (p->reject_unknown) | ||
1516 | return -EINVAL; | ||
1517 | if (p->allow_unknown) | ||
1518 | p->undefined_perms[class_val-1] |= (1 << j); | ||
1519 | print_unknown_handle = 1; | ||
1520 | continue; | ||
1521 | } | ||
1522 | perdatum = hashtab_search(perms->table, def_perm); | ||
1523 | if (perdatum == NULL) { | ||
1524 | printk(KERN_ERR | ||
1525 | "SELinux: permission %s in class %s not found in policy, bad policy\n", | ||
1526 | def_perm, pol_class); | ||
1527 | return -EINVAL; | ||
1528 | } | ||
1529 | if (perdatum->value != j + 1) { | ||
1530 | printk(KERN_ERR | ||
1531 | "SELinux: permission %s in class %s has incorrect value\n", | ||
1532 | def_perm, pol_class); | ||
1533 | return -EINVAL; | ||
1534 | } | ||
1535 | } | ||
1536 | } | ||
1537 | if (print_unknown_handle) | ||
1538 | printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n", | ||
1539 | (security_get_allow_unknown() ? "allowed" : "denied")); | ||
1540 | return 0; | ||
1541 | } | 1559 | } |
1542 | 1560 | ||
1543 | /* Clone the SID into the new SID table. */ | 1561 | /* Clone the SID into the new SID table. */ |
@@ -1710,8 +1728,10 @@ int security_load_policy(void *data, size_t len) | |||
1710 | { | 1728 | { |
1711 | struct policydb oldpolicydb, newpolicydb; | 1729 | struct policydb oldpolicydb, newpolicydb; |
1712 | struct sidtab oldsidtab, newsidtab; | 1730 | struct sidtab oldsidtab, newsidtab; |
1731 | struct selinux_mapping *oldmap, *map = NULL; | ||
1713 | struct convert_context_args args; | 1732 | struct convert_context_args args; |
1714 | u32 seqno; | 1733 | u32 seqno; |
1734 | u16 map_size; | ||
1715 | int rc = 0; | 1735 | int rc = 0; |
1716 | struct policy_file file = { data, len }, *fp = &file; | 1736 | struct policy_file file = { data, len }, *fp = &file; |
1717 | 1737 | ||
@@ -1721,22 +1741,19 @@ int security_load_policy(void *data, size_t len) | |||
1721 | avtab_cache_destroy(); | 1741 | avtab_cache_destroy(); |
1722 | return -EINVAL; | 1742 | return -EINVAL; |
1723 | } | 1743 | } |
1724 | if (policydb_load_isids(&policydb, &sidtab)) { | 1744 | if (selinux_set_mapping(&policydb, secclass_map, |
1745 | ¤t_mapping, | ||
1746 | ¤t_mapping_size)) { | ||
1725 | policydb_destroy(&policydb); | 1747 | policydb_destroy(&policydb); |
1726 | avtab_cache_destroy(); | 1748 | avtab_cache_destroy(); |
1727 | return -EINVAL; | 1749 | return -EINVAL; |
1728 | } | 1750 | } |
1729 | /* Verify that the kernel defined classes are correct. */ | 1751 | if (policydb_load_isids(&policydb, &sidtab)) { |
1730 | if (validate_classes(&policydb)) { | ||
1731 | printk(KERN_ERR | ||
1732 | "SELinux: the definition of a class is incorrect\n"); | ||
1733 | sidtab_destroy(&sidtab); | ||
1734 | policydb_destroy(&policydb); | 1752 | policydb_destroy(&policydb); |
1735 | avtab_cache_destroy(); | 1753 | avtab_cache_destroy(); |
1736 | return -EINVAL; | 1754 | return -EINVAL; |
1737 | } | 1755 | } |
1738 | security_load_policycaps(); | 1756 | security_load_policycaps(); |
1739 | policydb_loaded_version = policydb.policyvers; | ||
1740 | ss_initialized = 1; | 1757 | ss_initialized = 1; |
1741 | seqno = ++latest_granting; | 1758 | seqno = ++latest_granting; |
1742 | selinux_complete_init(); | 1759 | selinux_complete_init(); |
@@ -1759,13 +1776,9 @@ int security_load_policy(void *data, size_t len) | |||
1759 | return -ENOMEM; | 1776 | return -ENOMEM; |
1760 | } | 1777 | } |
1761 | 1778 | ||
1762 | /* Verify that the kernel defined classes are correct. */ | 1779 | if (selinux_set_mapping(&newpolicydb, secclass_map, |
1763 | if (validate_classes(&newpolicydb)) { | 1780 | &map, &map_size)) |
1764 | printk(KERN_ERR | ||
1765 | "SELinux: the definition of a class is incorrect\n"); | ||
1766 | rc = -EINVAL; | ||
1767 | goto err; | 1781 | goto err; |
1768 | } | ||
1769 | 1782 | ||
1770 | rc = security_preserve_bools(&newpolicydb); | 1783 | rc = security_preserve_bools(&newpolicydb); |
1771 | if (rc) { | 1784 | if (rc) { |
@@ -1799,13 +1812,16 @@ int security_load_policy(void *data, size_t len) | |||
1799 | memcpy(&policydb, &newpolicydb, sizeof policydb); | 1812 | memcpy(&policydb, &newpolicydb, sizeof policydb); |
1800 | sidtab_set(&sidtab, &newsidtab); | 1813 | sidtab_set(&sidtab, &newsidtab); |
1801 | security_load_policycaps(); | 1814 | security_load_policycaps(); |
1815 | oldmap = current_mapping; | ||
1816 | current_mapping = map; | ||
1817 | current_mapping_size = map_size; | ||
1802 | seqno = ++latest_granting; | 1818 | seqno = ++latest_granting; |
1803 | policydb_loaded_version = policydb.policyvers; | ||
1804 | write_unlock_irq(&policy_rwlock); | 1819 | write_unlock_irq(&policy_rwlock); |
1805 | 1820 | ||
1806 | /* Free the old policydb and SID table. */ | 1821 | /* Free the old policydb and SID table. */ |
1807 | policydb_destroy(&oldpolicydb); | 1822 | policydb_destroy(&oldpolicydb); |
1808 | sidtab_destroy(&oldsidtab); | 1823 | sidtab_destroy(&oldsidtab); |
1824 | kfree(oldmap); | ||
1809 | 1825 | ||
1810 | avc_ss_reset(seqno); | 1826 | avc_ss_reset(seqno); |
1811 | selnl_notify_policyload(seqno); | 1827 | selnl_notify_policyload(seqno); |
@@ -1815,6 +1831,7 @@ int security_load_policy(void *data, size_t len) | |||
1815 | return 0; | 1831 | return 0; |
1816 | 1832 | ||
1817 | err: | 1833 | err: |
1834 | kfree(map); | ||
1818 | sidtab_destroy(&newsidtab); | 1835 | sidtab_destroy(&newsidtab); |
1819 | policydb_destroy(&newpolicydb); | 1836 | policydb_destroy(&newpolicydb); |
1820 | return rc; | 1837 | return rc; |
@@ -2091,7 +2108,7 @@ out_unlock: | |||
2091 | } | 2108 | } |
2092 | for (i = 0, j = 0; i < mynel; i++) { | 2109 | for (i = 0, j = 0; i < mynel; i++) { |
2093 | rc = avc_has_perm_noaudit(fromsid, mysids[i], | 2110 | rc = avc_has_perm_noaudit(fromsid, mysids[i], |
2094 | SECCLASS_PROCESS, | 2111 | SECCLASS_PROCESS, /* kernel value */ |
2095 | PROCESS__TRANSITION, AVC_STRICT, | 2112 | PROCESS__TRANSITION, AVC_STRICT, |
2096 | NULL); | 2113 | NULL); |
2097 | if (!rc) | 2114 | if (!rc) |
@@ -2119,10 +2136,11 @@ out: | |||
2119 | */ | 2136 | */ |
2120 | int security_genfs_sid(const char *fstype, | 2137 | int security_genfs_sid(const char *fstype, |
2121 | char *path, | 2138 | char *path, |
2122 | u16 sclass, | 2139 | u16 orig_sclass, |
2123 | u32 *sid) | 2140 | u32 *sid) |
2124 | { | 2141 | { |
2125 | int len; | 2142 | int len; |
2143 | u16 sclass; | ||
2126 | struct genfs *genfs; | 2144 | struct genfs *genfs; |
2127 | struct ocontext *c; | 2145 | struct ocontext *c; |
2128 | int rc = 0, cmp = 0; | 2146 | int rc = 0, cmp = 0; |
@@ -2132,6 +2150,8 @@ int security_genfs_sid(const char *fstype, | |||
2132 | 2150 | ||
2133 | read_lock(&policy_rwlock); | 2151 | read_lock(&policy_rwlock); |
2134 | 2152 | ||
2153 | sclass = unmap_class(orig_sclass); | ||
2154 | |||
2135 | for (genfs = policydb.genfs; genfs; genfs = genfs->next) { | 2155 | for (genfs = policydb.genfs; genfs; genfs = genfs->next) { |
2136 | cmp = strcmp(fstype, genfs->fstype); | 2156 | cmp = strcmp(fstype, genfs->fstype); |
2137 | if (cmp <= 0) | 2157 | if (cmp <= 0) |
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 3c8bd8ee0b95..e0d0354008b7 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c | |||
@@ -187,6 +187,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type, | |||
187 | const s8 pattern_type, const s8 end_type, | 187 | const s8 pattern_type, const s8 end_type, |
188 | const char *function) | 188 | const char *function) |
189 | { | 189 | { |
190 | const char *const start = filename; | ||
191 | bool in_repetition = false; | ||
190 | bool contains_pattern = false; | 192 | bool contains_pattern = false; |
191 | unsigned char c; | 193 | unsigned char c; |
192 | unsigned char d; | 194 | unsigned char d; |
@@ -212,9 +214,13 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type, | |||
212 | if (c == '/') | 214 | if (c == '/') |
213 | goto out; | 215 | goto out; |
214 | } | 216 | } |
215 | while ((c = *filename++) != '\0') { | 217 | while (1) { |
218 | c = *filename++; | ||
219 | if (!c) | ||
220 | break; | ||
216 | if (c == '\\') { | 221 | if (c == '\\') { |
217 | switch ((c = *filename++)) { | 222 | c = *filename++; |
223 | switch (c) { | ||
218 | case '\\': /* "\\" */ | 224 | case '\\': /* "\\" */ |
219 | continue; | 225 | continue; |
220 | case '$': /* "\$" */ | 226 | case '$': /* "\$" */ |
@@ -231,6 +237,22 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type, | |||
231 | break; /* Must not contain pattern */ | 237 | break; /* Must not contain pattern */ |
232 | contains_pattern = true; | 238 | contains_pattern = true; |
233 | continue; | 239 | continue; |
240 | case '{': /* "/\{" */ | ||
241 | if (filename - 3 < start || | ||
242 | *(filename - 3) != '/') | ||
243 | break; | ||
244 | if (pattern_type == -1) | ||
245 | break; /* Must not contain pattern */ | ||
246 | contains_pattern = true; | ||
247 | in_repetition = true; | ||
248 | continue; | ||
249 | case '}': /* "\}/" */ | ||
250 | if (*filename != '/') | ||
251 | break; | ||
252 | if (!in_repetition) | ||
253 | break; | ||
254 | in_repetition = false; | ||
255 | continue; | ||
234 | case '0': /* "\ooo" */ | 256 | case '0': /* "\ooo" */ |
235 | case '1': | 257 | case '1': |
236 | case '2': | 258 | case '2': |
@@ -246,6 +268,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type, | |||
246 | continue; /* pattern is not \000 */ | 268 | continue; /* pattern is not \000 */ |
247 | } | 269 | } |
248 | goto out; | 270 | goto out; |
271 | } else if (in_repetition && c == '/') { | ||
272 | goto out; | ||
249 | } else if (tomoyo_is_invalid(c)) { | 273 | } else if (tomoyo_is_invalid(c)) { |
250 | goto out; | 274 | goto out; |
251 | } | 275 | } |
@@ -254,6 +278,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type, | |||
254 | if (!contains_pattern) | 278 | if (!contains_pattern) |
255 | goto out; | 279 | goto out; |
256 | } | 280 | } |
281 | if (in_repetition) | ||
282 | goto out; | ||
257 | return true; | 283 | return true; |
258 | out: | 284 | out: |
259 | printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function, | 285 | printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function, |
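The new '\{' and '\}' cases above only validate the syntax: a repetition group must be written as a full path component, "/\{dir\}/", and may not contain a '/'. The matcher that gives it meaning, one or more consecutive occurrences of the enclosed component, is tomoyo_path_matches_pattern2() further down in this diff. A toy user-space sketch of that one-or-more semantics for a literal component (the function name and examples are made up; the real code also allows the enclosed component to be a pattern):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Does @path look like "<prefix><component>/<component>/.../<suffix>"? */
static bool match_repetition(const char *path, const char *prefix,
			     const char *component, const char *suffix)
{
	size_t plen = strlen(prefix);
	size_t clen = strlen(component);

	if (strncmp(path, prefix, plen) != 0)
		return false;
	path += plen;
	/* Require at least one "component/" occurrence, allow more. */
	if (strncmp(path, component, clen) != 0 || path[clen] != '/')
		return false;
	do {
		path += clen + 1;
	} while (strncmp(path, component, clen) == 0 && path[clen] == '/');
	return strcmp(path, suffix) == 0;
}

int main(void)
{
	/* Roughly what a pattern like "/var/\{tmp\}/file" would accept. */
	printf("%d\n", match_repetition("/var/tmp/file", "/var/", "tmp", "file"));         /* 1 */
	printf("%d\n", match_repetition("/var/tmp/tmp/tmp/file", "/var/", "tmp", "file")); /* 1 */
	printf("%d\n", match_repetition("/var/file", "/var/", "tmp", "file"));             /* 0 */
	return 0;
}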
@@ -360,33 +386,6 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname) | |||
360 | } | 386 | } |
361 | 387 | ||
362 | /** | 388 | /** |
363 | * tomoyo_path_depth - Evaluate the number of '/' in a string. | ||
364 | * | ||
365 | * @pathname: The string to evaluate. | ||
366 | * | ||
367 | * Returns path depth of the string. | ||
368 | * | ||
369 | * I score 2 for each of the '/' in the @pathname | ||
370 | * and score 1 if the @pathname ends with '/'. | ||
371 | */ | ||
372 | static int tomoyo_path_depth(const char *pathname) | ||
373 | { | ||
374 | int i = 0; | ||
375 | |||
376 | if (pathname) { | ||
377 | const char *ep = pathname + strlen(pathname); | ||
378 | if (pathname < ep--) { | ||
379 | if (*ep != '/') | ||
380 | i++; | ||
381 | while (pathname <= ep) | ||
382 | if (*ep-- == '/') | ||
383 | i += 2; | ||
384 | } | ||
385 | } | ||
386 | return i; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token. | 389 | * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token. |
391 | * | 390 | * |
392 | * @filename: The string to evaluate. | 391 | * @filename: The string to evaluate. |
@@ -444,11 +443,10 @@ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr) | |||
444 | ptr->is_dir = len && (name[len - 1] == '/'); | 443 | ptr->is_dir = len && (name[len - 1] == '/'); |
445 | ptr->is_patterned = (ptr->const_len < len); | 444 | ptr->is_patterned = (ptr->const_len < len); |
446 | ptr->hash = full_name_hash(name, len); | 445 | ptr->hash = full_name_hash(name, len); |
447 | ptr->depth = tomoyo_path_depth(name); | ||
448 | } | 446 | } |
449 | 447 | ||
450 | /** | 448 | /** |
451 | * tomoyo_file_matches_to_pattern2 - Pattern matching without '/' character | 449 | * tomoyo_file_matches_pattern2 - Pattern matching without '/' character |
452 | * and "\-" pattern. | 450 | * and "\-" pattern. |
453 | * | 451 | * |
454 | * @filename: The start of string to check. | 452 | * @filename: The start of string to check. |
@@ -458,10 +456,10 @@ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr) | |||
458 | * | 456 | * |
459 | * Returns true if @filename matches @pattern, false otherwise. | 457 | * Returns true if @filename matches @pattern, false otherwise. |
460 | */ | 458 | */ |
461 | static bool tomoyo_file_matches_to_pattern2(const char *filename, | 459 | static bool tomoyo_file_matches_pattern2(const char *filename, |
462 | const char *filename_end, | 460 | const char *filename_end, |
463 | const char *pattern, | 461 | const char *pattern, |
464 | const char *pattern_end) | 462 | const char *pattern_end) |
465 | { | 463 | { |
466 | while (filename < filename_end && pattern < pattern_end) { | 464 | while (filename < filename_end && pattern < pattern_end) { |
467 | char c; | 465 | char c; |
@@ -519,7 +517,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename, | |||
519 | case '*': | 517 | case '*': |
520 | case '@': | 518 | case '@': |
521 | for (i = 0; i <= filename_end - filename; i++) { | 519 | for (i = 0; i <= filename_end - filename; i++) { |
522 | if (tomoyo_file_matches_to_pattern2( | 520 | if (tomoyo_file_matches_pattern2( |
523 | filename + i, filename_end, | 521 | filename + i, filename_end, |
524 | pattern + 1, pattern_end)) | 522 | pattern + 1, pattern_end)) |
525 | return true; | 523 | return true; |
@@ -550,7 +548,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename, | |||
550 | j++; | 548 | j++; |
551 | } | 549 | } |
552 | for (i = 1; i <= j; i++) { | 550 | for (i = 1; i <= j; i++) { |
553 | if (tomoyo_file_matches_to_pattern2( | 551 | if (tomoyo_file_matches_pattern2( |
554 | filename + i, filename_end, | 552 | filename + i, filename_end, |
555 | pattern + 1, pattern_end)) | 553 | pattern + 1, pattern_end)) |
556 | return true; | 554 | return true; |
@@ -567,7 +565,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename, | |||
567 | } | 565 | } |
568 | 566 | ||
569 | /** | 567 | /** |
570 | * tomoyo_file_matches_to_pattern - Pattern matching without '/' character. | 568 | * tomoyo_file_matches_pattern - Pattern matching without '/' character. |
571 | * | 569 | * |
572 | * @filename: The start of string to check. | 570 | * @filename: The start of string to check. |
573 | * @filename_end: The end of string to check. | 571 | * @filename_end: The end of string to check. |
@@ -576,7 +574,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename, | |||
576 | * | 574 | * |
577 | * Returns true if @filename matches @pattern, false otherwise. | 575 | * Returns true if @filename matches @pattern, false otherwise. |
578 | */ | 576 | */ |
579 | static bool tomoyo_file_matches_to_pattern(const char *filename, | 577 | static bool tomoyo_file_matches_pattern(const char *filename, |
580 | const char *filename_end, | 578 | const char *filename_end, |
581 | const char *pattern, | 579 | const char *pattern, |
582 | const char *pattern_end) | 580 | const char *pattern_end) |
@@ -589,10 +587,10 @@ static bool tomoyo_file_matches_to_pattern(const char *filename, | |||
589 | /* Split at "\-" pattern. */ | 587 | /* Split at "\-" pattern. */ |
590 | if (*pattern++ != '\\' || *pattern++ != '-') | 588 | if (*pattern++ != '\\' || *pattern++ != '-') |
591 | continue; | 589 | continue; |
592 | result = tomoyo_file_matches_to_pattern2(filename, | 590 | result = tomoyo_file_matches_pattern2(filename, |
593 | filename_end, | 591 | filename_end, |
594 | pattern_start, | 592 | pattern_start, |
595 | pattern - 2); | 593 | pattern - 2); |
596 | if (first) | 594 | if (first) |
597 | result = !result; | 595 | result = !result; |
598 | if (result) | 596 | if (result) |
@@ -600,13 +598,79 @@ static bool tomoyo_file_matches_to_pattern(const char *filename, | |||
600 | first = false; | 598 | first = false; |
601 | pattern_start = pattern; | 599 | pattern_start = pattern; |
602 | } | 600 | } |
603 | result = tomoyo_file_matches_to_pattern2(filename, filename_end, | 601 | result = tomoyo_file_matches_pattern2(filename, filename_end, |
604 | pattern_start, pattern_end); | 602 | pattern_start, pattern_end); |
605 | return first ? result : !result; | 603 | return first ? result : !result; |
606 | } | 604 | } |
607 | 605 | ||
608 | /** | 606 | /** |
607 | * tomoyo_path_matches_pattern2 - Do pathname pattern matching. | ||
608 | * | ||
609 | * @f: The start of string to check. | ||
610 | * @p: The start of pattern to compare. | ||
611 | * | ||
612 | * Returns true if @f matches @p, false otherwise. | ||
613 | */ | ||
614 | static bool tomoyo_path_matches_pattern2(const char *f, const char *p) | ||
615 | { | ||
616 | const char *f_delimiter; | ||
617 | const char *p_delimiter; | ||
618 | |||
619 | while (*f && *p) { | ||
620 | f_delimiter = strchr(f, '/'); | ||
621 | if (!f_delimiter) | ||
622 | f_delimiter = f + strlen(f); | ||
623 | p_delimiter = strchr(p, '/'); | ||
624 | if (!p_delimiter) | ||
625 | p_delimiter = p + strlen(p); | ||
626 | if (*p == '\\' && *(p + 1) == '{') | ||
627 | goto recursive; | ||
628 | if (!tomoyo_file_matches_pattern(f, f_delimiter, p, | ||
629 | p_delimiter)) | ||
630 | return false; | ||
631 | f = f_delimiter; | ||
632 | if (*f) | ||
633 | f++; | ||
634 | p = p_delimiter; | ||
635 | if (*p) | ||
636 | p++; | ||
637 | } | ||
638 | /* Ignore trailing "\*" and "\@" in @pattern. */ | ||
639 | while (*p == '\\' && | ||
640 | (*(p + 1) == '*' || *(p + 1) == '@')) | ||
641 | p += 2; | ||
642 | return !*f && !*p; | ||
643 | recursive: | ||
644 | /* | ||
645 | * The "\{" pattern is permitted only after '/' character. | ||
646 | * This guarantees that below "*(p - 1)" is safe. | ||
647 | * Also, the "\}" pattern is permitted only before '/' character | ||
648 | * so that "\{" + "\}" pair will not break the "\-" operator. | ||
649 | */ | ||
650 | if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' || | ||
651 | *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\') | ||
652 | return false; /* Bad pattern. */ | ||
653 | do { | ||
654 | /* Compare current component with pattern. */ | ||
655 | if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2, | ||
656 | p_delimiter - 2)) | ||
657 | break; | ||
658 | /* Proceed to next component. */ | ||
659 | f = f_delimiter; | ||
660 | if (!*f) | ||
661 | break; | ||
662 | f++; | ||
663 | /* Continue comparison. */ | ||
664 | if (tomoyo_path_matches_pattern2(f, p_delimiter + 1)) | ||
665 | return true; | ||
666 | f_delimiter = strchr(f, '/'); | ||
667 | } while (f_delimiter); | ||
668 | return false; /* Not matched. */ | ||
669 | } | ||
670 | |||
671 | /** | ||
609 | * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern. | 672 | * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern. |
673 | * | ||
610 | * @filename: The filename to check. | 674 | * @filename: The filename to check. |
611 | * @pattern: The pattern to compare. | 675 | * @pattern: The pattern to compare. |
612 | * | 676 | * |
@@ -615,24 +679,24 @@ static bool tomoyo_file_matches_to_pattern(const char *filename, | |||
615 | * The following patterns are available. | 679 | * The following patterns are available. |
616 | * \\ \ itself. | 680 | * \\ \ itself. |
617 | * \ooo Octal representation of a byte. | 681 | * \ooo Octal representation of a byte. |
618 | * \* More than or equals to 0 character other than '/'. | 682 | * \* Zero or more repetitions of characters other than '/'. |
619 | * \@ More than or equals to 0 character other than '/' or '.'. | 683 | * \@ Zero or more repetitions of characters other than '/' or '.'. |
620 | * \? 1 byte character other than '/'. | 684 | * \? 1 byte character other than '/'. |
621 | * \$ More than or equals to 1 decimal digit. | 685 | * \$ One or more repetitions of decimal digits. |
622 | * \+ 1 decimal digit. | 686 | * \+ 1 decimal digit. |
623 | * \X More than or equals to 1 hexadecimal digit. | 687 | * \X One or more repetitions of hexadecimal digits. |
624 | * \x 1 hexadecimal digit. | 688 | * \x 1 hexadecimal digit. |
625 | * \A More than or equals to 1 alphabet character. | 689 | * \A One or more repetitions of alphabet characters. |
626 | * \a 1 alphabet character. | 690 | * \a 1 alphabet character. |
691 | * | ||
627 | * \- Subtraction operator. | 692 | * \- Subtraction operator. |
693 | * | ||
694 | * /\{dir\}/ '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/ | ||
695 | * /dir/dir/dir/ ). | ||
628 | */ | 696 | */ |
629 | bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, | 697 | bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, |
630 | const struct tomoyo_path_info *pattern) | 698 | const struct tomoyo_path_info *pattern) |
631 | { | 699 | { |
632 | /* | ||
633 | if (!filename || !pattern) | ||
634 | return false; | ||
635 | */ | ||
636 | const char *f = filename->name; | 700 | const char *f = filename->name; |
637 | const char *p = pattern->name; | 701 | const char *p = pattern->name; |
638 | const int len = pattern->const_len; | 702 | const int len = pattern->const_len; |
@@ -640,37 +704,15 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, | |||
640 | /* If @pattern doesn't contain pattern, I can use strcmp(). */ | 704 | /* If @pattern doesn't contain pattern, I can use strcmp(). */ |
641 | if (!pattern->is_patterned) | 705 | if (!pattern->is_patterned) |
642 | return !tomoyo_pathcmp(filename, pattern); | 706 | return !tomoyo_pathcmp(filename, pattern); |
643 | /* Dont compare if the number of '/' differs. */ | 707 | /* Don't compare directory and non-directory. */ |
644 | if (filename->depth != pattern->depth) | 708 | if (filename->is_dir != pattern->is_dir) |
645 | return false; | 709 | return false; |
646 | /* Compare the initial length without patterns. */ | 710 | /* Compare the initial length without patterns. */ |
647 | if (strncmp(f, p, len)) | 711 | if (strncmp(f, p, len)) |
648 | return false; | 712 | return false; |
649 | f += len; | 713 | f += len; |
650 | p += len; | 714 | p += len; |
651 | /* Main loop. Compare each directory component. */ | 715 | return tomoyo_path_matches_pattern2(f, p); |
652 | while (*f && *p) { | ||
653 | const char *f_delimiter = strchr(f, '/'); | ||
654 | const char *p_delimiter = strchr(p, '/'); | ||
655 | if (!f_delimiter) | ||
656 | f_delimiter = f + strlen(f); | ||
657 | if (!p_delimiter) | ||
658 | p_delimiter = p + strlen(p); | ||
659 | if (!tomoyo_file_matches_to_pattern(f, f_delimiter, | ||
660 | p, p_delimiter)) | ||
661 | return false; | ||
662 | f = f_delimiter; | ||
663 | if (*f) | ||
664 | f++; | ||
665 | p = p_delimiter; | ||
666 | if (*p) | ||
667 | p++; | ||
668 | } | ||
669 | /* Ignore trailing "\*" and "\@" in @pattern. */ | ||
670 | while (*p == '\\' && | ||
671 | (*(p + 1) == '*' || *(p + 1) == '@')) | ||
672 | p += 2; | ||
673 | return !*f && !*p; | ||
674 | } | 716 | } |
675 | 717 | ||
676 | /** | 718 | /** |
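The recursive handling of the new "/\{dir\}/" construct in tomoyo_path_matches_pattern2() above is easier to follow in isolation. The following is a stand-alone sketch, not the TOMOYO code: it only understands literal path components plus the "\{dir\}" repetition, compares components with plain strncmp() instead of tomoyo_file_matches_pattern(), and skips the "\{ only after '/'" validation the kernel performs. It is meant purely to show how consuming one matching component and then recursing on the rest of the pattern yields the "one or more repetitions of dir/" behaviour documented in the pattern list above.

----
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Length of the current path component, i.e. up to the next '/' or NUL. */
static size_t component_len(const char *s)
{
	const char *slash = strchr(s, '/');

	return slash ? (size_t)(slash - s) : strlen(s);
}

/* Literal component matching plus a "\{dir\}" repetition, nothing else. */
static bool match(const char *f, const char *p)
{
	while (*f && *p) {
		size_t flen = component_len(f);
		size_t plen = component_len(p);

		if (plen > 4 && !strncmp(p, "\\{", 2) &&
		    !strncmp(p + plen - 2, "\\}", 2)) {
			/* "\{dir\}": consume one or more "dir" components. */
			const char *inner = p + 2;
			size_t ilen = plen - 4;
			const char *rest = p[plen] ? p + plen + 1 : p + plen;

			do {
				if (component_len(f) != ilen ||
				    strncmp(f, inner, ilen))
					return false;
				f += component_len(f);
				if (*f)
					f++;
				/* Try to finish the rest of the pattern here. */
				if (match(f, rest))
					return true;
			} while (*f);
			return false;
		}
		if (flen != plen || strncmp(f, p, flen))
			return false;
		f += flen;
		if (*f)
			f++;
		p += plen;
		if (*p)
			p++;
	}
	return !*f && !*p;
}

int main(void)
{
	/* "\{www\}" stands for one or more "www/" components. */
	printf("%d\n", match("var/www/html/", "var/\\{www\\}/html/"));     /* 1 */
	printf("%d\n", match("var/www/www/html/", "var/\\{www\\}/html/")); /* 1 */
	printf("%d\n", match("var/html/", "var/\\{www\\}/html/"));         /* 0 */
	return 0;
}
----

Built with gcc -std=gnu99, the three checks print 1, 1 and 0, matching the /dir/ and /dir/dir/ examples given in the comment block above.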
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 31df541911f7..92169d29b2db 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h | |||
@@ -56,9 +56,6 @@ struct tomoyo_page_buffer { | |||
56 | * (5) "is_patterned" is a bool which is true if "name" contains wildcard | 56 | * (5) "is_patterned" is a bool which is true if "name" contains wildcard |
57 | * characters, false otherwise. This allows TOMOYO to use "hash" and | 57 | * characters, false otherwise. This allows TOMOYO to use "hash" and |
58 | * strcmp() for string comparison if "is_patterned" is false. | 58 | * strcmp() for string comparison if "is_patterned" is false. |
59 | * (6) "depth" is calculated using the number of "/" characters in "name". | ||
60 | * This allows TOMOYO to avoid comparing two pathnames which never match | ||
61 | * (e.g. whether "/var/www/html/index.html" matches "/tmp/sh-thd-\$"). | ||
62 | */ | 59 | */ |
63 | struct tomoyo_path_info { | 60 | struct tomoyo_path_info { |
64 | const char *name; | 61 | const char *name; |
@@ -66,7 +63,6 @@ struct tomoyo_path_info { | |||
66 | u16 const_len; /* = tomoyo_const_part_length(name) */ | 63 | u16 const_len; /* = tomoyo_const_part_length(name) */ |
67 | bool is_dir; /* = tomoyo_strendswith(name, "/") */ | 64 | bool is_dir; /* = tomoyo_strendswith(name, "/") */ |
68 | bool is_patterned; /* = tomoyo_path_contains_pattern(name) */ | 65 | bool is_patterned; /* = tomoyo_path_contains_pattern(name) */ |
69 | u16 depth; /* = tomoyo_path_depth(name) */ | ||
70 | }; | 66 | }; |
71 | 67 | ||
72 | /* | 68 | /* |
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index 5f2e33263371..917f564cdab1 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c | |||
@@ -13,6 +13,8 @@ | |||
13 | #include <linux/mount.h> | 13 | #include <linux/mount.h> |
14 | #include <linux/mnt_namespace.h> | 14 | #include <linux/mnt_namespace.h> |
15 | #include <linux/fs_struct.h> | 15 | #include <linux/fs_struct.h> |
16 | #include <linux/hash.h> | ||
17 | |||
16 | #include "common.h" | 18 | #include "common.h" |
17 | #include "realpath.h" | 19 | #include "realpath.h" |
18 | 20 | ||
@@ -263,7 +265,8 @@ static unsigned int tomoyo_quota_for_savename; | |||
263 | * table. Frequency of appending strings is very low. So we don't need | 265 | * table. Frequency of appending strings is very low. So we don't need |
264 | * large (e.g. 64k) hash size. 256 will be sufficient. | 266 | * large (e.g. 64k) hash size. 256 will be sufficient. |
265 | */ | 267 | */ |
266 | #define TOMOYO_MAX_HASH 256 | 268 | #define TOMOYO_HASH_BITS 8 |
269 | #define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS) | ||
267 | 270 | ||
268 | /* | 271 | /* |
269 | * tomoyo_name_entry is a structure which is used for linking | 272 | * tomoyo_name_entry is a structure which is used for linking |
@@ -315,6 +318,7 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name) | |||
315 | struct tomoyo_free_memory_block_list *fmb; | 318 | struct tomoyo_free_memory_block_list *fmb; |
316 | int len; | 319 | int len; |
317 | char *cp; | 320 | char *cp; |
321 | struct list_head *head; | ||
318 | 322 | ||
319 | if (!name) | 323 | if (!name) |
320 | return NULL; | 324 | return NULL; |
@@ -325,9 +329,10 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name) | |||
325 | return NULL; | 329 | return NULL; |
326 | } | 330 | } |
327 | hash = full_name_hash((const unsigned char *) name, len - 1); | 331 | hash = full_name_hash((const unsigned char *) name, len - 1); |
332 | head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; | ||
333 | |||
328 | mutex_lock(&lock); | 334 | mutex_lock(&lock); |
329 | list_for_each_entry(ptr, &tomoyo_name_list[hash % TOMOYO_MAX_HASH], | 335 | list_for_each_entry(ptr, head, list) { |
330 | list) { | ||
331 | if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name)) | 336 | if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name)) |
332 | goto out; | 337 | goto out; |
333 | } | 338 | } |
@@ -365,7 +370,7 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name) | |||
365 | tomoyo_fill_path_info(&ptr->entry); | 370 | tomoyo_fill_path_info(&ptr->entry); |
366 | fmb->ptr += len; | 371 | fmb->ptr += len; |
367 | fmb->len -= len; | 372 | fmb->len -= len; |
368 | list_add_tail(&ptr->list, &tomoyo_name_list[hash % TOMOYO_MAX_HASH]); | 373 | list_add_tail(&ptr->list, head); |
369 | if (fmb->len == 0) { | 374 | if (fmb->len == 0) { |
370 | list_del(&fmb->list); | 375 | list_del(&fmb->list); |
371 | kfree(fmb); | 376 | kfree(fmb); |
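Since TOMOYO_MAX_HASH is now derived as 1u << TOMOYO_HASH_BITS, the bucket index can come from hash_long() instead of an open-coded modulo. The sketch below is a user-space illustration of that bucket selection, not kernel code: the hash_long() shown is a simplified golden-ratio multiply (the constant and the exact mixing in <linux/hash.h> differ between kernel versions, so treat it as an assumption), and name_hash() is only a toy stand-in for full_name_hash(). The point is that hash_long(hash, 8) maps an arbitrary 32-bit hash onto one of the 256 buckets the comment above calls sufficient, which is why the table size has to stay a power of two.

----
#include <stdio.h>

#define TOMOYO_HASH_BITS 8
#define TOMOYO_MAX_HASH  (1u << TOMOYO_HASH_BITS)

/* Simplified stand-in for the kernel's hash_long(): multiply by a 64-bit
 * golden-ratio constant (an assumption) and keep the top 'bits' bits. */
static unsigned int hash_long(unsigned long val, unsigned int bits)
{
	unsigned long long h = (unsigned long long)val * 0x61C8864680B583EBULL;

	return (unsigned int)(h >> (64 - bits));
}

/* Toy stand-in for full_name_hash(): fold the bytes of the name. */
static unsigned int name_hash(const char *name)
{
	unsigned int hash = 0;

	while (*name)
		hash = (hash + (unsigned char)*name++) * 11;
	return hash;
}

int main(void)
{
	const char *names[] = { "/bin/sh", "/etc/passwd", "/var/log/messages" };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int hash = name_hash(names[i]);

		/* Bucket index is always < TOMOYO_MAX_HASH because the
		 * table size is a power of two (1u << TOMOYO_HASH_BITS). */
		printf("%-20s -> bucket %u of %u\n", names[i],
		       hash_long(hash, TOMOYO_HASH_BITS), TOMOYO_MAX_HASH);
	}
	return 0;
}
----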
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c index 64b859925c0b..7717e01fc071 100644 --- a/sound/pcmcia/pdaudiocf/pdaudiocf.c +++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c | |||
@@ -131,7 +131,7 @@ static int snd_pdacf_probe(struct pcmcia_device *link) | |||
131 | return err; | 131 | return err; |
132 | } | 132 | } |
133 | 133 | ||
134 | snd_card_set_dev(card, &handle_to_dev(link)); | 134 | snd_card_set_dev(card, &link->dev); |
135 | 135 | ||
136 | pdacf->index = i; | 136 | pdacf->index = i; |
137 | card_list[i] = card; | 137 | card_list[i] = card; |
@@ -142,12 +142,10 @@ static int snd_pdacf_probe(struct pcmcia_device *link) | |||
142 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 142 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
143 | link->io.NumPorts1 = 16; | 143 | link->io.NumPorts1 = 16; |
144 | 144 | ||
145 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT | IRQ_FORCED_PULSE; | 145 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE; |
146 | // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 146 | // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
147 | 147 | ||
148 | link->irq.IRQInfo1 = 0 /* | IRQ_LEVEL_ID */; | ||
149 | link->irq.Handler = pdacf_interrupt; | 148 | link->irq.Handler = pdacf_interrupt; |
150 | link->irq.Instance = pdacf; | ||
151 | link->conf.Attributes = CONF_ENABLE_IRQ; | 149 | link->conf.Attributes = CONF_ENABLE_IRQ; |
152 | link->conf.IntType = INT_MEMORY_AND_IO; | 150 | link->conf.IntType = INT_MEMORY_AND_IO; |
153 | link->conf.ConfigIndex = 1; | 151 | link->conf.ConfigIndex = 1; |
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c index 1492744ad67f..7be3b3357045 100644 --- a/sound/pcmcia/vx/vxpocket.c +++ b/sound/pcmcia/vx/vxpocket.c | |||
@@ -161,11 +161,9 @@ static int snd_vxpocket_new(struct snd_card *card, int ibl, | |||
161 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 161 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
162 | link->io.NumPorts1 = 16; | 162 | link->io.NumPorts1 = 16; |
163 | 163 | ||
164 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 164 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; |
165 | 165 | ||
166 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | ||
167 | link->irq.Handler = &snd_vx_irq_handler; | 166 | link->irq.Handler = &snd_vx_irq_handler; |
168 | link->irq.Instance = chip; | ||
169 | 167 | ||
170 | link->conf.Attributes = CONF_ENABLE_IRQ; | 168 | link->conf.Attributes = CONF_ENABLE_IRQ; |
171 | link->conf.IntType = INT_MEMORY_AND_IO; | 169 | link->conf.IntType = INT_MEMORY_AND_IO; |
@@ -244,7 +242,7 @@ static int vxpocket_config(struct pcmcia_device *link) | |||
244 | if (ret) | 242 | if (ret) |
245 | goto failed; | 243 | goto failed; |
246 | 244 | ||
247 | chip->dev = &handle_to_dev(link); | 245 | chip->dev = &link->dev; |
248 | snd_card_set_dev(chip->card, chip->dev); | 246 | snd_card_set_dev(chip->card, chip->dev); |
249 | 247 | ||
250 | if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0) | 248 | if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0) |
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index 0854f110bf7f..fe08660ce0bd 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore | |||
@@ -12,6 +12,7 @@ perf*.1 | |||
12 | perf*.xml | 12 | perf*.xml |
13 | perf*.html | 13 | perf*.html |
14 | common-cmds.h | 14 | common-cmds.h |
15 | perf.data | ||
15 | tags | 16 | tags |
16 | TAGS | 17 | TAGS |
17 | cscope* | 18 | cscope* |
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt new file mode 100644 index 000000000000..ae525ac5a2ce --- /dev/null +++ b/tools/perf/Documentation/perf-bench.txt | |||
@@ -0,0 +1,120 @@ | |||
1 | perf-bench(1) | ||
2 | ============ | ||
3 | |||
4 | NAME | ||
5 | ---- | ||
6 | perf-bench - General framework for benchmark suites | ||
7 | |||
8 | SYNOPSIS | ||
9 | -------- | ||
10 | [verse] | ||
11 | 'perf bench' [<common options>] <subsystem> <suite> [<options>] | ||
12 | |||
13 | DESCRIPTION | ||
14 | ----------- | ||
15 | This 'perf bench' command is a general framework for benchmark suites. | ||
16 | |||
17 | COMMON OPTIONS | ||
18 | -------------- | ||
19 | -f:: | ||
20 | --format=:: | ||
21 | Specify format style. | ||
22 | Currently available format styles are: | ||
23 | |||
24 | 'default':: | ||
25 | Default style. This is mainly for human reading. | ||
26 | --------------------- | ||
27 | % perf bench sched pipe # with no style specified | ||
28 | (executing 1000000 pipe operations between two tasks) | ||
29 | Total time:5.855 sec | ||
30 | 5.855061 usecs/op | ||
31 | 170792 ops/sec | ||
32 | --------------------- | ||
33 | |||
34 | 'simple':: | ||
35 | This simple style is friendly for automated | ||
36 | processing by scripts. | ||
37 | --------------------- | ||
38 | % perf bench --format=simple sched pipe # specified simple | ||
39 | 5.988 | ||
40 | --------------------- | ||
41 | |||
42 | SUBSYSTEM | ||
43 | --------- | ||
44 | |||
45 | 'sched':: | ||
46 | Scheduler and IPC mechanisms. | ||
47 | |||
48 | SUITES FOR 'sched' | ||
49 | ~~~~~~~~~~~~~~~~~~ | ||
50 | *messaging*:: | ||
51 | Suite for evaluating performance of scheduler and IPC mechanisms. | ||
52 | Based on hackbench by Rusty Russell. | ||
53 | |||
54 | Options of *messaging* | ||
55 | ^^^^^^^^^^^^^^^^^^^^^^ | ||
56 | -p:: | ||
57 | --pipe:: | ||
58 | Use pipe() instead of socketpair() | ||
59 | |||
60 | -t:: | ||
61 | --thread:: | ||
62 | Be multi-threaded instead of multi-process | ||
63 | |||
64 | -g:: | ||
65 | --group=:: | ||
66 | Specify number of groups | ||
67 | |||
68 | -l:: | ||
69 | --loop=:: | ||
70 | Specify number of loops | ||
71 | |||
72 | Example of *messaging* | ||
73 | ^^^^^^^^^^^^^^^^^^^^^^ | ||
74 | |||
75 | --------------------- | ||
76 | % perf bench sched messaging # run with default | ||
77 | options (20 sender and receiver processes per group) | ||
78 | (10 groups == 400 processes run) | ||
79 | |||
80 | Total time:0.308 sec | ||
81 | |||
82 | % perf bench sched messaging -t -g 20 # be multi-threaded, with 20 groups | ||
83 | (20 sender and receiver threads per group) | ||
84 | (20 groups == 800 threads run) | ||
85 | |||
86 | Total time:0.582 sec | ||
87 | --------------------- | ||
88 | |||
89 | *pipe*:: | ||
90 | Suite for pipe() system call. | ||
91 | Based on pipe-test-1m.c by Ingo Molnar. | ||
92 | |||
93 | Options of *pipe* | ||
94 | ^^^^^^^^^^^^^^^^^ | ||
95 | -l:: | ||
96 | --loop=:: | ||
97 | Specify number of loops. | ||
98 | |||
99 | Example of *pipe* | ||
100 | ^^^^^^^^^^^^^^^^^ | ||
101 | |||
102 | --------------------- | ||
103 | % perf bench sched pipe | ||
104 | (executing 1000000 pipe operations between two tasks) | ||
105 | |||
106 | Total time:8.091 sec | ||
107 | 8.091833 usecs/op | ||
108 | 123581 ops/sec | ||
109 | |||
110 | % perf bench sched pipe -l 1000 # loop 1000 | ||
111 | (executing 1000 pipe operations between two tasks) | ||
112 | |||
113 | Total time:0.016 sec | ||
114 | 16.948000 usecs/op | ||
115 | 59004 ops/sec | ||
116 | --------------------- | ||
117 | |||
118 | SEE ALSO | ||
119 | -------- | ||
120 | linkperf:perf[1] | ||
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt new file mode 100644 index 000000000000..01b642c0bf8f --- /dev/null +++ b/tools/perf/Documentation/perf-buildid-list.txt | |||
@@ -0,0 +1,34 @@ | |||
1 | perf-buildid-list(1) | ||
2 | ==================== | ||
3 | |||
4 | NAME | ||
5 | ---- | ||
6 | perf-buildid-list - List the buildids in a perf.data file | ||
7 | |||
8 | SYNOPSIS | ||
9 | -------- | ||
10 | [verse] | ||
11 | 'perf buildid-list <options>' | ||
12 | |||
13 | DESCRIPTION | ||
14 | ----------- | ||
15 | This command displays the buildids found in a perf.data file, so that other | ||
16 | tools can be used to fetch packages with matching symbol tables for use by | ||
17 | perf report. | ||
18 | |||
19 | OPTIONS | ||
20 | ------- | ||
21 | -i:: | ||
22 | --input=:: | ||
23 | Input file name. (default: perf.data) | ||
24 | -f:: | ||
25 | --force:: | ||
26 | Don't do ownership validation. | ||
27 | -v:: | ||
28 | --verbose:: | ||
29 | Be more verbose. | ||
30 | |||
31 | SEE ALSO | ||
32 | -------- | ||
33 | linkperf:perf-record[1], linkperf:perf-top[1], | ||
34 | linkperf:perf-report[1] | ||
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt new file mode 100644 index 000000000000..44b0ce35c28a --- /dev/null +++ b/tools/perf/Documentation/perf-kmem.txt | |||
@@ -0,0 +1,44 @@ | |||
1 | perf-kmem(1) | ||
2 | ============== | ||
3 | |||
4 | NAME | ||
5 | ---- | ||
6 | perf-kmem - Tool to trace/measure kernel memory (slab) properties | ||
7 | |||
8 | SYNOPSIS | ||
9 | -------- | ||
10 | [verse] | ||
11 | 'perf kmem' {record} [<options>] | ||
12 | |||
13 | DESCRIPTION | ||
14 | ----------- | ||
15 | There are two variants of perf kmem: | ||
16 | |||
17 | 'perf kmem record <command>' to record the kmem events | ||
18 | of an arbitrary workload. | ||
19 | |||
20 | 'perf kmem' to report kernel memory statistics. | ||
21 | |||
22 | OPTIONS | ||
23 | ------- | ||
24 | -i <file>:: | ||
25 | --input=<file>:: | ||
26 | Select the input file (default: perf.data) | ||
27 | |||
28 | --stat=<caller|alloc>:: | ||
29 | Select per callsite or per allocation statistics | ||
30 | |||
31 | -s <key[,key2...]>:: | ||
32 | --sort=<key[,key2...]>:: | ||
33 | Sort the output (default: frag,hit,bytes) | ||
34 | |||
35 | -l <num>:: | ||
36 | --line=<num>:: | ||
37 | Print n lines only | ||
38 | |||
39 | --raw-ip:: | ||
40 | Print raw ip instead of symbol | ||
41 | |||
42 | SEE ALSO | ||
43 | -------- | ||
44 | linkperf:perf-record[1] | ||
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt new file mode 100644 index 000000000000..9270594e6dfd --- /dev/null +++ b/tools/perf/Documentation/perf-probe.txt | |||
@@ -0,0 +1,49 @@ | |||
1 | perf-probe(1) | ||
2 | ============= | ||
3 | |||
4 | NAME | ||
5 | ---- | ||
6 | perf-probe - Define new dynamic tracepoints | ||
7 | |||
8 | SYNOPSIS | ||
9 | -------- | ||
10 | [verse] | ||
11 | 'perf probe' [options] --add 'PROBE' [--add 'PROBE' ...] | ||
12 | or | ||
13 | 'perf probe' [options] 'PROBE' ['PROBE' ...] | ||
14 | |||
15 | |||
16 | DESCRIPTION | ||
17 | ----------- | ||
18 | This command defines dynamic tracepoint events, by symbol and registers | ||
19 | without debuginfo, or by C expressions (C line numbers, C function names, | ||
20 | and C local variables) with debuginfo. | ||
21 | |||
22 | |||
23 | OPTIONS | ||
24 | ------- | ||
25 | -k:: | ||
26 | --vmlinux=PATH:: | ||
27 | Specify vmlinux path which has debuginfo (Dwarf binary). | ||
28 | |||
29 | -v:: | ||
30 | --verbose:: | ||
31 | Be more verbose (show parsed arguments, etc). | ||
32 | |||
33 | -a:: | ||
34 | --add:: | ||
35 | Define a probe point (see PROBE SYNTAX for detail) | ||
36 | |||
37 | PROBE SYNTAX | ||
38 | ------------ | ||
39 | Probe points are defined by the following syntax. | ||
40 | |||
41 | "FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" | ||
42 | |||
43 | 'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function. | ||
44 | It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. | ||
45 | 'ARG' specifies the arguments of this probe point. You can use the name of a local variable, or the kprobe-tracer argument format (e.g. $retval, %ax, etc). | ||
46 | |||
47 | SEE ALSO | ||
48 | -------- | ||
49 | linkperf:perf-trace[1], linkperf:perf-record[1] | ||
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 0ff23de9e453..fc46c0b40f6e 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt | |||
@@ -26,11 +26,19 @@ OPTIONS | |||
26 | 26 | ||
27 | -e:: | 27 | -e:: |
28 | --event=:: | 28 | --event=:: |
29 | Select the PMU event. Selection can be a symbolic event name | 29 | Select the PMU event. Selection can be: |
30 | (use 'perf list' to list all events) or a raw PMU | ||
31 | event (eventsel+umask) in the form of rNNN where NNN is a | ||
32 | hexadecimal event descriptor. | ||
33 | 30 | ||
31 | - a symbolic event name (use 'perf list' to list all events) | ||
32 | |||
33 | - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a | ||
34 | hexadecimal event descriptor. | ||
35 | |||
36 | - a hardware breakpoint event in the form of '\mem:addr[:access]' | ||
37 | where addr is the address in memory you want to break at. | ||
38 | Access is the memory access type (read, write, execute); it can | ||
39 | be passed as follows: '\mem:addr[:[r][w][x]]'. | ||
40 | If you want to profile read-write accesses at 0x1000, just set | ||
41 | 'mem:0x1000:rw'. | ||
34 | -a:: | 42 | -a:: |
35 | System-wide collection. | 43 | System-wide collection. |
36 | 44 | ||
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 59f0b846cd71..9dccb180b7af 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt | |||
@@ -24,11 +24,11 @@ OPTIONS | |||
24 | --dsos=:: | 24 | --dsos=:: |
25 | Only consider symbols in these dsos. CSV that understands | 25 | Only consider symbols in these dsos. CSV that understands |
26 | file://filename entries. | 26 | file://filename entries. |
27 | -n | 27 | -n:: |
28 | --show-nr-samples | 28 | --show-nr-samples:: |
29 | Show the number of samples for each symbol | 29 | Show the number of samples for each symbol |
30 | -T | 30 | -T:: |
31 | --threads | 31 | --threads:: |
32 | Show per-thread event counters | 32 | Show per-thread event counters |
33 | -C:: | 33 | -C:: |
34 | --comms=:: | 34 | --comms=:: |
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt index a7910099d6fd..4b1788355eca 100644 --- a/tools/perf/Documentation/perf-timechart.txt +++ b/tools/perf/Documentation/perf-timechart.txt | |||
@@ -31,9 +31,12 @@ OPTIONS | |||
31 | -w:: | 31 | -w:: |
32 | --width=:: | 32 | --width=:: |
33 | Select the width of the SVG file (default: 1000) | 33 | Select the width of the SVG file (default: 1000) |
34 | -p:: | 34 | -P:: |
35 | --power-only:: | 35 | --power-only:: |
36 | Only output the CPU power section of the diagram | 36 | Only output the CPU power section of the diagram |
37 | -p:: | ||
38 | --process:: | ||
39 | Select the processes to display, by name or PID | ||
37 | 40 | ||
38 | 41 | ||
39 | SEE ALSO | 42 | SEE ALSO |
diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt new file mode 100644 index 000000000000..c5f55f439091 --- /dev/null +++ b/tools/perf/Documentation/perf-trace-perl.txt | |||
@@ -0,0 +1,219 @@ | |||
1 | perf-trace-perl(1) | ||
2 | ================== | ||
3 | |||
4 | NAME | ||
5 | ---- | ||
6 | perf-trace-perl - Process trace data with a Perl script | ||
7 | |||
8 | SYNOPSIS | ||
9 | -------- | ||
10 | [verse] | ||
11 | 'perf trace' [-s [lang]:script[.ext] ] | ||
12 | |||
13 | DESCRIPTION | ||
14 | ----------- | ||
15 | |||
16 | This perf trace option is used to process perf trace data using perf's | ||
17 | built-in Perl interpreter. It reads and processes the input file and | ||
18 | displays the results of the trace analysis implemented in the given | ||
19 | Perl script, if any. | ||
20 | |||
21 | STARTER SCRIPTS | ||
22 | --------------- | ||
23 | |||
24 | You can avoid reading the rest of this document by running 'perf trace | ||
25 | -g perl' in the same directory as an existing perf.data trace file. | ||
26 | That will generate a starter script containing a handler for each of | ||
27 | the event types in the trace file; it simply prints every available | ||
28 | field for each event in the trace file. | ||
29 | |||
30 | You can also look at the existing scripts in | ||
31 | ~/libexec/perf-core/scripts/perl for typical examples showing how to | ||
32 | do basic things like aggregate event data, print results, etc. Also, | ||
33 | the check-perf-trace.pl script, while not interesting for its results, | ||
34 | attempts to exercise all of the main scripting features. | ||
35 | |||
36 | EVENT HANDLERS | ||
37 | -------------- | ||
38 | |||
39 | When perf trace is invoked using a trace script, a user-defined | ||
40 | 'handler function' is called for each event in the trace. If there's | ||
41 | no handler function defined for a given event type, the event is | ||
42 | ignored (or passed to a 'trace_unhandled' function, see below) and the | ||
43 | next event is processed. | ||
44 | |||
45 | Most of the event's field values are passed as arguments to the | ||
46 | handler function; some of the less common ones aren't - those are | ||
47 | available as calls back into the perf executable (see below). | ||
48 | |||
49 | As an example, the following perf record command can be used to record | ||
50 | all sched_wakeup events in the system: | ||
51 | |||
52 | # perf record -c 1 -f -a -M -R -e sched:sched_wakeup | ||
53 | |||
54 | Traces meant to be processed using a script should be recorded with | ||
55 | the above options: -c 1 says to sample every event, -a to enable | ||
56 | system-wide collection, -M to multiplex the output, and -R to collect | ||
57 | raw samples. | ||
58 | |||
59 | The format file for the sched_wakeup event defines the following fields | ||
60 | (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format): | ||
61 | |||
62 | ---- | ||
63 | format: | ||
64 | field:unsigned short common_type; | ||
65 | field:unsigned char common_flags; | ||
66 | field:unsigned char common_preempt_count; | ||
67 | field:int common_pid; | ||
68 | field:int common_lock_depth; | ||
69 | |||
70 | field:char comm[TASK_COMM_LEN]; | ||
71 | field:pid_t pid; | ||
72 | field:int prio; | ||
73 | field:int success; | ||
74 | field:int target_cpu; | ||
75 | ---- | ||
76 | |||
77 | The handler function for this event would be defined as: | ||
78 | |||
79 | ---- | ||
80 | sub sched::sched_wakeup | ||
81 | { | ||
82 | my ($event_name, $context, $common_cpu, $common_secs, | ||
83 | $common_nsecs, $common_pid, $common_comm, | ||
84 | $comm, $pid, $prio, $success, $target_cpu) = @_; | ||
85 | } | ||
86 | ---- | ||
87 | |||
88 | The handler function takes the form subsystem::event_name. | ||
89 | |||
90 | The $common_* arguments in the handler's argument list are the set of | ||
91 | arguments passed to all event handlers; some of the fields correspond | ||
92 | to the common_* fields in the format file, but some are synthesized, | ||
93 | and some of the common_* fields aren't common enough to be passed | ||
94 | to every event as arguments but are available as library functions. | ||
95 | |||
96 | Here's a brief description of each of the invariant event args: | ||
97 | |||
98 | $event_name the name of the event as text | ||
99 | $context an opaque 'cookie' used in calls back into perf | ||
100 | $common_cpu the cpu the event occurred on | ||
101 | $common_secs the secs portion of the event timestamp | ||
102 | $common_nsecs the nsecs portion of the event timestamp | ||
103 | $common_pid the pid of the current task | ||
104 | $common_comm the name of the current process | ||
105 | |||
106 | All of the remaining fields in the event's format file have | ||
107 | counterparts as handler function arguments of the same name, as can be | ||
108 | seen in the example above. | ||
109 | |||
110 | The above provides the basics needed to directly access every field of | ||
111 | every event in a trace, which covers 90% of what you need to know to | ||
112 | write a useful trace script. The sections below cover the rest. | ||
113 | |||
114 | SCRIPT LAYOUT | ||
115 | ------------- | ||
116 | |||
117 | Every perf trace Perl script should start by setting up a Perl module | ||
118 | search path and 'use'ing a few support modules (see module | ||
119 | descriptions below): | ||
120 | |||
121 | ---- | ||
122 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; | ||
123 | use lib "./Perf-Trace-Util/lib"; | ||
124 | use Perf::Trace::Core; | ||
125 | use Perf::Trace::Context; | ||
126 | use Perf::Trace::Util; | ||
127 | ---- | ||
128 | |||
129 | The rest of the script can contain handler functions and support | ||
130 | functions in any order. | ||
131 | |||
132 | Aside from the event handler functions discussed above, every script | ||
133 | can implement a set of optional functions: | ||
134 | |||
135 | *trace_begin*, if defined, is called before any event is processed and | ||
136 | gives scripts a chance to do setup tasks: | ||
137 | |||
138 | ---- | ||
139 | sub trace_begin | ||
140 | { | ||
141 | } | ||
142 | ---- | ||
143 | |||
144 | *trace_end*, if defined, is called after all events have been | ||
145 | processed and gives scripts a chance to do end-of-script tasks, such | ||
146 | as display results: | ||
147 | |||
148 | ---- | ||
149 | sub trace_end | ||
150 | { | ||
151 | } | ||
152 | ---- | ||
153 | |||
154 | *trace_unhandled*, if defined, is called for any event that doesn't | ||
155 | have a handler explicitly defined for it. The standard set of | ||
156 | common arguments is passed into it: | ||
157 | |||
158 | ---- | ||
159 | sub trace_unhandled | ||
160 | { | ||
161 | my ($event_name, $context, $common_cpu, $common_secs, | ||
162 | $common_nsecs, $common_pid, $common_comm) = @_; | ||
163 | } | ||
164 | ---- | ||
165 | |||
166 | The remaining sections provide descriptions of each of the available | ||
167 | built-in perf trace Perl modules and their associated functions. | ||
168 | |||
169 | AVAILABLE MODULES AND FUNCTIONS | ||
170 | ------------------------------- | ||
171 | |||
172 | The following sections describe the functions and variables available | ||
173 | via the various Perf::Trace::* Perl modules. To use the functions and | ||
174 | variables from the given module, add the corresponding 'use | ||
175 | Perf::Trace::XXX' line to your perf trace script. | ||
176 | |||
177 | Perf::Trace::Core Module | ||
178 | ~~~~~~~~~~~~~~~~~~~~~~~~ | ||
179 | |||
180 | This module provides some essential functions to user scripts. | ||
181 | |||
182 | The *flag_str* and *symbol_str* functions provide human-readable | ||
183 | strings for flag and symbolic fields. These correspond to the strings | ||
184 | and values parsed from the 'print fmt' fields of the event format | ||
185 | files: | ||
186 | |||
187 | flag_str($event_name, $field_name, $field_value) - returns the string representation corresponding to $field_value for the flag field $field_name of event $event_name | ||
188 | symbol_str($event_name, $field_name, $field_value) - returns the string representation corresponding to $field_value for the symbolic field $field_name of event $event_name | ||
189 | |||
190 | Perf::Trace::Context Module | ||
191 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
192 | |||
193 | Some of the 'common' fields in the event format file aren't all that | ||
194 | common, but need to be made accessible to user scripts nonetheless. | ||
195 | |||
196 | Perf::Trace::Context defines a set of functions that can be used to | ||
197 | access this data in the context of the current event. Each of these | ||
198 | functions expects a $context variable, which is the same as the | ||
199 | $context variable passed into every event handler as the second | ||
200 | argument. | ||
201 | |||
202 | common_pc($context) - returns common_preempt_count for the current event | ||
203 | common_flags($context) - returns common_flags for the current event | ||
204 | common_lock_depth($context) - returns common_lock_depth for the current event | ||
205 | |||
206 | Perf::Trace::Util Module | ||
207 | ~~~~~~~~~~~~~~~~~~~~~~~~ | ||
208 | |||
209 | Various utility functions for use with perf trace: | ||
210 | |||
211 | nsecs($secs, $nsecs) - returns total nsecs given secs/nsecs pair | ||
212 | nsecs_secs($nsecs) - returns whole secs portion given nsecs | ||
213 | nsecs_nsecs($nsecs) - returns nsecs remainder given nsecs | ||
214 | nsecs_str($nsecs) - returns printable string in the form secs.nsecs | ||
215 | avg($total, $n) - returns average given a sum and a total number of values | ||
216 | |||
217 | SEE ALSO | ||
218 | -------- | ||
219 | linkperf:perf-trace[1] | ||
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt index 41ed75398ca9..07065efa60e0 100644 --- a/tools/perf/Documentation/perf-trace.txt +++ b/tools/perf/Documentation/perf-trace.txt | |||
@@ -20,6 +20,15 @@ OPTIONS | |||
20 | --dump-raw-trace=:: | 20 | --dump-raw-trace=:: |
21 | Display verbose dump of the trace data. | 21 | Display verbose dump of the trace data. |
22 | 22 | ||
23 | -s:: | ||
24 | --script=:: | ||
25 | Process trace data with the given script ([lang]:script[.ext]). | ||
26 | |||
27 | -g:: | ||
28 | --gen-script=:: | ||
29 | Generate perf-trace.[ext] starter script for given language, | ||
30 | using current perf.data. | ||
31 | |||
23 | SEE ALSO | 32 | SEE ALSO |
24 | -------- | 33 | -------- |
25 | linkperf:perf-record[1] | 34 | linkperf:perf-record[1], linkperf:perf-trace-perl[1] |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 7e190d522cd5..23ec66098bdc 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -2,6 +2,7 @@ | |||
2 | all:: | 2 | all:: |
3 | 3 | ||
4 | # Define V=1 to have a more verbose compile. | 4 | # Define V=1 to have a more verbose compile. |
5 | # Define V=2 to have an even more verbose compile. | ||
5 | # | 6 | # |
6 | # Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf() | 7 | # Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf() |
7 | # or vsnprintf() return -1 instead of number of characters which would | 8 | # or vsnprintf() return -1 instead of number of characters which would |
@@ -145,6 +146,10 @@ all:: | |||
145 | # Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call | 146 | # Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call |
146 | # your external grep (e.g., if your system lacks grep, if its grep is | 147 | # your external grep (e.g., if your system lacks grep, if its grep is |
147 | # broken, or spawning external process is slower than built-in grep perf has). | 148 | # broken, or spawning external process is slower than built-in grep perf has). |
149 | # | ||
150 | # Define LDFLAGS=-static to build a static binary. | ||
151 | # | ||
152 | # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. | ||
148 | 153 | ||
149 | PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE | 154 | PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE |
150 | @$(SHELL_PATH) util/PERF-VERSION-GEN | 155 | @$(SHELL_PATH) util/PERF-VERSION-GEN |
@@ -157,20 +162,6 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') | |||
157 | uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') | 162 | uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') |
158 | uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') | 163 | uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') |
159 | 164 | ||
160 | # | ||
161 | # Add -m32 for cross-builds: | ||
162 | # | ||
163 | ifdef NO_64BIT | ||
164 | MBITS := -m32 | ||
165 | else | ||
166 | # | ||
167 | # If we're on a 64-bit kernel, use -m64: | ||
168 | # | ||
169 | ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) | ||
170 | MBITS := -m64 | ||
171 | endif | ||
172 | endif | ||
173 | |||
174 | # CFLAGS and LDFLAGS are for the users to override from the command line. | 165 | # CFLAGS and LDFLAGS are for the users to override from the command line. |
175 | 166 | ||
176 | # | 167 | # |
@@ -200,8 +191,15 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition | |||
200 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes | 191 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes |
201 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement | 192 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement |
202 | 193 | ||
203 | CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) | 194 | ifeq ("$(origin DEBUG)", "command line") |
204 | LDFLAGS = -lpthread -lrt -lelf -lm | 195 | PERF_DEBUG = $(DEBUG) |
196 | endif | ||
197 | ifndef PERF_DEBUG | ||
198 | CFLAGS_OPTIMIZE = -O6 | ||
199 | endif | ||
200 | |||
201 | CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) | ||
202 | EXTLIBS = -lpthread -lrt -lelf -lm | ||
205 | ALL_CFLAGS = $(CFLAGS) | 203 | ALL_CFLAGS = $(CFLAGS) |
206 | ALL_LDFLAGS = $(LDFLAGS) | 204 | ALL_LDFLAGS = $(LDFLAGS) |
207 | STRIP ?= strip | 205 | STRIP ?= strip |
@@ -252,6 +250,9 @@ PTHREAD_LIBS = -lpthread | |||
252 | # explicitly what architecture to check for. Fix this up for yours.. | 250 | # explicitly what architecture to check for. Fix this up for yours.. |
253 | SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ | 251 | SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ |
254 | 252 | ||
253 | ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null "$(QUIET_STDERR)" && echo y"), y) | ||
254 | CFLAGS := $(CFLAGS) -fstack-protector-all | ||
255 | endif | ||
255 | 256 | ||
256 | 257 | ||
257 | ### --- END CONFIGURATION SECTION --- | 258 | ### --- END CONFIGURATION SECTION --- |
@@ -327,8 +328,28 @@ LIB_FILE=libperf.a | |||
327 | LIB_H += ../../include/linux/perf_event.h | 328 | LIB_H += ../../include/linux/perf_event.h |
328 | LIB_H += ../../include/linux/rbtree.h | 329 | LIB_H += ../../include/linux/rbtree.h |
329 | LIB_H += ../../include/linux/list.h | 330 | LIB_H += ../../include/linux/list.h |
331 | LIB_H += ../../include/linux/stringify.h | ||
332 | LIB_H += util/include/linux/bitmap.h | ||
333 | LIB_H += util/include/linux/bitops.h | ||
334 | LIB_H += util/include/linux/compiler.h | ||
335 | LIB_H += util/include/linux/ctype.h | ||
336 | LIB_H += util/include/linux/kernel.h | ||
330 | LIB_H += util/include/linux/list.h | 337 | LIB_H += util/include/linux/list.h |
338 | LIB_H += util/include/linux/module.h | ||
339 | LIB_H += util/include/linux/poison.h | ||
340 | LIB_H += util/include/linux/prefetch.h | ||
341 | LIB_H += util/include/linux/rbtree.h | ||
342 | LIB_H += util/include/linux/string.h | ||
343 | LIB_H += util/include/linux/types.h | ||
344 | LIB_H += util/include/asm/asm-offsets.h | ||
345 | LIB_H += util/include/asm/bitops.h | ||
346 | LIB_H += util/include/asm/byteorder.h | ||
347 | LIB_H += util/include/asm/swab.h | ||
348 | LIB_H += util/include/asm/system.h | ||
349 | LIB_H += util/include/asm/uaccess.h | ||
331 | LIB_H += perf.h | 350 | LIB_H += perf.h |
351 | LIB_H += util/debugfs.h | ||
352 | LIB_H += util/event.h | ||
332 | LIB_H += util/types.h | 353 | LIB_H += util/types.h |
333 | LIB_H += util/levenshtein.h | 354 | LIB_H += util/levenshtein.h |
334 | LIB_H += util/parse-options.h | 355 | LIB_H += util/parse-options.h |
@@ -342,15 +363,22 @@ LIB_H += util/strlist.h | |||
342 | LIB_H += util/run-command.h | 363 | LIB_H += util/run-command.h |
343 | LIB_H += util/sigchain.h | 364 | LIB_H += util/sigchain.h |
344 | LIB_H += util/symbol.h | 365 | LIB_H += util/symbol.h |
345 | LIB_H += util/module.h | ||
346 | LIB_H += util/color.h | 366 | LIB_H += util/color.h |
347 | LIB_H += util/values.h | 367 | LIB_H += util/values.h |
368 | LIB_H += util/sort.h | ||
369 | LIB_H += util/hist.h | ||
370 | LIB_H += util/thread.h | ||
371 | LIB_H += util/data_map.h | ||
372 | LIB_H += util/probe-finder.h | ||
373 | LIB_H += util/probe-event.h | ||
348 | 374 | ||
349 | LIB_OBJS += util/abspath.o | 375 | LIB_OBJS += util/abspath.o |
350 | LIB_OBJS += util/alias.o | 376 | LIB_OBJS += util/alias.o |
351 | LIB_OBJS += util/config.o | 377 | LIB_OBJS += util/config.o |
352 | LIB_OBJS += util/ctype.o | 378 | LIB_OBJS += util/ctype.o |
379 | LIB_OBJS += util/debugfs.o | ||
353 | LIB_OBJS += util/environment.o | 380 | LIB_OBJS += util/environment.o |
381 | LIB_OBJS += util/event.o | ||
354 | LIB_OBJS += util/exec_cmd.o | 382 | LIB_OBJS += util/exec_cmd.o |
355 | LIB_OBJS += util/help.o | 383 | LIB_OBJS += util/help.o |
356 | LIB_OBJS += util/levenshtein.o | 384 | LIB_OBJS += util/levenshtein.o |
@@ -358,6 +386,9 @@ LIB_OBJS += util/parse-options.o | |||
358 | LIB_OBJS += util/parse-events.o | 386 | LIB_OBJS += util/parse-events.o |
359 | LIB_OBJS += util/path.o | 387 | LIB_OBJS += util/path.o |
360 | LIB_OBJS += util/rbtree.o | 388 | LIB_OBJS += util/rbtree.o |
389 | LIB_OBJS += util/bitmap.o | ||
390 | LIB_OBJS += util/hweight.o | ||
391 | LIB_OBJS += util/find_next_bit.o | ||
361 | LIB_OBJS += util/run-command.o | 392 | LIB_OBJS += util/run-command.o |
362 | LIB_OBJS += util/quote.o | 393 | LIB_OBJS += util/quote.o |
363 | LIB_OBJS += util/strbuf.o | 394 | LIB_OBJS += util/strbuf.o |
@@ -367,7 +398,6 @@ LIB_OBJS += util/usage.o | |||
367 | LIB_OBJS += util/wrapper.o | 398 | LIB_OBJS += util/wrapper.o |
368 | LIB_OBJS += util/sigchain.o | 399 | LIB_OBJS += util/sigchain.o |
369 | LIB_OBJS += util/symbol.o | 400 | LIB_OBJS += util/symbol.o |
370 | LIB_OBJS += util/module.o | ||
371 | LIB_OBJS += util/color.o | 401 | LIB_OBJS += util/color.o |
372 | LIB_OBJS += util/pager.o | 402 | LIB_OBJS += util/pager.o |
373 | LIB_OBJS += util/header.o | 403 | LIB_OBJS += util/header.o |
@@ -379,11 +409,25 @@ LIB_OBJS += util/thread.o | |||
379 | LIB_OBJS += util/trace-event-parse.o | 409 | LIB_OBJS += util/trace-event-parse.o |
380 | LIB_OBJS += util/trace-event-read.o | 410 | LIB_OBJS += util/trace-event-read.o |
381 | LIB_OBJS += util/trace-event-info.o | 411 | LIB_OBJS += util/trace-event-info.o |
412 | LIB_OBJS += util/trace-event-perl.o | ||
382 | LIB_OBJS += util/svghelper.o | 413 | LIB_OBJS += util/svghelper.o |
414 | LIB_OBJS += util/sort.o | ||
415 | LIB_OBJS += util/hist.o | ||
416 | LIB_OBJS += util/data_map.o | ||
417 | LIB_OBJS += util/probe-event.o | ||
383 | 418 | ||
384 | BUILTIN_OBJS += builtin-annotate.o | 419 | BUILTIN_OBJS += builtin-annotate.o |
420 | |||
421 | BUILTIN_OBJS += builtin-bench.o | ||
422 | |||
423 | # Benchmark modules | ||
424 | BUILTIN_OBJS += bench/sched-messaging.o | ||
425 | BUILTIN_OBJS += bench/sched-pipe.o | ||
426 | BUILTIN_OBJS += bench/mem-memcpy.o | ||
427 | |||
385 | BUILTIN_OBJS += builtin-help.o | 428 | BUILTIN_OBJS += builtin-help.o |
386 | BUILTIN_OBJS += builtin-sched.o | 429 | BUILTIN_OBJS += builtin-sched.o |
430 | BUILTIN_OBJS += builtin-buildid-list.o | ||
387 | BUILTIN_OBJS += builtin-list.o | 431 | BUILTIN_OBJS += builtin-list.o |
388 | BUILTIN_OBJS += builtin-record.o | 432 | BUILTIN_OBJS += builtin-record.o |
389 | BUILTIN_OBJS += builtin-report.o | 433 | BUILTIN_OBJS += builtin-report.o |
@@ -391,9 +435,16 @@ BUILTIN_OBJS += builtin-stat.o | |||
391 | BUILTIN_OBJS += builtin-timechart.o | 435 | BUILTIN_OBJS += builtin-timechart.o |
392 | BUILTIN_OBJS += builtin-top.o | 436 | BUILTIN_OBJS += builtin-top.o |
393 | BUILTIN_OBJS += builtin-trace.o | 437 | BUILTIN_OBJS += builtin-trace.o |
438 | BUILTIN_OBJS += builtin-probe.o | ||
439 | BUILTIN_OBJS += builtin-kmem.o | ||
394 | 440 | ||
395 | PERFLIBS = $(LIB_FILE) | 441 | PERFLIBS = $(LIB_FILE) |
396 | 442 | ||
443 | ifeq ($(V), 2) | ||
444 | QUIET_STDERR = ">/dev/null" | ||
445 | else | ||
446 | QUIET_STDERR = ">/dev/null 2>&1" | ||
447 | endif | ||
397 | # | 448 | # |
398 | # Platform specific tweaks | 449 | # Platform specific tweaks |
399 | # | 450 | # |
@@ -421,36 +472,58 @@ ifeq ($(uname_S),Darwin) | |||
421 | PTHREAD_LIBS = | 472 | PTHREAD_LIBS = |
422 | endif | 473 | endif |
423 | 474 | ||
424 | ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) | 475 | ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) |
425 | ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) | 476 | ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) |
477 | msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); | ||
478 | endif | ||
479 | |||
480 | ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) | ||
426 | BASIC_CFLAGS += -DLIBELF_NO_MMAP | 481 | BASIC_CFLAGS += -DLIBELF_NO_MMAP |
427 | endif | 482 | endif |
428 | else | 483 | else |
429 | msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); | 484 | msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); |
430 | endif | 485 | endif |
431 | 486 | ||
487 | ifneq ($(shell sh -c "(echo '\#include <libdwarf/dwarf.h>'; echo '\#include <libdwarf/libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) | ||
488 | msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231); | ||
489 | BASIC_CFLAGS += -DNO_LIBDWARF | ||
490 | else | ||
491 | EXTLIBS += -lelf -ldwarf | ||
492 | LIB_OBJS += util/probe-finder.o | ||
493 | endif | ||
494 | |||
495 | PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null` | ||
496 | PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` | ||
497 | |||
498 | ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y) | ||
499 | BASIC_CFLAGS += -DNO_LIBPERL | ||
500 | else | ||
501 | ALL_LDFLAGS += $(PERL_EMBED_LDOPTS) | ||
502 | LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o | ||
503 | endif | ||
504 | |||
432 | ifdef NO_DEMANGLE | 505 | ifdef NO_DEMANGLE |
433 | BASIC_CFLAGS += -DNO_DEMANGLE | 506 | BASIC_CFLAGS += -DNO_DEMANGLE |
434 | else | 507 | else |
435 | has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y") | 508 | has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y") |
436 | 509 | ||
437 | ifeq ($(has_bfd),y) | 510 | ifeq ($(has_bfd),y) |
438 | EXTLIBS += -lbfd | 511 | EXTLIBS += -lbfd |
439 | else | 512 | else |
440 | has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y") | 513 | has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y") |
441 | ifeq ($(has_bfd_iberty),y) | 514 | ifeq ($(has_bfd_iberty),y) |
442 | EXTLIBS += -lbfd -liberty | 515 | EXTLIBS += -lbfd -liberty |
443 | else | 516 | else |
444 | has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") | 517 | has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y") |
445 | ifeq ($(has_bfd_iberty_z),y) | 518 | ifeq ($(has_bfd_iberty_z),y) |
446 | EXTLIBS += -lbfd -liberty -lz | 519 | EXTLIBS += -lbfd -liberty -lz |
447 | else | 520 | else |
448 | has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y") | 521 | has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y") |
449 | ifeq ($(has_cplus_demangle),y) | 522 | ifeq ($(has_cplus_demangle),y) |
450 | EXTLIBS += -liberty | 523 | EXTLIBS += -liberty |
451 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE | 524 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE |
452 | else | 525 | else |
453 | msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling) | 526 | msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling) |
454 | BASIC_CFLAGS += -DNO_DEMANGLE | 527 | BASIC_CFLAGS += -DNO_DEMANGLE |
455 | endif | 528 | endif |
456 | endif | 529 | endif |
@@ -787,6 +860,25 @@ util/config.o: util/config.c PERF-CFLAGS | |||
787 | util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS | 860 | util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS |
788 | $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | 861 | $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< |
789 | 862 | ||
863 | # some perf warning policies don't fit lib/bitmap.c, e.g. it warns about variable shadowing | ||
864 | # from <string.h> that comes from wrapping kernel headers. | ||
865 | KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//` | ||
866 | |||
867 | util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS | ||
868 | $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | ||
869 | |||
870 | util/hweight.o: ../../lib/hweight.c PERF-CFLAGS | ||
871 | $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | ||
872 | |||
873 | util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS | ||
874 | $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | ||
875 | |||
876 | util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS | ||
877 | $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< | ||
878 | |||
879 | scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS | ||
880 | $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< | ||
881 | |||
790 | perf-%$X: %.o $(PERFLIBS) | 882 | perf-%$X: %.o $(PERFLIBS) |
791 | $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) | 883 | $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) |
792 | 884 | ||
@@ -894,6 +986,13 @@ export perfexec_instdir | |||
894 | install: all | 986 | install: all |
895 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' | 987 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' |
896 | $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' | 988 | $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' |
989 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' | ||
990 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' | ||
991 | $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' | ||
992 | $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' | ||
993 | $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' | ||
994 | $(INSTALL) scripts/perl/Perf-Trace-Util/Makefile.PL -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' | ||
995 | $(INSTALL) scripts/perl/Perf-Trace-Util/README -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' | ||
897 | ifdef BUILT_INS | 996 | ifdef BUILT_INS |
898 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' | 997 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' |
899 | $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' | 998 | $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' |
@@ -979,7 +1078,7 @@ distclean: clean | |||
979 | # $(RM) configure | 1078 | # $(RM) configure |
980 | 1079 | ||
981 | clean: | 1080 | clean: |
982 | $(RM) *.o */*.o $(LIB_FILE) | 1081 | $(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE) |
983 | $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X | 1082 | $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X |
984 | $(RM) $(TEST_PROGRAMS) | 1083 | $(RM) $(TEST_PROGRAMS) |
985 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* | 1084 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* |
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h new file mode 100644 index 000000000000..f7781c6267c0 --- /dev/null +++ b/tools/perf/bench/bench.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef BENCH_H | ||
2 | #define BENCH_H | ||
3 | |||
4 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); | ||
5 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); | ||
6 | extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used); | ||
7 | |||
8 | #define BENCH_FORMAT_DEFAULT_STR "default" | ||
9 | #define BENCH_FORMAT_DEFAULT 0 | ||
10 | #define BENCH_FORMAT_SIMPLE_STR "simple" | ||
11 | #define BENCH_FORMAT_SIMPLE 1 | ||
12 | |||
13 | #define BENCH_FORMAT_UNKNOWN -1 | ||
14 | |||
15 | extern int bench_format; | ||
16 | |||
17 | #endif | ||
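bench.h only declares the benchmark entry points; the dispatcher that maps subcommand names onto them lives in builtin-bench.c, which is not part of this hunk. A minimal sketch of such a table-driven dispatch, with stand-in stubs instead of the real bench_* functions (all names below are illustrative):

	/* Illustrative name-to-function dispatch in the spirit of perf bench;
	 * the stubs stand in for the entry points declared in bench.h. */
	#include <stdio.h>
	#include <string.h>

	typedef int (*bench_fn_t)(int argc, const char **argv, const char *prefix);

	static int stub_messaging(int argc, const char **argv, const char *prefix)
	{
		(void)argc; (void)argv; (void)prefix;
		printf("would run sched messaging\n");
		return 0;
	}

	static int stub_pipe(int argc, const char **argv, const char *prefix)
	{
		(void)argc; (void)argv; (void)prefix;
		printf("would run sched pipe\n");
		return 0;
	}

	static const struct bench_entry {
		const char *name;
		bench_fn_t fn;
	} benchmarks[] = {
		{ "messaging", stub_messaging },
		{ "pipe",      stub_pipe      },
		{ NULL,        NULL           },
	};

	int main(int argc, char **argv)
	{
		int i;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <benchmark>\n", argv[0]);
			return 1;
		}
		for (i = 0; benchmarks[i].name; i++)
			if (!strcmp(benchmarks[i].name, argv[1]))
				return benchmarks[i].fn(argc - 1,
							(const char **)(argv + 1),
							NULL);
		fprintf(stderr, "unknown benchmark: %s\n", argv[1]);
		return 1;
	}

The real builtin-bench.c presumably also groups benchmarks into suites (sched, mem) and honours the BENCH_FORMAT_* selection defined above.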
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c new file mode 100644 index 000000000000..89773178e894 --- /dev/null +++ b/tools/perf/bench/mem-memcpy.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * mem-memcpy.c | ||
3 | * | ||
4 | * memcpy: Simple memory copy in various ways | ||
5 | * | ||
6 | * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
7 | */ | ||
8 | #include <ctype.h> | ||
9 | |||
10 | #include "../perf.h" | ||
11 | #include "../util/util.h" | ||
12 | #include "../util/parse-options.h" | ||
13 | #include "../util/string.h" | ||
14 | #include "../util/header.h" | ||
15 | #include "bench.h" | ||
16 | |||
17 | #include <stdio.h> | ||
18 | #include <stdlib.h> | ||
19 | #include <string.h> | ||
20 | #include <sys/time.h> | ||
21 | #include <errno.h> | ||
22 | |||
23 | #define K 1024 | ||
24 | |||
25 | static const char *length_str = "1MB"; | ||
26 | static const char *routine = "default"; | ||
27 | static int use_clock = 0; | ||
28 | static int clock_fd; | ||
29 | |||
30 | static const struct option options[] = { | ||
31 | OPT_STRING('l', "length", &length_str, "1MB", | ||
32 | "Specify length of memory to copy. " | ||
33 | "Available units: B, MB, GB (upper and lower case)"), | ||
34 | OPT_STRING('r', "routine", &routine, "default", | ||
35 | "Specify routine to copy"), | ||
36 | OPT_BOOLEAN('c', "clock", &use_clock, | ||
37 | "Use CPU clock for measuring"), | ||
38 | OPT_END() | ||
39 | }; | ||
40 | |||
41 | struct routine { | ||
42 | const char *name; | ||
43 | const char *desc; | ||
44 | void * (*fn)(void *dst, const void *src, size_t len); | ||
45 | }; | ||
46 | |||
47 | struct routine routines[] = { | ||
48 | { "default", | ||
49 | "Default memcpy() provided by glibc", | ||
50 | memcpy }, | ||
51 | { NULL, | ||
52 | NULL, | ||
53 | NULL } | ||
54 | }; | ||
55 | |||
56 | static const char * const bench_mem_memcpy_usage[] = { | ||
57 | "perf bench mem memcpy <options>", | ||
58 | NULL | ||
59 | }; | ||
60 | |||
61 | static struct perf_event_attr clock_attr = { | ||
62 | .type = PERF_TYPE_HARDWARE, | ||
63 | .config = PERF_COUNT_HW_CPU_CYCLES | ||
64 | }; | ||
65 | |||
66 | static void init_clock(void) | ||
67 | { | ||
68 | clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0); | ||
69 | |||
70 | if (clock_fd < 0 && errno == ENOSYS) | ||
71 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); | ||
72 | else | ||
73 | BUG_ON(clock_fd < 0); | ||
74 | } | ||
75 | |||
76 | static u64 get_clock(void) | ||
77 | { | ||
78 | int ret; | ||
79 | u64 clk; | ||
80 | |||
81 | ret = read(clock_fd, &clk, sizeof(u64)); | ||
82 | BUG_ON(ret != sizeof(u64)); | ||
83 | |||
84 | return clk; | ||
85 | } | ||
86 | |||
87 | static double timeval2double(struct timeval *ts) | ||
88 | { | ||
89 | return (double)ts->tv_sec + | ||
90 | (double)ts->tv_usec / (double)1000000; | ||
91 | } | ||
92 | |||
93 | int bench_mem_memcpy(int argc, const char **argv, | ||
94 | const char *prefix __used) | ||
95 | { | ||
96 | int i; | ||
97 | void *dst, *src; | ||
98 | size_t length; | ||
99 | double bps = 0.0; | ||
100 | struct timeval tv_start, tv_end, tv_diff; | ||
101 | u64 clock_start, clock_end, clock_diff; | ||
102 | |||
103 | clock_start = clock_end = clock_diff = 0ULL; | ||
104 | argc = parse_options(argc, argv, options, | ||
105 | bench_mem_memcpy_usage, 0); | ||
106 | |||
107 | tv_diff.tv_sec = 0; | ||
108 | tv_diff.tv_usec = 0; | ||
109 | length = (size_t)perf_atoll((char *)length_str); | ||
110 | |||
111 | if ((s64)length <= 0) { | ||
112 | fprintf(stderr, "Invalid length:%s\n", length_str); | ||
113 | return 1; | ||
114 | } | ||
115 | |||
116 | for (i = 0; routines[i].name; i++) { | ||
117 | if (!strcmp(routines[i].name, routine)) | ||
118 | break; | ||
119 | } | ||
120 | if (!routines[i].name) { | ||
121 | printf("Unknown routine:%s\n", routine); | ||
122 | printf("Available routines...\n"); | ||
123 | for (i = 0; routines[i].name; i++) { | ||
124 | printf("\t%s ... %s\n", | ||
125 | routines[i].name, routines[i].desc); | ||
126 | } | ||
127 | return 1; | ||
128 | } | ||
129 | |||
130 | dst = zalloc(length); | ||
131 | if (!dst) | ||
132 | die("memory allocation failed - maybe length is too large?\n"); | ||
133 | |||
134 | src = zalloc(length); | ||
135 | if (!src) | ||
136 | die("memory allocation failed - maybe length is too large?\n"); | ||
137 | |||
138 | if (bench_format == BENCH_FORMAT_DEFAULT) { | ||
139 | printf("# Copying %s Bytes from %p to %p ...\n\n", | ||
140 | length_str, src, dst); | ||
141 | } | ||
142 | |||
143 | if (use_clock) { | ||
144 | init_clock(); | ||
145 | clock_start = get_clock(); | ||
146 | } else { | ||
147 | BUG_ON(gettimeofday(&tv_start, NULL)); | ||
148 | } | ||
149 | |||
150 | routines[i].fn(dst, src, length); | ||
151 | |||
152 | if (use_clock) { | ||
153 | clock_end = get_clock(); | ||
154 | clock_diff = clock_end - clock_start; | ||
155 | } else { | ||
156 | BUG_ON(gettimeofday(&tv_end, NULL)); | ||
157 | timersub(&tv_end, &tv_start, &tv_diff); | ||
158 | bps = (double)((double)length / timeval2double(&tv_diff)); | ||
159 | } | ||
160 | |||
161 | switch (bench_format) { | ||
162 | case BENCH_FORMAT_DEFAULT: | ||
163 | if (use_clock) { | ||
164 | printf(" %14lf Clock/Byte\n", | ||
165 | (double)clock_diff / (double)length); | ||
166 | } else { | ||
167 | if (bps < K) | ||
168 | printf(" %14lf B/Sec\n", bps); | ||
169 | else if (bps < K * K) | ||
170 | printf(" %14lf KB/Sec\n", bps / 1024); | ||
171 | else if (bps < K * K * K) | ||
172 | printf(" %14lf MB/Sec\n", bps / 1024 / 1024); | ||
173 | else { | ||
174 | printf(" %14lf GB/Sec\n", | ||
175 | bps / 1024 / 1024 / 1024); | ||
176 | } | ||
177 | } | ||
178 | break; | ||
179 | case BENCH_FORMAT_SIMPLE: | ||
180 | if (use_clock) { | ||
181 | printf("%14lf\n", | ||
182 | (double)clock_diff / (double)length); | ||
183 | } else | ||
184 | printf("%lf\n", bps); | ||
185 | break; | ||
186 | default: | ||
187 | /* reaching this means there's some disaster: */ | ||
188 | die("unknown format: %d\n", bench_format); | ||
189 | break; | ||
190 | } | ||
191 | |||
192 | return 0; | ||
193 | } | ||
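The -c/--clock path above counts CPU cycles through a perf event file descriptor; sys_perf_event_open() is perf's wrapper around the raw system call. A self-contained sketch of the same idea using syscall(2) directly, assuming kernel headers that provide <linux/perf_event.h> and __NR_perf_event_open (the measured work and the output format are illustrative):

	/* Open a PERF_COUNT_HW_CPU_CYCLES counter for the current task,
	 * read it before and after the work, and report the difference. */
	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static char src[65536], dst[65536];

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t start, end;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.size = sizeof(attr);

		/* pid = 0: current task, cpu = -1: any CPU, no group, no flags */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		if (read(fd, &start, sizeof(start)) != sizeof(start))
			return 1;
		memcpy(dst, src, sizeof(src));		/* the measured work */
		if (read(fd, &end, sizeof(end)) != sizeof(end))
			return 1;

		printf("%llu cycles for %zu bytes\n",
		       (unsigned long long)(end - start), sizeof(src));
		close(fd);
		return 0;
	}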
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c new file mode 100644 index 000000000000..605a2a959aa8 --- /dev/null +++ b/tools/perf/bench/sched-messaging.c | |||
@@ -0,0 +1,336 @@ | |||
1 | /* | ||
2 | * | ||
3 | * builtin-bench-messaging.c | ||
4 | * | ||
5 | * messaging: Benchmark for scheduler and IPC mechanisms | ||
6 | * | ||
7 | * Based on hackbench by Rusty Russell <rusty@rustcorp.com.au> | ||
8 | * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include "../perf.h" | ||
13 | #include "../util/util.h" | ||
14 | #include "../util/parse-options.h" | ||
15 | #include "../builtin.h" | ||
16 | #include "bench.h" | ||
17 | |||
18 | /* Test groups of 20 processes spraying to 20 receivers */ | ||
19 | #include <pthread.h> | ||
20 | #include <stdio.h> | ||
21 | #include <stdlib.h> | ||
22 | #include <string.h> | ||
23 | #include <errno.h> | ||
24 | #include <unistd.h> | ||
25 | #include <sys/types.h> | ||
26 | #include <sys/socket.h> | ||
27 | #include <sys/wait.h> | ||
28 | #include <sys/time.h> | ||
29 | #include <sys/poll.h> | ||
30 | #include <limits.h> | ||
31 | |||
32 | #define DATASIZE 100 | ||
33 | |||
34 | static int use_pipes = 0; | ||
35 | static unsigned int loops = 100; | ||
36 | static unsigned int thread_mode = 0; | ||
37 | static unsigned int num_groups = 10; | ||
38 | |||
39 | struct sender_context { | ||
40 | unsigned int num_fds; | ||
41 | int ready_out; | ||
42 | int wakefd; | ||
43 | int out_fds[0]; | ||
44 | }; | ||
45 | |||
46 | struct receiver_context { | ||
47 | unsigned int num_packets; | ||
48 | int in_fds[2]; | ||
49 | int ready_out; | ||
50 | int wakefd; | ||
51 | }; | ||
52 | |||
53 | static void barf(const char *msg) | ||
54 | { | ||
55 | fprintf(stderr, "%s (error: %s)\n", msg, strerror(errno)); | ||
56 | exit(1); | ||
57 | } | ||
58 | |||
59 | static void fdpair(int fds[2]) | ||
60 | { | ||
61 | if (use_pipes) { | ||
62 | if (pipe(fds) == 0) | ||
63 | return; | ||
64 | } else { | ||
65 | if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0) | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | barf(use_pipes ? "pipe()" : "socketpair()"); | ||
70 | } | ||
71 | |||
72 | /* Block until we're ready to go */ | ||
73 | static void ready(int ready_out, int wakefd) | ||
74 | { | ||
75 | char dummy; | ||
76 | struct pollfd pollfd = { .fd = wakefd, .events = POLLIN }; | ||
77 | |||
78 | /* Tell them we're ready. */ | ||
79 | if (write(ready_out, &dummy, 1) != 1) | ||
80 | barf("CLIENT: ready write"); | ||
81 | |||
82 | /* Wait for "GO" signal */ | ||
83 | if (poll(&pollfd, 1, -1) != 1) | ||
84 | barf("poll"); | ||
85 | } | ||
86 | |||
87 | /* Sender sprays loops messages down each file descriptor */ | ||
88 | static void *sender(struct sender_context *ctx) | ||
89 | { | ||
90 | char data[DATASIZE]; | ||
91 | unsigned int i, j; | ||
92 | |||
93 | ready(ctx->ready_out, ctx->wakefd); | ||
94 | |||
95 | /* Now pump to every receiver. */ | ||
96 | for (i = 0; i < loops; i++) { | ||
97 | for (j = 0; j < ctx->num_fds; j++) { | ||
98 | int ret, done = 0; | ||
99 | |||
100 | again: | ||
101 | ret = write(ctx->out_fds[j], data + done, | ||
102 | sizeof(data)-done); | ||
103 | if (ret < 0) | ||
104 | barf("SENDER: write"); | ||
105 | done += ret; | ||
106 | if (done < DATASIZE) | ||
107 | goto again; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | return NULL; | ||
112 | } | ||
113 | |||
114 | |||
115 | /* One receiver per fd */ | ||
116 | static void *receiver(struct receiver_context* ctx) | ||
117 | { | ||
118 | unsigned int i; | ||
119 | |||
120 | if (!thread_mode) | ||
121 | close(ctx->in_fds[1]); | ||
122 | |||
123 | /* Wait for start... */ | ||
124 | ready(ctx->ready_out, ctx->wakefd); | ||
125 | |||
126 | /* Receive them all */ | ||
127 | for (i = 0; i < ctx->num_packets; i++) { | ||
128 | char data[DATASIZE]; | ||
129 | int ret, done = 0; | ||
130 | |||
131 | again: | ||
132 | ret = read(ctx->in_fds[0], data + done, DATASIZE - done); | ||
133 | if (ret < 0) | ||
134 | barf("SERVER: read"); | ||
135 | done += ret; | ||
136 | if (done < DATASIZE) | ||
137 | goto again; | ||
138 | } | ||
139 | |||
140 | return NULL; | ||
141 | } | ||
142 | |||
143 | static pthread_t create_worker(void *ctx, void *(*func)(void *)) | ||
144 | { | ||
145 | pthread_attr_t attr; | ||
146 | pthread_t childid; | ||
147 | int err; | ||
148 | |||
149 | if (!thread_mode) { | ||
150 | /* process mode */ | ||
151 | /* Fork the receiver. */ | ||
152 | switch (fork()) { | ||
153 | case -1: | ||
154 | barf("fork()"); | ||
155 | break; | ||
156 | case 0: | ||
157 | (*func) (ctx); | ||
158 | exit(0); | ||
159 | break; | ||
160 | default: | ||
161 | break; | ||
162 | } | ||
163 | |||
164 | return (pthread_t)0; | ||
165 | } | ||
166 | |||
167 | if (pthread_attr_init(&attr) != 0) | ||
168 | barf("pthread_attr_init:"); | ||
169 | |||
170 | #ifndef __ia64__ | ||
171 | if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0) | ||
172 | barf("pthread_attr_setstacksize"); | ||
173 | #endif | ||
174 | |||
175 | err = pthread_create(&childid, &attr, func, ctx); | ||
176 | if (err != 0) { | ||
177 | fprintf(stderr, "pthread_create failed: %s (%d)\n", | ||
178 | strerror(err), err); | ||
179 | exit(-1); | ||
180 | } | ||
181 | return childid; | ||
182 | } | ||
183 | |||
184 | static void reap_worker(pthread_t id) | ||
185 | { | ||
186 | int proc_status; | ||
187 | void *thread_status; | ||
188 | |||
189 | if (!thread_mode) { | ||
190 | /* process mode */ | ||
191 | wait(&proc_status); | ||
192 | if (!WIFEXITED(proc_status)) | ||
193 | exit(1); | ||
194 | } else { | ||
195 | pthread_join(id, &thread_status); | ||
196 | } | ||
197 | } | ||
198 | |||
199 | /* One group of senders and receivers */ | ||
200 | static unsigned int group(pthread_t *pth, | ||
201 | unsigned int num_fds, | ||
202 | int ready_out, | ||
203 | int wakefd) | ||
204 | { | ||
205 | unsigned int i; | ||
206 | struct sender_context *snd_ctx = malloc(sizeof(struct sender_context) | ||
207 | + num_fds * sizeof(int)); | ||
208 | |||
209 | if (!snd_ctx) | ||
210 | barf("malloc()"); | ||
211 | |||
212 | for (i = 0; i < num_fds; i++) { | ||
213 | int fds[2]; | ||
214 | struct receiver_context *ctx = malloc(sizeof(*ctx)); | ||
215 | |||
216 | if (!ctx) | ||
217 | barf("malloc()"); | ||
218 | |||
219 | |||
220 | /* Create the pipe between client and server */ | ||
221 | fdpair(fds); | ||
222 | |||
223 | ctx->num_packets = num_fds * loops; | ||
224 | ctx->in_fds[0] = fds[0]; | ||
225 | ctx->in_fds[1] = fds[1]; | ||
226 | ctx->ready_out = ready_out; | ||
227 | ctx->wakefd = wakefd; | ||
228 | |||
229 | pth[i] = create_worker(ctx, (void *)receiver); | ||
230 | |||
231 | snd_ctx->out_fds[i] = fds[1]; | ||
232 | if (!thread_mode) | ||
233 | close(fds[0]); | ||
234 | } | ||
235 | |||
236 | /* Now we have all the fds, fork the senders */ | ||
237 | for (i = 0; i < num_fds; i++) { | ||
238 | snd_ctx->ready_out = ready_out; | ||
239 | snd_ctx->wakefd = wakefd; | ||
240 | snd_ctx->num_fds = num_fds; | ||
241 | |||
242 | pth[num_fds+i] = create_worker(snd_ctx, (void *)sender); | ||
243 | } | ||
244 | |||
245 | /* Close the fds we have left */ | ||
246 | if (!thread_mode) | ||
247 | for (i = 0; i < num_fds; i++) | ||
248 | close(snd_ctx->out_fds[i]); | ||
249 | |||
250 | /* Return number of children to reap */ | ||
251 | return num_fds * 2; | ||
252 | } | ||
253 | |||
254 | static const struct option options[] = { | ||
255 | OPT_BOOLEAN('p', "pipe", &use_pipes, | ||
256 | "Use pipe() instead of socketpair()"), | ||
257 | OPT_BOOLEAN('t', "thread", &thread_mode, | ||
258 | "Be multi-threaded instead of multi-process"), | ||
259 | OPT_INTEGER('g', "group", &num_groups, | ||
260 | "Specify number of groups"), | ||
261 | OPT_INTEGER('l', "loop", &loops, | ||
262 | "Specify number of loops"), | ||
263 | OPT_END() | ||
264 | }; | ||
265 | |||
266 | static const char * const bench_sched_message_usage[] = { | ||
267 | "perf bench sched messaging <options>", | ||
268 | NULL | ||
269 | }; | ||
270 | |||
271 | int bench_sched_messaging(int argc, const char **argv, | ||
272 | const char *prefix __used) | ||
273 | { | ||
274 | unsigned int i, total_children; | ||
275 | struct timeval start, stop, diff; | ||
276 | unsigned int num_fds = 20; | ||
277 | int readyfds[2], wakefds[2]; | ||
278 | char dummy; | ||
279 | pthread_t *pth_tab; | ||
280 | |||
281 | argc = parse_options(argc, argv, options, | ||
282 | bench_sched_message_usage, 0); | ||
283 | |||
284 | pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t)); | ||
285 | if (!pth_tab) | ||
286 | barf("main:malloc()"); | ||
287 | |||
288 | fdpair(readyfds); | ||
289 | fdpair(wakefds); | ||
290 | |||
291 | total_children = 0; | ||
292 | for (i = 0; i < num_groups; i++) | ||
293 | total_children += group(pth_tab+total_children, num_fds, | ||
294 | readyfds[1], wakefds[0]); | ||
295 | |||
296 | /* Wait for everyone to be ready */ | ||
297 | for (i = 0; i < total_children; i++) | ||
298 | if (read(readyfds[0], &dummy, 1) != 1) | ||
299 | barf("Reading for readyfds"); | ||
300 | |||
301 | gettimeofday(&start, NULL); | ||
302 | |||
303 | /* Kick them off */ | ||
304 | if (write(wakefds[1], &dummy, 1) != 1) | ||
305 | barf("Writing to start them"); | ||
306 | |||
307 | /* Reap them all */ | ||
308 | for (i = 0; i < total_children; i++) | ||
309 | reap_worker(pth_tab[i]); | ||
310 | |||
311 | gettimeofday(&stop, NULL); | ||
312 | |||
313 | timersub(&stop, &start, &diff); | ||
314 | |||
315 | switch (bench_format) { | ||
316 | case BENCH_FORMAT_DEFAULT: | ||
317 | printf("# %d sender and receiver %s per group\n", | ||
318 | num_fds, thread_mode ? "threads" : "processes"); | ||
319 | printf("# %d groups == %d %s run\n\n", | ||
320 | num_groups, num_groups * 2 * num_fds, | ||
321 | thread_mode ? "threads" : "processes"); | ||
322 | printf(" %14s: %lu.%03lu [sec]\n", "Total time", | ||
323 | diff.tv_sec, diff.tv_usec/1000); | ||
324 | break; | ||
325 | case BENCH_FORMAT_SIMPLE: | ||
326 | printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000); | ||
327 | break; | ||
328 | default: | ||
329 | /* reaching here means there's some disaster */ | ||
330 | fprintf(stderr, "Unknown format:%d\n", bench_format); | ||
331 | exit(1); | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | return 0; | ||
336 | } | ||
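Before the timed run starts, every sender and receiver blocks in ready(): it writes one byte to the shared ready descriptor, then poll()s the wake descriptor until the parent writes the "go" byte. A condensed, self-contained sketch of that handshake with a single forked child (pipes throughout; error handling trimmed):

	/* One-child version of the ready/wake handshake used above. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <poll.h>
	#include <sys/wait.h>

	int main(void)
	{
		int readyfds[2], wakefds[2];
		char dummy = 'x';

		if (pipe(readyfds) || pipe(wakefds))
			return 1;

		if (fork() == 0) {
			struct pollfd pfd = { .fd = wakefds[0], .events = POLLIN };

			/* Tell the parent we are ready... */
			if (write(readyfds[1], &dummy, 1) != 1)
				exit(1);
			/* ...then wait for the "go" byte. */
			if (poll(&pfd, 1, -1) != 1)
				exit(1);
			printf("child: running\n");
			exit(0);
		}

		/* Parent: wait for readiness, then kick the child off. */
		if (read(readyfds[0], &dummy, 1) != 1)
			return 1;
		if (write(wakefds[1], &dummy, 1) != 1)
			return 1;
		wait(NULL);
		return 0;
	}

In the benchmark the parent reads one ready byte per worker (num_groups * 2 * num_fds of them) before writing the single wake byte, so all workers start at once.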
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c new file mode 100644 index 000000000000..238185f97977 --- /dev/null +++ b/tools/perf/bench/sched-pipe.c | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * | ||
3 | * builtin-bench-pipe.c | ||
4 | * | ||
5 | * pipe: Benchmark for pipe() | ||
6 | * | ||
7 | * Based on pipe-test-1m.c by Ingo Molnar <mingo@redhat.com> | ||
8 | * http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c | ||
9 | * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include "../perf.h" | ||
14 | #include "../util/util.h" | ||
15 | #include "../util/parse-options.h" | ||
16 | #include "../builtin.h" | ||
17 | #include "bench.h" | ||
18 | |||
19 | #include <unistd.h> | ||
20 | #include <stdio.h> | ||
21 | #include <stdlib.h> | ||
22 | #include <signal.h> | ||
23 | #include <sys/wait.h> | ||
24 | #include <linux/unistd.h> | ||
25 | #include <string.h> | ||
26 | #include <errno.h> | ||
27 | #include <assert.h> | ||
28 | #include <sys/time.h> | ||
29 | #include <sys/types.h> | ||
30 | |||
31 | #define LOOPS_DEFAULT 1000000 | ||
32 | static int loops = LOOPS_DEFAULT; | ||
33 | |||
34 | static const struct option options[] = { | ||
35 | OPT_INTEGER('l', "loop", &loops, | ||
36 | "Specify number of loops"), | ||
37 | OPT_END() | ||
38 | }; | ||
39 | |||
40 | static const char * const bench_sched_pipe_usage[] = { | ||
41 | "perf bench sched pipe <options>", | ||
42 | NULL | ||
43 | }; | ||
44 | |||
45 | int bench_sched_pipe(int argc, const char **argv, | ||
46 | const char *prefix __used) | ||
47 | { | ||
48 | int pipe_1[2], pipe_2[2]; | ||
49 | int m = 0, i; | ||
50 | struct timeval start, stop, diff; | ||
51 | unsigned long long result_usec = 0; | ||
52 | |||
53 | /* | ||
54 | * Why does "ret" exist? | ||
55 | * Discarding the return value of read() and write() | ||
56 | * triggers a build error in perf's build environment. | ||
57 | */ | ||
58 | int ret, wait_stat; | ||
59 | pid_t pid, retpid; | ||
60 | |||
61 | argc = parse_options(argc, argv, options, | ||
62 | bench_sched_pipe_usage, 0); | ||
63 | |||
64 | assert(!pipe(pipe_1)); | ||
65 | assert(!pipe(pipe_2)); | ||
66 | |||
67 | pid = fork(); | ||
68 | assert(pid >= 0); | ||
69 | |||
70 | gettimeofday(&start, NULL); | ||
71 | |||
72 | if (!pid) { | ||
73 | for (i = 0; i < loops; i++) { | ||
74 | ret = read(pipe_1[0], &m, sizeof(int)); | ||
75 | ret = write(pipe_2[1], &m, sizeof(int)); | ||
76 | } | ||
77 | } else { | ||
78 | for (i = 0; i < loops; i++) { | ||
79 | ret = write(pipe_1[1], &m, sizeof(int)); | ||
80 | ret = read(pipe_2[0], &m, sizeof(int)); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | gettimeofday(&stop, NULL); | ||
85 | timersub(&stop, &start, &diff); | ||
86 | |||
87 | if (pid) { | ||
88 | retpid = waitpid(pid, &wait_stat, 0); | ||
89 | assert((retpid == pid) && WIFEXITED(wait_stat)); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | switch (bench_format) { | ||
94 | case BENCH_FORMAT_DEFAULT: | ||
95 | printf("# Executed %d pipe operations between two tasks\n\n", | ||
96 | loops); | ||
97 | |||
98 | result_usec = diff.tv_sec * 1000000; | ||
99 | result_usec += diff.tv_usec; | ||
100 | |||
101 | printf(" %14s: %lu.%03lu [sec]\n\n", "Total time", | ||
102 | diff.tv_sec, diff.tv_usec/1000); | ||
103 | |||
104 | printf(" %14lf usecs/op\n", | ||
105 | (double)result_usec / (double)loops); | ||
106 | printf(" %14d ops/sec\n", | ||
107 | (int)((double)loops / | ||
108 | ((double)result_usec / (double)1000000))); | ||
109 | break; | ||
110 | |||
111 | case BENCH_FORMAT_SIMPLE: | ||
112 | printf("%lu.%03lu\n", | ||
113 | diff.tv_sec, diff.tv_usec / 1000); | ||
114 | break; | ||
115 | |||
116 | default: | ||
117 | /* reaching here means there's some disaster */ | ||
118 | fprintf(stderr, "Unknown format:%d\n", bench_format); | ||
119 | exit(1); | ||
120 | break; | ||
121 | } | ||
122 | |||
123 | return 0; | ||
124 | } | ||
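The measured workload is a ping-pong: the parent writes an int into pipe_1 and reads it back from pipe_2 while the child does the opposite, so every iteration forces a pair of context switches. A trimmed-down sketch without the option parsing (fixed loop count, minimal error handling):

	/* Parent/child pipe ping-pong, timed with gettimeofday()/timersub(). */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/time.h>
	#include <sys/wait.h>

	#define LOOPS 100000

	int main(void)
	{
		int pipe_1[2], pipe_2[2], m = 0, i;
		struct timeval start, stop, diff;
		pid_t pid;

		if (pipe(pipe_1) || pipe(pipe_2))
			return 1;

		pid = fork();
		if (pid < 0)
			return 1;

		gettimeofday(&start, NULL);

		if (pid == 0) {
			for (i = 0; i < LOOPS; i++) {
				if (read(pipe_1[0], &m, sizeof(int)) != sizeof(int) ||
				    write(pipe_2[1], &m, sizeof(int)) != sizeof(int))
					return 1;
			}
			return 0;
		}

		for (i = 0; i < LOOPS; i++) {
			if (write(pipe_1[1], &m, sizeof(int)) != sizeof(int) ||
			    read(pipe_2[0], &m, sizeof(int)) != sizeof(int))
				return 1;
		}

		gettimeofday(&stop, NULL);
		timersub(&stop, &start, &diff);
		wait(NULL);

		printf("%f usecs/op\n",
		       (double)(diff.tv_sec * 1000000 + diff.tv_usec) / LOOPS);
		return 0;
	}

Note that in the benchmark above only the child reaches the printing code; the parent returns right after waitpid().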
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 1ec741615814..0bf2e8f9af57 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c | |||
@@ -19,29 +19,26 @@ | |||
19 | #include "perf.h" | 19 | #include "perf.h" |
20 | #include "util/debug.h" | 20 | #include "util/debug.h" |
21 | 21 | ||
22 | #include "util/event.h" | ||
22 | #include "util/parse-options.h" | 23 | #include "util/parse-options.h" |
23 | #include "util/parse-events.h" | 24 | #include "util/parse-events.h" |
24 | #include "util/thread.h" | 25 | #include "util/thread.h" |
26 | #include "util/sort.h" | ||
27 | #include "util/hist.h" | ||
28 | #include "util/data_map.h" | ||
25 | 29 | ||
26 | static char const *input_name = "perf.data"; | 30 | static char const *input_name = "perf.data"; |
27 | 31 | ||
28 | static char default_sort_order[] = "comm,symbol"; | ||
29 | static char *sort_order = default_sort_order; | ||
30 | |||
31 | static int force; | 32 | static int force; |
32 | static int input; | ||
33 | static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; | ||
34 | 33 | ||
35 | static int full_paths; | 34 | static int full_paths; |
36 | 35 | ||
37 | static int print_line; | 36 | static int print_line; |
38 | 37 | ||
39 | static unsigned long page_size; | 38 | struct sym_hist { |
40 | static unsigned long mmap_window = 32; | 39 | u64 sum; |
41 | 40 | u64 ip[0]; | |
42 | static struct rb_root threads; | 41 | }; |
43 | static struct thread *last_match; | ||
44 | |||
45 | 42 | ||
46 | struct sym_ext { | 43 | struct sym_ext { |
47 | struct rb_node node; | 44 | struct rb_node node; |
@@ -49,247 +46,38 @@ struct sym_ext { | |||
49 | char *path; | 46 | char *path; |
50 | }; | 47 | }; |
51 | 48 | ||
52 | /* | 49 | struct sym_priv { |
53 | * histogram, sorted on item, collects counts | 50 | struct sym_hist *hist; |
54 | */ | 51 | struct sym_ext *ext; |
55 | |||
56 | static struct rb_root hist; | ||
57 | |||
58 | struct hist_entry { | ||
59 | struct rb_node rb_node; | ||
60 | |||
61 | struct thread *thread; | ||
62 | struct map *map; | ||
63 | struct dso *dso; | ||
64 | struct symbol *sym; | ||
65 | u64 ip; | ||
66 | char level; | ||
67 | |||
68 | uint32_t count; | ||
69 | }; | ||
70 | |||
71 | /* | ||
72 | * configurable sorting bits | ||
73 | */ | ||
74 | |||
75 | struct sort_entry { | ||
76 | struct list_head list; | ||
77 | |||
78 | const char *header; | ||
79 | |||
80 | int64_t (*cmp)(struct hist_entry *, struct hist_entry *); | ||
81 | int64_t (*collapse)(struct hist_entry *, struct hist_entry *); | ||
82 | size_t (*print)(FILE *fp, struct hist_entry *); | ||
83 | }; | ||
84 | |||
85 | /* --sort pid */ | ||
86 | |||
87 | static int64_t | ||
88 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) | ||
89 | { | ||
90 | return right->thread->pid - left->thread->pid; | ||
91 | } | ||
92 | |||
93 | static size_t | ||
94 | sort__thread_print(FILE *fp, struct hist_entry *self) | ||
95 | { | ||
96 | return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); | ||
97 | } | ||
98 | |||
99 | static struct sort_entry sort_thread = { | ||
100 | .header = " Command: Pid", | ||
101 | .cmp = sort__thread_cmp, | ||
102 | .print = sort__thread_print, | ||
103 | }; | ||
104 | |||
105 | /* --sort comm */ | ||
106 | |||
107 | static int64_t | ||
108 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) | ||
109 | { | ||
110 | return right->thread->pid - left->thread->pid; | ||
111 | } | ||
112 | |||
113 | static int64_t | ||
114 | sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) | ||
115 | { | ||
116 | char *comm_l = left->thread->comm; | ||
117 | char *comm_r = right->thread->comm; | ||
118 | |||
119 | if (!comm_l || !comm_r) { | ||
120 | if (!comm_l && !comm_r) | ||
121 | return 0; | ||
122 | else if (!comm_l) | ||
123 | return -1; | ||
124 | else | ||
125 | return 1; | ||
126 | } | ||
127 | |||
128 | return strcmp(comm_l, comm_r); | ||
129 | } | ||
130 | |||
131 | static size_t | ||
132 | sort__comm_print(FILE *fp, struct hist_entry *self) | ||
133 | { | ||
134 | return fprintf(fp, "%16s", self->thread->comm); | ||
135 | } | ||
136 | |||
137 | static struct sort_entry sort_comm = { | ||
138 | .header = " Command", | ||
139 | .cmp = sort__comm_cmp, | ||
140 | .collapse = sort__comm_collapse, | ||
141 | .print = sort__comm_print, | ||
142 | }; | ||
143 | |||
144 | /* --sort dso */ | ||
145 | |||
146 | static int64_t | ||
147 | sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) | ||
148 | { | ||
149 | struct dso *dso_l = left->dso; | ||
150 | struct dso *dso_r = right->dso; | ||
151 | |||
152 | if (!dso_l || !dso_r) { | ||
153 | if (!dso_l && !dso_r) | ||
154 | return 0; | ||
155 | else if (!dso_l) | ||
156 | return -1; | ||
157 | else | ||
158 | return 1; | ||
159 | } | ||
160 | |||
161 | return strcmp(dso_l->name, dso_r->name); | ||
162 | } | ||
163 | |||
164 | static size_t | ||
165 | sort__dso_print(FILE *fp, struct hist_entry *self) | ||
166 | { | ||
167 | if (self->dso) | ||
168 | return fprintf(fp, "%-25s", self->dso->name); | ||
169 | |||
170 | return fprintf(fp, "%016llx ", (u64)self->ip); | ||
171 | } | ||
172 | |||
173 | static struct sort_entry sort_dso = { | ||
174 | .header = "Shared Object ", | ||
175 | .cmp = sort__dso_cmp, | ||
176 | .print = sort__dso_print, | ||
177 | }; | ||
178 | |||
179 | /* --sort symbol */ | ||
180 | |||
181 | static int64_t | ||
182 | sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) | ||
183 | { | ||
184 | u64 ip_l, ip_r; | ||
185 | |||
186 | if (left->sym == right->sym) | ||
187 | return 0; | ||
188 | |||
189 | ip_l = left->sym ? left->sym->start : left->ip; | ||
190 | ip_r = right->sym ? right->sym->start : right->ip; | ||
191 | |||
192 | return (int64_t)(ip_r - ip_l); | ||
193 | } | ||
194 | |||
195 | static size_t | ||
196 | sort__sym_print(FILE *fp, struct hist_entry *self) | ||
197 | { | ||
198 | size_t ret = 0; | ||
199 | |||
200 | if (verbose) | ||
201 | ret += fprintf(fp, "%#018llx ", (u64)self->ip); | ||
202 | |||
203 | if (self->sym) { | ||
204 | ret += fprintf(fp, "[%c] %s", | ||
205 | self->dso == kernel_dso ? 'k' : '.', self->sym->name); | ||
206 | } else { | ||
207 | ret += fprintf(fp, "%#016llx", (u64)self->ip); | ||
208 | } | ||
209 | |||
210 | return ret; | ||
211 | } | ||
212 | |||
213 | static struct sort_entry sort_sym = { | ||
214 | .header = "Symbol", | ||
215 | .cmp = sort__sym_cmp, | ||
216 | .print = sort__sym_print, | ||
217 | }; | ||
218 | |||
219 | static int sort__need_collapse = 0; | ||
220 | |||
221 | struct sort_dimension { | ||
222 | const char *name; | ||
223 | struct sort_entry *entry; | ||
224 | int taken; | ||
225 | }; | 52 | }; |
226 | 53 | ||
227 | static struct sort_dimension sort_dimensions[] = { | 54 | static struct symbol_conf symbol_conf = { |
228 | { .name = "pid", .entry = &sort_thread, }, | 55 | .priv_size = sizeof(struct sym_priv), |
229 | { .name = "comm", .entry = &sort_comm, }, | 56 | .try_vmlinux_path = true, |
230 | { .name = "dso", .entry = &sort_dso, }, | ||
231 | { .name = "symbol", .entry = &sort_sym, }, | ||
232 | }; | 57 | }; |
233 | 58 | ||
234 | static LIST_HEAD(hist_entry__sort_list); | 59 | static const char *sym_hist_filter; |
235 | 60 | ||
236 | static int sort_dimension__add(char *tok) | 61 | static int symbol_filter(struct map *map __used, struct symbol *sym) |
237 | { | 62 | { |
238 | unsigned int i; | 63 | if (sym_hist_filter == NULL || |
239 | 64 | strcmp(sym->name, sym_hist_filter) == 0) { | |
240 | for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { | 65 | struct sym_priv *priv = symbol__priv(sym); |
241 | struct sort_dimension *sd = &sort_dimensions[i]; | 66 | const int size = (sizeof(*priv->hist) + |
242 | 67 | (sym->end - sym->start) * sizeof(u64)); | |
243 | if (sd->taken) | ||
244 | continue; | ||
245 | |||
246 | if (strncasecmp(tok, sd->name, strlen(tok))) | ||
247 | continue; | ||
248 | |||
249 | if (sd->entry->collapse) | ||
250 | sort__need_collapse = 1; | ||
251 | |||
252 | list_add_tail(&sd->entry->list, &hist_entry__sort_list); | ||
253 | sd->taken = 1; | ||
254 | 68 | ||
69 | priv->hist = malloc(size); | ||
70 | if (priv->hist) | ||
71 | memset(priv->hist, 0, size); | ||
255 | return 0; | 72 | return 0; |
256 | } | 73 | } |
257 | 74 | /* | |
258 | return -ESRCH; | 75 | * FIXME: We should really filter it out, as we don't want to go thru symbols |
259 | } | 76 | * we're not interested, and if a DSO ends up with no symbols, delete it too, |
260 | 77 | * but right now the kernel loading routines in symbol.c bail out if no symbols | |
261 | static int64_t | 78 | * are found, fix it later. |
262 | hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) | 79 | */ |
263 | { | 80 | return 0; |
264 | struct sort_entry *se; | ||
265 | int64_t cmp = 0; | ||
266 | |||
267 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
268 | cmp = se->cmp(left, right); | ||
269 | if (cmp) | ||
270 | break; | ||
271 | } | ||
272 | |||
273 | return cmp; | ||
274 | } | ||
275 | |||
276 | static int64_t | ||
277 | hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | ||
278 | { | ||
279 | struct sort_entry *se; | ||
280 | int64_t cmp = 0; | ||
281 | |||
282 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
283 | int64_t (*f)(struct hist_entry *, struct hist_entry *); | ||
284 | |||
285 | f = se->collapse ?: se->cmp; | ||
286 | |||
287 | cmp = f(left, right); | ||
288 | if (cmp) | ||
289 | break; | ||
290 | } | ||
291 | |||
292 | return cmp; | ||
293 | } | 81 | } |
294 | 82 | ||
295 | /* | 83 | /* |
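The old sym->hist array hanging off struct symbol is replaced here by a sym_priv area that symbol__priv() locates behind each symbol (sized via symbol_conf.priv_size), with the hit counters allocated lazily in symbol_filter(). A small standalone sketch of just the sizing and allocation step; the struct mirrors the hunk above, while the helper name and the flexible-array spelling are illustrative:

	/* One u64 hit counter per byte of the symbol, plus the running sum. */
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>

	typedef uint64_t u64;

	struct sym_hist {
		u64 sum;
		u64 ip[];	/* indexed by byte offset into the symbol */
	};

	static struct sym_hist *alloc_hist(u64 sym_start, u64 sym_end)
	{
		size_t size = sizeof(struct sym_hist) +
			      (sym_end - sym_start) * sizeof(u64);
		struct sym_hist *h = malloc(size);

		if (h)
			memset(h, 0, size);
		return h;
	}

hist_hit() in the next hunk then bumps h->sum and h->ip[ip - sym->start] for every sample that lands inside the symbol.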
@@ -299,380 +87,81 @@ static void hist_hit(struct hist_entry *he, u64 ip) | |||
299 | { | 87 | { |
300 | unsigned int sym_size, offset; | 88 | unsigned int sym_size, offset; |
301 | struct symbol *sym = he->sym; | 89 | struct symbol *sym = he->sym; |
90 | struct sym_priv *priv; | ||
91 | struct sym_hist *h; | ||
302 | 92 | ||
303 | he->count++; | 93 | he->count++; |
304 | 94 | ||
305 | if (!sym || !sym->hist) | 95 | if (!sym || !he->map) |
96 | return; | ||
97 | |||
98 | priv = symbol__priv(sym); | ||
99 | if (!priv->hist) | ||
306 | return; | 100 | return; |
307 | 101 | ||
308 | sym_size = sym->end - sym->start; | 102 | sym_size = sym->end - sym->start; |
309 | offset = ip - sym->start; | 103 | offset = ip - sym->start; |
310 | 104 | ||
105 | if (verbose) | ||
106 | fprintf(stderr, "%s: ip=%Lx\n", __func__, | ||
107 | he->map->unmap_ip(he->map, ip)); | ||
108 | |||
311 | if (offset >= sym_size) | 109 | if (offset >= sym_size) |
312 | return; | 110 | return; |
313 | 111 | ||
314 | sym->hist_sum++; | 112 | h = priv->hist; |
315 | sym->hist[offset]++; | 113 | h->sum++; |
114 | h->ip[offset]++; | ||
316 | 115 | ||
317 | if (verbose >= 3) | 116 | if (verbose >= 3) |
318 | printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", | 117 | printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", |
319 | (void *)(unsigned long)he->sym->start, | 118 | (void *)(unsigned long)he->sym->start, |
320 | he->sym->name, | 119 | he->sym->name, |
321 | (void *)(unsigned long)ip, ip - he->sym->start, | 120 | (void *)(unsigned long)ip, ip - he->sym->start, |
322 | sym->hist[offset]); | 121 | h->ip[offset]); |
323 | } | 122 | } |
324 | 123 | ||
325 | static int | 124 | static int hist_entry__add(struct addr_location *al, u64 count) |
326 | hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, | ||
327 | struct symbol *sym, u64 ip, char level) | ||
328 | { | 125 | { |
329 | struct rb_node **p = &hist.rb_node; | 126 | bool hit; |
330 | struct rb_node *parent = NULL; | 127 | struct hist_entry *he = __hist_entry__add(al, NULL, count, &hit); |
331 | struct hist_entry *he; | 128 | if (he == NULL) |
332 | struct hist_entry entry = { | ||
333 | .thread = thread, | ||
334 | .map = map, | ||
335 | .dso = dso, | ||
336 | .sym = sym, | ||
337 | .ip = ip, | ||
338 | .level = level, | ||
339 | .count = 1, | ||
340 | }; | ||
341 | int cmp; | ||
342 | |||
343 | while (*p != NULL) { | ||
344 | parent = *p; | ||
345 | he = rb_entry(parent, struct hist_entry, rb_node); | ||
346 | |||
347 | cmp = hist_entry__cmp(&entry, he); | ||
348 | |||
349 | if (!cmp) { | ||
350 | hist_hit(he, ip); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | if (cmp < 0) | ||
356 | p = &(*p)->rb_left; | ||
357 | else | ||
358 | p = &(*p)->rb_right; | ||
359 | } | ||
360 | |||
361 | he = malloc(sizeof(*he)); | ||
362 | if (!he) | ||
363 | return -ENOMEM; | 129 | return -ENOMEM; |
364 | *he = entry; | 130 | hist_hit(he, al->addr); |
365 | rb_link_node(&he->rb_node, parent, p); | ||
366 | rb_insert_color(&he->rb_node, &hist); | ||
367 | |||
368 | return 0; | 131 | return 0; |
369 | } | 132 | } |
370 | 133 | ||
371 | static void hist_entry__free(struct hist_entry *he) | 134 | static int process_sample_event(event_t *event) |
372 | { | ||
373 | free(he); | ||
374 | } | ||
375 | |||
376 | /* | ||
377 | * collapse the histogram | ||
378 | */ | ||
379 | |||
380 | static struct rb_root collapse_hists; | ||
381 | |||
382 | static void collapse__insert_entry(struct hist_entry *he) | ||
383 | { | ||
384 | struct rb_node **p = &collapse_hists.rb_node; | ||
385 | struct rb_node *parent = NULL; | ||
386 | struct hist_entry *iter; | ||
387 | int64_t cmp; | ||
388 | |||
389 | while (*p != NULL) { | ||
390 | parent = *p; | ||
391 | iter = rb_entry(parent, struct hist_entry, rb_node); | ||
392 | |||
393 | cmp = hist_entry__collapse(iter, he); | ||
394 | |||
395 | if (!cmp) { | ||
396 | iter->count += he->count; | ||
397 | hist_entry__free(he); | ||
398 | return; | ||
399 | } | ||
400 | |||
401 | if (cmp < 0) | ||
402 | p = &(*p)->rb_left; | ||
403 | else | ||
404 | p = &(*p)->rb_right; | ||
405 | } | ||
406 | |||
407 | rb_link_node(&he->rb_node, parent, p); | ||
408 | rb_insert_color(&he->rb_node, &collapse_hists); | ||
409 | } | ||
410 | |||
411 | static void collapse__resort(void) | ||
412 | { | ||
413 | struct rb_node *next; | ||
414 | struct hist_entry *n; | ||
415 | |||
416 | if (!sort__need_collapse) | ||
417 | return; | ||
418 | |||
419 | next = rb_first(&hist); | ||
420 | while (next) { | ||
421 | n = rb_entry(next, struct hist_entry, rb_node); | ||
422 | next = rb_next(&n->rb_node); | ||
423 | |||
424 | rb_erase(&n->rb_node, &hist); | ||
425 | collapse__insert_entry(n); | ||
426 | } | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * reverse the map, sort on count. | ||
431 | */ | ||
432 | |||
433 | static struct rb_root output_hists; | ||
434 | |||
435 | static void output__insert_entry(struct hist_entry *he) | ||
436 | { | 135 | { |
437 | struct rb_node **p = &output_hists.rb_node; | 136 | struct addr_location al; |
438 | struct rb_node *parent = NULL; | ||
439 | struct hist_entry *iter; | ||
440 | 137 | ||
441 | while (*p != NULL) { | 138 | dump_printf("(IP, %d): %d: %p\n", event->header.misc, |
442 | parent = *p; | 139 | event->ip.pid, (void *)(long)event->ip.ip); |
443 | iter = rb_entry(parent, struct hist_entry, rb_node); | ||
444 | 140 | ||
445 | if (he->count > iter->count) | 141 | if (event__preprocess_sample(event, &al, symbol_filter) < 0) { |
446 | p = &(*p)->rb_left; | ||
447 | else | ||
448 | p = &(*p)->rb_right; | ||
449 | } | ||
450 | |||
451 | rb_link_node(&he->rb_node, parent, p); | ||
452 | rb_insert_color(&he->rb_node, &output_hists); | ||
453 | } | ||
454 | |||
455 | static void output__resort(void) | ||
456 | { | ||
457 | struct rb_node *next; | ||
458 | struct hist_entry *n; | ||
459 | struct rb_root *tree = &hist; | ||
460 | |||
461 | if (sort__need_collapse) | ||
462 | tree = &collapse_hists; | ||
463 | |||
464 | next = rb_first(tree); | ||
465 | |||
466 | while (next) { | ||
467 | n = rb_entry(next, struct hist_entry, rb_node); | ||
468 | next = rb_next(&n->rb_node); | ||
469 | |||
470 | rb_erase(&n->rb_node, tree); | ||
471 | output__insert_entry(n); | ||
472 | } | ||
473 | } | ||
474 | |||
475 | static unsigned long total = 0, | ||
476 | total_mmap = 0, | ||
477 | total_comm = 0, | ||
478 | total_fork = 0, | ||
479 | total_unknown = 0; | ||
480 | |||
481 | static int | ||
482 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) | ||
483 | { | ||
484 | char level; | ||
485 | int show = 0; | ||
486 | struct dso *dso = NULL; | ||
487 | struct thread *thread; | ||
488 | u64 ip = event->ip.ip; | ||
489 | struct map *map = NULL; | ||
490 | |||
491 | thread = threads__findnew(event->ip.pid, &threads, &last_match); | ||
492 | |||
493 | dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", | ||
494 | (void *)(offset + head), | ||
495 | (void *)(long)(event->header.size), | ||
496 | event->header.misc, | ||
497 | event->ip.pid, | ||
498 | (void *)(long)ip); | ||
499 | |||
500 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
501 | |||
502 | if (thread == NULL) { | ||
503 | fprintf(stderr, "problem processing %d event, skipping it.\n", | 142 | fprintf(stderr, "problem processing %d event, skipping it.\n", |
504 | event->header.type); | 143 | event->header.type); |
505 | return -1; | 144 | return -1; |
506 | } | 145 | } |
507 | 146 | ||
508 | if (event->header.misc & PERF_RECORD_MISC_KERNEL) { | 147 | if (hist_entry__add(&al, 1)) { |
509 | show = SHOW_KERNEL; | 148 | fprintf(stderr, "problem incrementing symbol count, " |
510 | level = 'k'; | 149 | "skipping event\n"); |
511 | |||
512 | dso = kernel_dso; | ||
513 | |||
514 | dump_printf(" ...... dso: %s\n", dso->name); | ||
515 | |||
516 | } else if (event->header.misc & PERF_RECORD_MISC_USER) { | ||
517 | |||
518 | show = SHOW_USER; | ||
519 | level = '.'; | ||
520 | |||
521 | map = thread__find_map(thread, ip); | ||
522 | if (map != NULL) { | ||
523 | ip = map->map_ip(map, ip); | ||
524 | dso = map->dso; | ||
525 | } else { | ||
526 | /* | ||
527 | * If this is outside of all known maps, | ||
528 | * and is a negative address, try to look it | ||
529 | * up in the kernel dso, as it might be a | ||
530 | * vsyscall (which executes in user-mode): | ||
531 | */ | ||
532 | if ((long long)ip < 0) | ||
533 | dso = kernel_dso; | ||
534 | } | ||
535 | dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); | ||
536 | |||
537 | } else { | ||
538 | show = SHOW_HV; | ||
539 | level = 'H'; | ||
540 | dump_printf(" ...... dso: [hypervisor]\n"); | ||
541 | } | ||
542 | |||
543 | if (show & show_mask) { | ||
544 | struct symbol *sym = NULL; | ||
545 | |||
546 | if (dso) | ||
547 | sym = dso->find_symbol(dso, ip); | ||
548 | |||
549 | if (hist_entry__add(thread, map, dso, sym, ip, level)) { | ||
550 | fprintf(stderr, | ||
551 | "problem incrementing symbol count, skipping event\n"); | ||
552 | return -1; | ||
553 | } | ||
554 | } | ||
555 | total++; | ||
556 | |||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | static int | ||
561 | process_mmap_event(event_t *event, unsigned long offset, unsigned long head) | ||
562 | { | ||
563 | struct thread *thread; | ||
564 | struct map *map = map__new(&event->mmap, NULL, 0); | ||
565 | |||
566 | thread = threads__findnew(event->mmap.pid, &threads, &last_match); | ||
567 | |||
568 | dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", | ||
569 | (void *)(offset + head), | ||
570 | (void *)(long)(event->header.size), | ||
571 | event->mmap.pid, | ||
572 | (void *)(long)event->mmap.start, | ||
573 | (void *)(long)event->mmap.len, | ||
574 | (void *)(long)event->mmap.pgoff, | ||
575 | event->mmap.filename); | ||
576 | |||
577 | if (thread == NULL || map == NULL) { | ||
578 | dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | thread__insert_map(thread, map); | ||
583 | total_mmap++; | ||
584 | |||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | static int | ||
589 | process_comm_event(event_t *event, unsigned long offset, unsigned long head) | ||
590 | { | ||
591 | struct thread *thread; | ||
592 | |||
593 | thread = threads__findnew(event->comm.pid, &threads, &last_match); | ||
594 | dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", | ||
595 | (void *)(offset + head), | ||
596 | (void *)(long)(event->header.size), | ||
597 | event->comm.comm, event->comm.pid); | ||
598 | |||
599 | if (thread == NULL || | ||
600 | thread__set_comm(thread, event->comm.comm)) { | ||
601 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | ||
602 | return -1; | ||
603 | } | ||
604 | total_comm++; | ||
605 | |||
606 | return 0; | ||
607 | } | ||
608 | |||
609 | static int | ||
610 | process_fork_event(event_t *event, unsigned long offset, unsigned long head) | ||
611 | { | ||
612 | struct thread *thread; | ||
613 | struct thread *parent; | ||
614 | |||
615 | thread = threads__findnew(event->fork.pid, &threads, &last_match); | ||
616 | parent = threads__findnew(event->fork.ppid, &threads, &last_match); | ||
617 | dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", | ||
618 | (void *)(offset + head), | ||
619 | (void *)(long)(event->header.size), | ||
620 | event->fork.pid, event->fork.ppid); | ||
621 | |||
622 | /* | ||
623 | * A thread clone will have the same PID for both | ||
624 | * parent and child. | ||
625 | */ | ||
626 | if (thread == parent) | ||
627 | return 0; | ||
628 | |||
629 | if (!thread || !parent || thread__fork(thread, parent)) { | ||
630 | dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); | ||
631 | return -1; | ||
632 | } | ||
633 | total_fork++; | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | static int | ||
639 | process_event(event_t *event, unsigned long offset, unsigned long head) | ||
640 | { | ||
641 | switch (event->header.type) { | ||
642 | case PERF_RECORD_SAMPLE: | ||
643 | return process_sample_event(event, offset, head); | ||
644 | |||
645 | case PERF_RECORD_MMAP: | ||
646 | return process_mmap_event(event, offset, head); | ||
647 | |||
648 | case PERF_RECORD_COMM: | ||
649 | return process_comm_event(event, offset, head); | ||
650 | |||
651 | case PERF_RECORD_FORK: | ||
652 | return process_fork_event(event, offset, head); | ||
653 | /* | ||
654 | * We dont process them right now but they are fine: | ||
655 | */ | ||
656 | |||
657 | case PERF_RECORD_THROTTLE: | ||
658 | case PERF_RECORD_UNTHROTTLE: | ||
659 | return 0; | ||
660 | |||
661 | default: | ||
662 | return -1; | 150 | return -1; |
663 | } | 151 | } |
664 | 152 | ||
665 | return 0; | 153 | return 0; |
666 | } | 154 | } |
667 | 155 | ||
668 | static int | 156 | static int parse_line(FILE *file, struct hist_entry *he, u64 len) |
669 | parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) | ||
670 | { | 157 | { |
158 | struct symbol *sym = he->sym; | ||
671 | char *line = NULL, *tmp, *tmp2; | 159 | char *line = NULL, *tmp, *tmp2; |
672 | static const char *prev_line; | 160 | static const char *prev_line; |
673 | static const char *prev_color; | 161 | static const char *prev_color; |
674 | unsigned int offset; | 162 | unsigned int offset; |
675 | size_t line_len; | 163 | size_t line_len; |
164 | u64 start; | ||
676 | s64 line_ip; | 165 | s64 line_ip; |
677 | int ret; | 166 | int ret; |
678 | char *c; | 167 | char *c; |
@@ -709,22 +198,26 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) | |||
709 | line_ip = -1; | 198 | line_ip = -1; |
710 | } | 199 | } |
711 | 200 | ||
201 | start = he->map->unmap_ip(he->map, sym->start); | ||
202 | |||
712 | if (line_ip != -1) { | 203 | if (line_ip != -1) { |
713 | const char *path = NULL; | 204 | const char *path = NULL; |
714 | unsigned int hits = 0; | 205 | unsigned int hits = 0; |
715 | double percent = 0.0; | 206 | double percent = 0.0; |
716 | const char *color; | 207 | const char *color; |
717 | struct sym_ext *sym_ext = sym->priv; | 208 | struct sym_priv *priv = symbol__priv(sym); |
209 | struct sym_ext *sym_ext = priv->ext; | ||
210 | struct sym_hist *h = priv->hist; | ||
718 | 211 | ||
719 | offset = line_ip - start; | 212 | offset = line_ip - start; |
720 | if (offset < len) | 213 | if (offset < len) |
721 | hits = sym->hist[offset]; | 214 | hits = h->ip[offset]; |
722 | 215 | ||
723 | if (offset < len && sym_ext) { | 216 | if (offset < len && sym_ext) { |
724 | path = sym_ext[offset].path; | 217 | path = sym_ext[offset].path; |
725 | percent = sym_ext[offset].percent; | 218 | percent = sym_ext[offset].percent; |
726 | } else if (sym->hist_sum) | 219 | } else if (h->sum) |
727 | percent = 100.0 * hits / sym->hist_sum; | 220 | percent = 100.0 * hits / h->sum; |
728 | 221 | ||
729 | color = get_percent_color(percent); | 222 | color = get_percent_color(percent); |
730 | 223 | ||
@@ -777,9 +270,10 @@ static void insert_source_line(struct sym_ext *sym_ext) | |||
777 | rb_insert_color(&sym_ext->node, &root_sym_ext); | 270 | rb_insert_color(&sym_ext->node, &root_sym_ext); |
778 | } | 271 | } |
779 | 272 | ||
780 | static void free_source_line(struct symbol *sym, int len) | 273 | static void free_source_line(struct hist_entry *he, int len) |
781 | { | 274 | { |
782 | struct sym_ext *sym_ext = sym->priv; | 275 | struct sym_priv *priv = symbol__priv(he->sym); |
276 | struct sym_ext *sym_ext = priv->ext; | ||
783 | int i; | 277 | int i; |
784 | 278 | ||
785 | if (!sym_ext) | 279 | if (!sym_ext) |
@@ -789,26 +283,30 @@ static void free_source_line(struct symbol *sym, int len) | |||
789 | free(sym_ext[i].path); | 283 | free(sym_ext[i].path); |
790 | free(sym_ext); | 284 | free(sym_ext); |
791 | 285 | ||
792 | sym->priv = NULL; | 286 | priv->ext = NULL; |
793 | root_sym_ext = RB_ROOT; | 287 | root_sym_ext = RB_ROOT; |
794 | } | 288 | } |
795 | 289 | ||
796 | /* Get the filename:line for the colored entries */ | 290 | /* Get the filename:line for the colored entries */ |
797 | static void | 291 | static void |
798 | get_source_line(struct symbol *sym, u64 start, int len, const char *filename) | 292 | get_source_line(struct hist_entry *he, int len, const char *filename) |
799 | { | 293 | { |
294 | struct symbol *sym = he->sym; | ||
295 | u64 start; | ||
800 | int i; | 296 | int i; |
801 | char cmd[PATH_MAX * 2]; | 297 | char cmd[PATH_MAX * 2]; |
802 | struct sym_ext *sym_ext; | 298 | struct sym_ext *sym_ext; |
299 | struct sym_priv *priv = symbol__priv(sym); | ||
300 | struct sym_hist *h = priv->hist; | ||
803 | 301 | ||
804 | if (!sym->hist_sum) | 302 | if (!h->sum) |
805 | return; | 303 | return; |
806 | 304 | ||
807 | sym->priv = calloc(len, sizeof(struct sym_ext)); | 305 | sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext)); |
808 | if (!sym->priv) | 306 | if (!priv->ext) |
809 | return; | 307 | return; |
810 | 308 | ||
811 | sym_ext = sym->priv; | 309 | start = he->map->unmap_ip(he->map, sym->start); |
812 | 310 | ||
813 | for (i = 0; i < len; i++) { | 311 | for (i = 0; i < len; i++) { |
814 | char *path = NULL; | 312 | char *path = NULL; |
@@ -816,7 +314,7 @@ get_source_line(struct symbol *sym, u64 start, int len, const char *filename) | |||
816 | u64 offset; | 314 | u64 offset; |
817 | FILE *fp; | 315 | FILE *fp; |
818 | 316 | ||
819 | sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum; | 317 | sym_ext[i].percent = 100.0 * h->ip[i] / h->sum; |
820 | if (sym_ext[i].percent <= 0.5) | 318 | if (sym_ext[i].percent <= 0.5) |
821 | continue; | 319 | continue; |
822 | 320 | ||
@@ -870,33 +368,34 @@ static void print_summary(const char *filename) | |||
870 | } | 368 | } |
871 | } | 369 | } |
872 | 370 | ||
873 | static void annotate_sym(struct dso *dso, struct symbol *sym) | 371 | static void annotate_sym(struct hist_entry *he) |
874 | { | 372 | { |
875 | const char *filename = dso->name, *d_filename; | 373 | struct map *map = he->map; |
876 | u64 start, end, len; | 374 | struct dso *dso = map->dso; |
375 | struct symbol *sym = he->sym; | ||
376 | const char *filename = dso->long_name, *d_filename; | ||
377 | u64 len; | ||
877 | char command[PATH_MAX*2]; | 378 | char command[PATH_MAX*2]; |
878 | FILE *file; | 379 | FILE *file; |
879 | 380 | ||
880 | if (!filename) | 381 | if (!filename) |
881 | return; | 382 | return; |
882 | if (sym->module) | 383 | |
883 | filename = sym->module->path; | 384 | if (verbose) |
884 | else if (dso == kernel_dso) | 385 | fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n", |
885 | filename = vmlinux_name; | 386 | __func__, filename, sym->name, |
886 | 387 | map->unmap_ip(map, sym->start), | |
887 | start = sym->obj_start; | 388 | map->unmap_ip(map, sym->end)); |
888 | if (!start) | 389 | |
889 | start = sym->start; | ||
890 | if (full_paths) | 390 | if (full_paths) |
891 | d_filename = filename; | 391 | d_filename = filename; |
892 | else | 392 | else |
893 | d_filename = basename(filename); | 393 | d_filename = basename(filename); |
894 | 394 | ||
895 | end = start + sym->end - sym->start + 1; | ||
896 | len = sym->end - sym->start; | 395 | len = sym->end - sym->start; |
897 | 396 | ||
898 | if (print_line) { | 397 | if (print_line) { |
899 | get_source_line(sym, start, len, filename); | 398 | get_source_line(he, len, filename); |
900 | print_summary(filename); | 399 | print_summary(filename); |
901 | } | 400 | } |
902 | 401 | ||
@@ -905,10 +404,12 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) | |||
905 | printf("------------------------------------------------\n"); | 404 | printf("------------------------------------------------\n"); |
906 | 405 | ||
907 | if (verbose >= 2) | 406 | if (verbose >= 2) |
908 | printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); | 407 | printf("annotating [%p] %30s : [%p] %30s\n", |
408 | dso, dso->long_name, sym, sym->name); | ||
909 | 409 | ||
910 | sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", | 410 | sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", |
911 | (u64)start, (u64)end, filename, filename); | 411 | map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end), |
412 | filename, filename); | ||
912 | 413 | ||
913 | if (verbose >= 3) | 414 | if (verbose >= 3) |
914 | printf("doing: %s\n", command); | 415 | printf("doing: %s\n", command); |
@@ -918,159 +419,78 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) | |||
918 | return; | 419 | return; |
919 | 420 | ||
920 | while (!feof(file)) { | 421 | while (!feof(file)) { |
921 | if (parse_line(file, sym, start, len) < 0) | 422 | if (parse_line(file, he, len) < 0) |
922 | break; | 423 | break; |
923 | } | 424 | } |
924 | 425 | ||
925 | pclose(file); | 426 | pclose(file); |
926 | if (print_line) | 427 | if (print_line) |
927 | free_source_line(sym, len); | 428 | free_source_line(he, len); |
928 | } | 429 | } |
929 | 430 | ||
930 | static void find_annotations(void) | 431 | static void find_annotations(void) |
931 | { | 432 | { |
932 | struct rb_node *nd; | 433 | struct rb_node *nd; |
933 | struct dso *dso; | ||
934 | int count = 0; | ||
935 | |||
936 | list_for_each_entry(dso, &dsos, node) { | ||
937 | |||
938 | for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) { | ||
939 | struct symbol *sym = rb_entry(nd, struct symbol, rb_node); | ||
940 | |||
941 | if (sym->hist) { | ||
942 | annotate_sym(dso, sym); | ||
943 | count++; | ||
944 | } | ||
945 | } | ||
946 | } | ||
947 | |||
948 | if (!count) | ||
949 | printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter); | ||
950 | } | ||
951 | |||
952 | static int __cmd_annotate(void) | ||
953 | { | ||
954 | int ret, rc = EXIT_FAILURE; | ||
955 | unsigned long offset = 0; | ||
956 | unsigned long head = 0; | ||
957 | struct stat input_stat; | ||
958 | event_t *event; | ||
959 | uint32_t size; | ||
960 | char *buf; | ||
961 | |||
962 | register_idle_thread(&threads, &last_match); | ||
963 | |||
964 | input = open(input_name, O_RDONLY); | ||
965 | if (input < 0) { | ||
966 | perror("failed to open file"); | ||
967 | exit(-1); | ||
968 | } | ||
969 | |||
970 | ret = fstat(input, &input_stat); | ||
971 | if (ret < 0) { | ||
972 | perror("failed to stat file"); | ||
973 | exit(-1); | ||
974 | } | ||
975 | |||
976 | if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { | ||
977 | fprintf(stderr, "file: %s not owned by current user or root\n", input_name); | ||
978 | exit(-1); | ||
979 | } | ||
980 | |||
981 | if (!input_stat.st_size) { | ||
982 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | ||
983 | exit(0); | ||
984 | } | ||
985 | |||
986 | if (load_kernel() < 0) { | ||
987 | perror("failed to load kernel symbols"); | ||
988 | return EXIT_FAILURE; | ||
989 | } | ||
990 | |||
991 | remap: | ||
992 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | ||
993 | MAP_SHARED, input, offset); | ||
994 | if (buf == MAP_FAILED) { | ||
995 | perror("failed to mmap file"); | ||
996 | exit(-1); | ||
997 | } | ||
998 | |||
999 | more: | ||
1000 | event = (event_t *)(buf + head); | ||
1001 | 434 | ||
1002 | size = event->header.size; | 435 | for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { |
1003 | if (!size) | 436 | struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); |
1004 | size = 8; | 437 | struct sym_priv *priv; |
1005 | 438 | ||
1006 | if (head + event->header.size >= page_size * mmap_window) { | 439 | if (he->sym == NULL) |
1007 | unsigned long shift = page_size * (head / page_size); | 440 | continue; |
1008 | int munmap_ret; | ||
1009 | |||
1010 | munmap_ret = munmap(buf, page_size * mmap_window); | ||
1011 | assert(munmap_ret == 0); | ||
1012 | |||
1013 | offset += shift; | ||
1014 | head -= shift; | ||
1015 | goto remap; | ||
1016 | } | ||
1017 | |||
1018 | size = event->header.size; | ||
1019 | |||
1020 | dump_printf("%p [%p]: event: %d\n", | ||
1021 | (void *)(offset + head), | ||
1022 | (void *)(long)event->header.size, | ||
1023 | event->header.type); | ||
1024 | |||
1025 | if (!size || process_event(event, offset, head) < 0) { | ||
1026 | |||
1027 | dump_printf("%p [%p]: skipping unknown header type: %d\n", | ||
1028 | (void *)(offset + head), | ||
1029 | (void *)(long)(event->header.size), | ||
1030 | event->header.type); | ||
1031 | 441 | ||
1032 | total_unknown++; | 442 | priv = symbol__priv(he->sym); |
443 | if (priv->hist == NULL) | ||
444 | continue; | ||
1033 | 445 | ||
446 | annotate_sym(he); | ||
1034 | /* | 447 | /* |
1035 | * assume we lost track of the stream, check alignment, and | 448 | * Since we have a hist_entry per IP for the same symbol, free |
1036 | * increment a single u64 in the hope to catch on again 'soon'. | 449 | * he->sym->hist to signal we already processed this symbol. |
1037 | */ | 450 | */ |
1038 | 451 | free(priv->hist); | |
1039 | if (unlikely(head & 7)) | 452 | priv->hist = NULL; |
1040 | head &= ~7ULL; | ||
1041 | |||
1042 | size = 8; | ||
1043 | } | 453 | } |
454 | } | ||
1044 | 455 | ||
1045 | head += size; | 456 | static struct perf_file_handler file_handler = { |
457 | .process_sample_event = process_sample_event, | ||
458 | .process_mmap_event = event__process_mmap, | ||
459 | .process_comm_event = event__process_comm, | ||
460 | .process_fork_event = event__process_task, | ||
461 | }; | ||
1046 | 462 | ||
1047 | if (offset + head < (unsigned long)input_stat.st_size) | 463 | static int __cmd_annotate(void) |
1048 | goto more; | 464 | { |
465 | struct perf_header *header; | ||
466 | struct thread *idle; | ||
467 | int ret; | ||
1049 | 468 | ||
1050 | rc = EXIT_SUCCESS; | 469 | idle = register_idle_thread(); |
1051 | close(input); | 470 | register_perf_file_handler(&file_handler); |
1052 | 471 | ||
1053 | dump_printf(" IP events: %10ld\n", total); | 472 | ret = mmap_dispatch_perf_file(&header, input_name, 0, 0, |
1054 | dump_printf(" mmap events: %10ld\n", total_mmap); | 473 | &event__cwdlen, &event__cwd); |
1055 | dump_printf(" comm events: %10ld\n", total_comm); | 474 | if (ret) |
1056 | dump_printf(" fork events: %10ld\n", total_fork); | 475 | return ret; |
1057 | dump_printf(" unknown events: %10ld\n", total_unknown); | ||
1058 | 476 | ||
1059 | if (dump_trace) | 477 | if (dump_trace) { |
478 | event__print_totals(); | ||
1060 | return 0; | 479 | return 0; |
480 | } | ||
1061 | 481 | ||
1062 | if (verbose >= 3) | 482 | if (verbose > 3) |
1063 | threads__fprintf(stdout, &threads); | 483 | threads__fprintf(stdout); |
1064 | 484 | ||
1065 | if (verbose >= 2) | 485 | if (verbose > 2) |
1066 | dsos__fprintf(stdout); | 486 | dsos__fprintf(stdout); |
1067 | 487 | ||
1068 | collapse__resort(); | 488 | collapse__resort(); |
1069 | output__resort(); | 489 | output__resort(event__total[0]); |
1070 | 490 | ||
1071 | find_annotations(); | 491 | find_annotations(); |
1072 | 492 | ||
1073 | return rc; | 493 | return ret; |
1074 | } | 494 | } |
1075 | 495 | ||
1076 | static const char * const annotate_usage[] = { | 496 | static const char * const annotate_usage[] = { |
@@ -1088,8 +508,9 @@ static const struct option options[] = { | |||
1088 | "be more verbose (show symbol address, etc)"), | 508 | "be more verbose (show symbol address, etc)"), |
1089 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | 509 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
1090 | "dump raw trace in ASCII"), | 510 | "dump raw trace in ASCII"), |
1091 | OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), | 511 | OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, |
1092 | OPT_BOOLEAN('m', "modules", &modules, | 512 | "file", "vmlinux pathname"), |
513 | OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, | ||
1093 | "load module symbols - WARNING: use only with -k and LIVE kernel"), | 514 | "load module symbols - WARNING: use only with -k and LIVE kernel"), |
1094 | OPT_BOOLEAN('l', "print-line", &print_line, | 515 | OPT_BOOLEAN('l', "print-line", &print_line, |
1095 | "print matching source lines (may be slow)"), | 516 | "print matching source lines (may be slow)"), |
@@ -1115,9 +536,8 @@ static void setup_sorting(void) | |||
1115 | 536 | ||
1116 | int cmd_annotate(int argc, const char **argv, const char *prefix __used) | 537 | int cmd_annotate(int argc, const char **argv, const char *prefix __used) |
1117 | { | 538 | { |
1118 | symbol__init(); | 539 | if (symbol__init(&symbol_conf) < 0) |
1119 | 540 | return -1; | |
1120 | page_size = getpagesize(); | ||
1121 | 541 | ||
1122 | argc = parse_options(argc, argv, options, annotate_usage, 0); | 542 | argc = parse_options(argc, argv, options, annotate_usage, 0); |
1123 | 543 | ||
@@ -1134,10 +554,13 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) | |||
1134 | sym_hist_filter = argv[0]; | 554 | sym_hist_filter = argv[0]; |
1135 | } | 555 | } |
1136 | 556 | ||
1137 | if (!sym_hist_filter) | ||
1138 | usage_with_options(annotate_usage, options); | ||
1139 | |||
1140 | setup_pager(); | 557 | setup_pager(); |
1141 | 558 | ||
559 | if (field_sep && *field_sep == '.') { | ||
559 | fputs("'.' is the only invalid --field-separator argument\n", | ||
561 | stderr); | ||
562 | exit(129); | ||
563 | } | ||
564 | |||
1142 | return __cmd_annotate(); | 565 | return __cmd_annotate(); |
1143 | } | 566 | } |
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c new file mode 100644 index 000000000000..e043eb83092a --- /dev/null +++ b/tools/perf/builtin-bench.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * | ||
3 | * builtin-bench.c | ||
4 | * | ||
5 | * General benchmarking subsystem provided by perf | ||
6 | * | ||
7 | * Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * | ||
13 | * Available subsystem list: | ||
14 | * sched ... scheduler and IPC mechanism | ||
15 | * mem ... memory access performance | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include "perf.h" | ||
20 | #include "util/util.h" | ||
21 | #include "util/parse-options.h" | ||
22 | #include "builtin.h" | ||
23 | #include "bench/bench.h" | ||
24 | |||
25 | #include <stdio.h> | ||
26 | #include <stdlib.h> | ||
27 | #include <string.h> | ||
28 | |||
29 | struct bench_suite { | ||
30 | const char *name; | ||
31 | const char *summary; | ||
32 | int (*fn)(int, const char **, const char *); | ||
33 | }; | ||
34 | |||
35 | static struct bench_suite sched_suites[] = { | ||
36 | { "messaging", | ||
37 | "Benchmark for scheduler and IPC mechanisms", | ||
38 | bench_sched_messaging }, | ||
39 | { "pipe", | ||
40 | "Flood of communication over pipe() between two processes", | ||
41 | bench_sched_pipe }, | ||
42 | { NULL, | ||
43 | NULL, | ||
44 | NULL } | ||
45 | }; | ||
46 | |||
47 | static struct bench_suite mem_suites[] = { | ||
48 | { "memcpy", | ||
49 | "Simple memory copy in various ways", | ||
50 | bench_mem_memcpy }, | ||
51 | { NULL, | ||
52 | NULL, | ||
53 | NULL } | ||
54 | }; | ||
55 | |||
56 | struct bench_subsys { | ||
57 | const char *name; | ||
58 | const char *summary; | ||
59 | struct bench_suite *suites; | ||
60 | }; | ||
61 | |||
62 | static struct bench_subsys subsystems[] = { | ||
63 | { "sched", | ||
64 | "scheduler and IPC mechanism", | ||
65 | sched_suites }, | ||
66 | { "mem", | ||
67 | "memory access performance", | ||
68 | mem_suites }, | ||
69 | { NULL, | ||
70 | NULL, | ||
71 | NULL } | ||
72 | }; | ||
73 | |||
74 | static void dump_suites(int subsys_index) | ||
75 | { | ||
76 | int i; | ||
77 | |||
78 | printf("List of available suites for %s...\n\n", | ||
79 | subsystems[subsys_index].name); | ||
80 | |||
81 | for (i = 0; subsystems[subsys_index].suites[i].name; i++) | ||
82 | printf("\t%s: %s\n", | ||
83 | subsystems[subsys_index].suites[i].name, | ||
84 | subsystems[subsys_index].suites[i].summary); | ||
85 | |||
86 | printf("\n"); | ||
87 | return; | ||
88 | } | ||
89 | |||
90 | static char *bench_format_str; | ||
91 | int bench_format = BENCH_FORMAT_DEFAULT; | ||
92 | |||
93 | static const struct option bench_options[] = { | ||
94 | OPT_STRING('f', "format", &bench_format_str, "default", | ||
95 | "Specify format style"), | ||
96 | OPT_END() | ||
97 | }; | ||
98 | |||
99 | static const char * const bench_usage[] = { | ||
100 | "perf bench [<common options>] <subsystem> <suite> [<options>]", | ||
101 | NULL | ||
102 | }; | ||
103 | |||
104 | static void print_usage(void) | ||
105 | { | ||
106 | int i; | ||
107 | |||
108 | printf("Usage: \n"); | ||
109 | for (i = 0; bench_usage[i]; i++) | ||
110 | printf("\t%s\n", bench_usage[i]); | ||
111 | printf("\n"); | ||
112 | |||
113 | printf("List of available subsystems...\n\n"); | ||
114 | |||
115 | for (i = 0; subsystems[i].name; i++) | ||
116 | printf("\t%s: %s\n", | ||
117 | subsystems[i].name, subsystems[i].summary); | ||
118 | printf("\n"); | ||
119 | } | ||
120 | |||
121 | static int bench_str2int(char *str) | ||
122 | { | ||
123 | if (!str) | ||
124 | return BENCH_FORMAT_DEFAULT; | ||
125 | |||
126 | if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR)) | ||
127 | return BENCH_FORMAT_DEFAULT; | ||
128 | else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR)) | ||
129 | return BENCH_FORMAT_SIMPLE; | ||
130 | |||
131 | return BENCH_FORMAT_UNKNOWN; | ||
132 | } | ||
133 | |||
134 | int cmd_bench(int argc, const char **argv, const char *prefix __used) | ||
135 | { | ||
136 | int i, j, status = 0; | ||
137 | |||
138 | if (argc < 2) { | ||
139 | /* No subsystem specified. */ | ||
140 | print_usage(); | ||
141 | goto end; | ||
142 | } | ||
143 | |||
144 | argc = parse_options(argc, argv, bench_options, bench_usage, | ||
145 | PARSE_OPT_STOP_AT_NON_OPTION); | ||
146 | |||
147 | bench_format = bench_str2int(bench_format_str); | ||
148 | if (bench_format == BENCH_FORMAT_UNKNOWN) { | ||
149 | printf("Unknown format descriptor:%s\n", bench_format_str); | ||
150 | goto end; | ||
151 | } | ||
152 | |||
153 | if (argc < 1) { | ||
154 | print_usage(); | ||
155 | goto end; | ||
156 | } | ||
157 | |||
158 | for (i = 0; subsystems[i].name; i++) { | ||
159 | if (strcmp(subsystems[i].name, argv[0])) | ||
160 | continue; | ||
161 | |||
162 | if (argc < 2) { | ||
163 | /* No suite specified. */ | ||
164 | dump_suites(i); | ||
165 | goto end; | ||
166 | } | ||
167 | |||
168 | for (j = 0; subsystems[i].suites[j].name; j++) { | ||
169 | if (strcmp(subsystems[i].suites[j].name, argv[1])) | ||
170 | continue; | ||
171 | |||
172 | if (bench_format == BENCH_FORMAT_DEFAULT) | ||
173 | printf("# Running %s/%s benchmark...\n", | ||
174 | subsystems[i].name, | ||
175 | subsystems[i].suites[j].name); | ||
176 | status = subsystems[i].suites[j].fn(argc - 1, | ||
177 | argv + 1, prefix); | ||
178 | goto end; | ||
179 | } | ||
180 | |||
181 | if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) { | ||
182 | dump_suites(i); | ||
183 | goto end; | ||
184 | } | ||
185 | |||
186 | printf("Unknown suite:%s for %s\n", argv[1], argv[0]); | ||
187 | status = 1; | ||
188 | goto end; | ||
189 | } | ||
190 | |||
191 | printf("Unknown subsystem:%s\n", argv[0]); | ||
192 | status = 1; | ||
193 | |||
194 | end: | ||
195 | return status; | ||
196 | } | ||
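
builtin-bench.c is deliberately table-driven: a suite is just a name, a one-line summary and an entry point, and cmd_bench() walks subsystems[] and then the matching suites[] array to dispatch. A minimal sketch of how a further suite could be hooked in, assuming a hypothetical bench_mem_memset() entry point with the same int (*)(int, const char **, const char *) signature declared in bench/bench.h:

    static struct bench_suite mem_suites[] = {
    	{ "memcpy",
    	  "Simple memory copy in various ways",
    	  bench_mem_memcpy },
    	{ "memset",				/* hypothetical new suite */
    	  "Simple memory set in various ways",
    	  bench_mem_memset },			/* assumed declared in bench/bench.h */
    	{ NULL,
    	  NULL,
    	  NULL }				/* terminator must stay last */
    };

No other change would be needed; dump_suites() and the dispatch loop in cmd_bench() pick the new entry up automatically.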
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c new file mode 100644 index 000000000000..7dee9d19ab7a --- /dev/null +++ b/tools/perf/builtin-buildid-list.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * builtin-buildid-list.c | ||
3 | * | ||
4 | * Builtin buildid-list command: list buildids in perf.data | ||
5 | * | ||
6 | * Copyright (C) 2009, Red Hat Inc. | ||
7 | * Copyright (C) 2009, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
8 | */ | ||
9 | #include "builtin.h" | ||
10 | #include "perf.h" | ||
11 | #include "util/cache.h" | ||
12 | #include "util/data_map.h" | ||
13 | #include "util/debug.h" | ||
14 | #include "util/header.h" | ||
15 | #include "util/parse-options.h" | ||
16 | #include "util/symbol.h" | ||
17 | |||
18 | static char const *input_name = "perf.data"; | ||
19 | static int force; | ||
20 | |||
21 | static const char *const buildid_list_usage[] = { | ||
22 | "perf report [<options>]", | ||
23 | NULL | ||
24 | }; | ||
25 | |||
26 | static const struct option options[] = { | ||
27 | OPT_STRING('i', "input", &input_name, "file", | ||
28 | "input file name"), | ||
29 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), | ||
30 | OPT_BOOLEAN('v', "verbose", &verbose, | ||
31 | "be more verbose"), | ||
32 | OPT_END() | ||
33 | }; | ||
34 | |||
35 | static int perf_file_section__process_buildids(struct perf_file_section *self, | ||
36 | int feat, int fd) | ||
37 | { | ||
38 | if (feat != HEADER_BUILD_ID) | ||
39 | return 0; | ||
40 | |||
41 | if (lseek(fd, self->offset, SEEK_SET) < 0) { | ||
42 | pr_warning("Failed to lseek to %Ld offset for buildids!\n", | ||
43 | self->offset); | ||
44 | return -1; | ||
45 | } | ||
46 | |||
47 | if (perf_header__read_build_ids(fd, self->offset, self->size)) { | ||
48 | pr_warning("Failed to read buildids!\n"); | ||
49 | return -1; | ||
50 | } | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int __cmd_buildid_list(void) | ||
56 | { | ||
57 | int err = -1; | ||
58 | struct perf_header *header; | ||
59 | struct perf_file_header f_header; | ||
60 | struct stat input_stat; | ||
61 | int input = open(input_name, O_RDONLY); | ||
62 | |||
63 | if (input < 0) { | ||
64 | pr_err("failed to open file: %s", input_name); | ||
65 | if (!strcmp(input_name, "perf.data")) | ||
66 | pr_err(" (try 'perf record' first)"); | ||
67 | pr_err("\n"); | ||
68 | goto out; | ||
69 | } | ||
70 | |||
71 | err = fstat(input, &input_stat); | ||
72 | if (err < 0) { | ||
73 | perror("failed to stat file"); | ||
74 | goto out_close; | ||
75 | } | ||
76 | |||
77 | if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { | ||
78 | pr_err("file %s not owned by current user or root\n", | ||
79 | input_name); | ||
80 | goto out_close; | ||
81 | } | ||
82 | |||
83 | if (!input_stat.st_size) { | ||
84 | pr_info("zero-sized file, nothing to do!\n"); | ||
85 | goto out_close; | ||
86 | } | ||
87 | |||
88 | err = -1; | ||
89 | header = perf_header__new(); | ||
90 | if (header == NULL) | ||
91 | goto out_close; | ||
92 | |||
93 | if (perf_file_header__read(&f_header, header, input) < 0) { | ||
94 | pr_warning("incompatible file format"); | ||
95 | goto out_close; | ||
96 | } | ||
97 | |||
98 | err = perf_header__process_sections(header, input, | ||
99 | perf_file_section__process_buildids); | ||
100 | |||
101 | if (err < 0) | ||
102 | goto out_close; | ||
103 | |||
104 | dsos__fprintf_buildid(stdout); | ||
105 | out_close: | ||
106 | close(input); | ||
107 | out: | ||
108 | return err; | ||
109 | } | ||
110 | |||
111 | int cmd_buildid_list(int argc, const char **argv, const char *prefix __used) | ||
112 | { | ||
113 | argc = parse_options(argc, argv, options, buildid_list_usage, 0); | ||
114 | setup_pager(); | ||
115 | return __cmd_buildid_list(); | ||
116 | } | ||
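
The net effect: once the HEADER_BUILD_ID section has been parsed, dsos__fprintf_buildid() is expected to emit one line per DSO seen in perf.data, pairing its build-id with its pathname, which is presumably what lets later tooling match a recording to the correct debuginfo.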
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 4fb8734a796e..9f810b17c25c 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c | |||
@@ -61,8 +61,7 @@ static const char *get_man_viewer_info(const char *name) | |||
61 | { | 61 | { |
62 | struct man_viewer_info_list *viewer; | 62 | struct man_viewer_info_list *viewer; |
63 | 63 | ||
64 | for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) | 64 | for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) { |
65 | { | ||
66 | if (!strcasecmp(name, viewer->name)) | 65 | if (!strcasecmp(name, viewer->name)) |
67 | return viewer->info; | 66 | return viewer->info; |
68 | } | 67 | } |
@@ -115,7 +114,7 @@ static int check_emacsclient_version(void) | |||
115 | return 0; | 114 | return 0; |
116 | } | 115 | } |
117 | 116 | ||
118 | static void exec_woman_emacs(const char* path, const char *page) | 117 | static void exec_woman_emacs(const char *path, const char *page) |
119 | { | 118 | { |
120 | if (!check_emacsclient_version()) { | 119 | if (!check_emacsclient_version()) { |
121 | /* This works only with emacsclient version >= 22. */ | 120 | /* This works only with emacsclient version >= 22. */ |
@@ -129,7 +128,7 @@ static void exec_woman_emacs(const char* path, const char *page) | |||
129 | } | 128 | } |
130 | } | 129 | } |
131 | 130 | ||
132 | static void exec_man_konqueror(const char* path, const char *page) | 131 | static void exec_man_konqueror(const char *path, const char *page) |
133 | { | 132 | { |
134 | const char *display = getenv("DISPLAY"); | 133 | const char *display = getenv("DISPLAY"); |
135 | if (display && *display) { | 134 | if (display && *display) { |
@@ -157,7 +156,7 @@ static void exec_man_konqueror(const char* path, const char *page) | |||
157 | } | 156 | } |
158 | } | 157 | } |
159 | 158 | ||
160 | static void exec_man_man(const char* path, const char *page) | 159 | static void exec_man_man(const char *path, const char *page) |
161 | { | 160 | { |
162 | if (!path) | 161 | if (!path) |
163 | path = "man"; | 162 | path = "man"; |
@@ -180,7 +179,7 @@ static void add_man_viewer(const char *name) | |||
180 | 179 | ||
181 | while (*p) | 180 | while (*p) |
182 | p = &((*p)->next); | 181 | p = &((*p)->next); |
183 | *p = calloc(1, (sizeof(**p) + len + 1)); | 182 | *p = zalloc(sizeof(**p) + len + 1); |
184 | strncpy((*p)->name, name, len); | 183 | strncpy((*p)->name, name, len); |
185 | } | 184 | } |
186 | 185 | ||
@@ -195,7 +194,7 @@ static void do_add_man_viewer_info(const char *name, | |||
195 | size_t len, | 194 | size_t len, |
196 | const char *value) | 195 | const char *value) |
197 | { | 196 | { |
198 | struct man_viewer_info_list *new = calloc(1, sizeof(*new) + len + 1); | 197 | struct man_viewer_info_list *new = zalloc(sizeof(*new) + len + 1); |
199 | 198 | ||
200 | strncpy(new->name, name, len); | 199 | strncpy(new->name, name, len); |
201 | new->info = strdup(value); | 200 | new->info = strdup(value); |
@@ -364,9 +363,8 @@ static void show_man_page(const char *perf_cmd) | |||
364 | 363 | ||
365 | setup_man_path(); | 364 | setup_man_path(); |
366 | for (viewer = man_viewer_list; viewer; viewer = viewer->next) | 365 | for (viewer = man_viewer_list; viewer; viewer = viewer->next) |
367 | { | ||
368 | exec_viewer(viewer->name, page); /* will return when unable */ | 366 | exec_viewer(viewer->name, page); /* will return when unable */ |
369 | } | 367 | |
370 | if (fallback) | 368 | if (fallback) |
371 | exec_viewer(fallback, page); | 369 | exec_viewer(fallback, page); |
372 | exec_viewer("man", page); | 370 | exec_viewer("man", page); |
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c new file mode 100644 index 000000000000..047fef74bd52 --- /dev/null +++ b/tools/perf/builtin-kmem.c | |||
@@ -0,0 +1,807 @@ | |||
1 | #include "builtin.h" | ||
2 | #include "perf.h" | ||
3 | |||
4 | #include "util/util.h" | ||
5 | #include "util/cache.h" | ||
6 | #include "util/symbol.h" | ||
7 | #include "util/thread.h" | ||
8 | #include "util/header.h" | ||
9 | |||
10 | #include "util/parse-options.h" | ||
11 | #include "util/trace-event.h" | ||
12 | |||
13 | #include "util/debug.h" | ||
14 | #include "util/data_map.h" | ||
15 | |||
16 | #include <linux/rbtree.h> | ||
17 | |||
18 | struct alloc_stat; | ||
19 | typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); | ||
20 | |||
21 | static char const *input_name = "perf.data"; | ||
22 | |||
23 | static struct perf_header *header; | ||
24 | static u64 sample_type; | ||
25 | |||
26 | static int alloc_flag; | ||
27 | static int caller_flag; | ||
28 | |||
29 | static int alloc_lines = -1; | ||
30 | static int caller_lines = -1; | ||
31 | |||
32 | static bool raw_ip; | ||
33 | |||
34 | static char default_sort_order[] = "frag,hit,bytes"; | ||
35 | |||
36 | static int *cpunode_map; | ||
37 | static int max_cpu_num; | ||
38 | |||
39 | struct alloc_stat { | ||
40 | u64 call_site; | ||
41 | u64 ptr; | ||
42 | u64 bytes_req; | ||
43 | u64 bytes_alloc; | ||
44 | u32 hit; | ||
45 | u32 pingpong; | ||
46 | |||
47 | short alloc_cpu; | ||
48 | |||
49 | struct rb_node node; | ||
50 | }; | ||
51 | |||
52 | static struct rb_root root_alloc_stat; | ||
53 | static struct rb_root root_alloc_sorted; | ||
54 | static struct rb_root root_caller_stat; | ||
55 | static struct rb_root root_caller_sorted; | ||
56 | |||
57 | static unsigned long total_requested, total_allocated; | ||
58 | static unsigned long nr_allocs, nr_cross_allocs; | ||
59 | |||
60 | struct raw_event_sample { | ||
61 | u32 size; | ||
62 | char data[0]; | ||
63 | }; | ||
64 | |||
65 | #define PATH_SYS_NODE "/sys/devices/system/node" | ||
66 | |||
67 | static void init_cpunode_map(void) | ||
68 | { | ||
69 | FILE *fp; | ||
70 | int i; | ||
71 | |||
72 | fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); | ||
73 | if (fp) { | ||
74 | if (fscanf(fp, "%d", &max_cpu_num) < 1) | ||
75 | die("Failed to read 'kernel_max' from sysfs"); | ||
76 | max_cpu_num++; | ||
77 | fclose(fp); | ||
78 | } else { | ||
79 | /* No kernel_max in sysfs; fall back to a sane upper bound. */ | ||
80 | max_cpu_num = 4096; | ||
81 | } | ||
82 | | ||
83 | cpunode_map = calloc(max_cpu_num, sizeof(int)); | ||
84 | if (!cpunode_map) | ||
85 | die("calloc"); | ||
86 | for (i = 0; i < max_cpu_num; i++) | ||
87 | cpunode_map[i] = -1; | ||
88 | } | ||
89 | |||
90 | static void setup_cpunode_map(void) | ||
91 | { | ||
92 | struct dirent *dent1, *dent2; | ||
93 | DIR *dir1, *dir2; | ||
94 | unsigned int cpu, mem; | ||
95 | char buf[PATH_MAX]; | ||
96 | |||
97 | init_cpunode_map(); | ||
98 | |||
99 | dir1 = opendir(PATH_SYS_NODE); | ||
100 | if (!dir1) | ||
101 | return; | ||
102 | |||
103 | while (true) { | ||
104 | dent1 = readdir(dir1); | ||
105 | if (!dent1) | ||
106 | break; | ||
107 | |||
108 | if (sscanf(dent1->d_name, "node%u", &mem) < 1) | ||
109 | continue; | ||
110 | |||
111 | snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name); | ||
112 | dir2 = opendir(buf); | ||
113 | if (!dir2) | ||
114 | continue; | ||
115 | while (true) { | ||
116 | dent2 = readdir(dir2); | ||
117 | if (!dent2) | ||
118 | break; | ||
119 | if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1) | ||
120 | continue; | ||
121 | cpunode_map[cpu] = mem; | ||
122 | } | ||
123 | } | ||
124 | } | ||
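
As a concrete (hypothetical) example, on a two-node machine exposing node0 with cpu0-cpu3 and node1 with cpu4-cpu7, this walk leaves cpunode_map[0..3] = 0 and cpunode_map[4..7] = 1, with all other slots still -1; process_alloc_event() later compares the map entry for the allocating CPU against the tracepoint's node field to count cross-node allocations.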
125 | |||
126 | static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, | ||
127 | int bytes_req, int bytes_alloc, int cpu) | ||
128 | { | ||
129 | struct rb_node **node = &root_alloc_stat.rb_node; | ||
130 | struct rb_node *parent = NULL; | ||
131 | struct alloc_stat *data = NULL; | ||
132 | |||
133 | while (*node) { | ||
134 | parent = *node; | ||
135 | data = rb_entry(*node, struct alloc_stat, node); | ||
136 | |||
137 | if (ptr > data->ptr) | ||
138 | node = &(*node)->rb_right; | ||
139 | else if (ptr < data->ptr) | ||
140 | node = &(*node)->rb_left; | ||
141 | else | ||
142 | break; | ||
143 | } | ||
144 | |||
145 | if (data && data->ptr == ptr) { | ||
146 | data->hit++; | ||
147 | data->bytes_req += bytes_req; | ||
148 | data->bytes_alloc += bytes_alloc; | ||
149 | } else { | ||
150 | data = malloc(sizeof(*data)); | ||
151 | if (!data) | ||
152 | die("malloc"); | ||
153 | data->ptr = ptr; | ||
154 | data->pingpong = 0; | ||
155 | data->hit = 1; | ||
156 | data->bytes_req = bytes_req; | ||
157 | data->bytes_alloc = bytes_alloc; | ||
158 | |||
159 | rb_link_node(&data->node, parent, node); | ||
160 | rb_insert_color(&data->node, &root_alloc_stat); | ||
161 | } | ||
162 | data->call_site = call_site; | ||
163 | data->alloc_cpu = cpu; | ||
164 | } | ||
165 | |||
166 | static void insert_caller_stat(unsigned long call_site, | ||
167 | int bytes_req, int bytes_alloc) | ||
168 | { | ||
169 | struct rb_node **node = &root_caller_stat.rb_node; | ||
170 | struct rb_node *parent = NULL; | ||
171 | struct alloc_stat *data = NULL; | ||
172 | |||
173 | while (*node) { | ||
174 | parent = *node; | ||
175 | data = rb_entry(*node, struct alloc_stat, node); | ||
176 | |||
177 | if (call_site > data->call_site) | ||
178 | node = &(*node)->rb_right; | ||
179 | else if (call_site < data->call_site) | ||
180 | node = &(*node)->rb_left; | ||
181 | else | ||
182 | break; | ||
183 | } | ||
184 | |||
185 | if (data && data->call_site == call_site) { | ||
186 | data->hit++; | ||
187 | data->bytes_req += bytes_req; | ||
188 | data->bytes_alloc += bytes_alloc; | ||
189 | } else { | ||
190 | data = malloc(sizeof(*data)); | ||
191 | if (!data) | ||
192 | die("malloc"); | ||
193 | data->call_site = call_site; | ||
194 | data->pingpong = 0; | ||
195 | data->hit = 1; | ||
196 | data->bytes_req = bytes_req; | ||
197 | data->bytes_alloc = bytes_alloc; | ||
198 | |||
199 | rb_link_node(&data->node, parent, node); | ||
200 | rb_insert_color(&data->node, &root_caller_stat); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | static void process_alloc_event(struct raw_event_sample *raw, | ||
205 | struct event *event, | ||
206 | int cpu, | ||
207 | u64 timestamp __used, | ||
208 | struct thread *thread __used, | ||
209 | int node) | ||
210 | { | ||
211 | unsigned long call_site; | ||
212 | unsigned long ptr; | ||
213 | int bytes_req; | ||
214 | int bytes_alloc; | ||
215 | int node1, node2; | ||
216 | |||
217 | ptr = raw_field_value(event, "ptr", raw->data); | ||
218 | call_site = raw_field_value(event, "call_site", raw->data); | ||
219 | bytes_req = raw_field_value(event, "bytes_req", raw->data); | ||
220 | bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data); | ||
221 | |||
222 | insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu); | ||
223 | insert_caller_stat(call_site, bytes_req, bytes_alloc); | ||
224 | |||
225 | total_requested += bytes_req; | ||
226 | total_allocated += bytes_alloc; | ||
227 | |||
228 | if (node) { | ||
229 | node1 = cpunode_map[cpu]; | ||
230 | node2 = raw_field_value(event, "node", raw->data); | ||
231 | if (node1 != node2) | ||
232 | nr_cross_allocs++; | ||
233 | } | ||
234 | nr_allocs++; | ||
235 | } | ||
236 | |||
237 | static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); | ||
238 | static int callsite_cmp(struct alloc_stat *, struct alloc_stat *); | ||
239 | |||
240 | static struct alloc_stat *search_alloc_stat(unsigned long ptr, | ||
241 | unsigned long call_site, | ||
242 | struct rb_root *root, | ||
243 | sort_fn_t sort_fn) | ||
244 | { | ||
245 | struct rb_node *node = root->rb_node; | ||
246 | struct alloc_stat key = { .ptr = ptr, .call_site = call_site }; | ||
247 | |||
248 | while (node) { | ||
249 | struct alloc_stat *data; | ||
250 | int cmp; | ||
251 | |||
252 | data = rb_entry(node, struct alloc_stat, node); | ||
253 | |||
254 | cmp = sort_fn(&key, data); | ||
255 | if (cmp < 0) | ||
256 | node = node->rb_left; | ||
257 | else if (cmp > 0) | ||
258 | node = node->rb_right; | ||
259 | else | ||
260 | return data; | ||
261 | } | ||
262 | return NULL; | ||
263 | } | ||
264 | |||
265 | static void process_free_event(struct raw_event_sample *raw, | ||
266 | struct event *event, | ||
267 | int cpu, | ||
268 | u64 timestamp __used, | ||
269 | struct thread *thread __used) | ||
270 | { | ||
271 | unsigned long ptr; | ||
272 | struct alloc_stat *s_alloc, *s_caller; | ||
273 | |||
274 | ptr = raw_field_value(event, "ptr", raw->data); | ||
275 | |||
276 | s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); | ||
277 | if (!s_alloc) | ||
278 | return; | ||
279 | |||
280 | if (cpu != s_alloc->alloc_cpu) { | ||
281 | s_alloc->pingpong++; | ||
282 | |||
283 | s_caller = search_alloc_stat(0, s_alloc->call_site, | ||
284 | &root_caller_stat, callsite_cmp); | ||
285 | assert(s_caller); | ||
286 | s_caller->pingpong++; | ||
287 | } | ||
288 | s_alloc->alloc_cpu = -1; | ||
289 | } | ||
290 | |||
291 | static void | ||
292 | process_raw_event(event_t *raw_event __used, void *more_data, | ||
293 | int cpu, u64 timestamp, struct thread *thread) | ||
294 | { | ||
295 | struct raw_event_sample *raw = more_data; | ||
296 | struct event *event; | ||
297 | int type; | ||
298 | |||
299 | type = trace_parse_common_type(raw->data); | ||
300 | event = trace_find_event(type); | ||
301 | |||
302 | if (!strcmp(event->name, "kmalloc") || | ||
303 | !strcmp(event->name, "kmem_cache_alloc")) { | ||
304 | process_alloc_event(raw, event, cpu, timestamp, thread, 0); | ||
305 | return; | ||
306 | } | ||
307 | |||
308 | if (!strcmp(event->name, "kmalloc_node") || | ||
309 | !strcmp(event->name, "kmem_cache_alloc_node")) { | ||
310 | process_alloc_event(raw, event, cpu, timestamp, thread, 1); | ||
311 | return; | ||
312 | } | ||
313 | |||
314 | if (!strcmp(event->name, "kfree") || | ||
315 | !strcmp(event->name, "kmem_cache_free")) { | ||
316 | process_free_event(raw, event, cpu, timestamp, thread); | ||
317 | return; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | static int process_sample_event(event_t *event) | ||
322 | { | ||
323 | u64 ip = event->ip.ip; | ||
324 | u64 timestamp = -1; | ||
325 | u32 cpu = -1; | ||
326 | u64 period = 1; | ||
327 | void *more_data = event->ip.__more_data; | ||
328 | struct thread *thread = threads__findnew(event->ip.pid); | ||
329 | |||
330 | if (sample_type & PERF_SAMPLE_TIME) { | ||
331 | timestamp = *(u64 *)more_data; | ||
332 | more_data += sizeof(u64); | ||
333 | } | ||
334 | |||
335 | if (sample_type & PERF_SAMPLE_CPU) { | ||
336 | cpu = *(u32 *)more_data; | ||
337 | more_data += sizeof(u32); | ||
338 | more_data += sizeof(u32); /* reserved */ | ||
339 | } | ||
340 | |||
341 | if (sample_type & PERF_SAMPLE_PERIOD) { | ||
342 | period = *(u64 *)more_data; | ||
343 | more_data += sizeof(u64); | ||
344 | } | ||
345 | |||
346 | dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", | ||
347 | event->header.misc, | ||
348 | event->ip.pid, event->ip.tid, | ||
349 | (void *)(long)ip, | ||
350 | (long long)period); | ||
351 | |||
352 | if (thread == NULL) { | ||
353 | pr_debug("problem processing %d event, skipping it.\n", | ||
354 | event->header.type); | ||
355 | return -1; | ||
356 | } | ||
357 | |||
358 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
359 | |||
360 | process_raw_event(event, more_data, cpu, timestamp, thread); | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static int sample_type_check(u64 type) | ||
366 | { | ||
367 | sample_type = type; | ||
368 | |||
369 | if (!(sample_type & PERF_SAMPLE_RAW)) { | ||
370 | fprintf(stderr, | ||
371 | "No trace sample to read. Did you call perf record " | ||
372 | "without -R?"); | ||
373 | return -1; | ||
374 | } | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static struct perf_file_handler file_handler = { | ||
380 | .process_sample_event = process_sample_event, | ||
381 | .process_comm_event = event__process_comm, | ||
382 | .sample_type_check = sample_type_check, | ||
383 | }; | ||
384 | |||
385 | static int read_events(void) | ||
386 | { | ||
387 | register_idle_thread(); | ||
388 | register_perf_file_handler(&file_handler); | ||
389 | |||
390 | return mmap_dispatch_perf_file(&header, input_name, 0, 0, | ||
391 | &event__cwdlen, &event__cwd); | ||
392 | } | ||
393 | |||
394 | static double fragmentation(unsigned long n_req, unsigned long n_alloc) | ||
395 | { | ||
396 | if (n_alloc == 0) | ||
397 | return 0.0; | ||
398 | else | ||
399 | return 100.0 - (100.0 * n_req / n_alloc); | ||
400 | } | ||
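
As a worked example of this formula: an entry that requested 100 bytes in total but was handed 128 bytes of slab memory scores 100.0 - (100.0 * 100 / 128) = 21.875% fragmentation.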
401 | |||
402 | static void __print_result(struct rb_root *root, int n_lines, int is_caller) | ||
403 | { | ||
404 | struct rb_node *next; | ||
405 | |||
406 | printf("%.102s\n", graph_dotted_line); | ||
407 | printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr"); | ||
408 | printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n"); | ||
409 | printf("%.102s\n", graph_dotted_line); | ||
410 | |||
411 | next = rb_first(root); | ||
412 | |||
413 | while (next && n_lines--) { | ||
414 | struct alloc_stat *data = rb_entry(next, struct alloc_stat, | ||
415 | node); | ||
416 | struct symbol *sym = NULL; | ||
417 | char buf[BUFSIZ]; | ||
418 | u64 addr; | ||
419 | |||
420 | if (is_caller) { | ||
421 | addr = data->call_site; | ||
422 | if (!raw_ip) | ||
423 | sym = thread__find_function(kthread, addr, NULL); | ||
424 | } else | ||
425 | addr = data->ptr; | ||
426 | |||
427 | if (sym != NULL) | ||
428 | snprintf(buf, sizeof(buf), "%s+%Lx", sym->name, | ||
429 | addr - sym->start); | ||
430 | else | ||
431 | snprintf(buf, sizeof(buf), "%#Lx", addr); | ||
432 | printf(" %-34s |", buf); | ||
433 | |||
434 | printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n", | ||
435 | (unsigned long long)data->bytes_alloc, | ||
436 | (unsigned long)data->bytes_alloc / data->hit, | ||
437 | (unsigned long long)data->bytes_req, | ||
438 | (unsigned long)data->bytes_req / data->hit, | ||
439 | (unsigned long)data->hit, | ||
440 | (unsigned long)data->pingpong, | ||
441 | fragmentation(data->bytes_req, data->bytes_alloc)); | ||
442 | |||
443 | next = rb_next(next); | ||
444 | } | ||
445 | |||
446 | if (n_lines == -1) | ||
447 | printf(" ... | ... | ... | ... | ... | ... \n"); | ||
448 | |||
449 | printf("%.102s\n", graph_dotted_line); | ||
450 | } | ||
451 | |||
452 | static void print_summary(void) | ||
453 | { | ||
454 | printf("\nSUMMARY\n=======\n"); | ||
455 | printf("Total bytes requested: %lu\n", total_requested); | ||
456 | printf("Total bytes allocated: %lu\n", total_allocated); | ||
457 | printf("Total bytes wasted on internal fragmentation: %lu\n", | ||
458 | total_allocated - total_requested); | ||
459 | printf("Internal fragmentation: %f%%\n", | ||
460 | fragmentation(total_requested, total_allocated)); | ||
461 | printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs); | ||
462 | } | ||
463 | |||
464 | static void print_result(void) | ||
465 | { | ||
466 | if (caller_flag) | ||
467 | __print_result(&root_caller_sorted, caller_lines, 1); | ||
468 | if (alloc_flag) | ||
469 | __print_result(&root_alloc_sorted, alloc_lines, 0); | ||
470 | print_summary(); | ||
471 | } | ||
472 | |||
473 | struct sort_dimension { | ||
474 | const char name[20]; | ||
475 | sort_fn_t cmp; | ||
476 | struct list_head list; | ||
477 | }; | ||
478 | |||
479 | static LIST_HEAD(caller_sort); | ||
480 | static LIST_HEAD(alloc_sort); | ||
481 | |||
482 | static void sort_insert(struct rb_root *root, struct alloc_stat *data, | ||
483 | struct list_head *sort_list) | ||
484 | { | ||
485 | struct rb_node **new = &(root->rb_node); | ||
486 | struct rb_node *parent = NULL; | ||
487 | struct sort_dimension *sort; | ||
488 | |||
489 | while (*new) { | ||
490 | struct alloc_stat *this; | ||
491 | int cmp = 0; | ||
492 | |||
493 | this = rb_entry(*new, struct alloc_stat, node); | ||
494 | parent = *new; | ||
495 | |||
496 | list_for_each_entry(sort, sort_list, list) { | ||
497 | cmp = sort->cmp(data, this); | ||
498 | if (cmp) | ||
499 | break; | ||
500 | } | ||
501 | |||
502 | if (cmp > 0) | ||
503 | new = &((*new)->rb_left); | ||
504 | else | ||
505 | new = &((*new)->rb_right); | ||
506 | } | ||
507 | |||
508 | rb_link_node(&data->node, parent, new); | ||
509 | rb_insert_color(&data->node, root); | ||
510 | } | ||
511 | |||
512 | static void __sort_result(struct rb_root *root, struct rb_root *root_sorted, | ||
513 | struct list_head *sort_list) | ||
514 | { | ||
515 | struct rb_node *node; | ||
516 | struct alloc_stat *data; | ||
517 | |||
518 | for (;;) { | ||
519 | node = rb_first(root); | ||
520 | if (!node) | ||
521 | break; | ||
522 | |||
523 | rb_erase(node, root); | ||
524 | data = rb_entry(node, struct alloc_stat, node); | ||
525 | sort_insert(root_sorted, data, sort_list); | ||
526 | } | ||
527 | } | ||
528 | |||
529 | static void sort_result(void) | ||
530 | { | ||
531 | __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort); | ||
532 | __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort); | ||
533 | } | ||
534 | |||
535 | static int __cmd_kmem(void) | ||
536 | { | ||
537 | setup_pager(); | ||
538 | read_events(); | ||
539 | sort_result(); | ||
540 | print_result(); | ||
541 | |||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | static const char * const kmem_usage[] = { | ||
546 | "perf kmem [<options>] {record}", | ||
547 | NULL | ||
548 | }; | ||
549 | |||
550 | static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
551 | { | ||
552 | if (l->ptr < r->ptr) | ||
553 | return -1; | ||
554 | else if (l->ptr > r->ptr) | ||
555 | return 1; | ||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | static struct sort_dimension ptr_sort_dimension = { | ||
560 | .name = "ptr", | ||
561 | .cmp = ptr_cmp, | ||
562 | }; | ||
563 | |||
564 | static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
565 | { | ||
566 | if (l->call_site < r->call_site) | ||
567 | return -1; | ||
568 | else if (l->call_site > r->call_site) | ||
569 | return 1; | ||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | static struct sort_dimension callsite_sort_dimension = { | ||
574 | .name = "callsite", | ||
575 | .cmp = callsite_cmp, | ||
576 | }; | ||
577 | |||
578 | static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
579 | { | ||
580 | if (l->hit < r->hit) | ||
581 | return -1; | ||
582 | else if (l->hit > r->hit) | ||
583 | return 1; | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static struct sort_dimension hit_sort_dimension = { | ||
588 | .name = "hit", | ||
589 | .cmp = hit_cmp, | ||
590 | }; | ||
591 | |||
592 | static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
593 | { | ||
594 | if (l->bytes_alloc < r->bytes_alloc) | ||
595 | return -1; | ||
596 | else if (l->bytes_alloc > r->bytes_alloc) | ||
597 | return 1; | ||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | static struct sort_dimension bytes_sort_dimension = { | ||
602 | .name = "bytes", | ||
603 | .cmp = bytes_cmp, | ||
604 | }; | ||
605 | |||
606 | static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
607 | { | ||
608 | double x, y; | ||
609 | |||
610 | x = fragmentation(l->bytes_req, l->bytes_alloc); | ||
611 | y = fragmentation(r->bytes_req, r->bytes_alloc); | ||
612 | |||
613 | if (x < y) | ||
614 | return -1; | ||
615 | else if (x > y) | ||
616 | return 1; | ||
617 | return 0; | ||
618 | } | ||
619 | |||
620 | static struct sort_dimension frag_sort_dimension = { | ||
621 | .name = "frag", | ||
622 | .cmp = frag_cmp, | ||
623 | }; | ||
624 | |||
625 | static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
626 | { | ||
627 | if (l->pingpong < r->pingpong) | ||
628 | return -1; | ||
629 | else if (l->pingpong > r->pingpong) | ||
630 | return 1; | ||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | static struct sort_dimension pingpong_sort_dimension = { | ||
635 | .name = "pingpong", | ||
636 | .cmp = pingpong_cmp, | ||
637 | }; | ||
638 | |||
639 | static struct sort_dimension *avail_sorts[] = { | ||
640 | &ptr_sort_dimension, | ||
641 | &callsite_sort_dimension, | ||
642 | &hit_sort_dimension, | ||
643 | &bytes_sort_dimension, | ||
644 | &frag_sort_dimension, | ||
645 | &pingpong_sort_dimension, | ||
646 | }; | ||
647 | |||
648 | #define NUM_AVAIL_SORTS \ | ||
649 | (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *)) | ||
650 | |||
651 | static int sort_dimension__add(const char *tok, struct list_head *list) | ||
652 | { | ||
653 | struct sort_dimension *sort; | ||
654 | int i; | ||
655 | |||
656 | for (i = 0; i < NUM_AVAIL_SORTS; i++) { | ||
657 | if (!strcmp(avail_sorts[i]->name, tok)) { | ||
658 | sort = malloc(sizeof(*sort)); | ||
659 | if (!sort) | ||
660 | die("malloc"); | ||
661 | memcpy(sort, avail_sorts[i], sizeof(*sort)); | ||
662 | list_add_tail(&sort->list, list); | ||
663 | return 0; | ||
664 | } | ||
665 | } | ||
666 | |||
667 | return -1; | ||
668 | } | ||
669 | |||
670 | static int setup_sorting(struct list_head *sort_list, const char *arg) | ||
671 | { | ||
672 | char *tok; | ||
673 | char *str = strdup(arg); | ||
674 | |||
675 | if (!str) | ||
676 | die("strdup"); | ||
677 | |||
678 | while (true) { | ||
679 | tok = strsep(&str, ","); | ||
680 | if (!tok) | ||
681 | break; | ||
682 | if (sort_dimension__add(tok, sort_list) < 0) { | ||
683 | error("Unknown --sort key: '%s'", tok); | ||
684 | return -1; | ||
685 | } | ||
686 | } | ||
687 | |||
688 | free(str); | ||
689 | return 0; | ||
690 | } | ||
691 | |||
692 | static int parse_sort_opt(const struct option *opt __used, | ||
693 | const char *arg, int unset __used) | ||
694 | { | ||
695 | if (!arg) | ||
696 | return -1; | ||
697 | |||
698 | if (caller_flag > alloc_flag) | ||
699 | return setup_sorting(&caller_sort, arg); | ||
700 | else | ||
701 | return setup_sorting(&alloc_sort, arg); | ||
702 | |||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | static int parse_stat_opt(const struct option *opt __used, | ||
707 | const char *arg, int unset __used) | ||
708 | { | ||
709 | if (!arg) | ||
710 | return -1; | ||
711 | |||
712 | if (strcmp(arg, "alloc") == 0) | ||
713 | alloc_flag = (caller_flag + 1); | ||
714 | else if (strcmp(arg, "caller") == 0) | ||
715 | caller_flag = (alloc_flag + 1); | ||
716 | else | ||
717 | return -1; | ||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | static int parse_line_opt(const struct option *opt __used, | ||
722 | const char *arg, int unset __used) | ||
723 | { | ||
724 | int lines; | ||
725 | |||
726 | if (!arg) | ||
727 | return -1; | ||
728 | |||
729 | lines = strtoul(arg, NULL, 10); | ||
730 | |||
731 | if (caller_flag > alloc_flag) | ||
732 | caller_lines = lines; | ||
733 | else | ||
734 | alloc_lines = lines; | ||
735 | |||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | static const struct option kmem_options[] = { | ||
740 | OPT_STRING('i', "input", &input_name, "file", | ||
741 | "input file name"), | ||
742 | OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>", | ||
743 | "stat selector, Pass 'alloc' or 'caller'.", | ||
744 | parse_stat_opt), | ||
745 | OPT_CALLBACK('s', "sort", NULL, "key[,key2...]", | ||
746 | "sort by keys: ptr, call_site, bytes, hit, pingpong, frag", | ||
747 | parse_sort_opt), | ||
748 | OPT_CALLBACK('l', "line", NULL, "num", | ||
749 | "show n lins", | ||
750 | parse_line_opt), | ||
751 | OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), | ||
752 | OPT_END() | ||
753 | }; | ||
754 | |||
755 | static const char *record_args[] = { | ||
756 | "record", | ||
757 | "-a", | ||
758 | "-R", | ||
759 | "-M", | ||
760 | "-f", | ||
761 | "-c", "1", | ||
762 | "-e", "kmem:kmalloc", | ||
763 | "-e", "kmem:kmalloc_node", | ||
764 | "-e", "kmem:kfree", | ||
765 | "-e", "kmem:kmem_cache_alloc", | ||
766 | "-e", "kmem:kmem_cache_alloc_node", | ||
767 | "-e", "kmem:kmem_cache_free", | ||
768 | }; | ||
769 | |||
770 | static int __cmd_record(int argc, const char **argv) | ||
771 | { | ||
772 | unsigned int rec_argc, i, j; | ||
773 | const char **rec_argv; | ||
774 | |||
775 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; | ||
776 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | ||
777 | |||
778 | for (i = 0; i < ARRAY_SIZE(record_args); i++) | ||
779 | rec_argv[i] = strdup(record_args[i]); | ||
780 | |||
781 | for (j = 1; j < (unsigned int)argc; j++, i++) | ||
782 | rec_argv[i] = argv[j]; | ||
783 | |||
784 | return cmd_record(i, rec_argv, NULL); | ||
785 | } | ||
786 | |||
787 | int cmd_kmem(int argc, const char **argv, const char *prefix __used) | ||
788 | { | ||
789 | symbol__init(0); | ||
790 | |||
791 | argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); | ||
792 | |||
793 | if (argc && !strncmp(argv[0], "rec", 3)) | ||
794 | return __cmd_record(argc, argv); | ||
795 | else if (argc) | ||
796 | usage_with_options(kmem_usage, kmem_options); | ||
797 | |||
798 | if (list_empty(&caller_sort)) | ||
799 | setup_sorting(&caller_sort, default_sort_order); | ||
800 | if (list_empty(&alloc_sort)) | ||
801 | setup_sorting(&alloc_sort, default_sort_order); | ||
802 | |||
803 | setup_cpunode_map(); | ||
804 | |||
805 | return __cmd_kmem(); | ||
806 | } | ||
807 | |||
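
The sort keys are table-driven as well: avail_sorts[] maps a key name to a comparator over struct alloc_stat, and setup_sorting()/sort_dimension__add() resolve each token of the --sort string against that table. A minimal sketch of an additional key, assuming a hypothetical "cpu" key that orders entries by the CPU that performed the allocation:

    static int cpu_cmp(struct alloc_stat *l, struct alloc_stat *r)
    {
    	if (l->alloc_cpu < r->alloc_cpu)
    		return -1;
    	else if (l->alloc_cpu > r->alloc_cpu)
    		return 1;
    	return 0;
    }

    static struct sort_dimension cpu_sort_dimension = {
    	.name	= "cpu",	/* hypothetical --sort key */
    	.cmp	= cpu_cmp,
    };

    /* ... plus an &cpu_sort_dimension entry in avail_sorts[] */

Any such key then composes with the existing ones in a comma-separated --sort string, exactly like the default "frag,hit,bytes" order.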
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c new file mode 100644 index 000000000000..a58e11b7ea80 --- /dev/null +++ b/tools/perf/builtin-probe.c | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * builtin-probe.c | ||
3 | * | ||
4 | * Builtin probe command: Set up probe events by C expression | ||
5 | * | ||
6 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | */ | ||
23 | #define _GNU_SOURCE | ||
24 | #include <sys/utsname.h> | ||
25 | #include <sys/types.h> | ||
26 | #include <sys/stat.h> | ||
27 | #include <fcntl.h> | ||
28 | #include <errno.h> | ||
29 | #include <stdio.h> | ||
30 | #include <unistd.h> | ||
31 | #include <stdlib.h> | ||
32 | #include <string.h> | ||
33 | |||
34 | #undef _GNU_SOURCE | ||
35 | #include "perf.h" | ||
36 | #include "builtin.h" | ||
37 | #include "util/util.h" | ||
38 | #include "util/event.h" | ||
39 | #include "util/debug.h" | ||
40 | #include "util/parse-options.h" | ||
41 | #include "util/parse-events.h" /* For debugfs_path */ | ||
42 | #include "util/probe-finder.h" | ||
43 | #include "util/probe-event.h" | ||
44 | |||
45 | /* Default vmlinux search paths */ | ||
46 | #define NR_SEARCH_PATH 3 | ||
47 | const char *default_search_path[NR_SEARCH_PATH] = { | ||
48 | "/lib/modules/%s/build/vmlinux", /* Custom build kernel */ | ||
49 | "/usr/lib/debug/lib/modules/%s/vmlinux", /* Red Hat debuginfo */ | ||
50 | "/boot/vmlinux-debug-%s", /* Ubuntu */ | ||
51 | }; | ||
52 | |||
53 | #define MAX_PATH_LEN 256 | ||
54 | #define MAX_PROBES 128 | ||
55 | |||
56 | /* Session management structure */ | ||
57 | static struct { | ||
58 | char *vmlinux; | ||
59 | char *release; | ||
60 | int need_dwarf; | ||
61 | int nr_probe; | ||
62 | struct probe_point probes[MAX_PROBES]; | ||
63 | } session; | ||
64 | |||
65 | static bool listing; | ||
66 | |||
67 | /* Parse an event definition. Note that any error must die. */ | ||
68 | static void parse_probe_event(const char *str) | ||
69 | { | ||
70 | struct probe_point *pp = &session.probes[session.nr_probe]; | ||
71 | |||
72 | pr_debug("probe-definition(%d): %s\n", session.nr_probe, str); | ||
73 | if (++session.nr_probe == MAX_PROBES) | ||
74 | die("Too many probes (> %d) are specified.", MAX_PROBES); | ||
75 | |||
76 | /* Parse perf-probe event into probe_point */ | ||
77 | session.need_dwarf = parse_perf_probe_event(str, pp); | ||
78 | |||
79 | pr_debug("%d arguments\n", pp->nr_args); | ||
80 | } | ||
81 | |||
82 | static int opt_add_probe_event(const struct option *opt __used, | ||
83 | const char *str, int unset __used) | ||
84 | { | ||
85 | if (str) | ||
86 | parse_probe_event(str); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | #ifndef NO_LIBDWARF | ||
91 | static int open_default_vmlinux(void) | ||
92 | { | ||
93 | struct utsname uts; | ||
94 | char fname[MAX_PATH_LEN]; | ||
95 | int fd, ret, i; | ||
96 | |||
97 | ret = uname(&uts); | ||
98 | if (ret) { | ||
99 | pr_debug("uname() failed.\n"); | ||
100 | return -errno; | ||
101 | } | ||
102 | session.release = uts.release; | ||
103 | for (i = 0; i < NR_SEARCH_PATH; i++) { | ||
104 | ret = snprintf(fname, MAX_PATH_LEN, | ||
105 | default_search_path[i], session.release); | ||
106 | if (ret >= MAX_PATH_LEN || ret < 0) { | ||
107 | pr_debug("Filename(%d,%s) is too long.\n", i, | ||
108 | uts.release); | ||
109 | errno = E2BIG; | ||
110 | return -E2BIG; | ||
111 | } | ||
112 | pr_debug("try to open %s\n", fname); | ||
113 | fd = open(fname, O_RDONLY); | ||
114 | if (fd >= 0) | ||
115 | break; | ||
116 | } | ||
117 | return fd; | ||
118 | } | ||
119 | #endif | ||
120 | |||
121 | static const char * const probe_usage[] = { | ||
122 | "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]", | ||
123 | "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", | ||
124 | "perf probe --list", | ||
125 | NULL | ||
126 | }; | ||
127 | |||
128 | static const struct option options[] = { | ||
129 | OPT_BOOLEAN('v', "verbose", &verbose, | ||
130 | "be more verbose (show parsed arguments, etc)"), | ||
131 | #ifndef NO_LIBDWARF | ||
132 | OPT_STRING('k', "vmlinux", &session.vmlinux, "file", | ||
133 | "vmlinux/module pathname"), | ||
134 | #endif | ||
135 | OPT_BOOLEAN('l', "list", &listing, "list up current probes"), | ||
136 | OPT_CALLBACK('a', "add", NULL, | ||
137 | #ifdef NO_LIBDWARF | ||
138 | "FUNC[+OFFS|%return] [ARG ...]", | ||
139 | #else | ||
140 | "FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", | ||
141 | #endif | ||
142 | "probe point definition, where\n" | ||
143 | "\t\tGRP:\tGroup name (optional)\n" | ||
144 | "\t\tNAME:\tEvent name\n" | ||
145 | "\t\tFUNC:\tFunction name\n" | ||
146 | "\t\tOFFS:\tOffset from function entry (in byte)\n" | ||
147 | "\t\t%return:\tPut the probe at function return\n" | ||
148 | #ifdef NO_LIBDWARF | ||
149 | "\t\tARG:\tProbe argument (only \n" | ||
150 | #else | ||
151 | "\t\tSRC:\tSource code path\n" | ||
152 | "\t\tRLN:\tRelative line number from function entry.\n" | ||
153 | "\t\tALN:\tAbsolute line number in file.\n" | ||
154 | "\t\tARG:\tProbe argument (local variable name or\n" | ||
155 | #endif | ||
156 | "\t\t\tkprobe-tracer argument format.)\n", | ||
157 | opt_add_probe_event), | ||
158 | OPT_END() | ||
159 | }; | ||
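
To make the grammar in the --add help text concrete: a hypothetical definition such as 'do_sys_open+8 flags' (the names are examples only, not taken from this patch) gives FUNC = do_sys_open, OFFS = 8 bytes from the function entry, and one ARG, the local variable flags; the SRC, RLN and ALN forms, and local-variable arguments in general, are only usable in builds with libdwarf, as the #ifdef NO_LIBDWARF branches above indicate.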
160 | |||
161 | int cmd_probe(int argc, const char **argv, const char *prefix __used) | ||
162 | { | ||
163 | int i, j, ret; | ||
164 | #ifndef NO_LIBDWARF | ||
165 | int fd; | ||
166 | #endif | ||
167 | struct probe_point *pp; | ||
168 | |||
169 | argc = parse_options(argc, argv, options, probe_usage, | ||
170 | PARSE_OPT_STOP_AT_NON_OPTION); | ||
171 | for (i = 0; i < argc; i++) | ||
172 | parse_probe_event(argv[i]); | ||
173 | |||
174 | if ((session.nr_probe == 0 && !listing) || | ||
175 | (session.nr_probe != 0 && listing)) | ||
176 | usage_with_options(probe_usage, options); | ||
177 | |||
178 | if (listing) { | ||
179 | show_perf_probe_events(); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | if (session.need_dwarf) | ||
184 | #ifdef NO_LIBDWARF | ||
185 | die("Debuginfo-analysis is not supported"); | ||
186 | #else /* !NO_LIBDWARF */ | ||
187 | pr_debug("Some probes require debuginfo.\n"); | ||
188 | |||
189 | if (session.vmlinux) | ||
190 | fd = open(session.vmlinux, O_RDONLY); | ||
191 | else | ||
192 | fd = open_default_vmlinux(); | ||
193 | if (fd < 0) { | ||
194 | if (session.need_dwarf) | ||
195 | die("Could not open vmlinux/module file."); | ||
196 | |||
197 | pr_warning("Could not open vmlinux/module file." | ||
198 | " Try to use symbols.\n"); | ||
199 | goto end_dwarf; | ||
200 | } | ||
201 | |||
202 | /* Searching probe points */ | ||
203 | for (j = 0; j < session.nr_probe; j++) { | ||
204 | pp = &session.probes[j]; | ||
205 | if (pp->found) | ||
206 | continue; | ||
207 | |||
208 | lseek(fd, 0, SEEK_SET); | ||
209 | ret = find_probepoint(fd, pp); | ||
210 | if (ret < 0) { | ||
211 | if (session.need_dwarf) | ||
212 | die("Could not analyze debuginfo."); | ||
213 | |||
214 | pr_warning("An error occurred in debuginfo analysis. Try to use symbols.\n"); | ||
215 | break; | ||
216 | } | ||
217 | if (ret == 0) /* No error but failed to find probe point. */ | ||
218 | die("No probe point found."); | ||
219 | } | ||
220 | close(fd); | ||
221 | |||
222 | end_dwarf: | ||
223 | #endif /* !NO_LIBDWARF */ | ||
224 | |||
225 | /* Synthesize probes without dwarf */ | ||
226 | for (j = 0; j < session.nr_probe; j++) { | ||
227 | pp = &session.probes[j]; | ||
228 | if (pp->found) /* This probe is already found. */ | ||
229 | continue; | ||
230 | |||
231 | ret = synthesize_trace_kprobe_event(pp); | ||
232 | if (ret == -E2BIG) | ||
233 | die("probe point definition becomes too long."); | ||
234 | else if (ret < 0) | ||
235 | die("Failed to synthesize a probe point."); | ||
236 | } | ||
237 | |||
238 | /* Setting up probe points */ | ||
239 | add_trace_kprobe_events(session.probes, session.nr_probe); | ||
240 | return 0; | ||
241 | } | ||
242 | |||
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a4be453fc8a9..0e519c667e3a 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -17,55 +17,52 @@ | |||
17 | #include "util/header.h" | 17 | #include "util/header.h" |
18 | #include "util/event.h" | 18 | #include "util/event.h" |
19 | #include "util/debug.h" | 19 | #include "util/debug.h" |
20 | #include "util/trace-event.h" | 20 | #include "util/symbol.h" |
21 | 21 | ||
22 | #include <unistd.h> | 22 | #include <unistd.h> |
23 | #include <sched.h> | 23 | #include <sched.h> |
24 | 24 | ||
25 | #define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) | ||
26 | #define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) | ||
27 | |||
28 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; | 25 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; |
29 | 26 | ||
30 | static long default_interval = 100000; | 27 | static long default_interval = 0; |
31 | 28 | ||
32 | static int nr_cpus = 0; | 29 | static int nr_cpus = 0; |
33 | static unsigned int page_size; | 30 | static unsigned int page_size; |
34 | static unsigned int mmap_pages = 128; | 31 | static unsigned int mmap_pages = 128; |
35 | static int freq = 0; | 32 | static int freq = 1000; |
36 | static int output; | 33 | static int output; |
37 | static const char *output_name = "perf.data"; | 34 | static const char *output_name = "perf.data"; |
38 | static int group = 0; | 35 | static int group = 0; |
39 | static unsigned int realtime_prio = 0; | 36 | static unsigned int realtime_prio = 0; |
40 | static int raw_samples = 0; | 37 | static int raw_samples = 0; |
41 | static int system_wide = 0; | 38 | static int system_wide = 0; |
42 | static int profile_cpu = -1; | 39 | static int profile_cpu = -1; |
43 | static pid_t target_pid = -1; | 40 | static pid_t target_pid = -1; |
44 | static pid_t child_pid = -1; | 41 | static pid_t child_pid = -1; |
45 | static int inherit = 1; | 42 | static int inherit = 1; |
46 | static int force = 0; | 43 | static int force = 0; |
47 | static int append_file = 0; | 44 | static int append_file = 0; |
48 | static int call_graph = 0; | 45 | static int call_graph = 0; |
49 | static int inherit_stat = 0; | 46 | static int inherit_stat = 0; |
50 | static int no_samples = 0; | 47 | static int no_samples = 0; |
51 | static int sample_address = 0; | 48 | static int sample_address = 0; |
52 | static int multiplex = 0; | 49 | static int multiplex = 0; |
53 | static int multiplex_fd = -1; | 50 | static int multiplex_fd = -1; |
54 | 51 | ||
55 | static long samples; | 52 | static long samples = 0; |
56 | static struct timeval last_read; | 53 | static struct timeval last_read; |
57 | static struct timeval this_read; | 54 | static struct timeval this_read; |
58 | 55 | ||
59 | static u64 bytes_written; | 56 | static u64 bytes_written = 0; |
60 | 57 | ||
61 | static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; | 58 | static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; |
62 | 59 | ||
63 | static int nr_poll; | 60 | static int nr_poll = 0; |
64 | static int nr_cpu; | 61 | static int nr_cpu = 0; |
65 | 62 | ||
66 | static int file_new = 1; | 63 | static int file_new = 1; |
67 | 64 | ||
68 | struct perf_header *header; | 65 | struct perf_header *header = NULL; |
69 | 66 | ||
70 | struct mmap_data { | 67 | struct mmap_data { |
71 | int counter; | 68 | int counter; |
@@ -113,6 +110,24 @@ static void write_output(void *buf, size_t size) | |||
113 | } | 110 | } |
114 | } | 111 | } |
115 | 112 | ||
113 | static void write_event(event_t *buf, size_t size) | ||
114 | { | ||
115 | /* | ||
116 | * Add it to the list of DSOs, so that when we finish this | ||
117 | * record session we can pick the available build-ids. | ||
118 | */ | ||
119 | if (buf->header.type == PERF_RECORD_MMAP) | ||
120 | dsos__findnew(buf->mmap.filename); | ||
121 | |||
122 | write_output(buf, size); | ||
123 | } | ||
124 | |||
125 | static int process_synthesized_event(event_t *event) | ||
126 | { | ||
127 | write_event(event, event->header.size); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
116 | static void mmap_read(struct mmap_data *md) | 131 | static void mmap_read(struct mmap_data *md) |
117 | { | 132 | { |
118 | unsigned int head = mmap_read_head(md); | 133 | unsigned int head = mmap_read_head(md); |
@@ -161,14 +176,14 @@ static void mmap_read(struct mmap_data *md) | |||
161 | size = md->mask + 1 - (old & md->mask); | 176 | size = md->mask + 1 - (old & md->mask); |
162 | old += size; | 177 | old += size; |
163 | 178 | ||
164 | write_output(buf, size); | 179 | write_event(buf, size); |
165 | } | 180 | } |
166 | 181 | ||
167 | buf = &data[old & md->mask]; | 182 | buf = &data[old & md->mask]; |
168 | size = head - old; | 183 | size = head - old; |
169 | old += size; | 184 | old += size; |
170 | 185 | ||
171 | write_output(buf, size); | 186 | write_event(buf, size); |
172 | 187 | ||
173 | md->prev = old; | 188 | md->prev = old; |
174 | mmap_write_tail(md, old); | 189 | mmap_write_tail(md, old); |
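
The write_event() wrapper introduced above inspects each record on its way to the output file: a PERF_RECORD_MMAP record carries the path of a freshly mapped DSO, so the path is registered (dsos__findnew() in the real tool) before the bytes are written out unchanged, and mmap_read() now funnels everything through the same wrapper. A minimal standalone sketch of that intercept-then-write pattern, using simplified stand-in types rather than the real perf event structures:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-ins for the perf record types (assumption). */
    enum { RECORD_SAMPLE = 0, RECORD_MMAP = 1 };

    struct toy_event {
            int    type;              /* record type                     */
            size_t size;              /* total size of this record       */
            char   filename[64];      /* meaningful only for RECORD_MMAP */
    };

    /* Hypothetical stand-in for dsos__findnew(): remember the DSO name. */
    static void register_dso(const char *name)
    {
            fprintf(stderr, "dso seen: %s\n", name);
    }

    /* Stand-in for write_output(): append the raw record to the data file. */
    static void write_output(const void *buf, size_t size)
    {
            fwrite(buf, 1, size, stdout);
    }

    /* Peek first, then write the record out unmodified. */
    static void write_event(struct toy_event *ev)
    {
            if (ev->type == RECORD_MMAP)
                    register_dso(ev->filename);
            write_output(ev, ev->size);
    }

    int main(void)
    {
            struct toy_event ev = { .type = RECORD_MMAP, .size = sizeof(ev) };

            strncpy(ev.filename, "/bin/cat", sizeof(ev.filename) - 1);
            write_event(&ev);
            return 0;
    }
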
@@ -195,168 +210,6 @@ static void sig_atexit(void) | |||
195 | kill(getpid(), signr); | 210 | kill(getpid(), signr); |
196 | } | 211 | } |
197 | 212 | ||
198 | static pid_t pid_synthesize_comm_event(pid_t pid, int full) | ||
199 | { | ||
200 | struct comm_event comm_ev; | ||
201 | char filename[PATH_MAX]; | ||
202 | char bf[BUFSIZ]; | ||
203 | FILE *fp; | ||
204 | size_t size = 0; | ||
205 | DIR *tasks; | ||
206 | struct dirent dirent, *next; | ||
207 | pid_t tgid = 0; | ||
208 | |||
209 | snprintf(filename, sizeof(filename), "/proc/%d/status", pid); | ||
210 | |||
211 | fp = fopen(filename, "r"); | ||
212 | if (fp == NULL) { | ||
213 | /* | ||
214 | * We raced with a task exiting - just return: | ||
215 | */ | ||
216 | if (verbose) | ||
217 | fprintf(stderr, "couldn't open %s\n", filename); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | memset(&comm_ev, 0, sizeof(comm_ev)); | ||
222 | while (!comm_ev.comm[0] || !comm_ev.pid) { | ||
223 | if (fgets(bf, sizeof(bf), fp) == NULL) | ||
224 | goto out_failure; | ||
225 | |||
226 | if (memcmp(bf, "Name:", 5) == 0) { | ||
227 | char *name = bf + 5; | ||
228 | while (*name && isspace(*name)) | ||
229 | ++name; | ||
230 | size = strlen(name) - 1; | ||
231 | memcpy(comm_ev.comm, name, size++); | ||
232 | } else if (memcmp(bf, "Tgid:", 5) == 0) { | ||
233 | char *tgids = bf + 5; | ||
234 | while (*tgids && isspace(*tgids)) | ||
235 | ++tgids; | ||
236 | tgid = comm_ev.pid = atoi(tgids); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | comm_ev.header.type = PERF_RECORD_COMM; | ||
241 | size = ALIGN(size, sizeof(u64)); | ||
242 | comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); | ||
243 | |||
244 | if (!full) { | ||
245 | comm_ev.tid = pid; | ||
246 | |||
247 | write_output(&comm_ev, comm_ev.header.size); | ||
248 | goto out_fclose; | ||
249 | } | ||
250 | |||
251 | snprintf(filename, sizeof(filename), "/proc/%d/task", pid); | ||
252 | |||
253 | tasks = opendir(filename); | ||
254 | while (!readdir_r(tasks, &dirent, &next) && next) { | ||
255 | char *end; | ||
256 | pid = strtol(dirent.d_name, &end, 10); | ||
257 | if (*end) | ||
258 | continue; | ||
259 | |||
260 | comm_ev.tid = pid; | ||
261 | |||
262 | write_output(&comm_ev, comm_ev.header.size); | ||
263 | } | ||
264 | closedir(tasks); | ||
265 | |||
266 | out_fclose: | ||
267 | fclose(fp); | ||
268 | return tgid; | ||
269 | |||
270 | out_failure: | ||
271 | fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", | ||
272 | filename); | ||
273 | exit(EXIT_FAILURE); | ||
274 | } | ||
275 | |||
276 | static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid) | ||
277 | { | ||
278 | char filename[PATH_MAX]; | ||
279 | FILE *fp; | ||
280 | |||
281 | snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); | ||
282 | |||
283 | fp = fopen(filename, "r"); | ||
284 | if (fp == NULL) { | ||
285 | /* | ||
286 | * We raced with a task exiting - just return: | ||
287 | */ | ||
288 | if (verbose) | ||
289 | fprintf(stderr, "couldn't open %s\n", filename); | ||
290 | return; | ||
291 | } | ||
292 | while (1) { | ||
293 | char bf[BUFSIZ], *pbf = bf; | ||
294 | struct mmap_event mmap_ev = { | ||
295 | .header = { .type = PERF_RECORD_MMAP }, | ||
296 | }; | ||
297 | int n; | ||
298 | size_t size; | ||
299 | if (fgets(bf, sizeof(bf), fp) == NULL) | ||
300 | break; | ||
301 | |||
302 | /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ | ||
303 | n = hex2u64(pbf, &mmap_ev.start); | ||
304 | if (n < 0) | ||
305 | continue; | ||
306 | pbf += n + 1; | ||
307 | n = hex2u64(pbf, &mmap_ev.len); | ||
308 | if (n < 0) | ||
309 | continue; | ||
310 | pbf += n + 3; | ||
311 | if (*pbf == 'x') { /* vm_exec */ | ||
312 | char *execname = strchr(bf, '/'); | ||
313 | |||
314 | /* Catch VDSO */ | ||
315 | if (execname == NULL) | ||
316 | execname = strstr(bf, "[vdso]"); | ||
317 | |||
318 | if (execname == NULL) | ||
319 | continue; | ||
320 | |||
321 | size = strlen(execname); | ||
322 | execname[size - 1] = '\0'; /* Remove \n */ | ||
323 | memcpy(mmap_ev.filename, execname, size); | ||
324 | size = ALIGN(size, sizeof(u64)); | ||
325 | mmap_ev.len -= mmap_ev.start; | ||
326 | mmap_ev.header.size = (sizeof(mmap_ev) - | ||
327 | (sizeof(mmap_ev.filename) - size)); | ||
328 | mmap_ev.pid = tgid; | ||
329 | mmap_ev.tid = pid; | ||
330 | |||
331 | write_output(&mmap_ev, mmap_ev.header.size); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | fclose(fp); | ||
336 | } | ||
337 | |||
338 | static void synthesize_all(void) | ||
339 | { | ||
340 | DIR *proc; | ||
341 | struct dirent dirent, *next; | ||
342 | |||
343 | proc = opendir("/proc"); | ||
344 | |||
345 | while (!readdir_r(proc, &dirent, &next) && next) { | ||
346 | char *end; | ||
347 | pid_t pid, tgid; | ||
348 | |||
349 | pid = strtol(dirent.d_name, &end, 10); | ||
350 | if (*end) /* only interested in proper numerical dirents */ | ||
351 | continue; | ||
352 | |||
353 | tgid = pid_synthesize_comm_event(pid, 1); | ||
354 | pid_synthesize_mmap_samples(pid, tgid); | ||
355 | } | ||
356 | |||
357 | closedir(proc); | ||
358 | } | ||
359 | |||
360 | static int group_fd; | 213 | static int group_fd; |
361 | 214 | ||
362 | static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr) | 215 | static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr) |
@@ -367,7 +220,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n | |||
367 | h_attr = header->attr[nr]; | 220 | h_attr = header->attr[nr]; |
368 | } else { | 221 | } else { |
369 | h_attr = perf_header_attr__new(a); | 222 | h_attr = perf_header_attr__new(a); |
370 | perf_header__add_attr(header, h_attr); | 223 | if (h_attr != NULL) |
224 | if (perf_header__add_attr(header, h_attr) < 0) { | ||
225 | perf_header_attr__delete(h_attr); | ||
226 | h_attr = NULL; | ||
227 | } | ||
371 | } | 228 | } |
372 | 229 | ||
373 | return h_attr; | 230 | return h_attr; |
@@ -375,9 +232,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n | |||
375 | 232 | ||
376 | static void create_counter(int counter, int cpu, pid_t pid) | 233 | static void create_counter(int counter, int cpu, pid_t pid) |
377 | { | 234 | { |
235 | char *filter = filters[counter]; | ||
378 | struct perf_event_attr *attr = attrs + counter; | 236 | struct perf_event_attr *attr = attrs + counter; |
379 | struct perf_header_attr *h_attr; | 237 | struct perf_header_attr *h_attr; |
380 | int track = !counter; /* only the first counter needs these */ | 238 | int track = !counter; /* only the first counter needs these */ |
239 | int ret; | ||
381 | struct { | 240 | struct { |
382 | u64 count; | 241 | u64 count; |
383 | u64 time_enabled; | 242 | u64 time_enabled; |
@@ -448,11 +307,19 @@ try_again: | |||
448 | printf("\n"); | 307 | printf("\n"); |
449 | error("perfcounter syscall returned with %d (%s)\n", | 308 | error("perfcounter syscall returned with %d (%s)\n", |
450 | fd[nr_cpu][counter], strerror(err)); | 309 | fd[nr_cpu][counter], strerror(err)); |
310 | |||
311 | #if defined(__i386__) || defined(__x86_64__) | ||
312 | if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP) | ||
313 | die("No hardware sampling interrupt available. No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.\n"); | ||
314 | #endif | ||
315 | |||
451 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); | 316 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); |
452 | exit(-1); | 317 | exit(-1); |
453 | } | 318 | } |
454 | 319 | ||
455 | h_attr = get_header_attr(attr, counter); | 320 | h_attr = get_header_attr(attr, counter); |
321 | if (h_attr == NULL) | ||
322 | die("nomem\n"); | ||
456 | 323 | ||
457 | if (!file_new) { | 324 | if (!file_new) { |
458 | if (memcmp(&h_attr->attr, attr, sizeof(*attr))) { | 325 | if (memcmp(&h_attr->attr, attr, sizeof(*attr))) { |
@@ -466,7 +333,10 @@ try_again: | |||
466 | exit(-1); | 333 | exit(-1); |
467 | } | 334 | } |
468 | 335 | ||
469 | perf_header_attr__add_id(h_attr, read_data.id); | 336 | if (perf_header_attr__add_id(h_attr, read_data.id) < 0) { |
337 | pr_warning("Not enough memory to add id\n"); | ||
338 | exit(-1); | ||
339 | } | ||
470 | 340 | ||
471 | assert(fd[nr_cpu][counter] >= 0); | 341 | assert(fd[nr_cpu][counter] >= 0); |
472 | fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); | 342 | fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); |
@@ -480,7 +350,6 @@ try_again: | |||
480 | multiplex_fd = fd[nr_cpu][counter]; | 350 | multiplex_fd = fd[nr_cpu][counter]; |
481 | 351 | ||
482 | if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { | 352 | if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { |
483 | int ret; | ||
484 | 353 | ||
485 | ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); | 354 | ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); |
486 | assert(ret != -1); | 355 | assert(ret != -1); |
@@ -500,6 +369,16 @@ try_again: | |||
500 | } | 369 | } |
501 | } | 370 | } |
502 | 371 | ||
372 | if (filter != NULL) { | ||
373 | ret = ioctl(fd[nr_cpu][counter], | ||
374 | PERF_EVENT_IOC_SET_FILTER, filter); | ||
375 | if (ret) { | ||
376 | error("failed to set filter with %d (%s)\n", errno, | ||
377 | strerror(errno)); | ||
378 | exit(-1); | ||
379 | } | ||
380 | } | ||
381 | |||
503 | ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); | 382 | ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); |
504 | } | 383 | } |
505 | 384 | ||
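
The filter hunk above applies a user supplied expression to an already opened event with the PERF_EVENT_IOC_SET_FILTER ioctl and aborts the record session if the kernel rejects it. A hedged sketch of the same call sequence on a tracepoint event; the tracepoint id is a placeholder that must be read from debugfs on the target system, and the filter string is only an example:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* glibc has no wrapper for the perf_event_open() syscall. */
    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            /* Placeholder: read the real id from
             * <debugfs>/tracing/events/<subsys>/<event>/id. */
            unsigned long long tracepoint_id = 0;
            const char *filter = "common_pid != 0";   /* example expression */
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type          = PERF_TYPE_TRACEPOINT;
            attr.size          = sizeof(attr);
            attr.config        = tracepoint_id;
            attr.sample_period = 1;

            fd = perf_event_open(&attr, 0 /* current task */, -1, -1, 0);
            if (fd < 0) {
                    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
                    return EXIT_FAILURE;
            }

            /* Same pattern as the record tool: give up if the kernel
             * refuses the filter expression for this event. */
            if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter) < 0) {
                    fprintf(stderr, "failed to set filter: %s\n", strerror(errno));
                    close(fd);
                    return EXIT_FAILURE;
            }

            ioctl(fd, PERF_EVENT_IOC_ENABLE);
            close(fd);
            return 0;
    }
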
@@ -518,7 +397,7 @@ static void atexit_header(void) | |||
518 | { | 397 | { |
519 | header->data_size += bytes_written; | 398 | header->data_size += bytes_written; |
520 | 399 | ||
521 | perf_header__write(header, output); | 400 | perf_header__write(header, output, true); |
522 | } | 401 | } |
523 | 402 | ||
524 | static int __cmd_record(int argc, const char **argv) | 403 | static int __cmd_record(int argc, const char **argv) |
@@ -527,7 +406,7 @@ static int __cmd_record(int argc, const char **argv) | |||
527 | struct stat st; | 406 | struct stat st; |
528 | pid_t pid = 0; | 407 | pid_t pid = 0; |
529 | int flags; | 408 | int flags; |
530 | int ret; | 409 | int err; |
531 | unsigned long waking = 0; | 410 | unsigned long waking = 0; |
532 | 411 | ||
533 | page_size = sysconf(_SC_PAGE_SIZE); | 412 | page_size = sysconf(_SC_PAGE_SIZE); |
@@ -561,22 +440,29 @@ static int __cmd_record(int argc, const char **argv) | |||
561 | exit(-1); | 440 | exit(-1); |
562 | } | 441 | } |
563 | 442 | ||
564 | if (!file_new) | 443 | header = perf_header__new(); |
565 | header = perf_header__read(output); | 444 | if (header == NULL) { |
566 | else | 445 | pr_err("Not enough memory for reading perf file header\n"); |
567 | header = perf_header__new(); | 446 | return -1; |
447 | } | ||
568 | 448 | ||
449 | if (!file_new) { | ||
450 | err = perf_header__read(header, output); | ||
451 | if (err < 0) | ||
452 | return err; | ||
453 | } | ||
569 | 454 | ||
570 | if (raw_samples) { | 455 | if (raw_samples) { |
571 | read_tracing_data(attrs, nr_counters); | 456 | perf_header__set_feat(header, HEADER_TRACE_INFO); |
572 | } else { | 457 | } else { |
573 | for (i = 0; i < nr_counters; i++) { | 458 | for (i = 0; i < nr_counters; i++) { |
574 | if (attrs[i].sample_type & PERF_SAMPLE_RAW) { | 459 | if (attrs[i].sample_type & PERF_SAMPLE_RAW) { |
575 | read_tracing_data(attrs, nr_counters); | 460 | perf_header__set_feat(header, HEADER_TRACE_INFO); |
576 | break; | 461 | break; |
577 | } | 462 | } |
578 | } | 463 | } |
579 | } | 464 | } |
465 | |||
580 | atexit(atexit_header); | 466 | atexit(atexit_header); |
581 | 467 | ||
582 | if (!system_wide) { | 468 | if (!system_wide) { |
@@ -594,25 +480,36 @@ static int __cmd_record(int argc, const char **argv) | |||
594 | } | 480 | } |
595 | } | 481 | } |
596 | 482 | ||
597 | if (file_new) | 483 | if (file_new) { |
598 | perf_header__write(header, output); | 484 | err = perf_header__write(header, output, false); |
485 | if (err < 0) | ||
486 | return err; | ||
487 | } | ||
599 | 488 | ||
600 | if (!system_wide) { | 489 | if (!system_wide) |
601 | pid_t tgid = pid_synthesize_comm_event(pid, 0); | 490 | event__synthesize_thread(pid, process_synthesized_event); |
602 | pid_synthesize_mmap_samples(pid, tgid); | 491 | else |
603 | } else | 492 | event__synthesize_threads(process_synthesized_event); |
604 | synthesize_all(); | ||
605 | 493 | ||
606 | if (target_pid == -1 && argc) { | 494 | if (target_pid == -1 && argc) { |
607 | pid = fork(); | 495 | pid = fork(); |
608 | if (pid < 0) | 496 | if (pid < 0) |
609 | perror("failed to fork"); | 497 | die("failed to fork"); |
610 | 498 | ||
611 | if (!pid) { | 499 | if (!pid) { |
612 | if (execvp(argv[0], (char **)argv)) { | 500 | if (execvp(argv[0], (char **)argv)) { |
613 | perror(argv[0]); | 501 | perror(argv[0]); |
614 | exit(-1); | 502 | exit(-1); |
615 | } | 503 | } |
504 | } else { | ||
505 | /* | ||
506 | * Wait a bit for the execv'ed child to appear | ||
507 | * and be updated in /proc | ||
508 | * FIXME: Do you know a less heuristical solution? | ||
509 | */ | ||
510 | usleep(1000); | ||
511 | event__synthesize_thread(pid, | ||
512 | process_synthesized_event); | ||
616 | } | 513 | } |
617 | 514 | ||
618 | child_pid = pid; | 515 | child_pid = pid; |
@@ -623,7 +520,7 @@ static int __cmd_record(int argc, const char **argv) | |||
623 | 520 | ||
624 | param.sched_priority = realtime_prio; | 521 | param.sched_priority = realtime_prio; |
625 | if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { | 522 | if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { |
626 | printf("Could not set realtime priority.\n"); | 523 | pr_err("Could not set realtime priority.\n"); |
627 | exit(-1); | 524 | exit(-1); |
628 | } | 525 | } |
629 | } | 526 | } |
@@ -641,7 +538,7 @@ static int __cmd_record(int argc, const char **argv) | |||
641 | if (hits == samples) { | 538 | if (hits == samples) { |
642 | if (done) | 539 | if (done) |
643 | break; | 540 | break; |
644 | ret = poll(event_array, nr_poll, -1); | 541 | err = poll(event_array, nr_poll, -1); |
645 | waking++; | 542 | waking++; |
646 | } | 543 | } |
647 | 544 | ||
@@ -677,6 +574,8 @@ static const struct option options[] = { | |||
677 | OPT_CALLBACK('e', "event", NULL, "event", | 574 | OPT_CALLBACK('e', "event", NULL, "event", |
678 | "event selector. use 'perf list' to list available events", | 575 | "event selector. use 'perf list' to list available events", |
679 | parse_events), | 576 | parse_events), |
577 | OPT_CALLBACK(0, "filter", NULL, "filter", | ||
578 | "event filter", parse_filter), | ||
680 | OPT_INTEGER('p', "pid", &target_pid, | 579 | OPT_INTEGER('p', "pid", &target_pid, |
681 | "record events on existing pid"), | 580 | "record events on existing pid"), |
682 | OPT_INTEGER('r', "realtime", &realtime_prio, | 581 | OPT_INTEGER('r', "realtime", &realtime_prio, |
@@ -720,6 +619,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
720 | { | 619 | { |
721 | int counter; | 620 | int counter; |
722 | 621 | ||
622 | symbol__init(0); | ||
623 | |||
723 | argc = parse_options(argc, argv, options, record_usage, | 624 | argc = parse_options(argc, argv, options, record_usage, |
724 | PARSE_OPT_STOP_AT_NON_OPTION); | 625 | PARSE_OPT_STOP_AT_NON_OPTION); |
725 | if (!argc && target_pid == -1 && !system_wide) | 626 | if (!argc && target_pid == -1 && !system_wide) |
@@ -731,6 +632,18 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
731 | attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; | 632 | attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; |
732 | } | 633 | } |
733 | 634 | ||
635 | /* | ||
636 | * User specified count overrides default frequency. | ||
637 | */ | ||
638 | if (default_interval) | ||
639 | freq = 0; | ||
640 | else if (freq) { | ||
641 | default_interval = freq; | ||
642 | } else { | ||
643 | fprintf(stderr, "frequency and count are zero, aborting\n"); | ||
644 | exit(EXIT_FAILURE); | ||
645 | } | ||
646 | |||
734 | for (counter = 0; counter < nr_counters; counter++) { | 647 | for (counter = 0; counter < nr_counters; counter++) { |
735 | if (attrs[counter].sample_period) | 648 | if (attrs[counter].sample_period) |
736 | continue; | 649 | continue; |
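
The final builtin-record.c hunk makes an explicit sample count (the count option) override the default sampling frequency (the freq option) and turns the case where both are zero into a hard error instead of silently recording nothing. A standalone sketch of that small decision, with the two knobs taken from the command line instead of from option parsing:

    #include <stdio.h>
    #include <stdlib.h>

    /* A fixed period wins over a frequency; both zero is a usage error. */
    static int pick_sampling_mode(long *interval, int *freq)
    {
            if (*interval) {        /* user gave an explicit period */
                    *freq = 0;
                    return 0;
            }
            if (*freq) {            /* fall back to frequency driven sampling */
                    *interval = *freq;
                    return 0;
            }
            return -1;              /* neither given: nothing to sample with */
    }

    int main(int argc, char **argv)
    {
            long interval = argc > 1 ? strtol(argv[1], NULL, 10) : 0;
            int  freq     = argc > 2 ? (int)strtol(argv[2], NULL, 10) : 1000;

            if (pick_sampling_mode(&interval, &freq) < 0) {
                    fprintf(stderr, "frequency and count are zero, aborting\n");
                    return EXIT_FAILURE;
            }
            printf("interval=%ld freq=%d\n", interval, freq);
            return 0;
    }
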
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 19669c20088e..383c4ab4f9af 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -26,20 +26,18 @@ | |||
26 | #include "util/parse-options.h" | 26 | #include "util/parse-options.h" |
27 | #include "util/parse-events.h" | 27 | #include "util/parse-events.h" |
28 | 28 | ||
29 | #include "util/data_map.h" | ||
29 | #include "util/thread.h" | 30 | #include "util/thread.h" |
31 | #include "util/sort.h" | ||
32 | #include "util/hist.h" | ||
30 | 33 | ||
31 | static char const *input_name = "perf.data"; | 34 | static char const *input_name = "perf.data"; |
32 | 35 | ||
33 | static char default_sort_order[] = "comm,dso,symbol"; | ||
34 | static char *sort_order = default_sort_order; | ||
35 | static char *dso_list_str, *comm_list_str, *sym_list_str, | 36 | static char *dso_list_str, *comm_list_str, *sym_list_str, |
36 | *col_width_list_str; | 37 | *col_width_list_str; |
37 | static struct strlist *dso_list, *comm_list, *sym_list; | 38 | static struct strlist *dso_list, *comm_list, *sym_list; |
38 | static char *field_sep; | ||
39 | 39 | ||
40 | static int force; | 40 | static int force; |
41 | static int input; | ||
42 | static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; | ||
43 | 41 | ||
44 | static int full_paths; | 42 | static int full_paths; |
45 | static int show_nr_samples; | 43 | static int show_nr_samples; |
@@ -50,374 +48,38 @@ static struct perf_read_values show_threads_values; | |||
50 | static char default_pretty_printing_style[] = "normal"; | 48 | static char default_pretty_printing_style[] = "normal"; |
51 | static char *pretty_printing_style = default_pretty_printing_style; | 49 | static char *pretty_printing_style = default_pretty_printing_style; |
52 | 50 | ||
53 | static unsigned long page_size; | ||
54 | static unsigned long mmap_window = 32; | ||
55 | |||
56 | static char default_parent_pattern[] = "^sys_|^do_page_fault"; | ||
57 | static char *parent_pattern = default_parent_pattern; | ||
58 | static regex_t parent_regex; | ||
59 | |||
60 | static int exclude_other = 1; | 51 | static int exclude_other = 1; |
61 | 52 | ||
62 | static char callchain_default_opt[] = "fractal,0.5"; | 53 | static char callchain_default_opt[] = "fractal,0.5"; |
63 | 54 | ||
64 | static int callchain; | ||
65 | |||
66 | static char __cwd[PATH_MAX]; | ||
67 | static char *cwd = __cwd; | ||
68 | static int cwdlen; | ||
69 | |||
70 | static struct rb_root threads; | ||
71 | static struct thread *last_match; | ||
72 | |||
73 | static struct perf_header *header; | 55 | static struct perf_header *header; |
74 | 56 | ||
75 | static | ||
76 | struct callchain_param callchain_param = { | ||
77 | .mode = CHAIN_GRAPH_REL, | ||
78 | .min_percent = 0.5 | ||
79 | }; | ||
80 | |||
81 | static u64 sample_type; | 57 | static u64 sample_type; |
82 | 58 | ||
83 | static int repsep_fprintf(FILE *fp, const char *fmt, ...) | 59 | struct symbol_conf symbol_conf; |
84 | { | ||
85 | int n; | ||
86 | va_list ap; | ||
87 | |||
88 | va_start(ap, fmt); | ||
89 | if (!field_sep) | ||
90 | n = vfprintf(fp, fmt, ap); | ||
91 | else { | ||
92 | char *bf = NULL; | ||
93 | n = vasprintf(&bf, fmt, ap); | ||
94 | if (n > 0) { | ||
95 | char *sep = bf; | ||
96 | |||
97 | while (1) { | ||
98 | sep = strchr(sep, *field_sep); | ||
99 | if (sep == NULL) | ||
100 | break; | ||
101 | *sep = '.'; | ||
102 | } | ||
103 | } | ||
104 | fputs(bf, fp); | ||
105 | free(bf); | ||
106 | } | ||
107 | va_end(ap); | ||
108 | return n; | ||
109 | } | ||
110 | |||
111 | static unsigned int dsos__col_width, | ||
112 | comms__col_width, | ||
113 | threads__col_width; | ||
114 | 60 | ||
115 | /* | ||
116 | * histogram, sorted on item, collects counts | ||
117 | */ | ||
118 | |||
119 | static struct rb_root hist; | ||
120 | |||
121 | struct hist_entry { | ||
122 | struct rb_node rb_node; | ||
123 | |||
124 | struct thread *thread; | ||
125 | struct map *map; | ||
126 | struct dso *dso; | ||
127 | struct symbol *sym; | ||
128 | struct symbol *parent; | ||
129 | u64 ip; | ||
130 | char level; | ||
131 | struct callchain_node callchain; | ||
132 | struct rb_root sorted_chain; | ||
133 | |||
134 | u64 count; | ||
135 | }; | ||
136 | |||
137 | /* | ||
138 | * configurable sorting bits | ||
139 | */ | ||
140 | |||
141 | struct sort_entry { | ||
142 | struct list_head list; | ||
143 | |||
144 | const char *header; | ||
145 | |||
146 | int64_t (*cmp)(struct hist_entry *, struct hist_entry *); | ||
147 | int64_t (*collapse)(struct hist_entry *, struct hist_entry *); | ||
148 | size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); | ||
149 | unsigned int *width; | ||
150 | bool elide; | ||
151 | }; | ||
152 | |||
153 | static int64_t cmp_null(void *l, void *r) | ||
154 | { | ||
155 | if (!l && !r) | ||
156 | return 0; | ||
157 | else if (!l) | ||
158 | return -1; | ||
159 | else | ||
160 | return 1; | ||
161 | } | ||
162 | |||
163 | /* --sort pid */ | ||
164 | |||
165 | static int64_t | ||
166 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) | ||
167 | { | ||
168 | return right->thread->pid - left->thread->pid; | ||
169 | } | ||
170 | 61 | ||
171 | static size_t | 62 | static size_t |
172 | sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) | 63 | callchain__fprintf_left_margin(FILE *fp, int left_margin) |
173 | { | 64 | { |
174 | return repsep_fprintf(fp, "%*s:%5d", width - 6, | 65 | int i; |
175 | self->thread->comm ?: "", self->thread->pid); | 66 | int ret; |
176 | } | ||
177 | |||
178 | static struct sort_entry sort_thread = { | ||
179 | .header = "Command: Pid", | ||
180 | .cmp = sort__thread_cmp, | ||
181 | .print = sort__thread_print, | ||
182 | .width = &threads__col_width, | ||
183 | }; | ||
184 | |||
185 | /* --sort comm */ | ||
186 | |||
187 | static int64_t | ||
188 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) | ||
189 | { | ||
190 | return right->thread->pid - left->thread->pid; | ||
191 | } | ||
192 | |||
193 | static int64_t | ||
194 | sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) | ||
195 | { | ||
196 | char *comm_l = left->thread->comm; | ||
197 | char *comm_r = right->thread->comm; | ||
198 | |||
199 | if (!comm_l || !comm_r) | ||
200 | return cmp_null(comm_l, comm_r); | ||
201 | |||
202 | return strcmp(comm_l, comm_r); | ||
203 | } | ||
204 | |||
205 | static size_t | ||
206 | sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
207 | { | ||
208 | return repsep_fprintf(fp, "%*s", width, self->thread->comm); | ||
209 | } | ||
210 | |||
211 | static struct sort_entry sort_comm = { | ||
212 | .header = "Command", | ||
213 | .cmp = sort__comm_cmp, | ||
214 | .collapse = sort__comm_collapse, | ||
215 | .print = sort__comm_print, | ||
216 | .width = &comms__col_width, | ||
217 | }; | ||
218 | |||
219 | /* --sort dso */ | ||
220 | |||
221 | static int64_t | ||
222 | sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) | ||
223 | { | ||
224 | struct dso *dso_l = left->dso; | ||
225 | struct dso *dso_r = right->dso; | ||
226 | |||
227 | if (!dso_l || !dso_r) | ||
228 | return cmp_null(dso_l, dso_r); | ||
229 | |||
230 | return strcmp(dso_l->name, dso_r->name); | ||
231 | } | ||
232 | |||
233 | static size_t | ||
234 | sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
235 | { | ||
236 | if (self->dso) | ||
237 | return repsep_fprintf(fp, "%-*s", width, self->dso->name); | ||
238 | |||
239 | return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); | ||
240 | } | ||
241 | |||
242 | static struct sort_entry sort_dso = { | ||
243 | .header = "Shared Object", | ||
244 | .cmp = sort__dso_cmp, | ||
245 | .print = sort__dso_print, | ||
246 | .width = &dsos__col_width, | ||
247 | }; | ||
248 | |||
249 | /* --sort symbol */ | ||
250 | |||
251 | static int64_t | ||
252 | sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) | ||
253 | { | ||
254 | u64 ip_l, ip_r; | ||
255 | |||
256 | if (left->sym == right->sym) | ||
257 | return 0; | ||
258 | |||
259 | ip_l = left->sym ? left->sym->start : left->ip; | ||
260 | ip_r = right->sym ? right->sym->start : right->ip; | ||
261 | |||
262 | return (int64_t)(ip_r - ip_l); | ||
263 | } | ||
264 | |||
265 | static size_t | ||
266 | sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) | ||
267 | { | ||
268 | size_t ret = 0; | ||
269 | 67 | ||
270 | if (verbose) | 68 | ret = fprintf(fp, " "); |
271 | ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, | ||
272 | dso__symtab_origin(self->dso)); | ||
273 | 69 | ||
274 | ret += repsep_fprintf(fp, "[%c] ", self->level); | 70 | for (i = 0; i < left_margin; i++) |
275 | if (self->sym) { | 71 | ret += fprintf(fp, " "); |
276 | ret += repsep_fprintf(fp, "%s", self->sym->name); | ||
277 | |||
278 | if (self->sym->module) | ||
279 | ret += repsep_fprintf(fp, "\t[%s]", | ||
280 | self->sym->module->name); | ||
281 | } else { | ||
282 | ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); | ||
283 | } | ||
284 | 72 | ||
285 | return ret; | 73 | return ret; |
286 | } | 74 | } |
287 | 75 | ||
288 | static struct sort_entry sort_sym = { | 76 | static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, |
289 | .header = "Symbol", | 77 | int left_margin) |
290 | .cmp = sort__sym_cmp, | ||
291 | .print = sort__sym_print, | ||
292 | }; | ||
293 | |||
294 | /* --sort parent */ | ||
295 | |||
296 | static int64_t | ||
297 | sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) | ||
298 | { | ||
299 | struct symbol *sym_l = left->parent; | ||
300 | struct symbol *sym_r = right->parent; | ||
301 | |||
302 | if (!sym_l || !sym_r) | ||
303 | return cmp_null(sym_l, sym_r); | ||
304 | |||
305 | return strcmp(sym_l->name, sym_r->name); | ||
306 | } | ||
307 | |||
308 | static size_t | ||
309 | sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
310 | { | ||
311 | return repsep_fprintf(fp, "%-*s", width, | ||
312 | self->parent ? self->parent->name : "[other]"); | ||
313 | } | ||
314 | |||
315 | static unsigned int parent_symbol__col_width; | ||
316 | |||
317 | static struct sort_entry sort_parent = { | ||
318 | .header = "Parent symbol", | ||
319 | .cmp = sort__parent_cmp, | ||
320 | .print = sort__parent_print, | ||
321 | .width = &parent_symbol__col_width, | ||
322 | }; | ||
323 | |||
324 | static int sort__need_collapse = 0; | ||
325 | static int sort__has_parent = 0; | ||
326 | |||
327 | struct sort_dimension { | ||
328 | const char *name; | ||
329 | struct sort_entry *entry; | ||
330 | int taken; | ||
331 | }; | ||
332 | |||
333 | static struct sort_dimension sort_dimensions[] = { | ||
334 | { .name = "pid", .entry = &sort_thread, }, | ||
335 | { .name = "comm", .entry = &sort_comm, }, | ||
336 | { .name = "dso", .entry = &sort_dso, }, | ||
337 | { .name = "symbol", .entry = &sort_sym, }, | ||
338 | { .name = "parent", .entry = &sort_parent, }, | ||
339 | }; | ||
340 | |||
341 | static LIST_HEAD(hist_entry__sort_list); | ||
342 | |||
343 | static int sort_dimension__add(const char *tok) | ||
344 | { | ||
345 | unsigned int i; | ||
346 | |||
347 | for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { | ||
348 | struct sort_dimension *sd = &sort_dimensions[i]; | ||
349 | |||
350 | if (sd->taken) | ||
351 | continue; | ||
352 | |||
353 | if (strncasecmp(tok, sd->name, strlen(tok))) | ||
354 | continue; | ||
355 | |||
356 | if (sd->entry->collapse) | ||
357 | sort__need_collapse = 1; | ||
358 | |||
359 | if (sd->entry == &sort_parent) { | ||
360 | int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); | ||
361 | if (ret) { | ||
362 | char err[BUFSIZ]; | ||
363 | |||
364 | regerror(ret, &parent_regex, err, sizeof(err)); | ||
365 | fprintf(stderr, "Invalid regex: %s\n%s", | ||
366 | parent_pattern, err); | ||
367 | exit(-1); | ||
368 | } | ||
369 | sort__has_parent = 1; | ||
370 | } | ||
371 | |||
372 | list_add_tail(&sd->entry->list, &hist_entry__sort_list); | ||
373 | sd->taken = 1; | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | return -ESRCH; | ||
379 | } | ||
380 | |||
381 | static int64_t | ||
382 | hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) | ||
383 | { | ||
384 | struct sort_entry *se; | ||
385 | int64_t cmp = 0; | ||
386 | |||
387 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
388 | cmp = se->cmp(left, right); | ||
389 | if (cmp) | ||
390 | break; | ||
391 | } | ||
392 | |||
393 | return cmp; | ||
394 | } | ||
395 | |||
396 | static int64_t | ||
397 | hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | ||
398 | { | ||
399 | struct sort_entry *se; | ||
400 | int64_t cmp = 0; | ||
401 | |||
402 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
403 | int64_t (*f)(struct hist_entry *, struct hist_entry *); | ||
404 | |||
405 | f = se->collapse ?: se->cmp; | ||
406 | |||
407 | cmp = f(left, right); | ||
408 | if (cmp) | ||
409 | break; | ||
410 | } | ||
411 | |||
412 | return cmp; | ||
413 | } | ||
414 | |||
415 | static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) | ||
416 | { | 78 | { |
417 | int i; | 79 | int i; |
418 | size_t ret = 0; | 80 | size_t ret = 0; |
419 | 81 | ||
420 | ret += fprintf(fp, "%s", " "); | 82 | ret += callchain__fprintf_left_margin(fp, left_margin); |
421 | 83 | ||
422 | for (i = 0; i < depth; i++) | 84 | for (i = 0; i < depth; i++) |
423 | if (depth_mask & (1 << i)) | 85 | if (depth_mask & (1 << i)) |
@@ -432,12 +94,12 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) | |||
432 | static size_t | 94 | static size_t |
433 | ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, | 95 | ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, |
434 | int depth_mask, int count, u64 total_samples, | 96 | int depth_mask, int count, u64 total_samples, |
435 | int hits) | 97 | int hits, int left_margin) |
436 | { | 98 | { |
437 | int i; | 99 | int i; |
438 | size_t ret = 0; | 100 | size_t ret = 0; |
439 | 101 | ||
440 | ret += fprintf(fp, "%s", " "); | 102 | ret += callchain__fprintf_left_margin(fp, left_margin); |
441 | for (i = 0; i < depth; i++) { | 103 | for (i = 0; i < depth; i++) { |
442 | if (depth_mask & (1 << i)) | 104 | if (depth_mask & (1 << i)) |
443 | ret += fprintf(fp, "|"); | 105 | ret += fprintf(fp, "|"); |
@@ -475,8 +137,9 @@ static void init_rem_hits(void) | |||
475 | } | 137 | } |
476 | 138 | ||
477 | static size_t | 139 | static size_t |
478 | callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | 140 | __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, |
479 | u64 total_samples, int depth, int depth_mask) | 141 | u64 total_samples, int depth, int depth_mask, |
142 | int left_margin) | ||
480 | { | 143 | { |
481 | struct rb_node *node, *next; | 144 | struct rb_node *node, *next; |
482 | struct callchain_node *child; | 145 | struct callchain_node *child; |
@@ -517,7 +180,8 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
518 | * But we keep the older depth mask for the line separator | 180 | * to keep the level link until we reach the last child |
518 | * to keep the level link until we reach the last child | 181 | * to keep the level link until we reach the last child |
519 | */ | 182 | */ |
520 | ret += ipchain__fprintf_graph_line(fp, depth, depth_mask); | 183 | ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, |
184 | left_margin); | ||
521 | i = 0; | 185 | i = 0; |
522 | list_for_each_entry(chain, &child->val, list) { | 186 | list_for_each_entry(chain, &child->val, list) { |
523 | if (chain->ip >= PERF_CONTEXT_MAX) | 187 | if (chain->ip >= PERF_CONTEXT_MAX) |
@@ -525,11 +189,13 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
525 | ret += ipchain__fprintf_graph(fp, chain, depth, | 189 | ret += ipchain__fprintf_graph(fp, chain, depth, |
526 | new_depth_mask, i++, | 190 | new_depth_mask, i++, |
527 | new_total, | 191 | new_total, |
528 | cumul); | 192 | cumul, |
193 | left_margin); | ||
529 | } | 194 | } |
530 | ret += callchain__fprintf_graph(fp, child, new_total, | 195 | ret += __callchain__fprintf_graph(fp, child, new_total, |
531 | depth + 1, | 196 | depth + 1, |
532 | new_depth_mask | (1 << depth)); | 197 | new_depth_mask | (1 << depth), |
198 | left_margin); | ||
533 | node = next; | 199 | node = next; |
534 | } | 200 | } |
535 | 201 | ||
@@ -543,9 +209,48 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
543 | 209 | ||
544 | ret += ipchain__fprintf_graph(fp, &rem_hits, depth, | 210 | ret += ipchain__fprintf_graph(fp, &rem_hits, depth, |
545 | new_depth_mask, 0, new_total, | 211 | new_depth_mask, 0, new_total, |
546 | remaining); | 212 | remaining, left_margin); |
213 | } | ||
214 | |||
215 | return ret; | ||
216 | } | ||
217 | |||
218 | |||
219 | static size_t | ||
220 | callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | ||
221 | u64 total_samples, int left_margin) | ||
222 | { | ||
223 | struct callchain_list *chain; | ||
224 | bool printed = false; | ||
225 | int i = 0; | ||
226 | int ret = 0; | ||
227 | |||
228 | list_for_each_entry(chain, &self->val, list) { | ||
229 | if (chain->ip >= PERF_CONTEXT_MAX) | ||
230 | continue; | ||
231 | |||
232 | if (!i++ && sort__first_dimension == SORT_SYM) | ||
233 | continue; | ||
234 | |||
235 | if (!printed) { | ||
236 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
237 | ret += fprintf(fp, "|\n"); | ||
238 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
239 | ret += fprintf(fp, "---"); | ||
240 | |||
241 | left_margin += 3; | ||
242 | printed = true; | ||
243 | } else | ||
244 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
245 | |||
246 | if (chain->sym) | ||
247 | ret += fprintf(fp, " %s\n", chain->sym->name); | ||
248 | else | ||
249 | ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); | ||
547 | } | 250 | } |
548 | 251 | ||
252 | ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); | ||
253 | |||
549 | return ret; | 254 | return ret; |
550 | } | 255 | } |
551 | 256 | ||
@@ -577,7 +282,7 @@ callchain__fprintf_flat(FILE *fp, struct callchain_node *self, | |||
577 | 282 | ||
578 | static size_t | 283 | static size_t |
579 | hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, | 284 | hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, |
580 | u64 total_samples) | 285 | u64 total_samples, int left_margin) |
581 | { | 286 | { |
582 | struct rb_node *rb_node; | 287 | struct rb_node *rb_node; |
583 | struct callchain_node *chain; | 288 | struct callchain_node *chain; |
@@ -597,8 +302,8 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, | |||
597 | break; | 302 | break; |
598 | case CHAIN_GRAPH_ABS: /* Falldown */ | 303 | case CHAIN_GRAPH_ABS: /* Falldown */ |
599 | case CHAIN_GRAPH_REL: | 304 | case CHAIN_GRAPH_REL: |
600 | ret += callchain__fprintf_graph(fp, chain, | 305 | ret += callchain__fprintf_graph(fp, chain, total_samples, |
601 | total_samples, 1, 1); | 306 | left_margin); |
602 | case CHAIN_NONE: | 307 | case CHAIN_NONE: |
603 | default: | 308 | default: |
604 | break; | 309 | break; |
@@ -610,7 +315,6 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, | |||
610 | return ret; | 315 | return ret; |
611 | } | 316 | } |
612 | 317 | ||
613 | |||
614 | static size_t | 318 | static size_t |
615 | hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) | 319 | hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) |
616 | { | 320 | { |
@@ -644,8 +348,19 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) | |||
644 | 348 | ||
645 | ret += fprintf(fp, "\n"); | 349 | ret += fprintf(fp, "\n"); |
646 | 350 | ||
647 | if (callchain) | 351 | if (callchain) { |
648 | hist_entry_callchain__fprintf(fp, self, total_samples); | 352 | int left_margin = 0; |
353 | |||
354 | if (sort__first_dimension == SORT_COMM) { | ||
355 | se = list_first_entry(&hist_entry__sort_list, typeof(*se), | ||
356 | list); | ||
357 | left_margin = se->width ? *se->width : 0; | ||
358 | left_margin -= thread__comm_len(self->thread); | ||
359 | } | ||
360 | |||
361 | hist_entry_callchain__fprintf(fp, self, total_samples, | ||
362 | left_margin); | ||
363 | } | ||
649 | 364 | ||
650 | return ret; | 365 | return ret; |
651 | } | 366 | } |
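
In the hist_entry__fprintf() hunk above, when the first sort dimension is the command name the callchain graph is pushed right by the unused width of the comm column (column width minus the printed comm length), and after printing the '---' connector the margin grows by three more characters. A toy sketch of that padding arithmetic with invented widths, standing in for callchain__fprintf_left_margin():

    #include <stdio.h>
    #include <string.h>

    /* Pad with `margin` spaces, a simplified callchain__fprintf_left_margin(). */
    static void left_margin(FILE *fp, int margin)
    {
            fprintf(fp, "%*s", margin, "");
    }

    int main(void)
    {
            const int   comm_col_width = 16;        /* assumed column width */
            const char *comm           = "firefox"; /* sampled command name */
            int margin = comm_col_width - (int)strlen(comm);

            /* the histogram line itself ... */
            printf("%-*s %6u  some_symbol\n", comm_col_width, comm, 1234u);

            /* ... and the callchain graph indented underneath it */
            left_margin(stdout, margin);
            printf("|\n");
            left_margin(stdout, margin);
            printf("---caller_a\n");
            left_margin(stdout, margin + 3);        /* the '---' widens the margin */
            printf("caller_b\n");
            return 0;
    }
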
@@ -693,63 +408,6 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm) | |||
693 | return 0; | 408 | return 0; |
694 | } | 409 | } |
695 | 410 | ||
696 | |||
697 | static struct symbol * | ||
698 | resolve_symbol(struct thread *thread, struct map **mapp, | ||
699 | struct dso **dsop, u64 *ipp) | ||
700 | { | ||
701 | struct dso *dso = dsop ? *dsop : NULL; | ||
702 | struct map *map = mapp ? *mapp : NULL; | ||
703 | u64 ip = *ipp; | ||
704 | |||
705 | if (!thread) | ||
706 | return NULL; | ||
707 | |||
708 | if (dso) | ||
709 | goto got_dso; | ||
710 | |||
711 | if (map) | ||
712 | goto got_map; | ||
713 | |||
714 | map = thread__find_map(thread, ip); | ||
715 | if (map != NULL) { | ||
716 | /* | ||
717 | * We have to do this here as we may have a dso | ||
718 | * with no symbol hit that has a name longer than | ||
719 | * the ones with symbols sampled. | ||
720 | */ | ||
721 | if (!sort_dso.elide && !map->dso->slen_calculated) | ||
722 | dso__calc_col_width(map->dso); | ||
723 | |||
724 | if (mapp) | ||
725 | *mapp = map; | ||
726 | got_map: | ||
727 | ip = map->map_ip(map, ip); | ||
728 | |||
729 | dso = map->dso; | ||
730 | } else { | ||
731 | /* | ||
732 | * If this is outside of all known maps, | ||
733 | * and is a negative address, try to look it | ||
734 | * up in the kernel dso, as it might be a | ||
735 | * vsyscall (which executes in user-mode): | ||
736 | */ | ||
737 | if ((long long)ip < 0) | ||
738 | dso = kernel_dso; | ||
739 | } | ||
740 | dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); | ||
741 | dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); | ||
742 | *ipp = ip; | ||
743 | |||
744 | if (dsop) | ||
745 | *dsop = dso; | ||
746 | |||
747 | if (!dso) | ||
748 | return NULL; | ||
749 | got_dso: | ||
750 | return dso->find_symbol(dso, ip); | ||
751 | } | ||
752 | |||
753 | static int call__match(struct symbol *sym) | 411 | static int call__match(struct symbol *sym) |
754 | { | 412 | { |
755 | if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) | 413 | if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) |
@@ -758,11 +416,11 @@ static int call__match(struct symbol *sym) | |||
758 | return 0; | 416 | return 0; |
759 | } | 417 | } |
760 | 418 | ||
761 | static struct symbol ** | 419 | static struct symbol **resolve_callchain(struct thread *thread, |
762 | resolve_callchain(struct thread *thread, struct map *map __used, | 420 | struct ip_callchain *chain, |
763 | struct ip_callchain *chain, struct hist_entry *entry) | 421 | struct symbol **parent) |
764 | { | 422 | { |
765 | u64 context = PERF_CONTEXT_MAX; | 423 | u8 cpumode = PERF_RECORD_MISC_USER; |
766 | struct symbol **syms = NULL; | 424 | struct symbol **syms = NULL; |
767 | unsigned int i; | 425 | unsigned int i; |
768 | 426 | ||
@@ -776,34 +434,31 @@ resolve_callchain(struct thread *thread, struct map *map __used, | |||
776 | 434 | ||
777 | for (i = 0; i < chain->nr; i++) { | 435 | for (i = 0; i < chain->nr; i++) { |
778 | u64 ip = chain->ips[i]; | 436 | u64 ip = chain->ips[i]; |
779 | struct dso *dso = NULL; | 437 | struct addr_location al; |
780 | struct symbol *sym; | ||
781 | 438 | ||
782 | if (ip >= PERF_CONTEXT_MAX) { | 439 | if (ip >= PERF_CONTEXT_MAX) { |
783 | context = ip; | 440 | switch (ip) { |
441 | case PERF_CONTEXT_HV: | ||
442 | cpumode = PERF_RECORD_MISC_HYPERVISOR; break; | ||
443 | case PERF_CONTEXT_KERNEL: | ||
444 | cpumode = PERF_RECORD_MISC_KERNEL; break; | ||
445 | case PERF_CONTEXT_USER: | ||
446 | cpumode = PERF_RECORD_MISC_USER; break; | ||
447 | default: | ||
448 | break; | ||
449 | } | ||
784 | continue; | 450 | continue; |
785 | } | 451 | } |
786 | 452 | ||
787 | switch (context) { | 453 | thread__find_addr_location(thread, cpumode, MAP__FUNCTION, |
788 | case PERF_CONTEXT_HV: | 454 | ip, &al, NULL); |
789 | dso = hypervisor_dso; | 455 | if (al.sym != NULL) { |
790 | break; | 456 | if (sort__has_parent && !*parent && |
791 | case PERF_CONTEXT_KERNEL: | 457 | call__match(al.sym)) |
792 | dso = kernel_dso; | 458 | *parent = al.sym; |
793 | break; | ||
794 | default: | ||
795 | break; | ||
796 | } | ||
797 | |||
798 | sym = resolve_symbol(thread, NULL, &dso, &ip); | ||
799 | |||
800 | if (sym) { | ||
801 | if (sort__has_parent && call__match(sym) && | ||
802 | !entry->parent) | ||
803 | entry->parent = sym; | ||
804 | if (!callchain) | 459 | if (!callchain) |
805 | break; | 460 | break; |
806 | syms[i] = sym; | 461 | syms[i] = al.sym; |
807 | } | 462 | } |
808 | } | 463 | } |
809 | 464 | ||
@@ -814,178 +469,33 @@ resolve_callchain(struct thread *thread, struct map *map __used, | |||
814 | * collect histogram counts | 469 | * collect histogram counts |
815 | */ | 470 | */ |
816 | 471 | ||
817 | static int | 472 | static int hist_entry__add(struct addr_location *al, |
818 | hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, | 473 | struct ip_callchain *chain, u64 count) |
819 | struct symbol *sym, u64 ip, struct ip_callchain *chain, | ||
820 | char level, u64 count) | ||
821 | { | 474 | { |
822 | struct rb_node **p = &hist.rb_node; | 475 | struct symbol **syms = NULL, *parent = NULL; |
823 | struct rb_node *parent = NULL; | 476 | bool hit; |
824 | struct hist_entry *he; | 477 | struct hist_entry *he; |
825 | struct symbol **syms = NULL; | ||
826 | struct hist_entry entry = { | ||
827 | .thread = thread, | ||
828 | .map = map, | ||
829 | .dso = dso, | ||
830 | .sym = sym, | ||
831 | .ip = ip, | ||
832 | .level = level, | ||
833 | .count = count, | ||
834 | .parent = NULL, | ||
835 | .sorted_chain = RB_ROOT | ||
836 | }; | ||
837 | int cmp; | ||
838 | 478 | ||
839 | if ((sort__has_parent || callchain) && chain) | 479 | if ((sort__has_parent || callchain) && chain) |
840 | syms = resolve_callchain(thread, map, chain, &entry); | 480 | syms = resolve_callchain(al->thread, chain, &parent); |
841 | 481 | ||
842 | while (*p != NULL) { | 482 | he = __hist_entry__add(al, parent, count, &hit); |
843 | parent = *p; | 483 | if (he == NULL) |
844 | he = rb_entry(parent, struct hist_entry, rb_node); | 484 | return -ENOMEM; |
845 | 485 | ||
846 | cmp = hist_entry__cmp(&entry, he); | 486 | if (hit) |
487 | he->count += count; | ||
847 | 488 | ||
848 | if (!cmp) { | ||
849 | he->count += count; | ||
850 | if (callchain) { | ||
851 | append_chain(&he->callchain, chain, syms); | ||
852 | free(syms); | ||
853 | } | ||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | if (cmp < 0) | ||
858 | p = &(*p)->rb_left; | ||
859 | else | ||
860 | p = &(*p)->rb_right; | ||
861 | } | ||
862 | |||
863 | he = malloc(sizeof(*he)); | ||
864 | if (!he) | ||
865 | return -ENOMEM; | ||
866 | *he = entry; | ||
867 | if (callchain) { | 489 | if (callchain) { |
868 | callchain_init(&he->callchain); | 490 | if (!hit) |
491 | callchain_init(&he->callchain); | ||
869 | append_chain(&he->callchain, chain, syms); | 492 | append_chain(&he->callchain, chain, syms); |
870 | free(syms); | 493 | free(syms); |
871 | } | 494 | } |
872 | rb_link_node(&he->rb_node, parent, p); | ||
873 | rb_insert_color(&he->rb_node, &hist); | ||
874 | 495 | ||
875 | return 0; | 496 | return 0; |
876 | } | 497 | } |
877 | 498 | ||
878 | static void hist_entry__free(struct hist_entry *he) | ||
879 | { | ||
880 | free(he); | ||
881 | } | ||
882 | |||
883 | /* | ||
884 | * collapse the histogram | ||
885 | */ | ||
886 | |||
887 | static struct rb_root collapse_hists; | ||
888 | |||
889 | static void collapse__insert_entry(struct hist_entry *he) | ||
890 | { | ||
891 | struct rb_node **p = &collapse_hists.rb_node; | ||
892 | struct rb_node *parent = NULL; | ||
893 | struct hist_entry *iter; | ||
894 | int64_t cmp; | ||
895 | |||
896 | while (*p != NULL) { | ||
897 | parent = *p; | ||
898 | iter = rb_entry(parent, struct hist_entry, rb_node); | ||
899 | |||
900 | cmp = hist_entry__collapse(iter, he); | ||
901 | |||
902 | if (!cmp) { | ||
903 | iter->count += he->count; | ||
904 | hist_entry__free(he); | ||
905 | return; | ||
906 | } | ||
907 | |||
908 | if (cmp < 0) | ||
909 | p = &(*p)->rb_left; | ||
910 | else | ||
911 | p = &(*p)->rb_right; | ||
912 | } | ||
913 | |||
914 | rb_link_node(&he->rb_node, parent, p); | ||
915 | rb_insert_color(&he->rb_node, &collapse_hists); | ||
916 | } | ||
917 | |||
918 | static void collapse__resort(void) | ||
919 | { | ||
920 | struct rb_node *next; | ||
921 | struct hist_entry *n; | ||
922 | |||
923 | if (!sort__need_collapse) | ||
924 | return; | ||
925 | |||
926 | next = rb_first(&hist); | ||
927 | while (next) { | ||
928 | n = rb_entry(next, struct hist_entry, rb_node); | ||
929 | next = rb_next(&n->rb_node); | ||
930 | |||
931 | rb_erase(&n->rb_node, &hist); | ||
932 | collapse__insert_entry(n); | ||
933 | } | ||
934 | } | ||
935 | |||
936 | /* | ||
937 | * reverse the map, sort on count. | ||
938 | */ | ||
939 | |||
940 | static struct rb_root output_hists; | ||
941 | |||
942 | static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) | ||
943 | { | ||
944 | struct rb_node **p = &output_hists.rb_node; | ||
945 | struct rb_node *parent = NULL; | ||
946 | struct hist_entry *iter; | ||
947 | |||
948 | if (callchain) | ||
949 | callchain_param.sort(&he->sorted_chain, &he->callchain, | ||
950 | min_callchain_hits, &callchain_param); | ||
951 | |||
952 | while (*p != NULL) { | ||
953 | parent = *p; | ||
954 | iter = rb_entry(parent, struct hist_entry, rb_node); | ||
955 | |||
956 | if (he->count > iter->count) | ||
957 | p = &(*p)->rb_left; | ||
958 | else | ||
959 | p = &(*p)->rb_right; | ||
960 | } | ||
961 | |||
962 | rb_link_node(&he->rb_node, parent, p); | ||
963 | rb_insert_color(&he->rb_node, &output_hists); | ||
964 | } | ||
965 | |||
966 | static void output__resort(u64 total_samples) | ||
967 | { | ||
968 | struct rb_node *next; | ||
969 | struct hist_entry *n; | ||
970 | struct rb_root *tree = &hist; | ||
971 | u64 min_callchain_hits; | ||
972 | |||
973 | min_callchain_hits = total_samples * (callchain_param.min_percent / 100); | ||
974 | |||
975 | if (sort__need_collapse) | ||
976 | tree = &collapse_hists; | ||
977 | |||
978 | next = rb_first(tree); | ||
979 | |||
980 | while (next) { | ||
981 | n = rb_entry(next, struct hist_entry, rb_node); | ||
982 | next = rb_next(&n->rb_node); | ||
983 | |||
984 | rb_erase(&n->rb_node, tree); | ||
985 | output__insert_entry(n, min_callchain_hits); | ||
986 | } | ||
987 | } | ||
988 | |||
989 | static size_t output__fprintf(FILE *fp, u64 total_samples) | 499 | static size_t output__fprintf(FILE *fp, u64 total_samples) |
990 | { | 500 | { |
991 | struct hist_entry *pos; | 501 | struct hist_entry *pos; |
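
In the reworked resolve_callchain() above, the special PERF_CONTEXT_* markers embedded in a callchain no longer select a DSO directly; they only switch the cpumode that is later passed to thread__find_addr_location() for every following address. A standalone sketch of that marker to cpumode translation, using the constants from linux/perf_event.h and a fabricated chain:

    #include <stdio.h>
    #include <linux/perf_event.h>

    /* Addresses at or above PERF_CONTEXT_MAX are context markers, not IPs. */
    static void walk_chain(const unsigned long long *ips, unsigned int nr)
    {
            unsigned int cpumode = PERF_RECORD_MISC_USER;  /* default, as in the tool */
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    unsigned long long ip = ips[i];

                    if (ip >= PERF_CONTEXT_MAX) {
                            switch (ip) {
                            case PERF_CONTEXT_HV:
                                    cpumode = PERF_RECORD_MISC_HYPERVISOR;
                                    break;
                            case PERF_CONTEXT_KERNEL:
                                    cpumode = PERF_RECORD_MISC_KERNEL;
                                    break;
                            case PERF_CONTEXT_USER:
                                    cpumode = PERF_RECORD_MISC_USER;
                                    break;
                            }
                            continue;
                    }
                    /* the real code resolves ip via thread__find_addr_location() */
                    printf("ip %#llx resolved with cpumode %u\n", ip, cpumode);
            }
    }

    int main(void)
    {
            /* fabricated chain: kernel marker, two kernel IPs, user marker, one user IP */
            const unsigned long long chain[] = {
                    PERF_CONTEXT_KERNEL, 0xffffffff81000123ULL, 0xffffffff81000456ULL,
                    PERF_CONTEXT_USER,   0x0000000000400789ULL,
            };

            walk_chain(chain, sizeof(chain) / sizeof(chain[0]));
            return 0;
    }
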
@@ -1080,13 +590,6 @@ print_entries: | |||
1080 | return ret; | 590 | return ret; |
1081 | } | 591 | } |
1082 | 592 | ||
1083 | static unsigned long total = 0, | ||
1084 | total_mmap = 0, | ||
1085 | total_comm = 0, | ||
1086 | total_fork = 0, | ||
1087 | total_unknown = 0, | ||
1088 | total_lost = 0; | ||
1089 | |||
1090 | static int validate_chain(struct ip_callchain *chain, event_t *event) | 593 | static int validate_chain(struct ip_callchain *chain, event_t *event) |
1091 | { | 594 | { |
1092 | unsigned int chain_size; | 595 | unsigned int chain_size; |
@@ -1100,30 +603,22 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) | |||
1100 | return 0; | 603 | return 0; |
1101 | } | 604 | } |
1102 | 605 | ||
1103 | static int | 606 | static int process_sample_event(event_t *event) |
1104 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) | ||
1105 | { | 607 | { |
1106 | char level; | ||
1107 | int show = 0; | ||
1108 | struct dso *dso = NULL; | ||
1109 | struct thread *thread; | ||
1110 | u64 ip = event->ip.ip; | 608 | u64 ip = event->ip.ip; |
1111 | u64 period = 1; | 609 | u64 period = 1; |
1112 | struct map *map = NULL; | ||
1113 | void *more_data = event->ip.__more_data; | 610 | void *more_data = event->ip.__more_data; |
1114 | struct ip_callchain *chain = NULL; | 611 | struct ip_callchain *chain = NULL; |
1115 | int cpumode; | 612 | int cpumode; |
1116 | 613 | struct addr_location al; | |
1117 | thread = threads__findnew(event->ip.pid, &threads, &last_match); | 614 | struct thread *thread = threads__findnew(event->ip.pid); |
1118 | 615 | ||
1119 | if (sample_type & PERF_SAMPLE_PERIOD) { | 616 | if (sample_type & PERF_SAMPLE_PERIOD) { |
1120 | period = *(u64 *)more_data; | 617 | period = *(u64 *)more_data; |
1121 | more_data += sizeof(u64); | 618 | more_data += sizeof(u64); |
1122 | } | 619 | } |
1123 | 620 | ||
1124 | dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", | 621 | dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", |
1125 | (void *)(offset + head), | ||
1126 | (void *)(long)(event->header.size), | ||
1127 | event->header.misc, | 622 | event->header.misc, |
1128 | event->ip.pid, event->ip.tid, | 623 | event->ip.pid, event->ip.tid, |
1129 | (void *)(long)ip, | 624 | (void *)(long)ip, |
@@ -1137,7 +632,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
1137 | dump_printf("... chain: nr:%Lu\n", chain->nr); | 632 | dump_printf("... chain: nr:%Lu\n", chain->nr); |
1138 | 633 | ||
1139 | if (validate_chain(chain, event) < 0) { | 634 | if (validate_chain(chain, event) < 0) { |
1140 | eprintf("call-chain problem with event, skipping it.\n"); | 635 | pr_debug("call-chain problem with event, " |
636 | "skipping it.\n"); | ||
1141 | return 0; | 637 | return 0; |
1142 | } | 638 | } |
1143 | 639 | ||
@@ -1147,163 +643,64 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
1147 | } | 643 | } |
1148 | } | 644 | } |
1149 | 645 | ||
1150 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
1151 | |||
1152 | if (thread == NULL) { | 646 | if (thread == NULL) { |
1153 | eprintf("problem processing %d event, skipping it.\n", | 647 | pr_debug("problem processing %d event, skipping it.\n", |
1154 | event->header.type); | 648 | event->header.type); |
1155 | return -1; | 649 | return -1; |
1156 | } | 650 | } |
1157 | 651 | ||
652 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
653 | |||
1158 | if (comm_list && !strlist__has_entry(comm_list, thread->comm)) | 654 | if (comm_list && !strlist__has_entry(comm_list, thread->comm)) |
1159 | return 0; | 655 | return 0; |
1160 | 656 | ||
1161 | cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 657 | cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
1162 | 658 | ||
1163 | if (cpumode == PERF_RECORD_MISC_KERNEL) { | 659 | thread__find_addr_location(thread, cpumode, |
1164 | show = SHOW_KERNEL; | 660 | MAP__FUNCTION, ip, &al, NULL); |
1165 | level = 'k'; | 661 | /* |
1166 | 662 | * We have to do this here as we may have a dso with no symbol hit that | |
1167 | dso = kernel_dso; | 663 | * has a name longer than the ones with symbols sampled. |
1168 | 664 | */ | |
1169 | dump_printf(" ...... dso: %s\n", dso->name); | 665 | if (al.map && !sort_dso.elide && !al.map->dso->slen_calculated) |
1170 | 666 | dso__calc_col_width(al.map->dso); | |
1171 | } else if (cpumode == PERF_RECORD_MISC_USER) { | 667 | |
1172 | 668 | if (dso_list && | |
1173 | show = SHOW_USER; | 669 | (!al.map || !al.map->dso || |
1174 | level = '.'; | 670 | !(strlist__has_entry(dso_list, al.map->dso->short_name) || |
1175 | 671 | (al.map->dso->short_name != al.map->dso->long_name && | |
1176 | } else { | 672 | strlist__has_entry(dso_list, al.map->dso->long_name))))) |
1177 | show = SHOW_HV; | 673 | return 0; |
1178 | level = 'H'; | ||
1179 | |||
1180 | dso = hypervisor_dso; | ||
1181 | |||
1182 | dump_printf(" ...... dso: [hypervisor]\n"); | ||
1183 | } | ||
1184 | |||
1185 | if (show & show_mask) { | ||
1186 | struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); | ||
1187 | |||
1188 | if (dso_list && (!dso || !dso->name || | ||
1189 | !strlist__has_entry(dso_list, dso->name))) | ||
1190 | return 0; | ||
1191 | |||
1192 | if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name))) | ||
1193 | return 0; | ||
1194 | |||
1195 | if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { | ||
1196 | eprintf("problem incrementing symbol count, skipping event\n"); | ||
1197 | return -1; | ||
1198 | } | ||
1199 | } | ||
1200 | total += period; | ||
1201 | |||
1202 | return 0; | ||
1203 | } | ||
1204 | 674 | ||
1205 | static int | 675 | if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name)) |
1206 | process_mmap_event(event_t *event, unsigned long offset, unsigned long head) | ||
1207 | { | ||
1208 | struct thread *thread; | ||
1209 | struct map *map = map__new(&event->mmap, cwd, cwdlen); | ||
1210 | |||
1211 | thread = threads__findnew(event->mmap.pid, &threads, &last_match); | ||
1212 | |||
1213 | dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", | ||
1214 | (void *)(offset + head), | ||
1215 | (void *)(long)(event->header.size), | ||
1216 | event->mmap.pid, | ||
1217 | event->mmap.tid, | ||
1218 | (void *)(long)event->mmap.start, | ||
1219 | (void *)(long)event->mmap.len, | ||
1220 | (void *)(long)event->mmap.pgoff, | ||
1221 | event->mmap.filename); | ||
1222 | |||
1223 | if (thread == NULL || map == NULL) { | ||
1224 | dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); | ||
1225 | return 0; | 676 | return 0; |
677 | |||
678 | if (hist_entry__add(&al, chain, period)) { | ||
679 | pr_debug("problem incrementing symbol count, skipping event\n"); | ||
680 | return -1; | ||
1226 | } | 681 | } |
1227 | 682 | ||
1228 | thread__insert_map(thread, map); | 683 | event__stats.total += period; |
1229 | total_mmap++; | ||
1230 | 684 | ||
1231 | return 0; | 685 | return 0; |
1232 | } | 686 | } |
1233 | 687 | ||
1234 | static int | 688 | static int process_comm_event(event_t *event) |
1235 | process_comm_event(event_t *event, unsigned long offset, unsigned long head) | ||
1236 | { | 689 | { |
1237 | struct thread *thread; | 690 | struct thread *thread = threads__findnew(event->comm.pid); |
1238 | |||
1239 | thread = threads__findnew(event->comm.pid, &threads, &last_match); | ||
1240 | 691 | ||
1241 | dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", | 692 | dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid); |
1242 | (void *)(offset + head), | ||
1243 | (void *)(long)(event->header.size), | ||
1244 | event->comm.comm, event->comm.pid); | ||
1245 | 693 | ||
1246 | if (thread == NULL || | 694 | if (thread == NULL || |
1247 | thread__set_comm_adjust(thread, event->comm.comm)) { | 695 | thread__set_comm_adjust(thread, event->comm.comm)) { |
1248 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | 696 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); |
1249 | return -1; | 697 | return -1; |
1250 | } | 698 | } |
1251 | total_comm++; | ||
1252 | |||
1253 | return 0; | ||
1254 | } | ||
1255 | |||
1256 | static int | ||
1257 | process_task_event(event_t *event, unsigned long offset, unsigned long head) | ||
1258 | { | ||
1259 | struct thread *thread; | ||
1260 | struct thread *parent; | ||
1261 | |||
1262 | thread = threads__findnew(event->fork.pid, &threads, &last_match); | ||
1263 | parent = threads__findnew(event->fork.ppid, &threads, &last_match); | ||
1264 | |||
1265 | dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", | ||
1266 | (void *)(offset + head), | ||
1267 | (void *)(long)(event->header.size), | ||
1268 | event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT", | ||
1269 | event->fork.pid, event->fork.tid, | ||
1270 | event->fork.ppid, event->fork.ptid); | ||
1271 | |||
1272 | /* | ||
1273 | * A thread clone will have the same PID for both | ||
1274 | * parent and child. | ||
1275 | */ | ||
1276 | if (thread == parent) | ||
1277 | return 0; | ||
1278 | |||
1279 | if (event->header.type == PERF_RECORD_EXIT) | ||
1280 | return 0; | ||
1281 | |||
1282 | if (!thread || !parent || thread__fork(thread, parent)) { | ||
1283 | dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); | ||
1284 | return -1; | ||
1285 | } | ||
1286 | total_fork++; | ||
1287 | 699 | ||
1288 | return 0; | 700 | return 0; |
1289 | } | 701 | } |
1290 | 702 | ||
1291 | static int | 703 | static int process_read_event(event_t *event) |
1292 | process_lost_event(event_t *event, unsigned long offset, unsigned long head) | ||
1293 | { | ||
1294 | dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n", | ||
1295 | (void *)(offset + head), | ||
1296 | (void *)(long)(event->header.size), | ||
1297 | event->lost.id, | ||
1298 | event->lost.lost); | ||
1299 | |||
1300 | total_lost += event->lost.lost; | ||
1301 | |||
1302 | return 0; | ||
1303 | } | ||
1304 | |||
1305 | static int | ||
1306 | process_read_event(event_t *event, unsigned long offset, unsigned long head) | ||
1307 | { | 704 | { |
1308 | struct perf_event_attr *attr; | 705 | struct perf_event_attr *attr; |
1309 | 706 | ||
@@ -1319,238 +716,91 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head) | |||
1319 | event->read.value); | 716 | event->read.value); |
1320 | } | 717 | } |
1321 | 718 | ||
1322 | dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n", | 719 | dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid, |
1323 | (void *)(offset + head), | 720 | attr ? __event_name(attr->type, attr->config) : "FAIL", |
1324 | (void *)(long)(event->header.size), | 721 | event->read.value); |
1325 | event->read.pid, | ||
1326 | event->read.tid, | ||
1327 | attr ? __event_name(attr->type, attr->config) | ||
1328 | : "FAIL", | ||
1329 | event->read.value); | ||
1330 | |||
1331 | return 0; | ||
1332 | } | ||
1333 | |||
1334 | static int | ||
1335 | process_event(event_t *event, unsigned long offset, unsigned long head) | ||
1336 | { | ||
1337 | trace_event(event); | ||
1338 | |||
1339 | switch (event->header.type) { | ||
1340 | case PERF_RECORD_SAMPLE: | ||
1341 | return process_sample_event(event, offset, head); | ||
1342 | |||
1343 | case PERF_RECORD_MMAP: | ||
1344 | return process_mmap_event(event, offset, head); | ||
1345 | |||
1346 | case PERF_RECORD_COMM: | ||
1347 | return process_comm_event(event, offset, head); | ||
1348 | |||
1349 | case PERF_RECORD_FORK: | ||
1350 | case PERF_RECORD_EXIT: | ||
1351 | return process_task_event(event, offset, head); | ||
1352 | |||
1353 | case PERF_RECORD_LOST: | ||
1354 | return process_lost_event(event, offset, head); | ||
1355 | |||
1356 | case PERF_RECORD_READ: | ||
1357 | return process_read_event(event, offset, head); | ||
1358 | |||
1359 | /* | ||
1360 | * We don't process them right now but they are fine: | ||
1361 | */ | ||
1362 | |||
1363 | case PERF_RECORD_THROTTLE: | ||
1364 | case PERF_RECORD_UNTHROTTLE: | ||
1365 | return 0; | ||
1366 | |||
1367 | default: | ||
1368 | return -1; | ||
1369 | } | ||
1370 | 722 | ||
1371 | return 0; | 723 | return 0; |
1372 | } | 724 | } |
1373 | 725 | ||
1374 | static int __cmd_report(void) | 726 | static int sample_type_check(u64 type) |
1375 | { | 727 | { |
1376 | int ret, rc = EXIT_FAILURE; | 728 | sample_type = type; |
1377 | unsigned long offset = 0; | ||
1378 | unsigned long head, shift; | ||
1379 | struct stat input_stat; | ||
1380 | struct thread *idle; | ||
1381 | event_t *event; | ||
1382 | uint32_t size; | ||
1383 | char *buf; | ||
1384 | |||
1385 | idle = register_idle_thread(&threads, &last_match); | ||
1386 | thread__comm_adjust(idle); | ||
1387 | |||
1388 | if (show_threads) | ||
1389 | perf_read_values_init(&show_threads_values); | ||
1390 | |||
1391 | input = open(input_name, O_RDONLY); | ||
1392 | if (input < 0) { | ||
1393 | fprintf(stderr, " failed to open file: %s", input_name); | ||
1394 | if (!strcmp(input_name, "perf.data")) | ||
1395 | fprintf(stderr, " (try 'perf record' first)"); | ||
1396 | fprintf(stderr, "\n"); | ||
1397 | exit(-1); | ||
1398 | } | ||
1399 | |||
1400 | ret = fstat(input, &input_stat); | ||
1401 | if (ret < 0) { | ||
1402 | perror("failed to stat file"); | ||
1403 | exit(-1); | ||
1404 | } | ||
1405 | |||
1406 | if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { | ||
1407 | fprintf(stderr, "file: %s not owned by current user or root\n", input_name); | ||
1408 | exit(-1); | ||
1409 | } | ||
1410 | |||
1411 | if (!input_stat.st_size) { | ||
1412 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | ||
1413 | exit(0); | ||
1414 | } | ||
1415 | |||
1416 | header = perf_header__read(input); | ||
1417 | head = header->data_offset; | ||
1418 | |||
1419 | sample_type = perf_header__sample_type(header); | ||
1420 | 729 | ||
1421 | if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { | 730 | if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { |
1422 | if (sort__has_parent) { | 731 | if (sort__has_parent) { |
1423 | fprintf(stderr, "selected --sort parent, but no" | 732 | fprintf(stderr, "selected --sort parent, but no" |
1424 | " callchain data. Did you call" | 733 | " callchain data. Did you call" |
1425 | " perf record without -g?\n"); | 734 | " perf record without -g?\n"); |
1426 | exit(-1); | 735 | return -1; |
1427 | } | 736 | } |
1428 | if (callchain) { | 737 | if (callchain) { |
1429 | fprintf(stderr, "selected -g but no callchain data." | 738 | fprintf(stderr, "selected -g but no callchain data." |
1430 | " Did you call perf record without" | 739 | " Did you call perf record without" |
1431 | " -g?\n"); | 740 | " -g?\n"); |
1432 | exit(-1); | 741 | return -1; |
1433 | } | 742 | } |
1434 | } else if (callchain_param.mode != CHAIN_NONE && !callchain) { | 743 | } else if (callchain_param.mode != CHAIN_NONE && !callchain) { |
1435 | callchain = 1; | 744 | callchain = 1; |
1436 | if (register_callchain_param(&callchain_param) < 0) { | 745 | if (register_callchain_param(&callchain_param) < 0) { |
1437 | fprintf(stderr, "Can't register callchain" | 746 | fprintf(stderr, "Can't register callchain" |
1438 | " params\n"); | 747 | " params\n"); |
1439 | exit(-1); | 748 | return -1; |
1440 | } | 749 | } |
1441 | } | 750 | } |
1442 | 751 | ||
1443 | if (load_kernel() < 0) { | 752 | return 0; |
1444 | perror("failed to load kernel symbols"); | 753 | } |
1445 | return EXIT_FAILURE; | ||
1446 | } | ||
1447 | |||
1448 | if (!full_paths) { | ||
1449 | if (getcwd(__cwd, sizeof(__cwd)) == NULL) { | ||
1450 | perror("failed to get the current directory"); | ||
1451 | return EXIT_FAILURE; | ||
1452 | } | ||
1453 | cwdlen = strlen(cwd); | ||
1454 | } else { | ||
1455 | cwd = NULL; | ||
1456 | cwdlen = 0; | ||
1457 | } | ||
1458 | |||
1459 | shift = page_size * (head / page_size); | ||
1460 | offset += shift; | ||
1461 | head -= shift; | ||
1462 | |||
1463 | remap: | ||
1464 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | ||
1465 | MAP_SHARED, input, offset); | ||
1466 | if (buf == MAP_FAILED) { | ||
1467 | perror("failed to mmap file"); | ||
1468 | exit(-1); | ||
1469 | } | ||
1470 | |||
1471 | more: | ||
1472 | event = (event_t *)(buf + head); | ||
1473 | |||
1474 | size = event->header.size; | ||
1475 | if (!size) | ||
1476 | size = 8; | ||
1477 | |||
1478 | if (head + event->header.size >= page_size * mmap_window) { | ||
1479 | int munmap_ret; | ||
1480 | |||
1481 | shift = page_size * (head / page_size); | ||
1482 | |||
1483 | munmap_ret = munmap(buf, page_size * mmap_window); | ||
1484 | assert(munmap_ret == 0); | ||
1485 | |||
1486 | offset += shift; | ||
1487 | head -= shift; | ||
1488 | goto remap; | ||
1489 | } | ||
1490 | |||
1491 | size = event->header.size; | ||
1492 | |||
1493 | dump_printf("\n%p [%p]: event: %d\n", | ||
1494 | (void *)(offset + head), | ||
1495 | (void *)(long)event->header.size, | ||
1496 | event->header.type); | ||
1497 | |||
1498 | if (!size || process_event(event, offset, head) < 0) { | ||
1499 | |||
1500 | dump_printf("%p [%p]: skipping unknown header type: %d\n", | ||
1501 | (void *)(offset + head), | ||
1502 | (void *)(long)(event->header.size), | ||
1503 | event->header.type); | ||
1504 | |||
1505 | total_unknown++; | ||
1506 | |||
1507 | /* | ||
1508 | * assume we lost track of the stream, check alignment, and | ||
1509 | * increment a single u64 in the hope to catch on again 'soon'. | ||
1510 | */ | ||
1511 | 754 | ||
1512 | if (unlikely(head & 7)) | 755 | static struct perf_file_handler file_handler = { |
1513 | head &= ~7ULL; | 756 | .process_sample_event = process_sample_event, |
757 | .process_mmap_event = event__process_mmap, | ||
758 | .process_comm_event = process_comm_event, | ||
759 | .process_exit_event = event__process_task, | ||
760 | .process_fork_event = event__process_task, | ||
761 | .process_lost_event = event__process_lost, | ||
762 | .process_read_event = process_read_event, | ||
763 | .sample_type_check = sample_type_check, | ||
764 | }; | ||
1514 | 765 | ||
1515 | size = 8; | ||
1516 | } | ||
1517 | 766 | ||
1518 | head += size; | 767 | static int __cmd_report(void) |
768 | { | ||
769 | struct thread *idle; | ||
770 | int ret; | ||
1519 | 771 | ||
1520 | if (offset + head >= header->data_offset + header->data_size) | 772 | idle = register_idle_thread(); |
1521 | goto done; | 773 | thread__comm_adjust(idle); |
1522 | 774 | ||
1523 | if (offset + head < (unsigned long)input_stat.st_size) | 775 | if (show_threads) |
1524 | goto more; | 776 | perf_read_values_init(&show_threads_values); |
1525 | 777 | ||
1526 | done: | 778 | register_perf_file_handler(&file_handler); |
1527 | rc = EXIT_SUCCESS; | ||
1528 | close(input); | ||
1529 | 779 | ||
1530 | dump_printf(" IP events: %10ld\n", total); | 780 | ret = mmap_dispatch_perf_file(&header, input_name, force, |
1531 | dump_printf(" mmap events: %10ld\n", total_mmap); | 781 | full_paths, &event__cwdlen, &event__cwd); |
1532 | dump_printf(" comm events: %10ld\n", total_comm); | 782 | if (ret) |
1533 | dump_printf(" fork events: %10ld\n", total_fork); | 783 | return ret; |
1534 | dump_printf(" lost events: %10ld\n", total_lost); | ||
1535 | dump_printf(" unknown events: %10ld\n", total_unknown); | ||
1536 | 784 | ||
1537 | if (dump_trace) | 785 | if (dump_trace) { |
786 | event__print_totals(); | ||
1538 | return 0; | 787 | return 0; |
788 | } | ||
1539 | 789 | ||
1540 | if (verbose >= 3) | 790 | if (verbose > 3) |
1541 | threads__fprintf(stdout, &threads); | 791 | threads__fprintf(stdout); |
1542 | 792 | ||
1543 | if (verbose >= 2) | 793 | if (verbose > 2) |
1544 | dsos__fprintf(stdout); | 794 | dsos__fprintf(stdout); |
1545 | 795 | ||
1546 | collapse__resort(); | 796 | collapse__resort(); |
1547 | output__resort(total); | 797 | output__resort(event__stats.total); |
1548 | output__fprintf(stdout, total); | 798 | output__fprintf(stdout, event__stats.total); |
1549 | 799 | ||
1550 | if (show_threads) | 800 | if (show_threads) |
1551 | perf_read_values_destroy(&show_threads_values); | 801 | perf_read_values_destroy(&show_threads_values); |
1552 | 802 | ||
1553 | return rc; | 803 | return ret; |
1554 | } | 804 | } |
1555 | 805 | ||
1556 | static int | 806 | static int |
@@ -1606,7 +856,8 @@ setup: | |||
1606 | return 0; | 856 | return 0; |
1607 | } | 857 | } |
1608 | 858 | ||
1609 | static const char * const report_usage[] = { | 859 | //static const char * const report_usage[] = { |
860 | const char * const report_usage[] = { | ||
1610 | "perf report [<options>] <command>", | 861 | "perf report [<options>] <command>", |
1611 | NULL | 862 | NULL |
1612 | }; | 863 | }; |
@@ -1618,9 +869,10 @@ static const struct option options[] = { | |||
1618 | "be more verbose (show symbol address, etc)"), | 869 | "be more verbose (show symbol address, etc)"), |
1619 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | 870 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
1620 | "dump raw trace in ASCII"), | 871 | "dump raw trace in ASCII"), |
1621 | OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), | 872 | OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, |
873 | "file", "vmlinux pathname"), | ||
1622 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), | 874 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), |
1623 | OPT_BOOLEAN('m', "modules", &modules, | 875 | OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, |
1624 | "load module symbols - WARNING: use only with -k and LIVE kernel"), | 876 | "load module symbols - WARNING: use only with -k and LIVE kernel"), |
1625 | OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples, | 877 | OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples, |
1626 | "Show a column with the number of samples"), | 878 | "Show a column with the number of samples"), |
@@ -1690,9 +942,8 @@ static void setup_list(struct strlist **list, const char *list_str, | |||
1690 | 942 | ||
1691 | int cmd_report(int argc, const char **argv, const char *prefix __used) | 943 | int cmd_report(int argc, const char **argv, const char *prefix __used) |
1692 | { | 944 | { |
1693 | symbol__init(); | 945 | if (symbol__init(&symbol_conf) < 0) |
1694 | 946 | return -1; | |
1695 | page_size = getpagesize(); | ||
1696 | 947 | ||
1697 | argc = parse_options(argc, argv, options, report_usage, 0); | 948 | argc = parse_options(argc, argv, options, report_usage, 0); |
1698 | 949 | ||
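
The builtin-report conversion above replaces the tool's private open()/mmap()/remap event loop with the shared perf_file_handler dispatch used by the other builtins in this series. Below is a minimal sketch of that pattern; it reuses only the callback names and the mmap_dispatch_perf_file() signature visible in the hunks above, while the my_* identifiers (and whatever in-tree headers such as util/data_map.h the real code pulls in) are assumptions for illustration, not part of this patch.

    static int my_sample(event_t *event)
    {
            /* per-PERF_RECORD_SAMPLE work, e.g. resolving the sample and
             * adding a hist entry as process_sample_event() does above */
            return 0;
    }

    static struct perf_file_handler my_handler = {
            .process_sample_event = my_sample,
            .process_comm_event   = event__process_comm,
            .process_lost_event   = event__process_lost,
    };

    static int my_cmd(void)
    {
            struct perf_header *header;

            register_perf_file_handler(&my_handler);
            return mmap_dispatch_perf_file(&header, "perf.data", 0, 0,
                                           &event__cwdlen, &event__cwd);
    }

Record types without a registered callback are presumably skipped by the dispatcher, so each builtin only supplies the handlers it actually needs.
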
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index ce2d5be4f30e..26b782f26ee1 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include "util/trace-event.h" | 11 | #include "util/trace-event.h" |
12 | 12 | ||
13 | #include "util/debug.h" | 13 | #include "util/debug.h" |
14 | #include "util/data_map.h" | ||
14 | 15 | ||
15 | #include <sys/types.h> | 16 | #include <sys/types.h> |
16 | #include <sys/prctl.h> | 17 | #include <sys/prctl.h> |
@@ -20,14 +21,6 @@ | |||
20 | #include <math.h> | 21 | #include <math.h> |
21 | 22 | ||
22 | static char const *input_name = "perf.data"; | 23 | static char const *input_name = "perf.data"; |
23 | static int input; | ||
24 | static unsigned long page_size; | ||
25 | static unsigned long mmap_window = 32; | ||
26 | |||
27 | static unsigned long total_comm = 0; | ||
28 | |||
29 | static struct rb_root threads; | ||
30 | static struct thread *last_match; | ||
31 | 24 | ||
32 | static struct perf_header *header; | 25 | static struct perf_header *header; |
33 | static u64 sample_type; | 26 | static u64 sample_type; |
@@ -35,11 +28,11 @@ static u64 sample_type; | |||
35 | static char default_sort_order[] = "avg, max, switch, runtime"; | 28 | static char default_sort_order[] = "avg, max, switch, runtime"; |
36 | static char *sort_order = default_sort_order; | 29 | static char *sort_order = default_sort_order; |
37 | 30 | ||
31 | static int profile_cpu = -1; | ||
32 | |||
38 | #define PR_SET_NAME 15 /* Set process name */ | 33 | #define PR_SET_NAME 15 /* Set process name */ |
39 | #define MAX_CPUS 4096 | 34 | #define MAX_CPUS 4096 |
40 | 35 | ||
41 | #define BUG_ON(x) assert(!(x)) | ||
42 | |||
43 | static u64 run_measurement_overhead; | 36 | static u64 run_measurement_overhead; |
44 | static u64 sleep_measurement_overhead; | 37 | static u64 sleep_measurement_overhead; |
45 | 38 | ||
@@ -74,6 +67,7 @@ enum sched_event_type { | |||
74 | SCHED_EVENT_RUN, | 67 | SCHED_EVENT_RUN, |
75 | SCHED_EVENT_SLEEP, | 68 | SCHED_EVENT_SLEEP, |
76 | SCHED_EVENT_WAKEUP, | 69 | SCHED_EVENT_WAKEUP, |
70 | SCHED_EVENT_MIGRATION, | ||
77 | }; | 71 | }; |
78 | 72 | ||
79 | struct sched_atom { | 73 | struct sched_atom { |
@@ -226,7 +220,7 @@ static void calibrate_sleep_measurement_overhead(void) | |||
226 | static struct sched_atom * | 220 | static struct sched_atom * |
227 | get_new_event(struct task_desc *task, u64 timestamp) | 221 | get_new_event(struct task_desc *task, u64 timestamp) |
228 | { | 222 | { |
229 | struct sched_atom *event = calloc(1, sizeof(*event)); | 223 | struct sched_atom *event = zalloc(sizeof(*event)); |
230 | unsigned long idx = task->nr_events; | 224 | unsigned long idx = task->nr_events; |
231 | size_t size; | 225 | size_t size; |
232 | 226 | ||
@@ -294,7 +288,7 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, | |||
294 | return; | 288 | return; |
295 | } | 289 | } |
296 | 290 | ||
297 | wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem)); | 291 | wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); |
298 | sem_init(wakee_event->wait_sem, 0, 0); | 292 | sem_init(wakee_event->wait_sem, 0, 0); |
299 | wakee_event->specific_wait = 1; | 293 | wakee_event->specific_wait = 1; |
300 | event->wait_sem = wakee_event->wait_sem; | 294 | event->wait_sem = wakee_event->wait_sem; |
@@ -324,7 +318,7 @@ static struct task_desc *register_pid(unsigned long pid, const char *comm) | |||
324 | if (task) | 318 | if (task) |
325 | return task; | 319 | return task; |
326 | 320 | ||
327 | task = calloc(1, sizeof(*task)); | 321 | task = zalloc(sizeof(*task)); |
328 | task->pid = pid; | 322 | task->pid = pid; |
329 | task->nr = nr_tasks; | 323 | task->nr = nr_tasks; |
330 | strcpy(task->comm, comm); | 324 | strcpy(task->comm, comm); |
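
The hunks above switch the calloc(1, sizeof(*x)) call sites to zalloc(). The helper's definition is not part of this diff, so the following is only the assumed equivalent it reads as, a zero-initializing allocation shorthand:

    #include <stdlib.h>

    /* Assumed shape of the zalloc() helper used above; not taken from this
     * patch, just the calloc(1, size) idiom it replaces. */
    static inline void *zalloc(size_t size)
    {
            return calloc(1, size);
    }
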
@@ -398,6 +392,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) | |||
398 | ret = sem_post(atom->wait_sem); | 392 | ret = sem_post(atom->wait_sem); |
399 | BUG_ON(ret); | 393 | BUG_ON(ret); |
400 | break; | 394 | break; |
395 | case SCHED_EVENT_MIGRATION: | ||
396 | break; | ||
401 | default: | 397 | default: |
402 | BUG_ON(1); | 398 | BUG_ON(1); |
403 | } | 399 | } |
@@ -632,29 +628,6 @@ static void test_calibrations(void) | |||
632 | printf("the sleep test took %Ld nsecs\n", T1-T0); | 628 | printf("the sleep test took %Ld nsecs\n", T1-T0); |
633 | } | 629 | } |
634 | 630 | ||
635 | static int | ||
636 | process_comm_event(event_t *event, unsigned long offset, unsigned long head) | ||
637 | { | ||
638 | struct thread *thread; | ||
639 | |||
640 | thread = threads__findnew(event->comm.pid, &threads, &last_match); | ||
641 | |||
642 | dump_printf("%p [%p]: perf_event_comm: %s:%d\n", | ||
643 | (void *)(offset + head), | ||
644 | (void *)(long)(event->header.size), | ||
645 | event->comm.comm, event->comm.pid); | ||
646 | |||
647 | if (thread == NULL || | ||
648 | thread__set_comm(thread, event->comm.comm)) { | ||
649 | dump_printf("problem processing perf_event_comm, skipping event.\n"); | ||
650 | return -1; | ||
651 | } | ||
652 | total_comm++; | ||
653 | |||
654 | return 0; | ||
655 | } | ||
656 | |||
657 | |||
658 | struct raw_event_sample { | 631 | struct raw_event_sample { |
659 | u32 size; | 632 | u32 size; |
660 | char data[0]; | 633 | char data[0]; |
@@ -745,6 +718,22 @@ struct trace_fork_event { | |||
745 | u32 child_pid; | 718 | u32 child_pid; |
746 | }; | 719 | }; |
747 | 720 | ||
721 | struct trace_migrate_task_event { | ||
722 | u32 size; | ||
723 | |||
724 | u16 common_type; | ||
725 | u8 common_flags; | ||
726 | u8 common_preempt_count; | ||
727 | u32 common_pid; | ||
728 | u32 common_tgid; | ||
729 | |||
730 | char comm[16]; | ||
731 | u32 pid; | ||
732 | |||
733 | u32 prio; | ||
734 | u32 cpu; | ||
735 | }; | ||
736 | |||
748 | struct trace_sched_handler { | 737 | struct trace_sched_handler { |
749 | void (*switch_event)(struct trace_switch_event *, | 738 | void (*switch_event)(struct trace_switch_event *, |
750 | struct event *, | 739 | struct event *, |
@@ -769,6 +758,12 @@ struct trace_sched_handler { | |||
769 | int cpu, | 758 | int cpu, |
770 | u64 timestamp, | 759 | u64 timestamp, |
771 | struct thread *thread); | 760 | struct thread *thread); |
761 | |||
762 | void (*migrate_task_event)(struct trace_migrate_task_event *, | ||
763 | struct event *, | ||
764 | int cpu, | ||
765 | u64 timestamp, | ||
766 | struct thread *thread); | ||
772 | }; | 767 | }; |
773 | 768 | ||
774 | 769 | ||
@@ -941,9 +936,7 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, | |||
941 | 936 | ||
942 | static void thread_atoms_insert(struct thread *thread) | 937 | static void thread_atoms_insert(struct thread *thread) |
943 | { | 938 | { |
944 | struct work_atoms *atoms; | 939 | struct work_atoms *atoms = zalloc(sizeof(*atoms)); |
945 | |||
946 | atoms = calloc(sizeof(*atoms), 1); | ||
947 | if (!atoms) | 940 | if (!atoms) |
948 | die("No memory"); | 941 | die("No memory"); |
949 | 942 | ||
@@ -975,9 +968,7 @@ add_sched_out_event(struct work_atoms *atoms, | |||
975 | char run_state, | 968 | char run_state, |
976 | u64 timestamp) | 969 | u64 timestamp) |
977 | { | 970 | { |
978 | struct work_atom *atom; | 971 | struct work_atom *atom = zalloc(sizeof(*atom)); |
979 | |||
980 | atom = calloc(sizeof(*atom), 1); | ||
981 | if (!atom) | 972 | if (!atom) |
982 | die("No memory"); | 973 |
983 | 974 | ||
@@ -1058,8 +1049,8 @@ latency_switch_event(struct trace_switch_event *switch_event, | |||
1058 | die("hm, delta: %Ld < 0 ?\n", delta); | 1049 | die("hm, delta: %Ld < 0 ?\n", delta); |
1059 | 1050 | ||
1060 | 1051 | ||
1061 | sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); | 1052 | sched_out = threads__findnew(switch_event->prev_pid); |
1062 | sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); | 1053 | sched_in = threads__findnew(switch_event->next_pid); |
1063 | 1054 | ||
1064 | out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); | 1055 | out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); |
1065 | if (!out_events) { | 1056 | if (!out_events) { |
@@ -1092,13 +1083,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, | |||
1092 | u64 timestamp, | 1083 | u64 timestamp, |
1093 | struct thread *this_thread __used) | 1084 | struct thread *this_thread __used) |
1094 | { | 1085 | { |
1095 | struct work_atoms *atoms; | 1086 | struct thread *thread = threads__findnew(runtime_event->pid); |
1096 | struct thread *thread; | 1087 | struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); |
1097 | 1088 | ||
1098 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 1089 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
1099 | |||
1100 | thread = threads__findnew(runtime_event->pid, &threads, &last_match); | ||
1101 | atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | ||
1102 | if (!atoms) { | 1090 | if (!atoms) { |
1103 | thread_atoms_insert(thread); | 1091 | thread_atoms_insert(thread); |
1104 | atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | 1092 | atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); |
@@ -1125,7 +1113,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
1125 | if (!wakeup_event->success) | 1113 | if (!wakeup_event->success) |
1126 | return; | 1114 | return; |
1127 | 1115 | ||
1128 | wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); | 1116 | wakee = threads__findnew(wakeup_event->pid); |
1129 | atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); | 1117 | atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); |
1130 | if (!atoms) { | 1118 | if (!atoms) { |
1131 | thread_atoms_insert(wakee); | 1119 | thread_atoms_insert(wakee); |
@@ -1139,7 +1127,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
1139 | 1127 | ||
1140 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); | 1128 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); |
1141 | 1129 | ||
1142 | if (atom->state != THREAD_SLEEPING) | 1130 | /* |
1131 | * You WILL be missing events if you've recorded only | ||
1132 | * one CPU, or are looking at only one, so don't | ||
1133 | * make useless noise. | ||
1134 | */ | ||
1135 | if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) | ||
1143 | nr_state_machine_bugs++; | 1136 | nr_state_machine_bugs++; |
1144 | 1137 | ||
1145 | nr_timestamps++; | 1138 | nr_timestamps++; |
@@ -1152,11 +1145,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
1152 | atom->wake_up_time = timestamp; | 1145 | atom->wake_up_time = timestamp; |
1153 | } | 1146 | } |
1154 | 1147 | ||
1148 | static void | ||
1149 | latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | ||
1150 | struct event *__event __used, | ||
1151 | int cpu __used, | ||
1152 | u64 timestamp, | ||
1153 | struct thread *thread __used) | ||
1154 | { | ||
1155 | struct work_atoms *atoms; | ||
1156 | struct work_atom *atom; | ||
1157 | struct thread *migrant; | ||
1158 | |||
1159 | /* | ||
1160 | * Only need to worry about migration when profiling one CPU. | ||
1161 | */ | ||
1162 | if (profile_cpu == -1) | ||
1163 | return; | ||
1164 | |||
1165 | migrant = threads__findnew(migrate_task_event->pid); | ||
1166 | atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); | ||
1167 | if (!atoms) { | ||
1168 | thread_atoms_insert(migrant); | ||
1169 | register_pid(migrant->pid, migrant->comm); | ||
1170 | atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); | ||
1171 | if (!atoms) | ||
1172 | die("migration-event: Internal tree error"); | ||
1173 | add_sched_out_event(atoms, 'R', timestamp); | ||
1174 | } | ||
1175 | |||
1176 | BUG_ON(list_empty(&atoms->work_list)); | ||
1177 | |||
1178 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); | ||
1179 | atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; | ||
1180 | |||
1181 | nr_timestamps++; | ||
1182 | |||
1183 | if (atom->sched_out_time > timestamp) | ||
1184 | nr_unordered_timestamps++; | ||
1185 | } | ||
1186 | |||
1155 | static struct trace_sched_handler lat_ops = { | 1187 | static struct trace_sched_handler lat_ops = { |
1156 | .wakeup_event = latency_wakeup_event, | 1188 | .wakeup_event = latency_wakeup_event, |
1157 | .switch_event = latency_switch_event, | 1189 | .switch_event = latency_switch_event, |
1158 | .runtime_event = latency_runtime_event, | 1190 | .runtime_event = latency_runtime_event, |
1159 | .fork_event = latency_fork_event, | 1191 | .fork_event = latency_fork_event, |
1192 | .migrate_task_event = latency_migrate_task_event, | ||
1160 | }; | 1193 | }; |
1161 | 1194 | ||
1162 | static void output_lat_thread(struct work_atoms *work_list) | 1195 | static void output_lat_thread(struct work_atoms *work_list) |
@@ -1385,8 +1418,8 @@ map_switch_event(struct trace_switch_event *switch_event, | |||
1385 | die("hm, delta: %Ld < 0 ?\n", delta); | 1418 | die("hm, delta: %Ld < 0 ?\n", delta); |
1386 | 1419 | ||
1387 | 1420 | ||
1388 | sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); | 1421 | sched_out = threads__findnew(switch_event->prev_pid); |
1389 | sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); | 1422 | sched_in = threads__findnew(switch_event->next_pid); |
1390 | 1423 | ||
1391 | curr_thread[this_cpu] = sched_in; | 1424 | curr_thread[this_cpu] = sched_in; |
1392 | 1425 | ||
@@ -1517,6 +1550,26 @@ process_sched_exit_event(struct event *event, | |||
1517 | } | 1550 | } |
1518 | 1551 | ||
1519 | static void | 1552 | static void |
1553 | process_sched_migrate_task_event(struct raw_event_sample *raw, | ||
1554 | struct event *event, | ||
1555 | int cpu __used, | ||
1556 | u64 timestamp __used, | ||
1557 | struct thread *thread __used) | ||
1558 | { | ||
1559 | struct trace_migrate_task_event migrate_task_event; | ||
1560 | |||
1561 | FILL_COMMON_FIELDS(migrate_task_event, event, raw->data); | ||
1562 | |||
1563 | FILL_ARRAY(migrate_task_event, comm, event, raw->data); | ||
1564 | FILL_FIELD(migrate_task_event, pid, event, raw->data); | ||
1565 | FILL_FIELD(migrate_task_event, prio, event, raw->data); | ||
1566 | FILL_FIELD(migrate_task_event, cpu, event, raw->data); | ||
1567 | |||
1568 | if (trace_handler->migrate_task_event) | ||
1569 | trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread); | ||
1570 | } | ||
1571 | |||
1572 | static void | ||
1520 | process_raw_event(event_t *raw_event __used, void *more_data, | 1573 | process_raw_event(event_t *raw_event __used, void *more_data, |
1521 | int cpu, u64 timestamp, struct thread *thread) | 1574 | int cpu, u64 timestamp, struct thread *thread) |
1522 | { | 1575 | { |
@@ -1539,23 +1592,23 @@ process_raw_event(event_t *raw_event __used, void *more_data, | |||
1539 | process_sched_fork_event(raw, event, cpu, timestamp, thread); | 1592 | process_sched_fork_event(raw, event, cpu, timestamp, thread); |
1540 | if (!strcmp(event->name, "sched_process_exit")) | 1593 | if (!strcmp(event->name, "sched_process_exit")) |
1541 | process_sched_exit_event(event, cpu, timestamp, thread); | 1594 | process_sched_exit_event(event, cpu, timestamp, thread); |
1595 | if (!strcmp(event->name, "sched_migrate_task")) | ||
1596 | process_sched_migrate_task_event(raw, event, cpu, timestamp, thread); | ||
1542 | } | 1597 | } |
1543 | 1598 | ||
1544 | static int | 1599 | static int process_sample_event(event_t *event) |
1545 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) | ||
1546 | { | 1600 | { |
1547 | char level; | ||
1548 | int show = 0; | ||
1549 | struct dso *dso = NULL; | ||
1550 | struct thread *thread; | 1601 | struct thread *thread; |
1551 | u64 ip = event->ip.ip; | 1602 | u64 ip = event->ip.ip; |
1552 | u64 timestamp = -1; | 1603 | u64 timestamp = -1; |
1553 | u32 cpu = -1; | 1604 | u32 cpu = -1; |
1554 | u64 period = 1; | 1605 | u64 period = 1; |
1555 | void *more_data = event->ip.__more_data; | 1606 | void *more_data = event->ip.__more_data; |
1556 | int cpumode; | ||
1557 | 1607 | ||
1558 | thread = threads__findnew(event->ip.pid, &threads, &last_match); | 1608 | if (!(sample_type & PERF_SAMPLE_RAW)) |
1609 | return 0; | ||
1610 | |||
1611 | thread = threads__findnew(event->ip.pid); | ||
1559 | 1612 | ||
1560 | if (sample_type & PERF_SAMPLE_TIME) { | 1613 | if (sample_type & PERF_SAMPLE_TIME) { |
1561 | timestamp = *(u64 *)more_data; | 1614 | timestamp = *(u64 *)more_data; |
@@ -1573,177 +1626,64 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
1573 | more_data += sizeof(u64); | 1626 | more_data += sizeof(u64); |
1574 | } | 1627 | } |
1575 | 1628 | ||
1576 | dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", | 1629 | dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", |
1577 | (void *)(offset + head), | ||
1578 | (void *)(long)(event->header.size), | ||
1579 | event->header.misc, | 1630 | event->header.misc, |
1580 | event->ip.pid, event->ip.tid, | 1631 | event->ip.pid, event->ip.tid, |
1581 | (void *)(long)ip, | 1632 | (void *)(long)ip, |
1582 | (long long)period); | 1633 | (long long)period); |
1583 | 1634 | ||
1584 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
1585 | |||
1586 | if (thread == NULL) { | 1635 | if (thread == NULL) { |
1587 | eprintf("problem processing %d event, skipping it.\n", | 1636 | pr_debug("problem processing %d event, skipping it.\n", |
1588 | event->header.type); | 1637 | event->header.type); |
1589 | return -1; | 1638 | return -1; |
1590 | } | 1639 | } |
1591 | 1640 | ||
1592 | cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 1641 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); |
1593 | |||
1594 | if (cpumode == PERF_RECORD_MISC_KERNEL) { | ||
1595 | show = SHOW_KERNEL; | ||
1596 | level = 'k'; | ||
1597 | |||
1598 | dso = kernel_dso; | ||
1599 | |||
1600 | dump_printf(" ...... dso: %s\n", dso->name); | ||
1601 | |||
1602 | } else if (cpumode == PERF_RECORD_MISC_USER) { | ||
1603 | |||
1604 | show = SHOW_USER; | ||
1605 | level = '.'; | ||
1606 | |||
1607 | } else { | ||
1608 | show = SHOW_HV; | ||
1609 | level = 'H'; | ||
1610 | |||
1611 | dso = hypervisor_dso; | ||
1612 | 1642 | ||
1613 | dump_printf(" ...... dso: [hypervisor]\n"); | 1643 | if (profile_cpu != -1 && profile_cpu != (int) cpu) |
1614 | } | 1644 | return 0; |
1615 | 1645 | ||
1616 | if (sample_type & PERF_SAMPLE_RAW) | 1646 | process_raw_event(event, more_data, cpu, timestamp, thread); |
1617 | process_raw_event(event, more_data, cpu, timestamp, thread); | ||
1618 | 1647 | ||
1619 | return 0; | 1648 | return 0; |
1620 | } | 1649 | } |
1621 | 1650 | ||
1622 | static int | 1651 | static int process_lost_event(event_t *event __used) |
1623 | process_event(event_t *event, unsigned long offset, unsigned long head) | ||
1624 | { | 1652 | { |
1625 | trace_event(event); | 1653 | nr_lost_chunks++; |
1626 | 1654 | nr_lost_events += event->lost.lost; | |
1627 | nr_events++; | ||
1628 | switch (event->header.type) { | ||
1629 | case PERF_RECORD_MMAP: | ||
1630 | return 0; | ||
1631 | case PERF_RECORD_LOST: | ||
1632 | nr_lost_chunks++; | ||
1633 | nr_lost_events += event->lost.lost; | ||
1634 | return 0; | ||
1635 | |||
1636 | case PERF_RECORD_COMM: | ||
1637 | return process_comm_event(event, offset, head); | ||
1638 | 1655 | ||
1639 | case PERF_RECORD_EXIT ... PERF_RECORD_READ: | 1656 | return 0; |
1640 | return 0; | 1657 | } |
1641 | 1658 | ||
1642 | case PERF_RECORD_SAMPLE: | 1659 | static int sample_type_check(u64 type) |
1643 | return process_sample_event(event, offset, head); | 1660 | { |
1661 | sample_type = type; | ||
1644 | 1662 | ||
1645 | case PERF_RECORD_MAX: | 1663 | if (!(sample_type & PERF_SAMPLE_RAW)) { |
1646 | default: | 1664 | fprintf(stderr, |
1665 | "No trace sample to read. Did you call perf record " | ||
1666 | "without -R?"); | ||
1647 | return -1; | 1667 | return -1; |
1648 | } | 1668 | } |
1649 | 1669 | ||
1650 | return 0; | 1670 | return 0; |
1651 | } | 1671 | } |
1652 | 1672 | ||
1673 | static struct perf_file_handler file_handler = { | ||
1674 | .process_sample_event = process_sample_event, | ||
1675 | .process_comm_event = event__process_comm, | ||
1676 | .process_lost_event = process_lost_event, | ||
1677 | .sample_type_check = sample_type_check, | ||
1678 | }; | ||
1679 | |||
1653 | static int read_events(void) | 1680 | static int read_events(void) |
1654 | { | 1681 | { |
1655 | int ret, rc = EXIT_FAILURE; | 1682 | register_idle_thread(); |
1656 | unsigned long offset = 0; | 1683 | register_perf_file_handler(&file_handler); |
1657 | unsigned long head = 0; | ||
1658 | struct stat perf_stat; | ||
1659 | event_t *event; | ||
1660 | uint32_t size; | ||
1661 | char *buf; | ||
1662 | |||
1663 | trace_report(); | ||
1664 | register_idle_thread(&threads, &last_match); | ||
1665 | |||
1666 | input = open(input_name, O_RDONLY); | ||
1667 | if (input < 0) { | ||
1668 | perror("failed to open file"); | ||
1669 | exit(-1); | ||
1670 | } | ||
1671 | |||
1672 | ret = fstat(input, &perf_stat); | ||
1673 | if (ret < 0) { | ||
1674 | perror("failed to stat file"); | ||
1675 | exit(-1); | ||
1676 | } | ||
1677 | |||
1678 | if (!perf_stat.st_size) { | ||
1679 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | ||
1680 | exit(0); | ||
1681 | } | ||
1682 | header = perf_header__read(input); | ||
1683 | head = header->data_offset; | ||
1684 | sample_type = perf_header__sample_type(header); | ||
1685 | |||
1686 | if (!(sample_type & PERF_SAMPLE_RAW)) | ||
1687 | die("No trace sample to read. Did you call perf record " | ||
1688 | "without -R?"); | ||
1689 | |||
1690 | if (load_kernel() < 0) { | ||
1691 | perror("failed to load kernel symbols"); | ||
1692 | return EXIT_FAILURE; | ||
1693 | } | ||
1694 | |||
1695 | remap: | ||
1696 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | ||
1697 | MAP_SHARED, input, offset); | ||
1698 | if (buf == MAP_FAILED) { | ||
1699 | perror("failed to mmap file"); | ||
1700 | exit(-1); | ||
1701 | } | ||
1702 | |||
1703 | more: | ||
1704 | event = (event_t *)(buf + head); | ||
1705 | |||
1706 | size = event->header.size; | ||
1707 | if (!size) | ||
1708 | size = 8; | ||
1709 | |||
1710 | if (head + event->header.size >= page_size * mmap_window) { | ||
1711 | unsigned long shift = page_size * (head / page_size); | ||
1712 | int res; | ||
1713 | |||
1714 | res = munmap(buf, page_size * mmap_window); | ||
1715 | assert(res == 0); | ||
1716 | |||
1717 | offset += shift; | ||
1718 | head -= shift; | ||
1719 | goto remap; | ||
1720 | } | ||
1721 | |||
1722 | size = event->header.size; | ||
1723 | |||
1724 | |||
1725 | if (!size || process_event(event, offset, head) < 0) { | ||
1726 | |||
1727 | /* | ||
1728 | * assume we lost track of the stream, check alignment, and | ||
1729 | * increment a single u64 in the hope to catch on again 'soon'. | ||
1730 | */ | ||
1731 | |||
1732 | if (unlikely(head & 7)) | ||
1733 | head &= ~7ULL; | ||
1734 | |||
1735 | size = 8; | ||
1736 | } | ||
1737 | |||
1738 | head += size; | ||
1739 | |||
1740 | if (offset + head < (unsigned long)perf_stat.st_size) | ||
1741 | goto more; | ||
1742 | |||
1743 | rc = EXIT_SUCCESS; | ||
1744 | close(input); | ||
1745 | 1684 | ||
1746 | return rc; | 1685 | return mmap_dispatch_perf_file(&header, input_name, 0, 0, |
1686 | &event__cwdlen, &event__cwd); | ||
1747 | } | 1687 | } |
1748 | 1688 | ||
1749 | static void print_bad_events(void) | 1689 | static void print_bad_events(void) |
@@ -1883,6 +1823,8 @@ static const struct option latency_options[] = { | |||
1883 | "sort by key(s): runtime, switch, avg, max"), | 1823 | "sort by key(s): runtime, switch, avg, max"), |
1884 | OPT_BOOLEAN('v', "verbose", &verbose, | 1824 | OPT_BOOLEAN('v', "verbose", &verbose, |
1885 | "be more verbose (show symbol address, etc)"), | 1825 | "be more verbose (show symbol address, etc)"), |
1826 | OPT_INTEGER('C', "CPU", &profile_cpu, | ||
1827 | "CPU to profile on"), | ||
1886 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | 1828 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
1887 | "dump raw trace in ASCII"), | 1829 | "dump raw trace in ASCII"), |
1888 | OPT_END() | 1830 | OPT_END() |
@@ -1960,8 +1902,7 @@ static int __cmd_record(int argc, const char **argv) | |||
1960 | 1902 | ||
1961 | int cmd_sched(int argc, const char **argv, const char *prefix __used) | 1903 | int cmd_sched(int argc, const char **argv, const char *prefix __used) |
1962 | { | 1904 | { |
1963 | symbol__init(); | 1905 | symbol__init(0); |
1964 | page_size = getpagesize(); | ||
1965 | 1906 | ||
1966 | argc = parse_options(argc, argv, sched_options, sched_usage, | 1907 | argc = parse_options(argc, argv, sched_options, sched_usage, |
1967 | PARSE_OPT_STOP_AT_NON_OPTION); | 1908 | PARSE_OPT_STOP_AT_NON_OPTION); |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 3db31e7bf173..c70d72003557 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -50,15 +50,17 @@ | |||
50 | 50 | ||
51 | static struct perf_event_attr default_attrs[] = { | 51 | static struct perf_event_attr default_attrs[] = { |
52 | 52 | ||
53 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, | 53 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, |
54 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, | 54 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, |
55 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, | 55 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, |
56 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, | 56 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, |
57 | 57 | ||
58 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, | 58 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, |
59 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, | 59 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, |
60 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, | 60 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, |
61 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, | 61 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, |
62 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, | ||
63 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, | ||
62 | 64 | ||
63 | }; | 65 | }; |
64 | 66 | ||
@@ -125,6 +127,7 @@ struct stats event_res_stats[MAX_COUNTERS][3]; | |||
125 | struct stats runtime_nsecs_stats; | 127 | struct stats runtime_nsecs_stats; |
126 | struct stats walltime_nsecs_stats; | 128 | struct stats walltime_nsecs_stats; |
127 | struct stats runtime_cycles_stats; | 129 | struct stats runtime_cycles_stats; |
130 | struct stats runtime_branches_stats; | ||
128 | 131 | ||
129 | #define MATCH_EVENT(t, c, counter) \ | 132 | #define MATCH_EVENT(t, c, counter) \ |
130 | (attrs[counter].type == PERF_TYPE_##t && \ | 133 | (attrs[counter].type == PERF_TYPE_##t && \ |
@@ -235,6 +238,8 @@ static void read_counter(int counter) | |||
235 | update_stats(&runtime_nsecs_stats, count[0]); | 238 | update_stats(&runtime_nsecs_stats, count[0]); |
236 | if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter)) | 239 | if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter)) |
237 | update_stats(&runtime_cycles_stats, count[0]); | 240 | update_stats(&runtime_cycles_stats, count[0]); |
241 | if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter)) | ||
242 | update_stats(&runtime_branches_stats, count[0]); | ||
238 | } | 243 | } |
239 | 244 | ||
240 | static int run_perf_stat(int argc __used, const char **argv) | 245 | static int run_perf_stat(int argc __used, const char **argv) |
@@ -352,7 +357,16 @@ static void abs_printout(int counter, double avg) | |||
352 | ratio = avg / total; | 357 | ratio = avg / total; |
353 | 358 | ||
354 | fprintf(stderr, " # %10.3f IPC ", ratio); | 359 | fprintf(stderr, " # %10.3f IPC ", ratio); |
355 | } else { | 360 | } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) && |
361 | runtime_branches_stats.n != 0) { | ||
362 | total = avg_stats(&runtime_branches_stats); | ||
363 | |||
364 | if (total) | ||
365 | ratio = avg * 100 / total; | ||
366 | |||
367 | fprintf(stderr, " # %10.3f %% ", ratio); | ||
368 | |||
369 | } else if (runtime_nsecs_stats.n != 0) { | ||
356 | total = avg_stats(&runtime_nsecs_stats); | 370 | total = avg_stats(&runtime_nsecs_stats); |
357 | 371 | ||
358 | if (total) | 372 | if (total) |
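
With branch instructions now counted by default and accumulated in runtime_branches_stats, the new branch in abs_printout() reports branch misses as a percentage of executed branches. A worked example of that arithmetic with made-up counts (the real code takes both values from avg_stats()):

    #include <stdio.h>

    int main(void)
    {
            double avg   = 12500.0;         /* hypothetical branch-misses count   */
            double total = 1000000.0;       /* hypothetical branch-instructions   */

            /* same formula as the hunk above: ratio = avg * 100 / total */
            printf(" # %10.3f %%\n", avg * 100 / total);    /* " #      1.250 %" */
            return 0;
    }
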
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index e8a510d935e5..cb58b6605fcc 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
@@ -29,14 +29,14 @@ | |||
29 | #include "util/header.h" | 29 | #include "util/header.h" |
30 | #include "util/parse-options.h" | 30 | #include "util/parse-options.h" |
31 | #include "util/parse-events.h" | 31 | #include "util/parse-events.h" |
32 | #include "util/event.h" | ||
33 | #include "util/data_map.h" | ||
32 | #include "util/svghelper.h" | 34 | #include "util/svghelper.h" |
33 | 35 | ||
34 | static char const *input_name = "perf.data"; | 36 | static char const *input_name = "perf.data"; |
35 | static char const *output_name = "output.svg"; | 37 | static char const *output_name = "output.svg"; |
36 | 38 | ||
37 | 39 | ||
38 | static unsigned long page_size; | ||
39 | static unsigned long mmap_window = 32; | ||
40 | static u64 sample_type; | 40 | static u64 sample_type; |
41 | 41 | ||
42 | static unsigned int numcpus; | 42 | static unsigned int numcpus; |
@@ -49,8 +49,6 @@ static u64 first_time, last_time; | |||
49 | static int power_only; | 49 | static int power_only; |
50 | 50 | ||
51 | 51 | ||
52 | static struct perf_header *header; | ||
53 | |||
54 | struct per_pid; | 52 | struct per_pid; |
55 | struct per_pidcomm; | 53 | struct per_pidcomm; |
56 | 54 | ||
@@ -153,6 +151,17 @@ static struct wake_event *wake_events; | |||
153 | 151 | ||
154 | struct sample_wrapper *all_samples; | 152 | struct sample_wrapper *all_samples; |
155 | 153 | ||
154 | |||
155 | struct process_filter; | ||
156 | struct process_filter { | ||
157 | char *name; | ||
158 | int pid; | ||
159 | struct process_filter *next; | ||
160 | }; | ||
161 | |||
162 | static struct process_filter *process_filter; | ||
163 | |||
164 | |||
156 | static struct per_pid *find_create_pid(int pid) | 165 | static struct per_pid *find_create_pid(int pid) |
157 | { | 166 | { |
158 | struct per_pid *cursor = all_data; | 167 | struct per_pid *cursor = all_data; |
@@ -763,11 +772,11 @@ static void draw_wakeups(void) | |||
763 | c = p->all; | 772 | c = p->all; |
764 | while (c) { | 773 | while (c) { |
765 | if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { | 774 | if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { |
766 | if (p->pid == we->waker) { | 775 | if (p->pid == we->waker && !from) { |
767 | from = c->Y; | 776 | from = c->Y; |
768 | task_from = strdup(c->comm); | 777 | task_from = strdup(c->comm); |
769 | } | 778 | } |
770 | if (p->pid == we->wakee) { | 779 | if (p->pid == we->wakee && !to) { |
771 | to = c->Y; | 780 | to = c->Y; |
772 | task_to = strdup(c->comm); | 781 | task_to = strdup(c->comm); |
773 | } | 782 | } |
@@ -882,12 +891,89 @@ static void draw_process_bars(void) | |||
882 | } | 891 | } |
883 | } | 892 | } |
884 | 893 | ||
894 | static void add_process_filter(const char *string) | ||
895 | { | ||
896 | struct process_filter *filt; | ||
897 | int pid; | ||
898 | |||
899 | pid = strtoull(string, NULL, 10); | ||
900 | filt = malloc(sizeof(struct process_filter)); | ||
901 | if (!filt) | ||
902 | return; | ||
903 | |||
904 | filt->name = strdup(string); | ||
905 | filt->pid = pid; | ||
906 | filt->next = process_filter; | ||
907 | |||
908 | process_filter = filt; | ||
909 | } | ||
910 | |||
911 | static int passes_filter(struct per_pid *p, struct per_pidcomm *c) | ||
912 | { | ||
913 | struct process_filter *filt; | ||
914 | if (!process_filter) | ||
915 | return 1; | ||
916 | |||
917 | filt = process_filter; | ||
918 | while (filt) { | ||
919 | if (filt->pid && p->pid == filt->pid) | ||
920 | return 1; | ||
921 | if (strcmp(filt->name, c->comm) == 0) | ||
922 | return 1; | ||
923 | filt = filt->next; | ||
924 | } | ||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | static int determine_display_tasks_filtered(void) | ||
929 | { | ||
930 | struct per_pid *p; | ||
931 | struct per_pidcomm *c; | ||
932 | int count = 0; | ||
933 | |||
934 | p = all_data; | ||
935 | while (p) { | ||
936 | p->display = 0; | ||
937 | if (p->start_time == 1) | ||
938 | p->start_time = first_time; | ||
939 | |||
940 | /* no exit marker, task kept running to the end */ | ||
941 | if (p->end_time == 0) | ||
942 | p->end_time = last_time; | ||
943 | |||
944 | c = p->all; | ||
945 | |||
946 | while (c) { | ||
947 | c->display = 0; | ||
948 | |||
949 | if (c->start_time == 1) | ||
950 | c->start_time = first_time; | ||
951 | |||
952 | if (passes_filter(p, c)) { | ||
953 | c->display = 1; | ||
954 | p->display = 1; | ||
955 | count++; | ||
956 | } | ||
957 | |||
958 | if (c->end_time == 0) | ||
959 | c->end_time = last_time; | ||
960 | |||
961 | c = c->next; | ||
962 | } | ||
963 | p = p->next; | ||
964 | } | ||
965 | return count; | ||
966 | } | ||
967 | |||
885 | static int determine_display_tasks(u64 threshold) | 968 | static int determine_display_tasks(u64 threshold) |
886 | { | 969 | { |
887 | struct per_pid *p; | 970 | struct per_pid *p; |
888 | struct per_pidcomm *c; | 971 | struct per_pidcomm *c; |
889 | int count = 0; | 972 | int count = 0; |
890 | 973 | ||
974 | if (process_filter) | ||
975 | return determine_display_tasks_filtered(); | ||
976 | |||
891 | p = all_data; | 977 | p = all_data; |
892 | while (p) { | 978 | while (p) { |
893 | p->display = 0; | 979 | p->display = 0; |
@@ -957,36 +1043,6 @@ static void write_svg_file(const char *filename) | |||
957 | svg_close(); | 1043 | svg_close(); |
958 | } | 1044 | } |
959 | 1045 | ||
960 | static int | ||
961 | process_event(event_t *event) | ||
962 | { | ||
963 | |||
964 | switch (event->header.type) { | ||
965 | |||
966 | case PERF_RECORD_COMM: | ||
967 | return process_comm_event(event); | ||
968 | case PERF_RECORD_FORK: | ||
969 | return process_fork_event(event); | ||
970 | case PERF_RECORD_EXIT: | ||
971 | return process_exit_event(event); | ||
972 | case PERF_RECORD_SAMPLE: | ||
973 | return queue_sample_event(event); | ||
974 | |||
975 | /* | ||
976 | * We don't process them right now but they are fine: | ||
977 | */ | ||
978 | case PERF_RECORD_MMAP: | ||
979 | case PERF_RECORD_THROTTLE: | ||
980 | case PERF_RECORD_UNTHROTTLE: | ||
981 | return 0; | ||
982 | |||
983 | default: | ||
984 | return -1; | ||
985 | } | ||
986 | |||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | static void process_samples(void) | 1046 | static void process_samples(void) |
991 | { | 1047 | { |
992 | struct sample_wrapper *cursor; | 1048 | struct sample_wrapper *cursor; |
@@ -1002,107 +1058,38 @@ static void process_samples(void) | |||
1002 | } | 1058 | } |
1003 | } | 1059 | } |
1004 | 1060 | ||
1005 | 1061 | static int sample_type_check(u64 type) | |
1006 | static int __cmd_timechart(void) | ||
1007 | { | 1062 | { |
1008 | int ret, rc = EXIT_FAILURE; | 1063 | sample_type = type; |
1009 | unsigned long offset = 0; | ||
1010 | unsigned long head, shift; | ||
1011 | struct stat statbuf; | ||
1012 | event_t *event; | ||
1013 | uint32_t size; | ||
1014 | char *buf; | ||
1015 | int input; | ||
1016 | |||
1017 | input = open(input_name, O_RDONLY); | ||
1018 | if (input < 0) { | ||
1019 | fprintf(stderr, " failed to open file: %s", input_name); | ||
1020 | if (!strcmp(input_name, "perf.data")) | ||
1021 | fprintf(stderr, " (try 'perf record' first)"); | ||
1022 | fprintf(stderr, "\n"); | ||
1023 | exit(-1); | ||
1024 | } | ||
1025 | |||
1026 | ret = fstat(input, &statbuf); | ||
1027 | if (ret < 0) { | ||
1028 | perror("failed to stat file"); | ||
1029 | exit(-1); | ||
1030 | } | ||
1031 | |||
1032 | if (!statbuf.st_size) { | ||
1033 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | ||
1034 | exit(0); | ||
1035 | } | ||
1036 | |||
1037 | header = perf_header__read(input); | ||
1038 | head = header->data_offset; | ||
1039 | |||
1040 | sample_type = perf_header__sample_type(header); | ||
1041 | 1064 | ||
1042 | shift = page_size * (head / page_size); | 1065 | if (!(sample_type & PERF_SAMPLE_RAW)) { |
1043 | offset += shift; | 1066 | fprintf(stderr, "No trace samples found in the file.\n" |
1044 | head -= shift; | 1067 | "Have you used 'perf timechart record' to record it?\n"); |
1045 | 1068 | return -1; | |
1046 | remap: | ||
1047 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | ||
1048 | MAP_SHARED, input, offset); | ||
1049 | if (buf == MAP_FAILED) { | ||
1050 | perror("failed to mmap file"); | ||
1051 | exit(-1); | ||
1052 | } | ||
1053 | |||
1054 | more: | ||
1055 | event = (event_t *)(buf + head); | ||
1056 | |||
1057 | size = event->header.size; | ||
1058 | if (!size) | ||
1059 | size = 8; | ||
1060 | |||
1061 | if (head + event->header.size >= page_size * mmap_window) { | ||
1062 | int ret2; | ||
1063 | |||
1064 | shift = page_size * (head / page_size); | ||
1065 | |||
1066 | ret2 = munmap(buf, page_size * mmap_window); | ||
1067 | assert(ret2 == 0); | ||
1068 | |||
1069 | offset += shift; | ||
1070 | head -= shift; | ||
1071 | goto remap; | ||
1072 | } | ||
1073 | |||
1074 | size = event->header.size; | ||
1075 | |||
1076 | if (!size || process_event(event) < 0) { | ||
1077 | |||
1078 | printf("%p [%p]: skipping unknown header type: %d\n", | ||
1079 | (void *)(offset + head), | ||
1080 | (void *)(long)(event->header.size), | ||
1081 | event->header.type); | ||
1082 | |||
1083 | /* | ||
1084 | * assume we lost track of the stream, check alignment, and | ||
1085 | * increment a single u64 in the hope to catch on again 'soon'. | ||
1086 | */ | ||
1087 | |||
1088 | if (unlikely(head & 7)) | ||
1089 | head &= ~7ULL; | ||
1090 | |||
1091 | size = 8; | ||
1092 | } | 1069 | } |
1093 | 1070 | ||
1094 | head += size; | 1071 | return 0; |
1072 | } | ||
1095 | 1073 | ||
1096 | if (offset + head >= header->data_offset + header->data_size) | 1074 | static struct perf_file_handler file_handler = { |
1097 | goto done; | 1075 | .process_comm_event = process_comm_event, |
1076 | .process_fork_event = process_fork_event, | ||
1077 | .process_exit_event = process_exit_event, | ||
1078 | .process_sample_event = queue_sample_event, | ||
1079 | .sample_type_check = sample_type_check, | ||
1080 | }; | ||
1098 | 1081 | ||
1099 | if (offset + head < (unsigned long)statbuf.st_size) | 1082 | static int __cmd_timechart(void) |
1100 | goto more; | 1083 | { |
1084 | struct perf_header *header; | ||
1085 | int ret; | ||
1101 | 1086 | ||
1102 | done: | 1087 | register_perf_file_handler(&file_handler); |
1103 | rc = EXIT_SUCCESS; | ||
1104 | close(input); | ||
1105 | 1088 | ||
1089 | ret = mmap_dispatch_perf_file(&header, input_name, 0, 0, | ||
1090 | &event__cwdlen, &event__cwd); | ||
1091 | if (ret) | ||
1092 | return EXIT_FAILURE; | ||
1106 | 1093 | ||
1107 | process_samples(); | 1094 | process_samples(); |
1108 | 1095 | ||
@@ -1112,9 +1099,10 @@ done: | |||
1112 | 1099 | ||
1113 | write_svg_file(output_name); | 1100 | write_svg_file(output_name); |
1114 | 1101 | ||
1115 | printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name); | 1102 | pr_info("Written %2.1f seconds of trace to %s.\n", |
1103 | (last_time - first_time) / 1000000000.0, output_name); | ||
1116 | 1104 | ||
1117 | return rc; | 1105 | return EXIT_SUCCESS; |
1118 | } | 1106 | } |
1119 | 1107 | ||
1120 | static const char * const timechart_usage[] = { | 1108 | static const char * const timechart_usage[] = { |
@@ -1153,6 +1141,14 @@ static int __cmd_record(int argc, const char **argv) | |||
1153 | return cmd_record(i, rec_argv, NULL); | 1141 | return cmd_record(i, rec_argv, NULL); |
1154 | } | 1142 | } |
1155 | 1143 | ||
1144 | static int | ||
1145 | parse_process(const struct option *opt __used, const char *arg, int __used unset) | ||
1146 | { | ||
1147 | if (arg) | ||
1148 | add_process_filter(arg); | ||
1149 | return 0; | ||
1150 | } | ||
1151 | |||
1156 | static const struct option options[] = { | 1152 | static const struct option options[] = { |
1157 | OPT_STRING('i', "input", &input_name, "file", | 1153 | OPT_STRING('i', "input", &input_name, "file", |
1158 | "input file name"), | 1154 | "input file name"), |
@@ -1160,17 +1156,18 @@ static const struct option options[] = { | |||
1160 | "output file name"), | 1156 | "output file name"), |
1161 | OPT_INTEGER('w', "width", &svg_page_width, | 1157 | OPT_INTEGER('w', "width", &svg_page_width, |
1162 | "page width"), | 1158 | "page width"), |
1163 | OPT_BOOLEAN('p', "power-only", &power_only, | 1159 | OPT_BOOLEAN('P', "power-only", &power_only, |
1164 | "output power data only"), | 1160 | "output power data only"), |
1161 | OPT_CALLBACK('p', "process", NULL, "process", | ||
1162 | "process selector. Pass a pid or process name.", | ||
1163 | parse_process), | ||
1165 | OPT_END() | 1164 | OPT_END() |
1166 | }; | 1165 | }; |
1167 | 1166 | ||
1168 | 1167 | ||
1169 | int cmd_timechart(int argc, const char **argv, const char *prefix __used) | 1168 | int cmd_timechart(int argc, const char **argv, const char *prefix __used) |
1170 | { | 1169 | { |
1171 | symbol__init(); | 1170 | symbol__init(0); |
1172 | |||
1173 | page_size = getpagesize(); | ||
1174 | 1171 | ||
1175 | argc = parse_options(argc, argv, options, timechart_usage, | 1172 | argc = parse_options(argc, argv, options, timechart_usage, |
1176 | PARSE_OPT_STOP_AT_NON_OPTION); | 1173 | PARSE_OPT_STOP_AT_NON_OPTION); |
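
The timechart changes above move the old -p (power-only) switch to -P and repurpose -p/--process as a filter that accepts either a pid or a process name. A condensed sketch of the matching rule that add_process_filter() and passes_filter() implement; the example arguments in the comments are hypothetical:

    #include <stdlib.h>
    #include <string.h>

    /* A numeric argument matches by pid; strtoull() yields 0 for a
     * non-numeric string, so a name like "firefox" can only match the
     * comm comparison. */
    static int filter_matches(const char *arg, int pid, const char *comm)
    {
            int filt_pid = strtoull(arg, NULL, 10);

            if (filt_pid && pid == filt_pid)
                    return 1;                       /* perf timechart -p 2140    */
            return strcmp(arg, comm) == 0;          /* perf timechart -p firefox */
    }
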
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index e23bc74e734f..e0a374d0e43a 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include "util/symbol.h" | 23 | #include "util/symbol.h" |
24 | #include "util/color.h" | 24 | #include "util/color.h" |
25 | #include "util/thread.h" | ||
25 | #include "util/util.h" | 26 | #include "util/util.h" |
26 | #include <linux/rbtree.h> | 27 | #include <linux/rbtree.h> |
27 | #include "util/parse-options.h" | 28 | #include "util/parse-options.h" |
@@ -54,26 +55,31 @@ | |||
54 | 55 | ||
55 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; | 56 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; |
56 | 57 | ||
57 | static int system_wide = 0; | 58 | static int system_wide = 0; |
58 | 59 | ||
59 | static int default_interval = 100000; | 60 | static int default_interval = 0; |
60 | 61 | ||
61 | static int count_filter = 5; | 62 | static int count_filter = 5; |
62 | static int print_entries = 15; | 63 | static int print_entries; |
63 | 64 | ||
64 | static int target_pid = -1; | 65 | static int target_pid = -1; |
65 | static int inherit = 0; | 66 | static int inherit = 0; |
66 | static int profile_cpu = -1; | 67 | static int profile_cpu = -1; |
67 | static int nr_cpus = 0; | 68 | static int nr_cpus = 0; |
68 | static unsigned int realtime_prio = 0; | 69 | static unsigned int realtime_prio = 0; |
69 | static int group = 0; | 70 | static int group = 0; |
70 | static unsigned int page_size; | 71 | static unsigned int page_size; |
71 | static unsigned int mmap_pages = 16; | 72 | static unsigned int mmap_pages = 16; |
72 | static int freq = 0; | 73 | static int freq = 1000; /* 1 KHz */ |
73 | 74 | ||
74 | static int delay_secs = 2; | 75 | static int delay_secs = 2; |
75 | static int zero; | 76 | static int zero = 0; |
76 | static int dump_symtab; | 77 | static int dump_symtab = 0; |
78 | |||
79 | static bool hide_kernel_symbols = false; | ||
80 | static bool hide_user_symbols = false; | ||
81 | static struct winsize winsize; | ||
82 | struct symbol_conf symbol_conf; | ||
77 | 83 | ||
78 | /* | 84 | /* |
79 | * Source | 85 | * Source |
@@ -86,83 +92,126 @@ struct source_line { | |||
86 | struct source_line *next; | 92 | struct source_line *next; |
87 | }; | 93 | }; |
88 | 94 | ||
89 | static char *sym_filter = NULL; | 95 | static char *sym_filter = NULL; |
90 | struct sym_entry *sym_filter_entry = NULL; | 96 | struct sym_entry *sym_filter_entry = NULL; |
91 | static int sym_pcnt_filter = 5; | 97 | static int sym_pcnt_filter = 5; |
92 | static int sym_counter = 0; | 98 | static int sym_counter = 0; |
93 | static int display_weighted = -1; | 99 | static int display_weighted = -1; |
94 | 100 | ||
95 | /* | 101 | /* |
96 | * Symbols | 102 | * Symbols |
97 | */ | 103 | */ |
98 | 104 | ||
99 | static u64 min_ip; | 105 | struct sym_entry_source { |
100 | static u64 max_ip = -1ll; | 106 | struct source_line *source; |
107 | struct source_line *lines; | ||
108 | struct source_line **lines_tail; | ||
109 | pthread_mutex_t lock; | ||
110 | }; | ||
101 | 111 | ||
102 | struct sym_entry { | 112 | struct sym_entry { |
103 | struct rb_node rb_node; | 113 | struct rb_node rb_node; |
104 | struct list_head node; | 114 | struct list_head node; |
105 | unsigned long count[MAX_COUNTERS]; | ||
106 | unsigned long snap_count; | 115 | unsigned long snap_count; |
107 | double weight; | 116 | double weight; |
108 | int skip; | 117 | int skip; |
109 | struct source_line *source; | 118 | u16 name_len; |
110 | struct source_line *lines; | 119 | u8 origin; |
111 | struct source_line **lines_tail; | 120 | struct map *map; |
112 | pthread_mutex_t source_lock; | 121 | struct sym_entry_source *src; |
122 | unsigned long count[0]; | ||
113 | }; | 123 | }; |
114 | 124 | ||
115 | /* | 125 | /* |
116 | * Source functions | 126 | * Source functions |
117 | */ | 127 | */ |
118 | 128 | ||
129 | static inline struct symbol *sym_entry__symbol(struct sym_entry *self) | ||
130 | { | ||
131 | return ((void *)self) + symbol_conf.priv_size; | ||
132 | } | ||
133 | |||
134 | static void get_term_dimensions(struct winsize *ws) | ||
135 | { | ||
136 | char *s = getenv("LINES"); | ||
137 | |||
138 | if (s != NULL) { | ||
139 | ws->ws_row = atoi(s); | ||
140 | s = getenv("COLUMNS"); | ||
141 | if (s != NULL) { | ||
142 | ws->ws_col = atoi(s); | ||
143 | if (ws->ws_row && ws->ws_col) | ||
144 | return; | ||
145 | } | ||
146 | } | ||
147 | #ifdef TIOCGWINSZ | ||
148 | if (ioctl(1, TIOCGWINSZ, ws) == 0 && | ||
149 | ws->ws_row && ws->ws_col) | ||
150 | return; | ||
151 | #endif | ||
152 | ws->ws_row = 25; | ||
153 | ws->ws_col = 80; | ||
154 | } | ||
155 | |||
156 | static void update_print_entries(struct winsize *ws) | ||
157 | { | ||
158 | print_entries = ws->ws_row; | ||
159 | |||
160 | if (print_entries > 9) | ||
161 | print_entries -= 9; | ||
162 | } | ||
163 | |||
164 | static void sig_winch_handler(int sig __used) | ||
165 | { | ||
166 | get_term_dimensions(&winsize); | ||
167 | update_print_entries(&winsize); | ||
168 | } | ||
169 | |||
119 | static void parse_source(struct sym_entry *syme) | 170 | static void parse_source(struct sym_entry *syme) |
120 | { | 171 | { |
121 | struct symbol *sym; | 172 | struct symbol *sym; |
122 | struct module *module; | 173 | struct sym_entry_source *source; |
123 | struct section *section = NULL; | 174 | struct map *map; |
124 | FILE *file; | 175 | FILE *file; |
125 | char command[PATH_MAX*2]; | 176 | char command[PATH_MAX*2]; |
126 | const char *path = vmlinux_name; | 177 | const char *path; |
127 | u64 start, end, len; | 178 | u64 len; |
128 | 179 | ||
129 | if (!syme) | 180 | if (!syme) |
130 | return; | 181 | return; |
131 | 182 | ||
132 | if (syme->lines) { | 183 | if (syme->src == NULL) { |
133 | pthread_mutex_lock(&syme->source_lock); | 184 | syme->src = zalloc(sizeof(*source)); |
134 | goto out_assign; | 185 | if (syme->src == NULL) |
186 | return; | ||
187 | pthread_mutex_init(&syme->src->lock, NULL); | ||
135 | } | 188 | } |
136 | 189 | ||
137 | sym = (struct symbol *)(syme + 1); | 190 | source = syme->src; |
138 | module = sym->module; | ||
139 | |||
140 | if (module) | ||
141 | path = module->path; | ||
142 | if (!path) | ||
143 | return; | ||
144 | |||
145 | start = sym->obj_start; | ||
146 | if (!start) | ||
147 | start = sym->start; | ||
148 | 191 | ||
149 | if (module) { | 192 | if (source->lines) { |
150 | section = module->sections->find_section(module->sections, ".text"); | 193 | pthread_mutex_lock(&source->lock); |
151 | if (section) | 194 | goto out_assign; |
152 | start -= section->vma; | ||
153 | } | 195 | } |
154 | 196 | ||
155 | end = start + sym->end - sym->start + 1; | 197 | sym = sym_entry__symbol(syme); |
198 | map = syme->map; | ||
199 | path = map->dso->long_name; | ||
200 | |||
156 | len = sym->end - sym->start; | 201 | len = sym->end - sym->start; |
157 | 202 | ||
158 | sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path); | 203 | sprintf(command, |
204 | "objdump --start-address=0x%016Lx " | ||
205 | "--stop-address=0x%016Lx -dS %s", | ||
206 | map->unmap_ip(map, sym->start), | ||
207 | map->unmap_ip(map, sym->end), path); | ||
159 | 208 | ||
160 | file = popen(command, "r"); | 209 | file = popen(command, "r"); |
161 | if (!file) | 210 | if (!file) |
162 | return; | 211 | return; |
163 | 212 | ||
164 | pthread_mutex_lock(&syme->source_lock); | 213 | pthread_mutex_lock(&source->lock); |
165 | syme->lines_tail = &syme->lines; | 214 | source->lines_tail = &source->lines; |
166 | while (!feof(file)) { | 215 | while (!feof(file)) { |
167 | struct source_line *src; | 216 | struct source_line *src; |
168 | size_t dummy = 0; | 217 | size_t dummy = 0; |
@@ -182,24 +231,22 @@ static void parse_source(struct sym_entry *syme) | |||
182 | *c = 0; | 231 | *c = 0; |
183 | 232 | ||
184 | src->next = NULL; | 233 | src->next = NULL; |
185 | *syme->lines_tail = src; | 234 | *source->lines_tail = src; |
186 | syme->lines_tail = &src->next; | 235 | source->lines_tail = &src->next; |
187 | 236 | ||
188 | if (strlen(src->line)>8 && src->line[8] == ':') { | 237 | if (strlen(src->line)>8 && src->line[8] == ':') { |
189 | src->eip = strtoull(src->line, NULL, 16); | 238 | src->eip = strtoull(src->line, NULL, 16); |
190 | if (section) | 239 | src->eip = map->unmap_ip(map, src->eip); |
191 | src->eip += section->vma; | ||
192 | } | 240 | } |
193 | if (strlen(src->line)>8 && src->line[16] == ':') { | 241 | if (strlen(src->line)>8 && src->line[16] == ':') { |
194 | src->eip = strtoull(src->line, NULL, 16); | 242 | src->eip = strtoull(src->line, NULL, 16); |
195 | if (section) | 243 | src->eip = map->unmap_ip(map, src->eip); |
196 | src->eip += section->vma; | ||
197 | } | 244 | } |
198 | } | 245 | } |
199 | pclose(file); | 246 | pclose(file); |
200 | out_assign: | 247 | out_assign: |
201 | sym_filter_entry = syme; | 248 | sym_filter_entry = syme; |
202 | pthread_mutex_unlock(&syme->source_lock); | 249 | pthread_mutex_unlock(&source->lock); |
203 | } | 250 | } |
204 | 251 | ||
205 | static void __zero_source_counters(struct sym_entry *syme) | 252 | static void __zero_source_counters(struct sym_entry *syme) |
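The reworked parse_source() above resolves addresses through map->unmap_ip() instead of the old module/section arithmetic, but the underlying annotation technique is unchanged: run objdump over the symbol's address range and collect the interleaved source and disassembly lines over a pipe. A stripped-down, standalone illustration of that pattern (not the patch's code; path and addresses are caller-supplied placeholders):

    #include <stdio.h>

    /* Sketch of the annotation pipeline: ask objdump for the mixed
     * source/disassembly listing of one address range and read it back
     * line by line, as parse_source() does via popen(). */
    static int dump_range(const char *path, unsigned long long start,
                          unsigned long long end)
    {
        char command[4096];
        char line[1024];
        FILE *file;

        snprintf(command, sizeof(command),
                 "objdump --start-address=0x%016llx "
                 "--stop-address=0x%016llx -dS %s", start, end, path);

        file = popen(command, "r");
        if (file == NULL)
            return -1;

        /* lines starting with an "<addr>:" prefix carry instruction
         * addresses; everything else is source text emitted by -S */
        while (fgets(line, sizeof(line), file) != NULL)
            fputs(line, stdout);

        return pclose(file);
    }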
@@ -207,7 +254,7 @@ static void __zero_source_counters(struct sym_entry *syme) | |||
207 | int i; | 254 | int i; |
208 | struct source_line *line; | 255 | struct source_line *line; |
209 | 256 | ||
210 | line = syme->lines; | 257 | line = syme->src->lines; |
211 | while (line) { | 258 | while (line) { |
212 | for (i = 0; i < nr_counters; i++) | 259 | for (i = 0; i < nr_counters; i++) |
213 | line->count[i] = 0; | 260 | line->count[i] = 0; |
@@ -222,13 +269,13 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) | |||
222 | if (syme != sym_filter_entry) | 269 | if (syme != sym_filter_entry) |
223 | return; | 270 | return; |
224 | 271 | ||
225 | if (pthread_mutex_trylock(&syme->source_lock)) | 272 | if (pthread_mutex_trylock(&syme->src->lock)) |
226 | return; | 273 | return; |
227 | 274 | ||
228 | if (!syme->source) | 275 | if (syme->src == NULL || syme->src->source == NULL) |
229 | goto out_unlock; | 276 | goto out_unlock; |
230 | 277 | ||
231 | for (line = syme->lines; line; line = line->next) { | 278 | for (line = syme->src->lines; line; line = line->next) { |
232 | if (line->eip == ip) { | 279 | if (line->eip == ip) { |
233 | line->count[counter]++; | 280 | line->count[counter]++; |
234 | break; | 281 | break; |
@@ -237,32 +284,25 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) | |||
237 | break; | 284 | break; |
238 | } | 285 | } |
239 | out_unlock: | 286 | out_unlock: |
240 | pthread_mutex_unlock(&syme->source_lock); | 287 | pthread_mutex_unlock(&syme->src->lock); |
241 | } | 288 | } |
242 | 289 | ||
243 | static void lookup_sym_source(struct sym_entry *syme) | 290 | static void lookup_sym_source(struct sym_entry *syme) |
244 | { | 291 | { |
245 | struct symbol *symbol = (struct symbol *)(syme + 1); | 292 | struct symbol *symbol = sym_entry__symbol(syme); |
246 | struct source_line *line; | 293 | struct source_line *line; |
247 | char pattern[PATH_MAX]; | 294 | char pattern[PATH_MAX]; |
248 | char *idx; | ||
249 | 295 | ||
250 | sprintf(pattern, "<%s>:", symbol->name); | 296 | sprintf(pattern, "<%s>:", symbol->name); |
251 | 297 | ||
252 | if (symbol->module) { | 298 | pthread_mutex_lock(&syme->src->lock); |
253 | idx = strstr(pattern, "\t"); | 299 | for (line = syme->src->lines; line; line = line->next) { |
254 | if (idx) | ||
255 | *idx = 0; | ||
256 | } | ||
257 | |||
258 | pthread_mutex_lock(&syme->source_lock); | ||
259 | for (line = syme->lines; line; line = line->next) { | ||
260 | if (strstr(line->line, pattern)) { | 300 | if (strstr(line->line, pattern)) { |
261 | syme->source = line; | 301 | syme->src->source = line; |
262 | break; | 302 | break; |
263 | } | 303 | } |
264 | } | 304 | } |
265 | pthread_mutex_unlock(&syme->source_lock); | 305 | pthread_mutex_unlock(&syme->src->lock); |
266 | } | 306 | } |
267 | 307 | ||
268 | static void show_lines(struct source_line *queue, int count, int total) | 308 | static void show_lines(struct source_line *queue, int count, int total) |
@@ -292,24 +332,24 @@ static void show_details(struct sym_entry *syme) | |||
292 | if (!syme) | 332 | if (!syme) |
293 | return; | 333 | return; |
294 | 334 | ||
295 | if (!syme->source) | 335 | if (!syme->src->source) |
296 | lookup_sym_source(syme); | 336 | lookup_sym_source(syme); |
297 | 337 | ||
298 | if (!syme->source) | 338 | if (!syme->src->source) |
299 | return; | 339 | return; |
300 | 340 | ||
301 | symbol = (struct symbol *)(syme + 1); | 341 | symbol = sym_entry__symbol(syme); |
302 | printf("Showing %s for %s\n", event_name(sym_counter), symbol->name); | 342 | printf("Showing %s for %s\n", event_name(sym_counter), symbol->name); |
303 | printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); | 343 | printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); |
304 | 344 | ||
305 | pthread_mutex_lock(&syme->source_lock); | 345 | pthread_mutex_lock(&syme->src->lock); |
306 | line = syme->source; | 346 | line = syme->src->source; |
307 | while (line) { | 347 | while (line) { |
308 | total += line->count[sym_counter]; | 348 | total += line->count[sym_counter]; |
309 | line = line->next; | 349 | line = line->next; |
310 | } | 350 | } |
311 | 351 | ||
312 | line = syme->source; | 352 | line = syme->src->source; |
313 | while (line) { | 353 | while (line) { |
314 | float pcnt = 0.0; | 354 | float pcnt = 0.0; |
315 | 355 | ||
@@ -334,13 +374,13 @@ static void show_details(struct sym_entry *syme) | |||
334 | line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8; | 374 | line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8; |
335 | line = line->next; | 375 | line = line->next; |
336 | } | 376 | } |
337 | pthread_mutex_unlock(&syme->source_lock); | 377 | pthread_mutex_unlock(&syme->src->lock); |
338 | if (more) | 378 | if (more) |
339 | printf("%d lines not displayed, maybe increase display entries [e]\n", more); | 379 | printf("%d lines not displayed, maybe increase display entries [e]\n", more); |
340 | } | 380 | } |
341 | 381 | ||
342 | /* | 382 | /* |
343 | * Symbols will be added here in record_ip and will get out | 383 | * Symbols will be added here in event__process_sample and will get out |
344 | * after decayed. | 384 | * after decayed. |
345 | */ | 385 | */ |
346 | static LIST_HEAD(active_symbols); | 386 | static LIST_HEAD(active_symbols); |
@@ -411,6 +451,8 @@ static void print_sym_table(void) | |||
411 | struct sym_entry *syme, *n; | 451 | struct sym_entry *syme, *n; |
412 | struct rb_root tmp = RB_ROOT; | 452 | struct rb_root tmp = RB_ROOT; |
413 | struct rb_node *nd; | 453 | struct rb_node *nd; |
454 | int sym_width = 0, dso_width = 0, max_dso_width; | ||
455 | const int win_width = winsize.ws_col - 1; | ||
414 | 456 | ||
415 | samples = userspace_samples = 0; | 457 | samples = userspace_samples = 0; |
416 | 458 | ||
@@ -422,6 +464,14 @@ static void print_sym_table(void) | |||
422 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { | 464 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { |
423 | syme->snap_count = syme->count[snap]; | 465 | syme->snap_count = syme->count[snap]; |
424 | if (syme->snap_count != 0) { | 466 | if (syme->snap_count != 0) { |
467 | |||
468 | if ((hide_user_symbols && | ||
469 | syme->origin == PERF_RECORD_MISC_USER) || | ||
470 | (hide_kernel_symbols && | ||
471 | syme->origin == PERF_RECORD_MISC_KERNEL)) { | ||
472 | list_remove_active_sym(syme); | ||
473 | continue; | ||
474 | } | ||
425 | syme->weight = sym_weight(syme); | 475 | syme->weight = sym_weight(syme); |
426 | rb_insert_active_sym(&tmp, syme); | 476 | rb_insert_active_sym(&tmp, syme); |
427 | sum_ksamples += syme->snap_count; | 477 | sum_ksamples += syme->snap_count; |
@@ -434,8 +484,7 @@ static void print_sym_table(void) | |||
434 | 484 | ||
435 | puts(CONSOLE_CLEAR); | 485 | puts(CONSOLE_CLEAR); |
436 | 486 | ||
437 | printf( | 487 | printf("%-*.*s\n", win_width, win_width, graph_dotted_line); |
438 | "------------------------------------------------------------------------------\n"); | ||
439 | printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [", | 488 | printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [", |
440 | samples_per_sec, | 489 | samples_per_sec, |
441 | 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); | 490 | 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); |
@@ -473,33 +522,57 @@ static void print_sym_table(void) | |||
473 | printf(", %d CPUs)\n", nr_cpus); | 522 | printf(", %d CPUs)\n", nr_cpus); |
474 | } | 523 | } |
475 | 524 | ||
476 | printf("------------------------------------------------------------------------------\n\n"); | 525 | printf("%-*.*s\n", win_width, win_width, graph_dotted_line); |
477 | 526 | ||
478 | if (sym_filter_entry) { | 527 | if (sym_filter_entry) { |
479 | show_details(sym_filter_entry); | 528 | show_details(sym_filter_entry); |
480 | return; | 529 | return; |
481 | } | 530 | } |
482 | 531 | ||
532 | /* | ||
533 | * Find the longest symbol name that will be displayed | ||
534 | */ | ||
535 | for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { | ||
536 | syme = rb_entry(nd, struct sym_entry, rb_node); | ||
537 | if (++printed > print_entries || | ||
538 | (int)syme->snap_count < count_filter) | ||
539 | continue; | ||
540 | |||
541 | if (syme->map->dso->long_name_len > dso_width) | ||
542 | dso_width = syme->map->dso->long_name_len; | ||
543 | |||
544 | if (syme->name_len > sym_width) | ||
545 | sym_width = syme->name_len; | ||
546 | } | ||
547 | |||
548 | printed = 0; | ||
549 | |||
550 | max_dso_width = winsize.ws_col - sym_width - 29; | ||
551 | if (dso_width > max_dso_width) | ||
552 | dso_width = max_dso_width; | ||
553 | putchar('\n'); | ||
483 | if (nr_counters == 1) | 554 | if (nr_counters == 1) |
484 | printf(" samples pcnt"); | 555 | printf(" samples pcnt"); |
485 | else | 556 | else |
486 | printf(" weight samples pcnt"); | 557 | printf(" weight samples pcnt"); |
487 | 558 | ||
488 | if (verbose) | 559 | if (verbose) |
489 | printf(" RIP "); | 560 | printf(" RIP "); |
490 | printf(" kernel function\n"); | 561 | printf(" %-*.*s DSO\n", sym_width, sym_width, "function"); |
491 | printf(" %s _______ _____", | 562 | printf(" %s _______ _____", |
492 | nr_counters == 1 ? " " : "______"); | 563 | nr_counters == 1 ? " " : "______"); |
493 | if (verbose) | 564 | if (verbose) |
494 | printf(" ________________"); | 565 | printf(" ________________"); |
495 | printf(" _______________\n\n"); | 566 | printf(" %-*.*s", sym_width, sym_width, graph_line); |
567 | printf(" %-*.*s", dso_width, dso_width, graph_line); | ||
568 | puts("\n"); | ||
496 | 569 | ||
497 | for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { | 570 | for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { |
498 | struct symbol *sym; | 571 | struct symbol *sym; |
499 | double pcnt; | 572 | double pcnt; |
500 | 573 | ||
501 | syme = rb_entry(nd, struct sym_entry, rb_node); | 574 | syme = rb_entry(nd, struct sym_entry, rb_node); |
502 | sym = (struct symbol *)(syme + 1); | 575 | sym = sym_entry__symbol(syme); |
503 | 576 | ||
504 | if (++printed > print_entries || (int)syme->snap_count < count_filter) | 577 | if (++printed > print_entries || (int)syme->snap_count < count_filter) |
505 | continue; | 578 | continue; |
@@ -508,17 +581,18 @@ static void print_sym_table(void) | |||
508 | sum_ksamples)); | 581 | sum_ksamples)); |
509 | 582 | ||
510 | if (nr_counters == 1 || !display_weighted) | 583 | if (nr_counters == 1 || !display_weighted) |
511 | printf("%20.2f - ", syme->weight); | 584 | printf("%20.2f ", syme->weight); |
512 | else | 585 | else |
513 | printf("%9.1f %10ld - ", syme->weight, syme->snap_count); | 586 | printf("%9.1f %10ld ", syme->weight, syme->snap_count); |
514 | 587 | ||
515 | percent_color_fprintf(stdout, "%4.1f%%", pcnt); | 588 | percent_color_fprintf(stdout, "%4.1f%%", pcnt); |
516 | if (verbose) | 589 | if (verbose) |
517 | printf(" - %016llx", sym->start); | 590 | printf(" %016llx", sym->start); |
518 | printf(" : %s", sym->name); | 591 | printf(" %-*.*s", sym_width, sym_width, sym->name); |
519 | if (sym->module) | 592 | printf(" %-*.*s\n", dso_width, dso_width, |
520 | printf("\t[%s]", sym->module->name); | 593 | dso_width >= syme->map->dso->long_name_len ? |
521 | printf("\n"); | 594 | syme->map->dso->long_name : |
595 | syme->map->dso->short_name); | ||
522 | } | 596 | } |
523 | } | 597 | } |
524 | 598 | ||
@@ -565,10 +639,10 @@ static void prompt_symbol(struct sym_entry **target, const char *msg) | |||
565 | 639 | ||
566 | /* zero counters of active symbol */ | 640 | /* zero counters of active symbol */ |
567 | if (syme) { | 641 | if (syme) { |
568 | pthread_mutex_lock(&syme->source_lock); | 642 | pthread_mutex_lock(&syme->src->lock); |
569 | __zero_source_counters(syme); | 643 | __zero_source_counters(syme); |
570 | *target = NULL; | 644 | *target = NULL; |
571 | pthread_mutex_unlock(&syme->source_lock); | 645 | pthread_mutex_unlock(&syme->src->lock); |
572 | } | 646 | } |
573 | 647 | ||
574 | fprintf(stdout, "\n%s: ", msg); | 648 | fprintf(stdout, "\n%s: ", msg); |
@@ -584,7 +658,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg) | |||
584 | pthread_mutex_unlock(&active_symbols_lock); | 658 | pthread_mutex_unlock(&active_symbols_lock); |
585 | 659 | ||
586 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { | 660 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { |
587 | struct symbol *sym = (struct symbol *)(syme + 1); | 661 | struct symbol *sym = sym_entry__symbol(syme); |
588 | 662 | ||
589 | if (!strcmp(buf, sym->name)) { | 663 | if (!strcmp(buf, sym->name)) { |
590 | found = syme; | 664 | found = syme; |
@@ -608,7 +682,7 @@ static void print_mapped_keys(void) | |||
608 | char *name = NULL; | 682 | char *name = NULL; |
609 | 683 | ||
610 | if (sym_filter_entry) { | 684 | if (sym_filter_entry) { |
611 | struct symbol *sym = (struct symbol *)(sym_filter_entry+1); | 685 | struct symbol *sym = sym_entry__symbol(sym_filter_entry); |
612 | name = sym->name; | 686 | name = sym->name; |
613 | } | 687 | } |
614 | 688 | ||
@@ -621,7 +695,7 @@ static void print_mapped_keys(void) | |||
621 | 695 | ||
622 | fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); | 696 | fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); |
623 | 697 | ||
624 | if (vmlinux_name) { | 698 | if (symbol_conf.vmlinux_name) { |
625 | fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); | 699 | fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); |
626 | fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); | 700 | fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); |
627 | fprintf(stdout, "\t[S] stop annotation.\n"); | 701 | fprintf(stdout, "\t[S] stop annotation.\n"); |
@@ -630,6 +704,12 @@ static void print_mapped_keys(void) | |||
630 | if (nr_counters > 1) | 704 | if (nr_counters > 1) |
631 | fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0); | 705 | fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0); |
632 | 706 | ||
707 | fprintf(stdout, | ||
708 | "\t[K] hide kernel_symbols symbols. \t(%s)\n", | ||
709 | hide_kernel_symbols ? "yes" : "no"); | ||
710 | fprintf(stdout, | ||
711 | "\t[U] hide user symbols. \t(%s)\n", | ||
712 | hide_user_symbols ? "yes" : "no"); | ||
633 | fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0); | 713 | fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0); |
634 | fprintf(stdout, "\t[qQ] quit.\n"); | 714 | fprintf(stdout, "\t[qQ] quit.\n"); |
635 | } | 715 | } |
@@ -643,6 +723,8 @@ static int key_mapped(int c) | |||
643 | case 'z': | 723 | case 'z': |
644 | case 'q': | 724 | case 'q': |
645 | case 'Q': | 725 | case 'Q': |
726 | case 'K': | ||
727 | case 'U': | ||
646 | return 1; | 728 | return 1; |
647 | case 'E': | 729 | case 'E': |
648 | case 'w': | 730 | case 'w': |
@@ -650,7 +732,7 @@ static int key_mapped(int c) | |||
650 | case 'F': | 732 | case 'F': |
651 | case 's': | 733 | case 's': |
652 | case 'S': | 734 | case 'S': |
653 | return vmlinux_name ? 1 : 0; | 735 | return symbol_conf.vmlinux_name ? 1 : 0; |
654 | default: | 736 | default: |
655 | break; | 737 | break; |
656 | } | 738 | } |
@@ -691,6 +773,11 @@ static void handle_keypress(int c) | |||
691 | break; | 773 | break; |
692 | case 'e': | 774 | case 'e': |
693 | prompt_integer(&print_entries, "Enter display entries (lines)"); | 775 | prompt_integer(&print_entries, "Enter display entries (lines)"); |
776 | if (print_entries == 0) { | ||
777 | sig_winch_handler(SIGWINCH); | ||
778 | signal(SIGWINCH, sig_winch_handler); | ||
779 | } else | ||
780 | signal(SIGWINCH, SIG_DFL); | ||
694 | break; | 781 | break; |
695 | case 'E': | 782 | case 'E': |
696 | if (nr_counters > 1) { | 783 | if (nr_counters > 1) { |
@@ -715,9 +802,14 @@ static void handle_keypress(int c) | |||
715 | case 'F': | 802 | case 'F': |
716 | prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)"); | 803 | prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)"); |
717 | break; | 804 | break; |
805 | case 'K': | ||
806 | hide_kernel_symbols = !hide_kernel_symbols; | ||
807 | break; | ||
718 | case 'q': | 808 | case 'q': |
719 | case 'Q': | 809 | case 'Q': |
720 | printf("exiting.\n"); | 810 | printf("exiting.\n"); |
811 | if (dump_symtab) | ||
812 | dsos__fprintf(stderr); | ||
721 | exit(0); | 813 | exit(0); |
722 | case 's': | 814 | case 's': |
723 | prompt_symbol(&sym_filter_entry, "Enter details symbol"); | 815 | prompt_symbol(&sym_filter_entry, "Enter details symbol"); |
@@ -728,12 +820,15 @@ static void handle_keypress(int c) | |||
728 | else { | 820 | else { |
729 | struct sym_entry *syme = sym_filter_entry; | 821 | struct sym_entry *syme = sym_filter_entry; |
730 | 822 | ||
731 | pthread_mutex_lock(&syme->source_lock); | 823 | pthread_mutex_lock(&syme->src->lock); |
732 | sym_filter_entry = NULL; | 824 | sym_filter_entry = NULL; |
733 | __zero_source_counters(syme); | 825 | __zero_source_counters(syme); |
734 | pthread_mutex_unlock(&syme->source_lock); | 826 | pthread_mutex_unlock(&syme->src->lock); |
735 | } | 827 | } |
736 | break; | 828 | break; |
829 | case 'U': | ||
830 | hide_user_symbols = !hide_user_symbols; | ||
831 | break; | ||
737 | case 'w': | 832 | case 'w': |
738 | display_weighted = ~display_weighted; | 833 | display_weighted = ~display_weighted; |
739 | break; | 834 | break; |
@@ -790,7 +885,7 @@ static const char *skip_symbols[] = { | |||
790 | NULL | 885 | NULL |
791 | }; | 886 | }; |
792 | 887 | ||
793 | static int symbol_filter(struct dso *self, struct symbol *sym) | 888 | static int symbol_filter(struct map *map, struct symbol *sym) |
794 | { | 889 | { |
795 | struct sym_entry *syme; | 890 | struct sym_entry *syme; |
796 | const char *name = sym->name; | 891 | const char *name = sym->name; |
@@ -812,8 +907,9 @@ static int symbol_filter(struct dso *self, struct symbol *sym) | |||
812 | strstr(name, "_text_end")) | 907 | strstr(name, "_text_end")) |
813 | return 1; | 908 | return 1; |
814 | 909 | ||
815 | syme = dso__sym_priv(self, sym); | 910 | syme = symbol__priv(sym); |
816 | pthread_mutex_init(&syme->source_lock, NULL); | 911 | syme->map = map; |
912 | syme->src = NULL; | ||
817 | if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) | 913 | if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) |
818 | sym_filter_entry = syme; | 914 | sym_filter_entry = syme; |
819 | 915 | ||
@@ -824,75 +920,65 @@ static int symbol_filter(struct dso *self, struct symbol *sym) | |||
824 | } | 920 | } |
825 | } | 921 | } |
826 | 922 | ||
827 | return 0; | 923 | if (!syme->skip) |
828 | } | 924 | syme->name_len = strlen(sym->name); |
829 | |||
830 | static int parse_symbols(void) | ||
831 | { | ||
832 | struct rb_node *node; | ||
833 | struct symbol *sym; | ||
834 | int use_modules = vmlinux_name ? 1 : 0; | ||
835 | |||
836 | kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); | ||
837 | if (kernel_dso == NULL) | ||
838 | return -1; | ||
839 | |||
840 | if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0) | ||
841 | goto out_delete_dso; | ||
842 | |||
843 | node = rb_first(&kernel_dso->syms); | ||
844 | sym = rb_entry(node, struct symbol, rb_node); | ||
845 | min_ip = sym->start; | ||
846 | |||
847 | node = rb_last(&kernel_dso->syms); | ||
848 | sym = rb_entry(node, struct symbol, rb_node); | ||
849 | max_ip = sym->end; | ||
850 | |||
851 | if (dump_symtab) | ||
852 | dso__fprintf(kernel_dso, stderr); | ||
853 | 925 | ||
854 | return 0; | 926 | return 0; |
855 | |||
856 | out_delete_dso: | ||
857 | dso__delete(kernel_dso); | ||
858 | kernel_dso = NULL; | ||
859 | return -1; | ||
860 | } | 927 | } |
861 | 928 | ||
862 | /* | 929 | static void event__process_sample(const event_t *self, int counter) |
863 | * Binary search in the histogram table and record the hit: | ||
864 | */ | ||
865 | static void record_ip(u64 ip, int counter) | ||
866 | { | 930 | { |
867 | struct symbol *sym = dso__find_symbol(kernel_dso, ip); | 931 | u64 ip = self->ip.ip; |
868 | 932 | struct sym_entry *syme; | |
869 | if (sym != NULL) { | 933 | struct addr_location al; |
870 | struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); | 934 | u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
871 | 935 | ||
872 | if (!syme->skip) { | 936 | switch (origin) { |
873 | syme->count[counter]++; | 937 | case PERF_RECORD_MISC_USER: |
874 | record_precise_ip(syme, counter, ip); | 938 | if (hide_user_symbols) |
875 | pthread_mutex_lock(&active_symbols_lock); | ||
876 | if (list_empty(&syme->node) || !syme->node.next) | ||
877 | __list_insert_active_sym(syme); | ||
878 | pthread_mutex_unlock(&active_symbols_lock); | ||
879 | return; | 939 | return; |
880 | } | 940 | break; |
941 | case PERF_RECORD_MISC_KERNEL: | ||
942 | if (hide_kernel_symbols) | ||
943 | return; | ||
944 | break; | ||
945 | default: | ||
946 | return; | ||
881 | } | 947 | } |
882 | 948 | ||
883 | samples--; | 949 | if (event__preprocess_sample(self, &al, symbol_filter) < 0 || |
950 | al.sym == NULL) | ||
951 | return; | ||
952 | |||
953 | syme = symbol__priv(al.sym); | ||
954 | if (!syme->skip) { | ||
955 | syme->count[counter]++; | ||
956 | syme->origin = origin; | ||
957 | record_precise_ip(syme, counter, ip); | ||
958 | pthread_mutex_lock(&active_symbols_lock); | ||
959 | if (list_empty(&syme->node) || !syme->node.next) | ||
960 | __list_insert_active_sym(syme); | ||
961 | pthread_mutex_unlock(&active_symbols_lock); | ||
962 | if (origin == PERF_RECORD_MISC_USER) | ||
963 | ++userspace_samples; | ||
964 | ++samples; | ||
965 | } | ||
884 | } | 966 | } |
885 | 967 | ||
886 | static void process_event(u64 ip, int counter, int user) | 968 | static int event__process(event_t *event) |
887 | { | 969 | { |
888 | samples++; | 970 | switch (event->header.type) { |
889 | 971 | case PERF_RECORD_COMM: | |
890 | if (user) { | 972 | event__process_comm(event); |
891 | userspace_samples++; | 973 | break; |
892 | return; | 974 | case PERF_RECORD_MMAP: |
975 | event__process_mmap(event); | ||
976 | break; | ||
977 | default: | ||
978 | break; | ||
893 | } | 979 | } |
894 | 980 | ||
895 | record_ip(ip, counter); | 981 | return 0; |
896 | } | 982 | } |
897 | 983 | ||
898 | struct mmap_data { | 984 | struct mmap_data { |
@@ -913,8 +999,6 @@ static unsigned int mmap_read_head(struct mmap_data *md) | |||
913 | return head; | 999 | return head; |
914 | } | 1000 | } |
915 | 1001 | ||
916 | struct timeval last_read, this_read; | ||
917 | |||
918 | static void mmap_read_counter(struct mmap_data *md) | 1002 | static void mmap_read_counter(struct mmap_data *md) |
919 | { | 1003 | { |
920 | unsigned int head = mmap_read_head(md); | 1004 | unsigned int head = mmap_read_head(md); |
@@ -922,8 +1006,6 @@ static void mmap_read_counter(struct mmap_data *md) | |||
922 | unsigned char *data = md->base + page_size; | 1006 | unsigned char *data = md->base + page_size; |
923 | int diff; | 1007 | int diff; |
924 | 1008 | ||
925 | gettimeofday(&this_read, NULL); | ||
926 | |||
927 | /* | 1009 | /* |
928 | * If we're further behind than half the buffer, there's a chance | 1010 | * If we're further behind than half the buffer, there's a chance |
929 | * the writer will bite our tail and mess up the samples under us. | 1011 | * the writer will bite our tail and mess up the samples under us. |
@@ -934,14 +1016,7 @@ static void mmap_read_counter(struct mmap_data *md) | |||
934 | */ | 1016 | */ |
935 | diff = head - old; | 1017 | diff = head - old; |
936 | if (diff > md->mask / 2 || diff < 0) { | 1018 | if (diff > md->mask / 2 || diff < 0) { |
937 | struct timeval iv; | 1019 | fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); |
938 | unsigned long msecs; | ||
939 | |||
940 | timersub(&this_read, &last_read, &iv); | ||
941 | msecs = iv.tv_sec*1000 + iv.tv_usec/1000; | ||
942 | |||
943 | fprintf(stderr, "WARNING: failed to keep up with mmap data." | ||
944 | " Last read %lu msecs ago.\n", msecs); | ||
945 | 1020 | ||
946 | /* | 1021 | /* |
947 | * head points to a known good entry, start there. | 1022 | * head points to a known good entry, start there. |
@@ -949,8 +1024,6 @@ static void mmap_read_counter(struct mmap_data *md) | |||
949 | old = head; | 1024 | old = head; |
950 | } | 1025 | } |
951 | 1026 | ||
952 | last_read = this_read; | ||
953 | |||
954 | for (; old != head;) { | 1027 | for (; old != head;) { |
955 | event_t *event = (event_t *)&data[old & md->mask]; | 1028 | event_t *event = (event_t *)&data[old & md->mask]; |
956 | 1029 | ||
@@ -978,13 +1051,11 @@ static void mmap_read_counter(struct mmap_data *md) | |||
978 | event = &event_copy; | 1051 | event = &event_copy; |
979 | } | 1052 | } |
980 | 1053 | ||
1054 | if (event->header.type == PERF_RECORD_SAMPLE) | ||
1055 | event__process_sample(event, md->counter); | ||
1056 | else | ||
1057 | event__process(event); | ||
981 | old += size; | 1058 | old += size; |
982 | |||
983 | if (event->header.type == PERF_RECORD_SAMPLE) { | ||
984 | int user = | ||
985 | (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER; | ||
986 | process_event(event->ip.ip, md->counter, user); | ||
987 | } | ||
988 | } | 1059 | } |
989 | 1060 | ||
990 | md->prev = old; | 1061 | md->prev = old; |
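mmap_read_counter() above follows the standard perf ring-buffer consumption pattern: read the kernel-maintained head, walk fixed-size event records up to it, and remember where reading stopped in md->prev, falling back to head when the writer has lapped the reader. A hedged, simplified sketch of that loop (memory barriers and the wrap-around copy done via event_copy in the real code are omitted for brevity):

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <unistd.h>

    /* 'base' points at the perf_event_mmap_page followed by 'mask + 1'
     * bytes of data pages; 'prev' is the reader's last position. */
    static uint64_t consume(void *base, uint64_t mask, uint64_t prev)
    {
        struct perf_event_mmap_page *pc = base;
        unsigned char *data = (unsigned char *)base + sysconf(_SC_PAGE_SIZE);
        uint64_t head = pc->data_head;

        if (head - prev > mask / 2)     /* fell too far behind: resync */
            prev = head;

        while (prev != head) {
            struct perf_event_header *hdr =
                (struct perf_event_header *)&data[prev & mask];

            /* hand hdr to the sample/comm/mmap handlers here */
            prev += hdr->size;
        }
        return prev;                    /* becomes md->prev for the next read */
    }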
@@ -1018,8 +1089,15 @@ static void start_counter(int i, int counter) | |||
1018 | attr = attrs + counter; | 1089 | attr = attrs + counter; |
1019 | 1090 | ||
1020 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; | 1091 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; |
1021 | attr->freq = freq; | 1092 | |
1093 | if (freq) { | ||
1094 | attr->sample_type |= PERF_SAMPLE_PERIOD; | ||
1095 | attr->freq = 1; | ||
1096 | attr->sample_freq = freq; | ||
1097 | } | ||
1098 | |||
1022 | attr->inherit = (cpu < 0) && inherit; | 1099 | attr->inherit = (cpu < 0) && inherit; |
1100 | attr->mmap = 1; | ||
1023 | 1101 | ||
1024 | try_again: | 1102 | try_again: |
1025 | fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0); | 1103 | fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0); |
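The start_counter() change above is the mechanical side of the new frequency default: when freq is non-zero the event is opened in frequency mode (attr->freq = 1, attr->sample_freq = freq) and PERF_SAMPLE_PERIOD is added so each record carries the period the kernel actually chose; otherwise the attribute keeps the fixed-period path. A hedged sketch of the two ways of filling perf_event_attr, using the stock perf_event_open() ABI field names:

    #include <linux/perf_event.h>
    #include <string.h>

    /* Frequency mode: the kernel adjusts the period to hit ~freq samples/sec,
     * so PERF_SAMPLE_PERIOD is requested to see what it picked per sample. */
    static void attr_freq_mode(struct perf_event_attr *attr, unsigned int freq)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size        = sizeof(*attr);
        attr->type        = PERF_TYPE_HARDWARE;
        attr->config      = PERF_COUNT_HW_CPU_CYCLES;
        attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
        attr->freq        = 1;
        attr->sample_freq = freq;       /* e.g. 1000 for the new 1 kHz default */
        attr->mmap        = 1;          /* needed to resolve user-space maps */
    }

    /* Period mode: one sample every 'period' events, as with -c. */
    static void attr_period_mode(struct perf_event_attr *attr, __u64 period)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size          = sizeof(*attr);
        attr->type          = PERF_TYPE_HARDWARE;
        attr->config        = PERF_COUNT_HW_CPU_CYCLES;
        attr->sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
        attr->sample_period = period;
        attr->mmap          = 1;
    }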
@@ -1078,6 +1156,11 @@ static int __cmd_top(void) | |||
1078 | int i, counter; | 1156 | int i, counter; |
1079 | int ret; | 1157 | int ret; |
1080 | 1158 | ||
1159 | if (target_pid != -1) | ||
1160 | event__synthesize_thread(target_pid, event__process); | ||
1161 | else | ||
1162 | event__synthesize_threads(event__process); | ||
1163 | |||
1081 | for (i = 0; i < nr_cpus; i++) { | 1164 | for (i = 0; i < nr_cpus; i++) { |
1082 | group_fd = -1; | 1165 | group_fd = -1; |
1083 | for (counter = 0; counter < nr_counters; counter++) | 1166 | for (counter = 0; counter < nr_counters; counter++) |
@@ -1133,7 +1216,10 @@ static const struct option options[] = { | |||
1133 | "system-wide collection from all CPUs"), | 1216 | "system-wide collection from all CPUs"), |
1134 | OPT_INTEGER('C', "CPU", &profile_cpu, | 1217 | OPT_INTEGER('C', "CPU", &profile_cpu, |
1135 | "CPU to profile on"), | 1218 | "CPU to profile on"), |
1136 | OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), | 1219 | OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, |
1220 | "file", "vmlinux pathname"), | ||
1221 | OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols, | ||
1222 | "hide kernel symbols"), | ||
1137 | OPT_INTEGER('m', "mmap-pages", &mmap_pages, | 1223 | OPT_INTEGER('m', "mmap-pages", &mmap_pages, |
1138 | "number of mmap data pages"), | 1224 | "number of mmap data pages"), |
1139 | OPT_INTEGER('r', "realtime", &realtime_prio, | 1225 | OPT_INTEGER('r', "realtime", &realtime_prio, |
@@ -1156,6 +1242,8 @@ static const struct option options[] = { | |||
1156 | "profile at this frequency"), | 1242 | "profile at this frequency"), |
1157 | OPT_INTEGER('E', "entries", &print_entries, | 1243 | OPT_INTEGER('E', "entries", &print_entries, |
1158 | "display this many functions"), | 1244 | "display this many functions"), |
1245 | OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols, | ||
1246 | "hide user symbols"), | ||
1159 | OPT_BOOLEAN('v', "verbose", &verbose, | 1247 | OPT_BOOLEAN('v', "verbose", &verbose, |
1160 | "be more verbose (show counter open errors, etc)"), | 1248 | "be more verbose (show counter open errors, etc)"), |
1161 | OPT_END() | 1249 | OPT_END() |
@@ -1165,19 +1253,12 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1165 | { | 1253 | { |
1166 | int counter; | 1254 | int counter; |
1167 | 1255 | ||
1168 | symbol__init(); | ||
1169 | |||
1170 | page_size = sysconf(_SC_PAGE_SIZE); | 1256 | page_size = sysconf(_SC_PAGE_SIZE); |
1171 | 1257 | ||
1172 | argc = parse_options(argc, argv, options, top_usage, 0); | 1258 | argc = parse_options(argc, argv, options, top_usage, 0); |
1173 | if (argc) | 1259 | if (argc) |
1174 | usage_with_options(top_usage, options); | 1260 | usage_with_options(top_usage, options); |
1175 | 1261 | ||
1176 | if (freq) { | ||
1177 | default_interval = freq; | ||
1178 | freq = 1; | ||
1179 | } | ||
1180 | |||
1181 | /* CPU and PID are mutually exclusive */ | 1262 | /* CPU and PID are mutually exclusive */ |
1182 | if (target_pid != -1 && profile_cpu != -1) { | 1263 | if (target_pid != -1 && profile_cpu != -1) { |
1183 | printf("WARNING: PID switch overriding CPU\n"); | 1264 | printf("WARNING: PID switch overriding CPU\n"); |
@@ -1188,13 +1269,31 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1188 | if (!nr_counters) | 1269 | if (!nr_counters) |
1189 | nr_counters = 1; | 1270 | nr_counters = 1; |
1190 | 1271 | ||
1272 | symbol_conf.priv_size = (sizeof(struct sym_entry) + | ||
1273 | (nr_counters + 1) * sizeof(unsigned long)); | ||
1274 | if (symbol_conf.vmlinux_name == NULL) | ||
1275 | symbol_conf.try_vmlinux_path = true; | ||
1276 | if (symbol__init(&symbol_conf) < 0) | ||
1277 | return -1; | ||
1278 | |||
1191 | if (delay_secs < 1) | 1279 | if (delay_secs < 1) |
1192 | delay_secs = 1; | 1280 | delay_secs = 1; |
1193 | 1281 | ||
1194 | parse_symbols(); | ||
1195 | parse_source(sym_filter_entry); | 1282 | parse_source(sym_filter_entry); |
1196 | 1283 | ||
1197 | /* | 1284 | /* |
1285 | * User specified count overrides default frequency. | ||
1286 | */ | ||
1287 | if (default_interval) | ||
1288 | freq = 0; | ||
1289 | else if (freq) { | ||
1290 | default_interval = freq; | ||
1291 | } else { | ||
1292 | fprintf(stderr, "frequency and count are zero, aborting\n"); | ||
1293 | exit(EXIT_FAILURE); | ||
1294 | } | ||
1295 | |||
1296 | /* | ||
1198 | * Fill in the ones not specifically initialized via -c: | 1297 | * Fill in the ones not specifically initialized via -c: |
1199 | */ | 1298 | */ |
1200 | for (counter = 0; counter < nr_counters; counter++) { | 1299 | for (counter = 0; counter < nr_counters; counter++) { |
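Two things are worth calling out in the cmd_top() hunk above. First, per-symbol state now lives in a private area in front of each struct symbol: symbol_conf.priv_size reserves room for struct sym_entry plus one counter slot per event (the count[0] flexible array), and sym_entry__symbol()/symbol__priv() translate between the two views. Second, an explicit -c count now takes precedence over the 1 kHz frequency default, and if both end up zero the tool aborts instead of opening unusable events. A hedged, generic sketch of the private-area idiom (an illustration of the layout, not perf's exact allocator):

    #include <stdlib.h>
    #include <string.h>

    /* perf top sets priv_size = sizeof(struct sym_entry) +
     * (nr_counters + 1) * sizeof(unsigned long), so each symbol carries its
     * sym_entry plus one count slot per counter in front of it. */
    struct symbol { unsigned long start, end; char name[0]; };

    static struct symbol *symbol_new(const char *name, size_t priv_size)
    {
        /* one block: [ private area | struct symbol | name ] */
        void *blob = calloc(1, priv_size + sizeof(struct symbol) +
                               strlen(name) + 1);
        struct symbol *sym;

        if (blob == NULL)
            return NULL;
        sym = (struct symbol *)((char *)blob + priv_size);
        strcpy(sym->name, name);
        return sym;
    }

    /* symbol__priv() and sym_entry__symbol() are just the two directions of
     * this pointer adjustment. */
    static void *symbol_priv(struct symbol *sym, size_t priv_size)
    {
        return (char *)sym - priv_size;
    }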
@@ -1211,5 +1310,11 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1211 | if (target_pid != -1 || profile_cpu != -1) | 1310 | if (target_pid != -1 || profile_cpu != -1) |
1212 | nr_cpus = 1; | 1311 | nr_cpus = 1; |
1213 | 1312 | ||
1313 | get_term_dimensions(&winsize); | ||
1314 | if (print_entries == 0) { | ||
1315 | update_print_entries(&winsize); | ||
1316 | signal(SIGWINCH, sig_winch_handler); | ||
1317 | } | ||
1318 | |||
1214 | return __cmd_top(); | 1319 | return __cmd_top(); |
1215 | } | 1320 | } |
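The tail of cmd_top() above ties the display height to the terminal: when the user does not pass -E, print_entries is derived from the current window size and a SIGWINCH handler keeps it in sync as the terminal is resized. get_term_dimensions() prefers the LINES/COLUMNS environment, falls back to TIOCGWINSZ, and finally to 80x25. The same pattern, reduced to a hedged standalone sketch (the real handler also recomputes print_entries via update_print_entries()):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static struct winsize winsize;

    static void get_term_dimensions(struct winsize *ws)
    {
        char *s = getenv("LINES");

        if (s && (ws->ws_row = atoi(s))) {
            s = getenv("COLUMNS");
            if (s && (ws->ws_col = atoi(s)))
                return;
        }
        if (ioctl(STDOUT_FILENO, TIOCGWINSZ, ws) == 0 && ws->ws_row && ws->ws_col)
            return;
        ws->ws_row = 25;                /* conservative fallback */
        ws->ws_col = 80;
    }

    static void sig_winch_handler(int sig __attribute__((unused)))
    {
        get_term_dimensions(&winsize);  /* rows drive how many entries fit */
    }

    int main(void)
    {
        get_term_dimensions(&winsize);
        signal(SIGWINCH, sig_winch_handler);
        printf("%hu rows x %hu cols\n", winsize.ws_row, winsize.ws_col);
        pause();                        /* resize the terminal to retrigger */
        return 0;
    }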
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 0c5e4f72f2ba..abb914aa7be6 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -5,66 +5,73 @@ | |||
5 | #include "util/symbol.h" | 5 | #include "util/symbol.h" |
6 | #include "util/thread.h" | 6 | #include "util/thread.h" |
7 | #include "util/header.h" | 7 | #include "util/header.h" |
8 | #include "util/exec_cmd.h" | ||
9 | #include "util/trace-event.h" | ||
8 | 10 | ||
9 | #include "util/parse-options.h" | 11 | static char const *script_name; |
12 | static char const *generate_script_lang; | ||
10 | 13 | ||
11 | #include "perf.h" | 14 | static int default_start_script(const char *script __attribute((unused))) |
12 | #include "util/debug.h" | 15 | { |
16 | return 0; | ||
17 | } | ||
13 | 18 | ||
14 | #include "util/trace-event.h" | 19 | static int default_stop_script(void) |
20 | { | ||
21 | return 0; | ||
22 | } | ||
15 | 23 | ||
16 | static char const *input_name = "perf.data"; | 24 | static int default_generate_script(const char *outfile __attribute ((unused))) |
17 | static int input; | 25 | { |
18 | static unsigned long page_size; | 26 | return 0; |
19 | static unsigned long mmap_window = 32; | 27 | } |
20 | 28 | ||
21 | static unsigned long total = 0; | 29 | static struct scripting_ops default_scripting_ops = { |
22 | static unsigned long total_comm = 0; | 30 | .start_script = default_start_script, |
31 | .stop_script = default_stop_script, | ||
32 | .process_event = print_event, | ||
33 | .generate_script = default_generate_script, | ||
34 | }; | ||
35 | |||
36 | static struct scripting_ops *scripting_ops; | ||
23 | 37 | ||
24 | static struct rb_root threads; | 38 | static void setup_scripting(void) |
25 | static struct thread *last_match; | 39 | { |
40 | /* make sure PERF_EXEC_PATH is set for scripts */ | ||
41 | perf_set_argv_exec_path(perf_exec_path()); | ||
26 | 42 | ||
27 | static struct perf_header *header; | 43 | setup_perl_scripting(); |
28 | static u64 sample_type; | ||
29 | 44 | ||
45 | scripting_ops = &default_scripting_ops; | ||
46 | } | ||
30 | 47 | ||
31 | static int | 48 | static int cleanup_scripting(void) |
32 | process_comm_event(event_t *event, unsigned long offset, unsigned long head) | ||
33 | { | 49 | { |
34 | struct thread *thread; | 50 | return scripting_ops->stop_script(); |
51 | } | ||
35 | 52 | ||
36 | thread = threads__findnew(event->comm.pid, &threads, &last_match); | 53 | #include "util/parse-options.h" |
37 | 54 | ||
38 | dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", | 55 | #include "perf.h" |
39 | (void *)(offset + head), | 56 | #include "util/debug.h" |
40 | (void *)(long)(event->header.size), | ||
41 | event->comm.comm, event->comm.pid); | ||
42 | 57 | ||
43 | if (thread == NULL || | 58 | #include "util/trace-event.h" |
44 | thread__set_comm(thread, event->comm.comm)) { | 59 | #include "util/data_map.h" |
45 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | 60 | #include "util/exec_cmd.h" |
46 | return -1; | ||
47 | } | ||
48 | total_comm++; | ||
49 | 61 | ||
50 | return 0; | 62 | static char const *input_name = "perf.data"; |
51 | } | ||
52 | 63 | ||
53 | static int | 64 | static struct perf_header *header; |
54 | process_sample_event(event_t *event, unsigned long offset, unsigned long head) | 65 | static u64 sample_type; |
66 | |||
67 | static int process_sample_event(event_t *event) | ||
55 | { | 68 | { |
56 | char level; | ||
57 | int show = 0; | ||
58 | struct dso *dso = NULL; | ||
59 | struct thread *thread; | ||
60 | u64 ip = event->ip.ip; | 69 | u64 ip = event->ip.ip; |
61 | u64 timestamp = -1; | 70 | u64 timestamp = -1; |
62 | u32 cpu = -1; | 71 | u32 cpu = -1; |
63 | u64 period = 1; | 72 | u64 period = 1; |
64 | void *more_data = event->ip.__more_data; | 73 | void *more_data = event->ip.__more_data; |
65 | int cpumode; | 74 | struct thread *thread = threads__findnew(event->ip.pid); |
66 | |||
67 | thread = threads__findnew(event->ip.pid, &threads, &last_match); | ||
68 | 75 | ||
69 | if (sample_type & PERF_SAMPLE_TIME) { | 76 | if (sample_type & PERF_SAMPLE_TIME) { |
70 | timestamp = *(u64 *)more_data; | 77 | timestamp = *(u64 *)more_data; |
@@ -82,45 +89,19 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
82 | more_data += sizeof(u64); | 89 | more_data += sizeof(u64); |
83 | } | 90 | } |
84 | 91 | ||
85 | dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", | 92 | dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", |
86 | (void *)(offset + head), | ||
87 | (void *)(long)(event->header.size), | ||
88 | event->header.misc, | 93 | event->header.misc, |
89 | event->ip.pid, event->ip.tid, | 94 | event->ip.pid, event->ip.tid, |
90 | (void *)(long)ip, | 95 | (void *)(long)ip, |
91 | (long long)period); | 96 | (long long)period); |
92 | 97 | ||
93 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
94 | |||
95 | if (thread == NULL) { | 98 | if (thread == NULL) { |
96 | eprintf("problem processing %d event, skipping it.\n", | 99 | pr_debug("problem processing %d event, skipping it.\n", |
97 | event->header.type); | 100 | event->header.type); |
98 | return -1; | 101 | return -1; |
99 | } | 102 | } |
100 | 103 | ||
101 | cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 104 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); |
102 | |||
103 | if (cpumode == PERF_RECORD_MISC_KERNEL) { | ||
104 | show = SHOW_KERNEL; | ||
105 | level = 'k'; | ||
106 | |||
107 | dso = kernel_dso; | ||
108 | |||
109 | dump_printf(" ...... dso: %s\n", dso->name); | ||
110 | |||
111 | } else if (cpumode == PERF_RECORD_MISC_USER) { | ||
112 | |||
113 | show = SHOW_USER; | ||
114 | level = '.'; | ||
115 | |||
116 | } else { | ||
117 | show = SHOW_HV; | ||
118 | level = 'H'; | ||
119 | |||
120 | dso = hypervisor_dso; | ||
121 | |||
122 | dump_printf(" ...... dso: [hypervisor]\n"); | ||
123 | } | ||
124 | 105 | ||
125 | if (sample_type & PERF_SAMPLE_RAW) { | 106 | if (sample_type & PERF_SAMPLE_RAW) { |
126 | struct { | 107 | struct { |
@@ -133,128 +114,189 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
133 | * field, although it should be the same than this perf | 114 | * field, although it should be the same than this perf |
134 | * event pid | 115 | * event pid |
135 | */ | 116 | */ |
136 | print_event(cpu, raw->data, raw->size, timestamp, thread->comm); | 117 | scripting_ops->process_event(cpu, raw->data, raw->size, |
118 | timestamp, thread->comm); | ||
137 | } | 119 | } |
138 | total += period; | 120 | event__stats.total += period; |
139 | 121 | ||
140 | return 0; | 122 | return 0; |
141 | } | 123 | } |
142 | 124 | ||
143 | static int | 125 | static int sample_type_check(u64 type) |
144 | process_event(event_t *event, unsigned long offset, unsigned long head) | ||
145 | { | 126 | { |
146 | trace_event(event); | 127 | sample_type = type; |
147 | |||
148 | switch (event->header.type) { | ||
149 | case PERF_RECORD_MMAP ... PERF_RECORD_LOST: | ||
150 | return 0; | ||
151 | |||
152 | case PERF_RECORD_COMM: | ||
153 | return process_comm_event(event, offset, head); | ||
154 | |||
155 | case PERF_RECORD_EXIT ... PERF_RECORD_READ: | ||
156 | return 0; | ||
157 | |||
158 | case PERF_RECORD_SAMPLE: | ||
159 | return process_sample_event(event, offset, head); | ||
160 | 128 | ||
161 | case PERF_RECORD_MAX: | 129 | if (!(sample_type & PERF_SAMPLE_RAW)) { |
162 | default: | 130 | fprintf(stderr, |
131 | "No trace sample to read. Did you call perf record " | ||
132 | "without -R?"); | ||
163 | return -1; | 133 | return -1; |
164 | } | 134 | } |
165 | 135 | ||
166 | return 0; | 136 | return 0; |
167 | } | 137 | } |
168 | 138 | ||
139 | static struct perf_file_handler file_handler = { | ||
140 | .process_sample_event = process_sample_event, | ||
141 | .process_comm_event = event__process_comm, | ||
142 | .sample_type_check = sample_type_check, | ||
143 | }; | ||
144 | |||
169 | static int __cmd_trace(void) | 145 | static int __cmd_trace(void) |
170 | { | 146 | { |
171 | int ret, rc = EXIT_FAILURE; | 147 | register_idle_thread(); |
172 | unsigned long offset = 0; | 148 | register_perf_file_handler(&file_handler); |
173 | unsigned long head = 0; | ||
174 | struct stat perf_stat; | ||
175 | event_t *event; | ||
176 | uint32_t size; | ||
177 | char *buf; | ||
178 | |||
179 | trace_report(); | ||
180 | register_idle_thread(&threads, &last_match); | ||
181 | |||
182 | input = open(input_name, O_RDONLY); | ||
183 | if (input < 0) { | ||
184 | perror("failed to open file"); | ||
185 | exit(-1); | ||
186 | } | ||
187 | 149 | ||
188 | ret = fstat(input, &perf_stat); | 150 | return mmap_dispatch_perf_file(&header, input_name, |
189 | if (ret < 0) { | 151 | 0, 0, &event__cwdlen, &event__cwd); |
190 | perror("failed to stat file"); | 152 | } |
191 | exit(-1); | ||
192 | } | ||
193 | 153 | ||
194 | if (!perf_stat.st_size) { | 154 | struct script_spec { |
195 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | 155 | struct list_head node; |
196 | exit(0); | 156 | struct scripting_ops *ops; |
197 | } | 157 | char spec[0]; |
198 | header = perf_header__read(input); | 158 | }; |
199 | head = header->data_offset; | ||
200 | sample_type = perf_header__sample_type(header); | ||
201 | 159 | ||
202 | if (!(sample_type & PERF_SAMPLE_RAW)) | 160 | LIST_HEAD(script_specs); |
203 | die("No trace sample to read. Did you call perf record " | ||
204 | "without -R?"); | ||
205 | 161 | ||
206 | if (load_kernel() < 0) { | 162 | static struct script_spec *script_spec__new(const char *spec, |
207 | perror("failed to load kernel symbols"); | 163 | struct scripting_ops *ops) |
208 | return EXIT_FAILURE; | 164 | { |
209 | } | 165 | struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1); |
210 | 166 | ||
211 | remap: | 167 | if (s != NULL) { |
212 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | 168 | strcpy(s->spec, spec); |
213 | MAP_SHARED, input, offset); | 169 | s->ops = ops; |
214 | if (buf == MAP_FAILED) { | ||
215 | perror("failed to mmap file"); | ||
216 | exit(-1); | ||
217 | } | 170 | } |
218 | 171 | ||
219 | more: | 172 | return s; |
220 | event = (event_t *)(buf + head); | 173 | } |
221 | 174 | ||
222 | if (head + event->header.size >= page_size * mmap_window) { | 175 | static void script_spec__delete(struct script_spec *s) |
223 | unsigned long shift = page_size * (head / page_size); | 176 | { |
224 | int res; | 177 | free(s->spec); |
178 | free(s); | ||
179 | } | ||
225 | 180 | ||
226 | res = munmap(buf, page_size * mmap_window); | 181 | static void script_spec__add(struct script_spec *s) |
227 | assert(res == 0); | 182 | { |
183 | list_add_tail(&s->node, &script_specs); | ||
184 | } | ||
228 | 185 | ||
229 | offset += shift; | 186 | static struct script_spec *script_spec__find(const char *spec) |
230 | head -= shift; | 187 | { |
231 | goto remap; | 188 | struct script_spec *s; |
232 | } | ||
233 | 189 | ||
234 | size = event->header.size; | 190 | list_for_each_entry(s, &script_specs, node) |
191 | if (strcasecmp(s->spec, spec) == 0) | ||
192 | return s; | ||
193 | return NULL; | ||
194 | } | ||
235 | 195 | ||
236 | if (!size || process_event(event, offset, head) < 0) { | 196 | static struct script_spec *script_spec__findnew(const char *spec, |
197 | struct scripting_ops *ops) | ||
198 | { | ||
199 | struct script_spec *s = script_spec__find(spec); | ||
237 | 200 | ||
238 | /* | 201 | if (s) |
239 | * assume we lost track of the stream, check alignment, and | 202 | return s; |
240 | * increment a single u64 in the hope to catch on again 'soon'. | ||
241 | */ | ||
242 | 203 | ||
243 | if (unlikely(head & 7)) | 204 | s = script_spec__new(spec, ops); |
244 | head &= ~7ULL; | 205 | if (!s) |
206 | goto out_delete_spec; | ||
245 | 207 | ||
246 | size = 8; | 208 | script_spec__add(s); |
247 | } | 209 | |
210 | return s; | ||
248 | 211 | ||
249 | head += size; | 212 | out_delete_spec: |
213 | script_spec__delete(s); | ||
214 | |||
215 | return NULL; | ||
216 | } | ||
250 | 217 | ||
251 | if (offset + head < (unsigned long)perf_stat.st_size) | 218 | int script_spec_register(const char *spec, struct scripting_ops *ops) |
252 | goto more; | 219 | { |
220 | struct script_spec *s; | ||
221 | |||
222 | s = script_spec__find(spec); | ||
223 | if (s) | ||
224 | return -1; | ||
253 | 225 | ||
254 | rc = EXIT_SUCCESS; | 226 | s = script_spec__findnew(spec, ops); |
255 | close(input); | 227 | if (!s) |
228 | return -1; | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static struct scripting_ops *script_spec__lookup(const char *spec) | ||
234 | { | ||
235 | struct script_spec *s = script_spec__find(spec); | ||
236 | if (!s) | ||
237 | return NULL; | ||
256 | 238 | ||
257 | return rc; | 239 | return s->ops; |
240 | } | ||
241 | |||
242 | static void list_available_languages(void) | ||
243 | { | ||
244 | struct script_spec *s; | ||
245 | |||
246 | fprintf(stderr, "\n"); | ||
247 | fprintf(stderr, "Scripting language extensions (used in " | ||
248 | "perf trace -s [spec:]script.[spec]):\n\n"); | ||
249 | |||
250 | list_for_each_entry(s, &script_specs, node) | ||
251 | fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name); | ||
252 | |||
253 | fprintf(stderr, "\n"); | ||
254 | } | ||
255 | |||
256 | static int parse_scriptname(const struct option *opt __used, | ||
257 | const char *str, int unset __used) | ||
258 | { | ||
259 | char spec[PATH_MAX]; | ||
260 | const char *script, *ext; | ||
261 | int len; | ||
262 | |||
263 | if (strcmp(str, "list") == 0) { | ||
264 | list_available_languages(); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | script = strchr(str, ':'); | ||
269 | if (script) { | ||
270 | len = script - str; | ||
271 | if (len >= PATH_MAX) { | ||
272 | fprintf(stderr, "invalid language specifier"); | ||
273 | return -1; | ||
274 | } | ||
275 | strncpy(spec, str, len); | ||
276 | spec[len] = '\0'; | ||
277 | scripting_ops = script_spec__lookup(spec); | ||
278 | if (!scripting_ops) { | ||
279 | fprintf(stderr, "invalid language specifier"); | ||
280 | return -1; | ||
281 | } | ||
282 | script++; | ||
283 | } else { | ||
284 | script = str; | ||
285 | ext = strchr(script, '.'); | ||
286 | if (!ext) { | ||
287 | fprintf(stderr, "invalid script extension"); | ||
288 | return -1; | ||
289 | } | ||
290 | scripting_ops = script_spec__lookup(++ext); | ||
291 | if (!scripting_ops) { | ||
292 | fprintf(stderr, "invalid script extension"); | ||
293 | return -1; | ||
294 | } | ||
295 | } | ||
296 | |||
297 | script_name = strdup(script); | ||
298 | |||
299 | return 0; | ||
258 | } | 300 | } |
259 | 301 | ||
260 | static const char * const annotate_usage[] = { | 302 | static const char * const annotate_usage[] = { |
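The script_spec list above is how language backends plug into perf trace: a backend calls script_spec_register() with a spec string and its scripting_ops, and parse_scriptname() later resolves either an explicit "lang:script" prefix or the script's file extension back to those ops. As a hedged illustration only (the concrete specs, ops, and function names belong to the backend itself, e.g. the Perl support set up by setup_perl_scripting(), and are assumptions here), registration might look roughly like:

    /* Hypothetical backend registration; perf's internal headers are assumed
     * to declare struct scripting_ops and script_spec_register() as shown in
     * the hunk above, and the perl_* handlers are illustrative names. */
    static struct scripting_ops perl_scripting_ops = {
        .name            = "Perl",
        .start_script    = perl_start_script,
        .stop_script     = perl_stop_script,
        .process_event   = perl_process_event,
        .generate_script = perl_generate_script,
    };

    void setup_perl_scripting(void)
    {
        /* after this, both "perf trace -s foo.pl" and "perf trace -s pl:foo"
         * resolve to these ops via script_spec__lookup() */
        if (script_spec_register("Perl", &perl_scripting_ops) ||
            script_spec_register("pl", &perl_scripting_ops))
            fprintf(stderr, "Perl scripting backend registration failed\n");
    }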
@@ -267,13 +309,24 @@ static const struct option options[] = { | |||
267 | "dump raw trace in ASCII"), | 309 | "dump raw trace in ASCII"), |
268 | OPT_BOOLEAN('v', "verbose", &verbose, | 310 | OPT_BOOLEAN('v', "verbose", &verbose, |
269 | "be more verbose (show symbol address, etc)"), | 311 | "be more verbose (show symbol address, etc)"), |
312 | OPT_BOOLEAN('l', "latency", &latency_format, | ||
313 | "show latency attributes (irqs/preemption disabled, etc)"), | ||
314 | OPT_CALLBACK('s', "script", NULL, "name", | ||
315 | "script file name (lang:script name, script name, or *)", | ||
316 | parse_scriptname), | ||
317 | OPT_STRING('g', "gen-script", &generate_script_lang, "lang", | ||
318 | "generate perf-trace.xx script in specified language"), | ||
319 | |||
270 | OPT_END() | 320 | OPT_END() |
271 | }; | 321 | }; |
272 | 322 | ||
273 | int cmd_trace(int argc, const char **argv, const char *prefix __used) | 323 | int cmd_trace(int argc, const char **argv, const char *prefix __used) |
274 | { | 324 | { |
275 | symbol__init(); | 325 | int err; |
276 | page_size = getpagesize(); | 326 | |
327 | symbol__init(0); | ||
328 | |||
329 | setup_scripting(); | ||
277 | 330 | ||
278 | argc = parse_options(argc, argv, options, annotate_usage, 0); | 331 | argc = parse_options(argc, argv, options, annotate_usage, 0); |
279 | if (argc) { | 332 | if (argc) { |
@@ -287,5 +340,50 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) | |||
287 | 340 | ||
288 | setup_pager(); | 341 | setup_pager(); |
289 | 342 | ||
290 | return __cmd_trace(); | 343 | if (generate_script_lang) { |
344 | struct stat perf_stat; | ||
345 | |||
346 | int input = open(input_name, O_RDONLY); | ||
347 | if (input < 0) { | ||
348 | perror("failed to open file"); | ||
349 | exit(-1); | ||
350 | } | ||
351 | |||
352 | err = fstat(input, &perf_stat); | ||
353 | if (err < 0) { | ||
354 | perror("failed to stat file"); | ||
355 | exit(-1); | ||
356 | } | ||
357 | |||
358 | if (!perf_stat.st_size) { | ||
359 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | ||
360 | exit(0); | ||
361 | } | ||
362 | |||
363 | scripting_ops = script_spec__lookup(generate_script_lang); | ||
364 | if (!scripting_ops) { | ||
365 | fprintf(stderr, "invalid language specifier"); | ||
366 | return -1; | ||
367 | } | ||
368 | |||
369 | header = perf_header__new(); | ||
370 | if (header == NULL) | ||
371 | return -1; | ||
372 | |||
373 | perf_header__read(header, input); | ||
374 | err = scripting_ops->generate_script("perf-trace"); | ||
375 | goto out; | ||
376 | } | ||
377 | |||
378 | if (script_name) { | ||
379 | err = scripting_ops->start_script(script_name); | ||
380 | if (err) | ||
381 | goto out; | ||
382 | } | ||
383 | |||
384 | err = __cmd_trace(); | ||
385 | |||
386 | cleanup_scripting(); | ||
387 | out: | ||
388 | return err; | ||
291 | } | 389 | } |
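
A minimal usage sketch of the scripting options added above (the event data is assumed to come from an earlier 'perf record', as in the wrapper scripts introduced later in this patch; the generated file name follows the "perf-trace.xx" pattern from the option help text):

    perf trace -s list                  # list the available scripting languages
    perf trace -s perl:rw-by-file.pl    # explicit language specifier (lang:script)
    perf trace -s rw-by-file.pl         # language inferred from the .pl extension
    perf trace -g perl                  # generate a perf-trace.pl handler skeleton from perf.data
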
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index e11d8d231c3b..a3d8bf65f26c 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h | |||
@@ -15,6 +15,8 @@ extern int read_line_with_nul(char *buf, int size, FILE *file); | |||
15 | extern int check_pager_config(const char *cmd); | 15 | extern int check_pager_config(const char *cmd); |
16 | 16 | ||
17 | extern int cmd_annotate(int argc, const char **argv, const char *prefix); | 17 | extern int cmd_annotate(int argc, const char **argv, const char *prefix); |
18 | extern int cmd_bench(int argc, const char **argv, const char *prefix); | ||
19 | extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); | ||
18 | extern int cmd_help(int argc, const char **argv, const char *prefix); | 20 | extern int cmd_help(int argc, const char **argv, const char *prefix); |
19 | extern int cmd_sched(int argc, const char **argv, const char *prefix); | 21 | extern int cmd_sched(int argc, const char **argv, const char *prefix); |
20 | extern int cmd_list(int argc, const char **argv, const char *prefix); | 22 | extern int cmd_list(int argc, const char **argv, const char *prefix); |
@@ -25,5 +27,7 @@ extern int cmd_timechart(int argc, const char **argv, const char *prefix); | |||
25 | extern int cmd_top(int argc, const char **argv, const char *prefix); | 27 | extern int cmd_top(int argc, const char **argv, const char *prefix); |
26 | extern int cmd_trace(int argc, const char **argv, const char *prefix); | 28 | extern int cmd_trace(int argc, const char **argv, const char *prefix); |
27 | extern int cmd_version(int argc, const char **argv, const char *prefix); | 29 | extern int cmd_version(int argc, const char **argv, const char *prefix); |
30 | extern int cmd_probe(int argc, const char **argv, const char *prefix); | ||
31 | extern int cmd_kmem(int argc, const char **argv, const char *prefix); | ||
28 | 32 | ||
29 | #endif | 33 | #endif |
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index 00326e230d87..02b09ea17a3e 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt | |||
@@ -3,6 +3,8 @@ | |||
3 | # command name category [deprecated] [common] | 3 | # command name category [deprecated] [common] |
4 | # | 4 | # |
5 | perf-annotate mainporcelain common | 5 | perf-annotate mainporcelain common |
6 | perf-bench mainporcelain common | ||
7 | perf-buildid-list mainporcelain common | ||
6 | perf-list mainporcelain common | 8 | perf-list mainporcelain common |
7 | perf-sched mainporcelain common | 9 | perf-sched mainporcelain common |
8 | perf-record mainporcelain common | 10 | perf-record mainporcelain common |
@@ -11,3 +13,5 @@ perf-stat mainporcelain common | |||
11 | perf-timechart mainporcelain common | 13 | perf-timechart mainporcelain common |
12 | perf-top mainporcelain common | 14 | perf-top mainporcelain common |
13 | perf-trace mainporcelain common | 15 | perf-trace mainporcelain common |
16 | perf-probe mainporcelain common | ||
17 | perf-kmem mainporcelain common | ||
diff --git a/tools/perf/design.txt b/tools/perf/design.txt index fdd42a824c98..f000c30877ac 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt | |||
@@ -137,6 +137,8 @@ enum sw_event_ids { | |||
137 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 137 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
138 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 138 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
139 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 139 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
140 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
141 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
140 | }; | 142 | }; |
141 | 143 | ||
142 | Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event | 144 | Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event |
diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 19fc7feb9d59..cf64049bc9bd 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include "util/run-command.h" | 14 | #include "util/run-command.h" |
15 | #include "util/parse-events.h" | 15 | #include "util/parse-events.h" |
16 | #include "util/string.h" | 16 | #include "util/string.h" |
17 | #include "util/debugfs.h" | ||
17 | 18 | ||
18 | const char perf_usage_string[] = | 19 | const char perf_usage_string[] = |
19 | "perf [--version] [--help] COMMAND [ARGS]"; | 20 | "perf [--version] [--help] COMMAND [ARGS]"; |
@@ -89,8 +90,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) | |||
89 | /* | 90 | /* |
90 | * Check remaining flags. | 91 | * Check remaining flags. |
91 | */ | 92 | */ |
92 | if (!prefixcmp(cmd, "--exec-path")) { | 93 | if (!prefixcmp(cmd, CMD_EXEC_PATH)) { |
93 | cmd += 11; | 94 | cmd += strlen(CMD_EXEC_PATH); |
94 | if (*cmd == '=') | 95 | if (*cmd == '=') |
95 | perf_set_argv_exec_path(cmd + 1); | 96 | perf_set_argv_exec_path(cmd + 1); |
96 | else { | 97 | else { |
@@ -117,8 +118,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) | |||
117 | (*argv)++; | 118 | (*argv)++; |
118 | (*argc)--; | 119 | (*argc)--; |
119 | handled++; | 120 | handled++; |
120 | } else if (!prefixcmp(cmd, "--perf-dir=")) { | 121 | } else if (!prefixcmp(cmd, CMD_PERF_DIR)) { |
121 | setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1); | 122 | setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1); |
122 | if (envchanged) | 123 | if (envchanged) |
123 | *envchanged = 1; | 124 | *envchanged = 1; |
124 | } else if (!strcmp(cmd, "--work-tree")) { | 125 | } else if (!strcmp(cmd, "--work-tree")) { |
@@ -131,8 +132,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) | |||
131 | *envchanged = 1; | 132 | *envchanged = 1; |
132 | (*argv)++; | 133 | (*argv)++; |
133 | (*argc)--; | 134 | (*argc)--; |
134 | } else if (!prefixcmp(cmd, "--work-tree=")) { | 135 | } else if (!prefixcmp(cmd, CMD_WORK_TREE)) { |
135 | setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1); | 136 | setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1); |
136 | if (envchanged) | 137 | if (envchanged) |
137 | *envchanged = 1; | 138 | *envchanged = 1; |
138 | } else if (!strcmp(cmd, "--debugfs-dir")) { | 139 | } else if (!strcmp(cmd, "--debugfs-dir")) { |
@@ -146,8 +147,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) | |||
146 | *envchanged = 1; | 147 | *envchanged = 1; |
147 | (*argv)++; | 148 | (*argv)++; |
148 | (*argc)--; | 149 | (*argc)--; |
149 | } else if (!prefixcmp(cmd, "--debugfs-dir=")) { | 150 | } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { |
150 | strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN); | 151 | strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); |
151 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; | 152 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; |
152 | if (envchanged) | 153 | if (envchanged) |
153 | *envchanged = 1; | 154 | *envchanged = 1; |
@@ -284,17 +285,21 @@ static void handle_internal_command(int argc, const char **argv) | |||
284 | { | 285 | { |
285 | const char *cmd = argv[0]; | 286 | const char *cmd = argv[0]; |
286 | static struct cmd_struct commands[] = { | 287 | static struct cmd_struct commands[] = { |
287 | { "help", cmd_help, 0 }, | 288 | { "buildid-list", cmd_buildid_list, 0 }, |
288 | { "list", cmd_list, 0 }, | 289 | { "help", cmd_help, 0 }, |
289 | { "record", cmd_record, 0 }, | 290 | { "list", cmd_list, 0 }, |
290 | { "report", cmd_report, 0 }, | 291 | { "record", cmd_record, 0 }, |
291 | { "stat", cmd_stat, 0 }, | 292 | { "report", cmd_report, 0 }, |
292 | { "timechart", cmd_timechart, 0 }, | 293 | { "bench", cmd_bench, 0 }, |
293 | { "top", cmd_top, 0 }, | 294 | { "stat", cmd_stat, 0 }, |
294 | { "annotate", cmd_annotate, 0 }, | 295 | { "timechart", cmd_timechart, 0 }, |
295 | { "version", cmd_version, 0 }, | 296 | { "top", cmd_top, 0 }, |
296 | { "trace", cmd_trace, 0 }, | 297 | { "annotate", cmd_annotate, 0 }, |
297 | { "sched", cmd_sched, 0 }, | 298 | { "version", cmd_version, 0 }, |
299 | { "trace", cmd_trace, 0 }, | ||
300 | { "sched", cmd_sched, 0 }, | ||
301 | { "probe", cmd_probe, 0 }, | ||
302 | { "kmem", cmd_kmem, 0 }, | ||
298 | }; | 303 | }; |
299 | unsigned int i; | 304 | unsigned int i; |
300 | static const char ext[] = STRIP_EXTENSION; | 305 | static const char ext[] = STRIP_EXTENSION; |
@@ -382,45 +387,12 @@ static int run_argv(int *argcp, const char ***argv) | |||
382 | /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ | 387 | /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ |
383 | static void get_debugfs_mntpt(void) | 388 | static void get_debugfs_mntpt(void) |
384 | { | 389 | { |
385 | FILE *file; | 390 | const char *path = debugfs_find_mountpoint(); |
386 | char fs_type[100]; | ||
387 | char debugfs[MAXPATHLEN]; | ||
388 | 391 | ||
389 | /* | 392 | if (path) |
390 | * try the standard location | 393 | strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); |
391 | */ | 394 | else |
392 | if (valid_debugfs_mount("/sys/kernel/debug/") == 0) { | 395 | debugfs_mntpt[0] = '\0'; |
393 | strcpy(debugfs_mntpt, "/sys/kernel/debug/"); | ||
394 | return; | ||
395 | } | ||
396 | |||
397 | /* | ||
398 | * try the sane location | ||
399 | */ | ||
400 | if (valid_debugfs_mount("/debug/") == 0) { | ||
401 | strcpy(debugfs_mntpt, "/debug/"); | ||
402 | return; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * give up and parse /proc/mounts | ||
407 | */ | ||
408 | file = fopen("/proc/mounts", "r"); | ||
409 | if (file == NULL) | ||
410 | return; | ||
411 | |||
412 | while (fscanf(file, "%*s %" | ||
413 | STR(MAXPATHLEN) | ||
414 | "s %99s %*s %*d %*d\n", | ||
415 | debugfs, fs_type) == 2) { | ||
416 | if (strcmp(fs_type, "debugfs") == 0) | ||
417 | break; | ||
418 | } | ||
419 | fclose(file); | ||
420 | if (strcmp(fs_type, "debugfs") == 0) { | ||
421 | strncpy(debugfs_mntpt, debugfs, MAXPATHLEN); | ||
422 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; | ||
423 | } | ||
424 | } | 396 | } |
425 | 397 | ||
426 | int main(int argc, const char **argv) | 398 | int main(int argc, const char **argv) |
diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 8cc4623afd6f..454d5d55f32d 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h | |||
@@ -47,6 +47,18 @@ | |||
47 | #define cpu_relax() asm volatile("":::"memory") | 47 | #define cpu_relax() asm volatile("":::"memory") |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef __alpha__ | ||
51 | #include "../../arch/alpha/include/asm/unistd.h" | ||
52 | #define rmb() asm volatile("mb" ::: "memory") | ||
53 | #define cpu_relax() asm volatile("" ::: "memory") | ||
54 | #endif | ||
55 | |||
56 | #ifdef __ia64__ | ||
57 | #include "../../arch/ia64/include/asm/unistd.h" | ||
58 | #define rmb() asm volatile ("mf" ::: "memory") | ||
59 | #define cpu_relax() asm volatile ("hint @pause" ::: "memory") | ||
60 | #endif | ||
61 | |||
50 | #include <time.h> | 62 | #include <time.h> |
51 | #include <unistd.h> | 63 | #include <unistd.h> |
52 | #include <sys/types.h> | 64 | #include <sys/types.h> |
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c new file mode 100644 index 000000000000..af78d9a52a7d --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the | ||
3 | * contents of Context.xs. Do not edit this file, edit Context.xs instead. | ||
4 | * | ||
5 | * ANY CHANGES MADE HERE WILL BE LOST! | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #line 1 "Context.xs" | ||
10 | /* | ||
11 | * Context.xs. XS interfaces for perf trace. | ||
12 | * | ||
13 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | * This program is distributed in the hope that it will be useful, | ||
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
23 | * GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with this program; if not, write to the Free Software | ||
27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #include "EXTERN.h" | ||
32 | #include "perl.h" | ||
33 | #include "XSUB.h" | ||
34 | #include "../../../util/trace-event-perl.h" | ||
35 | |||
36 | #ifndef PERL_UNUSED_VAR | ||
37 | # define PERL_UNUSED_VAR(var) if (0) var = var | ||
38 | #endif | ||
39 | |||
40 | #line 41 "Context.c" | ||
41 | |||
42 | XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */ | ||
43 | XS(XS_Perf__Trace__Context_common_pc) | ||
44 | { | ||
45 | #ifdef dVAR | ||
46 | dVAR; dXSARGS; | ||
47 | #else | ||
48 | dXSARGS; | ||
49 | #endif | ||
50 | if (items != 1) | ||
51 | Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); | ||
52 | PERL_UNUSED_VAR(cv); /* -W */ | ||
53 | { | ||
54 | struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); | ||
55 | int RETVAL; | ||
56 | dXSTARG; | ||
57 | |||
58 | RETVAL = common_pc(context); | ||
59 | XSprePUSH; PUSHi((IV)RETVAL); | ||
60 | } | ||
61 | XSRETURN(1); | ||
62 | } | ||
63 | |||
64 | |||
65 | XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */ | ||
66 | XS(XS_Perf__Trace__Context_common_flags) | ||
67 | { | ||
68 | #ifdef dVAR | ||
69 | dVAR; dXSARGS; | ||
70 | #else | ||
71 | dXSARGS; | ||
72 | #endif | ||
73 | if (items != 1) | ||
74 | Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context"); | ||
75 | PERL_UNUSED_VAR(cv); /* -W */ | ||
76 | { | ||
77 | struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); | ||
78 | int RETVAL; | ||
79 | dXSTARG; | ||
80 | |||
81 | RETVAL = common_flags(context); | ||
82 | XSprePUSH; PUSHi((IV)RETVAL); | ||
83 | } | ||
84 | XSRETURN(1); | ||
85 | } | ||
86 | |||
87 | |||
88 | XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */ | ||
89 | XS(XS_Perf__Trace__Context_common_lock_depth) | ||
90 | { | ||
91 | #ifdef dVAR | ||
92 | dVAR; dXSARGS; | ||
93 | #else | ||
94 | dXSARGS; | ||
95 | #endif | ||
96 | if (items != 1) | ||
97 | Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context"); | ||
98 | PERL_UNUSED_VAR(cv); /* -W */ | ||
99 | { | ||
100 | struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); | ||
101 | int RETVAL; | ||
102 | dXSTARG; | ||
103 | |||
104 | RETVAL = common_lock_depth(context); | ||
105 | XSprePUSH; PUSHi((IV)RETVAL); | ||
106 | } | ||
107 | XSRETURN(1); | ||
108 | } | ||
109 | |||
110 | #ifdef __cplusplus | ||
111 | extern "C" | ||
112 | #endif | ||
113 | XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */ | ||
114 | XS(boot_Perf__Trace__Context) | ||
115 | { | ||
116 | #ifdef dVAR | ||
117 | dVAR; dXSARGS; | ||
118 | #else | ||
119 | dXSARGS; | ||
120 | #endif | ||
121 | const char* file = __FILE__; | ||
122 | |||
123 | PERL_UNUSED_VAR(cv); /* -W */ | ||
124 | PERL_UNUSED_VAR(items); /* -W */ | ||
125 | XS_VERSION_BOOTCHECK ; | ||
126 | |||
127 | newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$"); | ||
128 | newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$"); | ||
129 | newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$"); | ||
130 | if (PL_unitcheckav) | ||
131 | call_list(PL_scopestack_ix, PL_unitcheckav); | ||
132 | XSRETURN_YES; | ||
133 | } | ||
134 | |||
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs new file mode 100644 index 000000000000..fb78006c165e --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Context.xs. XS interfaces for perf trace. | ||
3 | * | ||
4 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "EXTERN.h" | ||
23 | #include "perl.h" | ||
24 | #include "XSUB.h" | ||
25 | #include "../../../util/trace-event-perl.h" | ||
26 | |||
27 | MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context | ||
28 | PROTOTYPES: ENABLE | ||
29 | |||
30 | int | ||
31 | common_pc(context) | ||
32 | struct scripting_context * context | ||
33 | |||
34 | int | ||
35 | common_flags(context) | ||
36 | struct scripting_context * context | ||
37 | |||
38 | int | ||
39 | common_lock_depth(context) | ||
40 | struct scripting_context * context | ||
41 | |||
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL new file mode 100644 index 000000000000..decdeb0f6789 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL | |||
@@ -0,0 +1,17 @@ | |||
1 | use 5.010000; | ||
2 | use ExtUtils::MakeMaker; | ||
3 | # See lib/ExtUtils/MakeMaker.pm for details of how to influence | ||
4 | # the contents of the Makefile that is written. | ||
5 | WriteMakefile( | ||
6 | NAME => 'Perf::Trace::Context', | ||
7 | VERSION_FROM => 'lib/Perf/Trace/Context.pm', # finds $VERSION | ||
8 | PREREQ_PM => {}, # e.g., Module::Name => 1.1 | ||
9 | ($] >= 5.005 ? ## Add these new keywords supported since 5.005 | ||
10 | (ABSTRACT_FROM => 'lib/Perf/Trace/Context.pm', # retrieve abstract from module | ||
11 | AUTHOR => 'Tom Zanussi <tzanussi@gmail.com>') : ()), | ||
12 | LIBS => [''], # e.g., '-lm' | ||
13 | DEFINE => '-I ../..', # e.g., '-DHAVE_SOMETHING' | ||
14 | INC => '-I.', # e.g., '-I. -I/usr/include/other' | ||
15 | # Un-comment this if you add C files to link with later: | ||
16 | OBJECT => 'Context.o', # link all the C files too | ||
17 | ); | ||
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/README b/tools/perf/scripts/perl/Perf-Trace-Util/README new file mode 100644 index 000000000000..9a9707630791 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/README | |||
@@ -0,0 +1,59 @@ | |||
1 | Perf-Trace-Util version 0.01 | ||
2 | ============================ | ||
3 | |||
4 | This module contains utility functions for use with perf trace. | ||
5 | |||
6 | Core.pm and Util.pm are pure Perl modules; Core.pm contains routines | ||
7 | that the core perf support for Perl calls on and should always be | ||
8 | 'used', while Util.pm contains useful but optional utility functions | ||
9 | that scripts may want to use. Context.pm contains the Perl->C | ||
10 | interface that allows scripts to access data in the embedding perf | ||
11 | executable; scripts wishing to do that should 'use Context.pm'. | ||
12 | |||
13 | The Perl->C perf interface is completely driven by Context.xs. If you | ||
14 | want to add new Perl functions that end up accessing C data in the | ||
15 | perf executable, you add descriptions of the new functions here. | ||
16 | scripting_context is a pointer to the perf data in the perf executable | ||
17 | that you want to access - it's passed as the second parameter, | ||
18 | $context, to all handler functions. | ||
19 | |||
20 | After you do that: | ||
21 | |||
22 | perl Makefile.PL # to create a Makefile for the next step | ||
23 | make # to create Context.c | ||
24 | |||
25 | edit Context.c to add const to the char* file = __FILE__ line in | ||
26 | XS(boot_Perf__Trace__Context) to silence a warning/error. | ||
27 | |||
28 | You can delete the Makefile, object files and anything else that was | ||
29 | generated (e.g. blib and the shared library), except, of course, for | ||
30 | Context.c. | ||
31 | |||
32 | You should then be able to run the normal perf make as usual. | ||
33 | |||
34 | INSTALLATION | ||
35 | |||
36 | Building perf with perf trace Perl scripting should install this | ||
37 | module in the right place. | ||
38 | |||
39 | You should make sure libperl and ExtUtils/Embed.pm are installed first | ||
40 | e.g. apt-get install libperl-dev or yum install perl-ExtUtils-Embed. | ||
41 | |||
42 | DEPENDENCIES | ||
43 | |||
44 | This module requires these other modules and libraries: | ||
45 | |||
46 | None | ||
47 | |||
48 | COPYRIGHT AND LICENCE | ||
49 | |||
50 | Copyright (C) 2009 by Tom Zanussi <tzanussi@gmail.com> | ||
51 | |||
52 | This library is free software; you can redistribute it and/or modify | ||
53 | it under the same terms as Perl itself, either Perl version 5.10.0 or, | ||
54 | at your option, any later version of Perl 5 you may have available. | ||
55 | |||
56 | Alternatively, this software may be distributed under the terms of the | ||
57 | GNU General Public License ("GPL") version 2 as published by the Free | ||
58 | Software Foundation. | ||
59 | |||
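
To make the Perl->C interface described in this README concrete, here is a minimal handler sketch in the style of the check-perf-trace.pl script added later in this patch; the irq::softirq_entry event and its $vec field are only examples, and the point is that every handler receives $context as its second argument and can hand it back to perf through the Context functions:

    use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
    use lib "./Perf-Trace-Util/lib";
    use Perf::Trace::Core;       # trace_flag_str() and friends
    use Perf::Trace::Context;    # common_pc(), common_flags(), common_lock_depth()

    sub irq::softirq_entry
    {
        my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
            $common_pid, $common_comm, $vec) = @_;

        # fields not passed as handler arguments are fetched via $context
        printf("preempt=%d flags=%s lock_depth=%d vec=%u\n",
               common_pc($context), trace_flag_str(common_flags($context)),
               common_lock_depth($context), $vec);
    }
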
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm new file mode 100644 index 000000000000..6c7f3659cb17 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm | |||
@@ -0,0 +1,55 @@ | |||
1 | package Perf::Trace::Context; | ||
2 | |||
3 | use 5.010000; | ||
4 | use strict; | ||
5 | use warnings; | ||
6 | |||
7 | require Exporter; | ||
8 | |||
9 | our @ISA = qw(Exporter); | ||
10 | |||
11 | our %EXPORT_TAGS = ( 'all' => [ qw( | ||
12 | ) ] ); | ||
13 | |||
14 | our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); | ||
15 | |||
16 | our @EXPORT = qw( | ||
17 | common_pc common_flags common_lock_depth | ||
18 | ); | ||
19 | |||
20 | our $VERSION = '0.01'; | ||
21 | |||
22 | require XSLoader; | ||
23 | XSLoader::load('Perf::Trace::Context', $VERSION); | ||
24 | |||
25 | 1; | ||
26 | __END__ | ||
27 | =head1 NAME | ||
28 | |||
29 | Perf::Trace::Context - Perl extension for accessing functions in perf. | ||
30 | |||
31 | =head1 SYNOPSIS | ||
32 | |||
33 | use Perf::Trace::Context; | ||
34 | |||
35 | =head1 SEE ALSO | ||
36 | |||
37 | Perf (trace) documentation | ||
38 | |||
39 | =head1 AUTHOR | ||
40 | |||
41 | Tom Zanussi, E<lt>tzanussi@gmail.comE<gt> | ||
42 | |||
43 | =head1 COPYRIGHT AND LICENSE | ||
44 | |||
45 | Copyright (C) 2009 by Tom Zanussi | ||
46 | |||
47 | This library is free software; you can redistribute it and/or modify | ||
48 | it under the same terms as Perl itself, either Perl version 5.10.0 or, | ||
49 | at your option, any later version of Perl 5 you may have available. | ||
50 | |||
51 | Alternatively, this software may be distributed under the terms of the | ||
52 | GNU General Public License ("GPL") version 2 as published by the Free | ||
53 | Software Foundation. | ||
54 | |||
55 | =cut | ||
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm new file mode 100644 index 000000000000..9df376a9f629 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm | |||
@@ -0,0 +1,192 @@ | |||
1 | package Perf::Trace::Core; | ||
2 | |||
3 | use 5.010000; | ||
4 | use strict; | ||
5 | use warnings; | ||
6 | |||
7 | require Exporter; | ||
8 | |||
9 | our @ISA = qw(Exporter); | ||
10 | |||
11 | our %EXPORT_TAGS = ( 'all' => [ qw( | ||
12 | ) ] ); | ||
13 | |||
14 | our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); | ||
15 | |||
16 | our @EXPORT = qw( | ||
17 | define_flag_field define_flag_value flag_str dump_flag_fields | ||
18 | define_symbolic_field define_symbolic_value symbol_str dump_symbolic_fields | ||
19 | trace_flag_str | ||
20 | ); | ||
21 | |||
22 | our $VERSION = '0.01'; | ||
23 | |||
24 | my %trace_flags = (0x00 => "NONE", | ||
25 | 0x01 => "IRQS_OFF", | ||
26 | 0x02 => "IRQS_NOSUPPORT", | ||
27 | 0x04 => "NEED_RESCHED", | ||
28 | 0x08 => "HARDIRQ", | ||
29 | 0x10 => "SOFTIRQ"); | ||
30 | |||
31 | sub trace_flag_str | ||
32 | { | ||
33 | my ($value) = @_; | ||
34 | |||
35 | my $string; | ||
36 | |||
37 | my $print_delim = 0; | ||
38 | |||
39 | foreach my $idx (sort {$a <=> $b} keys %trace_flags) { | ||
40 | if (!$value && !$idx) { | ||
41 | $string .= "NONE"; | ||
42 | last; | ||
43 | } | ||
44 | |||
45 | if ($idx && ($value & $idx) == $idx) { | ||
46 | if ($print_delim) { | ||
47 | $string .= " | "; | ||
48 | } | ||
49 | $string .= "$trace_flags{$idx}"; | ||
50 | $print_delim = 1; | ||
51 | $value &= ~$idx; | ||
52 | } | ||
53 | } | ||
54 | |||
55 | return $string; | ||
56 | } | ||
57 | |||
58 | my %flag_fields; | ||
59 | my %symbolic_fields; | ||
60 | |||
61 | sub flag_str | ||
62 | { | ||
63 | my ($event_name, $field_name, $value) = @_; | ||
64 | |||
65 | my $string; | ||
66 | |||
67 | if ($flag_fields{$event_name}{$field_name}) { | ||
68 | my $print_delim = 0; | ||
69 | foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event_name}{$field_name}{"values"}}) { | ||
70 | if (!$value && !$idx) { | ||
71 | $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}"; | ||
72 | last; | ||
73 | } | ||
74 | if ($idx && ($value & $idx) == $idx) { | ||
75 | if ($print_delim && $flag_fields{$event_name}{$field_name}{'delim'}) { | ||
76 | $string .= " $flag_fields{$event_name}{$field_name}{'delim'} "; | ||
77 | } | ||
78 | $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}"; | ||
79 | $print_delim = 1; | ||
80 | $value &= ~$idx; | ||
81 | } | ||
82 | } | ||
83 | } | ||
84 | |||
85 | return $string; | ||
86 | } | ||
87 | |||
88 | sub define_flag_field | ||
89 | { | ||
90 | my ($event_name, $field_name, $delim) = @_; | ||
91 | |||
92 | $flag_fields{$event_name}{$field_name}{"delim"} = $delim; | ||
93 | } | ||
94 | |||
95 | sub define_flag_value | ||
96 | { | ||
97 | my ($event_name, $field_name, $value, $field_str) = @_; | ||
98 | |||
99 | $flag_fields{$event_name}{$field_name}{"values"}{$value} = $field_str; | ||
100 | } | ||
101 | |||
102 | sub dump_flag_fields | ||
103 | { | ||
104 | for my $event (keys %flag_fields) { | ||
105 | print "event $event:\n"; | ||
106 | for my $field (keys %{$flag_fields{$event}}) { | ||
107 | print " field: $field:\n"; | ||
108 | print " delim: $flag_fields{$event}{$field}{'delim'}\n"; | ||
109 | foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) { | ||
110 | print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n"; | ||
111 | } | ||
112 | } | ||
113 | } | ||
114 | } | ||
115 | |||
116 | sub symbol_str | ||
117 | { | ||
118 | my ($event_name, $field_name, $value) = @_; | ||
119 | |||
120 | if ($symbolic_fields{$event_name}{$field_name}) { | ||
121 | foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event_name}{$field_name}{"values"}}) { | ||
122 | if (!$value && !$idx) { | ||
123 | return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}"; | ||
124 | last; | ||
125 | } | ||
126 | if ($value == $idx) { | ||
127 | return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}"; | ||
128 | } | ||
129 | } | ||
130 | } | ||
131 | |||
132 | return undef; | ||
133 | } | ||
134 | |||
135 | sub define_symbolic_field | ||
136 | { | ||
137 | my ($event_name, $field_name) = @_; | ||
138 | |||
139 | # nothing to do, really | ||
140 | } | ||
141 | |||
142 | sub define_symbolic_value | ||
143 | { | ||
144 | my ($event_name, $field_name, $value, $field_str) = @_; | ||
145 | |||
146 | $symbolic_fields{$event_name}{$field_name}{"values"}{$value} = $field_str; | ||
147 | } | ||
148 | |||
149 | sub dump_symbolic_fields | ||
150 | { | ||
151 | for my $event (keys %symbolic_fields) { | ||
152 | print "event $event:\n"; | ||
153 | for my $field (keys %{$symbolic_fields{$event}}) { | ||
154 | print " field: $field:\n"; | ||
155 | foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) { | ||
156 | print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n"; | ||
157 | } | ||
158 | } | ||
159 | } | ||
160 | } | ||
161 | |||
162 | 1; | ||
163 | __END__ | ||
164 | =head1 NAME | ||
165 | |||
166 | Perf::Trace::Core - Perl extension for perf trace | ||
167 | |||
168 | =head1 SYNOPSIS | ||
169 | |||
170 | use Perf::Trace::Core | ||
171 | |||
172 | =head1 SEE ALSO | ||
173 | |||
174 | Perf (trace) documentation | ||
175 | |||
176 | =head1 AUTHOR | ||
177 | |||
178 | Tom Zanussi, E<lt>tzanussi@gmail.comE<gt> | ||
179 | |||
180 | =head1 COPYRIGHT AND LICENSE | ||
181 | |||
182 | Copyright (C) 2009 by Tom Zanussi | ||
183 | |||
184 | This library is free software; you can redistribute it and/or modify | ||
185 | it under the same terms as Perl itself, either Perl version 5.10.0 or, | ||
186 | at your option, any later version of Perl 5 you may have available. | ||
187 | |||
188 | Alternatively, this software may be distributed under the terms of the | ||
189 | GNU General Public License ("GPL") version 2 as published by the Free | ||
190 | Software Foundation. | ||
191 | |||
192 | =cut | ||
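
The define_flag_*()/flag_str() helpers above are normally seeded by generated handler scripts; the following self-contained sketch, with a made-up "myevent" and made-up flag values, shows the intended flow:

    use lib "./Perf-Trace-Util/lib";
    use Perf::Trace::Core;

    # declare how the hypothetical myevent "flags" field should be rendered
    define_flag_field("myevent", "flags", "|");
    define_flag_value("myevent", "flags", 0x1, "READ");
    define_flag_value("myevent", "flags", 0x2, "WRITE");

    print flag_str("myevent", "flags", 0x3), "\n";   # prints "READ | WRITE"
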
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm new file mode 100644 index 000000000000..052f132ced24 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm | |||
@@ -0,0 +1,88 @@ | |||
1 | package Perf::Trace::Util; | ||
2 | |||
3 | use 5.010000; | ||
4 | use strict; | ||
5 | use warnings; | ||
6 | |||
7 | require Exporter; | ||
8 | |||
9 | our @ISA = qw(Exporter); | ||
10 | |||
11 | our %EXPORT_TAGS = ( 'all' => [ qw( | ||
12 | ) ] ); | ||
13 | |||
14 | our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); | ||
15 | |||
16 | our @EXPORT = qw( | ||
17 | avg nsecs nsecs_secs nsecs_nsecs nsecs_str | ||
18 | ); | ||
19 | |||
20 | our $VERSION = '0.01'; | ||
21 | |||
22 | sub avg | ||
23 | { | ||
24 | my ($total, $n) = @_; | ||
25 | |||
26 | return $total / $n; | ||
27 | } | ||
28 | |||
29 | my $NSECS_PER_SEC = 1000000000; | ||
30 | |||
31 | sub nsecs | ||
32 | { | ||
33 | my ($secs, $nsecs) = @_; | ||
34 | |||
35 | return $secs * $NSECS_PER_SEC + $nsecs; | ||
36 | } | ||
37 | |||
38 | sub nsecs_secs { | ||
39 | my ($nsecs) = @_; | ||
40 | |||
41 | return $nsecs / $NSECS_PER_SEC; | ||
42 | } | ||
43 | |||
44 | sub nsecs_nsecs { | ||
45 | my ($nsecs) = @_; | ||
46 | |||
47 | return $nsecs % $NSECS_PER_SEC; | ||
48 | } | ||
49 | |||
50 | sub nsecs_str { | ||
51 | my ($nsecs) = @_; | ||
52 | |||
53 | my $str = sprintf("%5u.%09u", nsecs_secs($nsecs), nsecs_nsecs($nsecs)); | ||
54 | |||
55 | return $str; | ||
56 | } | ||
57 | |||
58 | 1; | ||
59 | __END__ | ||
60 | =head1 NAME | ||
61 | |||
62 | Perf::Trace::Util - Perl extension for perf trace | ||
63 | |||
64 | =head1 SYNOPSIS | ||
65 | |||
66 | use Perf::Trace::Util; | ||
67 | |||
68 | =head1 SEE ALSO | ||
69 | |||
70 | Perf (trace) documentation | ||
71 | |||
72 | =head1 AUTHOR | ||
73 | |||
74 | Tom Zanussi, E<lt>tzanussi@gmail.comE<gt> | ||
75 | |||
76 | =head1 COPYRIGHT AND LICENSE | ||
77 | |||
78 | Copyright (C) 2009 by Tom Zanussi | ||
79 | |||
80 | This library is free software; you can redistribute it and/or modify | ||
81 | it under the same terms as Perl itself, either Perl version 5.10.0 or, | ||
82 | at your option, any later version of Perl 5 you may have available. | ||
83 | |||
84 | Alternatively, this software may be distributed under the terms of the | ||
85 | GNU General Public License ("GPL") version 2 as published by the Free | ||
86 | Software Foundation. | ||
87 | |||
88 | =cut | ||
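
The time helpers above are what the latency scripts in this patch build on; a small sketch with arbitrary sample values (note that %u truncates the fractional seconds returned by nsecs_secs()):

    use lib "./Perf-Trace-Util/lib";
    use Perf::Trace::Util;

    my $ts = nsecs(5, 123456789);          # 5 s + 123456789 ns -> 5123456789 ns
    printf("ns=%u secs=%u avg=%u\n",
           $ts, nsecs_secs($ts), avg($ts, 10));
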
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/typemap b/tools/perf/scripts/perl/Perf-Trace-Util/typemap new file mode 100644 index 000000000000..840836804aa7 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/typemap | |||
@@ -0,0 +1 @@ | |||
struct scripting_context * T_PTR | |||
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record new file mode 100644 index 000000000000..c7ec5de2f535 --- /dev/null +++ b/tools/perf/scripts/perl/bin/check-perf-trace-record | |||
@@ -0,0 +1,7 @@ | |||
1 | #!/bin/bash | ||
2 | perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry | ||
3 | |||
4 | |||
5 | |||
6 | |||
7 | |||
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-report b/tools/perf/scripts/perl/bin/check-perf-trace-report new file mode 100644 index 000000000000..89948b015020 --- /dev/null +++ b/tools/perf/scripts/perl/bin/check-perf-trace-report | |||
@@ -0,0 +1,5 @@ | |||
1 | #!/bin/bash | ||
2 | perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl | ||
3 | |||
4 | |||
5 | |||
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record new file mode 100644 index 000000000000..b25056ebf963 --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-file-record | |||
@@ -0,0 +1,2 @@ | |||
1 | #!/bin/bash | ||
2 | perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_enter_write | ||
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report new file mode 100644 index 000000000000..f5dcf9cb5bd2 --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-file-report | |||
@@ -0,0 +1,5 @@ | |||
1 | #!/bin/bash | ||
2 | perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl | ||
3 | |||
4 | |||
5 | |||
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record new file mode 100644 index 000000000000..8903979c5b6c --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-pid-record | |||
@@ -0,0 +1,2 @@ | |||
1 | #!/bin/bash | ||
2 | perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write | ||
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report new file mode 100644 index 000000000000..cea16f78a3a2 --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-pid-report | |||
@@ -0,0 +1,5 @@ | |||
1 | #!/bin/bash | ||
2 | perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl | ||
3 | |||
4 | |||
5 | |||
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record new file mode 100644 index 000000000000..6abedda911a4 --- /dev/null +++ b/tools/perf/scripts/perl/bin/wakeup-latency-record | |||
@@ -0,0 +1,6 @@ | |||
1 | #!/bin/bash | ||
2 | perf record -c 1 -f -a -M -R -e sched:sched_switch -e sched:sched_wakeup | ||
3 | |||
4 | |||
5 | |||
6 | |||
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report new file mode 100644 index 000000000000..85769dc456eb --- /dev/null +++ b/tools/perf/scripts/perl/bin/wakeup-latency-report | |||
@@ -0,0 +1,5 @@ | |||
1 | #!/bin/bash | ||
2 | perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl | ||
3 | |||
4 | |||
5 | |||
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record new file mode 100644 index 000000000000..fce6637b19ba --- /dev/null +++ b/tools/perf/scripts/perl/bin/workqueue-stats-record | |||
@@ -0,0 +1,2 @@ | |||
1 | #!/bin/bash | ||
2 | perf record -c 1 -f -a -M -R -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion | ||
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report new file mode 100644 index 000000000000..aa68435be926 --- /dev/null +++ b/tools/perf/scripts/perl/bin/workqueue-stats-report | |||
@@ -0,0 +1,6 @@ | |||
1 | #!/bin/bash | ||
2 | perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl | ||
3 | |||
4 | |||
5 | |||
6 | |||
diff --git a/tools/perf/scripts/perl/check-perf-trace.pl b/tools/perf/scripts/perl/check-perf-trace.pl new file mode 100644 index 000000000000..4e7dc0a407a5 --- /dev/null +++ b/tools/perf/scripts/perl/check-perf-trace.pl | |||
@@ -0,0 +1,106 @@ | |||
1 | # perf trace event handlers, generated by perf trace -g perl | ||
2 | # (c) 2009, Tom Zanussi <tzanussi@gmail.com> | ||
3 | # Licensed under the terms of the GNU GPL License version 2 | ||
4 | |||
5 | # This script tests basic functionality such as flag and symbol | ||
6 | # strings, common_xxx() calls back into perf, begin, end, unhandled | ||
7 | # events, etc. Basically, if this script runs successfully and | ||
8 | # displays expected results, perl scripting support should be ok. | ||
9 | |||
10 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; | ||
11 | use lib "./Perf-Trace-Util/lib"; | ||
12 | use Perf::Trace::Core; | ||
13 | use Perf::Trace::Context; | ||
14 | use Perf::Trace::Util; | ||
15 | |||
16 | sub trace_begin | ||
17 | { | ||
18 | print "trace_begin\n"; | ||
19 | } | ||
20 | |||
21 | sub trace_end | ||
22 | { | ||
23 | print "trace_end\n"; | ||
24 | |||
25 | print_unhandled(); | ||
26 | } | ||
27 | |||
28 | sub irq::softirq_entry | ||
29 | { | ||
30 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
31 | $common_pid, $common_comm, | ||
32 | $vec) = @_; | ||
33 | |||
34 | print_header($event_name, $common_cpu, $common_secs, $common_nsecs, | ||
35 | $common_pid, $common_comm); | ||
36 | |||
37 | print_uncommon($context); | ||
38 | |||
39 | printf("vec=%s\n", | ||
40 | symbol_str("irq::softirq_entry", "vec", $vec)); | ||
41 | } | ||
42 | |||
43 | sub kmem::kmalloc | ||
44 | { | ||
45 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
46 | $common_pid, $common_comm, | ||
47 | $call_site, $ptr, $bytes_req, $bytes_alloc, | ||
48 | $gfp_flags) = @_; | ||
49 | |||
50 | print_header($event_name, $common_cpu, $common_secs, $common_nsecs, | ||
51 | $common_pid, $common_comm); | ||
52 | |||
53 | print_uncommon($context); | ||
54 | |||
55 | printf("call_site=%p, ptr=%p, bytes_req=%u, bytes_alloc=%u, ". | ||
56 | "gfp_flags=%s\n", | ||
57 | $call_site, $ptr, $bytes_req, $bytes_alloc, | ||
58 | |||
59 | flag_str("kmem::kmalloc", "gfp_flags", $gfp_flags)); | ||
60 | } | ||
61 | |||
62 | # print trace fields not included in handler args | ||
63 | sub print_uncommon | ||
64 | { | ||
65 | my ($context) = @_; | ||
66 | |||
67 | printf("common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, ", | ||
68 | common_pc($context), trace_flag_str(common_flags($context)), | ||
69 | common_lock_depth($context)); | ||
70 | |||
71 | } | ||
72 | |||
73 | my %unhandled; | ||
74 | |||
75 | sub print_unhandled | ||
76 | { | ||
77 | if ((scalar keys %unhandled) == 0) { | ||
78 | return; | ||
79 | } | ||
80 | |||
81 | print "\nunhandled events:\n\n"; | ||
82 | |||
83 | printf("%-40s %10s\n", "event", "count"); | ||
84 | printf("%-40s %10s\n", "----------------------------------------", | ||
85 | "-----------"); | ||
86 | |||
87 | foreach my $event_name (keys %unhandled) { | ||
88 | printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); | ||
89 | } | ||
90 | } | ||
91 | |||
92 | sub trace_unhandled | ||
93 | { | ||
94 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
95 | $common_pid, $common_comm) = @_; | ||
96 | |||
97 | $unhandled{$event_name}++; | ||
98 | } | ||
99 | |||
100 | sub print_header | ||
101 | { | ||
102 | my ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_; | ||
103 | |||
104 | printf("%-20s %5u %05u.%09u %8u %-20s ", | ||
105 | $event_name, $cpu, $secs, $nsecs, $pid, $comm); | ||
106 | } | ||
diff --git a/tools/perf/scripts/perl/rw-by-file.pl b/tools/perf/scripts/perl/rw-by-file.pl new file mode 100644 index 000000000000..61f91561d848 --- /dev/null +++ b/tools/perf/scripts/perl/rw-by-file.pl | |||
@@ -0,0 +1,105 @@ | |||
1 | #!/usr/bin/perl -w | ||
2 | # (c) 2009, Tom Zanussi <tzanussi@gmail.com> | ||
3 | # Licensed under the terms of the GNU GPL License version 2 | ||
4 | |||
5 | # Display r/w activity for files read/written to for a given program | ||
6 | |||
7 | # The common_* event handler fields are the most useful fields common to | ||
8 | # all events. They don't necessarily correspond to the 'common_*' fields | ||
9 | # in the status files. Those fields not available as handler params can | ||
10 | # be retrieved via script functions of the form get_common_*(). | ||
11 | |||
12 | use 5.010000; | ||
13 | use strict; | ||
14 | use warnings; | ||
15 | |||
16 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; | ||
17 | use lib "./Perf-Trace-Util/lib"; | ||
18 | use Perf::Trace::Core; | ||
19 | use Perf::Trace::Util; | ||
20 | |||
21 | # change this to the comm of the program you're interested in | ||
22 | my $for_comm = "perf"; | ||
23 | |||
24 | my %reads; | ||
25 | my %writes; | ||
26 | |||
27 | sub syscalls::sys_enter_read | ||
28 | { | ||
29 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
30 | $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_; | ||
31 | |||
32 | if ($common_comm eq $for_comm) { | ||
33 | $reads{$fd}{bytes_requested} += $count; | ||
34 | $reads{$fd}{total_reads}++; | ||
35 | } | ||
36 | } | ||
37 | |||
38 | sub syscalls::sys_enter_write | ||
39 | { | ||
40 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
41 | $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_; | ||
42 | |||
43 | if ($common_comm eq $for_comm) { | ||
44 | $writes{$fd}{bytes_written} += $count; | ||
45 | $writes{$fd}{total_writes}++; | ||
46 | } | ||
47 | } | ||
48 | |||
49 | sub trace_end | ||
50 | { | ||
51 | printf("file read counts for $for_comm:\n\n"); | ||
52 | |||
53 | printf("%6s %10s %10s\n", "fd", "# reads", "bytes_requested"); | ||
54 | printf("%6s %10s %10s\n", "------", "----------", "-----------"); | ||
55 | |||
56 | foreach my $fd (sort {$reads{$b}{bytes_requested} <=> | ||
57 | $reads{$a}{bytes_requested}} keys %reads) { | ||
58 | my $total_reads = $reads{$fd}{total_reads}; | ||
59 | my $bytes_requested = $reads{$fd}{bytes_requested}; | ||
60 | printf("%6u %10u %10u\n", $fd, $total_reads, $bytes_requested); | ||
61 | } | ||
62 | |||
63 | printf("\nfile write counts for $for_comm:\n\n"); | ||
64 | |||
65 | printf("%6s %10s %10s\n", "fd", "# writes", "bytes_written"); | ||
66 | printf("%6s %10s %10s\n", "------", "----------", "-----------"); | ||
67 | |||
68 | foreach my $fd (sort {$writes{$b}{bytes_written} <=> | ||
69 | $writes{$a}{bytes_written}} keys %writes) { | ||
70 | my $total_writes = $writes{$fd}{total_writes}; | ||
71 | my $bytes_written = $writes{$fd}{bytes_written}; | ||
72 | printf("%6u %10u %10u\n", $fd, $total_writes, $bytes_written); | ||
73 | } | ||
74 | |||
75 | print_unhandled(); | ||
76 | } | ||
77 | |||
78 | my %unhandled; | ||
79 | |||
80 | sub print_unhandled | ||
81 | { | ||
82 | if ((scalar keys %unhandled) == 0) { | ||
83 | return; | ||
84 | } | ||
85 | |||
86 | print "\nunhandled events:\n\n"; | ||
87 | |||
88 | printf("%-40s %10s\n", "event", "count"); | ||
89 | printf("%-40s %10s\n", "----------------------------------------", | ||
90 | "-----------"); | ||
91 | |||
92 | foreach my $event_name (keys %unhandled) { | ||
93 | printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | sub trace_unhandled | ||
98 | { | ||
99 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
100 | $common_pid, $common_comm) = @_; | ||
101 | |||
102 | $unhandled{$event_name}++; | ||
103 | } | ||
104 | |||
105 | |||
diff --git a/tools/perf/scripts/perl/rw-by-pid.pl b/tools/perf/scripts/perl/rw-by-pid.pl new file mode 100644 index 000000000000..da601fae1a00 --- /dev/null +++ b/tools/perf/scripts/perl/rw-by-pid.pl | |||
@@ -0,0 +1,170 @@ | |||
1 | #!/usr/bin/perl -w | ||
2 | # (c) 2009, Tom Zanussi <tzanussi@gmail.com> | ||
3 | # Licensed under the terms of the GNU GPL License version 2 | ||
4 | |||
5 | # Display r/w activity for all processes | ||
6 | |||
7 | # The common_* event handler fields are the most useful fields common to | ||
8 | # all events. They don't necessarily correspond to the 'common_*' fields | ||
9 | # in the status files. Those fields not available as handler params can | ||
10 | # be retrieved via script functions of the form get_common_*(). | ||
11 | |||
12 | use 5.010000; | ||
13 | use strict; | ||
14 | use warnings; | ||
15 | |||
16 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; | ||
17 | use lib "./Perf-Trace-Util/lib"; | ||
18 | use Perf::Trace::Core; | ||
19 | use Perf::Trace::Util; | ||
20 | |||
21 | my %reads; | ||
22 | my %writes; | ||
23 | |||
24 | sub syscalls::sys_exit_read | ||
25 | { | ||
26 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
27 | $common_pid, $common_comm, | ||
28 | $nr, $ret) = @_; | ||
29 | |||
30 | if ($ret > 0) { | ||
31 | $reads{$common_pid}{bytes_read} += $ret; | ||
32 | } else { | ||
33 | if (!defined ($reads{$common_pid}{bytes_read})) { | ||
34 | $reads{$common_pid}{bytes_read} = 0; | ||
35 | } | ||
36 | $reads{$common_pid}{errors}{$ret}++; | ||
37 | } | ||
38 | } | ||
39 | |||
40 | sub syscalls::sys_enter_read | ||
41 | { | ||
42 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
43 | $common_pid, $common_comm, | ||
44 | $nr, $fd, $buf, $count) = @_; | ||
45 | |||
46 | $reads{$common_pid}{bytes_requested} += $count; | ||
47 | $reads{$common_pid}{total_reads}++; | ||
48 | $reads{$common_pid}{comm} = $common_comm; | ||
49 | } | ||
50 | |||
51 | sub syscalls::sys_exit_write | ||
52 | { | ||
53 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
54 | $common_pid, $common_comm, | ||
55 | $nr, $ret) = @_; | ||
56 | |||
57 | if ($ret <= 0) { | ||
58 | $writes{$common_pid}{errors}{$ret}++; | ||
59 | } | ||
60 | } | ||
61 | |||
62 | sub syscalls::sys_enter_write | ||
63 | { | ||
64 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
65 | $common_pid, $common_comm, | ||
66 | $nr, $fd, $buf, $count) = @_; | ||
67 | |||
68 | $writes{$common_pid}{bytes_written} += $count; | ||
69 | $writes{$common_pid}{total_writes}++; | ||
70 | $writes{$common_pid}{comm} = $common_comm; | ||
71 | } | ||
72 | |||
73 | sub trace_end | ||
74 | { | ||
75 | printf("read counts by pid:\n\n"); | ||
76 | |||
77 | printf("%6s %20s %10s %10s %10s\n", "pid", "comm", | ||
78 | "# reads", "bytes_requested", "bytes_read"); | ||
79 | printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------", | ||
80 | "-----------", "----------", "----------"); | ||
81 | |||
82 | foreach my $pid (sort {$reads{$b}{bytes_read} <=> | ||
83 | $reads{$a}{bytes_read}} keys %reads) { | ||
84 | my $comm = $reads{$pid}{comm}; | ||
85 | my $total_reads = $reads{$pid}{total_reads}; | ||
86 | my $bytes_requested = $reads{$pid}{bytes_requested}; | ||
87 | my $bytes_read = $reads{$pid}{bytes_read}; | ||
88 | |||
89 | printf("%6s %-20s %10s %10s %10s\n", $pid, $comm, | ||
90 | $total_reads, $bytes_requested, $bytes_read); | ||
91 | } | ||
92 | |||
93 | printf("\nfailed reads by pid:\n\n"); | ||
94 | |||
95 | printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors"); | ||
96 | printf("%6s %20s %6s %10s\n", "------", "--------------------", | ||
97 | "------", "----------"); | ||
98 | |||
99 | foreach my $pid (keys %reads) { | ||
100 | my $comm = $reads{$pid}{comm}; | ||
101 | foreach my $err (sort {$reads{$b}{comm} cmp $reads{$a}{comm}} | ||
102 | keys %{$reads{$pid}{errors}}) { | ||
103 | my $errors = $reads{$pid}{errors}{$err}; | ||
104 | |||
105 | printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | printf("\nwrite counts by pid:\n\n"); | ||
110 | |||
111 | printf("%6s %20s %10s %10s\n", "pid", "comm", | ||
112 | "# writes", "bytes_written"); | ||
113 | printf("%6s %-20s %10s %10s\n", "------", "--------------------", | ||
114 | "-----------", "----------"); | ||
115 | |||
116 | foreach my $pid (sort {$writes{$b}{bytes_written} <=> | ||
117 | $writes{$a}{bytes_written}} keys %writes) { | ||
118 | my $comm = $writes{$pid}{comm}; | ||
119 | my $total_writes = $writes{$pid}{total_writes}; | ||
120 | my $bytes_written = $writes{$pid}{bytes_written}; | ||
121 | |||
122 | printf("%6s %-20s %10s %10s\n", $pid, $comm, | ||
123 | $total_writes, $bytes_written); | ||
124 | } | ||
125 | |||
126 | printf("\nfailed writes by pid:\n\n"); | ||
127 | |||
128 | printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors"); | ||
129 | printf("%6s %20s %6s %10s\n", "------", "--------------------", | ||
130 | "------", "----------"); | ||
131 | |||
132 | foreach my $pid (keys %writes) { | ||
133 | my $comm = $writes{$pid}{comm}; | ||
134 | foreach my $err (sort {$writes{$b}{comm} cmp $writes{$a}{comm}} | ||
135 | keys %{$writes{$pid}{errors}}) { | ||
136 | my $errors = $writes{$pid}{errors}{$err}; | ||
137 | |||
138 | printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | print_unhandled(); | ||
143 | } | ||
144 | |||
145 | my %unhandled; | ||
146 | |||
147 | sub print_unhandled | ||
148 | { | ||
149 | if ((scalar keys %unhandled) == 0) { | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | print "\nunhandled events:\n\n"; | ||
154 | |||
155 | printf("%-40s %10s\n", "event", "count"); | ||
156 | printf("%-40s %10s\n", "----------------------------------------", | ||
157 | "-----------"); | ||
158 | |||
159 | foreach my $event_name (keys %unhandled) { | ||
160 | printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | sub trace_unhandled | ||
165 | { | ||
166 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
167 | $common_pid, $common_comm) = @_; | ||
168 | |||
169 | $unhandled{$event_name}++; | ||
170 | } | ||
diff --git a/tools/perf/scripts/perl/wakeup-latency.pl b/tools/perf/scripts/perl/wakeup-latency.pl new file mode 100644 index 000000000000..ed58ef284e23 --- /dev/null +++ b/tools/perf/scripts/perl/wakeup-latency.pl | |||
@@ -0,0 +1,103 @@ | |||
1 | #!/usr/bin/perl -w | ||
2 | # (c) 2009, Tom Zanussi <tzanussi@gmail.com> | ||
3 | # Licensed under the terms of the GNU GPL License version 2 | ||
4 | |||
5 | # Display avg/min/max wakeup latency | ||
6 | |||
7 | # The common_* event handler fields are the most useful fields common to | ||
8 | # all events. They don't necessarily correspond to the 'common_*' fields | ||
9 | # in the status files. Those fields not available as handler params can | ||
10 | # be retrieved via script functions of the form get_common_*(). | ||
11 | |||
12 | use 5.010000; | ||
13 | use strict; | ||
14 | use warnings; | ||
15 | |||
16 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; | ||
17 | use lib "./Perf-Trace-Util/lib"; | ||
18 | use Perf::Trace::Core; | ||
19 | use Perf::Trace::Util; | ||
20 | |||
21 | my %last_wakeup; | ||
22 | |||
23 | my $max_wakeup_latency; | ||
24 | my $min_wakeup_latency; | ||
25 | my $total_wakeup_latency; | ||
26 | my $total_wakeups; | ||
27 | |||
28 | sub sched::sched_switch | ||
29 | { | ||
30 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
31 | $common_pid, $common_comm, | ||
32 | $prev_comm, $prev_pid, $prev_prio, $prev_state, $next_comm, $next_pid, | ||
33 | $next_prio) = @_; | ||
34 | |||
35 | my $wakeup_ts = $last_wakeup{$common_cpu}{ts}; | ||
36 | if ($wakeup_ts) { | ||
37 | my $switch_ts = nsecs($common_secs, $common_nsecs); | ||
38 | my $wakeup_latency = $switch_ts - $wakeup_ts; | ||
39 | if ($wakeup_latency > $max_wakeup_latency) { | ||
40 | $max_wakeup_latency = $wakeup_latency; | ||
41 | } | ||
42 | if ($wakeup_latency < $min_wakeup_latency) { | ||
43 | $min_wakeup_latency = $wakeup_latency; | ||
44 | } | ||
45 | $total_wakeup_latency += $wakeup_latency; | ||
46 | $total_wakeups++; | ||
47 | } | ||
48 | $last_wakeup{$common_cpu}{ts} = 0; | ||
49 | } | ||
50 | |||
51 | sub sched::sched_wakeup | ||
52 | { | ||
53 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
54 | $common_pid, $common_comm, | ||
55 | $comm, $pid, $prio, $success, $target_cpu) = @_; | ||
56 | |||
57 | $last_wakeup{$target_cpu}{ts} = nsecs($common_secs, $common_nsecs); | ||
58 | } | ||
59 | |||
60 | sub trace_begin | ||
61 | { | ||
62 | $min_wakeup_latency = 1000000000; | ||
63 | $max_wakeup_latency = 0; | ||
64 | } | ||
65 | |||
66 | sub trace_end | ||
67 | { | ||
68 | printf("wakeup_latency stats:\n\n"); | ||
69 | print "total_wakeups: $total_wakeups\n"; | ||
70 | printf("avg_wakeup_latency (ns): %u\n", | ||
71 | avg($total_wakeup_latency, $total_wakeups)); | ||
72 | printf("min_wakeup_latency (ns): %u\n", $min_wakeup_latency); | ||
73 | printf("max_wakeup_latency (ns): %u\n", $max_wakeup_latency); | ||
74 | |||
75 | print_unhandled(); | ||
76 | } | ||
77 | |||
78 | my %unhandled; | ||
79 | |||
80 | sub print_unhandled | ||
81 | { | ||
82 | if ((scalar keys %unhandled) == 0) { | ||
83 | return; | ||
84 | } | ||
85 | |||
86 | print "\nunhandled events:\n\n"; | ||
87 | |||
88 | printf("%-40s %10s\n", "event", "count"); | ||
89 | printf("%-40s %10s\n", "----------------------------------------", | ||
90 | "-----------"); | ||
91 | |||
92 | foreach my $event_name (keys %unhandled) { | ||
93 | printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | sub trace_unhandled | ||
98 | { | ||
99 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
100 | $common_pid, $common_comm) = @_; | ||
101 | |||
102 | $unhandled{$event_name}++; | ||
103 | } | ||
diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl new file mode 100644 index 000000000000..511302c8a494 --- /dev/null +++ b/tools/perf/scripts/perl/workqueue-stats.pl | |||
@@ -0,0 +1,129 @@ | |||
1 | #!/usr/bin/perl -w | ||
2 | # (c) 2009, Tom Zanussi <tzanussi@gmail.com> | ||
3 | # Licensed under the terms of the GNU GPL License version 2 | ||
4 | |||
5 | # Displays workqueue stats | ||
6 | # | ||
7 | # Usage: | ||
8 | # | ||
9 | # perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e | ||
10 | # workqueue:workqueue_destruction -e workqueue:workqueue_execution | ||
11 | # -e workqueue:workqueue_insertion | ||
12 | # | ||
13 | # perf trace -s tools/perf/scripts/perl/workqueue-stats.pl | ||
14 | |||
15 | use 5.010000; | ||
16 | use strict; | ||
17 | use warnings; | ||
18 | |||
19 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; | ||
20 | use lib "./Perf-Trace-Util/lib"; | ||
21 | use Perf::Trace::Core; | ||
22 | use Perf::Trace::Util; | ||
23 | |||
24 | my @cpus; | ||
25 | |||
26 | sub workqueue::workqueue_destruction | ||
27 | { | ||
28 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
29 | $common_pid, $common_comm, | ||
30 | $thread_comm, $thread_pid) = @_; | ||
31 | |||
32 | $cpus[$common_cpu]{$thread_pid}{destroyed}++; | ||
33 | $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; | ||
34 | } | ||
35 | |||
36 | sub workqueue::workqueue_creation | ||
37 | { | ||
38 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
39 | $common_pid, $common_comm, | ||
40 | $thread_comm, $thread_pid, $cpu) = @_; | ||
41 | |||
42 | $cpus[$common_cpu]{$thread_pid}{created}++; | ||
43 | $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; | ||
44 | } | ||
45 | |||
46 | sub workqueue::workqueue_execution | ||
47 | { | ||
48 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
49 | $common_pid, $common_comm, | ||
50 | $thread_comm, $thread_pid, $func) = @_; | ||
51 | |||
52 | $cpus[$common_cpu]{$thread_pid}{executed}++; | ||
53 | $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; | ||
54 | } | ||
55 | |||
56 | sub workqueue::workqueue_insertion | ||
57 | { | ||
58 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
59 | $common_pid, $common_comm, | ||
60 | $thread_comm, $thread_pid, $func) = @_; | ||
61 | |||
62 | $cpus[$common_cpu]{$thread_pid}{inserted}++; | ||
63 | $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; | ||
64 | } | ||
65 | |||
66 | sub trace_end | ||
67 | { | ||
68 | print "workqueue work stats:\n\n"; | ||
69 | my $cpu = 0; | ||
70 | printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name"); | ||
71 | printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----"); | ||
72 | foreach my $pidhash (@cpus) { | ||
73 | while ((my $pid, my $wqhash) = each %$pidhash) { | ||
74 | my $ins = $$wqhash{'inserted'}; | ||
75 | my $exe = $$wqhash{'executed'}; | ||
76 | my $comm = $$wqhash{'comm'}; | ||
77 | if ($ins || $exe) { | ||
78 | printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm); | ||
79 | } | ||
80 | } | ||
81 | $cpu++; | ||
82 | } | ||
83 | |||
84 | $cpu = 0; | ||
85 | print "\nworkqueue lifecycle stats:\n\n"; | ||
86 | printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name"); | ||
87 | printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----"); | ||
88 | foreach my $pidhash (@cpus) { | ||
89 | while ((my $pid, my $wqhash) = each %$pidhash) { | ||
90 | my $created = $$wqhash{'created'}; | ||
91 | my $destroyed = $$wqhash{'destroyed'}; | ||
92 | my $comm = $$wqhash{'comm'}; | ||
93 | if ($created || $destroyed) { | ||
94 | printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed, | ||
95 | $comm); | ||
96 | } | ||
97 | } | ||
98 | $cpu++; | ||
99 | } | ||
100 | |||
101 | print_unhandled(); | ||
102 | } | ||
103 | |||
104 | my %unhandled; | ||
105 | |||
106 | sub print_unhandled | ||
107 | { | ||
108 | if ((scalar keys %unhandled) == 0) { | ||
109 | return; | ||
110 | } | ||
111 | |||
112 | print "\nunhandled events:\n\n"; | ||
113 | |||
114 | printf("%-40s %10s\n", "event", "count"); | ||
115 | printf("%-40s %10s\n", "----------------------------------------", | ||
116 | "-----------"); | ||
117 | |||
118 | foreach my $event_name (keys %unhandled) { | ||
119 | printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | sub trace_unhandled | ||
124 | { | ||
125 | my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, | ||
126 | $common_pid, $common_comm) = @_; | ||
127 | |||
128 | $unhandled{$event_name}++; | ||
129 | } | ||
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 6f8ea9d210b6..918eb376abe3 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h | |||
@@ -1,10 +1,15 @@ | |||
1 | #ifndef CACHE_H | 1 | #ifndef __PERF_CACHE_H |
2 | #define CACHE_H | 2 | #define __PERF_CACHE_H |
3 | 3 | ||
4 | #include "util.h" | 4 | #include "util.h" |
5 | #include "strbuf.h" | 5 | #include "strbuf.h" |
6 | #include "../perf.h" | 6 | #include "../perf.h" |
7 | 7 | ||
8 | #define CMD_EXEC_PATH "--exec-path" | ||
9 | #define CMD_PERF_DIR "--perf-dir=" | ||
10 | #define CMD_WORK_TREE "--work-tree=" | ||
11 | #define CMD_DEBUGFS_DIR "--debugfs-dir=" | ||
12 | |||
8 | #define PERF_DIR_ENVIRONMENT "PERF_DIR" | 13 | #define PERF_DIR_ENVIRONMENT "PERF_DIR" |
9 | #define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" | 14 | #define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" |
10 | #define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" | 15 | #define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" |
@@ -117,4 +122,4 @@ extern char *perf_pathdup(const char *fmt, ...) | |||
117 | 122 | ||
118 | extern size_t strlcpy(char *dest, const char *src, size_t size); | 123 | extern size_t strlcpy(char *dest, const char *src, size_t size); |
119 | 124 | ||
120 | #endif /* CACHE_H */ | 125 | #endif /* __PERF_CACHE_H */ |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 3b8380f1b478..b3b71258272a 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -206,7 +206,7 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain, | |||
206 | } | 206 | } |
207 | node->val_nr = chain->nr - start; | 207 | node->val_nr = chain->nr - start; |
208 | if (!node->val_nr) | 208 | if (!node->val_nr) |
209 | printf("Warning: empty node in callchain tree\n"); | 209 | pr_warning("Warning: empty node in callchain tree\n"); |
210 | } | 210 | } |
211 | 211 | ||
212 | static void | 212 | static void |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 43cf3ea9e088..ad4626de4c2b 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -58,4 +58,4 @@ static inline u64 cumul_hits(struct callchain_node *node) | |||
58 | int register_callchain_param(struct callchain_param *param); | 58 | int register_callchain_param(struct callchain_param *param); |
59 | void append_chain(struct callchain_node *root, struct ip_callchain *chain, | 59 | void append_chain(struct callchain_node *root, struct ip_callchain *chain, |
60 | struct symbol **syms); | 60 | struct symbol **syms); |
61 | #endif | 61 | #endif /* __PERF_CALLCHAIN_H */ |
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index 58d597564b99..24e8809210bb 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef COLOR_H | 1 | #ifndef __PERF_COLOR_H |
2 | #define COLOR_H | 2 | #define __PERF_COLOR_H |
3 | 3 | ||
4 | /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ | 4 | /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ |
5 | #define COLOR_MAXLEN 24 | 5 | #define COLOR_MAXLEN 24 |
@@ -39,4 +39,4 @@ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *bu | |||
39 | int percent_color_fprintf(FILE *fp, const char *fmt, double percent); | 39 | int percent_color_fprintf(FILE *fp, const char *fmt, double percent); |
40 | const char *get_percent_color(double percent); | 40 | const char *get_percent_color(double percent); |
41 | 41 | ||
42 | #endif /* COLOR_H */ | 42 | #endif /* __PERF_COLOR_H */ |
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c index 0b791bd346bc..35073621e5de 100644 --- a/tools/perf/util/ctype.c +++ b/tools/perf/util/ctype.c | |||
@@ -29,3 +29,11 @@ unsigned char sane_ctype[256] = { | |||
29 | A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */ | 29 | A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */ |
30 | /* Nothing in the 128.. range */ | 30 | /* Nothing in the 128.. range */ |
31 | }; | 31 | }; |
32 | |||
33 | const char *graph_line = | ||
34 | "_____________________________________________________________________" | ||
35 | "_____________________________________________________________________"; | ||
36 | const char *graph_dotted_line = | ||
37 | "---------------------------------------------------------------------" | ||
38 | "---------------------------------------------------------------------" | ||
39 | "---------------------------------------------------------------------"; | ||
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c new file mode 100644 index 000000000000..ca0bedf637c2 --- /dev/null +++ b/tools/perf/util/data_map.c | |||
@@ -0,0 +1,291 @@ | |||
1 | #include "data_map.h" | ||
2 | #include "symbol.h" | ||
3 | #include "util.h" | ||
4 | #include "debug.h" | ||
5 | |||
6 | |||
7 | static struct perf_file_handler *curr_handler; | ||
8 | static unsigned long mmap_window = 32; | ||
9 | static char __cwd[PATH_MAX]; | ||
10 | |||
11 | static int process_event_stub(event_t *event __used) | ||
12 | { | ||
13 | dump_printf(": unhandled!\n"); | ||
14 | return 0; | ||
15 | } | ||
16 | |||
17 | void register_perf_file_handler(struct perf_file_handler *handler) | ||
18 | { | ||
19 | if (!handler->process_sample_event) | ||
20 | handler->process_sample_event = process_event_stub; | ||
21 | if (!handler->process_mmap_event) | ||
22 | handler->process_mmap_event = process_event_stub; | ||
23 | if (!handler->process_comm_event) | ||
24 | handler->process_comm_event = process_event_stub; | ||
25 | if (!handler->process_fork_event) | ||
26 | handler->process_fork_event = process_event_stub; | ||
27 | if (!handler->process_exit_event) | ||
28 | handler->process_exit_event = process_event_stub; | ||
29 | if (!handler->process_lost_event) | ||
30 | handler->process_lost_event = process_event_stub; | ||
31 | if (!handler->process_read_event) | ||
32 | handler->process_read_event = process_event_stub; | ||
33 | if (!handler->process_throttle_event) | ||
34 | handler->process_throttle_event = process_event_stub; | ||
35 | if (!handler->process_unthrottle_event) | ||
36 | handler->process_unthrottle_event = process_event_stub; | ||
37 | |||
38 | curr_handler = handler; | ||
39 | } | ||
40 | |||
41 | static const char *event__name[] = { | ||
42 | [0] = "TOTAL", | ||
43 | [PERF_RECORD_MMAP] = "MMAP", | ||
44 | [PERF_RECORD_LOST] = "LOST", | ||
45 | [PERF_RECORD_COMM] = "COMM", | ||
46 | [PERF_RECORD_EXIT] = "EXIT", | ||
47 | [PERF_RECORD_THROTTLE] = "THROTTLE", | ||
48 | [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", | ||
49 | [PERF_RECORD_FORK] = "FORK", | ||
50 | [PERF_RECORD_READ] = "READ", | ||
51 | [PERF_RECORD_SAMPLE] = "SAMPLE", | ||
52 | }; | ||
53 | |||
54 | unsigned long event__total[PERF_RECORD_MAX]; | ||
55 | |||
56 | void event__print_totals(void) | ||
57 | { | ||
58 | int i; | ||
59 | for (i = 0; i < PERF_RECORD_MAX; ++i) | ||
60 | pr_info("%10s events: %10ld\n", | ||
61 | event__name[i], event__total[i]); | ||
62 | } | ||
63 | |||
64 | static int | ||
65 | process_event(event_t *event, unsigned long offset, unsigned long head) | ||
66 | { | ||
67 | trace_event(event); | ||
68 | |||
69 | if (event->header.type < PERF_RECORD_MAX) { | ||
70 | dump_printf("%p [%p]: PERF_RECORD_%s", | ||
71 | (void *)(offset + head), | ||
72 | (void *)(long)(event->header.size), | ||
73 | event__name[event->header.type]); | ||
74 | ++event__total[0]; | ||
75 | ++event__total[event->header.type]; | ||
76 | } | ||
77 | |||
78 | switch (event->header.type) { | ||
79 | case PERF_RECORD_SAMPLE: | ||
80 | return curr_handler->process_sample_event(event); | ||
81 | case PERF_RECORD_MMAP: | ||
82 | return curr_handler->process_mmap_event(event); | ||
83 | case PERF_RECORD_COMM: | ||
84 | return curr_handler->process_comm_event(event); | ||
85 | case PERF_RECORD_FORK: | ||
86 | return curr_handler->process_fork_event(event); | ||
87 | case PERF_RECORD_EXIT: | ||
88 | return curr_handler->process_exit_event(event); | ||
89 | case PERF_RECORD_LOST: | ||
90 | return curr_handler->process_lost_event(event); | ||
91 | case PERF_RECORD_READ: | ||
92 | return curr_handler->process_read_event(event); | ||
93 | case PERF_RECORD_THROTTLE: | ||
94 | return curr_handler->process_throttle_event(event); | ||
95 | case PERF_RECORD_UNTHROTTLE: | ||
96 | return curr_handler->process_unthrottle_event(event); | ||
97 | default: | ||
98 | curr_handler->total_unknown++; | ||
99 | return -1; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | int perf_header__read_build_ids(int input, off_t offset, off_t size) | ||
104 | { | ||
105 | struct build_id_event bev; | ||
106 | char filename[PATH_MAX]; | ||
107 | off_t limit = offset + size; | ||
108 | int err = -1; | ||
109 | |||
110 | while (offset < limit) { | ||
111 | struct dso *dso; | ||
112 | ssize_t len; | ||
113 | |||
114 | if (read(input, &bev, sizeof(bev)) != sizeof(bev)) | ||
115 | goto out; | ||
116 | |||
117 | len = bev.header.size - sizeof(bev); | ||
118 | if (read(input, filename, len) != len) | ||
119 | goto out; | ||
120 | |||
121 | dso = dsos__findnew(filename); | ||
122 | if (dso != NULL) | ||
123 | dso__set_build_id(dso, &bev.build_id); | ||
124 | |||
125 | offset += bev.header.size; | ||
126 | } | ||
127 | err = 0; | ||
128 | out: | ||
129 | return err; | ||
130 | } | ||
131 | |||
132 | int mmap_dispatch_perf_file(struct perf_header **pheader, | ||
133 | const char *input_name, | ||
134 | int force, | ||
135 | int full_paths, | ||
136 | int *cwdlen, | ||
137 | char **cwd) | ||
138 | { | ||
139 | int err; | ||
140 | struct perf_header *header; | ||
141 | unsigned long head, shift; | ||
142 | unsigned long offset = 0; | ||
143 | struct stat input_stat; | ||
144 | size_t page_size; | ||
145 | u64 sample_type; | ||
146 | event_t *event; | ||
147 | uint32_t size; | ||
148 | int input; | ||
149 | char *buf; | ||
150 | |||
151 | if (curr_handler == NULL) { | ||
152 | pr_debug("Forgot to register perf file handler\n"); | ||
153 | return -EINVAL; | ||
154 | } | ||
155 | |||
156 | page_size = getpagesize(); | ||
157 | |||
158 | input = open(input_name, O_RDONLY); | ||
159 | if (input < 0) { | ||
160 | pr_err("Failed to open file: %s", input_name); | ||
161 | if (!strcmp(input_name, "perf.data")) | ||
162 | pr_err(" (try 'perf record' first)"); | ||
163 | pr_err("\n"); | ||
164 | return -errno; | ||
165 | } | ||
166 | |||
167 | if (fstat(input, &input_stat) < 0) { | ||
168 | pr_err("failed to stat file"); | ||
169 | err = -errno; | ||
170 | goto out_close; | ||
171 | } | ||
172 | |||
173 | err = -EACCES; | ||
174 | if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { | ||
175 | pr_err("file: %s not owned by current user or root\n", | ||
176 | input_name); | ||
177 | goto out_close; | ||
178 | } | ||
179 | |||
180 | if (input_stat.st_size == 0) { | ||
181 | pr_info("zero-sized file, nothing to do!\n"); | ||
182 | goto done; | ||
183 | } | ||
184 | |||
185 | err = -ENOMEM; | ||
186 | header = perf_header__new(); | ||
187 | if (header == NULL) | ||
188 | goto out_close; | ||
189 | |||
190 | err = perf_header__read(header, input); | ||
191 | if (err < 0) | ||
192 | goto out_delete; | ||
193 | *pheader = header; | ||
194 | head = header->data_offset; | ||
195 | |||
196 | sample_type = perf_header__sample_type(header); | ||
197 | |||
198 | err = -EINVAL; | ||
199 | if (curr_handler->sample_type_check && | ||
200 | curr_handler->sample_type_check(sample_type) < 0) | ||
201 | goto out_delete; | ||
202 | |||
203 | if (!full_paths) { | ||
204 | if (getcwd(__cwd, sizeof(__cwd)) == NULL) { | ||
205 | pr_err("failed to get the current directory\n"); | ||
206 | err = -errno; | ||
207 | goto out_delete; | ||
208 | } | ||
209 | *cwd = __cwd; | ||
210 | *cwdlen = strlen(*cwd); | ||
211 | } else { | ||
212 | *cwd = NULL; | ||
213 | *cwdlen = 0; | ||
214 | } | ||
215 | |||
216 | shift = page_size * (head / page_size); | ||
217 | offset += shift; | ||
218 | head -= shift; | ||
219 | |||
220 | remap: | ||
221 | buf = mmap(NULL, page_size * mmap_window, PROT_READ, | ||
222 | MAP_SHARED, input, offset); | ||
223 | if (buf == MAP_FAILED) { | ||
224 | pr_err("failed to mmap file\n"); | ||
225 | err = -errno; | ||
226 | goto out_delete; | ||
227 | } | ||
228 | |||
229 | more: | ||
230 | event = (event_t *)(buf + head); | ||
231 | |||
232 | size = event->header.size; | ||
233 | if (!size) | ||
234 | size = 8; | ||
235 | |||
236 | if (head + event->header.size >= page_size * mmap_window) { | ||
237 | int munmap_ret; | ||
238 | |||
239 | shift = page_size * (head / page_size); | ||
240 | |||
241 | munmap_ret = munmap(buf, page_size * mmap_window); | ||
242 | assert(munmap_ret == 0); | ||
243 | |||
244 | offset += shift; | ||
245 | head -= shift; | ||
246 | goto remap; | ||
247 | } | ||
248 | |||
249 | size = event->header.size; | ||
250 | |||
251 | dump_printf("\n%p [%p]: event: %d\n", | ||
252 | (void *)(offset + head), | ||
253 | (void *)(long)event->header.size, | ||
254 | event->header.type); | ||
255 | |||
256 | if (!size || process_event(event, offset, head) < 0) { | ||
257 | |||
258 | dump_printf("%p [%p]: skipping unknown header type: %d\n", | ||
259 | (void *)(offset + head), | ||
260 | (void *)(long)(event->header.size), | ||
261 | event->header.type); | ||
262 | |||
263 | /* | ||
264 | * assume we lost track of the stream, check alignment, and | ||
265 | * increment a single u64 in the hope to catch on again 'soon'. | ||
266 | */ | ||
267 | |||
268 | if (unlikely(head & 7)) | ||
269 | head &= ~7ULL; | ||
270 | |||
271 | size = 8; | ||
272 | } | ||
273 | |||
274 | head += size; | ||
275 | |||
276 | if (offset + head >= header->data_offset + header->data_size) | ||
277 | goto done; | ||
278 | |||
279 | if (offset + head < (unsigned long)input_stat.st_size) | ||
280 | goto more; | ||
281 | |||
282 | done: | ||
283 | err = 0; | ||
284 | out_close: | ||
285 | close(input); | ||
286 | |||
287 | return err; | ||
288 | out_delete: | ||
289 | perf_header__delete(header); | ||
290 | goto out_close; | ||
291 | } | ||
diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h new file mode 100644 index 000000000000..3180ff7e3633 --- /dev/null +++ b/tools/perf/util/data_map.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef __PERF_DATAMAP_H | ||
2 | #define __PERF_DATAMAP_H | ||
3 | |||
4 | #include "event.h" | ||
5 | #include "header.h" | ||
6 | |||
7 | typedef int (*event_type_handler_t)(event_t *); | ||
8 | |||
9 | struct perf_file_handler { | ||
10 | event_type_handler_t process_sample_event; | ||
11 | event_type_handler_t process_mmap_event; | ||
12 | event_type_handler_t process_comm_event; | ||
13 | event_type_handler_t process_fork_event; | ||
14 | event_type_handler_t process_exit_event; | ||
15 | event_type_handler_t process_lost_event; | ||
16 | event_type_handler_t process_read_event; | ||
17 | event_type_handler_t process_throttle_event; | ||
18 | event_type_handler_t process_unthrottle_event; | ||
19 | int (*sample_type_check)(u64 sample_type); | ||
20 | unsigned long total_unknown; | ||
21 | }; | ||
22 | |||
23 | void register_perf_file_handler(struct perf_file_handler *handler); | ||
24 | int mmap_dispatch_perf_file(struct perf_header **pheader, | ||
25 | const char *input_name, | ||
26 | int force, | ||
27 | int full_paths, | ||
28 | int *cwdlen, | ||
29 | char **cwd); | ||
30 | int perf_header__read_build_ids(int input, off_t offset, off_t file_size); | ||
31 | |||
32 | #endif | ||
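
As a rough illustration of how this new dispatch interface is meant to be consumed (a sketch only, not part of the patch; the report__* names are hypothetical), a perf built-in would fill a perf_file_handler with its callbacks, register it, and let mmap_dispatch_perf_file() walk the recorded events; any callback it leaves unset is replaced by the "unhandled" stub at registration time:

#include "data_map.h"

/* Hypothetical callback -- decode and account one sample here. */
static int report__process_sample(event_t *event __used)
{
        return 0;
}

static struct perf_file_handler report__handler = {
        .process_sample_event = report__process_sample,
        /* remaining callbacks left NULL: stubbed by register_perf_file_handler() */
};

static int report__read_events(void)
{
        struct perf_header *header;
        int cwdlen;
        char *cwd;

        register_perf_file_handler(&report__handler);
        return mmap_dispatch_perf_file(&header, "perf.data",
                                       0 /* force */, 0 /* full_paths */,
                                       &cwdlen, &cwd);
}
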
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index e8ca98fe0bd4..28d520d5a1fb 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c | |||
@@ -13,12 +13,12 @@ | |||
13 | int verbose = 0; | 13 | int verbose = 0; |
14 | int dump_trace = 0; | 14 | int dump_trace = 0; |
15 | 15 | ||
16 | int eprintf(const char *fmt, ...) | 16 | int eprintf(int level, const char *fmt, ...) |
17 | { | 17 | { |
18 | va_list args; | 18 | va_list args; |
19 | int ret = 0; | 19 | int ret = 0; |
20 | 20 | ||
21 | if (verbose) { | 21 | if (verbose >= level) { |
22 | va_start(args, fmt); | 22 | va_start(args, fmt); |
23 | ret = vfprintf(stderr, fmt, args); | 23 | ret = vfprintf(stderr, fmt, args); |
24 | va_end(args); | 24 | va_end(args); |
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 437eea58ce40..c6c24c522dea 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h | |||
@@ -1,8 +1,15 @@ | |||
1 | /* For debugging general purposes */ | 1 | /* For debugging general purposes */ |
2 | #ifndef __PERF_DEBUG_H | ||
3 | #define __PERF_DEBUG_H | ||
4 | |||
5 | #include "event.h" | ||
2 | 6 | ||
3 | extern int verbose; | 7 | extern int verbose; |
4 | extern int dump_trace; | 8 | extern int dump_trace; |
5 | 9 | ||
6 | int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); | 10 | int eprintf(int level, |
11 | const char *fmt, ...) __attribute__((format(printf, 2, 3))); | ||
7 | int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); | 12 | int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); |
8 | void trace_event(event_t *event); | 13 | void trace_event(event_t *event); |
14 | |||
15 | #endif /* __PERF_DEBUG_H */ | ||
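
A minimal sketch of what the new level argument buys (illustrative only, not in the patch): eprintf() now compares verbose against the level rather than treating it as a plain on/off flag, so a level-1 message appears with a single -v and a level-2 message only with -v -v:

#include "debug.h"

/* Hypothetical helper: progress output gated by verbosity level. */
static void report_progress(unsigned long nr_events)
{
        eprintf(1, "processed %lu events\n", nr_events);
        eprintf(2, "  (a second -v adds this extra detail)\n");
}
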
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c new file mode 100644 index 000000000000..06b73ee02c49 --- /dev/null +++ b/tools/perf/util/debugfs.c | |||
@@ -0,0 +1,241 @@ | |||
1 | #include "util.h" | ||
2 | #include "debugfs.h" | ||
3 | #include "cache.h" | ||
4 | |||
5 | static int debugfs_premounted; | ||
6 | static char debugfs_mountpoint[MAX_PATH+1]; | ||
7 | |||
8 | static const char *debugfs_known_mountpoints[] = { | ||
9 | "/sys/kernel/debug/", | ||
10 | "/debug/", | ||
11 | 0, | ||
12 | }; | ||
13 | |||
14 | /* use this to force a umount */ | ||
15 | void debugfs_force_cleanup(void) | ||
16 | { | ||
17 | debugfs_find_mountpoint(); | ||
18 | debugfs_premounted = 0; | ||
19 | debugfs_umount(); | ||
20 | } | ||
21 | |||
22 | /* construct a full path to a debugfs element */ | ||
23 | int debugfs_make_path(const char *element, char *buffer, int size) | ||
24 | { | ||
25 | int len; | ||
26 | |||
27 | if (strlen(debugfs_mountpoint) == 0) { | ||
28 | buffer[0] = '\0'; | ||
29 | return -1; | ||
30 | } | ||
31 | |||
32 | len = strlen(debugfs_mountpoint) + strlen(element) + 1; | ||
33 | if (len >= size) | ||
34 | return len+1; | ||
35 | |||
36 | snprintf(buffer, size-1, "%s/%s", debugfs_mountpoint, element); | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | static int debugfs_found; | ||
41 | |||
42 | /* find the path to the mounted debugfs */ | ||
43 | const char *debugfs_find_mountpoint(void) | ||
44 | { | ||
45 | const char **ptr; | ||
46 | char type[100]; | ||
47 | FILE *fp; | ||
48 | |||
49 | if (debugfs_found) | ||
50 | return (const char *) debugfs_mountpoint; | ||
51 | |||
52 | ptr = debugfs_known_mountpoints; | ||
53 | while (*ptr) { | ||
54 | if (debugfs_valid_mountpoint(*ptr) == 0) { | ||
55 | debugfs_found = 1; | ||
56 | strcpy(debugfs_mountpoint, *ptr); | ||
57 | return debugfs_mountpoint; | ||
58 | } | ||
59 | ptr++; | ||
60 | } | ||
61 | |||
62 | /* give up and parse /proc/mounts */ | ||
63 | fp = fopen("/proc/mounts", "r"); | ||
64 | if (fp == NULL) | ||
65 | die("Can't open /proc/mounts for read"); | ||
66 | |||
67 | while (fscanf(fp, "%*s %" | ||
68 | STR(MAX_PATH) | ||
69 | "s %99s %*s %*d %*d\n", | ||
70 | debugfs_mountpoint, type) == 2) { | ||
71 | if (strcmp(type, "debugfs") == 0) | ||
72 | break; | ||
73 | } | ||
74 | fclose(fp); | ||
75 | |||
76 | if (strcmp(type, "debugfs") != 0) | ||
77 | return NULL; | ||
78 | |||
79 | debugfs_found = 1; | ||
80 | |||
81 | return debugfs_mountpoint; | ||
82 | } | ||
83 | |||
84 | /* verify that a mountpoint is actually a debugfs instance */ | ||
85 | |||
86 | int debugfs_valid_mountpoint(const char *debugfs) | ||
87 | { | ||
88 | struct statfs st_fs; | ||
89 | |||
90 | if (statfs(debugfs, &st_fs) < 0) | ||
91 | return -ENOENT; | ||
92 | else if (st_fs.f_type != (long) DEBUGFS_MAGIC) | ||
93 | return -ENOENT; | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | |||
99 | int debugfs_valid_entry(const char *path) | ||
100 | { | ||
101 | struct stat st; | ||
102 | |||
103 | if (stat(path, &st)) | ||
104 | return -errno; | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | /* mount the debugfs somewhere */ | ||
110 | |||
111 | int debugfs_mount(const char *mountpoint) | ||
112 | { | ||
113 | char mountcmd[128]; | ||
114 | |||
115 | /* see if it's already mounted */ | ||
116 | if (debugfs_find_mountpoint()) { | ||
117 | debugfs_premounted = 1; | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | /* if not mounted and no argument */ | ||
122 | if (mountpoint == NULL) { | ||
123 | /* see if environment variable set */ | ||
124 | mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT); | ||
125 | /* if no environment variable, use default */ | ||
126 | if (mountpoint == NULL) | ||
127 | mountpoint = "/sys/kernel/debug"; | ||
128 | } | ||
129 | |||
130 | /* save the mountpoint */ | ||
131 | strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); | ||
132 | |||
133 | /* mount it */ | ||
134 | snprintf(mountcmd, sizeof(mountcmd), | ||
135 | "/bin/mount -t debugfs debugfs %s", mountpoint); | ||
136 | return system(mountcmd); | ||
137 | } | ||
138 | |||
139 | /* umount the debugfs */ | ||
140 | |||
141 | int debugfs_umount(void) | ||
142 | { | ||
143 | char umountcmd[128]; | ||
144 | int ret; | ||
145 | |||
146 | /* if it was already mounted, leave it */ | ||
147 | if (debugfs_premounted) | ||
148 | return 0; | ||
149 | |||
150 | /* make sure it's a valid mount point */ | ||
151 | ret = debugfs_valid_mountpoint(debugfs_mountpoint); | ||
152 | if (ret) | ||
153 | return ret; | ||
154 | |||
155 | snprintf(umountcmd, sizeof(umountcmd), | ||
156 | "/bin/umount %s", debugfs_mountpoint); | ||
157 | return system(umountcmd); | ||
158 | } | ||
159 | |||
160 | int debugfs_write(const char *entry, const char *value) | ||
161 | { | ||
162 | char path[MAX_PATH+1]; | ||
163 | int ret, count; | ||
164 | int fd; | ||
165 | |||
166 | /* construct the path */ | ||
167 | snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry); | ||
168 | |||
169 | /* verify that it exists */ | ||
170 | ret = debugfs_valid_entry(path); | ||
171 | if (ret) | ||
172 | return ret; | ||
173 | |||
174 | /* get how many chars we're going to write */ | ||
175 | count = strlen(value); | ||
176 | |||
177 | /* open the debugfs entry */ | ||
178 | fd = open(path, O_RDWR); | ||
179 | if (fd < 0) | ||
180 | return -errno; | ||
181 | |||
182 | while (count > 0) { | ||
183 | /* write it */ | ||
184 | ret = write(fd, value, count); | ||
185 | if (ret <= 0) { | ||
186 | if (ret == EAGAIN) | ||
187 | continue; | ||
188 | close(fd); | ||
189 | return -errno; | ||
190 | } | ||
191 | count -= ret; | ||
192 | } | ||
193 | |||
194 | /* close it */ | ||
195 | close(fd); | ||
196 | |||
197 | /* return success */ | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * read a debugfs entry | ||
203 | * returns the number of chars read or a negative errno | ||
204 | */ | ||
205 | int debugfs_read(const char *entry, char *buffer, size_t size) | ||
206 | { | ||
207 | char path[MAX_PATH+1]; | ||
208 | int ret; | ||
209 | int fd; | ||
210 | |||
211 | /* construct the path */ | ||
212 | snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry); | ||
213 | |||
214 | /* verify that it exists */ | ||
215 | ret = debugfs_valid_entry(path); | ||
216 | if (ret) | ||
217 | return ret; | ||
218 | |||
219 | /* open the debugfs entry */ | ||
220 | fd = open(path, O_RDONLY); | ||
221 | if (fd < 0) | ||
222 | return -errno; | ||
223 | |||
224 | do { | ||
225 | /* read it */ | ||
226 | ret = read(fd, buffer, size); | ||
227 | if (ret == 0) { | ||
228 | close(fd); | ||
229 | return EOF; | ||
230 | } | ||
231 | } while (ret < 0 && errno == EAGAIN); | ||
232 | |||
233 | /* close it */ | ||
234 | close(fd); | ||
235 | |||
236 | /* make *sure* there's a null character at the end */ | ||
237 | buffer[ret] = '\0'; | ||
238 | |||
239 | /* return the number of chars read */ | ||
240 | return ret; | ||
241 | } | ||
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h new file mode 100644 index 000000000000..3cd14f9ae784 --- /dev/null +++ b/tools/perf/util/debugfs.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __DEBUGFS_H__ | ||
2 | #define __DEBUGFS_H__ | ||
3 | |||
4 | #include <sys/mount.h> | ||
5 | |||
6 | #ifndef MAX_PATH | ||
7 | # define MAX_PATH 256 | ||
8 | #endif | ||
9 | |||
10 | #ifndef STR | ||
11 | # define _STR(x) #x | ||
12 | # define STR(x) _STR(x) | ||
13 | #endif | ||
14 | |||
15 | extern const char *debugfs_find_mountpoint(void); | ||
16 | extern int debugfs_valid_mountpoint(const char *debugfs); | ||
17 | extern int debugfs_valid_entry(const char *path); | ||
18 | extern int debugfs_mount(const char *mountpoint); | ||
19 | extern int debugfs_umount(void); | ||
20 | extern int debugfs_write(const char *entry, const char *value); | ||
21 | extern int debugfs_read(const char *entry, char *buffer, size_t size); | ||
22 | extern void debugfs_force_cleanup(void); | ||
23 | extern int debugfs_make_path(const char *element, char *buffer, int size); | ||
24 | |||
25 | #endif /* __DEBUGFS_H__ */ | ||
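
To show how these helpers fit together (an illustrative sketch, not part of the patch; "tracing/tracing_on" is merely an example entry), a caller first makes sure debugfs is available and then addresses entries relative to the discovered mount point:

#include "debugfs.h"

/* Hypothetical helper: write a value to the ftrace on/off switch. */
static int tracing_set(const char *value)
{
        int err = debugfs_mount(NULL);  /* reuses an existing mount if one is found */

        if (err)
                return err;

        return debugfs_write("tracing/tracing_on", value);
}

A caller would then simply invoke tracing_set("1") or tracing_set("0"); debugfs_write() builds the full path from the mount point and validates the entry before opening it.
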
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c new file mode 100644 index 000000000000..414b89d1bde9 --- /dev/null +++ b/tools/perf/util/event.c | |||
@@ -0,0 +1,312 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include "event.h" | ||
3 | #include "debug.h" | ||
4 | #include "string.h" | ||
5 | #include "thread.h" | ||
6 | |||
7 | static pid_t event__synthesize_comm(pid_t pid, int full, | ||
8 | int (*process)(event_t *event)) | ||
9 | { | ||
10 | event_t ev; | ||
11 | char filename[PATH_MAX]; | ||
12 | char bf[BUFSIZ]; | ||
13 | FILE *fp; | ||
14 | size_t size = 0; | ||
15 | DIR *tasks; | ||
16 | struct dirent dirent, *next; | ||
17 | pid_t tgid = 0; | ||
18 | |||
19 | snprintf(filename, sizeof(filename), "/proc/%d/status", pid); | ||
20 | |||
21 | fp = fopen(filename, "r"); | ||
22 | if (fp == NULL) { | ||
23 | out_race: | ||
24 | /* | ||
25 | * We raced with a task exiting - just return: | ||
26 | */ | ||
27 | pr_debug("couldn't open %s\n", filename); | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | memset(&ev.comm, 0, sizeof(ev.comm)); | ||
32 | while (!ev.comm.comm[0] || !ev.comm.pid) { | ||
33 | if (fgets(bf, sizeof(bf), fp) == NULL) | ||
34 | goto out_failure; | ||
35 | |||
36 | if (memcmp(bf, "Name:", 5) == 0) { | ||
37 | char *name = bf + 5; | ||
38 | while (*name && isspace(*name)) | ||
39 | ++name; | ||
40 | size = strlen(name) - 1; | ||
41 | memcpy(ev.comm.comm, name, size++); | ||
42 | } else if (memcmp(bf, "Tgid:", 5) == 0) { | ||
43 | char *tgids = bf + 5; | ||
44 | while (*tgids && isspace(*tgids)) | ||
45 | ++tgids; | ||
46 | tgid = ev.comm.pid = atoi(tgids); | ||
47 | } | ||
48 | } | ||
49 | |||
50 | ev.comm.header.type = PERF_RECORD_COMM; | ||
51 | size = ALIGN(size, sizeof(u64)); | ||
52 | ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size); | ||
53 | |||
54 | if (!full) { | ||
55 | ev.comm.tid = pid; | ||
56 | |||
57 | process(&ev); | ||
58 | goto out_fclose; | ||
59 | } | ||
60 | |||
61 | snprintf(filename, sizeof(filename), "/proc/%d/task", pid); | ||
62 | |||
63 | tasks = opendir(filename); | ||
64 | if (tasks == NULL) | ||
65 | goto out_race; | ||
66 | |||
67 | while (!readdir_r(tasks, &dirent, &next) && next) { | ||
68 | char *end; | ||
69 | pid = strtol(dirent.d_name, &end, 10); | ||
70 | if (*end) | ||
71 | continue; | ||
72 | |||
73 | ev.comm.tid = pid; | ||
74 | |||
75 | process(&ev); | ||
76 | } | ||
77 | closedir(tasks); | ||
78 | |||
79 | out_fclose: | ||
80 | fclose(fp); | ||
81 | return tgid; | ||
82 | |||
83 | out_failure: | ||
84 | pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); | ||
85 | return -1; | ||
86 | } | ||
87 | |||
88 | static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, | ||
89 | int (*process)(event_t *event)) | ||
90 | { | ||
91 | char filename[PATH_MAX]; | ||
92 | FILE *fp; | ||
93 | |||
94 | snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); | ||
95 | |||
96 | fp = fopen(filename, "r"); | ||
97 | if (fp == NULL) { | ||
98 | /* | ||
99 | * We raced with a task exiting - just return: | ||
100 | */ | ||
101 | pr_debug("couldn't open %s\n", filename); | ||
102 | return -1; | ||
103 | } | ||
104 | |||
105 | while (1) { | ||
106 | char bf[BUFSIZ], *pbf = bf; | ||
107 | event_t ev = { | ||
108 | .header = { .type = PERF_RECORD_MMAP }, | ||
109 | }; | ||
110 | int n; | ||
111 | size_t size; | ||
112 | if (fgets(bf, sizeof(bf), fp) == NULL) | ||
113 | break; | ||
114 | |||
115 | /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ | ||
116 | n = hex2u64(pbf, &ev.mmap.start); | ||
117 | if (n < 0) | ||
118 | continue; | ||
119 | pbf += n + 1; | ||
120 | n = hex2u64(pbf, &ev.mmap.len); | ||
121 | if (n < 0) | ||
122 | continue; | ||
123 | pbf += n + 3; | ||
124 | if (*pbf == 'x') { /* vm_exec */ | ||
125 | char *execname = strchr(bf, '/'); | ||
126 | |||
127 | /* Catch VDSO */ | ||
128 | if (execname == NULL) | ||
129 | execname = strstr(bf, "[vdso]"); | ||
130 | |||
131 | if (execname == NULL) | ||
132 | continue; | ||
133 | |||
134 | size = strlen(execname); | ||
135 | execname[size - 1] = '\0'; /* Remove \n */ | ||
136 | memcpy(ev.mmap.filename, execname, size); | ||
137 | size = ALIGN(size, sizeof(u64)); | ||
138 | ev.mmap.len -= ev.mmap.start; | ||
139 | ev.mmap.header.size = (sizeof(ev.mmap) - | ||
140 | (sizeof(ev.mmap.filename) - size)); | ||
141 | ev.mmap.pid = tgid; | ||
142 | ev.mmap.tid = pid; | ||
143 | |||
144 | process(&ev); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | fclose(fp); | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)) | ||
153 | { | ||
154 | pid_t tgid = event__synthesize_comm(pid, 1, process); | ||
155 | if (tgid == -1) | ||
156 | return -1; | ||
157 | return event__synthesize_mmap_events(pid, tgid, process); | ||
158 | } | ||
159 | |||
160 | void event__synthesize_threads(int (*process)(event_t *event)) | ||
161 | { | ||
162 | DIR *proc; | ||
163 | struct dirent dirent, *next; | ||
164 | |||
165 | proc = opendir("/proc"); | ||
166 | |||
167 | while (!readdir_r(proc, &dirent, &next) && next) { | ||
168 | char *end; | ||
169 | pid_t pid = strtol(dirent.d_name, &end, 10); | ||
170 | |||
171 | if (*end) /* only interested in proper numerical dirents */ | ||
172 | continue; | ||
173 | |||
174 | event__synthesize_thread(pid, process); | ||
175 | } | ||
176 | |||
177 | closedir(proc); | ||
178 | } | ||
179 | |||
180 | char *event__cwd; | ||
181 | int event__cwdlen; | ||
182 | |||
183 | struct events_stats event__stats; | ||
184 | |||
185 | int event__process_comm(event_t *self) | ||
186 | { | ||
187 | struct thread *thread = threads__findnew(self->comm.pid); | ||
188 | |||
189 | dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid); | ||
190 | |||
191 | if (thread == NULL || thread__set_comm(thread, self->comm.comm)) { | ||
192 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | ||
193 | return -1; | ||
194 | } | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | int event__process_lost(event_t *self) | ||
200 | { | ||
201 | dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); | ||
202 | event__stats.lost += self->lost.lost; | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | int event__process_mmap(event_t *self) | ||
207 | { | ||
208 | struct thread *thread = threads__findnew(self->mmap.pid); | ||
209 | struct map *map = map__new(&self->mmap, MAP__FUNCTION, | ||
210 | event__cwd, event__cwdlen); | ||
211 | |||
212 | dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", | ||
213 | self->mmap.pid, self->mmap.tid, | ||
214 | (void *)(long)self->mmap.start, | ||
215 | (void *)(long)self->mmap.len, | ||
216 | (void *)(long)self->mmap.pgoff, | ||
217 | self->mmap.filename); | ||
218 | |||
219 | if (thread == NULL || map == NULL) | ||
220 | dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); | ||
221 | else | ||
222 | thread__insert_map(thread, map); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | int event__process_task(event_t *self) | ||
228 | { | ||
229 | struct thread *thread = threads__findnew(self->fork.pid); | ||
230 | struct thread *parent = threads__findnew(self->fork.ppid); | ||
231 | |||
232 | dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, | ||
233 | self->fork.ppid, self->fork.ptid); | ||
234 | /* | ||
235 | * A thread clone will have the same PID for both parent and child. | ||
236 | */ | ||
237 | if (thread == parent) | ||
238 | return 0; | ||
239 | |||
240 | if (self->header.type == PERF_RECORD_EXIT) | ||
241 | return 0; | ||
242 | |||
243 | if (thread == NULL || parent == NULL || | ||
244 | thread__fork(thread, parent) < 0) { | ||
245 | dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); | ||
246 | return -1; | ||
247 | } | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | void thread__find_addr_location(struct thread *self, u8 cpumode, | ||
253 | enum map_type type, u64 addr, | ||
254 | struct addr_location *al, | ||
255 | symbol_filter_t filter) | ||
256 | { | ||
257 | struct thread *thread = al->thread = self; | ||
258 | |||
259 | al->addr = addr; | ||
260 | |||
261 | if (cpumode & PERF_RECORD_MISC_KERNEL) { | ||
262 | al->level = 'k'; | ||
263 | thread = kthread; | ||
264 | } else if (cpumode & PERF_RECORD_MISC_USER) | ||
265 | al->level = '.'; | ||
266 | else { | ||
267 | al->level = 'H'; | ||
268 | al->map = NULL; | ||
269 | al->sym = NULL; | ||
270 | return; | ||
271 | } | ||
272 | try_again: | ||
273 | al->map = thread__find_map(thread, type, al->addr); | ||
274 | if (al->map == NULL) { | ||
275 | /* | ||
276 | * If this is outside of all known maps, and is a negative | ||
277 | * address, try to look it up in the kernel dso, as it might be | ||
278 | * a vsyscall or vdso (which executes in user-mode). | ||
279 | * | ||
280 | * XXX This is nasty, we should have a symbol list in the | ||
281 | * "[vdso]" dso, but for now lets use the old trick of looking | ||
282 | * in the whole kernel symbol list. | ||
283 | */ | ||
284 | if ((long long)al->addr < 0 && thread != kthread) { | ||
285 | thread = kthread; | ||
286 | goto try_again; | ||
287 | } | ||
288 | al->sym = NULL; | ||
289 | } else { | ||
290 | al->addr = al->map->map_ip(al->map, al->addr); | ||
291 | al->sym = map__find_symbol(al->map, al->addr, filter); | ||
292 | } | ||
293 | } | ||
294 | |||
295 | int event__preprocess_sample(const event_t *self, struct addr_location *al, | ||
296 | symbol_filter_t filter) | ||
297 | { | ||
298 | u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | ||
299 | struct thread *thread = threads__findnew(self->ip.pid); | ||
300 | |||
301 | if (thread == NULL) | ||
302 | return -1; | ||
303 | |||
304 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | ||
305 | |||
306 | thread__find_addr_location(thread, cpumode, MAP__FUNCTION, | ||
307 | self->ip.ip, al, filter); | ||
308 | dump_printf(" ...... dso: %s\n", | ||
309 | al->map ? al->map->dso->long_name : | ||
310 | al->level == 'H' ? "[hypervisor]" : "<not found>"); | ||
311 | return 0; | ||
312 | } | ||
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2c9c26d6ded0..a4cc8105cf67 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -1,14 +1,10 @@ | |||
1 | #ifndef __PERF_RECORD_H | 1 | #ifndef __PERF_RECORD_H |
2 | #define __PERF_RECORD_H | 2 | #define __PERF_RECORD_H |
3 | |||
3 | #include "../perf.h" | 4 | #include "../perf.h" |
4 | #include "util.h" | 5 | #include "util.h" |
5 | #include <linux/list.h> | 6 | #include <linux/list.h> |
6 | 7 | #include <linux/rbtree.h> | |
7 | enum { | ||
8 | SHOW_KERNEL = 1, | ||
9 | SHOW_USER = 2, | ||
10 | SHOW_HV = 4, | ||
11 | }; | ||
12 | 8 | ||
13 | /* | 9 | /* |
14 | * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * | 10 | * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * |
@@ -65,6 +61,13 @@ struct sample_event{ | |||
65 | u64 array[]; | 61 | u64 array[]; |
66 | }; | 62 | }; |
67 | 63 | ||
64 | #define BUILD_ID_SIZE 20 | ||
65 | |||
66 | struct build_id_event { | ||
67 | struct perf_event_header header; | ||
68 | u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; | ||
69 | char filename[]; | ||
70 | }; | ||
68 | 71 | ||
69 | typedef union event_union { | 72 | typedef union event_union { |
70 | struct perf_event_header header; | 73 | struct perf_event_header header; |
@@ -77,12 +80,30 @@ typedef union event_union { | |||
77 | struct sample_event sample; | 80 | struct sample_event sample; |
78 | } event_t; | 81 | } event_t; |
79 | 82 | ||
83 | struct events_stats { | ||
84 | unsigned long total; | ||
85 | unsigned long lost; | ||
86 | }; | ||
87 | |||
88 | void event__print_totals(void); | ||
89 | |||
90 | enum map_type { | ||
91 | MAP__FUNCTION = 0, | ||
92 | |||
93 | MAP__NR_TYPES, | ||
94 | }; | ||
95 | |||
80 | struct map { | 96 | struct map { |
81 | struct list_head node; | 97 | union { |
98 | struct rb_node rb_node; | ||
99 | struct list_head node; | ||
100 | }; | ||
82 | u64 start; | 101 | u64 start; |
83 | u64 end; | 102 | u64 end; |
103 | enum map_type type; | ||
84 | u64 pgoff; | 104 | u64 pgoff; |
85 | u64 (*map_ip)(struct map *, u64); | 105 | u64 (*map_ip)(struct map *, u64); |
106 | u64 (*unmap_ip)(struct map *, u64); | ||
86 | struct dso *dso; | 107 | struct dso *dso; |
87 | }; | 108 | }; |
88 | 109 | ||
@@ -91,14 +112,48 @@ static inline u64 map__map_ip(struct map *map, u64 ip) | |||
91 | return ip - map->start + map->pgoff; | 112 | return ip - map->start + map->pgoff; |
92 | } | 113 | } |
93 | 114 | ||
94 | static inline u64 vdso__map_ip(struct map *map __used, u64 ip) | 115 | static inline u64 map__unmap_ip(struct map *map, u64 ip) |
116 | { | ||
117 | return ip + map->start - map->pgoff; | ||
118 | } | ||
119 | |||
120 | static inline u64 identity__map_ip(struct map *map __used, u64 ip) | ||
95 | { | 121 | { |
96 | return ip; | 122 | return ip; |
97 | } | 123 | } |
98 | 124 | ||
99 | struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen); | 125 | struct symbol; |
126 | |||
127 | typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); | ||
128 | |||
129 | void map__init(struct map *self, enum map_type type, | ||
130 | u64 start, u64 end, u64 pgoff, struct dso *dso); | ||
131 | struct map *map__new(struct mmap_event *event, enum map_type, | ||
132 | char *cwd, int cwdlen); | ||
133 | void map__delete(struct map *self); | ||
100 | struct map *map__clone(struct map *self); | 134 | struct map *map__clone(struct map *self); |
101 | int map__overlap(struct map *l, struct map *r); | 135 | int map__overlap(struct map *l, struct map *r); |
102 | size_t map__fprintf(struct map *self, FILE *fp); | 136 | size_t map__fprintf(struct map *self, FILE *fp); |
137 | struct symbol *map__find_symbol(struct map *self, u64 addr, | ||
138 | symbol_filter_t filter); | ||
139 | void map__fixup_start(struct map *self); | ||
140 | void map__fixup_end(struct map *self); | ||
141 | |||
142 | int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); | ||
143 | void event__synthesize_threads(int (*process)(event_t *event)); | ||
144 | |||
145 | extern char *event__cwd; | ||
146 | extern int event__cwdlen; | ||
147 | extern struct events_stats event__stats; | ||
148 | extern unsigned long event__total[PERF_RECORD_MAX]; | ||
149 | |||
150 | int event__process_comm(event_t *self); | ||
151 | int event__process_lost(event_t *self); | ||
152 | int event__process_mmap(event_t *self); | ||
153 | int event__process_task(event_t *self); | ||
154 | |||
155 | struct addr_location; | ||
156 | int event__preprocess_sample(const event_t *self, struct addr_location *al, | ||
157 | symbol_filter_t filter); | ||
103 | 158 | ||
104 | #endif | 159 | #endif /* __PERF_RECORD_H */ |
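
A sketch of how a sample callback would lean on the new event__preprocess_sample() helper (illustrative only, not in the patch; it assumes the full definition of struct addr_location and struct symbol's name field are visible via the symbol headers, and passes a NULL filter so every symbol is kept):

#include "event.h"
#include "debug.h"

/* Hypothetical sample handler built on the new preprocessing helper. */
static int report__sample(event_t *event)
{
        struct addr_location al;

        if (event__preprocess_sample(event, &al, NULL) < 0)
                return -1;

        /* al.map, al.sym and al.level now describe where the hit landed */
        if (al.sym != NULL)
                pr_debug("sample in %s (%c)\n", al.sym->name, al.level);

        return 0;
}
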
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h index effe25eb1545..31647ac92ed1 100644 --- a/tools/perf/util/exec_cmd.h +++ b/tools/perf/util/exec_cmd.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef PERF_EXEC_CMD_H | 1 | #ifndef __PERF_EXEC_CMD_H |
2 | #define PERF_EXEC_CMD_H | 2 | #define __PERF_EXEC_CMD_H |
3 | 3 | ||
4 | extern void perf_set_argv_exec_path(const char *exec_path); | 4 | extern void perf_set_argv_exec_path(const char *exec_path); |
5 | extern const char *perf_extract_argv0_path(const char *path); | 5 | extern const char *perf_extract_argv0_path(const char *path); |
@@ -10,4 +10,4 @@ extern int execv_perf_cmd(const char **argv); /* NULL terminated */ | |||
10 | extern int execl_perf_cmd(const char *cmd, ...); | 10 | extern int execl_perf_cmd(const char *cmd, ...); |
11 | extern const char *system_path(const char *path); | 11 | extern const char *system_path(const char *path); |
12 | 12 | ||
13 | #endif /* PERF_EXEC_CMD_H */ | 13 | #endif /* __PERF_EXEC_CMD_H */ |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e306857b2c2b..4805e6dfd23c 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -2,9 +2,15 @@ | |||
2 | #include <unistd.h> | 2 | #include <unistd.h> |
3 | #include <stdio.h> | 3 | #include <stdio.h> |
4 | #include <stdlib.h> | 4 | #include <stdlib.h> |
5 | #include <linux/list.h> | ||
5 | 6 | ||
6 | #include "util.h" | 7 | #include "util.h" |
7 | #include "header.h" | 8 | #include "header.h" |
9 | #include "../perf.h" | ||
10 | #include "trace-event.h" | ||
11 | #include "symbol.h" | ||
12 | #include "data_map.h" | ||
13 | #include "debug.h" | ||
8 | 14 | ||
9 | /* | 15 | /* |
10 | * Create new perf.data header attribute: | 16 | * Create new perf.data header attribute: |
@@ -13,32 +19,43 @@ struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) | |||
13 | { | 19 | { |
14 | struct perf_header_attr *self = malloc(sizeof(*self)); | 20 | struct perf_header_attr *self = malloc(sizeof(*self)); |
15 | 21 | ||
16 | if (!self) | 22 | if (self != NULL) { |
17 | die("nomem"); | 23 | self->attr = *attr; |
18 | 24 | self->ids = 0; | |
19 | self->attr = *attr; | 25 | self->size = 1; |
20 | self->ids = 0; | 26 | self->id = malloc(sizeof(u64)); |
21 | self->size = 1; | 27 | if (self->id == NULL) { |
22 | self->id = malloc(sizeof(u64)); | 28 | free(self); |
23 | 29 | self = NULL; | |
24 | if (!self->id) | 30 | } |
25 | die("nomem"); | 31 | } |
26 | 32 | ||
27 | return self; | 33 | return self; |
28 | } | 34 | } |
29 | 35 | ||
30 | void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) | 36 | void perf_header_attr__delete(struct perf_header_attr *self) |
37 | { | ||
38 | free(self->id); | ||
39 | free(self); | ||
40 | } | ||
41 | |||
42 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id) | ||
31 | { | 43 | { |
32 | int pos = self->ids; | 44 | int pos = self->ids; |
33 | 45 | ||
34 | self->ids++; | 46 | self->ids++; |
35 | if (self->ids > self->size) { | 47 | if (self->ids > self->size) { |
36 | self->size *= 2; | 48 | int nsize = self->size * 2; |
37 | self->id = realloc(self->id, self->size * sizeof(u64)); | 49 | u64 *nid = realloc(self->id, nsize * sizeof(u64)); |
38 | if (!self->id) | 50 | |
39 | die("nomem"); | 51 | if (nid == NULL) |
52 | return -1; | ||
53 | |||
54 | self->size = nsize; | ||
55 | self->id = nid; | ||
40 | } | 56 | } |
41 | self->id[pos] = id; | 57 | self->id[pos] = id; |
58 | return 0; | ||
42 | } | 59 | } |
43 | 60 | ||
44 | /* | 61 | /* |
@@ -46,42 +63,52 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) | |||
46 | */ | 63 | */ |
47 | struct perf_header *perf_header__new(void) | 64 | struct perf_header *perf_header__new(void) |
48 | { | 65 | { |
49 | struct perf_header *self = malloc(sizeof(*self)); | 66 | struct perf_header *self = zalloc(sizeof(*self)); |
50 | 67 | ||
51 | if (!self) | 68 | if (self != NULL) { |
52 | die("nomem"); | 69 | self->size = 1; |
70 | self->attr = malloc(sizeof(void *)); | ||
53 | 71 | ||
54 | self->frozen = 0; | 72 | if (self->attr == NULL) { |
73 | free(self); | ||
74 | self = NULL; | ||
75 | } | ||
76 | } | ||
55 | 77 | ||
56 | self->attrs = 0; | 78 | return self; |
57 | self->size = 1; | 79 | } |
58 | self->attr = malloc(sizeof(void *)); | ||
59 | 80 | ||
60 | if (!self->attr) | 81 | void perf_header__delete(struct perf_header *self) |
61 | die("nomem"); | 82 | { |
83 | int i; | ||
62 | 84 | ||
63 | self->data_offset = 0; | 85 | for (i = 0; i < self->attrs; ++i) |
64 | self->data_size = 0; | 86 | perf_header_attr__delete(self->attr[i]); |
65 | 87 | ||
66 | return self; | 88 | free(self->attr); |
89 | free(self); | ||
67 | } | 90 | } |
68 | 91 | ||
69 | void perf_header__add_attr(struct perf_header *self, | 92 | int perf_header__add_attr(struct perf_header *self, |
70 | struct perf_header_attr *attr) | 93 | struct perf_header_attr *attr) |
71 | { | 94 | { |
72 | int pos = self->attrs; | ||
73 | |||
74 | if (self->frozen) | 95 | if (self->frozen) |
75 | die("frozen"); | 96 | return -1; |
76 | 97 | ||
77 | self->attrs++; | 98 | if (self->attrs == self->size) { |
78 | if (self->attrs > self->size) { | 99 | int nsize = self->size * 2; |
79 | self->size *= 2; | 100 | struct perf_header_attr **nattr; |
80 | self->attr = realloc(self->attr, self->size * sizeof(void *)); | 101 | |
81 | if (!self->attr) | 102 | nattr = realloc(self->attr, nsize * sizeof(void *)); |
82 | die("nomem"); | 103 | if (nattr == NULL) |
104 | return -1; | ||
105 | |||
106 | self->size = nsize; | ||
107 | self->attr = nattr; | ||
83 | } | 108 | } |
84 | self->attr[pos] = attr; | 109 | |
110 | self->attr[self->attrs++] = attr; | ||
111 | return 0; | ||
85 | } | 112 | } |
86 | 113 | ||
87 | #define MAX_EVENT_NAME 64 | 114 | #define MAX_EVENT_NAME 64 |
@@ -97,7 +124,7 @@ static struct perf_trace_event_type *events; | |||
97 | void perf_header__push_event(u64 id, const char *name) | 124 | void perf_header__push_event(u64 id, const char *name) |
98 | { | 125 | { |
99 | if (strlen(name) > MAX_EVENT_NAME) | 126 | if (strlen(name) > MAX_EVENT_NAME) |
100 | printf("Event %s will be truncated\n", name); | 127 | pr_warning("Event %s will be truncated\n", name); |
101 | 128 | ||
102 | if (!events) { | 129 | if (!events) { |
103 | events = malloc(sizeof(struct perf_trace_event_type)); | 130 | events = malloc(sizeof(struct perf_trace_event_type)); |
@@ -128,44 +155,137 @@ static const char *__perf_magic = "PERFFILE"; | |||
128 | 155 | ||
129 | #define PERF_MAGIC (*(u64 *)__perf_magic) | 156 | #define PERF_MAGIC (*(u64 *)__perf_magic) |
130 | 157 | ||
131 | struct perf_file_section { | ||
132 | u64 offset; | ||
133 | u64 size; | ||
134 | }; | ||
135 | |||
136 | struct perf_file_attr { | 158 | struct perf_file_attr { |
137 | struct perf_event_attr attr; | 159 | struct perf_event_attr attr; |
138 | struct perf_file_section ids; | 160 | struct perf_file_section ids; |
139 | }; | 161 | }; |
140 | 162 | ||
141 | struct perf_file_header { | 163 | void perf_header__set_feat(struct perf_header *self, int feat) |
142 | u64 magic; | 164 | { |
143 | u64 size; | 165 | set_bit(feat, self->adds_features); |
144 | u64 attr_size; | 166 | } |
145 | struct perf_file_section attrs; | ||
146 | struct perf_file_section data; | ||
147 | struct perf_file_section event_types; | ||
148 | }; | ||
149 | 167 | ||
150 | static void do_write(int fd, void *buf, size_t size) | 168 | bool perf_header__has_feat(const struct perf_header *self, int feat) |
169 | { | ||
170 | return test_bit(feat, self->adds_features); | ||
171 | } | ||
172 | |||
173 | static int do_write(int fd, const void *buf, size_t size) | ||
151 | { | 174 | { |
152 | while (size) { | 175 | while (size) { |
153 | int ret = write(fd, buf, size); | 176 | int ret = write(fd, buf, size); |
154 | 177 | ||
155 | if (ret < 0) | 178 | if (ret < 0) |
156 | die("failed to write"); | 179 | return -errno; |
157 | 180 | ||
158 | size -= ret; | 181 | size -= ret; |
159 | buf += ret; | 182 | buf += ret; |
160 | } | 183 | } |
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static int __dsos__write_buildid_table(struct list_head *head, int fd) | ||
189 | { | ||
190 | struct dso *pos; | ||
191 | |||
192 | list_for_each_entry(pos, head, node) { | ||
193 | int err; | ||
194 | struct build_id_event b; | ||
195 | size_t len; | ||
196 | |||
197 | if (!pos->has_build_id) | ||
198 | continue; | ||
199 | len = pos->long_name_len + 1; | ||
200 | len = ALIGN(len, 64); | ||
201 | memset(&b, 0, sizeof(b)); | ||
202 | memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); | ||
203 | b.header.size = sizeof(b) + len; | ||
204 | err = do_write(fd, &b, sizeof(b)); | ||
205 | if (err < 0) | ||
206 | return err; | ||
207 | err = do_write(fd, pos->long_name, len); | ||
208 | if (err < 0) | ||
209 | return err; | ||
210 | } | ||
211 | |||
212 | return 0; | ||
161 | } | 213 | } |
162 | 214 | ||
163 | void perf_header__write(struct perf_header *self, int fd) | 215 | static int dsos__write_buildid_table(int fd) |
216 | { | ||
217 | int err = __dsos__write_buildid_table(&dsos__kernel, fd); | ||
218 | if (err == 0) | ||
219 | err = __dsos__write_buildid_table(&dsos__user, fd); | ||
220 | return err; | ||
221 | } | ||
222 | |||
223 | static int perf_header__adds_write(struct perf_header *self, int fd) | ||
224 | { | ||
225 | int nr_sections; | ||
226 | struct perf_file_section *feat_sec; | ||
227 | int sec_size; | ||
228 | u64 sec_start; | ||
229 | int idx = 0, err; | ||
230 | |||
231 | if (dsos__read_build_ids()) | ||
232 | perf_header__set_feat(self, HEADER_BUILD_ID); | ||
233 | |||
234 | nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); | ||
235 | if (!nr_sections) | ||
236 | return 0; | ||
237 | |||
238 | feat_sec = calloc(sizeof(*feat_sec), nr_sections); | ||
239 | if (feat_sec == NULL) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | sec_size = sizeof(*feat_sec) * nr_sections; | ||
243 | |||
244 | sec_start = self->data_offset + self->data_size; | ||
245 | lseek(fd, sec_start + sec_size, SEEK_SET); | ||
246 | |||
247 | if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { | ||
248 | struct perf_file_section *trace_sec; | ||
249 | |||
250 | trace_sec = &feat_sec[idx++]; | ||
251 | |||
252 | /* Write trace info */ | ||
253 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); | ||
254 | read_tracing_data(fd, attrs, nr_counters); | ||
255 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; | ||
256 | } | ||
257 | |||
258 | |||
259 | if (perf_header__has_feat(self, HEADER_BUILD_ID)) { | ||
260 | struct perf_file_section *buildid_sec; | ||
261 | |||
262 | buildid_sec = &feat_sec[idx++]; | ||
263 | |||
264 | /* Write build-ids */ | ||
265 | buildid_sec->offset = lseek(fd, 0, SEEK_CUR); | ||
266 | err = dsos__write_buildid_table(fd); | ||
267 | if (err < 0) { | ||
268 | pr_debug("failed to write buildid table\n"); | ||
269 | goto out_free; | ||
270 | } | ||
271 | buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; | ||
272 | } | ||
273 | |||
274 | lseek(fd, sec_start, SEEK_SET); | ||
275 | err = do_write(fd, feat_sec, sec_size); | ||
276 | if (err < 0) | ||
277 | pr_debug("failed to write feature section\n"); | ||
278 | out_free: | ||
279 | free(feat_sec); | ||
280 | return err; | ||
281 | } | ||
282 | |||
283 | int perf_header__write(struct perf_header *self, int fd, bool at_exit) | ||
164 | { | 284 | { |
165 | struct perf_file_header f_header; | 285 | struct perf_file_header f_header; |
166 | struct perf_file_attr f_attr; | 286 | struct perf_file_attr f_attr; |
167 | struct perf_header_attr *attr; | 287 | struct perf_header_attr *attr; |
168 | int i; | 288 | int i, err; |
169 | 289 | ||
170 | lseek(fd, sizeof(f_header), SEEK_SET); | 290 | lseek(fd, sizeof(f_header), SEEK_SET); |
171 | 291 | ||
@@ -174,7 +294,11 @@ void perf_header__write(struct perf_header *self, int fd) | |||
174 | attr = self->attr[i]; | 294 | attr = self->attr[i]; |
175 | 295 | ||
176 | attr->id_offset = lseek(fd, 0, SEEK_CUR); | 296 | attr->id_offset = lseek(fd, 0, SEEK_CUR); |
177 | do_write(fd, attr->id, attr->ids * sizeof(u64)); | 297 | err = do_write(fd, attr->id, attr->ids * sizeof(u64)); |
298 | if (err < 0) { | ||
299 | pr_debug("failed to write perf header\n"); | ||
300 | return err; | ||
301 | } | ||
178 | } | 302 | } |
179 | 303 | ||
180 | 304 | ||
@@ -190,17 +314,31 @@ void perf_header__write(struct perf_header *self, int fd) | |||
190 | .size = attr->ids * sizeof(u64), | 314 | .size = attr->ids * sizeof(u64), |
191 | } | 315 | } |
192 | }; | 316 | }; |
193 | do_write(fd, &f_attr, sizeof(f_attr)); | 317 | err = do_write(fd, &f_attr, sizeof(f_attr)); |
318 | if (err < 0) { | ||
319 | pr_debug("failed to write perf header attribute\n"); | ||
320 | return err; | ||
321 | } | ||
194 | } | 322 | } |
195 | 323 | ||
196 | self->event_offset = lseek(fd, 0, SEEK_CUR); | 324 | self->event_offset = lseek(fd, 0, SEEK_CUR); |
197 | self->event_size = event_count * sizeof(struct perf_trace_event_type); | 325 | self->event_size = event_count * sizeof(struct perf_trace_event_type); |
198 | if (events) | 326 | if (events) { |
199 | do_write(fd, events, self->event_size); | 327 | err = do_write(fd, events, self->event_size); |
200 | 328 | if (err < 0) { | |
329 | pr_debug("failed to write perf header events\n"); | ||
330 | return err; | ||
331 | } | ||
332 | } | ||
201 | 333 | ||
202 | self->data_offset = lseek(fd, 0, SEEK_CUR); | 334 | self->data_offset = lseek(fd, 0, SEEK_CUR); |
203 | 335 | ||
336 | if (at_exit) { | ||
337 | err = perf_header__adds_write(self, fd); | ||
338 | if (err < 0) | ||
339 | return err; | ||
340 | } | ||
341 | |||
204 | f_header = (struct perf_file_header){ | 342 | f_header = (struct perf_file_header){ |
205 | .magic = PERF_MAGIC, | 343 | .magic = PERF_MAGIC, |
206 | .size = sizeof(f_header), | 344 | .size = sizeof(f_header), |
@@ -219,11 +357,18 @@ void perf_header__write(struct perf_header *self, int fd) | |||
219 | }, | 357 | }, |
220 | }; | 358 | }; |
221 | 359 | ||
360 | memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); | ||
361 | |||
222 | lseek(fd, 0, SEEK_SET); | 362 | lseek(fd, 0, SEEK_SET); |
223 | do_write(fd, &f_header, sizeof(f_header)); | 363 | err = do_write(fd, &f_header, sizeof(f_header)); |
364 | if (err < 0) { | ||
365 | pr_debug("failed to write perf header\n"); | ||
366 | return err; | ||
367 | } | ||
224 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | 368 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); |
225 | 369 | ||
226 | self->frozen = 1; | 370 | self->frozen = 1; |
371 | return 0; | ||
227 | } | 372 | } |
228 | 373 | ||
229 | static void do_read(int fd, void *buf, size_t size) | 374 | static void do_read(int fd, void *buf, size_t size) |
@@ -241,22 +386,109 @@ static void do_read(int fd, void *buf, size_t size) | |||
241 | } | 386 | } |
242 | } | 387 | } |
243 | 388 | ||
244 | struct perf_header *perf_header__read(int fd) | 389 | int perf_header__process_sections(struct perf_header *self, int fd, |
390 | int (*process)(struct perf_file_section *self, | ||
391 | int feat, int fd)) | ||
392 | { | ||
393 | struct perf_file_section *feat_sec; | ||
394 | int nr_sections; | ||
395 | int sec_size; | ||
396 | int idx = 0; | ||
397 | int err = 0, feat = 1; | ||
398 | |||
399 | nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); | ||
400 | if (!nr_sections) | ||
401 | return 0; | ||
402 | |||
403 | feat_sec = calloc(sizeof(*feat_sec), nr_sections); | ||
404 | if (!feat_sec) | ||
405 | return -1; | ||
406 | |||
407 | sec_size = sizeof(*feat_sec) * nr_sections; | ||
408 | |||
409 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | ||
410 | |||
411 | do_read(fd, feat_sec, sec_size); | ||
412 | |||
413 | while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { | ||
414 | if (perf_header__has_feat(self, feat)) { | ||
415 | struct perf_file_section *sec = &feat_sec[idx++]; | ||
416 | |||
417 | err = process(sec, feat, fd); | ||
418 | if (err < 0) | ||
419 | break; | ||
420 | } | ||
421 | ++feat; | ||
422 | } | ||
423 | |||
424 | free(feat_sec); | ||
425 | return err; | ||
426 | }; | ||
427 | |||
428 | int perf_file_header__read(struct perf_file_header *self, | ||
429 | struct perf_header *ph, int fd) | ||
430 | { | ||
431 | lseek(fd, 0, SEEK_SET); | ||
432 | do_read(fd, self, sizeof(*self)); | ||
433 | |||
434 | if (self->magic != PERF_MAGIC || | ||
435 | self->attr_size != sizeof(struct perf_file_attr)) | ||
436 | return -1; | ||
437 | |||
438 | if (self->size != sizeof(*self)) { | ||
439 | /* Support the previous format */ | ||
440 | if (self->size == offsetof(typeof(*self), adds_features)) | ||
441 | bitmap_zero(self->adds_features, HEADER_FEAT_BITS); | ||
442 | else | ||
443 | return -1; | ||
444 | } | ||
445 | |||
446 | memcpy(&ph->adds_features, &self->adds_features, | ||
447 | sizeof(self->adds_features)); | ||
448 | |||
449 | ph->event_offset = self->event_types.offset; | ||
450 | ph->event_size = self->event_types.size; | ||
451 | ph->data_offset = self->data.offset; | ||
452 | ph->data_size = self->data.size; | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static int perf_file_section__process(struct perf_file_section *self, | ||
457 | int feat, int fd) | ||
458 | { | ||
459 | if (lseek(fd, self->offset, SEEK_SET) < 0) { | ||
460 | pr_debug("Failed to lseek to %Ld offset for feature %d, " | ||
461 | "continuing...\n", self->offset, feat); | ||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | switch (feat) { | ||
466 | case HEADER_TRACE_INFO: | ||
467 | trace_report(fd); | ||
468 | break; | ||
469 | |||
470 | case HEADER_BUILD_ID: | ||
471 | if (perf_header__read_build_ids(fd, self->offset, self->size)) | ||
472 | pr_debug("Failed to read buildids, continuing...\n"); | ||
473 | break; | ||
474 | default: | ||
475 | pr_debug("unknown feature %d, continuing...\n", feat); | ||
476 | } | ||
477 | |||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | int perf_header__read(struct perf_header *self, int fd) | ||
245 | { | 482 | { |
246 | struct perf_header *self = perf_header__new(); | ||
247 | struct perf_file_header f_header; | 483 | struct perf_file_header f_header; |
248 | struct perf_file_attr f_attr; | 484 | struct perf_file_attr f_attr; |
249 | u64 f_id; | 485 | u64 f_id; |
250 | |||
251 | int nr_attrs, nr_ids, i, j; | 486 | int nr_attrs, nr_ids, i, j; |
252 | 487 | ||
253 | lseek(fd, 0, SEEK_SET); | 488 | if (perf_file_header__read(&f_header, self, fd) < 0) { |
254 | do_read(fd, &f_header, sizeof(f_header)); | 489 | pr_debug("incompatible file format\n"); |
255 | 490 | return -EINVAL; | |
256 | if (f_header.magic != PERF_MAGIC || | 491 | } |
257 | f_header.size != sizeof(f_header) || | ||
258 | f_header.attr_size != sizeof(f_attr)) | ||
259 | die("incompatible file format"); | ||
260 | 492 | ||
261 | nr_attrs = f_header.attrs.size / sizeof(f_attr); | 493 | nr_attrs = f_header.attrs.size / sizeof(f_attr); |
262 | lseek(fd, f_header.attrs.offset, SEEK_SET); | 494 | lseek(fd, f_header.attrs.offset, SEEK_SET); |
@@ -269,6 +501,8 @@ struct perf_header *perf_header__read(int fd) | |||
269 | tmp = lseek(fd, 0, SEEK_CUR); | 501 | tmp = lseek(fd, 0, SEEK_CUR); |
270 | 502 | ||
271 | attr = perf_header_attr__new(&f_attr.attr); | 503 | attr = perf_header_attr__new(&f_attr.attr); |
504 | if (attr == NULL) | ||
505 | return -ENOMEM; | ||
272 | 506 | ||
273 | nr_ids = f_attr.ids.size / sizeof(u64); | 507 | nr_ids = f_attr.ids.size / sizeof(u64); |
274 | lseek(fd, f_attr.ids.offset, SEEK_SET); | 508 | lseek(fd, f_attr.ids.offset, SEEK_SET); |
@@ -276,31 +510,34 @@ struct perf_header *perf_header__read(int fd) | |||
276 | for (j = 0; j < nr_ids; j++) { | 510 | for (j = 0; j < nr_ids; j++) { |
277 | do_read(fd, &f_id, sizeof(f_id)); | 511 | do_read(fd, &f_id, sizeof(f_id)); |
278 | 512 | ||
279 | perf_header_attr__add_id(attr, f_id); | 513 | if (perf_header_attr__add_id(attr, f_id) < 0) { |
514 | perf_header_attr__delete(attr); | ||
515 | return -ENOMEM; | ||
516 | } | ||
280 | } | 517 | } |
281 | perf_header__add_attr(self, attr); | 518 | if (perf_header__add_attr(self, attr) < 0) { |
519 | perf_header_attr__delete(attr); | ||
520 | return -ENOMEM; | ||
521 | } | ||
522 | |||
282 | lseek(fd, tmp, SEEK_SET); | 523 | lseek(fd, tmp, SEEK_SET); |
283 | } | 524 | } |
284 | 525 | ||
285 | if (f_header.event_types.size) { | 526 | if (f_header.event_types.size) { |
286 | lseek(fd, f_header.event_types.offset, SEEK_SET); | 527 | lseek(fd, f_header.event_types.offset, SEEK_SET); |
287 | events = malloc(f_header.event_types.size); | 528 | events = malloc(f_header.event_types.size); |
288 | if (!events) | 529 | if (events == NULL) |
289 | die("nomem"); | 530 | return -ENOMEM; |
290 | do_read(fd, events, f_header.event_types.size); | 531 | do_read(fd, events, f_header.event_types.size); |
291 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); | 532 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); |
292 | } | 533 | } |
293 | self->event_offset = f_header.event_types.offset; | ||
294 | self->event_size = f_header.event_types.size; | ||
295 | 534 | ||
296 | self->data_offset = f_header.data.offset; | 535 | perf_header__process_sections(self, fd, perf_file_section__process); |
297 | self->data_size = f_header.data.size; | ||
298 | 536 | ||
299 | lseek(fd, self->data_offset, SEEK_SET); | 537 | lseek(fd, self->data_offset, SEEK_SET); |
300 | 538 | ||
301 | self->frozen = 1; | 539 | self->frozen = 1; |
302 | 540 | return 0; | |
303 | return self; | ||
304 | } | 541 | } |
305 | 542 | ||
306 | u64 perf_header__sample_type(struct perf_header *header) | 543 | u64 perf_header__sample_type(struct perf_header *header) |
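Note: the header.c changes above replace the old die()-based I/O with functions that return negative error codes, and add perf_header__process_sections(), which walks the feature sections recorded in the adds_features bitmap and hands each one to a caller-supplied callback. A minimal sketch of such a callback, using only the signature shown above (the debug output format is illustrative):

    static int show_section(struct perf_file_section *sec, int feat, int fd)
    {
        pr_debug("fd %d: feature %d at offset %Lu, %Lu bytes\n",
                 fd, feat, sec->offset, sec->size);
        return 0;   /* a negative return stops the walk, as the loop above shows */
    }

    /* usage: perf_header__process_sections(header, fd, show_section); */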
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a0761bc7863c..d1dbe2b79c42 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -1,10 +1,13 @@ | |||
1 | #ifndef _PERF_HEADER_H | 1 | #ifndef __PERF_HEADER_H |
2 | #define _PERF_HEADER_H | 2 | #define __PERF_HEADER_H |
3 | 3 | ||
4 | #include "../../../include/linux/perf_event.h" | 4 | #include "../../../include/linux/perf_event.h" |
5 | #include <sys/types.h> | 5 | #include <sys/types.h> |
6 | #include <stdbool.h> | ||
6 | #include "types.h" | 7 | #include "types.h" |
7 | 8 | ||
9 | #include <linux/bitmap.h> | ||
10 | |||
8 | struct perf_header_attr { | 11 | struct perf_header_attr { |
9 | struct perf_event_attr attr; | 12 | struct perf_event_attr attr; |
10 | int ids, size; | 13 | int ids, size; |
@@ -12,36 +15,71 @@ struct perf_header_attr { | |||
12 | off_t id_offset; | 15 | off_t id_offset; |
13 | }; | 16 | }; |
14 | 17 | ||
18 | enum { | ||
19 | HEADER_TRACE_INFO = 1, | ||
20 | HEADER_BUILD_ID, | ||
21 | HEADER_LAST_FEATURE, | ||
22 | }; | ||
23 | |||
24 | #define HEADER_FEAT_BITS 256 | ||
25 | |||
26 | struct perf_file_section { | ||
27 | u64 offset; | ||
28 | u64 size; | ||
29 | }; | ||
30 | |||
31 | struct perf_file_header { | ||
32 | u64 magic; | ||
33 | u64 size; | ||
34 | u64 attr_size; | ||
35 | struct perf_file_section attrs; | ||
36 | struct perf_file_section data; | ||
37 | struct perf_file_section event_types; | ||
38 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | ||
39 | }; | ||
40 | |||
41 | struct perf_header; | ||
42 | |||
43 | int perf_file_header__read(struct perf_file_header *self, | ||
44 | struct perf_header *ph, int fd); | ||
45 | |||
15 | struct perf_header { | 46 | struct perf_header { |
16 | int frozen; | 47 | int frozen; |
17 | int attrs, size; | 48 | int attrs, size; |
18 | struct perf_header_attr **attr; | 49 | struct perf_header_attr **attr; |
19 | s64 attr_offset; | 50 | s64 attr_offset; |
20 | u64 data_offset; | 51 | u64 data_offset; |
21 | u64 data_size; | 52 | u64 data_size; |
22 | u64 event_offset; | 53 | u64 event_offset; |
23 | u64 event_size; | 54 | u64 event_size; |
55 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | ||
24 | }; | 56 | }; |
25 | 57 | ||
26 | struct perf_header *perf_header__read(int fd); | 58 | struct perf_header *perf_header__new(void); |
27 | void perf_header__write(struct perf_header *self, int fd); | 59 | void perf_header__delete(struct perf_header *self); |
28 | 60 | ||
29 | void perf_header__add_attr(struct perf_header *self, | 61 | int perf_header__read(struct perf_header *self, int fd); |
30 | struct perf_header_attr *attr); | 62 | int perf_header__write(struct perf_header *self, int fd, bool at_exit); |
63 | |||
64 | int perf_header__add_attr(struct perf_header *self, | ||
65 | struct perf_header_attr *attr); | ||
31 | 66 | ||
32 | void perf_header__push_event(u64 id, const char *name); | 67 | void perf_header__push_event(u64 id, const char *name); |
33 | char *perf_header__find_event(u64 id); | 68 | char *perf_header__find_event(u64 id); |
34 | 69 | ||
70 | struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); | ||
71 | void perf_header_attr__delete(struct perf_header_attr *self); | ||
35 | 72 | ||
36 | struct perf_header_attr * | 73 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id); |
37 | perf_header_attr__new(struct perf_event_attr *attr); | ||
38 | void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); | ||
39 | 74 | ||
40 | u64 perf_header__sample_type(struct perf_header *header); | 75 | u64 perf_header__sample_type(struct perf_header *header); |
41 | struct perf_event_attr * | 76 | struct perf_event_attr * |
42 | perf_header__find_attr(u64 id, struct perf_header *header); | 77 | perf_header__find_attr(u64 id, struct perf_header *header); |
78 | void perf_header__set_feat(struct perf_header *self, int feat); | ||
79 | bool perf_header__has_feat(const struct perf_header *self, int feat); | ||
43 | 80 | ||
81 | int perf_header__process_sections(struct perf_header *self, int fd, | ||
82 | int (*process)(struct perf_file_section *self, | ||
83 | int feat, int fd)); | ||
44 | 84 | ||
45 | struct perf_header *perf_header__new(void); | 85 | #endif /* __PERF_HEADER_H */ |
46 | |||
47 | #endif /* _PERF_HEADER_H */ | ||
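Note: the new header.h API ties the pieces together: features are flagged with perf_header__set_feat(), tested with perf_header__has_feat(), and their on-disk layout is described by struct perf_file_section. A rough sketch of the write-side flow this implies, assuming perf record is the caller (not shown in this hunk):

    static int finish_header(struct perf_header *header, int fd)
    {
        /* mark which optional sections exist before the final write pass */
        perf_header__set_feat(header, HEADER_TRACE_INFO);
        perf_header__set_feat(header, HEADER_BUILD_ID);

        /* at_exit == true makes perf_header__write() emit the feature sections */
        return perf_header__write(header, fd, true);
    }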
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h index 7128783637b4..7f5c6dedd714 100644 --- a/tools/perf/util/help.h +++ b/tools/perf/util/help.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef HELP_H | 1 | #ifndef __PERF_HELP_H |
2 | #define HELP_H | 2 | #define __PERF_HELP_H |
3 | 3 | ||
4 | struct cmdnames { | 4 | struct cmdnames { |
5 | size_t alloc; | 5 | size_t alloc; |
@@ -26,4 +26,4 @@ int is_in_cmdlist(struct cmdnames *c, const char *s); | |||
26 | void list_commands(const char *title, struct cmdnames *main_cmds, | 26 | void list_commands(const char *title, struct cmdnames *main_cmds, |
27 | struct cmdnames *other_cmds); | 27 | struct cmdnames *other_cmds); |
28 | 28 | ||
29 | #endif /* HELP_H */ | 29 | #endif /* __PERF_HELP_H */ |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c new file mode 100644 index 000000000000..0ebf6ee16caa --- /dev/null +++ b/tools/perf/util/hist.c | |||
@@ -0,0 +1,202 @@ | |||
1 | #include "hist.h" | ||
2 | |||
3 | struct rb_root hist; | ||
4 | struct rb_root collapse_hists; | ||
5 | struct rb_root output_hists; | ||
6 | int callchain; | ||
7 | |||
8 | struct callchain_param callchain_param = { | ||
9 | .mode = CHAIN_GRAPH_REL, | ||
10 | .min_percent = 0.5 | ||
11 | }; | ||
12 | |||
13 | /* | ||
14 | * histogram, sorted on item, collects counts | ||
15 | */ | ||
16 | |||
17 | struct hist_entry *__hist_entry__add(struct addr_location *al, | ||
18 | struct symbol *sym_parent, | ||
19 | u64 count, bool *hit) | ||
20 | { | ||
21 | struct rb_node **p = &hist.rb_node; | ||
22 | struct rb_node *parent = NULL; | ||
23 | struct hist_entry *he; | ||
24 | struct hist_entry entry = { | ||
25 | .thread = al->thread, | ||
26 | .map = al->map, | ||
27 | .sym = al->sym, | ||
28 | .ip = al->addr, | ||
29 | .level = al->level, | ||
30 | .count = count, | ||
31 | .parent = sym_parent, | ||
32 | }; | ||
33 | int cmp; | ||
34 | |||
35 | while (*p != NULL) { | ||
36 | parent = *p; | ||
37 | he = rb_entry(parent, struct hist_entry, rb_node); | ||
38 | |||
39 | cmp = hist_entry__cmp(&entry, he); | ||
40 | |||
41 | if (!cmp) { | ||
42 | *hit = true; | ||
43 | return he; | ||
44 | } | ||
45 | |||
46 | if (cmp < 0) | ||
47 | p = &(*p)->rb_left; | ||
48 | else | ||
49 | p = &(*p)->rb_right; | ||
50 | } | ||
51 | |||
52 | he = malloc(sizeof(*he)); | ||
53 | if (!he) | ||
54 | return NULL; | ||
55 | *he = entry; | ||
56 | rb_link_node(&he->rb_node, parent, p); | ||
57 | rb_insert_color(&he->rb_node, &hist); | ||
58 | *hit = false; | ||
59 | return he; | ||
60 | } | ||
61 | |||
62 | int64_t | ||
63 | hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) | ||
64 | { | ||
65 | struct sort_entry *se; | ||
66 | int64_t cmp = 0; | ||
67 | |||
68 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
69 | cmp = se->cmp(left, right); | ||
70 | if (cmp) | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | return cmp; | ||
75 | } | ||
76 | |||
77 | int64_t | ||
78 | hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | ||
79 | { | ||
80 | struct sort_entry *se; | ||
81 | int64_t cmp = 0; | ||
82 | |||
83 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
84 | int64_t (*f)(struct hist_entry *, struct hist_entry *); | ||
85 | |||
86 | f = se->collapse ?: se->cmp; | ||
87 | |||
88 | cmp = f(left, right); | ||
89 | if (cmp) | ||
90 | break; | ||
91 | } | ||
92 | |||
93 | return cmp; | ||
94 | } | ||
95 | |||
96 | void hist_entry__free(struct hist_entry *he) | ||
97 | { | ||
98 | free(he); | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * collapse the histogram | ||
103 | */ | ||
104 | |||
105 | void collapse__insert_entry(struct hist_entry *he) | ||
106 | { | ||
107 | struct rb_node **p = &collapse_hists.rb_node; | ||
108 | struct rb_node *parent = NULL; | ||
109 | struct hist_entry *iter; | ||
110 | int64_t cmp; | ||
111 | |||
112 | while (*p != NULL) { | ||
113 | parent = *p; | ||
114 | iter = rb_entry(parent, struct hist_entry, rb_node); | ||
115 | |||
116 | cmp = hist_entry__collapse(iter, he); | ||
117 | |||
118 | if (!cmp) { | ||
119 | iter->count += he->count; | ||
120 | hist_entry__free(he); | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | if (cmp < 0) | ||
125 | p = &(*p)->rb_left; | ||
126 | else | ||
127 | p = &(*p)->rb_right; | ||
128 | } | ||
129 | |||
130 | rb_link_node(&he->rb_node, parent, p); | ||
131 | rb_insert_color(&he->rb_node, &collapse_hists); | ||
132 | } | ||
133 | |||
134 | void collapse__resort(void) | ||
135 | { | ||
136 | struct rb_node *next; | ||
137 | struct hist_entry *n; | ||
138 | |||
139 | if (!sort__need_collapse) | ||
140 | return; | ||
141 | |||
142 | next = rb_first(&hist); | ||
143 | while (next) { | ||
144 | n = rb_entry(next, struct hist_entry, rb_node); | ||
145 | next = rb_next(&n->rb_node); | ||
146 | |||
147 | rb_erase(&n->rb_node, &hist); | ||
148 | collapse__insert_entry(n); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * reverse the map, sort on count. | ||
154 | */ | ||
155 | |||
156 | void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) | ||
157 | { | ||
158 | struct rb_node **p = &output_hists.rb_node; | ||
159 | struct rb_node *parent = NULL; | ||
160 | struct hist_entry *iter; | ||
161 | |||
162 | if (callchain) | ||
163 | callchain_param.sort(&he->sorted_chain, &he->callchain, | ||
164 | min_callchain_hits, &callchain_param); | ||
165 | |||
166 | while (*p != NULL) { | ||
167 | parent = *p; | ||
168 | iter = rb_entry(parent, struct hist_entry, rb_node); | ||
169 | |||
170 | if (he->count > iter->count) | ||
171 | p = &(*p)->rb_left; | ||
172 | else | ||
173 | p = &(*p)->rb_right; | ||
174 | } | ||
175 | |||
176 | rb_link_node(&he->rb_node, parent, p); | ||
177 | rb_insert_color(&he->rb_node, &output_hists); | ||
178 | } | ||
179 | |||
180 | void output__resort(u64 total_samples) | ||
181 | { | ||
182 | struct rb_node *next; | ||
183 | struct hist_entry *n; | ||
184 | struct rb_root *tree = &hist; | ||
185 | u64 min_callchain_hits; | ||
186 | |||
187 | min_callchain_hits = | ||
188 | total_samples * (callchain_param.min_percent / 100); | ||
189 | |||
190 | if (sort__need_collapse) | ||
191 | tree = &collapse_hists; | ||
192 | |||
193 | next = rb_first(tree); | ||
194 | |||
195 | while (next) { | ||
196 | n = rb_entry(next, struct hist_entry, rb_node); | ||
197 | next = rb_next(&n->rb_node); | ||
198 | |||
199 | rb_erase(&n->rb_node, tree); | ||
200 | output__insert_entry(n, min_callchain_hits); | ||
201 | } | ||
202 | } | ||
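Note: hist.c factors the histogram bookkeeping out of the builtins: __hist_entry__add() accumulates entries keyed by the sort order, collapse__resort() merges entries that compare equal under the collapse comparators, and output__resort() rebuilds the output tree ordered by count. A sketch of the intended call sequence; the addr_location and sample count are assumed to come from the caller's event loop, which is not part of this file:

    bool hit;
    struct hist_entry *he = __hist_entry__add(&al, NULL, sample_count, &hit);

    if (he == NULL)
        return -ENOMEM;
    if (hit)                        /* existing entry returned; its count is untouched */
        he->count += sample_count;

    /* once all samples are in: */
    collapse__resort();             /* only does work when sort__need_collapse is set */
    output__resort(total_samples);  /* sorts by count, thresholds callchains */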
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h new file mode 100644 index 000000000000..3020db0c9292 --- /dev/null +++ b/tools/perf/util/hist.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #ifndef __PERF_HIST_H | ||
2 | #define __PERF_HIST_H | ||
3 | #include "../builtin.h" | ||
4 | |||
5 | #include "util.h" | ||
6 | |||
7 | #include "color.h" | ||
8 | #include <linux/list.h> | ||
9 | #include "cache.h" | ||
10 | #include <linux/rbtree.h> | ||
11 | #include "symbol.h" | ||
12 | #include "string.h" | ||
13 | #include "callchain.h" | ||
14 | #include "strlist.h" | ||
15 | #include "values.h" | ||
16 | |||
17 | #include "../perf.h" | ||
18 | #include "debug.h" | ||
19 | #include "header.h" | ||
20 | |||
21 | #include "parse-options.h" | ||
22 | #include "parse-events.h" | ||
23 | |||
24 | #include "thread.h" | ||
25 | #include "sort.h" | ||
26 | |||
27 | extern struct rb_root hist; | ||
28 | extern struct rb_root collapse_hists; | ||
29 | extern struct rb_root output_hists; | ||
30 | extern int callchain; | ||
31 | extern struct callchain_param callchain_param; | ||
32 | extern unsigned long total; | ||
33 | extern unsigned long total_mmap; | ||
34 | extern unsigned long total_comm; | ||
35 | extern unsigned long total_fork; | ||
36 | extern unsigned long total_unknown; | ||
37 | extern unsigned long total_lost; | ||
38 | |||
39 | struct hist_entry *__hist_entry__add(struct addr_location *al, | ||
40 | struct symbol *parent, | ||
41 | u64 count, bool *hit); | ||
42 | extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); | ||
43 | extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); | ||
44 | extern void hist_entry__free(struct hist_entry *); | ||
45 | extern void collapse__insert_entry(struct hist_entry *); | ||
46 | extern void collapse__resort(void); | ||
47 | extern void output__insert_entry(struct hist_entry *, u64); | ||
48 | extern void output__resort(u64); | ||
49 | |||
50 | #endif /* __PERF_HIST_H */ | ||
diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h new file mode 100644 index 000000000000..ed538942523d --- /dev/null +++ b/tools/perf/util/include/asm/asm-offsets.h | |||
@@ -0,0 +1 @@ | |||
/* stub */ | |||
diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h new file mode 100644 index 000000000000..58e9817ffae0 --- /dev/null +++ b/tools/perf/util/include/asm/bitops.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _PERF_ASM_BITOPS_H_ | ||
2 | #define _PERF_ASM_BITOPS_H_ | ||
3 | |||
4 | #include <sys/types.h> | ||
5 | #include "../../types.h" | ||
6 | #include <linux/compiler.h> | ||
7 | |||
8 | /* CHECKME: Not sure both always match */ | ||
9 | #define BITS_PER_LONG __WORDSIZE | ||
10 | |||
11 | #include "../../../../include/asm-generic/bitops/__fls.h" | ||
12 | #include "../../../../include/asm-generic/bitops/fls.h" | ||
13 | #include "../../../../include/asm-generic/bitops/fls64.h" | ||
14 | #include "../../../../include/asm-generic/bitops/__ffs.h" | ||
15 | #include "../../../../include/asm-generic/bitops/ffz.h" | ||
16 | #include "../../../../include/asm-generic/bitops/hweight.h" | ||
17 | |||
18 | #endif | ||
diff --git a/tools/perf/util/include/asm/bug.h b/tools/perf/util/include/asm/bug.h new file mode 100644 index 000000000000..7fcc6810adc2 --- /dev/null +++ b/tools/perf/util/include/asm/bug.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _PERF_ASM_GENERIC_BUG_H | ||
2 | #define _PERF_ASM_GENERIC_BUG_H | ||
3 | |||
4 | #define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0) | ||
5 | |||
6 | #define WARN(condition, format...) ({ \ | ||
7 | int __ret_warn_on = !!(condition); \ | ||
8 | if (unlikely(__ret_warn_on)) \ | ||
9 | __WARN_printf(format); \ | ||
10 | unlikely(__ret_warn_on); \ | ||
11 | }) | ||
12 | |||
13 | #define WARN_ONCE(condition, format...) ({ \ | ||
14 | static int __warned; \ | ||
15 | int __ret_warn_once = !!(condition); \ | ||
16 | \ | ||
17 | if (unlikely(__ret_warn_once)) \ | ||
18 | if (WARN(!__warned, format)) \ | ||
19 | __warned = 1; \ | ||
20 | unlikely(__ret_warn_once); \ | ||
21 | }) | ||
22 | #endif | ||
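Note: these userspace WARN()/WARN_ONCE() macros evaluate to the truth of the condition, so they can gate an early bail-out while printing to stderr. A tiny illustration (assumes unlikely() is provided by the perf compiler headers, as elsewhere in the tree):

    static int check_fd(int fd)
    {
        if (WARN(fd < 0, "bad fd: %d\n", fd))
            return -1;   /* WARN() returned non-zero because the condition held */
        return 0;
    }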
diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h new file mode 100644 index 000000000000..b722abe3a626 --- /dev/null +++ b/tools/perf/util/include/asm/byteorder.h | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <asm/types.h> | ||
2 | #include "../../../../include/linux/swab.h" | ||
diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h new file mode 100644 index 000000000000..ed538942523d --- /dev/null +++ b/tools/perf/util/include/asm/swab.h | |||
@@ -0,0 +1 @@ | |||
/* stub */ | |||
diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h new file mode 100644 index 000000000000..d0f72b8fcc35 --- /dev/null +++ b/tools/perf/util/include/asm/uaccess.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _PERF_ASM_UACCESS_H_ | ||
2 | #define _PERF_ASM_UACCESS_H_ | ||
3 | |||
4 | #define __get_user(src, dest) \ | ||
5 | ({ \ | ||
6 | (src) = *dest; \ | ||
7 | 0; \ | ||
8 | }) | ||
9 | |||
10 | #define get_user __get_user | ||
11 | |||
12 | #define access_ok(type, addr, size) 1 | ||
13 | |||
14 | #endif | ||
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h new file mode 100644 index 000000000000..94507639a8c4 --- /dev/null +++ b/tools/perf/util/include/linux/bitmap.h | |||
@@ -0,0 +1,3 @@ | |||
1 | #include "../../../../include/linux/bitmap.h" | ||
2 | #include "../../../../include/asm-generic/bitops/find.h" | ||
3 | #include <linux/errno.h> | ||
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h new file mode 100644 index 000000000000..8d63116e9435 --- /dev/null +++ b/tools/perf/util/include/linux/bitops.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _PERF_LINUX_BITOPS_H_ | ||
2 | #define _PERF_LINUX_BITOPS_H_ | ||
3 | |||
4 | #define __KERNEL__ | ||
5 | |||
6 | #define CONFIG_GENERIC_FIND_NEXT_BIT | ||
7 | #define CONFIG_GENERIC_FIND_FIRST_BIT | ||
8 | #include "../../../../include/linux/bitops.h" | ||
9 | |||
10 | #undef __KERNEL__ | ||
11 | |||
12 | static inline void set_bit(int nr, unsigned long *addr) | ||
13 | { | ||
14 | addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); | ||
15 | } | ||
16 | |||
17 | static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) | ||
18 | { | ||
19 | return ((1UL << (nr % BITS_PER_LONG)) & | ||
20 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | ||
21 | } | ||
22 | |||
23 | unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned | ||
24 | long size, unsigned long offset); | ||
25 | |||
26 | unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned | ||
27 | long size, unsigned long offset); | ||
28 | |||
29 | #endif | ||
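Note: this bitops shim, together with the bitmap.h wrapper above it, is what lets the header code keep a kernel-style feature bitmap in userspace. A short sketch of how the adds_features bitmap is driven with these helpers (this is presumably what perf_header__set_feat()/has_feat() boil down to; the printf is illustrative only):

    DECLARE_BITMAP(feats, HEADER_FEAT_BITS);

    bitmap_zero(feats, HEADER_FEAT_BITS);
    set_bit(HEADER_BUILD_ID, feats);              /* perf_header__set_feat() */
    if (test_bit(HEADER_BUILD_ID, feats))         /* perf_header__has_feat() */
        printf("%d feature section(s)\n", bitmap_weight(feats, HEADER_FEAT_BITS));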
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h new file mode 100644 index 000000000000..dfb0713ed47f --- /dev/null +++ b/tools/perf/util/include/linux/compiler.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _PERF_LINUX_COMPILER_H_ | ||
2 | #define _PERF_LINUX_COMPILER_H_ | ||
3 | |||
4 | #ifndef __always_inline | ||
5 | #define __always_inline inline | ||
6 | #endif | ||
7 | #define __user | ||
8 | #define __attribute_const__ | ||
9 | |||
10 | #endif | ||
diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h new file mode 100644 index 000000000000..a53d4ee1e0b7 --- /dev/null +++ b/tools/perf/util/include/linux/ctype.h | |||
@@ -0,0 +1 @@ | |||
#include "../util.h" | |||
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index a6b87390cb52..21c0274c02fa 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h | |||
@@ -1,6 +1,16 @@ | |||
1 | #ifndef PERF_LINUX_KERNEL_H_ | 1 | #ifndef PERF_LINUX_KERNEL_H_ |
2 | #define PERF_LINUX_KERNEL_H_ | 2 | #define PERF_LINUX_KERNEL_H_ |
3 | 3 | ||
4 | #include <stdarg.h> | ||
5 | #include <stdio.h> | ||
6 | #include <stdlib.h> | ||
7 | #include <assert.h> | ||
8 | |||
9 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | ||
10 | |||
11 | #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) | ||
12 | #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) | ||
13 | |||
4 | #ifndef offsetof | 14 | #ifndef offsetof |
5 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | 15 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) |
6 | #endif | 16 | #endif |
@@ -26,4 +36,70 @@ | |||
26 | _max1 > _max2 ? _max1 : _max2; }) | 36 | _max1 > _max2 ? _max1 : _max2; }) |
27 | #endif | 37 | #endif |
28 | 38 | ||
39 | #ifndef min | ||
40 | #define min(x, y) ({ \ | ||
41 | typeof(x) _min1 = (x); \ | ||
42 | typeof(y) _min2 = (y); \ | ||
43 | (void) (&_min1 == &_min2); \ | ||
44 | _min1 < _min2 ? _min1 : _min2; }) | ||
45 | #endif | ||
46 | |||
47 | #ifndef BUG_ON | ||
48 | #define BUG_ON(cond) assert(!(cond)) | ||
49 | #endif | ||
50 | |||
51 | /* | ||
52 | * Both need more care to handle endianness | ||
53 | * (Don't use bitmap_copy_le() for now) | ||
54 | */ | ||
55 | #define cpu_to_le64(x) (x) | ||
56 | #define cpu_to_le32(x) (x) | ||
57 | |||
58 | static inline int | ||
59 | vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | ||
60 | { | ||
61 | int i; | ||
62 | ssize_t ssize = size; | ||
63 | |||
64 | i = vsnprintf(buf, size, fmt, args); | ||
65 | |||
66 | return (i >= ssize) ? (ssize - 1) : i; | ||
67 | } | ||
68 | |||
69 | static inline int scnprintf(char * buf, size_t size, const char * fmt, ...) | ||
70 | { | ||
71 | va_list args; | ||
72 | ssize_t ssize = size; | ||
73 | int i; | ||
74 | |||
75 | va_start(args, fmt); | ||
76 | i = vsnprintf(buf, size, fmt, args); | ||
77 | va_end(args); | ||
78 | |||
79 | return (i >= ssize) ? (ssize - 1) : i; | ||
80 | } | ||
81 | |||
82 | static inline unsigned long | ||
83 | simple_strtoul(const char *nptr, char **endptr, int base) | ||
84 | { | ||
85 | return strtoul(nptr, endptr, base); | ||
86 | } | ||
87 | |||
88 | #ifndef pr_fmt | ||
89 | #define pr_fmt(fmt) fmt | ||
90 | #endif | ||
91 | |||
92 | #define pr_err(fmt, ...) \ | ||
93 | do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) | ||
94 | #define pr_warning(fmt, ...) \ | ||
95 | do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) | ||
96 | #define pr_info(fmt, ...) \ | ||
97 | do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) | ||
98 | #define pr_debug(fmt, ...) \ | ||
99 | eprintf(1, pr_fmt(fmt), ##__VA_ARGS__) | ||
100 | #define pr_debugN(n, fmt, ...) \ | ||
101 | eprintf(n, pr_fmt(fmt), ##__VA_ARGS__) | ||
102 | #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) | ||
103 | #define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__) | ||
104 | |||
29 | #endif | 105 | #endif |
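Note: the kernel.h additions port scnprintf()/vscnprintf(), min(), BUG_ON() and the pr_*() logging macros to userspace. The useful property of the scnprintf() shim is that it clamps its return value to what actually fit in the buffer, so callers can advance by the return value safely. A small, hypothetical illustration:

    char buf[8];
    int n = scnprintf(buf, sizeof(buf), "feature=%d", 12345);
    /* vsnprintf() would report 13 here; scnprintf() returns 7 (sizeof(buf) - 1),
     * so buf + n still points inside the buffer. */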
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h new file mode 100644 index 000000000000..3b2f5900276f --- /dev/null +++ b/tools/perf/util/include/linux/string.h | |||
@@ -0,0 +1 @@ | |||
#include <string.h> | |||
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h new file mode 100644 index 000000000000..196862a81a21 --- /dev/null +++ b/tools/perf/util/include/linux/types.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _PERF_LINUX_TYPES_H_ | ||
2 | #define _PERF_LINUX_TYPES_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | #define DECLARE_BITMAP(name,bits) \ | ||
7 | unsigned long name[BITS_TO_LONGS(bits)] | ||
8 | |||
9 | #endif | ||
diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h index 0173abeef52c..b0fcb6d8a881 100644 --- a/tools/perf/util/levenshtein.h +++ b/tools/perf/util/levenshtein.h | |||
@@ -1,8 +1,8 @@ | |||
1 | #ifndef LEVENSHTEIN_H | 1 | #ifndef __PERF_LEVENSHTEIN_H |
2 | #define LEVENSHTEIN_H | 2 | #define __PERF_LEVENSHTEIN_H |
3 | 3 | ||
4 | int levenshtein(const char *string1, const char *string2, | 4 | int levenshtein(const char *string1, const char *string2, |
5 | int swap_penalty, int substition_penalty, | 5 | int swap_penalty, int substition_penalty, |
6 | int insertion_penalty, int deletion_penalty); | 6 | int insertion_penalty, int deletion_penalty); |
7 | 7 | ||
8 | #endif | 8 | #endif /* __PERF_LEVENSHTEIN_H */ |
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 804e02382739..69f94fe9db20 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <stdlib.h> | 3 | #include <stdlib.h> |
4 | #include <string.h> | 4 | #include <string.h> |
5 | #include <stdio.h> | 5 | #include <stdio.h> |
6 | #include "debug.h" | ||
6 | 7 | ||
7 | static inline int is_anon_memory(const char *filename) | 8 | static inline int is_anon_memory(const char *filename) |
8 | { | 9 | { |
@@ -19,13 +20,28 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) | |||
19 | return n; | 20 | return n; |
20 | } | 21 | } |
21 | 22 | ||
22 | struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) | 23 | void map__init(struct map *self, enum map_type type, |
24 | u64 start, u64 end, u64 pgoff, struct dso *dso) | ||
25 | { | ||
26 | self->type = type; | ||
27 | self->start = start; | ||
28 | self->end = end; | ||
29 | self->pgoff = pgoff; | ||
30 | self->dso = dso; | ||
31 | self->map_ip = map__map_ip; | ||
32 | self->unmap_ip = map__unmap_ip; | ||
33 | RB_CLEAR_NODE(&self->rb_node); | ||
34 | } | ||
35 | |||
36 | struct map *map__new(struct mmap_event *event, enum map_type type, | ||
37 | char *cwd, int cwdlen) | ||
23 | { | 38 | { |
24 | struct map *self = malloc(sizeof(*self)); | 39 | struct map *self = malloc(sizeof(*self)); |
25 | 40 | ||
26 | if (self != NULL) { | 41 | if (self != NULL) { |
27 | const char *filename = event->filename; | 42 | const char *filename = event->filename; |
28 | char newfilename[PATH_MAX]; | 43 | char newfilename[PATH_MAX]; |
44 | struct dso *dso; | ||
29 | int anon; | 45 | int anon; |
30 | 46 | ||
31 | if (cwd) { | 47 | if (cwd) { |
@@ -45,18 +61,15 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) | |||
45 | filename = newfilename; | 61 | filename = newfilename; |
46 | } | 62 | } |
47 | 63 | ||
48 | self->start = event->start; | 64 | dso = dsos__findnew(filename); |
49 | self->end = event->start + event->len; | 65 | if (dso == NULL) |
50 | self->pgoff = event->pgoff; | ||
51 | |||
52 | self->dso = dsos__findnew(filename); | ||
53 | if (self->dso == NULL) | ||
54 | goto out_delete; | 66 | goto out_delete; |
55 | 67 | ||
68 | map__init(self, type, event->start, event->start + event->len, | ||
69 | event->pgoff, dso); | ||
70 | |||
56 | if (self->dso == vdso || anon) | 71 | if (self->dso == vdso || anon) |
57 | self->map_ip = vdso__map_ip; | 72 | self->map_ip = self->unmap_ip = identity__map_ip; |
58 | else | ||
59 | self->map_ip = map__map_ip; | ||
60 | } | 73 | } |
61 | return self; | 74 | return self; |
62 | out_delete: | 75 | out_delete: |
@@ -64,6 +77,72 @@ out_delete: | |||
64 | return NULL; | 77 | return NULL; |
65 | } | 78 | } |
66 | 79 | ||
80 | void map__delete(struct map *self) | ||
81 | { | ||
82 | free(self); | ||
83 | } | ||
84 | |||
85 | void map__fixup_start(struct map *self) | ||
86 | { | ||
87 | struct rb_root *symbols = &self->dso->symbols[self->type]; | ||
88 | struct rb_node *nd = rb_first(symbols); | ||
89 | if (nd != NULL) { | ||
90 | struct symbol *sym = rb_entry(nd, struct symbol, rb_node); | ||
91 | self->start = sym->start; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | void map__fixup_end(struct map *self) | ||
96 | { | ||
97 | struct rb_root *symbols = &self->dso->symbols[self->type]; | ||
98 | struct rb_node *nd = rb_last(symbols); | ||
99 | if (nd != NULL) { | ||
100 | struct symbol *sym = rb_entry(nd, struct symbol, rb_node); | ||
101 | self->end = sym->end; | ||
102 | } | ||
103 | } | ||
104 | |||
105 | #define DSO__DELETED "(deleted)" | ||
106 | |||
107 | struct symbol *map__find_symbol(struct map *self, u64 addr, | ||
108 | symbol_filter_t filter) | ||
109 | { | ||
110 | if (!dso__loaded(self->dso, self->type)) { | ||
111 | int nr = dso__load(self->dso, self, filter); | ||
112 | |||
113 | if (nr < 0) { | ||
114 | if (self->dso->has_build_id) { | ||
115 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | ||
116 | |||
117 | build_id__sprintf(self->dso->build_id, | ||
118 | sizeof(self->dso->build_id), | ||
119 | sbuild_id); | ||
120 | pr_warning("%s with build id %s not found", | ||
121 | self->dso->long_name, sbuild_id); | ||
122 | } else | ||
123 | pr_warning("Failed to open %s", | ||
124 | self->dso->long_name); | ||
125 | pr_warning(", continuing without symbols\n"); | ||
126 | return NULL; | ||
127 | } else if (nr == 0) { | ||
128 | const char *name = self->dso->long_name; | ||
129 | const size_t len = strlen(name); | ||
130 | const size_t real_len = len - sizeof(DSO__DELETED); | ||
131 | |||
132 | if (len > sizeof(DSO__DELETED) && | ||
133 | strcmp(name + real_len + 1, DSO__DELETED) == 0) { | ||
134 | pr_warning("%.*s was updated, restart the long running apps that use it!\n", | ||
135 | (int)real_len, name); | ||
136 | } else { | ||
137 | pr_warning("no symbols found in %s, maybe install a debug package?\n", name); | ||
138 | } | ||
139 | return NULL; | ||
140 | } | ||
141 | } | ||
142 | |||
143 | return self->dso->find_symbol(self->dso, self->type, addr); | ||
144 | } | ||
145 | |||
67 | struct map *map__clone(struct map *self) | 146 | struct map *map__clone(struct map *self) |
68 | { | 147 | { |
69 | struct map *map = malloc(sizeof(*self)); | 148 | struct map *map = malloc(sizeof(*self)); |
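Note: map.c now separates construction (map__init()/map__new() taking an explicit map type) from symbol lookup (map__find_symbol(), which lazily loads the DSO and prints the build-id/deleted-DSO warnings above). A rough lookup sketch; MAP__FUNCTION, the event union and the sample ip are assumptions about the surrounding code, not part of this hunk:

    struct map *map = map__new(&event->mmap, MAP__FUNCTION, cwd, cwdlen);
    if (map != NULL) {
        u64 addr = map->map_ip(map, ip);              /* map the sample ip into the DSO */
        struct symbol *sym = map__find_symbol(map, addr, NULL /* no filter */);
        if (sym == NULL)
            pr_debug("no symbol found for %#Lx\n", ip);
    }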
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c deleted file mode 100644 index 0d8c85defcd2..000000000000 --- a/tools/perf/util/module.c +++ /dev/null | |||
@@ -1,545 +0,0 @@ | |||
1 | #include "util.h" | ||
2 | #include "../perf.h" | ||
3 | #include "string.h" | ||
4 | #include "module.h" | ||
5 | |||
6 | #include <libelf.h> | ||
7 | #include <libgen.h> | ||
8 | #include <gelf.h> | ||
9 | #include <elf.h> | ||
10 | #include <dirent.h> | ||
11 | #include <sys/utsname.h> | ||
12 | |||
13 | static unsigned int crc32(const char *p, unsigned int len) | ||
14 | { | ||
15 | int i; | ||
16 | unsigned int crc = 0; | ||
17 | |||
18 | while (len--) { | ||
19 | crc ^= *p++; | ||
20 | for (i = 0; i < 8; i++) | ||
21 | crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); | ||
22 | } | ||
23 | return crc; | ||
24 | } | ||
25 | |||
26 | /* module section methods */ | ||
27 | |||
28 | struct sec_dso *sec_dso__new_dso(const char *name) | ||
29 | { | ||
30 | struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1); | ||
31 | |||
32 | if (self != NULL) { | ||
33 | strcpy(self->name, name); | ||
34 | self->secs = RB_ROOT; | ||
35 | self->find_section = sec_dso__find_section; | ||
36 | } | ||
37 | |||
38 | return self; | ||
39 | } | ||
40 | |||
41 | static void sec_dso__delete_section(struct section *self) | ||
42 | { | ||
43 | free(((void *)self)); | ||
44 | } | ||
45 | |||
46 | void sec_dso__delete_sections(struct sec_dso *self) | ||
47 | { | ||
48 | struct section *pos; | ||
49 | struct rb_node *next = rb_first(&self->secs); | ||
50 | |||
51 | while (next) { | ||
52 | pos = rb_entry(next, struct section, rb_node); | ||
53 | next = rb_next(&pos->rb_node); | ||
54 | rb_erase(&pos->rb_node, &self->secs); | ||
55 | sec_dso__delete_section(pos); | ||
56 | } | ||
57 | } | ||
58 | |||
59 | void sec_dso__delete_self(struct sec_dso *self) | ||
60 | { | ||
61 | sec_dso__delete_sections(self); | ||
62 | free(self); | ||
63 | } | ||
64 | |||
65 | static void sec_dso__insert_section(struct sec_dso *self, struct section *sec) | ||
66 | { | ||
67 | struct rb_node **p = &self->secs.rb_node; | ||
68 | struct rb_node *parent = NULL; | ||
69 | const u64 hash = sec->hash; | ||
70 | struct section *s; | ||
71 | |||
72 | while (*p != NULL) { | ||
73 | parent = *p; | ||
74 | s = rb_entry(parent, struct section, rb_node); | ||
75 | if (hash < s->hash) | ||
76 | p = &(*p)->rb_left; | ||
77 | else | ||
78 | p = &(*p)->rb_right; | ||
79 | } | ||
80 | rb_link_node(&sec->rb_node, parent, p); | ||
81 | rb_insert_color(&sec->rb_node, &self->secs); | ||
82 | } | ||
83 | |||
84 | struct section *sec_dso__find_section(struct sec_dso *self, const char *name) | ||
85 | { | ||
86 | struct rb_node *n; | ||
87 | u64 hash; | ||
88 | int len; | ||
89 | |||
90 | if (self == NULL) | ||
91 | return NULL; | ||
92 | |||
93 | len = strlen(name); | ||
94 | hash = crc32(name, len); | ||
95 | |||
96 | n = self->secs.rb_node; | ||
97 | |||
98 | while (n) { | ||
99 | struct section *s = rb_entry(n, struct section, rb_node); | ||
100 | |||
101 | if (hash < s->hash) | ||
102 | n = n->rb_left; | ||
103 | else if (hash > s->hash) | ||
104 | n = n->rb_right; | ||
105 | else { | ||
106 | if (!strcmp(name, s->name)) | ||
107 | return s; | ||
108 | else | ||
109 | n = rb_next(&s->rb_node); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | return NULL; | ||
114 | } | ||
115 | |||
116 | static size_t sec_dso__fprintf_section(struct section *self, FILE *fp) | ||
117 | { | ||
118 | return fprintf(fp, "name:%s vma:%llx path:%s\n", | ||
119 | self->name, self->vma, self->path); | ||
120 | } | ||
121 | |||
122 | size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp) | ||
123 | { | ||
124 | size_t ret = fprintf(fp, "dso: %s\n", self->name); | ||
125 | |||
126 | struct rb_node *nd; | ||
127 | for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) { | ||
128 | struct section *pos = rb_entry(nd, struct section, rb_node); | ||
129 | ret += sec_dso__fprintf_section(pos, fp); | ||
130 | } | ||
131 | |||
132 | return ret; | ||
133 | } | ||
134 | |||
135 | static struct section *section__new(const char *name, const char *path) | ||
136 | { | ||
137 | struct section *self = calloc(1, sizeof(*self)); | ||
138 | |||
139 | if (!self) | ||
140 | goto out_failure; | ||
141 | |||
142 | self->name = calloc(1, strlen(name) + 1); | ||
143 | if (!self->name) | ||
144 | goto out_failure; | ||
145 | |||
146 | self->path = calloc(1, strlen(path) + 1); | ||
147 | if (!self->path) | ||
148 | goto out_failure; | ||
149 | |||
150 | strcpy(self->name, name); | ||
151 | strcpy(self->path, path); | ||
152 | self->hash = crc32(self->name, strlen(name)); | ||
153 | |||
154 | return self; | ||
155 | |||
156 | out_failure: | ||
157 | if (self) { | ||
158 | if (self->name) | ||
159 | free(self->name); | ||
160 | if (self->path) | ||
161 | free(self->path); | ||
162 | free(self); | ||
163 | } | ||
164 | |||
165 | return NULL; | ||
166 | } | ||
167 | |||
168 | /* module methods */ | ||
169 | |||
170 | struct mod_dso *mod_dso__new_dso(const char *name) | ||
171 | { | ||
172 | struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1); | ||
173 | |||
174 | if (self != NULL) { | ||
175 | strcpy(self->name, name); | ||
176 | self->mods = RB_ROOT; | ||
177 | self->find_module = mod_dso__find_module; | ||
178 | } | ||
179 | |||
180 | return self; | ||
181 | } | ||
182 | |||
183 | static void mod_dso__delete_module(struct module *self) | ||
184 | { | ||
185 | free(((void *)self)); | ||
186 | } | ||
187 | |||
188 | void mod_dso__delete_modules(struct mod_dso *self) | ||
189 | { | ||
190 | struct module *pos; | ||
191 | struct rb_node *next = rb_first(&self->mods); | ||
192 | |||
193 | while (next) { | ||
194 | pos = rb_entry(next, struct module, rb_node); | ||
195 | next = rb_next(&pos->rb_node); | ||
196 | rb_erase(&pos->rb_node, &self->mods); | ||
197 | mod_dso__delete_module(pos); | ||
198 | } | ||
199 | } | ||
200 | |||
201 | void mod_dso__delete_self(struct mod_dso *self) | ||
202 | { | ||
203 | mod_dso__delete_modules(self); | ||
204 | free(self); | ||
205 | } | ||
206 | |||
207 | static void mod_dso__insert_module(struct mod_dso *self, struct module *mod) | ||
208 | { | ||
209 | struct rb_node **p = &self->mods.rb_node; | ||
210 | struct rb_node *parent = NULL; | ||
211 | const u64 hash = mod->hash; | ||
212 | struct module *m; | ||
213 | |||
214 | while (*p != NULL) { | ||
215 | parent = *p; | ||
216 | m = rb_entry(parent, struct module, rb_node); | ||
217 | if (hash < m->hash) | ||
218 | p = &(*p)->rb_left; | ||
219 | else | ||
220 | p = &(*p)->rb_right; | ||
221 | } | ||
222 | rb_link_node(&mod->rb_node, parent, p); | ||
223 | rb_insert_color(&mod->rb_node, &self->mods); | ||
224 | } | ||
225 | |||
226 | struct module *mod_dso__find_module(struct mod_dso *self, const char *name) | ||
227 | { | ||
228 | struct rb_node *n; | ||
229 | u64 hash; | ||
230 | int len; | ||
231 | |||
232 | if (self == NULL) | ||
233 | return NULL; | ||
234 | |||
235 | len = strlen(name); | ||
236 | hash = crc32(name, len); | ||
237 | |||
238 | n = self->mods.rb_node; | ||
239 | |||
240 | while (n) { | ||
241 | struct module *m = rb_entry(n, struct module, rb_node); | ||
242 | |||
243 | if (hash < m->hash) | ||
244 | n = n->rb_left; | ||
245 | else if (hash > m->hash) | ||
246 | n = n->rb_right; | ||
247 | else { | ||
248 | if (!strcmp(name, m->name)) | ||
249 | return m; | ||
250 | else | ||
251 | n = rb_next(&m->rb_node); | ||
252 | } | ||
253 | } | ||
254 | |||
255 | return NULL; | ||
256 | } | ||
257 | |||
258 | static size_t mod_dso__fprintf_module(struct module *self, FILE *fp) | ||
259 | { | ||
260 | return fprintf(fp, "name:%s path:%s\n", self->name, self->path); | ||
261 | } | ||
262 | |||
263 | size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp) | ||
264 | { | ||
265 | struct rb_node *nd; | ||
266 | size_t ret; | ||
267 | |||
268 | ret = fprintf(fp, "dso: %s\n", self->name); | ||
269 | |||
270 | for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) { | ||
271 | struct module *pos = rb_entry(nd, struct module, rb_node); | ||
272 | |||
273 | ret += mod_dso__fprintf_module(pos, fp); | ||
274 | } | ||
275 | |||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | static struct module *module__new(const char *name, const char *path) | ||
280 | { | ||
281 | struct module *self = calloc(1, sizeof(*self)); | ||
282 | |||
283 | if (!self) | ||
284 | goto out_failure; | ||
285 | |||
286 | self->name = calloc(1, strlen(name) + 1); | ||
287 | if (!self->name) | ||
288 | goto out_failure; | ||
289 | |||
290 | self->path = calloc(1, strlen(path) + 1); | ||
291 | if (!self->path) | ||
292 | goto out_failure; | ||
293 | |||
294 | strcpy(self->name, name); | ||
295 | strcpy(self->path, path); | ||
296 | self->hash = crc32(self->name, strlen(name)); | ||
297 | |||
298 | return self; | ||
299 | |||
300 | out_failure: | ||
301 | if (self) { | ||
302 | if (self->name) | ||
303 | free(self->name); | ||
304 | if (self->path) | ||
305 | free(self->path); | ||
306 | free(self); | ||
307 | } | ||
308 | |||
309 | return NULL; | ||
310 | } | ||
311 | |||
312 | static int mod_dso__load_sections(struct module *mod) | ||
313 | { | ||
314 | int count = 0, path_len; | ||
315 | struct dirent *entry; | ||
316 | char *line = NULL; | ||
317 | char *dir_path; | ||
318 | DIR *dir; | ||
319 | size_t n; | ||
320 | |||
321 | path_len = strlen("/sys/module/"); | ||
322 | path_len += strlen(mod->name); | ||
323 | path_len += strlen("/sections/"); | ||
324 | |||
325 | dir_path = calloc(1, path_len + 1); | ||
326 | if (dir_path == NULL) | ||
327 | goto out_failure; | ||
328 | |||
329 | strcat(dir_path, "/sys/module/"); | ||
330 | strcat(dir_path, mod->name); | ||
331 | strcat(dir_path, "/sections/"); | ||
332 | |||
333 | dir = opendir(dir_path); | ||
334 | if (dir == NULL) | ||
335 | goto out_free; | ||
336 | |||
337 | while ((entry = readdir(dir))) { | ||
338 | struct section *section; | ||
339 | char *path, *vma; | ||
340 | int line_len; | ||
341 | FILE *file; | ||
342 | |||
343 | if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name)) | ||
344 | continue; | ||
345 | |||
346 | path = calloc(1, path_len + strlen(entry->d_name) + 1); | ||
347 | if (path == NULL) | ||
348 | break; | ||
349 | strcat(path, dir_path); | ||
350 | strcat(path, entry->d_name); | ||
351 | |||
352 | file = fopen(path, "r"); | ||
353 | if (file == NULL) { | ||
354 | free(path); | ||
355 | break; | ||
356 | } | ||
357 | |||
358 | line_len = getline(&line, &n, file); | ||
359 | if (line_len < 0) { | ||
360 | free(path); | ||
361 | fclose(file); | ||
362 | break; | ||
363 | } | ||
364 | |||
365 | if (!line) { | ||
366 | free(path); | ||
367 | fclose(file); | ||
368 | break; | ||
369 | } | ||
370 | |||
371 | line[--line_len] = '\0'; /* \n */ | ||
372 | |||
373 | vma = strstr(line, "0x"); | ||
374 | if (!vma) { | ||
375 | free(path); | ||
376 | fclose(file); | ||
377 | break; | ||
378 | } | ||
379 | vma += 2; | ||
380 | |||
381 | section = section__new(entry->d_name, path); | ||
382 | if (!section) { | ||
383 | fprintf(stderr, "load_sections: allocation error\n"); | ||
384 | free(path); | ||
385 | fclose(file); | ||
386 | break; | ||
387 | } | ||
388 | |||
389 | hex2u64(vma, §ion->vma); | ||
390 | sec_dso__insert_section(mod->sections, section); | ||
391 | |||
392 | free(path); | ||
393 | fclose(file); | ||
394 | count++; | ||
395 | } | ||
396 | |||
397 | closedir(dir); | ||
398 | free(line); | ||
399 | free(dir_path); | ||
400 | |||
401 | return count; | ||
402 | |||
403 | out_free: | ||
404 | free(dir_path); | ||
405 | |||
406 | out_failure: | ||
407 | return count; | ||
408 | } | ||
409 | |||
410 | static int mod_dso__load_module_paths(struct mod_dso *self) | ||
411 | { | ||
412 | struct utsname uts; | ||
413 | int count = 0, len, err = -1; | ||
414 | char *line = NULL; | ||
415 | FILE *file; | ||
416 | char *dpath, *dir; | ||
417 | size_t n; | ||
418 | |||
419 | if (uname(&uts) < 0) | ||
420 | return err; | ||
421 | |||
422 | len = strlen("/lib/modules/"); | ||
423 | len += strlen(uts.release); | ||
424 | len += strlen("/modules.dep"); | ||
425 | |||
426 | dpath = calloc(1, len + 1); | ||
427 | if (dpath == NULL) | ||
428 | return err; | ||
429 | |||
430 | strcat(dpath, "/lib/modules/"); | ||
431 | strcat(dpath, uts.release); | ||
432 | strcat(dpath, "/modules.dep"); | ||
433 | |||
434 | file = fopen(dpath, "r"); | ||
435 | if (file == NULL) | ||
436 | goto out_failure; | ||
437 | |||
438 | dir = dirname(dpath); | ||
439 | if (!dir) | ||
440 | goto out_failure; | ||
441 | strcat(dir, "/"); | ||
442 | |||
443 | while (!feof(file)) { | ||
444 | struct module *module; | ||
445 | char *name, *path, *tmp; | ||
446 | FILE *modfile; | ||
447 | int line_len; | ||
448 | |||
449 | line_len = getline(&line, &n, file); | ||
450 | if (line_len < 0) | ||
451 | break; | ||
452 | |||
453 | if (!line) | ||
454 | break; | ||
455 | |||
456 | line[--line_len] = '\0'; /* \n */ | ||
457 | |||
458 | path = strchr(line, ':'); | ||
459 | if (!path) | ||
460 | break; | ||
461 | *path = '\0'; | ||
462 | |||
463 | path = strdup(line); | ||
464 | if (!path) | ||
465 | break; | ||
466 | |||
467 | if (!strstr(path, dir)) { | ||
468 | if (strncmp(path, "kernel/", 7)) | ||
469 | break; | ||
470 | |||
471 | free(path); | ||
472 | path = calloc(1, strlen(dir) + strlen(line) + 1); | ||
473 | if (!path) | ||
474 | break; | ||
475 | strcat(path, dir); | ||
476 | strcat(path, line); | ||
477 | } | ||
478 | |||
479 | modfile = fopen(path, "r"); | ||
480 | if (modfile == NULL) | ||
481 | break; | ||
482 | fclose(modfile); | ||
483 | |||
484 | name = strdup(path); | ||
485 | if (!name) | ||
486 | break; | ||
487 | |||
488 | name = strtok(name, "/"); | ||
489 | tmp = name; | ||
490 | |||
491 | while (tmp) { | ||
492 | tmp = strtok(NULL, "/"); | ||
493 | if (tmp) | ||
494 | name = tmp; | ||
495 | } | ||
496 | |||
497 | name = strsep(&name, "."); | ||
498 | if (!name) | ||
499 | break; | ||
500 | |||
501 | /* Quirk: replace '-' with '_' in all modules */ | ||
502 | for (len = strlen(name); len; len--) { | ||
503 | if (*(name+len) == '-') | ||
504 | *(name+len) = '_'; | ||
505 | } | ||
506 | |||
507 | module = module__new(name, path); | ||
508 | if (!module) | ||
509 | break; | ||
510 | mod_dso__insert_module(self, module); | ||
511 | |||
512 | module->sections = sec_dso__new_dso("sections"); | ||
513 | if (!module->sections) | ||
514 | break; | ||
515 | |||
516 | module->active = mod_dso__load_sections(module); | ||
517 | |||
518 | if (module->active > 0) | ||
519 | count++; | ||
520 | } | ||
521 | |||
522 | if (feof(file)) | ||
523 | err = count; | ||
524 | else | ||
525 | fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n"); | ||
526 | |||
527 | out_failure: | ||
528 | if (dpath) | ||
529 | free(dpath); | ||
530 | if (file) | ||
531 | fclose(file); | ||
532 | if (line) | ||
533 | free(line); | ||
534 | |||
535 | return err; | ||
536 | } | ||
537 | |||
538 | int mod_dso__load_modules(struct mod_dso *dso) | ||
539 | { | ||
540 | int err; | ||
541 | |||
542 | err = mod_dso__load_module_paths(dso); | ||
543 | |||
544 | return err; | ||
545 | } | ||
diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h deleted file mode 100644 index 8a592ef641ca..000000000000 --- a/tools/perf/util/module.h +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | #ifndef _PERF_MODULE_ | ||
2 | #define _PERF_MODULE_ 1 | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include "../types.h" | ||
6 | #include <linux/list.h> | ||
7 | #include <linux/rbtree.h> | ||
8 | |||
9 | struct section { | ||
10 | struct rb_node rb_node; | ||
11 | u64 hash; | ||
12 | u64 vma; | ||
13 | char *name; | ||
14 | char *path; | ||
15 | }; | ||
16 | |||
17 | struct sec_dso { | ||
18 | struct list_head node; | ||
19 | struct rb_root secs; | ||
20 | struct section *(*find_section)(struct sec_dso *, const char *name); | ||
21 | char name[0]; | ||
22 | }; | ||
23 | |||
24 | struct module { | ||
25 | struct rb_node rb_node; | ||
26 | u64 hash; | ||
27 | char *name; | ||
28 | char *path; | ||
29 | struct sec_dso *sections; | ||
30 | int active; | ||
31 | }; | ||
32 | |||
33 | struct mod_dso { | ||
34 | struct list_head node; | ||
35 | struct rb_root mods; | ||
36 | struct module *(*find_module)(struct mod_dso *, const char *name); | ||
37 | char name[0]; | ||
38 | }; | ||
39 | |||
40 | struct sec_dso *sec_dso__new_dso(const char *name); | ||
41 | void sec_dso__delete_sections(struct sec_dso *self); | ||
42 | void sec_dso__delete_self(struct sec_dso *self); | ||
43 | size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp); | ||
44 | struct section *sec_dso__find_section(struct sec_dso *self, const char *name); | ||
45 | |||
46 | struct mod_dso *mod_dso__new_dso(const char *name); | ||
47 | void mod_dso__delete_modules(struct mod_dso *self); | ||
48 | void mod_dso__delete_self(struct mod_dso *self); | ||
49 | size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp); | ||
50 | struct module *mod_dso__find_module(struct mod_dso *self, const char *name); | ||
51 | int mod_dso__load_modules(struct mod_dso *dso); | ||
52 | |||
53 | #endif /* _PERF_MODULE_ */ | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8cfb48cbbea0..9e5dbd66d34d 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -1,4 +1,4 @@ | |||
1 | 1 | #include "../../../include/linux/hw_breakpoint.h" | |
2 | #include "util.h" | 2 | #include "util.h" |
3 | #include "../perf.h" | 3 | #include "../perf.h" |
4 | #include "parse-options.h" | 4 | #include "parse-options.h" |
@@ -7,10 +7,12 @@ | |||
7 | #include "string.h" | 7 | #include "string.h" |
8 | #include "cache.h" | 8 | #include "cache.h" |
9 | #include "header.h" | 9 | #include "header.h" |
10 | #include "debugfs.h" | ||
10 | 11 | ||
11 | int nr_counters; | 12 | int nr_counters; |
12 | 13 | ||
13 | struct perf_event_attr attrs[MAX_COUNTERS]; | 14 | struct perf_event_attr attrs[MAX_COUNTERS]; |
15 | char *filters[MAX_COUNTERS]; | ||
14 | 16 | ||
15 | struct event_symbol { | 17 | struct event_symbol { |
16 | u8 type; | 18 | u8 type; |
@@ -46,6 +48,8 @@ static struct event_symbol event_symbols[] = { | |||
46 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, | 48 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, |
47 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, | 49 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, |
48 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, | 50 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, |
51 | { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, | ||
52 | { CSW(EMULATION_FAULTS), "emulation-faults", "" }, | ||
49 | }; | 53 | }; |
50 | 54 | ||
51 | #define __PERF_EVENT_FIELD(config, name) \ | 55 | #define __PERF_EVENT_FIELD(config, name) \ |
@@ -74,6 +78,8 @@ static const char *sw_event_names[] = { | |||
74 | "CPU-migrations", | 78 | "CPU-migrations", |
75 | "minor-faults", | 79 | "minor-faults", |
76 | "major-faults", | 80 | "major-faults", |
81 | "alignment-faults", | ||
82 | "emulation-faults", | ||
77 | }; | 83 | }; |
78 | 84 | ||
79 | #define MAX_ALIASES 8 | 85 | #define MAX_ALIASES 8 |
@@ -148,16 +154,6 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) | |||
148 | 154 | ||
149 | #define MAX_EVENT_LENGTH 512 | 155 | #define MAX_EVENT_LENGTH 512 |
150 | 156 | ||
151 | int valid_debugfs_mount(const char *debugfs) | ||
152 | { | ||
153 | struct statfs st_fs; | ||
154 | |||
155 | if (statfs(debugfs, &st_fs) < 0) | ||
156 | return -ENOENT; | ||
157 | else if (st_fs.f_type != (long) DEBUGFS_MAGIC) | ||
158 | return -ENOENT; | ||
159 | return 0; | ||
160 | } | ||
161 | 157 | ||
162 | struct tracepoint_path *tracepoint_id_to_path(u64 config) | 158 | struct tracepoint_path *tracepoint_id_to_path(u64 config) |
163 | { | 159 | { |
@@ -170,7 +166,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) | |||
170 | char evt_path[MAXPATHLEN]; | 166 | char evt_path[MAXPATHLEN]; |
171 | char dir_path[MAXPATHLEN]; | 167 | char dir_path[MAXPATHLEN]; |
172 | 168 | ||
173 | if (valid_debugfs_mount(debugfs_path)) | 169 | if (debugfs_valid_mountpoint(debugfs_path)) |
174 | return NULL; | 170 | return NULL; |
175 | 171 | ||
176 | sys_dir = opendir(debugfs_path); | 172 | sys_dir = opendir(debugfs_path); |
@@ -201,7 +197,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) | |||
201 | if (id == config) { | 197 | if (id == config) { |
202 | closedir(evt_dir); | 198 | closedir(evt_dir); |
203 | closedir(sys_dir); | 199 | closedir(sys_dir); |
204 | path = calloc(1, sizeof(path)); | 200 | path = zalloc(sizeof(path)); |
205 | path->system = malloc(MAX_EVENT_LENGTH); | 201 | path->system = malloc(MAX_EVENT_LENGTH); |
206 | if (!path->system) { | 202 | if (!path->system) { |
207 | free(path); | 203 | free(path); |
@@ -509,7 +505,7 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
509 | char sys_name[MAX_EVENT_LENGTH]; | 505 | char sys_name[MAX_EVENT_LENGTH]; |
510 | unsigned int sys_length, evt_length; | 506 | unsigned int sys_length, evt_length; |
511 | 507 | ||
512 | if (valid_debugfs_mount(debugfs_path)) | 508 | if (debugfs_valid_mountpoint(debugfs_path)) |
513 | return 0; | 509 | return 0; |
514 | 510 | ||
515 | evt_name = strchr(*strp, ':'); | 511 | evt_name = strchr(*strp, ':'); |
@@ -544,6 +540,81 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
544 | attr, strp); | 540 | attr, strp); |
545 | } | 541 | } |
546 | 542 | ||
543 | static enum event_result | ||
544 | parse_breakpoint_type(const char *type, const char **strp, | ||
545 | struct perf_event_attr *attr) | ||
546 | { | ||
547 | int i; | ||
548 | |||
549 | for (i = 0; i < 3; i++) { | ||
550 | if (!type[i]) | ||
551 | break; | ||
552 | |||
553 | switch (type[i]) { | ||
554 | case 'r': | ||
555 | attr->bp_type |= HW_BREAKPOINT_R; | ||
556 | break; | ||
557 | case 'w': | ||
558 | attr->bp_type |= HW_BREAKPOINT_W; | ||
559 | break; | ||
560 | case 'x': | ||
561 | attr->bp_type |= HW_BREAKPOINT_X; | ||
562 | break; | ||
563 | default: | ||
564 | return EVT_FAILED; | ||
565 | } | ||
566 | } | ||
567 | if (!attr->bp_type) /* Default */ | ||
568 | attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; | ||
569 | |||
570 | *strp = type + i; | ||
571 | |||
572 | return EVT_HANDLED; | ||
573 | } | ||
574 | |||
575 | static enum event_result | ||
576 | parse_breakpoint_event(const char **strp, struct perf_event_attr *attr) | ||
577 | { | ||
578 | const char *target; | ||
579 | const char *type; | ||
580 | char *endaddr; | ||
581 | u64 addr; | ||
582 | enum event_result err; | ||
583 | |||
584 | target = strchr(*strp, ':'); | ||
585 | if (!target) | ||
586 | return EVT_FAILED; | ||
587 | |||
588 | if (strncmp(*strp, "mem", target - *strp) != 0) | ||
589 | return EVT_FAILED; | ||
590 | |||
591 | target++; | ||
592 | |||
593 | addr = strtoull(target, &endaddr, 0); | ||
594 | if (target == endaddr) | ||
595 | return EVT_FAILED; | ||
596 | |||
597 | attr->bp_addr = addr; | ||
598 | *strp = endaddr; | ||
599 | |||
600 | type = strchr(target, ':'); | ||
601 | |||
602 | /* If no type is defined, just rw as default */ | ||
603 | if (!type) { | ||
604 | attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; | ||
605 | } else { | ||
606 | err = parse_breakpoint_type(++type, strp, attr); | ||
607 | if (err == EVT_FAILED) | ||
608 | return EVT_FAILED; | ||
609 | } | ||
610 | |||
611 | /* We should find a nice way to override the access type */ | ||
612 | attr->bp_len = HW_BREAKPOINT_LEN_4; | ||
613 | attr->type = PERF_TYPE_BREAKPOINT; | ||
614 | |||
615 | return EVT_HANDLED; | ||
616 | } | ||
617 | |||
547 | static int check_events(const char *str, unsigned int i) | 618 | static int check_events(const char *str, unsigned int i) |
548 | { | 619 | { |
549 | int n; | 620 | int n; |
@@ -677,6 +748,12 @@ parse_event_symbols(const char **str, struct perf_event_attr *attr) | |||
677 | if (ret != EVT_FAILED) | 748 | if (ret != EVT_FAILED) |
678 | goto modifier; | 749 | goto modifier; |
679 | 750 | ||
751 | ret = parse_breakpoint_event(str, attr); | ||
752 | if (ret != EVT_FAILED) | ||
753 | goto modifier; | ||
754 | |||
755 | fprintf(stderr, "invalid or unsupported event: '%s'\n", *str); | ||
756 | fprintf(stderr, "Run 'perf list' for a list of valid events\n"); | ||
680 | return EVT_FAILED; | 757 | return EVT_FAILED; |
681 | 758 | ||
682 | modifier: | 759 | modifier: |
@@ -708,7 +785,6 @@ static void store_event_type(const char *orgname) | |||
708 | perf_header__push_event(id, orgname); | 785 | perf_header__push_event(id, orgname); |
709 | } | 786 | } |
710 | 787 | ||
711 | |||
712 | int parse_events(const struct option *opt __used, const char *str, int unset __used) | 788 | int parse_events(const struct option *opt __used, const char *str, int unset __used) |
713 | { | 789 | { |
714 | struct perf_event_attr attr; | 790 | struct perf_event_attr attr; |
@@ -745,6 +821,28 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
745 | return 0; | 821 | return 0; |
746 | } | 822 | } |
747 | 823 | ||
824 | int parse_filter(const struct option *opt __used, const char *str, | ||
825 | int unset __used) | ||
826 | { | ||
827 | int i = nr_counters - 1; | ||
828 | int len = strlen(str); | ||
829 | |||
830 | if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { | ||
831 | fprintf(stderr, | ||
832 | "-F option should follow a -e tracepoint option\n"); | ||
833 | return -1; | ||
834 | } | ||
835 | |||
836 | filters[i] = malloc(len + 1); | ||
837 | if (!filters[i]) { | ||
838 | fprintf(stderr, "not enough memory to hold filter string\n"); | ||
839 | return -1; | ||
840 | } | ||
841 | strcpy(filters[i], str); | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
748 | static const char * const event_type_descriptors[] = { | 846 | static const char * const event_type_descriptors[] = { |
749 | "", | 847 | "", |
750 | "Hardware event", | 848 | "Hardware event", |
@@ -764,7 +862,7 @@ static void print_tracepoint_events(void) | |||
764 | char evt_path[MAXPATHLEN]; | 862 | char evt_path[MAXPATHLEN]; |
765 | char dir_path[MAXPATHLEN]; | 863 | char dir_path[MAXPATHLEN]; |
766 | 864 | ||
767 | if (valid_debugfs_mount(debugfs_path)) | 865 | if (debugfs_valid_mountpoint(debugfs_path)) |
768 | return; | 866 | return; |
769 | 867 | ||
770 | sys_dir = opendir(debugfs_path); | 868 | sys_dir = opendir(debugfs_path); |
@@ -782,7 +880,7 @@ static void print_tracepoint_events(void) | |||
782 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | 880 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { |
783 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | 881 | snprintf(evt_path, MAXPATHLEN, "%s:%s", |
784 | sys_dirent.d_name, evt_dirent.d_name); | 882 | sys_dirent.d_name, evt_dirent.d_name); |
785 | fprintf(stderr, " %-42s [%s]\n", evt_path, | 883 | printf(" %-42s [%s]\n", evt_path, |
786 | event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); | 884 | event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); |
787 | } | 885 | } |
788 | closedir(evt_dir); | 886 | closedir(evt_dir); |
@@ -799,8 +897,8 @@ void print_events(void) | |||
799 | unsigned int i, type, op, prev_type = -1; | 897 | unsigned int i, type, op, prev_type = -1; |
800 | char name[40]; | 898 | char name[40]; |
801 | 899 | ||
802 | fprintf(stderr, "\n"); | 900 | printf("\n"); |
803 | fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); | 901 | printf("List of pre-defined events (to be used in -e):\n"); |
804 | 902 | ||
805 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | 903 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { |
806 | type = syms->type + 1; | 904 | type = syms->type + 1; |
@@ -808,19 +906,19 @@ void print_events(void) | |||
808 | type = 0; | 906 | type = 0; |
809 | 907 | ||
810 | if (type != prev_type) | 908 | if (type != prev_type) |
811 | fprintf(stderr, "\n"); | 909 | printf("\n"); |
812 | 910 | ||
813 | if (strlen(syms->alias)) | 911 | if (strlen(syms->alias)) |
814 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); | 912 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); |
815 | else | 913 | else |
816 | strcpy(name, syms->symbol); | 914 | strcpy(name, syms->symbol); |
817 | fprintf(stderr, " %-42s [%s]\n", name, | 915 | printf(" %-42s [%s]\n", name, |
818 | event_type_descriptors[type]); | 916 | event_type_descriptors[type]); |
819 | 917 | ||
820 | prev_type = type; | 918 | prev_type = type; |
821 | } | 919 | } |
822 | 920 | ||
823 | fprintf(stderr, "\n"); | 921 | printf("\n"); |
824 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | 922 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { |
825 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | 923 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { |
826 | /* skip invalid cache type */ | 924 | /* skip invalid cache type */ |
@@ -828,17 +926,20 @@ void print_events(void) | |||
828 | continue; | 926 | continue; |
829 | 927 | ||
830 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | 928 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { |
831 | fprintf(stderr, " %-42s [%s]\n", | 929 | printf(" %-42s [%s]\n", |
832 | event_cache_name(type, op, i), | 930 | event_cache_name(type, op, i), |
833 | event_type_descriptors[4]); | 931 | event_type_descriptors[4]); |
834 | } | 932 | } |
835 | } | 933 | } |
836 | } | 934 | } |
837 | 935 | ||
838 | fprintf(stderr, "\n"); | 936 | printf("\n"); |
839 | fprintf(stderr, " %-42s [raw hardware event descriptor]\n", | 937 | printf(" %-42s [raw hardware event descriptor]\n", |
840 | "rNNN"); | 938 | "rNNN"); |
841 | fprintf(stderr, "\n"); | 939 | printf("\n"); |
940 | |||
941 | printf(" %-42s [hardware breakpoint]\n", "mem:<addr>[:access]"); | ||
942 | printf("\n"); | ||
842 | 943 | ||
843 | print_tracepoint_events(); | 944 | print_tracepoint_events(); |
844 | 945 | ||
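The new "mem:<addr>[:access]" event syntax above folds the access letters into perf_event_attr::bp_type and falls back to read-write when no letters are given. Below is a small self-contained sketch (not part of this patch) of that access-letter rule; the HW_BP_* constants and the sample string are stand-ins for the real HW_BREAKPOINT_* flags.

	#include <stdio.h>

	#define HW_BP_R 1	/* stand-ins for HW_BREAKPOINT_R/W/X */
	#define HW_BP_W 2
	#define HW_BP_X 4

	int main(void)
	{
		const char *type = "rw";	/* the part after "mem:<addr>:" */
		int bp_type = 0;
		int i;

		for (i = 0; i < 3 && type[i]; i++) {
			switch (type[i]) {
			case 'r': bp_type |= HW_BP_R; break;
			case 'w': bp_type |= HW_BP_W; break;
			case 'x': bp_type |= HW_BP_X; break;
			default:  return 1;	/* EVT_FAILED in the parser above */
			}
		}
		if (!bp_type)			/* no letters: default to read|write */
			bp_type = HW_BP_R | HW_BP_W;

		printf("bp_type = %d\n", bp_type);	/* prints 3 (read|write) */
		return 0;
	}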
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 30c608112845..b8c1f64bc935 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _PARSE_EVENTS_H | 1 | #ifndef __PERF_PARSE_EVENTS_H |
2 | #define _PARSE_EVENTS_H | 2 | #define __PERF_PARSE_EVENTS_H |
3 | /* | 3 | /* |
4 | * Parse symbolic events/counts passed in as options: | 4 | * Parse symbolic events/counts passed in as options: |
5 | */ | 5 | */ |
@@ -17,11 +17,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config); | |||
17 | extern int nr_counters; | 17 | extern int nr_counters; |
18 | 18 | ||
19 | extern struct perf_event_attr attrs[MAX_COUNTERS]; | 19 | extern struct perf_event_attr attrs[MAX_COUNTERS]; |
20 | extern char *filters[MAX_COUNTERS]; | ||
20 | 21 | ||
21 | extern const char *event_name(int ctr); | 22 | extern const char *event_name(int ctr); |
22 | extern const char *__event_name(int type, u64 config); | 23 | extern const char *__event_name(int type, u64 config); |
23 | 24 | ||
24 | extern int parse_events(const struct option *opt, const char *str, int unset); | 25 | extern int parse_events(const struct option *opt, const char *str, int unset); |
26 | extern int parse_filter(const struct option *opt, const char *str, int unset); | ||
25 | 27 | ||
26 | #define EVENTS_HELP_MAX (128*1024) | 28 | #define EVENTS_HELP_MAX (128*1024) |
27 | 29 | ||
@@ -31,4 +33,4 @@ extern char debugfs_path[]; | |||
31 | extern int valid_debugfs_mount(const char *debugfs); | 33 | extern int valid_debugfs_mount(const char *debugfs); |
32 | 34 | ||
33 | 35 | ||
34 | #endif /* _PARSE_EVENTS_H */ | 36 | #endif /* __PERF_PARSE_EVENTS_H */ |
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index 2ee248ff27e5..948805af43c2 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef PARSE_OPTIONS_H | 1 | #ifndef __PERF_PARSE_OPTIONS_H |
2 | #define PARSE_OPTIONS_H | 2 | #define __PERF_PARSE_OPTIONS_H |
3 | 3 | ||
4 | enum parse_opt_type { | 4 | enum parse_opt_type { |
5 | /* special types */ | 5 | /* special types */ |
@@ -174,4 +174,4 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int); | |||
174 | 174 | ||
175 | extern const char *parse_options_fix_filename(const char *prefix, const char *file); | 175 | extern const char *parse_options_fix_filename(const char *prefix, const char *file); |
176 | 176 | ||
177 | #endif | 177 | #endif /* __PERF_PARSE_OPTIONS_H */ |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c new file mode 100644 index 000000000000..cd7fbda5e2a5 --- /dev/null +++ b/tools/perf/util/probe-event.c | |||
@@ -0,0 +1,484 @@ | |||
1 | /* | ||
2 | * probe-event.c : perf-probe definition to kprobe_events format converter | ||
3 | * | ||
4 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #define _GNU_SOURCE | ||
23 | #include <sys/utsname.h> | ||
24 | #include <sys/types.h> | ||
25 | #include <sys/stat.h> | ||
26 | #include <fcntl.h> | ||
27 | #include <errno.h> | ||
28 | #include <stdio.h> | ||
29 | #include <unistd.h> | ||
30 | #include <stdlib.h> | ||
31 | #include <string.h> | ||
32 | #include <stdarg.h> | ||
33 | #include <limits.h> | ||
34 | |||
35 | #undef _GNU_SOURCE | ||
36 | #include "event.h" | ||
37 | #include "string.h" | ||
38 | #include "strlist.h" | ||
39 | #include "debug.h" | ||
40 | #include "parse-events.h" /* For debugfs_path */ | ||
41 | #include "probe-event.h" | ||
42 | |||
43 | #define MAX_CMDLEN 256 | ||
44 | #define MAX_PROBE_ARGS 128 | ||
45 | #define PERFPROBE_GROUP "probe" | ||
46 | |||
47 | #define semantic_error(msg ...) die("Semantic error: " msg) | ||
48 | |||
49 | /* If there is no space to write, returns -E2BIG. */ | ||
50 | static int e_snprintf(char *str, size_t size, const char *format, ...) | ||
51 | { | ||
52 | int ret; | ||
53 | va_list ap; | ||
54 | va_start(ap, format); | ||
55 | ret = vsnprintf(str, size, format, ap); | ||
56 | va_end(ap); | ||
57 | if (ret >= (int)size) | ||
58 | ret = -E2BIG; | ||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | /* Parse probepoint definition. */ | ||
63 | static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp) | ||
64 | { | ||
65 | char *ptr, *tmp; | ||
66 | char c, nc = 0; | ||
67 | /* | ||
68 | * <Syntax> | ||
69 | * perf probe SRC:LN | ||
70 | * perf probe FUNC[+OFFS|%return][@SRC] | ||
71 | */ | ||
72 | |||
73 | ptr = strpbrk(arg, ":+@%"); | ||
74 | if (ptr) { | ||
75 | nc = *ptr; | ||
76 | *ptr++ = '\0'; | ||
77 | } | ||
78 | |||
79 | /* Check arg is function or file and copy it */ | ||
80 | if (strchr(arg, '.')) /* File */ | ||
81 | pp->file = strdup(arg); | ||
82 | else /* Function */ | ||
83 | pp->function = strdup(arg); | ||
84 | DIE_IF(pp->file == NULL && pp->function == NULL); | ||
85 | |||
86 | /* Parse other options */ | ||
87 | while (ptr) { | ||
88 | arg = ptr; | ||
89 | c = nc; | ||
90 | ptr = strpbrk(arg, ":+@%"); | ||
91 | if (ptr) { | ||
92 | nc = *ptr; | ||
93 | *ptr++ = '\0'; | ||
94 | } | ||
95 | switch (c) { | ||
96 | case ':': /* Line number */ | ||
97 | pp->line = strtoul(arg, &tmp, 0); | ||
98 | if (*tmp != '\0') | ||
99 | semantic_error("There is non-digit charactor" | ||
100 | " in line number."); | ||
101 | break; | ||
102 | case '+': /* Byte offset from a symbol */ | ||
103 | pp->offset = strtoul(arg, &tmp, 0); | ||
104 | if (*tmp != '\0') | ||
105 | semantic_error("There is non-digit charactor" | ||
106 | " in offset."); | ||
107 | break; | ||
108 | case '@': /* File name */ | ||
109 | if (pp->file) | ||
110 | semantic_error("SRC@SRC is not allowed."); | ||
111 | pp->file = strdup(arg); | ||
112 | DIE_IF(pp->file == NULL); | ||
113 | if (ptr) | ||
114 | semantic_error("@SRC must be the last " | ||
115 | "option."); | ||
116 | break; | ||
117 | case '%': /* Probe places */ | ||
118 | if (strcmp(arg, "return") == 0) { | ||
119 | pp->retprobe = 1; | ||
120 | } else /* Others not supported yet */ | ||
121 | semantic_error("%%%s is not supported.", arg); | ||
122 | break; | ||
123 | default: | ||
124 | DIE_IF("Program has a bug."); | ||
125 | break; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | /* Exclusion check */ | ||
130 | if (pp->line && pp->offset) | ||
131 | semantic_error("Offset can't be used with line number."); | ||
132 | |||
133 | if (!pp->line && pp->file && !pp->function) | ||
134 | semantic_error("File always requires line number."); | ||
135 | |||
136 | if (pp->offset && !pp->function) | ||
137 | semantic_error("Offset requires an entry function."); | ||
138 | |||
139 | if (pp->retprobe && !pp->function) | ||
140 | semantic_error("Return probe requires an entry function."); | ||
141 | |||
142 | if ((pp->offset || pp->line) && pp->retprobe) | ||
143 | semantic_error("Offset/Line can't be used with return probe."); | ||
144 | |||
145 | pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n", | ||
146 | pp->function, pp->file, pp->line, pp->offset, pp->retprobe); | ||
147 | } | ||
148 | |||
149 | /* Parse perf-probe event definition */ | ||
150 | int parse_perf_probe_event(const char *str, struct probe_point *pp) | ||
151 | { | ||
152 | char **argv; | ||
153 | int argc, i, need_dwarf = 0; | ||
154 | |||
155 | argv = argv_split(str, &argc); | ||
156 | if (!argv) | ||
157 | die("argv_split failed."); | ||
158 | if (argc > MAX_PROBE_ARGS + 1) | ||
159 | semantic_error("Too many arguments"); | ||
160 | |||
161 | /* Parse probe point */ | ||
162 | parse_perf_probe_probepoint(argv[0], pp); | ||
163 | if (pp->file || pp->line) | ||
164 | need_dwarf = 1; | ||
165 | |||
166 | /* Copy arguments and ensure return probe has no C argument */ | ||
167 | pp->nr_args = argc - 1; | ||
168 | pp->args = zalloc(sizeof(char *) * pp->nr_args); | ||
169 | for (i = 0; i < pp->nr_args; i++) { | ||
170 | pp->args[i] = strdup(argv[i + 1]); | ||
171 | if (!pp->args[i]) | ||
172 | die("Failed to copy argument."); | ||
173 | if (is_c_varname(pp->args[i])) { | ||
174 | if (pp->retprobe) | ||
175 | semantic_error("You can't specify local" | ||
176 | " variable for kretprobe"); | ||
177 | need_dwarf = 1; | ||
178 | } | ||
179 | } | ||
180 | |||
181 | argv_free(argv); | ||
182 | return need_dwarf; | ||
183 | } | ||
184 | |||
185 | /* Parse kprobe_events event into struct probe_point */ | ||
186 | void parse_trace_kprobe_event(const char *str, char **group, char **event, | ||
187 | struct probe_point *pp) | ||
188 | { | ||
189 | char pr; | ||
190 | char *p; | ||
191 | int ret, i, argc; | ||
192 | char **argv; | ||
193 | |||
194 | pr_debug("Parsing kprobe_events: %s\n", str); | ||
195 | argv = argv_split(str, &argc); | ||
196 | if (!argv) | ||
197 | die("argv_split failed."); | ||
198 | if (argc < 2) | ||
199 | semantic_error("Too less arguments."); | ||
200 | |||
201 | /* Scan event and group name. */ | ||
202 | ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", | ||
203 | &pr, (float *)(void *)group, (float *)(void *)event); | ||
204 | if (ret != 3) | ||
205 | semantic_error("Failed to parse event name: %s", argv[0]); | ||
206 | pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr); | ||
207 | |||
208 | if (!pp) | ||
209 | goto end; | ||
210 | |||
211 | pp->retprobe = (pr == 'r'); | ||
212 | |||
213 | /* Scan function name and offset */ | ||
214 | ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, &pp->offset); | ||
215 | if (ret == 1) | ||
216 | pp->offset = 0; | ||
217 | |||
218 | /* kprobe_events doesn't have this information */ | ||
219 | pp->line = 0; | ||
220 | pp->file = NULL; | ||
221 | |||
222 | pp->nr_args = argc - 2; | ||
223 | pp->args = zalloc(sizeof(char *) * pp->nr_args); | ||
224 | for (i = 0; i < pp->nr_args; i++) { | ||
225 | p = strchr(argv[i + 2], '='); | ||
226 | if (p) /* We don't care which register is assigned. */ | ||
227 | *p = '\0'; | ||
228 | pp->args[i] = strdup(argv[i + 2]); | ||
229 | if (!pp->args[i]) | ||
230 | die("Failed to copy argument."); | ||
231 | } | ||
232 | |||
233 | end: | ||
234 | argv_free(argv); | ||
235 | } | ||
236 | |||
237 | int synthesize_perf_probe_event(struct probe_point *pp) | ||
238 | { | ||
239 | char *buf; | ||
240 | char offs[64] = "", line[64] = ""; | ||
241 | int i, len, ret; | ||
242 | |||
243 | pp->probes[0] = buf = zalloc(MAX_CMDLEN); | ||
244 | if (!buf) | ||
245 | die("Failed to allocate memory by zalloc."); | ||
246 | if (pp->offset) { | ||
247 | ret = e_snprintf(offs, 64, "+%d", pp->offset); | ||
248 | if (ret <= 0) | ||
249 | goto error; | ||
250 | } | ||
251 | if (pp->line) { | ||
252 | ret = e_snprintf(line, 64, ":%d", pp->line); | ||
253 | if (ret <= 0) | ||
254 | goto error; | ||
255 | } | ||
256 | |||
257 | if (pp->function) | ||
258 | ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function, | ||
259 | offs, pp->retprobe ? "%return" : "", line); | ||
260 | else | ||
261 | ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line); | ||
262 | if (ret <= 0) | ||
263 | goto error; | ||
264 | len = ret; | ||
265 | |||
266 | for (i = 0; i < pp->nr_args; i++) { | ||
267 | ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", | ||
268 | pp->args[i]); | ||
269 | if (ret <= 0) | ||
270 | goto error; | ||
271 | len += ret; | ||
272 | } | ||
273 | pp->found = 1; | ||
274 | |||
275 | return pp->found; | ||
276 | error: | ||
277 | free(pp->probes[0]); | ||
278 | |||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | int synthesize_trace_kprobe_event(struct probe_point *pp) | ||
283 | { | ||
284 | char *buf; | ||
285 | int i, len, ret; | ||
286 | |||
287 | pp->probes[0] = buf = zalloc(MAX_CMDLEN); | ||
288 | if (!buf) | ||
289 | die("Failed to allocate memory by zalloc."); | ||
290 | ret = e_snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); | ||
291 | if (ret <= 0) | ||
292 | goto error; | ||
293 | len = ret; | ||
294 | |||
295 | for (i = 0; i < pp->nr_args; i++) { | ||
296 | ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", | ||
297 | pp->args[i]); | ||
298 | if (ret <= 0) | ||
299 | goto error; | ||
300 | len += ret; | ||
301 | } | ||
302 | pp->found = 1; | ||
303 | |||
304 | return pp->found; | ||
305 | error: | ||
306 | free(pp->probes[0]); | ||
307 | |||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | static int open_kprobe_events(int flags, int mode) | ||
312 | { | ||
313 | char buf[PATH_MAX]; | ||
314 | int ret; | ||
315 | |||
316 | ret = e_snprintf(buf, PATH_MAX, "%s/../kprobe_events", debugfs_path); | ||
317 | if (ret < 0) | ||
318 | die("Failed to make kprobe_events path."); | ||
319 | |||
320 | ret = open(buf, flags, mode); | ||
321 | if (ret < 0) { | ||
322 | if (errno == ENOENT) | ||
323 | die("kprobe_events file does not exist -" | ||
324 | " please rebuild with CONFIG_KPROBE_TRACER."); | ||
325 | else | ||
326 | die("Could not open kprobe_events file: %s", | ||
327 | strerror(errno)); | ||
328 | } | ||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | /* Get raw string list of current kprobe_events */ | ||
333 | static struct strlist *get_trace_kprobe_event_rawlist(int fd) | ||
334 | { | ||
335 | int ret, idx; | ||
336 | FILE *fp; | ||
337 | char buf[MAX_CMDLEN]; | ||
338 | char *p; | ||
339 | struct strlist *sl; | ||
340 | |||
341 | sl = strlist__new(true, NULL); | ||
342 | |||
343 | fp = fdopen(dup(fd), "r"); | ||
344 | while (!feof(fp)) { | ||
345 | p = fgets(buf, MAX_CMDLEN, fp); | ||
346 | if (!p) | ||
347 | break; | ||
348 | |||
349 | idx = strlen(p) - 1; | ||
350 | if (p[idx] == '\n') | ||
351 | p[idx] = '\0'; | ||
352 | ret = strlist__add(sl, buf); | ||
353 | if (ret < 0) | ||
354 | die("strlist__add failed: %s", strerror(-ret)); | ||
355 | } | ||
356 | fclose(fp); | ||
357 | |||
358 | return sl; | ||
359 | } | ||
360 | |||
361 | /* Free and zero clear probe_point */ | ||
362 | static void clear_probe_point(struct probe_point *pp) | ||
363 | { | ||
364 | int i; | ||
365 | |||
366 | if (pp->function) | ||
367 | free(pp->function); | ||
368 | if (pp->file) | ||
369 | free(pp->file); | ||
370 | for (i = 0; i < pp->nr_args; i++) | ||
371 | free(pp->args[i]); | ||
372 | if (pp->args) | ||
373 | free(pp->args); | ||
374 | for (i = 0; i < pp->found; i++) | ||
375 | free(pp->probes[i]); | ||
376 | memset(pp, 0, sizeof(*pp)); | ||
377 | } | ||
378 | |||
379 | /* List current perf-probe events */ | ||
380 | void show_perf_probe_events(void) | ||
381 | { | ||
382 | unsigned int i; | ||
383 | int fd; | ||
384 | char *group, *event; | ||
385 | struct probe_point pp; | ||
386 | struct strlist *rawlist; | ||
387 | struct str_node *ent; | ||
388 | |||
389 | fd = open_kprobe_events(O_RDONLY, 0); | ||
390 | rawlist = get_trace_kprobe_event_rawlist(fd); | ||
391 | close(fd); | ||
392 | |||
393 | for (i = 0; i < strlist__nr_entries(rawlist); i++) { | ||
394 | ent = strlist__entry(rawlist, i); | ||
395 | parse_trace_kprobe_event(ent->s, &group, &event, &pp); | ||
396 | synthesize_perf_probe_event(&pp); | ||
397 | printf("[%s:%s]\t%s\n", group, event, pp.probes[0]); | ||
398 | free(group); | ||
399 | free(event); | ||
400 | clear_probe_point(&pp); | ||
401 | } | ||
402 | |||
403 | strlist__delete(rawlist); | ||
404 | } | ||
405 | |||
406 | /* Get current perf-probe event names */ | ||
407 | static struct strlist *get_perf_event_names(int fd) | ||
408 | { | ||
409 | unsigned int i; | ||
410 | char *group, *event; | ||
411 | struct strlist *sl, *rawlist; | ||
412 | struct str_node *ent; | ||
413 | |||
414 | rawlist = get_trace_kprobe_event_rawlist(fd); | ||
415 | |||
416 | sl = strlist__new(false, NULL); | ||
417 | for (i = 0; i < strlist__nr_entries(rawlist); i++) { | ||
418 | ent = strlist__entry(rawlist, i); | ||
419 | parse_trace_kprobe_event(ent->s, &group, &event, NULL); | ||
420 | strlist__add(sl, event); | ||
421 | free(group); | ||
422 | } | ||
423 | |||
424 | strlist__delete(rawlist); | ||
425 | |||
426 | return sl; | ||
427 | } | ||
428 | |||
429 | static int write_trace_kprobe_event(int fd, const char *buf) | ||
430 | { | ||
431 | int ret; | ||
432 | |||
433 | ret = write(fd, buf, strlen(buf)); | ||
434 | if (ret <= 0) | ||
435 | die("Failed to create event."); | ||
436 | else | ||
437 | printf("Added new event: %s\n", buf); | ||
438 | |||
439 | return ret; | ||
440 | } | ||
441 | |||
442 | static void get_new_event_name(char *buf, size_t len, const char *base, | ||
443 | struct strlist *namelist) | ||
444 | { | ||
445 | int i, ret; | ||
446 | for (i = 0; i < MAX_EVENT_INDEX; i++) { | ||
447 | ret = e_snprintf(buf, len, "%s_%d", base, i); | ||
448 | if (ret < 0) | ||
449 | die("snprintf() failed: %s", strerror(-ret)); | ||
450 | if (!strlist__has_entry(namelist, buf)) | ||
451 | break; | ||
452 | } | ||
453 | if (i == MAX_EVENT_INDEX) | ||
454 | die("Too many events are on the same function."); | ||
455 | } | ||
456 | |||
457 | void add_trace_kprobe_events(struct probe_point *probes, int nr_probes) | ||
458 | { | ||
459 | int i, j, fd; | ||
460 | struct probe_point *pp; | ||
461 | char buf[MAX_CMDLEN]; | ||
462 | char event[64]; | ||
463 | struct strlist *namelist; | ||
464 | |||
465 | fd = open_kprobe_events(O_RDWR, O_APPEND); | ||
466 | /* Get current event names */ | ||
467 | namelist = get_perf_event_names(fd); | ||
468 | |||
469 | for (j = 0; j < nr_probes; j++) { | ||
470 | pp = probes + j; | ||
471 | for (i = 0; i < pp->found; i++) { | ||
472 | /* Get an unused new event name */ | ||
473 | get_new_event_name(event, 64, pp->function, namelist); | ||
474 | snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n", | ||
475 | pp->retprobe ? 'r' : 'p', | ||
476 | PERFPROBE_GROUP, event, | ||
477 | pp->probes[i]); | ||
478 | write_trace_kprobe_event(fd, buf); | ||
479 | /* Add added event name to namelist */ | ||
480 | strlist__add(namelist, event); | ||
481 | } | ||
482 | } | ||
483 | close(fd); | ||
484 | } | ||
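probe-event.c above converts a perf-probe definition into a kprobe_events line of the form "p:GROUP/EVENT FUNC+OFFS ARGS" ("r:" for return probes), as formatted by synthesize_trace_kprobe_event() and add_trace_kprobe_events(). A self-contained sketch (not part of the patch) of the same formatting follows; the function name, offset, argument and event name are made up for illustration.

	#include <stdio.h>

	int main(void)
	{
		char probe[256], line[256];

		/* synthesize_trace_kprobe_event(): "FUNC+OFFS ARG..." */
		snprintf(probe, sizeof(probe), "%s+%d %s", "schedule", 8, "prev");

		/* add_trace_kprobe_events(): "p:GROUP/EVENT PROBE\n" ('r:' for retprobes) */
		snprintf(line, sizeof(line), "%c:%s/%s %s\n",
			 'p', "probe", "schedule_0", probe);

		fputs(line, stdout);	/* p:probe/schedule_0 schedule+8 prev */
		return 0;
	}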
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h new file mode 100644 index 000000000000..0c6fe56fe38a --- /dev/null +++ b/tools/perf/util/probe-event.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _PROBE_EVENT_H | ||
2 | #define _PROBE_EVENT_H | ||
3 | |||
4 | #include "probe-finder.h" | ||
5 | #include "strlist.h" | ||
6 | |||
7 | extern int parse_perf_probe_event(const char *str, struct probe_point *pp); | ||
8 | extern int synthesize_perf_probe_event(struct probe_point *pp); | ||
9 | extern void parse_trace_kprobe_event(const char *str, char **group, | ||
10 | char **event, struct probe_point *pp); | ||
11 | extern int synthesize_trace_kprobe_event(struct probe_point *pp); | ||
12 | extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes); | ||
13 | extern void show_perf_probe_events(void); | ||
14 | |||
15 | /* Maximum index number of event-name postfix */ | ||
16 | #define MAX_EVENT_INDEX 1024 | ||
17 | |||
18 | #endif /*_PROBE_EVENT_H */ | ||
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c new file mode 100644 index 000000000000..293cdfc1b8ca --- /dev/null +++ b/tools/perf/util/probe-finder.c | |||
@@ -0,0 +1,732 @@ | |||
1 | /* | ||
2 | * probe-finder.c : C expression to kprobe event converter | ||
3 | * | ||
4 | * Written by Masami Hiramatsu <mhiramat@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <sys/utsname.h> | ||
23 | #include <sys/types.h> | ||
24 | #include <sys/stat.h> | ||
25 | #include <fcntl.h> | ||
26 | #include <errno.h> | ||
27 | #include <stdio.h> | ||
28 | #include <unistd.h> | ||
29 | #include <getopt.h> | ||
30 | #include <stdlib.h> | ||
31 | #include <string.h> | ||
32 | #include <stdarg.h> | ||
33 | #include <ctype.h> | ||
34 | |||
35 | #include "event.h" | ||
36 | #include "debug.h" | ||
37 | #include "util.h" | ||
38 | #include "probe-finder.h" | ||
39 | |||
40 | |||
41 | /* Dwarf_Die Linkage to parent Die */ | ||
42 | struct die_link { | ||
43 | struct die_link *parent; /* Parent die */ | ||
44 | Dwarf_Die die; /* Current die */ | ||
45 | }; | ||
46 | |||
47 | static Dwarf_Debug __dw_debug; | ||
48 | static Dwarf_Error __dw_error; | ||
49 | |||
50 | /* | ||
51 | * Generic dwarf analysis helpers | ||
52 | */ | ||
53 | |||
54 | #define X86_32_MAX_REGS 8 | ||
55 | const char *x86_32_regs_table[X86_32_MAX_REGS] = { | ||
56 | "%ax", | ||
57 | "%cx", | ||
58 | "%dx", | ||
59 | "%bx", | ||
60 | "$stack", /* Stack address instead of %sp */ | ||
61 | "%bp", | ||
62 | "%si", | ||
63 | "%di", | ||
64 | }; | ||
65 | |||
66 | #define X86_64_MAX_REGS 16 | ||
67 | const char *x86_64_regs_table[X86_64_MAX_REGS] = { | ||
68 | "%ax", | ||
69 | "%dx", | ||
70 | "%cx", | ||
71 | "%bx", | ||
72 | "%si", | ||
73 | "%di", | ||
74 | "%bp", | ||
75 | "%sp", | ||
76 | "%r8", | ||
77 | "%r9", | ||
78 | "%r10", | ||
79 | "%r11", | ||
80 | "%r12", | ||
81 | "%r13", | ||
82 | "%r14", | ||
83 | "%r15", | ||
84 | }; | ||
85 | |||
86 | /* TODO: switching by dwarf address size */ | ||
87 | #ifdef __x86_64__ | ||
88 | #define ARCH_MAX_REGS X86_64_MAX_REGS | ||
89 | #define arch_regs_table x86_64_regs_table | ||
90 | #else | ||
91 | #define ARCH_MAX_REGS X86_32_MAX_REGS | ||
92 | #define arch_regs_table x86_32_regs_table | ||
93 | #endif | ||
94 | |||
95 | /* Return architecture dependent register string (for kprobe-tracer) */ | ||
96 | static const char *get_arch_regstr(unsigned int n) | ||
97 | { | ||
98 | return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Compare the tail of two strings. | ||
103 | * Return 0 if whole of either string is same as another's tail part. | ||
104 | */ | ||
105 | static int strtailcmp(const char *s1, const char *s2) | ||
106 | { | ||
107 | int i1 = strlen(s1); | ||
108 | int i2 = strlen(s2); | ||
109 | while (--i1 > 0 && --i2 > 0) { | ||
110 | if (s1[i1] != s2[i2]) | ||
111 | return s1[i1] - s2[i2]; | ||
112 | } | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* Find the fileno of the target file. */ | ||
117 | static Dwarf_Unsigned cu_find_fileno(Dwarf_Die cu_die, const char *fname) | ||
118 | { | ||
119 | Dwarf_Signed cnt, i; | ||
120 | Dwarf_Unsigned found = 0; | ||
121 | char **srcs; | ||
122 | int ret; | ||
123 | |||
124 | if (!fname) | ||
125 | return 0; | ||
126 | |||
127 | ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error); | ||
128 | if (ret == DW_DLV_OK) { | ||
129 | for (i = 0; i < cnt && !found; i++) { | ||
130 | if (strtailcmp(srcs[i], fname) == 0) | ||
131 | found = i + 1; | ||
132 | dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING); | ||
133 | } | ||
134 | for (; i < cnt; i++) | ||
135 | dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING); | ||
136 | dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST); | ||
137 | } | ||
138 | if (found) | ||
139 | pr_debug("found fno: %d\n", (int)found); | ||
140 | return found; | ||
141 | } | ||
142 | |||
143 | /* Compare diename and tname */ | ||
144 | static int die_compare_name(Dwarf_Die dw_die, const char *tname) | ||
145 | { | ||
146 | char *name; | ||
147 | int ret; | ||
148 | ret = dwarf_diename(dw_die, &name, &__dw_error); | ||
149 | DIE_IF(ret == DW_DLV_ERROR); | ||
150 | if (ret == DW_DLV_OK) { | ||
151 | ret = strcmp(tname, name); | ||
152 | dwarf_dealloc(__dw_debug, name, DW_DLA_STRING); | ||
153 | } else | ||
154 | ret = -1; | ||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | /* Check whether the address is within the subprogram (function). */ | ||
159 | static int die_within_subprogram(Dwarf_Die sp_die, Dwarf_Addr addr, | ||
160 | Dwarf_Signed *offs) | ||
161 | { | ||
162 | Dwarf_Addr lopc, hipc; | ||
163 | int ret; | ||
164 | |||
165 | /* TODO: check ranges */ | ||
166 | ret = dwarf_lowpc(sp_die, &lopc, &__dw_error); | ||
167 | DIE_IF(ret == DW_DLV_ERROR); | ||
168 | if (ret == DW_DLV_NO_ENTRY) | ||
169 | return 0; | ||
170 | ret = dwarf_highpc(sp_die, &hipc, &__dw_error); | ||
171 | DIE_IF(ret != DW_DLV_OK); | ||
172 | if (lopc <= addr && addr < hipc) { | ||
173 | *offs = addr - lopc; | ||
174 | return 1; | ||
175 | } else | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | /* Check whether the die is an inlined function */ | ||
180 | static Dwarf_Bool die_inlined_subprogram(Dwarf_Die dw_die) | ||
181 | { | ||
182 | /* TODO: check strictly */ | ||
183 | Dwarf_Bool inl; | ||
184 | int ret; | ||
185 | |||
186 | ret = dwarf_hasattr(dw_die, DW_AT_inline, &inl, &__dw_error); | ||
187 | DIE_IF(ret == DW_DLV_ERROR); | ||
188 | return inl; | ||
189 | } | ||
190 | |||
191 | /* Get the offset of abstract_origin */ | ||
192 | static Dwarf_Off die_get_abstract_origin(Dwarf_Die dw_die) | ||
193 | { | ||
194 | Dwarf_Attribute attr; | ||
195 | Dwarf_Off cu_offs; | ||
196 | int ret; | ||
197 | |||
198 | ret = dwarf_attr(dw_die, DW_AT_abstract_origin, &attr, &__dw_error); | ||
199 | DIE_IF(ret != DW_DLV_OK); | ||
200 | ret = dwarf_formref(attr, &cu_offs, &__dw_error); | ||
201 | DIE_IF(ret != DW_DLV_OK); | ||
202 | dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); | ||
203 | return cu_offs; | ||
204 | } | ||
205 | |||
206 | /* Get the entry pc (or low pc, the first entry of ranges) of the die */ | ||
207 | static Dwarf_Addr die_get_entrypc(Dwarf_Die dw_die) | ||
208 | { | ||
209 | Dwarf_Attribute attr; | ||
210 | Dwarf_Addr addr; | ||
211 | Dwarf_Off offs; | ||
212 | Dwarf_Ranges *ranges; | ||
213 | Dwarf_Signed cnt; | ||
214 | int ret; | ||
215 | |||
216 | /* Try to get entry pc */ | ||
217 | ret = dwarf_attr(dw_die, DW_AT_entry_pc, &attr, &__dw_error); | ||
218 | DIE_IF(ret == DW_DLV_ERROR); | ||
219 | if (ret == DW_DLV_OK) { | ||
220 | ret = dwarf_formaddr(attr, &addr, &__dw_error); | ||
221 | DIE_IF(ret != DW_DLV_OK); | ||
222 | dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); | ||
223 | return addr; | ||
224 | } | ||
225 | |||
226 | /* Try to get low pc */ | ||
227 | ret = dwarf_lowpc(dw_die, &addr, &__dw_error); | ||
228 | DIE_IF(ret == DW_DLV_ERROR); | ||
229 | if (ret == DW_DLV_OK) | ||
230 | return addr; | ||
231 | |||
232 | /* Try to get ranges */ | ||
233 | ret = dwarf_attr(dw_die, DW_AT_ranges, &attr, &__dw_error); | ||
234 | DIE_IF(ret != DW_DLV_OK); | ||
235 | ret = dwarf_formref(attr, &offs, &__dw_error); | ||
236 | DIE_IF(ret != DW_DLV_OK); | ||
237 | ret = dwarf_get_ranges(__dw_debug, offs, &ranges, &cnt, NULL, | ||
238 | &__dw_error); | ||
239 | DIE_IF(ret != DW_DLV_OK); | ||
240 | addr = ranges[0].dwr_addr1; | ||
241 | dwarf_ranges_dealloc(__dw_debug, ranges, cnt); | ||
242 | return addr; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Search a Die from Die tree. | ||
247 | * Note: cur_link->die should be deallocated in this function. | ||
248 | */ | ||
249 | static int __search_die_tree(struct die_link *cur_link, | ||
250 | int (*die_cb)(struct die_link *, void *), | ||
251 | void *data) | ||
252 | { | ||
253 | Dwarf_Die new_die; | ||
254 | struct die_link new_link; | ||
255 | int ret; | ||
256 | |||
257 | if (!die_cb) | ||
258 | return 0; | ||
259 | |||
260 | /* Check current die */ | ||
261 | while (!(ret = die_cb(cur_link, data))) { | ||
262 | /* Check child die */ | ||
263 | ret = dwarf_child(cur_link->die, &new_die, &__dw_error); | ||
264 | DIE_IF(ret == DW_DLV_ERROR); | ||
265 | if (ret == DW_DLV_OK) { | ||
266 | new_link.parent = cur_link; | ||
267 | new_link.die = new_die; | ||
268 | ret = __search_die_tree(&new_link, die_cb, data); | ||
269 | if (ret) | ||
270 | break; | ||
271 | } | ||
272 | |||
273 | /* Move to next sibling */ | ||
274 | ret = dwarf_siblingof(__dw_debug, cur_link->die, &new_die, | ||
275 | &__dw_error); | ||
276 | DIE_IF(ret == DW_DLV_ERROR); | ||
277 | dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE); | ||
278 | cur_link->die = new_die; | ||
279 | if (ret == DW_DLV_NO_ENTRY) | ||
280 | return 0; | ||
281 | } | ||
282 | dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | /* Search a die in its children's die tree */ | ||
287 | static int search_die_from_children(Dwarf_Die parent_die, | ||
288 | int (*die_cb)(struct die_link *, void *), | ||
289 | void *data) | ||
290 | { | ||
291 | struct die_link new_link; | ||
292 | int ret; | ||
293 | |||
294 | new_link.parent = NULL; | ||
295 | ret = dwarf_child(parent_die, &new_link.die, &__dw_error); | ||
296 | DIE_IF(ret == DW_DLV_ERROR); | ||
297 | if (ret == DW_DLV_OK) | ||
298 | return __search_die_tree(&new_link, die_cb, data); | ||
299 | else | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | /* Find a locdesc corresponding to the address */ | ||
304 | static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc, | ||
305 | Dwarf_Addr addr) | ||
306 | { | ||
307 | Dwarf_Signed lcnt; | ||
308 | Dwarf_Locdesc **llbuf; | ||
309 | int ret, i; | ||
310 | |||
311 | ret = dwarf_loclist_n(attr, &llbuf, &lcnt, &__dw_error); | ||
312 | DIE_IF(ret != DW_DLV_OK); | ||
313 | ret = DW_DLV_NO_ENTRY; | ||
314 | for (i = 0; i < lcnt; ++i) { | ||
315 | if (llbuf[i]->ld_lopc <= addr && | ||
316 | llbuf[i]->ld_hipc > addr) { | ||
317 | memcpy(desc, llbuf[i], sizeof(Dwarf_Locdesc)); | ||
318 | desc->ld_s = | ||
319 | malloc(sizeof(Dwarf_Loc) * llbuf[i]->ld_cents); | ||
320 | DIE_IF(desc->ld_s == NULL); | ||
321 | memcpy(desc->ld_s, llbuf[i]->ld_s, | ||
322 | sizeof(Dwarf_Loc) * llbuf[i]->ld_cents); | ||
323 | ret = DW_DLV_OK; | ||
324 | break; | ||
325 | } | ||
326 | dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK); | ||
327 | dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC); | ||
328 | } | ||
329 | /* Releasing loop */ | ||
330 | for (; i < lcnt; ++i) { | ||
331 | dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK); | ||
332 | dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC); | ||
333 | } | ||
334 | dwarf_dealloc(__dw_debug, llbuf, DW_DLA_LIST); | ||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | /* Get decl_file attribute value (file number) */ | ||
339 | static Dwarf_Unsigned die_get_decl_file(Dwarf_Die sp_die) | ||
340 | { | ||
341 | Dwarf_Attribute attr; | ||
342 | Dwarf_Unsigned fno; | ||
343 | int ret; | ||
344 | |||
345 | ret = dwarf_attr(sp_die, DW_AT_decl_file, &attr, &__dw_error); | ||
346 | DIE_IF(ret != DW_DLV_OK); | ||
347 | ret = dwarf_formudata(attr, &fno, &__dw_error); | ||
348 | DIE_IF(ret != DW_DLV_OK); | ||
349 | dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); | ||
350 | return fno; | ||
351 | } | ||
352 | |||
353 | /* Get decl_line attribute value (line number) */ | ||
354 | static Dwarf_Unsigned die_get_decl_line(Dwarf_Die sp_die) | ||
355 | { | ||
356 | Dwarf_Attribute attr; | ||
357 | Dwarf_Unsigned lno; | ||
358 | int ret; | ||
359 | |||
360 | ret = dwarf_attr(sp_die, DW_AT_decl_line, &attr, &__dw_error); | ||
361 | DIE_IF(ret != DW_DLV_OK); | ||
362 | ret = dwarf_formudata(attr, &lno, &__dw_error); | ||
363 | DIE_IF(ret != DW_DLV_OK); | ||
364 | dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); | ||
365 | return lno; | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * Probe finder related functions | ||
370 | */ | ||
371 | |||
372 | /* Show a location */ | ||
373 | static void show_location(Dwarf_Loc *loc, struct probe_finder *pf) | ||
374 | { | ||
375 | Dwarf_Small op; | ||
376 | Dwarf_Unsigned regn; | ||
377 | Dwarf_Signed offs; | ||
378 | int deref = 0, ret; | ||
379 | const char *regs; | ||
380 | |||
381 | op = loc->lr_atom; | ||
382 | |||
383 | /* If this is based on the frame base, set the offset */ | ||
384 | if (op == DW_OP_fbreg) { | ||
385 | deref = 1; | ||
386 | offs = (Dwarf_Signed)loc->lr_number; | ||
387 | op = pf->fbloc.ld_s[0].lr_atom; | ||
388 | loc = &pf->fbloc.ld_s[0]; | ||
389 | } else | ||
390 | offs = 0; | ||
391 | |||
392 | if (op >= DW_OP_breg0 && op <= DW_OP_breg31) { | ||
393 | regn = op - DW_OP_breg0; | ||
394 | offs += (Dwarf_Signed)loc->lr_number; | ||
395 | deref = 1; | ||
396 | } else if (op >= DW_OP_reg0 && op <= DW_OP_reg31) { | ||
397 | regn = op - DW_OP_reg0; | ||
398 | } else if (op == DW_OP_bregx) { | ||
399 | regn = loc->lr_number; | ||
400 | offs += (Dwarf_Signed)loc->lr_number2; | ||
401 | deref = 1; | ||
402 | } else if (op == DW_OP_regx) { | ||
403 | regn = loc->lr_number; | ||
404 | } else | ||
405 | die("Dwarf_OP %d is not supported.\n", op); | ||
406 | |||
407 | regs = get_arch_regstr(regn); | ||
408 | if (!regs) | ||
409 | die("%lld exceeds max register number.\n", regn); | ||
410 | |||
411 | if (deref) | ||
412 | ret = snprintf(pf->buf, pf->len, | ||
413 | " %s=%+lld(%s)", pf->var, offs, regs); | ||
414 | else | ||
415 | ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs); | ||
416 | DIE_IF(ret < 0); | ||
417 | DIE_IF(ret >= pf->len); | ||
418 | } | ||
419 | |||
420 | /* Show a variable in kprobe event format */ | ||
421 | static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf) | ||
422 | { | ||
423 | Dwarf_Attribute attr; | ||
424 | Dwarf_Locdesc ld; | ||
425 | int ret; | ||
426 | |||
427 | ret = dwarf_attr(vr_die, DW_AT_location, &attr, &__dw_error); | ||
428 | if (ret != DW_DLV_OK) | ||
429 | goto error; | ||
430 | ret = attr_get_locdesc(attr, &ld, (pf->addr - pf->cu_base)); | ||
431 | if (ret != DW_DLV_OK) | ||
432 | goto error; | ||
433 | /* TODO? */ | ||
434 | DIE_IF(ld.ld_cents != 1); | ||
435 | show_location(&ld.ld_s[0], pf); | ||
436 | free(ld.ld_s); | ||
437 | dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); | ||
438 | return ; | ||
439 | error: | ||
440 | die("Failed to find the location of %s at this address.\n" | ||
441 | " Perhaps, it has been optimized out.\n", pf->var); | ||
442 | } | ||
443 | |||
444 | static int variable_callback(struct die_link *dlink, void *data) | ||
445 | { | ||
446 | struct probe_finder *pf = (struct probe_finder *)data; | ||
447 | Dwarf_Half tag; | ||
448 | int ret; | ||
449 | |||
450 | ret = dwarf_tag(dlink->die, &tag, &__dw_error); | ||
451 | DIE_IF(ret == DW_DLV_ERROR); | ||
452 | if ((tag == DW_TAG_formal_parameter || | ||
453 | tag == DW_TAG_variable) && | ||
454 | (die_compare_name(dlink->die, pf->var) == 0)) { | ||
455 | show_variable(dlink->die, pf); | ||
456 | return 1; | ||
457 | } | ||
458 | /* TODO: Support struct members and arrays */ | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | /* Find a variable in a subprogram die */ | ||
463 | static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) | ||
464 | { | ||
465 | int ret; | ||
466 | |||
467 | if (!is_c_varname(pf->var)) { | ||
468 | /* Output raw parameters */ | ||
469 | ret = snprintf(pf->buf, pf->len, " %s", pf->var); | ||
470 | DIE_IF(ret < 0); | ||
471 | DIE_IF(ret >= pf->len); | ||
472 | return ; | ||
473 | } | ||
474 | |||
475 | pr_debug("Searching '%s' variable in context.\n", pf->var); | ||
476 | /* Search child die for local variables and parameters. */ | ||
477 | ret = search_die_from_children(sp_die, variable_callback, pf); | ||
478 | if (!ret) | ||
479 | die("Failed to find '%s' in this function.\n", pf->var); | ||
480 | } | ||
481 | |||
482 | /* Get a frame base on the address */ | ||
483 | static void get_current_frame_base(Dwarf_Die sp_die, struct probe_finder *pf) | ||
484 | { | ||
485 | Dwarf_Attribute attr; | ||
486 | int ret; | ||
487 | |||
488 | ret = dwarf_attr(sp_die, DW_AT_frame_base, &attr, &__dw_error); | ||
489 | DIE_IF(ret != DW_DLV_OK); | ||
490 | ret = attr_get_locdesc(attr, &pf->fbloc, (pf->addr - pf->cu_base)); | ||
491 | DIE_IF(ret != DW_DLV_OK); | ||
492 | dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); | ||
493 | } | ||
494 | |||
495 | static void free_current_frame_base(struct probe_finder *pf) | ||
496 | { | ||
497 | free(pf->fbloc.ld_s); | ||
498 | memset(&pf->fbloc, 0, sizeof(Dwarf_Locdesc)); | ||
499 | } | ||
500 | |||
501 | /* Show a probe point to output buffer */ | ||
502 | static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, | ||
503 | struct probe_finder *pf) | ||
504 | { | ||
505 | struct probe_point *pp = pf->pp; | ||
506 | char *name; | ||
507 | char tmp[MAX_PROBE_BUFFER]; | ||
508 | int ret, i, len; | ||
509 | |||
510 | /* Output name of probe point */ | ||
511 | ret = dwarf_diename(sp_die, &name, &__dw_error); | ||
512 | DIE_IF(ret == DW_DLV_ERROR); | ||
513 | if (ret == DW_DLV_OK) { | ||
514 | ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%u", name, | ||
515 | (unsigned int)offs); | ||
516 | /* Copy the function name if possible */ | ||
517 | if (!pp->function) { | ||
518 | pp->function = strdup(name); | ||
519 | pp->offset = offs; | ||
520 | } | ||
521 | dwarf_dealloc(__dw_debug, name, DW_DLA_STRING); | ||
522 | } else { | ||
523 | /* This function has no name. */ | ||
524 | ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%llx", pf->addr); | ||
525 | if (!pp->function) { | ||
526 | /* TODO: Use _stext */ | ||
527 | pp->function = strdup(""); | ||
528 | pp->offset = (int)pf->addr; | ||
529 | } | ||
530 | } | ||
531 | DIE_IF(ret < 0); | ||
532 | DIE_IF(ret >= MAX_PROBE_BUFFER); | ||
533 | len = ret; | ||
534 | pr_debug("Probe point found: %s\n", tmp); | ||
535 | |||
536 | /* Find each argument */ | ||
537 | get_current_frame_base(sp_die, pf); | ||
538 | for (i = 0; i < pp->nr_args; i++) { | ||
539 | pf->var = pp->args[i]; | ||
540 | pf->buf = &tmp[len]; | ||
541 | pf->len = MAX_PROBE_BUFFER - len; | ||
542 | find_variable(sp_die, pf); | ||
543 | len += strlen(pf->buf); | ||
544 | } | ||
545 | free_current_frame_base(pf); | ||
546 | |||
547 | pp->probes[pp->found] = strdup(tmp); | ||
548 | pp->found++; | ||
549 | } | ||
550 | |||
551 | static int probeaddr_callback(struct die_link *dlink, void *data) | ||
552 | { | ||
553 | struct probe_finder *pf = (struct probe_finder *)data; | ||
554 | Dwarf_Half tag; | ||
555 | Dwarf_Signed offs; | ||
556 | int ret; | ||
557 | |||
558 | ret = dwarf_tag(dlink->die, &tag, &__dw_error); | ||
559 | DIE_IF(ret == DW_DLV_ERROR); | ||
560 | /* Check the address is in this subprogram */ | ||
561 | if (tag == DW_TAG_subprogram && | ||
562 | die_within_subprogram(dlink->die, pf->addr, &offs)) { | ||
563 | show_probepoint(dlink->die, offs, pf); | ||
564 | return 1; | ||
565 | } | ||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | /* Find probe point from its line number */ | ||
570 | static void find_by_line(struct probe_finder *pf) | ||
571 | { | ||
572 | Dwarf_Signed cnt, i, clm; | ||
573 | Dwarf_Line *lines; | ||
574 | Dwarf_Unsigned lineno = 0; | ||
575 | Dwarf_Addr addr; | ||
576 | Dwarf_Unsigned fno; | ||
577 | int ret; | ||
578 | |||
579 | ret = dwarf_srclines(pf->cu_die, &lines, &cnt, &__dw_error); | ||
580 | DIE_IF(ret != DW_DLV_OK); | ||
581 | |||
582 | for (i = 0; i < cnt; i++) { | ||
583 | ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error); | ||
584 | DIE_IF(ret != DW_DLV_OK); | ||
585 | if (fno != pf->fno) | ||
586 | continue; | ||
587 | |||
588 | ret = dwarf_lineno(lines[i], &lineno, &__dw_error); | ||
589 | DIE_IF(ret != DW_DLV_OK); | ||
590 | if (lineno != pf->lno) | ||
591 | continue; | ||
592 | |||
593 | ret = dwarf_lineoff(lines[i], &clm, &__dw_error); | ||
594 | DIE_IF(ret != DW_DLV_OK); | ||
595 | |||
596 | ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); | ||
597 | DIE_IF(ret != DW_DLV_OK); | ||
598 | pr_debug("Probe line found: line[%d]:%u,%d addr:0x%llx\n", | ||
599 | (int)i, (unsigned)lineno, (int)clm, addr); | ||
600 | pf->addr = addr; | ||
601 | /* Search for a real subprogram that includes this line. */ | ||
602 | ret = search_die_from_children(pf->cu_die, | ||
603 | probeaddr_callback, pf); | ||
604 | if (ret == 0) | ||
605 | die("Probe point is not found in subprograms.\n"); | ||
606 | /* Continue, because the target line might be inlined. */ | ||
607 | } | ||
608 | dwarf_srclines_dealloc(__dw_debug, lines, cnt); | ||
609 | } | ||
610 | |||
611 | /* Search function from function name */ | ||
612 | static int probefunc_callback(struct die_link *dlink, void *data) | ||
613 | { | ||
614 | struct probe_finder *pf = (struct probe_finder *)data; | ||
615 | struct probe_point *pp = pf->pp; | ||
616 | struct die_link *lk; | ||
617 | Dwarf_Signed offs; | ||
618 | Dwarf_Half tag; | ||
619 | int ret; | ||
620 | |||
621 | ret = dwarf_tag(dlink->die, &tag, &__dw_error); | ||
622 | DIE_IF(ret == DW_DLV_ERROR); | ||
623 | if (tag == DW_TAG_subprogram) { | ||
624 | if (die_compare_name(dlink->die, pp->function) == 0) { | ||
625 | if (pp->line) { /* Function relative line */ | ||
626 | pf->fno = die_get_decl_file(dlink->die); | ||
627 | pf->lno = die_get_decl_line(dlink->die) | ||
628 | + pp->line; | ||
629 | find_by_line(pf); | ||
630 | return 1; | ||
631 | } | ||
632 | if (die_inlined_subprogram(dlink->die)) { | ||
633 | /* Inlined function, save it. */ | ||
634 | ret = dwarf_die_CU_offset(dlink->die, | ||
635 | &pf->inl_offs, | ||
636 | &__dw_error); | ||
637 | DIE_IF(ret != DW_DLV_OK); | ||
638 | pr_debug("inline definition offset %lld\n", | ||
639 | pf->inl_offs); | ||
640 | return 0; /* Continue to search */ | ||
641 | } | ||
642 | /* Get probe address */ | ||
643 | pf->addr = die_get_entrypc(dlink->die); | ||
644 | pf->addr += pp->offset; | ||
645 | /* TODO: Check the address in this function */ | ||
646 | show_probepoint(dlink->die, pp->offset, pf); | ||
647 | return 1; /* Exit; no same symbol in this CU. */ | ||
648 | } | ||
649 | } else if (tag == DW_TAG_inlined_subroutine && pf->inl_offs) { | ||
650 | if (die_get_abstract_origin(dlink->die) == pf->inl_offs) { | ||
651 | /* Get probe address */ | ||
652 | pf->addr = die_get_entrypc(dlink->die); | ||
653 | pf->addr += pp->offset; | ||
654 | pr_debug("found inline addr: 0x%llx\n", pf->addr); | ||
655 | /* Inlined function. Get a real subprogram */ | ||
656 | for (lk = dlink->parent; lk != NULL; lk = lk->parent) { | ||
657 | tag = 0; | ||
658 | dwarf_tag(lk->die, &tag, &__dw_error); | ||
659 | DIE_IF(ret == DW_DLV_ERROR); | ||
660 | if (tag == DW_TAG_subprogram && | ||
661 | !die_inlined_subprogram(lk->die)) | ||
662 | goto found; | ||
663 | } | ||
664 | die("Failed to find real subprogram.\n"); | ||
665 | found: | ||
666 | /* Get offset from subprogram */ | ||
667 | ret = die_within_subprogram(lk->die, pf->addr, &offs); | ||
668 | DIE_IF(!ret); | ||
669 | show_probepoint(lk->die, offs, pf); | ||
670 | /* Continue to search */ | ||
671 | } | ||
672 | } | ||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static void find_by_func(struct probe_finder *pf) | ||
677 | { | ||
678 | search_die_from_children(pf->cu_die, probefunc_callback, pf); | ||
679 | } | ||
680 | |||
681 | /* Find a probe point */ | ||
682 | int find_probepoint(int fd, struct probe_point *pp) | ||
683 | { | ||
684 | Dwarf_Half addr_size = 0; | ||
685 | Dwarf_Unsigned next_cuh = 0; | ||
686 | int cu_number = 0, ret; | ||
687 | struct probe_finder pf = {.pp = pp}; | ||
688 | |||
689 | ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); | ||
690 | if (ret != DW_DLV_OK) { | ||
691 | pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n"); | ||
692 | return -ENOENT; | ||
693 | } | ||
694 | |||
695 | pp->found = 0; | ||
696 | while (++cu_number) { | ||
697 | /* Search CU (Compilation Unit) */ | ||
698 | ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL, | ||
699 | &addr_size, &next_cuh, &__dw_error); | ||
700 | DIE_IF(ret == DW_DLV_ERROR); | ||
701 | if (ret == DW_DLV_NO_ENTRY) | ||
702 | break; | ||
703 | |||
704 | /* Get the DIE(Debugging Information Entry) of this CU */ | ||
705 | ret = dwarf_siblingof(__dw_debug, 0, &pf.cu_die, &__dw_error); | ||
706 | DIE_IF(ret != DW_DLV_OK); | ||
707 | |||
708 | /* Check if target file is included. */ | ||
709 | if (pp->file) | ||
710 | pf.fno = cu_find_fileno(pf.cu_die, pp->file); | ||
711 | |||
712 | if (!pp->file || pf.fno) { | ||
713 | /* Save CU base address (for frame_base) */ | ||
714 | ret = dwarf_lowpc(pf.cu_die, &pf.cu_base, &__dw_error); | ||
715 | DIE_IF(ret == DW_DLV_ERROR); | ||
716 | if (ret == DW_DLV_NO_ENTRY) | ||
717 | pf.cu_base = 0; | ||
718 | if (pp->function) | ||
719 | find_by_func(&pf); | ||
720 | else { | ||
721 | pf.lno = pp->line; | ||
722 | find_by_line(&pf); | ||
723 | } | ||
724 | } | ||
725 | dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE); | ||
726 | } | ||
727 | ret = dwarf_finish(__dw_debug, &__dw_error); | ||
728 | DIE_IF(ret != DW_DLV_OK); | ||
729 | |||
730 | return pp->found; | ||
731 | } | ||
732 | |||
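cu_find_fileno() in probe-finder.c above matches a user-supplied file name against the CU's source file list by comparing string tails with strtailcmp(). A self-contained sketch (not part of the patch) of that comparison follows; the paths are illustrative only.

	#include <stdio.h>
	#include <string.h>

	/* Same tail comparison as probe-finder.c: 0 means one string is a tail of the other. */
	static int strtailcmp(const char *s1, const char *s2)
	{
		int i1 = strlen(s1);
		int i2 = strlen(s2);

		while (--i1 > 0 && --i2 > 0) {
			if (s1[i1] != s2[i2])
				return s1[i1] - s2[i2];
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", strtailcmp("/build/linux/kernel/sched.c", "kernel/sched.c")); /* 0: match */
		printf("%d\n", strtailcmp("/build/linux/kernel/fork.c", "kernel/sched.c"));  /* non-zero */
		return 0;
	}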
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h new file mode 100644 index 000000000000..bdebca6697d2 --- /dev/null +++ b/tools/perf/util/probe-finder.h | |||
@@ -0,0 +1,57 @@ | |||
1 | #ifndef _PROBE_FINDER_H | ||
2 | #define _PROBE_FINDER_H | ||
3 | |||
4 | #define MAX_PATH_LEN 256 | ||
5 | #define MAX_PROBE_BUFFER 1024 | ||
6 | #define MAX_PROBES 128 | ||
7 | |||
8 | static inline int is_c_varname(const char *name) | ||
9 | { | ||
10 | /* TODO */ | ||
11 | return isalpha(name[0]) || name[0] == '_'; | ||
12 | } | ||
13 | |||
14 | struct probe_point { | ||
15 | /* Inputs */ | ||
16 | char *file; /* File name */ | ||
17 | int line; /* Line number */ | ||
18 | |||
19 | char *function; /* Function name */ | ||
20 | int offset; /* Offset bytes */ | ||
21 | |||
22 | int nr_args; /* Number of arguments */ | ||
23 | char **args; /* Arguments */ | ||
24 | |||
25 | int retprobe; /* Return probe */ | ||
26 | |||
27 | /* Output */ | ||
28 | int found; /* Number of found probe points */ | ||
29 | char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/ | ||
30 | }; | ||
31 | |||
32 | #ifndef NO_LIBDWARF | ||
33 | extern int find_probepoint(int fd, struct probe_point *pp); | ||
34 | |||
35 | #include <libdwarf/dwarf.h> | ||
36 | #include <libdwarf/libdwarf.h> | ||
37 | |||
38 | struct probe_finder { | ||
39 | struct probe_point *pp; /* Target probe point */ | ||
40 | |||
41 | /* For function searching */ | ||
42 | Dwarf_Addr addr; /* Address */ | ||
43 | Dwarf_Unsigned fno; /* File number */ | ||
44 | Dwarf_Unsigned lno; /* Line number */ | ||
45 | Dwarf_Off inl_offs; /* Inline offset */ | ||
46 | Dwarf_Die cu_die; /* Current CU */ | ||
47 | |||
48 | /* For variable searching */ | ||
49 | Dwarf_Addr cu_base; /* Current CU base address */ | ||
50 | Dwarf_Locdesc fbloc; /* Location of Current Frame Base */ | ||
51 | const char *var; /* Current variable name */ | ||
52 | char *buf; /* Current output buffer */ | ||
53 | int len; /* Length of output buffer */ | ||
54 | }; | ||
55 | #endif /* NO_LIBDWARF */ | ||
56 | |||
57 | #endif /*_PROBE_FINDER_H */ | ||
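
The new header is the contract between the perf-probe front end and the DWARF analyzer: the caller fills in a source location (file/line or function/offset) plus the C variable names to record, and find_probepoint() writes the generated probe definitions into pp->probes[] and returns how many it found. A minimal sketch of that calling convention, with a made-up probe ("schedule+8" recording "prev") and none of the option parsing that perf probe itself does:

    #include <ctype.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include "probe-finder.h"

    /* Illustrative caller, not the actual builtin-probe code. */
    static int show_probe(const char *vmlinux)
    {
        char *args[] = { (char *)"prev" };
        struct probe_point pp = {
            .function = (char *)"schedule",
            .offset   = 8,
            .nr_args  = 1,
            .args     = args,
        };
        int fd = open(vmlinux, O_RDONLY), i, n;

        if (fd < 0)
            return -1;
        n = find_probepoint(fd, &pp);   /* fills pp.probes[0..n-1] */
        for (i = 0; i < n; i++)
            printf("%s\n", pp.probes[i]);
        close(fd);
        return n;
    }
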
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h index a5454a1d1c13..b6a019733919 100644 --- a/tools/perf/util/quote.h +++ b/tools/perf/util/quote.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef QUOTE_H | 1 | #ifndef __PERF_QUOTE_H |
2 | #define QUOTE_H | 2 | #define __PERF_QUOTE_H |
3 | 3 | ||
4 | #include <stddef.h> | 4 | #include <stddef.h> |
5 | #include <stdio.h> | 5 | #include <stdio.h> |
@@ -65,4 +65,4 @@ extern void perl_quote_print(FILE *stream, const char *src); | |||
65 | extern void python_quote_print(FILE *stream, const char *src); | 65 | extern void python_quote_print(FILE *stream, const char *src); |
66 | extern void tcl_quote_print(FILE *stream, const char *src); | 66 | extern void tcl_quote_print(FILE *stream, const char *src); |
67 | 67 | ||
68 | #endif | 68 | #endif /* __PERF_QUOTE_H */ |
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h index cc1837deba88..d79028727ce2 100644 --- a/tools/perf/util/run-command.h +++ b/tools/perf/util/run-command.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef RUN_COMMAND_H | 1 | #ifndef __PERF_RUN_COMMAND_H |
2 | #define RUN_COMMAND_H | 2 | #define __PERF_RUN_COMMAND_H |
3 | 3 | ||
4 | enum { | 4 | enum { |
5 | ERR_RUN_COMMAND_FORK = 10000, | 5 | ERR_RUN_COMMAND_FORK = 10000, |
@@ -85,4 +85,4 @@ struct async { | |||
85 | int start_async(struct async *async); | 85 | int start_async(struct async *async); |
86 | int finish_async(struct async *async); | 86 | int finish_async(struct async *async); |
87 | 87 | ||
88 | #endif | 88 | #endif /* __PERF_RUN_COMMAND_H */ |
diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h index 618083bce0c6..1a53c11265fd 100644 --- a/tools/perf/util/sigchain.h +++ b/tools/perf/util/sigchain.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef SIGCHAIN_H | 1 | #ifndef __PERF_SIGCHAIN_H |
2 | #define SIGCHAIN_H | 2 | #define __PERF_SIGCHAIN_H |
3 | 3 | ||
4 | typedef void (*sigchain_fun)(int); | 4 | typedef void (*sigchain_fun)(int); |
5 | 5 | ||
@@ -8,4 +8,4 @@ int sigchain_pop(int sig); | |||
8 | 8 | ||
9 | void sigchain_push_common(sigchain_fun f); | 9 | void sigchain_push_common(sigchain_fun f); |
10 | 10 | ||
11 | #endif /* SIGCHAIN_H */ | 11 | #endif /* __PERF_SIGCHAIN_H */ |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c new file mode 100644 index 000000000000..b490354d1b23 --- /dev/null +++ b/tools/perf/util/sort.c | |||
@@ -0,0 +1,290 @@ | |||
1 | #include "sort.h" | ||
2 | |||
3 | regex_t parent_regex; | ||
4 | char default_parent_pattern[] = "^sys_|^do_page_fault"; | ||
5 | char *parent_pattern = default_parent_pattern; | ||
6 | char default_sort_order[] = "comm,dso,symbol"; | ||
7 | char *sort_order = default_sort_order; | ||
8 | int sort__need_collapse = 0; | ||
9 | int sort__has_parent = 0; | ||
10 | |||
11 | enum sort_type sort__first_dimension; | ||
12 | |||
13 | unsigned int dsos__col_width; | ||
14 | unsigned int comms__col_width; | ||
15 | unsigned int threads__col_width; | ||
16 | static unsigned int parent_symbol__col_width; | ||
17 | char * field_sep; | ||
18 | |||
19 | LIST_HEAD(hist_entry__sort_list); | ||
20 | |||
21 | struct sort_entry sort_thread = { | ||
22 | .header = "Command: Pid", | ||
23 | .cmp = sort__thread_cmp, | ||
24 | .print = sort__thread_print, | ||
25 | .width = &threads__col_width, | ||
26 | }; | ||
27 | |||
28 | struct sort_entry sort_comm = { | ||
29 | .header = "Command", | ||
30 | .cmp = sort__comm_cmp, | ||
31 | .collapse = sort__comm_collapse, | ||
32 | .print = sort__comm_print, | ||
33 | .width = &comms__col_width, | ||
34 | }; | ||
35 | |||
36 | struct sort_entry sort_dso = { | ||
37 | .header = "Shared Object", | ||
38 | .cmp = sort__dso_cmp, | ||
39 | .print = sort__dso_print, | ||
40 | .width = &dsos__col_width, | ||
41 | }; | ||
42 | |||
43 | struct sort_entry sort_sym = { | ||
44 | .header = "Symbol", | ||
45 | .cmp = sort__sym_cmp, | ||
46 | .print = sort__sym_print, | ||
47 | }; | ||
48 | |||
49 | struct sort_entry sort_parent = { | ||
50 | .header = "Parent symbol", | ||
51 | .cmp = sort__parent_cmp, | ||
52 | .print = sort__parent_print, | ||
53 | .width = &parent_symbol__col_width, | ||
54 | }; | ||
55 | |||
56 | struct sort_dimension { | ||
57 | const char *name; | ||
58 | struct sort_entry *entry; | ||
59 | int taken; | ||
60 | }; | ||
61 | |||
62 | static struct sort_dimension sort_dimensions[] = { | ||
63 | { .name = "pid", .entry = &sort_thread, }, | ||
64 | { .name = "comm", .entry = &sort_comm, }, | ||
65 | { .name = "dso", .entry = &sort_dso, }, | ||
66 | { .name = "symbol", .entry = &sort_sym, }, | ||
67 | { .name = "parent", .entry = &sort_parent, }, | ||
68 | }; | ||
69 | |||
70 | int64_t cmp_null(void *l, void *r) | ||
71 | { | ||
72 | if (!l && !r) | ||
73 | return 0; | ||
74 | else if (!l) | ||
75 | return -1; | ||
76 | else | ||
77 | return 1; | ||
78 | } | ||
79 | |||
80 | /* --sort pid */ | ||
81 | |||
82 | int64_t | ||
83 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) | ||
84 | { | ||
85 | return right->thread->pid - left->thread->pid; | ||
86 | } | ||
87 | |||
88 | int repsep_fprintf(FILE *fp, const char *fmt, ...) | ||
89 | { | ||
90 | int n; | ||
91 | va_list ap; | ||
92 | |||
93 | va_start(ap, fmt); | ||
94 | if (!field_sep) | ||
95 | n = vfprintf(fp, fmt, ap); | ||
96 | else { | ||
97 | char *bf = NULL; | ||
98 | n = vasprintf(&bf, fmt, ap); | ||
99 | if (n > 0) { | ||
100 | char *sep = bf; | ||
101 | |||
102 | while (1) { | ||
103 | sep = strchr(sep, *field_sep); | ||
104 | if (sep == NULL) | ||
105 | break; | ||
106 | *sep = '.'; | ||
107 | } | ||
108 | } | ||
109 | fputs(bf, fp); | ||
110 | free(bf); | ||
111 | } | ||
112 | va_end(ap); | ||
113 | return n; | ||
114 | } | ||
115 | |||
116 | size_t | ||
117 | sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
118 | { | ||
119 | return repsep_fprintf(fp, "%*s:%5d", width - 6, | ||
120 | self->thread->comm ?: "", self->thread->pid); | ||
121 | } | ||
122 | |||
123 | size_t | ||
124 | sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
125 | { | ||
126 | return repsep_fprintf(fp, "%*s", width, self->thread->comm); | ||
127 | } | ||
128 | |||
129 | /* --sort dso */ | ||
130 | |||
131 | int64_t | ||
132 | sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) | ||
133 | { | ||
134 | struct dso *dso_l = left->map ? left->map->dso : NULL; | ||
135 | struct dso *dso_r = right->map ? right->map->dso : NULL; | ||
136 | const char *dso_name_l, *dso_name_r; | ||
137 | |||
138 | if (!dso_l || !dso_r) | ||
139 | return cmp_null(dso_l, dso_r); | ||
140 | |||
141 | if (verbose) { | ||
142 | dso_name_l = dso_l->long_name; | ||
143 | dso_name_r = dso_r->long_name; | ||
144 | } else { | ||
145 | dso_name_l = dso_l->short_name; | ||
146 | dso_name_r = dso_r->short_name; | ||
147 | } | ||
148 | |||
149 | return strcmp(dso_name_l, dso_name_r); | ||
150 | } | ||
151 | |||
152 | size_t | ||
153 | sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
154 | { | ||
155 | if (self->map && self->map->dso) { | ||
156 | const char *dso_name = !verbose ? self->map->dso->short_name : | ||
157 | self->map->dso->long_name; | ||
158 | return repsep_fprintf(fp, "%-*s", width, dso_name); | ||
159 | } | ||
160 | |||
161 | return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); | ||
162 | } | ||
163 | |||
164 | /* --sort symbol */ | ||
165 | |||
166 | int64_t | ||
167 | sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) | ||
168 | { | ||
169 | u64 ip_l, ip_r; | ||
170 | |||
171 | if (left->sym == right->sym) | ||
172 | return 0; | ||
173 | |||
174 | ip_l = left->sym ? left->sym->start : left->ip; | ||
175 | ip_r = right->sym ? right->sym->start : right->ip; | ||
176 | |||
177 | return (int64_t)(ip_r - ip_l); | ||
178 | } | ||
179 | |||
180 | |||
181 | size_t | ||
182 | sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) | ||
183 | { | ||
184 | size_t ret = 0; | ||
185 | |||
186 | if (verbose) { | ||
187 | char o = self->map ? dso__symtab_origin(self->map->dso) : '!'; | ||
188 | ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o); | ||
189 | } | ||
190 | |||
191 | ret += repsep_fprintf(fp, "[%c] ", self->level); | ||
192 | if (self->sym) | ||
193 | ret += repsep_fprintf(fp, "%s", self->sym->name); | ||
194 | else | ||
195 | ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); | ||
196 | |||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | /* --sort comm */ | ||
201 | |||
202 | int64_t | ||
203 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) | ||
204 | { | ||
205 | return right->thread->pid - left->thread->pid; | ||
206 | } | ||
207 | |||
208 | int64_t | ||
209 | sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) | ||
210 | { | ||
211 | char *comm_l = left->thread->comm; | ||
212 | char *comm_r = right->thread->comm; | ||
213 | |||
214 | if (!comm_l || !comm_r) | ||
215 | return cmp_null(comm_l, comm_r); | ||
216 | |||
217 | return strcmp(comm_l, comm_r); | ||
218 | } | ||
219 | |||
220 | /* --sort parent */ | ||
221 | |||
222 | int64_t | ||
223 | sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) | ||
224 | { | ||
225 | struct symbol *sym_l = left->parent; | ||
226 | struct symbol *sym_r = right->parent; | ||
227 | |||
228 | if (!sym_l || !sym_r) | ||
229 | return cmp_null(sym_l, sym_r); | ||
230 | |||
231 | return strcmp(sym_l->name, sym_r->name); | ||
232 | } | ||
233 | |||
234 | size_t | ||
235 | sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) | ||
236 | { | ||
237 | return repsep_fprintf(fp, "%-*s", width, | ||
238 | self->parent ? self->parent->name : "[other]"); | ||
239 | } | ||
240 | |||
241 | int sort_dimension__add(const char *tok) | ||
242 | { | ||
243 | unsigned int i; | ||
244 | |||
245 | for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { | ||
246 | struct sort_dimension *sd = &sort_dimensions[i]; | ||
247 | |||
248 | if (sd->taken) | ||
249 | continue; | ||
250 | |||
251 | if (strncasecmp(tok, sd->name, strlen(tok))) | ||
252 | continue; | ||
253 | |||
254 | if (sd->entry->collapse) | ||
255 | sort__need_collapse = 1; | ||
256 | |||
257 | if (sd->entry == &sort_parent) { | ||
258 | int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); | ||
259 | if (ret) { | ||
260 | char err[BUFSIZ]; | ||
261 | |||
262 | regerror(ret, &parent_regex, err, sizeof(err)); | ||
263 | fprintf(stderr, "Invalid regex: %s\n%s", | ||
264 | parent_pattern, err); | ||
265 | exit(-1); | ||
266 | } | ||
267 | sort__has_parent = 1; | ||
268 | } | ||
269 | |||
270 | if (list_empty(&hist_entry__sort_list)) { | ||
271 | if (!strcmp(sd->name, "pid")) | ||
272 | sort__first_dimension = SORT_PID; | ||
273 | else if (!strcmp(sd->name, "comm")) | ||
274 | sort__first_dimension = SORT_COMM; | ||
275 | else if (!strcmp(sd->name, "dso")) | ||
276 | sort__first_dimension = SORT_DSO; | ||
277 | else if (!strcmp(sd->name, "symbol")) | ||
278 | sort__first_dimension = SORT_SYM; | ||
279 | else if (!strcmp(sd->name, "parent")) | ||
280 | sort__first_dimension = SORT_PARENT; | ||
281 | } | ||
282 | |||
283 | list_add_tail(&sd->entry->list, &hist_entry__sort_list); | ||
284 | sd->taken = 1; | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | return -ESRCH; | ||
290 | } | ||
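
sort_dimension__add() is the single entry point into the table above: it matches a token case-insensitively against sort_dimensions[], compiles parent_regex on demand, remembers the first key in sort__first_dimension, and appends the entry to hist_entry__sort_list. A sketch of how a --sort string such as the default "comm,dso,symbol" would be consumed; setup_sorting() is an illustrative wrapper name, not necessarily what the builtins use:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "sort.h"

    /* Split sort_order on ',' and register each key in order. */
    static void setup_sorting(void)
    {
        char *str = strdup(sort_order);
        char *tmp = str, *tok;

        while ((tok = strsep(&tmp, ",")) != NULL) {
            if (sort_dimension__add(tok) < 0) {
                fprintf(stderr, "Unknown --sort key: '%s'\n", tok);
                break;
            }
        }
        free(str);
    }
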
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h new file mode 100644 index 000000000000..333e664ff45f --- /dev/null +++ b/tools/perf/util/sort.h | |||
@@ -0,0 +1,99 @@ | |||
1 | #ifndef __PERF_SORT_H | ||
2 | #define __PERF_SORT_H | ||
3 | #include "../builtin.h" | ||
4 | |||
5 | #include "util.h" | ||
6 | |||
7 | #include "color.h" | ||
8 | #include <linux/list.h> | ||
9 | #include "cache.h" | ||
10 | #include <linux/rbtree.h> | ||
11 | #include "symbol.h" | ||
12 | #include "string.h" | ||
13 | #include "callchain.h" | ||
14 | #include "strlist.h" | ||
15 | #include "values.h" | ||
16 | |||
17 | #include "../perf.h" | ||
18 | #include "debug.h" | ||
19 | #include "header.h" | ||
20 | |||
21 | #include "parse-options.h" | ||
22 | #include "parse-events.h" | ||
23 | |||
24 | #include "thread.h" | ||
25 | #include "sort.h" | ||
26 | |||
27 | extern regex_t parent_regex; | ||
28 | extern char *sort_order; | ||
29 | extern char default_parent_pattern[]; | ||
30 | extern char *parent_pattern; | ||
31 | extern char default_sort_order[]; | ||
32 | extern int sort__need_collapse; | ||
33 | extern int sort__has_parent; | ||
34 | extern char *field_sep; | ||
35 | extern struct sort_entry sort_comm; | ||
36 | extern struct sort_entry sort_dso; | ||
37 | extern struct sort_entry sort_sym; | ||
38 | extern struct sort_entry sort_parent; | ||
39 | extern unsigned int dsos__col_width; | ||
40 | extern unsigned int comms__col_width; | ||
41 | extern unsigned int threads__col_width; | ||
42 | extern enum sort_type sort__first_dimension; | ||
43 | |||
44 | struct hist_entry { | ||
45 | struct rb_node rb_node; | ||
46 | u64 count; | ||
47 | struct thread *thread; | ||
48 | struct map *map; | ||
49 | struct symbol *sym; | ||
50 | u64 ip; | ||
51 | char level; | ||
52 | struct symbol *parent; | ||
53 | struct callchain_node callchain; | ||
54 | struct rb_root sorted_chain; | ||
55 | }; | ||
56 | |||
57 | enum sort_type { | ||
58 | SORT_PID, | ||
59 | SORT_COMM, | ||
60 | SORT_DSO, | ||
61 | SORT_SYM, | ||
62 | SORT_PARENT | ||
63 | }; | ||
64 | |||
65 | /* | ||
66 | * configurable sorting bits | ||
67 | */ | ||
68 | |||
69 | struct sort_entry { | ||
70 | struct list_head list; | ||
71 | |||
72 | const char *header; | ||
73 | |||
74 | int64_t (*cmp)(struct hist_entry *, struct hist_entry *); | ||
75 | int64_t (*collapse)(struct hist_entry *, struct hist_entry *); | ||
76 | size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); | ||
77 | unsigned int *width; | ||
78 | bool elide; | ||
79 | }; | ||
80 | |||
81 | extern struct sort_entry sort_thread; | ||
82 | extern struct list_head hist_entry__sort_list; | ||
83 | |||
84 | extern int repsep_fprintf(FILE *fp, const char *fmt, ...); | ||
85 | extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); | ||
86 | extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); | ||
87 | extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int); | ||
88 | extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used); | ||
89 | extern int64_t cmp_null(void *, void *); | ||
90 | extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *); | ||
91 | extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *); | ||
92 | extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *); | ||
93 | extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *); | ||
94 | extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *); | ||
95 | extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); | ||
96 | extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); | ||
97 | extern int sort_dimension__add(const char *); | ||
98 | |||
99 | #endif /* __PERF_SORT_H */ | ||
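
The callbacks in struct sort_entry are designed to be chained: two hist_entries are ordered by the first ->cmp() on hist_entry__sort_list that returns non-zero, and ->collapse() (only the comm entry provides one here) is preferred when entries are being merged. A minimal sketch of that chaining, assuming the list was already populated via sort_dimension__add(); hist_entry__cmp is an illustrative name:

    #include "sort.h"

    /* Walk the registered sort keys until one differentiates the
     * two entries; 0 means they compare equal on every key. */
    static int64_t hist_entry__cmp(struct hist_entry *l, struct hist_entry *r)
    {
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
            cmp = se->cmp(l, r);
            if (cmp)
                break;
        }
        return cmp;
    }
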
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h index d2aa86c014c1..a3d121d6c83e 100644 --- a/tools/perf/util/strbuf.h +++ b/tools/perf/util/strbuf.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef STRBUF_H | 1 | #ifndef __PERF_STRBUF_H |
2 | #define STRBUF_H | 2 | #define __PERF_STRBUF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Strbuf's can be use in many ways: as a byte array, or to store arbitrary | 5 | * Strbuf's can be use in many ways: as a byte array, or to store arbitrary |
@@ -134,4 +134,4 @@ extern int launch_editor(const char *path, struct strbuf *buffer, const char *co | |||
134 | extern int strbuf_branchname(struct strbuf *sb, const char *name); | 134 | extern int strbuf_branchname(struct strbuf *sb, const char *name); |
135 | extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); | 135 | extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); |
136 | 136 | ||
137 | #endif /* STRBUF_H */ | 137 | #endif /* __PERF_STRBUF_H */ |
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index c93eca9a7be3..f24a8cc933d5 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include "string.h" | 1 | #include "string.h" |
2 | #include "util.h" | ||
2 | 3 | ||
3 | static int hex(char ch) | 4 | static int hex(char ch) |
4 | { | 5 | { |
@@ -32,3 +33,196 @@ int hex2u64(const char *ptr, u64 *long_val) | |||
32 | 33 | ||
33 | return p - ptr; | 34 | return p - ptr; |
34 | } | 35 | } |
36 | |||
37 | char *strxfrchar(char *s, char from, char to) | ||
38 | { | ||
39 | char *p = s; | ||
40 | |||
41 | while ((p = strchr(p, from)) != NULL) | ||
42 | *p++ = to; | ||
43 | |||
44 | return s; | ||
45 | } | ||
46 | |||
47 | #define K 1024LL | ||
48 | /* | ||
49 | * perf_atoll() | ||
50 | * Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g. "256MB") | ||
51 | * and return its numeric value | ||
52 | */ | ||
53 | s64 perf_atoll(const char *str) | ||
54 | { | ||
55 | unsigned int i; | ||
56 | s64 length = -1, unit = 1; | ||
57 | |||
58 | if (!isdigit(str[0])) | ||
59 | goto out_err; | ||
60 | |||
61 | for (i = 1; i < strlen(str); i++) { | ||
62 | switch (str[i]) { | ||
63 | case 'B': | ||
64 | case 'b': | ||
65 | break; | ||
66 | case 'K': | ||
67 | if (str[i + 1] != 'B') | ||
68 | goto out_err; | ||
69 | else | ||
70 | goto kilo; | ||
71 | case 'k': | ||
72 | if (str[i + 1] != 'b') | ||
73 | goto out_err; | ||
74 | kilo: | ||
75 | unit = K; | ||
76 | break; | ||
77 | case 'M': | ||
78 | if (str[i + 1] != 'B') | ||
79 | goto out_err; | ||
80 | else | ||
81 | goto mega; | ||
82 | case 'm': | ||
83 | if (str[i + 1] != 'b') | ||
84 | goto out_err; | ||
85 | mega: | ||
86 | unit = K * K; | ||
87 | break; | ||
88 | case 'G': | ||
89 | if (str[i + 1] != 'B') | ||
90 | goto out_err; | ||
91 | else | ||
92 | goto giga; | ||
93 | case 'g': | ||
94 | if (str[i + 1] != 'b') | ||
95 | goto out_err; | ||
96 | giga: | ||
97 | unit = K * K * K; | ||
98 | break; | ||
99 | case 'T': | ||
100 | if (str[i + 1] != 'B') | ||
101 | goto out_err; | ||
102 | else | ||
103 | goto tera; | ||
104 | case 't': | ||
105 | if (str[i + 1] != 'b') | ||
106 | goto out_err; | ||
107 | tera: | ||
108 | unit = K * K * K * K; | ||
109 | break; | ||
110 | case '\0': /* only specified figures */ | ||
111 | unit = 1; | ||
112 | break; | ||
113 | default: | ||
114 | if (!isdigit(str[i])) | ||
115 | goto out_err; | ||
116 | break; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | length = atoll(str) * unit; | ||
121 | goto out; | ||
122 | |||
123 | out_err: | ||
124 | length = -1; | ||
125 | out: | ||
126 | return length; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Helper function for splitting a string into an argv-like array. | ||
131 | * originally copied from lib/argv_split.c | ||
132 | */ | ||
133 | static const char *skip_sep(const char *cp) | ||
134 | { | ||
135 | while (*cp && isspace(*cp)) | ||
136 | cp++; | ||
137 | |||
138 | return cp; | ||
139 | } | ||
140 | |||
141 | static const char *skip_arg(const char *cp) | ||
142 | { | ||
143 | while (*cp && !isspace(*cp)) | ||
144 | cp++; | ||
145 | |||
146 | return cp; | ||
147 | } | ||
148 | |||
149 | static int count_argc(const char *str) | ||
150 | { | ||
151 | int count = 0; | ||
152 | |||
153 | while (*str) { | ||
154 | str = skip_sep(str); | ||
155 | if (*str) { | ||
156 | count++; | ||
157 | str = skip_arg(str); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | return count; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * argv_free - free an argv | ||
166 | * @argv - the argument vector to be freed | ||
167 | * | ||
168 | * Frees an argv and the strings it points to. | ||
169 | */ | ||
170 | void argv_free(char **argv) | ||
171 | { | ||
172 | char **p; | ||
173 | for (p = argv; *p; p++) | ||
174 | free(*p); | ||
175 | |||
176 | free(argv); | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * argv_split - split a string at whitespace, returning an argv | ||
181 | * @str: the string to be split | ||
182 | * @argcp: returned argument count | ||
183 | * | ||
184 | * Returns an array of pointers to strings which are split out from | ||
185 | * @str. This is performed by strictly splitting on white-space; no | ||
186 | * quote processing is performed. Multiple whitespace characters are | ||
187 | * considered to be a single argument separator. The returned array | ||
188 | * is always NULL-terminated. Returns NULL on memory allocation | ||
189 | * failure. | ||
190 | */ | ||
191 | char **argv_split(const char *str, int *argcp) | ||
192 | { | ||
193 | int argc = count_argc(str); | ||
194 | char **argv = zalloc(sizeof(*argv) * (argc+1)); | ||
195 | char **argvp; | ||
196 | |||
197 | if (argv == NULL) | ||
198 | goto out; | ||
199 | |||
200 | if (argcp) | ||
201 | *argcp = argc; | ||
202 | |||
203 | argvp = argv; | ||
204 | |||
205 | while (*str) { | ||
206 | str = skip_sep(str); | ||
207 | |||
208 | if (*str) { | ||
209 | const char *p = str; | ||
210 | char *t; | ||
211 | |||
212 | str = skip_arg(str); | ||
213 | |||
214 | t = strndup(p, str-p); | ||
215 | if (t == NULL) | ||
216 | goto fail; | ||
217 | *argvp++ = t; | ||
218 | } | ||
219 | } | ||
220 | *argvp = NULL; | ||
221 | |||
222 | out: | ||
223 | return argv; | ||
224 | |||
225 | fail: | ||
226 | argv_free(argv); | ||
227 | return NULL; | ||
228 | } | ||
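
The additions to string.c serve two different callers: perf_atoll() lets option parsing accept human-readable sizes, while argv_split()/argv_free() (lifted from lib/argv_split.c) let perf-probe turn a free-form probe definition into an argument vector. A short usage sketch with made-up inputs:

    #include <stdio.h>
    #include "string.h"

    static void string_helpers_demo(void)
    {
        int argc, i;
        char **argv;

        /* "256MB" -> 268435456; malformed strings yield -1. */
        printf("%lld\n", (long long)perf_atoll("256MB"));

        /* Whitespace-separated split, no quote handling. */
        argv = argv_split("schedule+8 prev next", &argc);
        if (argv != NULL) {
            for (i = 0; i < argc; i++)
                printf("argv[%d] = %s\n", i, argv[i]);
            argv_free(argv);
        }
    }
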
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index bf39dfadfd24..bfecec265a1a 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h | |||
@@ -1,11 +1,15 @@ | |||
1 | #ifndef _PERF_STRING_H_ | 1 | #ifndef __PERF_STRING_H_ |
2 | #define _PERF_STRING_H_ | 2 | #define __PERF_STRING_H_ |
3 | 3 | ||
4 | #include "types.h" | 4 | #include "types.h" |
5 | 5 | ||
6 | int hex2u64(const char *ptr, u64 *val); | 6 | int hex2u64(const char *ptr, u64 *val); |
7 | char *strxfrchar(char *s, char from, char to); | ||
8 | s64 perf_atoll(const char *str); | ||
9 | char **argv_split(const char *str, int *argcp); | ||
10 | void argv_free(char **argv); | ||
7 | 11 | ||
8 | #define _STR(x) #x | 12 | #define _STR(x) #x |
9 | #define STR(x) _STR(x) | 13 | #define STR(x) _STR(x) |
10 | 14 | ||
11 | #endif | 15 | #endif /* __PERF_STRING_H */ |
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index 921818e44a54..cb4659306d7b 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef STRLIST_H_ | 1 | #ifndef __PERF_STRLIST_H |
2 | #define STRLIST_H_ | 2 | #define __PERF_STRLIST_H |
3 | 3 | ||
4 | #include <linux/rbtree.h> | 4 | #include <linux/rbtree.h> |
5 | #include <stdbool.h> | 5 | #include <stdbool.h> |
@@ -36,4 +36,4 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | int strlist__parse_list(struct strlist *self, const char *s); | 38 | int strlist__parse_list(struct strlist *self, const char *s); |
39 | #endif /* STRLIST_H_ */ | 39 | #endif /* __PERF_STRLIST_H */ |
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h index cd93195aedb3..e0781989cc31 100644 --- a/tools/perf/util/svghelper.h +++ b/tools/perf/util/svghelper.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _INCLUDE_GUARD_SVG_HELPER_ | 1 | #ifndef __PERF_SVGHELPER_H |
2 | #define _INCLUDE_GUARD_SVG_HELPER_ | 2 | #define __PERF_SVGHELPER_H |
3 | 3 | ||
4 | #include "types.h" | 4 | #include "types.h" |
5 | 5 | ||
@@ -25,4 +25,4 @@ extern void svg_close(void); | |||
25 | 25 | ||
26 | extern int svg_page_width; | 26 | extern int svg_page_width; |
27 | 27 | ||
28 | #endif | 28 | #endif /* __PERF_SVGHELPER_H */ |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 226f44a2357d..fffcb937cdcb 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -2,14 +2,20 @@ | |||
2 | #include "../perf.h" | 2 | #include "../perf.h" |
3 | #include "string.h" | 3 | #include "string.h" |
4 | #include "symbol.h" | 4 | #include "symbol.h" |
5 | #include "thread.h" | ||
5 | 6 | ||
6 | #include "debug.h" | 7 | #include "debug.h" |
7 | 8 | ||
9 | #include <asm/bug.h> | ||
8 | #include <libelf.h> | 10 | #include <libelf.h> |
9 | #include <gelf.h> | 11 | #include <gelf.h> |
10 | #include <elf.h> | 12 | #include <elf.h> |
13 | #include <limits.h> | ||
14 | #include <sys/utsname.h> | ||
11 | 15 | ||
12 | const char *sym_hist_filter; | 16 | #ifndef NT_GNU_BUILD_ID |
17 | #define NT_GNU_BUILD_ID 3 | ||
18 | #endif | ||
13 | 19 | ||
14 | enum dso_origin { | 20 | enum dso_origin { |
15 | DSO__ORIG_KERNEL = 0, | 21 | DSO__ORIG_KERNEL = 0, |
@@ -18,94 +24,189 @@ enum dso_origin { | |||
18 | DSO__ORIG_UBUNTU, | 24 | DSO__ORIG_UBUNTU, |
19 | DSO__ORIG_BUILDID, | 25 | DSO__ORIG_BUILDID, |
20 | DSO__ORIG_DSO, | 26 | DSO__ORIG_DSO, |
27 | DSO__ORIG_KMODULE, | ||
21 | DSO__ORIG_NOT_FOUND, | 28 | DSO__ORIG_NOT_FOUND, |
22 | }; | 29 | }; |
23 | 30 | ||
24 | static struct symbol *symbol__new(u64 start, u64 len, | 31 | static void dsos__add(struct list_head *head, struct dso *dso); |
25 | const char *name, unsigned int priv_size, | 32 | static struct map *thread__find_map_by_name(struct thread *self, char *name); |
26 | u64 obj_start, int v) | 33 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); |
34 | struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); | ||
35 | static int dso__load_kernel_sym(struct dso *self, struct map *map, | ||
36 | struct thread *thread, symbol_filter_t filter); | ||
37 | unsigned int symbol__priv_size; | ||
38 | static int vmlinux_path__nr_entries; | ||
39 | static char **vmlinux_path; | ||
40 | |||
41 | static struct symbol_conf symbol_conf__defaults = { | ||
42 | .use_modules = true, | ||
43 | .try_vmlinux_path = true, | ||
44 | }; | ||
45 | |||
46 | static struct thread kthread_mem; | ||
47 | struct thread *kthread = &kthread_mem; | ||
48 | |||
49 | bool dso__loaded(const struct dso *self, enum map_type type) | ||
27 | { | 50 | { |
28 | size_t namelen = strlen(name) + 1; | 51 | return self->loaded & (1 << type); |
29 | struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); | 52 | } |
30 | 53 | ||
31 | if (!self) | 54 | static void dso__set_loaded(struct dso *self, enum map_type type) |
32 | return NULL; | 55 | { |
56 | self->loaded |= (1 << type); | ||
57 | } | ||
33 | 58 | ||
34 | if (v >= 2) | 59 | static void symbols__fixup_end(struct rb_root *self) |
35 | printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", | 60 | { |
36 | (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start); | 61 | struct rb_node *nd, *prevnd = rb_first(self); |
62 | struct symbol *curr, *prev; | ||
63 | |||
64 | if (prevnd == NULL) | ||
65 | return; | ||
37 | 66 | ||
38 | self->obj_start= obj_start; | 67 | curr = rb_entry(prevnd, struct symbol, rb_node); |
39 | self->hist = NULL; | ||
40 | self->hist_sum = 0; | ||
41 | 68 | ||
42 | if (sym_hist_filter && !strcmp(name, sym_hist_filter)) | 69 | for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { |
43 | self->hist = calloc(sizeof(u64), len); | 70 | prev = curr; |
71 | curr = rb_entry(nd, struct symbol, rb_node); | ||
44 | 72 | ||
45 | if (priv_size) { | 73 | if (prev->end == prev->start) |
46 | memset(self, 0, priv_size); | 74 | prev->end = curr->start - 1; |
47 | self = ((void *)self) + priv_size; | ||
48 | } | 75 | } |
76 | |||
77 | /* Last entry */ | ||
78 | if (curr->end == curr->start) | ||
79 | curr->end = roundup(curr->start, 4096); | ||
80 | } | ||
81 | |||
82 | static void __thread__fixup_maps_end(struct thread *self, enum map_type type) | ||
83 | { | ||
84 | struct map *prev, *curr; | ||
85 | struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); | ||
86 | |||
87 | if (prevnd == NULL) | ||
88 | return; | ||
89 | |||
90 | curr = rb_entry(prevnd, struct map, rb_node); | ||
91 | |||
92 | for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { | ||
93 | prev = curr; | ||
94 | curr = rb_entry(nd, struct map, rb_node); | ||
95 | prev->end = curr->start - 1; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * We still don't have the actual symbols, so guess the | ||
100 | * last map's final address. | ||
101 | */ | ||
102 | curr->end = ~0UL; | ||
103 | } | ||
104 | |||
105 | static void thread__fixup_maps_end(struct thread *self) | ||
106 | { | ||
107 | int i; | ||
108 | for (i = 0; i < MAP__NR_TYPES; ++i) | ||
109 | __thread__fixup_maps_end(self, i); | ||
110 | } | ||
111 | |||
112 | static struct symbol *symbol__new(u64 start, u64 len, const char *name) | ||
113 | { | ||
114 | size_t namelen = strlen(name) + 1; | ||
115 | struct symbol *self = zalloc(symbol__priv_size + | ||
116 | sizeof(*self) + namelen); | ||
117 | if (self == NULL) | ||
118 | return NULL; | ||
119 | |||
120 | if (symbol__priv_size) | ||
121 | self = ((void *)self) + symbol__priv_size; | ||
122 | |||
49 | self->start = start; | 123 | self->start = start; |
50 | self->end = len ? start + len - 1 : start; | 124 | self->end = len ? start + len - 1 : start; |
125 | |||
126 | pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); | ||
127 | |||
51 | memcpy(self->name, name, namelen); | 128 | memcpy(self->name, name, namelen); |
52 | 129 | ||
53 | return self; | 130 | return self; |
54 | } | 131 | } |
55 | 132 | ||
56 | static void symbol__delete(struct symbol *self, unsigned int priv_size) | 133 | static void symbol__delete(struct symbol *self) |
57 | { | 134 | { |
58 | free(((void *)self) - priv_size); | 135 | free(((void *)self) - symbol__priv_size); |
59 | } | 136 | } |
60 | 137 | ||
61 | static size_t symbol__fprintf(struct symbol *self, FILE *fp) | 138 | static size_t symbol__fprintf(struct symbol *self, FILE *fp) |
62 | { | 139 | { |
63 | if (!self->module) | 140 | return fprintf(fp, " %llx-%llx %s\n", |
64 | return fprintf(fp, " %llx-%llx %s\n", | ||
65 | self->start, self->end, self->name); | 141 | self->start, self->end, self->name); |
66 | else | ||
67 | return fprintf(fp, " %llx-%llx %s \t[%s]\n", | ||
68 | self->start, self->end, self->name, self->module->name); | ||
69 | } | 142 | } |
70 | 143 | ||
71 | struct dso *dso__new(const char *name, unsigned int sym_priv_size) | 144 | static void dso__set_long_name(struct dso *self, char *name) |
145 | { | ||
146 | if (name == NULL) | ||
147 | return; | ||
148 | self->long_name = name; | ||
149 | self->long_name_len = strlen(name); | ||
150 | } | ||
151 | |||
152 | static void dso__set_basename(struct dso *self) | ||
153 | { | ||
154 | self->short_name = basename(self->long_name); | ||
155 | } | ||
156 | |||
157 | struct dso *dso__new(const char *name) | ||
72 | { | 158 | { |
73 | struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); | 159 | struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); |
74 | 160 | ||
75 | if (self != NULL) { | 161 | if (self != NULL) { |
162 | int i; | ||
76 | strcpy(self->name, name); | 163 | strcpy(self->name, name); |
77 | self->syms = RB_ROOT; | 164 | dso__set_long_name(self, self->name); |
78 | self->sym_priv_size = sym_priv_size; | 165 | self->short_name = self->name; |
166 | for (i = 0; i < MAP__NR_TYPES; ++i) | ||
167 | self->symbols[i] = RB_ROOT; | ||
79 | self->find_symbol = dso__find_symbol; | 168 | self->find_symbol = dso__find_symbol; |
80 | self->slen_calculated = 0; | 169 | self->slen_calculated = 0; |
81 | self->origin = DSO__ORIG_NOT_FOUND; | 170 | self->origin = DSO__ORIG_NOT_FOUND; |
171 | self->loaded = 0; | ||
172 | self->has_build_id = 0; | ||
82 | } | 173 | } |
83 | 174 | ||
84 | return self; | 175 | return self; |
85 | } | 176 | } |
86 | 177 | ||
87 | static void dso__delete_symbols(struct dso *self) | 178 | static void symbols__delete(struct rb_root *self) |
88 | { | 179 | { |
89 | struct symbol *pos; | 180 | struct symbol *pos; |
90 | struct rb_node *next = rb_first(&self->syms); | 181 | struct rb_node *next = rb_first(self); |
91 | 182 | ||
92 | while (next) { | 183 | while (next) { |
93 | pos = rb_entry(next, struct symbol, rb_node); | 184 | pos = rb_entry(next, struct symbol, rb_node); |
94 | next = rb_next(&pos->rb_node); | 185 | next = rb_next(&pos->rb_node); |
95 | rb_erase(&pos->rb_node, &self->syms); | 186 | rb_erase(&pos->rb_node, self); |
96 | symbol__delete(pos, self->sym_priv_size); | 187 | symbol__delete(pos); |
97 | } | 188 | } |
98 | } | 189 | } |
99 | 190 | ||
100 | void dso__delete(struct dso *self) | 191 | void dso__delete(struct dso *self) |
101 | { | 192 | { |
102 | dso__delete_symbols(self); | 193 | int i; |
194 | for (i = 0; i < MAP__NR_TYPES; ++i) | ||
195 | symbols__delete(&self->symbols[i]); | ||
196 | if (self->long_name != self->name) | ||
197 | free(self->long_name); | ||
103 | free(self); | 198 | free(self); |
104 | } | 199 | } |
105 | 200 | ||
106 | static void dso__insert_symbol(struct dso *self, struct symbol *sym) | 201 | void dso__set_build_id(struct dso *self, void *build_id) |
107 | { | 202 | { |
108 | struct rb_node **p = &self->syms.rb_node; | 203 | memcpy(self->build_id, build_id, sizeof(self->build_id)); |
204 | self->has_build_id = 1; | ||
205 | } | ||
206 | |||
207 | static void symbols__insert(struct rb_root *self, struct symbol *sym) | ||
208 | { | ||
209 | struct rb_node **p = &self->rb_node; | ||
109 | struct rb_node *parent = NULL; | 210 | struct rb_node *parent = NULL; |
110 | const u64 ip = sym->start; | 211 | const u64 ip = sym->start; |
111 | struct symbol *s; | 212 | struct symbol *s; |
@@ -119,17 +220,17 @@ static void dso__insert_symbol(struct dso *self, struct symbol *sym) | |||
119 | p = &(*p)->rb_right; | 220 | p = &(*p)->rb_right; |
120 | } | 221 | } |
121 | rb_link_node(&sym->rb_node, parent, p); | 222 | rb_link_node(&sym->rb_node, parent, p); |
122 | rb_insert_color(&sym->rb_node, &self->syms); | 223 | rb_insert_color(&sym->rb_node, self); |
123 | } | 224 | } |
124 | 225 | ||
125 | struct symbol *dso__find_symbol(struct dso *self, u64 ip) | 226 | static struct symbol *symbols__find(struct rb_root *self, u64 ip) |
126 | { | 227 | { |
127 | struct rb_node *n; | 228 | struct rb_node *n; |
128 | 229 | ||
129 | if (self == NULL) | 230 | if (self == NULL) |
130 | return NULL; | 231 | return NULL; |
131 | 232 | ||
132 | n = self->syms.rb_node; | 233 | n = self->rb_node; |
133 | 234 | ||
134 | while (n) { | 235 | while (n) { |
135 | struct symbol *s = rb_entry(n, struct symbol, rb_node); | 236 | struct symbol *s = rb_entry(n, struct symbol, rb_node); |
@@ -145,12 +246,42 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) | |||
145 | return NULL; | 246 | return NULL; |
146 | } | 247 | } |
147 | 248 | ||
148 | size_t dso__fprintf(struct dso *self, FILE *fp) | 249 | struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr) |
149 | { | 250 | { |
150 | size_t ret = fprintf(fp, "dso: %s\n", self->name); | 251 | return symbols__find(&self->symbols[type], addr); |
252 | } | ||
253 | |||
254 | int build_id__sprintf(u8 *self, int len, char *bf) | ||
255 | { | ||
256 | char *bid = bf; | ||
257 | u8 *raw = self; | ||
258 | int i; | ||
259 | |||
260 | for (i = 0; i < len; ++i) { | ||
261 | sprintf(bid, "%02x", *raw); | ||
262 | ++raw; | ||
263 | bid += 2; | ||
264 | } | ||
265 | |||
266 | return raw - self; | ||
267 | } | ||
268 | |||
269 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp) | ||
270 | { | ||
271 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | ||
272 | |||
273 | build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); | ||
274 | return fprintf(fp, "%s", sbuild_id); | ||
275 | } | ||
151 | 276 | ||
277 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) | ||
278 | { | ||
152 | struct rb_node *nd; | 279 | struct rb_node *nd; |
153 | for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { | 280 | size_t ret = fprintf(fp, "dso: %s (", self->short_name); |
281 | |||
282 | ret += dso__fprintf_buildid(self, fp); | ||
283 | ret += fprintf(fp, ")\n"); | ||
284 | for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { | ||
154 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 285 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
155 | ret += symbol__fprintf(pos, fp); | 286 | ret += symbol__fprintf(pos, fp); |
156 | } | 287 | } |
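
build_id__sprintf() above renders the raw build-id bytes held in struct dso as the usual lowercase hex string, which is what dso__fprintf_buildid() prints next to the dso name. A tiny sketch of the expected output for a made-up id, assuming the GNU build-id is the 20-byte SHA-1 the toolchain emits by default:

    #include <stdio.h>
    #include "symbol.h"

    static void build_id_demo(void)
    {
        u8 id[20] = { 0xde, 0xad, 0xbe, 0xef };   /* rest zero-filled */
        char buf[sizeof(id) * 2 + 1];

        build_id__sprintf(id, sizeof(id), buf);
        printf("%s\n", buf);    /* "deadbeef" followed by 32 zeros */
    }
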
@@ -158,13 +289,17 @@ size_t dso__fprintf(struct dso *self, FILE *fp) | |||
158 | return ret; | 289 | return ret; |
159 | } | 290 | } |
160 | 291 | ||
161 | static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) | 292 | /* |
293 | * Loads the function entries in /proc/kallsyms into kernel_map->dso, | ||
294 | * so that we can in the next step set the symbol ->end address and then | ||
295 | * call kernel_maps__split_kallsyms. | ||
296 | */ | ||
297 | static int dso__load_all_kallsyms(struct dso *self, struct map *map) | ||
162 | { | 298 | { |
163 | struct rb_node *nd, *prevnd; | ||
164 | char *line = NULL; | 299 | char *line = NULL; |
165 | size_t n; | 300 | size_t n; |
301 | struct rb_root *root = &self->symbols[map->type]; | ||
166 | FILE *file = fopen("/proc/kallsyms", "r"); | 302 | FILE *file = fopen("/proc/kallsyms", "r"); |
167 | int count = 0; | ||
168 | 303 | ||
169 | if (file == NULL) | 304 | if (file == NULL) |
170 | goto out_failure; | 305 | goto out_failure; |
@@ -174,6 +309,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) | |||
174 | struct symbol *sym; | 309 | struct symbol *sym; |
175 | int line_len, len; | 310 | int line_len, len; |
176 | char symbol_type; | 311 | char symbol_type; |
312 | char *symbol_name; | ||
177 | 313 | ||
178 | line_len = getline(&line, &n, file); | 314 | line_len = getline(&line, &n, file); |
179 | if (line_len < 0) | 315 | if (line_len < 0) |
@@ -196,44 +332,26 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) | |||
196 | */ | 332 | */ |
197 | if (symbol_type != 'T' && symbol_type != 'W') | 333 | if (symbol_type != 'T' && symbol_type != 'W') |
198 | continue; | 334 | continue; |
335 | |||
336 | symbol_name = line + len + 2; | ||
199 | /* | 337 | /* |
200 | * Well fix up the end later, when we have all sorted. | 338 | * Will fix up the end later, when we have all symbols sorted. |
201 | */ | 339 | */ |
202 | sym = symbol__new(start, 0xdead, line + len + 2, | 340 | sym = symbol__new(start, 0, symbol_name); |
203 | self->sym_priv_size, 0, v); | ||
204 | 341 | ||
205 | if (sym == NULL) | 342 | if (sym == NULL) |
206 | goto out_delete_line; | 343 | goto out_delete_line; |
207 | 344 | /* | |
208 | if (filter && filter(self, sym)) | 345 | * We will pass the symbols to the filter later, in |
209 | symbol__delete(sym, self->sym_priv_size); | 346 | * map__split_kallsyms, when we have split the maps per module |
210 | else { | 347 | */ |
211 | dso__insert_symbol(self, sym); | 348 | symbols__insert(root, sym); |
212 | count++; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Now that we have all sorted out, just set the ->end of all | ||
218 | * symbols | ||
219 | */ | ||
220 | prevnd = rb_first(&self->syms); | ||
221 | |||
222 | if (prevnd == NULL) | ||
223 | goto out_delete_line; | ||
224 | |||
225 | for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { | ||
226 | struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), | ||
227 | *curr = rb_entry(nd, struct symbol, rb_node); | ||
228 | |||
229 | prev->end = curr->start - 1; | ||
230 | prevnd = nd; | ||
231 | } | 349 | } |
232 | 350 | ||
233 | free(line); | 351 | free(line); |
234 | fclose(file); | 352 | fclose(file); |
235 | 353 | ||
236 | return count; | 354 | return 0; |
237 | 355 | ||
238 | out_delete_line: | 356 | out_delete_line: |
239 | free(line); | 357 | free(line); |
@@ -241,14 +359,114 @@ out_failure: | |||
241 | return -1; | 359 | return -1; |
242 | } | 360 | } |
243 | 361 | ||
244 | static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) | 362 | /* |
363 | * Split the symbols into maps, making sure there are no overlaps, i.e. the | ||
364 | * kernel range is broken in several maps, named [kernel].N, as we don't have | ||
365 | * the original ELF section names vmlinux have. | ||
366 | */ | ||
367 | static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread *thread, | ||
368 | symbol_filter_t filter) | ||
369 | { | ||
370 | struct map *curr_map = map; | ||
371 | struct symbol *pos; | ||
372 | int count = 0; | ||
373 | struct rb_root *root = &self->symbols[map->type]; | ||
374 | struct rb_node *next = rb_first(root); | ||
375 | int kernel_range = 0; | ||
376 | |||
377 | while (next) { | ||
378 | char *module; | ||
379 | |||
380 | pos = rb_entry(next, struct symbol, rb_node); | ||
381 | next = rb_next(&pos->rb_node); | ||
382 | |||
383 | module = strchr(pos->name, '\t'); | ||
384 | if (module) { | ||
385 | if (!thread->use_modules) | ||
386 | goto discard_symbol; | ||
387 | |||
388 | *module++ = '\0'; | ||
389 | |||
390 | if (strcmp(self->name, module)) { | ||
391 | curr_map = thread__find_map_by_name(thread, module); | ||
392 | if (curr_map == NULL) { | ||
393 | pr_debug("/proc/{kallsyms,modules} " | ||
394 | "inconsistency!\n"); | ||
395 | return -1; | ||
396 | } | ||
397 | } | ||
398 | /* | ||
399 | * So that we look just like we get from .ko files, | ||
400 | * i.e. not prelinked, relative to map->start. | ||
401 | */ | ||
402 | pos->start = curr_map->map_ip(curr_map, pos->start); | ||
403 | pos->end = curr_map->map_ip(curr_map, pos->end); | ||
404 | } else if (curr_map != map) { | ||
405 | char dso_name[PATH_MAX]; | ||
406 | struct dso *dso; | ||
407 | |||
408 | snprintf(dso_name, sizeof(dso_name), "[kernel].%d", | ||
409 | kernel_range++); | ||
410 | |||
411 | dso = dso__new(dso_name); | ||
412 | if (dso == NULL) | ||
413 | return -1; | ||
414 | |||
415 | curr_map = map__new2(pos->start, dso, map->type); | ||
416 | if (map == NULL) { | ||
417 | dso__delete(dso); | ||
418 | return -1; | ||
419 | } | ||
420 | |||
421 | curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; | ||
422 | __thread__insert_map(thread, curr_map); | ||
423 | ++kernel_range; | ||
424 | } | ||
425 | |||
426 | if (filter && filter(curr_map, pos)) { | ||
427 | discard_symbol: rb_erase(&pos->rb_node, root); | ||
428 | symbol__delete(pos); | ||
429 | } else { | ||
430 | if (curr_map != map) { | ||
431 | rb_erase(&pos->rb_node, root); | ||
432 | symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); | ||
433 | } | ||
434 | count++; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | return count; | ||
439 | } | ||
440 | |||
441 | |||
442 | static int dso__load_kallsyms(struct dso *self, struct map *map, | ||
443 | struct thread *thread, symbol_filter_t filter) | ||
444 | { | ||
445 | if (dso__load_all_kallsyms(self, map) < 0) | ||
446 | return -1; | ||
447 | |||
448 | symbols__fixup_end(&self->symbols[map->type]); | ||
449 | self->origin = DSO__ORIG_KERNEL; | ||
450 | |||
451 | return dso__split_kallsyms(self, map, thread, filter); | ||
452 | } | ||
453 | |||
454 | size_t kernel_maps__fprintf(FILE *fp) | ||
455 | { | ||
456 | size_t printed = fprintf(fp, "Kernel maps:\n"); | ||
457 | printed += thread__fprintf_maps(kthread, fp); | ||
458 | return printed + fprintf(fp, "END kernel maps\n"); | ||
459 | } | ||
460 | |||
461 | static int dso__load_perf_map(struct dso *self, struct map *map, | ||
462 | symbol_filter_t filter) | ||
245 | { | 463 | { |
246 | char *line = NULL; | 464 | char *line = NULL; |
247 | size_t n; | 465 | size_t n; |
248 | FILE *file; | 466 | FILE *file; |
249 | int nr_syms = 0; | 467 | int nr_syms = 0; |
250 | 468 | ||
251 | file = fopen(self->name, "r"); | 469 | file = fopen(self->long_name, "r"); |
252 | if (file == NULL) | 470 | if (file == NULL) |
253 | goto out_failure; | 471 | goto out_failure; |
254 | 472 | ||
@@ -278,16 +496,15 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) | |||
278 | if (len + 2 >= line_len) | 496 | if (len + 2 >= line_len) |
279 | continue; | 497 | continue; |
280 | 498 | ||
281 | sym = symbol__new(start, size, line + len, | 499 | sym = symbol__new(start, size, line + len); |
282 | self->sym_priv_size, start, v); | ||
283 | 500 | ||
284 | if (sym == NULL) | 501 | if (sym == NULL) |
285 | goto out_delete_line; | 502 | goto out_delete_line; |
286 | 503 | ||
287 | if (filter && filter(self, sym)) | 504 | if (filter && filter(map, sym)) |
288 | symbol__delete(sym, self->sym_priv_size); | 505 | symbol__delete(sym); |
289 | else { | 506 | else { |
290 | dso__insert_symbol(self, sym); | 507 | symbols__insert(&self->symbols[map->type], sym); |
291 | nr_syms++; | 508 | nr_syms++; |
292 | } | 509 | } |
293 | } | 510 | } |
@@ -393,7 +610,8 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | |||
393 | * And always look at the original dso, not at debuginfo packages, that | 610 | * And always look at the original dso, not at debuginfo packages, that |
394 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). | 611 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). |
395 | */ | 612 | */ |
396 | static int dso__synthesize_plt_symbols(struct dso *self, int v) | 613 | static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, |
614 | symbol_filter_t filter) | ||
397 | { | 615 | { |
398 | uint32_t nr_rel_entries, idx; | 616 | uint32_t nr_rel_entries, idx; |
399 | GElf_Sym sym; | 617 | GElf_Sym sym; |
@@ -409,7 +627,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) | |||
409 | Elf *elf; | 627 | Elf *elf; |
410 | int nr = 0, symidx, fd, err = 0; | 628 | int nr = 0, symidx, fd, err = 0; |
411 | 629 | ||
412 | fd = open(self->name, O_RDONLY); | 630 | fd = open(self->long_name, O_RDONLY); |
413 | if (fd < 0) | 631 | if (fd < 0) |
414 | goto out; | 632 | goto out; |
415 | 633 | ||
@@ -477,12 +695,16 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) | |||
477 | "%s@plt", elf_sym__name(&sym, symstrs)); | 695 | "%s@plt", elf_sym__name(&sym, symstrs)); |
478 | 696 | ||
479 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, | 697 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, |
480 | sympltname, self->sym_priv_size, 0, v); | 698 | sympltname); |
481 | if (!f) | 699 | if (!f) |
482 | goto out_elf_end; | 700 | goto out_elf_end; |
483 | 701 | ||
484 | dso__insert_symbol(self, f); | 702 | if (filter && filter(map, f)) |
485 | ++nr; | 703 | symbol__delete(f); |
704 | else { | ||
705 | symbols__insert(&self->symbols[map->type], f); | ||
706 | ++nr; | ||
707 | } | ||
486 | } | 708 | } |
487 | } else if (shdr_rel_plt.sh_type == SHT_REL) { | 709 | } else if (shdr_rel_plt.sh_type == SHT_REL) { |
488 | GElf_Rel pos_mem, *pos; | 710 | GElf_Rel pos_mem, *pos; |
@@ -495,12 +717,16 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) | |||
495 | "%s@plt", elf_sym__name(&sym, symstrs)); | 717 | "%s@plt", elf_sym__name(&sym, symstrs)); |
496 | 718 | ||
497 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, | 719 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, |
498 | sympltname, self->sym_priv_size, 0, v); | 720 | sympltname); |
499 | if (!f) | 721 | if (!f) |
500 | goto out_elf_end; | 722 | goto out_elf_end; |
501 | 723 | ||
502 | dso__insert_symbol(self, f); | 724 | if (filter && filter(map, f)) |
503 | ++nr; | 725 | symbol__delete(f); |
726 | else { | ||
727 | symbols__insert(&self->symbols[map->type], f); | ||
728 | ++nr; | ||
729 | } | ||
504 | } | 730 | } |
505 | } | 731 | } |
506 | 732 | ||
@@ -513,14 +739,18 @@ out_close: | |||
513 | if (err == 0) | 739 | if (err == 0) |
514 | return nr; | 740 | return nr; |
515 | out: | 741 | out: |
516 | fprintf(stderr, "%s: problems reading %s PLT info.\n", | 742 | pr_warning("%s: problems reading %s PLT info.\n", |
517 | __func__, self->name); | 743 | __func__, self->long_name); |
518 | return 0; | 744 | return 0; |
519 | } | 745 | } |
520 | 746 | ||
521 | static int dso__load_sym(struct dso *self, int fd, const char *name, | 747 | static int dso__load_sym(struct dso *self, struct map *map, |
522 | symbol_filter_t filter, int v, struct module *mod) | 748 | struct thread *thread, const char *name, int fd, |
749 | symbol_filter_t filter, int kernel, int kmodule) | ||
523 | { | 750 | { |
751 | struct map *curr_map = map; | ||
752 | struct dso *curr_dso = self; | ||
753 | size_t dso_name_len = strlen(self->short_name); | ||
524 | Elf_Data *symstrs, *secstrs; | 754 | Elf_Data *symstrs, *secstrs; |
525 | uint32_t nr_syms; | 755 | uint32_t nr_syms; |
526 | int err = -1; | 756 | int err = -1; |
@@ -531,19 +761,16 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, | |||
531 | GElf_Sym sym; | 761 | GElf_Sym sym; |
532 | Elf_Scn *sec, *sec_strndx; | 762 | Elf_Scn *sec, *sec_strndx; |
533 | Elf *elf; | 763 | Elf *elf; |
534 | int nr = 0, kernel = !strcmp("[kernel]", self->name); | 764 | int nr = 0; |
535 | 765 | ||
536 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | 766 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); |
537 | if (elf == NULL) { | 767 | if (elf == NULL) { |
538 | if (v) | 768 | pr_err("%s: cannot read %s ELF file.\n", __func__, name); |
539 | fprintf(stderr, "%s: cannot read %s ELF file.\n", | ||
540 | __func__, name); | ||
541 | goto out_close; | 769 | goto out_close; |
542 | } | 770 | } |
543 | 771 | ||
544 | if (gelf_getehdr(elf, &ehdr) == NULL) { | 772 | if (gelf_getehdr(elf, &ehdr) == NULL) { |
545 | if (v) | 773 | pr_err("%s: cannot get elf header.\n", __func__); |
546 | fprintf(stderr, "%s: cannot get elf header.\n", __func__); | ||
547 | goto out_elf_end; | 774 | goto out_elf_end; |
548 | } | 775 | } |
549 | 776 | ||
@@ -587,9 +814,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, | |||
587 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | 814 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { |
588 | struct symbol *f; | 815 | struct symbol *f; |
589 | const char *elf_name; | 816 | const char *elf_name; |
590 | char *demangled; | 817 | char *demangled = NULL; |
591 | u64 obj_start; | ||
592 | struct section *section = NULL; | ||
593 | int is_label = elf_sym__is_label(&sym); | 818 | int is_label = elf_sym__is_label(&sym); |
594 | const char *section_name; | 819 | const char *section_name; |
595 | 820 | ||
@@ -605,52 +830,85 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, | |||
605 | if (is_label && !elf_sec__is_text(&shdr, secstrs)) | 830 | if (is_label && !elf_sec__is_text(&shdr, secstrs)) |
606 | continue; | 831 | continue; |
607 | 832 | ||
833 | elf_name = elf_sym__name(&sym, symstrs); | ||
608 | section_name = elf_sec__name(&shdr, secstrs); | 834 | section_name = elf_sec__name(&shdr, secstrs); |
609 | obj_start = sym.st_value; | ||
610 | 835 | ||
611 | if (self->adjust_symbols) { | 836 | if (kernel || kmodule) { |
612 | if (v >= 2) | 837 | char dso_name[PATH_MAX]; |
613 | printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", | ||
614 | (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); | ||
615 | 838 | ||
616 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | 839 | if (strcmp(section_name, |
617 | } | 840 | curr_dso->short_name + dso_name_len) == 0) |
841 | goto new_symbol; | ||
618 | 842 | ||
619 | if (mod) { | 843 | if (strcmp(section_name, ".text") == 0) { |
620 | section = mod->sections->find_section(mod->sections, section_name); | 844 | curr_map = map; |
621 | if (section) | 845 | curr_dso = self; |
622 | sym.st_value += section->vma; | 846 | goto new_symbol; |
623 | else { | ||
624 | fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n", | ||
625 | mod->name, section_name); | ||
626 | goto out_elf_end; | ||
627 | } | 847 | } |
848 | |||
849 | snprintf(dso_name, sizeof(dso_name), | ||
850 | "%s%s", self->short_name, section_name); | ||
851 | |||
852 | curr_map = thread__find_map_by_name(thread, dso_name); | ||
853 | if (curr_map == NULL) { | ||
854 | u64 start = sym.st_value; | ||
855 | |||
856 | if (kmodule) | ||
857 | start += map->start + shdr.sh_offset; | ||
858 | |||
859 | curr_dso = dso__new(dso_name); | ||
860 | if (curr_dso == NULL) | ||
861 | goto out_elf_end; | ||
862 | curr_map = map__new2(start, curr_dso, | ||
863 | MAP__FUNCTION); | ||
864 | if (curr_map == NULL) { | ||
865 | dso__delete(curr_dso); | ||
866 | goto out_elf_end; | ||
867 | } | ||
868 | curr_map->map_ip = identity__map_ip; | ||
869 | curr_map->unmap_ip = identity__map_ip; | ||
870 | curr_dso->origin = DSO__ORIG_KERNEL; | ||
871 | __thread__insert_map(kthread, curr_map); | ||
872 | dsos__add(&dsos__kernel, curr_dso); | ||
873 | } else | ||
874 | curr_dso = curr_map->dso; | ||
875 | |||
876 | goto new_symbol; | ||
877 | } | ||
878 | |||
879 | if (curr_dso->adjust_symbols) { | ||
880 | pr_debug2("adjusting symbol: st_value: %Lx sh_addr: " | ||
881 | "%Lx sh_offset: %Lx\n", (u64)sym.st_value, | ||
882 | (u64)shdr.sh_addr, (u64)shdr.sh_offset); | ||
883 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | ||
628 | } | 884 | } |
629 | /* | 885 | /* |
630 | * We need to figure out if the object was created from C++ sources | 886 | * We need to figure out if the object was created from C++ sources |
631 | * DWARF DW_compile_unit has this, but we don't always have access | 887 | * DWARF DW_compile_unit has this, but we don't always have access |
632 | * to it... | 888 | * to it... |
633 | */ | 889 | */ |
634 | elf_name = elf_sym__name(&sym, symstrs); | ||
635 | demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); | 890 | demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); |
636 | if (demangled != NULL) | 891 | if (demangled != NULL) |
637 | elf_name = demangled; | 892 | elf_name = demangled; |
638 | 893 | new_symbol: | |
639 | f = symbol__new(sym.st_value, sym.st_size, elf_name, | 894 | f = symbol__new(sym.st_value, sym.st_size, elf_name); |
640 | self->sym_priv_size, obj_start, v); | ||
641 | free(demangled); | 895 | free(demangled); |
642 | if (!f) | 896 | if (!f) |
643 | goto out_elf_end; | 897 | goto out_elf_end; |
644 | 898 | ||
645 | if (filter && filter(self, f)) | 899 | if (filter && filter(curr_map, f)) |
646 | symbol__delete(f, self->sym_priv_size); | 900 | symbol__delete(f); |
647 | else { | 901 | else { |
648 | f->module = mod; | 902 | symbols__insert(&curr_dso->symbols[curr_map->type], f); |
649 | dso__insert_symbol(self, f); | ||
650 | nr++; | 903 | nr++; |
651 | } | 904 | } |
652 | } | 905 | } |
653 | 906 | ||
907 | /* | ||
908 | * For misannotated, zeroed, ASM function sizes. | ||
909 | */ | ||
910 | if (nr > 0) | ||
911 | symbols__fixup_end(&self->symbols[map->type]); | ||
654 | err = nr; | 912 | err = nr; |
655 | out_elf_end: | 913 | out_elf_end: |
656 | elf_end(elf); | 914 | elf_end(elf); |
@@ -658,63 +916,153 @@ out_close: | |||
658 | return err; | 916 | return err; |
659 | } | 917 | } |
660 | 918 | ||
661 | #define BUILD_ID_SIZE 128 | 919 | static bool dso__build_id_equal(const struct dso *self, u8 *build_id) |
920 | { | ||
921 | return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; | ||
922 | } | ||
662 | 923 | ||
663 | static char *dso__read_build_id(struct dso *self, int v) | 924 | static bool __dsos__read_build_ids(struct list_head *head) |
664 | { | 925 | { |
665 | int i; | 926 | bool have_build_id = false; |
927 | struct dso *pos; | ||
928 | |||
929 | list_for_each_entry(pos, head, node) | ||
930 | if (filename__read_build_id(pos->long_name, pos->build_id, | ||
931 | sizeof(pos->build_id)) > 0) { | ||
932 | have_build_id = true; | ||
933 | pos->has_build_id = true; | ||
934 | } | ||
935 | |||
936 | return have_build_id; | ||
937 | } | ||
938 | |||
939 | bool dsos__read_build_ids(void) | ||
940 | { | ||
941 | return __dsos__read_build_ids(&dsos__kernel) || | ||
942 | __dsos__read_build_ids(&dsos__user); | ||
943 | } | ||
944 | |||
945 | /* | ||
946 | * Align offset to 4 bytes as needed for note name and descriptor data. | ||
947 | */ | ||
948 | #define NOTE_ALIGN(n) (((n) + 3) & -4U) | ||
949 | |||
950 | int filename__read_build_id(const char *filename, void *bf, size_t size) | ||
951 | { | ||
952 | int fd, err = -1; | ||
666 | GElf_Ehdr ehdr; | 953 | GElf_Ehdr ehdr; |
667 | GElf_Shdr shdr; | 954 | GElf_Shdr shdr; |
668 | Elf_Data *build_id_data; | 955 | Elf_Data *data; |
669 | Elf_Scn *sec; | 956 | Elf_Scn *sec; |
670 | char *build_id = NULL, *bid; | 957 | Elf_Kind ek; |
671 | unsigned char *raw; | 958 | void *ptr; |
672 | Elf *elf; | 959 | Elf *elf; |
673 | int fd = open(self->name, O_RDONLY); | ||
674 | 960 | ||
961 | if (size < BUILD_ID_SIZE) | ||
962 | goto out; | ||
963 | |||
964 | fd = open(filename, O_RDONLY); | ||
675 | if (fd < 0) | 965 | if (fd < 0) |
676 | goto out; | 966 | goto out; |
677 | 967 | ||
678 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | 968 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); |
679 | if (elf == NULL) { | 969 | if (elf == NULL) { |
680 | if (v) | 970 | pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); |
681 | fprintf(stderr, "%s: cannot read %s ELF file.\n", | ||
682 | __func__, self->name); | ||
683 | goto out_close; | 971 | goto out_close; |
684 | } | 972 | } |
685 | 973 | ||
974 | ek = elf_kind(elf); | ||
975 | if (ek != ELF_K_ELF) | ||
976 | goto out_elf_end; | ||
977 | |||
686 | if (gelf_getehdr(elf, &ehdr) == NULL) { | 978 | if (gelf_getehdr(elf, &ehdr) == NULL) { |
687 | if (v) | 979 | pr_err("%s: cannot get elf header.\n", __func__); |
688 | fprintf(stderr, "%s: cannot get elf header.\n", __func__); | ||
689 | goto out_elf_end; | 980 | goto out_elf_end; |
690 | } | 981 | } |
691 | 982 | ||
692 | sec = elf_section_by_name(elf, &ehdr, &shdr, ".note.gnu.build-id", NULL); | 983 | sec = elf_section_by_name(elf, &ehdr, &shdr, |
693 | if (sec == NULL) | 984 | ".note.gnu.build-id", NULL); |
694 | goto out_elf_end; | 985 | if (sec == NULL) { |
986 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
987 | ".notes", NULL); | ||
988 | if (sec == NULL) | ||
989 | goto out_elf_end; | ||
990 | } | ||
695 | 991 | ||
696 | build_id_data = elf_getdata(sec, NULL); | 992 | data = elf_getdata(sec, NULL); |
697 | if (build_id_data == NULL) | 993 | if (data == NULL) |
698 | goto out_elf_end; | ||
699 | build_id = malloc(BUILD_ID_SIZE); | ||
700 | if (build_id == NULL) | ||
701 | goto out_elf_end; | 994 | goto out_elf_end; |
702 | raw = build_id_data->d_buf + 16; | ||
703 | bid = build_id; | ||
704 | 995 | ||
705 | for (i = 0; i < 20; ++i) { | 996 | ptr = data->d_buf; |
706 | sprintf(bid, "%02x", *raw); | 997 | while (ptr < (data->d_buf + data->d_size)) { |
707 | ++raw; | 998 | GElf_Nhdr *nhdr = ptr; |
708 | bid += 2; | 999 | int namesz = NOTE_ALIGN(nhdr->n_namesz), |
1000 | descsz = NOTE_ALIGN(nhdr->n_descsz); | ||
1001 | const char *name; | ||
1002 | |||
1003 | ptr += sizeof(*nhdr); | ||
1004 | name = ptr; | ||
1005 | ptr += namesz; | ||
1006 | if (nhdr->n_type == NT_GNU_BUILD_ID && | ||
1007 | nhdr->n_namesz == sizeof("GNU")) { | ||
1008 | if (memcmp(name, "GNU", sizeof("GNU")) == 0) { | ||
1009 | memcpy(bf, ptr, BUILD_ID_SIZE); | ||
1010 | err = BUILD_ID_SIZE; | ||
1011 | break; | ||
1012 | } | ||
1013 | } | ||
1014 | ptr += descsz; | ||
709 | } | 1015 | } |
710 | if (v >= 2) | ||
711 | printf("%s(%s): %s\n", __func__, self->name, build_id); | ||
712 | out_elf_end: | 1016 | out_elf_end: |
713 | elf_end(elf); | 1017 | elf_end(elf); |
714 | out_close: | 1018 | out_close: |
715 | close(fd); | 1019 | close(fd); |
716 | out: | 1020 | out: |
717 | return build_id; | 1021 | return err; |
1022 | } | ||
1023 | |||
1024 | int sysfs__read_build_id(const char *filename, void *build_id, size_t size) | ||
1025 | { | ||
1026 | int fd, err = -1; | ||
1027 | |||
1028 | if (size < BUILD_ID_SIZE) | ||
1029 | goto out; | ||
1030 | |||
1031 | fd = open(filename, O_RDONLY); | ||
1032 | if (fd < 0) | ||
1033 | goto out; | ||
1034 | |||
1035 | while (1) { | ||
1036 | char bf[BUFSIZ]; | ||
1037 | GElf_Nhdr nhdr; | ||
1038 | int namesz, descsz; | ||
1039 | |||
1040 | if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) | ||
1041 | break; | ||
1042 | |||
1043 | namesz = NOTE_ALIGN(nhdr.n_namesz); | ||
1044 | descsz = NOTE_ALIGN(nhdr.n_descsz); | ||
1045 | if (nhdr.n_type == NT_GNU_BUILD_ID && | ||
1046 | nhdr.n_namesz == sizeof("GNU")) { | ||
1047 | if (read(fd, bf, namesz) != namesz) | ||
1048 | break; | ||
1049 | if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { | ||
1050 | if (read(fd, build_id, | ||
1051 | BUILD_ID_SIZE) == BUILD_ID_SIZE) { | ||
1052 | err = 0; | ||
1053 | break; | ||
1054 | } | ||
1055 | } else if (read(fd, bf, descsz) != descsz) | ||
1056 | break; | ||
1057 | } else { | ||
1058 | int n = namesz + descsz; | ||
1059 | if (read(fd, bf, n) != n) | ||
1060 | break; | ||
1061 | } | ||
1062 | } | ||
1063 | close(fd); | ||
1064 | out: | ||
1065 | return err; | ||
718 | } | 1066 | } |
719 | 1067 | ||
720 | char dso__symtab_origin(const struct dso *self) | 1068 | char dso__symtab_origin(const struct dso *self) |
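filename__read_build_id() above treats the .note.gnu.build-id (or .notes) section as a sequence of ELF note records, rounding the name and descriptor sizes up to 4 bytes with NOTE_ALIGN before stepping to the next record. A minimal standalone sketch of the same walk over an in-memory note buffer, with error handling trimmed and only the GNU/NT_GNU_BUILD_ID case handled, could look like this:

/* Sketch: scan a buffer of ELF notes for the GNU build-id descriptor. */
#include <elf.h>
#include <string.h>

#define NOTE_ALIGN(n) (((n) + 3) & -4U)

/* Returns descriptor length on success, -1 if no build-id note is found. */
static int find_build_id(const void *buf, size_t size, void *bf, size_t bf_size)
{
	const char *ptr = buf, *end = ptr + size;

	while (ptr + sizeof(Elf64_Nhdr) <= end) {
		const Elf64_Nhdr *nhdr = (const void *)ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz);
		size_t descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name = ptr + sizeof(*nhdr);
		const char *desc = name + namesz;

		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    memcmp(name, "GNU", sizeof("GNU")) == 0) {
			size_t len = nhdr->n_descsz < bf_size ? nhdr->n_descsz : bf_size;
			memcpy(bf, desc, len);
			return (int)len;
		}
		ptr = desc + descsz;
	}
	return -1;
}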
@@ -726,6 +1074,7 @@ char dso__symtab_origin(const struct dso *self) | |||
726 | [DSO__ORIG_UBUNTU] = 'u', | 1074 | [DSO__ORIG_UBUNTU] = 'u', |
727 | [DSO__ORIG_BUILDID] = 'b', | 1075 | [DSO__ORIG_BUILDID] = 'b', |
728 | [DSO__ORIG_DSO] = 'd', | 1076 | [DSO__ORIG_DSO] = 'd', |
1077 | [DSO__ORIG_KMODULE] = 'K', | ||
729 | }; | 1078 | }; |
730 | 1079 | ||
731 | if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) | 1080 | if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) |
@@ -733,20 +1082,27 @@ char dso__symtab_origin(const struct dso *self) | |||
733 | return origin[self->origin]; | 1082 | return origin[self->origin]; |
734 | } | 1083 | } |
735 | 1084 | ||
736 | int dso__load(struct dso *self, symbol_filter_t filter, int v) | 1085 | int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) |
737 | { | 1086 | { |
738 | int size = PATH_MAX; | 1087 | int size = PATH_MAX; |
739 | char *name = malloc(size), *build_id = NULL; | 1088 | char *name; |
1089 | u8 build_id[BUILD_ID_SIZE]; | ||
740 | int ret = -1; | 1090 | int ret = -1; |
741 | int fd; | 1091 | int fd; |
742 | 1092 | ||
1093 | dso__set_loaded(self, map->type); | ||
1094 | |||
1095 | if (self->kernel) | ||
1096 | return dso__load_kernel_sym(self, map, kthread, filter); | ||
1097 | |||
1098 | name = malloc(size); | ||
743 | if (!name) | 1099 | if (!name) |
744 | return -1; | 1100 | return -1; |
745 | 1101 | ||
746 | self->adjust_symbols = 0; | 1102 | self->adjust_symbols = 0; |
747 | 1103 | ||
748 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) { | 1104 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) { |
749 | ret = dso__load_perf_map(self, filter, v); | 1105 | ret = dso__load_perf_map(self, map, filter); |
750 | self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : | 1106 | self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : |
751 | DSO__ORIG_NOT_FOUND; | 1107 | DSO__ORIG_NOT_FOUND; |
752 | return ret; | 1108 | return ret; |
@@ -759,34 +1115,50 @@ more: | |||
759 | self->origin++; | 1115 | self->origin++; |
760 | switch (self->origin) { | 1116 | switch (self->origin) { |
761 | case DSO__ORIG_FEDORA: | 1117 | case DSO__ORIG_FEDORA: |
762 | snprintf(name, size, "/usr/lib/debug%s.debug", self->name); | 1118 | snprintf(name, size, "/usr/lib/debug%s.debug", |
1119 | self->long_name); | ||
763 | break; | 1120 | break; |
764 | case DSO__ORIG_UBUNTU: | 1121 | case DSO__ORIG_UBUNTU: |
765 | snprintf(name, size, "/usr/lib/debug%s", self->name); | 1122 | snprintf(name, size, "/usr/lib/debug%s", |
1123 | self->long_name); | ||
766 | break; | 1124 | break; |
767 | case DSO__ORIG_BUILDID: | 1125 | case DSO__ORIG_BUILDID: |
768 | build_id = dso__read_build_id(self, v); | 1126 | if (filename__read_build_id(self->long_name, build_id, |
769 | if (build_id != NULL) { | 1127 | sizeof(build_id))) { |
1128 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; | ||
1129 | |||
1130 | build_id__sprintf(build_id, sizeof(build_id), | ||
1131 | build_id_hex); | ||
770 | snprintf(name, size, | 1132 | snprintf(name, size, |
771 | "/usr/lib/debug/.build-id/%.2s/%s.debug", | 1133 | "/usr/lib/debug/.build-id/%.2s/%s.debug", |
772 | build_id, build_id + 2); | 1134 | build_id_hex, build_id_hex + 2); |
773 | free(build_id); | 1135 | if (self->has_build_id) |
1136 | goto compare_build_id; | ||
774 | break; | 1137 | break; |
775 | } | 1138 | } |
776 | self->origin++; | 1139 | self->origin++; |
777 | /* Fall thru */ | 1140 | /* Fall thru */ |
778 | case DSO__ORIG_DSO: | 1141 | case DSO__ORIG_DSO: |
779 | snprintf(name, size, "%s", self->name); | 1142 | snprintf(name, size, "%s", self->long_name); |
780 | break; | 1143 | break; |
781 | 1144 | ||
782 | default: | 1145 | default: |
783 | goto out; | 1146 | goto out; |
784 | } | 1147 | } |
785 | 1148 | ||
1149 | if (self->has_build_id) { | ||
1150 | if (filename__read_build_id(name, build_id, | ||
1151 | sizeof(build_id)) < 0) | ||
1152 | goto more; | ||
1153 | compare_build_id: | ||
1154 | if (!dso__build_id_equal(self, build_id)) | ||
1155 | goto more; | ||
1156 | } | ||
1157 | |||
786 | fd = open(name, O_RDONLY); | 1158 | fd = open(name, O_RDONLY); |
787 | } while (fd < 0); | 1159 | } while (fd < 0); |
788 | 1160 | ||
789 | ret = dso__load_sym(self, fd, name, filter, v, NULL); | 1161 | ret = dso__load_sym(self, map, NULL, name, fd, filter, 0, 0); |
790 | close(fd); | 1162 | close(fd); |
791 | 1163 | ||
792 | /* | 1164 | /* |
@@ -796,7 +1168,7 @@ more: | |||
796 | goto more; | 1168 | goto more; |
797 | 1169 | ||
798 | if (ret > 0) { | 1170 | if (ret > 0) { |
799 | int nr_plt = dso__synthesize_plt_symbols(self, v); | 1171 | int nr_plt = dso__synthesize_plt_symbols(self, map, filter); |
800 | if (nr_plt > 0) | 1172 | if (nr_plt > 0) |
801 | ret += nr_plt; | 1173 | ret += nr_plt; |
802 | } | 1174 | } |
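dso__load() now walks a list of debuginfo locations and, when the DSO already carries a build-id, rejects any candidate file whose build-id does not match. The hex formatting and path construction step reduces to something like the sketch below (BUILD_ID_SIZE of 20 bytes for a SHA-1 id and the helper names are assumptions):

/* Sketch: format a raw build-id and derive the debuginfo path from it. */
#include <stdio.h>

#define BUILD_ID_SIZE 20

static int build_id_sprintf(const unsigned char *id, int len, char *bf)
{
	char *p = bf;

	for (int i = 0; i < len; i++)
		p += sprintf(p, "%02x", id[i]);
	return (int)(p - bf);
}

static void build_id_debug_path(const unsigned char *id, char *path, size_t size)
{
	char hex[BUILD_ID_SIZE * 2 + 1];

	build_id_sprintf(id, BUILD_ID_SIZE, hex);
	/* e.g. /usr/lib/debug/.build-id/ab/cdef....debug */
	snprintf(path, size, "/usr/lib/debug/.build-id/%.2s/%s.debug",
		 hex, hex + 2);
}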
@@ -807,151 +1179,279 @@ out: | |||
807 | return ret; | 1179 | return ret; |
808 | } | 1180 | } |
809 | 1181 | ||
810 | static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name, | 1182 | static struct map *thread__find_map_by_name(struct thread *self, char *name) |
811 | symbol_filter_t filter, int v) | ||
812 | { | 1183 | { |
813 | struct module *mod = mod_dso__find_module(mods, name); | 1184 | struct rb_node *nd; |
814 | int err = 0, fd; | ||
815 | 1185 | ||
816 | if (mod == NULL || !mod->active) | 1186 | for (nd = rb_first(&self->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { |
817 | return err; | 1187 | struct map *map = rb_entry(nd, struct map, rb_node); |
818 | 1188 | ||
819 | fd = open(mod->path, O_RDONLY); | 1189 | if (map->dso && strcmp(map->dso->name, name) == 0) |
1190 | return map; | ||
1191 | } | ||
820 | 1192 | ||
821 | if (fd < 0) | 1193 | return NULL; |
822 | return err; | 1194 | } |
823 | 1195 | ||
824 | err = dso__load_sym(self, fd, name, filter, v, mod); | 1196 | static int dsos__set_modules_path_dir(char *dirname) |
825 | close(fd); | 1197 | { |
1198 | struct dirent *dent; | ||
1199 | DIR *dir = opendir(dirname); | ||
826 | 1200 | ||
827 | return err; | 1201 | if (!dir) { |
1202 | pr_debug("%s: cannot open %s dir\n", __func__, dirname); | ||
1203 | return -1; | ||
1204 | } | ||
1205 | |||
1206 | while ((dent = readdir(dir)) != NULL) { | ||
1207 | char path[PATH_MAX]; | ||
1208 | |||
1209 | if (dent->d_type == DT_DIR) { | ||
1210 | if (!strcmp(dent->d_name, ".") || | ||
1211 | !strcmp(dent->d_name, "..")) | ||
1212 | continue; | ||
1213 | |||
1214 | snprintf(path, sizeof(path), "%s/%s", | ||
1215 | dirname, dent->d_name); | ||
1216 | if (dsos__set_modules_path_dir(path) < 0) | ||
1217 | goto failure; | ||
1218 | } else { | ||
1219 | char *dot = strrchr(dent->d_name, '.'), | ||
1220 | dso_name[PATH_MAX]; | ||
1221 | struct map *map; | ||
1222 | char *long_name; | ||
1223 | |||
1224 | if (dot == NULL || strcmp(dot, ".ko")) | ||
1225 | continue; | ||
1226 | snprintf(dso_name, sizeof(dso_name), "[%.*s]", | ||
1227 | (int)(dot - dent->d_name), dent->d_name); | ||
1228 | |||
1229 | strxfrchar(dso_name, '-', '_'); | ||
1230 | map = thread__find_map_by_name(kthread, dso_name); | ||
1231 | if (map == NULL) | ||
1232 | continue; | ||
1233 | |||
1234 | snprintf(path, sizeof(path), "%s/%s", | ||
1235 | dirname, dent->d_name); | ||
1236 | |||
1237 | long_name = strdup(path); | ||
1238 | if (long_name == NULL) | ||
1239 | goto failure; | ||
1240 | dso__set_long_name(map->dso, long_name); | ||
1241 | } | ||
1242 | } | ||
1243 | |||
1244 | return 0; | ||
1245 | failure: | ||
1246 | closedir(dir); | ||
1247 | return -1; | ||
828 | } | 1248 | } |
829 | 1249 | ||
830 | int dso__load_modules(struct dso *self, symbol_filter_t filter, int v) | 1250 | static int dsos__set_modules_path(void) |
831 | { | 1251 | { |
832 | struct mod_dso *mods = mod_dso__new_dso("modules"); | 1252 | struct utsname uts; |
833 | struct module *pos; | 1253 | char modules_path[PATH_MAX]; |
834 | struct rb_node *next; | ||
835 | int err, count = 0; | ||
836 | 1254 | ||
837 | err = mod_dso__load_modules(mods); | 1255 | if (uname(&uts) < 0) |
838 | 1256 | return -1; | |
839 | if (err <= 0) | ||
840 | return err; | ||
841 | 1257 | ||
842 | /* | 1258 | snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", |
843 | * Iterate over modules, and load active symbols. | 1259 | uts.release); |
844 | */ | ||
845 | next = rb_first(&mods->mods); | ||
846 | while (next) { | ||
847 | pos = rb_entry(next, struct module, rb_node); | ||
848 | err = dso__load_module(self, mods, pos->name, filter, v); | ||
849 | 1260 | ||
850 | if (err < 0) | 1261 | return dsos__set_modules_path_dir(modules_path); |
851 | break; | 1262 | } |
852 | 1263 | ||
853 | next = rb_next(&pos->rb_node); | 1264 | /* |
854 | count += err; | 1265 | * Constructor variant for modules (where we know from /proc/modules where |
855 | } | 1266 | * they are loaded) and for vmlinux, where only after we load all the |
1267 | * symbols we'll know where it starts and ends. | ||
1268 | */ | ||
1269 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) | ||
1270 | { | ||
1271 | struct map *self = malloc(sizeof(*self)); | ||
856 | 1272 | ||
857 | if (err < 0) { | 1273 | if (self != NULL) { |
858 | mod_dso__delete_modules(mods); | 1274 | /* |
859 | mod_dso__delete_self(mods); | 1275 | * ->end will be filled after we load all the symbols |
860 | return err; | 1276 | */ |
1277 | map__init(self, type, start, 0, 0, dso); | ||
861 | } | 1278 | } |
862 | 1279 | ||
863 | return count; | 1280 | return self; |
864 | } | 1281 | } |
865 | 1282 | ||
866 | static inline void dso__fill_symbol_holes(struct dso *self) | 1283 | static int thread__create_module_maps(struct thread *self) |
867 | { | 1284 | { |
868 | struct symbol *prev = NULL; | 1285 | char *line = NULL; |
869 | struct rb_node *nd; | 1286 | size_t n; |
1287 | FILE *file = fopen("/proc/modules", "r"); | ||
1288 | struct map *map; | ||
870 | 1289 | ||
871 | for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) { | 1290 | if (file == NULL) |
872 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 1291 | return -1; |
873 | 1292 | ||
874 | if (prev) { | 1293 | while (!feof(file)) { |
875 | u64 hole = 0; | 1294 | char name[PATH_MAX]; |
876 | int alias = pos->start == prev->start; | 1295 | u64 start; |
1296 | struct dso *dso; | ||
1297 | char *sep; | ||
1298 | int line_len; | ||
877 | 1299 | ||
878 | if (!alias) | 1300 | line_len = getline(&line, &n, file); |
879 | hole = prev->start - pos->end - 1; | 1301 | if (line_len < 0) |
1302 | break; | ||
880 | 1303 | ||
881 | if (hole || alias) { | 1304 | if (!line) |
882 | if (alias) | 1305 | goto out_failure; |
883 | pos->end = prev->end; | 1306 | |
884 | else if (hole) | 1307 | line[--line_len] = '\0'; /* \n */ |
885 | pos->end = prev->start - 1; | 1308 | |
886 | } | 1309 | sep = strrchr(line, 'x'); |
1310 | if (sep == NULL) | ||
1311 | continue; | ||
1312 | |||
1313 | hex2u64(sep + 1, &start); | ||
1314 | |||
1315 | sep = strchr(line, ' '); | ||
1316 | if (sep == NULL) | ||
1317 | continue; | ||
1318 | |||
1319 | *sep = '\0'; | ||
1320 | |||
1321 | snprintf(name, sizeof(name), "[%s]", line); | ||
1322 | dso = dso__new(name); | ||
1323 | |||
1324 | if (dso == NULL) | ||
1325 | goto out_delete_line; | ||
1326 | |||
1327 | map = map__new2(start, dso, MAP__FUNCTION); | ||
1328 | if (map == NULL) { | ||
1329 | dso__delete(dso); | ||
1330 | goto out_delete_line; | ||
887 | } | 1331 | } |
888 | prev = pos; | 1332 | |
1333 | snprintf(name, sizeof(name), | ||
1334 | "/sys/module/%s/notes/.note.gnu.build-id", line); | ||
1335 | if (sysfs__read_build_id(name, dso->build_id, | ||
1336 | sizeof(dso->build_id)) == 0) | ||
1337 | dso->has_build_id = true; | ||
1338 | |||
1339 | dso->origin = DSO__ORIG_KMODULE; | ||
1340 | __thread__insert_map(self, map); | ||
1341 | dsos__add(&dsos__kernel, dso); | ||
889 | } | 1342 | } |
1343 | |||
1344 | free(line); | ||
1345 | fclose(file); | ||
1346 | |||
1347 | return dsos__set_modules_path(); | ||
1348 | |||
1349 | out_delete_line: | ||
1350 | free(line); | ||
1351 | out_failure: | ||
1352 | return -1; | ||
890 | } | 1353 | } |
891 | 1354 | ||
892 | static int dso__load_vmlinux(struct dso *self, const char *vmlinux, | 1355 | static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *thread, |
893 | symbol_filter_t filter, int v) | 1356 | const char *vmlinux, symbol_filter_t filter) |
894 | { | 1357 | { |
895 | int err, fd = open(vmlinux, O_RDONLY); | 1358 | int err = -1, fd; |
896 | 1359 | ||
897 | if (fd < 0) | 1360 | if (self->has_build_id) { |
898 | return -1; | 1361 | u8 build_id[BUILD_ID_SIZE]; |
899 | 1362 | ||
900 | err = dso__load_sym(self, fd, vmlinux, filter, v, NULL); | 1363 | if (filename__read_build_id(vmlinux, build_id, |
1364 | sizeof(build_id)) < 0) { | ||
1365 | pr_debug("No build_id in %s, ignoring it\n", vmlinux); | ||
1366 | return -1; | ||
1367 | } | ||
1368 | if (!dso__build_id_equal(self, build_id)) { | ||
1369 | char expected_build_id[BUILD_ID_SIZE * 2 + 1], | ||
1370 | vmlinux_build_id[BUILD_ID_SIZE * 2 + 1]; | ||
1371 | |||
1372 | build_id__sprintf(self->build_id, | ||
1373 | sizeof(self->build_id), | ||
1374 | expected_build_id); | ||
1375 | build_id__sprintf(build_id, sizeof(build_id), | ||
1376 | vmlinux_build_id); | ||
1377 | pr_debug("build_id in %s is %s while expected is %s, " | ||
1378 | "ignoring it\n", vmlinux, vmlinux_build_id, | ||
1379 | expected_build_id); | ||
1380 | return -1; | ||
1381 | } | ||
1382 | } | ||
901 | 1383 | ||
902 | if (err > 0) | 1384 | fd = open(vmlinux, O_RDONLY); |
903 | dso__fill_symbol_holes(self); | 1385 | if (fd < 0) |
1386 | return -1; | ||
904 | 1387 | ||
1388 | dso__set_loaded(self, map->type); | ||
1389 | err = dso__load_sym(self, map, thread, self->long_name, fd, filter, 1, 0); | ||
905 | close(fd); | 1390 | close(fd); |
906 | 1391 | ||
907 | return err; | 1392 | return err; |
908 | } | 1393 | } |
909 | 1394 | ||
910 | int dso__load_kernel(struct dso *self, const char *vmlinux, | 1395 | static int dso__load_kernel_sym(struct dso *self, struct map *map, |
911 | symbol_filter_t filter, int v, int use_modules) | 1396 | struct thread *thread, symbol_filter_t filter) |
912 | { | 1397 | { |
913 | int err = -1; | 1398 | int err; |
914 | 1399 | bool is_kallsyms; | |
915 | if (vmlinux) { | 1400 | |
916 | err = dso__load_vmlinux(self, vmlinux, filter, v); | 1401 | if (vmlinux_path != NULL) { |
917 | if (err > 0 && use_modules) { | 1402 | int i; |
918 | int syms = dso__load_modules(self, filter, v); | 1403 | pr_debug("Looking at the vmlinux_path (%d entries long)\n", |
919 | 1404 | vmlinux_path__nr_entries); | |
920 | if (syms < 0) { | 1405 | for (i = 0; i < vmlinux_path__nr_entries; ++i) { |
921 | fprintf(stderr, "dso__load_modules failed!\n"); | 1406 | err = dso__load_vmlinux(self, map, thread, |
922 | return syms; | 1407 | vmlinux_path[i], filter); |
1408 | if (err > 0) { | ||
1409 | pr_debug("Using %s for symbols\n", | ||
1410 | vmlinux_path[i]); | ||
1411 | dso__set_long_name(self, | ||
1412 | strdup(vmlinux_path[i])); | ||
1413 | goto out_fixup; | ||
923 | } | 1414 | } |
924 | err += syms; | ||
925 | } | 1415 | } |
926 | } | 1416 | } |
927 | 1417 | ||
928 | if (err <= 0) | 1418 | is_kallsyms = self->long_name[0] == '['; |
929 | err = dso__load_kallsyms(self, filter, v); | 1419 | if (is_kallsyms) |
1420 | goto do_kallsyms; | ||
930 | 1421 | ||
931 | if (err > 0) | 1422 | err = dso__load_vmlinux(self, map, thread, self->long_name, filter); |
932 | self->origin = DSO__ORIG_KERNEL; | 1423 | if (err <= 0) { |
1424 | pr_info("The file %s cannot be used, " | ||
1425 | "trying to use /proc/kallsyms...", self->long_name); | ||
1426 | do_kallsyms: | ||
1427 | err = dso__load_kallsyms(self, map, thread, filter); | ||
1428 | if (err > 0 && !is_kallsyms) | ||
1429 | dso__set_long_name(self, strdup("[kernel.kallsyms]")); | ||
1430 | } | ||
1431 | |||
1432 | if (err > 0) { | ||
1433 | out_fixup: | ||
1434 | map__fixup_start(map); | ||
1435 | map__fixup_end(map); | ||
1436 | } | ||
933 | 1437 | ||
934 | return err; | 1438 | return err; |
935 | } | 1439 | } |
936 | 1440 | ||
937 | LIST_HEAD(dsos); | 1441 | LIST_HEAD(dsos__user); |
938 | struct dso *kernel_dso; | 1442 | LIST_HEAD(dsos__kernel); |
939 | struct dso *vdso; | 1443 | struct dso *vdso; |
940 | struct dso *hypervisor_dso; | ||
941 | |||
942 | const char *vmlinux_name = "vmlinux"; | ||
943 | int modules; | ||
944 | 1444 | ||
945 | static void dsos__add(struct dso *dso) | 1445 | static void dsos__add(struct list_head *head, struct dso *dso) |
946 | { | 1446 | { |
947 | list_add_tail(&dso->node, &dsos); | 1447 | list_add_tail(&dso->node, head); |
948 | } | 1448 | } |
949 | 1449 | ||
950 | static struct dso *dsos__find(const char *name) | 1450 | static struct dso *dsos__find(struct list_head *head, const char *name) |
951 | { | 1451 | { |
952 | struct dso *pos; | 1452 | struct dso *pos; |
953 | 1453 | ||
954 | list_for_each_entry(pos, &dsos, node) | 1454 | list_for_each_entry(pos, head, node) |
955 | if (strcmp(pos->name, name) == 0) | 1455 | if (strcmp(pos->name, name) == 0) |
956 | return pos; | 1456 | return pos; |
957 | return NULL; | 1457 | return NULL; |
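thread__create_module_maps() reads /proc/modules line by line, taking the module name up to the first space and the load address after the last 'x' on the line. For a line such as "ext4 376832 2 - Live 0xffffffffa0020000", the standalone sketch below extracts both fields (names and error handling simplified):

/* Sketch: pull the module name and load address out of a /proc/modules line. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_modules_line(char *line, char *name, size_t name_len,
			      unsigned long long *start)
{
	char *sep = strrchr(line, 'x');	/* the last 'x' precedes the hex address */

	if (sep == NULL)
		return -1;
	*start = strtoull(sep + 1, NULL, 16);

	sep = strchr(line, ' ');	/* the module name ends at the first space */
	if (sep == NULL)
		return -1;
	*sep = '\0';
	snprintf(name, name_len, "[%s]", line);	/* e.g. "[ext4]" */
	return 0;
}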
@@ -959,79 +1459,170 @@ static struct dso *dsos__find(const char *name) | |||
959 | 1459 | ||
960 | struct dso *dsos__findnew(const char *name) | 1460 | struct dso *dsos__findnew(const char *name) |
961 | { | 1461 | { |
962 | struct dso *dso = dsos__find(name); | 1462 | struct dso *dso = dsos__find(&dsos__user, name); |
963 | int nr; | ||
964 | |||
965 | if (dso) | ||
966 | return dso; | ||
967 | |||
968 | dso = dso__new(name, 0); | ||
969 | if (!dso) | ||
970 | goto out_delete_dso; | ||
971 | 1463 | ||
972 | nr = dso__load(dso, NULL, verbose); | 1464 | if (!dso) { |
973 | if (nr < 0) { | 1465 | dso = dso__new(name); |
974 | eprintf("Failed to open: %s\n", name); | 1466 | if (dso != NULL) { |
975 | goto out_delete_dso; | 1467 | dsos__add(&dsos__user, dso); |
1468 | dso__set_basename(dso); | ||
1469 | } | ||
976 | } | 1470 | } |
977 | if (!nr) | ||
978 | eprintf("No symbols found in: %s, maybe install a debug package?\n", name); | ||
979 | |||
980 | dsos__add(dso); | ||
981 | 1471 | ||
982 | return dso; | 1472 | return dso; |
1473 | } | ||
983 | 1474 | ||
984 | out_delete_dso: | 1475 | static void __dsos__fprintf(struct list_head *head, FILE *fp) |
985 | dso__delete(dso); | 1476 | { |
986 | return NULL; | 1477 | struct dso *pos; |
1478 | |||
1479 | list_for_each_entry(pos, head, node) { | ||
1480 | int i; | ||
1481 | for (i = 0; i < MAP__NR_TYPES; ++i) | ||
1482 | dso__fprintf(pos, i, fp); | ||
1483 | } | ||
987 | } | 1484 | } |
988 | 1485 | ||
989 | void dsos__fprintf(FILE *fp) | 1486 | void dsos__fprintf(FILE *fp) |
990 | { | 1487 | { |
1488 | __dsos__fprintf(&dsos__kernel, fp); | ||
1489 | __dsos__fprintf(&dsos__user, fp); | ||
1490 | } | ||
1491 | |||
1492 | static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp) | ||
1493 | { | ||
991 | struct dso *pos; | 1494 | struct dso *pos; |
1495 | size_t ret = 0; | ||
992 | 1496 | ||
993 | list_for_each_entry(pos, &dsos, node) | 1497 | list_for_each_entry(pos, head, node) { |
994 | dso__fprintf(pos, fp); | 1498 | ret += dso__fprintf_buildid(pos, fp); |
1499 | ret += fprintf(fp, " %s\n", pos->long_name); | ||
1500 | } | ||
1501 | return ret; | ||
995 | } | 1502 | } |
996 | 1503 | ||
997 | static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip) | 1504 | size_t dsos__fprintf_buildid(FILE *fp) |
998 | { | 1505 | { |
999 | return dso__find_symbol(dso, ip); | 1506 | return (__dsos__fprintf_buildid(&dsos__kernel, fp) + |
1507 | __dsos__fprintf_buildid(&dsos__user, fp)); | ||
1000 | } | 1508 | } |
1001 | 1509 | ||
1002 | int load_kernel(void) | 1510 | static int thread__create_kernel_map(struct thread *self, const char *vmlinux) |
1003 | { | 1511 | { |
1004 | int err; | 1512 | struct map *kmap; |
1513 | struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]"); | ||
1005 | 1514 | ||
1006 | kernel_dso = dso__new("[kernel]", 0); | 1515 | if (kernel == NULL) |
1007 | if (!kernel_dso) | ||
1008 | return -1; | 1516 | return -1; |
1009 | 1517 | ||
1010 | err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules); | 1518 | kmap = map__new2(0, kernel, MAP__FUNCTION); |
1011 | if (err <= 0) { | 1519 | if (kmap == NULL) |
1012 | dso__delete(kernel_dso); | 1520 | goto out_delete_kernel_dso; |
1013 | kernel_dso = NULL; | ||
1014 | } else | ||
1015 | dsos__add(kernel_dso); | ||
1016 | 1521 | ||
1017 | vdso = dso__new("[vdso]", 0); | 1522 | kmap->map_ip = kmap->unmap_ip = identity__map_ip; |
1018 | if (!vdso) | 1523 | kernel->short_name = "[kernel]"; |
1019 | return -1; | 1524 | kernel->kernel = 1; |
1020 | 1525 | ||
1021 | vdso->find_symbol = vdso__find_symbol; | 1526 | vdso = dso__new("[vdso]"); |
1527 | if (vdso == NULL) | ||
1528 | goto out_delete_kernel_map; | ||
1529 | dso__set_loaded(vdso, MAP__FUNCTION); | ||
1022 | 1530 | ||
1023 | dsos__add(vdso); | 1531 | if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, |
1532 | sizeof(kernel->build_id)) == 0) | ||
1533 | kernel->has_build_id = true; | ||
1024 | 1534 | ||
1025 | hypervisor_dso = dso__new("[hypervisor]", 0); | 1535 | __thread__insert_map(self, kmap); |
1026 | if (!hypervisor_dso) | 1536 | dsos__add(&dsos__kernel, kernel); |
1027 | return -1; | 1537 | dsos__add(&dsos__user, vdso); |
1028 | dsos__add(hypervisor_dso); | ||
1029 | 1538 | ||
1030 | return err; | 1539 | return 0; |
1540 | |||
1541 | out_delete_kernel_map: | ||
1542 | map__delete(kmap); | ||
1543 | out_delete_kernel_dso: | ||
1544 | dso__delete(kernel); | ||
1545 | return -1; | ||
1546 | } | ||
1547 | |||
1548 | static void vmlinux_path__exit(void) | ||
1549 | { | ||
1550 | while (--vmlinux_path__nr_entries >= 0) { | ||
1551 | free(vmlinux_path[vmlinux_path__nr_entries]); | ||
1552 | vmlinux_path[vmlinux_path__nr_entries] = NULL; | ||
1553 | } | ||
1554 | |||
1555 | free(vmlinux_path); | ||
1556 | vmlinux_path = NULL; | ||
1031 | } | 1557 | } |
1032 | 1558 | ||
1559 | static int vmlinux_path__init(void) | ||
1560 | { | ||
1561 | struct utsname uts; | ||
1562 | char bf[PATH_MAX]; | ||
1563 | |||
1564 | if (uname(&uts) < 0) | ||
1565 | return -1; | ||
1566 | |||
1567 | vmlinux_path = malloc(sizeof(char *) * 5); | ||
1568 | if (vmlinux_path == NULL) | ||
1569 | return -1; | ||
1570 | |||
1571 | vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux"); | ||
1572 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | ||
1573 | goto out_fail; | ||
1574 | ++vmlinux_path__nr_entries; | ||
1575 | vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux"); | ||
1576 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | ||
1577 | goto out_fail; | ||
1578 | ++vmlinux_path__nr_entries; | ||
1579 | snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); | ||
1580 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); | ||
1581 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | ||
1582 | goto out_fail; | ||
1583 | ++vmlinux_path__nr_entries; | ||
1584 | snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release); | ||
1585 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); | ||
1586 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | ||
1587 | goto out_fail; | ||
1588 | ++vmlinux_path__nr_entries; | ||
1589 | snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux", | ||
1590 | uts.release); | ||
1591 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); | ||
1592 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | ||
1593 | goto out_fail; | ||
1594 | ++vmlinux_path__nr_entries; | ||
1595 | |||
1596 | return 0; | ||
1597 | |||
1598 | out_fail: | ||
1599 | vmlinux_path__exit(); | ||
1600 | return -1; | ||
1601 | } | ||
1033 | 1602 | ||
1034 | void symbol__init(void) | 1603 | int symbol__init(struct symbol_conf *conf) |
1035 | { | 1604 | { |
1605 | const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults; | ||
1606 | |||
1036 | elf_version(EV_CURRENT); | 1607 | elf_version(EV_CURRENT); |
1608 | symbol__priv_size = pconf->priv_size; | ||
1609 | thread__init(kthread, 0); | ||
1610 | |||
1611 | if (pconf->try_vmlinux_path && vmlinux_path__init() < 0) | ||
1612 | return -1; | ||
1613 | |||
1614 | if (thread__create_kernel_map(kthread, pconf->vmlinux_name) < 0) { | ||
1615 | vmlinux_path__exit(); | ||
1616 | return -1; | ||
1617 | } | ||
1618 | |||
1619 | kthread->use_modules = pconf->use_modules; | ||
1620 | if (pconf->use_modules && thread__create_module_maps(kthread) < 0) | ||
1621 | pr_debug("Failed to load list of modules in use, " | ||
1622 | "continuing...\n"); | ||
1623 | /* | ||
1624 | * Now that we have all the maps created, just set the ->end of them: | ||
1625 | */ | ||
1626 | thread__fixup_maps_end(kthread); | ||
1627 | return 0; | ||
1037 | } | 1628 | } |
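symbol__init() builds vmlinux_path[] once so that dso__load_kernel_sym() can try a fixed set of likely vmlinux locations before giving up and using /proc/kallsyms. The candidate list constructed by the hunk amounts to the following (sketch only; uname() supplies the release string):

/* Sketch: the vmlinux candidates tried before falling back to /proc/kallsyms. */
#include <stdio.h>
#include <sys/utsname.h>

static void print_vmlinux_candidates(void)
{
	struct utsname uts;

	if (uname(&uts) < 0)
		return;

	printf("vmlinux\n");
	printf("/boot/vmlinux\n");
	printf("/boot/vmlinux-%s\n", uts.release);
	printf("/lib/modules/%s/build/vmlinux\n", uts.release);
	printf("/usr/lib/debug/lib/modules/%s/vmlinux\n", uts.release);
}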
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 829da9edba64..17003efa0b39 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -1,11 +1,11 @@ | |||
1 | #ifndef _PERF_SYMBOL_ | 1 | #ifndef __PERF_SYMBOL |
2 | #define _PERF_SYMBOL_ 1 | 2 | #define __PERF_SYMBOL 1 |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <stdbool.h> | ||
5 | #include "types.h" | 6 | #include "types.h" |
6 | #include <linux/list.h> | 7 | #include <linux/list.h> |
7 | #include <linux/rbtree.h> | 8 | #include <linux/rbtree.h> |
8 | #include "module.h" | ||
9 | #include "event.h" | 9 | #include "event.h" |
10 | 10 | ||
11 | #ifdef HAVE_CPLUS_DEMANGLE | 11 | #ifdef HAVE_CPLUS_DEMANGLE |
@@ -46,57 +46,75 @@ struct symbol { | |||
46 | struct rb_node rb_node; | 46 | struct rb_node rb_node; |
47 | u64 start; | 47 | u64 start; |
48 | u64 end; | 48 | u64 end; |
49 | u64 obj_start; | ||
50 | u64 hist_sum; | ||
51 | u64 *hist; | ||
52 | struct module *module; | ||
53 | void *priv; | ||
54 | char name[0]; | 49 | char name[0]; |
55 | }; | 50 | }; |
56 | 51 | ||
52 | struct symbol_conf { | ||
53 | unsigned short priv_size; | ||
54 | bool try_vmlinux_path, | ||
55 | use_modules; | ||
56 | const char *vmlinux_name; | ||
57 | }; | ||
58 | |||
59 | extern unsigned int symbol__priv_size; | ||
60 | |||
61 | static inline void *symbol__priv(struct symbol *self) | ||
62 | { | ||
63 | return ((void *)self) - symbol__priv_size; | ||
64 | } | ||
65 | |||
66 | struct addr_location { | ||
67 | struct thread *thread; | ||
68 | struct map *map; | ||
69 | struct symbol *sym; | ||
70 | u64 addr; | ||
71 | char level; | ||
72 | }; | ||
73 | |||
57 | struct dso { | 74 | struct dso { |
58 | struct list_head node; | 75 | struct list_head node; |
59 | struct rb_root syms; | 76 | struct rb_root symbols[MAP__NR_TYPES]; |
60 | struct symbol *(*find_symbol)(struct dso *, u64 ip); | 77 | struct symbol *(*find_symbol)(struct dso *self, |
61 | unsigned int sym_priv_size; | 78 | enum map_type type, u64 addr); |
62 | unsigned char adjust_symbols; | 79 | u8 adjust_symbols:1; |
63 | unsigned char slen_calculated; | 80 | u8 slen_calculated:1; |
81 | u8 has_build_id:1; | ||
82 | u8 kernel:1; | ||
64 | unsigned char origin; | 83 | unsigned char origin; |
84 | u8 loaded; | ||
85 | u8 build_id[BUILD_ID_SIZE]; | ||
86 | u16 long_name_len; | ||
87 | const char *short_name; | ||
88 | char *long_name; | ||
65 | char name[0]; | 89 | char name[0]; |
66 | }; | 90 | }; |
67 | 91 | ||
68 | extern const char *sym_hist_filter; | 92 | struct dso *dso__new(const char *name); |
69 | |||
70 | typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); | ||
71 | |||
72 | struct dso *dso__new(const char *name, unsigned int sym_priv_size); | ||
73 | void dso__delete(struct dso *self); | 93 | void dso__delete(struct dso *self); |
74 | 94 | ||
75 | static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) | 95 | bool dso__loaded(const struct dso *self, enum map_type type); |
76 | { | ||
77 | return ((void *)sym) - self->sym_priv_size; | ||
78 | } | ||
79 | |||
80 | struct symbol *dso__find_symbol(struct dso *self, u64 ip); | ||
81 | 96 | ||
82 | int dso__load_kernel(struct dso *self, const char *vmlinux, | ||
83 | symbol_filter_t filter, int verbose, int modules); | ||
84 | int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose); | ||
85 | int dso__load(struct dso *self, symbol_filter_t filter, int verbose); | ||
86 | struct dso *dsos__findnew(const char *name); | 97 | struct dso *dsos__findnew(const char *name); |
98 | int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); | ||
87 | void dsos__fprintf(FILE *fp); | 99 | void dsos__fprintf(FILE *fp); |
100 | size_t dsos__fprintf_buildid(FILE *fp); | ||
88 | 101 | ||
89 | size_t dso__fprintf(struct dso *self, FILE *fp); | 102 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp); |
103 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); | ||
90 | char dso__symtab_origin(const struct dso *self); | 104 | char dso__symtab_origin(const struct dso *self); |
105 | void dso__set_build_id(struct dso *self, void *build_id); | ||
106 | |||
107 | int filename__read_build_id(const char *filename, void *bf, size_t size); | ||
108 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); | ||
109 | bool dsos__read_build_ids(void); | ||
110 | int build_id__sprintf(u8 *self, int len, char *bf); | ||
91 | 111 | ||
92 | int load_kernel(void); | 112 | size_t kernel_maps__fprintf(FILE *fp); |
93 | 113 | ||
94 | void symbol__init(void); | 114 | int symbol__init(struct symbol_conf *conf); |
95 | 115 | ||
96 | extern struct list_head dsos; | 116 | struct thread; |
97 | extern struct dso *kernel_dso; | 117 | struct thread *kthread; |
118 | extern struct list_head dsos__user, dsos__kernel; | ||
98 | extern struct dso *vdso; | 119 | extern struct dso *vdso; |
99 | extern struct dso *hypervisor_dso; | 120 | #endif /* __PERF_SYMBOL */ |
100 | extern const char *vmlinux_name; | ||
101 | extern int modules; | ||
102 | #endif /* _PERF_SYMBOL_ */ | ||
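The header now exposes symbol__priv(), which returns per-symbol private data stored immediately before the struct symbol itself, sized by symbol__priv_size at init time. The allocation side that makes this negative offset work presumably looks roughly like the sketch below (struct and function names here are invented for illustration):

/* Sketch: allocate priv bytes in front of the symbol, hand back the symbol. */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static unsigned int priv_size;	/* set once, like symbol__priv_size */

struct symbol_sketch {
	uint64_t start, end;
	char name[];
};

static struct symbol_sketch *symbol_alloc(const char *name)
{
	size_t namelen = strlen(name) + 1;
	void *p = calloc(1, priv_size + sizeof(struct symbol_sketch) + namelen);

	if (p == NULL)
		return NULL;
	/* the caller sees the symbol; priv data lives at ((void *)sym) - priv_size */
	struct symbol_sketch *sym = (void *)((char *)p + priv_size);
	memcpy(sym->name, name, namelen);
	return sym;
}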
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 45efb5db0d19..603f5610861b 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -6,16 +6,29 @@ | |||
6 | #include "util.h" | 6 | #include "util.h" |
7 | #include "debug.h" | 7 | #include "debug.h" |
8 | 8 | ||
9 | static struct rb_root threads; | ||
10 | static struct thread *last_match; | ||
11 | |||
12 | void thread__init(struct thread *self, pid_t pid) | ||
13 | { | ||
14 | int i; | ||
15 | self->pid = pid; | ||
16 | self->comm = NULL; | ||
17 | for (i = 0; i < MAP__NR_TYPES; ++i) { | ||
18 | self->maps[i] = RB_ROOT; | ||
19 | INIT_LIST_HEAD(&self->removed_maps[i]); | ||
20 | } | ||
21 | } | ||
22 | |||
9 | static struct thread *thread__new(pid_t pid) | 23 | static struct thread *thread__new(pid_t pid) |
10 | { | 24 | { |
11 | struct thread *self = calloc(1, sizeof(*self)); | 25 | struct thread *self = zalloc(sizeof(*self)); |
12 | 26 | ||
13 | if (self != NULL) { | 27 | if (self != NULL) { |
14 | self->pid = pid; | 28 | thread__init(self, pid); |
15 | self->comm = malloc(32); | 29 | self->comm = malloc(32); |
16 | if (self->comm) | 30 | if (self->comm) |
17 | snprintf(self->comm, 32, ":%d", self->pid); | 31 | snprintf(self->comm, 32, ":%d", self->pid); |
18 | INIT_LIST_HEAD(&self->maps); | ||
19 | } | 32 | } |
20 | 33 | ||
21 | return self; | 34 | return self; |
@@ -29,21 +42,84 @@ int thread__set_comm(struct thread *self, const char *comm) | |||
29 | return self->comm ? 0 : -ENOMEM; | 42 | return self->comm ? 0 : -ENOMEM; |
30 | } | 43 | } |
31 | 44 | ||
32 | static size_t thread__fprintf(struct thread *self, FILE *fp) | 45 | int thread__comm_len(struct thread *self) |
46 | { | ||
47 | if (!self->comm_len) { | ||
48 | if (!self->comm) | ||
49 | return 0; | ||
50 | self->comm_len = strlen(self->comm); | ||
51 | } | ||
52 | |||
53 | return self->comm_len; | ||
54 | } | ||
55 | |||
56 | static const char *map_type__name[MAP__NR_TYPES] = { | ||
57 | [MAP__FUNCTION] = "Functions", | ||
58 | }; | ||
59 | |||
60 | static size_t __thread__fprintf_maps(struct thread *self, | ||
61 | enum map_type type, FILE *fp) | ||
62 | { | ||
63 | size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); | ||
64 | struct rb_node *nd; | ||
65 | |||
66 | for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { | ||
67 | struct map *pos = rb_entry(nd, struct map, rb_node); | ||
68 | printed += fprintf(fp, "Map:"); | ||
69 | printed += map__fprintf(pos, fp); | ||
70 | if (verbose > 1) { | ||
71 | printed += dso__fprintf(pos->dso, type, fp); | ||
72 | printed += fprintf(fp, "--\n"); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | return printed; | ||
77 | } | ||
78 | |||
79 | size_t thread__fprintf_maps(struct thread *self, FILE *fp) | ||
80 | { | ||
81 | size_t printed = 0, i; | ||
82 | for (i = 0; i < MAP__NR_TYPES; ++i) | ||
83 | printed += __thread__fprintf_maps(self, i, fp); | ||
84 | return printed; | ||
85 | } | ||
86 | |||
87 | static size_t __thread__fprintf_removed_maps(struct thread *self, | ||
88 | enum map_type type, FILE *fp) | ||
33 | { | 89 | { |
34 | struct map *pos; | 90 | struct map *pos; |
35 | size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); | 91 | size_t printed = 0; |
92 | |||
93 | list_for_each_entry(pos, &self->removed_maps[type], node) { | ||
94 | printed += fprintf(fp, "Map:"); | ||
95 | printed += map__fprintf(pos, fp); | ||
96 | if (verbose > 1) { | ||
97 | printed += dso__fprintf(pos->dso, type, fp); | ||
98 | printed += fprintf(fp, "--\n"); | ||
99 | } | ||
100 | } | ||
101 | return printed; | ||
102 | } | ||
36 | 103 | ||
37 | list_for_each_entry(pos, &self->maps, node) | 104 | static size_t thread__fprintf_removed_maps(struct thread *self, FILE *fp) |
38 | ret += map__fprintf(pos, fp); | 105 | { |
106 | size_t printed = 0, i; | ||
107 | for (i = 0; i < MAP__NR_TYPES; ++i) | ||
108 | printed += __thread__fprintf_removed_maps(self, i, fp); | ||
109 | return printed; | ||
110 | } | ||
39 | 111 | ||
40 | return ret; | 112 | static size_t thread__fprintf(struct thread *self, FILE *fp) |
113 | { | ||
114 | size_t printed = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); | ||
115 | printed += thread__fprintf_maps(self, fp); | ||
116 | printed += fprintf(fp, "Removed maps:\n"); | ||
117 | return printed + thread__fprintf_removed_maps(self, fp); | ||
41 | } | 118 | } |
42 | 119 | ||
43 | struct thread * | 120 | struct thread *threads__findnew(pid_t pid) |
44 | threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) | ||
45 | { | 121 | { |
46 | struct rb_node **p = &threads->rb_node; | 122 | struct rb_node **p = &threads.rb_node; |
47 | struct rb_node *parent = NULL; | 123 | struct rb_node *parent = NULL; |
48 | struct thread *th; | 124 | struct thread *th; |
49 | 125 | ||
@@ -52,15 +128,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) | |||
52 | * so most of the time we don't have to look up | 128 | * so most of the time we don't have to look up |
53 | * the full rbtree: | 129 | * the full rbtree: |
54 | */ | 130 | */ |
55 | if (*last_match && (*last_match)->pid == pid) | 131 | if (last_match && last_match->pid == pid) |
56 | return *last_match; | 132 | return last_match; |
57 | 133 | ||
58 | while (*p != NULL) { | 134 | while (*p != NULL) { |
59 | parent = *p; | 135 | parent = *p; |
60 | th = rb_entry(parent, struct thread, rb_node); | 136 | th = rb_entry(parent, struct thread, rb_node); |
61 | 137 | ||
62 | if (th->pid == pid) { | 138 | if (th->pid == pid) { |
63 | *last_match = th; | 139 | last_match = th; |
64 | return th; | 140 | return th; |
65 | } | 141 | } |
66 | 142 | ||
@@ -73,17 +149,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) | |||
73 | th = thread__new(pid); | 149 | th = thread__new(pid); |
74 | if (th != NULL) { | 150 | if (th != NULL) { |
75 | rb_link_node(&th->rb_node, parent, p); | 151 | rb_link_node(&th->rb_node, parent, p); |
76 | rb_insert_color(&th->rb_node, threads); | 152 | rb_insert_color(&th->rb_node, &threads); |
77 | *last_match = th; | 153 | last_match = th; |
78 | } | 154 | } |
79 | 155 | ||
80 | return th; | 156 | return th; |
81 | } | 157 | } |
82 | 158 | ||
83 | struct thread * | 159 | struct thread *register_idle_thread(void) |
84 | register_idle_thread(struct rb_root *threads, struct thread **last_match) | ||
85 | { | 160 | { |
86 | struct thread *thread = threads__findnew(0, threads, last_match); | 161 | struct thread *thread = threads__findnew(0); |
87 | 162 | ||
88 | if (!thread || thread__set_comm(thread, "swapper")) { | 163 | if (!thread || thread__set_comm(thread, "swapper")) { |
89 | fprintf(stderr, "problem inserting idle task.\n"); | 164 | fprintf(stderr, "problem inserting idle task.\n"); |
@@ -93,79 +168,116 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match) | |||
93 | return thread; | 168 | return thread; |
94 | } | 169 | } |
95 | 170 | ||
96 | void thread__insert_map(struct thread *self, struct map *map) | 171 | static void thread__remove_overlappings(struct thread *self, struct map *map) |
97 | { | 172 | { |
98 | struct map *pos, *tmp; | 173 | struct rb_root *root = &self->maps[map->type]; |
174 | struct rb_node *next = rb_first(root); | ||
99 | 175 | ||
100 | list_for_each_entry_safe(pos, tmp, &self->maps, node) { | 176 | while (next) { |
101 | if (map__overlap(pos, map)) { | 177 | struct map *pos = rb_entry(next, struct map, rb_node); |
102 | if (verbose >= 2) { | 178 | next = rb_next(&pos->rb_node); |
103 | printf("overlapping maps:\n"); | ||
104 | map__fprintf(map, stdout); | ||
105 | map__fprintf(pos, stdout); | ||
106 | } | ||
107 | 179 | ||
108 | if (map->start <= pos->start && map->end > pos->start) | 180 | if (!map__overlap(pos, map)) |
109 | pos->start = map->end; | 181 | continue; |
110 | 182 | ||
111 | if (map->end >= pos->end && map->start < pos->end) | 183 | if (verbose >= 2) { |
112 | pos->end = map->start; | 184 | fputs("overlapping maps:\n", stderr); |
185 | map__fprintf(map, stderr); | ||
186 | map__fprintf(pos, stderr); | ||
187 | } | ||
113 | 188 | ||
114 | if (verbose >= 2) { | 189 | rb_erase(&pos->rb_node, root); |
115 | printf("after collision:\n"); | 190 | /* |
116 | map__fprintf(pos, stdout); | 191 | * We may have references to this map, for instance in some |
117 | } | 192 | * hist_entry instances, so just move them to a separate |
193 | * list. | ||
194 | */ | ||
195 | list_add_tail(&pos->node, &self->removed_maps[map->type]); | ||
196 | } | ||
197 | } | ||
118 | 198 | ||
119 | if (pos->start >= pos->end) { | 199 | void maps__insert(struct rb_root *maps, struct map *map) |
120 | list_del_init(&pos->node); | 200 | { |
121 | free(pos); | 201 | struct rb_node **p = &maps->rb_node; |
122 | } | 202 | struct rb_node *parent = NULL; |
123 | } | 203 | const u64 ip = map->start; |
204 | struct map *m; | ||
205 | |||
206 | while (*p != NULL) { | ||
207 | parent = *p; | ||
208 | m = rb_entry(parent, struct map, rb_node); | ||
209 | if (ip < m->start) | ||
210 | p = &(*p)->rb_left; | ||
211 | else | ||
212 | p = &(*p)->rb_right; | ||
124 | } | 213 | } |
125 | 214 | ||
126 | list_add_tail(&map->node, &self->maps); | 215 | rb_link_node(&map->rb_node, parent, p); |
216 | rb_insert_color(&map->rb_node, maps); | ||
127 | } | 217 | } |
128 | 218 | ||
129 | int thread__fork(struct thread *self, struct thread *parent) | 219 | struct map *maps__find(struct rb_root *maps, u64 ip) |
130 | { | 220 | { |
131 | struct map *map; | 221 | struct rb_node **p = &maps->rb_node; |
222 | struct rb_node *parent = NULL; | ||
223 | struct map *m; | ||
132 | 224 | ||
133 | if (self->comm) | 225 | while (*p != NULL) { |
134 | free(self->comm); | 226 | parent = *p; |
135 | self->comm = strdup(parent->comm); | 227 | m = rb_entry(parent, struct map, rb_node); |
136 | if (!self->comm) | 228 | if (ip < m->start) |
137 | return -ENOMEM; | 229 | p = &(*p)->rb_left; |
230 | else if (ip > m->end) | ||
231 | p = &(*p)->rb_right; | ||
232 | else | ||
233 | return m; | ||
234 | } | ||
235 | |||
236 | return NULL; | ||
237 | } | ||
238 | |||
239 | void thread__insert_map(struct thread *self, struct map *map) | ||
240 | { | ||
241 | thread__remove_overlappings(self, map); | ||
242 | maps__insert(&self->maps[map->type], map); | ||
243 | } | ||
138 | 244 | ||
139 | list_for_each_entry(map, &parent->maps, node) { | 245 | static int thread__clone_maps(struct thread *self, struct thread *parent, |
246 | enum map_type type) | ||
247 | { | ||
248 | struct rb_node *nd; | ||
249 | for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) { | ||
250 | struct map *map = rb_entry(nd, struct map, rb_node); | ||
140 | struct map *new = map__clone(map); | 251 | struct map *new = map__clone(map); |
141 | if (!new) | 252 | if (new == NULL) |
142 | return -ENOMEM; | 253 | return -ENOMEM; |
143 | thread__insert_map(self, new); | 254 | thread__insert_map(self, new); |
144 | } | 255 | } |
145 | |||
146 | return 0; | 256 | return 0; |
147 | } | 257 | } |
148 | 258 | ||
149 | struct map *thread__find_map(struct thread *self, u64 ip) | 259 | int thread__fork(struct thread *self, struct thread *parent) |
150 | { | 260 | { |
151 | struct map *pos; | 261 | int i; |
152 | 262 | ||
153 | if (self == NULL) | 263 | if (self->comm) |
154 | return NULL; | 264 | free(self->comm); |
155 | 265 | self->comm = strdup(parent->comm); | |
156 | list_for_each_entry(pos, &self->maps, node) | 266 | if (!self->comm) |
157 | if (ip >= pos->start && ip <= pos->end) | 267 | return -ENOMEM; |
158 | return pos; | ||
159 | 268 | ||
160 | return NULL; | 269 | for (i = 0; i < MAP__NR_TYPES; ++i) |
270 | if (thread__clone_maps(self, parent, i) < 0) | ||
271 | return -ENOMEM; | ||
272 | return 0; | ||
161 | } | 273 | } |
162 | 274 | ||
163 | size_t threads__fprintf(FILE *fp, struct rb_root *threads) | 275 | size_t threads__fprintf(FILE *fp) |
164 | { | 276 | { |
165 | size_t ret = 0; | 277 | size_t ret = 0; |
166 | struct rb_node *nd; | 278 | struct rb_node *nd; |
167 | 279 | ||
168 | for (nd = rb_first(threads); nd; nd = rb_next(nd)) { | 280 | for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { |
169 | struct thread *pos = rb_entry(nd, struct thread, rb_node); | 281 | struct thread *pos = rb_entry(nd, struct thread, rb_node); |
170 | 282 | ||
171 | ret += thread__fprintf(pos, fp); | 283 | ret += thread__fprintf(pos, fp); |
@@ -173,3 +285,15 @@ size_t threads__fprintf(FILE *fp, struct rb_root *threads) | |||
173 | 285 | ||
174 | return ret; | 286 | return ret; |
175 | } | 287 | } |
288 | |||
289 | struct symbol *thread__find_symbol(struct thread *self, | ||
290 | enum map_type type, u64 addr, | ||
291 | symbol_filter_t filter) | ||
292 | { | ||
293 | struct map *map = thread__find_map(self, type, addr); | ||
294 | |||
295 | if (map != NULL) | ||
296 | return map__find_symbol(map, map->map_ip(map, addr), filter); | ||
297 | |||
298 | return NULL; | ||
299 | } | ||
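thread.c replaces the linear list of maps with an rbtree keyed by start address: maps__insert() links on map->start, and maps__find() descends left or right until the address falls inside [start, end]. The lookup is an ordinary binary search; the sketch below shows the equivalent over a plain sorted array (types and names assumed):

/* Sketch: address lookup over maps sorted by start, the rbtree equivalent. */
#include <stdint.h>
#include <stddef.h>

struct map_sketch { uint64_t start, end; };

static struct map_sketch *map_find(struct map_sketch *maps, size_t nr, uint64_t ip)
{
	size_t lo = 0, hi = nr;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (ip < maps[mid].start)
			hi = mid;
		else if (ip > maps[mid].end)
			lo = mid + 1;
		else
			return &maps[mid];	/* start <= ip <= end */
	}
	return NULL;
}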
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 32aea3c1c2ad..686d6e914d9e 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -1,22 +1,56 @@ | |||
1 | #ifndef __PERF_THREAD_H | ||
2 | #define __PERF_THREAD_H | ||
3 | |||
1 | #include <linux/rbtree.h> | 4 | #include <linux/rbtree.h> |
2 | #include <linux/list.h> | ||
3 | #include <unistd.h> | 5 | #include <unistd.h> |
4 | #include "symbol.h" | 6 | #include "symbol.h" |
5 | 7 | ||
6 | struct thread { | 8 | struct thread { |
7 | struct rb_node rb_node; | 9 | struct rb_node rb_node; |
8 | struct list_head maps; | 10 | struct rb_root maps[MAP__NR_TYPES]; |
11 | struct list_head removed_maps[MAP__NR_TYPES]; | ||
9 | pid_t pid; | 12 | pid_t pid; |
13 | bool use_modules; | ||
10 | char shortname[3]; | 14 | char shortname[3]; |
11 | char *comm; | 15 | char *comm; |
16 | int comm_len; | ||
12 | }; | 17 | }; |
13 | 18 | ||
19 | void thread__init(struct thread *self, pid_t pid); | ||
14 | int thread__set_comm(struct thread *self, const char *comm); | 20 | int thread__set_comm(struct thread *self, const char *comm); |
15 | struct thread * | 21 | int thread__comm_len(struct thread *self); |
16 | threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); | 22 | struct thread *threads__findnew(pid_t pid); |
17 | struct thread * | 23 | struct thread *register_idle_thread(void); |
18 | register_idle_thread(struct rb_root *threads, struct thread **last_match); | ||
19 | void thread__insert_map(struct thread *self, struct map *map); | 24 | void thread__insert_map(struct thread *self, struct map *map); |
20 | int thread__fork(struct thread *self, struct thread *parent); | 25 | int thread__fork(struct thread *self, struct thread *parent); |
21 | struct map *thread__find_map(struct thread *self, u64 ip); | 26 | size_t thread__fprintf_maps(struct thread *self, FILE *fp); |
22 | size_t threads__fprintf(FILE *fp, struct rb_root *threads); | 27 | size_t threads__fprintf(FILE *fp); |
28 | |||
29 | void maps__insert(struct rb_root *maps, struct map *map); | ||
30 | struct map *maps__find(struct rb_root *maps, u64 addr); | ||
31 | |||
32 | static inline struct map *thread__find_map(struct thread *self, | ||
33 | enum map_type type, u64 addr) | ||
34 | { | ||
35 | return self ? maps__find(&self->maps[type], addr) : NULL; | ||
36 | } | ||
37 | |||
38 | static inline void __thread__insert_map(struct thread *self, struct map *map) | ||
39 | { | ||
40 | maps__insert(&self->maps[map->type], map); | ||
41 | } | ||
42 | |||
43 | void thread__find_addr_location(struct thread *self, u8 cpumode, | ||
44 | enum map_type type, u64 addr, | ||
45 | struct addr_location *al, | ||
46 | symbol_filter_t filter); | ||
47 | struct symbol *thread__find_symbol(struct thread *self, | ||
48 | enum map_type type, u64 addr, | ||
49 | symbol_filter_t filter); | ||
50 | |||
51 | static inline struct symbol * | ||
52 | thread__find_function(struct thread *self, u64 addr, symbol_filter_t filter) | ||
53 | { | ||
54 | return thread__find_symbol(self, MAP__FUNCTION, addr, filter); | ||
55 | } | ||
56 | #endif /* __PERF_THREAD_H */ | ||
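With per-type map trees in place, resolving an instruction pointer is a two-step lookup: find the map that covers the address, then let the map translate it and search its DSO. A hypothetical caller, using only the functions declared in the header above, might be:

/* Sketch: hypothetical caller resolving a sample ip to a function symbol. */
#include "thread.h"

static struct symbol *resolve_ip(struct thread *thread, u64 ip)
{
	/* NULL filter: accept every symbol the map's DSO resolves. */
	return thread__find_function(thread, ip, NULL);
}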
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index af4b0573b37f..cace35595530 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c | |||
@@ -33,11 +33,11 @@ | |||
33 | #include <ctype.h> | 33 | #include <ctype.h> |
34 | #include <errno.h> | 34 | #include <errno.h> |
35 | #include <stdbool.h> | 35 | #include <stdbool.h> |
36 | #include <linux/kernel.h> | ||
36 | 37 | ||
37 | #include "../perf.h" | 38 | #include "../perf.h" |
38 | #include "trace-event.h" | 39 | #include "trace-event.h" |
39 | 40 | ||
40 | |||
41 | #define VERSION "0.5" | 41 | #define VERSION "0.5" |
42 | 42 | ||
43 | #define _STR(x) #x | 43 | #define _STR(x) #x |
@@ -483,27 +483,33 @@ static struct tracepoint_path * | |||
483 | get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | 483 | get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) |
484 | { | 484 | { |
485 | struct tracepoint_path path, *ppath = &path; | 485 | struct tracepoint_path path, *ppath = &path; |
486 | int i; | 486 | int i, nr_tracepoints = 0; |
487 | 487 | ||
488 | for (i = 0; i < nb_events; i++) { | 488 | for (i = 0; i < nb_events; i++) { |
489 | if (pattrs[i].type != PERF_TYPE_TRACEPOINT) | 489 | if (pattrs[i].type != PERF_TYPE_TRACEPOINT) |
490 | continue; | 490 | continue; |
491 | ++nr_tracepoints; | ||
491 | ppath->next = tracepoint_id_to_path(pattrs[i].config); | 492 | ppath->next = tracepoint_id_to_path(pattrs[i].config); |
492 | if (!ppath->next) | 493 | if (!ppath->next) |
493 | die("%s\n", "No memory to alloc tracepoints list"); | 494 | die("%s\n", "No memory to alloc tracepoints list"); |
494 | ppath = ppath->next; | 495 | ppath = ppath->next; |
495 | } | 496 | } |
496 | 497 | ||
497 | return path.next; | 498 | return nr_tracepoints > 0 ? path.next : NULL; |
498 | } | 499 | } |
499 | void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) | 500 | |
501 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | ||
500 | { | 502 | { |
501 | char buf[BUFSIZ]; | 503 | char buf[BUFSIZ]; |
502 | struct tracepoint_path *tps; | 504 | struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events); |
505 | |||
506 | /* | ||
507 | * What? No tracepoints? No sense writing anything here, bail out. | ||
508 | */ | ||
509 | if (tps == NULL) | ||
510 | return -1; | ||
503 | 511 | ||
504 | output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644); | 512 | output_fd = fd; |
505 | if (output_fd < 0) | ||
506 | die("creating file '%s'", output_file); | ||
507 | 513 | ||
508 | buf[0] = 23; | 514 | buf[0] = 23; |
509 | buf[1] = 8; | 515 | buf[1] = 8; |
@@ -530,11 +536,11 @@ void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) | |||
530 | page_size = getpagesize(); | 536 | page_size = getpagesize(); |
531 | write_or_die(&page_size, 4); | 537 | write_or_die(&page_size, 4); |
532 | 538 | ||
533 | tps = get_tracepoints_path(pattrs, nb_events); | ||
534 | |||
535 | read_header_files(); | 539 | read_header_files(); |
536 | read_ftrace_files(tps); | 540 | read_ftrace_files(tps); |
537 | read_event_files(tps); | 541 | read_event_files(tps); |
538 | read_proc_kallsyms(); | 542 | read_proc_kallsyms(); |
539 | read_ftrace_printk(); | 543 | read_ftrace_printk(); |
544 | |||
545 | return 0; | ||
540 | } | 546 | } |
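read_tracing_data() now writes to a caller-supplied fd and returns -1 when none of the attrs are tracepoints, so no trace metadata is emitted for pure hardware or software counter sessions. A caller is presumably expected to treat that return as "nothing to write", along the lines of this hypothetical guard:

/* Sketch: hypothetical caller, skipping the section when nothing qualifies. */
if (read_tracing_data(fd, attrs, nr_counters) < 0)
	pr_debug("no tracepoint events, no tracing data written\n");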
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 55c9659a56e2..0302405aa2ca 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -40,12 +40,19 @@ int header_page_size_size; | |||
40 | int header_page_data_offset; | 40 | int header_page_data_offset; |
41 | int header_page_data_size; | 41 | int header_page_data_size; |
42 | 42 | ||
43 | int latency_format; | ||
44 | |||
43 | static char *input_buf; | 45 | static char *input_buf; |
44 | static unsigned long long input_buf_ptr; | 46 | static unsigned long long input_buf_ptr; |
45 | static unsigned long long input_buf_siz; | 47 | static unsigned long long input_buf_siz; |
46 | 48 | ||
47 | static int cpus; | 49 | static int cpus; |
48 | static int long_size; | 50 | static int long_size; |
51 | static int is_flag_field; | ||
52 | static int is_symbolic_field; | ||
53 | |||
54 | static struct format_field * | ||
55 | find_any_field(struct event *event, const char *name); | ||
49 | 56 | ||
50 | static void init_input_buf(char *buf, unsigned long long size) | 57 | static void init_input_buf(char *buf, unsigned long long size) |
51 | { | 58 | { |
@@ -284,18 +291,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused) | |||
284 | char *line; | 291 | char *line; |
285 | char *next = NULL; | 292 | char *next = NULL; |
286 | char *addr_str; | 293 | char *addr_str; |
287 | int ret; | ||
288 | int i; | 294 | int i; |
289 | 295 | ||
290 | line = strtok_r(file, "\n", &next); | 296 | line = strtok_r(file, "\n", &next); |
291 | while (line) { | 297 | while (line) { |
298 | addr_str = strsep(&line, ":"); | ||
299 | if (!line) { | ||
300 | warning("error parsing print strings"); | ||
301 | break; | ||
302 | } | ||
292 | item = malloc_or_die(sizeof(*item)); | 303 | item = malloc_or_die(sizeof(*item)); |
293 | ret = sscanf(line, "%as : %as", | ||
294 | (float *)(void *)&addr_str, /* workaround gcc warning */ | ||
295 | (float *)(void *)&item->printk); | ||
296 | item->addr = strtoull(addr_str, NULL, 16); | 304 | item->addr = strtoull(addr_str, NULL, 16); |
297 | free(addr_str); | 305 | /* fmt still has a space, skip it */ |
298 | 306 | item->printk = strdup(line+1); | |
299 | item->next = list; | 307 | item->next = list; |
300 | list = item; | 308 | list = item; |
301 | line = strtok_r(NULL, "\n", &next); | 309 | line = strtok_r(NULL, "\n", &next); |
@@ -522,7 +530,10 @@ static enum event_type __read_token(char **tok) | |||
522 | last_ch = ch; | 530 | last_ch = ch; |
523 | ch = __read_char(); | 531 | ch = __read_char(); |
524 | buf[i++] = ch; | 532 | buf[i++] = ch; |
525 | } while (ch != quote_ch && last_ch != '\\'); | 533 | /* the '\' '\' will cancel itself */ |
534 | if (ch == '\\' && last_ch == '\\') | ||
535 | last_ch = 0; | ||
536 | } while (ch != quote_ch || last_ch == '\\'); | ||
526 | /* remove the last quote */ | 537 | /* remove the last quote */ |
527 | i--; | 538 | i--; |
528 | goto out; | 539 | goto out; |
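The tokenizer fix above keeps reading a quoted string until it reaches a closing quote that is not itself escaped, and clears last_ch when a pair of backslashes cancels out. A simplified standalone sketch of that termination rule, over a plain string rather than the parser's input buffer, is:

/* Sketch: find the closing quote of a quoted string, honouring \" and \\. */
#include <stddef.h>

static const char *find_closing_quote(const char *s, char quote_ch)
{
	char last_ch = 0, ch;

	while ((ch = *s++) != '\0') {
		if (ch == quote_ch && last_ch != '\\')
			return s - 1;		/* points at the closing quote */
		/* two consecutive backslashes cancel each other */
		if (ch == '\\' && last_ch == '\\')
			last_ch = 0;
		else
			last_ch = ch;
	}
	return NULL;
}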
@@ -610,7 +621,7 @@ static enum event_type read_token_item(char **tok) | |||
610 | static int test_type(enum event_type type, enum event_type expect) | 621 | static int test_type(enum event_type type, enum event_type expect) |
611 | { | 622 | { |
612 | if (type != expect) { | 623 | if (type != expect) { |
613 | die("Error: expected type %d but read %d", | 624 | warning("Error: expected type %d but read %d", |
614 | expect, type); | 625 | expect, type); |
615 | return -1; | 626 | return -1; |
616 | } | 627 | } |
@@ -621,13 +632,13 @@ static int test_type_token(enum event_type type, char *token, | |||
621 | enum event_type expect, const char *expect_tok) | 632 | enum event_type expect, const char *expect_tok) |
622 | { | 633 | { |
623 | if (type != expect) { | 634 | if (type != expect) { |
624 | die("Error: expected type %d but read %d", | 635 | warning("Error: expected type %d but read %d", |
625 | expect, type); | 636 | expect, type); |
626 | return -1; | 637 | return -1; |
627 | } | 638 | } |
628 | 639 | ||
629 | if (strcmp(token, expect_tok) != 0) { | 640 | if (strcmp(token, expect_tok) != 0) { |
630 | die("Error: expected '%s' but read '%s'", | 641 | warning("Error: expected '%s' but read '%s'", |
631 | expect_tok, token); | 642 | expect_tok, token); |
632 | return -1; | 643 | return -1; |
633 | } | 644 | } |
@@ -665,7 +676,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_ | |||
665 | 676 | ||
666 | free_token(token); | 677 | free_token(token); |
667 | 678 | ||
668 | return 0; | 679 | return ret; |
669 | } | 680 | } |
670 | 681 | ||
671 | static int read_expected(enum event_type expect, const char *str) | 682 | static int read_expected(enum event_type expect, const char *str) |
@@ -682,10 +693,10 @@ static char *event_read_name(void) | |||
682 | { | 693 | { |
683 | char *token; | 694 | char *token; |
684 | 695 | ||
685 | if (read_expected(EVENT_ITEM, (char *)"name") < 0) | 696 | if (read_expected(EVENT_ITEM, "name") < 0) |
686 | return NULL; | 697 | return NULL; |
687 | 698 | ||
688 | if (read_expected(EVENT_OP, (char *)":") < 0) | 699 | if (read_expected(EVENT_OP, ":") < 0) |
689 | return NULL; | 700 | return NULL; |
690 | 701 | ||
691 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 702 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
@@ -703,10 +714,10 @@ static int event_read_id(void) | |||
703 | char *token; | 714 | char *token; |
704 | int id; | 715 | int id; |
705 | 716 | ||
706 | if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0) | 717 | if (read_expected_item(EVENT_ITEM, "ID") < 0) |
707 | return -1; | 718 | return -1; |
708 | 719 | ||
709 | if (read_expected(EVENT_OP, (char *)":") < 0) | 720 | if (read_expected(EVENT_OP, ":") < 0) |
710 | return -1; | 721 | return -1; |
711 | 722 | ||
712 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 723 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
@@ -721,6 +732,24 @@ static int event_read_id(void) | |||
721 | return -1; | 732 | return -1; |
722 | } | 733 | } |
723 | 734 | ||
735 | static int field_is_string(struct format_field *field) | ||
736 | { | ||
737 | if ((field->flags & FIELD_IS_ARRAY) && | ||
738 | (!strstr(field->type, "char") || !strstr(field->type, "u8") || | ||
739 | !strstr(field->type, "s8"))) | ||
740 | return 1; | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | static int field_is_dynamic(struct format_field *field) | ||
746 | { | ||
747 | if (!strcmp(field->type, "__data_loc")) | ||
748 | return 1; | ||
749 | |||
750 | return 0; | ||
751 | } | ||
752 | |||
724 | static int event_read_fields(struct event *event, struct format_field **fields) | 753 | static int event_read_fields(struct event *event, struct format_field **fields) |
725 | { | 754 | { |
726 | struct format_field *field = NULL; | 755 | struct format_field *field = NULL; |
@@ -738,7 +767,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) | |||
738 | 767 | ||
739 | count++; | 768 | count++; |
740 | 769 | ||
741 | if (test_type_token(type, token, EVENT_ITEM, (char *)"field")) | 770 | if (test_type_token(type, token, EVENT_ITEM, "field")) |
742 | goto fail; | 771 | goto fail; |
743 | free_token(token); | 772 | free_token(token); |
744 | 773 | ||
@@ -753,7 +782,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) | |||
753 | type = read_token(&token); | 782 | type = read_token(&token); |
754 | } | 783 | } |
755 | 784 | ||
756 | if (test_type_token(type, token, EVENT_OP, (char *)":") < 0) | 785 | if (test_type_token(type, token, EVENT_OP, ":") < 0) |
757 | return -1; | 786 | return -1; |
758 | 787 | ||
759 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 788 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
@@ -865,14 +894,20 @@ static int event_read_fields(struct event *event, struct format_field **fields) | |||
865 | free(brackets); | 894 | free(brackets); |
866 | } | 895 | } |
867 | 896 | ||
868 | if (test_type_token(type, token, EVENT_OP, (char *)";")) | 897 | if (field_is_string(field)) { |
898 | field->flags |= FIELD_IS_STRING; | ||
899 | if (field_is_dynamic(field)) | ||
900 | field->flags |= FIELD_IS_DYNAMIC; | ||
901 | } | ||
902 | |||
903 | if (test_type_token(type, token, EVENT_OP, ";")) | ||
869 | goto fail; | 904 | goto fail; |
870 | free_token(token); | 905 | free_token(token); |
871 | 906 | ||
872 | if (read_expected(EVENT_ITEM, (char *)"offset") < 0) | 907 | if (read_expected(EVENT_ITEM, "offset") < 0) |
873 | goto fail_expect; | 908 | goto fail_expect; |
874 | 909 | ||
875 | if (read_expected(EVENT_OP, (char *)":") < 0) | 910 | if (read_expected(EVENT_OP, ":") < 0) |
876 | goto fail_expect; | 911 | goto fail_expect; |
877 | 912 | ||
878 | if (read_expect_type(EVENT_ITEM, &token)) | 913 | if (read_expect_type(EVENT_ITEM, &token)) |
@@ -880,13 +915,13 @@ static int event_read_fields(struct event *event, struct format_field **fields) | |||
880 | field->offset = strtoul(token, NULL, 0); | 915 | field->offset = strtoul(token, NULL, 0); |
881 | free_token(token); | 916 | free_token(token); |
882 | 917 | ||
883 | if (read_expected(EVENT_OP, (char *)";") < 0) | 918 | if (read_expected(EVENT_OP, ";") < 0) |
884 | goto fail_expect; | 919 | goto fail_expect; |
885 | 920 | ||
886 | if (read_expected(EVENT_ITEM, (char *)"size") < 0) | 921 | if (read_expected(EVENT_ITEM, "size") < 0) |
887 | goto fail_expect; | 922 | goto fail_expect; |
888 | 923 | ||
889 | if (read_expected(EVENT_OP, (char *)":") < 0) | 924 | if (read_expected(EVENT_OP, ":") < 0) |
890 | goto fail_expect; | 925 | goto fail_expect; |
891 | 926 | ||
892 | if (read_expect_type(EVENT_ITEM, &token)) | 927 | if (read_expect_type(EVENT_ITEM, &token)) |
@@ -894,11 +929,34 @@ static int event_read_fields(struct event *event, struct format_field **fields) | |||
894 | field->size = strtoul(token, NULL, 0); | 929 | field->size = strtoul(token, NULL, 0); |
895 | free_token(token); | 930 | free_token(token); |
896 | 931 | ||
897 | if (read_expected(EVENT_OP, (char *)";") < 0) | 932 | if (read_expected(EVENT_OP, ";") < 0) |
898 | goto fail_expect; | 933 | goto fail_expect; |
899 | 934 | ||
900 | if (read_expect_type(EVENT_NEWLINE, &token) < 0) | 935 | type = read_token(&token); |
901 | goto fail; | 936 | if (type != EVENT_NEWLINE) { |
937 | /* newer versions of the kernel have a "signed" type */ | ||
938 | if (test_type_token(type, token, EVENT_ITEM, "signed")) | ||
939 | goto fail; | ||
940 | |||
941 | free_token(token); | ||
942 | |||
943 | if (read_expected(EVENT_OP, ":") < 0) | ||
944 | goto fail_expect; | ||
945 | |||
946 | if (read_expect_type(EVENT_ITEM, &token)) | ||
947 | goto fail; | ||
948 | |||
949 | if (strtoul(token, NULL, 0)) | ||
950 | field->flags |= FIELD_IS_SIGNED; | ||
951 | |||
952 | free_token(token); | ||
953 | if (read_expected(EVENT_OP, ";") < 0) | ||
954 | goto fail_expect; | ||
955 | |||
956 | if (read_expect_type(EVENT_NEWLINE, &token)) | ||
957 | goto fail; | ||
958 | } | ||
959 | |||
902 | free_token(token); | 960 | free_token(token); |
903 | 961 | ||
904 | *fields = field; | 962 | *fields = field; |
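The new branch above lets event_read_fields() accept the optional "signed:" attribute that newer kernels append to every field line of a tracefs format file, recording it in FIELD_IS_SIGNED. A rough sketch of what such a line looks like and how the same three numbers can be pulled out of it; the sample line and the sscanf()-based extraction are illustrative only, the real parser above works token by token.

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* One field line as it appears in a tracefs "format" file; newer
         * kernels append the "signed:" attribute handled by the hunk above. */
        const char *line =
                "\tfield:pid_t common_pid;\toffset:4;\tsize:4;\tsigned:1;";
        const char *p;
        unsigned int offset = 0, size = 0, is_signed = 0;

        if ((p = strstr(line, "offset:")) != NULL)
                sscanf(p, "offset:%u;", &offset);
        if ((p = strstr(line, "size:")) != NULL)
                sscanf(p, "size:%u;", &size);
        if ((p = strstr(line, "signed:")) != NULL)
                sscanf(p, "signed:%u;", &is_signed);

        printf("offset=%u size=%u signed=%u\n", offset, size, is_signed);
        return 0;
}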
@@ -921,10 +979,10 @@ static int event_read_format(struct event *event) | |||
921 | char *token; | 979 | char *token; |
922 | int ret; | 980 | int ret; |
923 | 981 | ||
924 | if (read_expected_item(EVENT_ITEM, (char *)"format") < 0) | 982 | if (read_expected_item(EVENT_ITEM, "format") < 0) |
925 | return -1; | 983 | return -1; |
926 | 984 | ||
927 | if (read_expected(EVENT_OP, (char *)":") < 0) | 985 | if (read_expected(EVENT_OP, ":") < 0) |
928 | return -1; | 986 | return -1; |
929 | 987 | ||
930 | if (read_expect_type(EVENT_NEWLINE, &token)) | 988 | if (read_expect_type(EVENT_NEWLINE, &token)) |
@@ -984,7 +1042,7 @@ process_cond(struct event *event, struct print_arg *top, char **tok) | |||
984 | 1042 | ||
985 | *tok = NULL; | 1043 | *tok = NULL; |
986 | type = process_arg(event, left, &token); | 1044 | type = process_arg(event, left, &token); |
987 | if (test_type_token(type, token, EVENT_OP, (char *)":")) | 1045 | if (test_type_token(type, token, EVENT_OP, ":")) |
988 | goto out_free; | 1046 | goto out_free; |
989 | 1047 | ||
990 | arg->op.op = token; | 1048 | arg->op.op = token; |
@@ -1004,6 +1062,35 @@ out_free: | |||
1004 | return EVENT_ERROR; | 1062 | return EVENT_ERROR; |
1005 | } | 1063 | } |
1006 | 1064 | ||
1065 | static enum event_type | ||
1066 | process_array(struct event *event, struct print_arg *top, char **tok) | ||
1067 | { | ||
1068 | struct print_arg *arg; | ||
1069 | enum event_type type; | ||
1070 | char *token = NULL; | ||
1071 | |||
1072 | arg = malloc_or_die(sizeof(*arg)); | ||
1073 | memset(arg, 0, sizeof(*arg)); | ||
1074 | |||
1075 | *tok = NULL; | ||
1076 | type = process_arg(event, arg, &token); | ||
1077 | if (test_type_token(type, token, EVENT_OP, "]")) | ||
1078 | goto out_free; | ||
1079 | |||
1080 | top->op.right = arg; | ||
1081 | |||
1082 | free_token(token); | ||
1083 | type = read_token_item(&token); | ||
1084 | *tok = token; | ||
1085 | |||
1086 | return type; | ||
1087 | |||
1088 | out_free: | ||
1089 | free_token(*tok); | ||
1090 | free_arg(arg); | ||
1091 | return EVENT_ERROR; | ||
1092 | } | ||
1093 | |||
1007 | static int get_op_prio(char *op) | 1094 | static int get_op_prio(char *op) |
1008 | { | 1095 | { |
1009 | if (!op[1]) { | 1096 | if (!op[1]) { |
@@ -1128,6 +1215,8 @@ process_op(struct event *event, struct print_arg *arg, char **tok) | |||
1128 | strcmp(token, "*") == 0 || | 1215 | strcmp(token, "*") == 0 || |
1129 | strcmp(token, "^") == 0 || | 1216 | strcmp(token, "^") == 0 || |
1130 | strcmp(token, "/") == 0 || | 1217 | strcmp(token, "/") == 0 || |
1218 | strcmp(token, "<") == 0 || | ||
1219 | strcmp(token, ">") == 0 || | ||
1131 | strcmp(token, "==") == 0 || | 1220 | strcmp(token, "==") == 0 || |
1132 | strcmp(token, "!=") == 0) { | 1221 | strcmp(token, "!=") == 0) { |
1133 | 1222 | ||
@@ -1144,17 +1233,46 @@ process_op(struct event *event, struct print_arg *arg, char **tok) | |||
1144 | 1233 | ||
1145 | right = malloc_or_die(sizeof(*right)); | 1234 | right = malloc_or_die(sizeof(*right)); |
1146 | 1235 | ||
1147 | type = process_arg(event, right, tok); | 1236 | type = read_token_item(&token); |
1237 | *tok = token; | ||
1238 | |||
1239 | /* could just be a type pointer */ | ||
1240 | if ((strcmp(arg->op.op, "*") == 0) && | ||
1241 | type == EVENT_DELIM && (strcmp(token, ")") == 0)) { | ||
1242 | if (left->type != PRINT_ATOM) | ||
1243 | die("bad pointer type"); | ||
1244 | left->atom.atom = realloc(left->atom.atom, | ||
1245 | sizeof(left->atom.atom) + 3); | ||
1246 | strcat(left->atom.atom, " *"); | ||
1247 | *arg = *left; | ||
1248 | free(arg); | ||
1249 | |||
1250 | return type; | ||
1251 | } | ||
1252 | |||
1253 | type = process_arg_token(event, right, tok, type); | ||
1148 | 1254 | ||
1149 | arg->op.right = right; | 1255 | arg->op.right = right; |
1150 | 1256 | ||
1257 | } else if (strcmp(token, "[") == 0) { | ||
1258 | |||
1259 | left = malloc_or_die(sizeof(*left)); | ||
1260 | *left = *arg; | ||
1261 | |||
1262 | arg->type = PRINT_OP; | ||
1263 | arg->op.op = token; | ||
1264 | arg->op.left = left; | ||
1265 | |||
1266 | arg->op.prio = 0; | ||
1267 | type = process_array(event, arg, tok); | ||
1268 | |||
1151 | } else { | 1269 | } else { |
1152 | die("unknown op '%s'", token); | 1270 | warning("unknown op '%s'", token); |
1271 | event->flags |= EVENT_FL_FAILED; | ||
1153 | /* the arg is now the left side */ | 1272 | /* the arg is now the left side */ |
1154 | return EVENT_NONE; | 1273 | return EVENT_NONE; |
1155 | } | 1274 | } |
1156 | 1275 | ||
1157 | |||
1158 | if (type == EVENT_OP) { | 1276 | if (type == EVENT_OP) { |
1159 | int prio; | 1277 | int prio; |
1160 | 1278 | ||
@@ -1178,7 +1296,7 @@ process_entry(struct event *event __unused, struct print_arg *arg, | |||
1178 | char *field; | 1296 | char *field; |
1179 | char *token; | 1297 | char *token; |
1180 | 1298 | ||
1181 | if (read_expected(EVENT_OP, (char *)"->") < 0) | 1299 | if (read_expected(EVENT_OP, "->") < 0) |
1182 | return EVENT_ERROR; | 1300 | return EVENT_ERROR; |
1183 | 1301 | ||
1184 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 1302 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
@@ -1188,6 +1306,16 @@ process_entry(struct event *event __unused, struct print_arg *arg, | |||
1188 | arg->type = PRINT_FIELD; | 1306 | arg->type = PRINT_FIELD; |
1189 | arg->field.name = field; | 1307 | arg->field.name = field; |
1190 | 1308 | ||
1309 | if (is_flag_field) { | ||
1310 | arg->field.field = find_any_field(event, arg->field.name); | ||
1311 | arg->field.field->flags |= FIELD_IS_FLAG; | ||
1312 | is_flag_field = 0; | ||
1313 | } else if (is_symbolic_field) { | ||
1314 | arg->field.field = find_any_field(event, arg->field.name); | ||
1315 | arg->field.field->flags |= FIELD_IS_SYMBOLIC; | ||
1316 | is_symbolic_field = 0; | ||
1317 | } | ||
1318 | |||
1191 | type = read_token(&token); | 1319 | type = read_token(&token); |
1192 | *tok = token; | 1320 | *tok = token; |
1193 | 1321 | ||
@@ -1338,14 +1466,14 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) | |||
1338 | do { | 1466 | do { |
1339 | free_token(token); | 1467 | free_token(token); |
1340 | type = read_token_item(&token); | 1468 | type = read_token_item(&token); |
1341 | if (test_type_token(type, token, EVENT_OP, (char *)"{")) | 1469 | if (test_type_token(type, token, EVENT_OP, "{")) |
1342 | break; | 1470 | break; |
1343 | 1471 | ||
1344 | arg = malloc_or_die(sizeof(*arg)); | 1472 | arg = malloc_or_die(sizeof(*arg)); |
1345 | 1473 | ||
1346 | free_token(token); | 1474 | free_token(token); |
1347 | type = process_arg(event, arg, &token); | 1475 | type = process_arg(event, arg, &token); |
1348 | if (test_type_token(type, token, EVENT_DELIM, (char *)",")) | 1476 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
1349 | goto out_free; | 1477 | goto out_free; |
1350 | 1478 | ||
1351 | field = malloc_or_die(sizeof(*field)); | 1479 | field = malloc_or_die(sizeof(*field)); |
@@ -1356,7 +1484,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) | |||
1356 | 1484 | ||
1357 | free_token(token); | 1485 | free_token(token); |
1358 | type = process_arg(event, arg, &token); | 1486 | type = process_arg(event, arg, &token); |
1359 | if (test_type_token(type, token, EVENT_OP, (char *)"}")) | 1487 | if (test_type_token(type, token, EVENT_OP, "}")) |
1360 | goto out_free; | 1488 | goto out_free; |
1361 | 1489 | ||
1362 | value = arg_eval(arg); | 1490 | value = arg_eval(arg); |
@@ -1391,13 +1519,13 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) | |||
1391 | memset(arg, 0, sizeof(*arg)); | 1519 | memset(arg, 0, sizeof(*arg)); |
1392 | arg->type = PRINT_FLAGS; | 1520 | arg->type = PRINT_FLAGS; |
1393 | 1521 | ||
1394 | if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) | 1522 | if (read_expected_item(EVENT_DELIM, "(") < 0) |
1395 | return EVENT_ERROR; | 1523 | return EVENT_ERROR; |
1396 | 1524 | ||
1397 | field = malloc_or_die(sizeof(*field)); | 1525 | field = malloc_or_die(sizeof(*field)); |
1398 | 1526 | ||
1399 | type = process_arg(event, field, &token); | 1527 | type = process_arg(event, field, &token); |
1400 | if (test_type_token(type, token, EVENT_DELIM, (char *)",")) | 1528 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
1401 | goto out_free; | 1529 | goto out_free; |
1402 | 1530 | ||
1403 | arg->flags.field = field; | 1531 | arg->flags.field = field; |
@@ -1408,11 +1536,11 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) | |||
1408 | type = read_token_item(&token); | 1536 | type = read_token_item(&token); |
1409 | } | 1537 | } |
1410 | 1538 | ||
1411 | if (test_type_token(type, token, EVENT_DELIM, (char *)",")) | 1539 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
1412 | goto out_free; | 1540 | goto out_free; |
1413 | 1541 | ||
1414 | type = process_fields(event, &arg->flags.flags, &token); | 1542 | type = process_fields(event, &arg->flags.flags, &token); |
1415 | if (test_type_token(type, token, EVENT_DELIM, (char *)")")) | 1543 | if (test_type_token(type, token, EVENT_DELIM, ")")) |
1416 | goto out_free; | 1544 | goto out_free; |
1417 | 1545 | ||
1418 | free_token(token); | 1546 | free_token(token); |
@@ -1434,19 +1562,19 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok) | |||
1434 | memset(arg, 0, sizeof(*arg)); | 1562 | memset(arg, 0, sizeof(*arg)); |
1435 | arg->type = PRINT_SYMBOL; | 1563 | arg->type = PRINT_SYMBOL; |
1436 | 1564 | ||
1437 | if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) | 1565 | if (read_expected_item(EVENT_DELIM, "(") < 0) |
1438 | return EVENT_ERROR; | 1566 | return EVENT_ERROR; |
1439 | 1567 | ||
1440 | field = malloc_or_die(sizeof(*field)); | 1568 | field = malloc_or_die(sizeof(*field)); |
1441 | 1569 | ||
1442 | type = process_arg(event, field, &token); | 1570 | type = process_arg(event, field, &token); |
1443 | if (test_type_token(type, token, EVENT_DELIM, (char *)",")) | 1571 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
1444 | goto out_free; | 1572 | goto out_free; |
1445 | 1573 | ||
1446 | arg->symbol.field = field; | 1574 | arg->symbol.field = field; |
1447 | 1575 | ||
1448 | type = process_fields(event, &arg->symbol.symbols, &token); | 1576 | type = process_fields(event, &arg->symbol.symbols, &token); |
1449 | if (test_type_token(type, token, EVENT_DELIM, (char *)")")) | 1577 | if (test_type_token(type, token, EVENT_DELIM, ")")) |
1450 | goto out_free; | 1578 | goto out_free; |
1451 | 1579 | ||
1452 | free_token(token); | 1580 | free_token(token); |
@@ -1463,7 +1591,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) | |||
1463 | { | 1591 | { |
1464 | struct print_arg *item_arg; | 1592 | struct print_arg *item_arg; |
1465 | enum event_type type; | 1593 | enum event_type type; |
1466 | int ptr_cast = 0; | ||
1467 | char *token; | 1594 | char *token; |
1468 | 1595 | ||
1469 | type = process_arg(event, arg, &token); | 1596 | type = process_arg(event, arg, &token); |
@@ -1471,28 +1598,13 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) | |||
1471 | if (type == EVENT_ERROR) | 1598 | if (type == EVENT_ERROR) |
1472 | return EVENT_ERROR; | 1599 | return EVENT_ERROR; |
1473 | 1600 | ||
1474 | if (type == EVENT_OP) { | 1601 | if (type == EVENT_OP) |
1475 | /* handle the ptr casts */ | 1602 | type = process_op(event, arg, &token); |
1476 | if (!strcmp(token, "*")) { | ||
1477 | /* | ||
1478 | * FIXME: should we zapp whitespaces before ')' ? | ||
1479 | * (may require a peek_token_item()) | ||
1480 | */ | ||
1481 | if (__peek_char() == ')') { | ||
1482 | ptr_cast = 1; | ||
1483 | free_token(token); | ||
1484 | type = read_token_item(&token); | ||
1485 | } | ||
1486 | } | ||
1487 | if (!ptr_cast) { | ||
1488 | type = process_op(event, arg, &token); | ||
1489 | 1603 | ||
1490 | if (type == EVENT_ERROR) | 1604 | if (type == EVENT_ERROR) |
1491 | return EVENT_ERROR; | 1605 | return EVENT_ERROR; |
1492 | } | ||
1493 | } | ||
1494 | 1606 | ||
1495 | if (test_type_token(type, token, EVENT_DELIM, (char *)")")) { | 1607 | if (test_type_token(type, token, EVENT_DELIM, ")")) { |
1496 | free_token(token); | 1608 | free_token(token); |
1497 | return EVENT_ERROR; | 1609 | return EVENT_ERROR; |
1498 | } | 1610 | } |
@@ -1516,13 +1628,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) | |||
1516 | item_arg = malloc_or_die(sizeof(*item_arg)); | 1628 | item_arg = malloc_or_die(sizeof(*item_arg)); |
1517 | 1629 | ||
1518 | arg->type = PRINT_TYPE; | 1630 | arg->type = PRINT_TYPE; |
1519 | if (ptr_cast) { | ||
1520 | char *old = arg->atom.atom; | ||
1521 | |||
1522 | arg->atom.atom = malloc_or_die(strlen(old + 3)); | ||
1523 | sprintf(arg->atom.atom, "%s *", old); | ||
1524 | free(old); | ||
1525 | } | ||
1526 | arg->typecast.type = arg->atom.atom; | 1631 | arg->typecast.type = arg->atom.atom; |
1527 | arg->typecast.item = item_arg; | 1632 | arg->typecast.item = item_arg; |
1528 | type = process_arg_token(event, item_arg, &token, type); | 1633 | type = process_arg_token(event, item_arg, &token, type); |
@@ -1540,7 +1645,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) | |||
1540 | enum event_type type; | 1645 | enum event_type type; |
1541 | char *token; | 1646 | char *token; |
1542 | 1647 | ||
1543 | if (read_expected(EVENT_DELIM, (char *)"(") < 0) | 1648 | if (read_expected(EVENT_DELIM, "(") < 0) |
1544 | return EVENT_ERROR; | 1649 | return EVENT_ERROR; |
1545 | 1650 | ||
1546 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 1651 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
@@ -1550,7 +1655,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) | |||
1550 | arg->string.string = token; | 1655 | arg->string.string = token; |
1551 | arg->string.offset = -1; | 1656 | arg->string.offset = -1; |
1552 | 1657 | ||
1553 | if (read_expected(EVENT_DELIM, (char *)")") < 0) | 1658 | if (read_expected(EVENT_DELIM, ")") < 0) |
1554 | return EVENT_ERROR; | 1659 | return EVENT_ERROR; |
1555 | 1660 | ||
1556 | type = read_token(&token); | 1661 | type = read_token(&token); |
@@ -1578,9 +1683,11 @@ process_arg_token(struct event *event, struct print_arg *arg, | |||
1578 | type = process_entry(event, arg, &token); | 1683 | type = process_entry(event, arg, &token); |
1579 | } else if (strcmp(token, "__print_flags") == 0) { | 1684 | } else if (strcmp(token, "__print_flags") == 0) { |
1580 | free_token(token); | 1685 | free_token(token); |
1686 | is_flag_field = 1; | ||
1581 | type = process_flags(event, arg, &token); | 1687 | type = process_flags(event, arg, &token); |
1582 | } else if (strcmp(token, "__print_symbolic") == 0) { | 1688 | } else if (strcmp(token, "__print_symbolic") == 0) { |
1583 | free_token(token); | 1689 | free_token(token); |
1690 | is_symbolic_field = 1; | ||
1584 | type = process_symbols(event, arg, &token); | 1691 | type = process_symbols(event, arg, &token); |
1585 | } else if (strcmp(token, "__get_str") == 0) { | 1692 | } else if (strcmp(token, "__get_str") == 0) { |
1586 | free_token(token); | 1693 | free_token(token); |
@@ -1637,12 +1744,18 @@ process_arg_token(struct event *event, struct print_arg *arg, | |||
1637 | 1744 | ||
1638 | static int event_read_print_args(struct event *event, struct print_arg **list) | 1745 | static int event_read_print_args(struct event *event, struct print_arg **list) |
1639 | { | 1746 | { |
1640 | enum event_type type; | 1747 | enum event_type type = EVENT_ERROR; |
1641 | struct print_arg *arg; | 1748 | struct print_arg *arg; |
1642 | char *token; | 1749 | char *token; |
1643 | int args = 0; | 1750 | int args = 0; |
1644 | 1751 | ||
1645 | do { | 1752 | do { |
1753 | if (type == EVENT_NEWLINE) { | ||
1754 | free_token(token); | ||
1755 | type = read_token_item(&token); | ||
1756 | continue; | ||
1757 | } | ||
1758 | |||
1646 | arg = malloc_or_die(sizeof(*arg)); | 1759 | arg = malloc_or_die(sizeof(*arg)); |
1647 | memset(arg, 0, sizeof(*arg)); | 1760 | memset(arg, 0, sizeof(*arg)); |
1648 | 1761 | ||
@@ -1683,18 +1796,19 @@ static int event_read_print(struct event *event) | |||
1683 | char *token; | 1796 | char *token; |
1684 | int ret; | 1797 | int ret; |
1685 | 1798 | ||
1686 | if (read_expected_item(EVENT_ITEM, (char *)"print") < 0) | 1799 | if (read_expected_item(EVENT_ITEM, "print") < 0) |
1687 | return -1; | 1800 | return -1; |
1688 | 1801 | ||
1689 | if (read_expected(EVENT_ITEM, (char *)"fmt") < 0) | 1802 | if (read_expected(EVENT_ITEM, "fmt") < 0) |
1690 | return -1; | 1803 | return -1; |
1691 | 1804 | ||
1692 | if (read_expected(EVENT_OP, (char *)":") < 0) | 1805 | if (read_expected(EVENT_OP, ":") < 0) |
1693 | return -1; | 1806 | return -1; |
1694 | 1807 | ||
1695 | if (read_expect_type(EVENT_DQUOTE, &token) < 0) | 1808 | if (read_expect_type(EVENT_DQUOTE, &token) < 0) |
1696 | goto fail; | 1809 | goto fail; |
1697 | 1810 | ||
1811 | concat: | ||
1698 | event->print_fmt.format = token; | 1812 | event->print_fmt.format = token; |
1699 | event->print_fmt.args = NULL; | 1813 | event->print_fmt.args = NULL; |
1700 | 1814 | ||
@@ -1704,7 +1818,22 @@ static int event_read_print(struct event *event) | |||
1704 | if (type == EVENT_NONE) | 1818 | if (type == EVENT_NONE) |
1705 | return 0; | 1819 | return 0; |
1706 | 1820 | ||
1707 | if (test_type_token(type, token, EVENT_DELIM, (char *)",")) | 1821 | /* Handle concatenation of print lines */ |
1822 | if (type == EVENT_DQUOTE) { | ||
1823 | char *cat; | ||
1824 | |||
1825 | cat = malloc_or_die(strlen(event->print_fmt.format) + | ||
1826 | strlen(token) + 1); | ||
1827 | strcpy(cat, event->print_fmt.format); | ||
1828 | strcat(cat, token); | ||
1829 | free_token(token); | ||
1830 | free_token(event->print_fmt.format); | ||
1831 | event->print_fmt.format = NULL; | ||
1832 | token = cat; | ||
1833 | goto concat; | ||
1834 | } | ||
1835 | |||
1836 | if (test_type_token(type, token, EVENT_DELIM, ",")) | ||
1708 | goto fail; | 1837 | goto fail; |
1709 | 1838 | ||
1710 | free_token(token); | 1839 | free_token(token); |
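The concat: label above handles TP_printk() formats written as several adjacent double-quoted strings: each additional quoted piece is glued onto the format collected so far before argument parsing continues. The same malloc/strcpy/strcat pattern in isolation; concat() and the two sample pieces are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join two format fragments into one freshly allocated buffer. */
static char *concat(const char *a, const char *b)
{
        char *cat = malloc(strlen(a) + strlen(b) + 1);

        if (!cat)
                return NULL;
        strcpy(cat, a);
        strcat(cat, b);
        return cat;
}

int main(void)
{
        char *fmt = concat("irq=%d ", "name=%s\\n");

        if (fmt) {
                printf("%s\n", fmt);
                free(fmt);
        }
        return 0;
}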
@@ -1713,7 +1842,7 @@ static int event_read_print(struct event *event) | |||
1713 | if (ret < 0) | 1842 | if (ret < 0) |
1714 | return -1; | 1843 | return -1; |
1715 | 1844 | ||
1716 | return 0; | 1845 | return ret; |
1717 | 1846 | ||
1718 | fail: | 1847 | fail: |
1719 | free_token(token); | 1848 | free_token(token); |
@@ -1759,7 +1888,7 @@ find_any_field(struct event *event, const char *name) | |||
1759 | return find_field(event, name); | 1888 | return find_field(event, name); |
1760 | } | 1889 | } |
1761 | 1890 | ||
1762 | static unsigned long long read_size(void *ptr, int size) | 1891 | unsigned long long read_size(void *ptr, int size) |
1763 | { | 1892 | { |
1764 | switch (size) { | 1893 | switch (size) { |
1765 | case 1: | 1894 | case 1: |
@@ -1822,37 +1951,67 @@ static int get_common_info(const char *type, int *offset, int *size) | |||
1822 | return 0; | 1951 | return 0; |
1823 | } | 1952 | } |
1824 | 1953 | ||
1825 | int trace_parse_common_type(void *data) | 1954 | static int __parse_common(void *data, int *size, int *offset, |
1955 | const char *name) | ||
1826 | { | 1956 | { |
1827 | static int type_offset; | ||
1828 | static int type_size; | ||
1829 | int ret; | 1957 | int ret; |
1830 | 1958 | ||
1831 | if (!type_size) { | 1959 | if (!*size) { |
1832 | ret = get_common_info("common_type", | 1960 | ret = get_common_info(name, offset, size); |
1833 | &type_offset, | ||
1834 | &type_size); | ||
1835 | if (ret < 0) | 1961 | if (ret < 0) |
1836 | return ret; | 1962 | return ret; |
1837 | } | 1963 | } |
1838 | return read_size(data + type_offset, type_size); | 1964 | return read_size(data + *offset, *size); |
1965 | } | ||
1966 | |||
1967 | int trace_parse_common_type(void *data) | ||
1968 | { | ||
1969 | static int type_offset; | ||
1970 | static int type_size; | ||
1971 | |||
1972 | return __parse_common(data, &type_size, &type_offset, | ||
1973 | "common_type"); | ||
1839 | } | 1974 | } |
1840 | 1975 | ||
1841 | static int parse_common_pid(void *data) | 1976 | int trace_parse_common_pid(void *data) |
1842 | { | 1977 | { |
1843 | static int pid_offset; | 1978 | static int pid_offset; |
1844 | static int pid_size; | 1979 | static int pid_size; |
1980 | |||
1981 | return __parse_common(data, &pid_size, &pid_offset, | ||
1982 | "common_pid"); | ||
1983 | } | ||
1984 | |||
1985 | int parse_common_pc(void *data) | ||
1986 | { | ||
1987 | static int pc_offset; | ||
1988 | static int pc_size; | ||
1989 | |||
1990 | return __parse_common(data, &pc_size, &pc_offset, | ||
1991 | "common_preempt_count"); | ||
1992 | } | ||
1993 | |||
1994 | int parse_common_flags(void *data) | ||
1995 | { | ||
1996 | static int flags_offset; | ||
1997 | static int flags_size; | ||
1998 | |||
1999 | return __parse_common(data, &flags_size, &flags_offset, | ||
2000 | "common_flags"); | ||
2001 | } | ||
2002 | |||
2003 | int parse_common_lock_depth(void *data) | ||
2004 | { | ||
2005 | static int ld_offset; | ||
2006 | static int ld_size; | ||
1845 | int ret; | 2007 | int ret; |
1846 | 2008 | ||
1847 | if (!pid_size) { | 2009 | ret = __parse_common(data, &ld_size, &ld_offset, |
1848 | ret = get_common_info("common_pid", | 2010 | "common_lock_depth"); |
1849 | &pid_offset, | 2011 | if (ret < 0) |
1850 | &pid_size); | 2012 | return -1; |
1851 | if (ret < 0) | ||
1852 | return ret; | ||
1853 | } | ||
1854 | 2013 | ||
1855 | return read_size(data + pid_offset, pid_size); | 2014 | return ret; |
1856 | } | 2015 | } |
1857 | 2016 | ||
1858 | struct event *trace_find_event(int id) | 2017 | struct event *trace_find_event(int id) |
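The refactor above folds the nearly identical parse_common_*() helpers into a single __parse_common() that looks up a common field's offset and size once, caches them in the caller's static variables, and afterwards only pays for the read. A self-contained toy showing the same caching shape; struct toy_record, toy_common_info() and toy_parse_common() are invented stand-ins, not perf code.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* A toy record standing in for raw trace event data. */
struct toy_record {
        unsigned short common_type;
        unsigned char  common_flags;
        unsigned char  common_preempt_count;
        int            common_pid;
};

/* Stand-in for get_common_info(): offset/size of a named common field. */
static int toy_common_info(const char *name, int *offset, int *size)
{
        if (strcmp(name, "common_pid") == 0) {
                *offset = offsetof(struct toy_record, common_pid);
                *size = sizeof(int);
                return 0;
        }
        return -1;
}

/* Same shape as __parse_common(): the lookup only runs while *size is
 * still zero, so repeat calls reuse the cached offset and size. */
static int toy_parse_common(void *data, int *size, int *offset,
                            const char *name)
{
        int val;

        if (!*size && toy_common_info(name, offset, size) < 0)
                return -1;
        memcpy(&val, (char *)data + *offset, sizeof(val));
        return val;
}

int main(void)
{
        static int pid_offset, pid_size;        /* the per-field cache */
        struct toy_record rec = { .common_type = 1, .common_pid = 4242 };

        printf("pid=%d\n",
               toy_parse_common(&rec, &pid_size, &pid_offset, "common_pid"));
        printf("pid again (cached)=%d\n",
               toy_parse_common(&rec, &pid_size, &pid_offset, "common_pid"));
        return 0;
}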
@@ -1866,11 +2025,20 @@ struct event *trace_find_event(int id) | |||
1866 | return event; | 2025 | return event; |
1867 | } | 2026 | } |
1868 | 2027 | ||
2028 | struct event *trace_find_next_event(struct event *event) | ||
2029 | { | ||
2030 | if (!event) | ||
2031 | return event_list; | ||
2032 | |||
2033 | return event->next; | ||
2034 | } | ||
2035 | |||
1869 | static unsigned long long eval_num_arg(void *data, int size, | 2036 | static unsigned long long eval_num_arg(void *data, int size, |
1870 | struct event *event, struct print_arg *arg) | 2037 | struct event *event, struct print_arg *arg) |
1871 | { | 2038 | { |
1872 | unsigned long long val = 0; | 2039 | unsigned long long val = 0; |
1873 | unsigned long long left, right; | 2040 | unsigned long long left, right; |
2041 | struct print_arg *larg; | ||
1874 | 2042 | ||
1875 | switch (arg->type) { | 2043 | switch (arg->type) { |
1876 | case PRINT_NULL: | 2044 | case PRINT_NULL: |
@@ -1897,6 +2065,26 @@ static unsigned long long eval_num_arg(void *data, int size, | |||
1897 | return 0; | 2065 | return 0; |
1898 | break; | 2066 | break; |
1899 | case PRINT_OP: | 2067 | case PRINT_OP: |
2068 | if (strcmp(arg->op.op, "[") == 0) { | ||
2069 | /* | ||
2070 | * Arrays are special, since we don't want | ||
2071 | * to read the arg as is. | ||
2072 | */ | ||
2073 | if (arg->op.left->type != PRINT_FIELD) | ||
2074 | goto default_op; /* oops, all bets off */ | ||
2075 | larg = arg->op.left; | ||
2076 | if (!larg->field.field) { | ||
2077 | larg->field.field = | ||
2078 | find_any_field(event, larg->field.name); | ||
2079 | if (!larg->field.field) | ||
2080 | die("field %s not found", larg->field.name); | ||
2081 | } | ||
2082 | right = eval_num_arg(data, size, event, arg->op.right); | ||
2083 | val = read_size(data + larg->field.field->offset + | ||
2084 | right * long_size, long_size); | ||
2085 | break; | ||
2086 | } | ||
2087 | default_op: | ||
1900 | left = eval_num_arg(data, size, event, arg->op.left); | 2088 | left = eval_num_arg(data, size, event, arg->op.left); |
1901 | right = eval_num_arg(data, size, event, arg->op.right); | 2089 | right = eval_num_arg(data, size, event, arg->op.right); |
1902 | switch (arg->op.op[0]) { | 2090 | switch (arg->op.op[0]) { |
@@ -1947,6 +2135,12 @@ static unsigned long long eval_num_arg(void *data, int size, | |||
1947 | die("unknown op '%s'", arg->op.op); | 2135 | die("unknown op '%s'", arg->op.op); |
1948 | val = left == right; | 2136 | val = left == right; |
1949 | break; | 2137 | break; |
2138 | case '-': | ||
2139 | val = left - right; | ||
2140 | break; | ||
2141 | case '+': | ||
2142 | val = left + right; | ||
2143 | break; | ||
1950 | default: | 2144 | default: |
1951 | die("unknown op '%s'", arg->op.op); | 2145 | die("unknown op '%s'", arg->op.op); |
1952 | } | 2146 | } |
@@ -1978,7 +2172,7 @@ static const struct flag flags[] = { | |||
1978 | { "HRTIMER_RESTART", 1 }, | 2172 | { "HRTIMER_RESTART", 1 }, |
1979 | }; | 2173 | }; |
1980 | 2174 | ||
1981 | static unsigned long long eval_flag(const char *flag) | 2175 | unsigned long long eval_flag(const char *flag) |
1982 | { | 2176 | { |
1983 | int i; | 2177 | int i; |
1984 | 2178 | ||
@@ -2145,8 +2339,9 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc | |||
2145 | case 'u': | 2339 | case 'u': |
2146 | case 'x': | 2340 | case 'x': |
2147 | case 'i': | 2341 | case 'i': |
2148 | bptr = (void *)(((unsigned long)bptr + (long_size - 1)) & | 2342 | /* the pointers are always 4 bytes aligned */ |
2149 | ~(long_size - 1)); | 2343 | bptr = (void *)(((unsigned long)bptr + 3) & |
2344 | ~3); | ||
2150 | switch (ls) { | 2345 | switch (ls) { |
2151 | case 0: | 2346 | case 0: |
2152 | case 1: | 2347 | case 1: |
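The change above fixes the alignment of bprintk arguments at a constant 4 bytes rather than the host long size, using the usual round-up-to-a-power-of-two idiom. A tiny demonstration of that idiom; ALIGN4 is just an illustrative macro name.

#include <stdio.h>

/* Round x up to the next multiple of 4: add (4 - 1), then clear the
 * low two bits. */
#define ALIGN4(x) (((x) + 3UL) & ~3UL)

int main(void)
{
        unsigned long offs;

        for (offs = 0; offs < 9; offs++)
                printf("%lu -> %lu\n", offs, ALIGN4(offs));
        return 0;
}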
@@ -2270,7 +2465,27 @@ static void pretty_print(void *data, int size, struct event *event) | |||
2270 | 2465 | ||
2271 | for (; *ptr; ptr++) { | 2466 | for (; *ptr; ptr++) { |
2272 | ls = 0; | 2467 | ls = 0; |
2273 | if (*ptr == '%') { | 2468 | if (*ptr == '\\') { |
2469 | ptr++; | ||
2470 | switch (*ptr) { | ||
2471 | case 'n': | ||
2472 | printf("\n"); | ||
2473 | break; | ||
2474 | case 't': | ||
2475 | printf("\t"); | ||
2476 | break; | ||
2477 | case 'r': | ||
2478 | printf("\r"); | ||
2479 | break; | ||
2480 | case '\\': | ||
2481 | printf("\\"); | ||
2482 | break; | ||
2483 | default: | ||
2484 | printf("%c", *ptr); | ||
2485 | break; | ||
2486 | } | ||
2487 | |||
2488 | } else if (*ptr == '%') { | ||
2274 | saveptr = ptr; | 2489 | saveptr = ptr; |
2275 | show_func = 0; | 2490 | show_func = 0; |
2276 | cont_process: | 2491 | cont_process: |
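The new branch above expands backslash escapes that arrive as literal two-character sequences in the recorded format string. The same expansion as a standalone helper, under the assumption that only \n, \t, \r and \\ matter; print_expanded() is a made-up name.

#include <stdio.h>

/* Walk the format string and turn the literal sequences \n, \t, \r and
 * \\ into the corresponding output characters. */
static void print_expanded(const char *p)
{
        for (; *p; p++) {
                if (*p != '\\') {
                        putchar(*p);
                        continue;
                }
                switch (*++p) {
                case 'n': putchar('\n'); break;
                case 't': putchar('\t'); break;
                case 'r': putchar('\r'); break;
                case '\\': putchar('\\'); break;
                case '\0': return;              /* trailing backslash */
                default: putchar(*p); break;
                }
        }
}

int main(void)
{
        print_expanded("irq=5\\tname=eth0\\n");
        return 0;
}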
@@ -2377,6 +2592,41 @@ static inline int log10_cpu(int nb) | |||
2377 | return 1; | 2592 | return 1; |
2378 | } | 2593 | } |
2379 | 2594 | ||
2595 | static void print_lat_fmt(void *data, int size __unused) | ||
2596 | { | ||
2597 | unsigned int lat_flags; | ||
2598 | unsigned int pc; | ||
2599 | int lock_depth; | ||
2600 | int hardirq; | ||
2601 | int softirq; | ||
2602 | |||
2603 | lat_flags = parse_common_flags(data); | ||
2604 | pc = parse_common_pc(data); | ||
2605 | lock_depth = parse_common_lock_depth(data); | ||
2606 | |||
2607 | hardirq = lat_flags & TRACE_FLAG_HARDIRQ; | ||
2608 | softirq = lat_flags & TRACE_FLAG_SOFTIRQ; | ||
2609 | |||
2610 | printf("%c%c%c", | ||
2611 | (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | ||
2612 | (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ? | ||
2613 | 'X' : '.', | ||
2614 | (lat_flags & TRACE_FLAG_NEED_RESCHED) ? | ||
2615 | 'N' : '.', | ||
2616 | (hardirq && softirq) ? 'H' : | ||
2617 | hardirq ? 'h' : softirq ? 's' : '.'); | ||
2618 | |||
2619 | if (pc) | ||
2620 | printf("%x", pc); | ||
2621 | else | ||
2622 | printf("."); | ||
2623 | |||
2624 | if (lock_depth < 0) | ||
2625 | printf("."); | ||
2626 | else | ||
2627 | printf("%d", lock_depth); | ||
2628 | } | ||
2629 | |||
2380 | /* taken from Linux, written by Frederic Weisbecker */ | 2630 | /* taken from Linux, written by Frederic Weisbecker */ |
2381 | static void print_graph_cpu(int cpu) | 2631 | static void print_graph_cpu(int cpu) |
2382 | { | 2632 | { |
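print_lat_fmt() above condenses the common_flags, preempt_count and lock_depth fields into the short latency-format prefix that perf prints when latency_format is set. A standalone sketch of just the three-character flags part; the TRACE_FLAG_* values are assumed to mirror the kernel's enum of that era, and print_lat_flags() is an illustrative name.

#include <stdio.h>

/* Assumed flag bits, mirroring the kernel's trace flag enum. */
#define TRACE_FLAG_IRQS_OFF        0x01
#define TRACE_FLAG_IRQS_NOSUPPORT  0x02
#define TRACE_FLAG_NEED_RESCHED    0x04
#define TRACE_FLAG_HARDIRQ         0x08
#define TRACE_FLAG_SOFTIRQ         0x10

static void print_lat_flags(unsigned int f)
{
        int hardirq = f & TRACE_FLAG_HARDIRQ;
        int softirq = f & TRACE_FLAG_SOFTIRQ;

        printf("%c%c%c\n",
               (f & TRACE_FLAG_IRQS_OFF) ? 'd' :
               (f & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
               (f & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.',
               (hardirq && softirq) ? 'H' :
               hardirq ? 'h' : softirq ? 's' : '.');
}

int main(void)
{
        print_lat_flags(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_SOFTIRQ);  /* "d.s" */
        return 0;
}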
@@ -2452,7 +2702,7 @@ get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func, | |||
2452 | if (!(event->flags & EVENT_FL_ISFUNCRET)) | 2702 | if (!(event->flags & EVENT_FL_ISFUNCRET)) |
2453 | return NULL; | 2703 | return NULL; |
2454 | 2704 | ||
2455 | pid = parse_common_pid(next->data); | 2705 | pid = trace_parse_common_pid(next->data); |
2456 | field = find_field(event, "func"); | 2706 | field = find_field(event, "func"); |
2457 | if (!field) | 2707 | if (!field) |
2458 | die("function return does not have field func"); | 2708 | die("function return does not have field func"); |
@@ -2620,6 +2870,11 @@ pretty_print_func_ent(void *data, int size, struct event *event, | |||
2620 | 2870 | ||
2621 | printf(" | "); | 2871 | printf(" | "); |
2622 | 2872 | ||
2873 | if (latency_format) { | ||
2874 | print_lat_fmt(data, size); | ||
2875 | printf(" | "); | ||
2876 | } | ||
2877 | |||
2623 | field = find_field(event, "func"); | 2878 | field = find_field(event, "func"); |
2624 | if (!field) | 2879 | if (!field) |
2625 | die("function entry does not have func field"); | 2880 | die("function entry does not have func field"); |
@@ -2663,6 +2918,11 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event, | |||
2663 | 2918 | ||
2664 | printf(" | "); | 2919 | printf(" | "); |
2665 | 2920 | ||
2921 | if (latency_format) { | ||
2922 | print_lat_fmt(data, size); | ||
2923 | printf(" | "); | ||
2924 | } | ||
2925 | |||
2666 | field = find_field(event, "rettime"); | 2926 | field = find_field(event, "rettime"); |
2667 | if (!field) | 2927 | if (!field) |
2668 | die("can't find rettime in return graph"); | 2928 | die("can't find rettime in return graph"); |
@@ -2724,19 +2984,30 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, | |||
2724 | 2984 | ||
2725 | event = trace_find_event(type); | 2985 | event = trace_find_event(type); |
2726 | if (!event) { | 2986 | if (!event) { |
2727 | printf("ug! no event found for type %d\n", type); | 2987 | warning("ug! no event found for type %d", type); |
2728 | return; | 2988 | return; |
2729 | } | 2989 | } |
2730 | 2990 | ||
2731 | pid = parse_common_pid(data); | 2991 | pid = trace_parse_common_pid(data); |
2732 | 2992 | ||
2733 | if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET)) | 2993 | if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET)) |
2734 | return pretty_print_func_graph(data, size, event, cpu, | 2994 | return pretty_print_func_graph(data, size, event, cpu, |
2735 | pid, comm, secs, usecs); | 2995 | pid, comm, secs, usecs); |
2736 | 2996 | ||
2737 | printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ", | 2997 | if (latency_format) { |
2738 | comm, pid, cpu, | 2998 | printf("%8.8s-%-5d %3d", |
2739 | secs, nsecs, event->name); | 2999 | comm, pid, cpu); |
3000 | print_lat_fmt(data, size); | ||
3001 | } else | ||
3002 | printf("%16s-%-5d [%03d]", comm, pid, cpu); | ||
3003 | |||
3004 | printf(" %5lu.%06lu: %s: ", secs, usecs, event->name); | ||
3005 | |||
3006 | if (event->flags & EVENT_FL_FAILED) { | ||
3007 | printf("EVENT '%s' FAILED TO PARSE\n", | ||
3008 | event->name); | ||
3009 | return; | ||
3010 | } | ||
2740 | 3011 | ||
2741 | pretty_print(data, size, event); | 3012 | pretty_print(data, size, event); |
2742 | printf("\n"); | 3013 | printf("\n"); |
@@ -2807,46 +3078,71 @@ static void print_args(struct print_arg *args) | |||
2807 | } | 3078 | } |
2808 | } | 3079 | } |
2809 | 3080 | ||
2810 | static void parse_header_field(char *type, | 3081 | static void parse_header_field(const char *field, |
2811 | int *offset, int *size) | 3082 | int *offset, int *size) |
2812 | { | 3083 | { |
2813 | char *token; | 3084 | char *token; |
3085 | int type; | ||
2814 | 3086 | ||
2815 | if (read_expected(EVENT_ITEM, (char *)"field") < 0) | 3087 | if (read_expected(EVENT_ITEM, "field") < 0) |
2816 | return; | 3088 | return; |
2817 | if (read_expected(EVENT_OP, (char *)":") < 0) | 3089 | if (read_expected(EVENT_OP, ":") < 0) |
2818 | return; | 3090 | return; |
3091 | |||
2819 | /* type */ | 3092 | /* type */ |
2820 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 3093 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
2821 | return; | 3094 | goto fail; |
2822 | free_token(token); | 3095 | free_token(token); |
2823 | 3096 | ||
2824 | if (read_expected(EVENT_ITEM, type) < 0) | 3097 | if (read_expected(EVENT_ITEM, field) < 0) |
2825 | return; | 3098 | return; |
2826 | if (read_expected(EVENT_OP, (char *)";") < 0) | 3099 | if (read_expected(EVENT_OP, ";") < 0) |
2827 | return; | 3100 | return; |
2828 | if (read_expected(EVENT_ITEM, (char *)"offset") < 0) | 3101 | if (read_expected(EVENT_ITEM, "offset") < 0) |
2829 | return; | 3102 | return; |
2830 | if (read_expected(EVENT_OP, (char *)":") < 0) | 3103 | if (read_expected(EVENT_OP, ":") < 0) |
2831 | return; | 3104 | return; |
2832 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 3105 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
2833 | return; | 3106 | goto fail; |
2834 | *offset = atoi(token); | 3107 | *offset = atoi(token); |
2835 | free_token(token); | 3108 | free_token(token); |
2836 | if (read_expected(EVENT_OP, (char *)";") < 0) | 3109 | if (read_expected(EVENT_OP, ";") < 0) |
2837 | return; | 3110 | return; |
2838 | if (read_expected(EVENT_ITEM, (char *)"size") < 0) | 3111 | if (read_expected(EVENT_ITEM, "size") < 0) |
2839 | return; | 3112 | return; |
2840 | if (read_expected(EVENT_OP, (char *)":") < 0) | 3113 | if (read_expected(EVENT_OP, ":") < 0) |
2841 | return; | 3114 | return; |
2842 | if (read_expect_type(EVENT_ITEM, &token) < 0) | 3115 | if (read_expect_type(EVENT_ITEM, &token) < 0) |
2843 | return; | 3116 | goto fail; |
2844 | *size = atoi(token); | 3117 | *size = atoi(token); |
2845 | free_token(token); | 3118 | free_token(token); |
2846 | if (read_expected(EVENT_OP, (char *)";") < 0) | 3119 | if (read_expected(EVENT_OP, ";") < 0) |
2847 | return; | ||
2848 | if (read_expect_type(EVENT_NEWLINE, &token) < 0) | ||
2849 | return; | 3120 | return; |
3121 | type = read_token(&token); | ||
3122 | if (type != EVENT_NEWLINE) { | ||
3123 | /* newer versions of the kernel have a "signed" type */ | ||
3124 | if (type != EVENT_ITEM) | ||
3125 | goto fail; | ||
3126 | |||
3127 | if (strcmp(token, "signed") != 0) | ||
3128 | goto fail; | ||
3129 | |||
3130 | free_token(token); | ||
3131 | |||
3132 | if (read_expected(EVENT_OP, ":") < 0) | ||
3133 | return; | ||
3134 | |||
3135 | if (read_expect_type(EVENT_ITEM, &token)) | ||
3136 | goto fail; | ||
3137 | |||
3138 | free_token(token); | ||
3139 | if (read_expected(EVENT_OP, ";") < 0) | ||
3140 | return; | ||
3141 | |||
3142 | if (read_expect_type(EVENT_NEWLINE, &token)) | ||
3143 | goto fail; | ||
3144 | } | ||
3145 | fail: | ||
2850 | free_token(token); | 3146 | free_token(token); |
2851 | } | 3147 | } |
2852 | 3148 | ||
@@ -2854,11 +3150,11 @@ int parse_header_page(char *buf, unsigned long size) | |||
2854 | { | 3150 | { |
2855 | init_input_buf(buf, size); | 3151 | init_input_buf(buf, size); |
2856 | 3152 | ||
2857 | parse_header_field((char *)"timestamp", &header_page_ts_offset, | 3153 | parse_header_field("timestamp", &header_page_ts_offset, |
2858 | &header_page_ts_size); | 3154 | &header_page_ts_size); |
2859 | parse_header_field((char *)"commit", &header_page_size_offset, | 3155 | parse_header_field("commit", &header_page_size_offset, |
2860 | &header_page_size_size); | 3156 | &header_page_size_size); |
2861 | parse_header_field((char *)"data", &header_page_data_offset, | 3157 | parse_header_field("data", &header_page_data_offset, |
2862 | &header_page_data_size); | 3158 | &header_page_data_size); |
2863 | 3159 | ||
2864 | return 0; | 3160 | return 0; |
@@ -2909,6 +3205,9 @@ int parse_ftrace_file(char *buf, unsigned long size) | |||
2909 | if (ret < 0) | 3205 | if (ret < 0) |
2910 | die("failed to read ftrace event print fmt"); | 3206 | die("failed to read ftrace event print fmt"); |
2911 | 3207 | ||
3208 | /* New ftrace handles args */ | ||
3209 | if (ret > 0) | ||
3210 | return 0; | ||
2912 | /* | 3211 | /* |
2913 | * The arguments for ftrace files are parsed by the fields. | 3212 | * The arguments for ftrace files are parsed by the fields. |
2914 | * Set up the fields as their arguments. | 3213 | * Set up the fields as their arguments. |
@@ -2926,7 +3225,7 @@ int parse_ftrace_file(char *buf, unsigned long size) | |||
2926 | return 0; | 3225 | return 0; |
2927 | } | 3226 | } |
2928 | 3227 | ||
2929 | int parse_event_file(char *buf, unsigned long size, char *system__unused __unused) | 3228 | int parse_event_file(char *buf, unsigned long size, char *sys) |
2930 | { | 3229 | { |
2931 | struct event *event; | 3230 | struct event *event; |
2932 | int ret; | 3231 | int ret; |
@@ -2946,12 +3245,18 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse | |||
2946 | die("failed to read event id"); | 3245 | die("failed to read event id"); |
2947 | 3246 | ||
2948 | ret = event_read_format(event); | 3247 | ret = event_read_format(event); |
2949 | if (ret < 0) | 3248 | if (ret < 0) { |
2950 | die("failed to read event format"); | 3249 | warning("failed to read event format for %s", event->name); |
3250 | goto event_failed; | ||
3251 | } | ||
2951 | 3252 | ||
2952 | ret = event_read_print(event); | 3253 | ret = event_read_print(event); |
2953 | if (ret < 0) | 3254 | if (ret < 0) { |
2954 | die("failed to read event print fmt"); | 3255 | warning("failed to read event print fmt for %s", event->name); |
3256 | goto event_failed; | ||
3257 | } | ||
3258 | |||
3259 | event->system = strdup(sys); | ||
2955 | 3260 | ||
2956 | #define PRINT_ARGS 0 | 3261 | #define PRINT_ARGS 0 |
2957 | if (PRINT_ARGS && event->print_fmt.args) | 3262 | if (PRINT_ARGS && event->print_fmt.args) |
@@ -2959,6 +3264,12 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse | |||
2959 | 3264 | ||
2960 | add_event(event); | 3265 | add_event(event); |
2961 | return 0; | 3266 | return 0; |
3267 | |||
3268 | event_failed: | ||
3269 | event->flags |= EVENT_FL_FAILED; | ||
3270 | /* still add it even if it failed */ | ||
3271 | add_event(event); | ||
3272 | return -1; | ||
2962 | } | 3273 | } |
2963 | 3274 | ||
2964 | void parse_set_info(int nr_cpus, int long_sz) | 3275 | void parse_set_info(int nr_cpus, int long_sz) |
diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c new file mode 100644 index 000000000000..51e833fd58c3 --- /dev/null +++ b/tools/perf/util/trace-event-perl.c | |||
@@ -0,0 +1,598 @@ | |||
1 | /* | ||
2 | * trace-event-perl. Feed perf trace events to an embedded Perl interpreter. | ||
3 | * | ||
4 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <stdio.h> | ||
23 | #include <stdlib.h> | ||
24 | #include <string.h> | ||
25 | #include <ctype.h> | ||
26 | #include <errno.h> | ||
27 | |||
28 | #include "../perf.h" | ||
29 | #include "util.h" | ||
30 | #include "trace-event.h" | ||
31 | #include "trace-event-perl.h" | ||
32 | |||
33 | void xs_init(pTHX); | ||
34 | |||
35 | void boot_Perf__Trace__Context(pTHX_ CV *cv); | ||
36 | void boot_DynaLoader(pTHX_ CV *cv); | ||
37 | |||
38 | void xs_init(pTHX) | ||
39 | { | ||
40 | const char *file = __FILE__; | ||
41 | dXSUB_SYS; | ||
42 | |||
43 | newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, | ||
44 | file); | ||
45 | newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); | ||
46 | } | ||
47 | |||
48 | INTERP my_perl; | ||
49 | |||
50 | #define FTRACE_MAX_EVENT \ | ||
51 | ((1 << (sizeof(unsigned short) * 8)) - 1) | ||
52 | |||
53 | struct event *events[FTRACE_MAX_EVENT]; | ||
54 | |||
55 | static struct scripting_context *scripting_context; | ||
56 | |||
57 | static char *cur_field_name; | ||
58 | static int zero_flag_atom; | ||
59 | |||
60 | static void define_symbolic_value(const char *ev_name, | ||
61 | const char *field_name, | ||
62 | const char *field_value, | ||
63 | const char *field_str) | ||
64 | { | ||
65 | unsigned long long value; | ||
66 | dSP; | ||
67 | |||
68 | value = eval_flag(field_value); | ||
69 | |||
70 | ENTER; | ||
71 | SAVETMPS; | ||
72 | PUSHMARK(SP); | ||
73 | |||
74 | XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); | ||
75 | XPUSHs(sv_2mortal(newSVpv(field_name, 0))); | ||
76 | XPUSHs(sv_2mortal(newSVuv(value))); | ||
77 | XPUSHs(sv_2mortal(newSVpv(field_str, 0))); | ||
78 | |||
79 | PUTBACK; | ||
80 | if (get_cv("main::define_symbolic_value", 0)) | ||
81 | call_pv("main::define_symbolic_value", G_SCALAR); | ||
82 | SPAGAIN; | ||
83 | PUTBACK; | ||
84 | FREETMPS; | ||
85 | LEAVE; | ||
86 | } | ||
87 | |||
88 | static void define_symbolic_values(struct print_flag_sym *field, | ||
89 | const char *ev_name, | ||
90 | const char *field_name) | ||
91 | { | ||
92 | define_symbolic_value(ev_name, field_name, field->value, field->str); | ||
93 | if (field->next) | ||
94 | define_symbolic_values(field->next, ev_name, field_name); | ||
95 | } | ||
96 | |||
97 | static void define_symbolic_field(const char *ev_name, | ||
98 | const char *field_name) | ||
99 | { | ||
100 | dSP; | ||
101 | |||
102 | ENTER; | ||
103 | SAVETMPS; | ||
104 | PUSHMARK(SP); | ||
105 | |||
106 | XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); | ||
107 | XPUSHs(sv_2mortal(newSVpv(field_name, 0))); | ||
108 | |||
109 | PUTBACK; | ||
110 | if (get_cv("main::define_symbolic_field", 0)) | ||
111 | call_pv("main::define_symbolic_field", G_SCALAR); | ||
112 | SPAGAIN; | ||
113 | PUTBACK; | ||
114 | FREETMPS; | ||
115 | LEAVE; | ||
116 | } | ||
117 | |||
118 | static void define_flag_value(const char *ev_name, | ||
119 | const char *field_name, | ||
120 | const char *field_value, | ||
121 | const char *field_str) | ||
122 | { | ||
123 | unsigned long long value; | ||
124 | dSP; | ||
125 | |||
126 | value = eval_flag(field_value); | ||
127 | |||
128 | ENTER; | ||
129 | SAVETMPS; | ||
130 | PUSHMARK(SP); | ||
131 | |||
132 | XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); | ||
133 | XPUSHs(sv_2mortal(newSVpv(field_name, 0))); | ||
134 | XPUSHs(sv_2mortal(newSVuv(value))); | ||
135 | XPUSHs(sv_2mortal(newSVpv(field_str, 0))); | ||
136 | |||
137 | PUTBACK; | ||
138 | if (get_cv("main::define_flag_value", 0)) | ||
139 | call_pv("main::define_flag_value", G_SCALAR); | ||
140 | SPAGAIN; | ||
141 | PUTBACK; | ||
142 | FREETMPS; | ||
143 | LEAVE; | ||
144 | } | ||
145 | |||
146 | static void define_flag_values(struct print_flag_sym *field, | ||
147 | const char *ev_name, | ||
148 | const char *field_name) | ||
149 | { | ||
150 | define_flag_value(ev_name, field_name, field->value, field->str); | ||
151 | if (field->next) | ||
152 | define_flag_values(field->next, ev_name, field_name); | ||
153 | } | ||
154 | |||
155 | static void define_flag_field(const char *ev_name, | ||
156 | const char *field_name, | ||
157 | const char *delim) | ||
158 | { | ||
159 | dSP; | ||
160 | |||
161 | ENTER; | ||
162 | SAVETMPS; | ||
163 | PUSHMARK(SP); | ||
164 | |||
165 | XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); | ||
166 | XPUSHs(sv_2mortal(newSVpv(field_name, 0))); | ||
167 | XPUSHs(sv_2mortal(newSVpv(delim, 0))); | ||
168 | |||
169 | PUTBACK; | ||
170 | if (get_cv("main::define_flag_field", 0)) | ||
171 | call_pv("main::define_flag_field", G_SCALAR); | ||
172 | SPAGAIN; | ||
173 | PUTBACK; | ||
174 | FREETMPS; | ||
175 | LEAVE; | ||
176 | } | ||
177 | |||
178 | static void define_event_symbols(struct event *event, | ||
179 | const char *ev_name, | ||
180 | struct print_arg *args) | ||
181 | { | ||
182 | switch (args->type) { | ||
183 | case PRINT_NULL: | ||
184 | break; | ||
185 | case PRINT_ATOM: | ||
186 | define_flag_value(ev_name, cur_field_name, "0", | ||
187 | args->atom.atom); | ||
188 | zero_flag_atom = 0; | ||
189 | break; | ||
190 | case PRINT_FIELD: | ||
191 | if (cur_field_name) | ||
192 | free(cur_field_name); | ||
193 | cur_field_name = strdup(args->field.name); | ||
194 | break; | ||
195 | case PRINT_FLAGS: | ||
196 | define_event_symbols(event, ev_name, args->flags.field); | ||
197 | define_flag_field(ev_name, cur_field_name, args->flags.delim); | ||
198 | define_flag_values(args->flags.flags, ev_name, cur_field_name); | ||
199 | break; | ||
200 | case PRINT_SYMBOL: | ||
201 | define_event_symbols(event, ev_name, args->symbol.field); | ||
202 | define_symbolic_field(ev_name, cur_field_name); | ||
203 | define_symbolic_values(args->symbol.symbols, ev_name, | ||
204 | cur_field_name); | ||
205 | break; | ||
206 | case PRINT_STRING: | ||
207 | break; | ||
208 | case PRINT_TYPE: | ||
209 | define_event_symbols(event, ev_name, args->typecast.item); | ||
210 | break; | ||
211 | case PRINT_OP: | ||
212 | if (strcmp(args->op.op, ":") == 0) | ||
213 | zero_flag_atom = 1; | ||
214 | define_event_symbols(event, ev_name, args->op.left); | ||
215 | define_event_symbols(event, ev_name, args->op.right); | ||
216 | break; | ||
217 | default: | ||
218 | /* we should warn... */ | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | if (args->next) | ||
223 | define_event_symbols(event, ev_name, args->next); | ||
224 | } | ||
225 | |||
226 | static inline struct event *find_cache_event(int type) | ||
227 | { | ||
228 | static char ev_name[256]; | ||
229 | struct event *event; | ||
230 | |||
231 | if (events[type]) | ||
232 | return events[type]; | ||
233 | |||
234 | events[type] = event = trace_find_event(type); | ||
235 | if (!event) | ||
236 | return NULL; | ||
237 | |||
238 | sprintf(ev_name, "%s::%s", event->system, event->name); | ||
239 | |||
240 | define_event_symbols(event, ev_name, event->print_fmt.args); | ||
241 | |||
242 | return event; | ||
243 | } | ||
244 | |||
245 | int common_pc(struct scripting_context *context) | ||
246 | { | ||
247 | int pc; | ||
248 | |||
249 | pc = parse_common_pc(context->event_data); | ||
250 | |||
251 | return pc; | ||
252 | } | ||
253 | |||
254 | int common_flags(struct scripting_context *context) | ||
255 | { | ||
256 | int flags; | ||
257 | |||
258 | flags = parse_common_flags(context->event_data); | ||
259 | |||
260 | return flags; | ||
261 | } | ||
262 | |||
263 | int common_lock_depth(struct scripting_context *context) | ||
264 | { | ||
265 | int lock_depth; | ||
266 | |||
267 | lock_depth = parse_common_lock_depth(context->event_data); | ||
268 | |||
269 | return lock_depth; | ||
270 | } | ||
271 | |||
272 | static void perl_process_event(int cpu, void *data, | ||
273 | int size __attribute((unused)), | ||
274 | unsigned long long nsecs, char *comm) | ||
275 | { | ||
276 | struct format_field *field; | ||
277 | static char handler[256]; | ||
278 | unsigned long long val; | ||
279 | unsigned long s, ns; | ||
280 | struct event *event; | ||
281 | int type; | ||
282 | int pid; | ||
283 | |||
284 | dSP; | ||
285 | |||
286 | type = trace_parse_common_type(data); | ||
287 | |||
288 | event = find_cache_event(type); | ||
289 | if (!event) | ||
290 | die("ug! no event found for type %d", type); | ||
291 | |||
292 | pid = trace_parse_common_pid(data); | ||
293 | |||
294 | sprintf(handler, "%s::%s", event->system, event->name); | ||
295 | |||
296 | s = nsecs / NSECS_PER_SEC; | ||
297 | ns = nsecs - s * NSECS_PER_SEC; | ||
298 | |||
299 | scripting_context->event_data = data; | ||
300 | |||
301 | ENTER; | ||
302 | SAVETMPS; | ||
303 | PUSHMARK(SP); | ||
304 | |||
305 | XPUSHs(sv_2mortal(newSVpv(handler, 0))); | ||
306 | XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); | ||
307 | XPUSHs(sv_2mortal(newSVuv(cpu))); | ||
308 | XPUSHs(sv_2mortal(newSVuv(s))); | ||
309 | XPUSHs(sv_2mortal(newSVuv(ns))); | ||
310 | XPUSHs(sv_2mortal(newSViv(pid))); | ||
311 | XPUSHs(sv_2mortal(newSVpv(comm, 0))); | ||
312 | |||
313 | /* common fields other than pid can be accessed via xsub fns */ | ||
314 | |||
315 | for (field = event->format.fields; field; field = field->next) { | ||
316 | if (field->flags & FIELD_IS_STRING) { | ||
317 | int offset; | ||
318 | if (field->flags & FIELD_IS_DYNAMIC) { | ||
319 | offset = *(int *)(data + field->offset); | ||
320 | offset &= 0xffff; | ||
321 | } else | ||
322 | offset = field->offset; | ||
323 | XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); | ||
324 | } else { /* FIELD_IS_NUMERIC */ | ||
325 | val = read_size(data + field->offset, field->size); | ||
326 | if (field->flags & FIELD_IS_SIGNED) { | ||
327 | XPUSHs(sv_2mortal(newSViv(val))); | ||
328 | } else { | ||
329 | XPUSHs(sv_2mortal(newSVuv(val))); | ||
330 | } | ||
331 | } | ||
332 | } | ||
333 | |||
334 | PUTBACK; | ||
335 | |||
336 | if (get_cv(handler, 0)) | ||
337 | call_pv(handler, G_SCALAR); | ||
338 | else if (get_cv("main::trace_unhandled", 0)) { | ||
339 | XPUSHs(sv_2mortal(newSVpv(handler, 0))); | ||
340 | XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); | ||
341 | XPUSHs(sv_2mortal(newSVuv(cpu))); | ||
342 | XPUSHs(sv_2mortal(newSVuv(nsecs))); | ||
343 | XPUSHs(sv_2mortal(newSViv(pid))); | ||
344 | XPUSHs(sv_2mortal(newSVpv(comm, 0))); | ||
345 | call_pv("main::trace_unhandled", G_SCALAR); | ||
346 | } | ||
347 | SPAGAIN; | ||
348 | PUTBACK; | ||
349 | FREETMPS; | ||
350 | LEAVE; | ||
351 | } | ||
352 | |||
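The FIELD_IS_DYNAMIC branch above relies on the ftrace __data_loc encoding: the 32-bit word stored at field->offset packs the payload's offset within the record in its low 16 bits and the payload length in its high 16 bits, which is why only the masked offset is needed to locate the string. A short decoding sketch (variable names are illustrative, not from this file):

/* Illustrative decode of a __data_loc field; 'data' points at the start
 * of the raw record, as in perl_process_event() above. */
unsigned int dloc   = *(unsigned int *)(data + field->offset);
unsigned int offset = dloc & 0xffff;   /* where the payload starts   */
unsigned int len    = dloc >> 16;      /* how many bytes it occupies */
const char  *str    = (const char *)data + offset;
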
353 | static void run_start_sub(void) | ||
354 | { | ||
355 | dSP; /* access to Perl stack */ | ||
356 | PUSHMARK(SP); | ||
357 | |||
358 | if (get_cv("main::trace_begin", 0)) | ||
359 | call_pv("main::trace_begin", G_DISCARD | G_NOARGS); | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Start trace script | ||
364 | */ | ||
365 | static int perl_start_script(const char *script) | ||
366 | { | ||
367 | const char *command_line[2] = { "", NULL }; | ||
368 | |||
369 | command_line[1] = script; | ||
370 | |||
371 | my_perl = perl_alloc(); | ||
372 | perl_construct(my_perl); | ||
373 | |||
374 | if (perl_parse(my_perl, xs_init, 2, (char **)command_line, | ||
375 | (char **)NULL)) | ||
376 | return -1; | ||
377 | |||
378 | perl_run(my_perl); | ||
379 | if (SvTRUE(ERRSV)) | ||
380 | return -1; | ||
381 | |||
382 | run_start_sub(); | ||
383 | |||
384 | fprintf(stderr, "perf trace started with Perl script %s\n\n", script); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Stop trace script | ||
391 | */ | ||
392 | static int perl_stop_script(void) | ||
393 | { | ||
394 | dSP; /* access to Perl stack */ | ||
395 | PUSHMARK(SP); | ||
396 | |||
397 | if (get_cv("main::trace_end", 0)) | ||
398 | call_pv("main::trace_end", G_DISCARD | G_NOARGS); | ||
399 | |||
400 | perl_destruct(my_perl); | ||
401 | perl_free(my_perl); | ||
402 | |||
403 | fprintf(stderr, "\nperf trace Perl script stopped\n"); | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int perl_generate_script(const char *outfile) | ||
409 | { | ||
410 | struct event *event = NULL; | ||
411 | struct format_field *f; | ||
412 | char fname[PATH_MAX]; | ||
413 | int not_first, count; | ||
414 | FILE *ofp; | ||
415 | |||
416 | sprintf(fname, "%s.pl", outfile); | ||
417 | ofp = fopen(fname, "w"); | ||
418 | if (ofp == NULL) { | ||
419 | fprintf(stderr, "couldn't open %s\n", fname); | ||
420 | return -1; | ||
421 | } | ||
422 | |||
423 | fprintf(ofp, "# perf trace event handlers, " | ||
424 | "generated by perf trace -g perl\n"); | ||
425 | |||
426 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" | ||
427 | " License version 2\n\n"); | ||
428 | |||
429 | fprintf(ofp, "# The common_* event handler fields are the most useful " | ||
430 | "fields common to\n"); | ||
431 | |||
432 | fprintf(ofp, "# all events. They don't necessarily correspond to " | ||
433 | "the 'common_*' fields\n"); | ||
434 | |||
435 | fprintf(ofp, "# in the format files. Those fields not available as " | ||
436 | "handler params can\n"); | ||
437 | |||
438 | fprintf(ofp, "# be retrieved using Perl functions of the form " | ||
439 | "common_*($context).\n"); | ||
440 | |||
441 | fprintf(ofp, "# See Context.pm for the list of available " | ||
442 | "functions.\n\n"); | ||
443 | |||
444 | fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" | ||
445 | "Perf-Trace-Util/lib\";\n"); | ||
446 | |||
447 | fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); | ||
448 | fprintf(ofp, "use Perf::Trace::Core;\n"); | ||
449 | fprintf(ofp, "use Perf::Trace::Context;\n"); | ||
450 | fprintf(ofp, "use Perf::Trace::Util;\n\n"); | ||
451 | |||
452 | fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); | ||
453 | fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); | ||
454 | |||
455 | while ((event = trace_find_next_event(event))) { | ||
456 | fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); | ||
457 | fprintf(ofp, "\tmy ("); | ||
458 | |||
459 | fprintf(ofp, "$event_name, "); | ||
460 | fprintf(ofp, "$context, "); | ||
461 | fprintf(ofp, "$common_cpu, "); | ||
462 | fprintf(ofp, "$common_secs, "); | ||
463 | fprintf(ofp, "$common_nsecs,\n"); | ||
464 | fprintf(ofp, "\t $common_pid, "); | ||
465 | fprintf(ofp, "$common_comm,\n\t "); | ||
466 | |||
467 | not_first = 0; | ||
468 | count = 0; | ||
469 | |||
470 | for (f = event->format.fields; f; f = f->next) { | ||
471 | if (not_first++) | ||
472 | fprintf(ofp, ", "); | ||
473 | if (++count % 5 == 0) | ||
474 | fprintf(ofp, "\n\t "); | ||
475 | |||
476 | fprintf(ofp, "$%s", f->name); | ||
477 | } | ||
478 | fprintf(ofp, ") = @_;\n\n"); | ||
479 | |||
480 | fprintf(ofp, "\tprint_header($event_name, $common_cpu, " | ||
481 | "$common_secs, $common_nsecs,\n\t " | ||
482 | "$common_pid, $common_comm);\n\n"); | ||
483 | |||
484 | fprintf(ofp, "\tprintf(\""); | ||
485 | |||
486 | not_first = 0; | ||
487 | count = 0; | ||
488 | |||
489 | for (f = event->format.fields; f; f = f->next) { | ||
490 | if (not_first++) | ||
491 | fprintf(ofp, ", "); | ||
492 | if (count && count % 4 == 0) { | ||
493 | fprintf(ofp, "\".\n\t \""); | ||
494 | } | ||
495 | count++; | ||
496 | |||
497 | fprintf(ofp, "%s=", f->name); | ||
498 | if (f->flags & FIELD_IS_STRING || | ||
499 | f->flags & FIELD_IS_FLAG || | ||
500 | f->flags & FIELD_IS_SYMBOLIC) | ||
501 | fprintf(ofp, "%%s"); | ||
502 | else if (f->flags & FIELD_IS_SIGNED) | ||
503 | fprintf(ofp, "%%d"); | ||
504 | else | ||
505 | fprintf(ofp, "%%u"); | ||
506 | } | ||
507 | |||
508 | fprintf(ofp, "\\n\",\n\t "); | ||
509 | |||
510 | not_first = 0; | ||
511 | count = 0; | ||
512 | |||
513 | for (f = event->format.fields; f; f = f->next) { | ||
514 | if (not_first++) | ||
515 | fprintf(ofp, ", "); | ||
516 | |||
517 | if (++count % 5 == 0) | ||
518 | fprintf(ofp, "\n\t "); | ||
519 | |||
520 | if (f->flags & FIELD_IS_FLAG) { | ||
521 | if ((count - 1) % 5 != 0) { | ||
522 | fprintf(ofp, "\n\t "); | ||
523 | count = 4; | ||
524 | } | ||
525 | fprintf(ofp, "flag_str(\""); | ||
526 | fprintf(ofp, "%s::%s\", ", event->system, | ||
527 | event->name); | ||
528 | fprintf(ofp, "\"%s\", $%s)", f->name, | ||
529 | f->name); | ||
530 | } else if (f->flags & FIELD_IS_SYMBOLIC) { | ||
531 | if ((count - 1) % 5 != 0) { | ||
532 | fprintf(ofp, "\n\t "); | ||
533 | count = 4; | ||
534 | } | ||
535 | fprintf(ofp, "symbol_str(\""); | ||
536 | fprintf(ofp, "%s::%s\", ", event->system, | ||
537 | event->name); | ||
538 | fprintf(ofp, "\"%s\", $%s)", f->name, | ||
539 | f->name); | ||
540 | } else | ||
541 | fprintf(ofp, "$%s", f->name); | ||
542 | } | ||
543 | |||
544 | fprintf(ofp, ");\n"); | ||
545 | fprintf(ofp, "}\n\n"); | ||
546 | } | ||
547 | |||
548 | fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " | ||
549 | "$common_cpu, $common_secs, $common_nsecs,\n\t " | ||
550 | "$common_pid, $common_comm) = @_;\n\n"); | ||
551 | |||
552 | fprintf(ofp, "\tprint_header($event_name, $common_cpu, " | ||
553 | "$common_secs, $common_nsecs,\n\t $common_pid, " | ||
554 | "$common_comm);\n}\n\n"); | ||
555 | |||
556 | fprintf(ofp, "sub print_header\n{\n" | ||
557 | "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" | ||
558 | "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " | ||
559 | "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); | ||
560 | |||
561 | fclose(ofp); | ||
562 | |||
563 | fprintf(stderr, "generated Perl script: %s\n", fname); | ||
564 | |||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | struct scripting_ops perl_scripting_ops = { | ||
569 | .name = "Perl", | ||
570 | .start_script = perl_start_script, | ||
571 | .stop_script = perl_stop_script, | ||
572 | .process_event = perl_process_event, | ||
573 | .generate_script = perl_generate_script, | ||
574 | }; | ||
575 | |||
576 | #ifdef NO_LIBPERL | ||
577 | void setup_perl_scripting(void) | ||
578 | { | ||
579 | fprintf(stderr, "Perl scripting not supported." | ||
580 | " Install libperl and rebuild perf to enable it. e.g. " | ||
581 | "apt-get install libperl-dev (ubuntu), yum install " | ||
582 | "perl-ExtUtils-Embed (Fedora), etc.\n"); | ||
583 | } | ||
584 | #else | ||
585 | void setup_perl_scripting(void) | ||
586 | { | ||
587 | int err; | ||
588 | err = script_spec_register("Perl", &perl_scripting_ops); | ||
589 | if (err) | ||
590 | die("error registering Perl script extension"); | ||
591 | |||
592 | err = script_spec_register("pl", &perl_scripting_ops); | ||
593 | if (err) | ||
594 | die("error registering pl script extension"); | ||
595 | |||
596 | scripting_context = malloc(sizeof(struct scripting_context)); | ||
597 | } | ||
598 | #endif | ||
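setup_perl_scripting() goes through the generic script_spec_register()/struct scripting_ops interface (declared in trace-event.h below), so additional languages can be wired up the same way. A hedged sketch of what a hypothetical extra backend's registration could look like; none of the foo_* names exist in this patch:

/* Hypothetical backend skeleton, for illustration only. */
static int foo_start_script(const char *script) { (void)script; return 0; }
static int foo_stop_script(void) { return 0; }
static void foo_process_event(int cpu, void *data, int size,
                              unsigned long long nsecs, char *comm)
{
        (void)cpu; (void)data; (void)size; (void)nsecs; (void)comm;
}
static int foo_generate_script(const char *outfile) { (void)outfile; return 0; }

static struct scripting_ops foo_scripting_ops = {
        .name            = "Foo",
        .start_script    = foo_start_script,
        .stop_script     = foo_stop_script,
        .process_event   = foo_process_event,
        .generate_script = foo_generate_script,
};

void setup_foo_scripting(void)
{
        if (script_spec_register("foo", &foo_scripting_ops))
                die("error registering foo script extension");
}
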
diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h new file mode 100644 index 000000000000..8fe0d866fe1a --- /dev/null +++ b/tools/perf/util/trace-event-perl.h | |||
@@ -0,0 +1,51 @@ | |||
1 | #ifndef __PERF_TRACE_EVENT_PERL_H | ||
2 | #define __PERF_TRACE_EVENT_PERL_H | ||
3 | #ifdef NO_LIBPERL | ||
4 | typedef int INTERP; | ||
5 | #define dSP | ||
6 | #define ENTER | ||
7 | #define SAVETMPS | ||
8 | #define PUTBACK | ||
9 | #define SPAGAIN | ||
10 | #define FREETMPS | ||
11 | #define LEAVE | ||
12 | #define SP | ||
13 | #define ERRSV | ||
14 | #define G_SCALAR (0) | ||
15 | #define G_DISCARD (0) | ||
16 | #define G_NOARGS (0) | ||
17 | #define PUSHMARK(a) | ||
18 | #define SvTRUE(a) (0) | ||
19 | #define XPUSHs(s) | ||
20 | #define sv_2mortal(a) | ||
21 | #define newSVpv(a,b) | ||
22 | #define newSVuv(a) | ||
23 | #define newSViv(a) | ||
24 | #define get_cv(a,b) (0) | ||
25 | #define call_pv(a,b) (0) | ||
26 | #define perl_alloc() (0) | ||
27 | #define perl_construct(a) (0) | ||
28 | #define perl_parse(a,b,c,d,e) (0) | ||
29 | #define perl_run(a) (0) | ||
30 | #define perl_destruct(a) (0) | ||
31 | #define perl_free(a) (0) | ||
32 | #define pTHX void | ||
33 | #define CV void | ||
34 | #define dXSUB_SYS | ||
35 | #define pTHX_ | ||
36 | static inline void newXS(const char *a, void *b, const char *c) {} | ||
37 | #else | ||
38 | #include <EXTERN.h> | ||
39 | #include <perl.h> | ||
40 | typedef PerlInterpreter * INTERP; | ||
41 | #endif | ||
42 | |||
43 | struct scripting_context { | ||
44 | void *event_data; | ||
45 | }; | ||
46 | |||
47 | int common_pc(struct scripting_context *context); | ||
48 | int common_flags(struct scripting_context *context); | ||
49 | int common_lock_depth(struct scripting_context *context); | ||
50 | |||
51 | #endif /* __PERF_TRACE_EVENT_PERL_H */ | ||
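With NO_LIBPERL defined, the stubs above reduce the whole Perl embedding API to no-ops, so trace-event-perl.c keeps compiling on systems without the libperl headers. A tiny, illustrative compile check (not part of the patch; it does nothing useful at run time):

#define NO_LIBPERL
#include "trace-event-perl.h"

int main(void)
{
        INTERP interp = perl_alloc();   /* expands to (0) here */

        perl_construct(interp);         /* the rest are no-ops as well */
        perl_run(interp);
        perl_destruct(interp);
        perl_free(interp);
        return 0;
}
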
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 1b5c847d2c22..342dfdd43f87 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c | |||
@@ -458,9 +458,8 @@ struct record *trace_read_data(int cpu) | |||
458 | return data; | 458 | return data; |
459 | } | 459 | } |
460 | 460 | ||
461 | void trace_report(void) | 461 | void trace_report(int fd) |
462 | { | 462 | { |
463 | const char *input_file = "trace.info"; | ||
464 | char buf[BUFSIZ]; | 463 | char buf[BUFSIZ]; |
465 | char test[] = { 23, 8, 68 }; | 464 | char test[] = { 23, 8, 68 }; |
466 | char *version; | 465 | char *version; |
@@ -468,17 +467,15 @@ void trace_report(void) | |||
468 | int show_funcs = 0; | 467 | int show_funcs = 0; |
469 | int show_printk = 0; | 468 | int show_printk = 0; |
470 | 469 | ||
471 | input_fd = open(input_file, O_RDONLY); | 470 | input_fd = fd; |
472 | if (input_fd < 0) | ||
473 | die("opening '%s'\n", input_file); | ||
474 | 471 | ||
475 | read_or_die(buf, 3); | 472 | read_or_die(buf, 3); |
476 | if (memcmp(buf, test, 3) != 0) | 473 | if (memcmp(buf, test, 3) != 0) |
477 | die("not an trace data file"); | 474 | die("no trace data in the file"); |
478 | 475 | ||
479 | read_or_die(buf, 7); | 476 | read_or_die(buf, 7); |
480 | if (memcmp(buf, "tracing", 7) != 0) | 477 | if (memcmp(buf, "tracing", 7) != 0) |
481 | die("not a trace file (missing tracing)"); | 478 | die("not a trace file (missing 'tracing' tag)"); |
482 | 479 | ||
483 | version = read_string(); | 480 | version = read_string(); |
484 | if (show_version) | 481 | if (show_version) |
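trace_report() no longer opens a hard-coded "trace.info" file; the caller is now responsible for handing it a descriptor that is already positioned at the tracing data (in perf's case, inside the perf.data file). A hedged sketch of the new calling convention; the path argument and the explicit lseek() are illustrative assumptions, not code from this patch:

#include <fcntl.h>
#include <unistd.h>
#include "trace-event.h"

/* Illustrative caller: open a file containing tracing data, seek to where
 * that data starts, and let trace_report() parse it from there. */
static int report_from(const char *path, off_t tracing_data_offset)
{
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return -1;
        lseek(fd, tracing_data_offset, SEEK_SET);
        trace_report(fd);
        close(fd);
        return 0;
}
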
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 693f815c9429..81698d5e6503 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _TRACE_EVENTS_H | 1 | #ifndef __PERF_TRACE_EVENTS_H |
2 | #define _TRACE_EVENTS_H | 2 | #define __PERF_TRACE_EVENTS_H |
3 | 3 | ||
4 | #include "parse-events.h" | 4 | #include "parse-events.h" |
5 | 5 | ||
@@ -26,6 +26,11 @@ enum { | |||
26 | enum format_flags { | 26 | enum format_flags { |
27 | FIELD_IS_ARRAY = 1, | 27 | FIELD_IS_ARRAY = 1, |
28 | FIELD_IS_POINTER = 2, | 28 | FIELD_IS_POINTER = 2, |
29 | FIELD_IS_SIGNED = 4, | ||
30 | FIELD_IS_STRING = 8, | ||
31 | FIELD_IS_DYNAMIC = 16, | ||
32 | FIELD_IS_FLAG = 32, | ||
33 | FIELD_IS_SYMBOLIC = 64, | ||
29 | }; | 34 | }; |
30 | 35 | ||
31 | struct format_field { | 36 | struct format_field { |
@@ -132,15 +137,18 @@ struct event { | |||
132 | int flags; | 137 | int flags; |
133 | struct format format; | 138 | struct format format; |
134 | struct print_fmt print_fmt; | 139 | struct print_fmt print_fmt; |
140 | char *system; | ||
135 | }; | 141 | }; |
136 | 142 | ||
137 | enum { | 143 | enum { |
138 | EVENT_FL_ISFTRACE = 1, | 144 | EVENT_FL_ISFTRACE = 0x01, |
139 | EVENT_FL_ISPRINT = 2, | 145 | EVENT_FL_ISPRINT = 0x02, |
140 | EVENT_FL_ISBPRINT = 4, | 146 | EVENT_FL_ISBPRINT = 0x04, |
141 | EVENT_FL_ISFUNC = 8, | 147 | EVENT_FL_ISFUNC = 0x08, |
142 | EVENT_FL_ISFUNCENT = 16, | 148 | EVENT_FL_ISFUNCENT = 0x10, |
143 | EVENT_FL_ISFUNCRET = 32, | 149 | EVENT_FL_ISFUNCRET = 0x20, |
150 | |||
151 | EVENT_FL_FAILED = 0x80000000 | ||
144 | }; | 152 | }; |
145 | 153 | ||
146 | struct record { | 154 | struct record { |
@@ -154,7 +162,7 @@ struct record *trace_read_data(int cpu); | |||
154 | 162 | ||
155 | void parse_set_info(int nr_cpus, int long_sz); | 163 | void parse_set_info(int nr_cpus, int long_sz); |
156 | 164 | ||
157 | void trace_report(void); | 165 | void trace_report(int fd); |
158 | 166 | ||
159 | void *malloc_or_die(unsigned int size); | 167 | void *malloc_or_die(unsigned int size); |
160 | 168 | ||
@@ -166,7 +174,7 @@ void print_funcs(void); | |||
166 | void print_printk(void); | 174 | void print_printk(void); |
167 | 175 | ||
168 | int parse_ftrace_file(char *buf, unsigned long size); | 176 | int parse_ftrace_file(char *buf, unsigned long size); |
169 | int parse_event_file(char *buf, unsigned long size, char *system); | 177 | int parse_event_file(char *buf, unsigned long size, char *sys); |
170 | void print_event(int cpu, void *data, int size, unsigned long long nsecs, | 178 | void print_event(int cpu, void *data, int size, unsigned long long nsecs, |
171 | char *comm); | 179 | char *comm); |
172 | 180 | ||
@@ -233,13 +241,45 @@ extern int header_page_size_size; | |||
233 | extern int header_page_data_offset; | 241 | extern int header_page_data_offset; |
234 | extern int header_page_data_size; | 242 | extern int header_page_data_size; |
235 | 243 | ||
244 | extern int latency_format; | ||
245 | |||
236 | int parse_header_page(char *buf, unsigned long size); | 246 | int parse_header_page(char *buf, unsigned long size); |
237 | int trace_parse_common_type(void *data); | 247 | int trace_parse_common_type(void *data); |
248 | int trace_parse_common_pid(void *data); | ||
249 | int parse_common_pc(void *data); | ||
250 | int parse_common_flags(void *data); | ||
251 | int parse_common_lock_depth(void *data); | ||
238 | struct event *trace_find_event(int id); | 252 | struct event *trace_find_event(int id); |
253 | struct event *trace_find_next_event(struct event *event); | ||
254 | unsigned long long read_size(void *ptr, int size); | ||
239 | unsigned long long | 255 | unsigned long long |
240 | raw_field_value(struct event *event, const char *name, void *data); | 256 | raw_field_value(struct event *event, const char *name, void *data); |
241 | void *raw_field_ptr(struct event *event, const char *name, void *data); | 257 | void *raw_field_ptr(struct event *event, const char *name, void *data); |
258 | unsigned long long eval_flag(const char *flag); | ||
259 | |||
260 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); | ||
261 | |||
262 | /* taken from kernel/trace/trace.h */ | ||
263 | enum trace_flag_type { | ||
264 | TRACE_FLAG_IRQS_OFF = 0x01, | ||
265 | TRACE_FLAG_IRQS_NOSUPPORT = 0x02, | ||
266 | TRACE_FLAG_NEED_RESCHED = 0x04, | ||
267 | TRACE_FLAG_HARDIRQ = 0x08, | ||
268 | TRACE_FLAG_SOFTIRQ = 0x10, | ||
269 | }; | ||
270 | |||
271 | struct scripting_ops { | ||
272 | const char *name; | ||
273 | int (*start_script) (const char *); | ||
274 | int (*stop_script) (void); | ||
275 | void (*process_event) (int cpu, void *data, int size, | ||
276 | unsigned long long nsecs, char *comm); | ||
277 | int (*generate_script) (const char *outfile); | ||
278 | }; | ||
279 | |||
280 | int script_spec_register(const char *spec, struct scripting_ops *ops); | ||
242 | 281 | ||
243 | void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); | 282 | extern struct scripting_ops perl_scripting_ops; |
283 | void setup_perl_scripting(void); | ||
244 | 284 | ||
245 | #endif /* _TRACE_EVENTS_H */ | 285 | #endif /* __PERF_TRACE_EVENTS_H */ |
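The trace_flag_type values copied from kernel/trace/trace.h exist so that callers of parse_common_flags() can interpret the common 'flags' byte of a record. A brief sketch; the helper name and the printf are illustrative:

#include <stdio.h>
#include "trace-event.h"

/* Decode the common 'flags' byte of one raw record ('data' points at the
 * start of the record payload). */
static void decode_common_flags(void *data)
{
        int flags = parse_common_flags(data);

        printf("irqs_off=%d hardirq=%d softirq=%d need_resched=%d\n",
               !!(flags & TRACE_FLAG_IRQS_OFF),
               !!(flags & TRACE_FLAG_HARDIRQ),
               !!(flags & TRACE_FLAG_SOFTIRQ),
               !!(flags & TRACE_FLAG_NEED_RESCHED));
}
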
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h index 5e75f9005940..7d6b8331f898 100644 --- a/tools/perf/util/types.h +++ b/tools/perf/util/types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _PERF_TYPES_H | 1 | #ifndef __PERF_TYPES_H |
2 | #define _PERF_TYPES_H | 2 | #define __PERF_TYPES_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * We define u64 as unsigned long long for every architecture | 5 | * We define u64 as unsigned long long for every architecture |
@@ -14,4 +14,4 @@ typedef signed short s16; | |||
14 | typedef unsigned char u8; | 14 | typedef unsigned char u8; |
15 | typedef signed char s8; | 15 | typedef signed char s8; |
16 | 16 | ||
17 | #endif /* _PERF_TYPES_H */ | 17 | #endif /* __PERF_TYPES_H */ |
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 9de2329dd44d..c673d8825883 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -84,6 +84,9 @@ | |||
84 | #include <iconv.h> | 84 | #include <iconv.h> |
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | extern const char *graph_line; | ||
88 | extern const char *graph_dotted_line; | ||
89 | |||
87 | /* On most systems <limits.h> would have given us this, but | 90 | /* On most systems <limits.h> would have given us this, but |
88 | * not on some systems (e.g. GNU/Hurd). | 91 | * not on some systems (e.g. GNU/Hurd). |
89 | */ | 92 | */ |
@@ -134,6 +137,15 @@ extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, | |||
134 | extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); | 137 | extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); |
135 | extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); | 138 | extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); |
136 | 139 | ||
140 | #include "../../../include/linux/stringify.h" | ||
141 | |||
142 | #define DIE_IF(cnd) \ | ||
143 | do { if (cnd) \ | ||
144 | die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \ | ||
145 | __stringify(cnd) "\n"); \ | ||
146 | } while (0) | ||
147 | |||
148 | |||
137 | extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); | 149 | extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); |
138 | 150 | ||
139 | extern int prefixcmp(const char *str, const char *prefix); | 151 | extern int prefixcmp(const char *str, const char *prefix); |
@@ -278,17 +290,15 @@ static inline char *gitstrchrnul(const char *s, int c) | |||
278 | * Wrappers: | 290 | * Wrappers: |
279 | */ | 291 | */ |
280 | extern char *xstrdup(const char *str); | 292 | extern char *xstrdup(const char *str); |
281 | extern void *xmalloc(size_t size); | 293 | extern void *xmalloc(size_t size) __attribute__((weak)); |
282 | extern void *xmemdupz(const void *data, size_t len); | 294 | extern void *xmemdupz(const void *data, size_t len); |
283 | extern char *xstrndup(const char *str, size_t len); | 295 | extern char *xstrndup(const char *str, size_t len); |
284 | extern void *xrealloc(void *ptr, size_t size); | 296 | extern void *xrealloc(void *ptr, size_t size) __attribute__((weak)); |
285 | extern void *xcalloc(size_t nmemb, size_t size); | 297 | |
286 | extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); | 298 | static inline void *zalloc(size_t size) |
287 | extern ssize_t xread(int fd, void *buf, size_t len); | 299 | { |
288 | extern ssize_t xwrite(int fd, const void *buf, size_t len); | 300 | return calloc(1, size); |
289 | extern int xdup(int fd); | 301 | } |
290 | extern FILE *xfdopen(int fd, const char *mode); | ||
291 | extern int xmkstemp(char *template); | ||
292 | 302 | ||
293 | static inline size_t xsize_t(off_t len) | 303 | static inline size_t xsize_t(off_t len) |
294 | { | 304 | { |
@@ -306,6 +316,7 @@ static inline int has_extension(const char *filename, const char *ext) | |||
306 | #undef isascii | 316 | #undef isascii |
307 | #undef isspace | 317 | #undef isspace |
308 | #undef isdigit | 318 | #undef isdigit |
319 | #undef isxdigit | ||
309 | #undef isalpha | 320 | #undef isalpha |
310 | #undef isprint | 321 | #undef isprint |
311 | #undef isalnum | 322 | #undef isalnum |
@@ -323,6 +334,8 @@ extern unsigned char sane_ctype[256]; | |||
323 | #define isascii(x) (((x) & ~0x7f) == 0) | 334 | #define isascii(x) (((x) & ~0x7f) == 0) |
324 | #define isspace(x) sane_istest(x,GIT_SPACE) | 335 | #define isspace(x) sane_istest(x,GIT_SPACE) |
325 | #define isdigit(x) sane_istest(x,GIT_DIGIT) | 336 | #define isdigit(x) sane_istest(x,GIT_DIGIT) |
337 | #define isxdigit(x) \ | ||
338 | (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G') | ||
326 | #define isalpha(x) sane_istest(x,GIT_ALPHA) | 339 | #define isalpha(x) sane_istest(x,GIT_ALPHA) |
327 | #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) | 340 | #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) |
328 | #define isprint(x) sane_istest(x,GIT_PRINT) | 341 | #define isprint(x) sane_istest(x,GIT_PRINT) |
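Two of the smaller additions above deserve a usage note: zalloc() is a zero-initialising calloc(1, size) convenience that replaces the dropped x* allocation wrappers, and DIE_IF() turns a condition into a die() carrying file/line context via __stringify(). A brief illustrative combination (the allocated type is only an example):

#include "util.h"
#include "trace-event-perl.h"   /* struct scripting_context, used here as an example payload */

static struct scripting_context *alloc_context(void)
{
        struct scripting_context *ctx = zalloc(sizeof(*ctx));

        /* die()s with file/line context if the allocation failed. */
        DIE_IF(ctx == NULL);
        return ctx;
}
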
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h index cadf8cf2a590..2fa967e1a88a 100644 --- a/tools/perf/util/values.h +++ b/tools/perf/util/values.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _PERF_VALUES_H | 1 | #ifndef __PERF_VALUES_H |
2 | #define _PERF_VALUES_H | 2 | #define __PERF_VALUES_H |
3 | 3 | ||
4 | #include "types.h" | 4 | #include "types.h" |
5 | 5 | ||
@@ -24,4 +24,4 @@ void perf_read_values_add_value(struct perf_read_values *values, | |||
24 | void perf_read_values_display(FILE *fp, struct perf_read_values *values, | 24 | void perf_read_values_display(FILE *fp, struct perf_read_values *values, |
25 | int raw); | 25 | int raw); |
26 | 26 | ||
27 | #endif /* _PERF_VALUES_H */ | 27 | #endif /* __PERF_VALUES_H */ |
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c index 4574ac28396f..bf44ca85d23b 100644 --- a/tools/perf/util/wrapper.c +++ b/tools/perf/util/wrapper.c | |||
@@ -79,43 +79,12 @@ void *xrealloc(void *ptr, size_t size) | |||
79 | return ret; | 79 | return ret; |
80 | } | 80 | } |
81 | 81 | ||
82 | void *xcalloc(size_t nmemb, size_t size) | ||
83 | { | ||
84 | void *ret = calloc(nmemb, size); | ||
85 | if (!ret && (!nmemb || !size)) | ||
86 | ret = calloc(1, 1); | ||
87 | if (!ret) { | ||
88 | release_pack_memory(nmemb * size, -1); | ||
89 | ret = calloc(nmemb, size); | ||
90 | if (!ret && (!nmemb || !size)) | ||
91 | ret = calloc(1, 1); | ||
92 | if (!ret) | ||
93 | die("Out of memory, calloc failed"); | ||
94 | } | ||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | void *xmmap(void *start, size_t length, | ||
99 | int prot, int flags, int fd, off_t offset) | ||
100 | { | ||
101 | void *ret = mmap(start, length, prot, flags, fd, offset); | ||
102 | if (ret == MAP_FAILED) { | ||
103 | if (!length) | ||
104 | return NULL; | ||
105 | release_pack_memory(length, fd); | ||
106 | ret = mmap(start, length, prot, flags, fd, offset); | ||
107 | if (ret == MAP_FAILED) | ||
108 | die("Out of memory? mmap failed: %s", strerror(errno)); | ||
109 | } | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | /* | 82 | /* |
114 | * xread() is the same as read(), but it automatically restarts read() | 83 | * xread() is the same as read(), but it automatically restarts read() |
115 | * operations with a recoverable error (EAGAIN and EINTR). xread() | 84 | * operations with a recoverable error (EAGAIN and EINTR). xread() |
116 | * DOES NOT GUARANTEE that "len" bytes is read even if the data is available. | 85 | * DOES NOT GUARANTEE that "len" bytes is read even if the data is available. |
117 | */ | 86 | */ |
118 | ssize_t xread(int fd, void *buf, size_t len) | 87 | static ssize_t xread(int fd, void *buf, size_t len) |
119 | { | 88 | { |
120 | ssize_t nr; | 89 | ssize_t nr; |
121 | while (1) { | 90 | while (1) { |
@@ -131,7 +100,7 @@ ssize_t xread(int fd, void *buf, size_t len) | |||
131 | * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT | 100 | * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT |
132 | * GUARANTEE that "len" bytes is written even if the operation is successful. | 101 | * GUARANTEE that "len" bytes is written even if the operation is successful. |
133 | */ | 102 | */ |
134 | ssize_t xwrite(int fd, const void *buf, size_t len) | 103 | static ssize_t xwrite(int fd, const void *buf, size_t len) |
135 | { | 104 | { |
136 | ssize_t nr; | 105 | ssize_t nr; |
137 | while (1) { | 106 | while (1) { |
@@ -179,29 +148,3 @@ ssize_t write_in_full(int fd, const void *buf, size_t count) | |||
179 | 148 | ||
180 | return total; | 149 | return total; |
181 | } | 150 | } |
182 | |||
183 | int xdup(int fd) | ||
184 | { | ||
185 | int ret = dup(fd); | ||
186 | if (ret < 0) | ||
187 | die("dup failed: %s", strerror(errno)); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | FILE *xfdopen(int fd, const char *mode) | ||
192 | { | ||
193 | FILE *stream = fdopen(fd, mode); | ||
194 | if (stream == NULL) | ||
195 | die("Out of memory? fdopen failed: %s", strerror(errno)); | ||
196 | return stream; | ||
197 | } | ||
198 | |||
199 | int xmkstemp(char *template) | ||
200 | { | ||
201 | int fd; | ||
202 | |||
203 | fd = mkstemp(template); | ||
204 | if (fd < 0) | ||
205 | die("Unable to create temporary file: %s", strerror(errno)); | ||
206 | return fd; | ||
207 | } | ||
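Most of the x* convenience wrappers are dropped above; xread() and xwrite() remain (now static) because of the restart behaviour their comments describe. Their loop bodies fall outside the context shown here, so as a hedged sketch, the usual EINTR/EAGAIN restart pattern they implement looks like this:

#include <errno.h>
#include <unistd.h>

/* Retry a read() that was interrupted or transiently would have blocked;
 * this follows the pattern described by the comments above, not a copy of xread(). */
static ssize_t read_retry(int fd, void *buf, size_t len)
{
        ssize_t nr;

        while (1) {
                nr = read(fd, buf, len);
                if (nr < 0 && (errno == EAGAIN || errno == EINTR))
                        continue;
                return nr;
        }
}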