94 files changed, 3263 insertions, 2006 deletions
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt index 769f925c8526..87f4d052e39c 100644 --- a/Documentation/ioctl-number.txt +++ b/Documentation/ioctl-number.txt | |||
@@ -130,8 +130,6 @@ Code Seq# Include File Comments | |||
130 | <mailto:zapman@interlan.net> | 130 | <mailto:zapman@interlan.net> |
131 | 'i' 00-3F linux/i2o.h | 131 | 'i' 00-3F linux/i2o.h |
132 | 'j' 00-3F linux/joystick.h | 132 | 'j' 00-3F linux/joystick.h |
133 | 'k' all asm-sparc/kbio.h | ||
134 | asm-sparc64/kbio.h | ||
135 | 'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system | 133 | 'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system |
136 | <http://mikonos.dia.unisa.it/tcfs> | 134 | <http://mikonos.dia.unisa.it/tcfs> |
137 | 'l' 40-7F linux/udf_fs_i.h in development: | 135 | 'l' 40-7F linux/udf_fs_i.h in development: |
diff --git a/Documentation/sharedsubtree.txt b/Documentation/sharedsubtree.txt new file mode 100644 index 000000000000..2d8f403eb6eb --- /dev/null +++ b/Documentation/sharedsubtree.txt | |||
@@ -0,0 +1,1060 @@ | |||
1 | Shared Subtrees | ||
2 | --------------- | ||
3 | |||
4 | Contents: | ||
5 | 1) Overview | ||
6 | 2) Features | ||
7 | 3) smount command | ||
8 | 4) Use cases | ||
9 | 5) Detailed semantics | ||
10 | 6) Quiz | ||
11 | 7) FAQ | ||
12 | 8) Implementation | ||
13 | |||
14 | |||
15 | 1) Overview | ||
16 | ----------- | ||
17 | |||
18 | Consider the following situation: | ||
19 | |||
20 | A process wants to clone its own namespace, but still wants to access the CD | ||
21 | that got mounted recently. Shared subtree semantics provide the necessary | ||
22 | mechanism to accomplish the above. | ||
23 | |||
24 | It provides the necessary building blocks for features like per-user namespaces | ||
25 | and versioned filesystems. | ||
26 | |||
27 | 2) Features | ||
28 | ----------- | ||
29 | |||
30 | Shared subtrees provide four different flavors of mounts (struct vfsmount, to | ||
31 | be precise): | ||
32 | |||
33 | a. shared mount | ||
34 | b. slave mount | ||
35 | c. private mount | ||
36 | d. unbindable mount | ||
37 | |||
38 | |||
39 | 2a) A shared mount can be replicated to as many mountpoints as required, and | ||
40 | all the replicas continue to be exactly the same. | ||
41 | |||
42 | Here is an example: | ||
43 | |||
44 | Let's say /mnt has a mount that is shared. | ||
45 | mount --make-shared /mnt | ||
46 | |||
47 | Note: the mount command does not yet support the --make-shared flag. | ||
48 | I have included a small C program (see section 3) which does the same when | ||
49 | run as 'smount /mnt shared'. | ||
50 | |||
51 | #mount --bind /mnt /tmp | ||
52 | The above command replicates the mount at /mnt to the mountpoint /tmp | ||
53 | and the contents of both the mounts remain identical. | ||
54 | |||
55 | #ls /mnt | ||
56 | a b c | ||
57 | |||
58 | #ls /tmp | ||
59 | a b c | ||
60 | |||
61 | Now let's say we mount a device at /tmp/a | ||
62 | #mount /dev/sd0 /tmp/a | ||
63 | |||
64 | #ls /tmp/a | ||
65 | t1 t2 t3 | ||
66 | |||
67 | #ls /mnt/a | ||
68 | t1 t2 t3 | ||
69 | |||
70 | Note that the mount has propagated to the mount at /mnt as well. | ||
71 | |||
72 | And the same is true even when /dev/sd0 is mounted on /mnt/a. The | ||
73 | contents will be visible under /tmp/a too. | ||
74 | |||
75 | |||
76 | 2b) A slave mount is like a shared mount except that mount and umount events | ||
77 | only propagate towards it. | ||
78 | |||
79 | All slave mounts have a master mount, which is a shared mount. | ||
80 | |||
81 | Here is an example: | ||
82 | |||
83 | Let's say /mnt has a mount which is shared. | ||
84 | #mount --make-shared /mnt | ||
85 | |||
86 | Let's bind mount /mnt to /tmp: | ||
87 | #mount --bind /mnt /tmp | ||
88 | |||
89 | The new mount at /tmp becomes a shared mount and is a replica of | ||
90 | the mount at /mnt. | ||
91 | |||
92 | Now let's make the mount at /tmp a slave of /mnt: | ||
93 | #mount --make-slave /tmp | ||
94 | [or smount /tmp slave] | ||
95 | |||
96 | Let's mount /dev/sd0 on /mnt/a: | ||
97 | #mount /dev/sd0 /mnt/a | ||
98 | |||
99 | #ls /mnt/a | ||
100 | t1 t2 t3 | ||
101 | |||
102 | #ls /tmp/a | ||
103 | t1 t2 t3 | ||
104 | |||
105 | Note that the mount event has propagated to the mount at /tmp. | ||
106 | |||
107 | However, let's see what happens if we mount something on the mount at /tmp: | ||
108 | |||
109 | #mount /dev/sd1 /tmp/b | ||
110 | |||
111 | #ls /tmp/b | ||
112 | s1 s2 s3 | ||
113 | |||
114 | #ls /mnt/b | ||
115 | |||
116 | Note how the mount event has not propagated to the mount at | ||
117 | /mnt | ||
118 | |||
119 | |||
120 | 2c) A private mount does not forward or receive propagation. | ||
121 | |||
122 | This is the mount we are familiar with. It is the default type. | ||
123 | |||
124 | |||
125 | 2d) An unbindable mount is an unbindable private mount. | ||
126 | |||
127 | Let's say we have a mount at /mnt and we make it unbindable: | ||
128 | |||
129 | #mount --make-unbindable /mnt | ||
130 | [ smount /mnt unbindable ] | ||
131 | |||
132 | Let's try to bind mount this mount somewhere else: | ||
133 | # mount --bind /mnt /tmp | ||
134 | mount: wrong fs type, bad option, bad superblock on /mnt, | ||
135 | or too many mounted file systems | ||
136 | |||
137 | Binding an unbindable mount is an invalid operation. | ||
138 | |||
139 | |||
140 | 3) smount command | ||
141 | |||
142 | Currently the mount command is not aware of shared subtree features. | ||
143 | Work is in progress to add the support to mount (util-linux package). | ||
144 | Until then, use the following program. | ||
145 | |||
146 | ------------------------------------------------------------------------ | ||
147 | // | ||
148 | //this code was developed by Miklos Szeredi <miklos@szeredi.hu> | ||
149 | //and modified by Ram Pai <linuxram@us.ibm.com> | ||
150 | // sample usage: | ||
151 | // smount /tmp shared | ||
152 | // | ||
153 | #include <stdio.h> | ||
154 | #include <stdlib.h> | ||
155 | #include <unistd.h> | ||
156 | #include <sys/mount.h> | ||
157 | #include <sys/fsuid.h> | ||
#include <string.h>	/* for strcmp() */
158 | |||
159 | #ifndef MS_REC | ||
160 | #define MS_REC 0x4000 /* 16384: Recursive loopback */ | ||
161 | #endif | ||
162 | |||
163 | #ifndef MS_SHARED | ||
164 | #define MS_SHARED 1<<20 /* Shared */ | ||
165 | #endif | ||
166 | |||
167 | #ifndef MS_PRIVATE | ||
168 | #define MS_PRIVATE 1<<18 /* Private */ | ||
169 | #endif | ||
170 | |||
171 | #ifndef MS_SLAVE | ||
172 | #define MS_SLAVE 1<<19 /* Slave */ | ||
173 | #endif | ||
174 | |||
175 | #ifndef MS_UNBINDABLE | ||
176 | #define MS_UNBINDABLE 1<<17 /* Unbindable */ | ||
177 | #endif | ||
178 | |||
179 | int main(int argc, char *argv[]) | ||
180 | { | ||
181 | int type; | ||
182 | if(argc != 3) { | ||
183 | fprintf(stderr, "usage: %s dir " | ||
184 | "<rshared|rslave|rprivate|runbindable|shared|slave" | ||
185 | "|private|unbindable>\n" , argv[0]); | ||
186 | return 1; | ||
187 | } | ||
188 | |||
189 | fprintf(stdout, "%s %s %s\n", argv[0], argv[1], argv[2]); | ||
190 | |||
191 | if (strcmp(argv[2],"rshared")==0) | ||
192 | type=(MS_SHARED|MS_REC); | ||
193 | else if (strcmp(argv[2],"rslave")==0) | ||
194 | type=(MS_SLAVE|MS_REC); | ||
195 | else if (strcmp(argv[2],"rprivate")==0) | ||
196 | type=(MS_PRIVATE|MS_REC); | ||
197 | else if (strcmp(argv[2],"runbindable")==0) | ||
198 | type=(MS_UNBINDABLE|MS_REC); | ||
199 | else if (strcmp(argv[2],"shared")==0) | ||
200 | type=MS_SHARED; | ||
201 | else if (strcmp(argv[2],"slave")==0) | ||
202 | type=MS_SLAVE; | ||
203 | else if (strcmp(argv[2],"private")==0) | ||
204 | type=MS_PRIVATE; | ||
205 | else if (strcmp(argv[2],"unbindable")==0) | ||
206 | type=MS_UNBINDABLE; | ||
207 | else { | ||
208 | fprintf(stderr, "invalid operation: %s\n", argv[2]); | ||
209 | return 1; | ||
210 | } | ||
211 | setfsuid(getuid()); | ||
212 | |||
213 | if(mount("", argv[1], "dontcare", type, "") == -1) { | ||
214 | perror("mount"); | ||
215 | return 1; | ||
216 | } | ||
217 | return 0; | ||
218 | } | ||
219 | ----------------------------------------------------------------------- | ||
220 | |||
221 | Copy the above code snippet into smount.c | ||
222 | gcc -o smount smount.c | ||
223 | |||
224 | |||
225 | (i) To mark all the mounts under /mnt as shared execute the following | ||
226 | command: | ||
227 | |||
228 | smount /mnt rshared | ||
229 | the corresponding syntax planned for mount command is | ||
230 | mount --make-rshared /mnt | ||
231 | |||
232 | just to mark a mount /mnt as shared, execute the following | ||
233 | command: | ||
234 | smount /mnt shared | ||
235 | the corresponding syntax planned for mount command is | ||
236 | mount --make-shared /mnt | ||
237 | |||
238 | (ii) To mark all the shared mounts under /mnt as slave execute the | ||
239 | following | ||
240 | command: | ||
241 | |||
242 | smount /mnt rslave | ||
243 | the corresponding syntax planned for mount command is | ||
244 | mount --make-rslave /mnt | ||
245 | |||
246 | just to mark a mount /mnt as slave, execute the following | ||
247 | command: | ||
248 | smount /mnt slave | ||
249 | the corresponding syntax planned for mount command is | ||
250 | mount --make-slave /mnt | ||
251 | |||
252 | (iii) To mark all the mounts under /mnt as private execute the | ||
253 | following command: | ||
254 | |||
255 | smount /mnt rprivate | ||
256 | the corresponding syntax planned for mount command is | ||
257 | mount --make-rprivate /mnt | ||
258 | |||
259 | just to mark a mount /mnt as private, execute the following | ||
260 | command: | ||
261 | smount /mnt private | ||
262 | the corresponding syntax planned for mount command is | ||
263 | mount --make-private /mnt | ||
264 | |||
265 | NOTE: by default all the mounts are created as private. But if | ||
266 | you want to change some shared/slave/unbindable mount to | ||
267 | private at a later point in time, this command can help. | ||
268 | |||
269 | (iv) To mark all the mounts under /mnt as unbindable execute the | ||
270 | following | ||
271 | command: | ||
272 | |||
273 | smount /mnt runbindable | ||
274 | the corresponding syntax planned for mount command is | ||
275 | mount --make-runbindable /mnt | ||
276 | |||
277 | just to mark a mount /mnt as unbindable, execute the following | ||
278 | command: | ||
279 | smount /mnt unbindable | ||
280 | the corresponding syntax planned for mount command is | ||
281 | mount --make-unbindable /mnt | ||
282 | |||
283 | |||
284 | 4) Use cases | ||
285 | ------------ | ||
286 | |||
287 | A) A process wants to clone its own namespace, but still wants to | ||
288 | access the CD that got mounted recently. | ||
289 | |||
290 | Solution: | ||
291 | |||
292 | The system administrator can make the mount at /cdrom shared | ||
293 | mount --bind /cdrom /cdrom | ||
294 | mount --make-shared /cdrom | ||
295 | |||
296 | Now any process that clones off a new namespace will have a | ||
297 | mount at /cdrom which is a replica of the same mount in the | ||
298 | parent namespace. | ||
299 | |||
300 | So when a CD is inserted and mounted at /cdrom that mount gets | ||
301 | propagated to the other mount at /cdrom in all the other clone | ||
302 | namespaces. | ||
303 | |||
304 | B) A process wants its mounts invisible to any other process, but | ||
305 | still wants to be able to see the other system mounts. | ||
306 | |||
307 | Solution: | ||
308 | |||
309 | To begin with, the administrator can mark the entire mount tree | ||
310 | as shareable. | ||
311 | |||
312 | mount --make-rshared / | ||
313 | |||
314 | A new process can clone off a new namespace and mark some part | ||
315 | of its namespace as slave: | ||
316 | |||
317 | mount --make-rslave /myprivatetree | ||
318 | |||
319 | Henceforth, any mounts done by the process within /myprivatetree | ||
320 | will not show up in any other namespace. However, mounts done in | ||
321 | the parent namespace under /myprivatetree still show up in the | ||
322 | process's namespace. | ||
323 | |||
324 | |||
325 | Apart from the above semantics this feature provides the | ||
326 | building blocks to solve the following problems: | ||
327 | |||
328 | C) Per-user namespace | ||
329 | |||
330 | The above semantics allow a way to share mounts across | ||
331 | namespaces. But namespaces are associated with processes. If | ||
332 | namespaces are made first-class objects with a user API to | ||
333 | associate/disassociate a namespace with a userid, then each user | ||
334 | could have his/her own namespace and tailor it to his/her | ||
335 | requirements. Of course, this needs support from PAM. | ||
336 | |||
337 | D) Versioned files | ||
338 | |||
339 | If the entire mount tree is visible at multiple locations, then | ||
340 | an underlying versioning filesystem can return a different | ||
341 | version of the file depending on the path used to access that | ||
342 | file. | ||
343 | |||
344 | An example is: | ||
345 | |||
346 | mount --make-shared / | ||
347 | mount --rbind / /view/v1 | ||
348 | mount --rbind / /view/v2 | ||
349 | mount --rbind / /view/v3 | ||
350 | mount --rbind / /view/v4 | ||
351 | |||
352 | and if /usr has a versioning filesystem mounted, then that | ||
353 | mount appears at /view/v1/usr, /view/v2/usr, /view/v3/usr and | ||
354 | /view/v4/usr too. | ||
355 | |||
356 | A user can request the v3 version of the file /usr/fs/namespace.c | ||
357 | by accessing /view/v3/usr/fs/namespace.c. The underlying | ||
358 | versioning filesystem can then decipher that the v3 version of the | ||
359 | filesystem is being requested and return the corresponding | ||
360 | inode. | ||
361 | |||
362 | 5) Detailed semantics: | ||
363 | ------------------- | ||
364 | The section below explains the detailed semantics of | ||
365 | bind, rbind, move, mount, umount and clone-namespace operations. | ||
366 | |||
367 | Note: the word 'vfsmount' and the noun 'mount' have been used | ||
368 | to mean the same thing, throughout this document. | ||
369 | |||
370 | 5a) Mount states | ||
371 | |||
372 | A given mount can be in one of the following states | ||
373 | 1) shared | ||
374 | 2) slave | ||
375 | 3) shared and slave | ||
376 | 4) private | ||
377 | 5) unbindable | ||
378 | |||
379 | A 'propagation event' is defined as an event generated on a vfsmount | ||
380 | that leads to mount or unmount actions in other vfsmounts. | ||
381 | |||
382 | A 'peer group' is defined as a group of vfsmounts that propagate | ||
383 | events to each other. | ||
384 | |||
385 | (1) Shared mounts | ||
386 | |||
387 | A 'shared mount' is defined as a vfsmount that belongs to a | ||
388 | 'peer group'. | ||
389 | |||
390 | For example: | ||
391 | mount --make-shared /mnt | ||
392 | mount --bind /mnt /tmp | ||
393 | |||
394 | The mount at /mnt and that at /tmp are both shared and belong | ||
395 | to the same peer group. Anything mounted or unmounted under | ||
396 | /mnt or /tmp is reflected in all the other mounts of its peer | ||
397 | group. | ||
398 | |||
399 | |||
400 | (2) Slave mounts | ||
401 | |||
402 | A 'slave mount' is defined as a vfsmount that receives | ||
403 | propagation events and does not forward propagation events. | ||
404 | |||
405 | A slave mount, as the name implies, has a master mount from which | ||
406 | mount/unmount events are received. Events do not propagate from | ||
407 | the slave mount to the master. Only a shared mount can be made | ||
408 | a slave by executing the following command: | ||
409 | |||
410 | mount --make-slave mount | ||
411 | |||
412 | A shared mount that is made a slave is no longer shared, unless | ||
413 | it is modified to become shared again. | ||
414 | |||
415 | (3) Shared and Slave | ||
416 | |||
417 | A vfsmount can be both shared and slave. This state | ||
418 | indicates that the mount is a slave of some vfsmount, and | ||
419 | has its own peer group too. This vfsmount receives propagation | ||
420 | events from its master vfsmount, and also forwards propagation | ||
421 | events to its 'peer group' and to its slave vfsmounts. | ||
422 | |||
423 | Strictly speaking, the vfsmount is shared having its own | ||
424 | peer group, and this peer-group is a slave of some other | ||
425 | peer group. | ||
426 | |||
427 | Only a slave vfsmount can be made 'shared and slave', either by | ||
428 | executing the following command | ||
429 | mount --make-shared mount | ||
430 | or by moving the slave vfsmount under a shared vfsmount. | ||
431 | |||
432 | (4) Private mount | ||
433 | |||
434 | A 'private mount' is defined as a vfsmount that does not | ||
435 | receive or forward any propagation events. | ||
436 | |||
437 | (5) Unbindable mount | ||
438 | |||
439 | An 'unbindable mount' is defined as a vfsmount that does not | ||
440 | receive or forward any propagation events and cannot | ||
441 | be bind mounted. | ||
442 | |||
443 | |||
444 | State diagram: | ||
445 | The state diagram below explains the state transition of a mount, | ||
446 | in response to various commands. | ||
447 | ------------------------------------------------------------------------ | ||
448 | |             |make-shared | make-slave   | make-private |make-unbindab| | ||
449 | |-------------|------------|--------------|--------------|-------------| | ||
450 | |shared       |shared      |*slave/private| private      | unbindable  | | ||
451 | |             |            |              |              |             | | ||
452 | |-------------|------------|--------------|--------------|-------------| | ||
453 | |slave        |shared      | **slave      | private      | unbindable  | | ||
454 | |             |and slave   |              |              |             | | ||
455 | |-------------|------------|--------------|--------------|-------------| | ||
456 | |shared       |shared      | slave        | private      | unbindable  | | ||
457 | |and slave    |and slave   |              |              |             | | ||
458 | |-------------|------------|--------------|--------------|-------------| | ||
459 | |private      |shared      | **private    | private      | unbindable  | | ||
460 | |-------------|------------|--------------|--------------|-------------| | ||
461 | |unbindable   |shared      |**unbindable  | private      | unbindable  | | ||
462 | ------------------------------------------------------------------------ | ||
463 | |||
464 | * If the shared mount is the only mount in its peer group, making it | ||
465 | a slave makes it private automatically, since there is no master to | ||
466 | which it can be slaved. | ||
467 | |||
468 | ** slaving a non-shared mount has no effect on the mount. | ||
469 | |||
470 | Apart from the commands listed above, the 'move' operation also changes | ||
471 | the state of a mount depending on the type of the destination mount. It is | ||
472 | explained in section 5d. | ||
473 | |||
474 | 5b) Bind semantics | ||
475 | |||
476 | Consider the following command | ||
477 | |||
478 | mount --bind A/a B/b | ||
479 | |||
480 | where 'A' is the source mount, 'a' is the dentry in the mount 'A', 'B' | ||
481 | is the destination mount and 'b' is the dentry in the destination mount. | ||
482 | |||
483 | The outcome depends on the type of mount of 'A' and 'B'. The table | ||
484 | below contains quick reference. | ||
485 | ----------------------------------------------------------------- | ||
486 | |                     BIND MOUNT OPERATION                      | | ||
487 | |**************************************************************** | ||
488 | | source(A)->| shared  | private  |      slave     | unbindable | | ||
489 | | dest(B)    |         |          |                |            | | ||
490 | |   |        |         |          |                |            | | ||
491 | |   v        |         |          |                |            | | ||
492 | |**************************************************************** | ||
493 | |  shared    | shared  |  shared  | shared & slave |  invalid   | | ||
494 | |            |         |          |                |            | | ||
495 | | non-shared | shared  | private  |      slave     |  invalid   | | ||
496 | ***************************************************************** | ||
497 | |||
498 | Details: | ||
499 | |||
500 | 1. 'A' is a shared mount and 'B' is a shared mount. A new mount 'C' | ||
501 | which is clone of 'A', is created. Its root dentry is 'a' . 'C' is | ||
502 | mounted on mount 'B' at dentry 'b'. Also new mount 'C1', 'C2', 'C3' ... | ||
503 | are created and mounted at the dentry 'b' on all mounts where 'B' | ||
504 | propagates to. A new propagation tree containing 'C1',..,'Cn' is | ||
505 | created. This propagation tree is identical to the propagation tree of | ||
506 | 'B'. And finally the peer-group of 'C' is merged with the peer group | ||
507 | of 'A'. | ||
508 | |||
509 | 2. 'A' is a private mount and 'B' is a shared mount. A new mount 'C' | ||
510 | which is clone of 'A', is created. Its root dentry is 'a'. 'C' is | ||
511 | mounted on mount 'B' at dentry 'b'. Also new mount 'C1', 'C2', 'C3' ... | ||
512 | are created and mounted at the dentry 'b' on all mounts where 'B' | ||
513 | propagates to. A new propagation tree is set containing all new mounts | ||
514 | 'C', 'C1', .., 'Cn' with exactly the same configuration as the | ||
515 | propagation tree for 'B'. | ||
516 | |||
517 | 3. 'A' is a slave mount of mount 'Z' and 'B' is a shared mount. A new | ||
518 | mount 'C' which is clone of 'A', is created. Its root dentry is 'a' . | ||
519 | 'C' is mounted on mount 'B' at dentry 'b'. Also new mounts 'C1', 'C2', | ||
520 | 'C3' ... are created and mounted at the dentry 'b' on all mounts where | ||
521 | 'B' propagates to. A new propagation tree containing the new mounts | ||
522 | 'C','C1',.. 'Cn' is created. This propagation tree is identical to the | ||
523 | propagation tree for 'B'. And finally the mount 'C' and its peer group | ||
524 | is made the slave of mount 'Z'. In other words, mount 'C' is in the | ||
525 | state 'slave and shared'. | ||
526 | |||
527 | 4. 'A' is an unbindable mount and 'B' is a shared mount. This is an | ||
528 | invalid operation. | ||
529 | |||
530 | 5. 'A' is a private mount and 'B' is a non-shared(private or slave or | ||
531 | unbindable) mount. A new mount 'C' which is clone of 'A', is created. | ||
532 | Its root dentry is 'a'. 'C' is mounted on mount 'B' at dentry 'b'. | ||
533 | |||
534 | 6. 'A' is a shared mount and 'B' is a non-shared mount. A new mount 'C' | ||
535 | which is a clone of 'A' is created. Its root dentry is 'a'. 'C' is | ||
536 | mounted on mount 'B' at dentry 'b'. 'C' is made a member of the | ||
537 | peer-group of 'A'. | ||
538 | |||
539 | 7. 'A' is a slave mount of mount 'Z' and 'B' is a non-shared mount. A | ||
540 | new mount 'C' which is a clone of 'A' is created. Its root dentry is | ||
541 | 'a'. 'C' is mounted on mount 'B' at dentry 'b'. Also 'C' is set as a | ||
542 | slave mount of 'Z'. In other words 'A' and 'C' are both slave mounts of | ||
543 | 'Z'. All mount/unmount events on 'Z' propagates to 'A' and 'C'. But | ||
544 | mount/unmount on 'A' do not propagate anywhere else. Similarly | ||
545 | mount/unmount on 'C' do not propagate anywhere else. | ||
546 | |||
547 | 8. 'A' is an unbindable mount and 'B' is a non-shared mount. This is an | ||
548 | invalid operation. An unbindable mount cannot be bind mounted. | ||
549 | |||
550 | 5c) Rbind semantics | ||
551 | |||
552 | rbind is the same as bind, except that bind replicates only the specified | ||
553 | mount while rbind replicates all the mounts in the tree belonging to the | ||
554 | specified mount. In other words, rbind is bind applied to every mount in the tree. | ||
555 | |||
556 | If the source tree that is rbound has some unbindable mounts, | ||
557 | then the subtree under the unbindable mount is pruned in the new | ||
558 | location. | ||
559 | |||
560 | e.g. let's say we have the following mount tree: | ||
561 | |||
562 |                    A | ||
563 |                  /   \ | ||
564 |                 B     C | ||
565 |                / \   / \ | ||
566 |               D   E F   G | ||
567 | |||
568 | Let's say all the mounts in the tree, except the mount C, are | ||
569 | of a type other than unbindable. | ||
570 | |||
571 | If this tree is rbound to, say, Z, | ||
572 | |||
573 | we will have the following tree at the new location: | ||
574 | |||
575 |                Z | ||
576 |                | | ||
577 |                A' | ||
578 |               / | ||
579 |              B'          Note how the tree under C is pruned | ||
580 |             / \          in the new location. | ||
581 |            D'  E' | ||
582 | |||
583 | |||
584 | |||
585 | 5d) Move semantics | ||
586 | |||
587 | Consider the following command | ||
588 | |||
589 | mount --move A B/b | ||
590 | |||
591 | where 'A' is the source mount, 'B' is the destination mount and 'b' is | ||
592 | the dentry in the destination mount. | ||
593 | |||
594 | The outcome depends on the type of the mount of 'A' and 'B'. The table | ||
595 | below is a quick reference. | ||
596 | ----------------------------------------------------------------- | ||
597 | |                     MOVE MOUNT OPERATION                      | | ||
598 | |**************************************************************** | ||
599 | | source(A)->| shared  | private  |      slave     | unbindable | | ||
600 | | dest(B)    |         |          |                |            | | ||
601 | |   |        |         |          |                |            | | ||
602 | |   v        |         |          |                |            | | ||
603 | |**************************************************************** | ||
604 | |  shared    | shared  |  shared  |shared and slave|  invalid   | | ||
605 | |            |         |          |                |            | | ||
606 | | non-shared | shared  | private  |      slave     | unbindable | | ||
607 | ***************************************************************** | ||
608 | NOTE: moving a mount residing under a shared mount is invalid. | ||
609 | |||
610 | Details follow: | ||
611 | |||
612 | 1. 'A' is a shared mount and 'B' is a shared mount. The mount 'A' is | ||
613 | mounted on mount 'B' at dentry 'b'. Also new mounts 'A1', 'A2'...'An' | ||
614 | are created and mounted at dentry 'b' on all mounts that receive | ||
615 | propagation from mount 'B'. A new propagation tree is created in the | ||
616 | exact same configuration as that of 'B'. This new propagation tree | ||
617 | contains all the new mounts 'A1', 'A2'... 'An'. And this new | ||
618 | propagation tree is appended to the already existing propagation tree | ||
619 | of 'A'. | ||
620 | |||
621 | 2. 'A' is a private mount and 'B' is a shared mount. The mount 'A' is | ||
622 | mounted on mount 'B' at dentry 'b'. Also new mount 'A1', 'A2'... 'An' | ||
623 | are created and mounted at dentry 'b' on all mounts that receive | ||
624 | propagation from mount 'B'. The mount 'A' becomes a shared mount and a | ||
625 | propagation tree is created which is identical to that of | ||
626 | 'B'. This new propagation tree contains all the new mounts 'A1', | ||
627 | 'A2'... 'An'. | ||
628 | |||
629 | 3. 'A' is a slave mount of mount 'Z' and 'B' is a shared mount. The | ||
630 | mount 'A' is mounted on mount 'B' at dentry 'b'. Also new mounts 'A1', | ||
631 | 'A2'... 'An' are created and mounted at dentry 'b' on all mounts that | ||
632 | receive propagation from mount 'B'. A new propagation tree is created | ||
633 | in the exact same configuration as that of 'B'. This new propagation | ||
634 | tree contains all the new mounts 'A1', 'A2'... 'An'. And this new | ||
635 | propagation tree is appended to the already existing propagation tree of | ||
636 | 'A'. Mount 'A' continues to be the slave mount of 'Z' but it also | ||
637 | becomes 'shared'. | ||
638 | |||
639 | 4. 'A' is an unbindable mount and 'B' is a shared mount. The operation | ||
640 | is invalid, because mounting anything on the shared mount 'B' can | ||
641 | create new mounts that get mounted on the mounts that receive | ||
642 | propagation from 'B', and since the mount 'A' is unbindable, cloning | ||
643 | it to mount at other mountpoints is not possible. | ||
644 | |||
645 | 5. 'A' is a private mount and 'B' is a non-shared(private or slave or | ||
646 | unbindable) mount. The mount 'A' is mounted on mount 'B' at dentry 'b'. | ||
647 | |||
648 | 6. 'A' is a shared mount and 'B' is a non-shared mount. The mount 'A' | ||
649 | is mounted on mount 'B' at dentry 'b'. Mount 'A' continues to be a | ||
650 | shared mount. | ||
651 | |||
652 | 7. 'A' is a slave mount of mount 'Z' and 'B' is a non-shared mount. | ||
653 | The mount 'A' is mounted on mount 'B' at dentry 'b'. Mount 'A' | ||
654 | continues to be a slave mount of mount 'Z'. | ||
655 | |||
656 | 8. 'A' is an unbindable mount and 'B' is a non-shared mount. The mount | ||
657 | 'A' is mounted on mount 'B' at dentry 'b'. Mount 'A' continues to be an | ||
658 | unbindable mount. | ||
659 | |||
660 | 5e) Mount semantics | ||
661 | |||
662 | Consider the following command | ||
663 | |||
664 | mount device B/b | ||
665 | |||
666 | 'B' is the destination mount and 'b' is the dentry in the destination | ||
667 | mount. | ||
668 | |||
669 | The above operation is the same as bind operation with the exception | ||
670 | that the source mount is always a private mount. | ||
671 | |||
672 | |||
673 | 5f) Unmount semantics | ||
674 | |||
675 | Consider the following command | ||
676 | |||
677 | umount A | ||
678 | |||
679 | where 'A' is a mount mounted on mount 'B' at dentry 'b'. | ||
680 | |||
681 | If mount 'B' is shared, then all most-recently-mounted mounts at dentry | ||
682 | 'b' on mounts that receive propagation from mount 'B' and do not have | ||
683 | sub-mounts within them are unmounted. | ||
684 | |||
685 | Example: Let's say 'B1', 'B2', 'B3' are shared mounts that propagate to | ||
686 | each other. | ||
687 | |||
688 | Let's say 'A1', 'A2', 'A3' are first mounted at dentry 'b' on mounts | ||
689 | 'B1', 'B2' and 'B3' respectively. | ||
690 | |||
691 | Let's say 'C1', 'C2', 'C3' are next mounted at the same dentry 'b' on | ||
692 | mounts 'B1', 'B2' and 'B3' respectively. | ||
693 | |||
694 | If 'C1' is unmounted, all the mounts that are most-recently-mounted on | ||
695 | 'B1' and on the mounts that 'B1' propagates-to are unmounted. | ||
696 | |||
697 | 'B1' propagates to 'B2' and 'B3'. And the most recently mounted mount | ||
698 | on 'B2' at dentry 'b' is 'C2', and that of mount 'B3' is 'C3'. | ||
699 | |||
700 | So all 'C1', 'C2' and 'C3' should be unmounted. | ||
701 | |||
702 | If either 'C2' or 'C3' has some child mounts, then that mount is not | ||
703 | unmounted, but all the other mounts are unmounted. However, if 'C1' | ||
704 | itself has some sub-mounts, the umount operation | ||
705 | fails entirely. | ||
706 | |||
707 | 5g) Clone Namespace | ||
708 | |||
709 | A cloned namespace contains the same mounts as its parent | ||
710 | namespace. | ||
711 | |||
712 | Let's say 'A' and 'B' are the corresponding mounts in the parent and the | ||
713 | child namespace. | ||
714 | |||
715 | If 'A' is shared, then 'B' is also shared and 'A' and 'B' propagate to | ||
716 | each other. | ||
717 | |||
718 | If 'A' is a slave mount of 'Z', then 'B' is also a slave mount of | ||
719 | 'Z'. | ||
720 | |||
721 | If 'A' is a private mount, then 'B' is a private mount too. | ||
722 | |||
723 | If 'A' is an unbindable mount, then 'B' is an unbindable mount too. | ||
724 | |||
725 | |||
726 | 6) Quiz | ||
727 | |||
728 | A. What is the result of the following command sequence? | ||
729 | |||
730 | mount --bind /mnt /mnt | ||
731 | mount --make-shared /mnt | ||
732 | mount --bind /mnt /tmp | ||
733 | mount --move /tmp /mnt/1 | ||
734 | |||
735 | What should the contents of /mnt, /mnt/1 and /mnt/1/1 be? | ||
736 | Should they all be identical? Or should /mnt and /mnt/1 be | ||
737 | identical only? | ||
738 | |||
739 | |||
740 | B. What is the result of the following command sequence? | ||
741 | |||
742 | mount --make-rshared / | ||
743 | mkdir -p /v/1 | ||
744 | mount --rbind / /v/1 | ||
745 | |||
746 | What should the content of /v/1/v/1 be? | ||
747 | |||
748 | |||
749 | C. What is the result of the following command sequence? | ||
750 | |||
751 | mount --bind /mnt /mnt | ||
752 | mount --make-shared /mnt | ||
753 | mkdir -p /mnt/1/2/3 /mnt/1/test | ||
754 | mount --bind /mnt/1 /tmp | ||
755 | mount --make-slave /mnt | ||
756 | mount --make-shared /mnt | ||
757 | mount --bind /mnt/1/2 /tmp1 | ||
758 | mount --make-slave /mnt | ||
759 | |||
760 | At this point we have the first mount at /tmp and | ||
761 | its root dentry is 1. Let's call this mount 'A'. | ||
762 | Then we have a second mount at /tmp1 with root | ||
763 | dentry 2. Let's call this mount 'B'. | ||
764 | Next we have a third mount at /mnt with root dentry | ||
765 | mnt. Let's call this mount 'C'. | ||
766 | |||
767 | 'B' is the slave of 'A' and 'C' is a slave of 'B' | ||
768 | A -> B -> C | ||
769 | |||
770 | At this point, if we execute the following command: | ||
771 | |||
772 | mount --bind /bin /tmp/test | ||
773 | |||
774 | The mount is attempted on 'A'. | ||
775 | |||
776 | Will the mount propagate to 'B' and 'C'? | ||
777 | |||
778 | What would the contents of | ||
779 | /mnt/1/test be? | ||
780 | |||
781 | 7) FAQ | ||
782 | |||
783 | Q1. Why is a bind mount needed? How is it different from a symbolic link? | ||
784 | Symbolic links can get stale if the destination mount gets | ||
785 | unmounted or moved. Bind mounts continue to exist even if the | ||
786 | other mount is unmounted or moved. | ||
787 | |||
788 | Q2. Why can't the shared subtree be implemented using exportfs? | ||
789 | |||
790 | exportfs is a heavyweight way of accomplishing part of what | ||
791 | shared subtrees can do. I cannot imagine a way to implement the | ||
792 | semantics of slave mounts using exportfs. | ||
793 | |||
794 | Q3. Why is an unbindable mount needed? | ||
795 | |||
796 | Let's say we want to replicate the mount tree at multiple | ||
797 | locations within the same subtree. | ||
798 | |||
799 | If one rbind mounts a tree within the same subtree 'n' times, | ||
800 | the number of mounts created is an exponential function of 'n'. | ||
801 | Having an unbindable mount can help prune the unneeded bind | ||
802 | mounts. Here is an example. | ||
803 | |||
804 | step 1: | ||
805 | Let's say the root tree has just two directories with | ||
806 | one vfsmount. | ||
807 |               root | ||
808 |              /    \ | ||
809 |            tmp    usr | ||
810 | |||
811 | And we want to replicate the tree at multiple | ||
812 | mountpoints under /root/tmp | ||
813 | |||
814 | step2: | ||
815 | mount --make-shared /root | ||
816 | |||
817 | mkdir -p /tmp/m1 | ||
818 | |||
819 | mount --rbind /root /tmp/m1 | ||
820 | |||
821 | the new tree now looks like this: | ||
822 | |||
823 |               root | ||
824 |              /    \ | ||
825 |            tmp    usr | ||
826 |            / | ||
827 |           m1 | ||
828 |          /  \ | ||
829 |        tmp  usr | ||
830 |        / | ||
831 |       m1 | ||
832 | |||
833 | it has two vfsmounts | ||
834 | |||
835 | step3: | ||
836 | mkdir -p /tmp/m2 | ||
837 | mount --rbind /root /tmp/m2 | ||
838 | |||
839 | the new tree now looks like this: | ||
840 | |||
841 | root | ||
842 | / \ | ||
843 | tmp usr | ||
844 | / \ | ||
845 | m1 m2 | ||
846 | / \ / \ | ||
847 | tmp usr tmp usr | ||
848 | / \ / | ||
849 | m1 m2 m1 | ||
850 | / \ / \ | ||
851 | tmp usr tmp usr | ||
852 | / / \ | ||
853 | m1 m1 m2 | ||
854 | / \ | ||
855 | tmp usr | ||
856 | / \ | ||
857 | m1 m2 | ||
858 | |||
859 | it has 6 vfsmounts | ||
860 | |||
861 | step 4: | ||
862 | mkdir -p /tmp/m3 | ||
863 | mount --rbind /root /tmp/m3 | ||
864 | |||
865 | I won't draw the tree, but it has 24 vfsmounts. | ||
866 | |||
867 | |||
868 | At step i the number of vfsmounts is V[i] = i*V[i-1]. | ||
869 | This is an exponential function, and this tree has way more | ||
870 | mounts than what we really needed in the first place. | ||
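Working the recurrence out from the single vfsmount we started with makes
the growth obvious:

	V[1] = 1                  (the original root vfsmount)
	V[2] = 2 * V[1] =   2     (after the first rbind)
	V[3] = 3 * V[2] =   6     (after the second rbind)
	V[4] = 4 * V[3] =  24     (after the third rbind)
	V[5] = 5 * V[4] = 120     (after a fourth rbind)

	i.e. V[i] = i!, which grows even faster than a fixed-base exponential.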
871 | |||
872 | One could use a series of umounts at each step to prune | ||
873 | out the unneeded mounts. But there is a better solution: | ||
874 | unbindable mounts come in handy here. | ||
875 | |||
876 | step 1: | ||
877 | Let's say the root tree has just two directories with | ||
878 | one vfsmount. | ||
879 |               root | ||
880 |              /    \ | ||
881 |            tmp    usr | ||
882 | |||
883 | How do we set up the same tree at multiple locations under | ||
884 | /root/tmp? | ||
885 | |||
886 | step2: | ||
887 | mount --bind /root/tmp /root/tmp | ||
888 | |||
889 | mount --make-rshared /root | ||
890 | mount --make-unbindable /root/tmp | ||
891 | |||
892 | mkdir -p /tmp/m1 | ||
893 | |||
894 | mount --rbind /root /tmp/m1 | ||
895 | |||
896 | the new tree now looks like this: | ||
897 | |||
898 |               root | ||
899 |              /    \ | ||
900 |            tmp    usr | ||
901 |            / | ||
902 |           m1 | ||
903 |          /  \ | ||
904 |        tmp  usr | ||
905 | |||
906 | step3: | ||
907 | mkdir -p /tmp/m2 | ||
908 | mount --rbind /root /tmp/m2 | ||
909 | |||
910 | the new tree now looks like this: | ||
911 | |||
912 |                root | ||
913 |               /    \ | ||
914 |             tmp    usr | ||
915 |             /  \ | ||
916 |           m1    m2 | ||
917 |          /  \   /  \ | ||
918 |        tmp  usr tmp  usr | ||
919 | |||
920 | step4: | ||
921 | |||
922 | mkdir -p /tmp/m3 | ||
923 | mount --rbind /root /tmp/m3 | ||
924 | |||
925 | the new tree now looks like this: | ||
926 | |||
927 |                  root | ||
928 |                 /    \ | ||
929 |               tmp    usr | ||
930 |              /  |  \ | ||
931 |            m1   m2   m3 | ||
932 |           /  \  /  \  /  \ | ||
933 |         tmp usr tmp usr tmp usr | ||
934 | |||
935 | 8) Implementation | ||
936 | |||
937 | 8A) Datastructure | ||
938 | |||
939 | Four new fields are introduced to struct vfsmount: | ||
940 | ->mnt_share | ||
941 | ->mnt_slave_list | ||
942 | ->mnt_slave | ||
943 | ->mnt_master | ||
944 | |||
945 | ->mnt_share links together all the mounts to/from which this vfsmount | ||
946 | sends/receives propagation events. | ||
947 | |||
948 | ->mnt_slave_list links all the mounts to which this vfsmount | ||
949 | propagates. | ||
950 | |||
951 | ->mnt_slave links together all the slaves that its master vfsmount | ||
952 | propagates to. | ||
953 | |||
954 | ->mnt_master points to the master vfsmount from which this vfsmount | ||
955 | receives propagation. | ||
956 | |||
957 | ->mnt_flags takes two more flags to indicate the propagation status of | ||
958 | the vfsmount. MNT_SHARE indicates that the vfsmount is a shared | ||
959 | vfsmount. MNT_UNCLONABLE indicates that the vfsmount cannot be | ||
960 | replicated. | ||
961 | |||
962 | All the shared vfsmounts in a peer group form a cyclic list through | ||
963 | ->mnt_share. | ||
964 | |||
965 | All vfsmounts with the same ->mnt_master form a cyclic list anchored | ||
966 | in ->mnt_master->mnt_slave_list and going through ->mnt_slave. | ||
967 | |||
968 | ->mnt_master can point to arbitrary (and possibly different) members | ||
969 | of the master peer group. To find all immediate slaves of a peer group | ||
970 | you need to go through _all_ the ->mnt_slave_lists of its members. | ||
971 | Conceptually it's just a single set - distribution among the | ||
972 | individual lists does not affect propagation or the way propagation | ||
973 | tree is modified by operations. | ||
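For illustration only (field layout abridged and not guaranteed to match
include/linux/mount.h exactly): the four fields are plain list linkage plus a
back-pointer, so walking a peer group is just a circular-list walk.

------------------------------------------------------------------------
/* Abridged, illustrative sketch -- see include/linux/mount.h and
 * fs/pnode.c for the real definitions.
 */
struct vfsmount {
	/* ... existing fields (mnt_parent, mnt_mountpoint, ...) ... */
	struct list_head mnt_share;	/* circular list of peers (shared mounts) */
	struct list_head mnt_slave_list;/* mounts that are slaved to this one */
	struct list_head mnt_slave;	/* our entry in the master's mnt_slave_list */
	struct vfsmount *mnt_master;	/* mount we receive propagation from */
	/* ... */
};

/* Visit every peer of 'mnt' (mnt itself is not visited by the walk). */
static void for_each_peer(struct vfsmount *mnt)
{
	struct vfsmount *p;

	list_for_each_entry(p, &mnt->mnt_share, mnt_share) {
		/* 'p' receives every mount/umount event that 'mnt' does */
	}
}
------------------------------------------------------------------------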
974 | |||
975 | An example propagation tree is shown in the figure below. | ||
976 | [ NOTE: Though it looks like a forest, if we consider all the shared | ||
977 | mounts as a conceptual entity called a 'pnode', it becomes a tree. ] | ||
978 | |||
979 | |||
980 |                 A <--> B <--> C <---> D | ||
981 |                /|\            /|      |\ | ||
982 |               / F G           J K     H I | ||
983 |              / | ||
984 |             E<-->K | ||
985 |                 /|\ | ||
986 |                M L N | ||
987 | |||
988 | In the above figure A, B, C and D are all shared and propagate to each | ||
989 | other. 'A' has got 3 slave mounts 'E', 'F' and 'G'. 'C' has got 2 slave | ||
990 | mounts 'J' and 'K', and 'D' has got two slave mounts 'H' and 'I'. | ||
991 | 'E' is also shared with 'K' and they propagate to each other. And | ||
992 | 'K' has 3 slaves 'M', 'L' and 'N'. | ||
993 | |||
994 | A's ->mnt_share links with the ->mnt_share of 'B' 'C' and 'D' | ||
995 | |||
996 | A's ->mnt_slave_list links with ->mnt_slave of 'E', 'K', 'F' and 'G' | ||
997 | |||
998 | E's ->mnt_share links with ->mnt_share of K | ||
999 | 'E', 'K', 'F', 'G' have their ->mnt_master point to struct | ||
1000 | vfsmount of 'A' | ||
1001 | 'M', 'L', 'N' have their ->mnt_master point to struct vfsmount of 'K' | ||
1002 | K's ->mnt_slave_list links with ->mnt_slave of 'M', 'L' and 'N' | ||
1003 | |||
1004 | C's ->mnt_slave_list links with ->mnt_slave of 'J' and 'K' | ||
1005 | J and K's ->mnt_master points to struct vfsmount of C | ||
1006 | and finally D's ->mnt_slave_list links with ->mnt_slave of 'H' and 'I' | ||
1007 | 'H' and 'I' have their ->mnt_master pointing to struct vfsmount of 'D'. | ||
1008 | |||
1009 | |||
1010 | NOTE: The propagation tree is orthogonal to the mount tree. | ||
1011 | |||
1012 | |||
1013 | 8B Algorithm: | ||
1014 | |||
1015 | The crux of the implementation resides in the rbind/move operation. | ||
1016 | |||
1017 | The overall algorithm breaks the operation into 3 phases: (look at | ||
1018 | attach_recursive_mnt() and propagate_mnt()) | ||
1019 | |||
1020 | 1. prepare phase. | ||
1021 | 2. commit phase. | ||
1022 | 3. abort phase. | ||
1023 | |||
1024 | Prepare phase: | ||
1025 | |||
1026 | for each mount in the source tree: | ||
1027 | a) Create the necessary number of mount trees to | ||
1028 | be attached to each of the mounts that receive | ||
1029 | propagation from the destination mount. | ||
1030 | b) Do not attach any of the trees to its destination. | ||
1031 | However note down its ->mnt_parent and ->mnt_mountpoint | ||
1032 | c) Link all the new mounts to form a propagation tree that | ||
1033 | is identical to the propagation tree of the destination | ||
1034 | mount. | ||
1035 | |||
1036 | If this phase is successful, there should be 'n' new | ||
1037 | propagation trees; where 'n' is the number of mounts in the | ||
1038 | source tree. Go to the commit phase | ||
1039 | |||
1040 | Also there should be 'm' new mount trees, where 'm' is | ||
1041 | the number of mounts to which the destination mount | ||
1042 | propagates. | ||
1043 | |||
1044 | If any memory allocation fails, go to the abort phase. | ||
1045 | |||
1046 | Commit phase | ||
1047 | attach each of the mount trees to their corresponding | ||
1048 | destination mounts. | ||
1049 | |||
1050 | Abort phase | ||
1051 | delete all the newly created trees. | ||
1052 | |||
1053 | NOTE: all the propagation related functionality resides in the file | ||
1054 | pnode.c | ||
1055 | |||
1056 | |||
1057 | ------------------------------------------------------------------------ | ||
1058 | |||
1059 | version 0.1 (created the initial document, Ram Pai linuxram@us.ibm.com) | ||
1060 | version 0.2 (Incorporated comments from Al Viro) | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 296bc03d1cf1..91d5ef3397be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -324,7 +324,7 @@ menu "Kernel Features" | |||
324 | 324 | ||
325 | config SMP | 325 | config SMP |
326 | bool "Symmetric Multi-Processing (EXPERIMENTAL)" | 326 | bool "Symmetric Multi-Processing (EXPERIMENTAL)" |
327 | depends on EXPERIMENTAL && BROKEN #&& n | 327 | depends on EXPERIMENTAL && REALVIEW_MPCORE |
328 | help | 328 | help |
329 | This enables support for systems with more than one CPU. If you have | 329 | This enables support for systems with more than one CPU. If you have |
330 | a system with only one CPU, like most personal computers, say N. If | 330 | a system with only one CPU, like most personal computers, say N. If |
@@ -585,7 +585,7 @@ config FPE_NWFPE | |||
585 | 585 | ||
586 | config FPE_NWFPE_XP | 586 | config FPE_NWFPE_XP |
587 | bool "Support extended precision" | 587 | bool "Support extended precision" |
588 | depends on FPE_NWFPE && !CPU_BIG_ENDIAN | 588 | depends on FPE_NWFPE |
589 | help | 589 | help |
590 | Say Y to include 80-bit support in the kernel floating-point | 590 | Say Y to include 80-bit support in the kernel floating-point |
591 | emulator. Otherwise, only 32 and 64-bit support is compiled in. | 591 | emulator. Otherwise, only 32 and 64-bit support is compiled in. |
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c index df140962bb0f..6851abaf5524 100644 --- a/arch/arm/mach-ixp2000/core.c +++ b/arch/arm/mach-ixp2000/core.c | |||
@@ -84,63 +84,54 @@ static struct map_desc ixp2000_io_desc[] __initdata = { | |||
84 | .virtual = IXP2000_CAP_VIRT_BASE, | 84 | .virtual = IXP2000_CAP_VIRT_BASE, |
85 | .pfn = __phys_to_pfn(IXP2000_CAP_PHYS_BASE), | 85 | .pfn = __phys_to_pfn(IXP2000_CAP_PHYS_BASE), |
86 | .length = IXP2000_CAP_SIZE, | 86 | .length = IXP2000_CAP_SIZE, |
87 | .type = MT_DEVICE | 87 | .type = MT_IXP2000_DEVICE, |
88 | }, { | 88 | }, { |
89 | .virtual = IXP2000_INTCTL_VIRT_BASE, | 89 | .virtual = IXP2000_INTCTL_VIRT_BASE, |
90 | .pfn = __phys_to_pfn(IXP2000_INTCTL_PHYS_BASE), | 90 | .pfn = __phys_to_pfn(IXP2000_INTCTL_PHYS_BASE), |
91 | .length = IXP2000_INTCTL_SIZE, | 91 | .length = IXP2000_INTCTL_SIZE, |
92 | .type = MT_DEVICE | 92 | .type = MT_IXP2000_DEVICE, |
93 | }, { | 93 | }, { |
94 | .virtual = IXP2000_PCI_CREG_VIRT_BASE, | 94 | .virtual = IXP2000_PCI_CREG_VIRT_BASE, |
95 | .pfn = __phys_to_pfn(IXP2000_PCI_CREG_PHYS_BASE), | 95 | .pfn = __phys_to_pfn(IXP2000_PCI_CREG_PHYS_BASE), |
96 | .length = IXP2000_PCI_CREG_SIZE, | 96 | .length = IXP2000_PCI_CREG_SIZE, |
97 | .type = MT_DEVICE | 97 | .type = MT_IXP2000_DEVICE, |
98 | }, { | 98 | }, { |
99 | .virtual = IXP2000_PCI_CSR_VIRT_BASE, | 99 | .virtual = IXP2000_PCI_CSR_VIRT_BASE, |
100 | .pfn = __phys_to_pfn(IXP2000_PCI_CSR_PHYS_BASE), | 100 | .pfn = __phys_to_pfn(IXP2000_PCI_CSR_PHYS_BASE), |
101 | .length = IXP2000_PCI_CSR_SIZE, | 101 | .length = IXP2000_PCI_CSR_SIZE, |
102 | .type = MT_DEVICE | 102 | .type = MT_IXP2000_DEVICE, |
103 | }, { | 103 | }, { |
104 | .virtual = IXP2000_MSF_VIRT_BASE, | 104 | .virtual = IXP2000_MSF_VIRT_BASE, |
105 | .pfn = __phys_to_pfn(IXP2000_MSF_PHYS_BASE), | 105 | .pfn = __phys_to_pfn(IXP2000_MSF_PHYS_BASE), |
106 | .length = IXP2000_MSF_SIZE, | 106 | .length = IXP2000_MSF_SIZE, |
107 | .type = MT_DEVICE | 107 | .type = MT_IXP2000_DEVICE, |
108 | }, { | 108 | }, { |
109 | .virtual = IXP2000_PCI_IO_VIRT_BASE, | 109 | .virtual = IXP2000_PCI_IO_VIRT_BASE, |
110 | .pfn = __phys_to_pfn(IXP2000_PCI_IO_PHYS_BASE), | 110 | .pfn = __phys_to_pfn(IXP2000_PCI_IO_PHYS_BASE), |
111 | .length = IXP2000_PCI_IO_SIZE, | 111 | .length = IXP2000_PCI_IO_SIZE, |
112 | .type = MT_DEVICE | 112 | .type = MT_IXP2000_DEVICE, |
113 | }, { | 113 | }, { |
114 | .virtual = IXP2000_PCI_CFG0_VIRT_BASE, | 114 | .virtual = IXP2000_PCI_CFG0_VIRT_BASE, |
115 | .pfn = __phys_to_pfn(IXP2000_PCI_CFG0_PHYS_BASE), | 115 | .pfn = __phys_to_pfn(IXP2000_PCI_CFG0_PHYS_BASE), |
116 | .length = IXP2000_PCI_CFG0_SIZE, | 116 | .length = IXP2000_PCI_CFG0_SIZE, |
117 | .type = MT_DEVICE | 117 | .type = MT_IXP2000_DEVICE, |
118 | }, { | 118 | }, { |
119 | .virtual = IXP2000_PCI_CFG1_VIRT_BASE, | 119 | .virtual = IXP2000_PCI_CFG1_VIRT_BASE, |
120 | .pfn = __phys_to_pfn(IXP2000_PCI_CFG1_PHYS_BASE), | 120 | .pfn = __phys_to_pfn(IXP2000_PCI_CFG1_PHYS_BASE), |
121 | .length = IXP2000_PCI_CFG1_SIZE, | 121 | .length = IXP2000_PCI_CFG1_SIZE, |
122 | .type = MT_DEVICE | 122 | .type = MT_IXP2000_DEVICE, |
123 | } | 123 | } |
124 | }; | 124 | }; |
125 | 125 | ||
126 | void __init ixp2000_map_io(void) | 126 | void __init ixp2000_map_io(void) |
127 | { | 127 | { |
128 | extern unsigned int processor_id; | ||
129 | |||
130 | /* | 128 | /* |
131 | * On IXP2400 CPUs we need to use MT_IXP2000_DEVICE for | 129 | * On IXP2400 CPUs we need to use MT_IXP2000_DEVICE so that |
132 | * tweaking the PMDs so XCB=101. On IXP2800s we use the normal | 130 | * XCB=101 (to avoid triggering erratum #66), and given that |
133 | * PMD flags. | 131 | * this mode speeds up I/O accesses and we have write buffer |
132 | * flushes in the right places anyway, it doesn't hurt to use | ||
133 | * XCB=101 for all IXP2000s. | ||
134 | */ | 134 | */ |
135 | if ((processor_id & 0xfffffff0) == 0x69054190) { | ||
136 | int i; | ||
137 | |||
138 | printk(KERN_INFO "Enabling IXP2400 erratum #66 workaround\n"); | ||
139 | |||
140 | for(i=0;i<ARRAY_SIZE(ixp2000_io_desc);i++) | ||
141 | ixp2000_io_desc[i].type = MT_IXP2000_DEVICE; | ||
142 | } | ||
143 | |||
144 | iotable_init(ixp2000_io_desc, ARRAY_SIZE(ixp2000_io_desc)); | 135 | iotable_init(ixp2000_io_desc, ARRAY_SIZE(ixp2000_io_desc)); |
145 | 136 | ||
146 | /* Set slowport to 8-bit mode. */ | 137 | /* Set slowport to 8-bit mode. */ |
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index 4b63dc9eabfe..129976866d47 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig | |||
@@ -8,4 +8,13 @@ config MACH_REALVIEW_EB | |||
8 | help | 8 | help |
9 | Include support for the ARM(R) RealView Emulation Baseboard platform. | 9 | Include support for the ARM(R) RealView Emulation Baseboard platform. |
10 | 10 | ||
11 | config REALVIEW_MPCORE | ||
12 | bool "Support MPcore tile" | ||
13 | depends on MACH_REALVIEW_EB | ||
14 | help | ||
15 | Enable support for the MPCore tile on the Realview platform. | ||
16 | Since there are device address and interrupt differences, a | ||
17 | kernel built with this option enabled is not compatible with | ||
18 | other tiles. | ||
19 | |||
11 | endmenu | 20 | endmenu |
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile index 8d37ea1605fd..011a85c10627 100644 --- a/arch/arm/mach-realview/Makefile +++ b/arch/arm/mach-realview/Makefile | |||
@@ -4,3 +4,4 @@ | |||
4 | 4 | ||
5 | obj-y := core.o clock.o | 5 | obj-y := core.o clock.o |
6 | obj-$(CONFIG_MACH_REALVIEW_EB) += realview_eb.o | 6 | obj-$(CONFIG_MACH_REALVIEW_EB) += realview_eb.o |
7 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o | ||
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h index 575599db74db..d83e8bad2038 100644 --- a/arch/arm/mach-realview/core.h +++ b/arch/arm/mach-realview/core.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #define __ASM_ARCH_REALVIEW_H | 23 | #define __ASM_ARCH_REALVIEW_H |
24 | 24 | ||
25 | #include <asm/hardware/amba.h> | 25 | #include <asm/hardware/amba.h> |
26 | #include <asm/leds.h> | ||
26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
27 | 28 | ||
28 | #define __io_address(n) __io(IO_ADDRESS(n)) | 29 | #define __io_address(n) __io(IO_ADDRESS(n)) |
diff --git a/arch/arm/mach-realview/headsmp.S b/arch/arm/mach-realview/headsmp.S new file mode 100644 index 000000000000..4075473cf68a --- /dev/null +++ b/arch/arm/mach-realview/headsmp.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-realview/headsmp.S | ||
3 | * | ||
4 | * Copyright (c) 2003 ARM Limited | ||
5 | * All Rights Reserved | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/linkage.h> | ||
12 | #include <linux/init.h> | ||
13 | |||
14 | __INIT | ||
15 | |||
16 | /* | ||
17 | * Realview specific entry point for secondary CPUs. This provides | ||
18 | * a "holding pen" into which all secondary cores are held until we're | ||
19 | * ready for them to initialise. | ||
20 | */ | ||
21 | ENTRY(realview_secondary_startup) | ||
22 | mrc p15, 0, r0, c0, c0, 5 | ||
23 | and r0, r0, #15 | ||
24 | adr r4, 1f | ||
25 | ldmia r4, {r5, r6} | ||
26 | sub r4, r4, r5 | ||
27 | add r6, r6, r4 | ||
28 | pen: ldr r7, [r6] | ||
29 | cmp r7, r0 | ||
30 | bne pen | ||
31 | |||
32 | /* | ||
33 | * we've been released from the holding pen: secondary_stack | ||
34 | * should now contain the SVC stack for this core | ||
35 | */ | ||
36 | b secondary_startup | ||
37 | |||
38 | 1: .long . | ||
39 | .long pen_release | ||
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c new file mode 100644 index 000000000000..9844644d0fb5 --- /dev/null +++ b/arch/arm/mach-realview/platsmp.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-realview/platsmp.c | ||
3 | * | ||
4 | * Copyright (C) 2002 ARM Ltd. | ||
5 | * All Rights Reserved | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/smp.h> | ||
16 | |||
17 | #include <asm/cacheflush.h> | ||
18 | #include <asm/hardware/arm_scu.h> | ||
19 | #include <asm/hardware.h> | ||
20 | |||
21 | #include "core.h" | ||
22 | |||
23 | extern void realview_secondary_startup(void); | ||
24 | |||
25 | /* | ||
26 | * control for which core is the next to come out of the secondary | ||
27 | * boot "holding pen" | ||
28 | */ | ||
29 | volatile int __cpuinitdata pen_release = -1; | ||
30 | |||
31 | static unsigned int __init get_core_count(void) | ||
32 | { | ||
33 | unsigned int ncores; | ||
34 | |||
35 | ncores = __raw_readl(IO_ADDRESS(REALVIEW_MPCORE_SCU_BASE) + SCU_CONFIG); | ||
36 | |||
37 | return (ncores & 0x03) + 1; | ||
38 | } | ||
39 | |||
40 | static DEFINE_SPINLOCK(boot_lock); | ||
41 | |||
42 | void __cpuinit platform_secondary_init(unsigned int cpu) | ||
43 | { | ||
44 | /* | ||
45 | * the primary core may have used a "cross call" soft interrupt | ||
46 | * to get this processor out of WFI in the BootMonitor - make | ||
47 | * sure that we are no longer being sent this soft interrupt | ||
48 | */ | ||
49 | smp_cross_call_done(cpumask_of_cpu(cpu)); | ||
50 | |||
51 | /* | ||
52 | * if any interrupts are already enabled for the primary | ||
53 | * core (e.g. timer irq), then they will not have been enabled | ||
54 | * for us: do so | ||
55 | */ | ||
56 | gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE)); | ||
57 | |||
58 | /* | ||
59 | * let the primary processor know we're out of the | ||
60 | * pen, then head off into the C entry point | ||
61 | */ | ||
62 | pen_release = -1; | ||
63 | |||
64 | /* | ||
65 | * Synchronise with the boot thread. | ||
66 | */ | ||
67 | spin_lock(&boot_lock); | ||
68 | spin_unlock(&boot_lock); | ||
69 | } | ||
70 | |||
71 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | ||
72 | { | ||
73 | unsigned long timeout; | ||
74 | |||
75 | /* | ||
76 | * set synchronisation state between this boot processor | ||
77 | * and the secondary one | ||
78 | */ | ||
79 | spin_lock(&boot_lock); | ||
80 | |||
81 | /* | ||
82 | * The secondary processor is waiting to be released from | ||
83 | * the holding pen - release it, then wait for it to flag | ||
84 | * that it has been released by resetting pen_release. | ||
85 | * | ||
86 | * Note that "pen_release" is the hardware CPU ID, whereas | ||
87 | * "cpu" is Linux's internal ID. | ||
88 | */ | ||
89 | pen_release = cpu; | ||
90 | flush_cache_all(); | ||
91 | |||
92 | /* | ||
93 | * XXX | ||
94 | * | ||
95 | * This is a later addition to the booting protocol: the | ||
96 | * bootMonitor now puts secondary cores into WFI, so | ||
97 | * poke_milo() no longer gets the cores moving; we need | ||
98 | * to send a soft interrupt to wake the secondary core. | ||
99 | * Use smp_cross_call() for this, since there's little | ||
100 | * point duplicating the code here | ||
101 | */ | ||
102 | smp_cross_call(cpumask_of_cpu(cpu)); | ||
103 | |||
104 | timeout = jiffies + (1 * HZ); | ||
105 | while (time_before(jiffies, timeout)) { | ||
106 | if (pen_release == -1) | ||
107 | break; | ||
108 | |||
109 | udelay(10); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * now the secondary core is starting up let it run its | ||
114 | * calibrations, then wait for it to finish | ||
115 | */ | ||
116 | spin_unlock(&boot_lock); | ||
117 | |||
118 | return pen_release != -1 ? -ENOSYS : 0; | ||
119 | } | ||
120 | |||
121 | static void __init poke_milo(void) | ||
122 | { | ||
123 | extern void secondary_startup(void); | ||
124 | |||
125 | /* nobody is to be released from the pen yet */ | ||
126 | pen_release = -1; | ||
127 | |||
128 | /* | ||
129 | * write the address of secondary startup into the system-wide | ||
130 | * flags register, then clear the bottom two bits, which is what | ||
131 | * BootMonitor is waiting for | ||
132 | */ | ||
133 | #if 1 | ||
134 | #define REALVIEW_SYS_FLAGSS_OFFSET 0x30 | ||
135 | __raw_writel(virt_to_phys(realview_secondary_startup), | ||
136 | (IO_ADDRESS(REALVIEW_SYS_BASE) + | ||
137 | REALVIEW_SYS_FLAGSS_OFFSET)); | ||
138 | #define REALVIEW_SYS_FLAGSC_OFFSET 0x34 | ||
139 | __raw_writel(3, | ||
140 | (IO_ADDRESS(REALVIEW_SYS_BASE) + | ||
141 | REALVIEW_SYS_FLAGSC_OFFSET)); | ||
142 | #endif | ||
143 | |||
144 | mb(); | ||
145 | } | ||
146 | |||
147 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
148 | { | ||
149 | unsigned int ncores = get_core_count(); | ||
150 | unsigned int cpu = smp_processor_id(); | ||
151 | int i; | ||
152 | |||
153 | /* sanity check */ | ||
154 | if (ncores == 0) { | ||
155 | printk(KERN_ERR | ||
156 | "Realview: strange CM count of 0? Default to 1\n"); | ||
157 | |||
158 | ncores = 1; | ||
159 | } | ||
160 | |||
161 | if (ncores > NR_CPUS) { | ||
162 | printk(KERN_WARNING | ||
163 | "Realview: no. of cores (%d) greater than configured " | ||
164 | "maximum of %d - clipping\n", | ||
165 | ncores, NR_CPUS); | ||
166 | ncores = NR_CPUS; | ||
167 | } | ||
168 | |||
169 | smp_store_cpu_info(cpu); | ||
170 | |||
171 | /* | ||
172 | * are we trying to boot more cores than exist? | ||
173 | */ | ||
174 | if (max_cpus > ncores) | ||
175 | max_cpus = ncores; | ||
176 | |||
177 | /* | ||
178 | * Initialise the possible/present maps. | ||
179 | * cpu_possible_map describes the set of CPUs which may be present | ||
180 | * cpu_present_map describes the set of CPUs populated | ||
181 | */ | ||
182 | for (i = 0; i < max_cpus; i++) { | ||
183 | cpu_set(i, cpu_possible_map); | ||
184 | cpu_set(i, cpu_present_map); | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * Do we need any more CPUs? If so, then let them know where | ||
189 | * to start. Note that, on modern versions of MILO, the "poke" | ||
190 | * doesn't actually do anything until each individual core is | ||
191 | * sent a soft interrupt to get it out of WFI | ||
192 | */ | ||
193 | if (max_cpus > 1) | ||
194 | poke_milo(); | ||
195 | } | ||
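
The boot_secondary() routine above releases a secondary CPU by writing its ID
into pen_release, flushing the caches and sending a soft interrupt, then waits
for the secondary to acknowledge by resetting pen_release to -1. For reference,
a minimal C sketch of the secondary-side half of that handshake (illustrative
only -- the function name is made up and the real code lives in the platform's
secondary-init path):

    static void secondary_holding_pen_sketch(unsigned int cpu)
    {
            /* spin until boot_secondary() writes our ID into pen_release */
            while (pen_release != cpu)
                    barrier();

            /* acknowledge the release so boot_secondary() stops waiting */
            pen_release = -1;
            flush_cache_all();

            /*
             * The boot CPU holds boot_lock while it pokes us; taking and
             * dropping it here ensures both sides agree the handshake is
             * complete before this CPU carries on.
             */
            spin_lock(&boot_lock);
            spin_unlock(&boot_lock);
    }
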
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 267bb07e39b7..7dc32503fdf2 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -136,6 +136,11 @@ static struct amba_device *amba_devs[] __initdata = { | |||
136 | 136 | ||
137 | static void __init gic_init_irq(void) | 137 | static void __init gic_init_irq(void) |
138 | { | 138 | { |
139 | #ifdef CONFIG_REALVIEW_MPCORE | ||
140 | writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK)); | ||
141 | writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8); | ||
142 | writel(0x00000000, __io_address(REALVIEW_SYS_LOCK)); | ||
143 | #endif | ||
139 | gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE)); | 144 | gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE)); |
140 | gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE)); | 145 | gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE)); |
141 | } | 146 | } |
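
The MPCore-only writes added to gic_init_irq() bracket the programming of the
register at offset 0xd8 with writes to REALVIEW_SYS_LOCK: 0x0000a05f appears to
unlock the system controller registers and writing zero re-locks them. A hedged
sketch of that bracket as a helper (the helper and its name are assumptions, not
part of this patch):

    static void realview_sys_write(unsigned int offset, u32 val)
    {
            /* 0x0000a05f unlocks the system controller registers */
            writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
            writel(val, __io_address(REALVIEW_SYS_BASE) + offset);
            /* writing zero locks them again */
            writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
    }
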
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c index fb5b40289de2..9e50127be635 100644 --- a/arch/arm/mm/mm-armv.c +++ b/arch/arm/mm/mm-armv.c | |||
@@ -354,7 +354,7 @@ void __init build_mem_type_table(void) | |||
354 | { | 354 | { |
355 | struct cachepolicy *cp; | 355 | struct cachepolicy *cp; |
356 | unsigned int cr = get_cr(); | 356 | unsigned int cr = get_cr(); |
357 | unsigned int user_pgprot; | 357 | unsigned int user_pgprot, kern_pgprot; |
358 | int cpu_arch = cpu_architecture(); | 358 | int cpu_arch = cpu_architecture(); |
359 | int i; | 359 | int i; |
360 | 360 | ||
@@ -381,7 +381,7 @@ void __init build_mem_type_table(void) | |||
381 | } | 381 | } |
382 | 382 | ||
383 | cp = &cache_policies[cachepolicy]; | 383 | cp = &cache_policies[cachepolicy]; |
384 | user_pgprot = cp->pte; | 384 | kern_pgprot = user_pgprot = cp->pte; |
385 | 385 | ||
386 | /* | 386 | /* |
387 | * ARMv6 and above have extended page tables. | 387 | * ARMv6 and above have extended page tables. |
@@ -393,6 +393,7 @@ void __init build_mem_type_table(void) | |||
393 | */ | 393 | */ |
394 | mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4; | 394 | mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4; |
395 | mem_types[MT_ROM].prot_sect &= ~PMD_BIT4; | 395 | mem_types[MT_ROM].prot_sect &= ~PMD_BIT4; |
396 | |||
396 | /* | 397 | /* |
397 | * Mark cache clean areas and XIP ROM read only | 398 | * Mark cache clean areas and XIP ROM read only |
398 | * from SVC mode and no access from userspace. | 399 | * from SVC mode and no access from userspace. |
@@ -412,32 +413,47 @@ void __init build_mem_type_table(void) | |||
412 | * (iow, non-global) | 413 | * (iow, non-global) |
413 | */ | 414 | */ |
414 | user_pgprot |= L_PTE_ASID; | 415 | user_pgprot |= L_PTE_ASID; |
416 | |||
417 | #ifdef CONFIG_SMP | ||
418 | /* | ||
419 | * Mark memory with the "shared" attribute for SMP systems | ||
420 | */ | ||
421 | user_pgprot |= L_PTE_SHARED; | ||
422 | kern_pgprot |= L_PTE_SHARED; | ||
423 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | ||
424 | #endif | ||
415 | } | 425 | } |
416 | 426 | ||
427 | for (i = 0; i < 16; i++) { | ||
428 | unsigned long v = pgprot_val(protection_map[i]); | ||
429 | v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot; | ||
430 | protection_map[i] = __pgprot(v); | ||
431 | } | ||
432 | |||
433 | mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot; | ||
434 | mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot; | ||
435 | |||
417 | if (cpu_arch >= CPU_ARCH_ARMv5) { | 436 | if (cpu_arch >= CPU_ARCH_ARMv5) { |
418 | mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE; | 437 | #ifndef CONFIG_SMP |
419 | mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE; | 438 | /* |
439 | * Only use write-through for non-SMP systems | ||
440 | */ | ||
441 | mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE; | ||
442 | mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE; | ||
443 | #endif | ||
420 | } else { | 444 | } else { |
421 | mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte; | ||
422 | mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte; | ||
423 | mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1); | 445 | mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1); |
424 | } | 446 | } |
425 | 447 | ||
448 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | | ||
449 | L_PTE_DIRTY | L_PTE_WRITE | | ||
450 | L_PTE_EXEC | kern_pgprot); | ||
451 | |||
426 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 452 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
427 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 453 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
428 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; | 454 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; |
429 | mem_types[MT_ROM].prot_sect |= cp->pmd; | 455 | mem_types[MT_ROM].prot_sect |= cp->pmd; |
430 | 456 | ||
431 | for (i = 0; i < 16; i++) { | ||
432 | unsigned long v = pgprot_val(protection_map[i]); | ||
433 | v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot; | ||
434 | protection_map[i] = __pgprot(v); | ||
435 | } | ||
436 | |||
437 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | | ||
438 | L_PTE_DIRTY | L_PTE_WRITE | | ||
439 | L_PTE_EXEC | cp->pte); | ||
440 | |||
441 | switch (cp->pmd) { | 457 | switch (cp->pmd) { |
442 | case PMD_SECT_WT: | 458 | case PMD_SECT_WT: |
443 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; | 459 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 9bb5fff406fb..92f3ca31b7b9 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | #include <asm/asm-offsets.h> | 14 | #include <asm/asm-offsets.h> |
15 | #include <asm/hardware/arm_scu.h> | ||
15 | #include <asm/procinfo.h> | 16 | #include <asm/procinfo.h> |
16 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
17 | 18 | ||
@@ -112,6 +113,9 @@ ENTRY(cpu_v6_dcache_clean_area) | |||
112 | ENTRY(cpu_v6_switch_mm) | 113 | ENTRY(cpu_v6_switch_mm) |
113 | mov r2, #0 | 114 | mov r2, #0 |
114 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 115 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id |
116 | #ifdef CONFIG_SMP | ||
117 | orr r0, r0, #2 @ set shared pgtable | ||
118 | #endif | ||
115 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 119 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
116 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer | 120 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer |
117 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | 121 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 |
@@ -140,7 +144,7 @@ ENTRY(cpu_v6_switch_mm) | |||
140 | ENTRY(cpu_v6_set_pte) | 144 | ENTRY(cpu_v6_set_pte) |
141 | str r1, [r0], #-2048 @ linux version | 145 | str r1, [r0], #-2048 @ linux version |
142 | 146 | ||
143 | bic r2, r1, #0x000007f0 | 147 | bic r2, r1, #0x000003f0 |
144 | bic r2, r2, #0x00000003 | 148 | bic r2, r2, #0x00000003 |
145 | orr r2, r2, #PTE_EXT_AP0 | 2 | 149 | orr r2, r2, #PTE_EXT_AP0 | 2 |
146 | 150 | ||
@@ -191,6 +195,23 @@ cpu_v6_name: | |||
191 | * - cache type register is implemented | 195 | * - cache type register is implemented |
192 | */ | 196 | */ |
193 | __v6_setup: | 197 | __v6_setup: |
198 | #ifdef CONFIG_SMP | ||
199 | /* Set up the SCU on core 0 only */ | ||
200 | mrc p15, 0, r0, c0, c0, 5 @ CPU core number | ||
201 | ands r0, r0, #15 | ||
202 | moveq r0, #0x10000000 @ SCU_BASE | ||
203 | orreq r0, r0, #0x00100000 | ||
204 | ldreq r5, [r0, #SCU_CTRL] | ||
205 | orreq r5, r5, #1 | ||
206 | streq r5, [r0, #SCU_CTRL] | ||
207 | |||
208 | #ifndef CONFIG_CPU_DCACHE_DISABLE | ||
209 | mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode | ||
210 | orr r0, r0, #0x20 | ||
211 | mcr p15, 0, r0, c1, c0, 1 | ||
212 | #endif | ||
213 | #endif | ||
214 | |||
194 | mov r0, #0 | 215 | mov r0, #0 |
195 | mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache | 216 | mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache |
196 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | 217 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache |
@@ -198,6 +219,9 @@ __v6_setup: | |||
198 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 219 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
199 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs | 220 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs |
200 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register | 221 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register |
222 | #ifdef CONFIG_SMP | ||
223 | orr r4, r4, #2 @ set shared pgtable | ||
224 | #endif | ||
201 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 | 225 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 |
202 | #ifdef CONFIG_VFP | 226 | #ifdef CONFIG_VFP |
203 | mrc p15, 0, r0, c1, c0, 2 | 227 | mrc p15, 0, r0, c1, c0, 2 |
diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h index 9677ae8448e8..da4c616b6c49 100644 --- a/arch/arm/nwfpe/fpa11.h +++ b/arch/arm/nwfpe/fpa11.h | |||
@@ -60,7 +60,7 @@ typedef union tagFPREG { | |||
60 | #ifdef CONFIG_FPE_NWFPE_XP | 60 | #ifdef CONFIG_FPE_NWFPE_XP |
61 | floatx80 fExtended; | 61 | floatx80 fExtended; |
62 | #else | 62 | #else |
63 | int padding[3]; | 63 | u32 padding[3]; |
64 | #endif | 64 | #endif |
65 | } FPREG; | 65 | } FPREG; |
66 | 66 | ||
diff --git a/arch/arm/nwfpe/fpa11_cpdt.c b/arch/arm/nwfpe/fpa11_cpdt.c index b0db5cbcc3b1..32859fa8dcfc 100644 --- a/arch/arm/nwfpe/fpa11_cpdt.c +++ b/arch/arm/nwfpe/fpa11_cpdt.c | |||
@@ -59,8 +59,13 @@ static inline void loadExtended(const unsigned int Fn, const unsigned int __user | |||
59 | p = (unsigned int *) &fpa11->fpreg[Fn].fExtended; | 59 | p = (unsigned int *) &fpa11->fpreg[Fn].fExtended; |
60 | fpa11->fType[Fn] = typeExtended; | 60 | fpa11->fType[Fn] = typeExtended; |
61 | get_user(p[0], &pMem[0]); /* sign & exponent */ | 61 | get_user(p[0], &pMem[0]); /* sign & exponent */ |
62 | #ifdef __ARMEB__ | ||
63 | get_user(p[1], &pMem[1]); /* ms bits */ | ||
64 | get_user(p[2], &pMem[2]); /* ls bits */ | ||
65 | #else | ||
62 | get_user(p[1], &pMem[2]); /* ls bits */ | 66 | get_user(p[1], &pMem[2]); /* ls bits */ |
63 | get_user(p[2], &pMem[1]); /* ms bits */ | 67 | get_user(p[2], &pMem[1]); /* ms bits */ |
68 | #endif | ||
64 | } | 69 | } |
65 | #endif | 70 | #endif |
66 | 71 | ||
@@ -177,8 +182,13 @@ static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMe | |||
177 | } | 182 | } |
178 | 183 | ||
179 | put_user(val.i[0], &pMem[0]); /* sign & exp */ | 184 | put_user(val.i[0], &pMem[0]); /* sign & exp */ |
185 | #ifdef __ARMEB__ | ||
186 | put_user(val.i[1], &pMem[1]); /* msw */ | ||
187 | put_user(val.i[2], &pMem[2]); | ||
188 | #else | ||
180 | put_user(val.i[1], &pMem[2]); | 189 | put_user(val.i[1], &pMem[2]); |
181 | put_user(val.i[2], &pMem[1]); /* msw */ | 190 | put_user(val.i[2], &pMem[1]); /* msw */ |
191 | #endif | ||
182 | } | 192 | } |
183 | #endif | 193 | #endif |
184 | 194 | ||
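
The loadExtended()/storeExtended() changes make the in-memory word order of the
80-bit extended value endian-dependent: the sign/exponent word always comes
first, and on big-endian ARM the two mantissa words are already stored
most-significant first, so only the little-endian case swaps them. The load
side in isolation (a hypothetical helper, not code from this patch):

    static void copy_extended_words(unsigned int *p, const unsigned int *mem)
    {
            p[0] = mem[0];          /* sign & exponent */
    #ifdef __ARMEB__
            p[1] = mem[1];          /* ms bits */
            p[2] = mem[2];          /* ls bits */
    #else
            p[1] = mem[2];          /* ls bits */
            p[2] = mem[1];          /* ms bits */
    #endif
    }
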
diff --git a/arch/arm/nwfpe/fpopcode.c b/arch/arm/nwfpe/fpopcode.c index 4c9f5703148c..67ff2ab08ea0 100644 --- a/arch/arm/nwfpe/fpopcode.c +++ b/arch/arm/nwfpe/fpopcode.c | |||
@@ -29,14 +29,14 @@ | |||
29 | 29 | ||
30 | #ifdef CONFIG_FPE_NWFPE_XP | 30 | #ifdef CONFIG_FPE_NWFPE_XP |
31 | const floatx80 floatx80Constant[] = { | 31 | const floatx80 floatx80Constant[] = { |
32 | {0x0000, 0x0000000000000000ULL}, /* extended 0.0 */ | 32 | { .high = 0x0000, .low = 0x0000000000000000ULL},/* extended 0.0 */ |
33 | {0x3fff, 0x8000000000000000ULL}, /* extended 1.0 */ | 33 | { .high = 0x3fff, .low = 0x8000000000000000ULL},/* extended 1.0 */ |
34 | {0x4000, 0x8000000000000000ULL}, /* extended 2.0 */ | 34 | { .high = 0x4000, .low = 0x8000000000000000ULL},/* extended 2.0 */ |
35 | {0x4000, 0xc000000000000000ULL}, /* extended 3.0 */ | 35 | { .high = 0x4000, .low = 0xc000000000000000ULL},/* extended 3.0 */ |
36 | {0x4001, 0x8000000000000000ULL}, /* extended 4.0 */ | 36 | { .high = 0x4001, .low = 0x8000000000000000ULL},/* extended 4.0 */ |
37 | {0x4001, 0xa000000000000000ULL}, /* extended 5.0 */ | 37 | { .high = 0x4001, .low = 0xa000000000000000ULL},/* extended 5.0 */ |
38 | {0x3ffe, 0x8000000000000000ULL}, /* extended 0.5 */ | 38 | { .high = 0x3ffe, .low = 0x8000000000000000ULL},/* extended 0.5 */ |
39 | {0x4002, 0xa000000000000000ULL} /* extended 10.0 */ | 39 | { .high = 0x4002, .low = 0xa000000000000000ULL},/* extended 10.0 */ |
40 | }; | 40 | }; |
41 | #endif | 41 | #endif |
42 | 42 | ||
diff --git a/arch/arm/nwfpe/softfloat-specialize b/arch/arm/nwfpe/softfloat-specialize index acf409144763..d4a4c8e06635 100644 --- a/arch/arm/nwfpe/softfloat-specialize +++ b/arch/arm/nwfpe/softfloat-specialize | |||
@@ -332,6 +332,7 @@ static floatx80 commonNaNToFloatx80( commonNaNT a ) | |||
332 | 332 | ||
333 | z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 ); | 333 | z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 ); |
334 | z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF; | 334 | z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF; |
335 | z.__padding = 0; | ||
335 | return z; | 336 | return z; |
336 | 337 | ||
337 | } | 338 | } |
diff --git a/arch/arm/nwfpe/softfloat.c b/arch/arm/nwfpe/softfloat.c index f9f049132a17..0f9656e482ba 100644 --- a/arch/arm/nwfpe/softfloat.c +++ b/arch/arm/nwfpe/softfloat.c | |||
@@ -531,6 +531,7 @@ INLINE floatx80 packFloatx80( flag zSign, int32 zExp, bits64 zSig ) | |||
531 | 531 | ||
532 | z.low = zSig; | 532 | z.low = zSig; |
533 | z.high = ( ( (bits16) zSign )<<15 ) + zExp; | 533 | z.high = ( ( (bits16) zSign )<<15 ) + zExp; |
534 | z.__padding = 0; | ||
534 | return z; | 535 | return z; |
535 | 536 | ||
536 | } | 537 | } |
@@ -2831,6 +2832,7 @@ static floatx80 subFloatx80Sigs( struct roundingData *roundData, floatx80 a, flo | |||
2831 | roundData->exception |= float_flag_invalid; | 2832 | roundData->exception |= float_flag_invalid; |
2832 | z.low = floatx80_default_nan_low; | 2833 | z.low = floatx80_default_nan_low; |
2833 | z.high = floatx80_default_nan_high; | 2834 | z.high = floatx80_default_nan_high; |
2835 | z.__padding = 0; | ||
2834 | return z; | 2836 | return z; |
2835 | } | 2837 | } |
2836 | if ( aExp == 0 ) { | 2838 | if ( aExp == 0 ) { |
@@ -2950,6 +2952,7 @@ floatx80 floatx80_mul( struct roundingData *roundData, floatx80 a, floatx80 b ) | |||
2950 | roundData->exception |= float_flag_invalid; | 2952 | roundData->exception |= float_flag_invalid; |
2951 | z.low = floatx80_default_nan_low; | 2953 | z.low = floatx80_default_nan_low; |
2952 | z.high = floatx80_default_nan_high; | 2954 | z.high = floatx80_default_nan_high; |
2955 | z.__padding = 0; | ||
2953 | return z; | 2956 | return z; |
2954 | } | 2957 | } |
2955 | return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); | 2958 | return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); |
@@ -3015,6 +3018,7 @@ floatx80 floatx80_div( struct roundingData *roundData, floatx80 a, floatx80 b ) | |||
3015 | roundData->exception |= float_flag_invalid; | 3018 | roundData->exception |= float_flag_invalid; |
3016 | z.low = floatx80_default_nan_low; | 3019 | z.low = floatx80_default_nan_low; |
3017 | z.high = floatx80_default_nan_high; | 3020 | z.high = floatx80_default_nan_high; |
3021 | z.__padding = 0; | ||
3018 | return z; | 3022 | return z; |
3019 | } | 3023 | } |
3020 | roundData->exception |= float_flag_divbyzero; | 3024 | roundData->exception |= float_flag_divbyzero; |
@@ -3093,6 +3097,7 @@ floatx80 floatx80_rem( struct roundingData *roundData, floatx80 a, floatx80 b ) | |||
3093 | roundData->exception |= float_flag_invalid; | 3097 | roundData->exception |= float_flag_invalid; |
3094 | z.low = floatx80_default_nan_low; | 3098 | z.low = floatx80_default_nan_low; |
3095 | z.high = floatx80_default_nan_high; | 3099 | z.high = floatx80_default_nan_high; |
3100 | z.__padding = 0; | ||
3096 | return z; | 3101 | return z; |
3097 | } | 3102 | } |
3098 | normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); | 3103 | normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); |
@@ -3184,6 +3189,7 @@ floatx80 floatx80_sqrt( struct roundingData *roundData, floatx80 a ) | |||
3184 | roundData->exception |= float_flag_invalid; | 3189 | roundData->exception |= float_flag_invalid; |
3185 | z.low = floatx80_default_nan_low; | 3190 | z.low = floatx80_default_nan_low; |
3186 | z.high = floatx80_default_nan_high; | 3191 | z.high = floatx80_default_nan_high; |
3192 | z.__padding = 0; | ||
3187 | return z; | 3193 | return z; |
3188 | } | 3194 | } |
3189 | if ( aExp == 0 ) { | 3195 | if ( aExp == 0 ) { |
diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h index 14151700b6b2..978c699673c6 100644 --- a/arch/arm/nwfpe/softfloat.h +++ b/arch/arm/nwfpe/softfloat.h | |||
@@ -51,11 +51,17 @@ input or output the `floatx80' type will be defined. | |||
51 | Software IEC/IEEE floating-point types. | 51 | Software IEC/IEEE floating-point types. |
52 | ------------------------------------------------------------------------------- | 52 | ------------------------------------------------------------------------------- |
53 | */ | 53 | */ |
54 | typedef unsigned long int float32; | 54 | typedef u32 float32; |
55 | typedef unsigned long long float64; | 55 | typedef u64 float64; |
56 | typedef struct { | 56 | typedef struct { |
57 | unsigned short high; | 57 | #ifdef __ARMEB__ |
58 | unsigned long long low; | 58 | u16 __padding; |
59 | u16 high; | ||
60 | #else | ||
61 | u16 high; | ||
62 | u16 __padding; | ||
63 | #endif | ||
64 | u64 low; | ||
59 | } floatx80; | 65 | } floatx80; |
60 | 66 | ||
61 | /* | 67 | /* |
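
With the explicit 16-bit __padding member (placed before or after high
depending on endianness), any code that builds a floatx80 by assigning high and
low directly must also clear __padding -- which is exactly what the added
z.__padding = 0 lines in softfloat.c and softfloat-specialize do. A minimal
sketch of the pattern, assuming only the struct as declared above:

    static inline floatx80 make_floatx80(u16 high, u64 low)
    {
            floatx80 z;

            z.high = high;
            z.low = low;
            z.__padding = 0;        /* never hand back stale padding bits */
            return z;
    }
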
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c index caa9f7711343..871366b83b3f 100644 --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -377,10 +377,9 @@ acpi_cpufreq_cpu_init ( | |||
377 | arg0.buffer.length = 12; | 377 | arg0.buffer.length = 12; |
378 | arg0.buffer.pointer = (u8 *) arg0_buf; | 378 | arg0.buffer.pointer = (u8 *) arg0_buf; |
379 | 379 | ||
380 | data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); | 380 | data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); |
381 | if (!data) | 381 | if (!data) |
382 | return (-ENOMEM); | 382 | return (-ENOMEM); |
383 | memset(data, 0, sizeof(struct cpufreq_acpi_io)); | ||
384 | 383 | ||
385 | acpi_io_data[cpu] = data; | 384 | acpi_io_data[cpu] = data; |
386 | 385 | ||
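
This and the following cpufreq hunks all make the same substitution: kzalloc()
allocates and zeroes in one call, so the separate memset() goes away. The
pattern in general terms (struct foo stands in for whatever is really being
allocated):

    /* old style: allocate, then clear by hand */
    static struct foo *alloc_foo_old(void)
    {
            struct foo *p = kmalloc(sizeof(struct foo), GFP_KERNEL);

            if (!p)
                    return NULL;
            memset(p, 0, sizeof(struct foo));
            return p;
    }

    /* new style: kzalloc() returns already-zeroed memory, or NULL */
    static struct foo *alloc_foo_new(void)
    {
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }
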
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c index 73a5dc5b26b8..edcd626001da 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c | |||
@@ -171,10 +171,9 @@ static int get_ranges (unsigned char *pst) | |||
171 | unsigned int speed; | 171 | unsigned int speed; |
172 | u8 fid, vid; | 172 | u8 fid, vid; |
173 | 173 | ||
174 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); | 174 | powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); |
175 | if (!powernow_table) | 175 | if (!powernow_table) |
176 | return -ENOMEM; | 176 | return -ENOMEM; |
177 | memset(powernow_table, 0, (sizeof(struct cpufreq_frequency_table) * (number_scales + 1))); | ||
178 | 177 | ||
179 | for (j=0 ; j < number_scales; j++) { | 178 | for (j=0 ; j < number_scales; j++) { |
180 | fid = *pst++; | 179 | fid = *pst++; |
@@ -305,16 +304,13 @@ static int powernow_acpi_init(void) | |||
305 | goto err0; | 304 | goto err0; |
306 | } | 305 | } |
307 | 306 | ||
308 | acpi_processor_perf = kmalloc(sizeof(struct acpi_processor_performance), | 307 | acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), |
309 | GFP_KERNEL); | 308 | GFP_KERNEL); |
310 | |||
311 | if (!acpi_processor_perf) { | 309 | if (!acpi_processor_perf) { |
312 | retval = -ENOMEM; | 310 | retval = -ENOMEM; |
313 | goto err0; | 311 | goto err0; |
314 | } | 312 | } |
315 | 313 | ||
316 | memset(acpi_processor_perf, 0, sizeof(struct acpi_processor_performance)); | ||
317 | |||
318 | if (acpi_processor_register_performance(acpi_processor_perf, 0)) { | 314 | if (acpi_processor_register_performance(acpi_processor_perf, 0)) { |
319 | retval = -EIO; | 315 | retval = -EIO; |
320 | goto err1; | 316 | goto err1; |
@@ -337,14 +333,12 @@ static int powernow_acpi_init(void) | |||
337 | goto err2; | 333 | goto err2; |
338 | } | 334 | } |
339 | 335 | ||
340 | powernow_table = kmalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL); | 336 | powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL); |
341 | if (!powernow_table) { | 337 | if (!powernow_table) { |
342 | retval = -ENOMEM; | 338 | retval = -ENOMEM; |
343 | goto err2; | 339 | goto err2; |
344 | } | 340 | } |
345 | 341 | ||
346 | memset(powernow_table, 0, ((number_scales + 1) * sizeof(struct cpufreq_frequency_table))); | ||
347 | |||
348 | pc.val = (unsigned long) acpi_processor_perf->states[0].control; | 342 | pc.val = (unsigned long) acpi_processor_perf->states[0].control; |
349 | for (i = 0; i < number_scales; i++) { | 343 | for (i = 0; i < number_scales; i++) { |
350 | u8 fid, vid; | 344 | u8 fid, vid; |
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index 2d5c9adba0cd..68a1fc87f4ca 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -462,7 +462,6 @@ static int check_supported_cpu(unsigned int cpu) | |||
462 | 462 | ||
463 | oldmask = current->cpus_allowed; | 463 | oldmask = current->cpus_allowed; |
464 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 464 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); |
465 | schedule(); | ||
466 | 465 | ||
467 | if (smp_processor_id() != cpu) { | 466 | if (smp_processor_id() != cpu) { |
468 | printk(KERN_ERR "limiting to cpu %u failed\n", cpu); | 467 | printk(KERN_ERR "limiting to cpu %u failed\n", cpu); |
@@ -497,9 +496,7 @@ static int check_supported_cpu(unsigned int cpu) | |||
497 | 496 | ||
498 | out: | 497 | out: |
499 | set_cpus_allowed(current, oldmask); | 498 | set_cpus_allowed(current, oldmask); |
500 | schedule(); | ||
501 | return rc; | 499 | return rc; |
502 | |||
503 | } | 500 | } |
504 | 501 | ||
505 | static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) | 502 | static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) |
@@ -913,7 +910,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi | |||
913 | /* only run on specific CPU from here on */ | 910 | /* only run on specific CPU from here on */ |
914 | oldmask = current->cpus_allowed; | 911 | oldmask = current->cpus_allowed; |
915 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | 912 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); |
916 | schedule(); | ||
917 | 913 | ||
918 | if (smp_processor_id() != pol->cpu) { | 914 | if (smp_processor_id() != pol->cpu) { |
919 | printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); | 915 | printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); |
@@ -968,8 +964,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi | |||
968 | 964 | ||
969 | err_out: | 965 | err_out: |
970 | set_cpus_allowed(current, oldmask); | 966 | set_cpus_allowed(current, oldmask); |
971 | schedule(); | ||
972 | |||
973 | return ret; | 967 | return ret; |
974 | } | 968 | } |
975 | 969 | ||
@@ -991,12 +985,11 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
991 | if (!check_supported_cpu(pol->cpu)) | 985 | if (!check_supported_cpu(pol->cpu)) |
992 | return -ENODEV; | 986 | return -ENODEV; |
993 | 987 | ||
994 | data = kmalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); | 988 | data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); |
995 | if (!data) { | 989 | if (!data) { |
996 | printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); | 990 | printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); |
997 | return -ENOMEM; | 991 | return -ENOMEM; |
998 | } | 992 | } |
999 | memset(data,0,sizeof(struct powernow_k8_data)); | ||
1000 | 993 | ||
1001 | data->cpu = pol->cpu; | 994 | data->cpu = pol->cpu; |
1002 | 995 | ||
@@ -1026,7 +1019,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1026 | /* only run on specific CPU from here on */ | 1019 | /* only run on specific CPU from here on */ |
1027 | oldmask = current->cpus_allowed; | 1020 | oldmask = current->cpus_allowed; |
1028 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | 1021 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); |
1029 | schedule(); | ||
1030 | 1022 | ||
1031 | if (smp_processor_id() != pol->cpu) { | 1023 | if (smp_processor_id() != pol->cpu) { |
1032 | printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); | 1024 | printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); |
@@ -1045,7 +1037,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1045 | 1037 | ||
1046 | /* run on any CPU again */ | 1038 | /* run on any CPU again */ |
1047 | set_cpus_allowed(current, oldmask); | 1039 | set_cpus_allowed(current, oldmask); |
1048 | schedule(); | ||
1049 | 1040 | ||
1050 | pol->governor = CPUFREQ_DEFAULT_GOVERNOR; | 1041 | pol->governor = CPUFREQ_DEFAULT_GOVERNOR; |
1051 | pol->cpus = cpu_core_map[pol->cpu]; | 1042 | pol->cpus = cpu_core_map[pol->cpu]; |
@@ -1080,7 +1071,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1080 | 1071 | ||
1081 | err_out: | 1072 | err_out: |
1082 | set_cpus_allowed(current, oldmask); | 1073 | set_cpus_allowed(current, oldmask); |
1083 | schedule(); | ||
1084 | powernow_k8_cpu_exit_acpi(data); | 1074 | powernow_k8_cpu_exit_acpi(data); |
1085 | 1075 | ||
1086 | kfree(data); | 1076 | kfree(data); |
@@ -1116,17 +1106,14 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
1116 | set_cpus_allowed(current, oldmask); | 1106 | set_cpus_allowed(current, oldmask); |
1117 | return 0; | 1107 | return 0; |
1118 | } | 1108 | } |
1119 | preempt_disable(); | 1109 | |
1120 | |||
1121 | if (query_current_values_with_pending_wait(data)) | 1110 | if (query_current_values_with_pending_wait(data)) |
1122 | goto out; | 1111 | goto out; |
1123 | 1112 | ||
1124 | khz = find_khz_freq_from_fid(data->currfid); | 1113 | khz = find_khz_freq_from_fid(data->currfid); |
1125 | 1114 | ||
1126 | out: | 1115 | out: |
1127 | preempt_enable_no_resched(); | ||
1128 | set_cpus_allowed(current, oldmask); | 1116 | set_cpus_allowed(current, oldmask); |
1129 | |||
1130 | return khz; | 1117 | return khz; |
1131 | } | 1118 | } |
1132 | 1119 | ||
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c index 0ea010a7afb1..edb9873e27e3 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -423,12 +423,11 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy) | |||
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | centrino_model[cpu] = kmalloc(sizeof(struct cpu_model), GFP_KERNEL); | 426 | centrino_model[cpu] = kzalloc(sizeof(struct cpu_model), GFP_KERNEL); |
427 | if (!centrino_model[cpu]) { | 427 | if (!centrino_model[cpu]) { |
428 | result = -ENOMEM; | 428 | result = -ENOMEM; |
429 | goto err_unreg; | 429 | goto err_unreg; |
430 | } | 430 | } |
431 | memset(centrino_model[cpu], 0, sizeof(struct cpu_model)); | ||
432 | 431 | ||
433 | centrino_model[cpu]->model_name=NULL; | 432 | centrino_model[cpu]->model_name=NULL; |
434 | centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000; | 433 | centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000; |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9f2093c1f44b..d4de8a4814be 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -191,6 +191,7 @@ config IOSAPIC | |||
191 | 191 | ||
192 | config IA64_SGI_SN_XP | 192 | config IA64_SGI_SN_XP |
193 | tristate "Support communication between SGI SSIs" | 193 | tristate "Support communication between SGI SSIs" |
194 | depends on IA64_GENERIC || IA64_SGI_SN2 | ||
194 | select IA64_UNCACHED_ALLOCATOR | 195 | select IA64_UNCACHED_ALLOCATOR |
195 | help | 196 | help |
196 | An SGI machine can be divided into multiple Single System | 197 | An SGI machine can be divided into multiple Single System |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6537445dac0e..3cfb8be3ff6d 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -201,6 +201,14 @@ config SUN_OPENPROMFS | |||
201 | Only choose N if you know in advance that you will not need to modify | 201 | Only choose N if you know in advance that you will not need to modify |
202 | OpenPROM settings on the running system. | 202 | OpenPROM settings on the running system. |
203 | 203 | ||
204 | config SPARC_LED | ||
205 | tristate "Sun4m LED driver" | ||
206 | help | ||
207 | This driver toggles the front-panel LED on sun4m systems | ||
208 | in a user-specifyable manner. It's state can be probed | ||
209 | by reading /proc/led and it's blinking mode can be changed | ||
210 | via writes to /proc/led | ||
211 | |||
204 | source "fs/Kconfig.binfmt" | 212 | source "fs/Kconfig.binfmt" |
205 | 213 | ||
206 | config SUNOS_EMUL | 214 | config SUNOS_EMUL |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 3d22ba2af01c..1b83e21841b5 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -21,6 +21,7 @@ obj-$(CONFIG_SUN_AUXIO) += auxio.o | |||
21 | obj-$(CONFIG_PCI) += ebus.o | 21 | obj-$(CONFIG_PCI) += ebus.o |
22 | obj-$(CONFIG_SUN_PM) += apc.o pmc.o | 22 | obj-$(CONFIG_SUN_PM) += apc.o pmc.o |
23 | obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o | 23 | obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o |
24 | obj-$(CONFIG_SPARC_LED) += led.o | ||
24 | 25 | ||
25 | ifdef CONFIG_SUNOS_EMUL | 26 | ifdef CONFIG_SUNOS_EMUL |
26 | obj-y += sys_sunos.o sunos_ioctl.o | 27 | obj-y += sys_sunos.o sunos_ioctl.o |
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c new file mode 100644 index 000000000000..2a3afca453c9 --- /dev/null +++ b/arch/sparc/kernel/led.c | |||
@@ -0,0 +1,139 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/string.h> | ||
6 | |||
7 | #include <asm/auxio.h> | ||
8 | |||
9 | #define LED_MAX_LENGTH 8 /* maximum chars written to proc file */ | ||
10 | |||
11 | static inline void led_toggle(void) | ||
12 | { | ||
13 | unsigned char val = get_auxio(); | ||
14 | unsigned char on, off; | ||
15 | |||
16 | if (val & AUXIO_LED) { | ||
17 | on = 0; | ||
18 | off = AUXIO_LED; | ||
19 | } else { | ||
20 | on = AUXIO_LED; | ||
21 | off = 0; | ||
22 | } | ||
23 | |||
24 | set_auxio(on, off); | ||
25 | } | ||
26 | |||
27 | static struct timer_list led_blink_timer; | ||
28 | |||
29 | static void led_blink(unsigned long timeout) | ||
30 | { | ||
31 | led_toggle(); | ||
32 | |||
33 | /* reschedule */ | ||
34 | if (!timeout) { /* blink according to load */ | ||
35 | led_blink_timer.expires = jiffies + | ||
36 | ((1 + (avenrun[0] >> FSHIFT)) * HZ); | ||
37 | led_blink_timer.data = 0; | ||
38 | } else { /* blink at user specified interval */ | ||
39 | led_blink_timer.expires = jiffies + (timeout * HZ); | ||
40 | led_blink_timer.data = timeout; | ||
41 | } | ||
42 | add_timer(&led_blink_timer); | ||
43 | } | ||
44 | |||
45 | static int led_read_proc(char *buf, char **start, off_t offset, int count, | ||
46 | int *eof, void *data) | ||
47 | { | ||
48 | int len = 0; | ||
49 | |||
50 | if (get_auxio() & AUXIO_LED) | ||
51 | len = sprintf(buf, "on\n"); | ||
52 | else | ||
53 | len = sprintf(buf, "off\n"); | ||
54 | |||
55 | return len; | ||
56 | } | ||
57 | |||
58 | static int led_write_proc(struct file *file, const char *buffer, | ||
59 | unsigned long count, void *data) | ||
60 | { | ||
61 | char *buf = NULL; | ||
62 | |||
63 | if (count > LED_MAX_LENGTH) | ||
64 | count = LED_MAX_LENGTH; | ||
65 | |||
66 | buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL); | ||
67 | if (!buf) | ||
68 | return -ENOMEM; | ||
69 | |||
70 | if (copy_from_user(buf, buffer, count)) { | ||
71 | kfree(buf); | ||
72 | return -EFAULT; | ||
73 | } | ||
74 | |||
75 | buf[count] = '\0'; | ||
76 | |||
77 | /* work around \n when echo'ing into proc */ | ||
78 | if (buf[count - 1] == '\n') | ||
79 | buf[count - 1] = '\0'; | ||
80 | |||
81 | /* before we change anything we want to stop any running timers, | ||
82 | * otherwise calls such as on will have no persistent effect | ||
83 | */ | ||
84 | del_timer_sync(&led_blink_timer); | ||
85 | |||
86 | if (!strcmp(buf, "on")) { | ||
87 | auxio_set_led(AUXIO_LED_ON); | ||
88 | } else if (!strcmp(buf, "toggle")) { | ||
89 | led_toggle(); | ||
90 | } else if ((*buf > '0') && (*buf <= '9')) { | ||
91 | led_blink(simple_strtoul(buf, NULL, 10)); | ||
92 | } else if (!strcmp(buf, "load")) { | ||
93 | led_blink(0); | ||
94 | } else { | ||
95 | auxio_set_led(AUXIO_LED_OFF); | ||
96 | } | ||
97 | |||
98 | kfree(buf); | ||
99 | |||
100 | return count; | ||
101 | } | ||
102 | |||
103 | static struct proc_dir_entry *led; | ||
104 | |||
105 | #define LED_VERSION "0.1" | ||
106 | |||
107 | static int __init led_init(void) | ||
108 | { | ||
109 | init_timer(&led_blink_timer); | ||
110 | led_blink_timer.function = led_blink; | ||
111 | |||
112 | led = create_proc_entry("led", 0, NULL); | ||
113 | if (!led) | ||
114 | return -ENOMEM; | ||
115 | |||
116 | led->read_proc = led_read_proc; /* reader function */ | ||
117 | led->write_proc = led_write_proc; /* writer function */ | ||
118 | led->owner = THIS_MODULE; | ||
119 | |||
120 | printk(KERN_INFO | ||
121 | "led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n", | ||
122 | LED_VERSION); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static void __exit led_exit(void) | ||
128 | { | ||
129 | remove_proc_entry("led", NULL); | ||
130 | del_timer_sync(&led_blink_timer); | ||
131 | } | ||
132 | |||
133 | module_init(led_init); | ||
134 | module_exit(led_exit); | ||
135 | |||
136 | MODULE_AUTHOR("Lars Kotthoff <metalhead@metalhead.ws>"); | ||
137 | MODULE_DESCRIPTION("Provides control of the front LED on SPARC systems."); | ||
138 | MODULE_LICENSE("GPL"); | ||
139 | MODULE_VERSION(LED_VERSION); | ||
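
The write handler above accepts "on", "toggle" and "load", a leading digit 1-9
(the blink interval in seconds), and treats anything else as "off". A tiny
user-space sketch of driving it (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/led", "w");

            if (!f)
                    return 1;
            fputs("toggle", f);     /* or "on", "load", "2", ... */
            fclose(f);
            return 0;
    }
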
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c index df1c0b31a930..a6ba3d26222c 100644 --- a/arch/sparc/kernel/sunos_ioctl.c +++ b/arch/sparc/kernel/sunos_ioctl.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/smp_lock.h> | 23 | #include <linux/smp_lock.h> |
24 | #include <linux/syscalls.h> | 24 | #include <linux/syscalls.h> |
25 | #include <linux/file.h> | 25 | #include <linux/file.h> |
26 | #include <asm/kbio.h> | ||
27 | 26 | ||
28 | #if 0 | 27 | #if 0 |
29 | extern char sunkbd_type; | 28 | extern char sunkbd_type; |
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c index e6a00325075a..92e26304de90 100644 --- a/arch/sparc64/kernel/ioctl32.c +++ b/arch/sparc64/kernel/ioctl32.c | |||
@@ -11,33 +11,14 @@ | |||
11 | 11 | ||
12 | #define INCLUDES | 12 | #define INCLUDES |
13 | #include "compat_ioctl.c" | 13 | #include "compat_ioctl.c" |
14 | #include <linux/ncp_fs.h> | ||
15 | #include <linux/syscalls.h> | 14 | #include <linux/syscalls.h> |
16 | #include <asm/fbio.h> | 15 | #include <asm/fbio.h> |
17 | #include <asm/kbio.h> | ||
18 | #include <asm/vuid_event.h> | ||
19 | #include <asm/envctrl.h> | ||
20 | #include <asm/display7seg.h> | ||
21 | #include <asm/openpromio.h> | ||
22 | #include <asm/audioio.h> | ||
23 | #include <asm/watchdog.h> | ||
24 | 16 | ||
25 | /* Use this to get at 32-bit user passed pointers. | 17 | /* Use this to get at 32-bit user passed pointers. |
26 | * See sys_sparc32.c for description about it. | 18 | * See sys_sparc32.c for description about it. |
27 | */ | 19 | */ |
28 | #define A(__x) compat_ptr(__x) | 20 | #define A(__x) compat_ptr(__x) |
29 | 21 | ||
30 | static __inline__ void *alloc_user_space(long len) | ||
31 | { | ||
32 | struct pt_regs *regs = current_thread_info()->kregs; | ||
33 | unsigned long usp = regs->u_regs[UREG_I6]; | ||
34 | |||
35 | if (!(test_thread_flag(TIF_32BIT))) | ||
36 | usp += STACK_BIAS; | ||
37 | |||
38 | return (void *) (usp - len); | ||
39 | } | ||
40 | |||
41 | #define CODE | 22 | #define CODE |
42 | #include "compat_ioctl.c" | 23 | #include "compat_ioctl.c" |
43 | 24 | ||
@@ -111,357 +92,6 @@ static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg) | |||
111 | return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p); | 92 | return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p); |
112 | } | 93 | } |
113 | 94 | ||
114 | #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) | ||
115 | /* This really belongs in include/linux/drm.h -DaveM */ | ||
116 | #include "../../../drivers/char/drm/drm.h" | ||
117 | |||
118 | typedef struct drm32_version { | ||
119 | int version_major; /* Major version */ | ||
120 | int version_minor; /* Minor version */ | ||
121 | int version_patchlevel;/* Patch level */ | ||
122 | int name_len; /* Length of name buffer */ | ||
123 | u32 name; /* Name of driver */ | ||
124 | int date_len; /* Length of date buffer */ | ||
125 | u32 date; /* User-space buffer to hold date */ | ||
126 | int desc_len; /* Length of desc buffer */ | ||
127 | u32 desc; /* User-space buffer to hold desc */ | ||
128 | } drm32_version_t; | ||
129 | #define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t) | ||
130 | |||
131 | static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
132 | { | ||
133 | drm32_version_t __user *uversion = (drm32_version_t __user *)arg; | ||
134 | drm_version_t __user *p = compat_alloc_user_space(sizeof(*p)); | ||
135 | compat_uptr_t addr; | ||
136 | int n; | ||
137 | int ret; | ||
138 | |||
139 | if (clear_user(p, 3 * sizeof(int)) || | ||
140 | get_user(n, &uversion->name_len) || | ||
141 | put_user(n, &p->name_len) || | ||
142 | get_user(addr, &uversion->name) || | ||
143 | put_user(compat_ptr(addr), &p->name) || | ||
144 | get_user(n, &uversion->date_len) || | ||
145 | put_user(n, &p->date_len) || | ||
146 | get_user(addr, &uversion->date) || | ||
147 | put_user(compat_ptr(addr), &p->date) || | ||
148 | get_user(n, &uversion->desc_len) || | ||
149 | put_user(n, &p->desc_len) || | ||
150 | get_user(addr, &uversion->desc) || | ||
151 | put_user(compat_ptr(addr), &p->desc)) | ||
152 | return -EFAULT; | ||
153 | |||
154 | ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p); | ||
155 | if (ret) | ||
156 | return ret; | ||
157 | |||
158 | if (copy_in_user(uversion, p, 3 * sizeof(int)) || | ||
159 | get_user(n, &p->name_len) || | ||
160 | put_user(n, &uversion->name_len) || | ||
161 | get_user(n, &p->date_len) || | ||
162 | put_user(n, &uversion->date_len) || | ||
163 | get_user(n, &p->desc_len) || | ||
164 | put_user(n, &uversion->desc_len)) | ||
165 | return -EFAULT; | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | typedef struct drm32_unique { | ||
171 | int unique_len; /* Length of unique */ | ||
172 | u32 unique; /* Unique name for driver instantiation */ | ||
173 | } drm32_unique_t; | ||
174 | #define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t) | ||
175 | #define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t) | ||
176 | |||
177 | static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
178 | { | ||
179 | drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg; | ||
180 | drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p)); | ||
181 | compat_uptr_t addr; | ||
182 | int n; | ||
183 | int ret; | ||
184 | |||
185 | if (get_user(n, &uarg->unique_len) || | ||
186 | put_user(n, &p->unique_len) || | ||
187 | get_user(addr, &uarg->unique) || | ||
188 | put_user(compat_ptr(addr), &p->unique)) | ||
189 | return -EFAULT; | ||
190 | |||
191 | if (cmd == DRM32_IOCTL_GET_UNIQUE) | ||
192 | ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p); | ||
193 | else | ||
194 | ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p); | ||
195 | |||
196 | if (ret) | ||
197 | return ret; | ||
198 | |||
199 | if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len)) | ||
200 | return -EFAULT; | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | typedef struct drm32_map { | ||
206 | u32 offset; /* Requested physical address (0 for SAREA)*/ | ||
207 | u32 size; /* Requested physical size (bytes) */ | ||
208 | drm_map_type_t type; /* Type of memory to map */ | ||
209 | drm_map_flags_t flags; /* Flags */ | ||
210 | u32 handle; /* User-space: "Handle" to pass to mmap */ | ||
211 | /* Kernel-space: kernel-virtual address */ | ||
212 | int mtrr; /* MTRR slot used */ | ||
213 | /* Private data */ | ||
214 | } drm32_map_t; | ||
215 | #define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t) | ||
216 | |||
217 | static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
218 | { | ||
219 | drm32_map_t __user *uarg = (drm32_map_t __user *) arg; | ||
220 | drm_map_t karg; | ||
221 | mm_segment_t old_fs; | ||
222 | u32 tmp; | ||
223 | int ret; | ||
224 | |||
225 | ret = get_user(karg.offset, &uarg->offset); | ||
226 | ret |= get_user(karg.size, &uarg->size); | ||
227 | ret |= get_user(karg.type, &uarg->type); | ||
228 | ret |= get_user(karg.flags, &uarg->flags); | ||
229 | ret |= get_user(tmp, &uarg->handle); | ||
230 | ret |= get_user(karg.mtrr, &uarg->mtrr); | ||
231 | if (ret) | ||
232 | return -EFAULT; | ||
233 | |||
234 | karg.handle = (void *) (unsigned long) tmp; | ||
235 | |||
236 | old_fs = get_fs(); | ||
237 | set_fs(KERNEL_DS); | ||
238 | ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg); | ||
239 | set_fs(old_fs); | ||
240 | |||
241 | if (!ret) { | ||
242 | ret = put_user(karg.offset, &uarg->offset); | ||
243 | ret |= put_user(karg.size, &uarg->size); | ||
244 | ret |= put_user(karg.type, &uarg->type); | ||
245 | ret |= put_user(karg.flags, &uarg->flags); | ||
246 | tmp = (u32) (long)karg.handle; | ||
247 | ret |= put_user(tmp, &uarg->handle); | ||
248 | ret |= put_user(karg.mtrr, &uarg->mtrr); | ||
249 | if (ret) | ||
250 | ret = -EFAULT; | ||
251 | } | ||
252 | |||
253 | return ret; | ||
254 | } | ||
255 | |||
256 | typedef struct drm32_buf_info { | ||
257 | int count; /* Entries in list */ | ||
258 | u32 list; /* (drm_buf_desc_t *) */ | ||
259 | } drm32_buf_info_t; | ||
260 | #define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t) | ||
261 | |||
262 | static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
263 | { | ||
264 | drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg; | ||
265 | drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p)); | ||
266 | compat_uptr_t addr; | ||
267 | int n; | ||
268 | int ret; | ||
269 | |||
270 | if (get_user(n, &uarg->count) || put_user(n, &p->count) || | ||
271 | get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list)) | ||
272 | return -EFAULT; | ||
273 | |||
274 | ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p); | ||
275 | if (ret) | ||
276 | return ret; | ||
277 | |||
278 | if (get_user(n, &p->count) || put_user(n, &uarg->count)) | ||
279 | return -EFAULT; | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | typedef struct drm32_buf_free { | ||
285 | int count; | ||
286 | u32 list; /* (int *) */ | ||
287 | } drm32_buf_free_t; | ||
288 | #define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t) | ||
289 | |||
290 | static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
291 | { | ||
292 | drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg; | ||
293 | drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p)); | ||
294 | compat_uptr_t addr; | ||
295 | int n; | ||
296 | |||
297 | if (get_user(n, &uarg->count) || put_user(n, &p->count) || | ||
298 | get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list)) | ||
299 | return -EFAULT; | ||
300 | |||
301 | return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p); | ||
302 | } | ||
303 | |||
304 | typedef struct drm32_buf_pub { | ||
305 | int idx; /* Index into master buflist */ | ||
306 | int total; /* Buffer size */ | ||
307 | int used; /* Amount of buffer in use (for DMA) */ | ||
308 | u32 address; /* Address of buffer (void *) */ | ||
309 | } drm32_buf_pub_t; | ||
310 | |||
311 | typedef struct drm32_buf_map { | ||
312 | int count; /* Length of buflist */ | ||
313 | u32 virtual; /* Mmaped area in user-virtual (void *) */ | ||
314 | u32 list; /* Buffer information (drm_buf_pub_t *) */ | ||
315 | } drm32_buf_map_t; | ||
316 | #define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t) | ||
317 | |||
318 | static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
319 | { | ||
320 | drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg; | ||
321 | drm32_buf_pub_t __user *ulist; | ||
322 | drm_buf_map_t __user *arg64; | ||
323 | drm_buf_pub_t __user *list; | ||
324 | int orig_count, ret, i; | ||
325 | int n; | ||
326 | compat_uptr_t addr; | ||
327 | |||
328 | if (get_user(orig_count, &uarg->count)) | ||
329 | return -EFAULT; | ||
330 | |||
331 | arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) + | ||
332 | (size_t)orig_count * sizeof(drm_buf_pub_t)); | ||
333 | list = (void __user *)(arg64 + 1); | ||
334 | |||
335 | if (put_user(orig_count, &arg64->count) || | ||
336 | put_user(list, &arg64->list) || | ||
337 | get_user(addr, &uarg->virtual) || | ||
338 | put_user(compat_ptr(addr), &arg64->virtual) || | ||
339 | get_user(addr, &uarg->list)) | ||
340 | return -EFAULT; | ||
341 | |||
342 | ulist = compat_ptr(addr); | ||
343 | |||
344 | for (i = 0; i < orig_count; i++) { | ||
345 | if (get_user(n, &ulist[i].idx) || | ||
346 | put_user(n, &list[i].idx) || | ||
347 | get_user(n, &ulist[i].total) || | ||
348 | put_user(n, &list[i].total) || | ||
349 | get_user(n, &ulist[i].used) || | ||
350 | put_user(n, &list[i].used) || | ||
351 | get_user(addr, &ulist[i].address) || | ||
352 | put_user(compat_ptr(addr), &list[i].address)) | ||
353 | return -EFAULT; | ||
354 | } | ||
355 | |||
356 | ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64); | ||
357 | if (ret) | ||
358 | return ret; | ||
359 | |||
360 | for (i = 0; i < orig_count; i++) { | ||
361 | void __user *p; | ||
362 | if (get_user(n, &list[i].idx) || | ||
363 | put_user(n, &ulist[i].idx) || | ||
364 | get_user(n, &list[i].total) || | ||
365 | put_user(n, &ulist[i].total) || | ||
366 | get_user(n, &list[i].used) || | ||
367 | put_user(n, &ulist[i].used) || | ||
368 | get_user(p, &list[i].address) || | ||
369 | put_user((unsigned long)p, &ulist[i].address)) | ||
370 | return -EFAULT; | ||
371 | } | ||
372 | |||
373 | if (get_user(n, &arg64->count) || put_user(n, &uarg->count)) | ||
374 | return -EFAULT; | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | typedef struct drm32_dma { | ||
380 | /* Indices here refer to the offset into | ||
381 | buflist in drm_buf_get_t. */ | ||
382 | int context; /* Context handle */ | ||
383 | int send_count; /* Number of buffers to send */ | ||
384 | u32 send_indices; /* List of handles to buffers (int *) */ | ||
385 | u32 send_sizes; /* Lengths of data to send (int *) */ | ||
386 | drm_dma_flags_t flags; /* Flags */ | ||
387 | int request_count; /* Number of buffers requested */ | ||
388 | int request_size; /* Desired size for buffers */ | ||
389 | u32 request_indices; /* Buffer information (int *) */ | ||
390 | u32 request_sizes; /* (int *) */ | ||
391 | int granted_count; /* Number of buffers granted */ | ||
392 | } drm32_dma_t; | ||
393 | #define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t) | ||
394 | |||
395 | /* RED PEN The DRM layer blindly dereferences the send/request | ||
396 | * index/size arrays even though they are userland | ||
397 | * pointers. -DaveM | ||
398 | */ | ||
399 | static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
400 | { | ||
401 | drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg; | ||
402 | drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p)); | ||
403 | compat_uptr_t addr; | ||
404 | int ret; | ||
405 | |||
406 | if (copy_in_user(p, uarg, 2 * sizeof(int)) || | ||
407 | get_user(addr, &uarg->send_indices) || | ||
408 | put_user(compat_ptr(addr), &p->send_indices) || | ||
409 | get_user(addr, &uarg->send_sizes) || | ||
410 | put_user(compat_ptr(addr), &p->send_sizes) || | ||
411 | copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) || | ||
412 | copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))|| | ||
413 | copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) || | ||
414 | get_user(addr, &uarg->request_indices) || | ||
415 | put_user(compat_ptr(addr), &p->request_indices) || | ||
416 | get_user(addr, &uarg->request_sizes) || | ||
417 | put_user(compat_ptr(addr), &p->request_sizes) || | ||
418 | copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int))) | ||
419 | return -EFAULT; | ||
420 | |||
421 | ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p); | ||
422 | if (ret) | ||
423 | return ret; | ||
424 | |||
425 | if (copy_in_user(uarg, p, 2 * sizeof(int)) || | ||
426 | copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) || | ||
427 | copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))|| | ||
428 | copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) || | ||
429 | copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int))) | ||
430 | return -EFAULT; | ||
431 | |||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | typedef struct drm32_ctx_res { | ||
436 | int count; | ||
437 | u32 contexts; /* (drm_ctx_t *) */ | ||
438 | } drm32_ctx_res_t; | ||
439 | #define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t) | ||
440 | |||
441 | static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
442 | { | ||
443 | drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg; | ||
444 | drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p)); | ||
445 | compat_uptr_t addr; | ||
446 | int ret; | ||
447 | |||
448 | if (copy_in_user(p, uarg, sizeof(int)) || | ||
449 | get_user(addr, &uarg->contexts) || | ||
450 | put_user(compat_ptr(addr), &p->contexts)) | ||
451 | return -EFAULT; | ||
452 | |||
453 | ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p); | ||
454 | if (ret) | ||
455 | return ret; | ||
456 | |||
457 | if (copy_in_user(uarg, p, sizeof(int))) | ||
458 | return -EFAULT; | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | #endif | ||
464 | |||
465 | typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *); | 95 | typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *); |
466 | 96 | ||
467 | #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl) | 97 | #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl) |
@@ -485,103 +115,14 @@ COMPATIBLE_IOCTL(FBIOSCURPOS) | |||
485 | COMPATIBLE_IOCTL(FBIOGCURPOS) | 115 | COMPATIBLE_IOCTL(FBIOGCURPOS) |
486 | COMPATIBLE_IOCTL(FBIOGCURMAX) | 116 | COMPATIBLE_IOCTL(FBIOGCURMAX) |
487 | /* Little k */ | 117 | /* Little k */ |
488 | COMPATIBLE_IOCTL(KIOCTYPE) | ||
489 | COMPATIBLE_IOCTL(KIOCLAYOUT) | ||
490 | COMPATIBLE_IOCTL(KIOCGTRANS) | ||
491 | COMPATIBLE_IOCTL(KIOCTRANS) | ||
492 | COMPATIBLE_IOCTL(KIOCCMD) | ||
493 | COMPATIBLE_IOCTL(KIOCSDIRECT) | ||
494 | COMPATIBLE_IOCTL(KIOCSLED) | ||
495 | COMPATIBLE_IOCTL(KIOCGLED) | ||
496 | COMPATIBLE_IOCTL(KIOCSRATE) | ||
497 | COMPATIBLE_IOCTL(KIOCGRATE) | ||
498 | COMPATIBLE_IOCTL(VUIDSFORMAT) | ||
499 | COMPATIBLE_IOCTL(VUIDGFORMAT) | ||
500 | /* Little v, the video4linux ioctls */ | 118 | /* Little v, the video4linux ioctls */ |
501 | COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ | 119 | COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ |
502 | COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ | 120 | COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ |
503 | COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE) | ||
504 | COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE) | ||
505 | COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE) | ||
506 | COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS) | ||
507 | COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS) | ||
508 | COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE) | ||
509 | COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE) | ||
510 | COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE) | ||
511 | COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE) | ||
512 | COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS) | ||
513 | /* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */ | ||
514 | COMPATIBLE_IOCTL(D7SIOCWR) | ||
515 | COMPATIBLE_IOCTL(D7SIOCTM) | ||
516 | /* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have | ||
517 | * embedded pointers in the arg which we'd need to clean up... | ||
518 | */ | ||
519 | COMPATIBLE_IOCTL(OPROMGETOPT) | ||
520 | COMPATIBLE_IOCTL(OPROMSETOPT) | ||
521 | COMPATIBLE_IOCTL(OPROMNXTOPT) | ||
522 | COMPATIBLE_IOCTL(OPROMSETOPT2) | ||
523 | COMPATIBLE_IOCTL(OPROMNEXT) | ||
524 | COMPATIBLE_IOCTL(OPROMCHILD) | ||
525 | COMPATIBLE_IOCTL(OPROMGETPROP) | ||
526 | COMPATIBLE_IOCTL(OPROMNXTPROP) | ||
527 | COMPATIBLE_IOCTL(OPROMU2P) | ||
528 | COMPATIBLE_IOCTL(OPROMGETCONS) | ||
529 | COMPATIBLE_IOCTL(OPROMGETFBNAME) | ||
530 | COMPATIBLE_IOCTL(OPROMGETBOOTARGS) | ||
531 | COMPATIBLE_IOCTL(OPROMSETCUR) | ||
532 | COMPATIBLE_IOCTL(OPROMPCI2NODE) | ||
533 | COMPATIBLE_IOCTL(OPROMPATH2NODE) | ||
534 | /* Big L */ | ||
535 | COMPATIBLE_IOCTL(LOOP_SET_STATUS64) | ||
536 | COMPATIBLE_IOCTL(LOOP_GET_STATUS64) | ||
537 | /* Big A */ | ||
538 | COMPATIBLE_IOCTL(AUDIO_GETINFO) | ||
539 | COMPATIBLE_IOCTL(AUDIO_SETINFO) | ||
540 | COMPATIBLE_IOCTL(AUDIO_DRAIN) | ||
541 | COMPATIBLE_IOCTL(AUDIO_GETDEV) | ||
542 | COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS) | ||
543 | COMPATIBLE_IOCTL(AUDIO_FLUSH) | ||
544 | COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI) | ||
545 | #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) | ||
546 | COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC) | ||
547 | COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID) | ||
548 | COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC) | ||
549 | COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK) | ||
550 | COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK) | ||
551 | COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL) | ||
552 | COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS) | ||
553 | COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS) | ||
554 | COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX) | ||
555 | COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX) | ||
556 | COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX) | ||
557 | COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX) | ||
558 | COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX) | ||
559 | COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX) | ||
560 | COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW) | ||
561 | COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW) | ||
562 | COMPATIBLE_IOCTL(DRM_IOCTL_LOCK) | ||
563 | COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK) | ||
564 | COMPATIBLE_IOCTL(DRM_IOCTL_FINISH) | ||
565 | #endif /* DRM */ | ||
566 | COMPATIBLE_IOCTL(WIOCSTART) | ||
567 | COMPATIBLE_IOCTL(WIOCSTOP) | ||
568 | COMPATIBLE_IOCTL(WIOCGSTAT) | ||
569 | /* And these ioctls need translation */ | 121 | /* And these ioctls need translation */ |
570 | /* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */ | 122 | /* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */ |
571 | HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap) | 123 | HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap) |
572 | HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap) | 124 | HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap) |
573 | HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor) | 125 | HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor) |
574 | #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) | ||
575 | HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version) | ||
576 | HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique) | ||
577 | HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique) | ||
578 | HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap) | ||
579 | HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs) | ||
580 | HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs) | ||
581 | HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs) | ||
582 | HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma) | ||
583 | HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx) | ||
584 | #endif /* DRM */ | ||
585 | #if 0 | 126 | #if 0 |
586 | HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl) | 127 | HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl) |
587 | HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl) | 128 | HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl) |
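The two blocks above follow the usual split in the 32-bit compat ioctl tables: COMPATIBLE_IOCTL(cmd) marks a command whose argument layout is the same for 32-bit and 64-bit callers and can be passed straight to the native handler, while HANDLE_IOCTL(cmd, handler) routes the command through a translation routine (fbiogetputcmap, the drm32_* helpers and friends above). A minimal sketch of the idea; the real macros expand differently, and native_ioctl() is only a stand-in for the normal 64-bit ioctl path:

#include <linux/errno.h>

/* Sketch only: the real COMPATIBLE_IOCTL()/HANDLE_IOCTL() macros expand
 * differently; native_ioctl() stands in for the normal 64-bit ioctl path. */
typedef int (*ioctl_trans_handler_t)(unsigned int fd, unsigned int cmd,
				     unsigned long arg);

struct ioctl_trans {
	unsigned int cmd;		/* ioctl number                   */
	ioctl_trans_handler_t handler;	/* NULL: argument needs no fixup  */
};

#define COMPATIBLE_IOCTL(cmd)		{ (cmd), NULL },
#define HANDLE_IOCTL(cmd, handler)	{ (cmd), (handler) },

extern int native_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);

static int dispatch_compat_ioctl(const struct ioctl_trans *table, int entries,
				 unsigned int fd, unsigned int cmd,
				 unsigned long arg)
{
	int i;

	for (i = 0; i < entries; i++) {
		if (table[i].cmd != cmd)
			continue;
		if (table[i].handler)		/* layout differs: translate */
			return table[i].handler(fd, cmd, arg);
		return native_ioctl(fd, cmd, arg);	/* pass through */
	}
	return -EINVAL;				/* unknown 32-bit command */
}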
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index c1f34237cdf2..bf1849dd9c49 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c | |||
@@ -154,6 +154,7 @@ int prom_callback(long *args) | |||
154 | pud_t *pudp; | 154 | pud_t *pudp; |
155 | pmd_t *pmdp; | 155 | pmd_t *pmdp; |
156 | pte_t *ptep; | 156 | pte_t *ptep; |
157 | pte_t pte; | ||
157 | 158 | ||
158 | for_each_process(p) { | 159 | for_each_process(p) { |
159 | mm = p->mm; | 160 | mm = p->mm; |
@@ -178,8 +179,9 @@ int prom_callback(long *args) | |||
178 | * being called from inside OBP. | 179 | * being called from inside OBP. |
179 | */ | 180 | */ |
180 | ptep = pte_offset_map(pmdp, va); | 181 | ptep = pte_offset_map(pmdp, va); |
181 | if (pte_present(*ptep)) { | 182 | pte = *ptep; |
182 | tte = pte_val(*ptep); | 183 | if (pte_present(pte)) { |
184 | tte = pte_val(pte); | ||
183 | res = PROM_TRUE; | 185 | res = PROM_TRUE; |
184 | } | 186 | } |
185 | pte_unmap(ptep); | 187 | pte_unmap(ptep); |
@@ -218,6 +220,7 @@ int prom_callback(long *args) | |||
218 | pud_t *pudp; | 220 | pud_t *pudp; |
219 | pmd_t *pmdp; | 221 | pmd_t *pmdp; |
220 | pte_t *ptep; | 222 | pte_t *ptep; |
223 | pte_t pte; | ||
221 | int error; | 224 | int error; |
222 | 225 | ||
223 | if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) { | 226 | if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) { |
@@ -240,8 +243,9 @@ int prom_callback(long *args) | |||
240 | * being called from inside OBP. | 243 | * being called from inside OBP. |
241 | */ | 244 | */ |
242 | ptep = pte_offset_kernel(pmdp, va); | 245 | ptep = pte_offset_kernel(pmdp, va); |
243 | if (pte_present(*ptep)) { | 246 | pte = *ptep; |
244 | tte = pte_val(*ptep); | 247 | if (pte_present(pte)) { |
248 | tte = pte_val(pte); | ||
245 | res = PROM_TRUE; | 249 | res = PROM_TRUE; |
246 | } | 250 | } |
247 | goto done; | 251 | goto done; |
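Both hunks in setup.c (and the signal32.c hunk that follows) stop dereferencing *ptep twice and instead take one snapshot into a local pte_t, so the pte_present() test and the pte_val() extraction are guaranteed to operate on the same value, presumably to guard against the entry changing between the two reads. The pattern in isolation, as a kernel-context sketch (pmdp and va are assumed to come from the surrounding lookup code):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch only: one snapshot of the page-table entry serves both the
 * presence test and the value extraction. */
static unsigned long read_tte(pmd_t *pmdp, unsigned long va)
{
	pte_t *ptep, pte;
	unsigned long tte = 0;

	ptep = pte_offset_map(pmdp, va);
	pte = *ptep;			/* read the entry exactly once       */
	if (pte_present(pte))
		tte = pte_val(pte);	/* same value that passed the test   */
	pte_unmap(ptep);

	return tte;
}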
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c index aecccd0df1d1..009a86e5ded4 100644 --- a/arch/sparc64/kernel/signal32.c +++ b/arch/sparc64/kernel/signal32.c | |||
@@ -863,6 +863,7 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
863 | pud_t *pudp = pud_offset(pgdp, address); | 863 | pud_t *pudp = pud_offset(pgdp, address); |
864 | pmd_t *pmdp = pmd_offset(pudp, address); | 864 | pmd_t *pmdp = pmd_offset(pudp, address); |
865 | pte_t *ptep; | 865 | pte_t *ptep; |
866 | pte_t pte; | ||
866 | 867 | ||
867 | regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); | 868 | regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); |
868 | 869 | ||
@@ -873,9 +874,10 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, | |||
873 | 874 | ||
874 | preempt_disable(); | 875 | preempt_disable(); |
875 | ptep = pte_offset_map(pmdp, address); | 876 | ptep = pte_offset_map(pmdp, address); |
876 | if (pte_present(*ptep)) { | 877 | pte = *ptep; |
878 | if (pte_present(pte)) { | ||
877 | unsigned long page = (unsigned long) | 879 | unsigned long page = (unsigned long) |
878 | page_address(pte_page(*ptep)); | 880 | page_address(pte_page(pte)); |
879 | 881 | ||
880 | wmb(); | 882 | wmb(); |
881 | __asm__ __volatile__("flush %0 + %1" | 883 | __asm__ __volatile__("flush %0 + %1" |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index b137fd63f5e1..5d90ee9aebf1 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -839,43 +839,29 @@ void smp_flush_tlb_all(void) | |||
839 | * questionable (in theory the big win for threads is the massive sharing of | 839 | * questionable (in theory the big win for threads is the massive sharing of |
840 | * address space state across processors). | 840 | * address space state across processors). |
841 | */ | 841 | */ |
842 | |||
843 | /* This currently is only used by the hugetlb arch pre-fault | ||
844 | * hook on UltraSPARC-III+ and later when changing the pagesize | ||
845 | * bits of the context register for an address space. | ||
846 | */ | ||
842 | void smp_flush_tlb_mm(struct mm_struct *mm) | 847 | void smp_flush_tlb_mm(struct mm_struct *mm) |
843 | { | 848 | { |
844 | /* | 849 | u32 ctx = CTX_HWBITS(mm->context); |
845 | * This code is called from two places, dup_mmap and exit_mmap. In the | 850 | int cpu = get_cpu(); |
846 | * former case, we really need a flush. In the later case, the callers | ||
847 | * are single threaded exec_mmap (really need a flush), multithreaded | ||
848 | * exec_mmap case (do not need to flush, since the caller gets a new | ||
849 | * context via activate_mm), and all other callers of mmput() whence | ||
850 | * the flush can be optimized since the associated threads are dead and | ||
851 | * the mm is being torn down (__exit_mm and other mmput callers) or the | ||
852 | * owning thread is dissociating itself from the mm. The | ||
853 | * (atomic_read(&mm->mm_users) == 0) check ensures real work is done | ||
854 | * for single thread exec and dup_mmap cases. An alternate check might | ||
855 | * have been (current->mm != mm). | ||
856 | * Kanoj Sarcar | ||
857 | */ | ||
858 | if (atomic_read(&mm->mm_users) == 0) | ||
859 | return; | ||
860 | |||
861 | { | ||
862 | u32 ctx = CTX_HWBITS(mm->context); | ||
863 | int cpu = get_cpu(); | ||
864 | 851 | ||
865 | if (atomic_read(&mm->mm_users) == 1) { | 852 | if (atomic_read(&mm->mm_users) == 1) { |
866 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | 853 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); |
867 | goto local_flush_and_out; | 854 | goto local_flush_and_out; |
868 | } | 855 | } |
869 | 856 | ||
870 | smp_cross_call_masked(&xcall_flush_tlb_mm, | 857 | smp_cross_call_masked(&xcall_flush_tlb_mm, |
871 | ctx, 0, 0, | 858 | ctx, 0, 0, |
872 | mm->cpu_vm_mask); | 859 | mm->cpu_vm_mask); |
873 | 860 | ||
874 | local_flush_and_out: | 861 | local_flush_and_out: |
875 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); | 862 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); |
876 | 863 | ||
877 | put_cpu(); | 864 | put_cpu(); |
878 | } | ||
879 | } | 865 | } |
880 | 866 | ||
881 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) | 867 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) |
@@ -883,34 +869,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long | |||
883 | u32 ctx = CTX_HWBITS(mm->context); | 869 | u32 ctx = CTX_HWBITS(mm->context); |
884 | int cpu = get_cpu(); | 870 | int cpu = get_cpu(); |
885 | 871 | ||
886 | if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) { | 872 | if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) |
887 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | 873 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); |
888 | goto local_flush_and_out; | 874 | else |
889 | } else { | 875 | smp_cross_call_masked(&xcall_flush_tlb_pending, |
890 | /* This optimization is not valid. Normally | 876 | ctx, nr, (unsigned long) vaddrs, |
891 | * we will be holding the page_table_lock, but | 877 | mm->cpu_vm_mask); |
892 | * there is an exception which is copy_page_range() | ||
893 | * when forking. The lock is held during the individual | ||
894 | * page table updates in the parent, but not at the | ||
895 | * top level, which is where we are invoked. | ||
896 | */ | ||
897 | if (0) { | ||
898 | cpumask_t this_cpu_mask = cpumask_of_cpu(cpu); | ||
899 | |||
900 | /* By virtue of running under the mm->page_table_lock, | ||
901 | * and mmu_context.h:switch_mm doing the same, the | ||
902 | * following operation is safe. | ||
903 | */ | ||
904 | if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask)) | ||
905 | goto local_flush_and_out; | ||
906 | } | ||
907 | } | ||
908 | |||
909 | smp_cross_call_masked(&xcall_flush_tlb_pending, | ||
910 | ctx, nr, (unsigned long) vaddrs, | ||
911 | mm->cpu_vm_mask); | ||
912 | 878 | ||
913 | local_flush_and_out: | ||
914 | __flush_tlb_pending(ctx, nr, vaddrs); | 879 | __flush_tlb_pending(ctx, nr, vaddrs); |
915 | 880 | ||
916 | put_cpu(); | 881 | put_cpu(); |
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c index 7654b8a7f03a..3f619ead22cc 100644 --- a/arch/sparc64/kernel/sunos_ioctl32.c +++ b/arch/sparc64/kernel/sunos_ioctl32.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/smp_lock.h> | 24 | #include <linux/smp_lock.h> |
25 | #include <linux/syscalls.h> | 25 | #include <linux/syscalls.h> |
26 | #include <linux/compat.h> | 26 | #include <linux/compat.h> |
27 | #include <asm/kbio.h> | ||
28 | 27 | ||
29 | #define SUNOS_NR_OPEN 256 | 28 | #define SUNOS_NR_OPEN 256 |
30 | 29 | ||
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 38c5525087a2..459c8fbe02b4 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -60,17 +60,6 @@ static void __iomem *mstk48t59_regs; | |||
60 | 60 | ||
61 | static int set_rtc_mmss(unsigned long); | 61 | static int set_rtc_mmss(unsigned long); |
62 | 62 | ||
63 | static __init unsigned long dummy_get_tick(void) | ||
64 | { | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static __initdata struct sparc64_tick_ops dummy_tick_ops = { | ||
69 | .get_tick = dummy_get_tick, | ||
70 | }; | ||
71 | |||
72 | struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops; | ||
73 | |||
74 | #define TICK_PRIV_BIT (1UL << 63) | 63 | #define TICK_PRIV_BIT (1UL << 63) |
75 | 64 | ||
76 | #ifdef CONFIG_SMP | 65 | #ifdef CONFIG_SMP |
@@ -200,6 +189,8 @@ static struct sparc64_tick_ops tick_operations __read_mostly = { | |||
200 | .softint_mask = 1UL << 0, | 189 | .softint_mask = 1UL << 0, |
201 | }; | 190 | }; |
202 | 191 | ||
192 | struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; | ||
193 | |||
203 | static void stick_init_tick(unsigned long offset) | 194 | static void stick_init_tick(unsigned long offset) |
204 | { | 195 | { |
205 | tick_disable_protection(); | 196 | tick_disable_protection(); |
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index b02fc2267159..ba54b5872578 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c | |||
@@ -389,6 +389,7 @@ static struct pci_device_id agp_ali_pci_table[] = { | |||
389 | MODULE_DEVICE_TABLE(pci, agp_ali_pci_table); | 389 | MODULE_DEVICE_TABLE(pci, agp_ali_pci_table); |
390 | 390 | ||
391 | static struct pci_driver agp_ali_pci_driver = { | 391 | static struct pci_driver agp_ali_pci_driver = { |
392 | .owner = THIS_MODULE, | ||
392 | .name = "agpgart-ali", | 393 | .name = "agpgart-ali", |
393 | .id_table = agp_ali_pci_table, | 394 | .id_table = agp_ali_pci_table, |
394 | .probe = agp_ali_probe, | 395 | .probe = agp_ali_probe, |
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 3a41672e4d66..40fcd88b2cea 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c | |||
@@ -94,19 +94,16 @@ static int amd_create_gatt_pages(int nr_tables) | |||
94 | int retval = 0; | 94 | int retval = 0; |
95 | int i; | 95 | int i; |
96 | 96 | ||
97 | tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *), | 97 | tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL); |
98 | GFP_KERNEL); | ||
99 | if (tables == NULL) | 98 | if (tables == NULL) |
100 | return -ENOMEM; | 99 | return -ENOMEM; |
101 | 100 | ||
102 | memset (tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1)); | ||
103 | for (i = 0; i < nr_tables; i++) { | 101 | for (i = 0; i < nr_tables; i++) { |
104 | entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL); | 102 | entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL); |
105 | if (entry == NULL) { | 103 | if (entry == NULL) { |
106 | retval = -ENOMEM; | 104 | retval = -ENOMEM; |
107 | break; | 105 | break; |
108 | } | 106 | } |
109 | memset (entry, 0, sizeof(struct amd_page_map)); | ||
110 | tables[i] = entry; | 107 | tables[i] = entry; |
111 | retval = amd_create_page_map(entry); | 108 | retval = amd_create_page_map(entry); |
112 | if (retval != 0) | 109 | if (retval != 0) |
@@ -518,6 +515,7 @@ static struct pci_device_id agp_amdk7_pci_table[] = { | |||
518 | MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table); | 515 | MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table); |
519 | 516 | ||
520 | static struct pci_driver agp_amdk7_pci_driver = { | 517 | static struct pci_driver agp_amdk7_pci_driver = { |
518 | .owner = THIS_MODULE, | ||
521 | .name = "agpgart-amdk7", | 519 | .name = "agpgart-amdk7", |
522 | .id_table = agp_amdk7_pci_table, | 520 | .id_table = agp_amdk7_pci_table, |
523 | .probe = agp_amdk7_probe, | 521 | .probe = agp_amdk7_probe, |
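The amd-k7 hunk above is the first of several identical conversions in this series (ati, backend, frontend, generic, i460, sworks, cpufreq and cpufreq_stats below): a kmalloc() immediately followed by memset(..., 0, ...) becomes a single kzalloc(), which returns already-zeroed memory. A minimal sketch of the before/after shape; struct foo and the function names are illustrative only:

#include <linux/slab.h>
#include <linux/string.h>

struct foo {			/* illustrative type */
	int a;
	void *b;
};

/* Before: allocate, then clear by hand. */
static struct foo *alloc_foo_old(void)
{
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	memset(p, 0, sizeof(*p));	/* separate zeroing step */
	return p;
}

/* After: kzalloc() hands back memory that is already zeroed. */
static struct foo *alloc_foo_new(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}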
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 78ce98a69f37..8f748fddca94 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -703,6 +703,7 @@ static struct pci_device_id agp_amd64_pci_table[] = { | |||
703 | MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); | 703 | MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); |
704 | 704 | ||
705 | static struct pci_driver agp_amd64_pci_driver = { | 705 | static struct pci_driver agp_amd64_pci_driver = { |
706 | .owner = THIS_MODULE, | ||
706 | .name = "agpgart-amd64", | 707 | .name = "agpgart-amd64", |
707 | .id_table = agp_amd64_pci_table, | 708 | .id_table = agp_amd64_pci_table, |
708 | .probe = agp_amd64_probe, | 709 | .probe = agp_amd64_probe, |
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 0b6e72642d6e..fbd415565463 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c | |||
@@ -118,14 +118,12 @@ static int ati_create_gatt_pages(int nr_tables) | |||
118 | int retval = 0; | 118 | int retval = 0; |
119 | int i; | 119 | int i; |
120 | 120 | ||
121 | tables = kmalloc((nr_tables + 1) * sizeof(ati_page_map *), | 121 | tables = kzalloc((nr_tables + 1) * sizeof(ati_page_map *),GFP_KERNEL); |
122 | GFP_KERNEL); | ||
123 | if (tables == NULL) | 122 | if (tables == NULL) |
124 | return -ENOMEM; | 123 | return -ENOMEM; |
125 | 124 | ||
126 | memset(tables, 0, sizeof(ati_page_map *) * (nr_tables + 1)); | ||
127 | for (i = 0; i < nr_tables; i++) { | 125 | for (i = 0; i < nr_tables; i++) { |
128 | entry = kmalloc(sizeof(ati_page_map), GFP_KERNEL); | 126 | entry = kzalloc(sizeof(ati_page_map), GFP_KERNEL); |
129 | if (entry == NULL) { | 127 | if (entry == NULL) { |
130 | while (i>0) { | 128 | while (i>0) { |
131 | kfree (tables[i-1]); | 129 | kfree (tables[i-1]); |
@@ -136,7 +134,6 @@ static int ati_create_gatt_pages(int nr_tables) | |||
136 | retval = -ENOMEM; | 134 | retval = -ENOMEM; |
137 | break; | 135 | break; |
138 | } | 136 | } |
139 | memset(entry, 0, sizeof(ati_page_map)); | ||
140 | tables[i] = entry; | 137 | tables[i] = entry; |
141 | retval = ati_create_page_map(entry); | 138 | retval = ati_create_page_map(entry); |
142 | if (retval != 0) break; | 139 | if (retval != 0) break; |
@@ -524,6 +521,7 @@ static struct pci_device_id agp_ati_pci_table[] = { | |||
524 | MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); | 521 | MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); |
525 | 522 | ||
526 | static struct pci_driver agp_ati_pci_driver = { | 523 | static struct pci_driver agp_ati_pci_driver = { |
524 | .owner = THIS_MODULE, | ||
527 | .name = "agpgart-ati", | 525 | .name = "agpgart-ati", |
528 | .id_table = agp_ati_pci_table, | 526 | .id_table = agp_ati_pci_table, |
529 | .probe = agp_ati_probe, | 527 | .probe = agp_ati_probe, |
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 82b43c541c8d..73f333f491bd 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c | |||
@@ -222,12 +222,12 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge) | |||
222 | 222 | ||
223 | struct agp_bridge_data *agp_alloc_bridge(void) | 223 | struct agp_bridge_data *agp_alloc_bridge(void) |
224 | { | 224 | { |
225 | struct agp_bridge_data *bridge = kmalloc(sizeof(*bridge), GFP_KERNEL); | 225 | struct agp_bridge_data *bridge; |
226 | 226 | ||
227 | bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); | ||
227 | if (!bridge) | 228 | if (!bridge) |
228 | return NULL; | 229 | return NULL; |
229 | 230 | ||
230 | memset(bridge, 0, sizeof(*bridge)); | ||
231 | atomic_set(&bridge->agp_in_use, 0); | 231 | atomic_set(&bridge->agp_in_use, 0); |
232 | atomic_set(&bridge->current_memory_agp, 0); | 232 | atomic_set(&bridge->current_memory_agp, 0); |
233 | 233 | ||
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index ac19fdcd21c1..d41e0a62e32e 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c | |||
@@ -219,7 +219,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) | |||
219 | 219 | ||
220 | efficeon_private.l1_table[index] = page; | 220 | efficeon_private.l1_table[index] = page; |
221 | 221 | ||
222 | value = virt_to_gart(page) | pati | present | index; | 222 | value = virt_to_gart((unsigned long *)page) | pati | present | index; |
223 | 223 | ||
224 | pci_write_config_dword(agp_bridge->dev, | 224 | pci_write_config_dword(agp_bridge->dev, |
225 | EFFICEON_ATTPAGE, value); | 225 | EFFICEON_ATTPAGE, value); |
@@ -429,6 +429,7 @@ static struct pci_device_id agp_efficeon_pci_table[] = { | |||
429 | MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table); | 429 | MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table); |
430 | 430 | ||
431 | static struct pci_driver agp_efficeon_pci_driver = { | 431 | static struct pci_driver agp_efficeon_pci_driver = { |
432 | .owner = THIS_MODULE, | ||
432 | .name = "agpgart-efficeon", | 433 | .name = "agpgart-efficeon", |
433 | .id_table = agp_efficeon_pci_table, | 434 | .id_table = agp_efficeon_pci_table, |
434 | .probe = agp_efficeon_probe, | 435 | .probe = agp_efficeon_probe, |
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 3dfb6648547b..17f520c9d471 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c | |||
@@ -189,13 +189,12 @@ static int agp_create_segment(struct agp_client *client, struct agp_region *regi | |||
189 | struct agp_segment *user_seg; | 189 | struct agp_segment *user_seg; |
190 | size_t i; | 190 | size_t i; |
191 | 191 | ||
192 | seg = kmalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); | 192 | seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); |
193 | if (seg == NULL) { | 193 | if (seg == NULL) { |
194 | kfree(region->seg_list); | 194 | kfree(region->seg_list); |
195 | region->seg_list = NULL; | 195 | region->seg_list = NULL; |
196 | return -ENOMEM; | 196 | return -ENOMEM; |
197 | } | 197 | } |
198 | memset(seg, 0, (sizeof(struct agp_segment_priv) * region->seg_count)); | ||
199 | user_seg = region->seg_list; | 198 | user_seg = region->seg_list; |
200 | 199 | ||
201 | for (i = 0; i < region->seg_count; i++) { | 200 | for (i = 0; i < region->seg_count; i++) { |
@@ -332,14 +331,11 @@ static struct agp_controller *agp_create_controller(pid_t id) | |||
332 | { | 331 | { |
333 | struct agp_controller *controller; | 332 | struct agp_controller *controller; |
334 | 333 | ||
335 | controller = kmalloc(sizeof(struct agp_controller), GFP_KERNEL); | 334 | controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL); |
336 | |||
337 | if (controller == NULL) | 335 | if (controller == NULL) |
338 | return NULL; | 336 | return NULL; |
339 | 337 | ||
340 | memset(controller, 0, sizeof(struct agp_controller)); | ||
341 | controller->pid = id; | 338 | controller->pid = id; |
342 | |||
343 | return controller; | 339 | return controller; |
344 | } | 340 | } |
345 | 341 | ||
@@ -540,12 +536,10 @@ static struct agp_client *agp_create_client(pid_t id) | |||
540 | { | 536 | { |
541 | struct agp_client *new_client; | 537 | struct agp_client *new_client; |
542 | 538 | ||
543 | new_client = kmalloc(sizeof(struct agp_client), GFP_KERNEL); | 539 | new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL); |
544 | |||
545 | if (new_client == NULL) | 540 | if (new_client == NULL) |
546 | return NULL; | 541 | return NULL; |
547 | 542 | ||
548 | memset(new_client, 0, sizeof(struct agp_client)); | ||
549 | new_client->pid = id; | 543 | new_client->pid = id; |
550 | agp_insert_client(new_client); | 544 | agp_insert_client(new_client); |
551 | return new_client; | 545 | return new_client; |
@@ -709,11 +703,10 @@ static int agp_open(struct inode *inode, struct file *file) | |||
709 | if (minor != AGPGART_MINOR) | 703 | if (minor != AGPGART_MINOR) |
710 | goto err_out; | 704 | goto err_out; |
711 | 705 | ||
712 | priv = kmalloc(sizeof(struct agp_file_private), GFP_KERNEL); | 706 | priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL); |
713 | if (priv == NULL) | 707 | if (priv == NULL) |
714 | goto err_out_nomem; | 708 | goto err_out_nomem; |
715 | 709 | ||
716 | memset(priv, 0, sizeof(struct agp_file_private)); | ||
717 | set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); | 710 | set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); |
718 | priv->my_pid = current->pid; | 711 | priv->my_pid = current->pid; |
719 | 712 | ||
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index ac9da0ca36b7..c4a38715c6f9 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
@@ -105,12 +105,10 @@ struct agp_memory *agp_create_memory(int scratch_pages) | |||
105 | { | 105 | { |
106 | struct agp_memory *new; | 106 | struct agp_memory *new; |
107 | 107 | ||
108 | new = kmalloc(sizeof(struct agp_memory), GFP_KERNEL); | 108 | new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); |
109 | |||
110 | if (new == NULL) | 109 | if (new == NULL) |
111 | return NULL; | 110 | return NULL; |
112 | 111 | ||
113 | memset(new, 0, sizeof(struct agp_memory)); | ||
114 | new->key = agp_get_key(); | 112 | new->key = agp_get_key(); |
115 | 113 | ||
116 | if (new->key < 0) { | 114 | if (new->key < 0) { |
@@ -414,7 +412,8 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_ | |||
414 | u32 tmp; | 412 | u32 tmp; |
415 | 413 | ||
416 | if (*requested_mode & AGP2_RESERVED_MASK) { | 414 | if (*requested_mode & AGP2_RESERVED_MASK) { |
417 | printk(KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode); | 415 | printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", |
416 | *requested_mode & AGP2_RESERVED_MASK, *requested_mode); | ||
418 | *requested_mode &= ~AGP2_RESERVED_MASK; | 417 | *requested_mode &= ~AGP2_RESERVED_MASK; |
419 | } | 418 | } |
420 | 419 | ||
@@ -492,7 +491,8 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_ | |||
492 | u32 tmp; | 491 | u32 tmp; |
493 | 492 | ||
494 | if (*requested_mode & AGP3_RESERVED_MASK) { | 493 | if (*requested_mode & AGP3_RESERVED_MASK) { |
495 | printk(KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode); | 494 | printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", |
495 | *requested_mode & AGP3_RESERVED_MASK, *requested_mode); | ||
496 | *requested_mode &= ~AGP3_RESERVED_MASK; | 496 | *requested_mode &= ~AGP3_RESERVED_MASK; |
497 | } | 497 | } |
498 | 498 | ||
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c index a2d9e5e48bbe..58944cd271ea 100644 --- a/drivers/char/agp/i460-agp.c +++ b/drivers/char/agp/i460-agp.c | |||
@@ -227,10 +227,9 @@ static int i460_configure (void) | |||
227 | */ | 227 | */ |
228 | if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { | 228 | if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { |
229 | size = current_size->num_entries * sizeof(i460.lp_desc[0]); | 229 | size = current_size->num_entries * sizeof(i460.lp_desc[0]); |
230 | i460.lp_desc = kmalloc(size, GFP_KERNEL); | 230 | i460.lp_desc = kzalloc(size, GFP_KERNEL); |
231 | if (!i460.lp_desc) | 231 | if (!i460.lp_desc) |
232 | return -ENOMEM; | 232 | return -ENOMEM; |
233 | memset(i460.lp_desc, 0, size); | ||
234 | } | 233 | } |
235 | return 0; | 234 | return 0; |
236 | } | 235 | } |
@@ -366,13 +365,12 @@ static int i460_alloc_large_page (struct lp_desc *lp) | |||
366 | } | 365 | } |
367 | 366 | ||
368 | map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; | 367 | map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; |
369 | lp->alloced_map = kmalloc(map_size, GFP_KERNEL); | 368 | lp->alloced_map = kzalloc(map_size, GFP_KERNEL); |
370 | if (!lp->alloced_map) { | 369 | if (!lp->alloced_map) { |
371 | free_pages((unsigned long) lpage, order); | 370 | free_pages((unsigned long) lpage, order); |
372 | printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); | 371 | printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); |
373 | return -ENOMEM; | 372 | return -ENOMEM; |
374 | } | 373 | } |
375 | memset(lp->alloced_map, 0, map_size); | ||
376 | 374 | ||
377 | lp->paddr = virt_to_gart(lpage); | 375 | lp->paddr = virt_to_gart(lpage); |
378 | lp->refcount = 0; | 376 | lp->refcount = 0; |
@@ -619,6 +617,7 @@ static struct pci_device_id agp_intel_i460_pci_table[] = { | |||
619 | MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table); | 617 | MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table); |
620 | 618 | ||
621 | static struct pci_driver agp_intel_i460_pci_driver = { | 619 | static struct pci_driver agp_intel_i460_pci_driver = { |
620 | .owner = THIS_MODULE, | ||
622 | .name = "agpgart-intel-i460", | 621 | .name = "agpgart-intel-i460", |
623 | .id_table = agp_intel_i460_pci_table, | 622 | .id_table = agp_intel_i460_pci_table, |
624 | .probe = agp_intel_i460_probe, | 623 | .probe = agp_intel_i460_probe, |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 1f7d415f432c..bf4cc9ffd5b1 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -1824,6 +1824,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
1824 | MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); | 1824 | MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); |
1825 | 1825 | ||
1826 | static struct pci_driver agp_intel_pci_driver = { | 1826 | static struct pci_driver agp_intel_pci_driver = { |
1827 | .owner = THIS_MODULE, | ||
1827 | .name = "agpgart-intel", | 1828 | .name = "agpgart-intel", |
1828 | .id_table = agp_intel_pci_table, | 1829 | .id_table = agp_intel_pci_table, |
1829 | .probe = agp_intel_probe, | 1830 | .probe = agp_intel_probe, |
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 80dafa3030bd..3aed0c5e2f92 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c | |||
@@ -398,6 +398,7 @@ static struct pci_device_id agp_nvidia_pci_table[] = { | |||
398 | MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); | 398 | MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); |
399 | 399 | ||
400 | static struct pci_driver agp_nvidia_pci_driver = { | 400 | static struct pci_driver agp_nvidia_pci_driver = { |
401 | .owner = THIS_MODULE, | ||
401 | .name = "agpgart-nvidia", | 402 | .name = "agpgart-nvidia", |
402 | .id_table = agp_nvidia_pci_table, | 403 | .id_table = agp_nvidia_pci_table, |
403 | .probe = agp_nvidia_probe, | 404 | .probe = agp_nvidia_probe, |
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c index 7957fc91f6ad..4df7734b51c2 100644 --- a/drivers/char/agp/sgi-agp.c +++ b/drivers/char/agp/sgi-agp.c | |||
@@ -289,6 +289,8 @@ static int __devinit agp_sgi_init(void) | |||
289 | j = 0; | 289 | j = 0; |
290 | list_for_each_entry(info, &tioca_list, ca_list) { | 290 | list_for_each_entry(info, &tioca_list, ca_list) { |
291 | struct list_head *tmp; | 291 | struct list_head *tmp; |
292 | if (list_empty(info->ca_devices)) | ||
293 | continue; | ||
292 | list_for_each(tmp, info->ca_devices) { | 294 | list_for_each(tmp, info->ca_devices) { |
293 | u8 cap_ptr; | 295 | u8 cap_ptr; |
294 | pdev = pci_dev_b(tmp); | 296 | pdev = pci_dev_b(tmp); |
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index ebc05554045c..a701361a8890 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c | |||
@@ -332,6 +332,7 @@ static struct pci_device_id agp_sis_pci_table[] = { | |||
332 | MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); | 332 | MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); |
333 | 333 | ||
334 | static struct pci_driver agp_sis_pci_driver = { | 334 | static struct pci_driver agp_sis_pci_driver = { |
335 | .owner = THIS_MODULE, | ||
335 | .name = "agpgart-sis", | 336 | .name = "agpgart-sis", |
336 | .id_table = agp_sis_pci_table, | 337 | .id_table = agp_sis_pci_table, |
337 | .probe = agp_sis_probe, | 338 | .probe = agp_sis_probe, |
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 71ea59a1dbeb..5a5392dd1254 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c | |||
@@ -102,19 +102,17 @@ static int serverworks_create_gatt_pages(int nr_tables) | |||
102 | int retval = 0; | 102 | int retval = 0; |
103 | int i; | 103 | int i; |
104 | 104 | ||
105 | tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), | 105 | tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), |
106 | GFP_KERNEL); | 106 | GFP_KERNEL); |
107 | if (tables == NULL) { | 107 | if (tables == NULL) |
108 | return -ENOMEM; | 108 | return -ENOMEM; |
109 | } | 109 | |
110 | memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1)); | ||
111 | for (i = 0; i < nr_tables; i++) { | 110 | for (i = 0; i < nr_tables; i++) { |
112 | entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL); | 111 | entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL); |
113 | if (entry == NULL) { | 112 | if (entry == NULL) { |
114 | retval = -ENOMEM; | 113 | retval = -ENOMEM; |
115 | break; | 114 | break; |
116 | } | 115 | } |
117 | memset(entry, 0, sizeof(struct serverworks_page_map)); | ||
118 | tables[i] = entry; | 116 | tables[i] = entry; |
119 | retval = serverworks_create_page_map(entry); | 117 | retval = serverworks_create_page_map(entry); |
120 | if (retval != 0) break; | 118 | if (retval != 0) break; |
@@ -244,13 +242,27 @@ static int serverworks_fetch_size(void) | |||
244 | */ | 242 | */ |
245 | static void serverworks_tlbflush(struct agp_memory *temp) | 243 | static void serverworks_tlbflush(struct agp_memory *temp) |
246 | { | 244 | { |
245 | unsigned long timeout; | ||
246 | |||
247 | writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH); | 247 | writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH); |
248 | while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) | 248 | timeout = jiffies + 3*HZ; |
249 | while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { | ||
249 | cpu_relax(); | 250 | cpu_relax(); |
251 | if (time_after(jiffies, timeout)) { | ||
252 | printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n"); | ||
253 | break; | ||
254 | } | ||
255 | } | ||
250 | 256 | ||
251 | writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH); | 257 | writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH); |
252 | while(readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) | 258 | timeout = jiffies + 3*HZ; |
259 | while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { | ||
253 | cpu_relax(); | 260 | cpu_relax(); |
261 | if (time_after(jiffies, timeout)) { | ||
262 | printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n"); | ||
263 | break; | ||
264 | } | ||
265 | } | ||
254 | } | 266 | } |
255 | 267 | ||
256 | static int serverworks_configure(void) | 268 | static int serverworks_configure(void) |
@@ -533,6 +545,7 @@ static struct pci_device_id agp_serverworks_pci_table[] = { | |||
533 | MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table); | 545 | MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table); |
534 | 546 | ||
535 | static struct pci_driver agp_serverworks_pci_driver = { | 547 | static struct pci_driver agp_serverworks_pci_driver = { |
548 | .owner = THIS_MODULE, | ||
536 | .name = "agpgart-serverworks", | 549 | .name = "agpgart-serverworks", |
537 | .id_table = agp_serverworks_pci_table, | 550 | .id_table = agp_serverworks_pci_table, |
538 | .probe = agp_serverworks_probe, | 551 | .probe = agp_serverworks_probe, |
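The serverworks_tlbflush() change above turns two unbounded busy-wait loops into polls that give up after roughly three seconds, so a flush bit that never clears no longer hangs the CPU forever. The 3*HZ bound and the error message come from the hunk; everything else in this sketch (the register pointer, the helper name) is illustrative:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/processor.h>

/* Poll a flush/status register, but give up after a fixed wall-clock
 * bound instead of spinning forever if the bit never clears. */
static void flush_with_timeout(void __iomem *reg, const char *what)
{
	unsigned long timeout = jiffies + 3 * HZ;	/* ~3 seconds */

	writeb(1, reg);					/* start the flush    */
	while (readb(reg) == 1) {			/* wait for it to end */
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s flush took more than 3 seconds\n",
			       what);
			break;				/* bail out, don't hang */
		}
	}
}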
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index c8255312b8c1..183c50acab27 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -658,6 +658,7 @@ static struct pci_device_id agp_uninorth_pci_table[] = { | |||
658 | MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table); | 658 | MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table); |
659 | 659 | ||
660 | static struct pci_driver agp_uninorth_pci_driver = { | 660 | static struct pci_driver agp_uninorth_pci_driver = { |
661 | .owner = THIS_MODULE, | ||
661 | .name = "agpgart-uninorth", | 662 | .name = "agpgart-uninorth", |
662 | .id_table = agp_uninorth_pci_table, | 663 | .id_table = agp_uninorth_pci_table, |
663 | .probe = agp_uninorth_probe, | 664 | .probe = agp_uninorth_probe, |
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index c847df575cf5..5d9a13700074 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c | |||
@@ -518,6 +518,7 @@ MODULE_DEVICE_TABLE(pci, agp_via_pci_table); | |||
518 | 518 | ||
519 | 519 | ||
520 | static struct pci_driver agp_via_pci_driver = { | 520 | static struct pci_driver agp_via_pci_driver = { |
521 | .owner = THIS_MODULE, | ||
521 | .name = "agpgart-via", | 522 | .name = "agpgart-via", |
522 | .id_table = agp_via_pci_table, | 523 | .id_table = agp_via_pci_table, |
523 | .probe = agp_via_probe, | 524 | .probe = agp_via_probe, |
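Each AGP backend touched above also gains an .owner = THIS_MODULE initializer in its struct pci_driver, which names the module that provides the driver so the driver core can manage module references on its behalf (in this era of the tree the field is filled in by hand in each driver). The shape of the change, with placeholder IDs and callbacks:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_pci_table[] = {
	{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID },	/* placeholder IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;			/* real setup elided */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_pci_driver = {
	.owner    = THIS_MODULE,	/* new: names the owning module */
	.name     = "example",
	.id_table = example_pci_table,
	.probe    = example_probe,
	.remove   = example_remove,
};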
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6c6121b85a54..25acf478c9e8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -593,12 +593,11 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
593 | goto module_out; | 593 | goto module_out; |
594 | } | 594 | } |
595 | 595 | ||
596 | policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); | 596 | policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); |
597 | if (!policy) { | 597 | if (!policy) { |
598 | ret = -ENOMEM; | 598 | ret = -ENOMEM; |
599 | goto nomem_out; | 599 | goto nomem_out; |
600 | } | 600 | } |
601 | memset(policy, 0, sizeof(struct cpufreq_policy)); | ||
602 | 601 | ||
603 | policy->cpu = cpu; | 602 | policy->cpu = cpu; |
604 | policy->cpus = cpumask_of_cpu(cpu); | 603 | policy->cpus = cpumask_of_cpu(cpu); |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index c1fc9c62bb51..17741111246b 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -48,7 +48,10 @@ | |||
48 | * All times here are in uS. | 48 | * All times here are in uS. |
49 | */ | 49 | */ |
50 | static unsigned int def_sampling_rate; | 50 | static unsigned int def_sampling_rate; |
51 | #define MIN_SAMPLING_RATE (def_sampling_rate / 2) | 51 | #define MIN_SAMPLING_RATE_RATIO (2) |
52 | /* for correct statistics, we need at least 10 ticks between each measure */ | ||
53 | #define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) | ||
54 | #define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) | ||
52 | #define MAX_SAMPLING_RATE (500 * def_sampling_rate) | 55 | #define MAX_SAMPLING_RATE (500 * def_sampling_rate) |
53 | #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) | 56 | #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) |
54 | #define DEF_SAMPLING_DOWN_FACTOR (1) | 57 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
@@ -416,13 +419,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
416 | if (dbs_enable == 1) { | 419 | if (dbs_enable == 1) { |
417 | unsigned int latency; | 420 | unsigned int latency; |
418 | /* policy latency is in nS. Convert it to uS first */ | 421 | /* policy latency is in nS. Convert it to uS first */ |
422 | latency = policy->cpuinfo.transition_latency / 1000; | ||
423 | if (latency == 0) | ||
424 | latency = 1; | ||
419 | 425 | ||
420 | latency = policy->cpuinfo.transition_latency; | 426 | def_sampling_rate = latency * |
421 | if (latency < 1000) | ||
422 | latency = 1000; | ||
423 | |||
424 | def_sampling_rate = (latency / 1000) * | ||
425 | DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; | 427 | DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; |
428 | |||
429 | if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) | ||
430 | def_sampling_rate = MIN_STAT_SAMPLING_RATE; | ||
431 | |||
426 | dbs_tuners_ins.sampling_rate = def_sampling_rate; | 432 | dbs_tuners_ins.sampling_rate = def_sampling_rate; |
427 | dbs_tuners_ins.ignore_nice = 0; | 433 | dbs_tuners_ins.ignore_nice = 0; |
428 | 434 | ||
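The ondemand change above derives the default sampling period from the CPU's transition latency, converted from nanoseconds to microseconds with a floor of 1 uS, multiplies it by DEF_SAMPLING_RATE_LATENCY_MULTIPLIER, and then clamps it to MIN_STAT_SAMPLING_RATE so at least ten ticks elapse between samples. A small user-space sketch of the arithmetic; HZ=1000 and the 10 uS latency are assumptions chosen only so the clamp actually triggers:

#include <stdio.h>

/* Stand-ins for the kernel-side constants; HZ and the example latency
 * are assumptions. */
#define HZ					1000
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	1000
#define MIN_SAMPLING_RATE_RATIO			2
#define jiffies_to_usecs(j)			((j) * (1000000 / HZ))
#define MIN_STAT_SAMPLING_RATE	(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))

int main(void)
{
	unsigned int transition_latency_ns = 10000;	/* example: 10 uS */
	unsigned int latency = transition_latency_ns / 1000;
	unsigned int def_sampling_rate;

	if (latency == 0)
		latency = 1;

	def_sampling_rate = latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
	if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
		def_sampling_rate = MIN_STAT_SAMPLING_RATE;

	/* 10 uS latency -> 10000 uS raw rate, but with HZ=1000 the floor is
	 * 2 * 10 * 1000 uS = 20000 uS, so the clamp wins here. */
	printf("default sampling rate = %u uS\n", def_sampling_rate);
	return 0;
}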
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 3597f25d5efa..0bddb8e694d9 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -193,11 +193,15 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy, | |||
193 | unsigned int cpu = policy->cpu; | 193 | unsigned int cpu = policy->cpu; |
194 | if (cpufreq_stats_table[cpu]) | 194 | if (cpufreq_stats_table[cpu]) |
195 | return -EBUSY; | 195 | return -EBUSY; |
196 | if ((stat = kmalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL) | 196 | if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL) |
197 | return -ENOMEM; | 197 | return -ENOMEM; |
198 | memset(stat, 0, sizeof (struct cpufreq_stats)); | ||
199 | 198 | ||
200 | data = cpufreq_cpu_get(cpu); | 199 | data = cpufreq_cpu_get(cpu); |
200 | if (data == NULL) { | ||
201 | ret = -EINVAL; | ||
202 | goto error_get_fail; | ||
203 | } | ||
204 | |||
201 | if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group))) | 205 | if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group))) |
202 | goto error_out; | 206 | goto error_out; |
203 | 207 | ||
@@ -217,12 +221,11 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy, | |||
217 | alloc_size += count * count * sizeof(int); | 221 | alloc_size += count * count * sizeof(int); |
218 | #endif | 222 | #endif |
219 | stat->max_state = count; | 223 | stat->max_state = count; |
220 | stat->time_in_state = kmalloc(alloc_size, GFP_KERNEL); | 224 | stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); |
221 | if (!stat->time_in_state) { | 225 | if (!stat->time_in_state) { |
222 | ret = -ENOMEM; | 226 | ret = -ENOMEM; |
223 | goto error_out; | 227 | goto error_out; |
224 | } | 228 | } |
225 | memset(stat->time_in_state, 0, alloc_size); | ||
226 | stat->freq_table = (unsigned int *)(stat->time_in_state + count); | 229 | stat->freq_table = (unsigned int *)(stat->time_in_state + count); |
227 | 230 | ||
228 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 231 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
@@ -245,6 +248,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy, | |||
245 | return 0; | 248 | return 0; |
246 | error_out: | 249 | error_out: |
247 | cpufreq_cpu_put(data); | 250 | cpufreq_cpu_put(data); |
251 | error_get_fail: | ||
248 | kfree(stat); | 252 | kfree(stat); |
249 | cpufreq_stats_table[cpu] = NULL; | 253 | cpufreq_stats_table[cpu] = NULL; |
250 | return ret; | 254 | return ret; |
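The cpufreq_stats fix above checks whether cpufreq_cpu_get() failed and, because no reference has been taken on that path, introduces a second label (error_get_fail) that skips the cpufreq_cpu_put() done at error_out. The underlying idiom is to order cleanup labels in the reverse order of acquisition; a self-contained sketch with illustrative resource names:

#include <errno.h>
#include <stdlib.h>

/* Illustrative stand-ins; the point is the reverse-order cleanup labels. */
static void *acquire_a(void) { return malloc(16); }
static void *acquire_b(void) { return malloc(16); }
static int use(void *a, void *b) { return (a && b) ? 0 : -EINVAL; }

static int setup_example(void **out_a, void **out_b)
{
	void *a, *b;
	int ret;

	a = acquire_a();
	if (!a)
		return -ENOMEM;		/* nothing acquired yet: plain return */

	b = acquire_b();
	if (!b) {
		ret = -ENOMEM;
		goto err_no_b;		/* only a needs releasing */
	}

	ret = use(a, b);
	if (ret)
		goto err_have_b;	/* both need releasing */

	*out_a = a;			/* success: hand both to the caller */
	*out_b = b;
	return 0;

err_have_b:
	free(b);
err_no_b:
	free(a);
	return ret;
}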
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index ceae379a4d4c..da528390acf8 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
@@ -1263,7 +1263,7 @@ EXPORT_SYMBOL(mmc_suspend_host); | |||
1263 | */ | 1263 | */ |
1264 | int mmc_resume_host(struct mmc_host *host) | 1264 | int mmc_resume_host(struct mmc_host *host) |
1265 | { | 1265 | { |
1266 | mmc_detect_change(host, 0); | 1266 | mmc_rescan(host); |
1267 | 1267 | ||
1268 | return 0; | 1268 | return 0; |
1269 | } | 1269 | } |
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c index c82abeb59d3a..071ae24be892 100644 --- a/drivers/sbus/char/cpwatchdog.c +++ b/drivers/sbus/char/cpwatchdog.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
28 | #include <linux/timer.h> | 28 | #include <linux/timer.h> |
29 | #include <linux/smp_lock.h> | ||
29 | #include <asm/irq.h> | 30 | #include <asm/irq.h> |
30 | #include <asm/ebus.h> | 31 | #include <asm/ebus.h> |
31 | #include <asm/oplib.h> | 32 | #include <asm/oplib.h> |
@@ -394,6 +395,28 @@ static int wd_ioctl(struct inode *inode, struct file *file, | |||
394 | return(0); | 395 | return(0); |
395 | } | 396 | } |
396 | 397 | ||
398 | static long wd_compat_ioctl(struct file *file, unsigned int cmd, | ||
399 | unsigned long arg) | ||
400 | { | ||
401 | int rval = -ENOIOCTLCMD; | ||
402 | |||
403 | switch (cmd) { | ||
404 | /* solaris ioctls are specific to this driver */ | ||
405 | case WIOCSTART: | ||
406 | case WIOCSTOP: | ||
407 | case WIOCGSTAT: | ||
408 | lock_kernel(); | ||
409 | rval = wd_ioctl(file->f_dentry->d_inode, file, cmd, arg); | ||
410 | unlock_kernel(); | ||
411 | break; | ||
412 | /* everything else is handled by the generic compat layer */ | ||
413 | default: | ||
414 | break; | ||
415 | } | ||
416 | |||
417 | return rval; | ||
418 | } | ||
419 | |||
397 | static ssize_t wd_write(struct file *file, | 420 | static ssize_t wd_write(struct file *file, |
398 | const char __user *buf, | 421 | const char __user *buf, |
399 | size_t count, | 422 | size_t count, |
@@ -441,6 +464,7 @@ static irqreturn_t wd_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
441 | static struct file_operations wd_fops = { | 464 | static struct file_operations wd_fops = { |
442 | .owner = THIS_MODULE, | 465 | .owner = THIS_MODULE, |
443 | .ioctl = wd_ioctl, | 466 | .ioctl = wd_ioctl, |
467 | .compat_ioctl = wd_compat_ioctl, | ||
444 | .open = wd_open, | 468 | .open = wd_open, |
445 | .write = wd_write, | 469 | .write = wd_write, |
446 | .read = wd_read, | 470 | .read = wd_read, |
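The watchdog driver above (and openprom.c further down) grows a .compat_ioctl method that forwards its driver-private commands to the existing native ioctl handler under the big kernel lock, and returns -ENOIOCTLCMD for anything else so the generic compat layer can handle it. A sketch of that wrapper shape; the MY_CMD_* numbers and my_native_ioctl() are placeholders, not this driver's real definitions:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>

#define MY_CMD_START	_IO('X', 0)	/* placeholder command numbers */
#define MY_CMD_STOP	_IO('X', 1)
#define MY_CMD_STATUS	_IO('X', 2)

static int my_native_ioctl(struct inode *inode, struct file *file,
			   unsigned int cmd, unsigned long arg)
{
	return 0;			/* real driver work elided */
}

static long my_compat_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	long rval = -ENOIOCTLCMD;	/* unknown: let common code handle it */

	switch (cmd) {
	case MY_CMD_START:
	case MY_CMD_STOP:
	case MY_CMD_STATUS:		/* driver-private, layout-compatible */
		lock_kernel();
		rval = my_native_ioctl(file->f_dentry->d_inode, file,
				       cmd, arg);
		unlock_kernel();	/* release, not a second lock */
		break;
	default:
		break;
	}

	return rval;
}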
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 24ed5893b4f0..39f54213a6d5 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/miscdevice.h> | 16 | #include <linux/miscdevice.h> |
17 | #include <linux/ioport.h> /* request_region */ | 17 | #include <linux/ioport.h> /* request_region */ |
18 | #include <linux/smp_lock.h> | ||
18 | #include <asm/atomic.h> | 19 | #include <asm/atomic.h> |
19 | #include <asm/ebus.h> /* EBus device */ | 20 | #include <asm/ebus.h> /* EBus device */ |
20 | #include <asm/oplib.h> /* OpenProm Library */ | 21 | #include <asm/oplib.h> /* OpenProm Library */ |
@@ -114,22 +115,25 @@ static int d7s_release(struct inode *inode, struct file *f) | |||
114 | return 0; | 115 | return 0; |
115 | } | 116 | } |
116 | 117 | ||
117 | static int d7s_ioctl(struct inode *inode, struct file *f, | 118 | static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
118 | unsigned int cmd, unsigned long arg) | ||
119 | { | 119 | { |
120 | __u8 regs = readb(d7s_regs); | 120 | __u8 regs = readb(d7s_regs); |
121 | __u8 ireg = 0; | 121 | __u8 ireg = 0; |
122 | int error = 0; | ||
122 | 123 | ||
123 | if (D7S_MINOR != iminor(inode)) | 124 | if (D7S_MINOR != iminor(file->f_dentry->d_inode)) |
124 | return -ENODEV; | 125 | return -ENODEV; |
125 | 126 | ||
127 | lock_kernel(); | ||
126 | switch (cmd) { | 128 | switch (cmd) { |
127 | case D7SIOCWR: | 129 | case D7SIOCWR: |
128 | /* assign device register values | 130 | /* assign device register values |
129 | * we mask-out D7S_FLIP if in sol_compat mode | 131 | * we mask-out D7S_FLIP if in sol_compat mode |
130 | */ | 132 | */ |
131 | if (get_user(ireg, (int __user *) arg)) | 133 | if (get_user(ireg, (int __user *) arg)) { |
132 | return -EFAULT; | 134 | error = -EFAULT; |
135 | break; | ||
136 | } | ||
133 | if (0 != sol_compat) { | 137 | if (0 != sol_compat) { |
134 | (regs & D7S_FLIP) ? | 138 | (regs & D7S_FLIP) ? |
135 | (ireg |= D7S_FLIP) : (ireg &= ~D7S_FLIP); | 139 | (ireg |= D7S_FLIP) : (ireg &= ~D7S_FLIP); |
@@ -144,8 +148,10 @@ static int d7s_ioctl(struct inode *inode, struct file *f, | |||
144 | * This driver will not misinform you about the state | 148 | * This driver will not misinform you about the state |
145 | * of your hardware while in sol_compat mode | 149 | * of your hardware while in sol_compat mode |
146 | */ | 150 | */ |
147 | if (put_user(regs, (int __user *) arg)) | 151 | if (put_user(regs, (int __user *) arg)) { |
148 | return -EFAULT; | 152 | error = -EFAULT; |
153 | break; | ||
154 | } | ||
149 | break; | 155 | break; |
150 | 156 | ||
151 | case D7SIOCTM: | 157 | case D7SIOCTM: |
@@ -155,15 +161,17 @@ static int d7s_ioctl(struct inode *inode, struct file *f, | |||
155 | writeb(regs, d7s_regs); | 161 | writeb(regs, d7s_regs); |
156 | break; | 162 | break; |
157 | }; | 163 | }; |
164 | unlock_kernel(); | ||
158 | 165 | ||
159 | return 0; | 166 | return error; |
160 | } | 167 | } |
161 | 168 | ||
162 | static struct file_operations d7s_fops = { | 169 | static struct file_operations d7s_fops = { |
163 | .owner = THIS_MODULE, | 170 | .owner = THIS_MODULE, |
164 | .ioctl = d7s_ioctl, | 171 | .unlocked_ioctl = d7s_ioctl, |
165 | .open = d7s_open, | 172 | .compat_ioctl = d7s_ioctl, |
166 | .release = d7s_release, | 173 | .open = d7s_open, |
174 | .release = d7s_release, | ||
167 | }; | 175 | }; |
168 | 176 | ||
169 | static struct miscdevice d7s_miscdev = { D7S_MINOR, D7S_DEVNAME, &d7s_fops }; | 177 | static struct miscdevice d7s_miscdev = { D7S_MINOR, D7S_DEVNAME, &d7s_fops }; |
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index ba56762b05f6..19e8eddf887a 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
@@ -654,9 +654,8 @@ envctrl_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
654 | /* Function Description: Command what to read. Mapped to user ioctl(). | 654 | /* Function Description: Command what to read. Mapped to user ioctl(). |
655 | * Return: Gives 0 for implemented commands, -EINVAL otherwise. | 655 | * Return: Gives 0 for implemented commands, -EINVAL otherwise. |
656 | */ | 656 | */ |
657 | static int | 657 | static long |
658 | envctrl_ioctl(struct inode *inode, struct file *file, | 658 | envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
659 | unsigned int cmd, unsigned long arg) | ||
660 | { | 659 | { |
661 | char __user *infobuf; | 660 | char __user *infobuf; |
662 | 661 | ||
@@ -715,11 +714,14 @@ envctrl_release(struct inode *inode, struct file *file) | |||
715 | } | 714 | } |
716 | 715 | ||
717 | static struct file_operations envctrl_fops = { | 716 | static struct file_operations envctrl_fops = { |
718 | .owner = THIS_MODULE, | 717 | .owner = THIS_MODULE, |
719 | .read = envctrl_read, | 718 | .read = envctrl_read, |
720 | .ioctl = envctrl_ioctl, | 719 | .unlocked_ioctl = envctrl_ioctl, |
721 | .open = envctrl_open, | 720 | #ifdef CONFIG_COMPAT |
722 | .release = envctrl_release, | 721 | .compat_ioctl = envctrl_ioctl, |
722 | #endif | ||
723 | .open = envctrl_open, | ||
724 | .release = envctrl_release, | ||
723 | }; | 725 | }; |
724 | 726 | ||
725 | static struct miscdevice envctrl_dev = { | 727 | static struct miscdevice envctrl_dev = { |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 58ed33749571..5028ac214326 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/string.h> | 40 | #include <linux/string.h> |
41 | #include <linux/miscdevice.h> | 41 | #include <linux/miscdevice.h> |
42 | #include <linux/smp_lock.h> | ||
42 | #include <linux/init.h> | 43 | #include <linux/init.h> |
43 | #include <linux/fs.h> | 44 | #include <linux/fs.h> |
44 | #include <asm/oplib.h> | 45 | #include <asm/oplib.h> |
@@ -565,6 +566,38 @@ static int openprom_ioctl(struct inode * inode, struct file * file, | |||
565 | } | 566 | } |
566 | } | 567 | } |
567 | 568 | ||
569 | static long openprom_compat_ioctl(struct file *file, unsigned int cmd, | ||
570 | unsigned long arg) | ||
571 | { | ||
572 | long rval = -ENOTTY; | ||
573 | |||
574 | /* | ||
575 | * SunOS/Solaris only, the NetBSD ones have embedded pointers in | ||
576 | * the arg which we'd need to clean up... | ||
577 | */ | ||
578 | switch (cmd) { | ||
579 | case OPROMGETOPT: | ||
580 | case OPROMSETOPT: | ||
581 | case OPROMNXTOPT: | ||
582 | case OPROMSETOPT2: | ||
583 | case OPROMNEXT: | ||
584 | case OPROMCHILD: | ||
585 | case OPROMGETPROP: | ||
586 | case OPROMNXTPROP: | ||
587 | case OPROMU2P: | ||
588 | case OPROMGETCONS: | ||
589 | case OPROMGETFBNAME: | ||
590 | case OPROMGETBOOTARGS: | ||
591 | case OPROMSETCUR: | ||
592 | case OPROMPCI2NODE: | ||
593 | case OPROMPATH2NODE: | ||
594 | lock_kernel(); | ||
595 | rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg); | ||
596 | unlock_kernel(); | ||
597 | break; | ||
598 | } | ||
599 | return rval; | ||
600 | } | ||
568 | static int openprom_open(struct inode * inode, struct file * file) | 601 | static int openprom_open(struct inode * inode, struct file * file) |
569 | { | 602 | { |
570 | DATA *data; | 603 | DATA *data; |
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 656c0e8d160e..f0738533f39a 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -1441,7 +1441,7 @@ static void sunsu_console_write(struct console *co, const char *s, | |||
1441 | * - initialize the serial port | 1441 | * - initialize the serial port |
1442 | * Return non-zero if we didn't find a serial port. | 1442 | * Return non-zero if we didn't find a serial port. |
1443 | */ | 1443 | */ |
1444 | static int __init sunsu_console_setup(struct console *co, char *options) | 1444 | static int sunsu_console_setup(struct console *co, char *options) |
1445 | { | 1445 | { |
1446 | struct uart_port *port; | 1446 | struct uart_port *port; |
1447 | int baud = 9600; | 1447 | int baud = 9600; |
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c index 3280bb9560e2..414c4409e924 100644 --- a/drivers/video/cg6.c +++ b/drivers/video/cg6.c | |||
@@ -653,12 +653,6 @@ static void cg6_chip_init(struct fb_info *info) | |||
653 | sbus_writel(0, &fbc->clipminy); | 653 | sbus_writel(0, &fbc->clipminy); |
654 | sbus_writel(info->var.xres - 1, &fbc->clipmaxx); | 654 | sbus_writel(info->var.xres - 1, &fbc->clipmaxx); |
655 | sbus_writel(info->var.yres - 1, &fbc->clipmaxy); | 655 | sbus_writel(info->var.yres - 1, &fbc->clipmaxy); |
656 | |||
657 | /* Disable cursor in Brooktree DAC. */ | ||
658 | sbus_writel(0x06 << 24, &par->bt->addr); | ||
659 | tmp = sbus_readl(&par->bt->control); | ||
660 | tmp &= ~(0x03 << 24); | ||
661 | sbus_writel(tmp, &par->bt->control); | ||
662 | } | 656 | } |
663 | 657 | ||
664 | struct all_info { | 658 | struct all_info { |
diff --git a/fs/Makefile b/fs/Makefile index 1972da186272..4c2655759078 100644 --- a/fs/Makefile +++ b/fs/Makefile | |||
@@ -10,7 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ | |||
10 | ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ | 10 | ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ |
11 | attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ | 11 | attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ |
12 | seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ | 12 | seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ |
13 | ioprio.o | 13 | ioprio.o pnode.o |
14 | 14 | ||
15 | obj-$(CONFIG_INOTIFY) += inotify.o | 15 | obj-$(CONFIG_INOTIFY) += inotify.o |
16 | obj-$(CONFIG_EPOLL) += eventpoll.o | 16 | obj-$(CONFIG_EPOLL) += eventpoll.o |
diff --git a/fs/dquot.c b/fs/dquot.c index afa06a893468..05b60283c9c2 100644 --- a/fs/dquot.c +++ b/fs/dquot.c | |||
@@ -1321,13 +1321,11 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1321 | int cnt; | 1321 | int cnt; |
1322 | struct quota_info *dqopt = sb_dqopt(sb); | 1322 | struct quota_info *dqopt = sb_dqopt(sb); |
1323 | struct inode *toputinode[MAXQUOTAS]; | 1323 | struct inode *toputinode[MAXQUOTAS]; |
1324 | struct vfsmount *toputmnt[MAXQUOTAS]; | ||
1325 | 1324 | ||
1326 | /* We need to serialize quota_off() for device */ | 1325 | /* We need to serialize quota_off() for device */ |
1327 | down(&dqopt->dqonoff_sem); | 1326 | down(&dqopt->dqonoff_sem); |
1328 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1327 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1329 | toputinode[cnt] = NULL; | 1328 | toputinode[cnt] = NULL; |
1330 | toputmnt[cnt] = NULL; | ||
1331 | if (type != -1 && cnt != type) | 1329 | if (type != -1 && cnt != type) |
1332 | continue; | 1330 | continue; |
1333 | if (!sb_has_quota_enabled(sb, cnt)) | 1331 | if (!sb_has_quota_enabled(sb, cnt)) |
@@ -1348,9 +1346,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1348 | put_quota_format(dqopt->info[cnt].dqi_format); | 1346 | put_quota_format(dqopt->info[cnt].dqi_format); |
1349 | 1347 | ||
1350 | toputinode[cnt] = dqopt->files[cnt]; | 1348 | toputinode[cnt] = dqopt->files[cnt]; |
1351 | toputmnt[cnt] = dqopt->mnt[cnt]; | ||
1352 | dqopt->files[cnt] = NULL; | 1349 | dqopt->files[cnt] = NULL; |
1353 | dqopt->mnt[cnt] = NULL; | ||
1354 | dqopt->info[cnt].dqi_flags = 0; | 1350 | dqopt->info[cnt].dqi_flags = 0; |
1355 | dqopt->info[cnt].dqi_igrace = 0; | 1351 | dqopt->info[cnt].dqi_igrace = 0; |
1356 | dqopt->info[cnt].dqi_bgrace = 0; | 1352 | dqopt->info[cnt].dqi_bgrace = 0; |
@@ -1358,10 +1354,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1358 | } | 1354 | } |
1359 | up(&dqopt->dqonoff_sem); | 1355 | up(&dqopt->dqonoff_sem); |
1360 | /* Sync the superblock so that buffers with quota data are written to | 1356 | /* Sync the superblock so that buffers with quota data are written to |
1361 | * disk (and so userspace sees correct data afterwards). | 1357 | * disk (and so userspace sees correct data afterwards). */ |
1362 | * The reference to vfsmnt we are still holding protects us from | ||
1363 | * umount (we don't have it only when quotas are turned on/off for | ||
1364 | * journal replay but in that case we are guarded by the fs anyway). */ | ||
1365 | if (sb->s_op->sync_fs) | 1358 | if (sb->s_op->sync_fs) |
1366 | sb->s_op->sync_fs(sb, 1); | 1359 | sb->s_op->sync_fs(sb, 1); |
1367 | sync_blockdev(sb->s_bdev); | 1360 | sync_blockdev(sb->s_bdev); |
@@ -1385,10 +1378,6 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1385 | iput(toputinode[cnt]); | 1378 | iput(toputinode[cnt]); |
1386 | } | 1379 | } |
1387 | up(&dqopt->dqonoff_sem); | 1380 | up(&dqopt->dqonoff_sem); |
1388 | /* We don't hold the reference when we turned on quotas | ||
1389 | * just for the journal replay... */ | ||
1390 | if (toputmnt[cnt]) | ||
1391 | mntput(toputmnt[cnt]); | ||
1392 | } | 1381 | } |
1393 | if (sb->s_bdev) | 1382 | if (sb->s_bdev) |
1394 | invalidate_bdev(sb->s_bdev, 0); | 1383 | invalidate_bdev(sb->s_bdev, 0); |
@@ -1503,11 +1492,8 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path) | |||
1503 | /* Quota file not on the same filesystem? */ | 1492 | /* Quota file not on the same filesystem? */ |
1504 | if (nd.mnt->mnt_sb != sb) | 1493 | if (nd.mnt->mnt_sb != sb) |
1505 | error = -EXDEV; | 1494 | error = -EXDEV; |
1506 | else { | 1495 | else |
1507 | error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id); | 1496 | error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id); |
1508 | if (!error) | ||
1509 | sb_dqopt(sb)->mnt[type] = mntget(nd.mnt); | ||
1510 | } | ||
1511 | out_path: | 1497 | out_path: |
1512 | path_release(&nd); | 1498 | path_release(&nd); |
1513 | return error; | 1499 | return error; |
diff --git a/fs/namespace.c b/fs/namespace.c index 2fa9fdf7d6f5..caa9187f67e5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mount.h> | 24 | #include <linux/mount.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <asm/unistd.h> | 26 | #include <asm/unistd.h> |
27 | #include "pnode.h" | ||
27 | 28 | ||
28 | extern int __init init_rootfs(void); | 29 | extern int __init init_rootfs(void); |
29 | 30 | ||
@@ -37,33 +38,39 @@ static inline int sysfs_init(void) | |||
37 | #endif | 38 | #endif |
38 | 39 | ||
39 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ | 40 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ |
40 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); | 41 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); |
42 | |||
43 | static int event; | ||
41 | 44 | ||
42 | static struct list_head *mount_hashtable; | 45 | static struct list_head *mount_hashtable; |
43 | static int hash_mask __read_mostly, hash_bits __read_mostly; | 46 | static int hash_mask __read_mostly, hash_bits __read_mostly; |
44 | static kmem_cache_t *mnt_cache; | 47 | static kmem_cache_t *mnt_cache; |
48 | static struct rw_semaphore namespace_sem; | ||
45 | 49 | ||
46 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | 50 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) |
47 | { | 51 | { |
48 | unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES); | 52 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
49 | tmp += ((unsigned long) dentry / L1_CACHE_BYTES); | 53 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); |
50 | tmp = tmp + (tmp >> hash_bits); | 54 | tmp = tmp + (tmp >> hash_bits); |
51 | return tmp & hash_mask; | 55 | return tmp & hash_mask; |
52 | } | 56 | } |
53 | 57 | ||
54 | struct vfsmount *alloc_vfsmnt(const char *name) | 58 | struct vfsmount *alloc_vfsmnt(const char *name) |
55 | { | 59 | { |
56 | struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); | 60 | struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); |
57 | if (mnt) { | 61 | if (mnt) { |
58 | memset(mnt, 0, sizeof(struct vfsmount)); | 62 | memset(mnt, 0, sizeof(struct vfsmount)); |
59 | atomic_set(&mnt->mnt_count,1); | 63 | atomic_set(&mnt->mnt_count, 1); |
60 | INIT_LIST_HEAD(&mnt->mnt_hash); | 64 | INIT_LIST_HEAD(&mnt->mnt_hash); |
61 | INIT_LIST_HEAD(&mnt->mnt_child); | 65 | INIT_LIST_HEAD(&mnt->mnt_child); |
62 | INIT_LIST_HEAD(&mnt->mnt_mounts); | 66 | INIT_LIST_HEAD(&mnt->mnt_mounts); |
63 | INIT_LIST_HEAD(&mnt->mnt_list); | 67 | INIT_LIST_HEAD(&mnt->mnt_list); |
64 | INIT_LIST_HEAD(&mnt->mnt_expire); | 68 | INIT_LIST_HEAD(&mnt->mnt_expire); |
69 | INIT_LIST_HEAD(&mnt->mnt_share); | ||
70 | INIT_LIST_HEAD(&mnt->mnt_slave_list); | ||
71 | INIT_LIST_HEAD(&mnt->mnt_slave); | ||
65 | if (name) { | 72 | if (name) { |
66 | int size = strlen(name)+1; | 73 | int size = strlen(name) + 1; |
67 | char *newname = kmalloc(size, GFP_KERNEL); | 74 | char *newname = kmalloc(size, GFP_KERNEL); |
68 | if (newname) { | 75 | if (newname) { |
69 | memcpy(newname, name, size); | 76 | memcpy(newname, name, size); |
@@ -81,36 +88,65 @@ void free_vfsmnt(struct vfsmount *mnt) | |||
81 | } | 88 | } |
82 | 89 | ||
83 | /* | 90 | /* |
84 | * Now, lookup_mnt increments the ref count before returning | 91 | * find the first or last mount at @dentry on vfsmount @mnt depending on |
85 | * the vfsmount struct. | 92 | * @dir. If @dir is set, return the first mount, else return the last mount. |
86 | */ | 93 | */ |
87 | struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | 94 | struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, |
95 | int dir) | ||
88 | { | 96 | { |
89 | struct list_head * head = mount_hashtable + hash(mnt, dentry); | 97 | struct list_head *head = mount_hashtable + hash(mnt, dentry); |
90 | struct list_head * tmp = head; | 98 | struct list_head *tmp = head; |
91 | struct vfsmount *p, *found = NULL; | 99 | struct vfsmount *p, *found = NULL; |
92 | 100 | ||
93 | spin_lock(&vfsmount_lock); | ||
94 | for (;;) { | 101 | for (;;) { |
95 | tmp = tmp->next; | 102 | tmp = dir ? tmp->next : tmp->prev; |
96 | p = NULL; | 103 | p = NULL; |
97 | if (tmp == head) | 104 | if (tmp == head) |
98 | break; | 105 | break; |
99 | p = list_entry(tmp, struct vfsmount, mnt_hash); | 106 | p = list_entry(tmp, struct vfsmount, mnt_hash); |
100 | if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) { | 107 | if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) { |
101 | found = mntget(p); | 108 | found = p; |
102 | break; | 109 | break; |
103 | } | 110 | } |
104 | } | 111 | } |
105 | spin_unlock(&vfsmount_lock); | ||
106 | return found; | 112 | return found; |
107 | } | 113 | } |
108 | 114 | ||
115 | /* | ||
116 | * lookup_mnt increments the ref count before returning | ||
117 | * the vfsmount struct. | ||
118 | */ | ||
119 | struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | ||
120 | { | ||
121 | struct vfsmount *child_mnt; | ||
122 | spin_lock(&vfsmount_lock); | ||
123 | if ((child_mnt = __lookup_mnt(mnt, dentry, 1))) | ||
124 | mntget(child_mnt); | ||
125 | spin_unlock(&vfsmount_lock); | ||
126 | return child_mnt; | ||
127 | } | ||
128 | |||
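The comment above gives the contract, and the difference from __lookup_mnt() is worth spelling out: lookup_mnt() hands back its own reference (or NULL), while __lookup_mnt() returns an unreferenced pointer and is only safe to call under vfsmount_lock. A minimal caller-side sketch (the helper below is illustrative only, not part of the patch):

    /* test whether anything is mounted at (mnt, dentry) */
    static int example_is_mounted_on(struct vfsmount *mnt, struct dentry *dentry)
    {
            struct vfsmount *child = lookup_mnt(mnt, dentry);

            if (!child)
                    return 0;
            mntput(child);          /* drop the reference lookup_mnt() took for us */
            return 1;
    }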
109 | static inline int check_mnt(struct vfsmount *mnt) | 129 | static inline int check_mnt(struct vfsmount *mnt) |
110 | { | 130 | { |
111 | return mnt->mnt_namespace == current->namespace; | 131 | return mnt->mnt_namespace == current->namespace; |
112 | } | 132 | } |
113 | 133 | ||
134 | static void touch_namespace(struct namespace *ns) | ||
135 | { | ||
136 | if (ns) { | ||
137 | ns->event = ++event; | ||
138 | wake_up_interruptible(&ns->poll); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | static void __touch_namespace(struct namespace *ns) | ||
143 | { | ||
144 | if (ns && ns->event != event) { | ||
145 | ns->event = event; | ||
146 | wake_up_interruptible(&ns->poll); | ||
147 | } | ||
148 | } | ||
149 | |||
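The event counter and the per-namespace poll waitqueue touched here are the kernel half of making the mount table watchable; the /proc plumbing that reports the event to userspace is not part of this hunk. A hedged sketch of how a consumer would use it, assuming the pollable /proc/mounts support from the same patch series is present:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct pollfd pfd;

            pfd.fd = open("/proc/mounts", O_RDONLY);
            if (pfd.fd < 0)
                    return 1;
            pfd.events = POLLPRI;   /* mount-table changes are reported as exceptional events */

            while (poll(&pfd, 1, -1) > 0) {
                    if (pfd.revents & (POLLERR | POLLPRI))
                            printf("mount table changed\n");
                    /* a real consumer would lseek() to offset 0 and reread the file here */
            }
            close(pfd.fd);
            return 0;
    }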
114 | static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) | 150 | static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) |
115 | { | 151 | { |
116 | old_nd->dentry = mnt->mnt_mountpoint; | 152 | old_nd->dentry = mnt->mnt_mountpoint; |
@@ -122,13 +158,43 @@ static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) | |||
122 | old_nd->dentry->d_mounted--; | 158 | old_nd->dentry->d_mounted--; |
123 | } | 159 | } |
124 | 160 | ||
161 | void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, | ||
162 | struct vfsmount *child_mnt) | ||
163 | { | ||
164 | child_mnt->mnt_parent = mntget(mnt); | ||
165 | child_mnt->mnt_mountpoint = dget(dentry); | ||
166 | dentry->d_mounted++; | ||
167 | } | ||
168 | |||
125 | static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) | 169 | static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) |
126 | { | 170 | { |
127 | mnt->mnt_parent = mntget(nd->mnt); | 171 | mnt_set_mountpoint(nd->mnt, nd->dentry, mnt); |
128 | mnt->mnt_mountpoint = dget(nd->dentry); | 172 | list_add_tail(&mnt->mnt_hash, mount_hashtable + |
129 | list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry)); | 173 | hash(nd->mnt, nd->dentry)); |
130 | list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts); | 174 | list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts); |
131 | nd->dentry->d_mounted++; | 175 | } |
176 | |||
177 | /* | ||
178 | * the caller must hold vfsmount_lock | ||
179 | */ | ||
180 | static void commit_tree(struct vfsmount *mnt) | ||
181 | { | ||
182 | struct vfsmount *parent = mnt->mnt_parent; | ||
183 | struct vfsmount *m; | ||
184 | LIST_HEAD(head); | ||
185 | struct namespace *n = parent->mnt_namespace; | ||
186 | |||
187 | BUG_ON(parent == mnt); | ||
188 | |||
189 | list_add_tail(&head, &mnt->mnt_list); | ||
190 | list_for_each_entry(m, &head, mnt_list) | ||
191 | m->mnt_namespace = n; | ||
192 | list_splice(&head, n->list.prev); | ||
193 | |||
194 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | ||
195 | hash(parent, mnt->mnt_mountpoint)); | ||
196 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | ||
197 | touch_namespace(n); | ||
132 | } | 198 | } |
133 | 199 | ||
134 | static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) | 200 | static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) |
@@ -147,8 +213,18 @@ static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) | |||
147 | return list_entry(next, struct vfsmount, mnt_child); | 213 | return list_entry(next, struct vfsmount, mnt_child); |
148 | } | 214 | } |
149 | 215 | ||
150 | static struct vfsmount * | 216 | static struct vfsmount *skip_mnt_tree(struct vfsmount *p) |
151 | clone_mnt(struct vfsmount *old, struct dentry *root) | 217 | { |
218 | struct list_head *prev = p->mnt_mounts.prev; | ||
219 | while (prev != &p->mnt_mounts) { | ||
220 | p = list_entry(prev, struct vfsmount, mnt_child); | ||
221 | prev = p->mnt_mounts.prev; | ||
222 | } | ||
223 | return p; | ||
224 | } | ||
225 | |||
226 | static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root, | ||
227 | int flag) | ||
152 | { | 228 | { |
153 | struct super_block *sb = old->mnt_sb; | 229 | struct super_block *sb = old->mnt_sb; |
154 | struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); | 230 | struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); |
@@ -160,19 +236,34 @@ clone_mnt(struct vfsmount *old, struct dentry *root) | |||
160 | mnt->mnt_root = dget(root); | 236 | mnt->mnt_root = dget(root); |
161 | mnt->mnt_mountpoint = mnt->mnt_root; | 237 | mnt->mnt_mountpoint = mnt->mnt_root; |
162 | mnt->mnt_parent = mnt; | 238 | mnt->mnt_parent = mnt; |
163 | mnt->mnt_namespace = current->namespace; | 239 | |
240 | if (flag & CL_SLAVE) { | ||
241 | list_add(&mnt->mnt_slave, &old->mnt_slave_list); | ||
242 | mnt->mnt_master = old; | ||
243 | CLEAR_MNT_SHARED(mnt); | ||
244 | } else { | ||
245 | if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old)) | ||
246 | list_add(&mnt->mnt_share, &old->mnt_share); | ||
247 | if (IS_MNT_SLAVE(old)) | ||
248 | list_add(&mnt->mnt_slave, &old->mnt_slave); | ||
249 | mnt->mnt_master = old->mnt_master; | ||
250 | } | ||
251 | if (flag & CL_MAKE_SHARED) | ||
252 | set_mnt_shared(mnt); | ||
164 | 253 | ||
165 | /* stick the duplicate mount on the same expiry list | 254 | /* stick the duplicate mount on the same expiry list |
166 | * as the original if that was on one */ | 255 | * as the original if that was on one */ |
167 | spin_lock(&vfsmount_lock); | 256 | if (flag & CL_EXPIRE) { |
168 | if (!list_empty(&old->mnt_expire)) | 257 | spin_lock(&vfsmount_lock); |
169 | list_add(&mnt->mnt_expire, &old->mnt_expire); | 258 | if (!list_empty(&old->mnt_expire)) |
170 | spin_unlock(&vfsmount_lock); | 259 | list_add(&mnt->mnt_expire, &old->mnt_expire); |
260 | spin_unlock(&vfsmount_lock); | ||
261 | } | ||
171 | } | 262 | } |
172 | return mnt; | 263 | return mnt; |
173 | } | 264 | } |
174 | 265 | ||
175 | void __mntput(struct vfsmount *mnt) | 266 | static inline void __mntput(struct vfsmount *mnt) |
176 | { | 267 | { |
177 | struct super_block *sb = mnt->mnt_sb; | 268 | struct super_block *sb = mnt->mnt_sb; |
178 | dput(mnt->mnt_root); | 269 | dput(mnt->mnt_root); |
@@ -180,7 +271,46 @@ void __mntput(struct vfsmount *mnt) | |||
180 | deactivate_super(sb); | 271 | deactivate_super(sb); |
181 | } | 272 | } |
182 | 273 | ||
183 | EXPORT_SYMBOL(__mntput); | 274 | void mntput_no_expire(struct vfsmount *mnt) |
275 | { | ||
276 | repeat: | ||
277 | if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) { | ||
278 | if (likely(!mnt->mnt_pinned)) { | ||
279 | spin_unlock(&vfsmount_lock); | ||
280 | __mntput(mnt); | ||
281 | return; | ||
282 | } | ||
283 | atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); | ||
284 | mnt->mnt_pinned = 0; | ||
285 | spin_unlock(&vfsmount_lock); | ||
286 | acct_auto_close_mnt(mnt); | ||
287 | security_sb_umount_close(mnt); | ||
288 | goto repeat; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | EXPORT_SYMBOL(mntput_no_expire); | ||
293 | |||
294 | void mnt_pin(struct vfsmount *mnt) | ||
295 | { | ||
296 | spin_lock(&vfsmount_lock); | ||
297 | mnt->mnt_pinned++; | ||
298 | spin_unlock(&vfsmount_lock); | ||
299 | } | ||
300 | |||
301 | EXPORT_SYMBOL(mnt_pin); | ||
302 | |||
303 | void mnt_unpin(struct vfsmount *mnt) | ||
304 | { | ||
305 | spin_lock(&vfsmount_lock); | ||
306 | if (mnt->mnt_pinned) { | ||
307 | atomic_inc(&mnt->mnt_count); | ||
308 | mnt->mnt_pinned--; | ||
309 | } | ||
310 | spin_unlock(&vfsmount_lock); | ||
311 | } | ||
312 | |||
313 | EXPORT_SYMBOL(mnt_unpin); | ||
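The pin/unpin pair exists for kernel users that must remember a mount without holding a regular reference that would make umount return -EBUSY; the in-tree user is the process-accounting code, which is why mntput_no_expire() above folds the pinned count back and calls acct_auto_close_mnt(). A sketch of the intended calling pattern, assuming the caller starts out with a reference from a path lookup (the wrapper names are hypothetical):

    static void example_hold(struct vfsmount *mnt)
    {
            mnt_pin(mnt);           /* record our interest as a pin... */
            mntput(mnt);            /* ...and drop the active reference */
    }

    static void example_release(struct vfsmount *mnt)
    {
            mnt_unpin(mnt);         /* move the pin back into mnt_count... */
            mntput(mnt);            /* ...so this put releases it for real */
    }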
184 | 314 | ||
185 | /* iterator */ | 315 | /* iterator */ |
186 | static void *m_start(struct seq_file *m, loff_t *pos) | 316 | static void *m_start(struct seq_file *m, loff_t *pos) |
@@ -189,7 +319,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) | |||
189 | struct list_head *p; | 319 | struct list_head *p; |
190 | loff_t l = *pos; | 320 | loff_t l = *pos; |
191 | 321 | ||
192 | down_read(&n->sem); | 322 | down_read(&namespace_sem); |
193 | list_for_each(p, &n->list) | 323 | list_for_each(p, &n->list) |
194 | if (!l--) | 324 | if (!l--) |
195 | return list_entry(p, struct vfsmount, mnt_list); | 325 | return list_entry(p, struct vfsmount, mnt_list); |
@@ -201,13 +331,12 @@ static void *m_next(struct seq_file *m, void *v, loff_t *pos) | |||
201 | struct namespace *n = m->private; | 331 | struct namespace *n = m->private; |
202 | struct list_head *p = ((struct vfsmount *)v)->mnt_list.next; | 332 | struct list_head *p = ((struct vfsmount *)v)->mnt_list.next; |
203 | (*pos)++; | 333 | (*pos)++; |
204 | return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list); | 334 | return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list); |
205 | } | 335 | } |
206 | 336 | ||
207 | static void m_stop(struct seq_file *m, void *v) | 337 | static void m_stop(struct seq_file *m, void *v) |
208 | { | 338 | { |
209 | struct namespace *n = m->private; | 339 | up_read(&namespace_sem); |
210 | up_read(&n->sem); | ||
211 | } | 340 | } |
212 | 341 | ||
213 | static inline void mangle(struct seq_file *m, const char *s) | 342 | static inline void mangle(struct seq_file *m, const char *s) |
@@ -275,35 +404,14 @@ struct seq_operations mounts_op = { | |||
275 | */ | 404 | */ |
276 | int may_umount_tree(struct vfsmount *mnt) | 405 | int may_umount_tree(struct vfsmount *mnt) |
277 | { | 406 | { |
278 | struct list_head *next; | 407 | int actual_refs = 0; |
279 | struct vfsmount *this_parent = mnt; | 408 | int minimum_refs = 0; |
280 | int actual_refs; | 409 | struct vfsmount *p; |
281 | int minimum_refs; | ||
282 | 410 | ||
283 | spin_lock(&vfsmount_lock); | 411 | spin_lock(&vfsmount_lock); |
284 | actual_refs = atomic_read(&mnt->mnt_count); | 412 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
285 | minimum_refs = 2; | ||
286 | repeat: | ||
287 | next = this_parent->mnt_mounts.next; | ||
288 | resume: | ||
289 | while (next != &this_parent->mnt_mounts) { | ||
290 | struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child); | ||
291 | |||
292 | next = next->next; | ||
293 | |||
294 | actual_refs += atomic_read(&p->mnt_count); | 413 | actual_refs += atomic_read(&p->mnt_count); |
295 | minimum_refs += 2; | 414 | minimum_refs += 2; |
296 | |||
297 | if (!list_empty(&p->mnt_mounts)) { | ||
298 | this_parent = p; | ||
299 | goto repeat; | ||
300 | } | ||
301 | } | ||
302 | |||
303 | if (this_parent != mnt) { | ||
304 | next = this_parent->mnt_child.next; | ||
305 | this_parent = this_parent->mnt_parent; | ||
306 | goto resume; | ||
307 | } | 415 | } |
308 | spin_unlock(&vfsmount_lock); | 416 | spin_unlock(&vfsmount_lock); |
309 | 417 | ||
@@ -330,45 +438,67 @@ EXPORT_SYMBOL(may_umount_tree); | |||
330 | */ | 438 | */ |
331 | int may_umount(struct vfsmount *mnt) | 439 | int may_umount(struct vfsmount *mnt) |
332 | { | 440 | { |
333 | if (atomic_read(&mnt->mnt_count) > 2) | 441 | int ret = 0; |
334 | return -EBUSY; | 442 | spin_lock(&vfsmount_lock); |
335 | return 0; | 443 | if (propagate_mount_busy(mnt, 2)) |
444 | ret = -EBUSY; | ||
445 | spin_unlock(&vfsmount_lock); | ||
446 | return ret; | ||
336 | } | 447 | } |
337 | 448 | ||
338 | EXPORT_SYMBOL(may_umount); | 449 | EXPORT_SYMBOL(may_umount); |
339 | 450 | ||
340 | static void umount_tree(struct vfsmount *mnt) | 451 | void release_mounts(struct list_head *head) |
452 | { | ||
453 | struct vfsmount *mnt; | ||
454 | while (!list_empty(head)) { | ||
455 | mnt = list_entry(head->next, struct vfsmount, mnt_hash); | ||
456 | list_del_init(&mnt->mnt_hash); | ||
457 | if (mnt->mnt_parent != mnt) { | ||
458 | struct dentry *dentry; | ||
459 | struct vfsmount *m; | ||
460 | spin_lock(&vfsmount_lock); | ||
461 | dentry = mnt->mnt_mountpoint; | ||
462 | m = mnt->mnt_parent; | ||
463 | mnt->mnt_mountpoint = mnt->mnt_root; | ||
464 | mnt->mnt_parent = mnt; | ||
465 | spin_unlock(&vfsmount_lock); | ||
466 | dput(dentry); | ||
467 | mntput(m); | ||
468 | } | ||
469 | mntput(mnt); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) | ||
341 | { | 474 | { |
342 | struct vfsmount *p; | 475 | struct vfsmount *p; |
343 | LIST_HEAD(kill); | ||
344 | 476 | ||
345 | for (p = mnt; p; p = next_mnt(p, mnt)) { | 477 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
346 | list_del(&p->mnt_list); | 478 | list_del(&p->mnt_hash); |
347 | list_add(&p->mnt_list, &kill); | 479 | list_add(&p->mnt_hash, kill); |
348 | p->mnt_namespace = NULL; | ||
349 | } | 480 | } |
350 | 481 | ||
351 | while (!list_empty(&kill)) { | 482 | if (propagate) |
352 | mnt = list_entry(kill.next, struct vfsmount, mnt_list); | 483 | propagate_umount(kill); |
353 | list_del_init(&mnt->mnt_list); | 484 | |
354 | list_del_init(&mnt->mnt_expire); | 485 | list_for_each_entry(p, kill, mnt_hash) { |
355 | if (mnt->mnt_parent == mnt) { | 486 | list_del_init(&p->mnt_expire); |
356 | spin_unlock(&vfsmount_lock); | 487 | list_del_init(&p->mnt_list); |
357 | } else { | 488 | __touch_namespace(p->mnt_namespace); |
358 | struct nameidata old_nd; | 489 | p->mnt_namespace = NULL; |
359 | detach_mnt(mnt, &old_nd); | 490 | list_del_init(&p->mnt_child); |
360 | spin_unlock(&vfsmount_lock); | 491 | if (p->mnt_parent != p) |
361 | path_release(&old_nd); | 492 | p->mnt_mountpoint->d_mounted--; |
362 | } | 493 | change_mnt_propagation(p, MS_PRIVATE); |
363 | mntput(mnt); | ||
364 | spin_lock(&vfsmount_lock); | ||
365 | } | 494 | } |
366 | } | 495 | } |
367 | 496 | ||
368 | static int do_umount(struct vfsmount *mnt, int flags) | 497 | static int do_umount(struct vfsmount *mnt, int flags) |
369 | { | 498 | { |
370 | struct super_block * sb = mnt->mnt_sb; | 499 | struct super_block *sb = mnt->mnt_sb; |
371 | int retval; | 500 | int retval; |
501 | LIST_HEAD(umount_list); | ||
372 | 502 | ||
373 | retval = security_sb_umount(mnt, flags); | 503 | retval = security_sb_umount(mnt, flags); |
374 | if (retval) | 504 | if (retval) |
@@ -403,7 +533,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
403 | */ | 533 | */ |
404 | 534 | ||
405 | lock_kernel(); | 535 | lock_kernel(); |
406 | if( (flags&MNT_FORCE) && sb->s_op->umount_begin) | 536 | if ((flags & MNT_FORCE) && sb->s_op->umount_begin) |
407 | sb->s_op->umount_begin(sb); | 537 | sb->s_op->umount_begin(sb); |
408 | unlock_kernel(); | 538 | unlock_kernel(); |
409 | 539 | ||
@@ -432,29 +562,21 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
432 | return retval; | 562 | return retval; |
433 | } | 563 | } |
434 | 564 | ||
435 | down_write(¤t->namespace->sem); | 565 | down_write(&namespace_sem); |
436 | spin_lock(&vfsmount_lock); | 566 | spin_lock(&vfsmount_lock); |
567 | event++; | ||
437 | 568 | ||
438 | if (atomic_read(&sb->s_active) == 1) { | ||
439 | /* last instance - try to be smart */ | ||
440 | spin_unlock(&vfsmount_lock); | ||
441 | lock_kernel(); | ||
442 | DQUOT_OFF(sb); | ||
443 | acct_auto_close(sb); | ||
444 | unlock_kernel(); | ||
445 | security_sb_umount_close(mnt); | ||
446 | spin_lock(&vfsmount_lock); | ||
447 | } | ||
448 | retval = -EBUSY; | 569 | retval = -EBUSY; |
449 | if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) { | 570 | if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { |
450 | if (!list_empty(&mnt->mnt_list)) | 571 | if (!list_empty(&mnt->mnt_list)) |
451 | umount_tree(mnt); | 572 | umount_tree(mnt, 1, &umount_list); |
452 | retval = 0; | 573 | retval = 0; |
453 | } | 574 | } |
454 | spin_unlock(&vfsmount_lock); | 575 | spin_unlock(&vfsmount_lock); |
455 | if (retval) | 576 | if (retval) |
456 | security_sb_umount_busy(mnt); | 577 | security_sb_umount_busy(mnt); |
457 | up_write(¤t->namespace->sem); | 578 | up_write(&namespace_sem); |
579 | release_mounts(&umount_list); | ||
458 | return retval; | 580 | return retval; |
459 | } | 581 | } |
460 | 582 | ||
@@ -494,12 +616,11 @@ out: | |||
494 | #ifdef __ARCH_WANT_SYS_OLDUMOUNT | 616 | #ifdef __ARCH_WANT_SYS_OLDUMOUNT |
495 | 617 | ||
496 | /* | 618 | /* |
497 | * The 2.0 compatible umount. No flags. | 619 | * The 2.0 compatible umount. No flags. |
498 | */ | 620 | */ |
499 | |||
500 | asmlinkage long sys_oldumount(char __user * name) | 621 | asmlinkage long sys_oldumount(char __user * name) |
501 | { | 622 | { |
502 | return sys_umount(name,0); | 623 | return sys_umount(name, 0); |
503 | } | 624 | } |
504 | 625 | ||
505 | #endif | 626 | #endif |
@@ -522,8 +643,7 @@ static int mount_is_safe(struct nameidata *nd) | |||
522 | #endif | 643 | #endif |
523 | } | 644 | } |
524 | 645 | ||
525 | static int | 646 | static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) |
526 | lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) | ||
527 | { | 647 | { |
528 | while (1) { | 648 | while (1) { |
529 | if (d == dentry) | 649 | if (d == dentry) |
@@ -534,12 +654,16 @@ lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) | |||
534 | } | 654 | } |
535 | } | 655 | } |
536 | 656 | ||
537 | static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | 657 | struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, |
658 | int flag) | ||
538 | { | 659 | { |
539 | struct vfsmount *res, *p, *q, *r, *s; | 660 | struct vfsmount *res, *p, *q, *r, *s; |
540 | struct nameidata nd; | 661 | struct nameidata nd; |
541 | 662 | ||
542 | res = q = clone_mnt(mnt, dentry); | 663 | if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) |
664 | return NULL; | ||
665 | |||
666 | res = q = clone_mnt(mnt, dentry, flag); | ||
543 | if (!q) | 667 | if (!q) |
544 | goto Enomem; | 668 | goto Enomem; |
545 | q->mnt_mountpoint = mnt->mnt_mountpoint; | 669 | q->mnt_mountpoint = mnt->mnt_mountpoint; |
@@ -550,6 +674,10 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
550 | continue; | 674 | continue; |
551 | 675 | ||
552 | for (s = r; s; s = next_mnt(s, r)) { | 676 | for (s = r; s; s = next_mnt(s, r)) { |
677 | if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { | ||
678 | s = skip_mnt_tree(s); | ||
679 | continue; | ||
680 | } | ||
553 | while (p != s->mnt_parent) { | 681 | while (p != s->mnt_parent) { |
554 | p = p->mnt_parent; | 682 | p = p->mnt_parent; |
555 | q = q->mnt_parent; | 683 | q = q->mnt_parent; |
@@ -557,7 +685,7 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
557 | p = s; | 685 | p = s; |
558 | nd.mnt = q; | 686 | nd.mnt = q; |
559 | nd.dentry = p->mnt_mountpoint; | 687 | nd.dentry = p->mnt_mountpoint; |
560 | q = clone_mnt(p, p->mnt_root); | 688 | q = clone_mnt(p, p->mnt_root, flag); |
561 | if (!q) | 689 | if (!q) |
562 | goto Enomem; | 690 | goto Enomem; |
563 | spin_lock(&vfsmount_lock); | 691 | spin_lock(&vfsmount_lock); |
@@ -567,15 +695,114 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
567 | } | 695 | } |
568 | } | 696 | } |
569 | return res; | 697 | return res; |
570 | Enomem: | 698 | Enomem: |
571 | if (res) { | 699 | if (res) { |
700 | LIST_HEAD(umount_list); | ||
572 | spin_lock(&vfsmount_lock); | 701 | spin_lock(&vfsmount_lock); |
573 | umount_tree(res); | 702 | umount_tree(res, 0, &umount_list); |
574 | spin_unlock(&vfsmount_lock); | 703 | spin_unlock(&vfsmount_lock); |
704 | release_mounts(&umount_list); | ||
575 | } | 705 | } |
576 | return NULL; | 706 | return NULL; |
577 | } | 707 | } |
578 | 708 | ||
709 | /* | ||
710 | * @source_mnt : mount tree to be attached | ||
711 | * @nd : place where the mount tree @source_mnt is attached | ||
712 | * @parent_nd : if non-null, detach the source_mnt from its parent and | ||
713 | * store the parent mount and mountpoint dentry. | ||
714 | * (done when source_mnt is moved) | ||
715 | * | ||
716 | * NOTE: the table below explains the semantics when a source mount | ||
717 | * of a given type is attached to a destination mount of a given type. | ||
718 | * --------------------------------------------------------------------------- | ||
719 | * | BIND MOUNT OPERATION | | ||
720 | * |************************************************************************** | ||
721 | * | source-->| shared | private | slave | unbindable | | ||
722 | * | dest | | | | | | ||
723 | * | | | | | | | | ||
724 | * | v | | | | | | ||
725 | * |************************************************************************** | ||
726 | * | shared | shared (++) | shared (+) | shared(+++)| invalid | | ||
727 | * | | | | | | | ||
728 | * |non-shared| shared (+) | private | slave (*) | invalid | | ||
729 | * *************************************************************************** | ||
730 | * A bind operation clones the source mount and mounts the clone on the | ||
731 | * destination mount. | ||
732 | * | ||
733 | * (++) the cloned mount is propagated to all the mounts in the propagation | ||
734 | * tree of the destination mount and the cloned mount is added to | ||
735 | * the peer group of the source mount. | ||
736 | * (+) the cloned mount is created under the destination mount and is marked | ||
737 | * as shared. The cloned mount is added to the peer group of the source | ||
738 | * mount. | ||
739 | * (+++) the mount is propagated to all the mounts in the propagation tree | ||
740 | * of the destination mount and the cloned mount is made slave | ||
741 | * of the same master as that of the source mount. The cloned mount | ||
742 | * is marked as 'shared and slave'. | ||
743 | * (*) the cloned mount is made a slave of the same master as that of the | ||
744 | * source mount. | ||
745 | * | ||
746 | * --------------------------------------------------------------------------- | ||
747 | * | MOVE MOUNT OPERATION | | ||
748 | * |************************************************************************** | ||
749 | * | source-->| shared | private | slave | unbindable | | ||
750 | * | dest | | | | | | ||
751 | * | | | | | | | | ||
752 | * | v | | | | | | ||
753 | * |************************************************************************** | ||
754 | * | shared | shared (+) | shared (+) | shared(+++) | invalid | | ||
755 | * | | | | | | | ||
756 | * |non-shared| shared (+*) | private | slave (*) | unbindable | | ||
757 | * *************************************************************************** | ||
758 | * | ||
759 | * (+) the mount is moved to the destination. And is then propagated to | ||
760 | * all the mounts in the propagation tree of the destination mount. | ||
761 | * (+*) the mount is moved to the destination. | ||
762 | * (+++) the mount is moved to the destination and is then propagated to | ||
763 | * all the mounts belonging to the destination mount's propagation tree. | ||
764 | * the mount is marked as 'shared and slave'. | ||
765 | * (*) the mount continues to be a slave at the new location. | ||
766 | * | ||
767 | * if the source mount is a tree, the operations explained above are | ||
768 | * applied to each mount in the tree. | ||
769 | * Must be called without spinlocks held, since this function can sleep | ||
770 | * in allocations. | ||
771 | */ | ||
772 | static int attach_recursive_mnt(struct vfsmount *source_mnt, | ||
773 | struct nameidata *nd, struct nameidata *parent_nd) | ||
774 | { | ||
775 | LIST_HEAD(tree_list); | ||
776 | struct vfsmount *dest_mnt = nd->mnt; | ||
777 | struct dentry *dest_dentry = nd->dentry; | ||
778 | struct vfsmount *child, *p; | ||
779 | |||
780 | if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list)) | ||
781 | return -EINVAL; | ||
782 | |||
783 | if (IS_MNT_SHARED(dest_mnt)) { | ||
784 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) | ||
785 | set_mnt_shared(p); | ||
786 | } | ||
787 | |||
788 | spin_lock(&vfsmount_lock); | ||
789 | if (parent_nd) { | ||
790 | detach_mnt(source_mnt, parent_nd); | ||
791 | attach_mnt(source_mnt, nd); | ||
792 | touch_namespace(current->namespace); | ||
793 | } else { | ||
794 | mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt); | ||
795 | commit_tree(source_mnt); | ||
796 | } | ||
797 | |||
798 | list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { | ||
799 | list_del_init(&child->mnt_hash); | ||
800 | commit_tree(child); | ||
801 | } | ||
802 | spin_unlock(&vfsmount_lock); | ||
803 | return 0; | ||
804 | } | ||
805 | |||
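Seen from userspace, the shared-destination rows of the bind table above mean that a mount created under any member of a peer group becomes visible under all of them. A minimal sketch using raw mount(2); the paths are hypothetical, /mnt is assumed to already be a mountpoint, the target directories are assumed to exist, and MS_SHARED is defined locally in case the C library headers predate the new flag:

    #include <stdio.h>
    #include <sys/mount.h>

    #ifndef MS_SHARED
    #define MS_SHARED       (1 << 20)       /* kernel value of the new flag */
    #endif

    int main(void)
    {
            /* turn /mnt into a shared mount (a peer group of one) */
            if (mount(NULL, "/mnt", NULL, MS_SHARED, NULL))
                    perror("make-shared");

            /* bind it elsewhere; the clone joins /mnt's peer group */
            if (mount("/mnt", "/tmp/peer", NULL, MS_BIND, NULL))
                    perror("bind peer");

            /*
             * a mount added under the shared mount is propagated, so it
             * also shows up under /tmp/peer/dir
             */
            if (mount("/usr/src", "/mnt/dir", NULL, MS_BIND, NULL))
                    perror("bind under shared mount");
            return 0;
    }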
579 | static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) | 806 | static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) |
580 | { | 807 | { |
581 | int err; | 808 | int err; |
@@ -596,17 +823,8 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) | |||
596 | goto out_unlock; | 823 | goto out_unlock; |
597 | 824 | ||
598 | err = -ENOENT; | 825 | err = -ENOENT; |
599 | spin_lock(&vfsmount_lock); | 826 | if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) |
600 | if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) { | 827 | err = attach_recursive_mnt(mnt, nd, NULL); |
601 | struct list_head head; | ||
602 | |||
603 | attach_mnt(mnt, nd); | ||
604 | list_add_tail(&head, &mnt->mnt_list); | ||
605 | list_splice(&head, current->namespace->list.prev); | ||
606 | mntget(mnt); | ||
607 | err = 0; | ||
608 | } | ||
609 | spin_unlock(&vfsmount_lock); | ||
610 | out_unlock: | 828 | out_unlock: |
611 | up(&nd->dentry->d_inode->i_sem); | 829 | up(&nd->dentry->d_inode->i_sem); |
612 | if (!err) | 830 | if (!err) |
@@ -615,6 +833,27 @@ out_unlock: | |||
615 | } | 833 | } |
616 | 834 | ||
617 | /* | 835 | /* |
836 | * recursively change the type of the mountpoint. | ||
837 | */ | ||
838 | static int do_change_type(struct nameidata *nd, int flag) | ||
839 | { | ||
840 | struct vfsmount *m, *mnt = nd->mnt; | ||
841 | int recurse = flag & MS_REC; | ||
842 | int type = flag & ~MS_REC; | ||
843 | |||
844 | if (nd->dentry != nd->mnt->mnt_root) | ||
845 | return -EINVAL; | ||
846 | |||
847 | down_write(&namespace_sem); | ||
848 | spin_lock(&vfsmount_lock); | ||
849 | for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) | ||
850 | change_mnt_propagation(m, type); | ||
851 | spin_unlock(&vfsmount_lock); | ||
852 | up_write(&namespace_sem); | ||
853 | return 0; | ||
854 | } | ||
855 | |||
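do_change_type() is what the new propagation flags of mount(2) end up calling (see the do_mount() dispatch later in this diff); the source, filesystem type and data arguments are ignored for these requests, the target must be a mountpoint, and MS_REC applies the change to the whole subtree. A hedged sketch, with the flag values copied from the kernel headers in case the C library does not define them yet and "/mnt" standing in for any existing mountpoint:

    #include <sys/mount.h>

    #ifndef MS_UNBINDABLE
    #define MS_UNBINDABLE   (1 << 17)
    #define MS_PRIVATE      (1 << 18)
    #define MS_SLAVE        (1 << 19)
    #define MS_SHARED       (1 << 20)
    #endif

    int main(void)
    {
            /* mark /mnt and every mount below it as slave mounts */
            return mount(NULL, "/mnt", NULL, MS_SLAVE | MS_REC, NULL);
    }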
856 | /* | ||
618 | * do loopback mount. | 857 | * do loopback mount. |
619 | */ | 858 | */ |
620 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | 859 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) |
@@ -630,32 +869,34 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | |||
630 | if (err) | 869 | if (err) |
631 | return err; | 870 | return err; |
632 | 871 | ||
633 | down_write(¤t->namespace->sem); | 872 | down_write(&namespace_sem); |
634 | err = -EINVAL; | 873 | err = -EINVAL; |
635 | if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) { | 874 | if (IS_MNT_UNBINDABLE(old_nd.mnt)) |
636 | err = -ENOMEM; | 875 | goto out; |
637 | if (recurse) | ||
638 | mnt = copy_tree(old_nd.mnt, old_nd.dentry); | ||
639 | else | ||
640 | mnt = clone_mnt(old_nd.mnt, old_nd.dentry); | ||
641 | } | ||
642 | 876 | ||
643 | if (mnt) { | 877 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) |
644 | /* stop bind mounts from expiring */ | 878 | goto out; |
879 | |||
880 | err = -ENOMEM; | ||
881 | if (recurse) | ||
882 | mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0); | ||
883 | else | ||
884 | mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0); | ||
885 | |||
886 | if (!mnt) | ||
887 | goto out; | ||
888 | |||
889 | err = graft_tree(mnt, nd); | ||
890 | if (err) { | ||
891 | LIST_HEAD(umount_list); | ||
645 | spin_lock(&vfsmount_lock); | 892 | spin_lock(&vfsmount_lock); |
646 | list_del_init(&mnt->mnt_expire); | 893 | umount_tree(mnt, 0, &umount_list); |
647 | spin_unlock(&vfsmount_lock); | 894 | spin_unlock(&vfsmount_lock); |
648 | 895 | release_mounts(&umount_list); | |
649 | err = graft_tree(mnt, nd); | ||
650 | if (err) { | ||
651 | spin_lock(&vfsmount_lock); | ||
652 | umount_tree(mnt); | ||
653 | spin_unlock(&vfsmount_lock); | ||
654 | } else | ||
655 | mntput(mnt); | ||
656 | } | 896 | } |
657 | 897 | ||
658 | up_write(¤t->namespace->sem); | 898 | out: |
899 | up_write(&namespace_sem); | ||
659 | path_release(&old_nd); | 900 | path_release(&old_nd); |
660 | return err; | 901 | return err; |
661 | } | 902 | } |
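The IS_MNT_UNBINDABLE() check at the top of do_loopback() is the kernel side of the "invalid" column in the bind table: once a mount has been marked unbindable, bind requests using it as the source fail with EINVAL. A small sketch (hypothetical paths, /mnt assumed to be a mountpoint, MS_UNBINDABLE defined locally in case the C library headers lack it):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mount.h>

    #ifndef MS_UNBINDABLE
    #define MS_UNBINDABLE   (1 << 17)       /* kernel value of the new flag */
    #endif

    int main(void)
    {
            if (mount(NULL, "/mnt", NULL, MS_UNBINDABLE, NULL))
                    perror("make-unbindable");

            if (mount("/mnt", "/tmp/copy", NULL, MS_BIND, NULL) && errno == EINVAL)
                    printf("bind from an unbindable mount rejected, as expected\n");
            return 0;
    }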
@@ -665,12 +906,11 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | |||
665 | * If you've mounted a non-root directory somewhere and want to do remount | 906 | * If you've mounted a non-root directory somewhere and want to do remount |
666 | * on it - tough luck. | 907 | * on it - tough luck. |
667 | */ | 908 | */ |
668 | |||
669 | static int do_remount(struct nameidata *nd, int flags, int mnt_flags, | 909 | static int do_remount(struct nameidata *nd, int flags, int mnt_flags, |
670 | void *data) | 910 | void *data) |
671 | { | 911 | { |
672 | int err; | 912 | int err; |
673 | struct super_block * sb = nd->mnt->mnt_sb; | 913 | struct super_block *sb = nd->mnt->mnt_sb; |
674 | 914 | ||
675 | if (!capable(CAP_SYS_ADMIN)) | 915 | if (!capable(CAP_SYS_ADMIN)) |
676 | return -EPERM; | 916 | return -EPERM; |
@@ -684,13 +924,23 @@ static int do_remount(struct nameidata *nd, int flags, int mnt_flags, | |||
684 | down_write(&sb->s_umount); | 924 | down_write(&sb->s_umount); |
685 | err = do_remount_sb(sb, flags, data, 0); | 925 | err = do_remount_sb(sb, flags, data, 0); |
686 | if (!err) | 926 | if (!err) |
687 | nd->mnt->mnt_flags=mnt_flags; | 927 | nd->mnt->mnt_flags = mnt_flags; |
688 | up_write(&sb->s_umount); | 928 | up_write(&sb->s_umount); |
689 | if (!err) | 929 | if (!err) |
690 | security_sb_post_remount(nd->mnt, flags, data); | 930 | security_sb_post_remount(nd->mnt, flags, data); |
691 | return err; | 931 | return err; |
692 | } | 932 | } |
693 | 933 | ||
934 | static inline int tree_contains_unbindable(struct vfsmount *mnt) | ||
935 | { | ||
936 | struct vfsmount *p; | ||
937 | for (p = mnt; p; p = next_mnt(p, mnt)) { | ||
938 | if (IS_MNT_UNBINDABLE(p)) | ||
939 | return 1; | ||
940 | } | ||
941 | return 0; | ||
942 | } | ||
943 | |||
694 | static int do_move_mount(struct nameidata *nd, char *old_name) | 944 | static int do_move_mount(struct nameidata *nd, char *old_name) |
695 | { | 945 | { |
696 | struct nameidata old_nd, parent_nd; | 946 | struct nameidata old_nd, parent_nd; |
@@ -704,8 +954,8 @@ static int do_move_mount(struct nameidata *nd, char *old_name) | |||
704 | if (err) | 954 | if (err) |
705 | return err; | 955 | return err; |
706 | 956 | ||
707 | down_write(¤t->namespace->sem); | 957 | down_write(&namespace_sem); |
708 | while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 958 | while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) |
709 | ; | 959 | ; |
710 | err = -EINVAL; | 960 | err = -EINVAL; |
711 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) | 961 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) |
@@ -716,39 +966,47 @@ static int do_move_mount(struct nameidata *nd, char *old_name) | |||
716 | if (IS_DEADDIR(nd->dentry->d_inode)) | 966 | if (IS_DEADDIR(nd->dentry->d_inode)) |
717 | goto out1; | 967 | goto out1; |
718 | 968 | ||
719 | spin_lock(&vfsmount_lock); | ||
720 | if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry)) | 969 | if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry)) |
721 | goto out2; | 970 | goto out1; |
722 | 971 | ||
723 | err = -EINVAL; | 972 | err = -EINVAL; |
724 | if (old_nd.dentry != old_nd.mnt->mnt_root) | 973 | if (old_nd.dentry != old_nd.mnt->mnt_root) |
725 | goto out2; | 974 | goto out1; |
726 | 975 | ||
727 | if (old_nd.mnt == old_nd.mnt->mnt_parent) | 976 | if (old_nd.mnt == old_nd.mnt->mnt_parent) |
728 | goto out2; | 977 | goto out1; |
729 | 978 | ||
730 | if (S_ISDIR(nd->dentry->d_inode->i_mode) != | 979 | if (S_ISDIR(nd->dentry->d_inode->i_mode) != |
731 | S_ISDIR(old_nd.dentry->d_inode->i_mode)) | 980 | S_ISDIR(old_nd.dentry->d_inode->i_mode)) |
732 | goto out2; | 981 | goto out1; |
733 | 982 | /* | |
983 | * Don't move a mount residing in a shared parent. | ||
984 | */ | ||
985 | if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent)) | ||
986 | goto out1; | ||
987 | /* | ||
988 | * Don't move a mount tree containing unbindable mounts to a destination | ||
989 | * mount which is shared. | ||
990 | */ | ||
991 | if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt)) | ||
992 | goto out1; | ||
734 | err = -ELOOP; | 993 | err = -ELOOP; |
735 | for (p = nd->mnt; p->mnt_parent!=p; p = p->mnt_parent) | 994 | for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent) |
736 | if (p == old_nd.mnt) | 995 | if (p == old_nd.mnt) |
737 | goto out2; | 996 | goto out1; |
738 | err = 0; | ||
739 | 997 | ||
740 | detach_mnt(old_nd.mnt, &parent_nd); | 998 | if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd))) |
741 | attach_mnt(old_nd.mnt, nd); | 999 | goto out1; |
742 | 1000 | ||
1001 | spin_lock(&vfsmount_lock); | ||
743 | /* if the mount is moved, it should no longer expire | 1002 | /* if the mount is moved, it should no longer expire |
744 | * automatically */ | 1003 | * automatically */ |
745 | list_del_init(&old_nd.mnt->mnt_expire); | 1004 | list_del_init(&old_nd.mnt->mnt_expire); |
746 | out2: | ||
747 | spin_unlock(&vfsmount_lock); | 1005 | spin_unlock(&vfsmount_lock); |
748 | out1: | 1006 | out1: |
749 | up(&nd->dentry->d_inode->i_sem); | 1007 | up(&nd->dentry->d_inode->i_sem); |
750 | out: | 1008 | out: |
751 | up_write(¤t->namespace->sem); | 1009 | up_write(&namespace_sem); |
752 | if (!err) | 1010 | if (!err) |
753 | path_release(&parent_nd); | 1011 | path_release(&parent_nd); |
754 | path_release(&old_nd); | 1012 | path_release(&old_nd); |
@@ -787,9 +1045,9 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, | |||
787 | { | 1045 | { |
788 | int err; | 1046 | int err; |
789 | 1047 | ||
790 | down_write(¤t->namespace->sem); | 1048 | down_write(&namespace_sem); |
791 | /* Something was mounted here while we slept */ | 1049 | /* Something was mounted here while we slept */ |
792 | while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 1050 | while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) |
793 | ; | 1051 | ; |
794 | err = -EINVAL; | 1052 | err = -EINVAL; |
795 | if (!check_mnt(nd->mnt)) | 1053 | if (!check_mnt(nd->mnt)) |
@@ -806,25 +1064,28 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, | |||
806 | goto unlock; | 1064 | goto unlock; |
807 | 1065 | ||
808 | newmnt->mnt_flags = mnt_flags; | 1066 | newmnt->mnt_flags = mnt_flags; |
809 | newmnt->mnt_namespace = current->namespace; | 1067 | if ((err = graft_tree(newmnt, nd))) |
810 | err = graft_tree(newmnt, nd); | 1068 | goto unlock; |
811 | 1069 | ||
812 | if (err == 0 && fslist) { | 1070 | if (fslist) { |
813 | /* add to the specified expiration list */ | 1071 | /* add to the specified expiration list */ |
814 | spin_lock(&vfsmount_lock); | 1072 | spin_lock(&vfsmount_lock); |
815 | list_add_tail(&newmnt->mnt_expire, fslist); | 1073 | list_add_tail(&newmnt->mnt_expire, fslist); |
816 | spin_unlock(&vfsmount_lock); | 1074 | spin_unlock(&vfsmount_lock); |
817 | } | 1075 | } |
1076 | up_write(&namespace_sem); | ||
1077 | return 0; | ||
818 | 1078 | ||
819 | unlock: | 1079 | unlock: |
820 | up_write(¤t->namespace->sem); | 1080 | up_write(&namespace_sem); |
821 | mntput(newmnt); | 1081 | mntput(newmnt); |
822 | return err; | 1082 | return err; |
823 | } | 1083 | } |
824 | 1084 | ||
825 | EXPORT_SYMBOL_GPL(do_add_mount); | 1085 | EXPORT_SYMBOL_GPL(do_add_mount); |
826 | 1086 | ||
827 | static void expire_mount(struct vfsmount *mnt, struct list_head *mounts) | 1087 | static void expire_mount(struct vfsmount *mnt, struct list_head *mounts, |
1088 | struct list_head *umounts) | ||
828 | { | 1089 | { |
829 | spin_lock(&vfsmount_lock); | 1090 | spin_lock(&vfsmount_lock); |
830 | 1091 | ||
@@ -841,27 +1102,13 @@ static void expire_mount(struct vfsmount *mnt, struct list_head *mounts) | |||
841 | * Check that it is still dead: the count should now be 2 - as | 1102 | * Check that it is still dead: the count should now be 2 - as |
842 | * contributed by the vfsmount parent and the mntget above | 1103 | * contributed by the vfsmount parent and the mntget above |
843 | */ | 1104 | */ |
844 | if (atomic_read(&mnt->mnt_count) == 2) { | 1105 | if (!propagate_mount_busy(mnt, 2)) { |
845 | struct nameidata old_nd; | ||
846 | |||
847 | /* delete from the namespace */ | 1106 | /* delete from the namespace */ |
1107 | touch_namespace(mnt->mnt_namespace); | ||
848 | list_del_init(&mnt->mnt_list); | 1108 | list_del_init(&mnt->mnt_list); |
849 | mnt->mnt_namespace = NULL; | 1109 | mnt->mnt_namespace = NULL; |
850 | detach_mnt(mnt, &old_nd); | 1110 | umount_tree(mnt, 1, umounts); |
851 | spin_unlock(&vfsmount_lock); | 1111 | spin_unlock(&vfsmount_lock); |
852 | path_release(&old_nd); | ||
853 | |||
854 | /* | ||
855 | * Now lay it to rest if this was the last ref on the superblock | ||
856 | */ | ||
857 | if (atomic_read(&mnt->mnt_sb->s_active) == 1) { | ||
858 | /* last instance - try to be smart */ | ||
859 | lock_kernel(); | ||
860 | DQUOT_OFF(mnt->mnt_sb); | ||
861 | acct_auto_close(mnt->mnt_sb); | ||
862 | unlock_kernel(); | ||
863 | } | ||
864 | mntput(mnt); | ||
865 | } else { | 1112 | } else { |
866 | /* | 1113 | /* |
867 | * Someone brought it back to life whilst we didn't have any | 1114 | * Someone brought it back to life whilst we didn't have any |
@@ -910,6 +1157,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
910 | * - dispose of the corpse | 1157 | * - dispose of the corpse |
911 | */ | 1158 | */ |
912 | while (!list_empty(&graveyard)) { | 1159 | while (!list_empty(&graveyard)) { |
1160 | LIST_HEAD(umounts); | ||
913 | mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire); | 1161 | mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire); |
914 | list_del_init(&mnt->mnt_expire); | 1162 | list_del_init(&mnt->mnt_expire); |
915 | 1163 | ||
@@ -921,13 +1169,12 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
921 | get_namespace(namespace); | 1169 | get_namespace(namespace); |
922 | 1170 | ||
923 | spin_unlock(&vfsmount_lock); | 1171 | spin_unlock(&vfsmount_lock); |
924 | down_write(&namespace->sem); | 1172 | down_write(&namespace_sem); |
925 | expire_mount(mnt, mounts); | 1173 | expire_mount(mnt, mounts, &umounts); |
926 | up_write(&namespace->sem); | 1174 | up_write(&namespace_sem); |
927 | 1175 | release_mounts(&umounts); | |
928 | mntput(mnt); | 1176 | mntput(mnt); |
929 | put_namespace(namespace); | 1177 | put_namespace(namespace); |
930 | |||
931 | spin_lock(&vfsmount_lock); | 1178 | spin_lock(&vfsmount_lock); |
932 | } | 1179 | } |
933 | 1180 | ||
@@ -942,8 +1189,8 @@ EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); | |||
942 | * Note that this function differs from copy_from_user() in that it will oops | 1189 | * Note that this function differs from copy_from_user() in that it will oops |
943 | * on bad values of `to', rather than returning a short copy. | 1190 | * on bad values of `to', rather than returning a short copy. |
944 | */ | 1191 | */ |
945 | static long | 1192 | static long exact_copy_from_user(void *to, const void __user * from, |
946 | exact_copy_from_user(void *to, const void __user *from, unsigned long n) | 1193 | unsigned long n) |
947 | { | 1194 | { |
948 | char *t = to; | 1195 | char *t = to; |
949 | const char __user *f = from; | 1196 | const char __user *f = from; |
@@ -964,12 +1211,12 @@ exact_copy_from_user(void *to, const void __user *from, unsigned long n) | |||
964 | return n; | 1211 | return n; |
965 | } | 1212 | } |
966 | 1213 | ||
967 | int copy_mount_options(const void __user *data, unsigned long *where) | 1214 | int copy_mount_options(const void __user * data, unsigned long *where) |
968 | { | 1215 | { |
969 | int i; | 1216 | int i; |
970 | unsigned long page; | 1217 | unsigned long page; |
971 | unsigned long size; | 1218 | unsigned long size; |
972 | 1219 | ||
973 | *where = 0; | 1220 | *where = 0; |
974 | if (!data) | 1221 | if (!data) |
975 | return 0; | 1222 | return 0; |
@@ -988,7 +1235,7 @@ int copy_mount_options(const void __user *data, unsigned long *where) | |||
988 | 1235 | ||
989 | i = size - exact_copy_from_user((void *)page, data, size); | 1236 | i = size - exact_copy_from_user((void *)page, data, size); |
990 | if (!i) { | 1237 | if (!i) { |
991 | free_page(page); | 1238 | free_page(page); |
992 | return -EFAULT; | 1239 | return -EFAULT; |
993 | } | 1240 | } |
994 | if (i != PAGE_SIZE) | 1241 | if (i != PAGE_SIZE) |
@@ -1011,7 +1258,7 @@ int copy_mount_options(const void __user *data, unsigned long *where) | |||
1011 | * Therefore, if this magic number is present, it carries no information | 1258 | * Therefore, if this magic number is present, it carries no information |
1012 | * and must be discarded. | 1259 | * and must be discarded. |
1013 | */ | 1260 | */ |
1014 | long do_mount(char * dev_name, char * dir_name, char *type_page, | 1261 | long do_mount(char *dev_name, char *dir_name, char *type_page, |
1015 | unsigned long flags, void *data_page) | 1262 | unsigned long flags, void *data_page) |
1016 | { | 1263 | { |
1017 | struct nameidata nd; | 1264 | struct nameidata nd; |
@@ -1039,7 +1286,7 @@ long do_mount(char * dev_name, char * dir_name, char *type_page, | |||
1039 | mnt_flags |= MNT_NODEV; | 1286 | mnt_flags |= MNT_NODEV; |
1040 | if (flags & MS_NOEXEC) | 1287 | if (flags & MS_NOEXEC) |
1041 | mnt_flags |= MNT_NOEXEC; | 1288 | mnt_flags |= MNT_NOEXEC; |
1042 | flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE); | 1289 | flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE); |
1043 | 1290 | ||
1044 | /* ... and get the mountpoint */ | 1291 | /* ... and get the mountpoint */ |
1045 | retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd); | 1292 | retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd); |
@@ -1055,6 +1302,8 @@ long do_mount(char * dev_name, char * dir_name, char *type_page, | |||
1055 | data_page); | 1302 | data_page); |
1056 | else if (flags & MS_BIND) | 1303 | else if (flags & MS_BIND) |
1057 | retval = do_loopback(&nd, dev_name, flags & MS_REC); | 1304 | retval = do_loopback(&nd, dev_name, flags & MS_REC); |
1305 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | ||
1306 | retval = do_change_type(&nd, flags); | ||
1058 | else if (flags & MS_MOVE) | 1307 | else if (flags & MS_MOVE) |
1059 | retval = do_move_mount(&nd, dev_name); | 1308 | retval = do_move_mount(&nd, dev_name); |
1060 | else | 1309 | else |
@@ -1091,14 +1340,16 @@ int copy_namespace(int flags, struct task_struct *tsk) | |||
1091 | goto out; | 1340 | goto out; |
1092 | 1341 | ||
1093 | atomic_set(&new_ns->count, 1); | 1342 | atomic_set(&new_ns->count, 1); |
1094 | init_rwsem(&new_ns->sem); | ||
1095 | INIT_LIST_HEAD(&new_ns->list); | 1343 | INIT_LIST_HEAD(&new_ns->list); |
1344 | init_waitqueue_head(&new_ns->poll); | ||
1345 | new_ns->event = 0; | ||
1096 | 1346 | ||
1097 | down_write(&tsk->namespace->sem); | 1347 | down_write(&namespace_sem); |
1098 | /* First pass: copy the tree topology */ | 1348 | /* First pass: copy the tree topology */ |
1099 | new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root); | 1349 | new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root, |
1350 | CL_COPY_ALL | CL_EXPIRE); | ||
1100 | if (!new_ns->root) { | 1351 | if (!new_ns->root) { |
1101 | up_write(&tsk->namespace->sem); | 1352 | up_write(&namespace_sem); |
1102 | kfree(new_ns); | 1353 | kfree(new_ns); |
1103 | goto out; | 1354 | goto out; |
1104 | } | 1355 | } |
@@ -1132,7 +1383,7 @@ int copy_namespace(int flags, struct task_struct *tsk) | |||
1132 | p = next_mnt(p, namespace->root); | 1383 | p = next_mnt(p, namespace->root); |
1133 | q = next_mnt(q, new_ns->root); | 1384 | q = next_mnt(q, new_ns->root); |
1134 | } | 1385 | } |
1135 | up_write(&tsk->namespace->sem); | 1386 | up_write(&namespace_sem); |
1136 | 1387 | ||
1137 | tsk->namespace = new_ns; | 1388 | tsk->namespace = new_ns; |
1138 | 1389 | ||
@@ -1161,7 +1412,7 @@ asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name, | |||
1161 | unsigned long dev_page; | 1412 | unsigned long dev_page; |
1162 | char *dir_page; | 1413 | char *dir_page; |
1163 | 1414 | ||
1164 | retval = copy_mount_options (type, &type_page); | 1415 | retval = copy_mount_options(type, &type_page); |
1165 | if (retval < 0) | 1416 | if (retval < 0) |
1166 | return retval; | 1417 | return retval; |
1167 | 1418 | ||
@@ -1170,17 +1421,17 @@ asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name, | |||
1170 | if (IS_ERR(dir_page)) | 1421 | if (IS_ERR(dir_page)) |
1171 | goto out1; | 1422 | goto out1; |
1172 | 1423 | ||
1173 | retval = copy_mount_options (dev_name, &dev_page); | 1424 | retval = copy_mount_options(dev_name, &dev_page); |
1174 | if (retval < 0) | 1425 | if (retval < 0) |
1175 | goto out2; | 1426 | goto out2; |
1176 | 1427 | ||
1177 | retval = copy_mount_options (data, &data_page); | 1428 | retval = copy_mount_options(data, &data_page); |
1178 | if (retval < 0) | 1429 | if (retval < 0) |
1179 | goto out3; | 1430 | goto out3; |
1180 | 1431 | ||
1181 | lock_kernel(); | 1432 | lock_kernel(); |
1182 | retval = do_mount((char*)dev_page, dir_page, (char*)type_page, | 1433 | retval = do_mount((char *)dev_page, dir_page, (char *)type_page, |
1183 | flags, (void*)data_page); | 1434 | flags, (void *)data_page); |
1184 | unlock_kernel(); | 1435 | unlock_kernel(); |
1185 | free_page(data_page); | 1436 | free_page(data_page); |
1186 | 1437 | ||
@@ -1249,9 +1500,11 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) | |||
1249 | if (fs) { | 1500 | if (fs) { |
1250 | atomic_inc(&fs->count); | 1501 | atomic_inc(&fs->count); |
1251 | task_unlock(p); | 1502 | task_unlock(p); |
1252 | if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt) | 1503 | if (fs->root == old_nd->dentry |
1504 | && fs->rootmnt == old_nd->mnt) | ||
1253 | set_fs_root(fs, new_nd->mnt, new_nd->dentry); | 1505 | set_fs_root(fs, new_nd->mnt, new_nd->dentry); |
1254 | if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt) | 1506 | if (fs->pwd == old_nd->dentry |
1507 | && fs->pwdmnt == old_nd->mnt) | ||
1255 | set_fs_pwd(fs, new_nd->mnt, new_nd->dentry); | 1508 | set_fs_pwd(fs, new_nd->mnt, new_nd->dentry); |
1256 | put_fs_struct(fs); | 1509 | put_fs_struct(fs); |
1257 | } else | 1510 | } else |
@@ -1281,8 +1534,8 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) | |||
1281 | * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root | 1534 | * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root |
1282 | * first. | 1535 | * first. |
1283 | */ | 1536 | */ |
1284 | 1537 | asmlinkage long sys_pivot_root(const char __user * new_root, | |
1285 | asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old) | 1538 | const char __user * put_old) |
1286 | { | 1539 | { |
1287 | struct vfsmount *tmp; | 1540 | struct vfsmount *tmp; |
1288 | struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd; | 1541 | struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd; |
@@ -1293,14 +1546,15 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1293 | 1546 | ||
1294 | lock_kernel(); | 1547 | lock_kernel(); |
1295 | 1548 | ||
1296 | error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd); | 1549 | error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, |
1550 | &new_nd); | ||
1297 | if (error) | 1551 | if (error) |
1298 | goto out0; | 1552 | goto out0; |
1299 | error = -EINVAL; | 1553 | error = -EINVAL; |
1300 | if (!check_mnt(new_nd.mnt)) | 1554 | if (!check_mnt(new_nd.mnt)) |
1301 | goto out1; | 1555 | goto out1; |
1302 | 1556 | ||
1303 | error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd); | 1557 | error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd); |
1304 | if (error) | 1558 | if (error) |
1305 | goto out1; | 1559 | goto out1; |
1306 | 1560 | ||
@@ -1314,9 +1568,13 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1314 | user_nd.mnt = mntget(current->fs->rootmnt); | 1568 | user_nd.mnt = mntget(current->fs->rootmnt); |
1315 | user_nd.dentry = dget(current->fs->root); | 1569 | user_nd.dentry = dget(current->fs->root); |
1316 | read_unlock(¤t->fs->lock); | 1570 | read_unlock(¤t->fs->lock); |
1317 | down_write(¤t->namespace->sem); | 1571 | down_write(&namespace_sem); |
1318 | down(&old_nd.dentry->d_inode->i_sem); | 1572 | down(&old_nd.dentry->d_inode->i_sem); |
1319 | error = -EINVAL; | 1573 | error = -EINVAL; |
1574 | if (IS_MNT_SHARED(old_nd.mnt) || | ||
1575 | IS_MNT_SHARED(new_nd.mnt->mnt_parent) || | ||
1576 | IS_MNT_SHARED(user_nd.mnt->mnt_parent)) | ||
1577 | goto out2; | ||
1320 | if (!check_mnt(user_nd.mnt)) | 1578 | if (!check_mnt(user_nd.mnt)) |
1321 | goto out2; | 1579 | goto out2; |
1322 | error = -ENOENT; | 1580 | error = -ENOENT; |
@@ -1356,6 +1614,7 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1356 | detach_mnt(user_nd.mnt, &root_parent); | 1614 | detach_mnt(user_nd.mnt, &root_parent); |
1357 | attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */ | 1615 | attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */ |
1358 | attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */ | 1616 | attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */ |
1617 | touch_namespace(current->namespace); | ||
1359 | spin_unlock(&vfsmount_lock); | 1618 | spin_unlock(&vfsmount_lock); |
1360 | chroot_fs_refs(&user_nd, &new_nd); | 1619 | chroot_fs_refs(&user_nd, &new_nd); |
1361 | security_sb_post_pivotroot(&user_nd, &new_nd); | 1620 | security_sb_post_pivotroot(&user_nd, &new_nd); |
@@ -1364,7 +1623,7 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1364 | path_release(&parent_nd); | 1623 | path_release(&parent_nd); |
1365 | out2: | 1624 | out2: |
1366 | up(&old_nd.dentry->d_inode->i_sem); | 1625 | up(&old_nd.dentry->d_inode->i_sem); |
1367 | up_write(¤t->namespace->sem); | 1626 | up_write(&namespace_sem); |
1368 | path_release(&user_nd); | 1627 | path_release(&user_nd); |
1369 | path_release(&old_nd); | 1628 | path_release(&old_nd); |
1370 | out1: | 1629 | out1: |
@@ -1391,7 +1650,8 @@ static void __init init_mount_tree(void) | |||
1391 | panic("Can't allocate initial namespace"); | 1650 | panic("Can't allocate initial namespace"); |
1392 | atomic_set(&namespace->count, 1); | 1651 | atomic_set(&namespace->count, 1); |
1393 | INIT_LIST_HEAD(&namespace->list); | 1652 | INIT_LIST_HEAD(&namespace->list); |
1394 | init_rwsem(&namespace->sem); | 1653 | init_waitqueue_head(&namespace->poll); |
1654 | namespace->event = 0; | ||
1395 | list_add(&mnt->mnt_list, &namespace->list); | 1655 | list_add(&mnt->mnt_list, &namespace->list); |
1396 | namespace->root = mnt; | 1656 | namespace->root = mnt; |
1397 | mnt->mnt_namespace = namespace; | 1657 | mnt->mnt_namespace = namespace; |
@@ -1414,11 +1674,12 @@ void __init mnt_init(unsigned long mempages) | |||
1414 | unsigned int nr_hash; | 1674 | unsigned int nr_hash; |
1415 | int i; | 1675 | int i; |
1416 | 1676 | ||
1677 | init_rwsem(&namespace_sem); | ||
1678 | |||
1417 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), | 1679 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), |
1418 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); | 1680 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL); |
1419 | 1681 | ||
1420 | mount_hashtable = (struct list_head *) | 1682 | mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); |
1421 | __get_free_page(GFP_ATOMIC); | ||
1422 | 1683 | ||
1423 | if (!mount_hashtable) | 1684 | if (!mount_hashtable) |
1424 | panic("Failed to allocate mount hash table\n"); | 1685 | panic("Failed to allocate mount hash table\n"); |
@@ -1440,7 +1701,7 @@ void __init mnt_init(unsigned long mempages) | |||
1440 | * from the number of bits we can fit. | 1701 | * from the number of bits we can fit. |
1441 | */ | 1702 | */ |
1442 | nr_hash = 1UL << hash_bits; | 1703 | nr_hash = 1UL << hash_bits; |
1443 | hash_mask = nr_hash-1; | 1704 | hash_mask = nr_hash - 1; |
1444 | 1705 | ||
1445 | printk("Mount-cache hash table entries: %d\n", nr_hash); | 1706 | printk("Mount-cache hash table entries: %d\n", nr_hash); |
1446 | 1707 | ||
@@ -1460,12 +1721,14 @@ void __init mnt_init(unsigned long mempages) | |||
1460 | void __put_namespace(struct namespace *namespace) | 1721 | void __put_namespace(struct namespace *namespace) |
1461 | { | 1722 | { |
1462 | struct vfsmount *root = namespace->root; | 1723 | struct vfsmount *root = namespace->root; |
1724 | LIST_HEAD(umount_list); | ||
1463 | namespace->root = NULL; | 1725 | namespace->root = NULL; |
1464 | spin_unlock(&vfsmount_lock); | 1726 | spin_unlock(&vfsmount_lock); |
1465 | down_write(&namespace->sem); | 1727 | down_write(&namespace_sem); |
1466 | spin_lock(&vfsmount_lock); | 1728 | spin_lock(&vfsmount_lock); |
1467 | umount_tree(root); | 1729 | umount_tree(root, 0, &umount_list); |
1468 | spin_unlock(&vfsmount_lock); | 1730 | spin_unlock(&vfsmount_lock); |
1469 | up_write(&namespace->sem); | 1731 | up_write(&namespace_sem); |
1732 | release_mounts(&umount_list); | ||
1470 | kfree(namespace); | 1733 | kfree(namespace); |
1471 | } | 1734 | } |
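
The __put_namespace() rework above follows a pattern used throughout this patch: the mounts to be torn down are first collected onto a local umount_list while vfsmount_lock is held, and only handed to release_mounts() after the lock has been dropped, so the potentially heavyweight teardown (dput/mntput) never runs under the spinlock. A minimal userspace sketch of that collect-under-lock, release-outside pattern; all names here (struct mnt, umount_tree_collect, release_batch) are hypothetical stand-ins, with a pthread mutex playing the role of vfsmount_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct vfsmount: just a name on a list. */
struct mnt {
	struct mnt *next;
	char name[32];
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* "vfsmount_lock" */
static struct mnt *tree;                                 /* mounts still attached */

/* Move every node onto a private list while the lock is held. */
static struct mnt *umount_tree_collect(void)
{
	struct mnt *batch;

	pthread_mutex_lock(&lock);
	batch = tree;
	tree = NULL;
	pthread_mutex_unlock(&lock);
	return batch;
}

/* Release the collected nodes with no lock held, like release_mounts(). */
static void release_batch(struct mnt *batch)
{
	while (batch) {
		struct mnt *m = batch;
		batch = m->next;
		printf("releasing %s\n", m->name);
		free(m);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct mnt *m = calloc(1, sizeof(*m));
		snprintf(m->name, sizeof(m->name), "mnt%d", i);
		m->next = tree;
		tree = m;
	}
	release_batch(umount_tree_collect());
	return 0;
}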
diff --git a/fs/pnode.c b/fs/pnode.c new file mode 100644 index 000000000000..aeeec8ba8dd2 --- /dev/null +++ b/fs/pnode.c | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | * linux/fs/pnode.c | ||
3 | * | ||
4 | * (C) Copyright IBM Corporation 2005. | ||
5 | * Released under GPL v2. | ||
6 | * Author : Ram Pai (linuxram@us.ibm.com) | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/namespace.h> | ||
10 | #include <linux/mount.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include "pnode.h" | ||
13 | |||
14 | /* return the next shared peer mount of @p */ | ||
15 | static inline struct vfsmount *next_peer(struct vfsmount *p) | ||
16 | { | ||
17 | return list_entry(p->mnt_share.next, struct vfsmount, mnt_share); | ||
18 | } | ||
19 | |||
20 | static inline struct vfsmount *first_slave(struct vfsmount *p) | ||
21 | { | ||
22 | return list_entry(p->mnt_slave_list.next, struct vfsmount, mnt_slave); | ||
23 | } | ||
24 | |||
25 | static inline struct vfsmount *next_slave(struct vfsmount *p) | ||
26 | { | ||
27 | return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave); | ||
28 | } | ||
29 | |||
30 | static int do_make_slave(struct vfsmount *mnt) | ||
31 | { | ||
32 | struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master; | ||
33 | struct vfsmount *slave_mnt; | ||
34 | |||
35 | /* | ||
36 | * slave 'mnt' to a peer mount that has the | ||
37 | * same root dentry. If none is available, then | ||
38 | * slave it to anything that is available. | ||
39 | */ | ||
40 | while ((peer_mnt = next_peer(peer_mnt)) != mnt && | ||
41 | peer_mnt->mnt_root != mnt->mnt_root) ; | ||
42 | |||
43 | if (peer_mnt == mnt) { | ||
44 | peer_mnt = next_peer(mnt); | ||
45 | if (peer_mnt == mnt) | ||
46 | peer_mnt = NULL; | ||
47 | } | ||
48 | list_del_init(&mnt->mnt_share); | ||
49 | |||
50 | if (peer_mnt) | ||
51 | master = peer_mnt; | ||
52 | |||
53 | if (master) { | ||
54 | list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) | ||
55 | slave_mnt->mnt_master = master; | ||
56 | list_del(&mnt->mnt_slave); | ||
57 | list_add(&mnt->mnt_slave, &master->mnt_slave_list); | ||
58 | list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); | ||
59 | INIT_LIST_HEAD(&mnt->mnt_slave_list); | ||
60 | } else { | ||
61 | struct list_head *p = &mnt->mnt_slave_list; | ||
62 | while (!list_empty(p)) { | ||
63 | slave_mnt = list_entry(p->next, | ||
64 | struct vfsmount, mnt_slave); | ||
65 | list_del_init(&slave_mnt->mnt_slave); | ||
66 | slave_mnt->mnt_master = NULL; | ||
67 | } | ||
68 | } | ||
69 | mnt->mnt_master = master; | ||
70 | CLEAR_MNT_SHARED(mnt); | ||
71 | INIT_LIST_HEAD(&mnt->mnt_slave_list); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | void change_mnt_propagation(struct vfsmount *mnt, int type) | ||
76 | { | ||
77 | if (type == MS_SHARED) { | ||
78 | set_mnt_shared(mnt); | ||
79 | return; | ||
80 | } | ||
81 | do_make_slave(mnt); | ||
82 | if (type != MS_SLAVE) { | ||
83 | list_del_init(&mnt->mnt_slave); | ||
84 | mnt->mnt_master = NULL; | ||
85 | if (type == MS_UNBINDABLE) | ||
86 | mnt->mnt_flags |= MNT_UNBINDABLE; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * get the next mount in the propagation tree. | ||
92 | * @m: the mount seen last | ||
93 | * @origin: the original mount from where the tree walk initiated | ||
94 | */ | ||
95 | static struct vfsmount *propagation_next(struct vfsmount *m, | ||
96 | struct vfsmount *origin) | ||
97 | { | ||
98 | /* are there any slaves of this mount? */ | ||
99 | if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) | ||
100 | return first_slave(m); | ||
101 | |||
102 | while (1) { | ||
103 | struct vfsmount *next; | ||
104 | struct vfsmount *master = m->mnt_master; | ||
105 | |||
106 | if (master == origin->mnt_master) { | ||
107 | next = next_peer(m); | ||
108 | return ((next == origin) ? NULL : next); | ||
109 | } else if (m->mnt_slave.next != &master->mnt_slave_list) | ||
110 | return next_slave(m); | ||
111 | |||
112 | /* back at master */ | ||
113 | m = master; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * return the source mount to be used for cloning | ||
119 | * | ||
120 | * @dest the current destination mount | ||
121 | * @last_dest the last seen destination mount | ||
122 | * @last_src the last seen source mount | ||
123 | * @type return CL_SLAVE if the new mount has to be | ||
124 | * cloned as a slave. | ||
125 | */ | ||
126 | static struct vfsmount *get_source(struct vfsmount *dest, | ||
127 | struct vfsmount *last_dest, | ||
128 | struct vfsmount *last_src, | ||
129 | int *type) | ||
130 | { | ||
131 | struct vfsmount *p_last_src = NULL; | ||
132 | struct vfsmount *p_last_dest = NULL; | ||
133 | *type = CL_PROPAGATION; | ||
134 | |||
135 | if (IS_MNT_SHARED(dest)) | ||
136 | *type |= CL_MAKE_SHARED; | ||
137 | |||
138 | while (last_dest != dest->mnt_master) { | ||
139 | p_last_dest = last_dest; | ||
140 | p_last_src = last_src; | ||
141 | last_dest = last_dest->mnt_master; | ||
142 | last_src = last_src->mnt_master; | ||
143 | } | ||
144 | |||
145 | if (p_last_dest) { | ||
146 | do { | ||
147 | p_last_dest = next_peer(p_last_dest); | ||
148 | } while (IS_MNT_NEW(p_last_dest)); | ||
149 | } | ||
150 | |||
151 | if (dest != p_last_dest) { | ||
152 | *type |= CL_SLAVE; | ||
153 | return last_src; | ||
154 | } else | ||
155 | return p_last_src; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * mount 'source_mnt' under the destination 'dest_mnt' at | ||
160 | * dentry 'dest_dentry'. And propagate that mount to | ||
161 | * all the peer and slave mounts of 'dest_mnt'. | ||
162 | * Link all the new mounts into a propagation tree headed at | ||
163 | * source_mnt. Also link all the new mounts using ->mnt_list | ||
164 | * headed at source_mnt's ->mnt_list | ||
165 | * | ||
166 | * @dest_mnt: destination mount. | ||
167 | * @dest_dentry: destination dentry. | ||
168 | * @source_mnt: source mount. | ||
169 | * @tree_list : list of heads of trees to be attached. | ||
170 | */ | ||
171 | int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry, | ||
172 | struct vfsmount *source_mnt, struct list_head *tree_list) | ||
173 | { | ||
174 | struct vfsmount *m, *child; | ||
175 | int ret = 0; | ||
176 | struct vfsmount *prev_dest_mnt = dest_mnt; | ||
177 | struct vfsmount *prev_src_mnt = source_mnt; | ||
178 | LIST_HEAD(tmp_list); | ||
179 | LIST_HEAD(umount_list); | ||
180 | |||
181 | for (m = propagation_next(dest_mnt, dest_mnt); m; | ||
182 | m = propagation_next(m, dest_mnt)) { | ||
183 | int type; | ||
184 | struct vfsmount *source; | ||
185 | |||
186 | if (IS_MNT_NEW(m)) | ||
187 | continue; | ||
188 | |||
189 | source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); | ||
190 | |||
191 | if (!(child = copy_tree(source, source->mnt_root, type))) { | ||
192 | ret = -ENOMEM; | ||
193 | list_splice(tree_list, tmp_list.prev); | ||
194 | goto out; | ||
195 | } | ||
196 | |||
197 | if (is_subdir(dest_dentry, m->mnt_root)) { | ||
198 | mnt_set_mountpoint(m, dest_dentry, child); | ||
199 | list_add_tail(&child->mnt_hash, tree_list); | ||
200 | } else { | ||
201 | /* | ||
202 | * This can happen if the parent mount was bind mounted | ||
203 | * on some subdirectory of a shared/slave mount. | ||
204 | */ | ||
205 | list_add_tail(&child->mnt_hash, &tmp_list); | ||
206 | } | ||
207 | prev_dest_mnt = m; | ||
208 | prev_src_mnt = child; | ||
209 | } | ||
210 | out: | ||
211 | spin_lock(&vfsmount_lock); | ||
212 | while (!list_empty(&tmp_list)) { | ||
213 | child = list_entry(tmp_list.next, struct vfsmount, mnt_hash); | ||
214 | list_del_init(&child->mnt_hash); | ||
215 | umount_tree(child, 0, &umount_list); | ||
216 | } | ||
217 | spin_unlock(&vfsmount_lock); | ||
218 | release_mounts(&umount_list); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * return true if the refcount is greater than count | ||
224 | */ | ||
225 | static inline int do_refcount_check(struct vfsmount *mnt, int count) | ||
226 | { | ||
227 | int mycount = atomic_read(&mnt->mnt_count); | ||
228 | return (mycount > count); | ||
229 | } | ||
230 | |||
231 | /* | ||
232 | * check if the mount 'mnt' can be unmounted successfully. | ||
233 | * @mnt: the mount to be checked for unmount | ||
234 | * NOTE: unmounting 'mnt' would naturally propagate to all | ||
235 | * other mounts its parent propagates to. | ||
236 | * Check if any of these mounts that **do not have submounts** | ||
237 | * have more references than 'refcnt'. If so return busy. | ||
238 | */ | ||
239 | int propagate_mount_busy(struct vfsmount *mnt, int refcnt) | ||
240 | { | ||
241 | struct vfsmount *m, *child; | ||
242 | struct vfsmount *parent = mnt->mnt_parent; | ||
243 | int ret = 0; | ||
244 | |||
245 | if (mnt == parent) | ||
246 | return do_refcount_check(mnt, refcnt); | ||
247 | |||
248 | /* | ||
249 | * quickly check if the current mount can be unmounted. | ||
250 | * If not, we don't have to go checking for all other | ||
251 | * mounts | ||
252 | */ | ||
253 | if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt)) | ||
254 | return 1; | ||
255 | |||
256 | for (m = propagation_next(parent, parent); m; | ||
257 | m = propagation_next(m, parent)) { | ||
258 | child = __lookup_mnt(m, mnt->mnt_mountpoint, 0); | ||
259 | if (child && list_empty(&child->mnt_mounts) && | ||
260 | (ret = do_refcount_check(child, 1))) | ||
261 | break; | ||
262 | } | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * NOTE: unmounting 'mnt' naturally propagates to all other mounts its | ||
268 | * parent propagates to. | ||
269 | */ | ||
270 | static void __propagate_umount(struct vfsmount *mnt) | ||
271 | { | ||
272 | struct vfsmount *parent = mnt->mnt_parent; | ||
273 | struct vfsmount *m; | ||
274 | |||
275 | BUG_ON(parent == mnt); | ||
276 | |||
277 | for (m = propagation_next(parent, parent); m; | ||
278 | m = propagation_next(m, parent)) { | ||
279 | |||
280 | struct vfsmount *child = __lookup_mnt(m, | ||
281 | mnt->mnt_mountpoint, 0); | ||
282 | /* | ||
283 | * umount the child only if the child has no | ||
284 | * other children | ||
285 | */ | ||
286 | if (child && list_empty(&child->mnt_mounts)) { | ||
287 | list_del(&child->mnt_hash); | ||
288 | list_add_tail(&child->mnt_hash, &mnt->mnt_hash); | ||
289 | } | ||
290 | } | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * collect all mounts that receive propagation from the mount in @list, | ||
295 | * and return these additional mounts in the same list. | ||
296 | * @list: the list of mounts to be unmounted. | ||
297 | */ | ||
298 | int propagate_umount(struct list_head *list) | ||
299 | { | ||
300 | struct vfsmount *mnt; | ||
301 | |||
302 | list_for_each_entry(mnt, list, mnt_hash) | ||
303 | __propagate_umount(mnt); | ||
304 | return 0; | ||
305 | } | ||
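
fs/pnode.c above is the kernel side of the four propagation types; change_mnt_propagation() is the function the mount(2) propagation-type path is expected to reach when userspace asks for a mount to become shared, slave, private or unbindable. A rough sketch of that userspace side is below. The MS_* propagation flag values are an assumption taken from later kernel headers (older C libraries may not define them), the program must run as root, and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/mount.h>

/* Assumed values; only used if the libc headers do not provide them. */
#ifndef MS_UNBINDABLE
#define MS_UNBINDABLE	(1 << 17)
#endif
#ifndef MS_PRIVATE
#define MS_PRIVATE	(1 << 18)
#endif
#ifndef MS_SLAVE
#define MS_SLAVE	(1 << 19)
#endif
#ifndef MS_SHARED
#define MS_SHARED	(1 << 20)
#endif

int main(int argc, char *argv[])
{
	unsigned long flag;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> shared|slave|private|unbindable\n",
			argv[0]);
		return 1;
	}
	if (!strcmp(argv[2], "shared"))
		flag = MS_SHARED;
	else if (!strcmp(argv[2], "slave"))
		flag = MS_SLAVE;
	else if (!strcmp(argv[2], "private"))
		flag = MS_PRIVATE;
	else if (!strcmp(argv[2], "unbindable"))
		flag = MS_UNBINDABLE;
	else {
		fprintf(stderr, "unknown propagation type %s\n", argv[2]);
		return 1;
	}
	/* mount(2) with only a propagation flag, no source or fstype,
	 * changes the propagation type of an existing mount. */
	if (mount(NULL, argv[1], NULL, flag, NULL)) {
		perror("mount");
		return 1;
	}
	return 0;
}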
diff --git a/fs/pnode.h b/fs/pnode.h new file mode 100644 index 000000000000..020e1bb60fdb --- /dev/null +++ b/fs/pnode.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * linux/fs/pnode.h | ||
3 | * | ||
4 | * (C) Copyright IBM Corporation 2005. | ||
5 | * Released under GPL v2. | ||
6 | * | ||
7 | */ | ||
8 | #ifndef _LINUX_PNODE_H | ||
9 | #define _LINUX_PNODE_H | ||
10 | |||
11 | #include <linux/list.h> | ||
12 | #include <linux/mount.h> | ||
13 | |||
14 | #define IS_MNT_SHARED(mnt) (mnt->mnt_flags & MNT_SHARED) | ||
15 | #define IS_MNT_SLAVE(mnt) (mnt->mnt_master) | ||
16 | #define IS_MNT_NEW(mnt) (!mnt->mnt_namespace) | ||
17 | #define CLEAR_MNT_SHARED(mnt) (mnt->mnt_flags &= ~MNT_SHARED) | ||
18 | #define IS_MNT_UNBINDABLE(mnt) (mnt->mnt_flags & MNT_UNBINDABLE) | ||
19 | |||
20 | #define CL_EXPIRE 0x01 | ||
21 | #define CL_SLAVE 0x02 | ||
22 | #define CL_COPY_ALL 0x04 | ||
23 | #define CL_MAKE_SHARED 0x08 | ||
24 | #define CL_PROPAGATION 0x10 | ||
25 | |||
26 | static inline void set_mnt_shared(struct vfsmount *mnt) | ||
27 | { | ||
28 | mnt->mnt_flags &= ~MNT_PNODE_MASK; | ||
29 | mnt->mnt_flags |= MNT_SHARED; | ||
30 | } | ||
31 | |||
32 | void change_mnt_propagation(struct vfsmount *, int); | ||
33 | int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *, | ||
34 | struct list_head *); | ||
35 | int propagate_umount(struct list_head *); | ||
36 | int propagate_mount_busy(struct vfsmount *, int); | ||
37 | #endif /* _LINUX_PNODE_H */ | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index a170450aadb1..634355e16986 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -70,6 +70,7 @@ | |||
70 | #include <linux/seccomp.h> | 70 | #include <linux/seccomp.h> |
71 | #include <linux/cpuset.h> | 71 | #include <linux/cpuset.h> |
72 | #include <linux/audit.h> | 72 | #include <linux/audit.h> |
73 | #include <linux/poll.h> | ||
73 | #include "internal.h" | 74 | #include "internal.h" |
74 | 75 | ||
75 | /* | 76 | /* |
@@ -660,26 +661,38 @@ static struct file_operations proc_smaps_operations = { | |||
660 | #endif | 661 | #endif |
661 | 662 | ||
662 | extern struct seq_operations mounts_op; | 663 | extern struct seq_operations mounts_op; |
664 | struct proc_mounts { | ||
665 | struct seq_file m; | ||
666 | int event; | ||
667 | }; | ||
668 | |||
663 | static int mounts_open(struct inode *inode, struct file *file) | 669 | static int mounts_open(struct inode *inode, struct file *file) |
664 | { | 670 | { |
665 | struct task_struct *task = proc_task(inode); | 671 | struct task_struct *task = proc_task(inode); |
666 | int ret = seq_open(file, &mounts_op); | 672 | struct namespace *namespace; |
673 | struct proc_mounts *p; | ||
674 | int ret = -EINVAL; | ||
667 | 675 | ||
668 | if (!ret) { | 676 | task_lock(task); |
669 | struct seq_file *m = file->private_data; | 677 | namespace = task->namespace; |
670 | struct namespace *namespace; | 678 | if (namespace) |
671 | task_lock(task); | 679 | get_namespace(namespace); |
672 | namespace = task->namespace; | 680 | task_unlock(task); |
673 | if (namespace) | 681 | |
674 | get_namespace(namespace); | 682 | if (namespace) { |
675 | task_unlock(task); | 683 | ret = -ENOMEM; |
676 | 684 | p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); | |
677 | if (namespace) | 685 | if (p) { |
678 | m->private = namespace; | 686 | file->private_data = &p->m; |
679 | else { | 687 | ret = seq_open(file, &mounts_op); |
680 | seq_release(inode, file); | 688 | if (!ret) { |
681 | ret = -EINVAL; | 689 | p->m.private = namespace; |
690 | p->event = namespace->event; | ||
691 | return 0; | ||
692 | } | ||
693 | kfree(p); | ||
682 | } | 694 | } |
695 | put_namespace(namespace); | ||
683 | } | 696 | } |
684 | return ret; | 697 | return ret; |
685 | } | 698 | } |
@@ -692,11 +705,30 @@ static int mounts_release(struct inode *inode, struct file *file) | |||
692 | return seq_release(inode, file); | 705 | return seq_release(inode, file); |
693 | } | 706 | } |
694 | 707 | ||
708 | static unsigned mounts_poll(struct file *file, poll_table *wait) | ||
709 | { | ||
710 | struct proc_mounts *p = file->private_data; | ||
711 | struct namespace *ns = p->m.private; | ||
712 | unsigned res = 0; | ||
713 | |||
714 | poll_wait(file, &ns->poll, wait); | ||
715 | |||
716 | spin_lock(&vfsmount_lock); | ||
717 | if (p->event != ns->event) { | ||
718 | p->event = ns->event; | ||
719 | res = POLLERR; | ||
720 | } | ||
721 | spin_unlock(&vfsmount_lock); | ||
722 | |||
723 | return res; | ||
724 | } | ||
725 | |||
695 | static struct file_operations proc_mounts_operations = { | 726 | static struct file_operations proc_mounts_operations = { |
696 | .open = mounts_open, | 727 | .open = mounts_open, |
697 | .read = seq_read, | 728 | .read = seq_read, |
698 | .llseek = seq_lseek, | 729 | .llseek = seq_lseek, |
699 | .release = mounts_release, | 730 | .release = mounts_release, |
731 | .poll = mounts_poll, | ||
700 | }; | 732 | }; |
701 | 733 | ||
702 | #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ | 734 | #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ |
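
The new mounts_poll() above lets a reader of /proc/mounts sleep until the mount table changes: the per-namespace event counter is bumped via touch_namespace() in the fs/namespace.c hunks, and a poller sees POLLERR once its cached event value goes stale. A small illustrative consumer, assuming nothing beyond what the hunk above shows (note that POLLERR is reported regardless of the requested events):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/proc/mounts", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open /proc/mounts");
		return 1;
	}
	pfd.events = POLLERR;

	for (;;) {
		/* mounts_poll() flags POLLERR once namespace->event moves on. */
		if (poll(&pfd, 1, -1) < 0) {
			perror("poll");
			break;
		}
		if (pfd.revents & POLLERR) {
			printf("mount table changed; re-read /proc/mounts\n");
			/* A real consumer would re-read and re-parse here. */
			lseek(pfd.fd, 0, SEEK_SET);
		}
	}
	close(pfd.fd);
	return 0;
}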
diff --git a/fs/seq_file.c b/fs/seq_file.c index 38ef913767ff..7c40570b71dc 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -28,13 +28,17 @@ | |||
28 | */ | 28 | */ |
29 | int seq_open(struct file *file, struct seq_operations *op) | 29 | int seq_open(struct file *file, struct seq_operations *op) |
30 | { | 30 | { |
31 | struct seq_file *p = kmalloc(sizeof(*p), GFP_KERNEL); | 31 | struct seq_file *p = file->private_data; |
32 | if (!p) | 32 | |
33 | return -ENOMEM; | 33 | if (!p) { |
34 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
35 | if (!p) | ||
36 | return -ENOMEM; | ||
37 | file->private_data = p; | ||
38 | } | ||
34 | memset(p, 0, sizeof(*p)); | 39 | memset(p, 0, sizeof(*p)); |
35 | sema_init(&p->sem, 1); | 40 | sema_init(&p->sem, 1); |
36 | p->op = op; | 41 | p->op = op; |
37 | file->private_data = p; | ||
38 | 42 | ||
39 | /* | 43 | /* |
40 | * Wrappers around seq_open(e.g. swaps_open) need to be | 44 | * Wrappers around seq_open(e.g. swaps_open) need to be |
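
The seq_open() change above is what allows fs/proc/base.c to wrap the seq_file inside struct proc_mounts: if the caller has already pointed file->private_data at an embedded seq_file, seq_open() reuses it instead of allocating one. A userspace model of that pattern; every type and name below is a stand-in, not the real kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-ins for the kernel types involved. */
struct seq_file {
	void *private;
};

struct file {
	void *private_data;
};

/* Model of the reworked seq_open(): reuse a caller-provided seq_file
 * embedded in a larger object, otherwise allocate one as before. */
static int seq_open_model(struct file *filp)
{
	struct seq_file *p = filp->private_data;

	if (!p) {
		p = malloc(sizeof(*p));
		if (!p)
			return -1;
		filp->private_data = p;
	}
	memset(p, 0, sizeof(*p));
	return 0;
}

/* Caller that wants extra per-open state, like struct proc_mounts. */
struct wrapper {
	struct seq_file m;	/* must stay the first member */
	int event;
};

int main(void)
{
	struct file filp = { 0 };
	struct wrapper *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	filp.private_data = &w->m;	/* preset before "seq_open" */
	if (seq_open_model(&filp))
		return 1;
	w->event = 42;
	printf("seq_file reused inside wrapper, event=%d\n",
	       ((struct wrapper *)filp.private_data)->event);
	free(w);
	return 0;
}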
diff --git a/fs/super.c b/fs/super.c index eed6c3132905..6689dded3c84 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -171,6 +171,7 @@ void deactivate_super(struct super_block *s) | |||
171 | if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { | 171 | if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { |
172 | s->s_count -= S_BIAS-1; | 172 | s->s_count -= S_BIAS-1; |
173 | spin_unlock(&sb_lock); | 173 | spin_unlock(&sb_lock); |
174 | DQUOT_OFF(s); | ||
174 | down_write(&s->s_umount); | 175 | down_write(&s->s_umount); |
175 | fs->kill_sb(s); | 176 | fs->kill_sb(s); |
176 | put_filesystem(fs); | 177 | put_filesystem(fs); |
diff --git a/include/asm-arm/arch-realview/entry-macro.S b/include/asm-arm/arch-realview/entry-macro.S index 2712ba77bb3a..4df469bf42e2 100644 --- a/include/asm-arm/arch-realview/entry-macro.S +++ b/include/asm-arm/arch-realview/entry-macro.S | |||
@@ -47,3 +47,17 @@ | |||
47 | cmpcs \irqnr, \irqnr | 47 | cmpcs \irqnr, \irqnr |
48 | 48 | ||
49 | .endm | 49 | .endm |
50 | |||
51 | /* We assume that irqstat (the raw value of the IRQ acknowledge | ||
52 | * register) is preserved from the macro above. | ||
53 | * If there is an IPI, we immediately signal end of interrupt on the | ||
54 | * controller, since this requires the original irqstat value which | ||
55 | * we won't easily be able to recreate later. | ||
56 | */ | ||
57 | |||
58 | .macro test_for_ipi, irqnr, irqstat, base, tmp | ||
59 | bic \irqnr, \irqstat, #0x1c00 | ||
60 | cmp \irqnr, #16 | ||
61 | strcc \irqstat, [\base, #GIC_CPU_EOI] | ||
62 | cmpcs \irqnr, \irqnr | ||
63 | .endm | ||
diff --git a/include/asm-arm/arch-realview/platform.h b/include/asm-arm/arch-realview/platform.h index 4b6de13a6b9a..aef9b36b3c37 100644 --- a/include/asm-arm/arch-realview/platform.h +++ b/include/asm-arm/arch-realview/platform.h | |||
@@ -203,8 +203,14 @@ | |||
203 | /* Reserved 0x1001A000 - 0x1001FFFF */ | 203 | /* Reserved 0x1001A000 - 0x1001FFFF */ |
204 | #define REALVIEW_CLCD_BASE 0x10020000 /* CLCD */ | 204 | #define REALVIEW_CLCD_BASE 0x10020000 /* CLCD */ |
205 | #define REALVIEW_DMAC_BASE 0x10030000 /* DMA controller */ | 205 | #define REALVIEW_DMAC_BASE 0x10030000 /* DMA controller */ |
206 | #ifndef CONFIG_REALVIEW_MPCORE | ||
206 | #define REALVIEW_GIC_CPU_BASE 0x10040000 /* Generic interrupt controller CPU interface */ | 207 | #define REALVIEW_GIC_CPU_BASE 0x10040000 /* Generic interrupt controller CPU interface */ |
207 | #define REALVIEW_GIC_DIST_BASE 0x10041000 /* Generic interrupt controller distributor */ | 208 | #define REALVIEW_GIC_DIST_BASE 0x10041000 /* Generic interrupt controller distributor */ |
209 | #else | ||
210 | #define REALVIEW_MPCORE_SCU_BASE 0x10100000 /* SCU registers */ | ||
211 | #define REALVIEW_GIC_CPU_BASE 0x10100100 /* Generic interrupt controller CPU interface */ | ||
212 | #define REALVIEW_GIC_DIST_BASE 0x10101000 /* Generic interrupt controller distributor */ | ||
213 | #endif | ||
208 | #define REALVIEW_SMC_BASE 0x10080000 /* SMC */ | 214 | #define REALVIEW_SMC_BASE 0x10080000 /* SMC */ |
209 | /* Reserved 0x10090000 - 0x100EFFFF */ | 215 | /* Reserved 0x10090000 - 0x100EFFFF */ |
210 | 216 | ||
@@ -265,6 +271,7 @@ | |||
265 | * Interrupts - bit assignment (primary) | 271 | * Interrupts - bit assignment (primary) |
266 | * ------------------------------------------------------------------------ | 272 | * ------------------------------------------------------------------------ |
267 | */ | 273 | */ |
274 | #ifndef CONFIG_REALVIEW_MPCORE | ||
268 | #define INT_WDOGINT 0 /* Watchdog timer */ | 275 | #define INT_WDOGINT 0 /* Watchdog timer */ |
269 | #define INT_SOFTINT 1 /* Software interrupt */ | 276 | #define INT_SOFTINT 1 /* Software interrupt */ |
270 | #define INT_COMMRx 2 /* Debug Comm Rx interrupt */ | 277 | #define INT_COMMRx 2 /* Debug Comm Rx interrupt */ |
@@ -297,6 +304,55 @@ | |||
297 | #define INT_USB 29 /* USB controller */ | 304 | #define INT_USB 29 /* USB controller */ |
298 | #define INT_TSPENINT 30 /* Touchscreen pen */ | 305 | #define INT_TSPENINT 30 /* Touchscreen pen */ |
299 | #define INT_TSKPADINT 31 /* Touchscreen keypad */ | 306 | #define INT_TSKPADINT 31 /* Touchscreen keypad */ |
307 | #else | ||
308 | #define INT_LOCALTIMER 29 | ||
309 | #define INT_LOCALWDOG 30 | ||
310 | |||
311 | #define INT_AACI 0 | ||
312 | #define INT_TIMERINT0_1 1 | ||
313 | #define INT_TIMERINT2_3 2 | ||
314 | #define INT_USB 3 | ||
315 | #define INT_UARTINT0 4 | ||
316 | #define INT_UARTINT1 5 | ||
317 | #define INT_RTCINT 6 | ||
318 | #define INT_KMI0 7 | ||
319 | #define INT_KMI1 8 | ||
320 | #define INT_ETH 9 | ||
321 | #define INT_EB_IRQ1 10 /* main GIC */ | ||
322 | #define INT_EB_IRQ2 11 /* tile GIC */ | ||
323 | #define INT_EB_FIQ1 12 /* main GIC */ | ||
324 | #define INT_EB_FIQ2 13 /* tile GIC */ | ||
325 | #define INT_MMCI0A 14 | ||
326 | #define INT_MMCI0B 15 | ||
327 | |||
328 | #define INT_PMU_CPU0 17 | ||
329 | #define INT_PMU_CPU1 18 | ||
330 | #define INT_PMU_CPU2 19 | ||
331 | #define INT_PMU_CPU3 20 | ||
332 | #define INT_PMU_SCU0 21 | ||
333 | #define INT_PMU_SCU1 22 | ||
334 | #define INT_PMU_SCU2 23 | ||
335 | #define INT_PMU_SCU3 24 | ||
336 | #define INT_PMU_SCU4 25 | ||
337 | #define INT_PMU_SCU5 26 | ||
338 | #define INT_PMU_SCU6 27 | ||
339 | #define INT_PMU_SCU7 28 | ||
340 | |||
341 | #define INT_L220_EVENT 29 | ||
342 | #define INT_L220_SLAVE 30 | ||
343 | #define INT_L220_DECODE 31 | ||
344 | |||
345 | #define INT_UARTINT2 -1 | ||
346 | #define INT_UARTINT3 -1 | ||
347 | #define INT_CLCDINT -1 | ||
348 | #define INT_DMAINT -1 | ||
349 | #define INT_WDOGINT -1 | ||
350 | #define INT_GPIOINT0 -1 | ||
351 | #define INT_GPIOINT1 -1 | ||
352 | #define INT_GPIOINT2 -1 | ||
353 | #define INT_SCIINT -1 | ||
354 | #define INT_SSPINT -1 | ||
355 | #endif | ||
300 | 356 | ||
301 | /* | 357 | /* |
302 | * Interrupt bit positions | 358 | * Interrupt bit positions |
diff --git a/include/asm-arm/arch-realview/smp.h b/include/asm-arm/arch-realview/smp.h new file mode 100644 index 000000000000..fc87783e8e8b --- /dev/null +++ b/include/asm-arm/arch-realview/smp.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef ASMARM_ARCH_SMP_H | ||
2 | #define ASMARM_ARCH_SMP_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | #include <asm/hardware/gic.h> | ||
7 | |||
8 | #define hard_smp_processor_id() \ | ||
9 | ({ \ | ||
10 | unsigned int cpunum; \ | ||
11 | __asm__("mrc p15, 0, %0, c0, c0, 5" \ | ||
12 | : "=r" (cpunum)); \ | ||
13 | cpunum &= 0x0F; \ | ||
14 | }) | ||
15 | |||
16 | /* | ||
17 | * We use IRQ1 as the IPI | ||
18 | */ | ||
19 | static inline void smp_cross_call(cpumask_t callmap) | ||
20 | { | ||
21 | gic_raise_softirq(callmap, 1); | ||
22 | } | ||
23 | |||
24 | /* | ||
25 | * Do nothing on MPcore. | ||
26 | */ | ||
27 | static inline void smp_cross_call_done(cpumask_t callmap) | ||
28 | { | ||
29 | } | ||
30 | |||
31 | #endif | ||
diff --git a/include/asm-arm/hardware/arm_scu.h b/include/asm-arm/hardware/arm_scu.h new file mode 100644 index 000000000000..9903f60c84b7 --- /dev/null +++ b/include/asm-arm/hardware/arm_scu.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef ASMARM_HARDWARE_ARM_SCU_H | ||
2 | #define ASMARM_HARDWARE_ARM_SCU_H | ||
3 | |||
4 | /* | ||
5 | * SCU registers | ||
6 | */ | ||
7 | #define SCU_CTRL 0x00 | ||
8 | #define SCU_CONFIG 0x04 | ||
9 | #define SCU_CPU_STATUS 0x08 | ||
10 | #define SCU_INVALIDATE 0x0c | ||
11 | #define SCU_FPGA_REVISION 0x10 | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index ef436b9d06ad..9d41548b7fef 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h | |||
@@ -120,6 +120,7 @@ extern unsigned long max_low_pfn; | |||
120 | 120 | ||
121 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 121 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
122 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 122 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
123 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | ||
123 | 124 | ||
124 | typedef union ia64_va { | 125 | typedef union ia64_va { |
125 | struct { | 126 | struct { |
diff --git a/include/asm-m68k/kbio.h b/include/asm-m68k/kbio.h deleted file mode 100644 index e1fbf8fba3e8..000000000000 --- a/include/asm-m68k/kbio.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-sparc/kbio.h> | ||
diff --git a/include/asm-m68k/vuid_event.h b/include/asm-m68k/vuid_event.h deleted file mode 100644 index 52ecb521a395..000000000000 --- a/include/asm-m68k/vuid_event.h +++ /dev/null | |||
@@ -1,4 +0,0 @@ | |||
1 | #ifndef _M68K_VUID_EVENT_H | ||
2 | #define _M68K_VUID_EVENT_H | ||
3 | #include <asm-sparc/vuid_event.h> | ||
4 | #endif | ||
diff --git a/include/asm-sparc/audioio.h b/include/asm-sparc/audioio.h deleted file mode 100644 index cf16173f521b..000000000000 --- a/include/asm-sparc/audioio.h +++ /dev/null | |||
@@ -1,234 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-sparc/audioio.h | ||
3 | * | ||
4 | * Sparc Audio Midlayer | ||
5 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _AUDIOIO_H_ | ||
9 | #define _AUDIOIO_H_ | ||
10 | |||
11 | /* | ||
12 | * SunOS/Solaris /dev/audio interface | ||
13 | */ | ||
14 | |||
15 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/time.h> | ||
18 | #include <linux/ioctl.h> | ||
19 | #endif | ||
20 | |||
21 | /* | ||
22 | * This structure contains state information for audio device IO streams. | ||
23 | */ | ||
24 | typedef struct audio_prinfo { | ||
25 | /* | ||
26 | * The following values describe the audio data encoding. | ||
27 | */ | ||
28 | unsigned int sample_rate; /* samples per second */ | ||
29 | unsigned int channels; /* number of interleaved channels */ | ||
30 | unsigned int precision; /* bit-width of each sample */ | ||
31 | unsigned int encoding; /* data encoding method */ | ||
32 | |||
33 | /* | ||
34 | * The following values control audio device configuration | ||
35 | */ | ||
36 | unsigned int gain; /* gain level: 0 - 255 */ | ||
37 | unsigned int port; /* selected I/O port (see below) */ | ||
38 | unsigned int avail_ports; /* available I/O ports (see below) */ | ||
39 | unsigned int _xxx[2]; /* Reserved for future use */ | ||
40 | |||
41 | unsigned int buffer_size; /* I/O buffer size */ | ||
42 | |||
43 | /* | ||
44 | * The following values describe driver state | ||
45 | */ | ||
46 | unsigned int samples; /* number of samples converted */ | ||
47 | unsigned int eof; /* End Of File counter (play only) */ | ||
48 | |||
49 | unsigned char pause; /* non-zero for pause, zero to resume */ | ||
50 | unsigned char error; /* non-zero if overflow/underflow */ | ||
51 | unsigned char waiting; /* non-zero if a process wants access */ | ||
52 | unsigned char balance; /* stereo channel balance */ | ||
53 | |||
54 | unsigned short minordev; | ||
55 | |||
56 | /* | ||
57 | * The following values are read-only state flags | ||
58 | */ | ||
59 | unsigned char open; /* non-zero if open access permitted */ | ||
60 | unsigned char active; /* non-zero if I/O is active */ | ||
61 | } audio_prinfo_t; | ||
62 | |||
63 | |||
64 | /* | ||
65 | * This structure describes the current state of the audio device. | ||
66 | */ | ||
67 | typedef struct audio_info { | ||
68 | /* | ||
69 | * Per-stream information | ||
70 | */ | ||
71 | audio_prinfo_t play; /* output status information */ | ||
72 | audio_prinfo_t record; /* input status information */ | ||
73 | |||
74 | /* | ||
75 | * Per-unit/channel information | ||
76 | */ | ||
77 | unsigned int monitor_gain; /* input to output mix: 0 - 255 */ | ||
78 | unsigned char output_muted; /* non-zero if output is muted */ | ||
79 | unsigned char _xxx[3]; /* Reserved for future use */ | ||
80 | unsigned int _yyy[3]; /* Reserved for future use */ | ||
81 | } audio_info_t; | ||
82 | |||
83 | |||
84 | /* | ||
85 | * Audio encoding types | ||
86 | */ | ||
87 | #define AUDIO_ENCODING_NONE (0) /* no encoding assigned */ | ||
88 | #define AUDIO_ENCODING_ULAW (1) /* u-law encoding */ | ||
89 | #define AUDIO_ENCODING_ALAW (2) /* A-law encoding */ | ||
90 | #define AUDIO_ENCODING_LINEAR (3) /* Linear PCM encoding */ | ||
91 | #define AUDIO_ENCODING_FLOAT (4) /* IEEE float (-1. <-> +1.) */ | ||
92 | #define AUDIO_ENCODING_DVI (104) /* DVI ADPCM */ | ||
93 | #define AUDIO_ENCODING_LINEAR8 (105) /* 8 bit UNSIGNED */ | ||
94 | #define AUDIO_ENCODING_LINEARLE (106) /* Linear PCM LE encoding */ | ||
95 | |||
96 | /* | ||
97 | * These ranges apply to record, play, and monitor gain values | ||
98 | */ | ||
99 | #define AUDIO_MIN_GAIN (0) /* minimum gain value */ | ||
100 | #define AUDIO_MAX_GAIN (255) /* maximum gain value */ | ||
101 | |||
102 | /* | ||
103 | * These values apply to the balance field to adjust channel gain values | ||
104 | */ | ||
105 | #define AUDIO_LEFT_BALANCE (0) /* left channel only */ | ||
106 | #define AUDIO_MID_BALANCE (32) /* equal left/right channel */ | ||
107 | #define AUDIO_RIGHT_BALANCE (64) /* right channel only */ | ||
108 | #define AUDIO_BALANCE_SHIFT (3) | ||
109 | |||
110 | /* | ||
111 | * Generic minimum/maximum limits for number of channels, both modes | ||
112 | */ | ||
113 | #define AUDIO_MIN_PLAY_CHANNELS (1) | ||
114 | #define AUDIO_MAX_PLAY_CHANNELS (4) | ||
115 | #define AUDIO_MIN_REC_CHANNELS (1) | ||
116 | #define AUDIO_MAX_REC_CHANNELS (4) | ||
117 | |||
118 | /* | ||
119 | * Generic minimum/maximum limits for sample precision | ||
120 | */ | ||
121 | #define AUDIO_MIN_PLAY_PRECISION (8) | ||
122 | #define AUDIO_MAX_PLAY_PRECISION (32) | ||
123 | #define AUDIO_MIN_REC_PRECISION (8) | ||
124 | #define AUDIO_MAX_REC_PRECISION (32) | ||
125 | |||
126 | /* | ||
127 | * Define some convenient names for typical audio ports | ||
128 | */ | ||
129 | /* | ||
130 | * output ports (several may be enabled simultaneously) | ||
131 | */ | ||
132 | #define AUDIO_SPEAKER 0x01 /* output to built-in speaker */ | ||
133 | #define AUDIO_HEADPHONE 0x02 /* output to headphone jack */ | ||
134 | #define AUDIO_LINE_OUT 0x04 /* output to line out */ | ||
135 | |||
136 | /* | ||
137 | * input ports (usually only one at a time) | ||
138 | */ | ||
139 | #define AUDIO_MICROPHONE 0x01 /* input from microphone */ | ||
140 | #define AUDIO_LINE_IN 0x02 /* input from line in */ | ||
141 | #define AUDIO_CD 0x04 /* input from on-board CD inputs */ | ||
142 | #define AUDIO_INTERNAL_CD_IN AUDIO_CD /* input from internal CDROM */ | ||
143 | #define AUDIO_ANALOG_LOOPBACK 0x40 /* input from output */ | ||
144 | |||
145 | |||
146 | /* | ||
147 | * This macro initializes an audio_info structure to 'harmless' values. | ||
148 | * Note that (~0) might not be a harmless value for a flag that was | ||
149 | * a signed int. | ||
150 | */ | ||
151 | #define AUDIO_INITINFO(i) { \ | ||
152 | unsigned int *__x__; \ | ||
153 | for (__x__ = (unsigned int *)(i); \ | ||
154 | (char *) __x__ < (((char *)(i)) + sizeof (audio_info_t)); \ | ||
155 | *__x__++ = ~0); \ | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * These allow testing for what the user wants to set | ||
160 | */ | ||
161 | #define AUD_INITVALUE (~0) | ||
162 | #define Modify(X) ((unsigned int)(X) != AUD_INITVALUE) | ||
163 | #define Modifys(X) ((X) != (unsigned short)AUD_INITVALUE) | ||
164 | #define Modifyc(X) ((X) != (unsigned char)AUD_INITVALUE) | ||
165 | |||
166 | /* | ||
167 | * Parameter for the AUDIO_GETDEV ioctl to determine current | ||
168 | * audio devices. | ||
169 | */ | ||
170 | #define MAX_AUDIO_DEV_LEN (16) | ||
171 | typedef struct audio_device { | ||
172 | char name[MAX_AUDIO_DEV_LEN]; | ||
173 | char version[MAX_AUDIO_DEV_LEN]; | ||
174 | char config[MAX_AUDIO_DEV_LEN]; | ||
175 | } audio_device_t; | ||
176 | |||
177 | |||
178 | /* | ||
179 | * Ioctl calls for the audio device. | ||
180 | */ | ||
181 | |||
182 | /* | ||
183 | * AUDIO_GETINFO retrieves the current state of the audio device. | ||
184 | * | ||
185 | * AUDIO_SETINFO copies all fields of the audio_info structure whose | ||
186 | * values are not set to the initialized value (-1) to the device state. | ||
187 | * It performs an implicit AUDIO_GETINFO to return the new state of the | ||
188 | * device. Note that the record.samples and play.samples fields are set | ||
189 | * to the last value before the AUDIO_SETINFO took effect. This allows | ||
190 | * an application to reset the counters while atomically retrieving the | ||
191 | * last value. | ||
192 | * | ||
193 | * AUDIO_DRAIN suspends the calling process until the write buffers are | ||
194 | * empty. | ||
195 | * | ||
196 | * AUDIO_GETDEV returns a structure of type audio_device_t which contains | ||
197 | * three strings. The string "name" is a short identifying string (for | ||
198 | * example, the SBus Fcode name string), the string "version" identifies | ||
199 | * the current version of the device, and the "config" string identifies | ||
200 | * the specific configuration of the audio stream. All fields are | ||
201 | * device-dependent -- see the device specific manual pages for details. | ||
202 | * | ||
203 | * AUDIO_GETDEV_SUNOS returns a number which is an audio device defined | ||
204 | * herein (making it not too portable) | ||
205 | * | ||
206 | * AUDIO_FLUSH stops all playback and recording, clears all queued buffers, | ||
207 | * resets error counters, and restarts recording and playback as appropriate | ||
208 | * for the current sampling mode. | ||
209 | */ | ||
210 | #define AUDIO_GETINFO _IOR('A', 1, audio_info_t) | ||
211 | #define AUDIO_SETINFO _IOWR('A', 2, audio_info_t) | ||
212 | #define AUDIO_DRAIN _IO('A', 3) | ||
213 | #define AUDIO_GETDEV _IOR('A', 4, audio_device_t) | ||
214 | #define AUDIO_GETDEV_SUNOS _IOR('A', 4, int) | ||
215 | #define AUDIO_FLUSH _IO('A', 5) | ||
216 | |||
217 | /* Define possible audio hardware configurations for | ||
218 | * old SunOS-style AUDIO_GETDEV ioctl */ | ||
219 | #define AUDIO_DEV_UNKNOWN (0) /* not defined */ | ||
220 | #define AUDIO_DEV_AMD (1) /* audioamd device */ | ||
221 | #define AUDIO_DEV_SPEAKERBOX (2) /* dbri device with speakerbox */ | ||
222 | #define AUDIO_DEV_CODEC (3) /* dbri device (internal speaker) */ | ||
223 | #define AUDIO_DEV_CS4231 (5) /* cs4231 device */ | ||
224 | |||
225 | /* | ||
226 | * The following ioctl sets the audio device into an internal loopback mode, | ||
227 | * if the hardware supports this. The argument is TRUE to set loopback, | ||
228 | * FALSE to reset to normal operation. If the hardware does not support | ||
229 | * internal loopback, the ioctl should fail with EINVAL. | ||
230 | * Causes ADC data to be digitally mixed in and sent to the DAC. | ||
231 | */ | ||
232 | #define AUDIO_DIAG_LOOPBACK _IOW('A', 101, int) | ||
233 | |||
234 | #endif /* _AUDIOIO_H_ */ | ||
diff --git a/include/asm-sparc/kbio.h b/include/asm-sparc/kbio.h deleted file mode 100644 index 3cf496bdf399..000000000000 --- a/include/asm-sparc/kbio.h +++ /dev/null | |||
@@ -1,56 +0,0 @@ | |||
1 | #ifndef __LINUX_KBIO_H | ||
2 | #define __LINUX_KBIO_H | ||
3 | |||
4 | /* Return keyboard type */ | ||
5 | #define KIOCTYPE _IOR('k', 9, int) | ||
6 | /* Return Keyboard layout */ | ||
7 | #define KIOCLAYOUT _IOR('k', 20, int) | ||
8 | |||
9 | enum { | ||
10 | TR_NONE, | ||
11 | TR_ASCII, /* keyboard is in regular state */ | ||
12 | TR_EVENT, /* keystrokes sent as firm events */ | ||
13 | TR_UNTRANS_EVENT /* EVENT+up and down+no translation */ | ||
14 | }; | ||
15 | |||
16 | /* Return the current keyboard translation */ | ||
17 | #define KIOCGTRANS _IOR('k', 5, int) | ||
18 | /* Set the keyboard translation */ | ||
19 | #define KIOCTRANS _IOW('k', 0, int) | ||
20 | |||
21 | /* Send a keyboard command */ | ||
22 | #define KIOCCMD _IOW('k', 8, int) | ||
23 | |||
24 | /* Return if keystrokes are being sent to /dev/kbd */ | ||
25 | |||
26 | /* Set routing of keystrokes to /dev/kbd */ | ||
27 | #define KIOCSDIRECT _IOW('k', 10, int) | ||
28 | |||
29 | /* Set keyboard leds */ | ||
30 | #define KIOCSLED _IOW('k', 14, unsigned char) | ||
31 | |||
32 | /* Get keyboard leds */ | ||
33 | #define KIOCGLED _IOR('k', 15, unsigned char) | ||
34 | |||
35 | /* Used by KIOC[GS]RATE */ | ||
36 | struct kbd_rate { | ||
37 | unsigned char delay; /* Delay in Hz before first repeat. */ | ||
38 | unsigned char rate; /* In characters per second (0..50). */ | ||
39 | }; | ||
40 | |||
41 | /* Set keyboard rate */ | ||
42 | #define KIOCSRATE _IOW('k', 40, struct kbd_rate) | ||
43 | |||
44 | /* Get keyboard rate */ | ||
45 | #define KIOCGRATE _IOW('k', 41, struct kbd_rate) | ||
46 | |||
47 | /* Top bit records if the key is up or down */ | ||
48 | #define KBD_UP 0x80 | ||
49 | |||
50 | /* Usable information */ | ||
51 | #define KBD_KEYMASK 0x7f | ||
52 | |||
53 | /* All keys up */ | ||
54 | #define KBD_IDLE 0x75 | ||
55 | |||
56 | #endif /* __LINUX_KBIO_H */ | ||
diff --git a/include/asm-sparc/termios.h b/include/asm-sparc/termios.h index 0a8ad4cac125..d05f83c80989 100644 --- a/include/asm-sparc/termios.h +++ b/include/asm-sparc/termios.h | |||
@@ -38,15 +38,6 @@ struct sunos_ttysize { | |||
38 | int st_columns; /* Columns on the terminal */ | 38 | int st_columns; /* Columns on the terminal */ |
39 | }; | 39 | }; |
40 | 40 | ||
41 | /* Used for packet mode */ | ||
42 | #define TIOCPKT_DATA 0 | ||
43 | #define TIOCPKT_FLUSHREAD 1 | ||
44 | #define TIOCPKT_FLUSHWRITE 2 | ||
45 | #define TIOCPKT_STOP 4 | ||
46 | #define TIOCPKT_START 8 | ||
47 | #define TIOCPKT_NOSTOP 16 | ||
48 | #define TIOCPKT_DOSTOP 32 | ||
49 | |||
50 | struct winsize { | 41 | struct winsize { |
51 | unsigned short ws_row; | 42 | unsigned short ws_row; |
52 | unsigned short ws_col; | 43 | unsigned short ws_col; |
diff --git a/include/asm-sparc/vuid_event.h b/include/asm-sparc/vuid_event.h deleted file mode 100644 index 7781e9f2fdd3..000000000000 --- a/include/asm-sparc/vuid_event.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* SunOS Virtual User Input Device (VUID) compatibility */ | ||
2 | |||
3 | |||
4 | typedef struct firm_event { | ||
5 | unsigned short id; /* tag for this event */ | ||
6 | unsigned char pair_type; /* unused by X11 */ | ||
7 | unsigned char pair; /* unused by X11 */ | ||
8 | int value; /* VKEY_UP, VKEY_DOWN or delta */ | ||
9 | struct timeval time; | ||
10 | } Firm_event; | ||
11 | |||
12 | enum { | ||
13 | FE_PAIR_NONE, | ||
14 | FE_PAIR_SET, | ||
15 | FE_PAIR_DELTA, | ||
16 | FE_PAIR_ABSOLUTE | ||
17 | }; | ||
18 | |||
19 | /* VUID stream formats */ | ||
20 | #define VUID_NATIVE 0 /* Native byte stream format */ | ||
21 | #define VUID_FIRM_EVENT 1 /* send firm_event structures */ | ||
22 | |||
23 | /* ioctls */ | ||
24 | /* Set input device byte stream format (any of VUID_{NATIVE,FIRM_EVENT}) */ | ||
25 | #define VUIDSFORMAT _IOW('v', 1, int) | ||
26 | /* Retrieve input device byte stream format */ | ||
27 | #define VUIDGFORMAT _IOR('v', 2, int) | ||
28 | |||
29 | /* Possible tag values */ | ||
30 | /* mouse buttons: */ | ||
31 | #define MS_LEFT 0x7f20 | ||
32 | #define MS_MIDDLE 0x7f21 | ||
33 | #define MS_RIGHT 0x7f22 | ||
34 | /* motion: */ | ||
35 | #define LOC_X_DELTA 0x7f80 | ||
36 | #define LOC_Y_DELTA 0x7f81 | ||
37 | #define LOC_X_ABSOLUTE 0x7f82 /* X compat, unsupported */ | ||
38 | #define LOC_Y_ABSOLUTE 0x7f83 /* X compat, unsupported */ | ||
39 | |||
40 | #define VKEY_UP 0 | ||
41 | #define VKEY_DOWN 1 | ||
diff --git a/include/asm-sparc64/audioio.h b/include/asm-sparc64/audioio.h deleted file mode 100644 index cf16173f521b..000000000000 --- a/include/asm-sparc64/audioio.h +++ /dev/null | |||
@@ -1,234 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-sparc/audioio.h | ||
3 | * | ||
4 | * Sparc Audio Midlayer | ||
5 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _AUDIOIO_H_ | ||
9 | #define _AUDIOIO_H_ | ||
10 | |||
11 | /* | ||
12 | * SunOS/Solaris /dev/audio interface | ||
13 | */ | ||
14 | |||
15 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/time.h> | ||
18 | #include <linux/ioctl.h> | ||
19 | #endif | ||
20 | |||
21 | /* | ||
22 | * This structure contains state information for audio device IO streams. | ||
23 | */ | ||
24 | typedef struct audio_prinfo { | ||
25 | /* | ||
26 | * The following values describe the audio data encoding. | ||
27 | */ | ||
28 | unsigned int sample_rate; /* samples per second */ | ||
29 | unsigned int channels; /* number of interleaved channels */ | ||
30 | unsigned int precision; /* bit-width of each sample */ | ||
31 | unsigned int encoding; /* data encoding method */ | ||
32 | |||
33 | /* | ||
34 | * The following values control audio device configuration | ||
35 | */ | ||
36 | unsigned int gain; /* gain level: 0 - 255 */ | ||
37 | unsigned int port; /* selected I/O port (see below) */ | ||
38 | unsigned int avail_ports; /* available I/O ports (see below) */ | ||
39 | unsigned int _xxx[2]; /* Reserved for future use */ | ||
40 | |||
41 | unsigned int buffer_size; /* I/O buffer size */ | ||
42 | |||
43 | /* | ||
44 | * The following values describe driver state | ||
45 | */ | ||
46 | unsigned int samples; /* number of samples converted */ | ||
47 | unsigned int eof; /* End Of File counter (play only) */ | ||
48 | |||
49 | unsigned char pause; /* non-zero for pause, zero to resume */ | ||
50 | unsigned char error; /* non-zero if overflow/underflow */ | ||
51 | unsigned char waiting; /* non-zero if a process wants access */ | ||
52 | unsigned char balance; /* stereo channel balance */ | ||
53 | |||
54 | unsigned short minordev; | ||
55 | |||
56 | /* | ||
57 | * The following values are read-only state flags | ||
58 | */ | ||
59 | unsigned char open; /* non-zero if open access permitted */ | ||
60 | unsigned char active; /* non-zero if I/O is active */ | ||
61 | } audio_prinfo_t; | ||
62 | |||
63 | |||
64 | /* | ||
65 | * This structure describes the current state of the audio device. | ||
66 | */ | ||
67 | typedef struct audio_info { | ||
68 | /* | ||
69 | * Per-stream information | ||
70 | */ | ||
71 | audio_prinfo_t play; /* output status information */ | ||
72 | audio_prinfo_t record; /* input status information */ | ||
73 | |||
74 | /* | ||
75 | * Per-unit/channel information | ||
76 | */ | ||
77 | unsigned int monitor_gain; /* input to output mix: 0 - 255 */ | ||
78 | unsigned char output_muted; /* non-zero if output is muted */ | ||
79 | unsigned char _xxx[3]; /* Reserved for future use */ | ||
80 | unsigned int _yyy[3]; /* Reserved for future use */ | ||
81 | } audio_info_t; | ||
82 | |||
83 | |||
84 | /* | ||
85 | * Audio encoding types | ||
86 | */ | ||
87 | #define AUDIO_ENCODING_NONE (0) /* no encoding assigned */ | ||
88 | #define AUDIO_ENCODING_ULAW (1) /* u-law encoding */ | ||
89 | #define AUDIO_ENCODING_ALAW (2) /* A-law encoding */ | ||
90 | #define AUDIO_ENCODING_LINEAR (3) /* Linear PCM encoding */ | ||
91 | #define AUDIO_ENCODING_FLOAT (4) /* IEEE float (-1. <-> +1.) */ | ||
92 | #define AUDIO_ENCODING_DVI (104) /* DVI ADPCM */ | ||
93 | #define AUDIO_ENCODING_LINEAR8 (105) /* 8 bit UNSIGNED */ | ||
94 | #define AUDIO_ENCODING_LINEARLE (106) /* Linear PCM LE encoding */ | ||
95 | |||
96 | /* | ||
97 | * These ranges apply to record, play, and monitor gain values | ||
98 | */ | ||
99 | #define AUDIO_MIN_GAIN (0) /* minimum gain value */ | ||
100 | #define AUDIO_MAX_GAIN (255) /* maximum gain value */ | ||
101 | |||
102 | /* | ||
103 | * These values apply to the balance field to adjust channel gain values | ||
104 | */ | ||
105 | #define AUDIO_LEFT_BALANCE (0) /* left channel only */ | ||
106 | #define AUDIO_MID_BALANCE (32) /* equal left/right channel */ | ||
107 | #define AUDIO_RIGHT_BALANCE (64) /* right channel only */ | ||
108 | #define AUDIO_BALANCE_SHIFT (3) | ||
109 | |||
110 | /* | ||
111 | * Generic minimum/maximum limits for number of channels, both modes | ||
112 | */ | ||
113 | #define AUDIO_MIN_PLAY_CHANNELS (1) | ||
114 | #define AUDIO_MAX_PLAY_CHANNELS (4) | ||
115 | #define AUDIO_MIN_REC_CHANNELS (1) | ||
116 | #define AUDIO_MAX_REC_CHANNELS (4) | ||
117 | |||
118 | /* | ||
119 | * Generic minimum/maximum limits for sample precision | ||
120 | */ | ||
121 | #define AUDIO_MIN_PLAY_PRECISION (8) | ||
122 | #define AUDIO_MAX_PLAY_PRECISION (32) | ||
123 | #define AUDIO_MIN_REC_PRECISION (8) | ||
124 | #define AUDIO_MAX_REC_PRECISION (32) | ||
125 | |||
126 | /* | ||
127 | * Define some convenient names for typical audio ports | ||
128 | */ | ||
129 | /* | ||
130 | * output ports (several may be enabled simultaneously) | ||
131 | */ | ||
132 | #define AUDIO_SPEAKER 0x01 /* output to built-in speaker */ | ||
133 | #define AUDIO_HEADPHONE 0x02 /* output to headphone jack */ | ||
134 | #define AUDIO_LINE_OUT 0x04 /* output to line out */ | ||
135 | |||
136 | /* | ||
137 | * input ports (usually only one at a time) | ||
138 | */ | ||
139 | #define AUDIO_MICROPHONE 0x01 /* input from microphone */ | ||
140 | #define AUDIO_LINE_IN 0x02 /* input from line in */ | ||
141 | #define AUDIO_CD 0x04 /* input from on-board CD inputs */ | ||
142 | #define AUDIO_INTERNAL_CD_IN AUDIO_CD /* input from internal CDROM */ | ||
143 | #define AUDIO_ANALOG_LOOPBACK 0x40 /* input from output */ | ||
144 | |||
145 | |||
146 | /* | ||
147 | * This macro initializes an audio_info structure to 'harmless' values. | ||
148 | * Note that (~0) might not be a harmless value for a flag that was | ||
149 | * a signed int. | ||
150 | */ | ||
151 | #define AUDIO_INITINFO(i) { \ | ||
152 | unsigned int *__x__; \ | ||
153 | for (__x__ = (unsigned int *)(i); \ | ||
154 | (char *) __x__ < (((char *)(i)) + sizeof (audio_info_t)); \ | ||
155 | *__x__++ = ~0); \ | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * These allow testing for what the user wants to set | ||
160 | */ | ||
161 | #define AUD_INITVALUE (~0) | ||
162 | #define Modify(X) ((unsigned int)(X) != AUD_INITVALUE) | ||
163 | #define Modifys(X) ((X) != (unsigned short)AUD_INITVALUE) | ||
164 | #define Modifyc(X) ((X) != (unsigned char)AUD_INITVALUE) | ||
165 | |||
166 | /* | ||
167 | * Parameter for the AUDIO_GETDEV ioctl to determine current | ||
168 | * audio devices. | ||
169 | */ | ||
170 | #define MAX_AUDIO_DEV_LEN (16) | ||
171 | typedef struct audio_device { | ||
172 | char name[MAX_AUDIO_DEV_LEN]; | ||
173 | char version[MAX_AUDIO_DEV_LEN]; | ||
174 | char config[MAX_AUDIO_DEV_LEN]; | ||
175 | } audio_device_t; | ||
176 | |||
177 | |||
178 | /* | ||
179 | * Ioctl calls for the audio device. | ||
180 | */ | ||
181 | |||
182 | /* | ||
183 | * AUDIO_GETINFO retrieves the current state of the audio device. | ||
184 | * | ||
185 | * AUDIO_SETINFO copies all fields of the audio_info structure whose | ||
186 | * values are not set to the initialized value (-1) to the device state. | ||
187 | * It performs an implicit AUDIO_GETINFO to return the new state of the | ||
188 | * device. Note that the record.samples and play.samples fields are set | ||
189 | * to the last value before the AUDIO_SETINFO took effect. This allows | ||
190 | * an application to reset the counters while atomically retrieving the | ||
191 | * last value. | ||
192 | * | ||
193 | * AUDIO_DRAIN suspends the calling process until the write buffers are | ||
194 | * empty. | ||
195 | * | ||
196 | * AUDIO_GETDEV returns a structure of type audio_device_t which contains | ||
197 | * three strings. The string "name" is a short identifying string (for | ||
198 | * example, the SBus Fcode name string), the string "version" identifies | ||
199 | * the current version of the device, and the "config" string identifies | ||
200 | * the specific configuration of the audio stream. All fields are | ||
201 | * device-dependent -- see the device specific manual pages for details. | ||
202 | * | ||
203 | * AUDIO_GETDEV_SUNOS returns a number which is an audio device defined | ||
204 | * herein (making it not too portable) | ||
205 | * | ||
206 | * AUDIO_FLUSH stops all playback and recording, clears all queued buffers, | ||
207 | * resets error counters, and restarts recording and playback as appropriate | ||
208 | * for the current sampling mode. | ||
209 | */ | ||
210 | #define AUDIO_GETINFO _IOR('A', 1, audio_info_t) | ||
211 | #define AUDIO_SETINFO _IOWR('A', 2, audio_info_t) | ||
212 | #define AUDIO_DRAIN _IO('A', 3) | ||
213 | #define AUDIO_GETDEV _IOR('A', 4, audio_device_t) | ||
214 | #define AUDIO_GETDEV_SUNOS _IOR('A', 4, int) | ||
215 | #define AUDIO_FLUSH _IO('A', 5) | ||
216 | |||
217 | /* Define possible audio hardware configurations for | ||
218 | * old SunOS-style AUDIO_GETDEV ioctl */ | ||
219 | #define AUDIO_DEV_UNKNOWN (0) /* not defined */ | ||
220 | #define AUDIO_DEV_AMD (1) /* audioamd device */ | ||
221 | #define AUDIO_DEV_SPEAKERBOX (2) /* dbri device with speakerbox */ | ||
222 | #define AUDIO_DEV_CODEC (3) /* dbri device (internal speaker) */ | ||
223 | #define AUDIO_DEV_CS4231 (5) /* cs4231 device */ | ||
224 | |||
225 | /* | ||
226 | * The following ioctl sets the audio device into an internal loopback mode, | ||
227 | * if the hardware supports this. The argument is TRUE to set loopback, | ||
228 | * FALSE to reset to normal operation. If the hardware does not support | ||
229 | * internal loopback, the ioctl should fail with EINVAL. | ||
230 | * Causes ADC data to be digitally mixed in and sent to the DAC. | ||
231 | */ | ||
232 | #define AUDIO_DIAG_LOOPBACK _IOW('A', 101, int) | ||
233 | |||
234 | #endif /* _AUDIOIO_H_ */ | ||
diff --git a/include/asm-sparc64/ebus.h b/include/asm-sparc64/ebus.h index 543e4e500a72..7a408a030f52 100644 --- a/include/asm-sparc64/ebus.h +++ b/include/asm-sparc64/ebus.h | |||
@@ -79,6 +79,7 @@ extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, | |||
79 | size_t len); | 79 | size_t len); |
80 | extern void ebus_dma_prepare(struct ebus_dma_info *p, int write); | 80 | extern void ebus_dma_prepare(struct ebus_dma_info *p, int write); |
81 | extern unsigned int ebus_dma_residue(struct ebus_dma_info *p); | 81 | extern unsigned int ebus_dma_residue(struct ebus_dma_info *p); |
82 | extern unsigned int ebus_dma_addr(struct ebus_dma_info *p); | ||
82 | extern void ebus_dma_enable(struct ebus_dma_info *p, int on); | 83 | extern void ebus_dma_enable(struct ebus_dma_info *p, int on); |
83 | 84 | ||
84 | extern struct linux_ebus *ebus_chain; | 85 | extern struct linux_ebus *ebus_chain; |
diff --git a/include/asm-sparc64/kbio.h b/include/asm-sparc64/kbio.h deleted file mode 100644 index 3cf496bdf399..000000000000 --- a/include/asm-sparc64/kbio.h +++ /dev/null | |||
@@ -1,56 +0,0 @@ | |||
1 | #ifndef __LINUX_KBIO_H | ||
2 | #define __LINUX_KBIO_H | ||
3 | |||
4 | /* Return keyboard type */ | ||
5 | #define KIOCTYPE _IOR('k', 9, int) | ||
6 | /* Return Keyboard layout */ | ||
7 | #define KIOCLAYOUT _IOR('k', 20, int) | ||
8 | |||
9 | enum { | ||
10 | TR_NONE, | ||
11 | TR_ASCII, /* keyboard is in regular state */ | ||
12 | TR_EVENT, /* keystrokes sent as firm events */ | ||
13 | TR_UNTRANS_EVENT /* EVENT+up and down+no translation */ | ||
14 | }; | ||
15 | |||
16 | /* Return the current keyboard translation */ | ||
17 | #define KIOCGTRANS _IOR('k', 5, int) | ||
18 | /* Set the keyboard translation */ | ||
19 | #define KIOCTRANS _IOW('k', 0, int) | ||
20 | |||
21 | /* Send a keyboard command */ | ||
22 | #define KIOCCMD _IOW('k', 8, int) | ||
23 | |||
24 | /* Return if keystrokes are being sent to /dev/kbd */ | ||
25 | |||
26 | /* Set routing of keystrokes to /dev/kbd */ | ||
27 | #define KIOCSDIRECT _IOW('k', 10, int) | ||
28 | |||
29 | /* Set keyboard leds */ | ||
30 | #define KIOCSLED _IOW('k', 14, unsigned char) | ||
31 | |||
32 | /* Get keyboard leds */ | ||
33 | #define KIOCGLED _IOR('k', 15, unsigned char) | ||
34 | |||
35 | /* Used by KIOC[GS]RATE */ | ||
36 | struct kbd_rate { | ||
37 | unsigned char delay; /* Delay in Hz before first repeat. */ | ||
38 | unsigned char rate; /* In characters per second (0..50). */ | ||
39 | }; | ||
40 | |||
41 | /* Set keyboard rate */ | ||
42 | #define KIOCSRATE _IOW('k', 40, struct kbd_rate) | ||
43 | |||
44 | /* Get keyboard rate */ | ||
45 | #define KIOCGRATE _IOW('k', 41, struct kbd_rate) | ||
46 | |||
47 | /* Top bit records if the key is up or down */ | ||
48 | #define KBD_UP 0x80 | ||
49 | |||
50 | /* Usable information */ | ||
51 | #define KBD_KEYMASK 0x7f | ||
52 | |||
53 | /* All keys up */ | ||
54 | #define KBD_IDLE 0x75 | ||
55 | |||
56 | #endif /* __LINUX_KBIO_H */ | ||
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 87c43c67866e..08ba72d7722c 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h | |||
@@ -87,37 +87,35 @@ extern void __flush_tlb_mm(unsigned long, unsigned long); | |||
87 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) | 87 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) |
88 | { | 88 | { |
89 | unsigned long ctx_valid; | 89 | unsigned long ctx_valid; |
90 | int cpu; | ||
90 | 91 | ||
92 | /* Note: page_table_lock is used here to serialize switch_mm | ||
93 | * and activate_mm, and their calls to get_new_mmu_context. | ||
94 | * This use of page_table_lock is unrelated to its other uses. | ||
95 | */ | ||
91 | spin_lock(&mm->page_table_lock); | 96 | spin_lock(&mm->page_table_lock); |
92 | if (CTX_VALID(mm->context)) | 97 | ctx_valid = CTX_VALID(mm->context); |
93 | ctx_valid = 1; | 98 | if (!ctx_valid) |
94 | else | 99 | get_new_mmu_context(mm); |
95 | ctx_valid = 0; | 100 | spin_unlock(&mm->page_table_lock); |
96 | 101 | ||
97 | if (!ctx_valid || (old_mm != mm)) { | 102 | if (!ctx_valid || (old_mm != mm)) { |
98 | if (!ctx_valid) | ||
99 | get_new_mmu_context(mm); | ||
100 | |||
101 | load_secondary_context(mm); | 103 | load_secondary_context(mm); |
102 | reload_tlbmiss_state(tsk, mm); | 104 | reload_tlbmiss_state(tsk, mm); |
103 | } | 105 | } |
104 | 106 | ||
105 | { | 107 | /* Even if (mm == old_mm) we _must_ check |
106 | int cpu = smp_processor_id(); | 108 | * the cpu_vm_mask. If we do not we could |
107 | 109 | * corrupt the TLB state because of how | |
108 | /* Even if (mm == old_mm) we _must_ check | 110 | * smp_flush_tlb_{page,range,mm} on sparc64 |
109 | * the cpu_vm_mask. If we do not we could | 111 | * and lazy tlb switches work. -DaveM |
110 | * corrupt the TLB state because of how | 112 | */ |
111 | * smp_flush_tlb_{page,range,mm} on sparc64 | 113 | cpu = smp_processor_id(); |
112 | * and lazy tlb switches work. -DaveM | 114 | if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { |
113 | */ | 115 | cpu_set(cpu, mm->cpu_vm_mask); |
114 | if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { | 116 | __flush_tlb_mm(CTX_HWBITS(mm->context), |
115 | cpu_set(cpu, mm->cpu_vm_mask); | 117 | SECONDARY_CONTEXT); |
116 | __flush_tlb_mm(CTX_HWBITS(mm->context), | ||
117 | SECONDARY_CONTEXT); | ||
118 | } | ||
119 | } | 118 | } |
120 | spin_unlock(&mm->page_table_lock); | ||
121 | } | 119 | } |
122 | 120 | ||
123 | #define deactivate_mm(tsk,mm) do { } while (0) | 121 | #define deactivate_mm(tsk,mm) do { } while (0) |
@@ -127,6 +125,10 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm | |||
127 | { | 125 | { |
128 | int cpu; | 126 | int cpu; |
129 | 127 | ||
128 | /* Note: page_table_lock is used here to serialize switch_mm | ||
129 | * and activate_mm, and their calls to get_new_mmu_context. | ||
130 | * This use of page_table_lock is unrelated to its other uses. | ||
131 | */ | ||
130 | spin_lock(&mm->page_table_lock); | 132 | spin_lock(&mm->page_table_lock); |
131 | if (!CTX_VALID(mm->context)) | 133 | if (!CTX_VALID(mm->context)) |
132 | get_new_mmu_context(mm); | 134 | get_new_mmu_context(mm); |
diff --git a/include/asm-sparc64/termios.h b/include/asm-sparc64/termios.h index 9777a9cca88a..ee26a071c677 100644 --- a/include/asm-sparc64/termios.h +++ b/include/asm-sparc64/termios.h | |||
@@ -38,15 +38,6 @@ struct sunos_ttysize { | |||
38 | int st_columns; /* Columns on the terminal */ | 38 | int st_columns; /* Columns on the terminal */ |
39 | }; | 39 | }; |
40 | 40 | ||
41 | /* Used for packet mode */ | ||
42 | #define TIOCPKT_DATA 0 | ||
43 | #define TIOCPKT_FLUSHREAD 1 | ||
44 | #define TIOCPKT_FLUSHWRITE 2 | ||
45 | #define TIOCPKT_STOP 4 | ||
46 | #define TIOCPKT_START 8 | ||
47 | #define TIOCPKT_NOSTOP 16 | ||
48 | #define TIOCPKT_DOSTOP 32 | ||
49 | |||
50 | struct winsize { | 41 | struct winsize { |
51 | unsigned short ws_row; | 42 | unsigned short ws_row; |
52 | unsigned short ws_col; | 43 | unsigned short ws_col; |
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h index 66138d959df5..61c01882b562 100644 --- a/include/asm-sparc64/tlb.h +++ b/include/asm-sparc64/tlb.h | |||
@@ -58,11 +58,9 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i | |||
58 | static inline void tlb_flush_mmu(struct mmu_gather *mp) | 58 | static inline void tlb_flush_mmu(struct mmu_gather *mp) |
59 | { | 59 | { |
60 | if (mp->need_flush) { | 60 | if (mp->need_flush) { |
61 | free_pages_and_swap_cache(mp->pages, mp->pages_nr); | ||
62 | mp->pages_nr = 0; | ||
61 | mp->need_flush = 0; | 63 | mp->need_flush = 0; |
62 | if (!tlb_fast_mode(mp)) { | ||
63 | free_pages_and_swap_cache(mp->pages, mp->pages_nr); | ||
64 | mp->pages_nr = 0; | ||
65 | } | ||
66 | } | 64 | } |
67 | 65 | ||
68 | } | 66 | } |
@@ -78,11 +76,9 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un | |||
78 | { | 76 | { |
79 | tlb_flush_mmu(mp); | 77 | tlb_flush_mmu(mp); |
80 | 78 | ||
81 | if (mp->fullmm) { | 79 | if (mp->fullmm) |
82 | if (CTX_VALID(mp->mm->context)) | ||
83 | do_flush_tlb_mm(mp->mm); | ||
84 | mp->fullmm = 0; | 80 | mp->fullmm = 0; |
85 | } else | 81 | else |
86 | flush_tlb_pending(); | 82 | flush_tlb_pending(); |
87 | 83 | ||
88 | /* keep the page table cache within bounds */ | 84 | /* keep the page table cache within bounds */ |
@@ -93,11 +89,11 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un | |||
93 | 89 | ||
94 | static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page) | 90 | static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page) |
95 | { | 91 | { |
96 | mp->need_flush = 1; | ||
97 | if (tlb_fast_mode(mp)) { | 92 | if (tlb_fast_mode(mp)) { |
98 | free_page_and_swap_cache(page); | 93 | free_page_and_swap_cache(page); |
99 | return; | 94 | return; |
100 | } | 95 | } |
96 | mp->need_flush = 1; | ||
101 | mp->pages[mp->pages_nr++] = page; | 97 | mp->pages[mp->pages_nr++] = page; |
102 | if (mp->pages_nr >= FREE_PTE_NR) | 98 | if (mp->pages_nr >= FREE_PTE_NR) |
103 | tlb_flush_mmu(mp); | 99 | tlb_flush_mmu(mp); |
diff --git a/include/asm-sparc64/vuid_event.h b/include/asm-sparc64/vuid_event.h deleted file mode 100644 index 9ef4d17ad08f..000000000000 --- a/include/asm-sparc64/vuid_event.h +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* SunOS Virtual User Input Device (VUID) compatibility */ | ||
2 | |||
3 | typedef struct firm_event { | ||
4 | unsigned short id; /* tag for this event */ | ||
5 | unsigned char pair_type; /* unused by X11 */ | ||
6 | unsigned char pair; /* unused by X11 */ | ||
7 | int value; /* VKEY_UP, VKEY_DOWN or delta */ | ||
8 | struct timeval time; | ||
9 | } Firm_event; | ||
10 | |||
11 | enum { | ||
12 | FE_PAIR_NONE, | ||
13 | FE_PAIR_SET, | ||
14 | FE_PAIR_DELTA, | ||
15 | FE_PAIR_ABSOLUTE | ||
16 | }; | ||
17 | |||
18 | /* VUID stream formats */ | ||
19 | #define VUID_NATIVE 0 /* Native byte stream format */ | ||
20 | #define VUID_FIRM_EVENT 1 /* send firm_event structures */ | ||
21 | |||
22 | /* ioctls */ | ||
23 | /* Set input device byte stream format (any of VUID_{NATIVE,FIRM_EVENT}) */ | ||
24 | #define VUIDSFORMAT _IOW('v', 1, int) | ||
25 | /* Retrieve input device byte stream format */ | ||
26 | #define VUIDGFORMAT _IOR('v', 2, int) | ||
27 | |||
28 | /* Possible tag values */ | ||
29 | /* mouse buttons: */ | ||
30 | #define MS_LEFT 0x7f20 | ||
31 | #define MS_MIDDLE 0x7f21 | ||
32 | #define MS_RIGHT 0x7f22 | ||
33 | /* motion: */ | ||
34 | #define LOC_X_DELTA 0x7f80 | ||
35 | #define LOC_Y_DELTA 0x7f81 | ||
36 | #define LOC_X_ABSOLUTE 0x7f82 /* X compat, unsupported */ | ||
37 | #define LOC_Y_ABSOLUTE 0x7f83 /* X compat, unsupported */ | ||
38 | |||
39 | #define VKEY_UP 0 | ||
40 | #define VKEY_DOWN 1 | ||
diff --git a/include/linux/acct.h b/include/linux/acct.h index 19f70462b3be..93c5b3cdf951 100644 --- a/include/linux/acct.h +++ b/include/linux/acct.h | |||
@@ -117,12 +117,15 @@ struct acct_v3 | |||
117 | #include <linux/config.h> | 117 | #include <linux/config.h> |
118 | 118 | ||
119 | #ifdef CONFIG_BSD_PROCESS_ACCT | 119 | #ifdef CONFIG_BSD_PROCESS_ACCT |
120 | struct vfsmount; | ||
120 | struct super_block; | 121 | struct super_block; |
122 | extern void acct_auto_close_mnt(struct vfsmount *m); | ||
121 | extern void acct_auto_close(struct super_block *sb); | 123 | extern void acct_auto_close(struct super_block *sb); |
122 | extern void acct_process(long exitcode); | 124 | extern void acct_process(long exitcode); |
123 | extern void acct_update_integrals(struct task_struct *tsk); | 125 | extern void acct_update_integrals(struct task_struct *tsk); |
124 | extern void acct_clear_integrals(struct task_struct *tsk); | 126 | extern void acct_clear_integrals(struct task_struct *tsk); |
125 | #else | 127 | #else |
128 | #define acct_auto_close_mnt(x) do { } while (0) | ||
126 | #define acct_auto_close(x) do { } while (0) | 129 | #define acct_auto_close(x) do { } while (0) |
127 | #define acct_process(x) do { } while (0) | 130 | #define acct_process(x) do { } while (0) |
128 | #define acct_update_integrals(x) do { } while (0) | 131 | #define acct_update_integrals(x) do { } while (0) |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ab04b4f9b0db..46a2ba617595 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -329,6 +329,7 @@ static inline int d_mountpoint(struct dentry *dentry) | |||
329 | } | 329 | } |
330 | 330 | ||
331 | extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *); | 331 | extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *); |
332 | extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int); | ||
332 | extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); | 333 | extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); |
333 | 334 | ||
334 | extern int sysctl_vfs_cache_pressure; | 335 | extern int sysctl_vfs_cache_pressure; |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 9a593ef262ef..1b5f502a4b8f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -104,6 +104,10 @@ extern int dir_notify_enable; | |||
104 | #define MS_MOVE 8192 | 104 | #define MS_MOVE 8192 |
105 | #define MS_REC 16384 | 105 | #define MS_REC 16384 |
106 | #define MS_VERBOSE 32768 | 106 | #define MS_VERBOSE 32768 |
107 | #define MS_UNBINDABLE (1<<17) /* change to unbindable */ | ||
108 | #define MS_PRIVATE (1<<18) /* change to private */ | ||
109 | #define MS_SLAVE (1<<19) /* change to slave */ | ||
110 | #define MS_SHARED (1<<20) /* change to shared */ | ||
107 | #define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ | 111 | #define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ |
108 | #define MS_ACTIVE (1<<30) | 112 | #define MS_ACTIVE (1<<30) |
109 | #define MS_NOUSER (1<<31) | 113 | #define MS_NOUSER (1<<31) |
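
The four propagation flags added above are consumed by mount(2): userspace changes the propagation type of an existing mount by passing one of them (optionally together with MS_REC for a whole subtree), and the source, filesystem type and data arguments are ignored for such a call. A minimal userspace sketch, assuming a libc that does not yet define the new constant (hypothetical example, not part of the patch):

	#include <stdio.h>
	#include <sys/mount.h>

	#ifndef MS_SHARED
	#define MS_SHARED (1<<20)	/* mirrors the definition added above */
	#endif

	int main(void)
	{
		/* mark /mnt as a shared mount; source/fstype/data are ignored */
		if (mount("none", "/mnt", NULL, MS_SHARED, NULL) != 0) {
			perror("mount(MS_SHARED)");
			return 1;
		}
		return 0;
	}
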
@@ -1249,7 +1253,12 @@ extern int unregister_filesystem(struct file_system_type *); | |||
1249 | extern struct vfsmount *kern_mount(struct file_system_type *); | 1253 | extern struct vfsmount *kern_mount(struct file_system_type *); |
1250 | extern int may_umount_tree(struct vfsmount *); | 1254 | extern int may_umount_tree(struct vfsmount *); |
1251 | extern int may_umount(struct vfsmount *); | 1255 | extern int may_umount(struct vfsmount *); |
1256 | extern void umount_tree(struct vfsmount *, int, struct list_head *); | ||
1257 | extern void release_mounts(struct list_head *); | ||
1252 | extern long do_mount(char *, char *, char *, unsigned long, void *); | 1258 | extern long do_mount(char *, char *, char *, unsigned long, void *); |
1259 | extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); | ||
1260 | extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, | ||
1261 | struct vfsmount *); | ||
1253 | 1262 | ||
1254 | extern int vfs_statfs(struct super_block *, struct kstatfs *); | 1263 | extern int vfs_statfs(struct super_block *, struct kstatfs *); |
1255 | 1264 | ||
diff --git a/include/linux/mount.h b/include/linux/mount.h index f8f39937e301..dd4e83eba933 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
@@ -17,12 +17,14 @@ | |||
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <asm/atomic.h> | 18 | #include <asm/atomic.h> |
19 | 19 | ||
20 | #define MNT_NOSUID 1 | 20 | #define MNT_NOSUID 0x01 |
21 | #define MNT_NODEV 2 | 21 | #define MNT_NODEV 0x02 |
22 | #define MNT_NOEXEC 4 | 22 | #define MNT_NOEXEC 0x04 |
23 | #define MNT_SHARED 0x10 /* if the vfsmount is a shared mount */ | ||
24 | #define MNT_UNBINDABLE 0x20 /* if the vfsmount is an unbindable mount */ | ||
25 | #define MNT_PNODE_MASK 0x30 /* propagation flag mask */ | ||
23 | 26 | ||
24 | struct vfsmount | 27 | struct vfsmount { |
25 | { | ||
26 | struct list_head mnt_hash; | 28 | struct list_head mnt_hash; |
27 | struct vfsmount *mnt_parent; /* fs we are mounted on */ | 29 | struct vfsmount *mnt_parent; /* fs we are mounted on */ |
28 | struct dentry *mnt_mountpoint; /* dentry of mountpoint */ | 30 | struct dentry *mnt_mountpoint; /* dentry of mountpoint */ |
@@ -36,7 +38,12 @@ struct vfsmount | |||
36 | char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ | 38 | char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ |
37 | struct list_head mnt_list; | 39 | struct list_head mnt_list; |
38 | struct list_head mnt_expire; /* link in fs-specific expiry list */ | 40 | struct list_head mnt_expire; /* link in fs-specific expiry list */ |
41 | struct list_head mnt_share; /* circular list of shared mounts */ | ||
42 | struct list_head mnt_slave_list;/* list of slave mounts */ | ||
43 | struct list_head mnt_slave; /* slave list entry */ | ||
44 | struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */ | ||
39 | struct namespace *mnt_namespace; /* containing namespace */ | 45 | struct namespace *mnt_namespace; /* containing namespace */ |
46 | int mnt_pinned; | ||
40 | }; | 47 | }; |
41 | 48 | ||
42 | static inline struct vfsmount *mntget(struct vfsmount *mnt) | 49 | static inline struct vfsmount *mntget(struct vfsmount *mnt) |
@@ -46,15 +53,9 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt) | |||
46 | return mnt; | 53 | return mnt; |
47 | } | 54 | } |
48 | 55 | ||
49 | extern void __mntput(struct vfsmount *mnt); | 56 | extern void mntput_no_expire(struct vfsmount *mnt); |
50 | 57 | extern void mnt_pin(struct vfsmount *mnt); | |
51 | static inline void mntput_no_expire(struct vfsmount *mnt) | 58 | extern void mnt_unpin(struct vfsmount *mnt); |
52 | { | ||
53 | if (mnt) { | ||
54 | if (atomic_dec_and_test(&mnt->mnt_count)) | ||
55 | __mntput(mnt); | ||
56 | } | ||
57 | } | ||
58 | 59 | ||
59 | static inline void mntput(struct vfsmount *mnt) | 60 | static inline void mntput(struct vfsmount *mnt) |
60 | { | 61 | { |
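
The new mnt_pin()/mnt_unpin() helpers, together with the now out-of-line mntput_no_expire(), let a holder of an open file trade its active vfsmount reference for a pin that is released later with mnt_unpin(); the BSD accounting rework in kernel/acct.c below is the user. A condensed sketch of that calling pattern (hypothetical caller, kernel context):

	/* attach: pin the mount, then give up the active reference */
	mnt_pin(file->f_vfsmnt);
	mntput(file->f_vfsmnt);

	/* detach: balance the pin before the file is finally closed */
	mnt_unpin(file->f_vfsmnt);
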
diff --git a/include/linux/namespace.h b/include/linux/namespace.h index 0e5a86f13b2f..6731977c4c13 100644 --- a/include/linux/namespace.h +++ b/include/linux/namespace.h | |||
@@ -9,7 +9,8 @@ struct namespace { | |||
9 | atomic_t count; | 9 | atomic_t count; |
10 | struct vfsmount * root; | 10 | struct vfsmount * root; |
11 | struct list_head list; | 11 | struct list_head list; |
12 | struct rw_semaphore sem; | 12 | wait_queue_head_t poll; |
13 | int event; | ||
13 | }; | 14 | }; |
14 | 15 | ||
15 | extern int copy_namespace(int, struct task_struct *); | 16 | extern int copy_namespace(int, struct task_struct *); |
diff --git a/include/linux/quota.h b/include/linux/quota.h index 700ead45084f..f33aeb22c26a 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -289,7 +289,6 @@ struct quota_info { | |||
289 | struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */ | 289 | struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */ |
290 | struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ | 290 | struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ |
291 | struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ | 291 | struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ |
292 | struct vfsmount *mnt[MAXQUOTAS]; /* mountpoint entries of filesystems with quota files */ | ||
293 | struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ | 292 | struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ |
294 | struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ | 293 | struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ |
295 | }; | 294 | }; |
diff --git a/kernel/acct.c b/kernel/acct.c index 2e3f4a47e7d0..6312d6bd43e3 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/jiffies.h> | 54 | #include <linux/jiffies.h> |
55 | #include <linux/times.h> | 55 | #include <linux/times.h> |
56 | #include <linux/syscalls.h> | 56 | #include <linux/syscalls.h> |
57 | #include <linux/mount.h> | ||
57 | #include <asm/uaccess.h> | 58 | #include <asm/uaccess.h> |
58 | #include <asm/div64.h> | 59 | #include <asm/div64.h> |
59 | #include <linux/blkdev.h> /* sector_div */ | 60 | #include <linux/blkdev.h> /* sector_div */ |
@@ -192,6 +193,7 @@ static void acct_file_reopen(struct file *file) | |||
192 | add_timer(&acct_globals.timer); | 193 | add_timer(&acct_globals.timer); |
193 | } | 194 | } |
194 | if (old_acct) { | 195 | if (old_acct) { |
196 | mnt_unpin(old_acct->f_vfsmnt); | ||
195 | spin_unlock(&acct_globals.lock); | 197 | spin_unlock(&acct_globals.lock); |
196 | do_acct_process(0, old_acct); | 198 | do_acct_process(0, old_acct); |
197 | filp_close(old_acct, NULL); | 199 | filp_close(old_acct, NULL); |
@@ -199,6 +201,42 @@ static void acct_file_reopen(struct file *file) | |||
199 | } | 201 | } |
200 | } | 202 | } |
201 | 203 | ||
204 | static int acct_on(char *name) | ||
205 | { | ||
206 | struct file *file; | ||
207 | int error; | ||
208 | |||
209 | /* Difference from BSD - they don't do O_APPEND */ | ||
210 | file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0); | ||
211 | if (IS_ERR(file)) | ||
212 | return PTR_ERR(file); | ||
213 | |||
214 | if (!S_ISREG(file->f_dentry->d_inode->i_mode)) { | ||
215 | filp_close(file, NULL); | ||
216 | return -EACCES; | ||
217 | } | ||
218 | |||
219 | if (!file->f_op->write) { | ||
220 | filp_close(file, NULL); | ||
221 | return -EIO; | ||
222 | } | ||
223 | |||
224 | error = security_acct(file); | ||
225 | if (error) { | ||
226 | filp_close(file, NULL); | ||
227 | return error; | ||
228 | } | ||
229 | |||
230 | spin_lock(&acct_globals.lock); | ||
231 | mnt_pin(file->f_vfsmnt); | ||
232 | acct_file_reopen(file); | ||
233 | spin_unlock(&acct_globals.lock); | ||
234 | |||
235 | mntput(file->f_vfsmnt); /* it's pinned, now give up active reference */ | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
202 | /** | 240 | /** |
203 | * sys_acct - enable/disable process accounting | 241 | * sys_acct - enable/disable process accounting |
204 | * @name: file name for accounting records or NULL to shutdown accounting | 242 | * @name: file name for accounting records or NULL to shutdown accounting |
@@ -212,47 +250,41 @@ static void acct_file_reopen(struct file *file) | |||
212 | */ | 250 | */ |
213 | asmlinkage long sys_acct(const char __user *name) | 251 | asmlinkage long sys_acct(const char __user *name) |
214 | { | 252 | { |
215 | struct file *file = NULL; | ||
216 | char *tmp; | ||
217 | int error; | 253 | int error; |
218 | 254 | ||
219 | if (!capable(CAP_SYS_PACCT)) | 255 | if (!capable(CAP_SYS_PACCT)) |
220 | return -EPERM; | 256 | return -EPERM; |
221 | 257 | ||
222 | if (name) { | 258 | if (name) { |
223 | tmp = getname(name); | 259 | char *tmp = getname(name); |
224 | if (IS_ERR(tmp)) { | 260 | if (IS_ERR(tmp)) |
225 | return (PTR_ERR(tmp)); | 261 | return (PTR_ERR(tmp)); |
226 | } | 262 | error = acct_on(tmp); |
227 | /* Difference from BSD - they don't do O_APPEND */ | ||
228 | file = filp_open(tmp, O_WRONLY|O_APPEND|O_LARGEFILE, 0); | ||
229 | putname(tmp); | 263 | putname(tmp); |
230 | if (IS_ERR(file)) { | 264 | } else { |
231 | return (PTR_ERR(file)); | 265 | error = security_acct(NULL); |
232 | } | 266 | if (!error) { |
233 | if (!S_ISREG(file->f_dentry->d_inode->i_mode)) { | 267 | spin_lock(&acct_globals.lock); |
234 | filp_close(file, NULL); | 268 | acct_file_reopen(NULL); |
235 | return (-EACCES); | 269 | spin_unlock(&acct_globals.lock); |
236 | } | ||
237 | |||
238 | if (!file->f_op->write) { | ||
239 | filp_close(file, NULL); | ||
240 | return (-EIO); | ||
241 | } | 270 | } |
242 | } | 271 | } |
272 | return error; | ||
273 | } | ||
243 | 274 | ||
244 | error = security_acct(file); | 275 | /** |
245 | if (error) { | 276 | * acct_auto_close - turn off a filesystem's accounting if it is on |
246 | if (file) | 277 | * @m: vfsmount being shut down |
247 | filp_close(file, NULL); | 278 | * |
248 | return error; | 279 | * If the accounting is turned on for a file in the subtree pointed to |
249 | } | 280 | * by m, turn accounting off. Done when m is about to die. |
250 | 281 | */ | |
282 | void acct_auto_close_mnt(struct vfsmount *m) | ||
283 | { | ||
251 | spin_lock(&acct_globals.lock); | 284 | spin_lock(&acct_globals.lock); |
252 | acct_file_reopen(file); | 285 | if (acct_globals.file && acct_globals.file->f_vfsmnt == m) |
286 | acct_file_reopen(NULL); | ||
253 | spin_unlock(&acct_globals.lock); | 287 | spin_unlock(&acct_globals.lock); |
254 | |||
255 | return (0); | ||
256 | } | 288 | } |
257 | 289 | ||
258 | /** | 290 | /** |
@@ -266,8 +298,8 @@ void acct_auto_close(struct super_block *sb) | |||
266 | { | 298 | { |
267 | spin_lock(&acct_globals.lock); | 299 | spin_lock(&acct_globals.lock); |
268 | if (acct_globals.file && | 300 | if (acct_globals.file && |
269 | acct_globals.file->f_dentry->d_inode->i_sb == sb) { | 301 | acct_globals.file->f_vfsmnt->mnt_sb == sb) { |
270 | acct_file_reopen((struct file *)NULL); | 302 | acct_file_reopen(NULL); |
271 | } | 303 | } |
272 | spin_unlock(&acct_globals.lock); | 304 | spin_unlock(&acct_globals.lock); |
273 | } | 305 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index efac2c58ec7d..158710d22566 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -470,13 +470,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) | |||
470 | if (clone_flags & CLONE_VM) { | 470 | if (clone_flags & CLONE_VM) { |
471 | atomic_inc(&oldmm->mm_users); | 471 | atomic_inc(&oldmm->mm_users); |
472 | mm = oldmm; | 472 | mm = oldmm; |
473 | /* | ||
474 | * There are cases where the PTL is held to ensure no | ||
475 | * new threads start up in user mode using an mm, which | ||
476 | * allows optimizing out ipis; the tlb_gather_mmu code | ||
477 | * is an example. | ||
478 | */ | ||
479 | spin_unlock_wait(&oldmm->page_table_lock); | ||
480 | goto good_mm; | 473 | goto good_mm; |
481 | } | 474 | } |
482 | 475 | ||
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c index f4361c518e46..1f8d27a6152e 100644 --- a/sound/sparc/cs4231.c +++ b/sound/sparc/cs4231.c | |||
@@ -61,13 +61,37 @@ MODULE_DESCRIPTION("Sun CS4231"); | |||
61 | MODULE_LICENSE("GPL"); | 61 | MODULE_LICENSE("GPL"); |
62 | MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}"); | 62 | MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}"); |
63 | 63 | ||
64 | typedef struct snd_cs4231 { | 64 | #ifdef SBUS_SUPPORT |
65 | spinlock_t lock; | 65 | typedef struct sbus_dma_info { |
66 | void __iomem *port; | 66 | spinlock_t lock; |
67 | int dir; | ||
68 | void __iomem *regs; | ||
69 | } sbus_dma_info_t; | ||
70 | #endif | ||
71 | |||
72 | typedef struct snd_cs4231 cs4231_t; | ||
73 | |||
74 | typedef struct cs4231_dma_control { | ||
75 | void (*prepare)(struct cs4231_dma_control *dma_cont, int dir); | ||
76 | void (*enable)(struct cs4231_dma_control *dma_cont, int on); | ||
77 | int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); | ||
78 | unsigned int (*address)(struct cs4231_dma_control *dma_cont); | ||
79 | void (*reset)(cs4231_t *chip); | ||
80 | void (*preallocate)(cs4231_t *chip, snd_pcm_t *pcm); | ||
67 | #ifdef EBUS_SUPPORT | 81 | #ifdef EBUS_SUPPORT |
68 | struct ebus_dma_info eb2c; | 82 | struct ebus_dma_info ebus_info; |
69 | struct ebus_dma_info eb2p; | 83 | #endif |
84 | #ifdef SBUS_SUPPORT | ||
85 | struct sbus_dma_info sbus_info; | ||
70 | #endif | 86 | #endif |
87 | } cs4231_dma_control_t; | ||
88 | |||
89 | struct snd_cs4231 { | ||
90 | spinlock_t lock; | ||
91 | void __iomem *port; | ||
92 | |||
93 | cs4231_dma_control_t p_dma; | ||
94 | cs4231_dma_control_t c_dma; | ||
71 | 95 | ||
72 | u32 flags; | 96 | u32 flags; |
73 | #define CS4231_FLAG_EBUS 0x00000001 | 97 | #define CS4231_FLAG_EBUS 0x00000001 |
@@ -106,7 +130,7 @@ typedef struct snd_cs4231 { | |||
106 | unsigned int irq[2]; | 130 | unsigned int irq[2]; |
107 | unsigned int regs_size; | 131 | unsigned int regs_size; |
108 | struct snd_cs4231 *next; | 132 | struct snd_cs4231 *next; |
109 | } cs4231_t; | 133 | }; |
110 | 134 | ||
111 | static cs4231_t *cs4231_list; | 135 | static cs4231_t *cs4231_list; |
112 | 136 | ||
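
The cs4231_dma_control ops table above is what removes the EBUS/SBUS #ifdef branches from the common paths: the SBUS and EBUS probe code further down fills in bus-specific implementations, and shared code only calls through the function pointers. A minimal sketch of the dispatch pattern (illustrative helper, not part of the patch, mirroring cs4231_dma_trigger()):

	static void start_capture_dma(cs4231_t *chip)
	{
		cs4231_dma_control_t *dma = &chip->c_dma;

		dma->prepare(dma, 1);	/* direction 1 == capture */
		dma->enable(dma, 1);	/* start the transfer engine */
	}
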
@@ -251,6 +275,15 @@ static cs4231_t *cs4231_list; | |||
251 | #define APCPNVA 0x38UL /* APC Play DMA Next Address */ | 275 | #define APCPNVA 0x38UL /* APC Play DMA Next Address */ |
252 | #define APCPNC 0x3cUL /* APC Play Next Count */ | 276 | #define APCPNC 0x3cUL /* APC Play Next Count */ |
253 | 277 | ||
278 | /* Defines for SBUS DMA-routines */ | ||
279 | |||
280 | #define APCVA 0x0UL /* APC DMA Address */ | ||
281 | #define APCC 0x4UL /* APC Count */ | ||
282 | #define APCNVA 0x8UL /* APC DMA Next Address */ | ||
283 | #define APCNC 0xcUL /* APC Next Count */ | ||
284 | #define APC_PLAY 0x30UL /* Play registers start at 0x30 */ | ||
285 | #define APC_RECORD 0x20UL /* Record registers start at 0x20 */ | ||
286 | |||
254 | /* APCCSR bits */ | 287 | /* APCCSR bits */ |
255 | 288 | ||
256 | #define APC_INT_PENDING 0x800000 /* Interrupt Pending */ | 289 | #define APC_INT_PENDING 0x800000 /* Interrupt Pending */ |
@@ -569,8 +602,7 @@ static void snd_cs4231_mce_down(cs4231_t *chip) | |||
569 | spin_unlock_irqrestore(&chip->lock, flags); | 602 | spin_unlock_irqrestore(&chip->lock, flags); |
570 | } | 603 | } |
571 | 604 | ||
572 | #ifdef EBUS_SUPPORT | 605 | static void snd_cs4231_advance_dma(struct cs4231_dma_control *dma_cont, snd_pcm_substream_t *substream, unsigned int *periods_sent) |
573 | static void snd_cs4231_ebus_advance_dma(struct ebus_dma_info *p, snd_pcm_substream_t *substream, unsigned int *periods_sent) | ||
574 | { | 606 | { |
575 | snd_pcm_runtime_t *runtime = substream->runtime; | 607 | snd_pcm_runtime_t *runtime = substream->runtime; |
576 | 608 | ||
@@ -581,129 +613,41 @@ static void snd_cs4231_ebus_advance_dma(struct ebus_dma_info *p, snd_pcm_substre | |||
581 | if (period_size >= (1 << 24)) | 613 | if (period_size >= (1 << 24)) |
582 | BUG(); | 614 | BUG(); |
583 | 615 | ||
584 | if (ebus_dma_request(p, runtime->dma_addr + offset, period_size)) | 616 | if (dma_cont->request(dma_cont, runtime->dma_addr + offset, period_size)) |
585 | return; | 617 | return; |
586 | (*periods_sent) = ((*periods_sent) + 1) % runtime->periods; | 618 | (*periods_sent) = ((*periods_sent) + 1) % runtime->periods; |
587 | } | 619 | } |
588 | } | 620 | } |
589 | #endif | ||
590 | |||
591 | #ifdef SBUS_SUPPORT | ||
592 | static void snd_cs4231_sbus_advance_dma(snd_pcm_substream_t *substream, unsigned int *periods_sent) | ||
593 | { | ||
594 | cs4231_t *chip = snd_pcm_substream_chip(substream); | ||
595 | snd_pcm_runtime_t *runtime = substream->runtime; | ||
596 | |||
597 | unsigned int period_size = snd_pcm_lib_period_bytes(substream); | ||
598 | unsigned int offset = period_size * (*periods_sent % runtime->periods); | ||
599 | |||
600 | if (runtime->period_size > 0xffff + 1) | ||
601 | BUG(); | ||
602 | |||
603 | switch (substream->stream) { | ||
604 | case SNDRV_PCM_STREAM_PLAYBACK: | ||
605 | sbus_writel(runtime->dma_addr + offset, chip->port + APCPNVA); | ||
606 | sbus_writel(period_size, chip->port + APCPNC); | ||
607 | break; | ||
608 | case SNDRV_PCM_STREAM_CAPTURE: | ||
609 | sbus_writel(runtime->dma_addr + offset, chip->port + APCCNVA); | ||
610 | sbus_writel(period_size, chip->port + APCCNC); | ||
611 | break; | ||
612 | } | ||
613 | |||
614 | (*periods_sent) = (*periods_sent + 1) % runtime->periods; | ||
615 | } | ||
616 | #endif | ||
617 | 621 | ||
618 | static void cs4231_dma_trigger(snd_pcm_substream_t *substream, unsigned int what, int on) | 622 | static void cs4231_dma_trigger(snd_pcm_substream_t *substream, unsigned int what, int on) |
619 | { | 623 | { |
620 | cs4231_t *chip = snd_pcm_substream_chip(substream); | 624 | cs4231_t *chip = snd_pcm_substream_chip(substream); |
625 | cs4231_dma_control_t *dma_cont; | ||
621 | 626 | ||
622 | #ifdef EBUS_SUPPORT | 627 | if (what & CS4231_PLAYBACK_ENABLE) { |
623 | if (chip->flags & CS4231_FLAG_EBUS) { | 628 | dma_cont = &chip->p_dma; |
624 | if (what & CS4231_PLAYBACK_ENABLE) { | ||
625 | if (on) { | ||
626 | ebus_dma_prepare(&chip->eb2p, 0); | ||
627 | ebus_dma_enable(&chip->eb2p, 1); | ||
628 | snd_cs4231_ebus_advance_dma(&chip->eb2p, | ||
629 | chip->playback_substream, | ||
630 | &chip->p_periods_sent); | ||
631 | } else { | ||
632 | ebus_dma_enable(&chip->eb2p, 0); | ||
633 | } | ||
634 | } | ||
635 | if (what & CS4231_RECORD_ENABLE) { | ||
636 | if (on) { | ||
637 | ebus_dma_prepare(&chip->eb2c, 1); | ||
638 | ebus_dma_enable(&chip->eb2c, 1); | ||
639 | snd_cs4231_ebus_advance_dma(&chip->eb2c, | ||
640 | chip->capture_substream, | ||
641 | &chip->c_periods_sent); | ||
642 | } else { | ||
643 | ebus_dma_enable(&chip->eb2c, 0); | ||
644 | } | ||
645 | } | ||
646 | } else { | ||
647 | #endif | ||
648 | #ifdef SBUS_SUPPORT | ||
649 | u32 csr = sbus_readl(chip->port + APCCSR); | ||
650 | /* I don't know why, but on sbus the period counter must | ||
651 | * only start counting after the first period is sent. | ||
652 | * Therefore this dummy thing. | ||
653 | */ | ||
654 | unsigned int dummy = 0; | ||
655 | |||
656 | switch (what) { | ||
657 | case CS4231_PLAYBACK_ENABLE: | ||
658 | if (on) { | 629 | if (on) { |
659 | csr &= ~APC_XINT_PLAY; | 630 | dma_cont->prepare(dma_cont, 0); |
660 | sbus_writel(csr, chip->port + APCCSR); | 631 | dma_cont->enable(dma_cont, 1); |
661 | 632 | snd_cs4231_advance_dma(dma_cont, | |
662 | csr &= ~APC_PPAUSE; | 633 | chip->playback_substream, |
663 | sbus_writel(csr, chip->port + APCCSR); | 634 | &chip->p_periods_sent); |
664 | |||
665 | snd_cs4231_sbus_advance_dma(substream, &dummy); | ||
666 | |||
667 | csr |= APC_GENL_INT | APC_PLAY_INT | APC_XINT_ENA | | ||
668 | APC_XINT_PLAY | APC_XINT_EMPT | APC_XINT_GENL | | ||
669 | APC_XINT_PENA | APC_PDMA_READY; | ||
670 | sbus_writel(csr, chip->port + APCCSR); | ||
671 | } else { | 635 | } else { |
672 | csr |= APC_PPAUSE; | 636 | dma_cont->enable(dma_cont, 0); |
673 | sbus_writel(csr, chip->port + APCCSR); | ||
674 | |||
675 | csr &= ~APC_PDMA_READY; | ||
676 | sbus_writel(csr, chip->port + APCCSR); | ||
677 | } | 637 | } |
678 | break; | 638 | } |
679 | case CS4231_RECORD_ENABLE: | 639 | if (what & CS4231_RECORD_ENABLE) { |
640 | dma_cont = &chip->c_dma; | ||
680 | if (on) { | 641 | if (on) { |
681 | csr &= ~APC_XINT_CAPT; | 642 | dma_cont->prepare(dma_cont, 1); |
682 | sbus_writel(csr, chip->port + APCCSR); | 643 | dma_cont->enable(dma_cont, 1); |
683 | 644 | snd_cs4231_advance_dma(dma_cont, | |
684 | csr &= ~APC_CPAUSE; | 645 | chip->capture_substream, |
685 | sbus_writel(csr, chip->port + APCCSR); | 646 | &chip->c_periods_sent); |
686 | |||
687 | snd_cs4231_sbus_advance_dma(substream, &dummy); | ||
688 | |||
689 | csr |= APC_GENL_INT | APC_CAPT_INT | APC_XINT_ENA | | ||
690 | APC_XINT_CAPT | APC_XINT_CEMP | APC_XINT_GENL | | ||
691 | APC_CDMA_READY; | ||
692 | |||
693 | sbus_writel(csr, chip->port + APCCSR); | ||
694 | } else { | 647 | } else { |
695 | csr |= APC_CPAUSE; | 648 | dma_cont->enable(dma_cont, 0); |
696 | sbus_writel(csr, chip->port + APCCSR); | ||
697 | |||
698 | csr &= ~APC_CDMA_READY; | ||
699 | sbus_writel(csr, chip->port + APCCSR); | ||
700 | } | 649 | } |
701 | break; | ||
702 | } | ||
703 | #endif | ||
704 | #ifdef EBUS_SUPPORT | ||
705 | } | 650 | } |
706 | #endif | ||
707 | } | 651 | } |
708 | 652 | ||
709 | static int snd_cs4231_trigger(snd_pcm_substream_t *substream, int cmd) | 653 | static int snd_cs4231_trigger(snd_pcm_substream_t *substream, int cmd) |
@@ -1136,10 +1080,7 @@ static int snd_cs4231_playback_prepare(snd_pcm_substream_t *substream) | |||
1136 | if (runtime->period_size > 0xffff + 1) | 1080 | if (runtime->period_size > 0xffff + 1) |
1137 | BUG(); | 1081 | BUG(); |
1138 | 1082 | ||
1139 | snd_cs4231_out(chip, CS4231_PLY_LWR_CNT, (runtime->period_size - 1) & 0x00ff); | ||
1140 | snd_cs4231_out(chip, CS4231_PLY_UPR_CNT, (runtime->period_size - 1) >> 8 & 0x00ff); | ||
1141 | chip->p_periods_sent = 0; | 1083 | chip->p_periods_sent = 0; |
1142 | |||
1143 | spin_unlock_irqrestore(&chip->lock, flags); | 1084 | spin_unlock_irqrestore(&chip->lock, flags); |
1144 | 1085 | ||
1145 | return 0; | 1086 | return 0; |
@@ -1171,16 +1112,14 @@ static int snd_cs4231_capture_hw_free(snd_pcm_substream_t *substream) | |||
1171 | static int snd_cs4231_capture_prepare(snd_pcm_substream_t *substream) | 1112 | static int snd_cs4231_capture_prepare(snd_pcm_substream_t *substream) |
1172 | { | 1113 | { |
1173 | cs4231_t *chip = snd_pcm_substream_chip(substream); | 1114 | cs4231_t *chip = snd_pcm_substream_chip(substream); |
1174 | snd_pcm_runtime_t *runtime = substream->runtime; | ||
1175 | unsigned long flags; | 1115 | unsigned long flags; |
1176 | 1116 | ||
1177 | spin_lock_irqsave(&chip->lock, flags); | 1117 | spin_lock_irqsave(&chip->lock, flags); |
1178 | chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_RECORD_ENABLE | | 1118 | chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_RECORD_ENABLE | |
1179 | CS4231_RECORD_PIO); | 1119 | CS4231_RECORD_PIO); |
1180 | 1120 | ||
1181 | snd_cs4231_out(chip, CS4231_REC_LWR_CNT, (runtime->period_size - 1) & 0x00ff); | ||
1182 | snd_cs4231_out(chip, CS4231_REC_LWR_CNT, (runtime->period_size - 1) >> 8 & 0x00ff); | ||
1183 | 1121 | ||
1122 | chip->c_periods_sent = 0; | ||
1184 | spin_unlock_irqrestore(&chip->lock, flags); | 1123 | spin_unlock_irqrestore(&chip->lock, flags); |
1185 | 1124 | ||
1186 | return 0; | 1125 | return 0; |
@@ -1199,134 +1138,55 @@ static void snd_cs4231_overrange(cs4231_t *chip) | |||
1199 | chip->capture_substream->runtime->overrange++; | 1138 | chip->capture_substream->runtime->overrange++; |
1200 | } | 1139 | } |
1201 | 1140 | ||
1202 | static irqreturn_t snd_cs4231_generic_interrupt(cs4231_t *chip) | 1141 | static void snd_cs4231_play_callback(cs4231_t *cookie) |
1203 | { | ||
1204 | unsigned long flags; | ||
1205 | unsigned char status; | ||
1206 | |||
1207 | /* This IRQ is not raised by the cs4231 */ | ||
1208 | if (!(__cs4231_readb(chip, CS4231P(chip, STATUS)) & CS4231_GLOBALIRQ)) | ||
1209 | return IRQ_NONE; | ||
1210 | |||
1211 | status = snd_cs4231_in(chip, CS4231_IRQ_STATUS); | ||
1212 | |||
1213 | if (status & CS4231_TIMER_IRQ) { | ||
1214 | if (chip->timer) | ||
1215 | snd_timer_interrupt(chip->timer, chip->timer->sticks); | ||
1216 | } | ||
1217 | |||
1218 | if (status & CS4231_RECORD_IRQ) | ||
1219 | snd_cs4231_overrange(chip); | ||
1220 | |||
1221 | /* ACK the CS4231 interrupt. */ | ||
1222 | spin_lock_irqsave(&chip->lock, flags); | ||
1223 | snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0); | ||
1224 | spin_unlock_irqrestore(&chip->lock, flags); | ||
1225 | |||
1226 | return 0; | ||
1227 | } | ||
1228 | |||
1229 | #ifdef SBUS_SUPPORT | ||
1230 | static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
1231 | { | ||
1232 | cs4231_t *chip = dev_id; | ||
1233 | |||
1234 | /* ACK the APC interrupt. */ | ||
1235 | u32 csr = sbus_readl(chip->port + APCCSR); | ||
1236 | |||
1237 | sbus_writel(csr, chip->port + APCCSR); | ||
1238 | |||
1239 | if ((chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) && | ||
1240 | (csr & APC_PLAY_INT) && | ||
1241 | (csr & APC_XINT_PNVA) && | ||
1242 | !(csr & APC_XINT_EMPT)) { | ||
1243 | snd_cs4231_sbus_advance_dma(chip->playback_substream, | ||
1244 | &chip->p_periods_sent); | ||
1245 | snd_pcm_period_elapsed(chip->playback_substream); | ||
1246 | } | ||
1247 | |||
1248 | if ((chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) && | ||
1249 | (csr & APC_CAPT_INT) && | ||
1250 | (csr & APC_XINT_CNVA)) { | ||
1251 | snd_cs4231_sbus_advance_dma(chip->capture_substream, | ||
1252 | &chip->c_periods_sent); | ||
1253 | snd_pcm_period_elapsed(chip->capture_substream); | ||
1254 | } | ||
1255 | |||
1256 | return snd_cs4231_generic_interrupt(chip); | ||
1257 | } | ||
1258 | #endif | ||
1259 | |||
1260 | #ifdef EBUS_SUPPORT | ||
1261 | static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event, void *cookie) | ||
1262 | { | 1142 | { |
1263 | cs4231_t *chip = cookie; | 1143 | cs4231_t *chip = cookie; |
1264 | 1144 | ||
1265 | if (chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) { | 1145 | if (chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) { |
1266 | snd_pcm_period_elapsed(chip->playback_substream); | 1146 | snd_pcm_period_elapsed(chip->playback_substream); |
1267 | snd_cs4231_ebus_advance_dma(p, chip->playback_substream, | 1147 | snd_cs4231_advance_dma(&chip->p_dma, chip->playback_substream, |
1268 | &chip->p_periods_sent); | 1148 | &chip->p_periods_sent); |
1269 | } | 1149 | } |
1270 | } | 1150 | } |
1271 | 1151 | ||
1272 | static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p, int event, void *cookie) | 1152 | static void snd_cs4231_capture_callback(cs4231_t *cookie) |
1273 | { | 1153 | { |
1274 | cs4231_t *chip = cookie; | 1154 | cs4231_t *chip = cookie; |
1275 | 1155 | ||
1276 | if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) { | 1156 | if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) { |
1277 | snd_pcm_period_elapsed(chip->capture_substream); | 1157 | snd_pcm_period_elapsed(chip->capture_substream); |
1278 | snd_cs4231_ebus_advance_dma(p, chip->capture_substream, | 1158 | snd_cs4231_advance_dma(&chip->c_dma, chip->capture_substream, |
1279 | &chip->c_periods_sent); | 1159 | &chip->c_periods_sent); |
1280 | } | 1160 | } |
1281 | } | 1161 | } |
1282 | #endif | ||
1283 | 1162 | ||
1284 | static snd_pcm_uframes_t snd_cs4231_playback_pointer(snd_pcm_substream_t *substream) | 1163 | static snd_pcm_uframes_t snd_cs4231_playback_pointer(snd_pcm_substream_t *substream) |
1285 | { | 1164 | { |
1286 | cs4231_t *chip = snd_pcm_substream_chip(substream); | 1165 | cs4231_t *chip = snd_pcm_substream_chip(substream); |
1287 | size_t ptr, residue, period_bytes; | 1166 | cs4231_dma_control_t *dma_cont = &chip->p_dma; |
1288 | 1167 | size_t ptr; | |
1168 | |||
1289 | if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) | 1169 | if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) |
1290 | return 0; | 1170 | return 0; |
1291 | period_bytes = snd_pcm_lib_period_bytes(substream); | 1171 | ptr = dma_cont->address(dma_cont); |
1292 | ptr = period_bytes * chip->p_periods_sent; | 1172 | if (ptr != 0) |
1293 | #ifdef EBUS_SUPPORT | 1173 | ptr -= substream->runtime->dma_addr; |
1294 | if (chip->flags & CS4231_FLAG_EBUS) { | 1174 | |
1295 | residue = ebus_dma_residue(&chip->eb2p); | ||
1296 | } else { | ||
1297 | #endif | ||
1298 | #ifdef SBUS_SUPPORT | ||
1299 | residue = sbus_readl(chip->port + APCPC); | ||
1300 | #endif | ||
1301 | #ifdef EBUS_SUPPORT | ||
1302 | } | ||
1303 | #endif | ||
1304 | ptr += period_bytes - residue; | ||
1305 | |||
1306 | return bytes_to_frames(substream->runtime, ptr); | 1175 | return bytes_to_frames(substream->runtime, ptr); |
1307 | } | 1176 | } |
1308 | 1177 | ||
1309 | static snd_pcm_uframes_t snd_cs4231_capture_pointer(snd_pcm_substream_t * substream) | 1178 | static snd_pcm_uframes_t snd_cs4231_capture_pointer(snd_pcm_substream_t * substream) |
1310 | { | 1179 | { |
1311 | cs4231_t *chip = snd_pcm_substream_chip(substream); | 1180 | cs4231_t *chip = snd_pcm_substream_chip(substream); |
1312 | size_t ptr, residue, period_bytes; | 1181 | cs4231_dma_control_t *dma_cont = &chip->c_dma; |
1182 | size_t ptr; | ||
1313 | 1183 | ||
1314 | if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE)) | 1184 | if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE)) |
1315 | return 0; | 1185 | return 0; |
1316 | period_bytes = snd_pcm_lib_period_bytes(substream); | 1186 | ptr = dma_cont->address(dma_cont); |
1317 | ptr = period_bytes * chip->c_periods_sent; | 1187 | if (ptr != 0) |
1318 | #ifdef EBUS_SUPPORT | 1188 | ptr -= substream->runtime->dma_addr; |
1319 | if (chip->flags & CS4231_FLAG_EBUS) { | 1189 | |
1320 | residue = ebus_dma_residue(&chip->eb2c); | ||
1321 | } else { | ||
1322 | #endif | ||
1323 | #ifdef SBUS_SUPPORT | ||
1324 | residue = sbus_readl(chip->port + APCCC); | ||
1325 | #endif | ||
1326 | #ifdef EBUS_SUPPORT | ||
1327 | } | ||
1328 | #endif | ||
1329 | ptr += period_bytes - residue; | ||
1330 | return bytes_to_frames(substream->runtime, ptr); | 1190 | return bytes_to_frames(substream->runtime, ptr); |
1331 | } | 1191 | } |
1332 | 1192 | ||
@@ -1362,30 +1222,8 @@ static int snd_cs4231_probe(cs4231_t *chip) | |||
1362 | spin_lock_irqsave(&chip->lock, flags); | 1222 | spin_lock_irqsave(&chip->lock, flags); |
1363 | 1223 | ||
1364 | 1224 | ||
1365 | /* Reset DMA engine. */ | 1225 | /* Reset DMA engine (sbus only). */ |
1366 | #ifdef EBUS_SUPPORT | 1226 | chip->p_dma.reset(chip); |
1367 | if (chip->flags & CS4231_FLAG_EBUS) { | ||
1368 | /* Done by ebus_dma_register */ | ||
1369 | } else { | ||
1370 | #endif | ||
1371 | #ifdef SBUS_SUPPORT | ||
1372 | sbus_writel(APC_CHIP_RESET, chip->port + APCCSR); | ||
1373 | sbus_writel(0x00, chip->port + APCCSR); | ||
1374 | sbus_writel(sbus_readl(chip->port + APCCSR) | APC_CDC_RESET, | ||
1375 | chip->port + APCCSR); | ||
1376 | |||
1377 | udelay(20); | ||
1378 | |||
1379 | sbus_writel(sbus_readl(chip->port + APCCSR) & ~APC_CDC_RESET, | ||
1380 | chip->port + APCCSR); | ||
1381 | sbus_writel(sbus_readl(chip->port + APCCSR) | (APC_XINT_ENA | | ||
1382 | APC_XINT_PENA | | ||
1383 | APC_XINT_CENA), | ||
1384 | chip->port + APCCSR); | ||
1385 | #endif | ||
1386 | #ifdef EBUS_SUPPORT | ||
1387 | } | ||
1388 | #endif | ||
1389 | 1227 | ||
1390 | __cs4231_readb(chip, CS4231P(chip, STATUS)); /* clear any pending IRQs */ | 1228 |
1391 | __cs4231_writeb(chip, 0, CS4231P(chip, STATUS)); | 1229 | __cs4231_writeb(chip, 0, CS4231P(chip, STATUS)); |
@@ -1505,8 +1343,8 @@ static int snd_cs4231_playback_close(snd_pcm_substream_t *substream) | |||
1505 | { | 1343 | { |
1506 | cs4231_t *chip = snd_pcm_substream_chip(substream); | 1344 | cs4231_t *chip = snd_pcm_substream_chip(substream); |
1507 | 1345 | ||
1508 | chip->playback_substream = NULL; | ||
1509 | snd_cs4231_close(chip, CS4231_MODE_PLAY); | 1346 | snd_cs4231_close(chip, CS4231_MODE_PLAY); |
1347 | chip->playback_substream = NULL; | ||
1510 | 1348 | ||
1511 | return 0; | 1349 | return 0; |
1512 | } | 1350 | } |
@@ -1515,8 +1353,8 @@ static int snd_cs4231_capture_close(snd_pcm_substream_t *substream) | |||
1515 | { | 1353 | { |
1516 | cs4231_t *chip = snd_pcm_substream_chip(substream); | 1354 | cs4231_t *chip = snd_pcm_substream_chip(substream); |
1517 | 1355 | ||
1518 | chip->capture_substream = NULL; | ||
1519 | snd_cs4231_close(chip, CS4231_MODE_RECORD); | 1356 | snd_cs4231_close(chip, CS4231_MODE_RECORD); |
1357 | chip->capture_substream = NULL; | ||
1520 | 1358 | ||
1521 | return 0; | 1359 | return 0; |
1522 | } | 1360 | } |
@@ -1571,21 +1409,7 @@ int snd_cs4231_pcm(cs4231_t *chip) | |||
1571 | pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; | 1409 | pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; |
1572 | strcpy(pcm->name, "CS4231"); | 1410 | strcpy(pcm->name, "CS4231"); |
1573 | 1411 | ||
1574 | #ifdef EBUS_SUPPORT | 1412 | chip->p_dma.preallocate(chip, pcm); |
1575 | if (chip->flags & CS4231_FLAG_EBUS) { | ||
1576 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, | ||
1577 | snd_dma_pci_data(chip->dev_u.pdev), | ||
1578 | 64*1024, 128*1024); | ||
1579 | } else { | ||
1580 | #endif | ||
1581 | #ifdef SBUS_SUPPORT | ||
1582 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_SBUS, | ||
1583 | snd_dma_sbus_data(chip->dev_u.sdev), | ||
1584 | 64*1024, 128*1024); | ||
1585 | #endif | ||
1586 | #ifdef EBUS_SUPPORT | ||
1587 | } | ||
1588 | #endif | ||
1589 | 1413 | ||
1590 | chip->pcm = pcm; | 1414 | chip->pcm = pcm; |
1591 | 1415 | ||
@@ -1942,6 +1766,180 @@ out_err: | |||
1942 | } | 1766 | } |
1943 | 1767 | ||
1944 | #ifdef SBUS_SUPPORT | 1768 | #ifdef SBUS_SUPPORT |
1769 | |||
1770 | static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
1771 | { | ||
1772 | unsigned long flags; | ||
1773 | unsigned char status; | ||
1774 | u32 csr; | ||
1775 | cs4231_t *chip = dev_id; | ||
1776 | |||
1777 | /* This IRQ is not raised by the cs4231 */ | ||
1778 | if (!(__cs4231_readb(chip, CS4231P(chip, STATUS)) & CS4231_GLOBALIRQ)) | ||
1779 | return IRQ_NONE; | ||
1780 | |||
1781 | /* ACK the APC interrupt. */ | ||
1782 | csr = sbus_readl(chip->port + APCCSR); | ||
1783 | |||
1784 | sbus_writel(csr, chip->port + APCCSR); | ||
1785 | |||
1786 | if ((csr & APC_PDMA_READY) && | ||
1787 | (csr & APC_PLAY_INT) && | ||
1788 | (csr & APC_XINT_PNVA) && | ||
1789 | !(csr & APC_XINT_EMPT)) | ||
1790 | snd_cs4231_play_callback(chip); | ||
1791 | |||
1792 | if ((csr & APC_CDMA_READY) && | ||
1793 | (csr & APC_CAPT_INT) && | ||
1794 | (csr & APC_XINT_CNVA) && | ||
1795 | !(csr & APC_XINT_EMPT)) | ||
1796 | snd_cs4231_capture_callback(chip); | ||
1797 | |||
1798 | status = snd_cs4231_in(chip, CS4231_IRQ_STATUS); | ||
1799 | |||
1800 | if (status & CS4231_TIMER_IRQ) { | ||
1801 | if (chip->timer) | ||
1802 | snd_timer_interrupt(chip->timer, chip->timer->sticks); | ||
1803 | } | ||
1804 | |||
1805 | if ((status & CS4231_RECORD_IRQ) && (csr & APC_CDMA_READY)) | ||
1806 | snd_cs4231_overrange(chip); | ||
1807 | |||
1808 | /* ACK the CS4231 interrupt. */ | ||
1809 | spin_lock_irqsave(&chip->lock, flags); | ||
1810 | snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0); | ||
1811 | spin_unlock_irqrestore(&chip->lock, flags); | ||
1812 | |||
1813 | return 0; | ||
1814 | } | ||
1815 | |||
1816 | /* | ||
1817 | * SBUS DMA routines | ||
1818 | */ | ||
1819 | |||
1820 | int sbus_dma_request(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len) | ||
1821 | { | ||
1822 | unsigned long flags; | ||
1823 | u32 test, csr; | ||
1824 | int err; | ||
1825 | sbus_dma_info_t *base = &dma_cont->sbus_info; | ||
1826 | |||
1827 | if (len >= (1 << 24)) | ||
1828 | return -EINVAL; | ||
1829 | spin_lock_irqsave(&base->lock, flags); | ||
1830 | csr = sbus_readl(base->regs + APCCSR); | ||
1831 | err = -EINVAL; | ||
1832 | test = APC_CDMA_READY; | ||
1833 | if ( base->dir == APC_PLAY ) | ||
1834 | test = APC_PDMA_READY; | ||
1835 | if (!(csr & test)) | ||
1836 | goto out; | ||
1837 | err = -EBUSY; | ||
1838 | csr = sbus_readl(base->regs + APCCSR); | ||
1839 | test = APC_XINT_CNVA; | ||
1840 | if ( base->dir == APC_PLAY ) | ||
1841 | test = APC_XINT_PNVA; | ||
1842 | if (!(csr & test)) | ||
1843 | goto out; | ||
1844 | err = 0; | ||
1845 | sbus_writel(bus_addr, base->regs + base->dir + APCNVA); | ||
1846 | sbus_writel(len, base->regs + base->dir + APCNC); | ||
1847 | out: | ||
1848 | spin_unlock_irqrestore(&base->lock, flags); | ||
1849 | return err; | ||
1850 | } | ||
1851 | |||
1852 | void sbus_dma_prepare(struct cs4231_dma_control *dma_cont, int d) | ||
1853 | { | ||
1854 | unsigned long flags; | ||
1855 | u32 csr, test; | ||
1856 | sbus_dma_info_t *base = &dma_cont->sbus_info; | ||
1857 | |||
1858 | spin_lock_irqsave(&base->lock, flags); | ||
1859 | csr = sbus_readl(base->regs + APCCSR); | ||
1860 | test = APC_GENL_INT | APC_PLAY_INT | APC_XINT_ENA | | ||
1861 | APC_XINT_PLAY | APC_XINT_PEMP | APC_XINT_GENL | | ||
1862 | APC_XINT_PENA; | ||
1863 | if ( base->dir == APC_RECORD ) | ||
1864 | test = APC_GENL_INT | APC_CAPT_INT | APC_XINT_ENA | | ||
1865 | APC_XINT_CAPT | APC_XINT_CEMP | APC_XINT_GENL; | ||
1866 | csr |= test; | ||
1867 | sbus_writel(csr, base->regs + APCCSR); | ||
1868 | spin_unlock_irqrestore(&base->lock, flags); | ||
1869 | } | ||
1870 | |||
1871 | void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on) | ||
1872 | { | ||
1873 | unsigned long flags; | ||
1874 | u32 csr, shift; | ||
1875 | sbus_dma_info_t *base = &dma_cont->sbus_info; | ||
1876 | |||
1877 | spin_lock_irqsave(&base->lock, flags); | ||
1878 | if (!on) { | ||
1879 | if (base->dir == APC_PLAY) { | ||
1880 | sbus_writel(0, base->regs + base->dir + APCNVA); | ||
1881 | sbus_writel(1, base->regs + base->dir + APCC); | ||
1882 | } | ||
1883 | else | ||
1884 | { | ||
1885 | sbus_writel(0, base->regs + base->dir + APCNC); | ||
1886 | sbus_writel(0, base->regs + base->dir + APCVA); | ||
1887 | } | ||
1888 | } | ||
1889 | udelay(600); | ||
1890 | csr = sbus_readl(base->regs + APCCSR); | ||
1891 | shift = 0; | ||
1892 | if ( base->dir == APC_PLAY ) | ||
1893 | shift = 1; | ||
1894 | if (on) | ||
1895 | csr &= ~(APC_CPAUSE << shift); | ||
1896 | else | ||
1897 | csr |= (APC_CPAUSE << shift); | ||
1898 | sbus_writel(csr, base->regs + APCCSR); | ||
1899 | if (on) | ||
1900 | csr |= (APC_CDMA_READY << shift); | ||
1901 | else | ||
1902 | csr &= ~(APC_CDMA_READY << shift); | ||
1903 | sbus_writel(csr, base->regs + APCCSR); | ||
1904 | |||
1905 | spin_unlock_irqrestore(&base->lock, flags); | ||
1906 | } | ||
1907 | |||
1908 | unsigned int sbus_dma_addr(struct cs4231_dma_control *dma_cont) | ||
1909 | { | ||
1910 | sbus_dma_info_t *base = &dma_cont->sbus_info; | ||
1911 | |||
1912 | return sbus_readl(base->regs + base->dir + APCVA); | ||
1913 | } | ||
1914 | |||
1915 | void sbus_dma_reset(cs4231_t *chip) | ||
1916 | { | ||
1917 | sbus_writel(APC_CHIP_RESET, chip->port + APCCSR); | ||
1918 | sbus_writel(0x00, chip->port + APCCSR); | ||
1919 | sbus_writel(sbus_readl(chip->port + APCCSR) | APC_CDC_RESET, | ||
1920 | chip->port + APCCSR); | ||
1921 | |||
1922 | udelay(20); | ||
1923 | |||
1924 | sbus_writel(sbus_readl(chip->port + APCCSR) & ~APC_CDC_RESET, | ||
1925 | chip->port + APCCSR); | ||
1926 | sbus_writel(sbus_readl(chip->port + APCCSR) | (APC_XINT_ENA | | ||
1927 | APC_XINT_PENA | | ||
1928 | APC_XINT_CENA), | ||
1929 | chip->port + APCCSR); | ||
1930 | } | ||
1931 | |||
1932 | void sbus_dma_preallocate(cs4231_t *chip, snd_pcm_t *pcm) | ||
1933 | { | ||
1934 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_SBUS, | ||
1935 | snd_dma_sbus_data(chip->dev_u.sdev), | ||
1936 | 64*1024, 128*1024); | ||
1937 | } | ||
1938 | |||
1939 | /* | ||
1940 | * Init and exit routines | ||
1941 | */ | ||
1942 | |||
1945 | static int snd_cs4231_sbus_free(cs4231_t *chip) | 1943 | static int snd_cs4231_sbus_free(cs4231_t *chip) |
1946 | { | 1944 | { |
1947 | if (chip->irq[0]) | 1945 | if (chip->irq[0]) |
@@ -1983,6 +1981,8 @@ static int __init snd_cs4231_sbus_create(snd_card_t *card, | |||
1983 | return -ENOMEM; | 1981 | return -ENOMEM; |
1984 | 1982 | ||
1985 | spin_lock_init(&chip->lock); | 1983 | spin_lock_init(&chip->lock); |
1984 | spin_lock_init(&chip->c_dma.sbus_info.lock); | ||
1985 | spin_lock_init(&chip->p_dma.sbus_info.lock); | ||
1986 | init_MUTEX(&chip->mce_mutex); | 1986 | init_MUTEX(&chip->mce_mutex); |
1987 | init_MUTEX(&chip->open_mutex); | 1987 | init_MUTEX(&chip->open_mutex); |
1988 | chip->card = card; | 1988 | chip->card = card; |
@@ -1998,6 +1998,25 @@ static int __init snd_cs4231_sbus_create(snd_card_t *card, | |||
1998 | return -EIO; | 1998 | return -EIO; |
1999 | } | 1999 | } |
2000 | 2000 | ||
2001 | chip->c_dma.sbus_info.regs = chip->port; | ||
2002 | chip->p_dma.sbus_info.regs = chip->port; | ||
2003 | chip->c_dma.sbus_info.dir = APC_RECORD; | ||
2004 | chip->p_dma.sbus_info.dir = APC_PLAY; | ||
2005 | |||
2006 | chip->p_dma.prepare = sbus_dma_prepare; | ||
2007 | chip->p_dma.enable = sbus_dma_enable; | ||
2008 | chip->p_dma.request = sbus_dma_request; | ||
2009 | chip->p_dma.address = sbus_dma_addr; | ||
2010 | chip->p_dma.reset = sbus_dma_reset; | ||
2011 | chip->p_dma.preallocate = sbus_dma_preallocate; | ||
2012 | |||
2013 | chip->c_dma.prepare = sbus_dma_prepare; | ||
2014 | chip->c_dma.enable = sbus_dma_enable; | ||
2015 | chip->c_dma.request = sbus_dma_request; | ||
2016 | chip->c_dma.address = sbus_dma_addr; | ||
2017 | chip->c_dma.reset = sbus_dma_reset; | ||
2018 | chip->c_dma.preallocate = sbus_dma_preallocate; | ||
2019 | |||
2001 | if (request_irq(sdev->irqs[0], snd_cs4231_sbus_interrupt, | 2020 | if (request_irq(sdev->irqs[0], snd_cs4231_sbus_interrupt, |
2002 | SA_SHIRQ, "cs4231", chip)) { | 2021 | SA_SHIRQ, "cs4231", chip)) { |
2003 | snd_printdd("cs4231-%d: Unable to grab SBUS IRQ %s\n", | 2022 | snd_printdd("cs4231-%d: Unable to grab SBUS IRQ %s\n", |
@@ -2051,15 +2070,70 @@ static int cs4231_sbus_attach(struct sbus_dev *sdev) | |||
2051 | #endif | 2070 | #endif |
2052 | 2071 | ||
2053 | #ifdef EBUS_SUPPORT | 2072 | #ifdef EBUS_SUPPORT |
2073 | |||
2074 | static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event, void *cookie) | ||
2075 | { | ||
2076 | cs4231_t *chip = cookie; | ||
2077 | |||
2078 | snd_cs4231_play_callback(chip); | ||
2079 | } | ||
2080 | |||
2081 | static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p, int event, void *cookie) | ||
2082 | { | ||
2083 | cs4231_t *chip = cookie; | ||
2084 | |||
2085 | snd_cs4231_capture_callback(chip); | ||
2086 | } | ||
2087 | |||
2088 | /* | ||
2089 | * EBUS DMA wrappers | ||
2090 | */ | ||
2091 | |||
2092 | int _ebus_dma_request(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len) | ||
2093 | { | ||
2094 | return ebus_dma_request(&dma_cont->ebus_info, bus_addr, len); | ||
2095 | } | ||
2096 | |||
2097 | void _ebus_dma_enable(struct cs4231_dma_control *dma_cont, int on) | ||
2098 | { | ||
2099 | ebus_dma_enable(&dma_cont->ebus_info, on); | ||
2100 | } | ||
2101 | |||
2102 | void _ebus_dma_prepare(struct cs4231_dma_control *dma_cont, int dir) | ||
2103 | { | ||
2104 | ebus_dma_prepare(&dma_cont->ebus_info, dir); | ||
2105 | } | ||
2106 | |||
2107 | unsigned int _ebus_dma_addr(struct cs4231_dma_control *dma_cont) | ||
2108 | { | ||
2109 | return ebus_dma_addr(&dma_cont->ebus_info); | ||
2110 | } | ||
2111 | |||
2112 | void _ebus_dma_reset(cs4231_t *chip) | ||
2113 | { | ||
2114 | return; | ||
2115 | } | ||
2116 | |||
2117 | void _ebus_dma_preallocate(cs4231_t *chip, snd_pcm_t *pcm) | ||
2118 | { | ||
2119 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, | ||
2120 | snd_dma_pci_data(chip->dev_u.pdev), | ||
2121 | 64*1024, 128*1024); | ||
2122 | } | ||
2123 | |||
2124 | /* | ||
2125 | * Init and exit routines | ||
2126 | */ | ||
2127 | |||
2054 | static int snd_cs4231_ebus_free(cs4231_t *chip) | 2128 | static int snd_cs4231_ebus_free(cs4231_t *chip) |
2055 | { | 2129 | { |
2056 | if (chip->eb2c.regs) { | 2130 | if (chip->c_dma.ebus_info.regs) { |
2057 | ebus_dma_unregister(&chip->eb2c); | 2131 | ebus_dma_unregister(&chip->c_dma.ebus_info); |
2058 | iounmap(chip->eb2c.regs); | 2132 | iounmap(chip->c_dma.ebus_info.regs); |
2059 | } | 2133 | } |
2060 | if (chip->eb2p.regs) { | 2134 | if (chip->p_dma.ebus_info.regs) { |
2061 | ebus_dma_unregister(&chip->eb2p); | 2135 | ebus_dma_unregister(&chip->p_dma.ebus_info); |
2062 | iounmap(chip->eb2p.regs); | 2136 | iounmap(chip->p_dma.ebus_info.regs); |
2063 | } | 2137 | } |
2064 | 2138 | ||
2065 | if (chip->port) | 2139 | if (chip->port) |
@@ -2097,8 +2171,8 @@ static int __init snd_cs4231_ebus_create(snd_card_t *card, | |||
2097 | return -ENOMEM; | 2171 | return -ENOMEM; |
2098 | 2172 | ||
2099 | spin_lock_init(&chip->lock); | 2173 | spin_lock_init(&chip->lock); |
2100 | spin_lock_init(&chip->eb2c.lock); | 2174 | spin_lock_init(&chip->c_dma.ebus_info.lock); |
2101 | spin_lock_init(&chip->eb2p.lock); | 2175 | spin_lock_init(&chip->p_dma.ebus_info.lock); |
2102 | init_MUTEX(&chip->mce_mutex); | 2176 | init_MUTEX(&chip->mce_mutex); |
2103 | init_MUTEX(&chip->open_mutex); | 2177 | init_MUTEX(&chip->open_mutex); |
2104 | chip->flags |= CS4231_FLAG_EBUS; | 2178 | chip->flags |= CS4231_FLAG_EBUS; |
@@ -2106,43 +2180,57 @@ static int __init snd_cs4231_ebus_create(snd_card_t *card, | |||
2106 | chip->dev_u.pdev = edev->bus->self; | 2180 | chip->dev_u.pdev = edev->bus->self; |
2107 | memcpy(&chip->image, &snd_cs4231_original_image, | 2181 | memcpy(&chip->image, &snd_cs4231_original_image, |
2108 | sizeof(snd_cs4231_original_image)); | 2182 | sizeof(snd_cs4231_original_image)); |
2109 | strcpy(chip->eb2c.name, "cs4231(capture)"); | 2183 | strcpy(chip->c_dma.ebus_info.name, "cs4231(capture)"); |
2110 | chip->eb2c.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER; | 2184 | chip->c_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER; |
2111 | chip->eb2c.callback = snd_cs4231_ebus_capture_callback; | 2185 | chip->c_dma.ebus_info.callback = snd_cs4231_ebus_capture_callback; |
2112 | chip->eb2c.client_cookie = chip; | 2186 | chip->c_dma.ebus_info.client_cookie = chip; |
2113 | chip->eb2c.irq = edev->irqs[0]; | 2187 | chip->c_dma.ebus_info.irq = edev->irqs[0]; |
2114 | strcpy(chip->eb2p.name, "cs4231(play)"); | 2188 | strcpy(chip->p_dma.ebus_info.name, "cs4231(play)"); |
2115 | chip->eb2p.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER; | 2189 | chip->p_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER; |
2116 | chip->eb2p.callback = snd_cs4231_ebus_play_callback; | 2190 | chip->p_dma.ebus_info.callback = snd_cs4231_ebus_play_callback; |
2117 | chip->eb2p.client_cookie = chip; | 2191 | chip->p_dma.ebus_info.client_cookie = chip; |
2118 | chip->eb2p.irq = edev->irqs[1]; | 2192 | chip->p_dma.ebus_info.irq = edev->irqs[1]; |
2193 | |||
2194 | chip->p_dma.prepare = _ebus_dma_prepare; | ||
2195 | chip->p_dma.enable = _ebus_dma_enable; | ||
2196 | chip->p_dma.request = _ebus_dma_request; | ||
2197 | chip->p_dma.address = _ebus_dma_addr; | ||
2198 | chip->p_dma.reset = _ebus_dma_reset; | ||
2199 | chip->p_dma.preallocate = _ebus_dma_preallocate; | ||
2200 | |||
2201 | chip->c_dma.prepare = _ebus_dma_prepare; | ||
2202 | chip->c_dma.enable = _ebus_dma_enable; | ||
2203 | chip->c_dma.request = _ebus_dma_request; | ||
2204 | chip->c_dma.address = _ebus_dma_addr; | ||
2205 | chip->c_dma.reset = _ebus_dma_reset; | ||
2206 | chip->c_dma.preallocate = _ebus_dma_preallocate; | ||
2119 | 2207 | ||
2120 | chip->port = ioremap(edev->resource[0].start, 0x10); | 2208 | chip->port = ioremap(edev->resource[0].start, 0x10); |
2121 | chip->eb2p.regs = ioremap(edev->resource[1].start, 0x10); | 2209 | chip->p_dma.ebus_info.regs = ioremap(edev->resource[1].start, 0x10); |
2122 | chip->eb2c.regs = ioremap(edev->resource[2].start, 0x10); | 2210 | chip->c_dma.ebus_info.regs = ioremap(edev->resource[2].start, 0x10); |
2123 | if (!chip->port || !chip->eb2p.regs || !chip->eb2c.regs) { | 2211 | if (!chip->port || !chip->p_dma.ebus_info.regs || !chip->c_dma.ebus_info.regs) { |
2124 | snd_cs4231_ebus_free(chip); | 2212 | snd_cs4231_ebus_free(chip); |
2125 | snd_printdd("cs4231-%d: Unable to map chip registers.\n", dev); | 2213 | snd_printdd("cs4231-%d: Unable to map chip registers.\n", dev); |
2126 | return -EIO; | 2214 | return -EIO; |
2127 | } | 2215 | } |
2128 | 2216 | ||
2129 | if (ebus_dma_register(&chip->eb2c)) { | 2217 | if (ebus_dma_register(&chip->c_dma.ebus_info)) { |
2130 | snd_cs4231_ebus_free(chip); | 2218 | snd_cs4231_ebus_free(chip); |
2131 | snd_printdd("cs4231-%d: Unable to register EBUS capture DMA\n", dev); | 2219 | snd_printdd("cs4231-%d: Unable to register EBUS capture DMA\n", dev); |
2132 | return -EBUSY; | 2220 | return -EBUSY; |
2133 | } | 2221 | } |
2134 | if (ebus_dma_irq_enable(&chip->eb2c, 1)) { | 2222 | if (ebus_dma_irq_enable(&chip->c_dma.ebus_info, 1)) { |
2135 | snd_cs4231_ebus_free(chip); | 2223 | snd_cs4231_ebus_free(chip); |
2136 | snd_printdd("cs4231-%d: Unable to enable EBUS capture IRQ\n", dev); | 2224 | snd_printdd("cs4231-%d: Unable to enable EBUS capture IRQ\n", dev); |
2137 | return -EBUSY; | 2225 | return -EBUSY; |
2138 | } | 2226 | } |
2139 | 2227 | ||
2140 | if (ebus_dma_register(&chip->eb2p)) { | 2228 | if (ebus_dma_register(&chip->p_dma.ebus_info)) { |
2141 | snd_cs4231_ebus_free(chip); | 2229 | snd_cs4231_ebus_free(chip); |
2142 | snd_printdd("cs4231-%d: Unable to register EBUS play DMA\n", dev); | 2230 | snd_printdd("cs4231-%d: Unable to register EBUS play DMA\n", dev); |
2143 | return -EBUSY; | 2231 | return -EBUSY; |
2144 | } | 2232 | } |
2145 | if (ebus_dma_irq_enable(&chip->eb2p, 1)) { | 2233 | if (ebus_dma_irq_enable(&chip->p_dma.ebus_info, 1)) { |
2146 | snd_cs4231_ebus_free(chip); | 2234 | snd_cs4231_ebus_free(chip); |
2147 | snd_printdd("cs4231-%d: Unable to enable EBUS play IRQ\n", dev); | 2235 | snd_printdd("cs4231-%d: Unable to enable EBUS play IRQ\n", dev); |
2148 | return -EBUSY; | 2236 | return -EBUSY; |