author    Steve French <sfrench@us.ibm.com>  2009-06-14 09:34:46 -0400
committer Steve French <sfrench@us.ibm.com>  2009-06-14 09:34:46 -0400
commit    b70b92e41d95fd906f05f6e98f61209201495fa7 (patch)
tree      594890f30f1d89d54eccfd2780dfc033bd2fdd06
parent    1e68b2b2756fc3488ecbade5ad5f13302b3aaafc (diff)
parent    44b7532b8b464f606053562400719c9c21276037 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
129 files changed, 8724 insertions, 4428 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index b95082be4d5e..d21b3b5aa543 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -48,6 +48,7 @@ o procps 3.2.0 # ps --version | |||
48 | o oprofile 0.9 # oprofiled --version | 48 | o oprofile 0.9 # oprofiled --version |
49 | o udev 081 # udevinfo -V | 49 | o udev 081 # udevinfo -V |
50 | o grub 0.93 # grub --version | 50 | o grub 0.93 # grub --version |
51 | o mcelog 0.6 | ||
51 | 52 | ||
52 | Kernel compilation | 53 | Kernel compilation |
53 | ================== | 54 | ================== |
@@ -276,6 +277,16 @@ before running exportfs or mountd. It is recommended that all NFS | |||
276 | services be protected from the internet-at-large by a firewall where | 277 | services be protected from the internet-at-large by a firewall where |
277 | that is possible. | 278 | that is possible. |
278 | 279 | ||
280 | mcelog | ||
281 | ------ | ||
282 | |||
283 | In Linux 2.6.31+ the i386 kernel needs to run the mcelog utility | ||
284 | as a regular cronjob similar to the x86-64 kernel to process and log | ||
285 | machine check events when CONFIG_X86_NEW_MCE is enabled. Machine check | ||
286 | events are errors reported by the CPU. Processing them is strongly encouraged. | ||
287 | All x86-64 kernels since 2.6.4 require the mcelog utility to | ||
288 | process machine checks. | ||
289 | |||
279 | Getting updated software | 290 | Getting updated software |
280 | ======================== | 291 | ======================== |
281 | 292 | ||
@@ -365,6 +376,10 @@ FUSE | |||
365 | ---- | 376 | ---- |
366 | o <http://sourceforge.net/projects/fuse> | 377 | o <http://sourceforge.net/projects/fuse> |
367 | 378 | ||
379 | mcelog | ||
380 | ------ | ||
381 | o <ftp://ftp.kernel.org/pub/linux/utils/cpu/mce/mcelog/> | ||
382 | |||
368 | Networking | 383 | Networking |
369 | ********** | 384 | ********** |
370 | 385 | ||
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index f309d3c6221c..6c456835c1fd 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -91,6 +91,10 @@ Be as specific as possible. The WORST descriptions possible include | |||
91 | things like "update driver X", "bug fix for driver X", or "this patch | 91 | things like "update driver X", "bug fix for driver X", or "this patch |
92 | includes updates for subsystem X. Please apply." | 92 | includes updates for subsystem X. Please apply." |
93 | 93 | ||
94 | The maintainer will thank you if you write your patch description in a | ||
95 | form which can be easily pulled into Linux's source code management | ||
96 | system, git, as a "commit log". See #15, below. | ||
97 | |||
94 | If your description starts to get long, that's a sign that you probably | 98 | If your description starts to get long, that's a sign that you probably |
95 | need to split up your patch. See #3, next. | 99 | need to split up your patch. See #3, next. |
96 | 100 | ||
@@ -405,7 +409,14 @@ person it names. This tag documents that potentially interested parties | |||
405 | have been included in the discussion | 409 | have been included in the discussion |
406 | 410 | ||
407 | 411 | ||
408 | 14) Using Tested-by: and Reviewed-by: | 412 | 14) Using Reported-by:, Tested-by: and Reviewed-by: |
413 | |||
414 | If this patch fixes a problem reported by somebody else, consider adding a | ||
415 | Reported-by: tag to credit the reporter for their contribution. Please | ||
416 | note that this tag should not be added without the reporter's permission, | ||
417 | especially if the problem was not reported in a public forum. That said, | ||
418 | if we diligently credit our bug reporters, they will, hopefully, be | ||
419 | inspired to help us again in the future. | ||
409 | 420 | ||
410 | A Tested-by: tag indicates that the patch has been successfully tested (in | 421 | A Tested-by: tag indicates that the patch has been successfully tested (in |
411 | some environment) by the person named. This tag informs maintainers that | 422 | some environment) by the person named. This tag informs maintainers that |
@@ -444,7 +455,7 @@ offer a Reviewed-by tag for a patch. This tag serves to give credit to | |||
444 | reviewers and to inform maintainers of the degree of review which has been | 455 | reviewers and to inform maintainers of the degree of review which has been |
445 | done on the patch. Reviewed-by: tags, when supplied by reviewers known to | 456 | done on the patch. Reviewed-by: tags, when supplied by reviewers known to |
446 | understand the subject area and to perform thorough reviews, will normally | 457 | understand the subject area and to perform thorough reviews, will normally |
447 | increase the liklihood of your patch getting into the kernel. | 458 | increase the likelihood of your patch getting into the kernel. |
448 | 459 | ||
449 | 460 | ||
450 | 15) The canonical patch format | 461 | 15) The canonical patch format |
@@ -485,12 +496,33 @@ phrase" should not be a filename. Do not use the same "summary | |||
485 | phrase" for every patch in a whole patch series (where a "patch | 496 | phrase" for every patch in a whole patch series (where a "patch |
486 | series" is an ordered sequence of multiple, related patches). | 497 | series" is an ordered sequence of multiple, related patches). |
487 | 498 | ||
488 | Bear in mind that the "summary phrase" of your email becomes | 499 | Bear in mind that the "summary phrase" of your email becomes a |
489 | a globally-unique identifier for that patch. It propagates | 500 | globally-unique identifier for that patch. It propagates all the way |
490 | all the way into the git changelog. The "summary phrase" may | 501 | into the git changelog. The "summary phrase" may later be used in |
491 | later be used in developer discussions which refer to the patch. | 502 | developer discussions which refer to the patch. People will want to |
492 | People will want to google for the "summary phrase" to read | 503 | google for the "summary phrase" to read discussion regarding that |
493 | discussion regarding that patch. | 504 | patch. It will also be the only thing that people may quickly see |
505 | when, two or three months later, they are going through perhaps | ||
506 | thousands of patches using tools such as "gitk" or "git log | ||
507 | --oneline". | ||
508 | |||
509 | For these reasons, the "summary" must be no more than 70-75 | ||
510 | characters, and it must describe both what the patch changes, as well | ||
511 | as why the patch might be necessary. It is challenging to be both | ||
512 | succinct and descriptive, but that is what a well-written summary | ||
513 | should do. | ||
514 | |||
515 | The "summary phrase" may be prefixed by tags enclosed in square | ||
516 | brackets: "Subject: [PATCH tag] <summary phrase>". The tags are not | ||
517 | considered part of the summary phrase, but describe how the patch | ||
518 | should be treated. Common tags might include a version descriptor if | ||
519 | multiple versions of the patch have been sent out in response to | ||
520 | comments (i.e., "v1, v2, v3"), or "RFC" to indicate a request for | ||
521 | comments. If there are four patches in a patch series the individual | ||
522 | patches may be numbered like this: 1/4, 2/4, 3/4, 4/4. This assures | ||
523 | that developers understand the order in which the patches should be | ||
524 | applied and that they have reviewed or applied all of the patches in | ||
525 | the patch series. | ||
494 | 526 | ||
495 | A couple of example Subjects: | 527 | A couple of example Subjects: |
496 | 528 | ||
@@ -510,19 +542,31 @@ the patch author in the changelog. | |||
510 | The explanation body will be committed to the permanent source | 542 | The explanation body will be committed to the permanent source |
511 | changelog, so should make sense to a competent reader who has long | 543 | changelog, so should make sense to a competent reader who has long |
512 | since forgotten the immediate details of the discussion that might | 544 | since forgotten the immediate details of the discussion that might |
513 | have led to this patch. | 545 | have led to this patch. Including symptoms of the failure which the |
546 | patch addresses (kernel log messages, oops messages, etc.) is | ||
547 | especially useful for people who might be searching the commit logs | ||
548 | looking for the applicable patch. If a patch fixes a compile failure, | ||
549 | it may not be necessary to include _all_ of the compile failures; just | ||
550 | enough that it is likely that someone searching for the patch can find | ||
551 | it. As in the "summary phrase", it is important to be both succinct as | ||
552 | well as descriptive. | ||
514 | 553 | ||
515 | The "---" marker line serves the essential purpose of marking for patch | 554 | The "---" marker line serves the essential purpose of marking for patch |
516 | handling tools where the changelog message ends. | 555 | handling tools where the changelog message ends. |
517 | 556 | ||
518 | One good use for the additional comments after the "---" marker is for | 557 | One good use for the additional comments after the "---" marker is for |
519 | a diffstat, to show what files have changed, and the number of inserted | 558 | a diffstat, to show what files have changed, and the number of |
520 | and deleted lines per file. A diffstat is especially useful on bigger | 559 | inserted and deleted lines per file. A diffstat is especially useful |
521 | patches. Other comments relevant only to the moment or the maintainer, | 560 | on bigger patches. Other comments relevant only to the moment or the |
522 | not suitable for the permanent changelog, should also go here. | 561 | maintainer, not suitable for the permanent changelog, should also go |
523 | Use diffstat options "-p 1 -w 70" so that filenames are listed from the | 562 | here. A good example of such comments might be "patch changelogs" |
524 | top of the kernel source tree and don't use too much horizontal space | 563 | which describe what has changed between the v1 and v2 version of the |
525 | (easily fit in 80 columns, maybe with some indentation). | 564 | patch. |
565 | |||
566 | If you are going to include a diffstat after the "---" marker, please | ||
567 | use diffstat options "-p 1 -w 70" so that filenames are listed from | ||
568 | the top of the kernel source tree and don't use too much horizontal | ||
569 | space (easily fit in 80 columns, maybe with some indentation). | ||
526 | 570 | ||
527 | See more details on the proper patch format in the following | 571 | See more details on the proper patch format in the following |
528 | references. | 572 | references. |
diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
index dd48132a74dd..f622c1e9f0f9 100644
--- a/Documentation/development-process/5.Posting
+++ b/Documentation/development-process/5.Posting
@@ -119,7 +119,7 @@ which takes quite a bit of time and thought after the "real work" has been | |||
119 | done. When done properly, though, it is time well spent. | 119 | done. When done properly, though, it is time well spent. |
120 | 120 | ||
121 | 121 | ||
122 | 5.4: PATCH FORMATTING | 122 | 5.4: PATCH FORMATTING AND CHANGELOGS |
123 | 123 | ||
124 | So now you have a perfect series of patches for posting, but the work is | 124 | So now you have a perfect series of patches for posting, but the work is |
125 | not done quite yet. Each patch needs to be formatted into a message which | 125 | not done quite yet. Each patch needs to be formatted into a message which |
@@ -146,8 +146,33 @@ that end, each patch will be composed of the following: | |||
146 | - One or more tag lines, with, at a minimum, one Signed-off-by: line from | 146 | - One or more tag lines, with, at a minimum, one Signed-off-by: line from |
147 | the author of the patch. Tags will be described in more detail below. | 147 | the author of the patch. Tags will be described in more detail below. |
148 | 148 | ||
149 | The above three items should, normally, be the text used when committing | 149 | The items above, together, form the changelog for the patch. Writing good |
150 | the change to a revision control system. They are followed by: | 150 | changelogs is a crucial but often-neglected art; it's worth spending |
151 | another moment discussing this issue. When writing a changelog, you should | ||
152 | bear in mind that a number of different people will be reading your words. | ||
153 | These include subsystem maintainers and reviewers who need to decide | ||
154 | whether the patch should be included, distributors and other maintainers | ||
155 | trying to decide whether a patch should be backported to other kernels, bug | ||
156 | hunters wondering whether the patch is responsible for a problem they are | ||
157 | chasing, users who want to know how the kernel has changed, and more. A | ||
158 | good changelog conveys the needed information to all of these people in the | ||
159 | most direct and concise way possible. | ||
160 | |||
161 | To that end, the summary line should describe the effects of and motivation | ||
162 | for the change as well as possible given the one-line constraint. The | ||
163 | detailed description can then amplify on those topics and provide any | ||
164 | needed additional information. If the patch fixes a bug, cite the commit | ||
165 | which introduced the bug if possible. If a problem is associated with | ||
166 | specific log or compiler output, include that output to help others | ||
167 | searching for a solution to the same problem. If the change is meant to | ||
168 | support other changes coming in a later patch, say so. If internal APIs are | ||
169 | changed, detail those changes and how other developers should respond. In | ||
170 | general, the more you can put yourself into the shoes of everybody who will | ||
171 | be reading your changelog, the better that changelog (and the kernel as a | ||
172 | whole) will be. | ||
173 | |||
174 | Needless to say, the changelog should be the text used when committing the | ||
175 | change to a revision control system. It will be followed by: | ||
151 | 176 | ||
152 | - The patch itself, in the unified ("-u") patch format. Using the "-p" | 177 | - The patch itself, in the unified ("-u") patch format. Using the "-p" |
153 | option to diff will associate function names with changes, making the | 178 | option to diff will associate function names with changes, making the |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index de491a3e2313..ec9ef5d0d7b3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -437,3 +437,13 @@ Why: Superseded by tdfxfb. I2C/DDC support used to live in a separate | |||
437 | driver but this caused driver conflicts. | 437 | driver but this caused driver conflicts. |
438 | Who: Jean Delvare <khali@linux-fr.org> | 438 | Who: Jean Delvare <khali@linux-fr.org> |
439 | Krzysztof Helt <krzysztof.h1@wp.pl> | 439 | Krzysztof Helt <krzysztof.h1@wp.pl> |
440 | |||
441 | ---------------------------- | ||
442 | |||
443 | What: CONFIG_X86_OLD_MCE | ||
444 | When: 2.6.32 | ||
445 | Why: Remove the old legacy 32bit machine check code. This has been | ||
446 | superseded by the newer machine check code from the 64bit port, | ||
447 | but the old version has been kept around for easier testing. Note this | ||
448 | doesn't impact the old P5 and WinChip machine check handlers. | ||
449 | Who: Andi Kleen <andi@firstfloor.org> | ||
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
new file mode 100644
index 000000000000..ed52af60c2d8
--- /dev/null
+++ b/Documentation/filesystems/debugfs.txt
@@ -0,0 +1,158 @@ | |||
1 | Copyright 2009 Jonathan Corbet <corbet@lwn.net> | ||
2 | |||
3 | Debugfs exists as a simple way for kernel developers to make information | ||
4 | available to user space. Unlike /proc, which is only meant for information | ||
5 | about a process, or sysfs, which has strict one-value-per-file rules, | ||
6 | debugfs has no rules at all. Developers can put any information they want | ||
7 | there. The debugfs filesystem is also intended to not serve as a stable | ||
8 | ABI to user space; in theory, there are no stability constraints placed on | ||
9 | files exported there. The real world is not always so simple, though [1]; | ||
10 | even debugfs interfaces are best designed with the idea that they will need | ||
11 | to be maintained forever. | ||
12 | |||
13 | Debugfs is typically mounted with a command like: | ||
14 | |||
15 | mount -t debugfs none /sys/kernel/debug | ||
16 | |||
17 | (Or an equivalent /etc/fstab line). | ||
18 | |||
19 | Note that the debugfs API is exported GPL-only to modules. | ||
20 | |||
21 | Code using debugfs should include <linux/debugfs.h>. Then, the first order | ||
22 | of business will be to create at least one directory to hold a set of | ||
23 | debugfs files: | ||
24 | |||
25 | struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); | ||
26 | |||
27 | This call, if successful, will make a directory called name underneath the | ||
28 | indicated parent directory. If parent is NULL, the directory will be | ||
29 | created in the debugfs root. On success, the return value is a struct | ||
30 | dentry pointer which can be used to create files in the directory (and to | ||
31 | clean it up at the end). A NULL return value indicates that something went | ||
32 | wrong. If ERR_PTR(-ENODEV) is returned, that is an indication that the | ||
33 | kernel has been built without debugfs support and none of the functions | ||
34 | described below will work. | ||
35 | |||
36 | The most general way to create a file within a debugfs directory is with: | ||
37 | |||
38 | struct dentry *debugfs_create_file(const char *name, mode_t mode, | ||
39 | struct dentry *parent, void *data, | ||
40 | const struct file_operations *fops); | ||
41 | |||
42 | Here, name is the name of the file to create, mode describes the access | ||
43 | permissions the file should have, parent indicates the directory which | ||
44 | should hold the file, data will be stored in the i_private field of the | ||
45 | resulting inode structure, and fops is a set of file operations which | ||
46 | implement the file's behavior. At a minimum, the read() and/or write() | ||
47 | operations should be provided; others can be included as needed. Again, | ||
48 | the return value will be a dentry pointer to the created file, NULL for | ||
49 | error, or ERR_PTR(-ENODEV) if debugfs support is missing. | ||
50 | |||
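For illustration only (not part of this patch): a minimal, untested sketch of
the two calls above as they might appear in a module. The module name
"example", the file names and the variable names are hypothetical.

	#include <linux/module.h>
	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/fs.h>

	static struct dentry *example_dir;
	static struct dentry *example_file;
	static const char example_msg[] = "hello from debugfs\n";

	/* read() handler: copy a static buffer out to user space */
	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		return simple_read_from_buffer(buf, count, ppos, example_msg,
					       sizeof(example_msg) - 1);
	}

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.read	= example_read,
	};

	static int __init example_init(void)
	{
		/* directory under the debugfs root */
		example_dir = debugfs_create_dir("example", NULL);
		if (!example_dir || IS_ERR(example_dir))
			return -ENODEV;
		/* read-only file backed by example_fops */
		example_file = debugfs_create_file("message", 0444, example_dir,
						   NULL, &example_fops);
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* cleanup; see the discussion of removal at the end of this file */
		debugfs_remove(example_file);
		debugfs_remove(example_dir);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");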
51 | In a number of cases, the creation of a set of file operations is not | ||
52 | actually necessary; the debugfs code provides a number of helper functions | ||
53 | for simple situations. Files containing a single integer value can be | ||
54 | created with any of: | ||
55 | |||
56 | struct dentry *debugfs_create_u8(const char *name, mode_t mode, | ||
57 | struct dentry *parent, u8 *value); | ||
58 | struct dentry *debugfs_create_u16(const char *name, mode_t mode, | ||
59 | struct dentry *parent, u16 *value); | ||
60 | struct dentry *debugfs_create_u32(const char *name, mode_t mode, | ||
61 | struct dentry *parent, u32 *value); | ||
62 | struct dentry *debugfs_create_u64(const char *name, mode_t mode, | ||
63 | struct dentry *parent, u64 *value); | ||
64 | |||
65 | These files support both reading and writing the given value; if a specific | ||
66 | file should not be written to, simply set the mode bits accordingly. The | ||
67 | values in these files are in decimal; if hexadecimal is more appropriate, | ||
68 | the following functions can be used instead: | ||
69 | |||
70 | struct dentry *debugfs_create_x8(const char *name, mode_t mode, | ||
71 | struct dentry *parent, u8 *value); | ||
72 | struct dentry *debugfs_create_x16(const char *name, mode_t mode, | ||
73 | struct dentry *parent, u16 *value); | ||
74 | struct dentry *debugfs_create_x32(const char *name, mode_t mode, | ||
75 | struct dentry *parent, u32 *value); | ||
76 | |||
77 | Note that there is no debugfs_create_x64(). | ||
78 | |||
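For illustration only (not part of this patch), building on the hypothetical
"example" module sketched earlier: exporting one decimal and one hexadecimal
counter. The variable and file names are made up.

	static u32 example_count;	/* will read back in decimal */
	static u32 example_flags;	/* will read back in hex */

	/* inside example_init(), after example_dir has been created: */
	debugfs_create_u32("count", 0644, example_dir, &example_count);
	debugfs_create_x32("flags", 0644, example_dir, &example_flags);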
79 | These functions are useful as long as the developer knows the size of the | ||
80 | value to be exported. Some types can have different widths on different | ||
81 | architectures, though, complicating the situation somewhat. There is a | ||
82 | function meant to help out in one special case: | ||
83 | |||
84 | struct dentry *debugfs_create_size_t(const char *name, mode_t mode, | ||
85 | struct dentry *parent, | ||
86 | size_t *value); | ||
87 | |||
88 | As might be expected, this function will create a debugfs file to represent | ||
89 | a variable of type size_t. | ||
90 | |||
91 | Boolean values can be placed in debugfs with: | ||
92 | |||
93 | struct dentry *debugfs_create_bool(const char *name, mode_t mode, | ||
94 | struct dentry *parent, u32 *value); | ||
95 | |||
96 | A read on the resulting file will yield either Y (for non-zero values) or | ||
97 | N, followed by a newline. If written to, it will accept either upper- or | ||
98 | lower-case values, or 1 or 0. Any other input will be silently ignored. | ||
99 | |||
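Again for illustration only (not part of this patch), continuing the
hypothetical module above; note that the backing variable is a u32 even
though the file behaves as a boolean.

	static u32 example_enabled;

	/* inside example_init(): */
	debugfs_create_bool("enabled", 0644, example_dir, &example_enabled);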
100 | Finally, a block of arbitrary binary data can be exported with: | ||
101 | |||
102 | struct debugfs_blob_wrapper { | ||
103 | void *data; | ||
104 | unsigned long size; | ||
105 | }; | ||
106 | |||
107 | struct dentry *debugfs_create_blob(const char *name, mode_t mode, | ||
108 | struct dentry *parent, | ||
109 | struct debugfs_blob_wrapper *blob); | ||
110 | |||
111 | A read of this file will return the data pointed to by the | ||
112 | debugfs_blob_wrapper structure. Some drivers use "blobs" as a simple way | ||
113 | to return several lines of (static) formatted text output. This function | ||
114 | can be used to export binary information, but there does not appear to be | ||
115 | any code which does so in the mainline. Note that all files created with | ||
116 | debugfs_create_blob() are read-only. | ||
117 | |||
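A hypothetical use of the blob wrapper in the same sketched module (not part
of this patch) might export a small block of static text:

	static char example_info[] = "board: example\nrevision: 2\n";

	static struct debugfs_blob_wrapper example_blob = {
		.data	= example_info,
		.size	= sizeof(example_info) - 1,
	};

	/* inside example_init(); blob files are read-only regardless of mode */
	debugfs_create_blob("info", 0444, example_dir, &example_blob);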
118 | There are a couple of other directory-oriented helper functions: | ||
119 | |||
120 | struct dentry *debugfs_rename(struct dentry *old_dir, | ||
121 | struct dentry *old_dentry, | ||
122 | struct dentry *new_dir, | ||
123 | const char *new_name); | ||
124 | |||
125 | struct dentry *debugfs_create_symlink(const char *name, | ||
126 | struct dentry *parent, | ||
127 | const char *target); | ||
128 | |||
129 | A call to debugfs_rename() will give a new name to an existing debugfs | ||
130 | file, possibly in a different directory. The new_name must not exist prior | ||
131 | to the call; the return value is old_dentry with updated information. | ||
132 | Symbolic links can be created with debugfs_create_symlink(). | ||
133 | |||
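For example (illustrative only, not part of this patch), the sketched module
could publish a symlink in the debugfs root pointing at its own directory:

	/* <debugfs>/example-link -> <debugfs>/example */
	debugfs_create_symlink("example-link", NULL, "example");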
134 | There is one important thing that all debugfs users must take into account: | ||
135 | there is no automatic cleanup of any directories created in debugfs. If a | ||
136 | module is unloaded without explicitly removing debugfs entries, the result | ||
137 | will be a lot of stale pointers and no end of highly antisocial behavior. | ||
138 | So all debugfs users - at least those which can be built as modules - must | ||
139 | be prepared to remove all files and directories they create there. A file | ||
140 | can be removed with: | ||
141 | |||
142 | void debugfs_remove(struct dentry *dentry); | ||
143 | |||
144 | The dentry value can be NULL, in which case nothing will be removed. | ||
145 | |||
146 | Once upon a time, debugfs users were required to remember the dentry | ||
147 | pointer for every debugfs file they created so that all files could be | ||
148 | cleaned up. We live in more civilized times now, though, and debugfs users | ||
149 | can call: | ||
150 | |||
151 | void debugfs_remove_recursive(struct dentry *dentry); | ||
152 | |||
153 | If this function is passed a pointer for the dentry corresponding to the | ||
154 | top-level directory, the entire hierarchy below that directory will be | ||
155 | removed. | ||
156 | |||
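In the hypothetical module sketched earlier (not part of this patch), the two
debugfs_remove() calls in example_exit() could therefore be collapsed into a
single recursive removal:

	static void __exit example_exit(void)
	{
		/* removes "example" and everything created beneath it */
		debugfs_remove_recursive(example_dir);
	}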
157 | Notes: | ||
158 | [1] http://lwn.net/Articles/309298/ | ||
diff --git a/Documentation/i2c/busses/i2c-ocores b/Documentation/i2c/busses/i2c-ocores
index cfcebb10d14e..c269aaa2f26a 100644
--- a/Documentation/i2c/busses/i2c-ocores
+++ b/Documentation/i2c/busses/i2c-ocores
@@ -20,6 +20,8 @@ platform_device with the base address and interrupt number. The | |||
20 | dev.platform_data of the device should also point to a struct | 20 | dev.platform_data of the device should also point to a struct |
21 | ocores_i2c_platform_data (see linux/i2c-ocores.h) describing the | 21 | ocores_i2c_platform_data (see linux/i2c-ocores.h) describing the |
22 | distance between registers and the input clock speed. | 22 | distance between registers and the input clock speed. |
23 | There is also a possibility to attach a list of i2c_board_info which | ||
24 | the i2c-ocores driver will add to the bus upon creation. | ||
23 | 25 | ||
24 | E.G. something like: | 26 | E.G. something like: |
25 | 27 | ||
@@ -36,9 +38,24 @@ static struct resource ocores_resources[] = { | |||
36 | }, | 38 | }, |
37 | }; | 39 | }; |
38 | 40 | ||
41 | /* optional board info */ | ||
42 | struct i2c_board_info ocores_i2c_board_info[] = { | ||
43 | { | ||
44 | I2C_BOARD_INFO("tsc2003", 0x48), | ||
45 | .platform_data = &tsc2003_platform_data, | ||
46 | .irq = TSC_IRQ | ||
47 | }, | ||
48 | { | ||
49 | I2C_BOARD_INFO("adv7180", 0x42 >> 1), | ||
50 | .irq = ADV_IRQ | ||
51 | } | ||
52 | }; | ||
53 | |||
39 | static struct ocores_i2c_platform_data myi2c_data = { | 54 | static struct ocores_i2c_platform_data myi2c_data = { |
40 | .regstep = 2, /* two bytes between registers */ | 55 | .regstep = 2, /* two bytes between registers */ |
41 | .clock_khz = 50000, /* input clock of 50MHz */ | 56 | .clock_khz = 50000, /* input clock of 50MHz */ |
57 | .devices = ocores_i2c_board_info, /* optional table of devices */ | ||
58 | .num_devices = ARRAY_SIZE(ocores_i2c_board_info), /* table size */ | ||
42 | }; | 59 | }; |
43 | 60 | ||
44 | static struct platform_device myi2c = { | 61 | static struct platform_device myi2c = { |
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 2db5893d6c97..29a6ff8bc7d3 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -5,21 +5,51 @@ only the AMD64 specific ones are listed here. | |||
5 | 5 | ||
6 | Machine check | 6 | Machine check |
7 | 7 | ||
8 | mce=off disable machine check | 8 | Please see Documentation/x86/x86_64/machinecheck for sysfs runtime tunables. |
9 | mce=bootlog Enable logging of machine checks left over from booting. | 9 | |
10 | Disabled by default on AMD because some BIOS leave bogus ones. | 10 | mce=off |
11 | If your BIOS doesn't do that it's a good idea to enable though | 11 | Disable machine check |
12 | to make sure you log even machine check events that result | 12 | mce=no_cmci |
13 | in a reboot. On Intel systems it is enabled by default. | 13 | Disable CMCI(Corrected Machine Check Interrupt) that |
14 | Intel processors support. Disabling it is usually | ||
15 | not recommended, but it might be handy if your hardware | ||
16 | is misbehaving. | ||
17 | Note that you'll get more problems without CMCI than with | ||
19 | it, due to the shared banks, i.e. you might get duplicated | ||
19 | error logs. | ||
20 | mce=dont_log_ce | ||
21 | Don't make logs for corrected errors. All events reported | ||
22 | as corrected are silently cleared by OS. | ||
23 | This option may be useful if you have no interest in | ||
24 | corrected errors. | ||
25 | mce=ignore_ce | ||
26 | Disable features for corrected errors, e.g. polling timer | ||
27 | and CMCI. All events reported as corrected are not cleared | ||
28 | by the OS and remain in its error banks. | ||
29 | Usually this disablement is not recommended, however if | ||
30 | there is an agent checking/clearing corrected errors | ||
31 | (e.g. BIOS or hardware monitoring applications), conflicting | ||
32 | with OS's error handling, and you cannot deactivate the agent, | ||
33 | then this option will be a help. | ||
34 | mce=bootlog | ||
35 | Enable logging of machine checks left over from booting. | ||
36 | Disabled by default on AMD because some BIOS leave bogus ones. | ||
37 | If your BIOS doesn't do that it's a good idea to enable though | ||
38 | to make sure you log even machine check events that result | ||
39 | in a reboot. On Intel systems it is enabled by default. | ||
14 | mce=nobootlog | 40 | mce=nobootlog |
15 | Disable boot machine check logging. | 41 | Disable boot machine check logging. |
16 | mce=tolerancelevel (number) | 42 | mce=tolerancelevel[,monarchtimeout] (number,number) |
43 | tolerance levels: | ||
17 | 0: always panic on uncorrected errors, log corrected errors | 44 | 0: always panic on uncorrected errors, log corrected errors |
18 | 1: panic or SIGBUS on uncorrected errors, log corrected errors | 45 | 1: panic or SIGBUS on uncorrected errors, log corrected errors |
19 | 2: SIGBUS or log uncorrected errors, log corrected errors | 46 | 2: SIGBUS or log uncorrected errors, log corrected errors |
20 | 3: never panic or SIGBUS, log all errors (for testing only) | 47 | 3: never panic or SIGBUS, log all errors (for testing only) |
21 | Default is 1 | 48 | Default is 1 |
22 | Can be also set using sysfs which is preferable. | 49 | Can be also set using sysfs which is preferable. |
50 | monarchtimeout: | ||
51 | Sets the time in us to wait for other CPUs on machine checks. 0 | ||
52 | to disable. | ||
23 | 53 | ||
24 | nomce (for compatibility with i386): same as mce=off | 54 | nomce (for compatibility with i386): same as mce=off |
25 | 55 | ||
diff --git a/Documentation/x86/x86_64/machinecheck b/Documentation/x86/x86_64/machinecheck
index a05e58e7b159..b1fb30273286 100644
--- a/Documentation/x86/x86_64/machinecheck
+++ b/Documentation/x86/x86_64/machinecheck
@@ -41,7 +41,9 @@ check_interval | |||
41 | the polling interval. When the poller stops finding MCEs, it | 41 | the polling interval. When the poller stops finding MCEs, it |
42 | triggers an exponential backoff (poll less often) on the polling | 42 | triggers an exponential backoff (poll less often) on the polling |
43 | interval. The check_interval variable is both the initial and | 43 | interval. The check_interval variable is both the initial and |
44 | maximum polling interval. | 44 | maximum polling interval. 0 means no polling for corrected machine |
45 | check errors (but some corrected errors might be still reported | ||
46 | in other ways) | ||
45 | 47 | ||
46 | tolerant | 48 | tolerant |
47 | Tolerance level. When a machine check exception occurs for a non | 49 | Tolerance level. When a machine check exception occurs for a non |
@@ -67,6 +69,10 @@ trigger | |||
67 | Program to run when a machine check event is detected. | 69 | Program to run when a machine check event is detected. |
68 | This is an alternative to running mcelog regularly from cron | 70 | This is an alternative to running mcelog regularly from cron |
69 | and allows to detect events faster. | 71 | and allows to detect events faster. |
72 | monarch_timeout | ||
73 | How long to wait for the other CPUs to machine check too on an | ||
74 | exception. 0 to disable waiting for other CPUs. | ||
75 | Unit: us | ||
70 | 76 | ||
71 | TBD document entries for AMD threshold interrupt configuration | 77 | TBD document entries for AMD threshold interrupt configuration |
72 | 78 | ||
diff --git a/arch/avr32/boards/atngw100/Kconfig b/arch/avr32/boards/atngw100/Kconfig
index b3f99477bbeb..be27a0218ab4 100644
--- a/arch/avr32/boards/atngw100/Kconfig
+++ b/arch/avr32/boards/atngw100/Kconfig
@@ -2,8 +2,15 @@ | |||
2 | 2 | ||
3 | if BOARD_ATNGW100 | 3 | if BOARD_ATNGW100 |
4 | 4 | ||
5 | choice | ||
6 | prompt "Select an NGW100 add-on board to support" | ||
7 | default BOARD_ATNGW100_ADDON_NONE | ||
8 | |||
9 | config BOARD_ATNGW100_ADDON_NONE | ||
10 | bool "None" | ||
11 | |||
5 | config BOARD_ATNGW100_EVKLCD10X | 12 | config BOARD_ATNGW100_EVKLCD10X |
6 | bool "Add support for EVKLCD10X addon board" | 13 | bool "EVKLCD10X addon board" |
7 | help | 14 | help |
8 | This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA) | 15 | This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA) |
9 | addon board for the NGW100. By enabling this the LCD controller and | 16 | addon board for the NGW100. By enabling this the LCD controller and |
@@ -14,7 +21,19 @@ config BOARD_ATNGW100_EVKLCD10X | |||
14 | The MCI pins can be reenabled by editing the "add device function" but | 21 | The MCI pins can be reenabled by editing the "add device function" but |
15 | this may break the setup for other displays that use these pins. | 22 | this may break the setup for other displays that use these pins. |
16 | 23 | ||
17 | Choose 'Y' here if you have a EVKLCD100/101 connected to the NGW100. | 24 | config BOARD_ATNGW100_MRMT |
25 | bool "Mediama RMT1/2 add-on board" | ||
26 | help | ||
27 | This enables support for the Mediama RMT1 or RMT2 board. | ||
28 | RMT provides LCD support, AC97 codec and other | ||
29 | optional peripherals to the Atmel NGW100. | ||
30 | |||
31 | This choice disables the detect pin and the write-protect pin for the | ||
32 | MCI platform device, since it conflicts with the LCD platform device. | ||
33 | The MCI pins can be reenabled by editing the "add device function" but | ||
34 | this may break the setup for other displays that use these pins. | ||
35 | |||
36 | endchoice | ||
18 | 37 | ||
19 | choice | 38 | choice |
20 | prompt "LCD panel resolution on EVKLCD10X" | 39 | prompt "LCD panel resolution on EVKLCD10X" |
@@ -32,4 +51,8 @@ config BOARD_ATNGW100_EVKLCD10X_POW_QVGA | |||
32 | 51 | ||
33 | endchoice | 52 | endchoice |
34 | 53 | ||
54 | if BOARD_ATNGW100_MRMT | ||
55 | source "arch/avr32/boards/atngw100/Kconfig_mrmt" | ||
56 | endif | ||
57 | |||
35 | endif # BOARD_ATNGW100 | 58 | endif # BOARD_ATNGW100 |
diff --git a/arch/avr32/boards/atngw100/Kconfig_mrmt b/arch/avr32/boards/atngw100/Kconfig_mrmt
new file mode 100644
index 000000000000..9a199a207f3c
--- /dev/null
+++ b/arch/avr32/boards/atngw100/Kconfig_mrmt
@@ -0,0 +1,80 @@ | |||
1 | # RMT for NGW100 customization | ||
2 | |||
3 | choice | ||
4 | prompt "RMT Version" | ||
5 | help | ||
6 | Select the RMTx board version. | ||
7 | |||
8 | config BOARD_MRMT_REV1 | ||
9 | bool "RMT1" | ||
10 | config BOARD_MRMT_REV2 | ||
11 | bool "RMT2" | ||
12 | |||
13 | endchoice | ||
14 | |||
15 | config BOARD_MRMT_AC97 | ||
16 | bool "Enable AC97 CODEC" | ||
17 | help | ||
18 | Enable the UCB1400 AC97 CODEC driver. | ||
19 | |||
20 | choice | ||
21 | prompt "Touchscreen Driver" | ||
22 | default BOARD_MRMT_ADS7846_TS | ||
23 | |||
24 | config BOARD_MRMT_UCB1400_TS | ||
25 | bool "Use UCB1400 Touchscreen" | ||
26 | |||
27 | config BOARD_MRMT_ADS7846_TS | ||
28 | bool "Use ADS7846 Touchscreen" | ||
29 | |||
30 | endchoice | ||
31 | |||
32 | choice | ||
33 | prompt "RMTx LCD Selection" | ||
34 | default BOARD_MRMT_LCD_DISABLE | ||
35 | |||
36 | config BOARD_MRMT_LCD_DISABLE | ||
37 | bool "LCD Disabled" | ||
38 | |||
39 | config BOARD_MRMT_LCD_LQ043T3DX0X | ||
40 | bool "Sharp LQ043T3DX0x or compatible" | ||
41 | help | ||
42 | If using RMT2, be sure to load the resistor pack selectors accordingly | ||
43 | |||
44 | if BOARD_MRMT_REV2 | ||
45 | config BOARD_MRMT_LCD_KWH043GM08 | ||
46 | bool "Formike KWH043GM08 or compatible" | ||
47 | help | ||
48 | Be sure to load the RMT2 resistor pack selectors accordingly | ||
49 | endif | ||
50 | |||
51 | endchoice | ||
52 | |||
53 | if !BOARD_MRMT_LCD_DISABLE | ||
54 | config BOARD_MRMT_BL_PWM | ||
55 | bool "Use PWM control for LCD Backlight" | ||
56 | help | ||
57 | Use PWM driver for controlling LCD Backlight. | ||
58 | Otherwise, LCD Backlight is always on. | ||
59 | endif | ||
60 | |||
61 | config BOARD_MRMT_RTC_I2C | ||
62 | bool "Use External RTC on I2C Bus" | ||
63 | help | ||
64 | RMT1 has an optional RTC device on the I2C bus. | ||
65 | It is a SII S35390A. Be sure to select the | ||
66 | matching RTC driver. | ||
67 | |||
68 | choice | ||
69 | prompt "Wireless Module on ttyS2" | ||
70 | default BOARD_MRMT_WIRELESS_ZB | ||
71 | |||
72 | config BOARD_MRMT_WIRELESS_ZB | ||
73 | bool "Use ZigBee/802.15.4 Module" | ||
74 | |||
75 | config BOARD_MRMT_WIRELESS_BT | ||
76 | bool "Use Bluetooth (HCI) Module" | ||
77 | |||
78 | config BOARD_MRMT_WIRELESS_NONE | ||
79 | bool "Not Installed" | ||
80 | endchoice | ||
diff --git a/arch/avr32/boards/atngw100/Makefile b/arch/avr32/boards/atngw100/Makefile
index 6376f5322e4d..f4ebe42a8254 100644
--- a/arch/avr32/boards/atngw100/Makefile
+++ b/arch/avr32/boards/atngw100/Makefile
@@ -1,2 +1,3 @@ | |||
1 | obj-y += setup.o flash.o | 1 | obj-y += setup.o flash.o |
2 | obj-$(CONFIG_BOARD_ATNGW100_EVKLCD10X) += evklcd10x.o | 2 | obj-$(CONFIG_BOARD_ATNGW100_EVKLCD10X) += evklcd10x.o |
3 | obj-$(CONFIG_BOARD_ATNGW100_MRMT) += mrmt.o | ||
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
new file mode 100644
index 000000000000..bf78e516a85f
--- /dev/null
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * Board-specific setup code for Remote Media Terminal 1 (RMT1) | ||
3 | * add-on board for the ATNGW100 Network Gateway | ||
4 | * | ||
5 | * Copyright (C) 2008 Mediama Technologies | ||
6 | * Based on ATNGW100 Network Gateway (Copyright (C) Atmel) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <linux/gpio.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/linkage.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/fb.h> | ||
19 | #include <linux/leds.h> | ||
20 | #include <linux/input.h> | ||
21 | #include <linux/gpio_keys.h> | ||
22 | #include <linux/atmel_serial.h> | ||
23 | #include <linux/spi/spi.h> | ||
24 | #include <linux/spi/ads7846.h> | ||
25 | |||
26 | #include <video/atmel_lcdc.h> | ||
27 | #include <sound/atmel-ac97c.h> | ||
28 | |||
29 | #include <asm/delay.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/setup.h> | ||
32 | |||
33 | #include <mach/at32ap700x.h> | ||
34 | #include <mach/board.h> | ||
35 | #include <mach/init.h> | ||
36 | #include <mach/portmux.h> | ||
37 | |||
38 | /* Define board-specific GPIO assignments */ | ||
39 | #define PIN_LCD_BL GPIO_PIN_PA(28) | ||
40 | #define PWM_CH_BL 0 /* Must match with GPIO pin definition */ | ||
41 | #define PIN_LCD_DISP GPIO_PIN_PA(31) | ||
42 | #define PIN_AC97_RST_N GPIO_PIN_PA(30) | ||
43 | #define PB_EXTINT_BASE 25 | ||
44 | #define TS_IRQ 0 | ||
45 | #define PIN_TS_EXTINT GPIO_PIN_PB(PB_EXTINT_BASE+TS_IRQ) | ||
46 | #define PIN_PB_LEFT GPIO_PIN_PB(11) | ||
47 | #define PIN_PB_RIGHT GPIO_PIN_PB(12) | ||
48 | #define PIN_PWR_SW_N GPIO_PIN_PB(14) | ||
49 | #define PIN_PWR_ON GPIO_PIN_PB(13) | ||
50 | #define PIN_ZB_RST_N GPIO_PIN_PA(21) | ||
51 | #define PIN_BT_RST GPIO_PIN_PA(22) | ||
52 | #define PIN_LED_SYS GPIO_PIN_PA(16) | ||
53 | #define PIN_LED_A GPIO_PIN_PA(19) | ||
54 | #define PIN_LED_B GPIO_PIN_PE(19) | ||
55 | |||
56 | #ifdef CONFIG_BOARD_MRMT_LCD_LQ043T3DX0X | ||
57 | /* Sharp LQ043T3DX0x (or compatible) panel */ | ||
58 | static struct fb_videomode __initdata lcd_fb_modes[] = { | ||
59 | { | ||
60 | .name = "480x272 @ 59.94Hz", | ||
61 | .refresh = 59.94, | ||
62 | .xres = 480, .yres = 272, | ||
63 | .pixclock = KHZ2PICOS(9000), | ||
64 | |||
65 | .left_margin = 2, .right_margin = 2, | ||
66 | .upper_margin = 3, .lower_margin = 9, | ||
67 | .hsync_len = 41, .vsync_len = 1, | ||
68 | |||
69 | .sync = 0, | ||
70 | .vmode = FB_VMODE_NONINTERLACED, | ||
71 | }, | ||
72 | }; | ||
73 | |||
74 | static struct fb_monspecs __initdata lcd_fb_default_monspecs = { | ||
75 | .manufacturer = "SHA", | ||
76 | .monitor = "LQ043T3DX02", | ||
77 | .modedb = lcd_fb_modes, | ||
78 | .modedb_len = ARRAY_SIZE(lcd_fb_modes), | ||
79 | .hfmin = 14915, | ||
80 | .hfmax = 17638, | ||
81 | .vfmin = 53, | ||
82 | .vfmax = 61, | ||
83 | .dclkmax = 9260000, | ||
84 | }; | ||
85 | |||
86 | static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { | ||
87 | .default_bpp = 24, | ||
88 | .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, | ||
89 | .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ||
90 | | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ||
91 | | ATMEL_LCDC_INVCLK_NORMAL | ||
92 | | ATMEL_LCDC_MEMOR_BIG), | ||
93 | .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, | ||
94 | .default_monspecs = &lcd_fb_default_monspecs, | ||
95 | .guard_time = 2, | ||
96 | }; | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_BOARD_MRMT_LCD_KWH043GM08 | ||
100 | /* Sharp KWH043GM08-Fxx (or compatible) panel */ | ||
101 | static struct fb_videomode __initdata lcd_fb_modes[] = { | ||
102 | { | ||
103 | .name = "480x272 @ 59.94Hz", | ||
104 | .refresh = 59.94, | ||
105 | .xres = 480, .yres = 272, | ||
106 | .pixclock = KHZ2PICOS(9000), | ||
107 | |||
108 | .left_margin = 2, .right_margin = 2, | ||
109 | .upper_margin = 3, .lower_margin = 9, | ||
110 | .hsync_len = 41, .vsync_len = 1, | ||
111 | |||
112 | .sync = 0, | ||
113 | .vmode = FB_VMODE_NONINTERLACED, | ||
114 | }, | ||
115 | }; | ||
116 | |||
117 | static struct fb_monspecs __initdata lcd_fb_default_monspecs = { | ||
118 | .manufacturer = "FOR", | ||
119 | .monitor = "KWH043GM08", | ||
120 | .modedb = lcd_fb_modes, | ||
121 | .modedb_len = ARRAY_SIZE(lcd_fb_modes), | ||
122 | .hfmin = 14915, | ||
123 | .hfmax = 17638, | ||
124 | .vfmin = 53, | ||
125 | .vfmax = 61, | ||
126 | .dclkmax = 9260000, | ||
127 | }; | ||
128 | |||
129 | static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { | ||
130 | .default_bpp = 24, | ||
131 | .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, | ||
132 | .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ||
133 | | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ||
134 | | ATMEL_LCDC_INVCLK_INVERTED | ||
135 | | ATMEL_LCDC_MEMOR_BIG), | ||
136 | .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, | ||
137 | .default_monspecs = &lcd_fb_default_monspecs, | ||
138 | .guard_time = 2, | ||
139 | }; | ||
140 | #endif | ||
141 | |||
142 | #ifdef CONFIG_BOARD_MRMT_AC97 | ||
143 | static struct ac97c_platform_data __initdata ac97c0_data = { | ||
144 | .reset_pin = PIN_AC97_RST_N, | ||
145 | }; | ||
146 | #endif | ||
147 | |||
148 | #ifdef CONFIG_BOARD_MRMT_UCB1400_TS | ||
149 | /* NOTE: IRQ assignment relies on kernel module parameter */ | ||
150 | static struct platform_device rmt_ts_device = { | ||
151 | .name = "ucb1400_ts", | ||
152 | .id = -1, | ||
153 | }; | ||
155 | #endif | ||
156 | |||
157 | #ifdef CONFIG_BOARD_MRMT_BL_PWM | ||
158 | /* PWM LEDs: LCD Backlight, etc */ | ||
159 | static struct gpio_led rmt_pwm_led[] = { | ||
160 | /* here the "gpio" is actually a PWM channel */ | ||
161 | { .name = "backlight", .gpio = PWM_CH_BL, }, | ||
162 | }; | ||
163 | |||
164 | static struct gpio_led_platform_data rmt_pwm_led_data = { | ||
165 | .num_leds = ARRAY_SIZE(rmt_pwm_led), | ||
166 | .leds = rmt_pwm_led, | ||
167 | }; | ||
168 | |||
169 | static struct platform_device rmt_pwm_led_dev = { | ||
170 | .name = "leds-atmel-pwm", | ||
171 | .id = -1, | ||
172 | .dev = { | ||
173 | .platform_data = &rmt_pwm_led_data, | ||
174 | }, | ||
175 | }; | ||
176 | #endif | ||
177 | |||
178 | #ifdef CONFIG_BOARD_MRMT_ADS7846_TS | ||
179 | static int ads7846_pendown_state(void) | ||
180 | { | ||
181 | return !gpio_get_value( PIN_TS_EXTINT ); /* PENIRQ.*/ | ||
182 | } | ||
183 | |||
184 | static struct ads7846_platform_data ads_info = { | ||
185 | .model = 7846, | ||
186 | .keep_vref_on = 0, /* Use external VREF pin */ | ||
187 | .vref_delay_usecs = 0, | ||
188 | .vref_mv = 3300, /* VREF = 3.3V */ | ||
189 | .settle_delay_usecs = 800, | ||
190 | .penirq_recheck_delay_usecs = 800, | ||
191 | .x_plate_ohms = 750, | ||
192 | .y_plate_ohms = 300, | ||
193 | .pressure_max = 4096, | ||
194 | .debounce_max = 1, | ||
195 | .debounce_rep = 0, | ||
196 | .debounce_tol = (~0), | ||
197 | .get_pendown_state = ads7846_pendown_state, | ||
198 | .filter = NULL, | ||
199 | .filter_init = NULL, | ||
200 | }; | ||
201 | |||
202 | static struct spi_board_info spi01_board_info[] __initdata = { | ||
203 | { | ||
204 | .modalias = "ads7846", | ||
205 | .max_speed_hz = 31250*26, | ||
206 | .bus_num = 0, | ||
207 | .chip_select = 1, | ||
208 | .platform_data = &ads_info, | ||
209 | .irq = AT32_EXTINT(TS_IRQ), | ||
210 | }, | ||
211 | }; | ||
212 | #endif | ||
213 | |||
214 | /* GPIO Keys: left, right, power, etc */ | ||
215 | static const struct gpio_keys_button rmt_gpio_keys_buttons[] = { | ||
216 | [0] = { | ||
217 | .type = EV_KEY, | ||
218 | .code = KEY_POWER, | ||
219 | .gpio = PIN_PWR_SW_N, | ||
220 | .active_low = 1, | ||
221 | .desc = "power button", | ||
222 | }, | ||
223 | [1] = { | ||
224 | .type = EV_KEY, | ||
225 | .code = KEY_LEFT, | ||
226 | .gpio = PIN_PB_LEFT, | ||
227 | .active_low = 1, | ||
228 | .desc = "left button", | ||
229 | }, | ||
230 | [2] = { | ||
231 | .type = EV_KEY, | ||
232 | .code = KEY_RIGHT, | ||
233 | .gpio = PIN_PB_RIGHT, | ||
234 | .active_low = 1, | ||
235 | .desc = "right button", | ||
236 | }, | ||
237 | }; | ||
238 | |||
239 | static const struct gpio_keys_platform_data rmt_gpio_keys_data = { | ||
240 | .nbuttons = ARRAY_SIZE(rmt_gpio_keys_buttons), | ||
241 | .buttons = (void *) rmt_gpio_keys_buttons, | ||
242 | }; | ||
243 | |||
244 | static struct platform_device rmt_gpio_keys = { | ||
245 | .name = "gpio-keys", | ||
246 | .id = -1, | ||
247 | .dev = { | ||
248 | .platform_data = (void *) &rmt_gpio_keys_data, | ||
249 | } | ||
250 | }; | ||
251 | |||
252 | #ifdef CONFIG_BOARD_MRMT_RTC_I2C | ||
253 | static struct i2c_board_info __initdata mrmt1_i2c_rtc = { | ||
254 | I2C_BOARD_INFO("s35390a", 0x30), | ||
255 | .irq = 0, | ||
256 | }; | ||
257 | #endif | ||
258 | |||
259 | static void mrmt_power_off(void) | ||
260 | { | ||
261 | /* PWR_ON=0 will force power off */ | ||
262 | gpio_set_value( PIN_PWR_ON, 0 ); | ||
263 | } | ||
264 | |||
265 | static int __init mrmt1_init(void) | ||
266 | { | ||
267 | gpio_set_value( PIN_PWR_ON, 1 ); /* Ensure PWR_ON is enabled */ | ||
268 | |||
269 | pm_power_off = mrmt_power_off; | ||
270 | |||
271 | /* Setup USARTS (other than console) */ | ||
272 | at32_map_usart(2, 1, 0); /* USART 2: /dev/ttyS1, RMT1:DB9M */ | ||
273 | at32_map_usart(3, 2, ATMEL_USART_RTS | ATMEL_USART_CTS); | ||
274 | /* USART 3: /dev/ttyS2, RMT1:Wireless, w/ RTS/CTS */ | ||
275 | at32_add_device_usart(1); | ||
276 | at32_add_device_usart(2); | ||
277 | |||
278 | /* Select GPIO Key pins */ | ||
279 | at32_select_gpio( PIN_PWR_SW_N, AT32_GPIOF_DEGLITCH); | ||
280 | at32_select_gpio( PIN_PB_LEFT, AT32_GPIOF_DEGLITCH); | ||
281 | at32_select_gpio( PIN_PB_RIGHT, AT32_GPIOF_DEGLITCH); | ||
282 | platform_device_register(&rmt_gpio_keys); | ||
283 | |||
284 | #ifdef CONFIG_BOARD_MRMT_RTC_I2C | ||
285 | i2c_register_board_info(0, &mrmt1_i2c_rtc, 1); | ||
286 | #endif | ||
287 | |||
288 | #ifndef CONFIG_BOARD_MRMT_LCD_DISABLE | ||
289 | /* User "alternate" LCDC inferface on Port E & D */ | ||
290 | /* NB: exclude LCDC_CC pin, as NGW100 reserves it for other use */ | ||
291 | at32_add_device_lcdc(0, &rmt_lcdc_data, | ||
292 | fbmem_start, fbmem_size, | ||
293 | (ATMEL_LCDC_ALT_24BIT | ATMEL_LCDC_PE_DVAL ) ); | ||
294 | #endif | ||
295 | |||
296 | #ifdef CONFIG_BOARD_MRMT_AC97 | ||
297 | at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH); | ||
298 | #endif | ||
299 | |||
300 | #ifdef CONFIG_BOARD_MRMT_ADS7846_TS | ||
301 | /* Select the Touchscreen interrupt pin mode */ | ||
302 | at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), | ||
303 | GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); | ||
304 | set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING ); | ||
305 | spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); | ||
306 | #endif | ||
307 | |||
308 | #ifdef CONFIG_BOARD_MRMT_UCB1400_TS | ||
309 | /* Select the Touchscreen interrupt pin mode */ | ||
310 | at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), | ||
311 | GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); | ||
312 | platform_device_register(&rmt_ts_device); | ||
313 | #endif | ||
314 | |||
315 | at32_select_gpio( PIN_LCD_DISP, AT32_GPIOF_OUTPUT ); | ||
316 | gpio_request( PIN_LCD_DISP, "LCD_DISP" ); | ||
317 | gpio_direction_output( PIN_LCD_DISP, 0 ); /* LCD DISP */ | ||
318 | #ifdef CONFIG_BOARD_MRMT_LCD_DISABLE | ||
319 | /* Keep Backlight and DISP off */ | ||
320 | at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); | ||
321 | gpio_request( PIN_LCD_BL, "LCD_BL" ); | ||
322 | gpio_direction_output( PIN_LCD_BL, 0 ); /* Backlight */ | ||
323 | #else | ||
324 | gpio_set_value( PIN_LCD_DISP, 1 ); /* DISP asserted first */ | ||
325 | #ifdef CONFIG_BOARD_MRMT_BL_PWM | ||
326 | /* Use PWM for Backlight controls */ | ||
327 | at32_add_device_pwm(1 << PWM_CH_BL); | ||
328 | platform_device_register(&rmt_pwm_led_dev); | ||
329 | #else | ||
330 | /* Backlight always on */ | ||
331 | udelay( 1 ); | ||
332 | at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); | ||
333 | gpio_request( PIN_LCD_BL, "LCD_BL" ); | ||
334 | gpio_direction_output( PIN_LCD_BL, 1 ); | ||
335 | #endif | ||
336 | #endif | ||
337 | |||
338 | /* Make sure BT and Zigbee modules in reset */ | ||
339 | at32_select_gpio( PIN_BT_RST, AT32_GPIOF_OUTPUT ); | ||
340 | gpio_request( PIN_BT_RST, "BT_RST" ); | ||
341 | gpio_direction_output( PIN_BT_RST, 1 ); | ||
342 | /* BT Module in Reset */ | ||
343 | |||
344 | at32_select_gpio( PIN_ZB_RST_N, AT32_GPIOF_OUTPUT ); | ||
345 | gpio_request( PIN_ZB_RST_N, "ZB_RST_N" ); | ||
346 | gpio_direction_output( PIN_ZB_RST_N, 0 ); | ||
347 | /* XBee Module in Reset */ | ||
348 | |||
349 | #ifdef CONFIG_BOARD_MRMT_WIRELESS_ZB | ||
350 | udelay( 1000 ); | ||
351 | /* Unreset the XBee Module */ | ||
352 | gpio_set_value( PIN_ZB_RST_N, 1 ); | ||
353 | #endif | ||
354 | #ifdef CONFIG_BOARD_MRMT_WIRELESS_BT | ||
355 | udelay( 1000 ); | ||
356 | /* Unreset the BT Module */ | ||
357 | gpio_set_value( PIN_BT_RST, 0 ); | ||
358 | #endif | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | arch_initcall(mrmt1_init); | ||
363 | |||
364 | static int __init mrmt1_early_init(void) | ||
365 | { | ||
366 | /* To maintain power-on signal in case boot loader did not already */ | ||
367 | at32_select_gpio( PIN_PWR_ON, AT32_GPIOF_OUTPUT ); | ||
368 | gpio_request( PIN_PWR_ON, "PIN_PWR_ON" ); | ||
369 | gpio_direction_output( PIN_PWR_ON, 1 ); | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | core_initcall(mrmt1_early_init); | ||
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index 5b022aad4bd9..bc299fbbeb4e 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -56,8 +56,13 @@ static struct spi_board_info spi0_board_info[] __initdata = { | |||
56 | static struct mci_platform_data __initdata mci0_data = { | 56 | static struct mci_platform_data __initdata mci0_data = { |
57 | .slot[0] = { | 57 | .slot[0] = { |
58 | .bus_width = 4, | 58 | .bus_width = 4, |
59 | #if defined(CONFIG_BOARD_ATNGW100_EVKLCD10X) || defined(CONFIG_BOARD_ATNGW100_MRMT) | ||
60 | .detect_pin = GPIO_PIN_NONE, | ||
61 | .wp_pin = GPIO_PIN_NONE, | ||
62 | #else | ||
59 | .detect_pin = GPIO_PIN_PC(25), | 63 | .detect_pin = GPIO_PIN_PC(25), |
60 | .wp_pin = GPIO_PIN_PE(0), | 64 | .wp_pin = GPIO_PIN_PE(0), |
65 | #endif | ||
61 | }, | 66 | }, |
62 | }; | 67 | }; |
63 | 68 | ||
diff --git a/arch/avr32/boards/merisc/setup.c b/arch/avr32/boards/merisc/setup.c
index 20b300cf105a..623b077594fc 100644
--- a/arch/avr32/boards/merisc/setup.c
+++ b/arch/avr32/boards/merisc/setup.c
@@ -94,9 +94,10 @@ static struct spi_board_info __initdata spi0_board_info[] = { | |||
94 | 94 | ||
95 | static struct mci_platform_data __initdata mci0_data = { | 95 | static struct mci_platform_data __initdata mci0_data = { |
96 | .slot[0] = { | 96 | .slot[0] = { |
97 | .bus_width = 4, | 97 | .bus_width = 4, |
98 | .detect_pin = GPIO_PIN_PE(19), | 98 | .detect_pin = GPIO_PIN_PE(19), |
99 | .wp_pin = GPIO_PIN_PE(20), | 99 | .wp_pin = GPIO_PIN_PE(20), |
100 | .detect_is_active_high = true, | ||
100 | }, | 101 | }, |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index c1b2175b4fea..523d8e183bef 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -43,16 +43,16 @@ unsigned long at32_board_osc_rates[3] = { | |||
43 | /* Initialized by bootloader-specific startup code. */ | 43 | /* Initialized by bootloader-specific startup code. */ |
44 | struct tag *bootloader_tags __initdata; | 44 | struct tag *bootloader_tags __initdata; |
45 | 45 | ||
46 | static struct fb_videomode __initdata tx14d14_modes[] = { | 46 | static struct fb_videomode __initdata pt0434827_modes[] = { |
47 | { | 47 | { |
48 | .name = "640x480 @ 60", | 48 | .name = "480x272 @ 72", |
49 | .refresh = 60, | 49 | .refresh = 72, |
50 | .xres = 640, .yres = 480, | 50 | .xres = 480, .yres = 272, |
51 | .pixclock = KHZ2PICOS(11666), | 51 | .pixclock = KHZ2PICOS(10000), |
52 | 52 | ||
53 | .left_margin = 80, .right_margin = 1, | 53 | .left_margin = 1, .right_margin = 1, |
54 | .upper_margin = 13, .lower_margin = 2, | 54 | .upper_margin = 12, .lower_margin = 1, |
55 | .hsync_len = 64, .vsync_len = 1, | 55 | .hsync_len = 42, .vsync_len = 1, |
56 | 56 | ||
57 | .sync = 0, | 57 | .sync = 0, |
58 | .vmode = FB_VMODE_NONINTERLACED, | 58 | .vmode = FB_VMODE_NONINTERLACED, |
@@ -60,14 +60,14 @@ static struct fb_videomode __initdata tx14d14_modes[] = { | |||
60 | }; | 60 | }; |
61 | 61 | ||
62 | static struct fb_monspecs __initdata mimc200_default_monspecs = { | 62 | static struct fb_monspecs __initdata mimc200_default_monspecs = { |
63 | .manufacturer = "HIT", | 63 | .manufacturer = "PT", |
64 | .monitor = "TX14D14VM1BAB", | 64 | .monitor = "PT0434827-A401", |
65 | .modedb = tx14d14_modes, | 65 | .modedb = pt0434827_modes, |
66 | .modedb_len = ARRAY_SIZE(tx14d14_modes), | 66 | .modedb_len = ARRAY_SIZE(pt0434827_modes), |
67 | .hfmin = 14820, | 67 | .hfmin = 14820, |
68 | .hfmax = 22230, | 68 | .hfmax = 22230, |
69 | .vfmin = 60, | 69 | .vfmin = 60, |
70 | .vfmax = 73.3, | 70 | .vfmax = 85, |
71 | .dclkmax = 25200000, | 71 | .dclkmax = 25200000, |
72 | }; | 72 | }; |
73 | 73 | ||
@@ -228,7 +228,8 @@ static int __init mimc200_init(void) | |||
228 | i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info)); | 228 | i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info)); |
229 | 229 | ||
230 | at32_add_device_lcdc(0, &mimc200_lcdc_data, | 230 | at32_add_device_lcdc(0, &mimc200_lcdc_data, |
231 | fbmem_start, fbmem_size, 1); | 231 | fbmem_start, fbmem_size, |
232 | ATMEL_LCDC_CONTROL | ATMEL_LCDC_ALT_CONTROL | ATMEL_LCDC_ALT_24B_DATA); | ||
232 | 233 | ||
233 | return 0; | 234 | return 0; |
234 | } | 235 | } |
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
new file mode 100644
index 000000000000..17b030777d36
--- /dev/null
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -0,0 +1,1363 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.30-rc1 | ||
4 | # Wed Jun 3 00:24:53 2009 | ||
5 | # | ||
6 | CONFIG_AVR32=y | ||
7 | CONFIG_GENERIC_GPIO=y | ||
8 | CONFIG_GENERIC_HARDIRQS=y | ||
9 | CONFIG_STACKTRACE_SUPPORT=y | ||
10 | CONFIG_LOCKDEP_SUPPORT=y | ||
11 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
12 | CONFIG_HARDIRQS_SW_RESEND=y | ||
13 | CONFIG_GENERIC_IRQ_PROBE=y | ||
14 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
15 | CONFIG_GENERIC_TIME=y | ||
16 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
17 | # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set | ||
18 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | ||
19 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | ||
20 | CONFIG_GENERIC_HWEIGHT=y | ||
21 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
22 | CONFIG_GENERIC_BUG=y | ||
23 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
24 | |||
25 | # | ||
26 | # General setup | ||
27 | # | ||
28 | CONFIG_EXPERIMENTAL=y | ||
29 | CONFIG_BROKEN_ON_SMP=y | ||
30 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
31 | CONFIG_LOCALVERSION="" | ||
32 | # CONFIG_LOCALVERSION_AUTO is not set | ||
33 | CONFIG_SWAP=y | ||
34 | CONFIG_SYSVIPC=y | ||
35 | CONFIG_SYSVIPC_SYSCTL=y | ||
36 | CONFIG_POSIX_MQUEUE=y | ||
37 | CONFIG_POSIX_MQUEUE_SYSCTL=y | ||
38 | CONFIG_BSD_PROCESS_ACCT=y | ||
39 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
40 | # CONFIG_TASKSTATS is not set | ||
41 | # CONFIG_AUDIT is not set | ||
42 | |||
43 | # | ||
44 | # RCU Subsystem | ||
45 | # | ||
46 | CONFIG_CLASSIC_RCU=y | ||
47 | # CONFIG_TREE_RCU is not set | ||
48 | # CONFIG_PREEMPT_RCU is not set | ||
49 | # CONFIG_TREE_RCU_TRACE is not set | ||
50 | # CONFIG_PREEMPT_RCU_TRACE is not set | ||
51 | # CONFIG_IKCONFIG is not set | ||
52 | CONFIG_LOG_BUF_SHIFT=14 | ||
53 | CONFIG_GROUP_SCHED=y | ||
54 | CONFIG_FAIR_GROUP_SCHED=y | ||
55 | # CONFIG_RT_GROUP_SCHED is not set | ||
56 | CONFIG_USER_SCHED=y | ||
57 | # CONFIG_CGROUP_SCHED is not set | ||
58 | # CONFIG_CGROUPS is not set | ||
59 | CONFIG_SYSFS_DEPRECATED=y | ||
60 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
61 | # CONFIG_RELAY is not set | ||
62 | # CONFIG_NAMESPACES is not set | ||
63 | CONFIG_BLK_DEV_INITRD=y | ||
64 | CONFIG_INITRAMFS_SOURCE="" | ||
65 | CONFIG_RD_GZIP=y | ||
66 | # CONFIG_RD_BZIP2 is not set | ||
67 | # CONFIG_RD_LZMA is not set | ||
68 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | ||
69 | CONFIG_SYSCTL=y | ||
70 | CONFIG_ANON_INODES=y | ||
71 | CONFIG_EMBEDDED=y | ||
72 | # CONFIG_SYSCTL_SYSCALL is not set | ||
73 | CONFIG_KALLSYMS=y | ||
74 | # CONFIG_KALLSYMS_ALL is not set | ||
75 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
76 | CONFIG_HOTPLUG=y | ||
77 | CONFIG_PRINTK=y | ||
78 | CONFIG_BUG=y | ||
79 | CONFIG_ELF_CORE=y | ||
80 | # CONFIG_BASE_FULL is not set | ||
81 | CONFIG_FUTEX=y | ||
82 | CONFIG_EPOLL=y | ||
83 | CONFIG_SIGNALFD=y | ||
84 | CONFIG_TIMERFD=y | ||
85 | CONFIG_EVENTFD=y | ||
86 | CONFIG_SHMEM=y | ||
87 | CONFIG_AIO=y | ||
88 | CONFIG_VM_EVENT_COUNTERS=y | ||
89 | # CONFIG_SLUB_DEBUG is not set | ||
90 | CONFIG_COMPAT_BRK=y | ||
91 | # CONFIG_SLAB is not set | ||
92 | CONFIG_SLUB=y | ||
93 | # CONFIG_SLOB is not set | ||
94 | # CONFIG_PROFILING is not set | ||
95 | # CONFIG_MARKERS is not set | ||
96 | CONFIG_HAVE_OPROFILE=y | ||
97 | # CONFIG_KPROBES is not set | ||
98 | CONFIG_HAVE_KPROBES=y | ||
99 | CONFIG_HAVE_CLK=y | ||
100 | # CONFIG_SLOW_WORK is not set | ||
101 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
102 | CONFIG_RT_MUTEXES=y | ||
103 | CONFIG_BASE_SMALL=1 | ||
104 | CONFIG_MODULES=y | ||
105 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
106 | CONFIG_MODULE_UNLOAD=y | ||
107 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
108 | # CONFIG_MODVERSIONS is not set | ||
109 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
110 | CONFIG_BLOCK=y | ||
111 | # CONFIG_LBD is not set | ||
112 | # CONFIG_BLK_DEV_BSG is not set | ||
113 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
114 | |||
115 | # | ||
116 | # IO Schedulers | ||
117 | # | ||
118 | CONFIG_IOSCHED_NOOP=y | ||
119 | # CONFIG_IOSCHED_AS is not set | ||
120 | # CONFIG_IOSCHED_DEADLINE is not set | ||
121 | CONFIG_IOSCHED_CFQ=y | ||
122 | # CONFIG_DEFAULT_AS is not set | ||
123 | # CONFIG_DEFAULT_DEADLINE is not set | ||
124 | CONFIG_DEFAULT_CFQ=y | ||
125 | # CONFIG_DEFAULT_NOOP is not set | ||
126 | CONFIG_DEFAULT_IOSCHED="cfq" | ||
127 | # CONFIG_FREEZER is not set | ||
128 | |||
129 | # | ||
130 | # System Type and features | ||
131 | # | ||
132 | # CONFIG_NO_HZ is not set | ||
133 | # CONFIG_HIGH_RES_TIMERS is not set | ||
134 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
135 | CONFIG_SUBARCH_AVR32B=y | ||
136 | CONFIG_MMU=y | ||
137 | CONFIG_PERFORMANCE_COUNTERS=y | ||
138 | CONFIG_PLATFORM_AT32AP=y | ||
139 | CONFIG_CPU_AT32AP700X=y | ||
140 | CONFIG_CPU_AT32AP7000=y | ||
141 | # CONFIG_BOARD_ATSTK1000 is not set | ||
142 | CONFIG_BOARD_ATNGW100=y | ||
143 | # CONFIG_BOARD_HAMMERHEAD is not set | ||
144 | # CONFIG_BOARD_FAVR_32 is not set | ||
145 | # CONFIG_BOARD_MERISC is not set | ||
146 | # CONFIG_BOARD_MIMC200 is not set | ||
147 | # CONFIG_BOARD_ATNGW100_ADDON_NONE is not set | ||
148 | # CONFIG_BOARD_ATNGW100_EVKLCD10X is not set | ||
149 | CONFIG_BOARD_ATNGW100_MRMT=y | ||
150 | CONFIG_BOARD_MRMT_REV1=y | ||
151 | # CONFIG_BOARD_MRMT_REV2 is not set | ||
152 | CONFIG_BOARD_MRMT_AC97=y | ||
153 | # CONFIG_BOARD_MRMT_UCB1400_TS is not set | ||
154 | CONFIG_BOARD_MRMT_ADS7846_TS=y | ||
155 | # CONFIG_BOARD_MRMT_LCD_DISABLE is not set | ||
156 | CONFIG_BOARD_MRMT_LCD_LQ043T3DX0X=y | ||
157 | # CONFIG_BOARD_MRMT_LCD_KWH043GM08 is not set | ||
158 | CONFIG_BOARD_MRMT_BL_PWM=y | ||
159 | CONFIG_BOARD_MRMT_RTC_I2C=y | ||
160 | CONFIG_BOARD_MRMT_WIRELESS_ZB=y | ||
161 | # CONFIG_BOARD_MRMT_WIRELESS_BT is not set | ||
162 | # CONFIG_BOARD_MRMT_WIRELESS_NONE is not set | ||
163 | CONFIG_LOADER_U_BOOT=y | ||
164 | |||
165 | # | ||
166 | # Atmel AVR32 AP options | ||
167 | # | ||
168 | # CONFIG_AP700X_32_BIT_SMC is not set | ||
169 | CONFIG_AP700X_16_BIT_SMC=y | ||
170 | # CONFIG_AP700X_8_BIT_SMC is not set | ||
171 | CONFIG_LOAD_ADDRESS=0x10000000 | ||
172 | CONFIG_ENTRY_ADDRESS=0x90000000 | ||
173 | CONFIG_PHYS_OFFSET=0x10000000 | ||
174 | CONFIG_PREEMPT_NONE=y | ||
175 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
176 | # CONFIG_PREEMPT is not set | ||
177 | CONFIG_QUICKLIST=y | ||
178 | # CONFIG_HAVE_ARCH_BOOTMEM is not set | ||
179 | # CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set | ||
180 | # CONFIG_NEED_NODE_MEMMAP_SIZE is not set | ||
181 | CONFIG_ARCH_FLATMEM_ENABLE=y | ||
182 | # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set | ||
183 | # CONFIG_ARCH_SPARSEMEM_ENABLE is not set | ||
184 | CONFIG_SELECT_MEMORY_MODEL=y | ||
185 | CONFIG_FLATMEM_MANUAL=y | ||
186 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
187 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
188 | CONFIG_FLATMEM=y | ||
189 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
190 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
191 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
192 | # CONFIG_PHYS_ADDR_T_64BIT is not set | ||
193 | CONFIG_ZONE_DMA_FLAG=0 | ||
194 | CONFIG_NR_QUICK=2 | ||
195 | CONFIG_VIRT_TO_BUS=y | ||
196 | CONFIG_UNEVICTABLE_LRU=y | ||
197 | CONFIG_HAVE_MLOCK=y | ||
198 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
199 | # CONFIG_OWNERSHIP_TRACE is not set | ||
200 | # CONFIG_NMI_DEBUGGING is not set | ||
201 | # CONFIG_HZ_100 is not set | ||
202 | CONFIG_HZ_250=y | ||
203 | # CONFIG_HZ_300 is not set | ||
204 | # CONFIG_HZ_1000 is not set | ||
205 | CONFIG_HZ=250 | ||
206 | # CONFIG_SCHED_HRTICK is not set | ||
207 | CONFIG_CMDLINE="" | ||
208 | |||
209 | # | ||
210 | # Power management options | ||
211 | # | ||
212 | CONFIG_PM=y | ||
213 | # CONFIG_PM_DEBUG is not set | ||
214 | # CONFIG_SUSPEND is not set | ||
215 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | ||
216 | |||
217 | # | ||
218 | # CPU Frequency scaling | ||
219 | # | ||
220 | CONFIG_CPU_FREQ=y | ||
221 | CONFIG_CPU_FREQ_TABLE=y | ||
222 | # CONFIG_CPU_FREQ_DEBUG is not set | ||
223 | CONFIG_CPU_FREQ_STAT=y | ||
224 | # CONFIG_CPU_FREQ_STAT_DETAILS is not set | ||
225 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
226 | # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set | ||
227 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
228 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
229 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
230 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
231 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y | ||
232 | CONFIG_CPU_FREQ_GOV_USERSPACE=y | ||
233 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y | ||
234 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
235 | CONFIG_CPU_FREQ_AT32AP=y | ||
236 | |||
237 | # | ||
238 | # Bus options | ||
239 | # | ||
240 | # CONFIG_ARCH_SUPPORTS_MSI is not set | ||
241 | # CONFIG_PCCARD is not set | ||
242 | |||
243 | # | ||
244 | # Executable file formats | ||
245 | # | ||
246 | CONFIG_BINFMT_ELF=y | ||
247 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
248 | # CONFIG_HAVE_AOUT is not set | ||
249 | # CONFIG_BINFMT_MISC is not set | ||
250 | CONFIG_NET=y | ||
251 | |||
252 | # | ||
253 | # Networking options | ||
254 | # | ||
255 | CONFIG_PACKET=y | ||
256 | CONFIG_PACKET_MMAP=y | ||
257 | CONFIG_UNIX=y | ||
258 | # CONFIG_NET_KEY is not set | ||
259 | CONFIG_INET=y | ||
260 | # CONFIG_IP_MULTICAST is not set | ||
261 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
262 | CONFIG_IP_FIB_HASH=y | ||
263 | CONFIG_IP_PNP=y | ||
264 | CONFIG_IP_PNP_DHCP=y | ||
265 | # CONFIG_IP_PNP_BOOTP is not set | ||
266 | # CONFIG_IP_PNP_RARP is not set | ||
267 | # CONFIG_NET_IPIP is not set | ||
268 | # CONFIG_NET_IPGRE is not set | ||
269 | # CONFIG_ARPD is not set | ||
270 | CONFIG_SYN_COOKIES=y | ||
271 | # CONFIG_INET_AH is not set | ||
272 | # CONFIG_INET_ESP is not set | ||
273 | # CONFIG_INET_IPCOMP is not set | ||
274 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
275 | # CONFIG_INET_TUNNEL is not set | ||
276 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
277 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
278 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
279 | # CONFIG_INET_LRO is not set | ||
280 | CONFIG_INET_DIAG=y | ||
281 | CONFIG_INET_TCP_DIAG=y | ||
282 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
283 | CONFIG_TCP_CONG_CUBIC=y | ||
284 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
285 | # CONFIG_TCP_MD5SIG is not set | ||
286 | # CONFIG_IPV6 is not set | ||
287 | # CONFIG_NETWORK_SECMARK is not set | ||
288 | # CONFIG_NETFILTER is not set | ||
289 | # CONFIG_IP_DCCP is not set | ||
290 | # CONFIG_IP_SCTP is not set | ||
291 | # CONFIG_TIPC is not set | ||
292 | # CONFIG_ATM is not set | ||
293 | # CONFIG_BRIDGE is not set | ||
294 | # CONFIG_NET_DSA is not set | ||
295 | # CONFIG_VLAN_8021Q is not set | ||
296 | # CONFIG_DECNET is not set | ||
297 | # CONFIG_LLC2 is not set | ||
298 | # CONFIG_IPX is not set | ||
299 | # CONFIG_ATALK is not set | ||
300 | # CONFIG_X25 is not set | ||
301 | # CONFIG_LAPB is not set | ||
302 | # CONFIG_ECONET is not set | ||
303 | # CONFIG_WAN_ROUTER is not set | ||
304 | # CONFIG_PHONET is not set | ||
305 | # CONFIG_NET_SCHED is not set | ||
306 | # CONFIG_DCB is not set | ||
307 | |||
308 | # | ||
309 | # Network testing | ||
310 | # | ||
311 | # CONFIG_NET_PKTGEN is not set | ||
312 | # CONFIG_HAMRADIO is not set | ||
313 | # CONFIG_CAN is not set | ||
314 | # CONFIG_IRDA is not set | ||
315 | CONFIG_BT=m | ||
316 | CONFIG_BT_L2CAP=m | ||
317 | # CONFIG_BT_SCO is not set | ||
318 | CONFIG_BT_RFCOMM=m | ||
319 | CONFIG_BT_RFCOMM_TTY=y | ||
320 | # CONFIG_BT_BNEP is not set | ||
321 | CONFIG_BT_HIDP=m | ||
322 | |||
323 | # | ||
324 | # Bluetooth device drivers | ||
325 | # | ||
326 | # CONFIG_BT_HCIBTSDIO is not set | ||
327 | CONFIG_BT_HCIUART=m | ||
328 | CONFIG_BT_HCIUART_H4=y | ||
329 | CONFIG_BT_HCIUART_BCSP=y | ||
330 | # CONFIG_BT_HCIUART_LL is not set | ||
331 | # CONFIG_BT_HCIVHCI is not set | ||
332 | # CONFIG_AF_RXRPC is not set | ||
333 | # CONFIG_WIRELESS is not set | ||
334 | # CONFIG_WIMAX is not set | ||
335 | # CONFIG_RFKILL is not set | ||
336 | # CONFIG_NET_9P is not set | ||
337 | |||
338 | # | ||
339 | # Device Drivers | ||
340 | # | ||
341 | |||
342 | # | ||
343 | # Generic Driver Options | ||
344 | # | ||
345 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
346 | CONFIG_STANDALONE=y | ||
347 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | ||
348 | # CONFIG_FW_LOADER is not set | ||
349 | # CONFIG_DEBUG_DRIVER is not set | ||
350 | # CONFIG_DEBUG_DEVRES is not set | ||
351 | # CONFIG_SYS_HYPERVISOR is not set | ||
352 | # CONFIG_CONNECTOR is not set | ||
353 | CONFIG_MTD=y | ||
354 | # CONFIG_MTD_DEBUG is not set | ||
355 | # CONFIG_MTD_CONCAT is not set | ||
356 | CONFIG_MTD_PARTITIONS=y | ||
357 | # CONFIG_MTD_TESTS is not set | ||
358 | # CONFIG_MTD_REDBOOT_PARTS is not set | ||
359 | CONFIG_MTD_CMDLINE_PARTS=y | ||
360 | # CONFIG_MTD_AR7_PARTS is not set | ||
361 | |||
362 | # | ||
363 | # User Modules And Translation Layers | ||
364 | # | ||
365 | CONFIG_MTD_CHAR=y | ||
366 | CONFIG_MTD_BLKDEVS=y | ||
367 | CONFIG_MTD_BLOCK=y | ||
368 | # CONFIG_FTL is not set | ||
369 | # CONFIG_NFTL is not set | ||
370 | # CONFIG_INFTL is not set | ||
371 | # CONFIG_RFD_FTL is not set | ||
372 | # CONFIG_SSFDC is not set | ||
373 | # CONFIG_MTD_OOPS is not set | ||
374 | |||
375 | # | ||
376 | # RAM/ROM/Flash chip drivers | ||
377 | # | ||
378 | CONFIG_MTD_CFI=y | ||
379 | # CONFIG_MTD_JEDECPROBE is not set | ||
380 | CONFIG_MTD_GEN_PROBE=y | ||
381 | # CONFIG_MTD_CFI_ADV_OPTIONS is not set | ||
382 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
383 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
384 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
385 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
386 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
387 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
388 | CONFIG_MTD_CFI_I1=y | ||
389 | CONFIG_MTD_CFI_I2=y | ||
390 | # CONFIG_MTD_CFI_I4 is not set | ||
391 | # CONFIG_MTD_CFI_I8 is not set | ||
392 | # CONFIG_MTD_CFI_INTELEXT is not set | ||
393 | CONFIG_MTD_CFI_AMDSTD=y | ||
394 | # CONFIG_MTD_CFI_STAA is not set | ||
395 | CONFIG_MTD_CFI_UTIL=y | ||
396 | # CONFIG_MTD_RAM is not set | ||
397 | # CONFIG_MTD_ROM is not set | ||
398 | # CONFIG_MTD_ABSENT is not set | ||
399 | |||
400 | # | ||
401 | # Mapping drivers for chip access | ||
402 | # | ||
403 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
404 | CONFIG_MTD_PHYSMAP=y | ||
405 | # CONFIG_MTD_PHYSMAP_COMPAT is not set | ||
406 | # CONFIG_MTD_PLATRAM is not set | ||
407 | |||
408 | # | ||
409 | # Self-contained MTD device drivers | ||
410 | # | ||
411 | CONFIG_MTD_DATAFLASH=y | ||
412 | # CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set | ||
413 | # CONFIG_MTD_DATAFLASH_OTP is not set | ||
414 | # CONFIG_MTD_M25P80 is not set | ||
415 | # CONFIG_MTD_SLRAM is not set | ||
416 | # CONFIG_MTD_PHRAM is not set | ||
417 | # CONFIG_MTD_MTDRAM is not set | ||
418 | # CONFIG_MTD_BLOCK2MTD is not set | ||
419 | |||
420 | # | ||
421 | # Disk-On-Chip Device Drivers | ||
422 | # | ||
423 | # CONFIG_MTD_DOC2000 is not set | ||
424 | # CONFIG_MTD_DOC2001 is not set | ||
425 | # CONFIG_MTD_DOC2001PLUS is not set | ||
426 | # CONFIG_MTD_NAND is not set | ||
427 | # CONFIG_MTD_ONENAND is not set | ||
428 | |||
429 | # | ||
430 | # LPDDR flash memory drivers | ||
431 | # | ||
432 | # CONFIG_MTD_LPDDR is not set | ||
433 | |||
434 | # | ||
435 | # UBI - Unsorted block images | ||
436 | # | ||
437 | # CONFIG_MTD_UBI is not set | ||
438 | # CONFIG_PARPORT is not set | ||
439 | CONFIG_BLK_DEV=y | ||
440 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
441 | CONFIG_BLK_DEV_LOOP=y | ||
442 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
443 | # CONFIG_BLK_DEV_NBD is not set | ||
444 | # CONFIG_BLK_DEV_RAM is not set | ||
445 | # CONFIG_CDROM_PKTCDVD is not set | ||
446 | # CONFIG_ATA_OVER_ETH is not set | ||
447 | CONFIG_MISC_DEVICES=y | ||
448 | CONFIG_ATMEL_PWM=y | ||
449 | # CONFIG_ATMEL_TCLIB is not set | ||
450 | # CONFIG_ICS932S401 is not set | ||
451 | # CONFIG_ATMEL_SSC is not set | ||
452 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
453 | # CONFIG_ISL29003 is not set | ||
454 | # CONFIG_C2PORT is not set | ||
455 | |||
456 | # | ||
457 | # EEPROM support | ||
458 | # | ||
459 | # CONFIG_EEPROM_AT24 is not set | ||
460 | # CONFIG_EEPROM_AT25 is not set | ||
461 | # CONFIG_EEPROM_LEGACY is not set | ||
462 | # CONFIG_EEPROM_93CX6 is not set | ||
463 | |||
464 | # | ||
465 | # SCSI device support | ||
466 | # | ||
467 | # CONFIG_RAID_ATTRS is not set | ||
468 | # CONFIG_SCSI is not set | ||
469 | # CONFIG_SCSI_DMA is not set | ||
470 | # CONFIG_SCSI_NETLINK is not set | ||
471 | # CONFIG_ATA is not set | ||
472 | # CONFIG_MD is not set | ||
473 | CONFIG_NETDEVICES=y | ||
474 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
475 | # CONFIG_DUMMY is not set | ||
476 | # CONFIG_BONDING is not set | ||
477 | # CONFIG_MACVLAN is not set | ||
478 | # CONFIG_EQUALIZER is not set | ||
479 | # CONFIG_TUN is not set | ||
480 | # CONFIG_VETH is not set | ||
481 | CONFIG_PHYLIB=y | ||
482 | |||
483 | # | ||
484 | # MII PHY device drivers | ||
485 | # | ||
486 | # CONFIG_MARVELL_PHY is not set | ||
487 | # CONFIG_DAVICOM_PHY is not set | ||
488 | # CONFIG_QSEMI_PHY is not set | ||
489 | # CONFIG_LXT_PHY is not set | ||
490 | # CONFIG_CICADA_PHY is not set | ||
491 | # CONFIG_VITESSE_PHY is not set | ||
492 | # CONFIG_SMSC_PHY is not set | ||
493 | # CONFIG_BROADCOM_PHY is not set | ||
494 | # CONFIG_ICPLUS_PHY is not set | ||
495 | # CONFIG_REALTEK_PHY is not set | ||
496 | # CONFIG_NATIONAL_PHY is not set | ||
497 | # CONFIG_STE10XP is not set | ||
498 | # CONFIG_LSI_ET1011C_PHY is not set | ||
499 | # CONFIG_FIXED_PHY is not set | ||
500 | # CONFIG_MDIO_BITBANG is not set | ||
501 | CONFIG_NET_ETHERNET=y | ||
502 | # CONFIG_MII is not set | ||
503 | CONFIG_MACB=y | ||
504 | # CONFIG_ENC28J60 is not set | ||
505 | # CONFIG_ETHOC is not set | ||
506 | # CONFIG_DNET is not set | ||
507 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
508 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
509 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
510 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
511 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
512 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
513 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
514 | # CONFIG_B44 is not set | ||
515 | # CONFIG_NETDEV_1000 is not set | ||
516 | # CONFIG_NETDEV_10000 is not set | ||
517 | |||
518 | # | ||
519 | # Wireless LAN | ||
520 | # | ||
521 | # CONFIG_WLAN_PRE80211 is not set | ||
522 | # CONFIG_WLAN_80211 is not set | ||
523 | |||
524 | # | ||
525 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
526 | # | ||
527 | # CONFIG_WAN is not set | ||
528 | # CONFIG_PPP is not set | ||
529 | # CONFIG_SLIP is not set | ||
530 | # CONFIG_NETCONSOLE is not set | ||
531 | # CONFIG_NETPOLL is not set | ||
532 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
533 | # CONFIG_ISDN is not set | ||
534 | # CONFIG_PHONE is not set | ||
535 | |||
536 | # | ||
537 | # Input device support | ||
538 | # | ||
539 | CONFIG_INPUT=y | ||
540 | # CONFIG_INPUT_FF_MEMLESS is not set | ||
541 | # CONFIG_INPUT_POLLDEV is not set | ||
542 | |||
543 | # | ||
544 | # Userland interfaces | ||
545 | # | ||
546 | # CONFIG_INPUT_MOUSEDEV is not set | ||
547 | # CONFIG_INPUT_JOYDEV is not set | ||
548 | CONFIG_INPUT_EVDEV=y | ||
549 | # CONFIG_INPUT_EVBUG is not set | ||
550 | |||
551 | # | ||
552 | # Input Device Drivers | ||
553 | # | ||
554 | CONFIG_INPUT_KEYBOARD=y | ||
555 | # CONFIG_KEYBOARD_ATKBD is not set | ||
556 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
557 | # CONFIG_KEYBOARD_LKKBD is not set | ||
558 | # CONFIG_KEYBOARD_XTKBD is not set | ||
559 | # CONFIG_KEYBOARD_NEWTON is not set | ||
560 | # CONFIG_KEYBOARD_STOWAWAY is not set | ||
561 | CONFIG_KEYBOARD_GPIO=y | ||
562 | # CONFIG_INPUT_MOUSE is not set | ||
563 | # CONFIG_INPUT_JOYSTICK is not set | ||
564 | # CONFIG_INPUT_TABLET is not set | ||
565 | CONFIG_INPUT_TOUCHSCREEN=y | ||
566 | CONFIG_TOUCHSCREEN_ADS7846=m | ||
567 | # CONFIG_TOUCHSCREEN_FUJITSU is not set | ||
568 | # CONFIG_TOUCHSCREEN_GUNZE is not set | ||
569 | # CONFIG_TOUCHSCREEN_ELO is not set | ||
570 | # CONFIG_TOUCHSCREEN_WACOM_W8001 is not set | ||
571 | # CONFIG_TOUCHSCREEN_MTOUCH is not set | ||
572 | # CONFIG_TOUCHSCREEN_INEXIO is not set | ||
573 | # CONFIG_TOUCHSCREEN_MK712 is not set | ||
574 | # CONFIG_TOUCHSCREEN_PENMOUNT is not set | ||
575 | # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set | ||
576 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set | ||
577 | # CONFIG_TOUCHSCREEN_WM97XX is not set | ||
578 | # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set | ||
579 | # CONFIG_TOUCHSCREEN_TSC2007 is not set | ||
580 | # CONFIG_INPUT_MISC is not set | ||
581 | |||
582 | # | ||
583 | # Hardware I/O ports | ||
584 | # | ||
585 | # CONFIG_SERIO is not set | ||
586 | # CONFIG_GAMEPORT is not set | ||
587 | |||
588 | # | ||
589 | # Character devices | ||
590 | # | ||
591 | CONFIG_VT=y | ||
592 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
593 | CONFIG_VT_CONSOLE=y | ||
594 | CONFIG_HW_CONSOLE=y | ||
595 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
596 | CONFIG_DEVKMEM=y | ||
597 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
598 | |||
599 | # | ||
600 | # Serial drivers | ||
601 | # | ||
602 | # CONFIG_SERIAL_8250 is not set | ||
603 | |||
604 | # | ||
605 | # Non-8250 serial port support | ||
606 | # | ||
607 | CONFIG_SERIAL_ATMEL=y | ||
608 | CONFIG_SERIAL_ATMEL_CONSOLE=y | ||
609 | CONFIG_SERIAL_ATMEL_PDC=y | ||
610 | # CONFIG_SERIAL_ATMEL_TTYAT is not set | ||
611 | # CONFIG_SERIAL_MAX3100 is not set | ||
612 | CONFIG_SERIAL_CORE=y | ||
613 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
614 | CONFIG_UNIX98_PTYS=y | ||
615 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
616 | # CONFIG_LEGACY_PTYS is not set | ||
617 | # CONFIG_IPMI_HANDLER is not set | ||
618 | # CONFIG_HW_RANDOM is not set | ||
619 | # CONFIG_R3964 is not set | ||
620 | # CONFIG_RAW_DRIVER is not set | ||
621 | # CONFIG_TCG_TPM is not set | ||
622 | CONFIG_I2C=y | ||
623 | CONFIG_I2C_BOARDINFO=y | ||
624 | CONFIG_I2C_CHARDEV=y | ||
625 | CONFIG_I2C_HELPER_AUTO=y | ||
626 | CONFIG_I2C_ALGOBIT=y | ||
627 | |||
628 | # | ||
629 | # I2C Hardware Bus support | ||
630 | # | ||
631 | |||
632 | # | ||
633 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
634 | # | ||
635 | CONFIG_I2C_GPIO=y | ||
636 | # CONFIG_I2C_OCORES is not set | ||
637 | # CONFIG_I2C_SIMTEC is not set | ||
638 | |||
639 | # | ||
640 | # External I2C/SMBus adapter drivers | ||
641 | # | ||
642 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
643 | # CONFIG_I2C_TAOS_EVM is not set | ||
644 | |||
645 | # | ||
646 | # Other I2C/SMBus bus drivers | ||
647 | # | ||
648 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
649 | # CONFIG_I2C_STUB is not set | ||
650 | |||
651 | # | ||
652 | # Miscellaneous I2C Chip support | ||
653 | # | ||
654 | # CONFIG_DS1682 is not set | ||
655 | # CONFIG_SENSORS_PCF8574 is not set | ||
656 | # CONFIG_PCF8575 is not set | ||
657 | # CONFIG_SENSORS_PCA9539 is not set | ||
658 | # CONFIG_SENSORS_MAX6875 is not set | ||
659 | # CONFIG_SENSORS_TSL2550 is not set | ||
660 | # CONFIG_I2C_DEBUG_CORE is not set | ||
661 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
662 | # CONFIG_I2C_DEBUG_BUS is not set | ||
663 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
664 | CONFIG_SPI=y | ||
665 | # CONFIG_SPI_DEBUG is not set | ||
666 | CONFIG_SPI_MASTER=y | ||
667 | |||
668 | # | ||
669 | # SPI Master Controller Drivers | ||
670 | # | ||
671 | CONFIG_SPI_ATMEL=y | ||
672 | # CONFIG_SPI_BITBANG is not set | ||
673 | # CONFIG_SPI_GPIO is not set | ||
674 | |||
675 | # | ||
676 | # SPI Protocol Masters | ||
677 | # | ||
678 | CONFIG_SPI_SPIDEV=y | ||
679 | # CONFIG_SPI_TLE62X0 is not set | ||
680 | CONFIG_ARCH_REQUIRE_GPIOLIB=y | ||
681 | CONFIG_GPIOLIB=y | ||
682 | # CONFIG_DEBUG_GPIO is not set | ||
683 | # CONFIG_GPIO_SYSFS is not set | ||
684 | |||
685 | # | ||
686 | # Memory mapped GPIO expanders: | ||
687 | # | ||
688 | |||
689 | # | ||
690 | # I2C GPIO expanders: | ||
691 | # | ||
692 | # CONFIG_GPIO_MAX732X is not set | ||
693 | # CONFIG_GPIO_PCA953X is not set | ||
694 | # CONFIG_GPIO_PCF857X is not set | ||
695 | |||
696 | # | ||
697 | # PCI GPIO expanders: | ||
698 | # | ||
699 | |||
700 | # | ||
701 | # SPI GPIO expanders: | ||
702 | # | ||
703 | # CONFIG_GPIO_MAX7301 is not set | ||
704 | # CONFIG_GPIO_MCP23S08 is not set | ||
705 | # CONFIG_W1 is not set | ||
706 | # CONFIG_POWER_SUPPLY is not set | ||
707 | CONFIG_HWMON=y | ||
708 | # CONFIG_HWMON_VID is not set | ||
709 | # CONFIG_SENSORS_AD7414 is not set | ||
710 | # CONFIG_SENSORS_AD7418 is not set | ||
711 | # CONFIG_SENSORS_ADCXX is not set | ||
712 | # CONFIG_SENSORS_ADM1021 is not set | ||
713 | # CONFIG_SENSORS_ADM1025 is not set | ||
714 | # CONFIG_SENSORS_ADM1026 is not set | ||
715 | # CONFIG_SENSORS_ADM1029 is not set | ||
716 | # CONFIG_SENSORS_ADM1031 is not set | ||
717 | # CONFIG_SENSORS_ADM9240 is not set | ||
718 | # CONFIG_SENSORS_ADT7462 is not set | ||
719 | # CONFIG_SENSORS_ADT7470 is not set | ||
720 | # CONFIG_SENSORS_ADT7473 is not set | ||
721 | # CONFIG_SENSORS_ADT7475 is not set | ||
722 | # CONFIG_SENSORS_ATXP1 is not set | ||
723 | # CONFIG_SENSORS_DS1621 is not set | ||
724 | # CONFIG_SENSORS_F71805F is not set | ||
725 | # CONFIG_SENSORS_F71882FG is not set | ||
726 | # CONFIG_SENSORS_F75375S is not set | ||
727 | # CONFIG_SENSORS_G760A is not set | ||
728 | # CONFIG_SENSORS_GL518SM is not set | ||
729 | # CONFIG_SENSORS_GL520SM is not set | ||
730 | # CONFIG_SENSORS_IT87 is not set | ||
731 | # CONFIG_SENSORS_LM63 is not set | ||
732 | # CONFIG_SENSORS_LM70 is not set | ||
733 | # CONFIG_SENSORS_LM75 is not set | ||
734 | # CONFIG_SENSORS_LM77 is not set | ||
735 | # CONFIG_SENSORS_LM78 is not set | ||
736 | # CONFIG_SENSORS_LM80 is not set | ||
737 | # CONFIG_SENSORS_LM83 is not set | ||
738 | # CONFIG_SENSORS_LM85 is not set | ||
739 | # CONFIG_SENSORS_LM87 is not set | ||
740 | # CONFIG_SENSORS_LM90 is not set | ||
741 | # CONFIG_SENSORS_LM92 is not set | ||
742 | # CONFIG_SENSORS_LM93 is not set | ||
743 | # CONFIG_SENSORS_LTC4215 is not set | ||
744 | # CONFIG_SENSORS_LTC4245 is not set | ||
745 | # CONFIG_SENSORS_LM95241 is not set | ||
746 | # CONFIG_SENSORS_MAX1111 is not set | ||
747 | # CONFIG_SENSORS_MAX1619 is not set | ||
748 | # CONFIG_SENSORS_MAX6650 is not set | ||
749 | # CONFIG_SENSORS_PC87360 is not set | ||
750 | # CONFIG_SENSORS_PC87427 is not set | ||
751 | # CONFIG_SENSORS_PCF8591 is not set | ||
752 | # CONFIG_SENSORS_DME1737 is not set | ||
753 | # CONFIG_SENSORS_SMSC47M1 is not set | ||
754 | # CONFIG_SENSORS_SMSC47M192 is not set | ||
755 | # CONFIG_SENSORS_SMSC47B397 is not set | ||
756 | # CONFIG_SENSORS_ADS7828 is not set | ||
757 | # CONFIG_SENSORS_THMC50 is not set | ||
758 | # CONFIG_SENSORS_VT1211 is not set | ||
759 | # CONFIG_SENSORS_W83781D is not set | ||
760 | # CONFIG_SENSORS_W83791D is not set | ||
761 | # CONFIG_SENSORS_W83792D is not set | ||
762 | # CONFIG_SENSORS_W83793 is not set | ||
763 | # CONFIG_SENSORS_W83L785TS is not set | ||
764 | # CONFIG_SENSORS_W83L786NG is not set | ||
765 | # CONFIG_SENSORS_W83627HF is not set | ||
766 | # CONFIG_SENSORS_W83627EHF is not set | ||
767 | # CONFIG_SENSORS_LIS3_SPI is not set | ||
768 | # CONFIG_HWMON_DEBUG_CHIP is not set | ||
769 | # CONFIG_THERMAL is not set | ||
770 | # CONFIG_THERMAL_HWMON is not set | ||
771 | CONFIG_WATCHDOG=y | ||
772 | # CONFIG_WATCHDOG_NOWAYOUT is not set | ||
773 | |||
774 | # | ||
775 | # Watchdog Device Drivers | ||
776 | # | ||
777 | # CONFIG_SOFT_WATCHDOG is not set | ||
778 | CONFIG_AT32AP700X_WDT=y | ||
779 | CONFIG_SSB_POSSIBLE=y | ||
780 | |||
781 | # | ||
782 | # Sonics Silicon Backplane | ||
783 | # | ||
784 | # CONFIG_SSB is not set | ||
785 | |||
786 | # | ||
787 | # Multifunction device drivers | ||
788 | # | ||
789 | # CONFIG_MFD_CORE is not set | ||
790 | # CONFIG_MFD_SM501 is not set | ||
791 | # CONFIG_HTC_PASIC3 is not set | ||
792 | # CONFIG_UCB1400_CORE is not set | ||
793 | # CONFIG_TPS65010 is not set | ||
794 | # CONFIG_TWL4030_CORE is not set | ||
795 | # CONFIG_MFD_TMIO is not set | ||
796 | # CONFIG_PMIC_DA903X is not set | ||
797 | # CONFIG_MFD_WM8400 is not set | ||
798 | # CONFIG_MFD_WM8350_I2C is not set | ||
799 | # CONFIG_MFD_PCF50633 is not set | ||
800 | # CONFIG_REGULATOR is not set | ||
801 | |||
802 | # | ||
803 | # Multimedia devices | ||
804 | # | ||
805 | |||
806 | # | ||
807 | # Multimedia core support | ||
808 | # | ||
809 | # CONFIG_VIDEO_DEV is not set | ||
810 | # CONFIG_DVB_CORE is not set | ||
811 | # CONFIG_VIDEO_MEDIA is not set | ||
812 | |||
813 | # | ||
814 | # Multimedia drivers | ||
815 | # | ||
816 | # CONFIG_DAB is not set | ||
817 | |||
818 | # | ||
819 | # Graphics support | ||
820 | # | ||
821 | # CONFIG_VGASTATE is not set | ||
822 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
823 | CONFIG_FB=y | ||
824 | # CONFIG_FIRMWARE_EDID is not set | ||
825 | # CONFIG_FB_DDC is not set | ||
826 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set | ||
827 | CONFIG_FB_CFB_FILLRECT=y | ||
828 | CONFIG_FB_CFB_COPYAREA=y | ||
829 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
830 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
831 | # CONFIG_FB_SYS_FILLRECT is not set | ||
832 | # CONFIG_FB_SYS_COPYAREA is not set | ||
833 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
834 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
835 | # CONFIG_FB_SYS_FOPS is not set | ||
836 | # CONFIG_FB_SVGALIB is not set | ||
837 | # CONFIG_FB_MACMODES is not set | ||
838 | # CONFIG_FB_BACKLIGHT is not set | ||
839 | # CONFIG_FB_MODE_HELPERS is not set | ||
840 | # CONFIG_FB_TILEBLITTING is not set | ||
841 | |||
842 | # | ||
843 | # Frame buffer hardware drivers | ||
844 | # | ||
845 | # CONFIG_FB_S1D13XXX is not set | ||
846 | CONFIG_FB_ATMEL=y | ||
847 | # CONFIG_FB_VIRTUAL is not set | ||
848 | # CONFIG_FB_METRONOME is not set | ||
849 | # CONFIG_FB_MB862XX is not set | ||
850 | # CONFIG_FB_BROADSHEET is not set | ||
851 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | ||
852 | CONFIG_LCD_CLASS_DEVICE=y | ||
853 | # CONFIG_LCD_LTV350QV is not set | ||
854 | # CONFIG_LCD_ILI9320 is not set | ||
855 | # CONFIG_LCD_TDO24M is not set | ||
856 | # CONFIG_LCD_VGG2432A4 is not set | ||
857 | # CONFIG_LCD_PLATFORM is not set | ||
858 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | ||
859 | # CONFIG_BACKLIGHT_ATMEL_LCDC is not set | ||
860 | # CONFIG_BACKLIGHT_ATMEL_PWM is not set | ||
861 | CONFIG_BACKLIGHT_GENERIC=y | ||
862 | |||
863 | # | ||
864 | # Display device support | ||
865 | # | ||
866 | # CONFIG_DISPLAY_SUPPORT is not set | ||
867 | |||
868 | # | ||
869 | # Console display driver support | ||
870 | # | ||
871 | CONFIG_DUMMY_CONSOLE=y | ||
872 | # CONFIG_FRAMEBUFFER_CONSOLE is not set | ||
873 | # CONFIG_LOGO is not set | ||
874 | CONFIG_SOUND=m | ||
875 | CONFIG_SOUND_OSS_CORE=y | ||
876 | CONFIG_SND=m | ||
877 | CONFIG_SND_TIMER=m | ||
878 | CONFIG_SND_PCM=m | ||
879 | # CONFIG_SND_SEQUENCER is not set | ||
880 | CONFIG_SND_OSSEMUL=y | ||
881 | CONFIG_SND_MIXER_OSS=m | ||
882 | CONFIG_SND_PCM_OSS=m | ||
883 | CONFIG_SND_PCM_OSS_PLUGINS=y | ||
884 | # CONFIG_SND_DYNAMIC_MINORS is not set | ||
885 | # CONFIG_SND_SUPPORT_OLD_API is not set | ||
886 | # CONFIG_SND_VERBOSE_PROCFS is not set | ||
887 | # CONFIG_SND_VERBOSE_PRINTK is not set | ||
888 | # CONFIG_SND_DEBUG is not set | ||
889 | CONFIG_SND_VMASTER=y | ||
890 | CONFIG_SND_AC97_CODEC=m | ||
891 | CONFIG_SND_DRIVERS=y | ||
892 | # CONFIG_SND_DUMMY is not set | ||
893 | # CONFIG_SND_MTPAV is not set | ||
894 | # CONFIG_SND_SERIAL_U16550 is not set | ||
895 | # CONFIG_SND_MPU401 is not set | ||
896 | # CONFIG_SND_AC97_POWER_SAVE is not set | ||
897 | |||
898 | # | ||
899 | # Atmel devices (AVR32 and AT91) | ||
900 | # | ||
901 | # CONFIG_SND_ATMEL_ABDAC is not set | ||
902 | CONFIG_SND_ATMEL_AC97C=m | ||
903 | # CONFIG_SND_SPI is not set | ||
904 | # CONFIG_SND_SOC is not set | ||
905 | # CONFIG_SOUND_PRIME is not set | ||
906 | CONFIG_AC97_BUS=m | ||
907 | CONFIG_HID_SUPPORT=y | ||
908 | CONFIG_HID=y | ||
909 | # CONFIG_HID_DEBUG is not set | ||
910 | # CONFIG_HIDRAW is not set | ||
911 | # CONFIG_HID_PID is not set | ||
912 | |||
913 | # | ||
914 | # Special HID drivers | ||
915 | # | ||
916 | # CONFIG_HID_APPLE is not set | ||
917 | CONFIG_USB_SUPPORT=y | ||
918 | # CONFIG_USB_ARCH_HAS_HCD is not set | ||
919 | # CONFIG_USB_ARCH_HAS_OHCI is not set | ||
920 | # CONFIG_USB_ARCH_HAS_EHCI is not set | ||
921 | # CONFIG_USB_OTG_WHITELIST is not set | ||
922 | # CONFIG_USB_OTG_BLACKLIST_HUB is not set | ||
923 | # CONFIG_USB_MUSB_HDRC is not set | ||
924 | # CONFIG_USB_GADGET_MUSB_HDRC is not set | ||
925 | |||
926 | # | ||
927 | # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may | ||
928 | # | ||
929 | CONFIG_USB_GADGET=m | ||
930 | # CONFIG_USB_GADGET_DEBUG is not set | ||
931 | CONFIG_USB_GADGET_DEBUG_FILES=y | ||
932 | # CONFIG_USB_GADGET_DEBUG_FS is not set | ||
933 | CONFIG_USB_GADGET_VBUS_DRAW=2 | ||
934 | CONFIG_USB_GADGET_SELECTED=y | ||
935 | # CONFIG_USB_GADGET_AT91 is not set | ||
936 | CONFIG_USB_GADGET_ATMEL_USBA=y | ||
937 | CONFIG_USB_ATMEL_USBA=m | ||
938 | # CONFIG_USB_GADGET_FSL_USB2 is not set | ||
939 | # CONFIG_USB_GADGET_LH7A40X is not set | ||
940 | # CONFIG_USB_GADGET_OMAP is not set | ||
941 | # CONFIG_USB_GADGET_PXA25X is not set | ||
942 | # CONFIG_USB_GADGET_PXA27X is not set | ||
943 | # CONFIG_USB_GADGET_S3C2410 is not set | ||
944 | # CONFIG_USB_GADGET_IMX is not set | ||
945 | # CONFIG_USB_GADGET_M66592 is not set | ||
946 | # CONFIG_USB_GADGET_AMD5536UDC is not set | ||
947 | # CONFIG_USB_GADGET_FSL_QE is not set | ||
948 | # CONFIG_USB_GADGET_CI13XXX is not set | ||
949 | # CONFIG_USB_GADGET_NET2280 is not set | ||
950 | # CONFIG_USB_GADGET_GOKU is not set | ||
951 | # CONFIG_USB_GADGET_DUMMY_HCD is not set | ||
952 | CONFIG_USB_GADGET_DUALSPEED=y | ||
953 | # CONFIG_USB_ZERO is not set | ||
954 | # CONFIG_USB_ETH is not set | ||
955 | # CONFIG_USB_GADGETFS is not set | ||
956 | CONFIG_USB_FILE_STORAGE=m | ||
957 | # CONFIG_USB_FILE_STORAGE_TEST is not set | ||
958 | CONFIG_USB_G_SERIAL=m | ||
959 | # CONFIG_USB_MIDI_GADGET is not set | ||
960 | # CONFIG_USB_G_PRINTER is not set | ||
961 | # CONFIG_USB_CDC_COMPOSITE is not set | ||
962 | |||
963 | # | ||
964 | # OTG and related infrastructure | ||
965 | # | ||
966 | # CONFIG_USB_GPIO_VBUS is not set | ||
967 | # CONFIG_NOP_USB_XCEIV is not set | ||
968 | CONFIG_MMC=y | ||
969 | # CONFIG_MMC_DEBUG is not set | ||
970 | # CONFIG_MMC_UNSAFE_RESUME is not set | ||
971 | |||
972 | # | ||
973 | # MMC/SD/SDIO Card Drivers | ||
974 | # | ||
975 | CONFIG_MMC_BLOCK=y | ||
976 | CONFIG_MMC_BLOCK_BOUNCE=y | ||
977 | # CONFIG_SDIO_UART is not set | ||
978 | # CONFIG_MMC_TEST is not set | ||
979 | |||
980 | # | ||
981 | # MMC/SD/SDIO Host Controller Drivers | ||
982 | # | ||
983 | # CONFIG_MMC_SDHCI is not set | ||
984 | CONFIG_MMC_ATMELMCI=y | ||
985 | # CONFIG_MMC_ATMELMCI_DMA is not set | ||
986 | # CONFIG_MMC_SPI is not set | ||
987 | # CONFIG_MEMSTICK is not set | ||
988 | CONFIG_NEW_LEDS=y | ||
989 | CONFIG_LEDS_CLASS=y | ||
990 | |||
991 | # | ||
992 | # LED drivers | ||
993 | # | ||
994 | CONFIG_LEDS_ATMEL_PWM=y | ||
995 | # CONFIG_LEDS_PCA9532 is not set | ||
996 | CONFIG_LEDS_GPIO=y | ||
997 | CONFIG_LEDS_GPIO_PLATFORM=y | ||
998 | # CONFIG_LEDS_LP5521 is not set | ||
999 | # CONFIG_LEDS_PCA955X is not set | ||
1000 | # CONFIG_LEDS_DAC124S085 is not set | ||
1001 | # CONFIG_LEDS_BD2802 is not set | ||
1002 | |||
1003 | # | ||
1004 | # LED Triggers | ||
1005 | # | ||
1006 | CONFIG_LEDS_TRIGGERS=y | ||
1007 | CONFIG_LEDS_TRIGGER_TIMER=y | ||
1008 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||
1009 | # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set | ||
1010 | # CONFIG_LEDS_TRIGGER_GPIO is not set | ||
1011 | # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set | ||
1012 | |||
1013 | # | ||
1014 | # iptables trigger is under Netfilter config (LED target) | ||
1015 | # | ||
1016 | # CONFIG_ACCESSIBILITY is not set | ||
1017 | CONFIG_RTC_LIB=m | ||
1018 | CONFIG_RTC_CLASS=m | ||
1019 | |||
1020 | # | ||
1021 | # RTC interfaces | ||
1022 | # | ||
1023 | CONFIG_RTC_INTF_SYSFS=y | ||
1024 | CONFIG_RTC_INTF_PROC=y | ||
1025 | CONFIG_RTC_INTF_DEV=y | ||
1026 | # CONFIG_RTC_INTF_DEV_UIE_EMUL is not set | ||
1027 | # CONFIG_RTC_DRV_TEST is not set | ||
1028 | |||
1029 | # | ||
1030 | # I2C RTC drivers | ||
1031 | # | ||
1032 | # CONFIG_RTC_DRV_DS1307 is not set | ||
1033 | # CONFIG_RTC_DRV_DS1374 is not set | ||
1034 | # CONFIG_RTC_DRV_DS1672 is not set | ||
1035 | # CONFIG_RTC_DRV_MAX6900 is not set | ||
1036 | # CONFIG_RTC_DRV_RS5C372 is not set | ||
1037 | # CONFIG_RTC_DRV_ISL1208 is not set | ||
1038 | # CONFIG_RTC_DRV_X1205 is not set | ||
1039 | # CONFIG_RTC_DRV_PCF8563 is not set | ||
1040 | # CONFIG_RTC_DRV_PCF8583 is not set | ||
1041 | # CONFIG_RTC_DRV_M41T80 is not set | ||
1042 | CONFIG_RTC_DRV_S35390A=m | ||
1043 | # CONFIG_RTC_DRV_FM3130 is not set | ||
1044 | # CONFIG_RTC_DRV_RX8581 is not set | ||
1045 | |||
1046 | # | ||
1047 | # SPI RTC drivers | ||
1048 | # | ||
1049 | # CONFIG_RTC_DRV_M41T94 is not set | ||
1050 | # CONFIG_RTC_DRV_DS1305 is not set | ||
1051 | # CONFIG_RTC_DRV_DS1390 is not set | ||
1052 | # CONFIG_RTC_DRV_MAX6902 is not set | ||
1053 | # CONFIG_RTC_DRV_R9701 is not set | ||
1054 | # CONFIG_RTC_DRV_RS5C348 is not set | ||
1055 | # CONFIG_RTC_DRV_DS3234 is not set | ||
1056 | |||
1057 | # | ||
1058 | # Platform RTC drivers | ||
1059 | # | ||
1060 | # CONFIG_RTC_DRV_DS1286 is not set | ||
1061 | # CONFIG_RTC_DRV_DS1511 is not set | ||
1062 | # CONFIG_RTC_DRV_DS1553 is not set | ||
1063 | # CONFIG_RTC_DRV_DS1742 is not set | ||
1064 | # CONFIG_RTC_DRV_STK17TA8 is not set | ||
1065 | # CONFIG_RTC_DRV_M48T86 is not set | ||
1066 | # CONFIG_RTC_DRV_M48T35 is not set | ||
1067 | # CONFIG_RTC_DRV_M48T59 is not set | ||
1068 | # CONFIG_RTC_DRV_BQ4802 is not set | ||
1069 | # CONFIG_RTC_DRV_V3020 is not set | ||
1070 | |||
1071 | # | ||
1072 | # on-CPU RTC drivers | ||
1073 | # | ||
1074 | CONFIG_RTC_DRV_AT32AP700X=m | ||
1075 | CONFIG_DMADEVICES=y | ||
1076 | |||
1077 | # | ||
1078 | # DMA Devices | ||
1079 | # | ||
1080 | CONFIG_DW_DMAC=y | ||
1081 | CONFIG_DMA_ENGINE=y | ||
1082 | |||
1083 | # | ||
1084 | # DMA Clients | ||
1085 | # | ||
1086 | # CONFIG_NET_DMA is not set | ||
1087 | # CONFIG_ASYNC_TX_DMA is not set | ||
1088 | # CONFIG_DMATEST is not set | ||
1089 | # CONFIG_AUXDISPLAY is not set | ||
1090 | CONFIG_UIO=y | ||
1091 | # CONFIG_UIO_PDRV is not set | ||
1092 | # CONFIG_UIO_PDRV_GENIRQ is not set | ||
1093 | # CONFIG_UIO_SMX is not set | ||
1094 | # CONFIG_UIO_SERCOS3 is not set | ||
1095 | # CONFIG_STAGING is not set | ||
1096 | |||
1097 | # | ||
1098 | # File systems | ||
1099 | # | ||
1100 | CONFIG_EXT2_FS=y | ||
1101 | CONFIG_EXT2_FS_XATTR=y | ||
1102 | # CONFIG_EXT2_FS_POSIX_ACL is not set | ||
1103 | # CONFIG_EXT2_FS_SECURITY is not set | ||
1104 | # CONFIG_EXT2_FS_XIP is not set | ||
1105 | CONFIG_EXT3_FS=y | ||
1106 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
1107 | CONFIG_EXT3_FS_XATTR=y | ||
1108 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
1109 | # CONFIG_EXT3_FS_SECURITY is not set | ||
1110 | # CONFIG_EXT4_FS is not set | ||
1111 | CONFIG_JBD=y | ||
1112 | # CONFIG_JBD_DEBUG is not set | ||
1113 | CONFIG_FS_MBCACHE=y | ||
1114 | # CONFIG_REISERFS_FS is not set | ||
1115 | # CONFIG_JFS_FS is not set | ||
1116 | # CONFIG_FS_POSIX_ACL is not set | ||
1117 | CONFIG_FILE_LOCKING=y | ||
1118 | # CONFIG_XFS_FS is not set | ||
1119 | # CONFIG_OCFS2_FS is not set | ||
1120 | # CONFIG_BTRFS_FS is not set | ||
1121 | # CONFIG_DNOTIFY is not set | ||
1122 | # CONFIG_INOTIFY is not set | ||
1123 | # CONFIG_QUOTA is not set | ||
1124 | # CONFIG_AUTOFS_FS is not set | ||
1125 | # CONFIG_AUTOFS4_FS is not set | ||
1126 | # CONFIG_FUSE_FS is not set | ||
1127 | |||
1128 | # | ||
1129 | # Caches | ||
1130 | # | ||
1131 | # CONFIG_FSCACHE is not set | ||
1132 | |||
1133 | # | ||
1134 | # CD-ROM/DVD Filesystems | ||
1135 | # | ||
1136 | # CONFIG_ISO9660_FS is not set | ||
1137 | # CONFIG_UDF_FS is not set | ||
1138 | |||
1139 | # | ||
1140 | # DOS/FAT/NT Filesystems | ||
1141 | # | ||
1142 | CONFIG_FAT_FS=y | ||
1143 | CONFIG_MSDOS_FS=y | ||
1144 | CONFIG_VFAT_FS=y | ||
1145 | CONFIG_FAT_DEFAULT_CODEPAGE=850 | ||
1146 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | ||
1147 | CONFIG_NTFS_FS=m | ||
1148 | # CONFIG_NTFS_DEBUG is not set | ||
1149 | CONFIG_NTFS_RW=y | ||
1150 | |||
1151 | # | ||
1152 | # Pseudo filesystems | ||
1153 | # | ||
1154 | CONFIG_PROC_FS=y | ||
1155 | # CONFIG_PROC_KCORE is not set | ||
1156 | CONFIG_PROC_SYSCTL=y | ||
1157 | CONFIG_PROC_PAGE_MONITOR=y | ||
1158 | CONFIG_SYSFS=y | ||
1159 | CONFIG_TMPFS=y | ||
1160 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1161 | # CONFIG_HUGETLB_PAGE is not set | ||
1162 | CONFIG_CONFIGFS_FS=y | ||
1163 | CONFIG_MISC_FILESYSTEMS=y | ||
1164 | # CONFIG_ADFS_FS is not set | ||
1165 | # CONFIG_AFFS_FS is not set | ||
1166 | # CONFIG_HFS_FS is not set | ||
1167 | # CONFIG_HFSPLUS_FS is not set | ||
1168 | # CONFIG_BEFS_FS is not set | ||
1169 | # CONFIG_BFS_FS is not set | ||
1170 | # CONFIG_EFS_FS is not set | ||
1171 | CONFIG_JFFS2_FS=y | ||
1172 | CONFIG_JFFS2_FS_DEBUG=0 | ||
1173 | CONFIG_JFFS2_FS_WRITEBUFFER=y | ||
1174 | # CONFIG_JFFS2_FS_WBUF_VERIFY is not set | ||
1175 | # CONFIG_JFFS2_SUMMARY is not set | ||
1176 | # CONFIG_JFFS2_FS_XATTR is not set | ||
1177 | # CONFIG_JFFS2_COMPRESSION_OPTIONS is not set | ||
1178 | CONFIG_JFFS2_ZLIB=y | ||
1179 | # CONFIG_JFFS2_LZO is not set | ||
1180 | CONFIG_JFFS2_RTIME=y | ||
1181 | # CONFIG_JFFS2_RUBIN is not set | ||
1182 | # CONFIG_CRAMFS is not set | ||
1183 | # CONFIG_SQUASHFS is not set | ||
1184 | # CONFIG_VXFS_FS is not set | ||
1185 | # CONFIG_MINIX_FS is not set | ||
1186 | # CONFIG_OMFS_FS is not set | ||
1187 | # CONFIG_HPFS_FS is not set | ||
1188 | # CONFIG_QNX4FS_FS is not set | ||
1189 | # CONFIG_ROMFS_FS is not set | ||
1190 | # CONFIG_SYSV_FS is not set | ||
1191 | # CONFIG_UFS_FS is not set | ||
1192 | # CONFIG_NILFS2_FS is not set | ||
1193 | CONFIG_NETWORK_FILESYSTEMS=y | ||
1194 | CONFIG_NFS_FS=y | ||
1195 | CONFIG_NFS_V3=y | ||
1196 | # CONFIG_NFS_V3_ACL is not set | ||
1197 | # CONFIG_NFS_V4 is not set | ||
1198 | CONFIG_ROOT_NFS=y | ||
1199 | # CONFIG_NFSD is not set | ||
1200 | CONFIG_LOCKD=y | ||
1201 | CONFIG_LOCKD_V4=y | ||
1202 | CONFIG_NFS_COMMON=y | ||
1203 | CONFIG_SUNRPC=y | ||
1204 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
1205 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1206 | CONFIG_SMB_FS=m | ||
1207 | CONFIG_SMB_NLS_DEFAULT=y | ||
1208 | CONFIG_SMB_NLS_REMOTE="cp437" | ||
1209 | CONFIG_CIFS=m | ||
1210 | CONFIG_CIFS_STATS=y | ||
1211 | # CONFIG_CIFS_STATS2 is not set | ||
1212 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
1213 | CONFIG_CIFS_XATTR=y | ||
1214 | CONFIG_CIFS_POSIX=y | ||
1215 | # CONFIG_CIFS_DEBUG2 is not set | ||
1216 | # CONFIG_CIFS_EXPERIMENTAL is not set | ||
1217 | # CONFIG_NCP_FS is not set | ||
1218 | # CONFIG_CODA_FS is not set | ||
1219 | # CONFIG_AFS_FS is not set | ||
1220 | |||
1221 | # | ||
1222 | # Partition Types | ||
1223 | # | ||
1224 | # CONFIG_PARTITION_ADVANCED is not set | ||
1225 | CONFIG_MSDOS_PARTITION=y | ||
1226 | CONFIG_NLS=y | ||
1227 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
1228 | CONFIG_NLS_CODEPAGE_437=y | ||
1229 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1230 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1231 | CONFIG_NLS_CODEPAGE_850=y | ||
1232 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1233 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1234 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1235 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1236 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1237 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1238 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1239 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1240 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1241 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1242 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1243 | # CONFIG_NLS_CODEPAGE_936 is not set | ||
1244 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1245 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1246 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1247 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1248 | # CONFIG_NLS_ISO8859_8 is not set | ||
1249 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1250 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1251 | # CONFIG_NLS_ASCII is not set | ||
1252 | CONFIG_NLS_ISO8859_1=y | ||
1253 | # CONFIG_NLS_ISO8859_2 is not set | ||
1254 | # CONFIG_NLS_ISO8859_3 is not set | ||
1255 | # CONFIG_NLS_ISO8859_4 is not set | ||
1256 | # CONFIG_NLS_ISO8859_5 is not set | ||
1257 | # CONFIG_NLS_ISO8859_6 is not set | ||
1258 | # CONFIG_NLS_ISO8859_7 is not set | ||
1259 | # CONFIG_NLS_ISO8859_9 is not set | ||
1260 | # CONFIG_NLS_ISO8859_13 is not set | ||
1261 | # CONFIG_NLS_ISO8859_14 is not set | ||
1262 | # CONFIG_NLS_ISO8859_15 is not set | ||
1263 | # CONFIG_NLS_KOI8_R is not set | ||
1264 | # CONFIG_NLS_KOI8_U is not set | ||
1265 | CONFIG_NLS_UTF8=y | ||
1266 | # CONFIG_DLM is not set | ||
1267 | |||
1268 | # | ||
1269 | # Kernel hacking | ||
1270 | # | ||
1271 | # CONFIG_PRINTK_TIME is not set | ||
1272 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
1273 | CONFIG_ENABLE_MUST_CHECK=y | ||
1274 | CONFIG_FRAME_WARN=1024 | ||
1275 | CONFIG_MAGIC_SYSRQ=y | ||
1276 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1277 | CONFIG_DEBUG_FS=y | ||
1278 | # CONFIG_HEADERS_CHECK is not set | ||
1279 | CONFIG_DEBUG_KERNEL=y | ||
1280 | # CONFIG_DEBUG_SHIRQ is not set | ||
1281 | CONFIG_DETECT_SOFTLOCKUP=y | ||
1282 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | ||
1283 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | ||
1284 | CONFIG_DETECT_HUNG_TASK=y | ||
1285 | # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | ||
1286 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | ||
1287 | CONFIG_SCHED_DEBUG=y | ||
1288 | # CONFIG_SCHEDSTATS is not set | ||
1289 | # CONFIG_TIMER_STATS is not set | ||
1290 | # CONFIG_DEBUG_OBJECTS is not set | ||
1291 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
1292 | # CONFIG_RT_MUTEX_TESTER is not set | ||
1293 | # CONFIG_DEBUG_SPINLOCK is not set | ||
1294 | # CONFIG_DEBUG_MUTEXES is not set | ||
1295 | # CONFIG_DEBUG_LOCK_ALLOC is not set | ||
1296 | # CONFIG_PROVE_LOCKING is not set | ||
1297 | # CONFIG_LOCK_STAT is not set | ||
1298 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
1299 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
1300 | # CONFIG_DEBUG_KOBJECT is not set | ||
1301 | CONFIG_DEBUG_BUGVERBOSE=y | ||
1302 | # CONFIG_DEBUG_INFO is not set | ||
1303 | # CONFIG_DEBUG_VM is not set | ||
1304 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
1305 | # CONFIG_DEBUG_MEMORY_INIT is not set | ||
1306 | # CONFIG_DEBUG_LIST is not set | ||
1307 | # CONFIG_DEBUG_SG is not set | ||
1308 | # CONFIG_DEBUG_NOTIFIERS is not set | ||
1309 | CONFIG_FRAME_POINTER=y | ||
1310 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
1311 | # CONFIG_RCU_TORTURE_TEST is not set | ||
1312 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1313 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
1314 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
1315 | # CONFIG_FAULT_INJECTION is not set | ||
1316 | # CONFIG_PAGE_POISONING is not set | ||
1317 | CONFIG_TRACING_SUPPORT=y | ||
1318 | |||
1319 | # | ||
1320 | # Tracers | ||
1321 | # | ||
1322 | # CONFIG_IRQSOFF_TRACER is not set | ||
1323 | # CONFIG_SCHED_TRACER is not set | ||
1324 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
1325 | # CONFIG_EVENT_TRACER is not set | ||
1326 | # CONFIG_BOOT_TRACER is not set | ||
1327 | # CONFIG_TRACE_BRANCH_PROFILING is not set | ||
1328 | # CONFIG_KMEMTRACE is not set | ||
1329 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1330 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1331 | # CONFIG_DYNAMIC_DEBUG is not set | ||
1332 | # CONFIG_SAMPLES is not set | ||
1333 | |||
1334 | # | ||
1335 | # Security options | ||
1336 | # | ||
1337 | # CONFIG_KEYS is not set | ||
1338 | # CONFIG_SECURITY is not set | ||
1339 | # CONFIG_SECURITYFS is not set | ||
1340 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
1341 | # CONFIG_CRYPTO is not set | ||
1342 | # CONFIG_BINARY_PRINTF is not set | ||
1343 | |||
1344 | # | ||
1345 | # Library routines | ||
1346 | # | ||
1347 | CONFIG_BITREVERSE=y | ||
1348 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
1349 | CONFIG_CRC_CCITT=y | ||
1350 | # CONFIG_CRC16 is not set | ||
1351 | # CONFIG_CRC_T10DIF is not set | ||
1352 | # CONFIG_CRC_ITU_T is not set | ||
1353 | CONFIG_CRC32=y | ||
1354 | # CONFIG_CRC7 is not set | ||
1355 | # CONFIG_LIBCRC32C is not set | ||
1356 | CONFIG_ZLIB_INFLATE=y | ||
1357 | CONFIG_ZLIB_DEFLATE=y | ||
1358 | CONFIG_DECOMPRESS_GZIP=y | ||
1359 | CONFIG_GENERIC_ALLOCATOR=y | ||
1360 | CONFIG_HAS_IOMEM=y | ||
1361 | CONFIG_HAS_IOPORT=y | ||
1362 | CONFIG_HAS_DMA=y | ||
1363 | CONFIG_NLATTR=y | ||
diff --git a/arch/avr32/include/asm/hw_irq.h b/arch/avr32/include/asm/hw_irq.h index 218b0a6bfd1b..a36f9fcb8fcd 100644 --- a/arch/avr32/include/asm/hw_irq.h +++ b/arch/avr32/include/asm/hw_irq.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_AVR32_HW_IRQ_H | 1 | #ifndef __ASM_AVR32_HW_IRQ_H |
2 | #define __ASM_AVR32_HW_IRQ_H | 2 | #define __ASM_AVR32_HW_IRQ_H |
3 | 3 | ||
4 | static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) | 4 | static inline void hw_resend_irq(struct irq_chip *h, unsigned int i) |
5 | { | 5 | { |
6 | /* Nothing to do */ | 6 | /* Nothing to do */ |
7 | } | 7 | } |
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index d547c8df157d..6e3d491184ea 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c | |||
@@ -75,8 +75,17 @@ void _exception(long signr, struct pt_regs *regs, int code, | |||
75 | { | 75 | { |
76 | siginfo_t info; | 76 | siginfo_t info; |
77 | 77 | ||
78 | if (!user_mode(regs)) | 78 | if (!user_mode(regs)) { |
79 | const struct exception_table_entry *fixup; | ||
80 | |||
81 | /* Are we prepared to handle this kernel fault? */ | ||
82 | fixup = search_exception_tables(regs->pc); | ||
83 | if (fixup) { | ||
84 | regs->pc = fixup->fixup; | ||
85 | return; | ||
86 | } | ||
79 | die("Unhandled exception in kernel mode", regs, signr); | 87 | die("Unhandled exception in kernel mode", regs, signr); |
88 | } | ||
80 | 89 | ||
81 | memset(&info, 0, sizeof(info)); | 90 | memset(&info, 0, sizeof(info)); |
82 | info.si_signo = signr; | 91 | info.si_signo = signr; |
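The traps.c hunk above stops the kernel from dying on every kernel-mode exception: if the faulting program counter has an entry in the exception table, the handler rewrites regs->pc to the recorded fixup address and returns, and only otherwise calls die(). The sketch below is illustrative C, not the avr32 implementation (the real search_exception_tables() walks the kernel's sorted __ex_table section); it only shows what a fixup lookup amounts to:

/* Illustrative only: map a faulting PC to a fixup address. */
struct exception_table_entry {
        unsigned long insn;     /* address of the instruction that may fault */
        unsigned long fixup;    /* address to resume at if it does */
};

static const struct exception_table_entry *
find_fixup(const struct exception_table_entry *table, unsigned int n,
           unsigned long pc)
{
        unsigned int i;

        for (i = 0; i < n; i++)
                if (table[i].insn == pc)
                        return &table[i];
        return 0;               /* no entry: the fault stays fatal */
}

With an entry found, recovery is exactly the two added lines in the hunk: assign the fixup address to regs->pc and return.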
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h index 0b8164281899..ddedb471f33e 100644 --- a/arch/avr32/mach-at32ap/include/mach/board.h +++ b/arch/avr32/mach-at32ap/include/mach/board.h | |||
@@ -29,7 +29,7 @@ extern struct platform_device *atmel_default_console_device; | |||
29 | /* Flags for selecting USART extra pins */ | 29 | /* Flags for selecting USART extra pins */ |
30 | #define ATMEL_USART_RTS 0x01 | 30 | #define ATMEL_USART_RTS 0x01 |
31 | #define ATMEL_USART_CTS 0x02 | 31 | #define ATMEL_USART_CTS 0x02 |
32 | #define ATMEL_USART_CLK 0x03 | 32 | #define ATMEL_USART_CLK 0x04 |
33 | 33 | ||
34 | struct atmel_uart_data { | 34 | struct atmel_uart_data { |
35 | short use_dma_tx; /* use transmit DMA? */ | 35 | short use_dma_tx; /* use transmit DMA? */ |
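The one-line board.h change is a genuine bug fix rather than a renumbering: the ATMEL_USART_* values are evidently meant to be OR-ed together as a bitmask, so the old ATMEL_USART_CLK value of 0x03 was indistinguishable from ATMEL_USART_RTS | ATMEL_USART_CTS. A minimal illustration (hypothetical helper, not taken from the patch):

/* Hypothetical check showing why CLK needs its own bit. */
#define ATMEL_USART_RTS 0x01
#define ATMEL_USART_CTS 0x02
#define ATMEL_USART_CLK 0x04    /* formerly 0x03 == (RTS | CTS) */

static inline int usart_wants_clk(unsigned long flags)
{
        /* With 0x04 this no longer fires for flags == (RTS | CTS). */
        return (flags & ATMEL_USART_CLK) == ATMEL_USART_CLK;
}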
diff --git a/arch/frv/include/asm/signal.h b/arch/frv/include/asm/signal.h index 2079197d483d..f071e813dcb3 100644 --- a/arch/frv/include/asm/signal.h +++ b/arch/frv/include/asm/signal.h | |||
@@ -3,107 +3,15 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | /* Avoid too many header ordering problems. */ | 6 | #ifndef __KERNEL__ |
7 | struct siginfo; | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | /* Most things should be clean enough to redefine this at will, if care | ||
11 | is taken to make libc match. */ | ||
12 | |||
13 | #define _NSIG 64 | ||
14 | #define _NSIG_BPW 32 | ||
15 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
16 | |||
17 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
18 | |||
19 | typedef struct { | ||
20 | unsigned long sig[_NSIG_WORDS]; | ||
21 | } sigset_t; | ||
22 | |||
23 | #else | ||
24 | /* Here we must cater to libcs that poke about in kernel headers. */ | 7 | /* Here we must cater to libcs that poke about in kernel headers. */ |
25 | 8 | ||
26 | #define NSIG 32 | 9 | #define NSIG 32 |
27 | typedef unsigned long sigset_t; | 10 | typedef unsigned long sigset_t; |
28 | 11 | ||
29 | #endif /* __KERNEL__ */ | 12 | #endif /* !__KERNEL__ */ |
30 | |||
31 | #define SIGHUP 1 | ||
32 | #define SIGINT 2 | ||
33 | #define SIGQUIT 3 | ||
34 | #define SIGILL 4 | ||
35 | #define SIGTRAP 5 | ||
36 | #define SIGABRT 6 | ||
37 | #define SIGIOT 6 | ||
38 | #define SIGBUS 7 | ||
39 | #define SIGFPE 8 | ||
40 | #define SIGKILL 9 | ||
41 | #define SIGUSR1 10 | ||
42 | #define SIGSEGV 11 | ||
43 | #define SIGUSR2 12 | ||
44 | #define SIGPIPE 13 | ||
45 | #define SIGALRM 14 | ||
46 | #define SIGTERM 15 | ||
47 | #define SIGSTKFLT 16 | ||
48 | #define SIGCHLD 17 | ||
49 | #define SIGCONT 18 | ||
50 | #define SIGSTOP 19 | ||
51 | #define SIGTSTP 20 | ||
52 | #define SIGTTIN 21 | ||
53 | #define SIGTTOU 22 | ||
54 | #define SIGURG 23 | ||
55 | #define SIGXCPU 24 | ||
56 | #define SIGXFSZ 25 | ||
57 | #define SIGVTALRM 26 | ||
58 | #define SIGPROF 27 | ||
59 | #define SIGWINCH 28 | ||
60 | #define SIGIO 29 | ||
61 | #define SIGPOLL SIGIO | ||
62 | /* | ||
63 | #define SIGLOST 29 | ||
64 | */ | ||
65 | #define SIGPWR 30 | ||
66 | #define SIGSYS 31 | ||
67 | #define SIGUNUSED 31 | ||
68 | |||
69 | /* These should not be considered constants from userland. */ | ||
70 | #define SIGRTMIN 32 | ||
71 | #define SIGRTMAX (_NSIG-1) | ||
72 | |||
73 | /* | ||
74 | * SA_FLAGS values: | ||
75 | * | ||
76 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
77 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
78 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
79 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
80 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
81 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
82 | * | ||
83 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
84 | * Unix names RESETHAND and NODEFER respectively. | ||
85 | */ | ||
86 | #define SA_NOCLDSTOP 0x00000001 | ||
87 | #define SA_NOCLDWAIT 0x00000002 /* not supported yet */ | ||
88 | #define SA_SIGINFO 0x00000004 | ||
89 | #define SA_ONSTACK 0x08000000 | ||
90 | #define SA_RESTART 0x10000000 | ||
91 | #define SA_NODEFER 0x40000000 | ||
92 | #define SA_RESETHAND 0x80000000 | ||
93 | |||
94 | #define SA_NOMASK SA_NODEFER | ||
95 | #define SA_ONESHOT SA_RESETHAND | ||
96 | |||
97 | #define SA_RESTORER 0x04000000 | ||
98 | |||
99 | /* | ||
100 | * sigaltstack controls | ||
101 | */ | ||
102 | #define SS_ONSTACK 1 | ||
103 | #define SS_DISABLE 2 | ||
104 | 13 | ||
105 | #define MINSIGSTKSZ 2048 | 14 | #define SA_RESTORER 0x04000000 /* to get struct sigaction correct */ |
106 | #define SIGSTKSZ 8192 | ||
107 | 15 | ||
108 | #include <asm-generic/signal.h> | 16 | #include <asm-generic/signal.h> |
109 | 17 | ||
@@ -115,16 +23,6 @@ struct old_sigaction { | |||
115 | __sigrestore_t sa_restorer; | 23 | __sigrestore_t sa_restorer; |
116 | }; | 24 | }; |
117 | 25 | ||
118 | struct sigaction { | ||
119 | __sighandler_t sa_handler; | ||
120 | unsigned long sa_flags; | ||
121 | __sigrestore_t sa_restorer; | ||
122 | sigset_t sa_mask; /* mask last for extensibility */ | ||
123 | }; | ||
124 | |||
125 | struct k_sigaction { | ||
126 | struct sigaction sa; | ||
127 | }; | ||
128 | #else | 26 | #else |
129 | /* Here we must cater to libcs that poke about in kernel headers. */ | 27 | /* Here we must cater to libcs that poke about in kernel headers. */ |
130 | 28 | ||
@@ -143,19 +41,4 @@ struct sigaction { | |||
143 | 41 | ||
144 | #endif /* __KERNEL__ */ | 42 | #endif /* __KERNEL__ */ |
145 | 43 | ||
146 | typedef struct sigaltstack { | ||
147 | void __user *ss_sp; | ||
148 | int ss_flags; | ||
149 | size_t ss_size; | ||
150 | } stack_t; | ||
151 | |||
152 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | ||
153 | |||
154 | #ifdef __KERNEL__ | ||
155 | |||
156 | #include <asm/sigcontext.h> | ||
157 | #undef __HAVE_ARCH_SIG_BITOPS | ||
158 | |||
159 | #endif /* __KERNEL__ */ | ||
160 | |||
161 | #endif /* _ASM_SIGNAL_H */ | 44 | #endif /* _ASM_SIGNAL_H */ |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 68f5578fe38e..356d2ec8e2fb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -789,10 +789,26 @@ config X86_MCE | |||
789 | to disable it. MCE support simply ignores non-MCE processors like | 789 | to disable it. MCE support simply ignores non-MCE processors like |
790 | the 386 and 486, so nearly everyone can say Y here. | 790 | the 386 and 486, so nearly everyone can say Y here. |
791 | 791 | ||
792 | config X86_OLD_MCE | ||
793 | depends on X86_32 && X86_MCE | ||
794 | bool "Use legacy machine check code (will go away)" | ||
795 | default n | ||
796 | select X86_ANCIENT_MCE | ||
797 | ---help--- | ||
798 | Use the old i386 machine check code. This is merely intended for | ||
799 | testing in a transition period. Try this if you run into any machine | ||
800 | check related software problems, but report the problem to | ||
801 | linux-kernel. When in doubt say no. | ||
802 | |||
803 | config X86_NEW_MCE | ||
804 | depends on X86_MCE | ||
805 | bool | ||
806 | default y if (!X86_OLD_MCE && X86_32) || X86_64 | ||
807 | |||
792 | config X86_MCE_INTEL | 808 | config X86_MCE_INTEL |
793 | def_bool y | 809 | def_bool y |
794 | prompt "Intel MCE features" | 810 | prompt "Intel MCE features" |
795 | depends on X86_64 && X86_MCE && X86_LOCAL_APIC | 811 | depends on X86_NEW_MCE && X86_LOCAL_APIC |
796 | ---help--- | 812 | ---help--- |
797 | Additional support for intel specific MCE features such as | 813 | Additional support for intel specific MCE features such as |
798 | the thermal monitor. | 814 | the thermal monitor. |
@@ -800,19 +816,36 @@ config X86_MCE_INTEL | |||
800 | config X86_MCE_AMD | 816 | config X86_MCE_AMD |
801 | def_bool y | 817 | def_bool y |
802 | prompt "AMD MCE features" | 818 | prompt "AMD MCE features" |
803 | depends on X86_64 && X86_MCE && X86_LOCAL_APIC | 819 | depends on X86_NEW_MCE && X86_LOCAL_APIC |
804 | ---help--- | 820 | ---help--- |
805 | Additional support for AMD specific MCE features such as | 821 | Additional support for AMD specific MCE features such as |
806 | the DRAM Error Threshold. | 822 | the DRAM Error Threshold. |
807 | 823 | ||
824 | config X86_ANCIENT_MCE | ||
825 | def_bool n | ||
826 | depends on X86_32 | ||
827 | prompt "Support for old Pentium 5 / WinChip machine checks" | ||
828 | ---help--- | ||
829 | Include support for machine check handling on old Pentium 5 or WinChip | ||
830 | systems. These typically need to be enabled explicitly on the command | ||
831 | line. | ||
832 | |||
808 | config X86_MCE_THRESHOLD | 833 | config X86_MCE_THRESHOLD |
809 | depends on X86_MCE_AMD || X86_MCE_INTEL | 834 | depends on X86_MCE_AMD || X86_MCE_INTEL |
810 | bool | 835 | bool |
811 | default y | 836 | default y |
812 | 837 | ||
838 | config X86_MCE_INJECT | ||
839 | depends on X86_NEW_MCE | ||
840 | tristate "Machine check injector support" | ||
841 | ---help--- | ||
842 | Provide support for injecting machine checks for testing purposes. | ||
843 | If you don't know what a machine check is and you don't do kernel | ||
844 | QA, it is safe to say n. | ||
845 | |||
813 | config X86_MCE_NONFATAL | 846 | config X86_MCE_NONFATAL |
814 | tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4" | 847 | tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4" |
815 | depends on X86_32 && X86_MCE | 848 | depends on X86_OLD_MCE |
816 | ---help--- | 849 | ---help--- |
817 | Enabling this feature starts a timer that triggers every 5 seconds which | 850 | Enabling this feature starts a timer that triggers every 5 seconds which |
818 | will look at the machine check registers to see if anything happened. | 851 | will look at the machine check registers to see if anything happened. |
@@ -825,11 +858,15 @@ config X86_MCE_NONFATAL | |||
825 | 858 | ||
826 | config X86_MCE_P4THERMAL | 859 | config X86_MCE_P4THERMAL |
827 | bool "check for P4 thermal throttling interrupt." | 860 | bool "check for P4 thermal throttling interrupt." |
828 | depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP) | 861 | depends on X86_OLD_MCE && X86_MCE && (X86_UP_APIC || SMP) |
829 | ---help--- | 862 | ---help--- |
830 | Enabling this feature will cause a message to be printed when the P4 | 863 | Enabling this feature will cause a message to be printed when the P4 |
831 | enters thermal throttling. | 864 | enters thermal throttling. |
832 | 865 | ||
866 | config X86_THERMAL_VECTOR | ||
867 | def_bool y | ||
868 | depends on X86_MCE_P4THERMAL || X86_MCE_INTEL | ||
869 | |||
833 | config VM86 | 870 | config VM86 |
834 | bool "Enable VM86 support" if EMBEDDED | 871 | bool "Enable VM86 support" if EMBEDDED |
835 | default y | 872 | default y |
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index ebe7deedd5b4..cfb0010fa940 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
@@ -2,6 +2,8 @@ | |||
2 | # Arch-specific CryptoAPI modules. | 2 | # Arch-specific CryptoAPI modules. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_CRYPTO_FPU) += fpu.o | ||
6 | |||
5 | obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o | 7 | obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o |
6 | obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o | 8 | obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o |
7 | obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o | 9 | obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 02af0af65497..4e663398f77f 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -21,6 +21,22 @@ | |||
21 | #include <asm/i387.h> | 21 | #include <asm/i387.h> |
22 | #include <asm/aes.h> | 22 | #include <asm/aes.h> |
23 | 23 | ||
24 | #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE) | ||
25 | #define HAS_CTR | ||
26 | #endif | ||
27 | |||
28 | #if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE) | ||
29 | #define HAS_LRW | ||
30 | #endif | ||
31 | |||
32 | #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE) | ||
33 | #define HAS_PCBC | ||
34 | #endif | ||
35 | |||
36 | #if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE) | ||
37 | #define HAS_XTS | ||
38 | #endif | ||
39 | |||
24 | struct async_aes_ctx { | 40 | struct async_aes_ctx { |
25 | struct cryptd_ablkcipher *cryptd_tfm; | 41 | struct cryptd_ablkcipher *cryptd_tfm; |
26 | }; | 42 | }; |
@@ -137,6 +153,41 @@ static struct crypto_alg aesni_alg = { | |||
137 | } | 153 | } |
138 | }; | 154 | }; |
139 | 155 | ||
156 | static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
157 | { | ||
158 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | ||
159 | |||
160 | aesni_enc(ctx, dst, src); | ||
161 | } | ||
162 | |||
163 | static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
164 | { | ||
165 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | ||
166 | |||
167 | aesni_dec(ctx, dst, src); | ||
168 | } | ||
169 | |||
170 | static struct crypto_alg __aesni_alg = { | ||
171 | .cra_name = "__aes-aesni", | ||
172 | .cra_driver_name = "__driver-aes-aesni", | ||
173 | .cra_priority = 0, | ||
174 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
175 | .cra_blocksize = AES_BLOCK_SIZE, | ||
176 | .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, | ||
177 | .cra_alignmask = 0, | ||
178 | .cra_module = THIS_MODULE, | ||
179 | .cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list), | ||
180 | .cra_u = { | ||
181 | .cipher = { | ||
182 | .cia_min_keysize = AES_MIN_KEY_SIZE, | ||
183 | .cia_max_keysize = AES_MAX_KEY_SIZE, | ||
184 | .cia_setkey = aes_set_key, | ||
185 | .cia_encrypt = __aes_encrypt, | ||
186 | .cia_decrypt = __aes_decrypt | ||
187 | } | ||
188 | } | ||
189 | }; | ||
190 | |||
140 | static int ecb_encrypt(struct blkcipher_desc *desc, | 191 | static int ecb_encrypt(struct blkcipher_desc *desc, |
141 | struct scatterlist *dst, struct scatterlist *src, | 192 | struct scatterlist *dst, struct scatterlist *src, |
142 | unsigned int nbytes) | 193 | unsigned int nbytes) |
@@ -277,8 +328,16 @@ static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | |||
277 | unsigned int key_len) | 328 | unsigned int key_len) |
278 | { | 329 | { |
279 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 330 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
331 | struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; | ||
332 | int err; | ||
280 | 333 | ||
281 | return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len); | 334 | crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
335 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) | ||
336 | & CRYPTO_TFM_REQ_MASK); | ||
337 | err = crypto_ablkcipher_setkey(child, key, key_len); | ||
338 | crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) | ||
339 | & CRYPTO_TFM_RES_MASK); | ||
340 | return err; | ||
282 | } | 341 | } |
283 | 342 | ||
284 | static int ablk_encrypt(struct ablkcipher_request *req) | 343 | static int ablk_encrypt(struct ablkcipher_request *req) |
@@ -411,6 +470,163 @@ static struct crypto_alg ablk_cbc_alg = { | |||
411 | }, | 470 | }, |
412 | }; | 471 | }; |
413 | 472 | ||
473 | #ifdef HAS_CTR | ||
474 | static int ablk_ctr_init(struct crypto_tfm *tfm) | ||
475 | { | ||
476 | struct cryptd_ablkcipher *cryptd_tfm; | ||
477 | |||
478 | cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))", | ||
479 | 0, 0); | ||
480 | if (IS_ERR(cryptd_tfm)) | ||
481 | return PTR_ERR(cryptd_tfm); | ||
482 | ablk_init_common(tfm, cryptd_tfm); | ||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | static struct crypto_alg ablk_ctr_alg = { | ||
487 | .cra_name = "ctr(aes)", | ||
488 | .cra_driver_name = "ctr-aes-aesni", | ||
489 | .cra_priority = 400, | ||
490 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
491 | .cra_blocksize = 1, | ||
492 | .cra_ctxsize = sizeof(struct async_aes_ctx), | ||
493 | .cra_alignmask = 0, | ||
494 | .cra_type = &crypto_ablkcipher_type, | ||
495 | .cra_module = THIS_MODULE, | ||
496 | .cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list), | ||
497 | .cra_init = ablk_ctr_init, | ||
498 | .cra_exit = ablk_exit, | ||
499 | .cra_u = { | ||
500 | .ablkcipher = { | ||
501 | .min_keysize = AES_MIN_KEY_SIZE, | ||
502 | .max_keysize = AES_MAX_KEY_SIZE, | ||
503 | .ivsize = AES_BLOCK_SIZE, | ||
504 | .setkey = ablk_set_key, | ||
505 | .encrypt = ablk_encrypt, | ||
506 | .decrypt = ablk_decrypt, | ||
507 | .geniv = "chainiv", | ||
508 | }, | ||
509 | }, | ||
510 | }; | ||
511 | #endif | ||
512 | |||
513 | #ifdef HAS_LRW | ||
514 | static int ablk_lrw_init(struct crypto_tfm *tfm) | ||
515 | { | ||
516 | struct cryptd_ablkcipher *cryptd_tfm; | ||
517 | |||
518 | cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))", | ||
519 | 0, 0); | ||
520 | if (IS_ERR(cryptd_tfm)) | ||
521 | return PTR_ERR(cryptd_tfm); | ||
522 | ablk_init_common(tfm, cryptd_tfm); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | static struct crypto_alg ablk_lrw_alg = { | ||
527 | .cra_name = "lrw(aes)", | ||
528 | .cra_driver_name = "lrw-aes-aesni", | ||
529 | .cra_priority = 400, | ||
530 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
531 | .cra_blocksize = AES_BLOCK_SIZE, | ||
532 | .cra_ctxsize = sizeof(struct async_aes_ctx), | ||
533 | .cra_alignmask = 0, | ||
534 | .cra_type = &crypto_ablkcipher_type, | ||
535 | .cra_module = THIS_MODULE, | ||
536 | .cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list), | ||
537 | .cra_init = ablk_lrw_init, | ||
538 | .cra_exit = ablk_exit, | ||
539 | .cra_u = { | ||
540 | .ablkcipher = { | ||
541 | .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, | ||
542 | .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, | ||
543 | .ivsize = AES_BLOCK_SIZE, | ||
544 | .setkey = ablk_set_key, | ||
545 | .encrypt = ablk_encrypt, | ||
546 | .decrypt = ablk_decrypt, | ||
547 | }, | ||
548 | }, | ||
549 | }; | ||
550 | #endif | ||
551 | |||
552 | #ifdef HAS_PCBC | ||
553 | static int ablk_pcbc_init(struct crypto_tfm *tfm) | ||
554 | { | ||
555 | struct cryptd_ablkcipher *cryptd_tfm; | ||
556 | |||
557 | cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))", | ||
558 | 0, 0); | ||
559 | if (IS_ERR(cryptd_tfm)) | ||
560 | return PTR_ERR(cryptd_tfm); | ||
561 | ablk_init_common(tfm, cryptd_tfm); | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | static struct crypto_alg ablk_pcbc_alg = { | ||
566 | .cra_name = "pcbc(aes)", | ||
567 | .cra_driver_name = "pcbc-aes-aesni", | ||
568 | .cra_priority = 400, | ||
569 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
570 | .cra_blocksize = AES_BLOCK_SIZE, | ||
571 | .cra_ctxsize = sizeof(struct async_aes_ctx), | ||
572 | .cra_alignmask = 0, | ||
573 | .cra_type = &crypto_ablkcipher_type, | ||
574 | .cra_module = THIS_MODULE, | ||
575 | .cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list), | ||
576 | .cra_init = ablk_pcbc_init, | ||
577 | .cra_exit = ablk_exit, | ||
578 | .cra_u = { | ||
579 | .ablkcipher = { | ||
580 | .min_keysize = AES_MIN_KEY_SIZE, | ||
581 | .max_keysize = AES_MAX_KEY_SIZE, | ||
582 | .ivsize = AES_BLOCK_SIZE, | ||
583 | .setkey = ablk_set_key, | ||
584 | .encrypt = ablk_encrypt, | ||
585 | .decrypt = ablk_decrypt, | ||
586 | }, | ||
587 | }, | ||
588 | }; | ||
589 | #endif | ||
590 | |||
591 | #ifdef HAS_XTS | ||
592 | static int ablk_xts_init(struct crypto_tfm *tfm) | ||
593 | { | ||
594 | struct cryptd_ablkcipher *cryptd_tfm; | ||
595 | |||
596 | cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))", | ||
597 | 0, 0); | ||
598 | if (IS_ERR(cryptd_tfm)) | ||
599 | return PTR_ERR(cryptd_tfm); | ||
600 | ablk_init_common(tfm, cryptd_tfm); | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | static struct crypto_alg ablk_xts_alg = { | ||
605 | .cra_name = "xts(aes)", | ||
606 | .cra_driver_name = "xts-aes-aesni", | ||
607 | .cra_priority = 400, | ||
608 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
609 | .cra_blocksize = AES_BLOCK_SIZE, | ||
610 | .cra_ctxsize = sizeof(struct async_aes_ctx), | ||
611 | .cra_alignmask = 0, | ||
612 | .cra_type = &crypto_ablkcipher_type, | ||
613 | .cra_module = THIS_MODULE, | ||
614 | .cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list), | ||
615 | .cra_init = ablk_xts_init, | ||
616 | .cra_exit = ablk_exit, | ||
617 | .cra_u = { | ||
618 | .ablkcipher = { | ||
619 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
620 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
621 | .ivsize = AES_BLOCK_SIZE, | ||
622 | .setkey = ablk_set_key, | ||
623 | .encrypt = ablk_encrypt, | ||
624 | .decrypt = ablk_decrypt, | ||
625 | }, | ||
626 | }, | ||
627 | }; | ||
628 | #endif | ||
629 | |||
414 | static int __init aesni_init(void) | 630 | static int __init aesni_init(void) |
415 | { | 631 | { |
416 | int err; | 632 | int err; |
@@ -421,6 +637,8 @@ static int __init aesni_init(void) | |||
421 | } | 637 | } |
422 | if ((err = crypto_register_alg(&aesni_alg))) | 638 | if ((err = crypto_register_alg(&aesni_alg))) |
423 | goto aes_err; | 639 | goto aes_err; |
640 | if ((err = crypto_register_alg(&__aesni_alg))) | ||
641 | goto __aes_err; | ||
424 | if ((err = crypto_register_alg(&blk_ecb_alg))) | 642 | if ((err = crypto_register_alg(&blk_ecb_alg))) |
425 | goto blk_ecb_err; | 643 | goto blk_ecb_err; |
426 | if ((err = crypto_register_alg(&blk_cbc_alg))) | 644 | if ((err = crypto_register_alg(&blk_cbc_alg))) |
@@ -429,9 +647,41 @@ static int __init aesni_init(void) | |||
429 | goto ablk_ecb_err; | 647 | goto ablk_ecb_err; |
430 | if ((err = crypto_register_alg(&ablk_cbc_alg))) | 648 | if ((err = crypto_register_alg(&ablk_cbc_alg))) |
431 | goto ablk_cbc_err; | 649 | goto ablk_cbc_err; |
650 | #ifdef HAS_CTR | ||
651 | if ((err = crypto_register_alg(&ablk_ctr_alg))) | ||
652 | goto ablk_ctr_err; | ||
653 | #endif | ||
654 | #ifdef HAS_LRW | ||
655 | if ((err = crypto_register_alg(&ablk_lrw_alg))) | ||
656 | goto ablk_lrw_err; | ||
657 | #endif | ||
658 | #ifdef HAS_PCBC | ||
659 | if ((err = crypto_register_alg(&ablk_pcbc_alg))) | ||
660 | goto ablk_pcbc_err; | ||
661 | #endif | ||
662 | #ifdef HAS_XTS | ||
663 | if ((err = crypto_register_alg(&ablk_xts_alg))) | ||
664 | goto ablk_xts_err; | ||
665 | #endif | ||
432 | 666 | ||
433 | return err; | 667 | return err; |
434 | 668 | ||
669 | #ifdef HAS_XTS | ||
670 | ablk_xts_err: | ||
671 | #endif | ||
672 | #ifdef HAS_PCBC | ||
673 | crypto_unregister_alg(&ablk_pcbc_alg); | ||
674 | ablk_pcbc_err: | ||
675 | #endif | ||
676 | #ifdef HAS_LRW | ||
677 | crypto_unregister_alg(&ablk_lrw_alg); | ||
678 | ablk_lrw_err: | ||
679 | #endif | ||
680 | #ifdef HAS_CTR | ||
681 | crypto_unregister_alg(&ablk_ctr_alg); | ||
682 | ablk_ctr_err: | ||
683 | #endif | ||
684 | crypto_unregister_alg(&ablk_cbc_alg); | ||
435 | ablk_cbc_err: | 685 | ablk_cbc_err: |
436 | crypto_unregister_alg(&ablk_ecb_alg); | 686 | crypto_unregister_alg(&ablk_ecb_alg); |
437 | ablk_ecb_err: | 687 | ablk_ecb_err: |
@@ -439,6 +689,8 @@ ablk_ecb_err: | |||
439 | blk_cbc_err: | 689 | blk_cbc_err: |
440 | crypto_unregister_alg(&blk_ecb_alg); | 690 | crypto_unregister_alg(&blk_ecb_alg); |
441 | blk_ecb_err: | 691 | blk_ecb_err: |
692 | crypto_unregister_alg(&__aesni_alg); | ||
693 | __aes_err: | ||
442 | crypto_unregister_alg(&aesni_alg); | 694 | crypto_unregister_alg(&aesni_alg); |
443 | aes_err: | 695 | aes_err: |
444 | return err; | 696 | return err; |
@@ -446,10 +698,23 @@ aes_err: | |||
446 | 698 | ||
447 | static void __exit aesni_exit(void) | 699 | static void __exit aesni_exit(void) |
448 | { | 700 | { |
701 | #ifdef HAS_XTS | ||
702 | crypto_unregister_alg(&ablk_xts_alg); | ||
703 | #endif | ||
704 | #ifdef HAS_PCBC | ||
705 | crypto_unregister_alg(&ablk_pcbc_alg); | ||
706 | #endif | ||
707 | #ifdef HAS_LRW | ||
708 | crypto_unregister_alg(&ablk_lrw_alg); | ||
709 | #endif | ||
710 | #ifdef HAS_CTR | ||
711 | crypto_unregister_alg(&ablk_ctr_alg); | ||
712 | #endif | ||
449 | crypto_unregister_alg(&ablk_cbc_alg); | 713 | crypto_unregister_alg(&ablk_cbc_alg); |
450 | crypto_unregister_alg(&ablk_ecb_alg); | 714 | crypto_unregister_alg(&ablk_ecb_alg); |
451 | crypto_unregister_alg(&blk_cbc_alg); | 715 | crypto_unregister_alg(&blk_cbc_alg); |
452 | crypto_unregister_alg(&blk_ecb_alg); | 716 | crypto_unregister_alg(&blk_ecb_alg); |
717 | crypto_unregister_alg(&__aesni_alg); | ||
453 | crypto_unregister_alg(&aesni_alg); | 718 | crypto_unregister_alg(&aesni_alg); |
454 | } | 719 | } |
455 | 720 | ||
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c new file mode 100644 index 000000000000..5f9781a3815f --- /dev/null +++ b/arch/x86/crypto/fpu.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * FPU: Wrapper for blkcipher touching fpu | ||
3 | * | ||
4 | * Copyright (c) Intel Corp. | ||
5 | * Author: Huang Ying <ying.huang@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <crypto/algapi.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <asm/i387.h> | ||
20 | |||
21 | struct crypto_fpu_ctx { | ||
22 | struct crypto_blkcipher *child; | ||
23 | }; | ||
24 | |||
25 | static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key, | ||
26 | unsigned int keylen) | ||
27 | { | ||
28 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent); | ||
29 | struct crypto_blkcipher *child = ctx->child; | ||
30 | int err; | ||
31 | |||
32 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
33 | crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & | ||
34 | CRYPTO_TFM_REQ_MASK); | ||
35 | err = crypto_blkcipher_setkey(child, key, keylen); | ||
36 | crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & | ||
37 | CRYPTO_TFM_RES_MASK); | ||
38 | return err; | ||
39 | } | ||
40 | |||
41 | static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in, | ||
42 | struct scatterlist *dst, struct scatterlist *src, | ||
43 | unsigned int nbytes) | ||
44 | { | ||
45 | int err; | ||
46 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); | ||
47 | struct crypto_blkcipher *child = ctx->child; | ||
48 | struct blkcipher_desc desc = { | ||
49 | .tfm = child, | ||
50 | .info = desc_in->info, | ||
51 | .flags = desc_in->flags, | ||
52 | }; | ||
53 | |||
54 | kernel_fpu_begin(); | ||
55 | err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes); | ||
56 | kernel_fpu_end(); | ||
57 | return err; | ||
58 | } | ||
59 | |||
60 | static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in, | ||
61 | struct scatterlist *dst, struct scatterlist *src, | ||
62 | unsigned int nbytes) | ||
63 | { | ||
64 | int err; | ||
65 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); | ||
66 | struct crypto_blkcipher *child = ctx->child; | ||
67 | struct blkcipher_desc desc = { | ||
68 | .tfm = child, | ||
69 | .info = desc_in->info, | ||
70 | .flags = desc_in->flags, | ||
71 | }; | ||
72 | |||
73 | kernel_fpu_begin(); | ||
74 | err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes); | ||
75 | kernel_fpu_end(); | ||
76 | return err; | ||
77 | } | ||
78 | |||
79 | static int crypto_fpu_init_tfm(struct crypto_tfm *tfm) | ||
80 | { | ||
81 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
82 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | ||
83 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); | ||
84 | struct crypto_blkcipher *cipher; | ||
85 | |||
86 | cipher = crypto_spawn_blkcipher(spawn); | ||
87 | if (IS_ERR(cipher)) | ||
88 | return PTR_ERR(cipher); | ||
89 | |||
90 | ctx->child = cipher; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm) | ||
95 | { | ||
96 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); | ||
97 | crypto_free_blkcipher(ctx->child); | ||
98 | } | ||
99 | |||
100 | static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb) | ||
101 | { | ||
102 | struct crypto_instance *inst; | ||
103 | struct crypto_alg *alg; | ||
104 | int err; | ||
105 | |||
106 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | ||
107 | if (err) | ||
108 | return ERR_PTR(err); | ||
109 | |||
110 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | ||
111 | CRYPTO_ALG_TYPE_MASK); | ||
112 | if (IS_ERR(alg)) | ||
113 | return ERR_CAST(alg); | ||
114 | |||
115 | inst = crypto_alloc_instance("fpu", alg); | ||
116 | if (IS_ERR(inst)) | ||
117 | goto out_put_alg; | ||
118 | |||
119 | inst->alg.cra_flags = alg->cra_flags; | ||
120 | inst->alg.cra_priority = alg->cra_priority; | ||
121 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
122 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
123 | inst->alg.cra_type = alg->cra_type; | ||
124 | inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize; | ||
125 | inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | ||
126 | inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | ||
127 | inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx); | ||
128 | inst->alg.cra_init = crypto_fpu_init_tfm; | ||
129 | inst->alg.cra_exit = crypto_fpu_exit_tfm; | ||
130 | inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey; | ||
131 | inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt; | ||
132 | inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt; | ||
133 | |||
134 | out_put_alg: | ||
135 | crypto_mod_put(alg); | ||
136 | return inst; | ||
137 | } | ||
138 | |||
139 | static void crypto_fpu_free(struct crypto_instance *inst) | ||
140 | { | ||
141 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
142 | kfree(inst); | ||
143 | } | ||
144 | |||
145 | static struct crypto_template crypto_fpu_tmpl = { | ||
146 | .name = "fpu", | ||
147 | .alloc = crypto_fpu_alloc, | ||
148 | .free = crypto_fpu_free, | ||
149 | .module = THIS_MODULE, | ||
150 | }; | ||
151 | |||
152 | static int __init crypto_fpu_module_init(void) | ||
153 | { | ||
154 | return crypto_register_template(&crypto_fpu_tmpl); | ||
155 | } | ||
156 | |||
157 | static void __exit crypto_fpu_module_exit(void) | ||
158 | { | ||
159 | crypto_unregister_template(&crypto_fpu_tmpl); | ||
160 | } | ||
161 | |||
162 | module_init(crypto_fpu_module_init); | ||
163 | module_exit(crypto_fpu_module_exit); | ||
164 | |||
165 | MODULE_LICENSE("GPL"); | ||
166 | MODULE_DESCRIPTION("FPU block cipher wrapper"); | ||
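[Editorial sketch, not part of the patch] The "fpu" template above does not implement a cipher of its own; it wraps an existing synchronous blkcipher so that the wrapped encrypt/decrypt walk runs between kernel_fpu_begin() and kernel_fpu_end(), which appears intended to save and restore the FPU state once per request instead of once per block. A minimal, hypothetical kernel-side caller might look like this; the inner algorithm name "ecb(aes)" and the function itself are illustrative only:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch: push one buffer through an "fpu"-wrapped cipher. */
static int fpu_wrap_example(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	/* Instantiates the inner cipher and brackets its encrypt/decrypt
	 * walk with kernel_fpu_begin()/kernel_fpu_end(). */
	tfm = crypto_alloc_blkcipher("fpu(ecb(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, len);
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}

In this series the template is used indirectly: the AES-NI glue above asks cryptd for "fpu(ctr(__driver-aes-aesni))" and similar names, so the FPU-only __aes-aesni cipher is always driven with the FPU context already held.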
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index d750a10ccad6..ff8cbfa07851 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h | |||
@@ -14,6 +14,7 @@ BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | |||
14 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | 14 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) |
15 | BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) | 15 | BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) |
16 | BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) | 16 | BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) |
17 | BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) | ||
17 | 18 | ||
18 | BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0, | 19 | BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0, |
19 | smp_invalidate_interrupt) | 20 | smp_invalidate_interrupt) |
@@ -52,8 +53,16 @@ BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) | |||
52 | BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) | 53 | BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) |
53 | #endif | 54 | #endif |
54 | 55 | ||
55 | #ifdef CONFIG_X86_MCE_P4THERMAL | 56 | #ifdef CONFIG_X86_THERMAL_VECTOR |
56 | BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) | 57 | BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) |
57 | #endif | 58 | #endif |
58 | 59 | ||
60 | #ifdef CONFIG_X86_MCE_THRESHOLD | ||
61 | BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR) | ||
62 | #endif | ||
63 | |||
64 | #ifdef CONFIG_X86_NEW_MCE | ||
65 | BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR) | ||
66 | #endif | ||
67 | |||
59 | #endif | 68 | #endif |
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 9ebc5c255032..82e3e8f01043 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h | |||
@@ -22,7 +22,7 @@ typedef struct { | |||
22 | #endif | 22 | #endif |
23 | #ifdef CONFIG_X86_MCE | 23 | #ifdef CONFIG_X86_MCE |
24 | unsigned int irq_thermal_count; | 24 | unsigned int irq_thermal_count; |
25 | # ifdef CONFIG_X86_64 | 25 | # ifdef CONFIG_X86_MCE_THRESHOLD |
26 | unsigned int irq_threshold_count; | 26 | unsigned int irq_threshold_count; |
27 | # endif | 27 | # endif |
28 | #endif | 28 | #endif |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 6df45f639666..ba180d93b08c 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -34,6 +34,7 @@ extern void perf_pending_interrupt(void); | |||
34 | extern void spurious_interrupt(void); | 34 | extern void spurious_interrupt(void); |
35 | extern void thermal_interrupt(void); | 35 | extern void thermal_interrupt(void); |
36 | extern void reschedule_interrupt(void); | 36 | extern void reschedule_interrupt(void); |
37 | extern void mce_self_interrupt(void); | ||
37 | 38 | ||
38 | extern void invalidate_interrupt(void); | 39 | extern void invalidate_interrupt(void); |
39 | extern void invalidate_interrupt0(void); | 40 | extern void invalidate_interrupt0(void); |
@@ -46,6 +47,7 @@ extern void invalidate_interrupt6(void); | |||
46 | extern void invalidate_interrupt7(void); | 47 | extern void invalidate_interrupt7(void); |
47 | 48 | ||
48 | extern void irq_move_cleanup_interrupt(void); | 49 | extern void irq_move_cleanup_interrupt(void); |
50 | extern void reboot_interrupt(void); | ||
49 | extern void threshold_interrupt(void); | 51 | extern void threshold_interrupt(void); |
50 | 52 | ||
51 | extern void call_function_interrupt(void); | 53 | extern void call_function_interrupt(void); |
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index e997be98c9b9..5b21f0ec3df2 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #define NMI_VECTOR 0x02 | 27 | #define NMI_VECTOR 0x02 |
28 | #define MCE_VECTOR 0x12 | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * IDT vectors usable for external interrupt sources start | 31 | * IDT vectors usable for external interrupt sources start |
@@ -87,13 +88,8 @@ | |||
87 | #define CALL_FUNCTION_VECTOR 0xfc | 88 | #define CALL_FUNCTION_VECTOR 0xfc |
88 | #define CALL_FUNCTION_SINGLE_VECTOR 0xfb | 89 | #define CALL_FUNCTION_SINGLE_VECTOR 0xfb |
89 | #define THERMAL_APIC_VECTOR 0xfa | 90 | #define THERMAL_APIC_VECTOR 0xfa |
90 | 91 | #define THRESHOLD_APIC_VECTOR 0xf9 | |
91 | #ifdef CONFIG_X86_32 | 92 | #define REBOOT_VECTOR 0xf8 |
92 | /* 0xf8 - 0xf9 : free */ | ||
93 | #else | ||
94 | # define THRESHOLD_APIC_VECTOR 0xf9 | ||
95 | # define UV_BAU_MESSAGE 0xf8 | ||
96 | #endif | ||
97 | 93 | ||
98 | /* f0-f7 used for spreading out TLB flushes: */ | 94 | /* f0-f7 used for spreading out TLB flushes: */ |
99 | #define INVALIDATE_TLB_VECTOR_END 0xf7 | 95 | #define INVALIDATE_TLB_VECTOR_END 0xf7 |
@@ -117,6 +113,13 @@ | |||
117 | */ | 113 | */ |
118 | #define LOCAL_PENDING_VECTOR 0xec | 114 | #define LOCAL_PENDING_VECTOR 0xec |
119 | 115 | ||
116 | #define UV_BAU_MESSAGE 0xec | ||
117 | |||
118 | /* | ||
119 | * Self IPI vector for machine checks | ||
120 | */ | ||
121 | #define MCE_SELF_VECTOR 0xeb | ||
122 | |||
120 | /* | 123 | /* |
121 | * First APIC vector available to drivers: (vectors 0x30-0xee) we | 124 | * First APIC vector available to drivers: (vectors 0x30-0xee) we |
122 | * start at 0x31(0x41) to spread out vectors evenly between priority | 125 | * start at 0x31(0x41) to spread out vectors evenly between priority |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 4f8c199584e7..540a466e50f5 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _ASM_X86_MCE_H | 1 | #ifndef _ASM_X86_MCE_H |
2 | #define _ASM_X86_MCE_H | 2 | #define _ASM_X86_MCE_H |
3 | 3 | ||
4 | #ifdef __x86_64__ | ||
5 | |||
6 | #include <linux/types.h> | 4 | #include <linux/types.h> |
7 | #include <asm/ioctls.h> | 5 | #include <asm/ioctls.h> |
8 | 6 | ||
@@ -10,21 +8,35 @@ | |||
10 | * Machine Check support for x86 | 8 | * Machine Check support for x86 |
11 | */ | 9 | */ |
12 | 10 | ||
13 | #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ | 11 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ |
14 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | 12 | #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ |
15 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | 13 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ |
16 | 14 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | |
17 | #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ | 15 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ |
18 | #define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */ | 16 | #define MCG_EXT_CNT_SHIFT 16 |
19 | #define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */ | 17 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) |
20 | 18 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | |
21 | #define MCI_STATUS_VAL (1UL<<63) /* valid error */ | 19 | |
22 | #define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */ | 20 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ |
23 | #define MCI_STATUS_UC (1UL<<61) /* uncorrected error */ | 21 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ |
24 | #define MCI_STATUS_EN (1UL<<60) /* error enabled */ | 22 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ |
25 | #define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */ | 23 | |
26 | #define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */ | 24 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ |
27 | #define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */ | 25 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ |
26 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
27 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
28 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
29 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
30 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
31 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
32 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
33 | |||
34 | /* MISC register defines */ | ||
35 | #define MCM_ADDR_SEGOFF 0 /* segment offset */ | ||
36 | #define MCM_ADDR_LINEAR 1 /* linear address */ | ||
37 | #define MCM_ADDR_PHYS 2 /* physical address */ | ||
38 | #define MCM_ADDR_MEM 3 /* memory address */ | ||
39 | #define MCM_ADDR_GENERIC 7 /* generic */ | ||
28 | 40 | ||
29 | /* Fields are zero when not available */ | 41 | /* Fields are zero when not available */ |
30 | struct mce { | 42 | struct mce { |
@@ -34,13 +46,19 @@ struct mce { | |||
34 | __u64 mcgstatus; | 46 | __u64 mcgstatus; |
35 | __u64 ip; | 47 | __u64 ip; |
36 | __u64 tsc; /* cpu time stamp counter */ | 48 | __u64 tsc; /* cpu time stamp counter */ |
37 | __u64 res1; /* for future extension */ | 49 | __u64 time; /* wall time_t when error was detected */ |
38 | __u64 res2; /* dito. */ | 50 | __u8 cpuvendor; /* cpu vendor as encoded in system.h */ |
51 | __u8 pad1; | ||
52 | __u16 pad2; | ||
53 | __u32 cpuid; /* CPUID 1 EAX */ | ||
39 | __u8 cs; /* code segment */ | 54 | __u8 cs; /* code segment */ |
40 | __u8 bank; /* machine check bank */ | 55 | __u8 bank; /* machine check bank */ |
41 | __u8 cpu; /* cpu that raised the error */ | 56 | __u8 cpu; /* cpu number; obsolete; use extcpu now */ |
42 | __u8 finished; /* entry is valid */ | 57 | __u8 finished; /* entry is valid */ |
43 | __u32 pad; | 58 | __u32 extcpu; /* linux cpu number that detected the error */ |
59 | __u32 socketid; /* CPU socket ID */ | ||
60 | __u32 apicid; /* CPU initial apic ID */ | ||
61 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ | ||
44 | }; | 62 | }; |
45 | 63 | ||
46 | /* | 64 | /* |
@@ -57,7 +75,7 @@ struct mce_log { | |||
57 | unsigned len; /* = MCE_LOG_LEN */ | 75 | unsigned len; /* = MCE_LOG_LEN */ |
58 | unsigned next; | 76 | unsigned next; |
59 | unsigned flags; | 77 | unsigned flags; |
60 | unsigned pad0; | 78 | unsigned recordlen; /* length of struct mce */ |
61 | struct mce entry[MCE_LOG_LEN]; | 79 | struct mce entry[MCE_LOG_LEN]; |
62 | }; | 80 | }; |
63 | 81 | ||
@@ -82,19 +100,16 @@ struct mce_log { | |||
82 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) | 100 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) |
83 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) | 101 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) |
84 | 102 | ||
85 | #endif /* __x86_64__ */ | ||
86 | |||
87 | #ifdef __KERNEL__ | 103 | #ifdef __KERNEL__ |
88 | 104 | ||
89 | #ifdef CONFIG_X86_32 | ||
90 | extern int mce_disabled; | 105 | extern int mce_disabled; |
91 | #else /* CONFIG_X86_32 */ | ||
92 | 106 | ||
93 | #include <asm/atomic.h> | 107 | #include <asm/atomic.h> |
108 | #include <linux/percpu.h> | ||
94 | 109 | ||
95 | void mce_setup(struct mce *m); | 110 | void mce_setup(struct mce *m); |
96 | void mce_log(struct mce *m); | 111 | void mce_log(struct mce *m); |
97 | DECLARE_PER_CPU(struct sys_device, device_mce); | 112 | DECLARE_PER_CPU(struct sys_device, mce_dev); |
98 | extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | 113 | extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); |
99 | 114 | ||
100 | /* | 115 | /* |
@@ -104,6 +119,8 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | |||
104 | #define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1) | 119 | #define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1) |
105 | 120 | ||
106 | #ifdef CONFIG_X86_MCE_INTEL | 121 | #ifdef CONFIG_X86_MCE_INTEL |
122 | extern int mce_cmci_disabled; | ||
123 | extern int mce_ignore_ce; | ||
107 | void mce_intel_feature_init(struct cpuinfo_x86 *c); | 124 | void mce_intel_feature_init(struct cpuinfo_x86 *c); |
108 | void cmci_clear(void); | 125 | void cmci_clear(void); |
109 | void cmci_reenable(void); | 126 | void cmci_reenable(void); |
@@ -123,13 +140,16 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c); | |||
123 | static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } | 140 | static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } |
124 | #endif | 141 | #endif |
125 | 142 | ||
126 | extern int mce_available(struct cpuinfo_x86 *c); | 143 | int mce_available(struct cpuinfo_x86 *c); |
144 | |||
145 | DECLARE_PER_CPU(unsigned, mce_exception_count); | ||
146 | DECLARE_PER_CPU(unsigned, mce_poll_count); | ||
127 | 147 | ||
128 | void mce_log_therm_throt_event(__u64 status); | 148 | void mce_log_therm_throt_event(__u64 status); |
129 | 149 | ||
130 | extern atomic_t mce_entry; | 150 | extern atomic_t mce_entry; |
131 | 151 | ||
132 | extern void do_machine_check(struct pt_regs *, long); | 152 | void do_machine_check(struct pt_regs *, long); |
133 | 153 | ||
134 | typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS); | 154 | typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS); |
135 | DECLARE_PER_CPU(mce_banks_t, mce_poll_banks); | 155 | DECLARE_PER_CPU(mce_banks_t, mce_poll_banks); |
@@ -139,14 +159,16 @@ enum mcp_flags { | |||
139 | MCP_UC = (1 << 1), /* log uncorrected errors */ | 159 | MCP_UC = (1 << 1), /* log uncorrected errors */ |
140 | MCP_DONTLOG = (1 << 2), /* only clear, don't log */ | 160 | MCP_DONTLOG = (1 << 2), /* only clear, don't log */ |
141 | }; | 161 | }; |
142 | extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b); | 162 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b); |
143 | 163 | ||
144 | extern int mce_notify_user(void); | 164 | int mce_notify_irq(void); |
165 | void mce_notify_process(void); | ||
145 | 166 | ||
146 | #endif /* !CONFIG_X86_32 */ | 167 | DECLARE_PER_CPU(struct mce, injectm); |
168 | extern struct file_operations mce_chrdev_ops; | ||
147 | 169 | ||
148 | #ifdef CONFIG_X86_MCE | 170 | #ifdef CONFIG_X86_MCE |
149 | extern void mcheck_init(struct cpuinfo_x86 *c); | 171 | void mcheck_init(struct cpuinfo_x86 *c); |
150 | #else | 172 | #else |
151 | #define mcheck_init(c) do { } while (0) | 173 | #define mcheck_init(c) do { } while (0) |
152 | #endif | 174 | #endif |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4d58d04fca83..1692fb5050e3 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -207,7 +207,14 @@ | |||
207 | 207 | ||
208 | #define MSR_IA32_THERM_CONTROL 0x0000019a | 208 | #define MSR_IA32_THERM_CONTROL 0x0000019a |
209 | #define MSR_IA32_THERM_INTERRUPT 0x0000019b | 209 | #define MSR_IA32_THERM_INTERRUPT 0x0000019b |
210 | |||
211 | #define THERM_INT_LOW_ENABLE (1 << 0) | ||
212 | #define THERM_INT_HIGH_ENABLE (1 << 1) | ||
213 | |||
210 | #define MSR_IA32_THERM_STATUS 0x0000019c | 214 | #define MSR_IA32_THERM_STATUS 0x0000019c |
215 | |||
216 | #define THERM_STATUS_PROCHOT (1 << 0) | ||
217 | |||
211 | #define MSR_IA32_MISC_ENABLE 0x000001a0 | 218 | #define MSR_IA32_MISC_ENABLE 0x000001a0 |
212 | 219 | ||
213 | /* MISC_ENABLE bits: architectural */ | 220 | /* MISC_ENABLE bits: architectural */ |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 076d3881f3da..8c7c042ecad1 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -899,7 +899,7 @@ void clear_local_APIC(void) | |||
899 | } | 899 | } |
900 | 900 | ||
901 | /* lets not touch this if we didn't frob it */ | 901 | /* lets not touch this if we didn't frob it */ |
902 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) | 902 | #ifdef CONFIG_X86_THERMAL_VECTOR |
903 | if (maxlvt >= 5) { | 903 | if (maxlvt >= 5) { |
904 | v = apic_read(APIC_LVTTHMR); | 904 | v = apic_read(APIC_LVTTHMR); |
905 | apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); | 905 | apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); |
@@ -2017,7 +2017,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) | |||
2017 | apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); | 2017 | apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); |
2018 | apic_pm_state.apic_tmict = apic_read(APIC_TMICT); | 2018 | apic_pm_state.apic_tmict = apic_read(APIC_TMICT); |
2019 | apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); | 2019 | apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); |
2020 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) | 2020 | #ifdef CONFIG_X86_THERMAL_VECTOR |
2021 | if (maxlvt >= 5) | 2021 | if (maxlvt >= 5) |
2022 | apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); | 2022 | apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); |
2023 | #endif | 2023 | #endif |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index a691302dc3ff..b3025b43b63a 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -66,7 +66,7 @@ static inline unsigned int get_nmi_count(int cpu) | |||
66 | 66 | ||
67 | static inline int mce_in_progress(void) | 67 | static inline int mce_in_progress(void) |
68 | { | 68 | { |
69 | #if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) | 69 | #if defined(CONFIG_X86_NEW_MCE) |
70 | return atomic_read(&mce_entry) > 0; | 70 | return atomic_read(&mce_entry) > 0; |
71 | #endif | 71 | #endif |
72 | return 0; | 72 | return 0; |
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index b2f89829bbe8..45004faf67ea 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile | |||
@@ -1,7 +1,11 @@ | |||
1 | obj-y = mce_$(BITS).o therm_throt.o | 1 | obj-y = mce.o therm_throt.o |
2 | 2 | ||
3 | obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o | 3 | obj-$(CONFIG_X86_NEW_MCE) += mce-severity.o |
4 | obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o | 4 | obj-$(CONFIG_X86_OLD_MCE) += k7.o p4.o p6.o |
5 | obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o | ||
6 | obj-$(CONFIG_X86_MCE_P4THERMAL) += mce_intel.o | ||
7 | obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o mce_intel.o | ||
5 | obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o | 8 | obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o |
6 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o | 9 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o |
7 | obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o | 10 | obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o |
11 | obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o | ||
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c index dd3af6e7b39a..89e510424152 100644 --- a/arch/x86/kernel/cpu/mcheck/k7.c +++ b/arch/x86/kernel/cpu/mcheck/k7.c | |||
@@ -2,11 +2,10 @@ | |||
2 | * Athlon specific Machine Check Exception Reporting | 2 | * Athlon specific Machine Check Exception Reporting |
3 | * (C) Copyright 2002 Dave Jones <davej@redhat.com> | 3 | * (C) Copyright 2002 Dave Jones <davej@redhat.com> |
4 | */ | 4 | */ |
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/init.h> | ||
10 | #include <linux/smp.h> | 9 | #include <linux/smp.h> |
11 | 10 | ||
12 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
@@ -15,12 +14,12 @@ | |||
15 | 14 | ||
16 | #include "mce.h" | 15 | #include "mce.h" |
17 | 16 | ||
18 | /* Machine Check Handler For AMD Athlon/Duron */ | 17 | /* Machine Check Handler For AMD Athlon/Duron: */ |
19 | static void k7_machine_check(struct pt_regs *regs, long error_code) | 18 | static void k7_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 19 | { |
21 | int recover = 1; | ||
22 | u32 alow, ahigh, high, low; | 20 | u32 alow, ahigh, high, low; |
23 | u32 mcgstl, mcgsth; | 21 | u32 mcgstl, mcgsth; |
22 | int recover = 1; | ||
24 | int i; | 23 | int i; |
25 | 24 | ||
26 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 25 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
@@ -32,15 +31,19 @@ static void k7_machine_check(struct pt_regs *regs, long error_code) | |||
32 | 31 | ||
33 | for (i = 1; i < nr_mce_banks; i++) { | 32 | for (i = 1; i < nr_mce_banks; i++) { |
34 | rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); | 33 | rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); |
35 | if (high&(1<<31)) { | 34 | if (high & (1<<31)) { |
36 | char misc[20]; | 35 | char misc[20]; |
37 | char addr[24]; | 36 | char addr[24]; |
38 | misc[0] = addr[0] = '\0'; | 37 | |
38 | misc[0] = '\0'; | ||
39 | addr[0] = '\0'; | ||
40 | |||
39 | if (high & (1<<29)) | 41 | if (high & (1<<29)) |
40 | recover |= 1; | 42 | recover |= 1; |
41 | if (high & (1<<25)) | 43 | if (high & (1<<25)) |
42 | recover |= 2; | 44 | recover |= 2; |
43 | high &= ~(1<<31); | 45 | high &= ~(1<<31); |
46 | |||
44 | if (high & (1<<27)) { | 47 | if (high & (1<<27)) { |
45 | rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); | 48 | rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); |
46 | snprintf(misc, 20, "[%08x%08x]", ahigh, alow); | 49 | snprintf(misc, 20, "[%08x%08x]", ahigh, alow); |
@@ -49,27 +52,31 @@ static void k7_machine_check(struct pt_regs *regs, long error_code) | |||
49 | rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); | 52 | rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); |
50 | snprintf(addr, 24, " at %08x%08x", ahigh, alow); | 53 | snprintf(addr, 24, " at %08x%08x", ahigh, alow); |
51 | } | 54 | } |
55 | |||
52 | printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", | 56 | printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", |
53 | smp_processor_id(), i, high, low, misc, addr); | 57 | smp_processor_id(), i, high, low, misc, addr); |
54 | /* Clear it */ | 58 | |
59 | /* Clear it: */ | ||
55 | wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | 60 | wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); |
56 | /* Serialize */ | 61 | /* Serialize: */ |
57 | wmb(); | 62 | wmb(); |
58 | add_taint(TAINT_MACHINE_CHECK); | 63 | add_taint(TAINT_MACHINE_CHECK); |
59 | } | 64 | } |
60 | } | 65 | } |
61 | 66 | ||
62 | if (recover&2) | 67 | if (recover & 2) |
63 | panic("CPU context corrupt"); | 68 | panic("CPU context corrupt"); |
64 | if (recover&1) | 69 | if (recover & 1) |
65 | panic("Unable to continue"); | 70 | panic("Unable to continue"); |
71 | |||
66 | printk(KERN_EMERG "Attempting to continue.\n"); | 72 | printk(KERN_EMERG "Attempting to continue.\n"); |
73 | |||
67 | mcgstl &= ~(1<<2); | 74 | mcgstl &= ~(1<<2); |
68 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 75 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
69 | } | 76 | } |
70 | 77 | ||
71 | 78 | ||
72 | /* AMD K7 machine check is Intel like */ | 79 | /* AMD K7 machine check is Intel like: */ |
73 | void amd_mcheck_init(struct cpuinfo_x86 *c) | 80 | void amd_mcheck_init(struct cpuinfo_x86 *c) |
74 | { | 81 | { |
75 | u32 l, h; | 82 | u32 l, h; |
@@ -79,21 +86,26 @@ void amd_mcheck_init(struct cpuinfo_x86 *c) | |||
79 | return; | 86 | return; |
80 | 87 | ||
81 | machine_check_vector = k7_machine_check; | 88 | machine_check_vector = k7_machine_check; |
89 | /* Make sure the vector pointer is visible before we enable MCEs: */ | ||
82 | wmb(); | 90 | wmb(); |
83 | 91 | ||
84 | printk(KERN_INFO "Intel machine check architecture supported.\n"); | 92 | printk(KERN_INFO "Intel machine check architecture supported.\n"); |
93 | |||
85 | rdmsr(MSR_IA32_MCG_CAP, l, h); | 94 | rdmsr(MSR_IA32_MCG_CAP, l, h); |
86 | if (l & (1<<8)) /* Control register present ? */ | 95 | if (l & (1<<8)) /* Control register present ? */ |
87 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | 96 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
88 | nr_mce_banks = l & 0xff; | 97 | nr_mce_banks = l & 0xff; |
89 | 98 | ||
90 | /* Clear status for MC index 0 separately, we don't touch CTL, | 99 | /* |
91 | * as some K7 Athlons cause spurious MCEs when its enabled. */ | 100 | * Clear status for MC index 0 separately, we don't touch CTL, |
101 | * as some K7 Athlons cause spurious MCEs when its enabled: | ||
102 | */ | ||
92 | if (boot_cpu_data.x86 == 6) { | 103 | if (boot_cpu_data.x86 == 6) { |
93 | wrmsr(MSR_IA32_MC0_STATUS, 0x0, 0x0); | 104 | wrmsr(MSR_IA32_MC0_STATUS, 0x0, 0x0); |
94 | i = 1; | 105 | i = 1; |
95 | } else | 106 | } else |
96 | i = 0; | 107 | i = 0; |
108 | |||
97 | for (; i < nr_mce_banks; i++) { | 109 | for (; i < nr_mce_banks; i++) { |
98 | wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); | 110 | wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); |
99 | wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); | 111 | wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c new file mode 100644 index 000000000000..a3a235a53f09 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * Machine check injection support. | ||
3 | * Copyright 2008 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; version 2 | ||
8 | * of the License. | ||
9 | * | ||
10 | * Authors: | ||
11 | * Andi Kleen | ||
12 | * Ying Huang | ||
13 | */ | ||
14 | #include <linux/uaccess.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/timer.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/fs.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <asm/mce.h> | ||
22 | |||
23 | /* Update fake mce registers on current CPU. */ | ||
24 | static void inject_mce(struct mce *m) | ||
25 | { | ||
26 | struct mce *i = &per_cpu(injectm, m->extcpu); | ||
27 | |||
28 | /* Make sure no one reads a partially written injectm */ | ||
29 | i->finished = 0; | ||
30 | mb(); | ||
31 | m->finished = 0; | ||
32 | /* First set the fields after finished */ | ||
33 | i->extcpu = m->extcpu; | ||
34 | mb(); | ||
35 | /* Now write record in order, finished last (except above) */ | ||
36 | memcpy(i, m, sizeof(struct mce)); | ||
37 | /* Finally activate it */ | ||
38 | mb(); | ||
39 | i->finished = 1; | ||
40 | } | ||
41 | |||
42 | struct delayed_mce { | ||
43 | struct timer_list timer; | ||
44 | struct mce m; | ||
45 | }; | ||
46 | |||
47 | /* Inject mce on current CPU */ | ||
48 | static void raise_mce(unsigned long data) | ||
49 | { | ||
50 | struct delayed_mce *dm = (struct delayed_mce *)data; | ||
51 | struct mce *m = &dm->m; | ||
52 | int cpu = m->extcpu; | ||
53 | |||
54 | inject_mce(m); | ||
55 | if (m->status & MCI_STATUS_UC) { | ||
56 | struct pt_regs regs; | ||
57 | memset(®s, 0, sizeof(struct pt_regs)); | ||
58 | regs.ip = m->ip; | ||
59 | regs.cs = m->cs; | ||
60 | printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); | ||
61 | do_machine_check(®s, 0); | ||
62 | printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); | ||
63 | } else { | ||
64 | mce_banks_t b; | ||
65 | memset(&b, 0xff, sizeof(mce_banks_t)); | ||
66 | printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); | ||
67 | machine_check_poll(0, &b); | ||
68 | mce_notify_irq(); | ||
69 | printk(KERN_INFO "Finished machine check poll on CPU %d\n", | ||
70 | cpu); | ||
71 | } | ||
72 | kfree(dm); | ||
73 | } | ||
74 | |||
75 | /* Error injection interface */ | ||
76 | static ssize_t mce_write(struct file *filp, const char __user *ubuf, | ||
77 | size_t usize, loff_t *off) | ||
78 | { | ||
79 | struct delayed_mce *dm; | ||
80 | struct mce m; | ||
81 | |||
82 | if (!capable(CAP_SYS_ADMIN)) | ||
83 | return -EPERM; | ||
84 | /* | ||
85 | * There are some cases where real MSR reads could slip | ||
86 | * through. | ||
87 | */ | ||
88 | if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA)) | ||
89 | return -EIO; | ||
90 | |||
91 | if ((unsigned long)usize > sizeof(struct mce)) | ||
92 | usize = sizeof(struct mce); | ||
93 | if (copy_from_user(&m, ubuf, usize)) | ||
94 | return -EFAULT; | ||
95 | |||
96 | if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu)) | ||
97 | return -EINVAL; | ||
98 | |||
99 | dm = kmalloc(sizeof(struct delayed_mce), GFP_KERNEL); | ||
100 | if (!dm) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | /* | ||
104 | * Need to give user space some time to set everything up, | ||
105 | * so do it a jiffy or two later everywhere. | ||
106 | * Should we use a hrtimer here for better synchronization? | ||
107 | */ | ||
108 | memcpy(&dm->m, &m, sizeof(struct mce)); | ||
109 | setup_timer(&dm->timer, raise_mce, (unsigned long)dm); | ||
110 | dm->timer.expires = jiffies + 2; | ||
111 | add_timer_on(&dm->timer, m.extcpu); | ||
112 | return usize; | ||
113 | } | ||
114 | |||
115 | static int inject_init(void) | ||
116 | { | ||
117 | printk(KERN_INFO "Machine check injector initialized\n"); | ||
118 | mce_chrdev_ops.write = mce_write; | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | module_init(inject_init); | ||
123 | /* | ||
124 | * Cannot tolerate unloading currently because we cannot | ||
125 | * guarantee all openers of mce_chrdev will get a reference to us. | ||
126 | */ | ||
127 | MODULE_LICENSE("GPL"); | ||
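[Editorial sketch, not part of the patch] The injector above is driven entirely through the MCE character device: a write() lands in mce_write(), which copies a struct mce from userspace and schedules raise_mce() on the requested CPU a couple of jiffies later. A minimal userspace example might look as follows; the /dev/mcelog path, the field values, and the direct use of <asm/mce.h> are assumptions made for illustration:

/* Hypothetical example: inject one corrected (non-UC) error on CPU 0. */
#include <asm/mce.h>		/* struct mce, MCI_STATUS_* */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct mce m;
	int fd = open("/dev/mcelog", O_RDWR);	/* mce_write() requires CAP_SYS_ADMIN */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&m, 0, sizeof(m));
	m.extcpu = 0;					/* must be an online CPU */
	m.bank = 1;
	m.status = MCI_STATUS_VAL | MCI_STATUS_EN;	/* valid + enabled, UC clear */

	/* With UC clear, raise_mce() takes the machine_check_poll() path
	 * rather than calling do_machine_check(). */
	if (write(fd, &m, sizeof(m)) < 0) {
		perror("write");
		return 1;
	}
	close(fd);
	return 0;
}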
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h new file mode 100644 index 000000000000..54dcb8ff12e5 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #include <asm/mce.h> | ||
2 | |||
3 | enum severity_level { | ||
4 | MCE_NO_SEVERITY, | ||
5 | MCE_KEEP_SEVERITY, | ||
6 | MCE_SOME_SEVERITY, | ||
7 | MCE_AO_SEVERITY, | ||
8 | MCE_UC_SEVERITY, | ||
9 | MCE_AR_SEVERITY, | ||
10 | MCE_PANIC_SEVERITY, | ||
11 | }; | ||
12 | |||
13 | int mce_severity(struct mce *a, int tolerant, char **msg); | ||
14 | |||
15 | extern int mce_ser; | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c new file mode 100644 index 000000000000..ff0807f97056 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* | ||
2 | * MCE grading rules. | ||
3 | * Copyright 2008, 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; version 2 | ||
8 | * of the License. | ||
9 | * | ||
10 | * Author: Andi Kleen | ||
11 | */ | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/seq_file.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/debugfs.h> | ||
16 | #include <asm/mce.h> | ||
17 | |||
18 | #include "mce-internal.h" | ||
19 | |||
20 | /* | ||
21 | * Grade an mce by severity. In general the most severe ones are processed | ||
22 | * first. Since there are quite a lot of combinations, test the bits in a | ||
23 | * table-driven way. The rules are simply processed in order, first | ||
24 | * match wins. | ||
25 | * | ||
26 | * Note this is only used for machine check exceptions; the corrected | ||
27 | * errors use much simpler rules. The exceptions still check for the corrected | ||
28 | * errors, but only to leave them alone for the CMCI handler (except for | ||
29 | * panic situations). | ||
30 | */ | ||
31 | |||
32 | enum context { IN_KERNEL = 1, IN_USER = 2 }; | ||
33 | enum ser { SER_REQUIRED = 1, NO_SER = 2 }; | ||
34 | |||
35 | static struct severity { | ||
36 | u64 mask; | ||
37 | u64 result; | ||
38 | unsigned char sev; | ||
39 | unsigned char mcgmask; | ||
40 | unsigned char mcgres; | ||
41 | unsigned char ser; | ||
42 | unsigned char context; | ||
43 | unsigned char covered; | ||
44 | char *msg; | ||
45 | } severities[] = { | ||
46 | #define KERNEL .context = IN_KERNEL | ||
47 | #define USER .context = IN_USER | ||
48 | #define SER .ser = SER_REQUIRED | ||
49 | #define NOSER .ser = NO_SER | ||
50 | #define SEV(s) .sev = MCE_ ## s ## _SEVERITY | ||
51 | #define BITCLR(x, s, m, r...) { .mask = x, .result = 0, SEV(s), .msg = m, ## r } | ||
52 | #define BITSET(x, s, m, r...) { .mask = x, .result = x, SEV(s), .msg = m, ## r } | ||
53 | #define MCGMASK(x, res, s, m, r...) \ | ||
54 | { .mcgmask = x, .mcgres = res, SEV(s), .msg = m, ## r } | ||
55 | #define MASK(x, y, s, m, r...) \ | ||
56 | { .mask = x, .result = y, SEV(s), .msg = m, ## r } | ||
57 | #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) | ||
58 | #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) | ||
59 | #define MCACOD 0xffff | ||
60 | |||
61 | BITCLR(MCI_STATUS_VAL, NO, "Invalid"), | ||
62 | BITCLR(MCI_STATUS_EN, NO, "Not enabled"), | ||
63 | BITSET(MCI_STATUS_PCC, PANIC, "Processor context corrupt"), | ||
64 | /* When MCIP is not set, something is very confused */ | ||
65 | MCGMASK(MCG_STATUS_MCIP, 0, PANIC, "MCIP not set in MCA handler"), | ||
66 | /* Neither restart nor error IP -- no chance to recover -> PANIC */ | ||
67 | MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0, PANIC, | ||
68 | "Neither restart nor error IP"), | ||
69 | MCGMASK(MCG_STATUS_RIPV, 0, PANIC, "In kernel and no restart IP", | ||
70 | KERNEL), | ||
71 | BITCLR(MCI_STATUS_UC, KEEP, "Corrected error", NOSER), | ||
72 | MASK(MCI_STATUS_OVER|MCI_STATUS_UC|MCI_STATUS_EN, MCI_STATUS_UC, SOME, | ||
73 | "Spurious not enabled", SER), | ||
74 | |||
75 | /* ignore OVER for UCNA */ | ||
76 | MASK(MCI_UC_SAR, MCI_STATUS_UC, KEEP, | ||
77 | "Uncorrected no action required", SER), | ||
78 | MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR, PANIC, | ||
79 | "Illegal combination (UCNA with AR=1)", SER), | ||
80 | MASK(MCI_STATUS_S, 0, KEEP, "Non signalled machine check", SER), | ||
81 | |||
82 | /* AR add known MCACODs here */ | ||
83 | MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_SAR, PANIC, | ||
84 | "Action required with lost events", SER), | ||
85 | MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_SAR, PANIC, | ||
86 | "Action required; unknown MCACOD", SER), | ||
87 | |||
88 | /* known AO MCACODs: */ | ||
89 | MASK(MCI_UC_SAR|MCI_STATUS_OVER|0xfff0, MCI_UC_S|0xc0, AO, | ||
90 | "Action optional: memory scrubbing error", SER), | ||
91 | MASK(MCI_UC_SAR|MCI_STATUS_OVER|MCACOD, MCI_UC_S|0x17a, AO, | ||
92 | "Action optional: last level cache writeback error", SER), | ||
93 | |||
94 | MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S, SOME, | ||
95 | "Action optional unknown MCACOD", SER), | ||
96 | MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S|MCI_STATUS_OVER, SOME, | ||
97 | "Action optional with lost events", SER), | ||
98 | BITSET(MCI_STATUS_UC|MCI_STATUS_OVER, PANIC, "Overflowed uncorrected"), | ||
99 | BITSET(MCI_STATUS_UC, UC, "Uncorrected"), | ||
100 | BITSET(0, SOME, "No match") /* always matches. keep at end */ | ||
101 | }; | ||
102 | |||
103 | /* | ||
104 | * If the EIPV bit is set, it means the saved IP is the | ||
105 | * instruction which caused the MCE. | ||
106 | */ | ||
107 | static int error_context(struct mce *m) | ||
108 | { | ||
109 | if (m->mcgstatus & MCG_STATUS_EIPV) | ||
110 | return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL; | ||
111 | /* Unknown, assume kernel */ | ||
112 | return IN_KERNEL; | ||
113 | } | ||
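In other words, a ring-3 code segment selector plus a non-zero saved IP means the error hit user mode; anything else is conservatively treated as kernel context. A standalone sketch of that CPL check (the selector value 0x33 is just a typical 64-bit user CS used for illustration):

#include <stdio.h>

enum context { IN_KERNEL = 1, IN_USER = 2 };

static enum context classify(int eipv, unsigned long ip, unsigned cs)
{
	if (eipv)
		return (ip && (cs & 3) == 3) ? IN_USER : IN_KERNEL;
	return IN_KERNEL;		/* unknown, assume kernel */
}

int main(void)
{
	printf("%s\n", classify(1, 0x400000, 0x33) == IN_USER ? "user" : "kernel");
	return 0;
}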
114 | |||
115 | int mce_severity(struct mce *a, int tolerant, char **msg) | ||
116 | { | ||
117 | enum context ctx = error_context(a); | ||
118 | struct severity *s; | ||
119 | |||
120 | for (s = severities;; s++) { | ||
121 | if ((a->status & s->mask) != s->result) | ||
122 | continue; | ||
123 | if ((a->mcgstatus & s->mcgmask) != s->mcgres) | ||
124 | continue; | ||
125 | if (s->ser == SER_REQUIRED && !mce_ser) | ||
126 | continue; | ||
127 | if (s->ser == NO_SER && mce_ser) | ||
128 | continue; | ||
129 | if (s->context && ctx != s->context) | ||
130 | continue; | ||
131 | if (msg) | ||
132 | *msg = s->msg; | ||
133 | s->covered = 1; | ||
134 | if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) { | ||
135 | if (panic_on_oops || tolerant < 1) | ||
136 | return MCE_PANIC_SEVERITY; | ||
137 | } | ||
138 | return s->sev; | ||
139 | } | ||
140 | } | ||
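The table above is walked top to bottom and the first entry whose conditions all hold wins. A standalone sketch of that first-match-wins grading, reduced to just the status mask/result test (not kernel code; the bit positions mirror the architectural MCi_STATUS layout but are defined locally here):

#include <stdio.h>
#include <stdint.h>

#define VAL	(1ULL << 63)	/* status word is valid */
#define UC	(1ULL << 61)	/* uncorrected error */
#define PCC	(1ULL << 57)	/* processor context corrupt */

struct rule { uint64_t mask, result; int sev; const char *msg; };

static const struct rule rules[] = {
	{ VAL, 0,   0, "Invalid" },			/* VAL clear */
	{ PCC, PCC, 3, "Processor context corrupt" },	/* PCC set */
	{ UC,  UC,  2, "Uncorrected" },
	{ 0,   0,   1, "No match" },			/* always matches, keep last */
};

static int grade(uint64_t status, const char **msg)
{
	const struct rule *r;

	for (r = rules; ; r++) {	/* terminates at the catch-all entry */
		if ((status & r->mask) != r->result)
			continue;
		*msg = r->msg;
		return r->sev;
	}
}

int main(void)
{
	const char *msg;
	int sev = grade(VAL | UC, &msg);

	printf("severity %d: %s\n", sev, msg);	/* severity 2: Uncorrected */
	return 0;
}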
141 | |||
142 | static void *s_start(struct seq_file *f, loff_t *pos) | ||
143 | { | ||
144 | if (*pos >= ARRAY_SIZE(severities)) | ||
145 | return NULL; | ||
146 | return &severities[*pos]; | ||
147 | } | ||
148 | |||
149 | static void *s_next(struct seq_file *f, void *data, loff_t *pos) | ||
150 | { | ||
151 | if (++(*pos) >= ARRAY_SIZE(severities)) | ||
152 | return NULL; | ||
153 | return &severities[*pos]; | ||
154 | } | ||
155 | |||
156 | static void s_stop(struct seq_file *f, void *data) | ||
157 | { | ||
158 | } | ||
159 | |||
160 | static int s_show(struct seq_file *f, void *data) | ||
161 | { | ||
162 | struct severity *ser = data; | ||
163 | seq_printf(f, "%d\t%s\n", ser->covered, ser->msg); | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static const struct seq_operations severities_seq_ops = { | ||
168 | .start = s_start, | ||
169 | .next = s_next, | ||
170 | .stop = s_stop, | ||
171 | .show = s_show, | ||
172 | }; | ||
173 | |||
174 | static int severities_coverage_open(struct inode *inode, struct file *file) | ||
175 | { | ||
176 | return seq_open(file, &severities_seq_ops); | ||
177 | } | ||
178 | |||
179 | static ssize_t severities_coverage_write(struct file *file, | ||
180 | const char __user *ubuf, | ||
181 | size_t count, loff_t *ppos) | ||
182 | { | ||
183 | int i; | ||
184 | for (i = 0; i < ARRAY_SIZE(severities); i++) | ||
185 | severities[i].covered = 0; | ||
186 | return count; | ||
187 | } | ||
188 | |||
189 | static const struct file_operations severities_coverage_fops = { | ||
190 | .open = severities_coverage_open, | ||
191 | .release = seq_release, | ||
192 | .read = seq_read, | ||
193 | .write = severities_coverage_write, | ||
194 | }; | ||
195 | |||
196 | static int __init severities_debugfs_init(void) | ||
197 | { | ||
198 | struct dentry *dmce = NULL, *fseverities_coverage = NULL; | ||
199 | |||
200 | dmce = debugfs_create_dir("mce", NULL); | ||
201 | if (dmce == NULL) | ||
202 | goto err_out; | ||
203 | fseverities_coverage = debugfs_create_file("severities-coverage", | ||
204 | 0444, dmce, NULL, | ||
205 | &severities_coverage_fops); | ||
206 | if (fseverities_coverage == NULL) | ||
207 | goto err_out; | ||
208 | |||
209 | return 0; | ||
210 | |||
211 | err_out: | ||
212 | if (fseverities_coverage) | ||
213 | debugfs_remove(fseverities_coverage); | ||
214 | if (dmce) | ||
215 | debugfs_remove(dmce); | ||
216 | return -ENOMEM; | ||
217 | } | ||
218 | late_initcall(severities_debugfs_init); | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c new file mode 100644 index 000000000000..fabba15e4558 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -0,0 +1,1964 @@ | |||
1 | /* | ||
2 | * Machine check handler. | ||
3 | * | ||
4 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. | ||
5 | * Rest from unknown author(s). | ||
6 | * 2004 Andi Kleen. Rewrote most of it. | ||
7 | * Copyright 2008 Intel Corporation | ||
8 | * Author: Andi Kleen | ||
9 | */ | ||
10 | #include <linux/thread_info.h> | ||
11 | #include <linux/capability.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/ratelimit.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/rcupdate.h> | ||
17 | #include <linux/kobject.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <linux/kdebug.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/percpu.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/sysdev.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/ctype.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/sysfs.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/kmod.h> | ||
31 | #include <linux/poll.h> | ||
32 | #include <linux/nmi.h> | ||
33 | #include <linux/cpu.h> | ||
34 | #include <linux/smp.h> | ||
35 | #include <linux/fs.h> | ||
36 | #include <linux/mm.h> | ||
37 | |||
38 | #include <asm/processor.h> | ||
39 | #include <asm/hw_irq.h> | ||
40 | #include <asm/apic.h> | ||
41 | #include <asm/idle.h> | ||
42 | #include <asm/ipi.h> | ||
43 | #include <asm/mce.h> | ||
44 | #include <asm/msr.h> | ||
45 | |||
46 | #include "mce-internal.h" | ||
47 | #include "mce.h" | ||
48 | |||
49 | /* Handle unconfigured int18 (should never happen) */ | ||
50 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) | ||
51 | { | ||
52 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", | ||
53 | smp_processor_id()); | ||
54 | } | ||
55 | |||
56 | /* Call the installed machine check handler for this CPU setup. */ | ||
57 | void (*machine_check_vector)(struct pt_regs *, long error_code) = | ||
58 | unexpected_machine_check; | ||
59 | |||
60 | int mce_disabled; | ||
61 | |||
62 | #ifdef CONFIG_X86_NEW_MCE | ||
63 | |||
64 | #define MISC_MCELOG_MINOR 227 | ||
65 | |||
66 | #define SPINUNIT 100 /* 100ns */ | ||
67 | |||
68 | atomic_t mce_entry; | ||
69 | |||
70 | DEFINE_PER_CPU(unsigned, mce_exception_count); | ||
71 | |||
72 | /* | ||
73 | * Tolerant levels: | ||
74 | * 0: always panic on uncorrected errors, log corrected errors | ||
75 | * 1: panic or SIGBUS on uncorrected errors, log corrected errors | ||
76 | * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors | ||
77 | * 3: never panic or SIGBUS, log all errors (for testing only) | ||
78 | */ | ||
79 | static int tolerant = 1; | ||
80 | static int banks; | ||
81 | static u64 *bank; | ||
82 | static unsigned long notify_user; | ||
83 | static int rip_msr; | ||
84 | static int mce_bootlog = -1; | ||
85 | static int monarch_timeout = -1; | ||
86 | static int mce_panic_timeout; | ||
87 | static int mce_dont_log_ce; | ||
88 | int mce_cmci_disabled; | ||
89 | int mce_ignore_ce; | ||
90 | int mce_ser; | ||
91 | |||
92 | static char trigger[128]; | ||
93 | static char *trigger_argv[2] = { trigger, NULL }; | ||
94 | |||
95 | static unsigned long dont_init_banks; | ||
96 | |||
97 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); | ||
98 | static DEFINE_PER_CPU(struct mce, mces_seen); | ||
99 | static int cpu_missing; | ||
100 | |||
101 | |||
102 | /* MCA banks polled by the period polling timer for corrected events */ | ||
103 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | ||
104 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | ||
105 | }; | ||
106 | |||
107 | static inline int skip_bank_init(int i) | ||
108 | { | ||
109 | return i < BITS_PER_LONG && test_bit(i, &dont_init_banks); | ||
110 | } | ||
111 | |||
112 | static DEFINE_PER_CPU(struct work_struct, mce_work); | ||
113 | |||
114 | /* Do initial initialization of a struct mce */ | ||
115 | void mce_setup(struct mce *m) | ||
116 | { | ||
117 | memset(m, 0, sizeof(struct mce)); | ||
118 | m->cpu = m->extcpu = smp_processor_id(); | ||
119 | rdtscll(m->tsc); | ||
120 | /* We hope get_seconds stays lockless */ | ||
121 | m->time = get_seconds(); | ||
122 | m->cpuvendor = boot_cpu_data.x86_vendor; | ||
123 | m->cpuid = cpuid_eax(1); | ||
124 | #ifdef CONFIG_SMP | ||
125 | m->socketid = cpu_data(m->extcpu).phys_proc_id; | ||
126 | #endif | ||
127 | m->apicid = cpu_data(m->extcpu).initial_apicid; | ||
128 | rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); | ||
129 | } | ||
130 | |||
131 | DEFINE_PER_CPU(struct mce, injectm); | ||
132 | EXPORT_PER_CPU_SYMBOL_GPL(injectm); | ||
133 | |||
134 | /* | ||
135 | * Lockless MCE logging infrastructure. | ||
136 | * This avoids deadlocks on printk locks without having to break locks. Also | ||
137 | * separate MCEs from kernel messages to avoid bogus bug reports. | ||
138 | */ | ||
139 | |||
140 | static struct mce_log mcelog = { | ||
141 | .signature = MCE_LOG_SIGNATURE, | ||
142 | .len = MCE_LOG_LEN, | ||
143 | .recordlen = sizeof(struct mce), | ||
144 | }; | ||
145 | |||
146 | void mce_log(struct mce *mce) | ||
147 | { | ||
148 | unsigned next, entry; | ||
149 | |||
150 | mce->finished = 0; | ||
151 | wmb(); | ||
152 | for (;;) { | ||
153 | entry = rcu_dereference(mcelog.next); | ||
154 | for (;;) { | ||
155 | /* | ||
156 | * When the buffer fills up discard new entries. | ||
157 | * Assume that the earlier errors are the more | ||
158 | * interesting ones: | ||
159 | */ | ||
160 | if (entry >= MCE_LOG_LEN) { | ||
161 | set_bit(MCE_OVERFLOW, | ||
162 | (unsigned long *)&mcelog.flags); | ||
163 | return; | ||
164 | } | ||
165 | /* Old left over entry. Skip: */ | ||
166 | if (mcelog.entry[entry].finished) { | ||
167 | entry++; | ||
168 | continue; | ||
169 | } | ||
170 | break; | ||
171 | } | ||
172 | smp_rmb(); | ||
173 | next = entry + 1; | ||
174 | if (cmpxchg(&mcelog.next, entry, next) == entry) | ||
175 | break; | ||
176 | } | ||
177 | memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); | ||
178 | wmb(); | ||
179 | mcelog.entry[entry].finished = 1; | ||
180 | wmb(); | ||
181 | |||
182 | mce->finished = 1; | ||
183 | set_bit(0, ¬ify_user); | ||
184 | } | ||
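The cmpxchg on mcelog.next is what lets several CPUs, possibly in NMI context, append concurrently: only the writer that advances next from entry to entry+1 owns that slot, and the per-entry finished flag then tells readers when the copy is complete. A userspace sketch of just the claim-a-slot step, using GCC __atomic builtins as stand-ins for the kernel's cmpxchg() and barriers (an assumption of this sketch, not the kernel API):

#include <stdio.h>

#define LOG_LEN 32

static unsigned log_next;
static int log_entry[LOG_LEN];

static int log_add(int value)
{
	unsigned entry, next;

	do {
		entry = __atomic_load_n(&log_next, __ATOMIC_ACQUIRE);
		if (entry >= LOG_LEN)
			return -1;	/* buffer full: drop new events, keep old ones */
		next = entry + 1;
		/* Only the writer that moves log_next from entry to next owns slot "entry". */
	} while (!__atomic_compare_exchange_n(&log_next, &entry, next, 0,
					      __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));

	log_entry[entry] = value;	/* a real log would also set a "finished" flag here */
	return entry;
}

int main(void)
{
	printf("value 42 stored in slot %d\n", log_add(42));
	return 0;
}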
185 | |||
186 | static void print_mce(struct mce *m) | ||
187 | { | ||
188 | printk(KERN_EMERG | ||
189 | "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", | ||
190 | m->extcpu, m->mcgstatus, m->bank, m->status); | ||
191 | if (m->ip) { | ||
192 | printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", | ||
193 | !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", | ||
194 | m->cs, m->ip); | ||
195 | if (m->cs == __KERNEL_CS) | ||
196 | print_symbol("{%s}", m->ip); | ||
197 | printk("\n"); | ||
198 | } | ||
199 | printk(KERN_EMERG "TSC %llx ", m->tsc); | ||
200 | if (m->addr) | ||
201 | printk("ADDR %llx ", m->addr); | ||
202 | if (m->misc) | ||
203 | printk("MISC %llx ", m->misc); | ||
204 | printk("\n"); | ||
205 | printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", | ||
206 | m->cpuvendor, m->cpuid, m->time, m->socketid, | ||
207 | m->apicid); | ||
208 | } | ||
209 | |||
210 | static void print_mce_head(void) | ||
211 | { | ||
212 | printk(KERN_EMERG "\n" KERN_EMERG "HARDWARE ERROR\n"); | ||
213 | } | ||
214 | |||
215 | static void print_mce_tail(void) | ||
216 | { | ||
217 | printk(KERN_EMERG "This is not a software problem!\n" | ||
218 | KERN_EMERG "Run through mcelog --ascii to decode and contact your hardware vendor\n"); | ||
219 | } | ||
220 | |||
221 | #define PANIC_TIMEOUT 5 /* 5 seconds */ | ||
222 | |||
223 | static atomic_t mce_paniced; | ||
224 | |||
225 | /* Panic in progress. Enable interrupts and wait for final IPI */ | ||
226 | static void wait_for_panic(void) | ||
227 | { | ||
228 | long timeout = PANIC_TIMEOUT*USEC_PER_SEC; | ||
229 | preempt_disable(); | ||
230 | local_irq_enable(); | ||
231 | while (timeout-- > 0) | ||
232 | udelay(1); | ||
233 | if (panic_timeout == 0) | ||
234 | panic_timeout = mce_panic_timeout; | ||
235 | panic("Panicing machine check CPU died"); | ||
236 | } | ||
237 | |||
238 | static void mce_panic(char *msg, struct mce *final, char *exp) | ||
239 | { | ||
240 | int i; | ||
241 | |||
242 | /* | ||
243 | * Make sure only one CPU runs in machine check panic | ||
244 | */ | ||
245 | if (atomic_add_return(1, &mce_paniced) > 1) | ||
246 | wait_for_panic(); | ||
247 | barrier(); | ||
248 | |||
249 | bust_spinlocks(1); | ||
250 | console_verbose(); | ||
251 | print_mce_head(); | ||
252 | /* First print corrected ones that are still unlogged */ | ||
253 | for (i = 0; i < MCE_LOG_LEN; i++) { | ||
254 | struct mce *m = &mcelog.entry[i]; | ||
255 | if (!(m->status & MCI_STATUS_VAL)) | ||
256 | continue; | ||
257 | if (!(m->status & MCI_STATUS_UC)) | ||
258 | print_mce(m); | ||
259 | } | ||
260 | /* Now print uncorrected but with the final one last */ | ||
261 | for (i = 0; i < MCE_LOG_LEN; i++) { | ||
262 | struct mce *m = &mcelog.entry[i]; | ||
263 | if (!(m->status & MCI_STATUS_VAL)) | ||
264 | continue; | ||
265 | if (!(m->status & MCI_STATUS_UC)) | ||
266 | continue; | ||
267 | if (!final || memcmp(m, final, sizeof(struct mce))) | ||
268 | print_mce(m); | ||
269 | } | ||
270 | if (final) | ||
271 | print_mce(final); | ||
272 | if (cpu_missing) | ||
273 | printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n"); | ||
274 | print_mce_tail(); | ||
275 | if (exp) | ||
276 | printk(KERN_EMERG "Machine check: %s\n", exp); | ||
277 | if (panic_timeout == 0) | ||
278 | panic_timeout = mce_panic_timeout; | ||
279 | panic(msg); | ||
280 | } | ||
281 | |||
282 | /* Support code for software error injection */ | ||
283 | |||
284 | static int msr_to_offset(u32 msr) | ||
285 | { | ||
286 | unsigned bank = __get_cpu_var(injectm.bank); | ||
287 | if (msr == rip_msr) | ||
288 | return offsetof(struct mce, ip); | ||
289 | if (msr == MSR_IA32_MC0_STATUS + bank*4) | ||
290 | return offsetof(struct mce, status); | ||
291 | if (msr == MSR_IA32_MC0_ADDR + bank*4) | ||
292 | return offsetof(struct mce, addr); | ||
293 | if (msr == MSR_IA32_MC0_MISC + bank*4) | ||
294 | return offsetof(struct mce, misc); | ||
295 | if (msr == MSR_IA32_MCG_STATUS) | ||
296 | return offsetof(struct mce, mcgstatus); | ||
297 | return -1; | ||
298 | } | ||
299 | |||
300 | /* MSR access wrappers used for error injection */ | ||
301 | static u64 mce_rdmsrl(u32 msr) | ||
302 | { | ||
303 | u64 v; | ||
304 | if (__get_cpu_var(injectm).finished) { | ||
305 | int offset = msr_to_offset(msr); | ||
306 | if (offset < 0) | ||
307 | return 0; | ||
308 | return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); | ||
309 | } | ||
310 | rdmsrl(msr, v); | ||
311 | return v; | ||
312 | } | ||
313 | |||
314 | static void mce_wrmsrl(u32 msr, u64 v) | ||
315 | { | ||
316 | if (__get_cpu_var(injectm).finished) { | ||
317 | int offset = msr_to_offset(msr); | ||
318 | if (offset >= 0) | ||
319 | *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; | ||
320 | return; | ||
321 | } | ||
322 | wrmsrl(msr, v); | ||
323 | } | ||
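The injection trick is to map MSR numbers to offsets inside a struct and, while a fake event is pending, satisfy register reads from that struct instead of the hardware. A standalone sketch of that redirect (the MSR numbers and struct are local stand-ins for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MSR_STATUS	0x401		/* stand-in MSR numbers for the sketch */
#define MSR_ADDR	0x402

struct fake_mce { uint64_t status, addr; };

static int msr_to_off(unsigned msr)
{
	if (msr == MSR_STATUS)
		return offsetof(struct fake_mce, status);
	if (msr == MSR_ADDR)
		return offsetof(struct fake_mce, addr);
	return -1;			/* unknown MSR: caller reads 0 */
}

static uint64_t fake_rdmsrl(const struct fake_mce *m, unsigned msr)
{
	int offset = msr_to_off(msr);

	if (offset < 0)
		return 0;
	return *(const uint64_t *)((const char *)m + offset);
}

int main(void)
{
	struct fake_mce m = { .status = 0xdeadbeef, .addr = 0x1000 };

	printf("status=%llx addr=%llx\n",
	       (unsigned long long)fake_rdmsrl(&m, MSR_STATUS),
	       (unsigned long long)fake_rdmsrl(&m, MSR_ADDR));
	return 0;
}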
324 | |||
325 | /* | ||
326 | * Simple lockless ring to communicate PFNs from the exception handler with the | ||
327 | * process context work function. This is vastly simplified because there's | ||
328 | * only a single reader and a single writer. | ||
329 | */ | ||
330 | #define MCE_RING_SIZE 16 /* we use one entry less */ | ||
331 | |||
332 | struct mce_ring { | ||
333 | unsigned short start; | ||
334 | unsigned short end; | ||
335 | unsigned long ring[MCE_RING_SIZE]; | ||
336 | }; | ||
337 | static DEFINE_PER_CPU(struct mce_ring, mce_ring); | ||
338 | |||
339 | /* Runs with CPU affinity in workqueue */ | ||
340 | static int mce_ring_empty(void) | ||
341 | { | ||
342 | struct mce_ring *r = &__get_cpu_var(mce_ring); | ||
343 | |||
344 | return r->start == r->end; | ||
345 | } | ||
346 | |||
347 | static int mce_ring_get(unsigned long *pfn) | ||
348 | { | ||
349 | struct mce_ring *r; | ||
350 | int ret = 0; | ||
351 | |||
352 | *pfn = 0; | ||
353 | get_cpu(); | ||
354 | r = &__get_cpu_var(mce_ring); | ||
355 | if (r->start == r->end) | ||
356 | goto out; | ||
357 | *pfn = r->ring[r->start]; | ||
358 | r->start = (r->start + 1) % MCE_RING_SIZE; | ||
359 | ret = 1; | ||
360 | out: | ||
361 | put_cpu(); | ||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | /* Always runs in MCE context with preempt off */ | ||
366 | static int mce_ring_add(unsigned long pfn) | ||
367 | { | ||
368 | struct mce_ring *r = &__get_cpu_var(mce_ring); | ||
369 | unsigned next; | ||
370 | |||
371 | next = (r->end + 1) % MCE_RING_SIZE; | ||
372 | if (next == r->start) | ||
373 | return -1; | ||
374 | r->ring[r->end] = pfn; | ||
375 | wmb(); | ||
376 | r->end = next; | ||
377 | return 0; | ||
378 | } | ||
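Because there is exactly one producer (the MCE handler) and one consumer (the work function) per CPU, the ring needs no locking; it only sacrifices one slot so that start == end can unambiguously mean "empty". A self-contained sketch of the same discipline, without the per-CPU storage and memory barriers:

#include <stdio.h>

#define RING_SIZE 16			/* one entry is deliberately wasted */

struct ring {
	unsigned short start, end;
	unsigned long slot[RING_SIZE];
};

static int ring_add(struct ring *r, unsigned long v)	/* producer */
{
	unsigned next = (r->end + 1) % RING_SIZE;

	if (next == r->start)
		return -1;		/* full */
	r->slot[r->end] = v;		/* store payload before publishing */
	r->end = next;
	return 0;
}

static int ring_get(struct ring *r, unsigned long *v)	/* consumer */
{
	if (r->start == r->end)
		return 0;		/* empty */
	*v = r->slot[r->start];
	r->start = (r->start + 1) % RING_SIZE;
	return 1;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	unsigned long v;

	ring_add(&r, 0x1234);
	while (ring_get(&r, &v))
		printf("pfn %#lx\n", v);
	return 0;
}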
379 | |||
380 | int mce_available(struct cpuinfo_x86 *c) | ||
381 | { | ||
382 | if (mce_disabled) | ||
383 | return 0; | ||
384 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); | ||
385 | } | ||
386 | |||
387 | static void mce_schedule_work(void) | ||
388 | { | ||
389 | if (!mce_ring_empty()) { | ||
390 | struct work_struct *work = &__get_cpu_var(mce_work); | ||
391 | if (!work_pending(work)) | ||
392 | schedule_work(work); | ||
393 | } | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Get the address of the instruction at the time of the machine check | ||
398 | * error. | ||
399 | */ | ||
400 | static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | ||
401 | { | ||
402 | |||
403 | if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) { | ||
404 | m->ip = regs->ip; | ||
405 | m->cs = regs->cs; | ||
406 | } else { | ||
407 | m->ip = 0; | ||
408 | m->cs = 0; | ||
409 | } | ||
410 | if (rip_msr) | ||
411 | m->ip = mce_rdmsrl(rip_msr); | ||
412 | } | ||
413 | |||
414 | #ifdef CONFIG_X86_LOCAL_APIC | ||
415 | /* | ||
416 | * Called after interrupts have been reenabled again | ||
417 | * when an MCE happened during an interrupts-off region | ||
418 | * in the kernel. | ||
419 | */ | ||
420 | asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs) | ||
421 | { | ||
422 | ack_APIC_irq(); | ||
423 | exit_idle(); | ||
424 | irq_enter(); | ||
425 | mce_notify_irq(); | ||
426 | mce_schedule_work(); | ||
427 | irq_exit(); | ||
428 | } | ||
429 | #endif | ||
430 | |||
431 | static void mce_report_event(struct pt_regs *regs) | ||
432 | { | ||
433 | if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { | ||
434 | mce_notify_irq(); | ||
435 | /* | ||
436 | * Triggering the work queue here is just an insurance | ||
437 | * policy in case the syscall exit notify handler | ||
438 | * doesn't run soon enough or ends up running on the | ||
439 | * wrong CPU (can happen when audit sleeps) | ||
440 | */ | ||
441 | mce_schedule_work(); | ||
442 | return; | ||
443 | } | ||
444 | |||
445 | #ifdef CONFIG_X86_LOCAL_APIC | ||
446 | /* | ||
447 | * Without APIC do not notify. The event will be picked | ||
448 | * up eventually. | ||
449 | */ | ||
450 | if (!cpu_has_apic) | ||
451 | return; | ||
452 | |||
453 | /* | ||
454 | * When interrupts are disabled we cannot use | ||
455 | * kernel services safely. Trigger a self interrupt | ||
456 | * through the APIC to instead do the notification | ||
457 | * after interrupts are reenabled again. | ||
458 | */ | ||
459 | apic->send_IPI_self(MCE_SELF_VECTOR); | ||
460 | |||
461 | /* | ||
462 | * Wait for idle afterwards again so that we don't leave the | ||
463 | * APIC in a non idle state because the normal APIC writes | ||
464 | * cannot exclude us. | ||
465 | */ | ||
466 | apic_wait_icr_idle(); | ||
467 | #endif | ||
468 | } | ||
469 | |||
470 | DEFINE_PER_CPU(unsigned, mce_poll_count); | ||
471 | |||
472 | /* | ||
473 | * Poll for corrected events or events that happened before reset. | ||
474 | * Those are just logged through /dev/mcelog. | ||
475 | * | ||
476 | * This is executed in standard interrupt context. | ||
477 | * | ||
478 | * Note: the spec recommends panicking for fatal unsignalled | ||
479 | * errors here. However this would be quite problematic -- | ||
480 | * we would need to reimplement the Monarch handling and | ||
481 | * it would mess up the exclusion between exception handler | ||
482 | * and poll handler -- so we skip this for now. | ||
483 | * These cases should not happen anyway, or only when the CPU | ||
484 | * is already totally confused. In this case it's likely it will | ||
485 | * not fully execute the machine check handler either. | ||
486 | */ | ||
487 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | ||
488 | { | ||
489 | struct mce m; | ||
490 | int i; | ||
491 | |||
492 | __get_cpu_var(mce_poll_count)++; | ||
493 | |||
494 | mce_setup(&m); | ||
495 | |||
496 | m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); | ||
497 | for (i = 0; i < banks; i++) { | ||
498 | if (!bank[i] || !test_bit(i, *b)) | ||
499 | continue; | ||
500 | |||
501 | m.misc = 0; | ||
502 | m.addr = 0; | ||
503 | m.bank = i; | ||
504 | m.tsc = 0; | ||
505 | |||
506 | barrier(); | ||
507 | m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); | ||
508 | if (!(m.status & MCI_STATUS_VAL)) | ||
509 | continue; | ||
510 | |||
511 | /* | ||
512 | * Uncorrected or signalled events are handled by the exception | ||
513 | * handler when it is enabled, so don't process those here. | ||
514 | * | ||
515 | * TBD do the same check for MCI_STATUS_EN here? | ||
516 | */ | ||
517 | if (!(flags & MCP_UC) && | ||
518 | (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC))) | ||
519 | continue; | ||
520 | |||
521 | if (m.status & MCI_STATUS_MISCV) | ||
522 | m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); | ||
523 | if (m.status & MCI_STATUS_ADDRV) | ||
524 | m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); | ||
525 | |||
526 | if (!(flags & MCP_TIMESTAMP)) | ||
527 | m.tsc = 0; | ||
528 | /* | ||
529 | * Don't get the IP here because it's unlikely to | ||
530 | * have anything to do with the actual error location. | ||
531 | */ | ||
532 | if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) { | ||
533 | mce_log(&m); | ||
534 | add_taint(TAINT_MACHINE_CHECK); | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * Clear state for this bank. | ||
539 | */ | ||
540 | mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * Don't clear MCG_STATUS here because it's only defined for | ||
545 | * exceptions. | ||
546 | */ | ||
547 | |||
548 | sync_core(); | ||
549 | } | ||
550 | EXPORT_SYMBOL_GPL(machine_check_poll); | ||
551 | |||
552 | /* | ||
553 | * Do a quick check if any of the events requires a panic. | ||
554 | * This decides if we keep the events around or clear them. | ||
555 | */ | ||
556 | static int mce_no_way_out(struct mce *m, char **msg) | ||
557 | { | ||
558 | int i; | ||
559 | |||
560 | for (i = 0; i < banks; i++) { | ||
561 | m->status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); | ||
562 | if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) | ||
563 | return 1; | ||
564 | } | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * Variable to establish order between CPUs while scanning. | ||
570 | * Each CPU spins initially until executing is equal its number. | ||
571 | */ | ||
572 | static atomic_t mce_executing; | ||
573 | |||
574 | /* | ||
575 | * Defines order of CPUs on entry. First CPU becomes Monarch. | ||
576 | */ | ||
577 | static atomic_t mce_callin; | ||
578 | |||
579 | /* | ||
580 | * Check if a timeout waiting for other CPUs happened. | ||
581 | */ | ||
582 | static int mce_timed_out(u64 *t) | ||
583 | { | ||
584 | /* | ||
585 | * The others already did panic for some reason. | ||
586 | * Bail out like in a timeout. | ||
587 | * rmb() to tell the compiler that system_state | ||
588 | * might have been modified by someone else. | ||
589 | */ | ||
590 | rmb(); | ||
591 | if (atomic_read(&mce_paniced)) | ||
592 | wait_for_panic(); | ||
593 | if (!monarch_timeout) | ||
594 | goto out; | ||
595 | if ((s64)*t < SPINUNIT) { | ||
596 | /* CHECKME: Make panic default for 1 too? */ | ||
597 | if (tolerant < 1) | ||
598 | mce_panic("Timeout synchronizing machine check over CPUs", | ||
599 | NULL, NULL); | ||
600 | cpu_missing = 1; | ||
601 | return 1; | ||
602 | } | ||
603 | *t -= SPINUNIT; | ||
604 | out: | ||
605 | touch_nmi_watchdog(); | ||
606 | return 0; | ||
607 | } | ||
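Each call from the spin loops below burns one SPINUNIT (100 ns) of the monarch_timeout budget and reports when it is exhausted. A tiny sketch of that bookkeeping with a made-up 1 us budget:

#include <stdio.h>
#include <stdint.h>

#define SPINUNIT	100		/* ns per spin, as in the kernel code */

static int timed_out(int64_t *budget)
{
	if (*budget < SPINUNIT)
		return 1;		/* give up waiting for the other CPUs */
	*budget -= SPINUNIT;
	return 0;
}

int main(void)
{
	int64_t budget = 1000;		/* pretend 1 us monarch timeout */
	int spins = 0;

	while (!timed_out(&budget))
		spins++;
	printf("gave up after %d spins\n", spins);	/* 10 */
	return 0;
}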
608 | |||
609 | /* | ||
610 | * The Monarch's reign. The Monarch is the CPU who entered | ||
611 | * the machine check handler first. It waits for the others to | ||
612 | * raise the exception too and then grades them. When any | ||
613 | * error is fatal panic. Only then let the others continue. | ||
614 | * | ||
615 | * The other CPUs entering the MCE handler will be controlled by the | ||
616 | * Monarch. They are called Subjects. | ||
617 | * | ||
618 | * This way we prevent any potential data corruption in an unrecoverable case | ||
619 | * and also make sure that all CPUs' errors are always examined. | ||
620 | * | ||
621 | * This also detects the case of a machine check event coming from outer | ||
622 | * space (not detected by any CPU). In this case some external agent wants | ||
623 | * us to shut down, so panic too. | ||
624 | * | ||
625 | * The other CPUs might still decide to panic if the handler happens | ||
626 | * in an unrecoverable place, but in this case the system is in a semi-stable | ||
627 | * state and won't corrupt anything by itself. It's ok to let the others | ||
628 | * continue for a bit first. | ||
629 | * | ||
630 | * All the spin loops have timeouts; when a timeout happens a CPU | ||
631 | * typically elects itself to be Monarch. | ||
632 | */ | ||
633 | static void mce_reign(void) | ||
634 | { | ||
635 | int cpu; | ||
636 | struct mce *m = NULL; | ||
637 | int global_worst = 0; | ||
638 | char *msg = NULL; | ||
639 | char *nmsg = NULL; | ||
640 | |||
641 | /* | ||
642 | * This CPU is the Monarch and the other CPUs have run | ||
643 | * through their handlers. | ||
644 | * Grade the severity of the errors of all the CPUs. | ||
645 | */ | ||
646 | for_each_possible_cpu(cpu) { | ||
647 | int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant, | ||
648 | &nmsg); | ||
649 | if (severity > global_worst) { | ||
650 | msg = nmsg; | ||
651 | global_worst = severity; | ||
652 | m = &per_cpu(mces_seen, cpu); | ||
653 | } | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * Cannot recover? Panic here then. | ||
658 | * This dumps all the mces in the log buffer and stops the | ||
659 | * other CPUs. | ||
660 | */ | ||
661 | if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3) | ||
662 | mce_panic("Fatal Machine check", m, msg); | ||
663 | |||
664 | /* | ||
665 | * For a UC error somewhere we let the CPU that detects it handle it. | ||
666 | * We also must let the others continue, otherwise the handling | ||
667 | * CPU could deadlock on a lock. | ||
668 | */ | ||
669 | |||
670 | /* | ||
671 | * No machine check event found. Must be some external | ||
672 | * source or one CPU is hung. Panic. | ||
673 | */ | ||
674 | if (!m && tolerant < 3) | ||
675 | mce_panic("Machine check from unknown source", NULL, NULL); | ||
676 | |||
677 | /* | ||
678 | * Now clear all the mces_seen so that they don't reappear on | ||
679 | * the next mce. | ||
680 | */ | ||
681 | for_each_possible_cpu(cpu) | ||
682 | memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); | ||
683 | } | ||
684 | |||
685 | static atomic_t global_nwo; | ||
686 | |||
687 | /* | ||
688 | * Start of Monarch synchronization. This waits until all CPUs have | ||
689 | * entered the exception handler and then determines if any of them | ||
690 | * saw a fatal event that requires panic. Then it executes them | ||
691 | * in the entry order. | ||
692 | * TBD double check parallel CPU hotunplug | ||
693 | */ | ||
694 | static int mce_start(int no_way_out, int *order) | ||
695 | { | ||
696 | int nwo; | ||
697 | int cpus = num_online_cpus(); | ||
698 | u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC; | ||
699 | |||
700 | if (!timeout) { | ||
701 | *order = -1; | ||
702 | return no_way_out; | ||
703 | } | ||
704 | |||
705 | atomic_add(no_way_out, &global_nwo); | ||
706 | |||
707 | /* | ||
708 | * Wait for everyone. | ||
709 | */ | ||
710 | while (atomic_read(&mce_callin) != cpus) { | ||
711 | if (mce_timed_out(&timeout)) { | ||
712 | atomic_set(&global_nwo, 0); | ||
713 | *order = -1; | ||
714 | return no_way_out; | ||
715 | } | ||
716 | ndelay(SPINUNIT); | ||
717 | } | ||
718 | |||
719 | /* | ||
720 | * Cache the global no_way_out state. | ||
721 | */ | ||
722 | nwo = atomic_read(&global_nwo); | ||
723 | |||
724 | /* | ||
725 | * Monarch starts executing now, the others wait. | ||
726 | */ | ||
727 | if (*order == 1) { | ||
728 | atomic_set(&mce_executing, 1); | ||
729 | return nwo; | ||
730 | } | ||
731 | |||
732 | /* | ||
733 | * Now start the scanning loop one by one | ||
734 | * in the original callin order. | ||
735 | * This way when there are any shared banks it will | ||
736 | * be only seen by one CPU before cleared, avoiding duplicates. | ||
737 | */ | ||
738 | while (atomic_read(&mce_executing) < *order) { | ||
739 | if (mce_timed_out(&timeout)) { | ||
740 | atomic_set(&global_nwo, 0); | ||
741 | *order = -1; | ||
742 | return no_way_out; | ||
743 | } | ||
744 | ndelay(SPINUNIT); | ||
745 | } | ||
746 | return nwo; | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * Synchronize between CPUs after main scanning loop. | ||
751 | * This invokes the bulk of the Monarch processing. | ||
752 | */ | ||
753 | static int mce_end(int order) | ||
754 | { | ||
755 | int ret = -1; | ||
756 | u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC; | ||
757 | |||
758 | if (!timeout) | ||
759 | goto reset; | ||
760 | if (order < 0) | ||
761 | goto reset; | ||
762 | |||
763 | /* | ||
764 | * Allow others to run. | ||
765 | */ | ||
766 | atomic_inc(&mce_executing); | ||
767 | |||
768 | if (order == 1) { | ||
769 | /* CHECKME: Can this race with a parallel hotplug? */ | ||
770 | int cpus = num_online_cpus(); | ||
771 | |||
772 | /* | ||
773 | * Monarch: Wait for everyone to go through their scanning | ||
774 | * loops. | ||
775 | */ | ||
776 | while (atomic_read(&mce_executing) <= cpus) { | ||
777 | if (mce_timed_out(&timeout)) | ||
778 | goto reset; | ||
779 | ndelay(SPINUNIT); | ||
780 | } | ||
781 | |||
782 | mce_reign(); | ||
783 | barrier(); | ||
784 | ret = 0; | ||
785 | } else { | ||
786 | /* | ||
787 | * Subject: Wait for Monarch to finish. | ||
788 | */ | ||
789 | while (atomic_read(&mce_executing) != 0) { | ||
790 | if (mce_timed_out(&timeout)) | ||
791 | goto reset; | ||
792 | ndelay(SPINUNIT); | ||
793 | } | ||
794 | |||
795 | /* | ||
796 | * Don't reset anything. That's done by the Monarch. | ||
797 | */ | ||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | /* | ||
802 | * Reset all global state. | ||
803 | */ | ||
804 | reset: | ||
805 | atomic_set(&global_nwo, 0); | ||
806 | atomic_set(&mce_callin, 0); | ||
807 | barrier(); | ||
808 | |||
809 | /* | ||
810 | * Let others run again. | ||
811 | */ | ||
812 | atomic_set(&mce_executing, 0); | ||
813 | return ret; | ||
814 | } | ||
815 | |||
816 | /* | ||
817 | * Check if the address reported by the CPU is in a format we can parse. | ||
818 | * It would be possible to add code for most other cases, but all would | ||
819 | * be somewhat complicated (e.g. segment offset would require an instruction | ||
820 | * parser). So only support physical addresses up to page granularity for now. | ||
821 | */ | ||
822 | static int mce_usable_address(struct mce *m) | ||
823 | { | ||
824 | if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV)) | ||
825 | return 0; | ||
826 | if ((m->misc & 0x3f) > PAGE_SHIFT) | ||
827 | return 0; | ||
828 | if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS) | ||
829 | return 0; | ||
830 | return 1; | ||
831 | } | ||
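Put differently, the low 6 bits of MCi_MISC encode the least significant valid bit of the reported address (which must not exceed PAGE_SHIFT for page-granular recovery) and bits 6-8 encode the address mode. A standalone sketch of those two checks, with PAGE_SHIFT and the physical-address mode value (2, mirroring MCM_ADDR_PHYS) assumed locally:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define ADDR_PHYS	2		/* physical address mode, as in MCM_ADDR_PHYS */

static int usable(uint64_t misc)
{
	unsigned lsb  = misc & 0x3f;		/* recoverable address LSB */
	unsigned mode = (misc >> 6) & 7;	/* address mode */

	return lsb <= PAGE_SHIFT && mode == ADDR_PHYS;
}

int main(void)
{
	printf("%d\n", usable((ADDR_PHYS << 6) | PAGE_SHIFT));	/* prints 1 */
	return 0;
}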
832 | |||
833 | static void mce_clear_state(unsigned long *toclear) | ||
834 | { | ||
835 | int i; | ||
836 | |||
837 | for (i = 0; i < banks; i++) { | ||
838 | if (test_bit(i, toclear)) | ||
839 | mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
840 | } | ||
841 | } | ||
842 | |||
843 | /* | ||
844 | * The actual machine check handler. This only handles real | ||
845 | * exceptions when something got corrupted coming in through int 18. | ||
846 | * | ||
847 | * This is executed in NMI context not subject to normal locking rules. This | ||
848 | * implies that most kernel services cannot be safely used. Don't even | ||
849 | * think about putting a printk in there! | ||
850 | * | ||
851 | * On Intel systems this is entered on all CPUs in parallel through | ||
852 | * MCE broadcast. However some CPUs might be broken beyond repair, | ||
853 | * so be always careful when synchronizing with others. | ||
854 | */ | ||
855 | void do_machine_check(struct pt_regs *regs, long error_code) | ||
856 | { | ||
857 | struct mce m, *final; | ||
858 | int i; | ||
859 | int worst = 0; | ||
860 | int severity; | ||
861 | /* | ||
862 | * Establish sequential order between the CPUs entering the machine | ||
863 | * check handler. | ||
864 | */ | ||
865 | int order; | ||
866 | |||
867 | /* | ||
868 | * If no_way_out gets set, there is no safe way to recover from this | ||
869 | * MCE. If tolerant is cranked up, we'll try anyway. | ||
870 | */ | ||
871 | int no_way_out = 0; | ||
872 | /* | ||
873 | * If kill_it gets set, there might be a way to recover from this | ||
874 | * error. | ||
875 | */ | ||
876 | int kill_it = 0; | ||
877 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); | ||
878 | char *msg = "Unknown"; | ||
879 | |||
880 | atomic_inc(&mce_entry); | ||
881 | |||
882 | __get_cpu_var(mce_exception_count)++; | ||
883 | |||
884 | if (notify_die(DIE_NMI, "machine check", regs, error_code, | ||
885 | 18, SIGKILL) == NOTIFY_STOP) | ||
886 | goto out; | ||
887 | if (!banks) | ||
888 | goto out; | ||
889 | |||
890 | order = atomic_add_return(1, &mce_callin); | ||
891 | mce_setup(&m); | ||
892 | |||
893 | m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); | ||
894 | no_way_out = mce_no_way_out(&m, &msg); | ||
895 | |||
896 | final = &__get_cpu_var(mces_seen); | ||
897 | *final = m; | ||
898 | |||
899 | barrier(); | ||
900 | |||
901 | /* | ||
902 | * When there is no restart IP we must always kill or panic. | ||
903 | */ | ||
904 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) | ||
905 | kill_it = 1; | ||
906 | |||
907 | /* | ||
908 | * Go through all the banks in exclusion of the other CPUs. | ||
909 | * This way we don't report duplicated events on shared banks | ||
910 | * because the first one to see it will clear it. | ||
911 | */ | ||
912 | no_way_out = mce_start(no_way_out, &order); | ||
913 | for (i = 0; i < banks; i++) { | ||
914 | __clear_bit(i, toclear); | ||
915 | if (!bank[i]) | ||
916 | continue; | ||
917 | |||
918 | m.misc = 0; | ||
919 | m.addr = 0; | ||
920 | m.bank = i; | ||
921 | |||
922 | m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); | ||
923 | if ((m.status & MCI_STATUS_VAL) == 0) | ||
924 | continue; | ||
925 | |||
926 | /* | ||
927 | * Non-uncorrected or non-signaled errors are handled by | ||
928 | * machine_check_poll. Leave them alone, unless this panics. | ||
929 | */ | ||
930 | if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) && | ||
931 | !no_way_out) | ||
932 | continue; | ||
933 | |||
934 | /* | ||
935 | * Set taint even when machine check was not enabled. | ||
936 | */ | ||
937 | add_taint(TAINT_MACHINE_CHECK); | ||
938 | |||
939 | severity = mce_severity(&m, tolerant, NULL); | ||
940 | |||
941 | /* | ||
942 | * When the machine check was meant for the corrected-error handler, | ||
943 | * don't touch it unless we're panicking. | ||
944 | */ | ||
945 | if (severity == MCE_KEEP_SEVERITY && !no_way_out) | ||
946 | continue; | ||
947 | __set_bit(i, toclear); | ||
948 | if (severity == MCE_NO_SEVERITY) { | ||
949 | /* | ||
950 | * Machine check event was not enabled. Clear, but | ||
951 | * ignore. | ||
952 | */ | ||
953 | continue; | ||
954 | } | ||
955 | |||
956 | /* | ||
957 | * Kill on action required. | ||
958 | */ | ||
959 | if (severity == MCE_AR_SEVERITY) | ||
960 | kill_it = 1; | ||
961 | |||
962 | if (m.status & MCI_STATUS_MISCV) | ||
963 | m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); | ||
964 | if (m.status & MCI_STATUS_ADDRV) | ||
965 | m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); | ||
966 | |||
967 | /* | ||
968 | * Action optional error. Queue address for later processing. | ||
969 | * When the ring overflows we just ignore the AO error. | ||
970 | * RED-PEN add some logging mechanism when | ||
971 | * usable_address or mce_add_ring fails. | ||
972 | * RED-PEN don't ignore overflow for tolerant == 0 | ||
973 | */ | ||
974 | if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) | ||
975 | mce_ring_add(m.addr >> PAGE_SHIFT); | ||
976 | |||
977 | mce_get_rip(&m, regs); | ||
978 | mce_log(&m); | ||
979 | |||
980 | if (severity > worst) { | ||
981 | *final = m; | ||
982 | worst = severity; | ||
983 | } | ||
984 | } | ||
985 | |||
986 | if (!no_way_out) | ||
987 | mce_clear_state(toclear); | ||
988 | |||
989 | /* | ||
990 | * Do most of the synchronization with other CPUs. | ||
991 | * When there's any problem use only local no_way_out state. | ||
992 | */ | ||
993 | if (mce_end(order) < 0) | ||
994 | no_way_out = worst >= MCE_PANIC_SEVERITY; | ||
995 | |||
996 | /* | ||
997 | * If we have decided that we just CAN'T continue, and the user | ||
998 | * has not set tolerant to an insane level, give up and die. | ||
999 | * | ||
1000 | * This is mainly used in the case when the system doesn't | ||
1001 | * support MCE broadcasting or it has been disabled. | ||
1002 | */ | ||
1003 | if (no_way_out && tolerant < 3) | ||
1004 | mce_panic("Fatal machine check on current CPU", final, msg); | ||
1005 | |||
1006 | /* | ||
1007 | * If the error seems to be unrecoverable, something should be | ||
1008 | * done. Try to kill as little as possible. If we can kill just | ||
1009 | * one task, do that. If the user has set the tolerance very | ||
1010 | * high, don't try to do anything at all. | ||
1011 | */ | ||
1012 | |||
1013 | if (kill_it && tolerant < 3) | ||
1014 | force_sig(SIGBUS, current); | ||
1015 | |||
1016 | /* notify userspace ASAP */ | ||
1017 | set_thread_flag(TIF_MCE_NOTIFY); | ||
1018 | |||
1019 | if (worst > 0) | ||
1020 | mce_report_event(regs); | ||
1021 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); | ||
1022 | out: | ||
1023 | atomic_dec(&mce_entry); | ||
1024 | sync_core(); | ||
1025 | } | ||
1026 | EXPORT_SYMBOL_GPL(do_machine_check); | ||
1027 | |||
1028 | /* dummy to break dependency. actual code is in mm/memory-failure.c */ | ||
1029 | void __attribute__((weak)) memory_failure(unsigned long pfn, int vector) | ||
1030 | { | ||
1031 | printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn); | ||
1032 | } | ||
1033 | |||
1034 | /* | ||
1035 | * Called after mce notification in process context. This code | ||
1036 | * is allowed to sleep. Call the high level VM handler to process | ||
1037 | * any corrupted pages. | ||
1038 | * Assume that the work queue code only calls this one at a time | ||
1039 | * per CPU. | ||
1040 | * Note we don't disable preemption, so this code might run on the wrong | ||
1041 | * CPU. In this case the event is picked up by the scheduled work queue. | ||
1042 | * This is merely a fast path to expedite processing in some common | ||
1043 | * cases. | ||
1044 | */ | ||
1045 | void mce_notify_process(void) | ||
1046 | { | ||
1047 | unsigned long pfn; | ||
1048 | mce_notify_irq(); | ||
1049 | while (mce_ring_get(&pfn)) | ||
1050 | memory_failure(pfn, MCE_VECTOR); | ||
1051 | } | ||
1052 | |||
1053 | static void mce_process_work(struct work_struct *dummy) | ||
1054 | { | ||
1055 | mce_notify_process(); | ||
1056 | } | ||
1057 | |||
1058 | #ifdef CONFIG_X86_MCE_INTEL | ||
1059 | /*** | ||
1060 | * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog | ||
1061 | * @cpu: The CPU on which the event occurred. | ||
1062 | * @status: Event status information | ||
1063 | * | ||
1064 | * This function should be called by the thermal interrupt after the | ||
1065 | * event has been processed and the decision was made to log the event | ||
1066 | * further. | ||
1067 | * | ||
1068 | * The status parameter will be saved to the 'status' field of 'struct mce' | ||
1069 | * and historically has been the register value of the | ||
1070 | * MSR_IA32_THERMAL_STATUS (Intel) msr. | ||
1071 | */ | ||
1072 | void mce_log_therm_throt_event(__u64 status) | ||
1073 | { | ||
1074 | struct mce m; | ||
1075 | |||
1076 | mce_setup(&m); | ||
1077 | m.bank = MCE_THERMAL_BANK; | ||
1078 | m.status = status; | ||
1079 | mce_log(&m); | ||
1080 | } | ||
1081 | #endif /* CONFIG_X86_MCE_INTEL */ | ||
1082 | |||
1083 | /* | ||
1084 | * Periodic polling timer for "silent" machine check errors. If the | ||
1085 | * poller finds an MCE, poll 2x faster. When the poller finds no more | ||
1086 | * errors, poll 2x slower (up to check_interval seconds). | ||
1087 | */ | ||
1088 | static int check_interval = 5 * 60; /* 5 minutes */ | ||
1089 | |||
1090 | static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ | ||
1091 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | ||
1092 | |||
1093 | static void mcheck_timer(unsigned long data) | ||
1094 | { | ||
1095 | struct timer_list *t = &per_cpu(mce_timer, data); | ||
1096 | int *n; | ||
1097 | |||
1098 | WARN_ON(smp_processor_id() != data); | ||
1099 | |||
1100 | if (mce_available(¤t_cpu_data)) { | ||
1101 | machine_check_poll(MCP_TIMESTAMP, | ||
1102 | &__get_cpu_var(mce_poll_banks)); | ||
1103 | } | ||
1104 | |||
1105 | /* | ||
1106 | * Alert userspace if needed. If we logged an MCE, reduce the | ||
1107 | * polling interval, otherwise increase the polling interval. | ||
1108 | */ | ||
1109 | n = &__get_cpu_var(next_interval); | ||
1110 | if (mce_notify_irq()) | ||
1111 | *n = max(*n/2, HZ/100); | ||
1112 | else | ||
1113 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); | ||
1114 | |||
1115 | t->expires = jiffies + *n; | ||
1116 | add_timer(t); | ||
1117 | } | ||
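A sketch of that interval adaptation in isolation; HZ is assumed to be 250 here purely for the example:

#include <stdio.h>

#define HZ 250				/* assumed tick rate for the sketch */
static const int check_interval = 5 * 60;

static int next_interval(int cur, int found_event)
{
	if (found_event)			/* poll 2x faster, floor HZ/100 */
		return cur / 2 > HZ / 100 ? cur / 2 : HZ / 100;
	return cur * 2 < check_interval * HZ	/* back off, cap check_interval */
		? cur * 2 : check_interval * HZ;
}

int main(void)
{
	int n = check_interval * HZ;

	n = next_interval(n, 1);	/* event logged: shorter interval */
	n = next_interval(n, 0);	/* quiet: grow the interval again */
	printf("next poll in %d jiffies\n", n);
	return 0;
}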
1118 | |||
1119 | static void mce_do_trigger(struct work_struct *work) | ||
1120 | { | ||
1121 | call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT); | ||
1122 | } | ||
1123 | |||
1124 | static DECLARE_WORK(mce_trigger_work, mce_do_trigger); | ||
1125 | |||
1126 | /* | ||
1127 | * Notify the user(s) about new machine check events. | ||
1128 | * Can be called from interrupt context, but not from machine check/NMI | ||
1129 | * context. | ||
1130 | */ | ||
1131 | int mce_notify_irq(void) | ||
1132 | { | ||
1133 | /* Not more than two messages every minute */ | ||
1134 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); | ||
1135 | |||
1136 | clear_thread_flag(TIF_MCE_NOTIFY); | ||
1137 | |||
1138 | if (test_and_clear_bit(0, ¬ify_user)) { | ||
1139 | wake_up_interruptible(&mce_wait); | ||
1140 | |||
1141 | /* | ||
1142 | * There is no risk of missing notifications because | ||
1143 | * work_pending is always cleared before the function is | ||
1144 | * executed. | ||
1145 | */ | ||
1146 | if (trigger[0] && !work_pending(&mce_trigger_work)) | ||
1147 | schedule_work(&mce_trigger_work); | ||
1148 | |||
1149 | if (__ratelimit(&ratelimit)) | ||
1150 | printk(KERN_INFO "Machine check events logged\n"); | ||
1151 | |||
1152 | return 1; | ||
1153 | } | ||
1154 | return 0; | ||
1155 | } | ||
1156 | EXPORT_SYMBOL_GPL(mce_notify_irq); | ||
1157 | |||
1158 | /* | ||
1159 | * Initialize Machine Checks for a CPU. | ||
1160 | */ | ||
1161 | static int mce_cap_init(void) | ||
1162 | { | ||
1163 | unsigned b; | ||
1164 | u64 cap; | ||
1165 | |||
1166 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
1167 | |||
1168 | b = cap & MCG_BANKCNT_MASK; | ||
1169 | printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b); | ||
1170 | |||
1171 | if (b > MAX_NR_BANKS) { | ||
1172 | printk(KERN_WARNING | ||
1173 | "MCE: Using only %u machine check banks out of %u\n", | ||
1174 | MAX_NR_BANKS, b); | ||
1175 | b = MAX_NR_BANKS; | ||
1176 | } | ||
1177 | |||
1178 | /* Don't support asymmetric configurations today */ | ||
1179 | WARN_ON(banks != 0 && b != banks); | ||
1180 | banks = b; | ||
1181 | if (!bank) { | ||
1182 | bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); | ||
1183 | if (!bank) | ||
1184 | return -ENOMEM; | ||
1185 | memset(bank, 0xff, banks * sizeof(u64)); | ||
1186 | } | ||
1187 | |||
1188 | /* Use accurate RIP reporting if available. */ | ||
1189 | if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) | ||
1190 | rip_msr = MSR_IA32_MCG_EIP; | ||
1191 | |||
1192 | if (cap & MCG_SER_P) | ||
1193 | mce_ser = 1; | ||
1194 | |||
1195 | return 0; | ||
1196 | } | ||
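A sketch of the MCG_CAP decoding performed above; the MSR value is invented for illustration, with the low 8 bits as the bank count and bit 24 as MCG_SER_P:

#include <stdio.h>
#include <stdint.h>

#define BANKCNT_MASK	0xff
#define SER_P		(1ULL << 24)

int main(void)
{
	uint64_t cap = 0x1000806;	/* hypothetical MSR value: 6 banks, SER capable */

	printf("banks=%llu ser=%d\n",
	       (unsigned long long)(cap & BANKCNT_MASK), !!(cap & SER_P));
	return 0;
}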
1197 | |||
1198 | static void mce_init(void) | ||
1199 | { | ||
1200 | mce_banks_t all_banks; | ||
1201 | u64 cap; | ||
1202 | int i; | ||
1203 | |||
1204 | /* | ||
1205 | * Log the machine checks left over from the previous reset. | ||
1206 | */ | ||
1207 | bitmap_fill(all_banks, MAX_NR_BANKS); | ||
1208 | machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks); | ||
1209 | |||
1210 | set_in_cr4(X86_CR4_MCE); | ||
1211 | |||
1212 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
1213 | if (cap & MCG_CTL_P) | ||
1214 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | ||
1215 | |||
1216 | for (i = 0; i < banks; i++) { | ||
1217 | if (skip_bank_init(i)) | ||
1218 | continue; | ||
1219 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); | ||
1220 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
1221 | } | ||
1222 | } | ||
1223 | |||
1224 | /* Add per CPU specific workarounds here */ | ||
1225 | static void mce_cpu_quirks(struct cpuinfo_x86 *c) | ||
1226 | { | ||
1227 | /* This should be disabled by the BIOS, but isn't always */ | ||
1228 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
1229 | if (c->x86 == 15 && banks > 4) { | ||
1230 | /* | ||
1231 | * disable GART TBL walk error reporting, which | ||
1232 | * trips off incorrectly with the IOMMU & 3ware | ||
1233 | * & Cerberus: | ||
1234 | */ | ||
1235 | clear_bit(10, (unsigned long *)&bank[4]); | ||
1236 | } | ||
1237 | if (c->x86 <= 17 && mce_bootlog < 0) { | ||
1238 | /* | ||
1239 | * Lots of broken BIOSes around that don't clear them | ||
1240 | * by default and leave crap in there. Don't log: | ||
1241 | */ | ||
1242 | mce_bootlog = 0; | ||
1243 | } | ||
1244 | /* | ||
1245 | * Various K7s with broken bank 0 around. Always disable | ||
1246 | * by default. | ||
1247 | */ | ||
1248 | if (c->x86 == 6) | ||
1249 | bank[0] = 0; | ||
1250 | } | ||
1251 | |||
1252 | if (c->x86_vendor == X86_VENDOR_INTEL) { | ||
1253 | /* | ||
1254 | * SDM documents that on family 6 bank 0 should not be written | ||
1255 | * because it aliases to another special BIOS-controlled | ||
1256 | * register. | ||
1257 | * But it's not aliased anymore on model 0x1a+. | ||
1258 | * Don't ignore bank 0 completely because there could be a | ||
1259 | * valid event later, merely don't write CTL0. | ||
1260 | */ | ||
1261 | |||
1262 | if (c->x86 == 6 && c->x86_model < 0x1A) | ||
1263 | __set_bit(0, &dont_init_banks); | ||
1264 | |||
1265 | /* | ||
1266 | * All newer Intel systems support MCE broadcasting. Enable | ||
1267 | * synchronization with a one second timeout. | ||
1268 | */ | ||
1269 | if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && | ||
1270 | monarch_timeout < 0) | ||
1271 | monarch_timeout = USEC_PER_SEC; | ||
1272 | } | ||
1273 | if (monarch_timeout < 0) | ||
1274 | monarch_timeout = 0; | ||
1275 | if (mce_bootlog != 0) | ||
1276 | mce_panic_timeout = 30; | ||
1277 | } | ||
1278 | |||
1279 | static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c) | ||
1280 | { | ||
1281 | if (c->x86 != 5) | ||
1282 | return; | ||
1283 | switch (c->x86_vendor) { | ||
1284 | case X86_VENDOR_INTEL: | ||
1285 | if (mce_p5_enabled()) | ||
1286 | intel_p5_mcheck_init(c); | ||
1287 | break; | ||
1288 | case X86_VENDOR_CENTAUR: | ||
1289 | winchip_mcheck_init(c); | ||
1290 | break; | ||
1291 | } | ||
1292 | } | ||
1293 | |||
1294 | static void mce_cpu_features(struct cpuinfo_x86 *c) | ||
1295 | { | ||
1296 | switch (c->x86_vendor) { | ||
1297 | case X86_VENDOR_INTEL: | ||
1298 | mce_intel_feature_init(c); | ||
1299 | break; | ||
1300 | case X86_VENDOR_AMD: | ||
1301 | mce_amd_feature_init(c); | ||
1302 | break; | ||
1303 | default: | ||
1304 | break; | ||
1305 | } | ||
1306 | } | ||
1307 | |||
1308 | static void mce_init_timer(void) | ||
1309 | { | ||
1310 | struct timer_list *t = &__get_cpu_var(mce_timer); | ||
1311 | int *n = &__get_cpu_var(next_interval); | ||
1312 | |||
1313 | if (mce_ignore_ce) | ||
1314 | return; | ||
1315 | |||
1316 | *n = check_interval * HZ; | ||
1317 | if (!*n) | ||
1318 | return; | ||
1319 | setup_timer(t, mcheck_timer, smp_processor_id()); | ||
1320 | t->expires = round_jiffies(jiffies + *n); | ||
1321 | add_timer(t); | ||
1322 | } | ||
1323 | |||
1324 | /* | ||
1325 | * Called for each booted CPU to set up machine checks. | ||
1326 | * Must be called with preempt off: | ||
1327 | */ | ||
1328 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) | ||
1329 | { | ||
1330 | if (mce_disabled) | ||
1331 | return; | ||
1332 | |||
1333 | mce_ancient_init(c); | ||
1334 | |||
1335 | if (!mce_available(c)) | ||
1336 | return; | ||
1337 | |||
1338 | if (mce_cap_init() < 0) { | ||
1339 | mce_disabled = 1; | ||
1340 | return; | ||
1341 | } | ||
1342 | mce_cpu_quirks(c); | ||
1343 | |||
1344 | machine_check_vector = do_machine_check; | ||
1345 | |||
1346 | mce_init(); | ||
1347 | mce_cpu_features(c); | ||
1348 | mce_init_timer(); | ||
1349 | INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); | ||
1350 | } | ||
1351 | |||
1352 | /* | ||
1353 | * Character device to read and clear the MCE log. | ||
1354 | */ | ||
1355 | |||
1356 | static DEFINE_SPINLOCK(mce_state_lock); | ||
1357 | static int open_count; /* #times opened */ | ||
1358 | static int open_exclu; /* already open exclusive? */ | ||
1359 | |||
1360 | static int mce_open(struct inode *inode, struct file *file) | ||
1361 | { | ||
1362 | spin_lock(&mce_state_lock); | ||
1363 | |||
1364 | if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { | ||
1365 | spin_unlock(&mce_state_lock); | ||
1366 | |||
1367 | return -EBUSY; | ||
1368 | } | ||
1369 | |||
1370 | if (file->f_flags & O_EXCL) | ||
1371 | open_exclu = 1; | ||
1372 | open_count++; | ||
1373 | |||
1374 | spin_unlock(&mce_state_lock); | ||
1375 | |||
1376 | return nonseekable_open(inode, file); | ||
1377 | } | ||
1378 | |||
1379 | static int mce_release(struct inode *inode, struct file *file) | ||
1380 | { | ||
1381 | spin_lock(&mce_state_lock); | ||
1382 | |||
1383 | open_count--; | ||
1384 | open_exclu = 0; | ||
1385 | |||
1386 | spin_unlock(&mce_state_lock); | ||
1387 | |||
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1391 | static void collect_tscs(void *data) | ||
1392 | { | ||
1393 | unsigned long *cpu_tsc = (unsigned long *)data; | ||
1394 | |||
1395 | rdtscll(cpu_tsc[smp_processor_id()]); | ||
1396 | } | ||
1397 | |||
1398 | static DEFINE_MUTEX(mce_read_mutex); | ||
1399 | |||
1400 | static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | ||
1401 | loff_t *off) | ||
1402 | { | ||
1403 | char __user *buf = ubuf; | ||
1404 | unsigned long *cpu_tsc; | ||
1405 | unsigned prev, next; | ||
1406 | int i, err; | ||
1407 | |||
1408 | cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); | ||
1409 | if (!cpu_tsc) | ||
1410 | return -ENOMEM; | ||
1411 | |||
1412 | mutex_lock(&mce_read_mutex); | ||
1413 | next = rcu_dereference(mcelog.next); | ||
1414 | |||
1415 | /* Only supports full reads right now */ | ||
1416 | if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { | ||
1417 | mutex_unlock(&mce_read_mutex); | ||
1418 | kfree(cpu_tsc); | ||
1419 | |||
1420 | return -EINVAL; | ||
1421 | } | ||
1422 | |||
1423 | err = 0; | ||
1424 | prev = 0; | ||
1425 | do { | ||
1426 | for (i = prev; i < next; i++) { | ||
1427 | unsigned long start = jiffies; | ||
1428 | |||
1429 | while (!mcelog.entry[i].finished) { | ||
1430 | if (time_after_eq(jiffies, start + 2)) { | ||
1431 | memset(mcelog.entry + i, 0, | ||
1432 | sizeof(struct mce)); | ||
1433 | goto timeout; | ||
1434 | } | ||
1435 | cpu_relax(); | ||
1436 | } | ||
1437 | smp_rmb(); | ||
1438 | err |= copy_to_user(buf, mcelog.entry + i, | ||
1439 | sizeof(struct mce)); | ||
1440 | buf += sizeof(struct mce); | ||
1441 | timeout: | ||
1442 | ; | ||
1443 | } | ||
1444 | |||
1445 | memset(mcelog.entry + prev, 0, | ||
1446 | (next - prev) * sizeof(struct mce)); | ||
1447 | prev = next; | ||
1448 | next = cmpxchg(&mcelog.next, prev, 0); | ||
1449 | } while (next != prev); | ||
1450 | |||
1451 | synchronize_sched(); | ||
1452 | |||
1453 | /* | ||
1454 | * Collect entries that were still getting written before the | ||
1455 | * synchronize. | ||
1456 | */ | ||
1457 | on_each_cpu(collect_tscs, cpu_tsc, 1); | ||
1458 | |||
1459 | for (i = next; i < MCE_LOG_LEN; i++) { | ||
1460 | if (mcelog.entry[i].finished && | ||
1461 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { | ||
1462 | err |= copy_to_user(buf, mcelog.entry+i, | ||
1463 | sizeof(struct mce)); | ||
1464 | smp_rmb(); | ||
1465 | buf += sizeof(struct mce); | ||
1466 | memset(&mcelog.entry[i], 0, sizeof(struct mce)); | ||
1467 | } | ||
1468 | } | ||
1469 | mutex_unlock(&mce_read_mutex); | ||
1470 | kfree(cpu_tsc); | ||
1471 | |||
1472 | return err ? -EFAULT : buf - ubuf; | ||
1473 | } | ||
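From userspace the device only accepts full-buffer reads, which is why mcelog(8) first queries the record and log lengths. A minimal, lightly error-checked sketch of that sequence (MCE_GET_RECORD_LEN and MCE_GET_LOG_LEN are assumed to come from the exported asm/mce.h; in practice you would simply run mcelog):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>		/* MCE_GET_RECORD_LEN, MCE_GET_LOG_LEN */

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int reclen, loglen;
	char *buf;
	ssize_t n;

	if (fd < 0)
		return 1;
	if (ioctl(fd, MCE_GET_RECORD_LEN, &reclen) < 0 ||
	    ioctl(fd, MCE_GET_LOG_LEN, &loglen) < 0)
		return 1;

	/* The driver rejects partial reads, so size the buffer for the full log. */
	buf = malloc((size_t)reclen * loglen);
	if (!buf)
		return 1;
	n = read(fd, buf, (size_t)reclen * loglen);
	if (n < 0)
		n = 0;
	printf("read %zd bytes (%d records of %d bytes each)\n", n, loglen, reclen);
	free(buf);
	close(fd);
	return 0;
}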
1474 | |||
1475 | static unsigned int mce_poll(struct file *file, poll_table *wait) | ||
1476 | { | ||
1477 | poll_wait(file, &mce_wait, wait); | ||
1478 | if (rcu_dereference(mcelog.next)) | ||
1479 | return POLLIN | POLLRDNORM; | ||
1480 | return 0; | ||
1481 | } | ||
1482 | |||
1483 | static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
1484 | { | ||
1485 | int __user *p = (int __user *)arg; | ||
1486 | |||
1487 | if (!capable(CAP_SYS_ADMIN)) | ||
1488 | return -EPERM; | ||
1489 | |||
1490 | switch (cmd) { | ||
1491 | case MCE_GET_RECORD_LEN: | ||
1492 | return put_user(sizeof(struct mce), p); | ||
1493 | case MCE_GET_LOG_LEN: | ||
1494 | return put_user(MCE_LOG_LEN, p); | ||
1495 | case MCE_GETCLEAR_FLAGS: { | ||
1496 | unsigned flags; | ||
1497 | |||
1498 | do { | ||
1499 | flags = mcelog.flags; | ||
1500 | } while (cmpxchg(&mcelog.flags, flags, 0) != flags); | ||
1501 | |||
1502 | return put_user(flags, p); | ||
1503 | } | ||
1504 | default: | ||
1505 | return -ENOTTY; | ||
1506 | } | ||
1507 | } | ||
1508 | |||
1509 | /* Modified in mce-inject.c, so not static or const */ | ||
1510 | struct file_operations mce_chrdev_ops = { | ||
1511 | .open = mce_open, | ||
1512 | .release = mce_release, | ||
1513 | .read = mce_read, | ||
1514 | .poll = mce_poll, | ||
1515 | .unlocked_ioctl = mce_ioctl, | ||
1516 | }; | ||
1517 | EXPORT_SYMBOL_GPL(mce_chrdev_ops); | ||
1518 | |||
1519 | static struct miscdevice mce_log_device = { | ||
1520 | MISC_MCELOG_MINOR, | ||
1521 | "mcelog", | ||
1522 | &mce_chrdev_ops, | ||
1523 | }; | ||
1524 | |||
1525 | /* | ||
1526 | * mce=off Disables machine check | ||
1527 | * mce=no_cmci Disables CMCI | ||
1528 | * mce=dont_log_ce Clears corrected events silently, no log created for CEs. | ||
1529 | * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. | ||
1530 | * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) | ||
1531 | * monarchtimeout is how long to wait for other CPUs on machine | ||
1532 | * check, or 0 to not wait | ||
1533 | * mce=bootlog Log MCEs from before booting. Disabled by default on AMD. | ||
1534 | * mce=nobootlog Don't log MCEs from before booting. | ||
1535 | */ | ||
1536 | static int __init mcheck_enable(char *str) | ||
1537 | { | ||
1538 | if (*str == 0) | ||
1539 | enable_p5_mce(); | ||
1540 | if (*str == '=') | ||
1541 | str++; | ||
1542 | if (!strcmp(str, "off")) | ||
1543 | mce_disabled = 1; | ||
1544 | else if (!strcmp(str, "no_cmci")) | ||
1545 | mce_cmci_disabled = 1; | ||
1546 | else if (!strcmp(str, "dont_log_ce")) | ||
1547 | mce_dont_log_ce = 1; | ||
1548 | else if (!strcmp(str, "ignore_ce")) | ||
1549 | mce_ignore_ce = 1; | ||
1550 | else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) | ||
1551 | mce_bootlog = (str[0] == 'b'); | ||
1552 | else if (isdigit(str[0])) { | ||
1553 | get_option(&str, &tolerant); | ||
1554 | if (*str == ',') { | ||
1555 | ++str; | ||
1556 | get_option(&str, &monarch_timeout); | ||
1557 | } | ||
1558 | } else { | ||
1559 | printk(KERN_INFO "mce argument %s ignored. Please use /sys\n", | ||
1560 | str); | ||
1561 | return 0; | ||
1562 | } | ||
1563 | return 1; | ||
1564 | } | ||
1565 | __setup("mce", mcheck_enable); | ||
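
The accepted forms are easiest to see with a concrete command line: booting with mce=2,500 sets tolerant=2 and monarch_timeout=500. The stand-alone sketch below mirrors what the get_option() calls in mcheck_enable() do, using plain strtol(); it is an illustration only, not kernel code.

/* Stand-alone sketch of the "mce=TOLERANCELEVEL[,monarchtimeout]" parse,
 * mirroring the get_option() calls in mcheck_enable(). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *arg = "2,500";      /* as if booted with mce=2,500 */
        char *end;
        long tolerant = strtol(arg, &end, 0);
        long monarch_timeout = 0;

        if (*end == ',')
                monarch_timeout = strtol(end + 1, &end, 0);

        printf("tolerant=%ld monarch_timeout=%ld\n",
               tolerant, monarch_timeout);
        return 0;
}
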
1566 | |||
1567 | /* | ||
1568 | * Sysfs support | ||
1569 | */ | ||
1570 | |||
1571 | /* | ||
1572 | * Disable machine checks on suspend and shutdown. We can't really handle | ||
1573 | * them later. | ||
1574 | */ | ||
1575 | static int mce_disable(void) | ||
1576 | { | ||
1577 | int i; | ||
1578 | |||
1579 | for (i = 0; i < banks; i++) { | ||
1580 | if (!skip_bank_init(i)) | ||
1581 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
1582 | } | ||
1583 | return 0; | ||
1584 | } | ||
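
The MSR_IA32_MC0_CTL + i*4 arithmetic used here (and in the reenable path below) works because each machine-check bank owns four consecutive MSRs starting at 0x400. A sketch of that layout, with the numeric values as published in the Intel SDM and asm/msr-index.h; the MCx_*(i) helper macros are only illustrative shorthand, not part of this patch:

/* Per-bank MSR layout assumed by the "base + i*4" arithmetic above. */
#define MSR_IA32_MC0_CTL        0x00000400
#define MSR_IA32_MC0_STATUS     0x00000401
#define MSR_IA32_MC0_ADDR       0x00000402
#define MSR_IA32_MC0_MISC       0x00000403

#define MSR_IA32_MCx_CTL(i)     (MSR_IA32_MC0_CTL + 4 * (i))
#define MSR_IA32_MCx_STATUS(i)  (MSR_IA32_MC0_STATUS + 4 * (i))
#define MSR_IA32_MCx_ADDR(i)    (MSR_IA32_MC0_ADDR + 4 * (i))
#define MSR_IA32_MCx_MISC(i)    (MSR_IA32_MC0_MISC + 4 * (i))
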
1585 | |||
1586 | static int mce_suspend(struct sys_device *dev, pm_message_t state) | ||
1587 | { | ||
1588 | return mce_disable(); | ||
1589 | } | ||
1590 | |||
1591 | static int mce_shutdown(struct sys_device *dev) | ||
1592 | { | ||
1593 | return mce_disable(); | ||
1594 | } | ||
1595 | |||
1596 | /* | ||
1597 | * On resume clear all MCE state. Don't want to see leftovers from the BIOS. | ||
1598 | * Only one CPU is active at this time, the others get re-added later using | ||
1599 | * CPU hotplug: | ||
1600 | */ | ||
1601 | static int mce_resume(struct sys_device *dev) | ||
1602 | { | ||
1603 | mce_init(); | ||
1604 | mce_cpu_features(&current_cpu_data); | ||
1605 | |||
1606 | return 0; | ||
1607 | } | ||
1608 | |||
1609 | static void mce_cpu_restart(void *data) | ||
1610 | { | ||
1611 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
1612 | if (mce_available(&current_cpu_data)) | ||
1613 | mce_init(); | ||
1614 | mce_init_timer(); | ||
1615 | } | ||
1616 | |||
1617 | /* Reinit MCEs after user configuration changes */ | ||
1618 | static void mce_restart(void) | ||
1619 | { | ||
1620 | on_each_cpu(mce_cpu_restart, NULL, 1); | ||
1621 | } | ||
1622 | |||
1623 | static struct sysdev_class mce_sysclass = { | ||
1624 | .suspend = mce_suspend, | ||
1625 | .shutdown = mce_shutdown, | ||
1626 | .resume = mce_resume, | ||
1627 | .name = "machinecheck", | ||
1628 | }; | ||
1629 | |||
1630 | DEFINE_PER_CPU(struct sys_device, mce_dev); | ||
1631 | |||
1632 | __cpuinitdata | ||
1633 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | ||
1634 | |||
1635 | static struct sysdev_attribute *bank_attrs; | ||
1636 | |||
1637 | static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, | ||
1638 | char *buf) | ||
1639 | { | ||
1640 | u64 b = bank[attr - bank_attrs]; | ||
1641 | |||
1642 | return sprintf(buf, "%llx\n", b); | ||
1643 | } | ||
1644 | |||
1645 | static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, | ||
1646 | const char *buf, size_t size) | ||
1647 | { | ||
1648 | u64 new; | ||
1649 | |||
1650 | if (strict_strtoull(buf, 0, &new) < 0) | ||
1651 | return -EINVAL; | ||
1652 | |||
1653 | bank[attr - bank_attrs] = new; | ||
1654 | mce_restart(); | ||
1655 | |||
1656 | return size; | ||
1657 | } | ||
1658 | |||
1659 | static ssize_t | ||
1660 | show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf) | ||
1661 | { | ||
1662 | strcpy(buf, trigger); | ||
1663 | strcat(buf, "\n"); | ||
1664 | return strlen(trigger) + 1; | ||
1665 | } | ||
1666 | |||
1667 | static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, | ||
1668 | const char *buf, size_t siz) | ||
1669 | { | ||
1670 | char *p; | ||
1671 | int len; | ||
1672 | |||
1673 | strncpy(trigger, buf, sizeof(trigger)); | ||
1674 | trigger[sizeof(trigger)-1] = 0; | ||
1675 | len = strlen(trigger); | ||
1676 | p = strchr(trigger, '\n'); | ||
1677 | |||
1678 | if (p) | ||
1679 | *p = 0; | ||
1680 | |||
1681 | return len; | ||
1682 | } | ||
1683 | |||
1684 | static ssize_t store_int_with_restart(struct sys_device *s, | ||
1685 | struct sysdev_attribute *attr, | ||
1686 | const char *buf, size_t size) | ||
1687 | { | ||
1688 | ssize_t ret = sysdev_store_int(s, attr, buf, size); | ||
1689 | mce_restart(); | ||
1690 | return ret; | ||
1691 | } | ||
1692 | |||
1693 | static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); | ||
1694 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); | ||
1695 | static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout); | ||
1696 | |||
1697 | static struct sysdev_ext_attribute attr_check_interval = { | ||
1698 | _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int, | ||
1699 | store_int_with_restart), | ||
1700 | &check_interval | ||
1701 | }; | ||
1702 | |||
1703 | static struct sysdev_attribute *mce_attrs[] = { | ||
1704 | &attr_tolerant.attr, &attr_check_interval.attr, &attr_trigger, | ||
1705 | &attr_monarch_timeout.attr, | ||
1706 | NULL | ||
1707 | }; | ||
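
These attributes end up under the "machinecheck" sysdev class, typically at /sys/devices/system/machinecheck/machinecheck<cpu>/. A small user-space sketch of driving them is shown below; the path and value are illustrative assumptions only.

/* Illustration: poke the per-CPU attributes registered above from user
 * space.  Path assumes the usual sysdev layout for the "machinecheck"
 * class; adjust the CPU number as needed. */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/machinecheck/machinecheck0/check_interval";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        /* store_int_with_restart() reprograms all CPUs via mce_restart(). */
        fprintf(f, "300\n");    /* poll for corrected errors every 5 minutes */
        fclose(f);
        return 0;
}
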
1708 | |||
1709 | static cpumask_var_t mce_dev_initialized; | ||
1710 | |||
1711 | /* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */ | ||
1712 | static __cpuinit int mce_create_device(unsigned int cpu) | ||
1713 | { | ||
1714 | int err; | ||
1715 | int i; | ||
1716 | |||
1717 | if (!mce_available(&boot_cpu_data)) | ||
1718 | return -EIO; | ||
1719 | |||
1720 | memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject)); | ||
1721 | per_cpu(mce_dev, cpu).id = cpu; | ||
1722 | per_cpu(mce_dev, cpu).cls = &mce_sysclass; | ||
1723 | |||
1724 | err = sysdev_register(&per_cpu(mce_dev, cpu)); | ||
1725 | if (err) | ||
1726 | return err; | ||
1727 | |||
1728 | for (i = 0; mce_attrs[i]; i++) { | ||
1729 | err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); | ||
1730 | if (err) | ||
1731 | goto error; | ||
1732 | } | ||
1733 | for (i = 0; i < banks; i++) { | ||
1734 | err = sysdev_create_file(&per_cpu(mce_dev, cpu), | ||
1735 | &bank_attrs[i]); | ||
1736 | if (err) | ||
1737 | goto error2; | ||
1738 | } | ||
1739 | cpumask_set_cpu(cpu, mce_dev_initialized); | ||
1740 | |||
1741 | return 0; | ||
1742 | error2: | ||
1743 | while (--i >= 0) | ||
1744 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]); | ||
1745 | error: | ||
1746 | while (--i >= 0) | ||
1747 | sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); | ||
1748 | |||
1749 | sysdev_unregister(&per_cpu(mce_dev, cpu)); | ||
1750 | |||
1751 | return err; | ||
1752 | } | ||
1753 | |||
1754 | static __cpuinit void mce_remove_device(unsigned int cpu) | ||
1755 | { | ||
1756 | int i; | ||
1757 | |||
1758 | if (!cpumask_test_cpu(cpu, mce_dev_initialized)) | ||
1759 | return; | ||
1760 | |||
1761 | for (i = 0; mce_attrs[i]; i++) | ||
1762 | sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); | ||
1763 | |||
1764 | for (i = 0; i < banks; i++) | ||
1765 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]); | ||
1766 | |||
1767 | sysdev_unregister(&per_cpu(mce_dev, cpu)); | ||
1768 | cpumask_clear_cpu(cpu, mce_dev_initialized); | ||
1769 | } | ||
1770 | |||
1771 | /* Make sure there are no machine checks on offlined CPUs. */ | ||
1772 | static void mce_disable_cpu(void *h) | ||
1773 | { | ||
1774 | unsigned long action = *(unsigned long *)h; | ||
1775 | int i; | ||
1776 | |||
1777 | if (!mce_available(&current_cpu_data)) | ||
1778 | return; | ||
1779 | if (!(action & CPU_TASKS_FROZEN)) | ||
1780 | cmci_clear(); | ||
1781 | for (i = 0; i < banks; i++) { | ||
1782 | if (!skip_bank_init(i)) | ||
1783 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
1784 | } | ||
1785 | } | ||
1786 | |||
1787 | static void mce_reenable_cpu(void *h) | ||
1788 | { | ||
1789 | unsigned long action = *(unsigned long *)h; | ||
1790 | int i; | ||
1791 | |||
1792 | if (!mce_available(&current_cpu_data)) | ||
1793 | return; | ||
1794 | |||
1795 | if (!(action & CPU_TASKS_FROZEN)) | ||
1796 | cmci_reenable(); | ||
1797 | for (i = 0; i < banks; i++) { | ||
1798 | if (!skip_bank_init(i)) | ||
1799 | wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); | ||
1800 | } | ||
1801 | } | ||
1802 | |||
1803 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | ||
1804 | static int __cpuinit | ||
1805 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
1806 | { | ||
1807 | unsigned int cpu = (unsigned long)hcpu; | ||
1808 | struct timer_list *t = &per_cpu(mce_timer, cpu); | ||
1809 | |||
1810 | switch (action) { | ||
1811 | case CPU_ONLINE: | ||
1812 | case CPU_ONLINE_FROZEN: | ||
1813 | mce_create_device(cpu); | ||
1814 | if (threshold_cpu_callback) | ||
1815 | threshold_cpu_callback(action, cpu); | ||
1816 | break; | ||
1817 | case CPU_DEAD: | ||
1818 | case CPU_DEAD_FROZEN: | ||
1819 | if (threshold_cpu_callback) | ||
1820 | threshold_cpu_callback(action, cpu); | ||
1821 | mce_remove_device(cpu); | ||
1822 | break; | ||
1823 | case CPU_DOWN_PREPARE: | ||
1824 | case CPU_DOWN_PREPARE_FROZEN: | ||
1825 | del_timer_sync(t); | ||
1826 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); | ||
1827 | break; | ||
1828 | case CPU_DOWN_FAILED: | ||
1829 | case CPU_DOWN_FAILED_FROZEN: | ||
1830 | t->expires = round_jiffies(jiffies + | ||
1831 | __get_cpu_var(next_interval)); | ||
1832 | add_timer_on(t, cpu); | ||
1833 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | ||
1834 | break; | ||
1835 | case CPU_POST_DEAD: | ||
1836 | /* intentionally ignoring frozen here */ | ||
1837 | cmci_rediscover(cpu); | ||
1838 | break; | ||
1839 | } | ||
1840 | return NOTIFY_OK; | ||
1841 | } | ||
1842 | |||
1843 | static struct notifier_block mce_cpu_notifier __cpuinitdata = { | ||
1844 | .notifier_call = mce_cpu_callback, | ||
1845 | }; | ||
1846 | |||
1847 | static __init int mce_init_banks(void) | ||
1848 | { | ||
1849 | int i; | ||
1850 | |||
1851 | bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, | ||
1852 | GFP_KERNEL); | ||
1853 | if (!bank_attrs) | ||
1854 | return -ENOMEM; | ||
1855 | |||
1856 | for (i = 0; i < banks; i++) { | ||
1857 | struct sysdev_attribute *a = &bank_attrs[i]; | ||
1858 | |||
1859 | a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); | ||
1860 | if (!a->attr.name) | ||
1861 | goto nomem; | ||
1862 | |||
1863 | a->attr.mode = 0644; | ||
1864 | a->show = show_bank; | ||
1865 | a->store = set_bank; | ||
1866 | } | ||
1867 | return 0; | ||
1868 | |||
1869 | nomem: | ||
1870 | while (--i >= 0) | ||
1871 | kfree(bank_attrs[i].attr.name); | ||
1872 | kfree(bank_attrs); | ||
1873 | bank_attrs = NULL; | ||
1874 | |||
1875 | return -ENOMEM; | ||
1876 | } | ||
1877 | |||
1878 | static __init int mce_init_device(void) | ||
1879 | { | ||
1880 | int err; | ||
1881 | int i = 0; | ||
1882 | |||
1883 | if (!mce_available(&boot_cpu_data)) | ||
1884 | return -EIO; | ||
1885 | |||
1886 | zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL); | ||
1887 | |||
1888 | err = mce_init_banks(); | ||
1889 | if (err) | ||
1890 | return err; | ||
1891 | |||
1892 | err = sysdev_class_register(&mce_sysclass); | ||
1893 | if (err) | ||
1894 | return err; | ||
1895 | |||
1896 | for_each_online_cpu(i) { | ||
1897 | err = mce_create_device(i); | ||
1898 | if (err) | ||
1899 | return err; | ||
1900 | } | ||
1901 | |||
1902 | register_hotcpu_notifier(&mce_cpu_notifier); | ||
1903 | misc_register(&mce_log_device); | ||
1904 | |||
1905 | return err; | ||
1906 | } | ||
1907 | |||
1908 | device_initcall(mce_init_device); | ||
1909 | |||
1910 | #else /* CONFIG_X86_OLD_MCE: */ | ||
1911 | |||
1912 | int nr_mce_banks; | ||
1913 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ | ||
1914 | |||
1915 | /* This has to be run for each processor */ | ||
1916 | void mcheck_init(struct cpuinfo_x86 *c) | ||
1917 | { | ||
1918 | if (mce_disabled == 1) | ||
1919 | return; | ||
1920 | |||
1921 | switch (c->x86_vendor) { | ||
1922 | case X86_VENDOR_AMD: | ||
1923 | amd_mcheck_init(c); | ||
1924 | break; | ||
1925 | |||
1926 | case X86_VENDOR_INTEL: | ||
1927 | if (c->x86 == 5) | ||
1928 | intel_p5_mcheck_init(c); | ||
1929 | if (c->x86 == 6) | ||
1930 | intel_p6_mcheck_init(c); | ||
1931 | if (c->x86 == 15) | ||
1932 | intel_p4_mcheck_init(c); | ||
1933 | break; | ||
1934 | |||
1935 | case X86_VENDOR_CENTAUR: | ||
1936 | if (c->x86 == 5) | ||
1937 | winchip_mcheck_init(c); | ||
1938 | break; | ||
1939 | |||
1940 | default: | ||
1941 | break; | ||
1942 | } | ||
1943 | printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks); | ||
1944 | } | ||
1945 | |||
1946 | static int __init mcheck_enable(char *str) | ||
1947 | { | ||
1948 | mce_disabled = -1; | ||
1949 | return 1; | ||
1950 | } | ||
1951 | |||
1952 | __setup("mce", mcheck_enable); | ||
1953 | |||
1954 | #endif /* CONFIG_X86_OLD_MCE */ | ||
1955 | |||
1956 | /* | ||
1957 | * Old style boot options parsing. Only for compatibility. | ||
1958 | */ | ||
1959 | static int __init mcheck_disable(char *str) | ||
1960 | { | ||
1961 | mce_disabled = 1; | ||
1962 | return 1; | ||
1963 | } | ||
1964 | __setup("nomce", mcheck_disable); | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h index ae9f628838f1..84a552b458c8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.h +++ b/arch/x86/kernel/cpu/mcheck/mce.h | |||
@@ -1,14 +1,38 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <asm/mce.h> | 2 | #include <asm/mce.h> |
3 | 3 | ||
4 | #ifdef CONFIG_X86_OLD_MCE | ||
4 | void amd_mcheck_init(struct cpuinfo_x86 *c); | 5 | void amd_mcheck_init(struct cpuinfo_x86 *c); |
5 | void intel_p4_mcheck_init(struct cpuinfo_x86 *c); | 6 | void intel_p4_mcheck_init(struct cpuinfo_x86 *c); |
6 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c); | ||
7 | void intel_p6_mcheck_init(struct cpuinfo_x86 *c); | 7 | void intel_p6_mcheck_init(struct cpuinfo_x86 *c); |
8 | #endif | ||
9 | |||
10 | #ifdef CONFIG_X86_ANCIENT_MCE | ||
11 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c); | ||
8 | void winchip_mcheck_init(struct cpuinfo_x86 *c); | 12 | void winchip_mcheck_init(struct cpuinfo_x86 *c); |
13 | extern int mce_p5_enable; | ||
14 | static inline int mce_p5_enabled(void) { return mce_p5_enable; } | ||
15 | static inline void enable_p5_mce(void) { mce_p5_enable = 1; } | ||
16 | #else | ||
17 | static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {} | ||
18 | static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {} | ||
19 | static inline int mce_p5_enabled(void) { return 0; } | ||
20 | static inline void enable_p5_mce(void) { } | ||
21 | #endif | ||
9 | 22 | ||
10 | /* Call the installed machine check handler for this CPU setup. */ | 23 | /* Call the installed machine check handler for this CPU setup. */ |
11 | extern void (*machine_check_vector)(struct pt_regs *, long error_code); | 24 | extern void (*machine_check_vector)(struct pt_regs *, long error_code); |
12 | 25 | ||
26 | #ifdef CONFIG_X86_OLD_MCE | ||
27 | |||
13 | extern int nr_mce_banks; | 28 | extern int nr_mce_banks; |
14 | 29 | ||
30 | void intel_set_thermal_handler(void); | ||
31 | |||
32 | #else | ||
33 | |||
34 | static inline void intel_set_thermal_handler(void) { } | ||
35 | |||
36 | #endif | ||
37 | |||
38 | void intel_init_thermal(struct cpuinfo_x86 *c); | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c deleted file mode 100644 index 3552119b091d..000000000000 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ /dev/null | |||
@@ -1,76 +0,0 @@ | |||
1 | /* | ||
2 | * mce.c - x86 Machine Check Exception Reporting | ||
3 | * (c) 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>, Dave Jones <davej@redhat.com> | ||
4 | */ | ||
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <linux/thread_info.h> | ||
12 | |||
13 | #include <asm/processor.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <asm/mce.h> | ||
16 | |||
17 | #include "mce.h" | ||
18 | |||
19 | int mce_disabled; | ||
20 | int nr_mce_banks; | ||
21 | |||
22 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ | ||
23 | |||
24 | /* Handle unconfigured int18 (should never happen) */ | ||
25 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) | ||
26 | { | ||
27 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); | ||
28 | } | ||
29 | |||
30 | /* Call the installed machine check handler for this CPU setup. */ | ||
31 | void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check; | ||
32 | |||
33 | /* This has to be run for each processor */ | ||
34 | void mcheck_init(struct cpuinfo_x86 *c) | ||
35 | { | ||
36 | if (mce_disabled == 1) | ||
37 | return; | ||
38 | |||
39 | switch (c->x86_vendor) { | ||
40 | case X86_VENDOR_AMD: | ||
41 | amd_mcheck_init(c); | ||
42 | break; | ||
43 | |||
44 | case X86_VENDOR_INTEL: | ||
45 | if (c->x86 == 5) | ||
46 | intel_p5_mcheck_init(c); | ||
47 | if (c->x86 == 6) | ||
48 | intel_p6_mcheck_init(c); | ||
49 | if (c->x86 == 15) | ||
50 | intel_p4_mcheck_init(c); | ||
51 | break; | ||
52 | |||
53 | case X86_VENDOR_CENTAUR: | ||
54 | if (c->x86 == 5) | ||
55 | winchip_mcheck_init(c); | ||
56 | break; | ||
57 | |||
58 | default: | ||
59 | break; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static int __init mcheck_disable(char *str) | ||
64 | { | ||
65 | mce_disabled = 1; | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | static int __init mcheck_enable(char *str) | ||
70 | { | ||
71 | mce_disabled = -1; | ||
72 | return 1; | ||
73 | } | ||
74 | |||
75 | __setup("nomce", mcheck_disable); | ||
76 | __setup("mce", mcheck_enable); | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c deleted file mode 100644 index 289cc4815028..000000000000 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ /dev/null | |||
@@ -1,1188 +0,0 @@ | |||
1 | /* | ||
2 | * Machine check handler. | ||
3 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. | ||
4 | * Rest from unknown author(s). | ||
5 | * 2004 Andi Kleen. Rewrote most of it. | ||
6 | * Copyright 2008 Intel Corporation | ||
7 | * Author: Andi Kleen | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/smp_lock.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/rcupdate.h> | ||
17 | #include <linux/kallsyms.h> | ||
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/miscdevice.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/capability.h> | ||
22 | #include <linux/cpu.h> | ||
23 | #include <linux/percpu.h> | ||
24 | #include <linux/poll.h> | ||
25 | #include <linux/thread_info.h> | ||
26 | #include <linux/ctype.h> | ||
27 | #include <linux/kmod.h> | ||
28 | #include <linux/kdebug.h> | ||
29 | #include <linux/kobject.h> | ||
30 | #include <linux/sysfs.h> | ||
31 | #include <linux/ratelimit.h> | ||
32 | #include <asm/processor.h> | ||
33 | #include <asm/msr.h> | ||
34 | #include <asm/mce.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <asm/smp.h> | ||
37 | #include <asm/idle.h> | ||
38 | |||
39 | #define MISC_MCELOG_MINOR 227 | ||
40 | |||
41 | atomic_t mce_entry; | ||
42 | |||
43 | static int mce_dont_init; | ||
44 | |||
45 | /* | ||
46 | * Tolerant levels: | ||
47 | * 0: always panic on uncorrected errors, log corrected errors | ||
48 | * 1: panic or SIGBUS on uncorrected errors, log corrected errors | ||
49 | * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors | ||
50 | * 3: never panic or SIGBUS, log all errors (for testing only) | ||
51 | */ | ||
52 | static int tolerant = 1; | ||
53 | static int banks; | ||
54 | static u64 *bank; | ||
55 | static unsigned long notify_user; | ||
56 | static int rip_msr; | ||
57 | static int mce_bootlog = -1; | ||
58 | static atomic_t mce_events; | ||
59 | |||
60 | static char trigger[128]; | ||
61 | static char *trigger_argv[2] = { trigger, NULL }; | ||
62 | |||
63 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); | ||
64 | |||
65 | /* MCA banks polled by the period polling timer for corrected events */ | ||
66 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | ||
67 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | ||
68 | }; | ||
69 | |||
70 | /* Do initial initialization of a struct mce */ | ||
71 | void mce_setup(struct mce *m) | ||
72 | { | ||
73 | memset(m, 0, sizeof(struct mce)); | ||
74 | m->cpu = smp_processor_id(); | ||
75 | rdtscll(m->tsc); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Lockless MCE logging infrastructure. | ||
80 | * This avoids deadlocks on printk locks without having to break locks. Also | ||
81 | * separate MCEs from kernel messages to avoid bogus bug reports. | ||
82 | */ | ||
83 | |||
84 | static struct mce_log mcelog = { | ||
85 | MCE_LOG_SIGNATURE, | ||
86 | MCE_LOG_LEN, | ||
87 | }; | ||
88 | |||
89 | void mce_log(struct mce *mce) | ||
90 | { | ||
91 | unsigned next, entry; | ||
92 | atomic_inc(&mce_events); | ||
93 | mce->finished = 0; | ||
94 | wmb(); | ||
95 | for (;;) { | ||
96 | entry = rcu_dereference(mcelog.next); | ||
97 | for (;;) { | ||
98 | /* When the buffer fills up discard new entries. Assume | ||
99 | that the earlier errors are the more interesting. */ | ||
100 | if (entry >= MCE_LOG_LEN) { | ||
101 | set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags); | ||
102 | return; | ||
103 | } | ||
104 | /* Old left over entry. Skip. */ | ||
105 | if (mcelog.entry[entry].finished) { | ||
106 | entry++; | ||
107 | continue; | ||
108 | } | ||
109 | break; | ||
110 | } | ||
111 | smp_rmb(); | ||
112 | next = entry + 1; | ||
113 | if (cmpxchg(&mcelog.next, entry, next) == entry) | ||
114 | break; | ||
115 | } | ||
116 | memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); | ||
117 | wmb(); | ||
118 | mcelog.entry[entry].finished = 1; | ||
119 | wmb(); | ||
120 | |||
121 | set_bit(0, ¬ify_user); | ||
122 | } | ||
123 | |||
124 | static void print_mce(struct mce *m) | ||
125 | { | ||
126 | printk(KERN_EMERG "\n" | ||
127 | KERN_EMERG "HARDWARE ERROR\n" | ||
128 | KERN_EMERG | ||
129 | "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", | ||
130 | m->cpu, m->mcgstatus, m->bank, m->status); | ||
131 | if (m->ip) { | ||
132 | printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", | ||
133 | !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", | ||
134 | m->cs, m->ip); | ||
135 | if (m->cs == __KERNEL_CS) | ||
136 | print_symbol("{%s}", m->ip); | ||
137 | printk("\n"); | ||
138 | } | ||
139 | printk(KERN_EMERG "TSC %llx ", m->tsc); | ||
140 | if (m->addr) | ||
141 | printk("ADDR %llx ", m->addr); | ||
142 | if (m->misc) | ||
143 | printk("MISC %llx ", m->misc); | ||
144 | printk("\n"); | ||
145 | printk(KERN_EMERG "This is not a software problem!\n"); | ||
146 | printk(KERN_EMERG "Run through mcelog --ascii to decode " | ||
147 | "and contact your hardware vendor\n"); | ||
148 | } | ||
149 | |||
150 | static void mce_panic(char *msg, struct mce *backup, unsigned long start) | ||
151 | { | ||
152 | int i; | ||
153 | |||
154 | oops_begin(); | ||
155 | for (i = 0; i < MCE_LOG_LEN; i++) { | ||
156 | unsigned long tsc = mcelog.entry[i].tsc; | ||
157 | |||
158 | if (time_before(tsc, start)) | ||
159 | continue; | ||
160 | print_mce(&mcelog.entry[i]); | ||
161 | if (backup && mcelog.entry[i].tsc == backup->tsc) | ||
162 | backup = NULL; | ||
163 | } | ||
164 | if (backup) | ||
165 | print_mce(backup); | ||
166 | panic(msg); | ||
167 | } | ||
168 | |||
169 | int mce_available(struct cpuinfo_x86 *c) | ||
170 | { | ||
171 | if (mce_dont_init) | ||
172 | return 0; | ||
173 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); | ||
174 | } | ||
175 | |||
176 | static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | ||
177 | { | ||
178 | if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) { | ||
179 | m->ip = regs->ip; | ||
180 | m->cs = regs->cs; | ||
181 | } else { | ||
182 | m->ip = 0; | ||
183 | m->cs = 0; | ||
184 | } | ||
185 | if (rip_msr) { | ||
186 | /* Assume the RIP in the MSR is exact. Is this true? */ | ||
187 | m->mcgstatus |= MCG_STATUS_EIPV; | ||
188 | rdmsrl(rip_msr, m->ip); | ||
189 | m->cs = 0; | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Poll for corrected events or events that happened before reset. | ||
195 | * Those are just logged through /dev/mcelog. | ||
196 | * | ||
197 | * This is executed in standard interrupt context. | ||
198 | */ | ||
199 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | ||
200 | { | ||
201 | struct mce m; | ||
202 | int i; | ||
203 | |||
204 | mce_setup(&m); | ||
205 | |||
206 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | ||
207 | for (i = 0; i < banks; i++) { | ||
208 | if (!bank[i] || !test_bit(i, *b)) | ||
209 | continue; | ||
210 | |||
211 | m.misc = 0; | ||
212 | m.addr = 0; | ||
213 | m.bank = i; | ||
214 | m.tsc = 0; | ||
215 | |||
216 | barrier(); | ||
217 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | ||
218 | if (!(m.status & MCI_STATUS_VAL)) | ||
219 | continue; | ||
220 | |||
221 | /* | ||
222 | * Uncorrected events are handled by the exception handler | ||
223 | * when it is enabled. But when the exception is disabled log | ||
224 | * everything. | ||
225 | * | ||
226 | * TBD do the same check for MCI_STATUS_EN here? | ||
227 | */ | ||
228 | if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC)) | ||
229 | continue; | ||
230 | |||
231 | if (m.status & MCI_STATUS_MISCV) | ||
232 | rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc); | ||
233 | if (m.status & MCI_STATUS_ADDRV) | ||
234 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | ||
235 | |||
236 | if (!(flags & MCP_TIMESTAMP)) | ||
237 | m.tsc = 0; | ||
238 | /* | ||
239 | * Don't get the IP here because it's unlikely to | ||
240 | * have anything to do with the actual error location. | ||
241 | */ | ||
242 | if (!(flags & MCP_DONTLOG)) { | ||
243 | mce_log(&m); | ||
244 | add_taint(TAINT_MACHINE_CHECK); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Clear state for this bank. | ||
249 | */ | ||
250 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * Don't clear MCG_STATUS here because it's only defined for | ||
255 | * exceptions. | ||
256 | */ | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * The actual machine check handler. This only handles real | ||
261 | * exceptions when something got corrupted coming in through int 18. | ||
262 | * | ||
263 | * This is executed in NMI context not subject to normal locking rules. This | ||
264 | * implies that most kernel services cannot be safely used. Don't even | ||
265 | * think about putting a printk in there! | ||
266 | */ | ||
267 | void do_machine_check(struct pt_regs * regs, long error_code) | ||
268 | { | ||
269 | struct mce m, panicm; | ||
270 | u64 mcestart = 0; | ||
271 | int i; | ||
272 | int panicm_found = 0; | ||
273 | /* | ||
274 | * If no_way_out gets set, there is no safe way to recover from this | ||
275 | * MCE. If tolerant is cranked up, we'll try anyway. | ||
276 | */ | ||
277 | int no_way_out = 0; | ||
278 | /* | ||
279 | * If kill_it gets set, there might be a way to recover from this | ||
280 | * error. | ||
281 | */ | ||
282 | int kill_it = 0; | ||
283 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); | ||
284 | |||
285 | atomic_inc(&mce_entry); | ||
286 | |||
287 | if (notify_die(DIE_NMI, "machine check", regs, error_code, | ||
288 | 18, SIGKILL) == NOTIFY_STOP) | ||
289 | goto out2; | ||
290 | if (!banks) | ||
291 | goto out2; | ||
292 | |||
293 | mce_setup(&m); | ||
294 | |||
295 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | ||
296 | /* if the restart IP is not valid, we're done for */ | ||
297 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) | ||
298 | no_way_out = 1; | ||
299 | |||
300 | rdtscll(mcestart); | ||
301 | barrier(); | ||
302 | |||
303 | for (i = 0; i < banks; i++) { | ||
304 | __clear_bit(i, toclear); | ||
305 | if (!bank[i]) | ||
306 | continue; | ||
307 | |||
308 | m.misc = 0; | ||
309 | m.addr = 0; | ||
310 | m.bank = i; | ||
311 | |||
312 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | ||
313 | if ((m.status & MCI_STATUS_VAL) == 0) | ||
314 | continue; | ||
315 | |||
316 | /* | ||
317 | * Non uncorrected errors are handled by machine_check_poll | ||
318 | * Leave them alone. | ||
319 | */ | ||
320 | if ((m.status & MCI_STATUS_UC) == 0) | ||
321 | continue; | ||
322 | |||
323 | /* | ||
324 | * Set taint even when machine check was not enabled. | ||
325 | */ | ||
326 | add_taint(TAINT_MACHINE_CHECK); | ||
327 | |||
328 | __set_bit(i, toclear); | ||
329 | |||
330 | if (m.status & MCI_STATUS_EN) { | ||
331 | /* if PCC was set, there's no way out */ | ||
332 | no_way_out |= !!(m.status & MCI_STATUS_PCC); | ||
333 | /* | ||
334 | * If this error was uncorrectable and there was | ||
335 | * an overflow, we're in trouble. If no overflow, | ||
336 | * we might get away with just killing a task. | ||
337 | */ | ||
338 | if (m.status & MCI_STATUS_UC) { | ||
339 | if (tolerant < 1 || m.status & MCI_STATUS_OVER) | ||
340 | no_way_out = 1; | ||
341 | kill_it = 1; | ||
342 | } | ||
343 | } else { | ||
344 | /* | ||
345 | * Machine check event was not enabled. Clear, but | ||
346 | * ignore. | ||
347 | */ | ||
348 | continue; | ||
349 | } | ||
350 | |||
351 | if (m.status & MCI_STATUS_MISCV) | ||
352 | rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc); | ||
353 | if (m.status & MCI_STATUS_ADDRV) | ||
354 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | ||
355 | |||
356 | mce_get_rip(&m, regs); | ||
357 | mce_log(&m); | ||
358 | |||
359 | /* Did this bank cause the exception? */ | ||
360 | /* Assume that the bank with uncorrectable errors did it, | ||
361 | and that there is only a single one. */ | ||
362 | if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) { | ||
363 | panicm = m; | ||
364 | panicm_found = 1; | ||
365 | } | ||
366 | } | ||
367 | |||
368 | /* If we didn't find an uncorrectable error, pick | ||
369 | the last one (shouldn't happen, just being safe). */ | ||
370 | if (!panicm_found) | ||
371 | panicm = m; | ||
372 | |||
373 | /* | ||
374 | * If we have decided that we just CAN'T continue, and the user | ||
375 | * has not set tolerant to an insane level, give up and die. | ||
376 | */ | ||
377 | if (no_way_out && tolerant < 3) | ||
378 | mce_panic("Machine check", &panicm, mcestart); | ||
379 | |||
380 | /* | ||
381 | * If the error seems to be unrecoverable, something should be | ||
382 | * done. Try to kill as little as possible. If we can kill just | ||
383 | * one task, do that. If the user has set the tolerance very | ||
384 | * high, don't try to do anything at all. | ||
385 | */ | ||
386 | if (kill_it && tolerant < 3) { | ||
387 | int user_space = 0; | ||
388 | |||
389 | /* | ||
390 | * If the EIPV bit is set, it means the saved IP is the | ||
391 | * instruction which caused the MCE. | ||
392 | */ | ||
393 | if (m.mcgstatus & MCG_STATUS_EIPV) | ||
394 | user_space = panicm.ip && (panicm.cs & 3); | ||
395 | |||
396 | /* | ||
397 | * If we know that the error was in user space, send a | ||
398 | * SIGBUS. Otherwise, panic if tolerance is low. | ||
399 | * | ||
400 | * force_sig() takes an awful lot of locks and has a slight | ||
401 | * risk of deadlocking. | ||
402 | */ | ||
403 | if (user_space) { | ||
404 | force_sig(SIGBUS, current); | ||
405 | } else if (panic_on_oops || tolerant < 2) { | ||
406 | mce_panic("Uncorrected machine check", | ||
407 | &panicm, mcestart); | ||
408 | } | ||
409 | } | ||
410 | |||
411 | /* notify userspace ASAP */ | ||
412 | set_thread_flag(TIF_MCE_NOTIFY); | ||
413 | |||
414 | /* the last thing we do is clear state */ | ||
415 | for (i = 0; i < banks; i++) { | ||
416 | if (test_bit(i, toclear)) | ||
417 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
418 | } | ||
419 | wrmsrl(MSR_IA32_MCG_STATUS, 0); | ||
420 | out2: | ||
421 | atomic_dec(&mce_entry); | ||
422 | } | ||
423 | EXPORT_SYMBOL_GPL(do_machine_check); | ||
424 | |||
425 | #ifdef CONFIG_X86_MCE_INTEL | ||
426 | /*** | ||
427 | * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog | ||
428 | * @cpu: The CPU on which the event occurred. | ||
429 | * @status: Event status information | ||
430 | * | ||
431 | * This function should be called by the thermal interrupt after the | ||
432 | * event has been processed and the decision was made to log the event | ||
433 | * further. | ||
434 | * | ||
435 | * The status parameter will be saved to the 'status' field of 'struct mce' | ||
436 | * and historically has been the register value of the | ||
437 | * MSR_IA32_THERMAL_STATUS (Intel) msr. | ||
438 | */ | ||
439 | void mce_log_therm_throt_event(__u64 status) | ||
440 | { | ||
441 | struct mce m; | ||
442 | |||
443 | mce_setup(&m); | ||
444 | m.bank = MCE_THERMAL_BANK; | ||
445 | m.status = status; | ||
446 | mce_log(&m); | ||
447 | } | ||
448 | #endif /* CONFIG_X86_MCE_INTEL */ | ||
449 | |||
450 | /* | ||
451 | * Periodic polling timer for "silent" machine check errors. If the | ||
452 | * poller finds an MCE, poll 2x faster. When the poller finds no more | ||
453 | * errors, poll 2x slower (up to check_interval seconds). | ||
454 | */ | ||
455 | |||
456 | static int check_interval = 5 * 60; /* 5 minutes */ | ||
457 | static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ | ||
458 | static void mcheck_timer(unsigned long); | ||
459 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | ||
460 | |||
461 | static void mcheck_timer(unsigned long data) | ||
462 | { | ||
463 | struct timer_list *t = &per_cpu(mce_timer, data); | ||
464 | int *n; | ||
465 | |||
466 | WARN_ON(smp_processor_id() != data); | ||
467 | |||
468 | if (mce_available(&current_cpu_data)) | ||
469 | machine_check_poll(MCP_TIMESTAMP, | ||
470 | &__get_cpu_var(mce_poll_banks)); | ||
471 | |||
472 | /* | ||
473 | * Alert userspace if needed. If we logged an MCE, reduce the | ||
474 | * polling interval, otherwise increase the polling interval. | ||
475 | */ | ||
476 | n = &__get_cpu_var(next_interval); | ||
477 | if (mce_notify_user()) { | ||
478 | *n = max(*n/2, HZ/100); | ||
479 | } else { | ||
480 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); | ||
481 | } | ||
482 | |||
483 | t->expires = jiffies + *n; | ||
484 | add_timer(t); | ||
485 | } | ||
486 | |||
487 | static void mce_do_trigger(struct work_struct *work) | ||
488 | { | ||
489 | call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT); | ||
490 | } | ||
491 | |||
492 | static DECLARE_WORK(mce_trigger_work, mce_do_trigger); | ||
493 | |||
494 | /* | ||
495 | * Notify the user(s) about new machine check events. | ||
496 | * Can be called from interrupt context, but not from machine check/NMI | ||
497 | * context. | ||
498 | */ | ||
499 | int mce_notify_user(void) | ||
500 | { | ||
501 | /* Not more than two messages every minute */ | ||
502 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); | ||
503 | |||
504 | clear_thread_flag(TIF_MCE_NOTIFY); | ||
505 | if (test_and_clear_bit(0, ¬ify_user)) { | ||
506 | wake_up_interruptible(&mce_wait); | ||
507 | |||
508 | /* | ||
509 | * There is no risk of missing notifications because | ||
510 | * work_pending is always cleared before the function is | ||
511 | * executed. | ||
512 | */ | ||
513 | if (trigger[0] && !work_pending(&mce_trigger_work)) | ||
514 | schedule_work(&mce_trigger_work); | ||
515 | |||
516 | if (__ratelimit(&ratelimit)) | ||
517 | printk(KERN_INFO "Machine check events logged\n"); | ||
518 | |||
519 | return 1; | ||
520 | } | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | /* see if the idle task needs to notify userspace */ | ||
525 | static int | ||
526 | mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk) | ||
527 | { | ||
528 | /* IDLE_END should be safe - interrupts are back on */ | ||
529 | if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY)) | ||
530 | mce_notify_user(); | ||
531 | |||
532 | return NOTIFY_OK; | ||
533 | } | ||
534 | |||
535 | static struct notifier_block mce_idle_notifier = { | ||
536 | .notifier_call = mce_idle_callback, | ||
537 | }; | ||
538 | |||
539 | static __init int periodic_mcheck_init(void) | ||
540 | { | ||
541 | idle_notifier_register(&mce_idle_notifier); | ||
542 | return 0; | ||
543 | } | ||
544 | __initcall(periodic_mcheck_init); | ||
545 | |||
546 | /* | ||
547 | * Initialize Machine Checks for a CPU. | ||
548 | */ | ||
549 | static int mce_cap_init(void) | ||
550 | { | ||
551 | u64 cap; | ||
552 | unsigned b; | ||
553 | |||
554 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
555 | b = cap & 0xff; | ||
556 | if (b > MAX_NR_BANKS) { | ||
557 | printk(KERN_WARNING | ||
558 | "MCE: Using only %u machine check banks out of %u\n", | ||
559 | MAX_NR_BANKS, b); | ||
560 | b = MAX_NR_BANKS; | ||
561 | } | ||
562 | |||
563 | /* Don't support asymmetric configurations today */ | ||
564 | WARN_ON(banks != 0 && b != banks); | ||
565 | banks = b; | ||
566 | if (!bank) { | ||
567 | bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); | ||
568 | if (!bank) | ||
569 | return -ENOMEM; | ||
570 | memset(bank, 0xff, banks * sizeof(u64)); | ||
571 | } | ||
572 | |||
573 | /* Use accurate RIP reporting if available. */ | ||
574 | if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) | ||
575 | rip_msr = MSR_IA32_MCG_EIP; | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | static void mce_init(void *dummy) | ||
581 | { | ||
582 | u64 cap; | ||
583 | int i; | ||
584 | mce_banks_t all_banks; | ||
585 | |||
586 | /* | ||
587 | * Log the machine checks left over from the previous reset. | ||
588 | */ | ||
589 | bitmap_fill(all_banks, MAX_NR_BANKS); | ||
590 | machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks); | ||
591 | |||
592 | set_in_cr4(X86_CR4_MCE); | ||
593 | |||
594 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
595 | if (cap & MCG_CTL_P) | ||
596 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | ||
597 | |||
598 | for (i = 0; i < banks; i++) { | ||
599 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); | ||
600 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
601 | } | ||
602 | } | ||
603 | |||
604 | /* Add per CPU specific workarounds here */ | ||
605 | static void mce_cpu_quirks(struct cpuinfo_x86 *c) | ||
606 | { | ||
607 | /* This should be disabled by the BIOS, but isn't always */ | ||
608 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
609 | if (c->x86 == 15 && banks > 4) | ||
610 | /* disable GART TBL walk error reporting, which trips off | ||
611 | incorrectly with the IOMMU & 3ware & Cerberus. */ | ||
612 | clear_bit(10, (unsigned long *)&bank[4]); | ||
613 | if(c->x86 <= 17 && mce_bootlog < 0) | ||
614 | /* Lots of broken BIOS around that don't clear them | ||
615 | by default and leave crap in there. Don't log. */ | ||
616 | mce_bootlog = 0; | ||
617 | } | ||
618 | |||
619 | } | ||
620 | |||
621 | static void mce_cpu_features(struct cpuinfo_x86 *c) | ||
622 | { | ||
623 | switch (c->x86_vendor) { | ||
624 | case X86_VENDOR_INTEL: | ||
625 | mce_intel_feature_init(c); | ||
626 | break; | ||
627 | case X86_VENDOR_AMD: | ||
628 | mce_amd_feature_init(c); | ||
629 | break; | ||
630 | default: | ||
631 | break; | ||
632 | } | ||
633 | } | ||
634 | |||
635 | static void mce_init_timer(void) | ||
636 | { | ||
637 | struct timer_list *t = &__get_cpu_var(mce_timer); | ||
638 | int *n = &__get_cpu_var(next_interval); | ||
639 | |||
640 | *n = check_interval * HZ; | ||
641 | if (!*n) | ||
642 | return; | ||
643 | setup_timer(t, mcheck_timer, smp_processor_id()); | ||
644 | t->expires = round_jiffies(jiffies + *n); | ||
645 | add_timer(t); | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * Called for each booted CPU to set up machine checks. | ||
650 | * Must be called with preempt off. | ||
651 | */ | ||
652 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) | ||
653 | { | ||
654 | if (!mce_available(c)) | ||
655 | return; | ||
656 | |||
657 | if (mce_cap_init() < 0) { | ||
658 | mce_dont_init = 1; | ||
659 | return; | ||
660 | } | ||
661 | mce_cpu_quirks(c); | ||
662 | |||
663 | mce_init(NULL); | ||
664 | mce_cpu_features(c); | ||
665 | mce_init_timer(); | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * Character device to read and clear the MCE log. | ||
670 | */ | ||
671 | |||
672 | static DEFINE_SPINLOCK(mce_state_lock); | ||
673 | static int open_count; /* #times opened */ | ||
674 | static int open_exclu; /* already open exclusive? */ | ||
675 | |||
676 | static int mce_open(struct inode *inode, struct file *file) | ||
677 | { | ||
678 | lock_kernel(); | ||
679 | spin_lock(&mce_state_lock); | ||
680 | |||
681 | if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { | ||
682 | spin_unlock(&mce_state_lock); | ||
683 | unlock_kernel(); | ||
684 | return -EBUSY; | ||
685 | } | ||
686 | |||
687 | if (file->f_flags & O_EXCL) | ||
688 | open_exclu = 1; | ||
689 | open_count++; | ||
690 | |||
691 | spin_unlock(&mce_state_lock); | ||
692 | unlock_kernel(); | ||
693 | |||
694 | return nonseekable_open(inode, file); | ||
695 | } | ||
696 | |||
697 | static int mce_release(struct inode *inode, struct file *file) | ||
698 | { | ||
699 | spin_lock(&mce_state_lock); | ||
700 | |||
701 | open_count--; | ||
702 | open_exclu = 0; | ||
703 | |||
704 | spin_unlock(&mce_state_lock); | ||
705 | |||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static void collect_tscs(void *data) | ||
710 | { | ||
711 | unsigned long *cpu_tsc = (unsigned long *)data; | ||
712 | |||
713 | rdtscll(cpu_tsc[smp_processor_id()]); | ||
714 | } | ||
715 | |||
716 | static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | ||
717 | loff_t *off) | ||
718 | { | ||
719 | unsigned long *cpu_tsc; | ||
720 | static DEFINE_MUTEX(mce_read_mutex); | ||
721 | unsigned prev, next; | ||
722 | char __user *buf = ubuf; | ||
723 | int i, err; | ||
724 | |||
725 | cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); | ||
726 | if (!cpu_tsc) | ||
727 | return -ENOMEM; | ||
728 | |||
729 | mutex_lock(&mce_read_mutex); | ||
730 | next = rcu_dereference(mcelog.next); | ||
731 | |||
732 | /* Only supports full reads right now */ | ||
733 | if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { | ||
734 | mutex_unlock(&mce_read_mutex); | ||
735 | kfree(cpu_tsc); | ||
736 | return -EINVAL; | ||
737 | } | ||
738 | |||
739 | err = 0; | ||
740 | prev = 0; | ||
741 | do { | ||
742 | for (i = prev; i < next; i++) { | ||
743 | unsigned long start = jiffies; | ||
744 | |||
745 | while (!mcelog.entry[i].finished) { | ||
746 | if (time_after_eq(jiffies, start + 2)) { | ||
747 | memset(mcelog.entry + i, 0, | ||
748 | sizeof(struct mce)); | ||
749 | goto timeout; | ||
750 | } | ||
751 | cpu_relax(); | ||
752 | } | ||
753 | smp_rmb(); | ||
754 | err |= copy_to_user(buf, mcelog.entry + i, | ||
755 | sizeof(struct mce)); | ||
756 | buf += sizeof(struct mce); | ||
757 | timeout: | ||
758 | ; | ||
759 | } | ||
760 | |||
761 | memset(mcelog.entry + prev, 0, | ||
762 | (next - prev) * sizeof(struct mce)); | ||
763 | prev = next; | ||
764 | next = cmpxchg(&mcelog.next, prev, 0); | ||
765 | } while (next != prev); | ||
766 | |||
767 | synchronize_sched(); | ||
768 | |||
769 | /* | ||
770 | * Collect entries that were still getting written before the | ||
771 | * synchronize. | ||
772 | */ | ||
773 | on_each_cpu(collect_tscs, cpu_tsc, 1); | ||
774 | for (i = next; i < MCE_LOG_LEN; i++) { | ||
775 | if (mcelog.entry[i].finished && | ||
776 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { | ||
777 | err |= copy_to_user(buf, mcelog.entry+i, | ||
778 | sizeof(struct mce)); | ||
779 | smp_rmb(); | ||
780 | buf += sizeof(struct mce); | ||
781 | memset(&mcelog.entry[i], 0, sizeof(struct mce)); | ||
782 | } | ||
783 | } | ||
784 | mutex_unlock(&mce_read_mutex); | ||
785 | kfree(cpu_tsc); | ||
786 | return err ? -EFAULT : buf - ubuf; | ||
787 | } | ||
788 | |||
789 | static unsigned int mce_poll(struct file *file, poll_table *wait) | ||
790 | { | ||
791 | poll_wait(file, &mce_wait, wait); | ||
792 | if (rcu_dereference(mcelog.next)) | ||
793 | return POLLIN | POLLRDNORM; | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
798 | { | ||
799 | int __user *p = (int __user *)arg; | ||
800 | |||
801 | if (!capable(CAP_SYS_ADMIN)) | ||
802 | return -EPERM; | ||
803 | switch (cmd) { | ||
804 | case MCE_GET_RECORD_LEN: | ||
805 | return put_user(sizeof(struct mce), p); | ||
806 | case MCE_GET_LOG_LEN: | ||
807 | return put_user(MCE_LOG_LEN, p); | ||
808 | case MCE_GETCLEAR_FLAGS: { | ||
809 | unsigned flags; | ||
810 | |||
811 | do { | ||
812 | flags = mcelog.flags; | ||
813 | } while (cmpxchg(&mcelog.flags, flags, 0) != flags); | ||
814 | return put_user(flags, p); | ||
815 | } | ||
816 | default: | ||
817 | return -ENOTTY; | ||
818 | } | ||
819 | } | ||
820 | |||
821 | static const struct file_operations mce_chrdev_ops = { | ||
822 | .open = mce_open, | ||
823 | .release = mce_release, | ||
824 | .read = mce_read, | ||
825 | .poll = mce_poll, | ||
826 | .unlocked_ioctl = mce_ioctl, | ||
827 | }; | ||
828 | |||
829 | static struct miscdevice mce_log_device = { | ||
830 | MISC_MCELOG_MINOR, | ||
831 | "mcelog", | ||
832 | &mce_chrdev_ops, | ||
833 | }; | ||
834 | |||
835 | /* | ||
836 | * Old style boot options parsing. Only for compatibility. | ||
837 | */ | ||
838 | static int __init mcheck_disable(char *str) | ||
839 | { | ||
840 | mce_dont_init = 1; | ||
841 | return 1; | ||
842 | } | ||
843 | |||
844 | /* mce=off disables machine check. | ||
845 | mce=TOLERANCELEVEL (number, see above) | ||
846 | mce=bootlog Log MCEs from before booting. Disabled by default on AMD. | ||
847 | mce=nobootlog Don't log MCEs from before booting. */ | ||
848 | static int __init mcheck_enable(char *str) | ||
849 | { | ||
850 | if (!strcmp(str, "off")) | ||
851 | mce_dont_init = 1; | ||
852 | else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog")) | ||
853 | mce_bootlog = str[0] == 'b'; | ||
854 | else if (isdigit(str[0])) | ||
855 | get_option(&str, &tolerant); | ||
856 | else | ||
857 | printk("mce= argument %s ignored. Please use /sys", str); | ||
858 | return 1; | ||
859 | } | ||
860 | |||
861 | __setup("nomce", mcheck_disable); | ||
862 | __setup("mce=", mcheck_enable); | ||
863 | |||
864 | /* | ||
865 | * Sysfs support | ||
866 | */ | ||
867 | |||
868 | /* | ||
869 | * Disable machine checks on suspend and shutdown. We can't really handle | ||
870 | * them later. | ||
871 | */ | ||
872 | static int mce_disable(void) | ||
873 | { | ||
874 | int i; | ||
875 | |||
876 | for (i = 0; i < banks; i++) | ||
877 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
878 | return 0; | ||
879 | } | ||
880 | |||
881 | static int mce_suspend(struct sys_device *dev, pm_message_t state) | ||
882 | { | ||
883 | return mce_disable(); | ||
884 | } | ||
885 | |||
886 | static int mce_shutdown(struct sys_device *dev) | ||
887 | { | ||
888 | return mce_disable(); | ||
889 | } | ||
890 | |||
891 | /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. | ||
892 | Only one CPU is active at this time, the others get readded later using | ||
893 | CPU hotplug. */ | ||
894 | static int mce_resume(struct sys_device *dev) | ||
895 | { | ||
896 | mce_init(NULL); | ||
897 | mce_cpu_features(&current_cpu_data); | ||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | static void mce_cpu_restart(void *data) | ||
902 | { | ||
903 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
904 | if (mce_available(&current_cpu_data)) | ||
905 | mce_init(NULL); | ||
906 | mce_init_timer(); | ||
907 | } | ||
908 | |||
909 | /* Reinit MCEs after user configuration changes */ | ||
910 | static void mce_restart(void) | ||
911 | { | ||
912 | on_each_cpu(mce_cpu_restart, NULL, 1); | ||
913 | } | ||
914 | |||
915 | static struct sysdev_class mce_sysclass = { | ||
916 | .suspend = mce_suspend, | ||
917 | .shutdown = mce_shutdown, | ||
918 | .resume = mce_resume, | ||
919 | .name = "machinecheck", | ||
920 | }; | ||
921 | |||
922 | DEFINE_PER_CPU(struct sys_device, device_mce); | ||
923 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata; | ||
924 | |||
925 | /* Why are there no generic functions for this? */ | ||
926 | #define ACCESSOR(name, var, start) \ | ||
927 | static ssize_t show_ ## name(struct sys_device *s, \ | ||
928 | struct sysdev_attribute *attr, \ | ||
929 | char *buf) { \ | ||
930 | return sprintf(buf, "%lx\n", (unsigned long)var); \ | ||
931 | } \ | ||
932 | static ssize_t set_ ## name(struct sys_device *s, \ | ||
933 | struct sysdev_attribute *attr, \ | ||
934 | const char *buf, size_t siz) { \ | ||
935 | char *end; \ | ||
936 | unsigned long new = simple_strtoul(buf, &end, 0); \ | ||
937 | if (end == buf) return -EINVAL; \ | ||
938 | var = new; \ | ||
939 | start; \ | ||
940 | return end-buf; \ | ||
941 | } \ | ||
942 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); | ||
943 | |||
944 | static struct sysdev_attribute *bank_attrs; | ||
945 | |||
946 | static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, | ||
947 | char *buf) | ||
948 | { | ||
949 | u64 b = bank[attr - bank_attrs]; | ||
950 | return sprintf(buf, "%llx\n", b); | ||
951 | } | ||
952 | |||
953 | static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, | ||
954 | const char *buf, size_t siz) | ||
955 | { | ||
956 | char *end; | ||
957 | u64 new = simple_strtoull(buf, &end, 0); | ||
958 | if (end == buf) | ||
959 | return -EINVAL; | ||
960 | bank[attr - bank_attrs] = new; | ||
961 | mce_restart(); | ||
962 | return end-buf; | ||
963 | } | ||
964 | |||
965 | static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, | ||
966 | char *buf) | ||
967 | { | ||
968 | strcpy(buf, trigger); | ||
969 | strcat(buf, "\n"); | ||
970 | return strlen(trigger) + 1; | ||
971 | } | ||
972 | |||
973 | static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, | ||
974 | const char *buf,size_t siz) | ||
975 | { | ||
976 | char *p; | ||
977 | int len; | ||
978 | strncpy(trigger, buf, sizeof(trigger)); | ||
979 | trigger[sizeof(trigger)-1] = 0; | ||
980 | len = strlen(trigger); | ||
981 | p = strchr(trigger, '\n'); | ||
982 | if (*p) *p = 0; | ||
983 | return len; | ||
984 | } | ||
985 | |||
986 | static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); | ||
987 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); | ||
988 | ACCESSOR(check_interval,check_interval,mce_restart()) | ||
989 | static struct sysdev_attribute *mce_attributes[] = { | ||
990 | &attr_tolerant.attr, &attr_check_interval, &attr_trigger, | ||
991 | NULL | ||
992 | }; | ||
993 | |||
994 | static cpumask_var_t mce_device_initialized; | ||
995 | |||
996 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | ||
997 | static __cpuinit int mce_create_device(unsigned int cpu) | ||
998 | { | ||
999 | int err; | ||
1000 | int i; | ||
1001 | |||
1002 | if (!mce_available(&boot_cpu_data)) | ||
1003 | return -EIO; | ||
1004 | |||
1005 | memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); | ||
1006 | per_cpu(device_mce,cpu).id = cpu; | ||
1007 | per_cpu(device_mce,cpu).cls = &mce_sysclass; | ||
1008 | |||
1009 | err = sysdev_register(&per_cpu(device_mce,cpu)); | ||
1010 | if (err) | ||
1011 | return err; | ||
1012 | |||
1013 | for (i = 0; mce_attributes[i]; i++) { | ||
1014 | err = sysdev_create_file(&per_cpu(device_mce,cpu), | ||
1015 | mce_attributes[i]); | ||
1016 | if (err) | ||
1017 | goto error; | ||
1018 | } | ||
1019 | for (i = 0; i < banks; i++) { | ||
1020 | err = sysdev_create_file(&per_cpu(device_mce, cpu), | ||
1021 | &bank_attrs[i]); | ||
1022 | if (err) | ||
1023 | goto error2; | ||
1024 | } | ||
1025 | cpumask_set_cpu(cpu, mce_device_initialized); | ||
1026 | |||
1027 | return 0; | ||
1028 | error2: | ||
1029 | while (--i >= 0) { | ||
1030 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
1031 | &bank_attrs[i]); | ||
1032 | } | ||
1033 | error: | ||
1034 | while (--i >= 0) { | ||
1035 | sysdev_remove_file(&per_cpu(device_mce,cpu), | ||
1036 | mce_attributes[i]); | ||
1037 | } | ||
1038 | sysdev_unregister(&per_cpu(device_mce,cpu)); | ||
1039 | |||
1040 | return err; | ||
1041 | } | ||
1042 | |||
1043 | static __cpuinit void mce_remove_device(unsigned int cpu) | ||
1044 | { | ||
1045 | int i; | ||
1046 | |||
1047 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) | ||
1048 | return; | ||
1049 | |||
1050 | for (i = 0; mce_attributes[i]; i++) | ||
1051 | sysdev_remove_file(&per_cpu(device_mce,cpu), | ||
1052 | mce_attributes[i]); | ||
1053 | for (i = 0; i < banks; i++) | ||
1054 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
1055 | &bank_attrs[i]); | ||
1056 | sysdev_unregister(&per_cpu(device_mce,cpu)); | ||
1057 | cpumask_clear_cpu(cpu, mce_device_initialized); | ||
1058 | } | ||
1059 | |||
1060 | /* Make sure there are no machine checks on offlined CPUs. */ | ||
1061 | static void mce_disable_cpu(void *h) | ||
1062 | { | ||
1063 | int i; | ||
1064 | unsigned long action = *(unsigned long *)h; | ||
1065 | |||
1066 | if (!mce_available(&current_cpu_data)) | ||
1067 | return; | ||
1068 | if (!(action & CPU_TASKS_FROZEN)) | ||
1069 | cmci_clear(); | ||
1070 | for (i = 0; i < banks; i++) | ||
1071 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
1072 | } | ||
1073 | |||
1074 | static void mce_reenable_cpu(void *h) | ||
1075 | { | ||
1076 | int i; | ||
1077 | unsigned long action = *(unsigned long *)h; | ||
1078 | |||
1079 | if (!mce_available(&current_cpu_data)) | ||
1080 | return; | ||
1081 | if (!(action & CPU_TASKS_FROZEN)) | ||
1082 | cmci_reenable(); | ||
1083 | for (i = 0; i < banks; i++) | ||
1084 | wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); | ||
1085 | } | ||
1086 | |||
1087 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | ||
1088 | static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | ||
1089 | unsigned long action, void *hcpu) | ||
1090 | { | ||
1091 | unsigned int cpu = (unsigned long)hcpu; | ||
1092 | struct timer_list *t = &per_cpu(mce_timer, cpu); | ||
1093 | |||
1094 | switch (action) { | ||
1095 | case CPU_ONLINE: | ||
1096 | case CPU_ONLINE_FROZEN: | ||
1097 | mce_create_device(cpu); | ||
1098 | if (threshold_cpu_callback) | ||
1099 | threshold_cpu_callback(action, cpu); | ||
1100 | break; | ||
1101 | case CPU_DEAD: | ||
1102 | case CPU_DEAD_FROZEN: | ||
1103 | if (threshold_cpu_callback) | ||
1104 | threshold_cpu_callback(action, cpu); | ||
1105 | mce_remove_device(cpu); | ||
1106 | break; | ||
1107 | case CPU_DOWN_PREPARE: | ||
1108 | case CPU_DOWN_PREPARE_FROZEN: | ||
1109 | del_timer_sync(t); | ||
1110 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); | ||
1111 | break; | ||
1112 | case CPU_DOWN_FAILED: | ||
1113 | case CPU_DOWN_FAILED_FROZEN: | ||
1114 | t->expires = round_jiffies(jiffies + | ||
1115 | __get_cpu_var(next_interval)); | ||
1116 | add_timer_on(t, cpu); | ||
1117 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | ||
1118 | break; | ||
1119 | case CPU_POST_DEAD: | ||
1120 | /* intentionally ignoring frozen here */ | ||
1121 | cmci_rediscover(cpu); | ||
1122 | break; | ||
1123 | } | ||
1124 | return NOTIFY_OK; | ||
1125 | } | ||
1126 | |||
1127 | static struct notifier_block mce_cpu_notifier __cpuinitdata = { | ||
1128 | .notifier_call = mce_cpu_callback, | ||
1129 | }; | ||
1130 | |||
1131 | static __init int mce_init_banks(void) | ||
1132 | { | ||
1133 | int i; | ||
1134 | |||
1135 | bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, | ||
1136 | GFP_KERNEL); | ||
1137 | if (!bank_attrs) | ||
1138 | return -ENOMEM; | ||
1139 | |||
1140 | for (i = 0; i < banks; i++) { | ||
1141 | struct sysdev_attribute *a = &bank_attrs[i]; | ||
1142 | a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); | ||
1143 | if (!a->attr.name) | ||
1144 | goto nomem; | ||
1145 | a->attr.mode = 0644; | ||
1146 | a->show = show_bank; | ||
1147 | a->store = set_bank; | ||
1148 | } | ||
1149 | return 0; | ||
1150 | |||
1151 | nomem: | ||
1152 | while (--i >= 0) | ||
1153 | kfree(bank_attrs[i].attr.name); | ||
1154 | kfree(bank_attrs); | ||
1155 | bank_attrs = NULL; | ||
1156 | return -ENOMEM; | ||
1157 | } | ||
1158 | |||
1159 | static __init int mce_init_device(void) | ||
1160 | { | ||
1161 | int err; | ||
1162 | int i = 0; | ||
1163 | |||
1164 | if (!mce_available(&boot_cpu_data)) | ||
1165 | return -EIO; | ||
1166 | |||
1167 | zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); | ||
1168 | |||
1169 | err = mce_init_banks(); | ||
1170 | if (err) | ||
1171 | return err; | ||
1172 | |||
1173 | err = sysdev_class_register(&mce_sysclass); | ||
1174 | if (err) | ||
1175 | return err; | ||
1176 | |||
1177 | for_each_online_cpu(i) { | ||
1178 | err = mce_create_device(i); | ||
1179 | if (err) | ||
1180 | return err; | ||
1181 | } | ||
1182 | |||
1183 | register_hotcpu_notifier(&mce_cpu_notifier); | ||
1184 | misc_register(&mce_log_device); | ||
1185 | return err; | ||
1186 | } | ||
1187 | |||
1188 | device_initcall(mce_init_device); | ||
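
Editor's note: mce_init_banks() above uses the usual allocate-then-roll-back idiom — one sysdev attribute per bank, and on a partial allocation failure it walks backwards freeing the names already created before dropping the array. A minimal userspace sketch of the same pattern (not kernel code; the struct, names and bank count are made up for illustration):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

struct bank_attr {
	char *name;        /* kasprintf()-style allocated name */
	unsigned mode;
};

/* Allocate one attribute per bank; on failure, free in reverse order. */
static struct bank_attr *init_bank_attrs(int banks)
{
	struct bank_attr *attrs = calloc(banks, sizeof(*attrs));
	int i;

	if (!attrs)
		return NULL;

	for (i = 0; i < banks; i++) {
		if (asprintf(&attrs[i].name, "bank%d", i) < 0)
			goto nomem;
		attrs[i].mode = 0644;
	}
	return attrs;

nomem:
	while (--i >= 0)
		free(attrs[i].name);
	free(attrs);
	return NULL;
}

int main(void)
{
	struct bank_attr *a = init_bank_attrs(6);
	if (a)
		printf("first attribute: %s\n", a[0].name);
	return 0;
}

The backwards loop starts at the index that failed, so only names that were actually allocated get freed — the same invariant the nomem: label above relies on.
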
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 56dde9c4bc96..ddae21620bda 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -13,22 +13,22 @@ | |||
13 | * | 13 | * |
14 | * All MC4_MISCi registers are shared between multi-cores | 14 | * All MC4_MISCi registers are shared between multi-cores |
15 | */ | 15 | */ |
16 | |||
17 | #include <linux/cpu.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
21 | #include <linux/kobject.h> | ||
22 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
23 | #include <linux/sched.h> | 18 | #include <linux/kobject.h> |
24 | #include <linux/smp.h> | 19 | #include <linux/percpu.h> |
25 | #include <linux/sysdev.h> | 20 | #include <linux/sysdev.h> |
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
26 | #include <linux/sysfs.h> | 23 | #include <linux/sysfs.h> |
24 | #include <linux/init.h> | ||
25 | #include <linux/cpu.h> | ||
26 | #include <linux/smp.h> | ||
27 | |||
27 | #include <asm/apic.h> | 28 | #include <asm/apic.h> |
29 | #include <asm/idle.h> | ||
28 | #include <asm/mce.h> | 30 | #include <asm/mce.h> |
29 | #include <asm/msr.h> | 31 | #include <asm/msr.h> |
30 | #include <asm/percpu.h> | ||
31 | #include <asm/idle.h> | ||
32 | 32 | ||
33 | #define PFX "mce_threshold: " | 33 | #define PFX "mce_threshold: " |
34 | #define VERSION "version 1.1.1" | 34 | #define VERSION "version 1.1.1" |
@@ -48,26 +48,26 @@ | |||
48 | #define MCG_XBLK_ADDR 0xC0000400 | 48 | #define MCG_XBLK_ADDR 0xC0000400 |
49 | 49 | ||
50 | struct threshold_block { | 50 | struct threshold_block { |
51 | unsigned int block; | 51 | unsigned int block; |
52 | unsigned int bank; | 52 | unsigned int bank; |
53 | unsigned int cpu; | 53 | unsigned int cpu; |
54 | u32 address; | 54 | u32 address; |
55 | u16 interrupt_enable; | 55 | u16 interrupt_enable; |
56 | u16 threshold_limit; | 56 | u16 threshold_limit; |
57 | struct kobject kobj; | 57 | struct kobject kobj; |
58 | struct list_head miscj; | 58 | struct list_head miscj; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* defaults used early on boot */ | 61 | /* defaults used early on boot */ |
62 | static struct threshold_block threshold_defaults = { | 62 | static struct threshold_block threshold_defaults = { |
63 | .interrupt_enable = 0, | 63 | .interrupt_enable = 0, |
64 | .threshold_limit = THRESHOLD_MAX, | 64 | .threshold_limit = THRESHOLD_MAX, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct threshold_bank { | 67 | struct threshold_bank { |
68 | struct kobject *kobj; | 68 | struct kobject *kobj; |
69 | struct threshold_block *blocks; | 69 | struct threshold_block *blocks; |
70 | cpumask_var_t cpus; | 70 | cpumask_var_t cpus; |
71 | }; | 71 | }; |
72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); | 72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); |
73 | 73 | ||
@@ -86,9 +86,9 @@ static void amd_threshold_interrupt(void); | |||
86 | */ | 86 | */ |
87 | 87 | ||
88 | struct thresh_restart { | 88 | struct thresh_restart { |
89 | struct threshold_block *b; | 89 | struct threshold_block *b; |
90 | int reset; | 90 | int reset; |
91 | u16 old_limit; | 91 | u16 old_limit; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | /* must be called with correct cpu affinity */ | 94 | /* must be called with correct cpu affinity */ |
@@ -110,6 +110,7 @@ static void threshold_restart_bank(void *_tr) | |||
110 | } else if (tr->old_limit) { /* change limit w/o reset */ | 110 | } else if (tr->old_limit) { /* change limit w/o reset */ |
111 | int new_count = (mci_misc_hi & THRESHOLD_MAX) + | 111 | int new_count = (mci_misc_hi & THRESHOLD_MAX) + |
112 | (tr->old_limit - tr->b->threshold_limit); | 112 | (tr->old_limit - tr->b->threshold_limit); |
113 | |||
113 | mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | | 114 | mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | |
114 | (new_count & THRESHOLD_MAX); | 115 | (new_count & THRESHOLD_MAX); |
115 | } | 116 | } |
@@ -125,11 +126,11 @@ static void threshold_restart_bank(void *_tr) | |||
125 | /* cpu init entry point, called from mce.c with preempt off */ | 126 | /* cpu init entry point, called from mce.c with preempt off */ |
126 | void mce_amd_feature_init(struct cpuinfo_x86 *c) | 127 | void mce_amd_feature_init(struct cpuinfo_x86 *c) |
127 | { | 128 | { |
128 | unsigned int bank, block; | ||
129 | unsigned int cpu = smp_processor_id(); | 129 | unsigned int cpu = smp_processor_id(); |
130 | u8 lvt_off; | ||
131 | u32 low = 0, high = 0, address = 0; | 130 | u32 low = 0, high = 0, address = 0; |
131 | unsigned int bank, block; | ||
132 | struct thresh_restart tr; | 132 | struct thresh_restart tr; |
133 | u8 lvt_off; | ||
133 | 134 | ||
134 | for (bank = 0; bank < NR_BANKS; ++bank) { | 135 | for (bank = 0; bank < NR_BANKS; ++bank) { |
135 | for (block = 0; block < NR_BLOCKS; ++block) { | 136 | for (block = 0; block < NR_BLOCKS; ++block) { |
@@ -140,8 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
140 | if (!address) | 141 | if (!address) |
141 | break; | 142 | break; |
142 | address += MCG_XBLK_ADDR; | 143 | address += MCG_XBLK_ADDR; |
143 | } | 144 | } else |
144 | else | ||
145 | ++address; | 145 | ++address; |
146 | 146 | ||
147 | if (rdmsr_safe(address, &low, &high)) | 147 | if (rdmsr_safe(address, &low, &high)) |
@@ -193,9 +193,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
193 | */ | 193 | */ |
194 | static void amd_threshold_interrupt(void) | 194 | static void amd_threshold_interrupt(void) |
195 | { | 195 | { |
196 | u32 low = 0, high = 0, address = 0; | ||
196 | unsigned int bank, block; | 197 | unsigned int bank, block; |
197 | struct mce m; | 198 | struct mce m; |
198 | u32 low = 0, high = 0, address = 0; | ||
199 | 199 | ||
200 | mce_setup(&m); | 200 | mce_setup(&m); |
201 | 201 | ||
@@ -204,16 +204,16 @@ static void amd_threshold_interrupt(void) | |||
204 | if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) | 204 | if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) |
205 | continue; | 205 | continue; |
206 | for (block = 0; block < NR_BLOCKS; ++block) { | 206 | for (block = 0; block < NR_BLOCKS; ++block) { |
207 | if (block == 0) | 207 | if (block == 0) { |
208 | address = MSR_IA32_MC0_MISC + bank * 4; | 208 | address = MSR_IA32_MC0_MISC + bank * 4; |
209 | else if (block == 1) { | 209 | } else if (block == 1) { |
210 | address = (low & MASK_BLKPTR_LO) >> 21; | 210 | address = (low & MASK_BLKPTR_LO) >> 21; |
211 | if (!address) | 211 | if (!address) |
212 | break; | 212 | break; |
213 | address += MCG_XBLK_ADDR; | 213 | address += MCG_XBLK_ADDR; |
214 | } | 214 | } else { |
215 | else | ||
216 | ++address; | 215 | ++address; |
216 | } | ||
217 | 217 | ||
218 | if (rdmsr_safe(address, &low, &high)) | 218 | if (rdmsr_safe(address, &low, &high)) |
219 | break; | 219 | break; |
@@ -229,8 +229,10 @@ static void amd_threshold_interrupt(void) | |||
229 | (high & MASK_LOCKED_HI)) | 229 | (high & MASK_LOCKED_HI)) |
230 | continue; | 230 | continue; |
231 | 231 | ||
232 | /* Log the machine check that caused the threshold | 232 | /* |
233 | event. */ | 233 | * Log the machine check that caused the threshold |
234 | * event. | ||
235 | */ | ||
234 | machine_check_poll(MCP_TIMESTAMP, | 236 | machine_check_poll(MCP_TIMESTAMP, |
235 | &__get_cpu_var(mce_poll_banks)); | 237 | &__get_cpu_var(mce_poll_banks)); |
236 | 238 | ||
@@ -254,48 +256,52 @@ static void amd_threshold_interrupt(void) | |||
254 | 256 | ||
255 | struct threshold_attr { | 257 | struct threshold_attr { |
256 | struct attribute attr; | 258 | struct attribute attr; |
257 | ssize_t(*show) (struct threshold_block *, char *); | 259 | ssize_t (*show) (struct threshold_block *, char *); |
258 | ssize_t(*store) (struct threshold_block *, const char *, size_t count); | 260 | ssize_t (*store) (struct threshold_block *, const char *, size_t count); |
259 | }; | 261 | }; |
260 | 262 | ||
261 | #define SHOW_FIELDS(name) \ | 263 | #define SHOW_FIELDS(name) \ |
262 | static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ | 264 | static ssize_t show_ ## name(struct threshold_block *b, char *buf) \ |
263 | { \ | 265 | { \ |
264 | return sprintf(buf, "%lx\n", (unsigned long) b->name); \ | 266 | return sprintf(buf, "%lx\n", (unsigned long) b->name); \ |
265 | } | 267 | } |
266 | SHOW_FIELDS(interrupt_enable) | 268 | SHOW_FIELDS(interrupt_enable) |
267 | SHOW_FIELDS(threshold_limit) | 269 | SHOW_FIELDS(threshold_limit) |
268 | 270 | ||
269 | static ssize_t store_interrupt_enable(struct threshold_block *b, | 271 | static ssize_t |
270 | const char *buf, size_t count) | 272 | store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size) |
271 | { | 273 | { |
272 | char *end; | ||
273 | struct thresh_restart tr; | 274 | struct thresh_restart tr; |
274 | unsigned long new = simple_strtoul(buf, &end, 0); | 275 | unsigned long new; |
275 | if (end == buf) | 276 | |
277 | if (strict_strtoul(buf, 0, &new) < 0) | ||
276 | return -EINVAL; | 278 | return -EINVAL; |
279 | |||
277 | b->interrupt_enable = !!new; | 280 | b->interrupt_enable = !!new; |
278 | 281 | ||
279 | tr.b = b; | 282 | tr.b = b; |
280 | tr.reset = 0; | 283 | tr.reset = 0; |
281 | tr.old_limit = 0; | 284 | tr.old_limit = 0; |
285 | |||
282 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); | 286 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); |
283 | 287 | ||
284 | return end - buf; | 288 | return size; |
285 | } | 289 | } |
286 | 290 | ||
287 | static ssize_t store_threshold_limit(struct threshold_block *b, | 291 | static ssize_t |
288 | const char *buf, size_t count) | 292 | store_threshold_limit(struct threshold_block *b, const char *buf, size_t size) |
289 | { | 293 | { |
290 | char *end; | ||
291 | struct thresh_restart tr; | 294 | struct thresh_restart tr; |
292 | unsigned long new = simple_strtoul(buf, &end, 0); | 295 | unsigned long new; |
293 | if (end == buf) | 296 | |
297 | if (strict_strtoul(buf, 0, &new) < 0) | ||
294 | return -EINVAL; | 298 | return -EINVAL; |
299 | |||
295 | if (new > THRESHOLD_MAX) | 300 | if (new > THRESHOLD_MAX) |
296 | new = THRESHOLD_MAX; | 301 | new = THRESHOLD_MAX; |
297 | if (new < 1) | 302 | if (new < 1) |
298 | new = 1; | 303 | new = 1; |
304 | |||
299 | tr.old_limit = b->threshold_limit; | 305 | tr.old_limit = b->threshold_limit; |
300 | b->threshold_limit = new; | 306 | b->threshold_limit = new; |
301 | tr.b = b; | 307 | tr.b = b; |
@@ -303,12 +309,12 @@ static ssize_t store_threshold_limit(struct threshold_block *b, | |||
303 | 309 | ||
304 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); | 310 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); |
305 | 311 | ||
306 | return end - buf; | 312 | return size; |
307 | } | 313 | } |
308 | 314 | ||
309 | struct threshold_block_cross_cpu { | 315 | struct threshold_block_cross_cpu { |
310 | struct threshold_block *tb; | 316 | struct threshold_block *tb; |
311 | long retval; | 317 | long retval; |
312 | }; | 318 | }; |
313 | 319 | ||
314 | static void local_error_count_handler(void *_tbcc) | 320 | static void local_error_count_handler(void *_tbcc) |
@@ -338,16 +344,13 @@ static ssize_t store_error_count(struct threshold_block *b, | |||
338 | return 1; | 344 | return 1; |
339 | } | 345 | } |
340 | 346 | ||
341 | #define THRESHOLD_ATTR(_name,_mode,_show,_store) { \ | 347 | #define RW_ATTR(val) \ |
342 | .attr = {.name = __stringify(_name), .mode = _mode }, \ | 348 | static struct threshold_attr val = { \ |
343 | .show = _show, \ | 349 | .attr = {.name = __stringify(val), .mode = 0644 }, \ |
344 | .store = _store, \ | 350 | .show = show_## val, \ |
351 | .store = store_## val, \ | ||
345 | }; | 352 | }; |
346 | 353 | ||
347 | #define RW_ATTR(name) \ | ||
348 | static struct threshold_attr name = \ | ||
349 | THRESHOLD_ATTR(name, 0644, show_## name, store_## name) | ||
350 | |||
351 | RW_ATTR(interrupt_enable); | 354 | RW_ATTR(interrupt_enable); |
352 | RW_ATTR(threshold_limit); | 355 | RW_ATTR(threshold_limit); |
353 | RW_ATTR(error_count); | 356 | RW_ATTR(error_count); |
@@ -359,15 +362,17 @@ static struct attribute *default_attrs[] = { | |||
359 | NULL | 362 | NULL |
360 | }; | 363 | }; |
361 | 364 | ||
362 | #define to_block(k) container_of(k, struct threshold_block, kobj) | 365 | #define to_block(k) container_of(k, struct threshold_block, kobj) |
363 | #define to_attr(a) container_of(a, struct threshold_attr, attr) | 366 | #define to_attr(a) container_of(a, struct threshold_attr, attr) |
364 | 367 | ||
365 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | 368 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) |
366 | { | 369 | { |
367 | struct threshold_block *b = to_block(kobj); | 370 | struct threshold_block *b = to_block(kobj); |
368 | struct threshold_attr *a = to_attr(attr); | 371 | struct threshold_attr *a = to_attr(attr); |
369 | ssize_t ret; | 372 | ssize_t ret; |
373 | |||
370 | ret = a->show ? a->show(b, buf) : -EIO; | 374 | ret = a->show ? a->show(b, buf) : -EIO; |
375 | |||
371 | return ret; | 376 | return ret; |
372 | } | 377 | } |
373 | 378 | ||
@@ -377,18 +382,20 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, | |||
377 | struct threshold_block *b = to_block(kobj); | 382 | struct threshold_block *b = to_block(kobj); |
378 | struct threshold_attr *a = to_attr(attr); | 383 | struct threshold_attr *a = to_attr(attr); |
379 | ssize_t ret; | 384 | ssize_t ret; |
385 | |||
380 | ret = a->store ? a->store(b, buf, count) : -EIO; | 386 | ret = a->store ? a->store(b, buf, count) : -EIO; |
387 | |||
381 | return ret; | 388 | return ret; |
382 | } | 389 | } |
383 | 390 | ||
384 | static struct sysfs_ops threshold_ops = { | 391 | static struct sysfs_ops threshold_ops = { |
385 | .show = show, | 392 | .show = show, |
386 | .store = store, | 393 | .store = store, |
387 | }; | 394 | }; |
388 | 395 | ||
389 | static struct kobj_type threshold_ktype = { | 396 | static struct kobj_type threshold_ktype = { |
390 | .sysfs_ops = &threshold_ops, | 397 | .sysfs_ops = &threshold_ops, |
391 | .default_attrs = default_attrs, | 398 | .default_attrs = default_attrs, |
392 | }; | 399 | }; |
393 | 400 | ||
394 | static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | 401 | static __cpuinit int allocate_threshold_blocks(unsigned int cpu, |
@@ -396,9 +403,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | |||
396 | unsigned int block, | 403 | unsigned int block, |
397 | u32 address) | 404 | u32 address) |
398 | { | 405 | { |
399 | int err; | ||
400 | u32 low, high; | ||
401 | struct threshold_block *b = NULL; | 406 | struct threshold_block *b = NULL; |
407 | u32 low, high; | ||
408 | int err; | ||
402 | 409 | ||
403 | if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) | 410 | if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) |
404 | return 0; | 411 | return 0; |
@@ -421,20 +428,21 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | |||
421 | if (!b) | 428 | if (!b) |
422 | return -ENOMEM; | 429 | return -ENOMEM; |
423 | 430 | ||
424 | b->block = block; | 431 | b->block = block; |
425 | b->bank = bank; | 432 | b->bank = bank; |
426 | b->cpu = cpu; | 433 | b->cpu = cpu; |
427 | b->address = address; | 434 | b->address = address; |
428 | b->interrupt_enable = 0; | 435 | b->interrupt_enable = 0; |
429 | b->threshold_limit = THRESHOLD_MAX; | 436 | b->threshold_limit = THRESHOLD_MAX; |
430 | 437 | ||
431 | INIT_LIST_HEAD(&b->miscj); | 438 | INIT_LIST_HEAD(&b->miscj); |
432 | 439 | ||
433 | if (per_cpu(threshold_banks, cpu)[bank]->blocks) | 440 | if (per_cpu(threshold_banks, cpu)[bank]->blocks) { |
434 | list_add(&b->miscj, | 441 | list_add(&b->miscj, |
435 | &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); | 442 | &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); |
436 | else | 443 | } else { |
437 | per_cpu(threshold_banks, cpu)[bank]->blocks = b; | 444 | per_cpu(threshold_banks, cpu)[bank]->blocks = b; |
445 | } | ||
438 | 446 | ||
439 | err = kobject_init_and_add(&b->kobj, &threshold_ktype, | 447 | err = kobject_init_and_add(&b->kobj, &threshold_ktype, |
440 | per_cpu(threshold_banks, cpu)[bank]->kobj, | 448 | per_cpu(threshold_banks, cpu)[bank]->kobj, |
@@ -447,8 +455,9 @@ recurse: | |||
447 | if (!address) | 455 | if (!address) |
448 | return 0; | 456 | return 0; |
449 | address += MCG_XBLK_ADDR; | 457 | address += MCG_XBLK_ADDR; |
450 | } else | 458 | } else { |
451 | ++address; | 459 | ++address; |
460 | } | ||
452 | 461 | ||
453 | err = allocate_threshold_blocks(cpu, bank, ++block, address); | 462 | err = allocate_threshold_blocks(cpu, bank, ++block, address); |
454 | if (err) | 463 | if (err) |
@@ -500,13 +509,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
500 | if (!b) | 509 | if (!b) |
501 | goto out; | 510 | goto out; |
502 | 511 | ||
503 | err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj, | 512 | err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj, |
504 | b->kobj, name); | 513 | b->kobj, name); |
505 | if (err) | 514 | if (err) |
506 | goto out; | 515 | goto out; |
507 | 516 | ||
508 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | 517 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
509 | per_cpu(threshold_banks, cpu)[bank] = b; | 518 | per_cpu(threshold_banks, cpu)[bank] = b; |
519 | |||
510 | goto out; | 520 | goto out; |
511 | } | 521 | } |
512 | #endif | 522 | #endif |
@@ -522,7 +532,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
522 | goto out; | 532 | goto out; |
523 | } | 533 | } |
524 | 534 | ||
525 | b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); | 535 | b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj); |
526 | if (!b->kobj) | 536 | if (!b->kobj) |
527 | goto out_free; | 537 | goto out_free; |
528 | 538 | ||
@@ -542,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
542 | if (i == cpu) | 552 | if (i == cpu) |
543 | continue; | 553 | continue; |
544 | 554 | ||
545 | err = sysfs_create_link(&per_cpu(device_mce, i).kobj, | 555 | err = sysfs_create_link(&per_cpu(mce_dev, i).kobj, |
546 | b->kobj, name); | 556 | b->kobj, name); |
547 | if (err) | 557 | if (err) |
548 | goto out; | 558 | goto out; |
@@ -605,15 +615,13 @@ static void deallocate_threshold_block(unsigned int cpu, | |||
605 | 615 | ||
606 | static void threshold_remove_bank(unsigned int cpu, int bank) | 616 | static void threshold_remove_bank(unsigned int cpu, int bank) |
607 | { | 617 | { |
608 | int i = 0; | ||
609 | struct threshold_bank *b; | 618 | struct threshold_bank *b; |
610 | char name[32]; | 619 | char name[32]; |
620 | int i = 0; | ||
611 | 621 | ||
612 | b = per_cpu(threshold_banks, cpu)[bank]; | 622 | b = per_cpu(threshold_banks, cpu)[bank]; |
613 | |||
614 | if (!b) | 623 | if (!b) |
615 | return; | 624 | return; |
616 | |||
617 | if (!b->blocks) | 625 | if (!b->blocks) |
618 | goto free_out; | 626 | goto free_out; |
619 | 627 | ||
@@ -622,8 +630,9 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
622 | #ifdef CONFIG_SMP | 630 | #ifdef CONFIG_SMP |
623 | /* sibling symlink */ | 631 | /* sibling symlink */ |
624 | if (shared_bank[bank] && b->blocks->cpu != cpu) { | 632 | if (shared_bank[bank] && b->blocks->cpu != cpu) { |
625 | sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name); | 633 | sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name); |
626 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 634 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
635 | |||
627 | return; | 636 | return; |
628 | } | 637 | } |
629 | #endif | 638 | #endif |
@@ -633,7 +642,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
633 | if (i == cpu) | 642 | if (i == cpu) |
634 | continue; | 643 | continue; |
635 | 644 | ||
636 | sysfs_remove_link(&per_cpu(device_mce, i).kobj, name); | 645 | sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name); |
637 | per_cpu(threshold_banks, i)[bank] = NULL; | 646 | per_cpu(threshold_banks, i)[bank] = NULL; |
638 | } | 647 | } |
639 | 648 | ||
@@ -659,12 +668,9 @@ static void threshold_remove_device(unsigned int cpu) | |||
659 | } | 668 | } |
660 | 669 | ||
661 | /* get notified when a cpu comes on/off */ | 670 | /* get notified when a cpu comes on/off */ |
662 | static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action, | 671 | static void __cpuinit |
663 | unsigned int cpu) | 672 | amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) |
664 | { | 673 | { |
665 | if (cpu >= NR_CPUS) | ||
666 | return; | ||
667 | |||
668 | switch (action) { | 674 | switch (action) { |
669 | case CPU_ONLINE: | 675 | case CPU_ONLINE: |
670 | case CPU_ONLINE_FROZEN: | 676 | case CPU_ONLINE_FROZEN: |
@@ -686,11 +692,12 @@ static __init int threshold_init_device(void) | |||
686 | /* to hit CPUs online before the notifier is up */ | 692 | /* to hit CPUs online before the notifier is up */ |
687 | for_each_online_cpu(lcpu) { | 693 | for_each_online_cpu(lcpu) { |
688 | int err = threshold_create_device(lcpu); | 694 | int err = threshold_create_device(lcpu); |
695 | |||
689 | if (err) | 696 | if (err) |
690 | return err; | 697 | return err; |
691 | } | 698 | } |
692 | threshold_cpu_callback = amd_64_threshold_cpu_callback; | 699 | threshold_cpu_callback = amd_64_threshold_cpu_callback; |
700 | |||
693 | return 0; | 701 | return 0; |
694 | } | 702 | } |
695 | |||
696 | device_initcall(threshold_init_device); | 703 | device_initcall(threshold_init_device); |
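
Editor's note: store_threshold_limit() above switches from simple_strtoul() to strict_strtoul() and then clamps the parsed value into [1, THRESHOLD_MAX]. A rough userspace equivalent of that parse-and-clamp step; the 0xFFF limit is assumed here to match the driver's THRESHOLD_MAX, so treat it as illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define THRESHOLD_MAX 0xFFFUL	/* assumed limit, as in the AMD threshold driver */

/* Parse a threshold value and clamp it to [1, THRESHOLD_MAX]; -EINVAL on bad input. */
static int parse_threshold_limit(const char *buf, unsigned long *out)
{
	char *end;
	unsigned long new;

	errno = 0;
	new = strtoul(buf, &end, 0);
	if (end == buf || errno)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	*out = new;
	return 0;
}

int main(void)
{
	unsigned long v;

	if (!parse_threshold_limit("0x2000", &v))
		printf("clamped to %lu\n", v);	/* prints 4095 */
	return 0;
}

Returning the full write size on success (rather than end - buf) is what lets sysfs treat the whole buffer as consumed, which is the behavioural change the hunk above makes.
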
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c new file mode 100644 index 000000000000..2b011d2d8579 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Common code for Intel machine checks | ||
3 | */ | ||
4 | #include <linux/interrupt.h> | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/smp.h> | ||
9 | |||
10 | #include <asm/therm_throt.h> | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/system.h> | ||
13 | #include <asm/apic.h> | ||
14 | #include <asm/msr.h> | ||
15 | |||
16 | #include "mce.h" | ||
17 | |||
18 | void intel_init_thermal(struct cpuinfo_x86 *c) | ||
19 | { | ||
20 | unsigned int cpu = smp_processor_id(); | ||
21 | int tm2 = 0; | ||
22 | u32 l, h; | ||
23 | |||
24 | /* Thermal monitoring depends on ACPI and clock modulation*/ | ||
25 | if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) | ||
26 | return; | ||
27 | |||
28 | /* | ||
29 | * First check if its enabled already, in which case there might | ||
30 | * be some SMM goo which handles it, so we can't even put a handler | ||
31 | * since it might be delivered via SMI already: | ||
32 | */ | ||
33 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
34 | h = apic_read(APIC_LVTTHMR); | ||
35 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | ||
36 | printk(KERN_DEBUG | ||
37 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | ||
38 | return; | ||
39 | } | ||
40 | |||
41 | if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2)) | ||
42 | tm2 = 1; | ||
43 | |||
44 | /* Check whether a vector already exists */ | ||
45 | if (h & APIC_VECTOR_MASK) { | ||
46 | printk(KERN_DEBUG | ||
47 | "CPU%d: Thermal LVT vector (%#x) already installed\n", | ||
48 | cpu, (h & APIC_VECTOR_MASK)); | ||
49 | return; | ||
50 | } | ||
51 | |||
52 | /* We'll mask the thermal vector in the lapic till we're ready: */ | ||
53 | h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED; | ||
54 | apic_write(APIC_LVTTHMR, h); | ||
55 | |||
56 | rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); | ||
57 | wrmsr(MSR_IA32_THERM_INTERRUPT, | ||
58 | l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h); | ||
59 | |||
60 | intel_set_thermal_handler(); | ||
61 | |||
62 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
63 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); | ||
64 | |||
65 | /* Unmask the thermal vector: */ | ||
66 | l = apic_read(APIC_LVTTHMR); | ||
67 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | ||
68 | |||
69 | printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", | ||
70 | cpu, tm2 ? "TM2" : "TM1"); | ||
71 | |||
72 | /* enable thermal throttle processing */ | ||
73 | atomic_set(&therm_throt_en, 1); | ||
74 | } | ||
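
Editor's note: intel_init_thermal() above refuses to touch the thermal LVT entry if firmware already handles it via SMI or a vector is already installed, and otherwise programs the entry masked, finishes setup, and only then unmasks it. A standalone sketch of that decision flow; the register is faked and the constant values are assumptions written out by hand, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the APIC LVT thermal entry; values assumed. */
#define APIC_VECTOR_MASK 0x000ffu
#define APIC_DM_MASK     0x00700u
#define APIC_DM_FIXED    0x00000u
#define APIC_DM_SMI      0x00200u
#define APIC_LVT_MASKED  0x10000u
#define THERMAL_VECTOR   0xfau		/* made-up vector number */

static uint32_t lvtthmr;		/* fake register for the sketch */
static uint32_t lvt_read(void)		{ return lvtthmr; }
static void     lvt_write(uint32_t v)	{ lvtthmr = v; }

int main(void)
{
	uint32_t v = lvt_read();

	if ((v & APIC_DM_MASK) == APIC_DM_SMI) {
		puts("thermal events already routed to SMM firmware, leave it alone");
		return 0;
	}
	if (v & APIC_VECTOR_MASK) {
		puts("a thermal vector is already installed");
		return 0;
	}

	/* Program the entry masked, finish the rest of the setup, then unmask. */
	lvt_write(THERMAL_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED);
	/* ... enable the interrupt source, install the handler ... */
	lvt_write(lvt_read() & ~APIC_LVT_MASKED);

	printf("final LVT value: 0x%05x\n", lvt_read());
	return 0;
}
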
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 65a0fceedcd7..f2ef6952c400 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <asm/idle.h> | 16 | #include <asm/idle.h> |
17 | #include <asm/therm_throt.h> | 17 | #include <asm/therm_throt.h> |
18 | 18 | ||
19 | #include "mce.h" | ||
20 | |||
19 | asmlinkage void smp_thermal_interrupt(void) | 21 | asmlinkage void smp_thermal_interrupt(void) |
20 | { | 22 | { |
21 | __u64 msr_val; | 23 | __u64 msr_val; |
@@ -26,67 +28,13 @@ asmlinkage void smp_thermal_interrupt(void) | |||
26 | irq_enter(); | 28 | irq_enter(); |
27 | 29 | ||
28 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 30 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
29 | if (therm_throt_process(msr_val & 1)) | 31 | if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT)) |
30 | mce_log_therm_throt_event(msr_val); | 32 | mce_log_therm_throt_event(msr_val); |
31 | 33 | ||
32 | inc_irq_stat(irq_thermal_count); | 34 | inc_irq_stat(irq_thermal_count); |
33 | irq_exit(); | 35 | irq_exit(); |
34 | } | 36 | } |
35 | 37 | ||
36 | static void intel_init_thermal(struct cpuinfo_x86 *c) | ||
37 | { | ||
38 | u32 l, h; | ||
39 | int tm2 = 0; | ||
40 | unsigned int cpu = smp_processor_id(); | ||
41 | |||
42 | if (!cpu_has(c, X86_FEATURE_ACPI)) | ||
43 | return; | ||
44 | |||
45 | if (!cpu_has(c, X86_FEATURE_ACC)) | ||
46 | return; | ||
47 | |||
48 | /* first check if TM1 is already enabled by the BIOS, in which | ||
49 | * case there might be some SMM goo which handles it, so we can't even | ||
50 | * put a handler since it might be delivered via SMI already. | ||
51 | */ | ||
52 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
53 | h = apic_read(APIC_LVTTHMR); | ||
54 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | ||
55 | printk(KERN_DEBUG | ||
56 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | ||
57 | return; | ||
58 | } | ||
59 | |||
60 | if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2)) | ||
61 | tm2 = 1; | ||
62 | |||
63 | if (h & APIC_VECTOR_MASK) { | ||
64 | printk(KERN_DEBUG | ||
65 | "CPU%d: Thermal LVT vector (%#x) already " | ||
66 | "installed\n", cpu, (h & APIC_VECTOR_MASK)); | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | h = THERMAL_APIC_VECTOR; | ||
71 | h |= (APIC_DM_FIXED | APIC_LVT_MASKED); | ||
72 | apic_write(APIC_LVTTHMR, h); | ||
73 | |||
74 | rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); | ||
75 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); | ||
76 | |||
77 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
78 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); | ||
79 | |||
80 | l = apic_read(APIC_LVTTHMR); | ||
81 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | ||
82 | printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", | ||
83 | cpu, tm2 ? "TM2" : "TM1"); | ||
84 | |||
85 | /* enable thermal throttle processing */ | ||
86 | atomic_set(&therm_throt_en, 1); | ||
87 | return; | ||
88 | } | ||
89 | |||
90 | /* | 38 | /* |
91 | * Support for Intel Correct Machine Check Interrupts. This allows | 39 | * Support for Intel Correct Machine Check Interrupts. This allows |
92 | * the CPU to raise an interrupt when a corrected machine check happened. | 40 | * the CPU to raise an interrupt when a corrected machine check happened. |
@@ -108,6 +56,9 @@ static int cmci_supported(int *banks) | |||
108 | { | 56 | { |
109 | u64 cap; | 57 | u64 cap; |
110 | 58 | ||
59 | if (mce_cmci_disabled || mce_ignore_ce) | ||
60 | return 0; | ||
61 | |||
111 | /* | 62 | /* |
112 | * Vendor check is not strictly needed, but the initial | 63 | * Vendor check is not strictly needed, but the initial |
113 | * initialization is vendor keyed and this | 64 | * initialization is vendor keyed and this |
@@ -131,7 +82,7 @@ static int cmci_supported(int *banks) | |||
131 | static void intel_threshold_interrupt(void) | 82 | static void intel_threshold_interrupt(void) |
132 | { | 83 | { |
133 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 84 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); |
134 | mce_notify_user(); | 85 | mce_notify_irq(); |
135 | } | 86 | } |
136 | 87 | ||
137 | static void print_update(char *type, int *hdr, int num) | 88 | static void print_update(char *type, int *hdr, int num) |
@@ -247,7 +198,7 @@ void cmci_rediscover(int dying) | |||
247 | return; | 198 | return; |
248 | cpumask_copy(old, &current->cpus_allowed); | 199 | cpumask_copy(old, &current->cpus_allowed); |

249 | 200 | ||
250 | for_each_online_cpu (cpu) { | 201 | for_each_online_cpu(cpu) { |
251 | if (cpu == dying) | 202 | if (cpu == dying) |
252 | continue; | 203 | continue; |
253 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) | 204 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) |
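
Editor's note: cmci_supported() above now bails out early when the new mce_cmci_disabled or mce_ignore_ce switches are set; further down (outside this hunk) it keys off a capability bit in IA32_MCG_CAP. A userspace probe of that capability register through the msr driver might look like the following. It needs root and a loaded msr module, and the MSR number and bit layout (bank count in bits 7:0, CMCI support in bit 10) are written from memory of the architecture manual rather than taken from this patch, so double-check before relying on the output:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MCG_CAP 0x179		/* assumed MSR number */
#define MCG_BANKCNT_MASK 0xffULL	/* bits 7:0 - number of MC banks */
#define MCG_CMCI_P       (1ULL << 10)	/* assumed: CMCI supported */

int main(void)
{
	uint64_t cap;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &cap, sizeof(cap), MSR_IA32_MCG_CAP) != sizeof(cap)) {
		perror("msr read");
		return 1;
	}
	printf("%llu banks, CMCI %ssupported\n",
	       (unsigned long long)(cap & MCG_BANKCNT_MASK),
	       (cap & MCG_CMCI_P) ? "" : "not ");
	close(fd);
	return 0;
}
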
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index a74af128efc9..70b710420f74 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c | |||
@@ -6,15 +6,14 @@ | |||
6 | * This file contains routines to check for non-fatal MCEs every 15s | 6 | * This file contains routines to check for non-fatal MCEs every 15s |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/jiffies.h> | ||
14 | #include <linux/workqueue.h> | ||
15 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
16 | #include <linux/smp.h> | 10 | #include <linux/workqueue.h> |
11 | #include <linux/jiffies.h> | ||
12 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/types.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/smp.h> | ||
18 | 17 | ||
19 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
20 | #include <asm/system.h> | 19 | #include <asm/system.h> |
@@ -22,9 +21,9 @@ | |||
22 | 21 | ||
23 | #include "mce.h" | 22 | #include "mce.h" |
24 | 23 | ||
25 | static int firstbank; | 24 | static int firstbank; |
26 | 25 | ||
27 | #define MCE_RATE 15*HZ /* timer rate is 15s */ | 26 | #define MCE_RATE (15*HZ) /* timer rate is 15s */ |
28 | 27 | ||
29 | static void mce_checkregs(void *info) | 28 | static void mce_checkregs(void *info) |
30 | { | 29 | { |
@@ -34,23 +33,24 @@ static void mce_checkregs(void *info) | |||
34 | for (i = firstbank; i < nr_mce_banks; i++) { | 33 | for (i = firstbank; i < nr_mce_banks; i++) { |
35 | rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); | 34 | rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); |
36 | 35 | ||
37 | if (high & (1<<31)) { | 36 | if (!(high & (1<<31))) |
38 | printk(KERN_INFO "MCE: The hardware reports a non " | 37 | continue; |
39 | "fatal, correctable incident occurred on " | 38 | |
40 | "CPU %d.\n", | 39 | printk(KERN_INFO "MCE: The hardware reports a non fatal, " |
40 | "correctable incident occurred on CPU %d.\n", | ||
41 | smp_processor_id()); | 41 | smp_processor_id()); |
42 | printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low); | 42 | |
43 | 43 | printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low); | |
44 | /* | 44 | |
45 | * Scrub the error so we don't pick it up in MCE_RATE | 45 | /* |
46 | * seconds time. | 46 | * Scrub the error so we don't pick it up in MCE_RATE |
47 | */ | 47 | * seconds time: |
48 | wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | 48 | */ |
49 | 49 | wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | |
50 | /* Serialize */ | 50 | |
51 | wmb(); | 51 | /* Serialize: */ |
52 | add_taint(TAINT_MACHINE_CHECK); | 52 | wmb(); |
53 | } | 53 | add_taint(TAINT_MACHINE_CHECK); |
54 | } | 54 | } |
55 | } | 55 | } |
56 | 56 | ||
@@ -77,16 +77,17 @@ static int __init init_nonfatal_mce_checker(void) | |||
77 | 77 | ||
78 | /* Some Athlons misbehave when we frob bank 0 */ | 78 | /* Some Athlons misbehave when we frob bank 0 */ |
79 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | 79 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
80 | boot_cpu_data.x86 == 6) | 80 | boot_cpu_data.x86 == 6) |
81 | firstbank = 1; | 81 | firstbank = 1; |
82 | else | 82 | else |
83 | firstbank = 0; | 83 | firstbank = 0; |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * Check for non-fatal errors every MCE_RATE s | 86 | * Check for non-fatal errors every MCE_RATE s |
87 | */ | 87 | */ |
88 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); | 88 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); |
89 | printk(KERN_INFO "Machine check exception polling timer started.\n"); | 89 | printk(KERN_INFO "Machine check exception polling timer started.\n"); |
90 | |||
90 | return 0; | 91 | return 0; |
91 | } | 92 | } |
92 | module_init(init_nonfatal_mce_checker); | 93 | module_init(init_nonfatal_mce_checker); |
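
Editor's note: the rewritten mce_checkregs() above inverts the test so the common case (no valid error, bit 31 of the high status word clear) simply continues, and only real events are logged, scrubbed and followed by a barrier. A compact userspace model of that poll-log-clear loop over simulated bank status words, assuming the same "bit 31 of the high half means valid" convention:

#include <stdint.h>
#include <stdio.h>

#define MCI_STATUS_VAL (1u << 31)	/* valid bit in the high 32 bits of MCi_STATUS */

/* Simulated bank status registers (high:low packed into 64 bits). */
static uint64_t bank_status[4] = {
	0, 0xB200000000000000ULL, 0, 0x8000000000001234ULL,
};

static void poll_banks(void)
{
	for (unsigned i = 0; i < 4; i++) {
		uint32_t high = bank_status[i] >> 32;
		uint32_t low  = (uint32_t)bank_status[i];

		if (!(high & MCI_STATUS_VAL))
			continue;		/* nothing logged in this bank */

		printf("bank %u: %08x%08x\n", i, high, low);
		bank_status[i] = 0;		/* scrub so the next poll skips it */
	}
}

int main(void)
{
	poll_banks();	/* in the driver this runs every MCE_RATE (15 s) */
	return 0;
}
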
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index f53bdcbaf382..82cee108a2d3 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c | |||
@@ -2,18 +2,17 @@ | |||
2 | * P4 specific Machine Check Exception Reporting | 2 | * P4 specific Machine Check Exception Reporting |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/init.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/smp.h> | 9 | #include <linux/smp.h> |
10 | 10 | ||
11 | #include <asm/therm_throt.h> | ||
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/system.h> | 13 | #include <asm/system.h> |
13 | #include <asm/msr.h> | ||
14 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
15 | 15 | #include <asm/msr.h> | |
16 | #include <asm/therm_throt.h> | ||
17 | 16 | ||
18 | #include "mce.h" | 17 | #include "mce.h" |
19 | 18 | ||
@@ -36,6 +35,7 @@ static int mce_num_extended_msrs; | |||
36 | 35 | ||
37 | 36 | ||
38 | #ifdef CONFIG_X86_MCE_P4THERMAL | 37 | #ifdef CONFIG_X86_MCE_P4THERMAL |
38 | |||
39 | static void unexpected_thermal_interrupt(struct pt_regs *regs) | 39 | static void unexpected_thermal_interrupt(struct pt_regs *regs) |
40 | { | 40 | { |
41 | printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n", | 41 | printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n", |
@@ -43,7 +43,7 @@ static void unexpected_thermal_interrupt(struct pt_regs *regs) | |||
43 | add_taint(TAINT_MACHINE_CHECK); | 43 | add_taint(TAINT_MACHINE_CHECK); |
44 | } | 44 | } |
45 | 45 | ||
46 | /* P4/Xeon Thermal transition interrupt handler */ | 46 | /* P4/Xeon Thermal transition interrupt handler: */ |
47 | static void intel_thermal_interrupt(struct pt_regs *regs) | 47 | static void intel_thermal_interrupt(struct pt_regs *regs) |
48 | { | 48 | { |
49 | __u64 msr_val; | 49 | __u64 msr_val; |
@@ -51,11 +51,12 @@ static void intel_thermal_interrupt(struct pt_regs *regs) | |||
51 | ack_APIC_irq(); | 51 | ack_APIC_irq(); |
52 | 52 | ||
53 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 53 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
54 | therm_throt_process(msr_val & 0x1); | 54 | therm_throt_process(msr_val & THERM_STATUS_PROCHOT); |
55 | } | 55 | } |
56 | 56 | ||
57 | /* Thermal interrupt handler for this CPU setup */ | 57 | /* Thermal interrupt handler for this CPU setup: */ |
58 | static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt; | 58 | static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = |
59 | unexpected_thermal_interrupt; | ||
59 | 60 | ||
60 | void smp_thermal_interrupt(struct pt_regs *regs) | 61 | void smp_thermal_interrupt(struct pt_regs *regs) |
61 | { | 62 | { |
@@ -65,67 +66,15 @@ void smp_thermal_interrupt(struct pt_regs *regs) | |||
65 | irq_exit(); | 66 | irq_exit(); |
66 | } | 67 | } |
67 | 68 | ||
68 | /* P4/Xeon Thermal regulation detect and init */ | 69 | void intel_set_thermal_handler(void) |
69 | static void intel_init_thermal(struct cpuinfo_x86 *c) | ||
70 | { | 70 | { |
71 | u32 l, h; | ||
72 | unsigned int cpu = smp_processor_id(); | ||
73 | |||
74 | /* Thermal monitoring */ | ||
75 | if (!cpu_has(c, X86_FEATURE_ACPI)) | ||
76 | return; /* -ENODEV */ | ||
77 | |||
78 | /* Clock modulation */ | ||
79 | if (!cpu_has(c, X86_FEATURE_ACC)) | ||
80 | return; /* -ENODEV */ | ||
81 | |||
82 | /* first check if its enabled already, in which case there might | ||
83 | * be some SMM goo which handles it, so we can't even put a handler | ||
84 | * since it might be delivered via SMI already -zwanem. | ||
85 | */ | ||
86 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
87 | h = apic_read(APIC_LVTTHMR); | ||
88 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | ||
89 | printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", | ||
90 | cpu); | ||
91 | return; /* -EBUSY */ | ||
92 | } | ||
93 | |||
94 | /* check whether a vector already exists, temporarily masked? */ | ||
95 | if (h & APIC_VECTOR_MASK) { | ||
96 | printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already " | ||
97 | "installed\n", | ||
98 | cpu, (h & APIC_VECTOR_MASK)); | ||
99 | return; /* -EBUSY */ | ||
100 | } | ||
101 | |||
102 | /* The temperature transition interrupt handler setup */ | ||
103 | h = THERMAL_APIC_VECTOR; /* our delivery vector */ | ||
104 | h |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ | ||
105 | apic_write(APIC_LVTTHMR, h); | ||
106 | |||
107 | rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); | ||
108 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h); | ||
109 | |||
110 | /* ok we're good to go... */ | ||
111 | vendor_thermal_interrupt = intel_thermal_interrupt; | 71 | vendor_thermal_interrupt = intel_thermal_interrupt; |
112 | |||
113 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
114 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); | ||
115 | |||
116 | l = apic_read(APIC_LVTTHMR); | ||
117 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | ||
118 | printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu); | ||
119 | |||
120 | /* enable thermal throttle processing */ | ||
121 | atomic_set(&therm_throt_en, 1); | ||
122 | return; | ||
123 | } | 72 | } |
124 | #endif /* CONFIG_X86_MCE_P4THERMAL */ | ||
125 | 73 | ||
74 | #endif /* CONFIG_X86_MCE_P4THERMAL */ | ||
126 | 75 | ||
127 | /* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */ | 76 | /* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */ |
128 | static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r) | 77 | static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r) |
129 | { | 78 | { |
130 | u32 h; | 79 | u32 h; |
131 | 80 | ||
@@ -143,9 +92,9 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r) | |||
143 | 92 | ||
144 | static void intel_machine_check(struct pt_regs *regs, long error_code) | 93 | static void intel_machine_check(struct pt_regs *regs, long error_code) |
145 | { | 94 | { |
146 | int recover = 1; | ||
147 | u32 alow, ahigh, high, low; | 95 | u32 alow, ahigh, high, low; |
148 | u32 mcgstl, mcgsth; | 96 | u32 mcgstl, mcgsth; |
97 | int recover = 1; | ||
149 | int i; | 98 | int i; |
150 | 99 | ||
151 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 100 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
@@ -157,7 +106,9 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
157 | 106 | ||
158 | if (mce_num_extended_msrs > 0) { | 107 | if (mce_num_extended_msrs > 0) { |
159 | struct intel_mce_extended_msrs dbg; | 108 | struct intel_mce_extended_msrs dbg; |
109 | |||
160 | intel_get_extended_msrs(&dbg); | 110 | intel_get_extended_msrs(&dbg); |
111 | |||
161 | printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n" | 112 | printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n" |
162 | "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n" | 113 | "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n" |
163 | "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n", | 114 | "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n", |
@@ -171,6 +122,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
171 | if (high & (1<<31)) { | 122 | if (high & (1<<31)) { |
172 | char misc[20]; | 123 | char misc[20]; |
173 | char addr[24]; | 124 | char addr[24]; |
125 | |||
174 | misc[0] = addr[0] = '\0'; | 126 | misc[0] = addr[0] = '\0'; |
175 | if (high & (1<<29)) | 127 | if (high & (1<<29)) |
176 | recover |= 1; | 128 | recover |= 1; |
@@ -196,6 +148,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
196 | panic("Unable to continue"); | 148 | panic("Unable to continue"); |
197 | 149 | ||
198 | printk(KERN_EMERG "Attempting to continue.\n"); | 150 | printk(KERN_EMERG "Attempting to continue.\n"); |
151 | |||
199 | /* | 152 | /* |
200 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not | 153 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not |
201 | * recoverable/continuable.This will allow BIOS to look at the MSRs | 154 | * recoverable/continuable.This will allow BIOS to look at the MSRs |
@@ -217,7 +170,6 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
217 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 170 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
218 | } | 171 | } |
219 | 172 | ||
220 | |||
221 | void intel_p4_mcheck_init(struct cpuinfo_x86 *c) | 173 | void intel_p4_mcheck_init(struct cpuinfo_x86 *c) |
222 | { | 174 | { |
223 | u32 l, h; | 175 | u32 l, h; |
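
Editor's note: the p4.c change above drops the open-coded thermal setup and instead exports intel_set_thermal_handler(), which just swaps a function pointer from a default "unexpected interrupt" handler to the real one. A minimal sketch of that dispatch pattern; the names below are illustrative, not the kernel's:

#include <stdio.h>

static void unexpected_thermal_interrupt(void)
{
	puts("Unexpected thermal interrupt!");
}

static void intel_thermal_interrupt(void)
{
	puts("processing thermal event");
}

/* Default handler; replaced once the real one is known to be usable. */
static void (*vendor_thermal_interrupt)(void) = unexpected_thermal_interrupt;

static void set_thermal_handler(void)
{
	vendor_thermal_interrupt = intel_thermal_interrupt;
}

int main(void)
{
	vendor_thermal_interrupt();	/* before init: complains */
	set_thermal_handler();
	vendor_thermal_interrupt();	/* after init: handles the event */
	return 0;
}

Keeping a safe default in the pointer means a stray interrupt before initialisation is reported instead of jumping through a NULL pointer — the same reason the kernel starts with unexpected_thermal_interrupt.
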
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index c9f77ea69edc..015f481ab1b0 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c | |||
@@ -2,11 +2,10 @@ | |||
2 | * P5 specific Machine Check Exception Reporting | 2 | * P5 specific Machine Check Exception Reporting |
3 | * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk> | 3 | * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk> |
4 | */ | 4 | */ |
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/init.h> | ||
10 | #include <linux/smp.h> | 9 | #include <linux/smp.h> |
11 | 10 | ||
12 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
@@ -15,39 +14,58 @@ | |||
15 | 14 | ||
16 | #include "mce.h" | 15 | #include "mce.h" |
17 | 16 | ||
18 | /* Machine check handler for Pentium class Intel */ | 17 | /* By default disabled */ |
18 | int mce_p5_enable; | ||
19 | |||
20 | /* Machine check handler for Pentium class Intel CPUs: */ | ||
19 | static void pentium_machine_check(struct pt_regs *regs, long error_code) | 21 | static void pentium_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 22 | { |
21 | u32 loaddr, hi, lotype; | 23 | u32 loaddr, hi, lotype; |
24 | |||
22 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); | 25 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); |
23 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); | 26 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); |
24 | printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); | 27 | |
25 | if (lotype&(1<<5)) | 28 | printk(KERN_EMERG |
26 | printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); | 29 | "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", |
30 | smp_processor_id(), loaddr, lotype); | ||
31 | |||
32 | if (lotype & (1<<5)) { | ||
33 | printk(KERN_EMERG | ||
34 | "CPU#%d: Possible thermal failure (CPU on fire ?).\n", | ||
35 | smp_processor_id()); | ||
36 | } | ||
37 | |||
27 | add_taint(TAINT_MACHINE_CHECK); | 38 | add_taint(TAINT_MACHINE_CHECK); |
28 | } | 39 | } |
29 | 40 | ||
30 | /* Set up machine check reporting for processors with Intel style MCE */ | 41 | /* Set up machine check reporting for processors with Intel style MCE: */ |
31 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c) | 42 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c) |
32 | { | 43 | { |
33 | u32 l, h; | 44 | u32 l, h; |
34 | 45 | ||
35 | /*Check for MCE support */ | 46 | /* Check for MCE support: */ |
36 | if (!cpu_has(c, X86_FEATURE_MCE)) | 47 | if (!cpu_has(c, X86_FEATURE_MCE)) |
37 | return; | 48 | return; |
38 | 49 | ||
39 | /* Default P5 to off as its often misconnected */ | 50 | #ifdef CONFIG_X86_OLD_MCE |
51 | /* Default P5 to off as its often misconnected: */ | ||
40 | if (mce_disabled != -1) | 52 | if (mce_disabled != -1) |
41 | return; | 53 | return; |
54 | #endif | ||
55 | |||
42 | machine_check_vector = pentium_machine_check; | 56 | machine_check_vector = pentium_machine_check; |
57 | /* Make sure the vector pointer is visible before we enable MCEs: */ | ||
43 | wmb(); | 58 | wmb(); |
44 | 59 | ||
45 | /* Read registers before enabling */ | 60 | /* Read registers before enabling: */ |
46 | rdmsr(MSR_IA32_P5_MC_ADDR, l, h); | 61 | rdmsr(MSR_IA32_P5_MC_ADDR, l, h); |
47 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); | 62 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); |
48 | printk(KERN_INFO "Intel old style machine check architecture supported.\n"); | 63 | printk(KERN_INFO |
64 | "Intel old style machine check architecture supported.\n"); | ||
49 | 65 | ||
50 | /* Enable MCE */ | 66 | /* Enable MCE: */ |
51 | set_in_cr4(X86_CR4_MCE); | 67 | set_in_cr4(X86_CR4_MCE); |
52 | printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); | 68 | printk(KERN_INFO |
69 | "Intel old style machine check reporting enabled on CPU#%d.\n", | ||
70 | smp_processor_id()); | ||
53 | } | 71 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c index 2ac52d7b434b..43c24e667457 100644 --- a/arch/x86/kernel/cpu/mcheck/p6.c +++ b/arch/x86/kernel/cpu/mcheck/p6.c | |||
@@ -2,11 +2,10 @@ | |||
2 | * P6 specific Machine Check Exception Reporting | 2 | * P6 specific Machine Check Exception Reporting |
3 | * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk> | 3 | * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk> |
4 | */ | 4 | */ |
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/init.h> | ||
10 | #include <linux/smp.h> | 9 | #include <linux/smp.h> |
11 | 10 | ||
12 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
@@ -18,9 +17,9 @@ | |||
18 | /* Machine Check Handler For PII/PIII */ | 17 | /* Machine Check Handler For PII/PIII */ |
19 | static void intel_machine_check(struct pt_regs *regs, long error_code) | 18 | static void intel_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 19 | { |
21 | int recover = 1; | ||
22 | u32 alow, ahigh, high, low; | 20 | u32 alow, ahigh, high, low; |
23 | u32 mcgstl, mcgsth; | 21 | u32 mcgstl, mcgsth; |
22 | int recover = 1; | ||
24 | int i; | 23 | int i; |
25 | 24 | ||
26 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 25 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
@@ -35,12 +34,16 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
35 | if (high & (1<<31)) { | 34 | if (high & (1<<31)) { |
36 | char misc[20]; | 35 | char misc[20]; |
37 | char addr[24]; | 36 | char addr[24]; |
38 | misc[0] = addr[0] = '\0'; | 37 | |
38 | misc[0] = '\0'; | ||
39 | addr[0] = '\0'; | ||
40 | |||
39 | if (high & (1<<29)) | 41 | if (high & (1<<29)) |
40 | recover |= 1; | 42 | recover |= 1; |
41 | if (high & (1<<25)) | 43 | if (high & (1<<25)) |
42 | recover |= 2; | 44 | recover |= 2; |
43 | high &= ~(1<<31); | 45 | high &= ~(1<<31); |
46 | |||
44 | if (high & (1<<27)) { | 47 | if (high & (1<<27)) { |
45 | rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); | 48 | rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh); |
46 | snprintf(misc, 20, "[%08x%08x]", ahigh, alow); | 49 | snprintf(misc, 20, "[%08x%08x]", ahigh, alow); |
@@ -49,6 +52,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
49 | rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); | 52 | rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh); |
50 | snprintf(addr, 24, " at %08x%08x", ahigh, alow); | 53 | snprintf(addr, 24, " at %08x%08x", ahigh, alow); |
51 | } | 54 | } |
55 | |||
52 | printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", | 56 | printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n", |
53 | smp_processor_id(), i, high, low, misc, addr); | 57 | smp_processor_id(), i, high, low, misc, addr); |
54 | } | 58 | } |
@@ -63,16 +67,17 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
63 | /* | 67 | /* |
64 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not | 68 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not |
65 | * recoverable/continuable.This will allow BIOS to look at the MSRs | 69 | * recoverable/continuable.This will allow BIOS to look at the MSRs |
66 | * for errors if the OS could not log the error. | 70 | * for errors if the OS could not log the error: |
67 | */ | 71 | */ |
68 | for (i = 0; i < nr_mce_banks; i++) { | 72 | for (i = 0; i < nr_mce_banks; i++) { |
69 | unsigned int msr; | 73 | unsigned int msr; |
74 | |||
70 | msr = MSR_IA32_MC0_STATUS+i*4; | 75 | msr = MSR_IA32_MC0_STATUS+i*4; |
71 | rdmsr(msr, low, high); | 76 | rdmsr(msr, low, high); |
72 | if (high & (1<<31)) { | 77 | if (high & (1<<31)) { |
73 | /* Clear it */ | 78 | /* Clear it: */ |
74 | wrmsr(msr, 0UL, 0UL); | 79 | wrmsr(msr, 0UL, 0UL); |
75 | /* Serialize */ | 80 | /* Serialize: */ |
76 | wmb(); | 81 | wmb(); |
77 | add_taint(TAINT_MACHINE_CHECK); | 82 | add_taint(TAINT_MACHINE_CHECK); |
78 | } | 83 | } |
@@ -81,7 +86,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code) | |||
81 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 86 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
82 | } | 87 | } |
83 | 88 | ||
84 | /* Set up machine check reporting for processors with Intel style MCE */ | 89 | /* Set up machine check reporting for processors with Intel style MCE: */ |
85 | void intel_p6_mcheck_init(struct cpuinfo_x86 *c) | 90 | void intel_p6_mcheck_init(struct cpuinfo_x86 *c) |
86 | { | 91 | { |
87 | u32 l, h; | 92 | u32 l, h; |
@@ -97,6 +102,7 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c) | |||
97 | 102 | ||
98 | /* Ok machine check is available */ | 103 | /* Ok machine check is available */ |
99 | machine_check_vector = intel_machine_check; | 104 | machine_check_vector = intel_machine_check; |
105 | /* Make sure the vector pointer is visible before we enable MCEs: */ | ||
100 | wmb(); | 106 | wmb(); |
101 | 107 | ||
102 | printk(KERN_INFO "Intel machine check architecture supported.\n"); | 108 | printk(KERN_INFO "Intel machine check architecture supported.\n"); |
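
Editor's note: intel_machine_check() above works on the split high/low halves of MCi_STATUS: bit 31 of the high word marks a valid record, bit 29 contributes to the "recover" state, bit 25 forces a panic, and bits 27/26 say whether the MISC and ADDR registers carry extra data. A small decoder for a 64-bit status value using that layout; the bit names are restated here from the architecture manual, so verify them before reuse:

#include <stdint.h>
#include <stdio.h>

/* Bit positions within the high 32 bits of IA32_MCi_STATUS (assumed layout). */
#define STATUS_VAL   (1u << 31)	/* valid */
#define STATUS_UC    (1u << 29)	/* uncorrected error */
#define STATUS_MISCV (1u << 27)	/* MCi_MISC register valid */
#define STATUS_ADDRV (1u << 26)	/* MCi_ADDR register valid */
#define STATUS_PCC   (1u << 25)	/* processor context corrupt */

static void decode_status(uint64_t status)
{
	uint32_t high = status >> 32;

	if (!(high & STATUS_VAL)) {
		puts("no valid error logged");
		return;
	}
	printf("uncorrected=%d misc_valid=%d addr_valid=%d context_corrupt=%d\n",
	       !!(high & STATUS_UC), !!(high & STATUS_MISCV),
	       !!(high & STATUS_ADDRV), !!(high & STATUS_PCC));
}

int main(void)
{
	decode_status(0xB600000000000175ULL);	/* made-up example value */
	return 0;
}
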
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index d5ae2243f0b9..7b1ae2e20ba5 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * | ||
3 | * Thermal throttle event support code (such as syslog messaging and rate | 2 | * Thermal throttle event support code (such as syslog messaging and rate |
4 | * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c). | 3 | * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c). |
4 | * | ||
5 | * This allows consistent reporting of CPU thermal throttle events. | 5 | * This allows consistent reporting of CPU thermal throttle events. |
6 | * | 6 | * |
7 | * Maintains a counter in /sys that keeps track of the number of thermal | 7 | * Maintains a counter in /sys that keeps track of the number of thermal |
@@ -13,43 +13,43 @@ | |||
13 | * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c. | 13 | * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c. |
14 | * Inspired by Ross Biro's and Al Borchers' counter code. | 14 | * Inspired by Ross Biro's and Al Borchers' counter code. |
15 | */ | 15 | */ |
16 | 16 | #include <linux/notifier.h> | |
17 | #include <linux/jiffies.h> | ||
17 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
18 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
19 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
20 | #include <asm/cpu.h> | 21 | |
21 | #include <linux/notifier.h> | ||
22 | #include <linux/jiffies.h> | ||
23 | #include <asm/therm_throt.h> | 22 | #include <asm/therm_throt.h> |
24 | 23 | ||
25 | /* How long to wait between reporting thermal events */ | 24 | /* How long to wait between reporting thermal events */ |
26 | #define CHECK_INTERVAL (300 * HZ) | 25 | #define CHECK_INTERVAL (300 * HZ) |
27 | 26 | ||
28 | static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; | 27 | static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; |
29 | static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); | 28 | static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); |
30 | atomic_t therm_throt_en = ATOMIC_INIT(0); | 29 | |
30 | atomic_t therm_throt_en = ATOMIC_INIT(0); | ||
31 | 31 | ||
32 | #ifdef CONFIG_SYSFS | 32 | #ifdef CONFIG_SYSFS |
33 | #define define_therm_throt_sysdev_one_ro(_name) \ | 33 | #define define_therm_throt_sysdev_one_ro(_name) \ |
34 | static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) | 34 | static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) |
35 | 35 | ||
36 | #define define_therm_throt_sysdev_show_func(name) \ | 36 | #define define_therm_throt_sysdev_show_func(name) \ |
37 | static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \ | 37 | static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \ |
38 | struct sysdev_attribute *attr, \ | 38 | struct sysdev_attribute *attr, \ |
39 | char *buf) \ | 39 | char *buf) \ |
40 | { \ | 40 | { \ |
41 | unsigned int cpu = dev->id; \ | 41 | unsigned int cpu = dev->id; \ |
42 | ssize_t ret; \ | 42 | ssize_t ret; \ |
43 | \ | 43 | \ |
44 | preempt_disable(); /* CPU hotplug */ \ | 44 | preempt_disable(); /* CPU hotplug */ \ |
45 | if (cpu_online(cpu)) \ | 45 | if (cpu_online(cpu)) \ |
46 | ret = sprintf(buf, "%lu\n", \ | 46 | ret = sprintf(buf, "%lu\n", \ |
47 | per_cpu(thermal_throttle_##name, cpu)); \ | 47 | per_cpu(thermal_throttle_##name, cpu)); \ |
48 | else \ | 48 | else \ |
49 | ret = 0; \ | 49 | ret = 0; \ |
50 | preempt_enable(); \ | 50 | preempt_enable(); \ |
51 | \ | 51 | \ |
52 | return ret; \ | 52 | return ret; \ |
53 | } | 53 | } |
54 | 54 | ||
55 | define_therm_throt_sysdev_show_func(count); | 55 | define_therm_throt_sysdev_show_func(count); |
@@ -61,8 +61,8 @@ static struct attribute *thermal_throttle_attrs[] = { | |||
61 | }; | 61 | }; |
62 | 62 | ||
63 | static struct attribute_group thermal_throttle_attr_group = { | 63 | static struct attribute_group thermal_throttle_attr_group = { |
64 | .attrs = thermal_throttle_attrs, | 64 | .attrs = thermal_throttle_attrs, |
65 | .name = "thermal_throttle" | 65 | .name = "thermal_throttle" |
66 | }; | 66 | }; |
67 | #endif /* CONFIG_SYSFS */ | 67 | #endif /* CONFIG_SYSFS */ |
68 | 68 | ||
@@ -110,10 +110,11 @@ int therm_throt_process(int curr) | |||
110 | } | 110 | } |
111 | 111 | ||
112 | #ifdef CONFIG_SYSFS | 112 | #ifdef CONFIG_SYSFS |
113 | /* Add/Remove thermal_throttle interface for CPU device */ | 113 | /* Add/Remove thermal_throttle interface for CPU device: */ |
114 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) | 114 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) |
115 | { | 115 | { |
116 | return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); | 116 | return sysfs_create_group(&sys_dev->kobj, |
117 | &thermal_throttle_attr_group); | ||
117 | } | 118 | } |
118 | 119 | ||
119 | static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) | 120 | static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) |
@@ -121,19 +122,21 @@ static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) | |||
121 | sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); | 122 | sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); |
122 | } | 123 | } |
123 | 124 | ||
124 | /* Mutex protecting device creation against CPU hotplug */ | 125 | /* Mutex protecting device creation against CPU hotplug: */ |
125 | static DEFINE_MUTEX(therm_cpu_lock); | 126 | static DEFINE_MUTEX(therm_cpu_lock); |
126 | 127 | ||
127 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 128 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
128 | static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, | 129 | static __cpuinit int |
129 | unsigned long action, | 130 | thermal_throttle_cpu_callback(struct notifier_block *nfb, |
130 | void *hcpu) | 131 | unsigned long action, |
132 | void *hcpu) | ||
131 | { | 133 | { |
132 | unsigned int cpu = (unsigned long)hcpu; | 134 | unsigned int cpu = (unsigned long)hcpu; |
133 | struct sys_device *sys_dev; | 135 | struct sys_device *sys_dev; |
134 | int err = 0; | 136 | int err = 0; |
135 | 137 | ||
136 | sys_dev = get_cpu_sysdev(cpu); | 138 | sys_dev = get_cpu_sysdev(cpu); |
139 | |||
137 | switch (action) { | 140 | switch (action) { |
138 | case CPU_UP_PREPARE: | 141 | case CPU_UP_PREPARE: |
139 | case CPU_UP_PREPARE_FROZEN: | 142 | case CPU_UP_PREPARE_FROZEN: |
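
The therm_throt.c hunks above generate one sysfs show routine per per-CPU throttle counter through a token-pasting macro, reading the counter with preemption disabled so the CPU cannot be hot-unplugged mid-read. A rough user-space sketch of that macro pattern follows; all names here are illustrative, not kernel symbols.

/*
 * Minimal sketch of the token-pasting pattern used by
 * define_therm_throt_sysdev_show_func(): one macro invocation stamps
 * out one accessor per counter. Names are illustrative only.
 */
#include <stdio.h>

static unsigned long counter_count;   /* stands in for per-CPU thermal_throttle_count */

#define DEFINE_SHOW_FUNC(name)                                          \
static int show_##name(char *buf, size_t bufsize)                       \
{                                                                       \
        /* the kernel wraps this read in preempt_disable()/enable() */  \
        return snprintf(buf, bufsize, "%lu\n", counter_##name);         \
}

DEFINE_SHOW_FUNC(count)         /* expands to show_count() */

int main(void)
{
        char buf[32];

        counter_count = 42;
        show_count(buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
}

Each invocation stamps out one accessor, which is why adding another counter in the kernel code only needs one more define_therm_throt_sysdev_show_func()/SYSDEV_ATTR pair.
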
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c index 23ee9e730f78..d746df2909c9 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
@@ -17,7 +17,7 @@ static void default_threshold_interrupt(void) | |||
17 | 17 | ||
18 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; | 18 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; |
19 | 19 | ||
20 | asmlinkage void mce_threshold_interrupt(void) | 20 | asmlinkage void smp_threshold_interrupt(void) |
21 | { | 21 | { |
22 | exit_idle(); | 22 | exit_idle(); |
23 | irq_enter(); | 23 | irq_enter(); |
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 2a043d89811d..81b02487090b 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c | |||
@@ -2,11 +2,10 @@ | |||
2 | * IDT Winchip specific Machine Check Exception Reporting | 2 | * IDT Winchip specific Machine Check Exception Reporting |
3 | * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk> | 3 | * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk> |
4 | */ | 4 | */ |
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/init.h> | ||
10 | 9 | ||
11 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
12 | #include <asm/system.h> | 11 | #include <asm/system.h> |
@@ -14,7 +13,7 @@ | |||
14 | 13 | ||
15 | #include "mce.h" | 14 | #include "mce.h" |
16 | 15 | ||
17 | /* Machine check handler for WinChip C6 */ | 16 | /* Machine check handler for WinChip C6: */ |
18 | static void winchip_machine_check(struct pt_regs *regs, long error_code) | 17 | static void winchip_machine_check(struct pt_regs *regs, long error_code) |
19 | { | 18 | { |
20 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); | 19 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); |
@@ -25,12 +24,18 @@ static void winchip_machine_check(struct pt_regs *regs, long error_code) | |||
25 | void winchip_mcheck_init(struct cpuinfo_x86 *c) | 24 | void winchip_mcheck_init(struct cpuinfo_x86 *c) |
26 | { | 25 | { |
27 | u32 lo, hi; | 26 | u32 lo, hi; |
27 | |||
28 | machine_check_vector = winchip_machine_check; | 28 | machine_check_vector = winchip_machine_check; |
29 | /* Make sure the vector pointer is visible before we enable MCEs: */ | ||
29 | wmb(); | 30 | wmb(); |
31 | |||
30 | rdmsr(MSR_IDT_FCR1, lo, hi); | 32 | rdmsr(MSR_IDT_FCR1, lo, hi); |
31 | lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */ | 33 | lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */ |
32 | lo &= ~(1<<4); /* Enable MCE */ | 34 | lo &= ~(1<<4); /* Enable MCE */ |
33 | wrmsr(MSR_IDT_FCR1, lo, hi); | 35 | wrmsr(MSR_IDT_FCR1, lo, hi); |
36 | |||
34 | set_in_cr4(X86_CR4_MCE); | 37 | set_in_cr4(X86_CR4_MCE); |
35 | printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n"); | 38 | |
39 | printk(KERN_INFO | ||
40 | "Winchip machine check reporting enabled on CPU#0.\n"); | ||
36 | } | 41 | } |
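
winchip_mcheck_init() above installs machine_check_vector, issues wmb(), and only then writes MSR_IDT_FCR1 and sets CR4.MCE, so an exception can never be delivered while the vector still points at the default handler. Below is a hedged user-space analogue of that publish-before-enable ordering, using C11 atomics in place of wmb(); the names are illustrative, not kernel code.

/*
 * Publish the handler pointer with release semantics before flipping
 * the "enabled" flag, so anything that observes enabled == true is
 * guaranteed to see the real handler rather than the default one.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static void default_handler(void) { puts("default handler"); }
static void real_handler(void)    { puts("real handler"); }

static _Atomic(void (*)(void)) handler = default_handler;
static atomic_bool enabled;

static void init_handler(void)
{
        /* corresponds to: machine_check_vector = winchip_machine_check; wmb(); */
        atomic_store_explicit(&handler, real_handler, memory_order_release);
        /* corresponds to: wrmsr(MSR_IDT_FCR1, ...); set_in_cr4(X86_CR4_MCE); */
        atomic_store_explicit(&enabled, true, memory_order_release);
}

static void maybe_dispatch(void)
{
        if (atomic_load_explicit(&enabled, memory_order_acquire))
                atomic_load_explicit(&handler, memory_order_acquire)();
}

int main(void)
{
        maybe_dispatch();       /* not enabled yet: nothing runs */
        init_handler();
        maybe_dispatch();       /* prints "real handler" */
        return 0;
}
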
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index a4742a340d8d..de74f0a3e0ed 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -963,6 +963,8 @@ END(\sym) | |||
963 | #ifdef CONFIG_SMP | 963 | #ifdef CONFIG_SMP |
964 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ | 964 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ |
965 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt | 965 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
966 | apicinterrupt REBOOT_VECTOR \ | ||
967 | reboot_interrupt smp_reboot_interrupt | ||
966 | #endif | 968 | #endif |
967 | 969 | ||
968 | #ifdef CONFIG_X86_UV | 970 | #ifdef CONFIG_X86_UV |
@@ -994,10 +996,15 @@ apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \ | |||
994 | #endif | 996 | #endif |
995 | 997 | ||
996 | apicinterrupt THRESHOLD_APIC_VECTOR \ | 998 | apicinterrupt THRESHOLD_APIC_VECTOR \ |
997 | threshold_interrupt mce_threshold_interrupt | 999 | threshold_interrupt smp_threshold_interrupt |
998 | apicinterrupt THERMAL_APIC_VECTOR \ | 1000 | apicinterrupt THERMAL_APIC_VECTOR \ |
999 | thermal_interrupt smp_thermal_interrupt | 1001 | thermal_interrupt smp_thermal_interrupt |
1000 | 1002 | ||
1003 | #ifdef CONFIG_X86_MCE | ||
1004 | apicinterrupt MCE_SELF_VECTOR \ | ||
1005 | mce_self_interrupt smp_mce_self_interrupt | ||
1006 | #endif | ||
1007 | |||
1001 | #ifdef CONFIG_SMP | 1008 | #ifdef CONFIG_SMP |
1002 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ | 1009 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ |
1003 | call_function_single_interrupt smp_call_function_single_interrupt | 1010 | call_function_single_interrupt smp_call_function_single_interrupt |
@@ -1379,7 +1386,7 @@ errorentry xen_stack_segment do_stack_segment | |||
1379 | errorentry general_protection do_general_protection | 1386 | errorentry general_protection do_general_protection |
1380 | errorentry page_fault do_page_fault | 1387 | errorentry page_fault do_page_fault |
1381 | #ifdef CONFIG_X86_MCE | 1388 | #ifdef CONFIG_X86_MCE |
1382 | paranoidzeroentry machine_check do_machine_check | 1389 | paranoidzeroentry machine_check *machine_check_vector(%rip) |
1383 | #endif | 1390 | #endif |
1384 | 1391 | ||
1385 | /* | 1392 | /* |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 38287b5f116e..b0cdde6932f5 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/io_apic.h> | 12 | #include <asm/io_apic.h> |
13 | #include <asm/irq.h> | 13 | #include <asm/irq.h> |
14 | #include <asm/idle.h> | 14 | #include <asm/idle.h> |
15 | #include <asm/mce.h> | ||
15 | #include <asm/hw_irq.h> | 16 | #include <asm/hw_irq.h> |
16 | 17 | ||
17 | atomic_t irq_err_count; | 18 | atomic_t irq_err_count; |
@@ -96,13 +97,23 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
96 | for_each_online_cpu(j) | 97 | for_each_online_cpu(j) |
97 | seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); | 98 | seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); |
98 | seq_printf(p, " Thermal event interrupts\n"); | 99 | seq_printf(p, " Thermal event interrupts\n"); |
99 | # ifdef CONFIG_X86_64 | 100 | # ifdef CONFIG_X86_MCE_THRESHOLD |
100 | seq_printf(p, "%*s: ", prec, "THR"); | 101 | seq_printf(p, "%*s: ", prec, "THR"); |
101 | for_each_online_cpu(j) | 102 | for_each_online_cpu(j) |
102 | seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); | 103 | seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); |
103 | seq_printf(p, " Threshold APIC interrupts\n"); | 104 | seq_printf(p, " Threshold APIC interrupts\n"); |
104 | # endif | 105 | # endif |
105 | #endif | 106 | #endif |
107 | #ifdef CONFIG_X86_NEW_MCE | ||
108 | seq_printf(p, "%*s: ", prec, "MCE"); | ||
109 | for_each_online_cpu(j) | ||
110 | seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); | ||
111 | seq_printf(p, " Machine check exceptions\n"); | ||
112 | seq_printf(p, "%*s: ", prec, "MCP"); | ||
113 | for_each_online_cpu(j) | ||
114 | seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); | ||
115 | seq_printf(p, " Machine check polls\n"); | ||
116 | #endif | ||
106 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); | 117 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); |
107 | #if defined(CONFIG_X86_IO_APIC) | 118 | #if defined(CONFIG_X86_IO_APIC) |
108 | seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); | 119 | seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); |
@@ -185,10 +196,14 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
185 | #endif | 196 | #endif |
186 | #ifdef CONFIG_X86_MCE | 197 | #ifdef CONFIG_X86_MCE |
187 | sum += irq_stats(cpu)->irq_thermal_count; | 198 | sum += irq_stats(cpu)->irq_thermal_count; |
188 | # ifdef CONFIG_X86_64 | 199 | # ifdef CONFIG_X86_MCE_THRESHOLD |
189 | sum += irq_stats(cpu)->irq_threshold_count; | 200 | sum += irq_stats(cpu)->irq_threshold_count; |
190 | # endif | 201 | # endif |
191 | #endif | 202 | #endif |
203 | #ifdef CONFIG_X86_NEW_MCE | ||
204 | sum += per_cpu(mce_exception_count, cpu); | ||
205 | sum += per_cpu(mce_poll_count, cpu); | ||
206 | #endif | ||
192 | return sum; | 207 | return sum; |
193 | } | 208 | } |
194 | 209 | ||
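
The irq.c hunks fold the new per-CPU mce_exception_count and mce_poll_count counters into both /proc/interrupts (the MCE and MCP rows) and arch_irq_stat_cpu(). A stand-alone sketch of the same keep-per-CPU-then-sum pattern, with a plain array standing in for DEFINE_PER_CPU/per_cpu():

/*
 * Per-CPU counters are kept separately and summed on demand, exactly
 * as arch_irq_stat_cpu() does; an array indexed by CPU id stands in
 * for the kernel's per-CPU variables.
 */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long mce_exception_count[NR_CPUS];
static unsigned long mce_poll_count[NR_CPUS];

static unsigned long irq_stat_cpu(unsigned int cpu)
{
        unsigned long sum = 0;

        sum += mce_exception_count[cpu];
        sum += mce_poll_count[cpu];
        return sum;
}

int main(void)
{
        unsigned long total = 0;
        unsigned int cpu;

        mce_exception_count[1] = 3;     /* pretend CPU1 took three MCEs */
        mce_poll_count[2] = 7;          /* and CPU2 polled seven times  */

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                total += irq_stat_cpu(cpu);

        printf("total machine-check events: %lu\n", total);
        return 0;
}
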
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 267c6624c77f..696f0e475c2d 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -173,6 +173,9 @@ static void __init smp_intr_init(void) | |||
173 | /* Low priority IPI to cleanup after moving an irq */ | 173 | /* Low priority IPI to cleanup after moving an irq */ |
174 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 174 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
175 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | 175 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); |
176 | |||
177 | /* IPI used for rebooting/stopping */ | ||
178 | alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt); | ||
176 | #endif | 179 | #endif |
177 | #endif /* CONFIG_SMP */ | 180 | #endif /* CONFIG_SMP */ |
178 | } | 181 | } |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 0a813b17b172..4c578751e94e 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -24,11 +24,11 @@ | |||
24 | #include <asm/ucontext.h> | 24 | #include <asm/ucontext.h> |
25 | #include <asm/i387.h> | 25 | #include <asm/i387.h> |
26 | #include <asm/vdso.h> | 26 | #include <asm/vdso.h> |
27 | #include <asm/mce.h> | ||
27 | 28 | ||
28 | #ifdef CONFIG_X86_64 | 29 | #ifdef CONFIG_X86_64 |
29 | #include <asm/proto.h> | 30 | #include <asm/proto.h> |
30 | #include <asm/ia32_unistd.h> | 31 | #include <asm/ia32_unistd.h> |
31 | #include <asm/mce.h> | ||
32 | #endif /* CONFIG_X86_64 */ | 32 | #endif /* CONFIG_X86_64 */ |
33 | 33 | ||
34 | #include <asm/syscall.h> | 34 | #include <asm/syscall.h> |
@@ -856,10 +856,10 @@ static void do_signal(struct pt_regs *regs) | |||
856 | void | 856 | void |
857 | do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | 857 | do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) |
858 | { | 858 | { |
859 | #if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) | 859 | #ifdef CONFIG_X86_NEW_MCE |
860 | /* notify userspace of pending MCEs */ | 860 | /* notify userspace of pending MCEs */ |
861 | if (thread_info_flags & _TIF_MCE_NOTIFY) | 861 | if (thread_info_flags & _TIF_MCE_NOTIFY) |
862 | mce_notify_user(); | 862 | mce_notify_process(); |
863 | #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ | 863 | #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ |
864 | 864 | ||
865 | /* deal with pending signal delivery */ | 865 | /* deal with pending signal delivery */ |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 28f5fb495a66..ec1de97600e7 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -150,14 +150,40 @@ void native_send_call_func_ipi(const struct cpumask *mask) | |||
150 | * this function calls the 'stop' function on all other CPUs in the system. | 150 | * this function calls the 'stop' function on all other CPUs in the system. |
151 | */ | 151 | */ |
152 | 152 | ||
153 | asmlinkage void smp_reboot_interrupt(void) | ||
154 | { | ||
155 | ack_APIC_irq(); | ||
156 | irq_enter(); | ||
157 | stop_this_cpu(NULL); | ||
158 | irq_exit(); | ||
159 | } | ||
160 | |||
153 | static void native_smp_send_stop(void) | 161 | static void native_smp_send_stop(void) |
154 | { | 162 | { |
155 | unsigned long flags; | 163 | unsigned long flags; |
164 | unsigned long wait; | ||
156 | 165 | ||
157 | if (reboot_force) | 166 | if (reboot_force) |
158 | return; | 167 | return; |
159 | 168 | ||
160 | smp_call_function(stop_this_cpu, NULL, 0); | 169 | /* |
170 | * Use an own vector here because smp_call_function | ||
171 | * does lots of things not suitable in a panic situation. | ||
172 | * On most systems we could also use an NMI here, | ||
173 | * but there are a few systems around where NMI | ||
174 | * is problematic so stay with an non NMI for now | ||
175 | * (this implies we cannot stop CPUs spinning with irq off | ||
176 | * currently) | ||
177 | */ | ||
178 | if (num_online_cpus() > 1) { | ||
179 | apic->send_IPI_allbutself(REBOOT_VECTOR); | ||
180 | |||
181 | /* Don't wait longer than a second */ | ||
182 | wait = USEC_PER_SEC; | ||
183 | while (num_online_cpus() > 1 && wait--) | ||
184 | udelay(1); | ||
185 | } | ||
186 | |||
161 | local_irq_save(flags); | 187 | local_irq_save(flags); |
162 | disable_local_APIC(); | 188 | disable_local_APIC(); |
163 | local_irq_restore(flags); | 189 | local_irq_restore(flags); |
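
With the REBOOT_VECTOR IPI in place, native_smp_send_stop() waits at most one second, in one-microsecond steps, for the other CPUs to leave the online mask before disabling the local APIC. Here is a hedged user-space sketch of that bounded-wait idiom, with a worker thread standing in for the other CPUs (compile with -pthread; all names are illustrative).

/*
 * Poll a condition in small steps, but never wait longer than a fixed
 * budget -- the same shape as the wait loop in native_smp_send_stop().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define USEC_PER_SEC 1000000UL

static atomic_int online_cpus = 2;

static void *other_cpu(void *arg)
{
        (void)arg;
        usleep(1000);                           /* simulate shutdown latency */
        atomic_fetch_sub(&online_cpus, 1);      /* "go offline"              */
        return NULL;
}

int main(void)
{
        pthread_t t;
        unsigned long wait = USEC_PER_SEC;      /* don't wait longer than 1s */

        if (pthread_create(&t, NULL, other_cpu, NULL))
                return 1;

        while (atomic_load(&online_cpus) > 1 && wait--)
                usleep(1);

        printf("online CPUs after stop: %d\n", atomic_load(&online_cpus));
        pthread_join(t, NULL);
        return 0;
}

The kernel busy-waits with udelay(1) rather than sleeping because this path can run in panic context, where sleeping is not an option.
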
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 07d60c870ce2..1e1e27b7d438 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -798,15 +798,15 @@ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) | |||
798 | 798 | ||
799 | return new_kesp; | 799 | return new_kesp; |
800 | } | 800 | } |
801 | #else | 801 | #endif |
802 | |||
802 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) | 803 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) |
803 | { | 804 | { |
804 | } | 805 | } |
805 | 806 | ||
806 | asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void) | 807 | asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) |
807 | { | 808 | { |
808 | } | 809 | } |
809 | #endif | ||
810 | 810 | ||
811 | /* | 811 | /* |
812 | * 'math_state_restore()' saves the current math information in the | 812 | * 'math_state_restore()' saves the current math information in the |
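
The traps.c change keeps the weak smp_thermal_interrupt()/smp_threshold_interrupt() stubs in every build; when the real MCE handlers are compiled in, their strong definitions win at link time. A minimal GCC-style example of that weak-default pattern, with a hypothetical handler in a single translation unit:

/*
 * The weak definition is the default; a strong definition elsewhere in
 * the link silently replaces it. Compiled alone, the weak stub runs.
 */
#include <stdio.h>

void __attribute__((weak)) smp_thermal_interrupt(void)
{
        /* default: nothing to do when no thermal handling is built in */
        puts("weak default: thermal interrupt ignored");
}

/*
 * A second translation unit providing
 *     void smp_thermal_interrupt(void) { ... }
 * without the weak attribute would override this stub at link time.
 */

int main(void)
{
        smp_thermal_interrupt();
        return 0;
}
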
diff --git a/crypto/Kconfig b/crypto/Kconfig index 74d0e622a515..4dfdd03e708f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -241,6 +241,11 @@ config CRYPTO_XTS | |||
241 | key size 256, 384 or 512 bits. This implementation currently | 241 | key size 256, 384 or 512 bits. This implementation currently |
242 | can't handle a sectorsize which is not a multiple of 16 bytes. | 242 | can't handle a sectorsize which is not a multiple of 16 bytes. |
243 | 243 | ||
244 | config CRYPTO_FPU | ||
245 | tristate | ||
246 | select CRYPTO_BLKCIPHER | ||
247 | select CRYPTO_MANAGER | ||
248 | |||
244 | comment "Hash modes" | 249 | comment "Hash modes" |
245 | 250 | ||
246 | config CRYPTO_HMAC | 251 | config CRYPTO_HMAC |
@@ -486,6 +491,7 @@ config CRYPTO_AES_NI_INTEL | |||
486 | select CRYPTO_AES_X86_64 | 491 | select CRYPTO_AES_X86_64 |
487 | select CRYPTO_CRYPTD | 492 | select CRYPTO_CRYPTD |
488 | select CRYPTO_ALGAPI | 493 | select CRYPTO_ALGAPI |
494 | select CRYPTO_FPU | ||
489 | help | 495 | help |
490 | Use Intel AES-NI instructions for AES algorithm. | 496 | Use Intel AES-NI instructions for AES algorithm. |
491 | 497 | ||
@@ -505,6 +511,10 @@ config CRYPTO_AES_NI_INTEL | |||
505 | 511 | ||
506 | See <http://csrc.nist.gov/encryption/aes/> for more information. | 512 | See <http://csrc.nist.gov/encryption/aes/> for more information. |
507 | 513 | ||
514 | In addition to the AES cipher algorithm, acceleration for | ||
515 | several popular block cipher modes is also supported, | ||
516 | including ECB, CBC, CTR, LRW, PCBC and XTS. | ||
517 | |||
508 | config CRYPTO_ANUBIS | 518 | config CRYPTO_ANUBIS |
509 | tristate "Anubis cipher algorithm" | 519 | tristate "Anubis cipher algorithm" |
510 | select CRYPTO_ALGAPI | 520 | select CRYPTO_ALGAPI |
diff --git a/crypto/algboss.c b/crypto/algboss.c index 6906f92aeac0..9908dd830c26 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
@@ -280,29 +280,13 @@ static struct notifier_block cryptomgr_notifier = { | |||
280 | 280 | ||
281 | static int __init cryptomgr_init(void) | 281 | static int __init cryptomgr_init(void) |
282 | { | 282 | { |
283 | int err; | 283 | return crypto_register_notifier(&cryptomgr_notifier); |
284 | |||
285 | err = testmgr_init(); | ||
286 | if (err) | ||
287 | return err; | ||
288 | |||
289 | err = crypto_register_notifier(&cryptomgr_notifier); | ||
290 | if (err) | ||
291 | goto free_testmgr; | ||
292 | |||
293 | return 0; | ||
294 | |||
295 | free_testmgr: | ||
296 | testmgr_exit(); | ||
297 | return err; | ||
298 | } | 284 | } |
299 | 285 | ||
300 | static void __exit cryptomgr_exit(void) | 286 | static void __exit cryptomgr_exit(void) |
301 | { | 287 | { |
302 | int err = crypto_unregister_notifier(&cryptomgr_notifier); | 288 | int err = crypto_unregister_notifier(&cryptomgr_notifier); |
303 | BUG_ON(err); | 289 | BUG_ON(err); |
304 | |||
305 | testmgr_exit(); | ||
306 | } | 290 | } |
307 | 291 | ||
308 | subsys_initcall(cryptomgr_init); | 292 | subsys_initcall(cryptomgr_init); |
diff --git a/crypto/api.c b/crypto/api.c index fd2545decb28..d5944f92b416 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
@@ -217,14 +217,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) | |||
217 | 217 | ||
218 | alg = crypto_alg_lookup(name, type, mask); | 218 | alg = crypto_alg_lookup(name, type, mask); |
219 | if (!alg) { | 219 | if (!alg) { |
220 | char tmp[CRYPTO_MAX_ALG_NAME]; | 220 | request_module("%s", name); |
221 | |||
222 | request_module(name); | ||
223 | 221 | ||
224 | if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & | 222 | if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & |
225 | CRYPTO_ALG_NEED_FALLBACK) && | 223 | CRYPTO_ALG_NEED_FALLBACK)) |
226 | snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp)) | 224 | request_module("%s-all", name); |
227 | request_module(tmp); | ||
228 | 225 | ||
229 | alg = crypto_alg_lookup(name, type, mask); | 226 | alg = crypto_alg_lookup(name, type, mask); |
230 | } | 227 | } |
@@ -580,20 +577,17 @@ EXPORT_SYMBOL_GPL(crypto_alloc_tfm); | |||
580 | void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) | 577 | void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) |
581 | { | 578 | { |
582 | struct crypto_alg *alg; | 579 | struct crypto_alg *alg; |
583 | int size; | ||
584 | 580 | ||
585 | if (unlikely(!mem)) | 581 | if (unlikely(!mem)) |
586 | return; | 582 | return; |
587 | 583 | ||
588 | alg = tfm->__crt_alg; | 584 | alg = tfm->__crt_alg; |
589 | size = ksize(mem); | ||
590 | 585 | ||
591 | if (!tfm->exit && alg->cra_exit) | 586 | if (!tfm->exit && alg->cra_exit) |
592 | alg->cra_exit(tfm); | 587 | alg->cra_exit(tfm); |
593 | crypto_exit_ops(tfm); | 588 | crypto_exit_ops(tfm); |
594 | crypto_mod_put(alg); | 589 | crypto_mod_put(alg); |
595 | memset(mem, 0, size); | 590 | kzfree(mem); |
596 | kfree(mem); | ||
597 | } | 591 | } |
598 | EXPORT_SYMBOL_GPL(crypto_destroy_tfm); | 592 | EXPORT_SYMBOL_GPL(crypto_destroy_tfm); |
599 | 593 | ||
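
crypto_destroy_tfm() now uses kzfree() instead of the open-coded ksize()/memset()/kfree() sequence, so any key material held by the transform is cleared before the memory goes back to the allocator (the kernel helper derives the size via ksize(); the sketch below passes it explicitly). A user-space analogue of the clear-then-free helper follows; the volatile function pointer keeps the compiler from dropping the seemingly dead memset, which is the problem kzfree-style helpers exist to solve.

/*
 * Wipe a buffer that may hold key material before handing it back to
 * the allocator. All names are illustrative.
 */
#include <stdlib.h>
#include <string.h>

static void *(*const volatile memset_noopt)(void *, int, size_t) = memset;

static void zfree(void *mem, size_t size)
{
        if (!mem)
                return;
        memset_noopt(mem, 0, size);     /* cannot be elided as dead code */
        free(mem);
}

int main(void)
{
        char *key = malloc(32);

        if (!key)
                return 1;
        memcpy(key, "very secret key material 123456", 32);
        zfree(key, 32);                 /* cleared, then released */
        return 0;
}
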
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index d14b22658d7a..ae5fa99d5d36 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -586,20 +586,24 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, | |||
586 | u32 type, u32 mask) | 586 | u32 type, u32 mask) |
587 | { | 587 | { |
588 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | 588 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; |
589 | struct crypto_ablkcipher *tfm; | 589 | struct crypto_tfm *tfm; |
590 | 590 | ||
591 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | 591 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, |
592 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | 592 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) |
593 | return ERR_PTR(-EINVAL); | 593 | return ERR_PTR(-EINVAL); |
594 | tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask); | 594 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); |
595 | type |= CRYPTO_ALG_TYPE_BLKCIPHER; | ||
596 | mask &= ~CRYPTO_ALG_TYPE_MASK; | ||
597 | mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); | ||
598 | tfm = crypto_alloc_base(cryptd_alg_name, type, mask); | ||
595 | if (IS_ERR(tfm)) | 599 | if (IS_ERR(tfm)) |
596 | return ERR_CAST(tfm); | 600 | return ERR_CAST(tfm); |
597 | if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) { | 601 | if (tfm->__crt_alg->cra_module != THIS_MODULE) { |
598 | crypto_free_ablkcipher(tfm); | 602 | crypto_free_tfm(tfm); |
599 | return ERR_PTR(-EINVAL); | 603 | return ERR_PTR(-EINVAL); |
600 | } | 604 | } |
601 | 605 | ||
602 | return __cryptd_ablkcipher_cast(tfm); | 606 | return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); |
603 | } | 607 | } |
604 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); | 608 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); |
605 | 609 | ||
diff --git a/crypto/internal.h b/crypto/internal.h index fc76e1f37fc3..113579a82dff 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -121,9 +121,6 @@ int crypto_register_notifier(struct notifier_block *nb); | |||
121 | int crypto_unregister_notifier(struct notifier_block *nb); | 121 | int crypto_unregister_notifier(struct notifier_block *nb); |
122 | int crypto_probing_notify(unsigned long val, void *v); | 122 | int crypto_probing_notify(unsigned long val, void *v); |
123 | 123 | ||
124 | int __init testmgr_init(void); | ||
125 | void testmgr_exit(void); | ||
126 | |||
127 | static inline void crypto_alg_put(struct crypto_alg *alg) | 124 | static inline void crypto_alg_put(struct crypto_alg *alg) |
128 | { | 125 | { |
129 | if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) | 126 | if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) |
diff --git a/crypto/pcompress.c b/crypto/pcompress.c index ca9a4af91efe..bcadc03726b7 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | 27 | ||
28 | #include <crypto/compress.h> | 28 | #include <crypto/compress.h> |
29 | #include <crypto/internal/compress.h> | ||
29 | 30 | ||
30 | #include "internal.h" | 31 | #include "internal.h" |
31 | 32 | ||
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index c3c9124209a1..d59ba5079d14 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/timex.h> | 27 | #include <linux/timex.h> |
28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
29 | #include "tcrypt.h" | 29 | #include "tcrypt.h" |
30 | #include "internal.h" | ||
30 | 31 | ||
31 | /* | 32 | /* |
32 | * Need slab memory for testing (size in number of pages). | 33 | * Need slab memory for testing (size in number of pages). |
@@ -396,16 +397,16 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
396 | struct scatterlist sg[TVMEMSIZE]; | 397 | struct scatterlist sg[TVMEMSIZE]; |
397 | struct crypto_hash *tfm; | 398 | struct crypto_hash *tfm; |
398 | struct hash_desc desc; | 399 | struct hash_desc desc; |
399 | char output[1024]; | 400 | static char output[1024]; |
400 | int i; | 401 | int i; |
401 | int ret; | 402 | int ret; |
402 | 403 | ||
403 | printk("\ntesting speed of %s\n", algo); | 404 | printk(KERN_INFO "\ntesting speed of %s\n", algo); |
404 | 405 | ||
405 | tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); | 406 | tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); |
406 | 407 | ||
407 | if (IS_ERR(tfm)) { | 408 | if (IS_ERR(tfm)) { |
408 | printk("failed to load transform for %s: %ld\n", algo, | 409 | printk(KERN_ERR "failed to load transform for %s: %ld\n", algo, |
409 | PTR_ERR(tfm)); | 410 | PTR_ERR(tfm)); |
410 | return; | 411 | return; |
411 | } | 412 | } |
@@ -414,7 +415,7 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
414 | desc.flags = 0; | 415 | desc.flags = 0; |
415 | 416 | ||
416 | if (crypto_hash_digestsize(tfm) > sizeof(output)) { | 417 | if (crypto_hash_digestsize(tfm) > sizeof(output)) { |
417 | printk("digestsize(%u) > outputbuffer(%zu)\n", | 418 | printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n", |
418 | crypto_hash_digestsize(tfm), sizeof(output)); | 419 | crypto_hash_digestsize(tfm), sizeof(output)); |
419 | goto out; | 420 | goto out; |
420 | } | 421 | } |
@@ -427,12 +428,14 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
427 | 428 | ||
428 | for (i = 0; speed[i].blen != 0; i++) { | 429 | for (i = 0; speed[i].blen != 0; i++) { |
429 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | 430 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { |
430 | printk("template (%u) too big for tvmem (%lu)\n", | 431 | printk(KERN_ERR |
432 | "template (%u) too big for tvmem (%lu)\n", | ||
431 | speed[i].blen, TVMEMSIZE * PAGE_SIZE); | 433 | speed[i].blen, TVMEMSIZE * PAGE_SIZE); |
432 | goto out; | 434 | goto out; |
433 | } | 435 | } |
434 | 436 | ||
435 | printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", | 437 | printk(KERN_INFO "test%3u " |
438 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | ||
436 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | 439 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); |
437 | 440 | ||
438 | if (sec) | 441 | if (sec) |
@@ -443,7 +446,7 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
443 | speed[i].plen, output); | 446 | speed[i].plen, output); |
444 | 447 | ||
445 | if (ret) { | 448 | if (ret) { |
446 | printk("hashing failed ret=%d\n", ret); | 449 | printk(KERN_ERR "hashing failed ret=%d\n", ret); |
447 | break; | 450 | break; |
448 | } | 451 | } |
449 | } | 452 | } |
@@ -466,239 +469,255 @@ static void test_available(void) | |||
466 | 469 | ||
467 | static inline int tcrypt_test(const char *alg) | 470 | static inline int tcrypt_test(const char *alg) |
468 | { | 471 | { |
469 | return alg_test(alg, alg, 0, 0); | 472 | int ret; |
473 | |||
474 | ret = alg_test(alg, alg, 0, 0); | ||
475 | /* non-fips algs return -EINVAL in fips mode */ | ||
476 | if (fips_enabled && ret == -EINVAL) | ||
477 | ret = 0; | ||
478 | return ret; | ||
470 | } | 479 | } |
471 | 480 | ||
472 | static void do_test(int m) | 481 | static int do_test(int m) |
473 | { | 482 | { |
474 | int i; | 483 | int i; |
484 | int ret = 0; | ||
475 | 485 | ||
476 | switch (m) { | 486 | switch (m) { |
477 | case 0: | 487 | case 0: |
478 | for (i = 1; i < 200; i++) | 488 | for (i = 1; i < 200; i++) |
479 | do_test(i); | 489 | ret += do_test(i); |
480 | break; | 490 | break; |
481 | 491 | ||
482 | case 1: | 492 | case 1: |
483 | tcrypt_test("md5"); | 493 | ret += tcrypt_test("md5"); |
484 | break; | 494 | break; |
485 | 495 | ||
486 | case 2: | 496 | case 2: |
487 | tcrypt_test("sha1"); | 497 | ret += tcrypt_test("sha1"); |
488 | break; | 498 | break; |
489 | 499 | ||
490 | case 3: | 500 | case 3: |
491 | tcrypt_test("ecb(des)"); | 501 | ret += tcrypt_test("ecb(des)"); |
492 | tcrypt_test("cbc(des)"); | 502 | ret += tcrypt_test("cbc(des)"); |
493 | break; | 503 | break; |
494 | 504 | ||
495 | case 4: | 505 | case 4: |
496 | tcrypt_test("ecb(des3_ede)"); | 506 | ret += tcrypt_test("ecb(des3_ede)"); |
497 | tcrypt_test("cbc(des3_ede)"); | 507 | ret += tcrypt_test("cbc(des3_ede)"); |
498 | break; | 508 | break; |
499 | 509 | ||
500 | case 5: | 510 | case 5: |
501 | tcrypt_test("md4"); | 511 | ret += tcrypt_test("md4"); |
502 | break; | 512 | break; |
503 | 513 | ||
504 | case 6: | 514 | case 6: |
505 | tcrypt_test("sha256"); | 515 | ret += tcrypt_test("sha256"); |
506 | break; | 516 | break; |
507 | 517 | ||
508 | case 7: | 518 | case 7: |
509 | tcrypt_test("ecb(blowfish)"); | 519 | ret += tcrypt_test("ecb(blowfish)"); |
510 | tcrypt_test("cbc(blowfish)"); | 520 | ret += tcrypt_test("cbc(blowfish)"); |
511 | break; | 521 | break; |
512 | 522 | ||
513 | case 8: | 523 | case 8: |
514 | tcrypt_test("ecb(twofish)"); | 524 | ret += tcrypt_test("ecb(twofish)"); |
515 | tcrypt_test("cbc(twofish)"); | 525 | ret += tcrypt_test("cbc(twofish)"); |
516 | break; | 526 | break; |
517 | 527 | ||
518 | case 9: | 528 | case 9: |
519 | tcrypt_test("ecb(serpent)"); | 529 | ret += tcrypt_test("ecb(serpent)"); |
520 | break; | 530 | break; |
521 | 531 | ||
522 | case 10: | 532 | case 10: |
523 | tcrypt_test("ecb(aes)"); | 533 | ret += tcrypt_test("ecb(aes)"); |
524 | tcrypt_test("cbc(aes)"); | 534 | ret += tcrypt_test("cbc(aes)"); |
525 | tcrypt_test("lrw(aes)"); | 535 | ret += tcrypt_test("lrw(aes)"); |
526 | tcrypt_test("xts(aes)"); | 536 | ret += tcrypt_test("xts(aes)"); |
527 | tcrypt_test("rfc3686(ctr(aes))"); | 537 | ret += tcrypt_test("ctr(aes)"); |
538 | ret += tcrypt_test("rfc3686(ctr(aes))"); | ||
528 | break; | 539 | break; |
529 | 540 | ||
530 | case 11: | 541 | case 11: |
531 | tcrypt_test("sha384"); | 542 | ret += tcrypt_test("sha384"); |
532 | break; | 543 | break; |
533 | 544 | ||
534 | case 12: | 545 | case 12: |
535 | tcrypt_test("sha512"); | 546 | ret += tcrypt_test("sha512"); |
536 | break; | 547 | break; |
537 | 548 | ||
538 | case 13: | 549 | case 13: |
539 | tcrypt_test("deflate"); | 550 | ret += tcrypt_test("deflate"); |
540 | break; | 551 | break; |
541 | 552 | ||
542 | case 14: | 553 | case 14: |
543 | tcrypt_test("ecb(cast5)"); | 554 | ret += tcrypt_test("ecb(cast5)"); |
544 | break; | 555 | break; |
545 | 556 | ||
546 | case 15: | 557 | case 15: |
547 | tcrypt_test("ecb(cast6)"); | 558 | ret += tcrypt_test("ecb(cast6)"); |
548 | break; | 559 | break; |
549 | 560 | ||
550 | case 16: | 561 | case 16: |
551 | tcrypt_test("ecb(arc4)"); | 562 | ret += tcrypt_test("ecb(arc4)"); |
552 | break; | 563 | break; |
553 | 564 | ||
554 | case 17: | 565 | case 17: |
555 | tcrypt_test("michael_mic"); | 566 | ret += tcrypt_test("michael_mic"); |
556 | break; | 567 | break; |
557 | 568 | ||
558 | case 18: | 569 | case 18: |
559 | tcrypt_test("crc32c"); | 570 | ret += tcrypt_test("crc32c"); |
560 | break; | 571 | break; |
561 | 572 | ||
562 | case 19: | 573 | case 19: |
563 | tcrypt_test("ecb(tea)"); | 574 | ret += tcrypt_test("ecb(tea)"); |
564 | break; | 575 | break; |
565 | 576 | ||
566 | case 20: | 577 | case 20: |
567 | tcrypt_test("ecb(xtea)"); | 578 | ret += tcrypt_test("ecb(xtea)"); |
568 | break; | 579 | break; |
569 | 580 | ||
570 | case 21: | 581 | case 21: |
571 | tcrypt_test("ecb(khazad)"); | 582 | ret += tcrypt_test("ecb(khazad)"); |
572 | break; | 583 | break; |
573 | 584 | ||
574 | case 22: | 585 | case 22: |
575 | tcrypt_test("wp512"); | 586 | ret += tcrypt_test("wp512"); |
576 | break; | 587 | break; |
577 | 588 | ||
578 | case 23: | 589 | case 23: |
579 | tcrypt_test("wp384"); | 590 | ret += tcrypt_test("wp384"); |
580 | break; | 591 | break; |
581 | 592 | ||
582 | case 24: | 593 | case 24: |
583 | tcrypt_test("wp256"); | 594 | ret += tcrypt_test("wp256"); |
584 | break; | 595 | break; |
585 | 596 | ||
586 | case 25: | 597 | case 25: |
587 | tcrypt_test("ecb(tnepres)"); | 598 | ret += tcrypt_test("ecb(tnepres)"); |
588 | break; | 599 | break; |
589 | 600 | ||
590 | case 26: | 601 | case 26: |
591 | tcrypt_test("ecb(anubis)"); | 602 | ret += tcrypt_test("ecb(anubis)"); |
592 | tcrypt_test("cbc(anubis)"); | 603 | ret += tcrypt_test("cbc(anubis)"); |
593 | break; | 604 | break; |
594 | 605 | ||
595 | case 27: | 606 | case 27: |
596 | tcrypt_test("tgr192"); | 607 | ret += tcrypt_test("tgr192"); |
597 | break; | 608 | break; |
598 | 609 | ||
599 | case 28: | 610 | case 28: |
600 | 611 | ||
601 | tcrypt_test("tgr160"); | 612 | ret += tcrypt_test("tgr160"); |
602 | break; | 613 | break; |
603 | 614 | ||
604 | case 29: | 615 | case 29: |
605 | tcrypt_test("tgr128"); | 616 | ret += tcrypt_test("tgr128"); |
606 | break; | 617 | break; |
607 | 618 | ||
608 | case 30: | 619 | case 30: |
609 | tcrypt_test("ecb(xeta)"); | 620 | ret += tcrypt_test("ecb(xeta)"); |
610 | break; | 621 | break; |
611 | 622 | ||
612 | case 31: | 623 | case 31: |
613 | tcrypt_test("pcbc(fcrypt)"); | 624 | ret += tcrypt_test("pcbc(fcrypt)"); |
614 | break; | 625 | break; |
615 | 626 | ||
616 | case 32: | 627 | case 32: |
617 | tcrypt_test("ecb(camellia)"); | 628 | ret += tcrypt_test("ecb(camellia)"); |
618 | tcrypt_test("cbc(camellia)"); | 629 | ret += tcrypt_test("cbc(camellia)"); |
619 | break; | 630 | break; |
620 | case 33: | 631 | case 33: |
621 | tcrypt_test("sha224"); | 632 | ret += tcrypt_test("sha224"); |
622 | break; | 633 | break; |
623 | 634 | ||
624 | case 34: | 635 | case 34: |
625 | tcrypt_test("salsa20"); | 636 | ret += tcrypt_test("salsa20"); |
626 | break; | 637 | break; |
627 | 638 | ||
628 | case 35: | 639 | case 35: |
629 | tcrypt_test("gcm(aes)"); | 640 | ret += tcrypt_test("gcm(aes)"); |
630 | break; | 641 | break; |
631 | 642 | ||
632 | case 36: | 643 | case 36: |
633 | tcrypt_test("lzo"); | 644 | ret += tcrypt_test("lzo"); |
634 | break; | 645 | break; |
635 | 646 | ||
636 | case 37: | 647 | case 37: |
637 | tcrypt_test("ccm(aes)"); | 648 | ret += tcrypt_test("ccm(aes)"); |
638 | break; | 649 | break; |
639 | 650 | ||
640 | case 38: | 651 | case 38: |
641 | tcrypt_test("cts(cbc(aes))"); | 652 | ret += tcrypt_test("cts(cbc(aes))"); |
642 | break; | 653 | break; |
643 | 654 | ||
644 | case 39: | 655 | case 39: |
645 | tcrypt_test("rmd128"); | 656 | ret += tcrypt_test("rmd128"); |
646 | break; | 657 | break; |
647 | 658 | ||
648 | case 40: | 659 | case 40: |
649 | tcrypt_test("rmd160"); | 660 | ret += tcrypt_test("rmd160"); |
650 | break; | 661 | break; |
651 | 662 | ||
652 | case 41: | 663 | case 41: |
653 | tcrypt_test("rmd256"); | 664 | ret += tcrypt_test("rmd256"); |
654 | break; | 665 | break; |
655 | 666 | ||
656 | case 42: | 667 | case 42: |
657 | tcrypt_test("rmd320"); | 668 | ret += tcrypt_test("rmd320"); |
658 | break; | 669 | break; |
659 | 670 | ||
660 | case 43: | 671 | case 43: |
661 | tcrypt_test("ecb(seed)"); | 672 | ret += tcrypt_test("ecb(seed)"); |
662 | break; | 673 | break; |
663 | 674 | ||
664 | case 44: | 675 | case 44: |
665 | tcrypt_test("zlib"); | 676 | ret += tcrypt_test("zlib"); |
677 | break; | ||
678 | |||
679 | case 45: | ||
680 | ret += tcrypt_test("rfc4309(ccm(aes))"); | ||
666 | break; | 681 | break; |
667 | 682 | ||
668 | case 100: | 683 | case 100: |
669 | tcrypt_test("hmac(md5)"); | 684 | ret += tcrypt_test("hmac(md5)"); |
670 | break; | 685 | break; |
671 | 686 | ||
672 | case 101: | 687 | case 101: |
673 | tcrypt_test("hmac(sha1)"); | 688 | ret += tcrypt_test("hmac(sha1)"); |
674 | break; | 689 | break; |
675 | 690 | ||
676 | case 102: | 691 | case 102: |
677 | tcrypt_test("hmac(sha256)"); | 692 | ret += tcrypt_test("hmac(sha256)"); |
678 | break; | 693 | break; |
679 | 694 | ||
680 | case 103: | 695 | case 103: |
681 | tcrypt_test("hmac(sha384)"); | 696 | ret += tcrypt_test("hmac(sha384)"); |
682 | break; | 697 | break; |
683 | 698 | ||
684 | case 104: | 699 | case 104: |
685 | tcrypt_test("hmac(sha512)"); | 700 | ret += tcrypt_test("hmac(sha512)"); |
686 | break; | 701 | break; |
687 | 702 | ||
688 | case 105: | 703 | case 105: |
689 | tcrypt_test("hmac(sha224)"); | 704 | ret += tcrypt_test("hmac(sha224)"); |
690 | break; | 705 | break; |
691 | 706 | ||
692 | case 106: | 707 | case 106: |
693 | tcrypt_test("xcbc(aes)"); | 708 | ret += tcrypt_test("xcbc(aes)"); |
694 | break; | 709 | break; |
695 | 710 | ||
696 | case 107: | 711 | case 107: |
697 | tcrypt_test("hmac(rmd128)"); | 712 | ret += tcrypt_test("hmac(rmd128)"); |
698 | break; | 713 | break; |
699 | 714 | ||
700 | case 108: | 715 | case 108: |
701 | tcrypt_test("hmac(rmd160)"); | 716 | ret += tcrypt_test("hmac(rmd160)"); |
717 | break; | ||
718 | |||
719 | case 150: | ||
720 | ret += tcrypt_test("ansi_cprng"); | ||
702 | break; | 721 | break; |
703 | 722 | ||
704 | case 200: | 723 | case 200: |
@@ -862,6 +881,8 @@ static void do_test(int m) | |||
862 | test_available(); | 881 | test_available(); |
863 | break; | 882 | break; |
864 | } | 883 | } |
884 | |||
885 | return ret; | ||
865 | } | 886 | } |
866 | 887 | ||
867 | static int __init tcrypt_mod_init(void) | 888 | static int __init tcrypt_mod_init(void) |
@@ -875,15 +896,21 @@ static int __init tcrypt_mod_init(void) | |||
875 | goto err_free_tv; | 896 | goto err_free_tv; |
876 | } | 897 | } |
877 | 898 | ||
878 | do_test(mode); | 899 | err = do_test(mode); |
900 | if (err) { | ||
901 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); | ||
902 | goto err_free_tv; | ||
903 | } | ||
879 | 904 | ||
880 | /* We intentionally return -EAGAIN to prevent keeping | 905 | /* We intentionally return -EAGAIN to prevent keeping the module,
881 | * the module. It does all its work from init() | 906 | * unless we're running in fips mode. It does all its work from |
882 | * and doesn't offer any runtime functionality | 907 | * init() and doesn't offer any runtime functionality, but in |
908 | * the fips case, checking for a successful load is helpful. | ||
883 | * => we don't need it in memory, do we? | 909 | * => we don't need it in memory, do we?
884 | * -- mludvig | 910 | * -- mludvig |
885 | */ | 911 | */ |
886 | err = -EAGAIN; | 912 | if (!fips_enabled) |
913 | err = -EAGAIN; | ||
887 | 914 | ||
888 | err_free_tv: | 915 | err_free_tv: |
889 | for (i = 0; i < TVMEMSIZE && tvmem[i]; i++) | 916 | for (i = 0; i < TVMEMSIZE && tvmem[i]; i++) |
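
do_test() now accumulates the return value of every tcrypt_test() call, and tcrypt_mod_init() treats a non-zero total as a failed self-test run (while still returning -EAGAIN on success outside FIPS mode so the module is unloaded after testing). A small sketch of that accumulate-and-fail pattern, with placeholder test bodies standing in for the real crypto self-tests:

/*
 * Run every self-test, sum the (zero or negative) return codes, and
 * treat a non-zero total as "at least one test failed".
 */
#include <stdio.h>

static int test_md5(void)  { return 0; }        /* pretend this passes           */
static int test_sha1(void) { return -22; }      /* pretend this fails (-EINVAL)  */

static int do_test(void)
{
        int ret = 0;

        ret += test_md5();
        ret += test_sha1();
        return ret;
}

int main(void)
{
        if (do_test()) {
                fprintf(stderr, "tcrypt-style harness: one or more tests failed!\n");
                return 1;
        }
        puts("all tests passed");
        return 0;
}

Summing the codes keeps each switch case to a single "ret +=" line while still propagating any failure, since the individual tests return zero or a negative errno.
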
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index b50c3c6b17a2..e9e9d84293b9 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/scatterlist.h> | 19 | #include <linux/scatterlist.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <crypto/rng.h> | ||
22 | 23 | ||
23 | #include "internal.h" | 24 | #include "internal.h" |
24 | #include "testmgr.h" | 25 | #include "testmgr.h" |
@@ -84,10 +85,16 @@ struct hash_test_suite { | |||
84 | unsigned int count; | 85 | unsigned int count; |
85 | }; | 86 | }; |
86 | 87 | ||
88 | struct cprng_test_suite { | ||
89 | struct cprng_testvec *vecs; | ||
90 | unsigned int count; | ||
91 | }; | ||
92 | |||
87 | struct alg_test_desc { | 93 | struct alg_test_desc { |
88 | const char *alg; | 94 | const char *alg; |
89 | int (*test)(const struct alg_test_desc *desc, const char *driver, | 95 | int (*test)(const struct alg_test_desc *desc, const char *driver, |
90 | u32 type, u32 mask); | 96 | u32 type, u32 mask); |
97 | int fips_allowed; /* set if alg is allowed in fips mode */ | ||
91 | 98 | ||
92 | union { | 99 | union { |
93 | struct aead_test_suite aead; | 100 | struct aead_test_suite aead; |
@@ -95,14 +102,12 @@ struct alg_test_desc { | |||
95 | struct comp_test_suite comp; | 102 | struct comp_test_suite comp; |
96 | struct pcomp_test_suite pcomp; | 103 | struct pcomp_test_suite pcomp; |
97 | struct hash_test_suite hash; | 104 | struct hash_test_suite hash; |
105 | struct cprng_test_suite cprng; | ||
98 | } suite; | 106 | } suite; |
99 | }; | 107 | }; |
100 | 108 | ||
101 | static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; | 109 | static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; |
102 | 110 | ||
103 | static char *xbuf[XBUFSIZE]; | ||
104 | static char *axbuf[XBUFSIZE]; | ||
105 | |||
106 | static void hexdump(unsigned char *buf, unsigned int len) | 111 | static void hexdump(unsigned char *buf, unsigned int len) |
107 | { | 112 | { |
108 | print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, | 113 | print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, |
@@ -121,6 +126,33 @@ static void tcrypt_complete(struct crypto_async_request *req, int err) | |||
121 | complete(&res->completion); | 126 | complete(&res->completion); |
122 | } | 127 | } |
123 | 128 | ||
129 | static int testmgr_alloc_buf(char *buf[XBUFSIZE]) | ||
130 | { | ||
131 | int i; | ||
132 | |||
133 | for (i = 0; i < XBUFSIZE; i++) { | ||
134 | buf[i] = (void *)__get_free_page(GFP_KERNEL); | ||
135 | if (!buf[i]) | ||
136 | goto err_free_buf; | ||
137 | } | ||
138 | |||
139 | return 0; | ||
140 | |||
141 | err_free_buf: | ||
142 | while (i-- > 0) | ||
143 | free_page((unsigned long)buf[i]); | ||
144 | |||
145 | return -ENOMEM; | ||
146 | } | ||
147 | |||
148 | static void testmgr_free_buf(char *buf[XBUFSIZE]) | ||
149 | { | ||
150 | int i; | ||
151 | |||
152 | for (i = 0; i < XBUFSIZE; i++) | ||
153 | free_page((unsigned long)buf[i]); | ||
154 | } | ||
155 | |||
124 | static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | 156 | static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, |
125 | unsigned int tcount) | 157 | unsigned int tcount) |
126 | { | 158 | { |
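
With the static xbuf/axbuf arrays gone, each test routine now allocates its scratch pages on entry via testmgr_alloc_buf() and unwinds a partially completed allocation on failure. The same allocate-or-unwind idiom in a stand-alone sketch, with malloc() standing in for __get_free_page():

/*
 * On the first failed allocation, free exactly the buffers already
 * obtained and report -ENOMEM -- the shape of testmgr_alloc_buf().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define XBUFSIZE 8
#define BUFLEN   4096

static int alloc_bufs(char *buf[XBUFSIZE])
{
        int i;

        for (i = 0; i < XBUFSIZE; i++) {
                buf[i] = malloc(BUFLEN);
                if (!buf[i])
                        goto err_free_buf;
        }
        return 0;

err_free_buf:
        while (i-- > 0)
                free(buf[i]);
        return -ENOMEM;
}

static void free_bufs(char *buf[XBUFSIZE])
{
        int i;

        for (i = 0; i < XBUFSIZE; i++)
                free(buf[i]);
}

int main(void)
{
        char *xbuf[XBUFSIZE];

        if (alloc_bufs(xbuf)) {
                fputs("out of memory\n", stderr);
                return 1;
        }
        /* ... use the scratch buffers here ... */
        free_bufs(xbuf);
        return 0;
}
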
@@ -130,8 +162,12 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
130 | char result[64]; | 162 | char result[64]; |
131 | struct ahash_request *req; | 163 | struct ahash_request *req; |
132 | struct tcrypt_result tresult; | 164 | struct tcrypt_result tresult; |
133 | int ret; | ||
134 | void *hash_buff; | 165 | void *hash_buff; |
166 | char *xbuf[XBUFSIZE]; | ||
167 | int ret = -ENOMEM; | ||
168 | |||
169 | if (testmgr_alloc_buf(xbuf)) | ||
170 | goto out_nobuf; | ||
135 | 171 | ||
136 | init_completion(&tresult.completion); | 172 | init_completion(&tresult.completion); |
137 | 173 | ||
@@ -139,17 +175,25 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
139 | if (!req) { | 175 | if (!req) { |
140 | printk(KERN_ERR "alg: hash: Failed to allocate request for " | 176 | printk(KERN_ERR "alg: hash: Failed to allocate request for " |
141 | "%s\n", algo); | 177 | "%s\n", algo); |
142 | ret = -ENOMEM; | ||
143 | goto out_noreq; | 178 | goto out_noreq; |
144 | } | 179 | } |
145 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 180 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
146 | tcrypt_complete, &tresult); | 181 | tcrypt_complete, &tresult); |
147 | 182 | ||
183 | j = 0; | ||
148 | for (i = 0; i < tcount; i++) { | 184 | for (i = 0; i < tcount; i++) { |
185 | if (template[i].np) | ||
186 | continue; | ||
187 | |||
188 | j++; | ||
149 | memset(result, 0, 64); | 189 | memset(result, 0, 64); |
150 | 190 | ||
151 | hash_buff = xbuf[0]; | 191 | hash_buff = xbuf[0]; |
152 | 192 | ||
193 | ret = -EINVAL; | ||
194 | if (WARN_ON(template[i].psize > PAGE_SIZE)) | ||
195 | goto out; | ||
196 | |||
153 | memcpy(hash_buff, template[i].plaintext, template[i].psize); | 197 | memcpy(hash_buff, template[i].plaintext, template[i].psize); |
154 | sg_init_one(&sg[0], hash_buff, template[i].psize); | 198 | sg_init_one(&sg[0], hash_buff, template[i].psize); |
155 | 199 | ||
@@ -159,7 +203,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
159 | template[i].ksize); | 203 | template[i].ksize); |
160 | if (ret) { | 204 | if (ret) { |
161 | printk(KERN_ERR "alg: hash: setkey failed on " | 205 | printk(KERN_ERR "alg: hash: setkey failed on " |
162 | "test %d for %s: ret=%d\n", i + 1, algo, | 206 | "test %d for %s: ret=%d\n", j, algo, |
163 | -ret); | 207 | -ret); |
164 | goto out; | 208 | goto out; |
165 | } | 209 | } |
@@ -181,14 +225,14 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
181 | /* fall through */ | 225 | /* fall through */ |
182 | default: | 226 | default: |
183 | printk(KERN_ERR "alg: hash: digest failed on test %d " | 227 | printk(KERN_ERR "alg: hash: digest failed on test %d " |
184 | "for %s: ret=%d\n", i + 1, algo, -ret); | 228 | "for %s: ret=%d\n", j, algo, -ret); |
185 | goto out; | 229 | goto out; |
186 | } | 230 | } |
187 | 231 | ||
188 | if (memcmp(result, template[i].digest, | 232 | if (memcmp(result, template[i].digest, |
189 | crypto_ahash_digestsize(tfm))) { | 233 | crypto_ahash_digestsize(tfm))) { |
190 | printk(KERN_ERR "alg: hash: Test %d failed for %s\n", | 234 | printk(KERN_ERR "alg: hash: Test %d failed for %s\n", |
191 | i + 1, algo); | 235 | j, algo); |
192 | hexdump(result, crypto_ahash_digestsize(tfm)); | 236 | hexdump(result, crypto_ahash_digestsize(tfm)); |
193 | ret = -EINVAL; | 237 | ret = -EINVAL; |
194 | goto out; | 238 | goto out; |
@@ -203,7 +247,11 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
203 | 247 | ||
204 | temp = 0; | 248 | temp = 0; |
205 | sg_init_table(sg, template[i].np); | 249 | sg_init_table(sg, template[i].np); |
250 | ret = -EINVAL; | ||
206 | for (k = 0; k < template[i].np; k++) { | 251 | for (k = 0; k < template[i].np; k++) { |
252 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
253 | template[i].tap[k] > PAGE_SIZE)) | ||
254 | goto out; | ||
207 | sg_set_buf(&sg[k], | 255 | sg_set_buf(&sg[k], |
208 | memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + | 256 | memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + |
209 | offset_in_page(IDX[k]), | 257 | offset_in_page(IDX[k]), |
@@ -265,6 +313,8 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
265 | out: | 313 | out: |
266 | ahash_request_free(req); | 314 | ahash_request_free(req); |
267 | out_noreq: | 315 | out_noreq: |
316 | testmgr_free_buf(xbuf); | ||
317 | out_nobuf: | ||
268 | return ret; | 318 | return ret; |
269 | } | 319 | } |
270 | 320 | ||
@@ -273,7 +323,7 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
273 | { | 323 | { |
274 | const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); | 324 | const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); |
275 | unsigned int i, j, k, n, temp; | 325 | unsigned int i, j, k, n, temp; |
276 | int ret = 0; | 326 | int ret = -ENOMEM; |
277 | char *q; | 327 | char *q; |
278 | char *key; | 328 | char *key; |
279 | struct aead_request *req; | 329 | struct aead_request *req; |
@@ -285,6 +335,13 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
285 | void *input; | 335 | void *input; |
286 | void *assoc; | 336 | void *assoc; |
287 | char iv[MAX_IVLEN]; | 337 | char iv[MAX_IVLEN]; |
338 | char *xbuf[XBUFSIZE]; | ||
339 | char *axbuf[XBUFSIZE]; | ||
340 | |||
341 | if (testmgr_alloc_buf(xbuf)) | ||
342 | goto out_noxbuf; | ||
343 | if (testmgr_alloc_buf(axbuf)) | ||
344 | goto out_noaxbuf; | ||
288 | 345 | ||
289 | if (enc == ENCRYPT) | 346 | if (enc == ENCRYPT) |
290 | e = "encryption"; | 347 | e = "encryption"; |
@@ -297,7 +354,6 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
297 | if (!req) { | 354 | if (!req) { |
298 | printk(KERN_ERR "alg: aead: Failed to allocate request for " | 355 | printk(KERN_ERR "alg: aead: Failed to allocate request for " |
299 | "%s\n", algo); | 356 | "%s\n", algo); |
300 | ret = -ENOMEM; | ||
301 | goto out; | 357 | goto out; |
302 | } | 358 | } |
303 | 359 | ||
@@ -314,6 +370,11 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
314 | input = xbuf[0]; | 370 | input = xbuf[0]; |
315 | assoc = axbuf[0]; | 371 | assoc = axbuf[0]; |
316 | 372 | ||
373 | ret = -EINVAL; | ||
374 | if (WARN_ON(template[i].ilen > PAGE_SIZE || | ||
375 | template[i].alen > PAGE_SIZE)) | ||
376 | goto out; | ||
377 | |||
317 | memcpy(input, template[i].input, template[i].ilen); | 378 | memcpy(input, template[i].input, template[i].ilen); |
318 | memcpy(assoc, template[i].assoc, template[i].alen); | 379 | memcpy(assoc, template[i].assoc, template[i].alen); |
319 | if (template[i].iv) | 380 | if (template[i].iv) |
@@ -363,6 +424,16 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
363 | 424 | ||
364 | switch (ret) { | 425 | switch (ret) { |
365 | case 0: | 426 | case 0: |
427 | if (template[i].novrfy) { | ||
428 | /* verification was supposed to fail */ | ||
429 | printk(KERN_ERR "alg: aead: %s failed " | ||
430 | "on test %d for %s: ret was 0, " | ||
431 | "expected -EBADMSG\n", | ||
432 | e, j, algo); | ||
433 | /* so really, we got a bad message */ | ||
434 | ret = -EBADMSG; | ||
435 | goto out; | ||
436 | } | ||
366 | break; | 437 | break; |
367 | case -EINPROGRESS: | 438 | case -EINPROGRESS: |
368 | case -EBUSY: | 439 | case -EBUSY: |
@@ -372,6 +443,10 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
372 | INIT_COMPLETION(result.completion); | 443 | INIT_COMPLETION(result.completion); |
373 | break; | 444 | break; |
374 | } | 445 | } |
446 | case -EBADMSG: | ||
447 | if (template[i].novrfy) | ||
448 | /* verification failure was expected */ | ||
449 | continue; | ||
375 | /* fall through */ | 450 | /* fall through */ |
376 | default: | 451 | default: |
377 | printk(KERN_ERR "alg: aead: %s failed on test " | 452 | printk(KERN_ERR "alg: aead: %s failed on test " |
@@ -459,7 +534,11 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
459 | } | 534 | } |
460 | 535 | ||
461 | sg_init_table(asg, template[i].anp); | 536 | sg_init_table(asg, template[i].anp); |
537 | ret = -EINVAL; | ||
462 | for (k = 0, temp = 0; k < template[i].anp; k++) { | 538 | for (k = 0, temp = 0; k < template[i].anp; k++) { |
539 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
540 | template[i].atap[k] > PAGE_SIZE)) | ||
541 | goto out; | ||
463 | sg_set_buf(&asg[k], | 542 | sg_set_buf(&asg[k], |
464 | memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + | 543 | memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + |
465 | offset_in_page(IDX[k]), | 544 | offset_in_page(IDX[k]), |
@@ -481,6 +560,16 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
481 | 560 | ||
482 | switch (ret) { | 561 | switch (ret) { |
483 | case 0: | 562 | case 0: |
563 | if (template[i].novrfy) { | ||
564 | /* verification was supposed to fail */ | ||
565 | printk(KERN_ERR "alg: aead: %s failed " | ||
566 | "on chunk test %d for %s: ret " | ||
567 | "was 0, expected -EBADMSG\n", | ||
568 | e, j, algo); | ||
569 | /* so really, we got a bad message */ | ||
570 | ret = -EBADMSG; | ||
571 | goto out; | ||
572 | } | ||
484 | break; | 573 | break; |
485 | case -EINPROGRESS: | 574 | case -EINPROGRESS: |
486 | case -EBUSY: | 575 | case -EBUSY: |
@@ -490,6 +579,10 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
490 | INIT_COMPLETION(result.completion); | 579 | INIT_COMPLETION(result.completion); |
491 | break; | 580 | break; |
492 | } | 581 | } |
582 | case -EBADMSG: | ||
583 | if (template[i].novrfy) | ||
584 | /* verification failure was expected */ | ||
585 | continue; | ||
493 | /* fall through */ | 586 | /* fall through */ |
494 | default: | 587 | default: |
495 | printk(KERN_ERR "alg: aead: %s failed on " | 588 | printk(KERN_ERR "alg: aead: %s failed on " |
@@ -546,6 +639,10 @@ static int test_aead(struct crypto_aead *tfm, int enc, | |||
546 | 639 | ||
547 | out: | 640 | out: |
548 | aead_request_free(req); | 641 | aead_request_free(req); |
642 | testmgr_free_buf(axbuf); | ||
643 | out_noaxbuf: | ||
644 | testmgr_free_buf(xbuf); | ||
645 | out_noxbuf: | ||
549 | return ret; | 646 | return ret; |
550 | } | 647 | } |
551 | 648 | ||
@@ -554,10 +651,14 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, | |||
554 | { | 651 | { |
555 | const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm)); | 652 | const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm)); |
556 | unsigned int i, j, k; | 653 | unsigned int i, j, k; |
557 | int ret; | ||
558 | char *q; | 654 | char *q; |
559 | const char *e; | 655 | const char *e; |
560 | void *data; | 656 | void *data; |
657 | char *xbuf[XBUFSIZE]; | ||
658 | int ret = -ENOMEM; | ||
659 | |||
660 | if (testmgr_alloc_buf(xbuf)) | ||
661 | goto out_nobuf; | ||
561 | 662 | ||
562 | if (enc == ENCRYPT) | 663 | if (enc == ENCRYPT) |
563 | e = "encryption"; | 664 | e = "encryption"; |
@@ -571,6 +672,10 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, | |||
571 | 672 | ||
572 | j++; | 673 | j++; |
573 | 674 | ||
675 | ret = -EINVAL; | ||
676 | if (WARN_ON(template[i].ilen > PAGE_SIZE)) | ||
677 | goto out; | ||
678 | |||
574 | data = xbuf[0]; | 679 | data = xbuf[0]; |
575 | memcpy(data, template[i].input, template[i].ilen); | 680 | memcpy(data, template[i].input, template[i].ilen); |
576 | 681 | ||
@@ -611,6 +716,8 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, | |||
611 | ret = 0; | 716 | ret = 0; |
612 | 717 | ||
613 | out: | 718 | out: |
719 | testmgr_free_buf(xbuf); | ||
720 | out_nobuf: | ||
614 | return ret; | 721 | return ret; |
615 | } | 722 | } |
616 | 723 | ||
@@ -620,7 +727,6 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
620 | const char *algo = | 727 | const char *algo = |
621 | crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)); | 728 | crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)); |
622 | unsigned int i, j, k, n, temp; | 729 | unsigned int i, j, k, n, temp; |
623 | int ret; | ||
624 | char *q; | 730 | char *q; |
625 | struct ablkcipher_request *req; | 731 | struct ablkcipher_request *req; |
626 | struct scatterlist sg[8]; | 732 | struct scatterlist sg[8]; |
@@ -628,6 +734,11 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
628 | struct tcrypt_result result; | 734 | struct tcrypt_result result; |
629 | void *data; | 735 | void *data; |
630 | char iv[MAX_IVLEN]; | 736 | char iv[MAX_IVLEN]; |
737 | char *xbuf[XBUFSIZE]; | ||
738 | int ret = -ENOMEM; | ||
739 | |||
740 | if (testmgr_alloc_buf(xbuf)) | ||
741 | goto out_nobuf; | ||
631 | 742 | ||
632 | if (enc == ENCRYPT) | 743 | if (enc == ENCRYPT) |
633 | e = "encryption"; | 744 | e = "encryption"; |
@@ -640,7 +751,6 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
640 | if (!req) { | 751 | if (!req) { |
641 | printk(KERN_ERR "alg: skcipher: Failed to allocate request " | 752 | printk(KERN_ERR "alg: skcipher: Failed to allocate request " |
642 | "for %s\n", algo); | 753 | "for %s\n", algo); |
643 | ret = -ENOMEM; | ||
644 | goto out; | 754 | goto out; |
645 | } | 755 | } |
646 | 756 | ||
@@ -657,6 +767,10 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
657 | if (!(template[i].np)) { | 767 | if (!(template[i].np)) { |
658 | j++; | 768 | j++; |
659 | 769 | ||
770 | ret = -EINVAL; | ||
771 | if (WARN_ON(template[i].ilen > PAGE_SIZE)) | ||
772 | goto out; | ||
773 | |||
660 | data = xbuf[0]; | 774 | data = xbuf[0]; |
661 | memcpy(data, template[i].input, template[i].ilen); | 775 | memcpy(data, template[i].input, template[i].ilen); |
662 | 776 | ||
@@ -825,6 +939,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
825 | 939 | ||
826 | out: | 940 | out: |
827 | ablkcipher_request_free(req); | 941 | ablkcipher_request_free(req); |
942 | testmgr_free_buf(xbuf); | ||
943 | out_nobuf: | ||
828 | return ret; | 944 | return ret; |
829 | } | 945 | } |
830 | 946 | ||
@@ -837,7 +953,8 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, | |||
837 | int ret; | 953 | int ret; |
838 | 954 | ||
839 | for (i = 0; i < ctcount; i++) { | 955 | for (i = 0; i < ctcount; i++) { |
840 | int ilen, dlen = COMP_BUF_SIZE; | 956 | int ilen; |
957 | unsigned int dlen = COMP_BUF_SIZE; | ||
841 | 958 | ||
842 | memset(result, 0, sizeof (result)); | 959 | memset(result, 0, sizeof (result)); |
843 | 960 | ||
@@ -869,7 +986,8 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, | |||
869 | } | 986 | } |
870 | 987 | ||
871 | for (i = 0; i < dtcount; i++) { | 988 | for (i = 0; i < dtcount; i++) { |
872 | int ilen, dlen = COMP_BUF_SIZE; | 989 | int ilen; |
990 | unsigned int dlen = COMP_BUF_SIZE; | ||
873 | 991 | ||
874 | memset(result, 0, sizeof (result)); | 992 | memset(result, 0, sizeof (result)); |
875 | 993 | ||
@@ -914,24 +1032,25 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
914 | const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm)); | 1032 | const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm)); |
915 | unsigned int i; | 1033 | unsigned int i; |
916 | char result[COMP_BUF_SIZE]; | 1034 | char result[COMP_BUF_SIZE]; |
917 | int error; | 1035 | int res; |
918 | 1036 | ||
919 | for (i = 0; i < ctcount; i++) { | 1037 | for (i = 0; i < ctcount; i++) { |
920 | struct comp_request req; | 1038 | struct comp_request req; |
1039 | unsigned int produced = 0; | ||
921 | 1040 | ||
922 | error = crypto_compress_setup(tfm, ctemplate[i].params, | 1041 | res = crypto_compress_setup(tfm, ctemplate[i].params, |
923 | ctemplate[i].paramsize); | 1042 | ctemplate[i].paramsize); |
924 | if (error) { | 1043 | if (res) { |
925 | pr_err("alg: pcomp: compression setup failed on test " | 1044 | pr_err("alg: pcomp: compression setup failed on test " |
926 | "%d for %s: error=%d\n", i + 1, algo, error); | 1045 | "%d for %s: error=%d\n", i + 1, algo, res); |
927 | return error; | 1046 | return res; |
928 | } | 1047 | } |
929 | 1048 | ||
930 | error = crypto_compress_init(tfm); | 1049 | res = crypto_compress_init(tfm); |
931 | if (error) { | 1050 | if (res) { |
932 | pr_err("alg: pcomp: compression init failed on test " | 1051 | pr_err("alg: pcomp: compression init failed on test " |
933 | "%d for %s: error=%d\n", i + 1, algo, error); | 1052 | "%d for %s: error=%d\n", i + 1, algo, res); |
934 | return error; | 1053 | return res; |
935 | } | 1054 | } |
936 | 1055 | ||
937 | memset(result, 0, sizeof(result)); | 1056 | memset(result, 0, sizeof(result)); |
@@ -941,32 +1060,37 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
941 | req.next_out = result; | 1060 | req.next_out = result; |
942 | req.avail_out = ctemplate[i].outlen / 2; | 1061 | req.avail_out = ctemplate[i].outlen / 2; |
943 | 1062 | ||
944 | error = crypto_compress_update(tfm, &req); | 1063 | res = crypto_compress_update(tfm, &req); |
945 | if (error && (error != -EAGAIN || req.avail_in)) { | 1064 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { |
946 | pr_err("alg: pcomp: compression update failed on test " | 1065 | pr_err("alg: pcomp: compression update failed on test " |
947 | "%d for %s: error=%d\n", i + 1, algo, error); | 1066 | "%d for %s: error=%d\n", i + 1, algo, res); |
948 | return error; | 1067 | return res; |
949 | } | 1068 | } |
1069 | if (res > 0) | ||
1070 | produced += res; | ||
950 | 1071 | ||
951 | /* Add remaining input data */ | 1072 | /* Add remaining input data */ |
952 | req.avail_in += (ctemplate[i].inlen + 1) / 2; | 1073 | req.avail_in += (ctemplate[i].inlen + 1) / 2; |
953 | 1074 | ||
954 | error = crypto_compress_update(tfm, &req); | 1075 | res = crypto_compress_update(tfm, &req); |
955 | if (error && (error != -EAGAIN || req.avail_in)) { | 1076 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { |
956 | pr_err("alg: pcomp: compression update failed on test " | 1077 | pr_err("alg: pcomp: compression update failed on test " |
957 | "%d for %s: error=%d\n", i + 1, algo, error); | 1078 | "%d for %s: error=%d\n", i + 1, algo, res); |
958 | return error; | 1079 | return res; |
959 | } | 1080 | } |
1081 | if (res > 0) | ||
1082 | produced += res; | ||
960 | 1083 | ||
961 | /* Provide remaining output space */ | 1084 | /* Provide remaining output space */ |
962 | req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2; | 1085 | req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2; |
963 | 1086 | ||
964 | error = crypto_compress_final(tfm, &req); | 1087 | res = crypto_compress_final(tfm, &req); |
965 | if (error) { | 1088 | if (res < 0) { |
966 | pr_err("alg: pcomp: compression final failed on test " | 1089 | pr_err("alg: pcomp: compression final failed on test " |
967 | "%d for %s: error=%d\n", i + 1, algo, error); | 1090 | "%d for %s: error=%d\n", i + 1, algo, res); |
968 | return error; | 1091 | return res; |
969 | } | 1092 | } |
1093 | produced += res; | ||
970 | 1094 | ||
971 | if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) { | 1095 | if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) { |
972 | pr_err("alg: comp: Compression test %d failed for %s: " | 1096 | pr_err("alg: comp: Compression test %d failed for %s: " |
@@ -976,6 +1100,13 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
976 | return -EINVAL; | 1100 | return -EINVAL; |
977 | } | 1101 | } |
978 | 1102 | ||
1103 | if (produced != ctemplate[i].outlen) { | ||
1104 | pr_err("alg: comp: Compression test %d failed for %s: " | ||
1105 | "returned len = %u (expected %d)\n", i + 1, | ||
1106 | algo, produced, ctemplate[i].outlen); | ||
1107 | return -EINVAL; | ||
1108 | } | ||
1109 | |||
979 | if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) { | 1110 | if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) { |
980 | pr_err("alg: pcomp: Compression test %d failed for " | 1111 | pr_err("alg: pcomp: Compression test %d failed for " |
981 | "%s\n", i + 1, algo); | 1112 | "%s\n", i + 1, algo); |
@@ -986,21 +1117,21 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
986 | 1117 | ||
987 | for (i = 0; i < dtcount; i++) { | 1118 | for (i = 0; i < dtcount; i++) { |
988 | struct comp_request req; | 1119 | struct comp_request req; |
1120 | unsigned int produced = 0; | ||
989 | 1121 | ||
990 | error = crypto_decompress_setup(tfm, dtemplate[i].params, | 1122 | res = crypto_decompress_setup(tfm, dtemplate[i].params, |
991 | dtemplate[i].paramsize); | 1123 | dtemplate[i].paramsize); |
992 | if (error) { | 1124 | if (res) { |
993 | pr_err("alg: pcomp: decompression setup failed on " | 1125 | pr_err("alg: pcomp: decompression setup failed on " |
994 | "test %d for %s: error=%d\n", i + 1, algo, | 1126 | "test %d for %s: error=%d\n", i + 1, algo, res); |
995 | error); | 1127 | return res; |
996 | return error; | ||
997 | } | 1128 | } |
998 | 1129 | ||
999 | error = crypto_decompress_init(tfm); | 1130 | res = crypto_decompress_init(tfm); |
1000 | if (error) { | 1131 | if (res) { |
1001 | pr_err("alg: pcomp: decompression init failed on test " | 1132 | pr_err("alg: pcomp: decompression init failed on test " |
1002 | "%d for %s: error=%d\n", i + 1, algo, error); | 1133 | "%d for %s: error=%d\n", i + 1, algo, res); |
1003 | return error; | 1134 | return res; |
1004 | } | 1135 | } |
1005 | 1136 | ||
1006 | memset(result, 0, sizeof(result)); | 1137 | memset(result, 0, sizeof(result)); |
@@ -1010,35 +1141,38 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
1010 | req.next_out = result; | 1141 | req.next_out = result; |
1011 | req.avail_out = dtemplate[i].outlen / 2; | 1142 | req.avail_out = dtemplate[i].outlen / 2; |
1012 | 1143 | ||
1013 | error = crypto_decompress_update(tfm, &req); | 1144 | res = crypto_decompress_update(tfm, &req); |
1014 | if (error && (error != -EAGAIN || req.avail_in)) { | 1145 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { |
1015 | pr_err("alg: pcomp: decompression update failed on " | 1146 | pr_err("alg: pcomp: decompression update failed on " |
1016 | "test %d for %s: error=%d\n", i + 1, algo, | 1147 | "test %d for %s: error=%d\n", i + 1, algo, res); |
1017 | error); | 1148 | return res; |
1018 | return error; | ||
1019 | } | 1149 | } |
1150 | if (res > 0) | ||
1151 | produced += res; | ||
1020 | 1152 | ||
1021 | /* Add remaining input data */ | 1153 | /* Add remaining input data */ |
1022 | req.avail_in += (dtemplate[i].inlen + 1) / 2; | 1154 | req.avail_in += (dtemplate[i].inlen + 1) / 2; |
1023 | 1155 | ||
1024 | error = crypto_decompress_update(tfm, &req); | 1156 | res = crypto_decompress_update(tfm, &req); |
1025 | if (error && (error != -EAGAIN || req.avail_in)) { | 1157 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { |
1026 | pr_err("alg: pcomp: decompression update failed on " | 1158 | pr_err("alg: pcomp: decompression update failed on " |
1027 | "test %d for %s: error=%d\n", i + 1, algo, | 1159 | "test %d for %s: error=%d\n", i + 1, algo, res); |
1028 | error); | 1160 | return res; |
1029 | return error; | ||
1030 | } | 1161 | } |
1162 | if (res > 0) | ||
1163 | produced += res; | ||
1031 | 1164 | ||
1032 | /* Provide remaining output space */ | 1165 | /* Provide remaining output space */ |
1033 | req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2; | 1166 | req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2; |
1034 | 1167 | ||
1035 | error = crypto_decompress_final(tfm, &req); | 1168 | res = crypto_decompress_final(tfm, &req); |
1036 | if (error && (error != -EAGAIN || req.avail_in)) { | 1169 | if (res < 0 && (res != -EAGAIN || req.avail_in)) { |
1037 | pr_err("alg: pcomp: decompression final failed on " | 1170 | pr_err("alg: pcomp: decompression final failed on " |
1038 | "test %d for %s: error=%d\n", i + 1, algo, | 1171 | "test %d for %s: error=%d\n", i + 1, algo, res); |
1039 | error); | 1172 | return res; |
1040 | return error; | ||
1041 | } | 1173 | } |
1174 | if (res > 0) | ||
1175 | produced += res; | ||
1042 | 1176 | ||
1043 | if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) { | 1177 | if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) { |
1044 | pr_err("alg: comp: Decompression test %d failed for " | 1178 | pr_err("alg: comp: Decompression test %d failed for " |
@@ -1048,6 +1182,13 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
1048 | return -EINVAL; | 1182 | return -EINVAL; |
1049 | } | 1183 | } |
1050 | 1184 | ||
1185 | if (produced != dtemplate[i].outlen) { | ||
1186 | pr_err("alg: comp: Decompression test %d failed for " | ||
1187 | "%s: returned len = %u (expected %d)\n", i + 1, | ||
1188 | algo, produced, dtemplate[i].outlen); | ||
1189 | return -EINVAL; | ||
1190 | } | ||
1191 | |||
1051 | if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) { | 1192 | if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) { |
1052 | pr_err("alg: pcomp: Decompression test %d failed for " | 1193 | pr_err("alg: pcomp: Decompression test %d failed for " |
1053 | "%s\n", i + 1, algo); | 1194 | "%s\n", i + 1, algo); |
@@ -1059,6 +1200,68 @@ static int test_pcomp(struct crypto_pcomp *tfm, | |||
1059 | return 0; | 1200 | return 0; |
1060 | } | 1201 | } |
1061 | 1202 | ||
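test_pcomp() now accumulates how many bytes each crypto_compress_update()/_final() call reports as produced (the zlib hunks at the end of this patch make those calls return the produced count instead of 0) and checks the running total against the expected output length. A self-contained sketch of that accounting contract, using a stand-in "compressor" that merely copies bytes; all names here are hypothetical:

#include <stdio.h>
#include <string.h>

/*
 * Stand-in for crypto_compress_update()/_final(): "compresses" by copying
 * input to output and returns the number of bytes produced, or a negative
 * error code.  Purely illustrative.
 */
static int fake_update(const char *in, unsigned int inlen,
                       char *out, unsigned int outlen)
{
        if (inlen > outlen)
                return -22;             /* -EINVAL: no room left */
        memcpy(out, in, inlen);
        return (int)inlen;              /* bytes produced by this call */
}

int main(void)
{
        const char input[] = "abcdefgh";
        char out[64];
        unsigned int produced = 0, expected = sizeof(input) - 1;
        int res;

        /* First half of the input ... */
        res = fake_update(input, 4, out, sizeof(out));
        if (res < 0)
                return 1;
        produced += res;

        /* ... then the remainder, accumulating the produced count. */
        res = fake_update(input + 4, 4, out + produced, sizeof(out) - produced);
        if (res < 0)
                return 1;
        produced += res;

        if (produced != expected) {
                fprintf(stderr, "returned len = %u (expected %u)\n",
                        produced, expected);
                return 1;
        }
        printf("produced %u bytes as expected\n", produced);
        return 0;
}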
1203 | |||
1204 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, | ||
1205 | unsigned int tcount) | ||
1206 | { | ||
1207 | const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); | ||
1208 | int err, i, j, seedsize; | ||
1209 | u8 *seed; | ||
1210 | char result[32]; | ||
1211 | |||
1212 | seedsize = crypto_rng_seedsize(tfm); | ||
1213 | |||
1214 | seed = kmalloc(seedsize, GFP_KERNEL); | ||
1215 | if (!seed) { | ||
1216 | printk(KERN_ERR "alg: cprng: Failed to allocate seed space " | ||
1217 | "for %s\n", algo); | ||
1218 | return -ENOMEM; | ||
1219 | } | ||
1220 | |||
1221 | for (i = 0; i < tcount; i++) { | ||
1222 | memset(result, 0, 32); | ||
1223 | |||
1224 | memcpy(seed, template[i].v, template[i].vlen); | ||
1225 | memcpy(seed + template[i].vlen, template[i].key, | ||
1226 | template[i].klen); | ||
1227 | memcpy(seed + template[i].vlen + template[i].klen, | ||
1228 | template[i].dt, template[i].dtlen); | ||
1229 | |||
1230 | err = crypto_rng_reset(tfm, seed, seedsize); | ||
1231 | if (err) { | ||
1232 | printk(KERN_ERR "alg: cprng: Failed to reset rng " | ||
1233 | "for %s\n", algo); | ||
1234 | goto out; | ||
1235 | } | ||
1236 | |||
1237 | for (j = 0; j < template[i].loops; j++) { | ||
1238 | err = crypto_rng_get_bytes(tfm, result, | ||
1239 | template[i].rlen); | ||
1240 | if (err != template[i].rlen) { | ||
1241 | printk(KERN_ERR "alg: cprng: Failed to obtain " | ||
1242 | "the correct amount of random data for " | ||
1243 | "%s (requested %d, got %d)\n", algo, | ||
1244 | template[i].rlen, err); | ||
1245 | goto out; | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | err = memcmp(result, template[i].result, | ||
1250 | template[i].rlen); | ||
1251 | if (err) { | ||
1252 | printk(KERN_ERR "alg: cprng: Test %d failed for %s\n", | ||
1253 | i, algo); | ||
1254 | hexdump(result, template[i].rlen); | ||
1255 | err = -EINVAL; | ||
1256 | goto out; | ||
1257 | } | ||
1258 | } | ||
1259 | |||
1260 | out: | ||
1261 | kfree(seed); | ||
1262 | return err; | ||
1263 | } | ||
1264 | |||
1062 | static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, | 1265 | static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, |
1063 | u32 type, u32 mask) | 1266 | u32 type, u32 mask) |
1064 | { | 1267 | { |
@@ -1258,11 +1461,42 @@ out: | |||
1258 | return err; | 1461 | return err; |
1259 | } | 1462 | } |
1260 | 1463 | ||
1464 | static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, | ||
1465 | u32 type, u32 mask) | ||
1466 | { | ||
1467 | struct crypto_rng *rng; | ||
1468 | int err; | ||
1469 | |||
1470 | rng = crypto_alloc_rng(driver, type, mask); | ||
1471 | if (IS_ERR(rng)) { | ||
1472 | printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " | ||
1473 | "%ld\n", driver, PTR_ERR(rng)); | ||
1474 | return PTR_ERR(rng); | ||
1475 | } | ||
1476 | |||
1477 | err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count); | ||
1478 | |||
1479 | crypto_free_rng(rng); | ||
1480 | |||
1481 | return err; | ||
1482 | } | ||
1483 | |||
1261 | /* Please keep this list sorted by algorithm name. */ | 1484 | /* Please keep this list sorted by algorithm name. */ |
1262 | static const struct alg_test_desc alg_test_descs[] = { | 1485 | static const struct alg_test_desc alg_test_descs[] = { |
1263 | { | 1486 | { |
1487 | .alg = "ansi_cprng", | ||
1488 | .test = alg_test_cprng, | ||
1489 | .fips_allowed = 1, | ||
1490 | .suite = { | ||
1491 | .cprng = { | ||
1492 | .vecs = ansi_cprng_aes_tv_template, | ||
1493 | .count = ANSI_CPRNG_AES_TEST_VECTORS | ||
1494 | } | ||
1495 | } | ||
1496 | }, { | ||
1264 | .alg = "cbc(aes)", | 1497 | .alg = "cbc(aes)", |
1265 | .test = alg_test_skcipher, | 1498 | .test = alg_test_skcipher, |
1499 | .fips_allowed = 1, | ||
1266 | .suite = { | 1500 | .suite = { |
1267 | .cipher = { | 1501 | .cipher = { |
1268 | .enc = { | 1502 | .enc = { |
@@ -1338,6 +1572,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1338 | }, { | 1572 | }, { |
1339 | .alg = "cbc(des3_ede)", | 1573 | .alg = "cbc(des3_ede)", |
1340 | .test = alg_test_skcipher, | 1574 | .test = alg_test_skcipher, |
1575 | .fips_allowed = 1, | ||
1341 | .suite = { | 1576 | .suite = { |
1342 | .cipher = { | 1577 | .cipher = { |
1343 | .enc = { | 1578 | .enc = { |
@@ -1368,6 +1603,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1368 | }, { | 1603 | }, { |
1369 | .alg = "ccm(aes)", | 1604 | .alg = "ccm(aes)", |
1370 | .test = alg_test_aead, | 1605 | .test = alg_test_aead, |
1606 | .fips_allowed = 1, | ||
1371 | .suite = { | 1607 | .suite = { |
1372 | .aead = { | 1608 | .aead = { |
1373 | .enc = { | 1609 | .enc = { |
@@ -1383,6 +1619,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1383 | }, { | 1619 | }, { |
1384 | .alg = "crc32c", | 1620 | .alg = "crc32c", |
1385 | .test = alg_test_crc32c, | 1621 | .test = alg_test_crc32c, |
1622 | .fips_allowed = 1, | ||
1386 | .suite = { | 1623 | .suite = { |
1387 | .hash = { | 1624 | .hash = { |
1388 | .vecs = crc32c_tv_template, | 1625 | .vecs = crc32c_tv_template, |
@@ -1390,6 +1627,22 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1390 | } | 1627 | } |
1391 | } | 1628 | } |
1392 | }, { | 1629 | }, { |
1630 | .alg = "ctr(aes)", | ||
1631 | .test = alg_test_skcipher, | ||
1632 | .fips_allowed = 1, | ||
1633 | .suite = { | ||
1634 | .cipher = { | ||
1635 | .enc = { | ||
1636 | .vecs = aes_ctr_enc_tv_template, | ||
1637 | .count = AES_CTR_ENC_TEST_VECTORS | ||
1638 | }, | ||
1639 | .dec = { | ||
1640 | .vecs = aes_ctr_dec_tv_template, | ||
1641 | .count = AES_CTR_DEC_TEST_VECTORS | ||
1642 | } | ||
1643 | } | ||
1644 | } | ||
1645 | }, { | ||
1393 | .alg = "cts(cbc(aes))", | 1646 | .alg = "cts(cbc(aes))", |
1394 | .test = alg_test_skcipher, | 1647 | .test = alg_test_skcipher, |
1395 | .suite = { | 1648 | .suite = { |
@@ -1422,6 +1675,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1422 | }, { | 1675 | }, { |
1423 | .alg = "ecb(aes)", | 1676 | .alg = "ecb(aes)", |
1424 | .test = alg_test_skcipher, | 1677 | .test = alg_test_skcipher, |
1678 | .fips_allowed = 1, | ||
1425 | .suite = { | 1679 | .suite = { |
1426 | .cipher = { | 1680 | .cipher = { |
1427 | .enc = { | 1681 | .enc = { |
@@ -1527,6 +1781,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1527 | }, { | 1781 | }, { |
1528 | .alg = "ecb(des)", | 1782 | .alg = "ecb(des)", |
1529 | .test = alg_test_skcipher, | 1783 | .test = alg_test_skcipher, |
1784 | .fips_allowed = 1, | ||
1530 | .suite = { | 1785 | .suite = { |
1531 | .cipher = { | 1786 | .cipher = { |
1532 | .enc = { | 1787 | .enc = { |
@@ -1542,6 +1797,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1542 | }, { | 1797 | }, { |
1543 | .alg = "ecb(des3_ede)", | 1798 | .alg = "ecb(des3_ede)", |
1544 | .test = alg_test_skcipher, | 1799 | .test = alg_test_skcipher, |
1800 | .fips_allowed = 1, | ||
1545 | .suite = { | 1801 | .suite = { |
1546 | .cipher = { | 1802 | .cipher = { |
1547 | .enc = { | 1803 | .enc = { |
@@ -1677,6 +1933,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1677 | }, { | 1933 | }, { |
1678 | .alg = "gcm(aes)", | 1934 | .alg = "gcm(aes)", |
1679 | .test = alg_test_aead, | 1935 | .test = alg_test_aead, |
1936 | .fips_allowed = 1, | ||
1680 | .suite = { | 1937 | .suite = { |
1681 | .aead = { | 1938 | .aead = { |
1682 | .enc = { | 1939 | .enc = { |
@@ -1719,6 +1976,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1719 | }, { | 1976 | }, { |
1720 | .alg = "hmac(sha1)", | 1977 | .alg = "hmac(sha1)", |
1721 | .test = alg_test_hash, | 1978 | .test = alg_test_hash, |
1979 | .fips_allowed = 1, | ||
1722 | .suite = { | 1980 | .suite = { |
1723 | .hash = { | 1981 | .hash = { |
1724 | .vecs = hmac_sha1_tv_template, | 1982 | .vecs = hmac_sha1_tv_template, |
@@ -1728,6 +1986,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1728 | }, { | 1986 | }, { |
1729 | .alg = "hmac(sha224)", | 1987 | .alg = "hmac(sha224)", |
1730 | .test = alg_test_hash, | 1988 | .test = alg_test_hash, |
1989 | .fips_allowed = 1, | ||
1731 | .suite = { | 1990 | .suite = { |
1732 | .hash = { | 1991 | .hash = { |
1733 | .vecs = hmac_sha224_tv_template, | 1992 | .vecs = hmac_sha224_tv_template, |
@@ -1737,6 +1996,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1737 | }, { | 1996 | }, { |
1738 | .alg = "hmac(sha256)", | 1997 | .alg = "hmac(sha256)", |
1739 | .test = alg_test_hash, | 1998 | .test = alg_test_hash, |
1999 | .fips_allowed = 1, | ||
1740 | .suite = { | 2000 | .suite = { |
1741 | .hash = { | 2001 | .hash = { |
1742 | .vecs = hmac_sha256_tv_template, | 2002 | .vecs = hmac_sha256_tv_template, |
@@ -1746,6 +2006,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1746 | }, { | 2006 | }, { |
1747 | .alg = "hmac(sha384)", | 2007 | .alg = "hmac(sha384)", |
1748 | .test = alg_test_hash, | 2008 | .test = alg_test_hash, |
2009 | .fips_allowed = 1, | ||
1749 | .suite = { | 2010 | .suite = { |
1750 | .hash = { | 2011 | .hash = { |
1751 | .vecs = hmac_sha384_tv_template, | 2012 | .vecs = hmac_sha384_tv_template, |
@@ -1755,6 +2016,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1755 | }, { | 2016 | }, { |
1756 | .alg = "hmac(sha512)", | 2017 | .alg = "hmac(sha512)", |
1757 | .test = alg_test_hash, | 2018 | .test = alg_test_hash, |
2019 | .fips_allowed = 1, | ||
1758 | .suite = { | 2020 | .suite = { |
1759 | .hash = { | 2021 | .hash = { |
1760 | .vecs = hmac_sha512_tv_template, | 2022 | .vecs = hmac_sha512_tv_template, |
@@ -1836,15 +2098,32 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1836 | }, { | 2098 | }, { |
1837 | .alg = "rfc3686(ctr(aes))", | 2099 | .alg = "rfc3686(ctr(aes))", |
1838 | .test = alg_test_skcipher, | 2100 | .test = alg_test_skcipher, |
2101 | .fips_allowed = 1, | ||
1839 | .suite = { | 2102 | .suite = { |
1840 | .cipher = { | 2103 | .cipher = { |
1841 | .enc = { | 2104 | .enc = { |
1842 | .vecs = aes_ctr_enc_tv_template, | 2105 | .vecs = aes_ctr_rfc3686_enc_tv_template, |
1843 | .count = AES_CTR_ENC_TEST_VECTORS | 2106 | .count = AES_CTR_3686_ENC_TEST_VECTORS |
1844 | }, | 2107 | }, |
1845 | .dec = { | 2108 | .dec = { |
1846 | .vecs = aes_ctr_dec_tv_template, | 2109 | .vecs = aes_ctr_rfc3686_dec_tv_template, |
1847 | .count = AES_CTR_DEC_TEST_VECTORS | 2110 | .count = AES_CTR_3686_DEC_TEST_VECTORS |
2111 | } | ||
2112 | } | ||
2113 | } | ||
2114 | }, { | ||
2115 | .alg = "rfc4309(ccm(aes))", | ||
2116 | .test = alg_test_aead, | ||
2117 | .fips_allowed = 1, | ||
2118 | .suite = { | ||
2119 | .aead = { | ||
2120 | .enc = { | ||
2121 | .vecs = aes_ccm_rfc4309_enc_tv_template, | ||
2122 | .count = AES_CCM_4309_ENC_TEST_VECTORS | ||
2123 | }, | ||
2124 | .dec = { | ||
2125 | .vecs = aes_ccm_rfc4309_dec_tv_template, | ||
2126 | .count = AES_CCM_4309_DEC_TEST_VECTORS | ||
1848 | } | 2127 | } |
1849 | } | 2128 | } |
1850 | } | 2129 | } |
@@ -1898,6 +2177,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1898 | }, { | 2177 | }, { |
1899 | .alg = "sha1", | 2178 | .alg = "sha1", |
1900 | .test = alg_test_hash, | 2179 | .test = alg_test_hash, |
2180 | .fips_allowed = 1, | ||
1901 | .suite = { | 2181 | .suite = { |
1902 | .hash = { | 2182 | .hash = { |
1903 | .vecs = sha1_tv_template, | 2183 | .vecs = sha1_tv_template, |
@@ -1907,6 +2187,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1907 | }, { | 2187 | }, { |
1908 | .alg = "sha224", | 2188 | .alg = "sha224", |
1909 | .test = alg_test_hash, | 2189 | .test = alg_test_hash, |
2190 | .fips_allowed = 1, | ||
1910 | .suite = { | 2191 | .suite = { |
1911 | .hash = { | 2192 | .hash = { |
1912 | .vecs = sha224_tv_template, | 2193 | .vecs = sha224_tv_template, |
@@ -1916,6 +2197,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1916 | }, { | 2197 | }, { |
1917 | .alg = "sha256", | 2198 | .alg = "sha256", |
1918 | .test = alg_test_hash, | 2199 | .test = alg_test_hash, |
2200 | .fips_allowed = 1, | ||
1919 | .suite = { | 2201 | .suite = { |
1920 | .hash = { | 2202 | .hash = { |
1921 | .vecs = sha256_tv_template, | 2203 | .vecs = sha256_tv_template, |
@@ -1925,6 +2207,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1925 | }, { | 2207 | }, { |
1926 | .alg = "sha384", | 2208 | .alg = "sha384", |
1927 | .test = alg_test_hash, | 2209 | .test = alg_test_hash, |
2210 | .fips_allowed = 1, | ||
1928 | .suite = { | 2211 | .suite = { |
1929 | .hash = { | 2212 | .hash = { |
1930 | .vecs = sha384_tv_template, | 2213 | .vecs = sha384_tv_template, |
@@ -1934,6 +2217,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1934 | }, { | 2217 | }, { |
1935 | .alg = "sha512", | 2218 | .alg = "sha512", |
1936 | .test = alg_test_hash, | 2219 | .test = alg_test_hash, |
2220 | .fips_allowed = 1, | ||
1937 | .suite = { | 2221 | .suite = { |
1938 | .hash = { | 2222 | .hash = { |
1939 | .vecs = sha512_tv_template, | 2223 | .vecs = sha512_tv_template, |
@@ -2077,60 +2361,36 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
2077 | if (i < 0) | 2361 | if (i < 0) |
2078 | goto notest; | 2362 | goto notest; |
2079 | 2363 | ||
2080 | return alg_test_cipher(alg_test_descs + i, driver, type, mask); | 2364 | if (fips_enabled && !alg_test_descs[i].fips_allowed) |
2365 | goto non_fips_alg; | ||
2366 | |||
2367 | rc = alg_test_cipher(alg_test_descs + i, driver, type, mask); | ||
2368 | goto test_done; | ||
2081 | } | 2369 | } |
2082 | 2370 | ||
2083 | i = alg_find_test(alg); | 2371 | i = alg_find_test(alg); |
2084 | if (i < 0) | 2372 | if (i < 0) |
2085 | goto notest; | 2373 | goto notest; |
2086 | 2374 | ||
2375 | if (fips_enabled && !alg_test_descs[i].fips_allowed) | ||
2376 | goto non_fips_alg; | ||
2377 | |||
2087 | rc = alg_test_descs[i].test(alg_test_descs + i, driver, | 2378 | rc = alg_test_descs[i].test(alg_test_descs + i, driver, |
2088 | type, mask); | 2379 | type, mask); |
2380 | test_done: | ||
2089 | if (fips_enabled && rc) | 2381 | if (fips_enabled && rc) |
2090 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); | 2382 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); |
2091 | 2383 | ||
2384 | if (fips_enabled && !rc) | ||
2385 | printk(KERN_INFO "alg: self-tests for %s (%s) passed\n", | ||
2386 | driver, alg); | ||
2387 | |||
2092 | return rc; | 2388 | return rc; |
2093 | 2389 | ||
2094 | notest: | 2390 | notest: |
2095 | printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); | 2391 | printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); |
2096 | return 0; | 2392 | return 0; |
2393 | non_fips_alg: | ||
2394 | return -EINVAL; | ||
2097 | } | 2395 | } |
2098 | EXPORT_SYMBOL_GPL(alg_test); | 2396 | EXPORT_SYMBOL_GPL(alg_test); |
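The alg_test() changes work together with the new .fips_allowed flags in alg_test_descs[]: in FIPS mode a non-approved algorithm is rejected up front with -EINVAL, a failed self-test panics the kernel, and a passing one is logged. A rough userspace sketch of that control flow (table contents and names are made up for illustration, and abort() stands in for panic()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy descriptor table: only the fields relevant to the gating logic. */
struct test_desc {
        const char *alg;
        int fips_allowed;
        int (*test)(const struct test_desc *desc);
};

static int fips_enabled = 1;            /* assume the fips=1 boot flag */

static int dummy_pass(const struct test_desc *desc)
{
        (void)desc;
        return 0;
}

static const struct test_desc descs[] = {
        { "sha256", 1, dummy_pass },
        { "md5",    0, dummy_pass },    /* not approved in FIPS mode */
};

static int alg_test_like(const char *alg)
{
        size_t i;
        int rc;

        for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
                if (strcmp(descs[i].alg, alg))
                        continue;

                /* Non-approved algorithms are rejected outright in FIPS mode. */
                if (fips_enabled && !descs[i].fips_allowed)
                        return -22;     /* -EINVAL */

                rc = descs[i].test(&descs[i]);

                /* A self-test failure in FIPS mode is fatal (panic() in the kernel). */
                if (fips_enabled && rc)
                        abort();

                if (fips_enabled && !rc)
                        printf("alg: self-tests for %s passed\n", alg);
                return rc;
        }

        printf("alg: No test for %s\n", alg);
        return 0;
}

int main(void)
{
        printf("sha256 -> %d\n", alg_test_like("sha256"));
        printf("md5    -> %d\n", alg_test_like("md5"));
        return 0;
}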
2099 | |||
2100 | int __init testmgr_init(void) | ||
2101 | { | ||
2102 | int i; | ||
2103 | |||
2104 | for (i = 0; i < XBUFSIZE; i++) { | ||
2105 | xbuf[i] = (void *)__get_free_page(GFP_KERNEL); | ||
2106 | if (!xbuf[i]) | ||
2107 | goto err_free_xbuf; | ||
2108 | } | ||
2109 | |||
2110 | for (i = 0; i < XBUFSIZE; i++) { | ||
2111 | axbuf[i] = (void *)__get_free_page(GFP_KERNEL); | ||
2112 | if (!axbuf[i]) | ||
2113 | goto err_free_axbuf; | ||
2114 | } | ||
2115 | |||
2116 | return 0; | ||
2117 | |||
2118 | err_free_axbuf: | ||
2119 | for (i = 0; i < XBUFSIZE && axbuf[i]; i++) | ||
2120 | free_page((unsigned long)axbuf[i]); | ||
2121 | err_free_xbuf: | ||
2122 | for (i = 0; i < XBUFSIZE && xbuf[i]; i++) | ||
2123 | free_page((unsigned long)xbuf[i]); | ||
2124 | |||
2125 | return -ENOMEM; | ||
2126 | } | ||
2127 | |||
2128 | void testmgr_exit(void) | ||
2129 | { | ||
2130 | int i; | ||
2131 | |||
2132 | for (i = 0; i < XBUFSIZE; i++) | ||
2133 | free_page((unsigned long)axbuf[i]); | ||
2134 | for (i = 0; i < XBUFSIZE; i++) | ||
2135 | free_page((unsigned long)xbuf[i]); | ||
2136 | } | ||
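With testmgr_init() and testmgr_exit() removed, the scratch pages are no longer held for the lifetime of the module; each test function now grabs and releases them itself through testmgr_alloc_buf()/testmgr_free_buf(). A sketch of that allocate-or-roll-back helper pair, assuming malloc() in place of __get_free_page():

#include <errno.h>
#include <stdlib.h>

#define XBUFSIZE 8
#define BUF_SIZE 4096                   /* stands in for PAGE_SIZE */

/* Grab XBUFSIZE buffers for one test run; undo everything on failure. */
static int testbuf_alloc(char *buf[XBUFSIZE])
{
        int i;

        for (i = 0; i < XBUFSIZE; i++) {
                buf[i] = malloc(BUF_SIZE);
                if (!buf[i])
                        goto err_free;
        }
        return 0;

err_free:
        while (--i >= 0)                /* free only what was allocated */
                free(buf[i]);
        return -ENOMEM;
}

static void testbuf_free(char *buf[XBUFSIZE])
{
        int i;

        for (i = 0; i < XBUFSIZE; i++)
                free(buf[i]);
}

int main(void)
{
        char *xbuf[XBUFSIZE];

        if (testbuf_alloc(xbuf))
                return 1;
        /* ... run one batch of vectors against xbuf ... */
        testbuf_free(xbuf);
        return 0;
}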
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 526f00a9c72f..69316228fc19 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -62,6 +62,7 @@ struct aead_testvec { | |||
62 | int np; | 62 | int np; |
63 | int anp; | 63 | int anp; |
64 | unsigned char fail; | 64 | unsigned char fail; |
65 | unsigned char novrfy; /* ccm dec verification failure expected */ | ||
65 | unsigned char wk; /* weak key flag */ | 66 | unsigned char wk; /* weak key flag */ |
66 | unsigned char klen; | 67 | unsigned char klen; |
67 | unsigned short ilen; | 68 | unsigned short ilen; |
@@ -69,6 +70,18 @@ struct aead_testvec { | |||
69 | unsigned short rlen; | 70 | unsigned short rlen; |
70 | }; | 71 | }; |
71 | 72 | ||
73 | struct cprng_testvec { | ||
74 | char *key; | ||
75 | char *dt; | ||
76 | char *v; | ||
77 | char *result; | ||
78 | unsigned char klen; | ||
79 | unsigned short dtlen; | ||
80 | unsigned short vlen; | ||
81 | unsigned short rlen; | ||
82 | unsigned short loops; | ||
83 | }; | ||
84 | |||
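crypto_rng_reset() takes a single seed blob, and test_cprng() assembles it from the cprng_testvec fields as V || key || DT, so the seed size is vlen + klen + dtlen. A tiny sketch of that layout (the sample values are arbitrary placeholders, not real vectors):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical vector with the same length fields as cprng_testvec. */
struct cprng_vec {
        const char *key, *dt, *v;
        unsigned char klen;
        unsigned short dtlen, vlen;
};

/* Assemble the seed blob the way test_cprng() feeds crypto_rng_reset(). */
static unsigned char *build_seed(const struct cprng_vec *tv, size_t *seedsize)
{
        unsigned char *seed;

        *seedsize = tv->vlen + tv->klen + tv->dtlen;
        seed = malloc(*seedsize);
        if (!seed)
                return NULL;

        memcpy(seed, tv->v, tv->vlen);                          /* V first  */
        memcpy(seed + tv->vlen, tv->key, tv->klen);             /* then key */
        memcpy(seed + tv->vlen + tv->klen, tv->dt, tv->dtlen);  /* then DT  */
        return seed;
}

int main(void)
{
        const struct cprng_vec tv = {
                .key = "0123456789abcdef", .klen = 16,
                .dt  = "fedcba9876543210", .dtlen = 16,
                .v   = "0000000000000000", .vlen = 16,
        };
        size_t seedsize;
        unsigned char *seed = build_seed(&tv, &seedsize);

        if (!seed)
                return 1;
        printf("seed is %zu bytes (V || key || DT)\n", seedsize);
        free(seed);
        return 0;
}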
72 | static char zeroed_string[48]; | 85 | static char zeroed_string[48]; |
73 | 86 | ||
74 | /* | 87 | /* |
@@ -2841,12 +2854,16 @@ static struct cipher_testvec cast6_dec_tv_template[] = { | |||
2841 | #define AES_LRW_DEC_TEST_VECTORS 8 | 2854 | #define AES_LRW_DEC_TEST_VECTORS 8 |
2842 | #define AES_XTS_ENC_TEST_VECTORS 4 | 2855 | #define AES_XTS_ENC_TEST_VECTORS 4 |
2843 | #define AES_XTS_DEC_TEST_VECTORS 4 | 2856 | #define AES_XTS_DEC_TEST_VECTORS 4 |
2844 | #define AES_CTR_ENC_TEST_VECTORS 7 | 2857 | #define AES_CTR_ENC_TEST_VECTORS 3 |
2845 | #define AES_CTR_DEC_TEST_VECTORS 6 | 2858 | #define AES_CTR_DEC_TEST_VECTORS 3 |
2859 | #define AES_CTR_3686_ENC_TEST_VECTORS 7 | ||
2860 | #define AES_CTR_3686_DEC_TEST_VECTORS 6 | ||
2846 | #define AES_GCM_ENC_TEST_VECTORS 9 | 2861 | #define AES_GCM_ENC_TEST_VECTORS 9 |
2847 | #define AES_GCM_DEC_TEST_VECTORS 8 | 2862 | #define AES_GCM_DEC_TEST_VECTORS 8 |
2848 | #define AES_CCM_ENC_TEST_VECTORS 7 | 2863 | #define AES_CCM_ENC_TEST_VECTORS 7 |
2849 | #define AES_CCM_DEC_TEST_VECTORS 7 | 2864 | #define AES_CCM_DEC_TEST_VECTORS 7 |
2865 | #define AES_CCM_4309_ENC_TEST_VECTORS 7 | ||
2866 | #define AES_CCM_4309_DEC_TEST_VECTORS 10 | ||
2850 | 2867 | ||
2851 | static struct cipher_testvec aes_enc_tv_template[] = { | 2868 | static struct cipher_testvec aes_enc_tv_template[] = { |
2852 | { /* From FIPS-197 */ | 2869 | { /* From FIPS-197 */ |
@@ -3983,6 +4000,164 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = { | |||
3983 | 4000 | ||
3984 | 4001 | ||
3985 | static struct cipher_testvec aes_ctr_enc_tv_template[] = { | 4002 | static struct cipher_testvec aes_ctr_enc_tv_template[] = { |
4003 | { /* From NIST Special Publication 800-38A, Appendix F.5 */ | ||
4004 | .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" | ||
4005 | "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", | ||
4006 | .klen = 16, | ||
4007 | .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" | ||
4008 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | ||
4009 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
4010 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
4011 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
4012 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
4013 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
4014 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
4015 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
4016 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
4017 | .ilen = 64, | ||
4018 | .result = "\x87\x4d\x61\x91\xb6\x20\xe3\x26" | ||
4019 | "\x1b\xef\x68\x64\x99\x0d\xb6\xce" | ||
4020 | "\x98\x06\xf6\x6b\x79\x70\xfd\xff" | ||
4021 | "\x86\x17\x18\x7b\xb9\xff\xfd\xff" | ||
4022 | "\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e" | ||
4023 | "\x5b\x4f\x09\x02\x0d\xb0\x3e\xab" | ||
4024 | "\x1e\x03\x1d\xda\x2f\xbe\x03\xd1" | ||
4025 | "\x79\x21\x70\xa0\xf3\x00\x9c\xee", | ||
4026 | .rlen = 64, | ||
4027 | }, { | ||
4028 | .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" | ||
4029 | "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" | ||
4030 | "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", | ||
4031 | .klen = 24, | ||
4032 | .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" | ||
4033 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | ||
4034 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
4035 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
4036 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
4037 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
4038 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
4039 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
4040 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
4041 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
4042 | .ilen = 64, | ||
4043 | .result = "\x1a\xbc\x93\x24\x17\x52\x1c\xa2" | ||
4044 | "\x4f\x2b\x04\x59\xfe\x7e\x6e\x0b" | ||
4045 | "\x09\x03\x39\xec\x0a\xa6\xfa\xef" | ||
4046 | "\xd5\xcc\xc2\xc6\xf4\xce\x8e\x94" | ||
4047 | "\x1e\x36\xb2\x6b\xd1\xeb\xc6\x70" | ||
4048 | "\xd1\xbd\x1d\x66\x56\x20\xab\xf7" | ||
4049 | "\x4f\x78\xa7\xf6\xd2\x98\x09\x58" | ||
4050 | "\x5a\x97\xda\xec\x58\xc6\xb0\x50", | ||
4051 | .rlen = 64, | ||
4052 | }, { | ||
4053 | .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" | ||
4054 | "\x2b\x73\xae\xf0\x85\x7d\x77\x81" | ||
4055 | "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" | ||
4056 | "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", | ||
4057 | .klen = 32, | ||
4058 | .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" | ||
4059 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | ||
4060 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
4061 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
4062 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
4063 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
4064 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
4065 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
4066 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
4067 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
4068 | .ilen = 64, | ||
4069 | .result = "\x60\x1e\xc3\x13\x77\x57\x89\xa5" | ||
4070 | "\xb7\xa7\xf5\x04\xbb\xf3\xd2\x28" | ||
4071 | "\xf4\x43\xe3\xca\x4d\x62\xb5\x9a" | ||
4072 | "\xca\x84\xe9\x90\xca\xca\xf5\xc5" | ||
4073 | "\x2b\x09\x30\xda\xa2\x3d\xe9\x4c" | ||
4074 | "\xe8\x70\x17\xba\x2d\x84\x98\x8d" | ||
4075 | "\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6" | ||
4076 | "\x13\xc2\xdd\x08\x45\x79\x41\xa6", | ||
4077 | .rlen = 64, | ||
4078 | } | ||
4079 | }; | ||
4080 | |||
4081 | static struct cipher_testvec aes_ctr_dec_tv_template[] = { | ||
4082 | { /* From NIST Special Publication 800-38A, Appendix F.5 */ | ||
4083 | .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" | ||
4084 | "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", | ||
4085 | .klen = 16, | ||
4086 | .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" | ||
4087 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | ||
4088 | .input = "\x87\x4d\x61\x91\xb6\x20\xe3\x26" | ||
4089 | "\x1b\xef\x68\x64\x99\x0d\xb6\xce" | ||
4090 | "\x98\x06\xf6\x6b\x79\x70\xfd\xff" | ||
4091 | "\x86\x17\x18\x7b\xb9\xff\xfd\xff" | ||
4092 | "\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e" | ||
4093 | "\x5b\x4f\x09\x02\x0d\xb0\x3e\xab" | ||
4094 | "\x1e\x03\x1d\xda\x2f\xbe\x03\xd1" | ||
4095 | "\x79\x21\x70\xa0\xf3\x00\x9c\xee", | ||
4096 | .ilen = 64, | ||
4097 | .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
4098 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
4099 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
4100 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
4101 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
4102 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
4103 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
4104 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
4105 | .rlen = 64, | ||
4106 | }, { | ||
4107 | .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" | ||
4108 | "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" | ||
4109 | "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", | ||
4110 | .klen = 24, | ||
4111 | .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" | ||
4112 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | ||
4113 | .input = "\x1a\xbc\x93\x24\x17\x52\x1c\xa2" | ||
4114 | "\x4f\x2b\x04\x59\xfe\x7e\x6e\x0b" | ||
4115 | "\x09\x03\x39\xec\x0a\xa6\xfa\xef" | ||
4116 | "\xd5\xcc\xc2\xc6\xf4\xce\x8e\x94" | ||
4117 | "\x1e\x36\xb2\x6b\xd1\xeb\xc6\x70" | ||
4118 | "\xd1\xbd\x1d\x66\x56\x20\xab\xf7" | ||
4119 | "\x4f\x78\xa7\xf6\xd2\x98\x09\x58" | ||
4120 | "\x5a\x97\xda\xec\x58\xc6\xb0\x50", | ||
4121 | .ilen = 64, | ||
4122 | .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
4123 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
4124 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
4125 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
4126 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
4127 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
4128 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
4129 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
4130 | .rlen = 64, | ||
4131 | }, { | ||
4132 | .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" | ||
4133 | "\x2b\x73\xae\xf0\x85\x7d\x77\x81" | ||
4134 | "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" | ||
4135 | "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", | ||
4136 | .klen = 32, | ||
4137 | .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" | ||
4138 | "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", | ||
4139 | .input = "\x60\x1e\xc3\x13\x77\x57\x89\xa5" | ||
4140 | "\xb7\xa7\xf5\x04\xbb\xf3\xd2\x28" | ||
4141 | "\xf4\x43\xe3\xca\x4d\x62\xb5\x9a" | ||
4142 | "\xca\x84\xe9\x90\xca\xca\xf5\xc5" | ||
4143 | "\x2b\x09\x30\xda\xa2\x3d\xe9\x4c" | ||
4144 | "\xe8\x70\x17\xba\x2d\x84\x98\x8d" | ||
4145 | "\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6" | ||
4146 | "\x13\xc2\xdd\x08\x45\x79\x41\xa6", | ||
4147 | .ilen = 64, | ||
4148 | .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
4149 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
4150 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
4151 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
4152 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
4153 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
4154 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
4155 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
4156 | .rlen = 64, | ||
4157 | } | ||
4158 | }; | ||
4159 | |||
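The new aes_ctr_enc/dec_tv_template entries above come from NIST SP 800-38A, where plain CTR treats the full 16-byte IV as the initial counter block and steps it as a big-endian integer for each block of keystream; the RFC 3686 vectors that follow (renamed to aes_ctr_rfc3686_*) instead build the counter block from a nonce, an 8-byte IV and a 32-bit block counter. A small sketch of the big-endian counter increment, shown only to illustrate how consecutive keystream blocks are derived:

#include <stdio.h>

/*
 * Increment a 16-byte counter block as a big-endian integer, the way a
 * CTR keystream generator steps from one block to the next.  Sketch only.
 */
static void ctr_inc(unsigned char ctr[16])
{
        int i;

        for (i = 15; i >= 0; i--) {
                if (++ctr[i] != 0)      /* stop once a byte doesn't wrap */
                        break;
        }
}

int main(void)
{
        /* Initial counter block from the SP 800-38A vectors above. */
        unsigned char ctr[16] = {
                0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
                0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
        };
        int block, i;

        for (block = 1; block <= 4; block++) {
                printf("counter fed to AES for block %d: ", block);
                for (i = 0; i < 16; i++)
                        printf("%02x", ctr[i]);
                printf("\n");
                ctr_inc(ctr);           /* step to the next block's counter */
        }
        return 0;
}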
4160 | static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = { | ||
3986 | { /* From RFC 3686 */ | 4161 | { /* From RFC 3686 */ |
3987 | .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" | 4162 | .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" |
3988 | "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" | 4163 | "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" |
@@ -5114,7 +5289,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = { | |||
5114 | }, | 5289 | }, |
5115 | }; | 5290 | }; |
5116 | 5291 | ||
5117 | static struct cipher_testvec aes_ctr_dec_tv_template[] = { | 5292 | static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = { |
5118 | { /* From RFC 3686 */ | 5293 | { /* From RFC 3686 */ |
5119 | .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" | 5294 | .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" |
5120 | "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" | 5295 | "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" |
@@ -5825,6 +6000,470 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = { | |||
5825 | }, | 6000 | }, |
5826 | }; | 6001 | }; |
5827 | 6002 | ||
6003 | /* | ||
6004 | * rfc4309 refers to section 8 of rfc3610 for test vectors, but those all | ||
6005 | * use a 13-byte nonce, while we only support an 11-byte nonce. Similarly, | ||
6006 | * all of Special Publication 800-38C's test vectors use nonce lengths our | ||
6007 | * implementation doesn't support. The following are taken from FIPS CAVS | ||
6008 | * fax files on hand at Red Hat. | ||
6009 | * | ||
6010 | * nb: actual key lengths are (klen - 3); the last 3 bytes are really part | ||
6011 | * of the nonce, which combines with the IV, but must be input this way. | ||
6012 | */ | ||
6013 | static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = { | ||
6014 | { | ||
6015 | .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" | ||
6016 | "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e" | ||
6017 | "\x96\xac\x59", | ||
6018 | .klen = 19, | ||
6019 | .iv = "\x30\x07\xa1\xe2\xa2\xc7\x55\x24", | ||
6020 | .alen = 0, | ||
6021 | .input = "\x19\xc8\x81\xf6\xe9\x86\xff\x93" | ||
6022 | "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e" | ||
6023 | "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c" | ||
6024 | "\x35\x2e\xad\xe0\x62\xf9\x91\xa1", | ||
6025 | .ilen = 32, | ||
6026 | .result = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8" | ||
6027 | "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1" | ||
6028 | "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a" | ||
6029 | "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32" | ||
6030 | "\xda\x24\xea\xd9\xa1\x39\x98\xfd" | ||
6031 | "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", | ||
6032 | .rlen = 48, | ||
6033 | }, { | ||
6034 | .key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0" | ||
6035 | "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3" | ||
6036 | "\x4f\xa3\x19", | ||
6037 | .klen = 19, | ||
6038 | .iv = "\xd3\x01\x5a\xd8\x30\x60\x15\x56", | ||
6039 | .assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63" | ||
6040 | "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7" | ||
6041 | "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1" | ||
6042 | "\x0a\x63\x09\x78\xbc\x2c\x55\xde", | ||
6043 | .alen = 32, | ||
6044 | .input = "\x87\xa3\x36\xfd\x96\xb3\x93\x78" | ||
6045 | "\xa9\x28\x63\xba\x12\xa3\x14\x85" | ||
6046 | "\x57\x1e\x06\xc9\x7b\x21\xef\x76" | ||
6047 | "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e", | ||
6048 | .ilen = 32, | ||
6049 | .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19" | ||
6050 | "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab" | ||
6051 | "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff" | ||
6052 | "\x3b\xb5\xce\x53\xef\xde\xbb\x02" | ||
6053 | "\xa9\x86\x15\x6c\x13\xfe\xda\x0a" | ||
6054 | "\x22\xb8\x29\x3d\xd8\x39\x9a\x23", | ||
6055 | .rlen = 48, | ||
6056 | }, { | ||
6057 | .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" | ||
6058 | "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75" | ||
6059 | "\x53\x14\x73\x66\x8d\x88\xf6\x80" | ||
6060 | "\xa0\x20\x35", | ||
6061 | .klen = 27, | ||
6062 | .iv = "\x26\xf2\x21\x8d\x50\x20\xda\xe2", | ||
6063 | .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" | ||
6064 | "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" | ||
6065 | "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" | ||
6066 | "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", | ||
6067 | .alen = 32, | ||
6068 | .ilen = 0, | ||
6069 | .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc" | ||
6070 | "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b", | ||
6071 | .rlen = 16, | ||
6072 | }, { | ||
6073 | .key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42" | ||
6074 | "\xef\x7a\xd3\xce\xfc\x84\x60\x62" | ||
6075 | "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01" | ||
6076 | "\xd6\x3c\x8c", | ||
6077 | .klen = 27, | ||
6078 | .iv = "\x86\x84\xb6\xcd\xef\x09\x2e\x94", | ||
6079 | .assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91" | ||
6080 | "\xb1\xb9\xda\x76\x9a\x78\x6d\x95" | ||
6081 | "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c" | ||
6082 | "\xe3\x00\x73\x69\x84\x69\x87\x79", | ||
6083 | .alen = 32, | ||
6084 | .input = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c" | ||
6085 | "\x43\x69\x3a\x2d\x8e\x70\xad\x7e" | ||
6086 | "\xe0\xe5\x46\x09\x80\x89\x13\xb2" | ||
6087 | "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b", | ||
6088 | .ilen = 32, | ||
6089 | .result = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62" | ||
6090 | "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f" | ||
6091 | "\x9b\x6a\x09\x70\xc1\x51\x83\xc2" | ||
6092 | "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e" | ||
6093 | "\xc7\x79\x11\x58\xe5\x6b\x20\x40" | ||
6094 | "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1", | ||
6095 | .rlen = 48, | ||
6096 | }, { | ||
6097 | .key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a" | ||
6098 | "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a" | ||
6099 | "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e" | ||
6100 | "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b" | ||
6101 | "\x1e\x29\x91", | ||
6102 | .klen = 35, | ||
6103 | .iv = "\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe", | ||
6104 | .assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b" | ||
6105 | "\x78\x2b\x94\x02\x29\x0f\x42\x27" | ||
6106 | "\x6b\x75\xcb\x98\x34\x08\x7e\x79" | ||
6107 | "\xe4\x3e\x49\x0d\x84\x8b\x22\x87", | ||
6108 | .alen = 32, | ||
6109 | .input = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f" | ||
6110 | "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66" | ||
6111 | "\xbf\x17\x99\x62\x4a\x39\x27\x1f" | ||
6112 | "\x1d\xdc\x24\xae\x19\x2f\x98\x4c", | ||
6113 | .ilen = 32, | ||
6114 | .result = "\x19\xb8\x61\x33\x45\x2b\x43\x96" | ||
6115 | "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6" | ||
6116 | "\x26\x3d\xf8\xc9\x65\x16\xa8\x9f" | ||
6117 | "\xf0\x62\x17\x34\xf2\x1e\x8d\x75" | ||
6118 | "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d", | ||
6119 | .rlen = 40, | ||
6120 | }, { | ||
6121 | .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c" | ||
6122 | "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32" | ||
6123 | "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c" | ||
6124 | "\x09\x75\x9a\x9b\x3c\x9b\x27\x39" | ||
6125 | "\xf9\xd9\x4e", | ||
6126 | .klen = 35, | ||
6127 | .iv = "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50", | ||
6128 | .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" | ||
6129 | "\x13\x02\x01\x0c\x83\x4c\x96\x35" | ||
6130 | "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" | ||
6131 | "\xb0\x39\x36\xe6\x8f\x57\xe0\x13", | ||
6132 | .alen = 32, | ||
6133 | .input = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6" | ||
6134 | "\x83\x72\x07\x4f\xcf\xfa\x66\x89" | ||
6135 | "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27" | ||
6136 | "\x30\xdb\x75\x09\x93\xd4\x65\xe4", | ||
6137 | .ilen = 32, | ||
6138 | .result = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d" | ||
6139 | "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d" | ||
6140 | "\xcc\x63\x44\x25\x07\x78\x4f\x9e" | ||
6141 | "\x96\xb8\x88\xeb\xbc\x48\x1f\x06" | ||
6142 | "\x39\xaf\x39\xac\xd8\x4a\x80\x39" | ||
6143 | "\x7b\x72\x8a\xf7", | ||
6144 | .rlen = 44, | ||
6145 | }, { | ||
6146 | .key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83" | ||
6147 | "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46" | ||
6148 | "\xf9\x8f\xad\xe3\x02\x13\x83\x77" | ||
6149 | "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b" | ||
6150 | "\x24\xa7\x8b", | ||
6151 | .klen = 35, | ||
6152 | .iv = "\x07\xcb\xcc\x0e\xe6\x33\xbf\xf5", | ||
6153 | .assoc = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f" | ||
6154 | "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d" | ||
6155 | "\xab\x90\x65\x8d\x8e\xca\x4d\x4f" | ||
6156 | "\x16\x0c\x40\x90\x4b\xc7\x36\x73", | ||
6157 | .alen = 32, | ||
6158 | .input = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92" | ||
6159 | "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d" | ||
6160 | "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2" | ||
6161 | "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a", | ||
6162 | .ilen = 32, | ||
6163 | .result = "\x83\x6f\x40\x87\x72\xcf\xc1\x13" | ||
6164 | "\xef\xbb\x80\x21\x04\x6c\x58\x09" | ||
6165 | "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7" | ||
6166 | "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf" | ||
6167 | "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d" | ||
6168 | "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8", | ||
6169 | .rlen = 48, | ||
6170 | }, | ||
6171 | }; | ||
6172 | |||
6173 | static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | ||
6174 | { | ||
6175 | .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" | ||
6176 | "\xff\x80\x2e\x48\x7d\x82\xf8\xb9" | ||
6177 | "\xc6\xfb\x7d", | ||
6178 | .klen = 19, | ||
6179 | .iv = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8", | ||
6180 | .alen = 0, | ||
6181 | .input = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b", | ||
6182 | .ilen = 8, | ||
6183 | .result = "\x00", | ||
6184 | .rlen = 0, | ||
6185 | .novrfy = 1, | ||
6186 | }, { | ||
6187 | .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" | ||
6188 | "\xff\x80\x2e\x48\x7d\x82\xf8\xb9" | ||
6189 | "\xaf\x94\x87", | ||
6190 | .klen = 19, | ||
6191 | .iv = "\x78\x35\x82\x81\x7f\x88\x94\x68", | ||
6192 | .alen = 0, | ||
6193 | .input = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3", | ||
6194 | .ilen = 8, | ||
6195 | .result = "\x00", | ||
6196 | .rlen = 0, | ||
6197 | }, { | ||
6198 | .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" | ||
6199 | "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8" | ||
6200 | "\xc6\xfb\x7d", | ||
6201 | .klen = 19, | ||
6202 | .iv = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8", | ||
6203 | .assoc = "\xf3\x94\x87\x78\x35\x82\x81\x7f" | ||
6204 | "\x88\x94\x68\xb1\x78\x6b\x2b\xd6" | ||
6205 | "\x04\x1f\x4e\xed\x78\xd5\x33\x66" | ||
6206 | "\xd8\x94\x99\x91\x81\x54\x62\x57", | ||
6207 | .alen = 32, | ||
6208 | .input = "\xf0\x7c\x29\x02\xae\x1c\x2f\x55" | ||
6209 | "\xd0\xd1\x3d\x1a\xa3\x6d\xe4\x0a" | ||
6210 | "\x86\xb0\x87\x6b\x62\x33\x8c\x34" | ||
6211 | "\xce\xab\x57\xcc\x79\x0b\xe0\x6f" | ||
6212 | "\x5c\x3e\x48\x1f\x6c\x46\xf7\x51" | ||
6213 | "\x8b\x84\x83\x2a\xc1\x05\xb8\xc5", | ||
6214 | .ilen = 48, | ||
6215 | .result = "\x50\x82\x3e\x07\xe2\x1e\xb6\xfb" | ||
6216 | "\x33\xe4\x73\xce\xd2\xfb\x95\x79" | ||
6217 | "\xe8\xb4\xb5\x77\x11\x10\x62\x6f" | ||
6218 | "\x6a\x82\xd1\x13\xec\xf5\xd0\x48", | ||
6219 | .rlen = 32, | ||
6220 | .novrfy = 1, | ||
6221 | }, { | ||
6222 | .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" | ||
6223 | "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8" | ||
6224 | "\x05\xe0\xc9", | ||
6225 | .klen = 19, | ||
6226 | .iv = "\x0f\xed\x34\xea\x97\xd4\x3b\xdf", | ||
6227 | .assoc = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81" | ||
6228 | "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1" | ||
6229 | "\xd8\x5c\x42\x68\xe0\x6c\xda\x89" | ||
6230 | "\x05\xac\x56\xac\x1b\x2a\xd3\x86", | ||
6231 | .alen = 32, | ||
6232 | .input = "\x39\xbe\x7d\x15\x62\x77\xf3\x3c" | ||
6233 | "\xad\x83\x52\x6d\x71\x03\x25\x1c" | ||
6234 | "\xed\x81\x3a\x9a\x16\x7d\x19\x80" | ||
6235 | "\x72\x04\x72\xd0\xf6\xff\x05\x0f" | ||
6236 | "\xb7\x14\x30\x00\x32\x9e\xa0\xa6" | ||
6237 | "\x9e\x5a\x18\xa1\xb8\xfe\xdb\xd3", | ||
6238 | .ilen = 48, | ||
6239 | .result = "\x75\x05\xbe\xc2\xd9\x1e\xde\x60" | ||
6240 | "\x47\x3d\x8c\x7d\xbd\xb5\xd9\xb7" | ||
6241 | "\xf2\xae\x61\x05\x8f\x82\x24\x3f" | ||
6242 | "\x9c\x67\x91\xe1\x38\x4f\xe4\x0c", | ||
6243 | .rlen = 32, | ||
6244 | }, { | ||
6245 | .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" | ||
6246 | "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" | ||
6247 | "\xa4\x48\x93\x39\x26\x71\x4a\xc6" | ||
6248 | "\xee\x49\x83", | ||
6249 | .klen = 27, | ||
6250 | .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e", | ||
6251 | .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" | ||
6252 | "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" | ||
6253 | "\xa4\xf0\x13\x05\xd1\x77\x99\x67" | ||
6254 | "\x11\xc4\xc6\xdb\x00\x56\x36\x61", | ||
6255 | .alen = 32, | ||
6256 | .input = "\x71\x99\xfa\xf4\x44\x12\x68\x9b", | ||
6257 | .ilen = 8, | ||
6258 | .result = "\x00", | ||
6259 | .rlen = 0, | ||
6260 | }, { | ||
6261 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" | ||
6262 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" | ||
6263 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" | ||
6264 | "\xee\x49\x83", | ||
6265 | .klen = 27, | ||
6266 | .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e", | ||
6267 | .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" | ||
6268 | "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" | ||
6269 | "\xa4\xf0\x13\x05\xd1\x77\x99\x67" | ||
6270 | "\x11\xc4\xc6\xdb\x00\x56\x36\x61", | ||
6271 | .alen = 32, | ||
6272 | .input = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7" | ||
6273 | "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2" | ||
6274 | "\x66\xca\x61\x1e\x96\x7a\x61\xb3" | ||
6275 | "\x1c\x16\x45\x52\xba\x04\x9c\x9f" | ||
6276 | "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1", | ||
6277 | .ilen = 40, | ||
6278 | .result = "\x85\x34\x66\x42\xc8\x92\x0f\x36" | ||
6279 | "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb" | ||
6280 | "\x0a\x85\xcc\x02\xad\x7a\x96\xe9" | ||
6281 | "\x65\x43\xa4\xc3\x0f\xdc\x55\x81", | ||
6282 | .rlen = 32, | ||
6283 | }, { | ||
6284 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" | ||
6285 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" | ||
6286 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" | ||
6287 | "\xd1\xfc\x57", | ||
6288 | .klen = 27, | ||
6289 | .iv = "\x9c\xfe\xb8\x9c\xad\x71\xaa\x1f", | ||
6290 | .assoc = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6" | ||
6291 | "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3" | ||
6292 | "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7" | ||
6293 | "\x3c\xa1\x52\x13\x03\x8a\x23\x3a", | ||
6294 | .alen = 32, | ||
6295 | .input = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00" | ||
6296 | "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37" | ||
6297 | "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa" | ||
6298 | "\x64\x19\xc0\x30\xd7\xfc\x14\x6b" | ||
6299 | "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f" | ||
6300 | "\xa9\xb4\x2d\x68\x03\xa3\x44\xef", | ||
6301 | .ilen = 48, | ||
6302 | .result = "\x02\x87\x4d\x28\x80\x6e\xb2\xed" | ||
6303 | "\x99\x2a\xa8\xca\x04\x25\x45\x90" | ||
6304 | "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c" | ||
6305 | "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b", | ||
6306 | .rlen = 32, | ||
6307 | .novrfy = 1, | ||
6308 | }, { | ||
6309 | .key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01" | ||
6310 | "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c" | ||
6311 | "\x20\x2c\xad\x30\xc2\x2b\x41\xfb" | ||
6312 | "\x0e\x85\xbc\x33\xad\x0f\x2b\xff" | ||
6313 | "\xee\x49\x83", | ||
6314 | .klen = 35, | ||
6315 | .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e", | ||
6316 | .alen = 0, | ||
6317 | .input = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2", | ||
6318 | .ilen = 8, | ||
6319 | .result = "\x00", | ||
6320 | .rlen = 0, | ||
6321 | }, { | ||
6322 | .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" | ||
6323 | "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" | ||
6324 | "\xa4\x48\x93\x39\x26\x71\x4a\xc6" | ||
6325 | "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb" | ||
6326 | "\x85\x34\x66", | ||
6327 | .klen = 35, | ||
6328 | .iv = "\x42\xc8\x92\x0f\x36\x58\xe0\x6b", | ||
6329 | .alen = 0, | ||
6330 | .input = "\x48\x01\x5e\x02\x24\x04\x66\x47" | ||
6331 | "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd" | ||
6332 | "\xa5\xa9\x87\x8d\x84\xee\x2e\x77" | ||
6333 | "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6" | ||
6334 | "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07" | ||
6335 | "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a", | ||
6336 | .ilen = 48, | ||
6337 | .result = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c" | ||
6338 | "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99" | ||
6339 | "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39" | ||
6340 | "\x89\xd4\x75\x7a\x63\xb1\xda\x93", | ||
6341 | .rlen = 32, | ||
6342 | .novrfy = 1, | ||
6343 | }, { | ||
6344 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" | ||
6345 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" | ||
6346 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" | ||
6347 | "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b" | ||
6348 | "\xcf\x76\x3f", | ||
6349 | .klen = 35, | ||
6350 | .iv = "\xd9\x95\x75\x8f\x44\x89\x40\x7b", | ||
6351 | .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" | ||
6352 | "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" | ||
6353 | "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" | ||
6354 | "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe", | ||
6355 | .alen = 32, | ||
6356 | .input = "\x48\x58\xd6\xf3\xad\x63\x58\xbf" | ||
6357 | "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4" | ||
6358 | "\x78\x5c\x4c\x67\x71\x89\x94\xbf" | ||
6359 | "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5" | ||
6360 | "\x7f\x44\x0a\x0c\x01\x18\x07\x92" | ||
6361 | "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b", | ||
6362 | .ilen = 48, | ||
6363 | .result = "\xc2\x54\xc8\xde\x78\x87\x77\x40" | ||
6364 | "\x49\x71\xe4\xb7\xe7\xcb\x76\x61" | ||
6365 | "\x0a\x41\xb9\xe9\xc0\x76\x54\xab" | ||
6366 | "\x04\x49\x3b\x19\x93\x57\x25\x5d", | ||
6367 | .rlen = 32, | ||
6368 | }, | ||
6369 | }; | ||
6370 | |||
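As the comment above the aes_ccm_rfc4309 templates explains, .klen covers the AES key plus a trailing 3-byte salt, and that salt is joined with the 8-byte per-message IV to form CCM's 11-byte nonce. A short sketch of that split, using the first encryption vector above (illustrative only):

#include <stdio.h>
#include <string.h>

/*
 * Split an rfc4309-style key blob (AES key || 3-byte salt) and build the
 * 11-byte CCM nonce as salt || 8-byte IV.  Mirrors how the test vectors
 * above are laid out; purely illustrative.
 */
int main(void)
{
        /* First enc vector above: klen = 19, i.e. 16-byte key + 3-byte salt. */
        const unsigned char blob[19] = {
                0x83, 0xac, 0x54, 0x66, 0xc2, 0xeb, 0xe5, 0x05,
                0x2e, 0x01, 0xd1, 0xfc, 0x5d, 0x82, 0x66, 0x2e,
                0x96, 0xac, 0x59,
        };
        const unsigned char iv[8] = {
                0x30, 0x07, 0xa1, 0xe2, 0xa2, 0xc7, 0x55, 0x24,
        };
        unsigned char key[16], nonce[11];
        size_t keylen = sizeof(blob) - 3;
        size_t i;

        memcpy(key, blob, keylen);              /* real AES key            */
        memcpy(nonce, blob + keylen, 3);        /* salt from the key blob  */
        memcpy(nonce + 3, iv, 8);               /* per-message IV          */

        printf("AES key  : ");
        for (i = 0; i < keylen; i++)
                printf("%02x", key[i]);
        printf("\nCCM nonce: ");
        for (i = 0; i < sizeof(nonce); i++)
                printf("%02x", nonce[i]);
        printf("\n");
        return 0;
}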
6371 | /* | ||
6372 | * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode) | ||
6373 | * test vectors, taken from Appendix B.2.9 and B.2.10: | ||
6374 | * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf | ||
6375 | * Only AES-128 is supported at this time. | ||
6376 | */ | ||
6377 | #define ANSI_CPRNG_AES_TEST_VECTORS 6 | ||
6378 | |||
6379 | static struct cprng_testvec ansi_cprng_aes_tv_template[] = { | ||
6380 | { | ||
6381 | .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" | ||
6382 | "\xed\x06\x1c\xab\xb8\xd4\x62\x02", | ||
6383 | .klen = 16, | ||
6384 | .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" | ||
6385 | "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9", | ||
6386 | .dtlen = 16, | ||
6387 | .v = "\x80\x00\x00\x00\x00\x00\x00\x00" | ||
6388 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
6389 | .vlen = 16, | ||
6390 | .result = "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55" | ||
6391 | "\x84\x79\x66\x85\xc1\x2f\x76\x41", | ||
6392 | .rlen = 16, | ||
6393 | .loops = 1, | ||
6394 | }, { | ||
6395 | .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" | ||
6396 | "\xed\x06\x1c\xab\xb8\xd4\x62\x02", | ||
6397 | .klen = 16, | ||
6398 | .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" | ||
6399 | "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfa", | ||
6400 | .dtlen = 16, | ||
6401 | .v = "\xc0\x00\x00\x00\x00\x00\x00\x00" | ||
6402 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
6403 | .vlen = 16, | ||
6404 | .result = "\x7c\x22\x2c\xf4\xca\x8f\xa2\x4c" | ||
6405 | "\x1c\x9c\xb6\x41\xa9\xf3\x22\x0d", | ||
6406 | .rlen = 16, | ||
6407 | .loops = 1, | ||
6408 | }, { | ||
6409 | .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" | ||
6410 | "\xed\x06\x1c\xab\xb8\xd4\x62\x02", | ||
6411 | .klen = 16, | ||
6412 | .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" | ||
6413 | "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfb", | ||
6414 | .dtlen = 16, | ||
6415 | .v = "\xe0\x00\x00\x00\x00\x00\x00\x00" | ||
6416 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
6417 | .vlen = 16, | ||
6418 | .result = "\x8a\xaa\x00\x39\x66\x67\x5b\xe5" | ||
6419 | "\x29\x14\x28\x81\xa9\x4d\x4e\xc7", | ||
6420 | .rlen = 16, | ||
6421 | .loops = 1, | ||
6422 | }, { | ||
6423 | .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" | ||
6424 | "\xed\x06\x1c\xab\xb8\xd4\x62\x02", | ||
6425 | .klen = 16, | ||
6426 | .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" | ||
6427 | "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfc", | ||
6428 | .dtlen = 16, | ||
6429 | .v = "\xf0\x00\x00\x00\x00\x00\x00\x00" | ||
6430 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
6431 | .vlen = 16, | ||
6432 | .result = "\x88\xdd\xa4\x56\x30\x24\x23\xe5" | ||
6433 | "\xf6\x9d\xa5\x7e\x7b\x95\xc7\x3a", | ||
6434 | .rlen = 16, | ||
6435 | .loops = 1, | ||
6436 | }, { | ||
6437 | .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42" | ||
6438 | "\xed\x06\x1c\xab\xb8\xd4\x62\x02", | ||
6439 | .klen = 16, | ||
6440 | .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62" | ||
6441 | "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfd", | ||
6442 | .dtlen = 16, | ||
6443 | .v = "\xf8\x00\x00\x00\x00\x00\x00\x00" | ||
6444 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
6445 | .vlen = 16, | ||
6446 | .result = "\x05\x25\x92\x46\x61\x79\xd2\xcb" | ||
6447 | "\x78\xc4\x0b\x14\x0a\x5a\x9a\xc8", | ||
6448 | .rlen = 16, | ||
6449 | .loops = 1, | ||
6450 | }, { /* Monte Carlo Test */ | ||
6451 | .key = "\x9f\x5b\x51\x20\x0b\xf3\x34\xb5" | ||
6452 | "\xd8\x2b\xe8\xc3\x72\x55\xc8\x48", | ||
6453 | .klen = 16, | ||
6454 | .dt = "\x63\x76\xbb\xe5\x29\x02\xba\x3b" | ||
6455 | "\x67\xc9\x25\xfa\x70\x1f\x11\xac", | ||
6456 | .dtlen = 16, | ||
6457 | .v = "\x57\x2c\x8e\x76\x87\x26\x47\x97" | ||
6458 | "\x7e\x74\xfb\xdd\xc4\x95\x01\xd1", | ||
6459 | .vlen = 16, | ||
6460 | .result = "\x48\xe9\xbd\x0d\x06\xee\x18\xfb" | ||
6461 | "\xe4\x57\x90\xd5\xc3\xfc\x9b\x73", | ||
6462 | .rlen = 16, | ||
6463 | .loops = 10000, | ||
6464 | }, | ||
6465 | }; | ||
6466 | |||
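The ansi_cprng vectors above exercise the ANSI X9.31 AES-128 generator, whose core step is commonly described as I = AES_K(DT), R = AES_K(I xor V), V' = AES_K(R xor I), with the seed passed to crypto_rng_reset() laid out as V || key || DT exactly as test_cprng() builds it. A userspace sketch of one generator step using OpenSSL's EVP interface (an assumption; the kernel driver itself sits on the in-kernel crypto API); if the construction sketched here matches, it reproduces the first vector above:

/* Build with: gcc x931_sketch.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

/* One AES-128-ECB block encryption through OpenSSL's EVP interface. */
static void aes_block(const unsigned char *key, const unsigned char *in,
                      unsigned char *out)
{
        EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
        int outl;

        EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL);
        EVP_CIPHER_CTX_set_padding(ctx, 0);
        EVP_EncryptUpdate(ctx, out, &outl, in, 16);
        EVP_CIPHER_CTX_free(ctx);
}

int main(void)
{
        /* Key, DT, V and expected output of the first ansi_cprng vector above. */
        const unsigned char key[] =
                "\xf3\xb1\x66\x6d\x13\x60\x72\x42\xed\x06\x1c\xab\xb8\xd4\x62\x02";
        const unsigned char dt[] =
                "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9";
        const unsigned char v[] =
                "\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
        const unsigned char expected[] =
                "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55\x84\x79\x66\x85\xc1\x2f\x76\x41";
        unsigned char iblk[16], tmp[16], rblk[16];
        int i;

        aes_block(key, dt, iblk);               /* I = AES_K(DT)    */
        for (i = 0; i < 16; i++)
                tmp[i] = iblk[i] ^ v[i];
        aes_block(key, tmp, rblk);              /* R = AES_K(I ^ V) */
        /* V' = AES_K(R ^ I) would reseed the state for the next block. */

        printf("X9.31 sketch %s the first test vector above\n",
               memcmp(rblk, expected, 16) ? "does NOT match" : "matches");
        return 0;
}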
5828 | /* Cast5 test vectors from RFC 2144 */ | 6467 | /* Cast5 test vectors from RFC 2144 */ |
5829 | #define CAST5_ENC_TEST_VECTORS 3 | 6468 | #define CAST5_ENC_TEST_VECTORS 3 |
5830 | #define CAST5_DEC_TEST_VECTORS 3 | 6469 | #define CAST5_DEC_TEST_VECTORS 3 |
diff --git a/crypto/zlib.c b/crypto/zlib.c index 33609bab614e..c3015733c990 100644 --- a/crypto/zlib.c +++ b/crypto/zlib.c | |||
@@ -165,15 +165,15 @@ static int zlib_compress_update(struct crypto_pcomp *tfm, | |||
165 | return -EINVAL; | 165 | return -EINVAL; |
166 | } | 166 | } |
167 | 167 | ||
168 | ret = req->avail_out - stream->avail_out; | ||
168 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", | 169 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", |
169 | stream->avail_in, stream->avail_out, | 170 | stream->avail_in, stream->avail_out, |
170 | req->avail_in - stream->avail_in, | 171 | req->avail_in - stream->avail_in, ret); |
171 | req->avail_out - stream->avail_out); | ||
172 | req->next_in = stream->next_in; | 172 | req->next_in = stream->next_in; |
173 | req->avail_in = stream->avail_in; | 173 | req->avail_in = stream->avail_in; |
174 | req->next_out = stream->next_out; | 174 | req->next_out = stream->next_out; |
175 | req->avail_out = stream->avail_out; | 175 | req->avail_out = stream->avail_out; |
176 | return 0; | 176 | return ret; |
177 | } | 177 | } |
178 | 178 | ||
179 | static int zlib_compress_final(struct crypto_pcomp *tfm, | 179 | static int zlib_compress_final(struct crypto_pcomp *tfm, |
@@ -195,15 +195,15 @@ static int zlib_compress_final(struct crypto_pcomp *tfm, | |||
195 | return -EINVAL; | 195 | return -EINVAL; |
196 | } | 196 | } |
197 | 197 | ||
198 | ret = req->avail_out - stream->avail_out; | ||
198 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", | 199 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", |
199 | stream->avail_in, stream->avail_out, | 200 | stream->avail_in, stream->avail_out, |
200 | req->avail_in - stream->avail_in, | 201 | req->avail_in - stream->avail_in, ret); |
201 | req->avail_out - stream->avail_out); | ||
202 | req->next_in = stream->next_in; | 202 | req->next_in = stream->next_in; |
203 | req->avail_in = stream->avail_in; | 203 | req->avail_in = stream->avail_in; |
204 | req->next_out = stream->next_out; | 204 | req->next_out = stream->next_out; |
205 | req->avail_out = stream->avail_out; | 205 | req->avail_out = stream->avail_out; |
206 | return 0; | 206 | return ret; |
207 | } | 207 | } |
208 | 208 | ||
209 | 209 | ||
@@ -280,15 +280,15 @@ static int zlib_decompress_update(struct crypto_pcomp *tfm, | |||
280 | return -EINVAL; | 280 | return -EINVAL; |
281 | } | 281 | } |
282 | 282 | ||
283 | ret = req->avail_out - stream->avail_out; | ||
283 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", | 284 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", |
284 | stream->avail_in, stream->avail_out, | 285 | stream->avail_in, stream->avail_out, |
285 | req->avail_in - stream->avail_in, | 286 | req->avail_in - stream->avail_in, ret); |
286 | req->avail_out - stream->avail_out); | ||
287 | req->next_in = stream->next_in; | 287 | req->next_in = stream->next_in; |
288 | req->avail_in = stream->avail_in; | 288 | req->avail_in = stream->avail_in; |
289 | req->next_out = stream->next_out; | 289 | req->next_out = stream->next_out; |
290 | req->avail_out = stream->avail_out; | 290 | req->avail_out = stream->avail_out; |
291 | return 0; | 291 | return ret; |
292 | } | 292 | } |
293 | 293 | ||
294 | static int zlib_decompress_final(struct crypto_pcomp *tfm, | 294 | static int zlib_decompress_final(struct crypto_pcomp *tfm, |
@@ -328,15 +328,15 @@ static int zlib_decompress_final(struct crypto_pcomp *tfm, | |||
328 | return -EINVAL; | 328 | return -EINVAL; |
329 | } | 329 | } |
330 | 330 | ||
331 | ret = req->avail_out - stream->avail_out; | ||
331 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", | 332 | pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", |
332 | stream->avail_in, stream->avail_out, | 333 | stream->avail_in, stream->avail_out, |
333 | req->avail_in - stream->avail_in, | 334 | req->avail_in - stream->avail_in, ret); |
334 | req->avail_out - stream->avail_out); | ||
335 | req->next_in = stream->next_in; | 335 | req->next_in = stream->next_in; |
336 | req->avail_in = stream->avail_in; | 336 | req->avail_in = stream->avail_in; |
337 | req->next_out = stream->next_out; | 337 | req->next_out = stream->next_out; |
338 | req->avail_out = stream->avail_out; | 338 | req->avail_out = stream->avail_out; |
339 | return 0; | 339 | return ret; |
340 | } | 340 | } |
341 | 341 | ||
342 | 342 | ||
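Note on the zlib.c hunks above: the update/final callbacks now hand back the number of bytes they produced instead of a flat 0, so a caller no longer has to diff avail_out itself. A minimal caller-side sketch of that contract, assuming a pcomp tfm from crypto_alloc_pcomp("zlib", 0, 0) and already-prepared src/dst buffers (variable names here are illustrative, not part of the patch):

	struct comp_request req = {
		.next_in   = src,
		.avail_in  = src_len,
		.next_out  = dst,
		.avail_out = dst_len,
	};
	int produced;

	/* With the change above: a positive return is the byte count
	 * written to dst by this call; a negative return is still an
	 * error code. */
	produced = crypto_compress_update(tfm, &req);
	if (produced < 0)
		return produced;
	total_out += produced;	/* advance the running output count */
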
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 5fab6470f4b2..9c00440dcf86 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG | |||
88 | 88 | ||
89 | config HW_RANDOM_VIA | 89 | config HW_RANDOM_VIA |
90 | tristate "VIA HW Random Number Generator support" | 90 | tristate "VIA HW Random Number Generator support" |
91 | depends on HW_RANDOM && X86_32 | 91 | depends on HW_RANDOM && X86 |
92 | default HW_RANDOM | 92 | default HW_RANDOM |
93 | ---help--- | 93 | ---help--- |
94 | This driver provides kernel-side support for the Random Number | 94 | This driver provides kernel-side support for the Random Number |
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 538313f9e7ac..00dd3de1be51 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c | |||
@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = { | |||
89 | .data_read = omap_rng_data_read, | 89 | .data_read = omap_rng_data_read, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | static int __init omap_rng_probe(struct platform_device *pdev) | 92 | static int __devinit omap_rng_probe(struct platform_device *pdev) |
93 | { | 93 | { |
94 | struct resource *res, *mem; | 94 | struct resource *res, *mem; |
95 | int ret; | 95 | int ret; |
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index dcd352ad0e7f..a94e930575f2 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c | |||
@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = { | |||
88 | .priv = 0, | 88 | .priv = 0, |
89 | }; | 89 | }; |
90 | 90 | ||
91 | static int __init timeriomem_rng_probe(struct platform_device *pdev) | 91 | static int __devinit timeriomem_rng_probe(struct platform_device *pdev) |
92 | { | 92 | { |
93 | struct resource *res, *mem; | 93 | struct resource *res; |
94 | int ret; | 94 | int ret; |
95 | 95 | ||
96 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 96 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) | |||
98 | if (!res) | 98 | if (!res) |
99 | return -ENOENT; | 99 | return -ENOENT; |
100 | 100 | ||
101 | mem = request_mem_region(res->start, res->end - res->start + 1, | ||
102 | pdev->name); | ||
103 | if (mem == NULL) | ||
104 | return -EBUSY; | ||
105 | |||
106 | dev_set_drvdata(&pdev->dev, mem); | ||
107 | |||
108 | timeriomem_rng_data = pdev->dev.platform_data; | 101 | timeriomem_rng_data = pdev->dev.platform_data; |
109 | 102 | ||
110 | timeriomem_rng_data->address = ioremap(res->start, | 103 | timeriomem_rng_data->address = ioremap(res->start, |
111 | res->end - res->start + 1); | 104 | res->end - res->start + 1); |
112 | if (!timeriomem_rng_data->address) { | 105 | if (!timeriomem_rng_data->address) |
113 | ret = -ENOMEM; | 106 | return -EIO; |
114 | goto err_ioremap; | ||
115 | } | ||
116 | 107 | ||
117 | if (timeriomem_rng_data->period != 0 | 108 | if (timeriomem_rng_data->period != 0 |
118 | && usecs_to_jiffies(timeriomem_rng_data->period) > 0) { | 109 | && usecs_to_jiffies(timeriomem_rng_data->period) > 0) { |
@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) | |||
125 | 116 | ||
126 | ret = hwrng_register(&timeriomem_rng_ops); | 117 | ret = hwrng_register(&timeriomem_rng_ops); |
127 | if (ret) | 118 | if (ret) |
128 | goto err_register; | 119 | goto failed; |
129 | 120 | ||
130 | dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", | 121 | dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", |
131 | timeriomem_rng_data->address, | 122 | timeriomem_rng_data->address, |
@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) | |||
133 | 124 | ||
134 | return 0; | 125 | return 0; |
135 | 126 | ||
136 | err_register: | 127 | failed: |
137 | dev_err(&pdev->dev, "problem registering\n"); | 128 | dev_err(&pdev->dev, "problem registering\n"); |
138 | iounmap(timeriomem_rng_data->address); | 129 | iounmap(timeriomem_rng_data->address); |
139 | err_ioremap: | ||
140 | release_resource(mem); | ||
141 | 130 | ||
142 | return ret; | 131 | return ret; |
143 | } | 132 | } |
144 | 133 | ||
145 | static int __devexit timeriomem_rng_remove(struct platform_device *pdev) | 134 | static int __devexit timeriomem_rng_remove(struct platform_device *pdev) |
146 | { | 135 | { |
147 | struct resource *mem = dev_get_drvdata(&pdev->dev); | ||
148 | |||
149 | del_timer_sync(&timeriomem_rng_timer); | 136 | del_timer_sync(&timeriomem_rng_timer); |
150 | hwrng_unregister(&timeriomem_rng_ops); | 137 | hwrng_unregister(&timeriomem_rng_ops); |
151 | 138 | ||
152 | iounmap(timeriomem_rng_data->address); | 139 | iounmap(timeriomem_rng_data->address); |
153 | release_resource(mem); | ||
154 | 140 | ||
155 | return 0; | 141 | return 0; |
156 | } | 142 | } |
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 4e9573c1d39e..794aacb715c1 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c | |||
@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng) | |||
132 | struct cpuinfo_x86 *c = &cpu_data(0); | 132 | struct cpuinfo_x86 *c = &cpu_data(0); |
133 | u32 lo, hi, old_lo; | 133 | u32 lo, hi, old_lo; |
134 | 134 | ||
135 | /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG | ||
136 | * is always enabled if CPUID rng_en is set. There is no | ||
137 | * RNG configuration in this register as there used | ||
138 | * to be. */ | ||
139 | if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { | ||
140 | if (!cpu_has_xstore_enabled) { | ||
141 | printk(KERN_ERR PFX "can't enable hardware RNG " | ||
142 | "if XSTORE is not enabled\n"); | ||
143 | return -ENODEV; | ||
144 | } | ||
145 | return 0; | ||
146 | } | ||
147 | |||
135 | /* Control the RNG via MSR. Tread lightly and pay very close | 148 | /* Control the RNG via MSR. Tread lightly and pay very close |
136 | * attention to values written, as the reserved fields | 149 | * attention to values written, as the reserved fields |
137 | * are documented to be "undefined and unpredictable"; but it | 150 | * are documented to be "undefined and unpredictable"; but it |
@@ -205,5 +218,5 @@ static void __exit mod_exit(void) | |||
205 | module_init(mod_init); | 218 | module_init(mod_init); |
206 | module_exit(mod_exit); | 219 | module_exit(mod_exit); |
207 | 220 | ||
208 | MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets"); | 221 | MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); |
209 | MODULE_LICENSE("GPL"); | 222 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 01afd758072f..e748e55bd86b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -12,7 +12,7 @@ if CRYPTO_HW | |||
12 | 12 | ||
13 | config CRYPTO_DEV_PADLOCK | 13 | config CRYPTO_DEV_PADLOCK |
14 | tristate "Support for VIA PadLock ACE" | 14 | tristate "Support for VIA PadLock ACE" |
15 | depends on X86_32 && !UML | 15 | depends on X86 && !UML |
16 | select CRYPTO_ALGAPI | 16 | select CRYPTO_ALGAPI |
17 | help | 17 | help |
18 | Some VIA processors come with an integrated crypto engine | 18 | Some VIA processors come with an integrated crypto engine |
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 2bef086fb342..5f753fc08730 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data) | |||
2564 | hifn_process_queue(dev); | 2564 | hifn_process_queue(dev); |
2565 | } | 2565 | } |
2566 | 2566 | ||
2567 | static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 2567 | static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
2568 | { | 2568 | { |
2569 | int err, i; | 2569 | int err, i; |
2570 | struct hifn_device *dev; | 2570 | struct hifn_device *dev; |
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device: | |||
2696 | return err; | 2696 | return err; |
2697 | } | 2697 | } |
2698 | 2698 | ||
2699 | static void hifn_remove(struct pci_dev *pdev) | 2699 | static void __devexit hifn_remove(struct pci_dev *pdev) |
2700 | { | 2700 | { |
2701 | int i; | 2701 | int i; |
2702 | struct hifn_device *dev; | 2702 | struct hifn_device *dev; |
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = { | |||
2744 | .remove = __devexit_p(hifn_remove), | 2744 | .remove = __devexit_p(hifn_remove), |
2745 | }; | 2745 | }; |
2746 | 2746 | ||
2747 | static int __devinit hifn_init(void) | 2747 | static int __init hifn_init(void) |
2748 | { | 2748 | { |
2749 | unsigned int freq; | 2749 | unsigned int freq; |
2750 | int err; | 2750 | int err; |
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void) | |||
2789 | return 0; | 2789 | return 0; |
2790 | } | 2790 | } |
2791 | 2791 | ||
2792 | static void __devexit hifn_fini(void) | 2792 | static void __exit hifn_fini(void) |
2793 | { | 2793 | { |
2794 | pci_unregister_driver(&hifn_pci_driver); | 2794 | pci_unregister_driver(&hifn_pci_driver); |
2795 | 2795 | ||
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 856b3cc25583..87f92c39b5f0 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword) | |||
154 | int cpu = raw_smp_processor_id(); | 154 | int cpu = raw_smp_processor_id(); |
155 | 155 | ||
156 | if (cword != per_cpu(last_cword, cpu)) | 156 | if (cword != per_cpu(last_cword, cpu)) |
157 | #ifndef CONFIG_X86_64 | ||
157 | asm volatile ("pushfl; popfl"); | 158 | asm volatile ("pushfl; popfl"); |
159 | #else | ||
160 | asm volatile ("pushfq; popfq"); | ||
161 | #endif | ||
158 | } | 162 | } |
159 | 163 | ||
160 | static inline void padlock_store_cword(struct cword *cword) | 164 | static inline void padlock_store_cword(struct cword *cword) |
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, | |||
208 | 212 | ||
209 | asm volatile ("test $1, %%cl;" | 213 | asm volatile ("test $1, %%cl;" |
210 | "je 1f;" | 214 | "je 1f;" |
215 | #ifndef CONFIG_X86_64 | ||
211 | "lea -1(%%ecx), %%eax;" | 216 | "lea -1(%%ecx), %%eax;" |
212 | "mov $1, %%ecx;" | 217 | "mov $1, %%ecx;" |
218 | #else | ||
219 | "lea -1(%%rcx), %%rax;" | ||
220 | "mov $1, %%rcx;" | ||
221 | #endif | ||
213 | ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ | 222 | ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ |
223 | #ifndef CONFIG_X86_64 | ||
214 | "mov %%eax, %%ecx;" | 224 | "mov %%eax, %%ecx;" |
225 | #else | ||
226 | "mov %%rax, %%rcx;" | ||
227 | #endif | ||
215 | "1:" | 228 | "1:" |
216 | ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ | 229 | ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
217 | : "+S"(input), "+D"(output) | 230 | : "+S"(input), "+D"(output) |
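Note on the padlock-aes.c hunk above: pushfl/popfl does not assemble in 64-bit code, hence the CONFIG_X86_64 split; the driver relies on the EFLAGS write to make the PadLock engine reload its key material before the next xcrypt when a different key context is scheduled on a CPU. A stripped-down sketch of that per-CPU pattern, simplified from the driver for illustration only:

	static DEFINE_PER_CPU(struct cword *, last_cword);

	static inline void padlock_reset_key(struct cword *cword)
	{
		int cpu = raw_smp_processor_id();

		/* A different key context is about to run on this CPU:
		 * write EFLAGS so the engine reloads the key. */
		if (cword != per_cpu(last_cword, cpu))
	#ifndef CONFIG_X86_64
			asm volatile ("pushfl; popfl");
	#else
			asm volatile ("pushfq; popfq");
	#endif
	}
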
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index a3918c16b3db..c70775fd3ce2 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -44,6 +44,8 @@ | |||
44 | #include <crypto/sha.h> | 44 | #include <crypto/sha.h> |
45 | #include <crypto/aead.h> | 45 | #include <crypto/aead.h> |
46 | #include <crypto/authenc.h> | 46 | #include <crypto/authenc.h> |
47 | #include <crypto/skcipher.h> | ||
48 | #include <crypto/scatterwalk.h> | ||
47 | 49 | ||
48 | #include "talitos.h" | 50 | #include "talitos.h" |
49 | 51 | ||
@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
339 | status = error; | 341 | status = error; |
340 | 342 | ||
341 | dma_unmap_single(dev, request->dma_desc, | 343 | dma_unmap_single(dev, request->dma_desc, |
342 | sizeof(struct talitos_desc), DMA_BIDIRECTIONAL); | 344 | sizeof(struct talitos_desc), |
345 | DMA_BIDIRECTIONAL); | ||
343 | 346 | ||
344 | /* copy entries so we can call callback outside lock */ | 347 | /* copy entries so we can call callback outside lock */ |
345 | saved_req.desc = request->desc; | 348 | saved_req.desc = request->desc; |
@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch) | |||
413 | /* | 416 | /* |
414 | * user diagnostics; report root cause of error based on execution unit status | 417 | * user diagnostics; report root cause of error based on execution unit status |
415 | */ | 418 | */ |
416 | static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc) | 419 | static void report_eu_error(struct device *dev, int ch, |
420 | struct talitos_desc *desc) | ||
417 | { | 421 | { |
418 | struct talitos_private *priv = dev_get_drvdata(dev); | 422 | struct talitos_private *priv = dev_get_drvdata(dev); |
419 | int i; | 423 | int i; |
@@ -684,8 +688,8 @@ struct talitos_ctx { | |||
684 | unsigned int authsize; | 688 | unsigned int authsize; |
685 | }; | 689 | }; |
686 | 690 | ||
687 | static int aead_authenc_setauthsize(struct crypto_aead *authenc, | 691 | static int aead_setauthsize(struct crypto_aead *authenc, |
688 | unsigned int authsize) | 692 | unsigned int authsize) |
689 | { | 693 | { |
690 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 694 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
691 | 695 | ||
@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc, | |||
694 | return 0; | 698 | return 0; |
695 | } | 699 | } |
696 | 700 | ||
697 | static int aead_authenc_setkey(struct crypto_aead *authenc, | 701 | static int aead_setkey(struct crypto_aead *authenc, |
698 | const u8 *key, unsigned int keylen) | 702 | const u8 *key, unsigned int keylen) |
699 | { | 703 | { |
700 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 704 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
701 | struct rtattr *rta = (void *)key; | 705 | struct rtattr *rta = (void *)key; |
@@ -740,7 +744,7 @@ badkey: | |||
740 | } | 744 | } |
741 | 745 | ||
742 | /* | 746 | /* |
743 | * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor | 747 | * talitos_edesc - s/w-extended descriptor |
744 | * @src_nents: number of segments in input scatterlist | 748 | * @src_nents: number of segments in input scatterlist |
745 | * @dst_nents: number of segments in output scatterlist | 749 | * @dst_nents: number of segments in output scatterlist |
746 | * @dma_len: length of dma mapped link_tbl space | 750 | * @dma_len: length of dma mapped link_tbl space |
@@ -752,17 +756,67 @@ badkey: | |||
752 | * is greater than 1, an integrity check value is concatenated to the end | 756 | * is greater than 1, an integrity check value is concatenated to the end |
753 | * of link_tbl data | 757 | * of link_tbl data |
754 | */ | 758 | */ |
755 | struct ipsec_esp_edesc { | 759 | struct talitos_edesc { |
756 | int src_nents; | 760 | int src_nents; |
757 | int dst_nents; | 761 | int dst_nents; |
762 | int src_is_chained; | ||
763 | int dst_is_chained; | ||
758 | int dma_len; | 764 | int dma_len; |
759 | dma_addr_t dma_link_tbl; | 765 | dma_addr_t dma_link_tbl; |
760 | struct talitos_desc desc; | 766 | struct talitos_desc desc; |
761 | struct talitos_ptr link_tbl[0]; | 767 | struct talitos_ptr link_tbl[0]; |
762 | }; | 768 | }; |
763 | 769 | ||
770 | static int talitos_map_sg(struct device *dev, struct scatterlist *sg, | ||
771 | unsigned int nents, enum dma_data_direction dir, | ||
772 | int chained) | ||
773 | { | ||
774 | if (unlikely(chained)) | ||
775 | while (sg) { | ||
776 | dma_map_sg(dev, sg, 1, dir); | ||
777 | sg = scatterwalk_sg_next(sg); | ||
778 | } | ||
779 | else | ||
780 | dma_map_sg(dev, sg, nents, dir); | ||
781 | return nents; | ||
782 | } | ||
783 | |||
784 | static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg, | ||
785 | enum dma_data_direction dir) | ||
786 | { | ||
787 | while (sg) { | ||
788 | dma_unmap_sg(dev, sg, 1, dir); | ||
789 | sg = scatterwalk_sg_next(sg); | ||
790 | } | ||
791 | } | ||
792 | |||
793 | static void talitos_sg_unmap(struct device *dev, | ||
794 | struct talitos_edesc *edesc, | ||
795 | struct scatterlist *src, | ||
796 | struct scatterlist *dst) | ||
797 | { | ||
798 | unsigned int src_nents = edesc->src_nents ? : 1; | ||
799 | unsigned int dst_nents = edesc->dst_nents ? : 1; | ||
800 | |||
801 | if (src != dst) { | ||
802 | if (edesc->src_is_chained) | ||
803 | talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); | ||
804 | else | ||
805 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | ||
806 | |||
807 | if (edesc->dst_is_chained) | ||
808 | talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE); | ||
809 | else | ||
810 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | ||
811 | } else | ||
812 | if (edesc->src_is_chained) | ||
813 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); | ||
814 | else | ||
815 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); | ||
816 | } | ||
817 | |||
764 | static void ipsec_esp_unmap(struct device *dev, | 818 | static void ipsec_esp_unmap(struct device *dev, |
765 | struct ipsec_esp_edesc *edesc, | 819 | struct talitos_edesc *edesc, |
766 | struct aead_request *areq) | 820 | struct aead_request *areq) |
767 | { | 821 | { |
768 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); | 822 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); |
@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev, | |||
772 | 826 | ||
773 | dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); | 827 | dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); |
774 | 828 | ||
775 | if (areq->src != areq->dst) { | 829 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); |
776 | dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
777 | DMA_TO_DEVICE); | ||
778 | dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1, | ||
779 | DMA_FROM_DEVICE); | ||
780 | } else { | ||
781 | dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
782 | DMA_BIDIRECTIONAL); | ||
783 | } | ||
784 | 830 | ||
785 | if (edesc->dma_len) | 831 | if (edesc->dma_len) |
786 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 832 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
795 | int err) | 841 | int err) |
796 | { | 842 | { |
797 | struct aead_request *areq = context; | 843 | struct aead_request *areq = context; |
798 | struct ipsec_esp_edesc *edesc = | ||
799 | container_of(desc, struct ipsec_esp_edesc, desc); | ||
800 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 844 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
801 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 845 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
846 | struct talitos_edesc *edesc; | ||
802 | struct scatterlist *sg; | 847 | struct scatterlist *sg; |
803 | void *icvdata; | 848 | void *icvdata; |
804 | 849 | ||
850 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
851 | |||
805 | ipsec_esp_unmap(dev, edesc, areq); | 852 | ipsec_esp_unmap(dev, edesc, areq); |
806 | 853 | ||
807 | /* copy the generated ICV to dst */ | 854 | /* copy the generated ICV to dst */ |
@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
819 | } | 866 | } |
820 | 867 | ||
821 | static void ipsec_esp_decrypt_swauth_done(struct device *dev, | 868 | static void ipsec_esp_decrypt_swauth_done(struct device *dev, |
822 | struct talitos_desc *desc, void *context, | 869 | struct talitos_desc *desc, |
823 | int err) | 870 | void *context, int err) |
824 | { | 871 | { |
825 | struct aead_request *req = context; | 872 | struct aead_request *req = context; |
826 | struct ipsec_esp_edesc *edesc = | ||
827 | container_of(desc, struct ipsec_esp_edesc, desc); | ||
828 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 873 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
829 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 874 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
875 | struct talitos_edesc *edesc; | ||
830 | struct scatterlist *sg; | 876 | struct scatterlist *sg; |
831 | void *icvdata; | 877 | void *icvdata; |
832 | 878 | ||
879 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
880 | |||
833 | ipsec_esp_unmap(dev, edesc, req); | 881 | ipsec_esp_unmap(dev, edesc, req); |
834 | 882 | ||
835 | if (!err) { | 883 | if (!err) { |
@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, | |||
851 | } | 899 | } |
852 | 900 | ||
853 | static void ipsec_esp_decrypt_hwauth_done(struct device *dev, | 901 | static void ipsec_esp_decrypt_hwauth_done(struct device *dev, |
854 | struct talitos_desc *desc, void *context, | 902 | struct talitos_desc *desc, |
855 | int err) | 903 | void *context, int err) |
856 | { | 904 | { |
857 | struct aead_request *req = context; | 905 | struct aead_request *req = context; |
858 | struct ipsec_esp_edesc *edesc = | 906 | struct talitos_edesc *edesc; |
859 | container_of(desc, struct ipsec_esp_edesc, desc); | 907 | |
908 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
860 | 909 | ||
861 | ipsec_esp_unmap(dev, edesc, req); | 910 | ipsec_esp_unmap(dev, edesc, req); |
862 | 911 | ||
863 | /* check ICV auth status */ | 912 | /* check ICV auth status */ |
864 | if (!err) | 913 | if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != |
865 | if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != | 914 | DESC_HDR_LO_ICCR1_PASS)) |
866 | DESC_HDR_LO_ICCR1_PASS) | 915 | err = -EBADMSG; |
867 | err = -EBADMSG; | ||
868 | 916 | ||
869 | kfree(edesc); | 917 | kfree(edesc); |
870 | 918 | ||
@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
886 | link_tbl_ptr->j_extent = 0; | 934 | link_tbl_ptr->j_extent = 0; |
887 | link_tbl_ptr++; | 935 | link_tbl_ptr++; |
888 | cryptlen -= sg_dma_len(sg); | 936 | cryptlen -= sg_dma_len(sg); |
889 | sg = sg_next(sg); | 937 | sg = scatterwalk_sg_next(sg); |
890 | } | 938 | } |
891 | 939 | ||
892 | /* adjust (decrease) last one (or two) entry's len to cryptlen */ | 940 | /* adjust (decrease) last one (or two) entry's len to cryptlen */ |
@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
910 | /* | 958 | /* |
911 | * fill in and submit ipsec_esp descriptor | 959 | * fill in and submit ipsec_esp descriptor |
912 | */ | 960 | */ |
913 | static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | 961 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, |
914 | u8 *giv, u64 seq, | 962 | u8 *giv, u64 seq, |
915 | void (*callback) (struct device *dev, | 963 | void (*callback) (struct device *dev, |
916 | struct talitos_desc *desc, | 964 | struct talitos_desc *desc, |
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
952 | desc->ptr[4].len = cpu_to_be16(cryptlen); | 1000 | desc->ptr[4].len = cpu_to_be16(cryptlen); |
953 | desc->ptr[4].j_extent = authsize; | 1001 | desc->ptr[4].j_extent = authsize; |
954 | 1002 | ||
955 | if (areq->src == areq->dst) | 1003 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, |
956 | sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, | 1004 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL |
957 | DMA_BIDIRECTIONAL); | 1005 | : DMA_TO_DEVICE, |
958 | else | 1006 | edesc->src_is_chained); |
959 | sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
960 | DMA_TO_DEVICE); | ||
961 | 1007 | ||
962 | if (sg_count == 1) { | 1008 | if (sg_count == 1) { |
963 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1009 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); |
964 | } else { | 1010 | } else { |
965 | sg_link_tbl_len = cryptlen; | 1011 | sg_link_tbl_len = cryptlen; |
966 | 1012 | ||
967 | if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && | 1013 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) |
968 | (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { | ||
969 | sg_link_tbl_len = cryptlen + authsize; | 1014 | sg_link_tbl_len = cryptlen + authsize; |
970 | } | 1015 | |
971 | sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, | 1016 | sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, |
972 | &edesc->link_tbl[0]); | 1017 | &edesc->link_tbl[0]); |
973 | if (sg_count > 1) { | 1018 | if (sg_count > 1) { |
974 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1019 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
975 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); | 1020 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); |
976 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1021 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
977 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1022 | edesc->dma_len, |
1023 | DMA_BIDIRECTIONAL); | ||
978 | } else { | 1024 | } else { |
979 | /* Only one segment now, so no link tbl needed */ | 1025 | /* Only one segment now, so no link tbl needed */ |
980 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1026 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> |
1027 | src)); | ||
981 | } | 1028 | } |
982 | } | 1029 | } |
983 | 1030 | ||
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
985 | desc->ptr[5].len = cpu_to_be16(cryptlen); | 1032 | desc->ptr[5].len = cpu_to_be16(cryptlen); |
986 | desc->ptr[5].j_extent = authsize; | 1033 | desc->ptr[5].j_extent = authsize; |
987 | 1034 | ||
988 | if (areq->src != areq->dst) { | 1035 | if (areq->src != areq->dst) |
989 | sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, | 1036 | sg_count = talitos_map_sg(dev, areq->dst, |
990 | DMA_FROM_DEVICE); | 1037 | edesc->dst_nents ? : 1, |
991 | } | 1038 | DMA_FROM_DEVICE, |
1039 | edesc->dst_is_chained); | ||
992 | 1040 | ||
993 | if (sg_count == 1) { | 1041 | if (sg_count == 1) { |
994 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 1042 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); |
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |||
1033 | return ret; | 1081 | return ret; |
1034 | } | 1082 | } |
1035 | 1083 | ||
1036 | |||
1037 | /* | 1084 | /* |
1038 | * derive number of elements in scatterlist | 1085 | * derive number of elements in scatterlist |
1039 | */ | 1086 | */ |
1040 | static int sg_count(struct scatterlist *sg_list, int nbytes) | 1087 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) |
1041 | { | 1088 | { |
1042 | struct scatterlist *sg = sg_list; | 1089 | struct scatterlist *sg = sg_list; |
1043 | int sg_nents = 0; | 1090 | int sg_nents = 0; |
1044 | 1091 | ||
1045 | while (nbytes) { | 1092 | *chained = 0; |
1093 | while (nbytes > 0) { | ||
1046 | sg_nents++; | 1094 | sg_nents++; |
1047 | nbytes -= sg->length; | 1095 | nbytes -= sg->length; |
1048 | sg = sg_next(sg); | 1096 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
1097 | *chained = 1; | ||
1098 | sg = scatterwalk_sg_next(sg); | ||
1049 | } | 1099 | } |
1050 | 1100 | ||
1051 | return sg_nents; | 1101 | return sg_nents; |
1052 | } | 1102 | } |
1053 | 1103 | ||
1054 | /* | 1104 | /* |
1055 | * allocate and map the ipsec_esp extended descriptor | 1105 | * allocate and map the extended descriptor |
1056 | */ | 1106 | */ |
1057 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | 1107 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1058 | int icv_stashing) | 1108 | struct scatterlist *src, |
1109 | struct scatterlist *dst, | ||
1110 | unsigned int cryptlen, | ||
1111 | unsigned int authsize, | ||
1112 | int icv_stashing, | ||
1113 | u32 cryptoflags) | ||
1059 | { | 1114 | { |
1060 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1115 | struct talitos_edesc *edesc; |
1061 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | ||
1062 | struct ipsec_esp_edesc *edesc; | ||
1063 | int src_nents, dst_nents, alloc_len, dma_len; | 1116 | int src_nents, dst_nents, alloc_len, dma_len; |
1064 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1117 | int src_chained, dst_chained = 0; |
1118 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
1065 | GFP_ATOMIC; | 1119 | GFP_ATOMIC; |
1066 | 1120 | ||
1067 | if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) { | 1121 | if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { |
1068 | dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n"); | 1122 | dev_err(dev, "length exceeds h/w max limit\n"); |
1069 | return ERR_PTR(-EINVAL); | 1123 | return ERR_PTR(-EINVAL); |
1070 | } | 1124 | } |
1071 | 1125 | ||
1072 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize); | 1126 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
1073 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1127 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1074 | 1128 | ||
1075 | if (areq->dst == areq->src) { | 1129 | if (dst == src) { |
1076 | dst_nents = src_nents; | 1130 | dst_nents = src_nents; |
1077 | } else { | 1131 | } else { |
1078 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize); | 1132 | dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained); |
1079 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1133 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
1080 | } | 1134 | } |
1081 | 1135 | ||
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | |||
1084 | * allowing for two separate entries for ICV and generated ICV (+ 2), | 1138 | * allowing for two separate entries for ICV and generated ICV (+ 2), |
1085 | * and the ICV data itself | 1139 | * and the ICV data itself |
1086 | */ | 1140 | */ |
1087 | alloc_len = sizeof(struct ipsec_esp_edesc); | 1141 | alloc_len = sizeof(struct talitos_edesc); |
1088 | if (src_nents || dst_nents) { | 1142 | if (src_nents || dst_nents) { |
1089 | dma_len = (src_nents + dst_nents + 2) * | 1143 | dma_len = (src_nents + dst_nents + 2) * |
1090 | sizeof(struct talitos_ptr) + ctx->authsize; | 1144 | sizeof(struct talitos_ptr) + authsize; |
1091 | alloc_len += dma_len; | 1145 | alloc_len += dma_len; |
1092 | } else { | 1146 | } else { |
1093 | dma_len = 0; | 1147 | dma_len = 0; |
1094 | alloc_len += icv_stashing ? ctx->authsize : 0; | 1148 | alloc_len += icv_stashing ? authsize : 0; |
1095 | } | 1149 | } |
1096 | 1150 | ||
1097 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1151 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1098 | if (!edesc) { | 1152 | if (!edesc) { |
1099 | dev_err(ctx->dev, "could not allocate edescriptor\n"); | 1153 | dev_err(dev, "could not allocate edescriptor\n"); |
1100 | return ERR_PTR(-ENOMEM); | 1154 | return ERR_PTR(-ENOMEM); |
1101 | } | 1155 | } |
1102 | 1156 | ||
1103 | edesc->src_nents = src_nents; | 1157 | edesc->src_nents = src_nents; |
1104 | edesc->dst_nents = dst_nents; | 1158 | edesc->dst_nents = dst_nents; |
1159 | edesc->src_is_chained = src_chained; | ||
1160 | edesc->dst_is_chained = dst_chained; | ||
1105 | edesc->dma_len = dma_len; | 1161 | edesc->dma_len = dma_len; |
1106 | edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0], | 1162 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], |
1107 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1163 | edesc->dma_len, DMA_BIDIRECTIONAL); |
1108 | 1164 | ||
1109 | return edesc; | 1165 | return edesc; |
1110 | } | 1166 | } |
1111 | 1167 | ||
1112 | static int aead_authenc_encrypt(struct aead_request *req) | 1168 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, |
1169 | int icv_stashing) | ||
1170 | { | ||
1171 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | ||
1172 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | ||
1173 | |||
1174 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, | ||
1175 | areq->cryptlen, ctx->authsize, icv_stashing, | ||
1176 | areq->base.flags); | ||
1177 | } | ||
1178 | |||
1179 | static int aead_encrypt(struct aead_request *req) | ||
1113 | { | 1180 | { |
1114 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1181 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
1115 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1182 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1116 | struct ipsec_esp_edesc *edesc; | 1183 | struct talitos_edesc *edesc; |
1117 | 1184 | ||
1118 | /* allocate extended descriptor */ | 1185 | /* allocate extended descriptor */ |
1119 | edesc = ipsec_esp_edesc_alloc(req, 0); | 1186 | edesc = aead_edesc_alloc(req, 0); |
1120 | if (IS_ERR(edesc)) | 1187 | if (IS_ERR(edesc)) |
1121 | return PTR_ERR(edesc); | 1188 | return PTR_ERR(edesc); |
1122 | 1189 | ||
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req) | |||
1126 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); | 1193 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); |
1127 | } | 1194 | } |
1128 | 1195 | ||
1129 | 1196 | static int aead_decrypt(struct aead_request *req) | |
1130 | |||
1131 | static int aead_authenc_decrypt(struct aead_request *req) | ||
1132 | { | 1197 | { |
1133 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1198 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
1134 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1199 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1135 | unsigned int authsize = ctx->authsize; | 1200 | unsigned int authsize = ctx->authsize; |
1136 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); | 1201 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); |
1137 | struct ipsec_esp_edesc *edesc; | 1202 | struct talitos_edesc *edesc; |
1138 | struct scatterlist *sg; | 1203 | struct scatterlist *sg; |
1139 | void *icvdata; | 1204 | void *icvdata; |
1140 | 1205 | ||
1141 | req->cryptlen -= authsize; | 1206 | req->cryptlen -= authsize; |
1142 | 1207 | ||
1143 | /* allocate extended descriptor */ | 1208 | /* allocate extended descriptor */ |
1144 | edesc = ipsec_esp_edesc_alloc(req, 1); | 1209 | edesc = aead_edesc_alloc(req, 1); |
1145 | if (IS_ERR(edesc)) | 1210 | if (IS_ERR(edesc)) |
1146 | return PTR_ERR(edesc); | 1211 | return PTR_ERR(edesc); |
1147 | 1212 | ||
1148 | if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && | 1213 | if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && |
1149 | (((!edesc->src_nents && !edesc->dst_nents) || | 1214 | ((!edesc->src_nents && !edesc->dst_nents) || |
1150 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) { | 1215 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { |
1151 | 1216 | ||
1152 | /* decrypt and check the ICV */ | 1217 | /* decrypt and check the ICV */ |
1153 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND | | 1218 | edesc->desc.hdr = ctx->desc_hdr_template | |
1219 | DESC_HDR_DIR_INBOUND | | ||
1154 | DESC_HDR_MODE1_MDEU_CICV; | 1220 | DESC_HDR_MODE1_MDEU_CICV; |
1155 | 1221 | ||
1156 | /* reset integrity check result bits */ | 1222 | /* reset integrity check result bits */ |
1157 | edesc->desc.hdr_lo = 0; | 1223 | edesc->desc.hdr_lo = 0; |
1158 | 1224 | ||
1159 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done); | 1225 | return ipsec_esp(edesc, req, NULL, 0, |
1226 | ipsec_esp_decrypt_hwauth_done); | ||
1160 | 1227 | ||
1161 | } else { | 1228 | } |
1162 | |||
1163 | /* Have to check the ICV with software */ | ||
1164 | 1229 | ||
1165 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | 1230 | /* Have to check the ICV with software */ |
1231 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | ||
1166 | 1232 | ||
1167 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1233 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ |
1168 | if (edesc->dma_len) | 1234 | if (edesc->dma_len) |
1169 | icvdata = &edesc->link_tbl[edesc->src_nents + | 1235 | icvdata = &edesc->link_tbl[edesc->src_nents + |
1170 | edesc->dst_nents + 2]; | 1236 | edesc->dst_nents + 2]; |
1171 | else | 1237 | else |
1172 | icvdata = &edesc->link_tbl[0]; | 1238 | icvdata = &edesc->link_tbl[0]; |
1173 | 1239 | ||
1174 | sg = sg_last(req->src, edesc->src_nents ? : 1); | 1240 | sg = sg_last(req->src, edesc->src_nents ? : 1); |
1175 | 1241 | ||
1176 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, | 1242 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, |
1177 | ctx->authsize); | 1243 | ctx->authsize); |
1178 | 1244 | ||
1179 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); | 1245 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); |
1180 | } | ||
1181 | } | 1246 | } |
1182 | 1247 | ||
1183 | static int aead_authenc_givencrypt( | 1248 | static int aead_givencrypt(struct aead_givcrypt_request *req) |
1184 | struct aead_givcrypt_request *req) | ||
1185 | { | 1249 | { |
1186 | struct aead_request *areq = &req->areq; | 1250 | struct aead_request *areq = &req->areq; |
1187 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1251 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1188 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1252 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1189 | struct ipsec_esp_edesc *edesc; | 1253 | struct talitos_edesc *edesc; |
1190 | 1254 | ||
1191 | /* allocate extended descriptor */ | 1255 | /* allocate extended descriptor */ |
1192 | edesc = ipsec_esp_edesc_alloc(areq, 0); | 1256 | edesc = aead_edesc_alloc(areq, 0); |
1193 | if (IS_ERR(edesc)) | 1257 | if (IS_ERR(edesc)) |
1194 | return PTR_ERR(edesc); | 1258 | return PTR_ERR(edesc); |
1195 | 1259 | ||
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt( | |||
1204 | ipsec_esp_encrypt_done); | 1268 | ipsec_esp_encrypt_done); |
1205 | } | 1269 | } |
1206 | 1270 | ||
1271 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | ||
1272 | const u8 *key, unsigned int keylen) | ||
1273 | { | ||
1274 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1275 | struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); | ||
1276 | |||
1277 | if (keylen > TALITOS_MAX_KEY_SIZE) | ||
1278 | goto badkey; | ||
1279 | |||
1280 | if (keylen < alg->min_keysize || keylen > alg->max_keysize) | ||
1281 | goto badkey; | ||
1282 | |||
1283 | memcpy(&ctx->key, key, keylen); | ||
1284 | ctx->keylen = keylen; | ||
1285 | |||
1286 | return 0; | ||
1287 | |||
1288 | badkey: | ||
1289 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1290 | return -EINVAL; | ||
1291 | } | ||
1292 | |||
1293 | static void common_nonsnoop_unmap(struct device *dev, | ||
1294 | struct talitos_edesc *edesc, | ||
1295 | struct ablkcipher_request *areq) | ||
1296 | { | ||
1297 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | ||
1298 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | ||
1299 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); | ||
1300 | |||
1301 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); | ||
1302 | |||
1303 | if (edesc->dma_len) | ||
1304 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | ||
1305 | DMA_BIDIRECTIONAL); | ||
1306 | } | ||
1307 | |||
1308 | static void ablkcipher_done(struct device *dev, | ||
1309 | struct talitos_desc *desc, void *context, | ||
1310 | int err) | ||
1311 | { | ||
1312 | struct ablkcipher_request *areq = context; | ||
1313 | struct talitos_edesc *edesc; | ||
1314 | |||
1315 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
1316 | |||
1317 | common_nonsnoop_unmap(dev, edesc, areq); | ||
1318 | |||
1319 | kfree(edesc); | ||
1320 | |||
1321 | areq->base.complete(&areq->base, err); | ||
1322 | } | ||
1323 | |||
1324 | static int common_nonsnoop(struct talitos_edesc *edesc, | ||
1325 | struct ablkcipher_request *areq, | ||
1326 | u8 *giv, | ||
1327 | void (*callback) (struct device *dev, | ||
1328 | struct talitos_desc *desc, | ||
1329 | void *context, int error)) | ||
1330 | { | ||
1331 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1332 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1333 | struct device *dev = ctx->dev; | ||
1334 | struct talitos_desc *desc = &edesc->desc; | ||
1335 | unsigned int cryptlen = areq->nbytes; | ||
1336 | unsigned int ivsize; | ||
1337 | int sg_count, ret; | ||
1338 | |||
1339 | /* first DWORD empty */ | ||
1340 | desc->ptr[0].len = 0; | ||
1341 | desc->ptr[0].ptr = 0; | ||
1342 | desc->ptr[0].j_extent = 0; | ||
1343 | |||
1344 | /* cipher iv */ | ||
1345 | ivsize = crypto_ablkcipher_ivsize(cipher); | ||
1346 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, | ||
1347 | DMA_TO_DEVICE); | ||
1348 | |||
1349 | /* cipher key */ | ||
1350 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | ||
1351 | (char *)&ctx->key, 0, DMA_TO_DEVICE); | ||
1352 | |||
1353 | /* | ||
1354 | * cipher in | ||
1355 | */ | ||
1356 | desc->ptr[3].len = cpu_to_be16(cryptlen); | ||
1357 | desc->ptr[3].j_extent = 0; | ||
1358 | |||
1359 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
1360 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | ||
1361 | : DMA_TO_DEVICE, | ||
1362 | edesc->src_is_chained); | ||
1363 | |||
1364 | if (sg_count == 1) { | ||
1365 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); | ||
1366 | } else { | ||
1367 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | ||
1368 | &edesc->link_tbl[0]); | ||
1369 | if (sg_count > 1) { | ||
1370 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1371 | desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); | ||
1372 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
1373 | edesc->dma_len, | ||
1374 | DMA_BIDIRECTIONAL); | ||
1375 | } else { | ||
1376 | /* Only one segment now, so no link tbl needed */ | ||
1377 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> | ||
1378 | src)); | ||
1379 | } | ||
1380 | } | ||
1381 | |||
1382 | /* cipher out */ | ||
1383 | desc->ptr[4].len = cpu_to_be16(cryptlen); | ||
1384 | desc->ptr[4].j_extent = 0; | ||
1385 | |||
1386 | if (areq->src != areq->dst) | ||
1387 | sg_count = talitos_map_sg(dev, areq->dst, | ||
1388 | edesc->dst_nents ? : 1, | ||
1389 | DMA_FROM_DEVICE, | ||
1390 | edesc->dst_is_chained); | ||
1391 | |||
1392 | if (sg_count == 1) { | ||
1393 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | ||
1394 | } else { | ||
1395 | struct talitos_ptr *link_tbl_ptr = | ||
1396 | &edesc->link_tbl[edesc->src_nents + 1]; | ||
1397 | |||
1398 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1399 | desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *) | ||
1400 | edesc->dma_link_tbl + | ||
1401 | edesc->src_nents + 1); | ||
1402 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | ||
1403 | link_tbl_ptr); | ||
1404 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | ||
1405 | edesc->dma_len, DMA_BIDIRECTIONAL); | ||
1406 | } | ||
1407 | |||
1408 | /* iv out */ | ||
1409 | map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, | ||
1410 | DMA_FROM_DEVICE); | ||
1411 | |||
1412 | /* last DWORD empty */ | ||
1413 | desc->ptr[6].len = 0; | ||
1414 | desc->ptr[6].ptr = 0; | ||
1415 | desc->ptr[6].j_extent = 0; | ||
1416 | |||
1417 | ret = talitos_submit(dev, desc, callback, areq); | ||
1418 | if (ret != -EINPROGRESS) { | ||
1419 | common_nonsnoop_unmap(dev, edesc, areq); | ||
1420 | kfree(edesc); | ||
1421 | } | ||
1422 | return ret; | ||
1423 | } | ||
1424 | |||
1425 | static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | ||
1426 | areq) | ||
1427 | { | ||
1428 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1429 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1430 | |||
1431 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes, | ||
1432 | 0, 0, areq->base.flags); | ||
1433 | } | ||
1434 | |||
1435 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) | ||
1436 | { | ||
1437 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1438 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1439 | struct talitos_edesc *edesc; | ||
1440 | |||
1441 | /* allocate extended descriptor */ | ||
1442 | edesc = ablkcipher_edesc_alloc(areq); | ||
1443 | if (IS_ERR(edesc)) | ||
1444 | return PTR_ERR(edesc); | ||
1445 | |||
1446 | /* set encrypt */ | ||
1447 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | ||
1448 | |||
1449 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | ||
1450 | } | ||
1451 | |||
1452 | static int ablkcipher_decrypt(struct ablkcipher_request *areq) | ||
1453 | { | ||
1454 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1455 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1456 | struct talitos_edesc *edesc; | ||
1457 | |||
1458 | /* allocate extended descriptor */ | ||
1459 | edesc = ablkcipher_edesc_alloc(areq); | ||
1460 | if (IS_ERR(edesc)) | ||
1461 | return PTR_ERR(edesc); | ||
1462 | |||
1463 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | ||
1464 | |||
1465 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | ||
1466 | } | ||
1467 | |||
1207 | struct talitos_alg_template { | 1468 | struct talitos_alg_template { |
1208 | char name[CRYPTO_MAX_ALG_NAME]; | 1469 | struct crypto_alg alg; |
1209 | char driver_name[CRYPTO_MAX_ALG_NAME]; | ||
1210 | unsigned int blocksize; | ||
1211 | struct aead_alg aead; | ||
1212 | struct device *dev; | ||
1213 | __be32 desc_hdr_template; | 1470 | __be32 desc_hdr_template; |
1214 | }; | 1471 | }; |
1215 | 1472 | ||
1216 | static struct talitos_alg_template driver_algs[] = { | 1473 | static struct talitos_alg_template driver_algs[] = { |
1217 | /* single-pass ipsec_esp descriptor */ | 1474 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ |
1218 | { | 1475 | { |
1219 | .name = "authenc(hmac(sha1),cbc(aes))", | 1476 | .alg = { |
1220 | .driver_name = "authenc-hmac-sha1-cbc-aes-talitos", | 1477 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
1221 | .blocksize = AES_BLOCK_SIZE, | 1478 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", |
1222 | .aead = { | 1479 | .cra_blocksize = AES_BLOCK_SIZE, |
1223 | .setkey = aead_authenc_setkey, | 1480 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1224 | .setauthsize = aead_authenc_setauthsize, | 1481 | .cra_type = &crypto_aead_type, |
1225 | .encrypt = aead_authenc_encrypt, | 1482 | .cra_aead = { |
1226 | .decrypt = aead_authenc_decrypt, | 1483 | .setkey = aead_setkey, |
1227 | .givencrypt = aead_authenc_givencrypt, | 1484 | .setauthsize = aead_setauthsize, |
1228 | .geniv = "<built-in>", | 1485 | .encrypt = aead_encrypt, |
1229 | .ivsize = AES_BLOCK_SIZE, | 1486 | .decrypt = aead_decrypt, |
1230 | .maxauthsize = SHA1_DIGEST_SIZE, | 1487 | .givencrypt = aead_givencrypt, |
1231 | }, | 1488 | .geniv = "<built-in>", |
1489 | .ivsize = AES_BLOCK_SIZE, | ||
1490 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1491 | } | ||
1492 | }, | ||
1232 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1493 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1233 | DESC_HDR_SEL0_AESU | | 1494 | DESC_HDR_SEL0_AESU | |
1234 | DESC_HDR_MODE0_AESU_CBC | | 1495 | DESC_HDR_MODE0_AESU_CBC | |
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1238 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1499 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1239 | }, | 1500 | }, |
1240 | { | 1501 | { |
1241 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | 1502 | .alg = { |
1242 | .driver_name = "authenc-hmac-sha1-cbc-3des-talitos", | 1503 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", |
1243 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1504 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", |
1244 | .aead = { | 1505 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1245 | .setkey = aead_authenc_setkey, | 1506 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1246 | .setauthsize = aead_authenc_setauthsize, | 1507 | .cra_type = &crypto_aead_type, |
1247 | .encrypt = aead_authenc_encrypt, | 1508 | .cra_aead = { |
1248 | .decrypt = aead_authenc_decrypt, | 1509 | .setkey = aead_setkey, |
1249 | .givencrypt = aead_authenc_givencrypt, | 1510 | .setauthsize = aead_setauthsize, |
1250 | .geniv = "<built-in>", | 1511 | .encrypt = aead_encrypt, |
1251 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1512 | .decrypt = aead_decrypt, |
1252 | .maxauthsize = SHA1_DIGEST_SIZE, | 1513 | .givencrypt = aead_givencrypt, |
1253 | }, | 1514 | .geniv = "<built-in>", |
1515 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1516 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1517 | } | ||
1518 | }, | ||
1254 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1519 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1255 | DESC_HDR_SEL0_DEU | | 1520 | DESC_HDR_SEL0_DEU | |
1256 | DESC_HDR_MODE0_DEU_CBC | | 1521 | DESC_HDR_MODE0_DEU_CBC | |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1261 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1526 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1262 | }, | 1527 | }, |
1263 | { | 1528 | { |
1264 | .name = "authenc(hmac(sha256),cbc(aes))", | 1529 | .alg = { |
1265 | .driver_name = "authenc-hmac-sha256-cbc-aes-talitos", | 1530 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
1266 | .blocksize = AES_BLOCK_SIZE, | 1531 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", |
1267 | .aead = { | 1532 | .cra_blocksize = AES_BLOCK_SIZE, |
1268 | .setkey = aead_authenc_setkey, | 1533 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1269 | .setauthsize = aead_authenc_setauthsize, | 1534 | .cra_type = &crypto_aead_type, |
1270 | .encrypt = aead_authenc_encrypt, | 1535 | .cra_aead = { |
1271 | .decrypt = aead_authenc_decrypt, | 1536 | .setkey = aead_setkey, |
1272 | .givencrypt = aead_authenc_givencrypt, | 1537 | .setauthsize = aead_setauthsize, |
1273 | .geniv = "<built-in>", | 1538 | .encrypt = aead_encrypt, |
1274 | .ivsize = AES_BLOCK_SIZE, | 1539 | .decrypt = aead_decrypt, |
1275 | .maxauthsize = SHA256_DIGEST_SIZE, | 1540 | .givencrypt = aead_givencrypt, |
1276 | }, | 1541 | .geniv = "<built-in>", |
1542 | .ivsize = AES_BLOCK_SIZE, | ||
1543 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1544 | } | ||
1545 | }, | ||
1277 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1546 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1278 | DESC_HDR_SEL0_AESU | | 1547 | DESC_HDR_SEL0_AESU | |
1279 | DESC_HDR_MODE0_AESU_CBC | | 1548 | DESC_HDR_MODE0_AESU_CBC | |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1283 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 1552 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1284 | }, | 1553 | }, |
1285 | { | 1554 | { |
1286 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | 1555 | .alg = { |
1287 | .driver_name = "authenc-hmac-sha256-cbc-3des-talitos", | 1556 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", |
1288 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1557 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", |
1289 | .aead = { | 1558 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1290 | .setkey = aead_authenc_setkey, | 1559 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1291 | .setauthsize = aead_authenc_setauthsize, | 1560 | .cra_type = &crypto_aead_type, |
1292 | .encrypt = aead_authenc_encrypt, | 1561 | .cra_aead = { |
1293 | .decrypt = aead_authenc_decrypt, | 1562 | .setkey = aead_setkey, |
1294 | .givencrypt = aead_authenc_givencrypt, | 1563 | .setauthsize = aead_setauthsize, |
1295 | .geniv = "<built-in>", | 1564 | .encrypt = aead_encrypt, |
1296 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1565 | .decrypt = aead_decrypt, |
1297 | .maxauthsize = SHA256_DIGEST_SIZE, | 1566 | .givencrypt = aead_givencrypt, |
1298 | }, | 1567 | .geniv = "<built-in>", |
1568 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1569 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1570 | } | ||
1571 | }, | ||
1299 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1572 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1300 | DESC_HDR_SEL0_DEU | | 1573 | DESC_HDR_SEL0_DEU | |
1301 | DESC_HDR_MODE0_DEU_CBC | | 1574 | DESC_HDR_MODE0_DEU_CBC | |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1306 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 1579 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1307 | }, | 1580 | }, |
1308 | { | 1581 | { |
1309 | .name = "authenc(hmac(md5),cbc(aes))", | 1582 | .alg = { |
1310 | .driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 1583 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
1311 | .blocksize = AES_BLOCK_SIZE, | 1584 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
1312 | .aead = { | 1585 | .cra_blocksize = AES_BLOCK_SIZE, |
1313 | .setkey = aead_authenc_setkey, | 1586 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1314 | .setauthsize = aead_authenc_setauthsize, | 1587 | .cra_type = &crypto_aead_type, |
1315 | .encrypt = aead_authenc_encrypt, | 1588 | .cra_aead = { |
1316 | .decrypt = aead_authenc_decrypt, | 1589 | .setkey = aead_setkey, |
1317 | .givencrypt = aead_authenc_givencrypt, | 1590 | .setauthsize = aead_setauthsize, |
1318 | .geniv = "<built-in>", | 1591 | .encrypt = aead_encrypt, |
1319 | .ivsize = AES_BLOCK_SIZE, | 1592 | .decrypt = aead_decrypt, |
1320 | .maxauthsize = MD5_DIGEST_SIZE, | 1593 | .givencrypt = aead_givencrypt, |
1321 | }, | 1594 | .geniv = "<built-in>", |
1595 | .ivsize = AES_BLOCK_SIZE, | ||
1596 | .maxauthsize = MD5_DIGEST_SIZE, | ||
1597 | } | ||
1598 | }, | ||
1322 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1599 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1323 | DESC_HDR_SEL0_AESU | | 1600 | DESC_HDR_SEL0_AESU | |
1324 | DESC_HDR_MODE0_AESU_CBC | | 1601 | DESC_HDR_MODE0_AESU_CBC | |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1328 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 1605 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1329 | }, | 1606 | }, |
1330 | { | 1607 | { |
1331 | .name = "authenc(hmac(md5),cbc(des3_ede))", | 1608 | .alg = { |
1332 | .driver_name = "authenc-hmac-md5-cbc-3des-talitos", | 1609 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
1333 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1610 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", |
1334 | .aead = { | 1611 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1335 | .setkey = aead_authenc_setkey, | 1612 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1336 | .setauthsize = aead_authenc_setauthsize, | 1613 | .cra_type = &crypto_aead_type, |
1337 | .encrypt = aead_authenc_encrypt, | 1614 | .cra_aead = { |
1338 | .decrypt = aead_authenc_decrypt, | 1615 | .setkey = aead_setkey, |
1339 | .givencrypt = aead_authenc_givencrypt, | 1616 | .setauthsize = aead_setauthsize, |
1340 | .geniv = "<built-in>", | 1617 | .encrypt = aead_encrypt, |
1341 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1618 | .decrypt = aead_decrypt, |
1342 | .maxauthsize = MD5_DIGEST_SIZE, | 1619 | .givencrypt = aead_givencrypt, |
1343 | }, | 1620 | .geniv = "<built-in>", |
1621 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1622 | .maxauthsize = MD5_DIGEST_SIZE, | ||
1623 | } | ||
1624 | }, | ||
1344 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1625 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1345 | DESC_HDR_SEL0_DEU | | 1626 | DESC_HDR_SEL0_DEU | |
1346 | DESC_HDR_MODE0_DEU_CBC | | 1627 | DESC_HDR_MODE0_DEU_CBC | |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = { | |||
1349 | DESC_HDR_MODE1_MDEU_INIT | | 1630 | DESC_HDR_MODE1_MDEU_INIT | |
1350 | DESC_HDR_MODE1_MDEU_PAD | | 1631 | DESC_HDR_MODE1_MDEU_PAD | |
1351 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 1632 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1633 | }, | ||
1634 | /* ABLKCIPHER algorithms. */ | ||
1635 | { | ||
1636 | .alg = { | ||
1637 | .cra_name = "cbc(aes)", | ||
1638 | .cra_driver_name = "cbc-aes-talitos", | ||
1639 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1640 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1641 | CRYPTO_ALG_ASYNC, | ||
1642 | .cra_type = &crypto_ablkcipher_type, | ||
1643 | .cra_ablkcipher = { | ||
1644 | .setkey = ablkcipher_setkey, | ||
1645 | .encrypt = ablkcipher_encrypt, | ||
1646 | .decrypt = ablkcipher_decrypt, | ||
1647 | .geniv = "eseqiv", | ||
1648 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1649 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1650 | .ivsize = AES_BLOCK_SIZE, | ||
1651 | } | ||
1652 | }, | ||
1653 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
1654 | DESC_HDR_SEL0_AESU | | ||
1655 | DESC_HDR_MODE0_AESU_CBC, | ||
1656 | }, | ||
1657 | { | ||
1658 | .alg = { | ||
1659 | .cra_name = "cbc(des3_ede)", | ||
1660 | .cra_driver_name = "cbc-3des-talitos", | ||
1661 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1662 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1663 | CRYPTO_ALG_ASYNC, | ||
1664 | .cra_type = &crypto_ablkcipher_type, | ||
1665 | .cra_ablkcipher = { | ||
1666 | .setkey = ablkcipher_setkey, | ||
1667 | .encrypt = ablkcipher_encrypt, | ||
1668 | .decrypt = ablkcipher_decrypt, | ||
1669 | .geniv = "eseqiv", | ||
1670 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
1671 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
1672 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1673 | } | ||
1674 | }, | ||
1675 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
1676 | DESC_HDR_SEL0_DEU | | ||
1677 | DESC_HDR_MODE0_DEU_CBC | | ||
1678 | DESC_HDR_MODE0_DEU_3DES, | ||
1352 | } | 1679 | } |
1353 | }; | 1680 | }; |
1354 | 1681 | ||
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg { | |||
1362 | static int talitos_cra_init(struct crypto_tfm *tfm) | 1689 | static int talitos_cra_init(struct crypto_tfm *tfm) |
1363 | { | 1690 | { |
1364 | struct crypto_alg *alg = tfm->__crt_alg; | 1691 | struct crypto_alg *alg = tfm->__crt_alg; |
1365 | struct talitos_crypto_alg *talitos_alg = | 1692 | struct talitos_crypto_alg *talitos_alg; |
1366 | container_of(alg, struct talitos_crypto_alg, crypto_alg); | ||
1367 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 1693 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
1368 | 1694 | ||
1695 | talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg); | ||
1696 | |||
1369 | /* update context with ptr to dev */ | 1697 | /* update context with ptr to dev */ |
1370 | ctx->dev = talitos_alg->dev; | 1698 | ctx->dev = talitos_alg->dev; |
1699 | |||
1371 | /* copy descriptor header template value */ | 1700 | /* copy descriptor header template value */ |
1372 | ctx->desc_hdr_template = talitos_alg->desc_hdr_template; | 1701 | ctx->desc_hdr_template = talitos_alg->desc_hdr_template; |
1373 | 1702 | ||
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
1453 | return ERR_PTR(-ENOMEM); | 1782 | return ERR_PTR(-ENOMEM); |
1454 | 1783 | ||
1455 | alg = &t_alg->crypto_alg; | 1784 | alg = &t_alg->crypto_alg; |
1785 | *alg = template->alg; | ||
1456 | 1786 | ||
1457 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); | ||
1458 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1459 | template->driver_name); | ||
1460 | alg->cra_module = THIS_MODULE; | 1787 | alg->cra_module = THIS_MODULE; |
1461 | alg->cra_init = talitos_cra_init; | 1788 | alg->cra_init = talitos_cra_init; |
1462 | alg->cra_priority = TALITOS_CRA_PRIORITY; | 1789 | alg->cra_priority = TALITOS_CRA_PRIORITY; |
1463 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
1464 | alg->cra_blocksize = template->blocksize; | ||
1465 | alg->cra_alignmask = 0; | 1790 | alg->cra_alignmask = 0; |
1466 | alg->cra_type = &crypto_aead_type; | ||
1467 | alg->cra_ctxsize = sizeof(struct talitos_ctx); | 1791 | alg->cra_ctxsize = sizeof(struct talitos_ctx); |
1468 | alg->cra_u.aead = template->aead; | ||
1469 | 1792 | ||
1470 | t_alg->desc_hdr_template = template->desc_hdr_template; | 1793 | t_alg->desc_hdr_template = template->desc_hdr_template; |
1471 | t_alg->dev = dev; | 1794 | t_alg->dev = dev; |
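For context, the template entries above now embed a complete crypto_alg rather than just a name/blocksize/aead tuple, which is what lets talitos_alg_alloc() above replace its field-by-field setup with a single structure copy. A minimal sketch of the inferred template layout, assuming the field names and types referenced in these hunks (the real definition sits in an earlier hunk of this patch that is not shown here):

    #include <linux/crypto.h>
    #include <linux/types.h>

    /* Inferred layout; only the fields referenced in the hunks above are shown. */
    struct talitos_alg_template {
            struct crypto_alg alg;          /* full cra_* template, copied wholesale   */
            __be32 desc_hdr_template;       /* SEC descriptor header for the algorithm */
    };

With that layout, the one-line copy "*alg = template->alg;" in talitos_alg_alloc() carries the per-entry cra_flags, cra_type and cra_u union across in one assignment, covering both the AEAD entries and the new ABLKCIPHER ones.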
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index f1c6ca7e2852..c8460fa9cfac 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -298,7 +298,7 @@ config I2C_BLACKFIN_TWI | |||
298 | config I2C_BLACKFIN_TWI_CLK_KHZ | 298 | config I2C_BLACKFIN_TWI_CLK_KHZ |
299 | int "Blackfin TWI I2C clock (kHz)" | 299 | int "Blackfin TWI I2C clock (kHz)" |
300 | depends on I2C_BLACKFIN_TWI | 300 | depends on I2C_BLACKFIN_TWI |
301 | range 10 400 | 301 | range 21 400 |
302 | default 50 | 302 | default 50 |
303 | help | 303 | help |
304 | The unit of the TWI clock is kHz. | 304 | The unit of the TWI clock is kHz. |
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index fc548b3d002e..26d8987e69bf 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c | |||
@@ -104,9 +104,14 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) | |||
104 | write_MASTER_CTL(iface, | 104 | write_MASTER_CTL(iface, |
105 | read_MASTER_CTL(iface) | STOP); | 105 | read_MASTER_CTL(iface) | STOP); |
106 | else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && | 106 | else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && |
107 | iface->cur_msg+1 < iface->msg_num) | 107 | iface->cur_msg + 1 < iface->msg_num) { |
108 | write_MASTER_CTL(iface, | 108 | if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) |
109 | read_MASTER_CTL(iface) | RSTART); | 109 | write_MASTER_CTL(iface, |
110 | read_MASTER_CTL(iface) | RSTART | MDIR); | ||
111 | else | ||
112 | write_MASTER_CTL(iface, | ||
113 | (read_MASTER_CTL(iface) | RSTART) & ~MDIR); | ||
114 | } | ||
110 | SSYNC(); | 115 | SSYNC(); |
111 | /* Clear status */ | 116 | /* Clear status */ |
112 | write_INT_STAT(iface, XMTSERV); | 117 | write_INT_STAT(iface, XMTSERV); |
@@ -134,9 +139,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) | |||
134 | read_MASTER_CTL(iface) | STOP); | 139 | read_MASTER_CTL(iface) | STOP); |
135 | SSYNC(); | 140 | SSYNC(); |
136 | } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && | 141 | } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && |
137 | iface->cur_msg+1 < iface->msg_num) { | 142 | iface->cur_msg + 1 < iface->msg_num) { |
138 | write_MASTER_CTL(iface, | 143 | if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) |
139 | read_MASTER_CTL(iface) | RSTART); | 144 | write_MASTER_CTL(iface, |
145 | read_MASTER_CTL(iface) | RSTART | MDIR); | ||
146 | else | ||
147 | write_MASTER_CTL(iface, | ||
148 | (read_MASTER_CTL(iface) | RSTART) & ~MDIR); | ||
140 | SSYNC(); | 149 | SSYNC(); |
141 | } | 150 | } |
142 | /* Clear interrupt source */ | 151 | /* Clear interrupt source */ |
@@ -196,8 +205,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) | |||
196 | /* remove restart bit and enable master receive */ | 205 | /* remove restart bit and enable master receive */ |
197 | write_MASTER_CTL(iface, | 206 | write_MASTER_CTL(iface, |
198 | read_MASTER_CTL(iface) & ~RSTART); | 207 | read_MASTER_CTL(iface) & ~RSTART); |
199 | write_MASTER_CTL(iface, | ||
200 | read_MASTER_CTL(iface) | MEN | MDIR); | ||
201 | SSYNC(); | 208 | SSYNC(); |
202 | } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && | 209 | } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && |
203 | iface->cur_msg+1 < iface->msg_num) { | 210 | iface->cur_msg+1 < iface->msg_num) { |
@@ -222,18 +229,19 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) | |||
222 | } | 229 | } |
223 | 230 | ||
224 | if (iface->pmsg[iface->cur_msg].len <= 255) | 231 | if (iface->pmsg[iface->cur_msg].len <= 255) |
225 | write_MASTER_CTL(iface, | 232 | write_MASTER_CTL(iface, |
226 | iface->pmsg[iface->cur_msg].len << 6); | 233 | (read_MASTER_CTL(iface) & |
234 | (~(0xff << 6))) | | ||
235 | (iface->pmsg[iface->cur_msg].len << 6)); | ||
227 | else { | 236 | else { |
228 | write_MASTER_CTL(iface, 0xff << 6); | 237 | write_MASTER_CTL(iface, |
238 | (read_MASTER_CTL(iface) | | ||
239 | (0xff << 6))); | ||
229 | iface->manual_stop = 1; | 240 | iface->manual_stop = 1; |
230 | } | 241 | } |
231 | /* remove restart bit and enable master receive */ | 242 | /* remove restart bit and enable master receive */ |
232 | write_MASTER_CTL(iface, | 243 | write_MASTER_CTL(iface, |
233 | read_MASTER_CTL(iface) & ~RSTART); | 244 | read_MASTER_CTL(iface) & ~RSTART); |
234 | write_MASTER_CTL(iface, read_MASTER_CTL(iface) | | ||
235 | MEN | ((iface->read_write == I2C_SMBUS_READ) ? | ||
236 | MDIR : 0)); | ||
237 | SSYNC(); | 245 | SSYNC(); |
238 | } else { | 246 | } else { |
239 | iface->result = 1; | 247 | iface->result = 1; |
@@ -441,6 +449,16 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr, | |||
441 | } | 449 | } |
442 | iface->transPtr = data->block; | 450 | iface->transPtr = data->block; |
443 | break; | 451 | break; |
452 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
453 | if (read_write == I2C_SMBUS_READ) { | ||
454 | iface->readNum = data->block[0]; | ||
455 | iface->cur_mode = TWI_I2C_MODE_COMBINED; | ||
456 | } else { | ||
457 | iface->writeNum = data->block[0]; | ||
458 | iface->cur_mode = TWI_I2C_MODE_STANDARDSUB; | ||
459 | } | ||
460 | iface->transPtr = (u8 *)&data->block[1]; | ||
461 | break; | ||
444 | default: | 462 | default: |
445 | return -1; | 463 | return -1; |
446 | } | 464 | } |
@@ -564,7 +582,7 @@ static u32 bfin_twi_functionality(struct i2c_adapter *adap) | |||
564 | return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | | 582 | return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | |
565 | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | | 583 | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | |
566 | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | | 584 | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | |
567 | I2C_FUNC_I2C; | 585 | I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK; |
568 | } | 586 | } |
569 | 587 | ||
570 | static struct i2c_algorithm bfin_twi_algorithm = { | 588 | static struct i2c_algorithm bfin_twi_algorithm = { |
@@ -614,6 +632,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) | |||
614 | struct i2c_adapter *p_adap; | 632 | struct i2c_adapter *p_adap; |
615 | struct resource *res; | 633 | struct resource *res; |
616 | int rc; | 634 | int rc; |
635 | unsigned int clkhilow; | ||
617 | 636 | ||
618 | iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL); | 637 | iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL); |
619 | if (!iface) { | 638 | if (!iface) { |
@@ -675,10 +694,14 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) | |||
675 | /* Set TWI internal clock as 10MHz */ | 694 | /* Set TWI internal clock as 10MHz */ |
676 | write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); | 695 | write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); |
677 | 696 | ||
697 | /* | ||
698 | * We will not end up with a CLKDIV=0 because no one will specify | ||
699 | * 20kHz SCL or less in Kconfig now. (5 * 1024 / 20 = 0x100) | ||
700 | */ | ||
701 | clkhilow = 5 * 1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ; | ||
702 | |||
678 | /* Set Twi interface clock as specified */ | 703 | /* Set Twi interface clock as specified */ |
679 | write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) | 704 | write_CLKDIV(iface, (clkhilow << 8) | clkhilow); |
680 | << 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) | ||
681 | & 0xFF)); | ||
682 | 705 | ||
683 | /* Enable TWI */ | 706 | /* Enable TWI */ |
684 | write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA); | 707 | write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA); |
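The new Kconfig lower bound (21 kHz instead of 10 kHz) and the clkhilow comment above describe the same arithmetic: each half of CLKDIV is an 8-bit count of 5*1024/CLK_KHZ internal clock periods, and 20 kHz would already need 5 * 1024 / 20 = 256 = 0x100, which no longer fits. A standalone worked example of that math (plain C, no Blackfin headers required):

    #include <stdio.h>

    /* Reproduces only the CLKDIV arithmetic from i2c_bfin_twi_probe() above. */
    int main(void)
    {
            const unsigned int khz[] = { 400, 100, 50, 21, 20 };
            unsigned int i;

            for (i = 0; i < sizeof(khz) / sizeof(khz[0]); i++) {
                    unsigned int clkhilow = 5 * 1024 / khz[i];
                    unsigned int clkdiv = (clkhilow << 8) | clkhilow;

                    printf("%3u kHz: clkhilow = %3u, CLKDIV = 0x%05x%s\n",
                           khz[i], clkhilow, clkdiv,
                           clkhilow > 0xff ? "  <-- overflows the 8-bit field" : "");
            }
            return 0;
    }

At 21 kHz the divisor is 243 and still fits; at 20 kHz it becomes 256 and would spill into the upper byte, which is why the Kconfig range now starts at 21.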
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index e5193bf75483..3542c6ba98f1 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -216,6 +216,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) | |||
216 | struct ocores_i2c_platform_data *pdata; | 216 | struct ocores_i2c_platform_data *pdata; |
217 | struct resource *res, *res2; | 217 | struct resource *res, *res2; |
218 | int ret; | 218 | int ret; |
219 | int i; | ||
219 | 220 | ||
220 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 221 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
221 | if (!res) | 222 | if (!res) |
@@ -271,6 +272,10 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) | |||
271 | goto add_adapter_failed; | 272 | goto add_adapter_failed; |
272 | } | 273 | } |
273 | 274 | ||
275 | /* add in known devices to the bus */ | ||
276 | for (i = 0; i < pdata->num_devices; i++) | ||
277 | i2c_new_device(&i2c->adap, pdata->devices + i); | ||
278 | |||
274 | return 0; | 279 | return 0; |
275 | 280 | ||
276 | add_adapter_failed: | 281 | add_adapter_failed: |
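The loop added to ocores_i2c_probe() above instantiates client devices listed in the platform data right after the adapter is registered. A hypothetical board-file use of that hook, assuming the devices/num_devices members added to struct ocores_i2c_platform_data by the same patch (chip names and addresses are illustrative only):

    #include <linux/kernel.h>
    #include <linux/i2c.h>
    #include <linux/i2c-ocores.h>

    /* Hypothetical example: the probe loop above calls i2c_new_device()
     * for each entry in .devices once the adapter has been added. */
    static struct i2c_board_info myboard_i2c_devices[] = {
            { I2C_BOARD_INFO("tsc2003", 0x48) },
            { I2C_BOARD_INFO("ds1337",  0x68) },
    };

    static struct ocores_i2c_platform_data myboard_i2c_data = {
            .regstep        = 2,
            .clock_khz      = 50000,
            .devices        = myboard_i2c_devices,
            .num_devices    = ARRAY_SIZE(myboard_i2c_devices),
    };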
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index ece0125a1ee5..c73475dd0fba 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -333,8 +333,18 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) | |||
333 | 333 | ||
334 | if (cpu_is_omap2430() || cpu_is_omap34xx()) { | 334 | if (cpu_is_omap2430() || cpu_is_omap34xx()) { |
335 | 335 | ||
336 | /* HSI2C controller internal clk rate should be 19.2 Mhz */ | 336 | /* |
337 | internal_clk = 19200; | 337 | * HSI2C controller internal clk rate should be 19.2 Mhz for |
338 | * HS and for all modes on 2430. On 34xx we can use lower rate | ||
339 | * to get longer filter period for better noise suppression. | ||
340 | * The filter is iclk (fclk for HS) period. | ||
341 | */ | ||
342 | if (dev->speed > 400 || cpu_is_omap2430()) | ||
343 | internal_clk = 19200; | ||
344 | else if (dev->speed > 100) | ||
345 | internal_clk = 9600; | ||
346 | else | ||
347 | internal_clk = 4000; | ||
338 | fclk_rate = clk_get_rate(dev->fclk) / 1000; | 348 | fclk_rate = clk_get_rate(dev->fclk) / 1000; |
339 | 349 | ||
340 | /* Compute prescaler divisor */ | 350 | /* Compute prescaler divisor */ |
@@ -343,17 +353,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) | |||
343 | 353 | ||
344 | /* If configured for High Speed */ | 354 | /* If configured for High Speed */ |
345 | if (dev->speed > 400) { | 355 | if (dev->speed > 400) { |
356 | unsigned long scl; | ||
357 | |||
346 | /* For first phase of HS mode */ | 358 | /* For first phase of HS mode */ |
347 | fsscll = internal_clk / (400 * 2) - 6; | 359 | scl = internal_clk / 400; |
348 | fssclh = internal_clk / (400 * 2) - 6; | 360 | fsscll = scl - (scl / 3) - 7; |
361 | fssclh = (scl / 3) - 5; | ||
349 | 362 | ||
350 | /* For second phase of HS mode */ | 363 | /* For second phase of HS mode */ |
351 | hsscll = fclk_rate / (dev->speed * 2) - 6; | 364 | scl = fclk_rate / dev->speed; |
352 | hssclh = fclk_rate / (dev->speed * 2) - 6; | 365 | hsscll = scl - (scl / 3) - 7; |
366 | hssclh = (scl / 3) - 5; | ||
367 | } else if (dev->speed > 100) { | ||
368 | unsigned long scl; | ||
369 | |||
370 | /* Fast mode */ | ||
371 | scl = internal_clk / dev->speed; | ||
372 | fsscll = scl - (scl / 3) - 7; | ||
373 | fssclh = (scl / 3) - 5; | ||
353 | } else { | 374 | } else { |
354 | /* To handle F/S modes */ | 375 | /* Standard mode */ |
355 | fsscll = internal_clk / (dev->speed * 2) - 6; | 376 | fsscll = internal_clk / (dev->speed * 2) - 7; |
356 | fssclh = internal_clk / (dev->speed * 2) - 6; | 377 | fssclh = internal_clk / (dev->speed * 2) - 5; |
357 | } | 378 | } |
358 | scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll; | 379 | scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll; |
359 | sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh; | 380 | sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh; |
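The reworked divisor math above aims SCL at roughly a one-third high, two-thirds low duty cycle. Assuming (per the OMAP TRM) that the controller holds SCL low for SCLL+7 and high for SCLH+5 internal clock cycles, the numbers work out as in this standalone check:

    #include <stdio.h>

    /* Standalone check of the fast/HS divisor formulas above; clocks in kHz. */
    static void scl_example(unsigned long internal_clk, unsigned long speed)
    {
            unsigned long scl = internal_clk / speed;
            unsigned long scll = scl - (scl / 3) - 7;
            unsigned long sclh = (scl / 3) - 5;

            printf("%lu kHz @ iclk %lu kHz: SCLL=%lu SCLH=%lu -> %lu low + %lu high cycles\n",
                   speed, internal_clk, scll, sclh, scll + 7, sclh + 5);
    }

    int main(void)
    {
            scl_example(9600, 400);         /* fast mode, new 9.6 MHz internal clock */
            scl_example(19200, 400);        /* first (F/S) phase of HS mode          */
            return 0;
    }

For 400 kHz fast mode this gives SCLL=9 and SCLH=3, i.e. 16 low plus 8 high cycles of the 9.6 MHz clock, 24 cycles total, which is 400 kHz again but with the longer low period the fast-mode spec requires.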
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 1691ef0f1ee1..079a312d36fd 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -51,6 +51,11 @@ enum s3c24xx_i2c_state { | |||
51 | STATE_STOP | 51 | STATE_STOP |
52 | }; | 52 | }; |
53 | 53 | ||
54 | enum s3c24xx_i2c_type { | ||
55 | TYPE_S3C2410, | ||
56 | TYPE_S3C2440, | ||
57 | }; | ||
58 | |||
54 | struct s3c24xx_i2c { | 59 | struct s3c24xx_i2c { |
55 | spinlock_t lock; | 60 | spinlock_t lock; |
56 | wait_queue_head_t wait; | 61 | wait_queue_head_t wait; |
@@ -88,8 +93,10 @@ struct s3c24xx_i2c { | |||
88 | static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c) | 93 | static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c) |
89 | { | 94 | { |
90 | struct platform_device *pdev = to_platform_device(i2c->dev); | 95 | struct platform_device *pdev = to_platform_device(i2c->dev); |
96 | enum s3c24xx_i2c_type type; | ||
91 | 97 | ||
92 | return !strcmp(pdev->name, "s3c2440-i2c"); | 98 | type = platform_get_device_id(pdev)->driver_data; |
99 | return type == TYPE_S3C2440; | ||
93 | } | 100 | } |
94 | 101 | ||
95 | /* s3c24xx_i2c_master_complete | 102 | /* s3c24xx_i2c_master_complete |
@@ -969,52 +976,41 @@ static int s3c24xx_i2c_resume(struct platform_device *dev) | |||
969 | 976 | ||
970 | /* device driver for platform bus bits */ | 977 | /* device driver for platform bus bits */ |
971 | 978 | ||
972 | static struct platform_driver s3c2410_i2c_driver = { | 979 | static struct platform_device_id s3c24xx_driver_ids[] = { |
973 | .probe = s3c24xx_i2c_probe, | 980 | { |
974 | .remove = s3c24xx_i2c_remove, | 981 | .name = "s3c2410-i2c", |
975 | .suspend_late = s3c24xx_i2c_suspend_late, | 982 | .driver_data = TYPE_S3C2410, |
976 | .resume = s3c24xx_i2c_resume, | 983 | }, { |
977 | .driver = { | 984 | .name = "s3c2440-i2c", |
978 | .owner = THIS_MODULE, | 985 | .driver_data = TYPE_S3C2440, |
979 | .name = "s3c2410-i2c", | 986 | }, { }, |
980 | }, | ||
981 | }; | 987 | }; |
988 | MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); | ||
982 | 989 | ||
983 | static struct platform_driver s3c2440_i2c_driver = { | 990 | static struct platform_driver s3c24xx_i2c_driver = { |
984 | .probe = s3c24xx_i2c_probe, | 991 | .probe = s3c24xx_i2c_probe, |
985 | .remove = s3c24xx_i2c_remove, | 992 | .remove = s3c24xx_i2c_remove, |
986 | .suspend_late = s3c24xx_i2c_suspend_late, | 993 | .suspend_late = s3c24xx_i2c_suspend_late, |
987 | .resume = s3c24xx_i2c_resume, | 994 | .resume = s3c24xx_i2c_resume, |
995 | .id_table = s3c24xx_driver_ids, | ||
988 | .driver = { | 996 | .driver = { |
989 | .owner = THIS_MODULE, | 997 | .owner = THIS_MODULE, |
990 | .name = "s3c2440-i2c", | 998 | .name = "s3c-i2c", |
991 | }, | 999 | }, |
992 | }; | 1000 | }; |
993 | 1001 | ||
994 | static int __init i2c_adap_s3c_init(void) | 1002 | static int __init i2c_adap_s3c_init(void) |
995 | { | 1003 | { |
996 | int ret; | 1004 | return platform_driver_register(&s3c24xx_i2c_driver); |
997 | |||
998 | ret = platform_driver_register(&s3c2410_i2c_driver); | ||
999 | if (ret == 0) { | ||
1000 | ret = platform_driver_register(&s3c2440_i2c_driver); | ||
1001 | if (ret) | ||
1002 | platform_driver_unregister(&s3c2410_i2c_driver); | ||
1003 | } | ||
1004 | |||
1005 | return ret; | ||
1006 | } | 1005 | } |
1007 | subsys_initcall(i2c_adap_s3c_init); | 1006 | subsys_initcall(i2c_adap_s3c_init); |
1008 | 1007 | ||
1009 | static void __exit i2c_adap_s3c_exit(void) | 1008 | static void __exit i2c_adap_s3c_exit(void) |
1010 | { | 1009 | { |
1011 | platform_driver_unregister(&s3c2410_i2c_driver); | 1010 | platform_driver_unregister(&s3c24xx_i2c_driver); |
1012 | platform_driver_unregister(&s3c2440_i2c_driver); | ||
1013 | } | 1011 | } |
1014 | module_exit(i2c_adap_s3c_exit); | 1012 | module_exit(i2c_adap_s3c_exit); |
1015 | 1013 | ||
1016 | MODULE_DESCRIPTION("S3C24XX I2C Bus driver"); | 1014 | MODULE_DESCRIPTION("S3C24XX I2C Bus driver"); |
1017 | MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); | 1015 | MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); |
1018 | MODULE_LICENSE("GPL"); | 1016 | MODULE_LICENSE("GPL"); |
1019 | MODULE_ALIAS("platform:s3c2410-i2c"); | ||
1020 | MODULE_ALIAS("platform:s3c2440-i2c"); | ||
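Because the two platform drivers above collapse into one that matches through an id_table, existing machine code keeps registering its devices under the old names and still binds; only the variant lookup moves to platform_get_device_id(). A minimal, hypothetical board-side registration (resources omitted for brevity):

    #include <linux/platform_device.h>

    /* Hypothetical: the device name still identifies the controller variant
     * and now selects TYPE_S3C2440 via s3c24xx_driver_ids[] at probe time. */
    static struct platform_device myboard_i2c = {
            .name   = "s3c2440-i2c",
            .id     = -1,
    };

    static int __init myboard_add_i2c(void)
    {
            return platform_device_register(&myboard_i2c);
    }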
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index cf6a100bb38f..7b603e4b41db 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -177,6 +177,7 @@ struct atmel_mci { | |||
177 | * available. | 177 | * available. |
178 | * @wp_pin: GPIO pin used for card write protect sending, or negative | 178 | * @wp_pin: GPIO pin used for card write protect sending, or negative |
179 | * if not available. | 179 | * if not available. |
180 | * @detect_is_active_high: The state of the detect pin when it is active. | ||
180 | * @detect_timer: Timer used for debouncing @detect_pin interrupts. | 181 | * @detect_timer: Timer used for debouncing @detect_pin interrupts. |
181 | */ | 182 | */ |
182 | struct atmel_mci_slot { | 183 | struct atmel_mci_slot { |
@@ -196,6 +197,7 @@ struct atmel_mci_slot { | |||
196 | 197 | ||
197 | int detect_pin; | 198 | int detect_pin; |
198 | int wp_pin; | 199 | int wp_pin; |
200 | bool detect_is_active_high; | ||
199 | 201 | ||
200 | struct timer_list detect_timer; | 202 | struct timer_list detect_timer; |
201 | }; | 203 | }; |
@@ -924,7 +926,8 @@ static int atmci_get_cd(struct mmc_host *mmc) | |||
924 | struct atmel_mci_slot *slot = mmc_priv(mmc); | 926 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
925 | 927 | ||
926 | if (gpio_is_valid(slot->detect_pin)) { | 928 | if (gpio_is_valid(slot->detect_pin)) { |
927 | present = !gpio_get_value(slot->detect_pin); | 929 | present = !(gpio_get_value(slot->detect_pin) ^ |
930 | slot->detect_is_active_high); | ||
928 | dev_dbg(&mmc->class_dev, "card is %spresent\n", | 931 | dev_dbg(&mmc->class_dev, "card is %spresent\n", |
929 | present ? "" : "not "); | 932 | present ? "" : "not "); |
930 | } | 933 | } |
@@ -1028,7 +1031,8 @@ static void atmci_detect_change(unsigned long data) | |||
1028 | return; | 1031 | return; |
1029 | 1032 | ||
1030 | enable_irq(gpio_to_irq(slot->detect_pin)); | 1033 | enable_irq(gpio_to_irq(slot->detect_pin)); |
1031 | present = !gpio_get_value(slot->detect_pin); | 1034 | present = !(gpio_get_value(slot->detect_pin) ^ |
1035 | slot->detect_is_active_high); | ||
1032 | present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1036 | present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1033 | 1037 | ||
1034 | dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", | 1038 | dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", |
@@ -1456,6 +1460,7 @@ static int __init atmci_init_slot(struct atmel_mci *host, | |||
1456 | slot->host = host; | 1460 | slot->host = host; |
1457 | slot->detect_pin = slot_data->detect_pin; | 1461 | slot->detect_pin = slot_data->detect_pin; |
1458 | slot->wp_pin = slot_data->wp_pin; | 1462 | slot->wp_pin = slot_data->wp_pin; |
1463 | slot->detect_is_active_high = slot_data->detect_is_active_high; | ||
1459 | slot->sdc_reg = sdc_reg; | 1464 | slot->sdc_reg = sdc_reg; |
1460 | 1465 | ||
1461 | mmc->ops = &atmci_ops; | 1466 | mmc->ops = &atmci_ops; |
@@ -1477,7 +1482,8 @@ static int __init atmci_init_slot(struct atmel_mci *host, | |||
1477 | if (gpio_request(slot->detect_pin, "mmc_detect")) { | 1482 | if (gpio_request(slot->detect_pin, "mmc_detect")) { |
1478 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); | 1483 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); |
1479 | slot->detect_pin = -EBUSY; | 1484 | slot->detect_pin = -EBUSY; |
1480 | } else if (gpio_get_value(slot->detect_pin)) { | 1485 | } else if (gpio_get_value(slot->detect_pin) ^ |
1486 | slot->detect_is_active_high) { | ||
1481 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1487 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1482 | } | 1488 | } |
1483 | } | 1489 | } |
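The detect_is_active_high flag above turns the hard-coded active-low card-detect assumption into present = !(gpio_value ^ active_high). A standalone truth table for that expression (plain C, mirroring the dev_dbg output in atmci_get_cd()):

    #include <stdio.h>

    int main(void)
    {
            int gpio, active_high;

            for (gpio = 0; gpio <= 1; gpio++)
                    for (active_high = 0; active_high <= 1; active_high++)
                            printf("gpio=%d active_high=%d -> card is %spresent\n",
                                   gpio, active_high,
                                   !(gpio ^ active_high) ? "" : "not ");
            return 0;
    }

With active_high left at 0 the expression reduces to the old !gpio_get_value() behaviour, so boards that never set the new slot_data field are unaffected.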
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 8d740376bbd2..44f77eb1180f 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ethtool.h> | 26 | #include <linux/ethtool.h> |
27 | #include <linux/if_vlan.h> | 27 | #include <linux/if_vlan.h> |
28 | #include <linux/module.h> | ||
29 | |||
28 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 30 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
29 | #define BCM_VLAN 1 | 31 | #define BCM_VLAN 1 |
30 | #endif | 32 | #endif |
@@ -1454,6 +1456,7 @@ static inline u16 cnic_get_vlan(struct net_device *dev, | |||
1454 | static int cnic_get_v4_route(struct sockaddr_in *dst_addr, | 1456 | static int cnic_get_v4_route(struct sockaddr_in *dst_addr, |
1455 | struct dst_entry **dst) | 1457 | struct dst_entry **dst) |
1456 | { | 1458 | { |
1459 | #if defined(CONFIG_INET) | ||
1457 | struct flowi fl; | 1460 | struct flowi fl; |
1458 | int err; | 1461 | int err; |
1459 | struct rtable *rt; | 1462 | struct rtable *rt; |
@@ -1465,12 +1468,15 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, | |||
1465 | if (!err) | 1468 | if (!err) |
1466 | *dst = &rt->u.dst; | 1469 | *dst = &rt->u.dst; |
1467 | return err; | 1470 | return err; |
1471 | #else | ||
1472 | return -ENETUNREACH; | ||
1473 | #endif | ||
1468 | } | 1474 | } |
1469 | 1475 | ||
1470 | static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, | 1476 | static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, |
1471 | struct dst_entry **dst) | 1477 | struct dst_entry **dst) |
1472 | { | 1478 | { |
1473 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1479 | #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) |
1474 | struct flowi fl; | 1480 | struct flowi fl; |
1475 | 1481 | ||
1476 | memset(&fl, 0, sizeof(fl)); | 1482 | memset(&fl, 0, sizeof(fl)); |
@@ -1550,7 +1556,7 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) | |||
1550 | clear_bit(SK_F_IPV6, &csk->flags); | 1556 | clear_bit(SK_F_IPV6, &csk->flags); |
1551 | 1557 | ||
1552 | if (is_v6) { | 1558 | if (is_v6) { |
1553 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1559 | #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) |
1554 | set_bit(SK_F_IPV6, &csk->flags); | 1560 | set_bit(SK_F_IPV6, &csk->flags); |
1555 | err = cnic_get_v6_route(&saddr->remote.v6, &dst); | 1561 | err = cnic_get_v6_route(&saddr->remote.v6, &dst); |
1556 | if (err) | 1562 | if (err) |
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index 29228f5899cd..480f28127f09 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig | |||
@@ -39,6 +39,7 @@ config XFS_QUOTA | |||
39 | config XFS_POSIX_ACL | 39 | config XFS_POSIX_ACL |
40 | bool "XFS POSIX ACL support" | 40 | bool "XFS POSIX ACL support" |
41 | depends on XFS_FS | 41 | depends on XFS_FS |
42 | select FS_POSIX_ACL | ||
42 | help | 43 | help |
43 | POSIX Access Control Lists (ACLs) support permissions for users and | 44 | POSIX Access Control Lists (ACLs) support permissions for users and |
44 | groups beyond the owner/group/world scheme. | 45 | groups beyond the owner/group/world scheme. |
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 60f107e47fe9..7a59daed1782 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile | |||
@@ -40,7 +40,7 @@ xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o | |||
40 | endif | 40 | endif |
41 | 41 | ||
42 | xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o | 42 | xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o |
43 | xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o | 43 | xfs-$(CONFIG_XFS_POSIX_ACL) += $(XFS_LINUX)/xfs_acl.o |
44 | xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o | 44 | xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o |
45 | xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o | 45 | xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o |
46 | xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o | 46 | xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o |
@@ -88,8 +88,7 @@ xfs-y += xfs_alloc.o \ | |||
88 | xfs_utils.o \ | 88 | xfs_utils.o \ |
89 | xfs_vnodeops.o \ | 89 | xfs_vnodeops.o \ |
90 | xfs_rw.o \ | 90 | xfs_rw.o \ |
91 | xfs_dmops.o \ | 91 | xfs_dmops.o |
92 | xfs_qmops.o | ||
93 | 92 | ||
94 | xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ | 93 | xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ |
95 | xfs_dir2_trace.o | 94 | xfs_dir2_trace.o |
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c new file mode 100644 index 000000000000..1e9d1246eebc --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_acl.c | |||
@@ -0,0 +1,523 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, Christoph Hellwig | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_acl.h" | ||
20 | #include "xfs_attr.h" | ||
21 | #include "xfs_bmap_btree.h" | ||
22 | #include "xfs_inode.h" | ||
23 | #include "xfs_vnodeops.h" | ||
24 | #include <linux/xattr.h> | ||
25 | #include <linux/posix_acl_xattr.h> | ||
26 | |||
27 | |||
28 | #define XFS_ACL_NOT_CACHED ((void *)-1) | ||
29 | |||
30 | /* | ||
31 | * Locking scheme: | ||
32 | * - all ACL updates are protected by inode->i_mutex, which is taken before | ||
33 | * calling into this file. | ||
34 | * - access and updates to the ip->i_acl and ip->i_default_acl pointers are | ||
35 | * protected by inode->i_lock. | ||
36 | */ | ||
37 | |||
38 | STATIC struct posix_acl * | ||
39 | xfs_acl_from_disk(struct xfs_acl *aclp) | ||
40 | { | ||
41 | struct posix_acl_entry *acl_e; | ||
42 | struct posix_acl *acl; | ||
43 | struct xfs_acl_entry *ace; | ||
44 | int count, i; | ||
45 | |||
46 | count = be32_to_cpu(aclp->acl_cnt); | ||
47 | |||
48 | acl = posix_acl_alloc(count, GFP_KERNEL); | ||
49 | if (!acl) | ||
50 | return ERR_PTR(-ENOMEM); | ||
51 | |||
52 | for (i = 0; i < count; i++) { | ||
53 | acl_e = &acl->a_entries[i]; | ||
54 | ace = &aclp->acl_entry[i]; | ||
55 | |||
56 | /* | ||
57 | * The tag is 32 bits on disk and 16 bits in core. | ||
58 | * | ||
59 | * Because every access to it goes through the core | ||
60 | * format first this is not a problem. | ||
61 | */ | ||
62 | acl_e->e_tag = be32_to_cpu(ace->ae_tag); | ||
63 | acl_e->e_perm = be16_to_cpu(ace->ae_perm); | ||
64 | |||
65 | switch (acl_e->e_tag) { | ||
66 | case ACL_USER: | ||
67 | case ACL_GROUP: | ||
68 | acl_e->e_id = be32_to_cpu(ace->ae_id); | ||
69 | break; | ||
70 | case ACL_USER_OBJ: | ||
71 | case ACL_GROUP_OBJ: | ||
72 | case ACL_MASK: | ||
73 | case ACL_OTHER: | ||
74 | acl_e->e_id = ACL_UNDEFINED_ID; | ||
75 | break; | ||
76 | default: | ||
77 | goto fail; | ||
78 | } | ||
79 | } | ||
80 | return acl; | ||
81 | |||
82 | fail: | ||
83 | posix_acl_release(acl); | ||
84 | return ERR_PTR(-EINVAL); | ||
85 | } | ||
86 | |||
87 | STATIC void | ||
88 | xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl) | ||
89 | { | ||
90 | const struct posix_acl_entry *acl_e; | ||
91 | struct xfs_acl_entry *ace; | ||
92 | int i; | ||
93 | |||
94 | aclp->acl_cnt = cpu_to_be32(acl->a_count); | ||
95 | for (i = 0; i < acl->a_count; i++) { | ||
96 | ace = &aclp->acl_entry[i]; | ||
97 | acl_e = &acl->a_entries[i]; | ||
98 | |||
99 | ace->ae_tag = cpu_to_be32(acl_e->e_tag); | ||
100 | ace->ae_id = cpu_to_be32(acl_e->e_id); | ||
101 | ace->ae_perm = cpu_to_be16(acl_e->e_perm); | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Update the cached ACL pointer in the inode. | ||
107 | * | ||
108 | * Because we don't hold any locks while reading/writing the attribute | ||
109 | * from/to disk another thread could have raced and updated the cached | ||
110 | * ACL value before us. In that case we release the previous cached value | ||
111 | * and update it with our new value. | ||
112 | */ | ||
113 | STATIC void | ||
114 | xfs_update_cached_acl(struct inode *inode, struct posix_acl **p_acl, | ||
115 | struct posix_acl *acl) | ||
116 | { | ||
117 | spin_lock(&inode->i_lock); | ||
118 | if (*p_acl && *p_acl != XFS_ACL_NOT_CACHED) | ||
119 | posix_acl_release(*p_acl); | ||
120 | *p_acl = posix_acl_dup(acl); | ||
121 | spin_unlock(&inode->i_lock); | ||
122 | } | ||
123 | |||
124 | struct posix_acl * | ||
125 | xfs_get_acl(struct inode *inode, int type) | ||
126 | { | ||
127 | struct xfs_inode *ip = XFS_I(inode); | ||
128 | struct posix_acl *acl = NULL, **p_acl; | ||
129 | struct xfs_acl *xfs_acl; | ||
130 | int len = sizeof(struct xfs_acl); | ||
131 | char *ea_name; | ||
132 | int error; | ||
133 | |||
134 | switch (type) { | ||
135 | case ACL_TYPE_ACCESS: | ||
136 | ea_name = SGI_ACL_FILE; | ||
137 | p_acl = &ip->i_acl; | ||
138 | break; | ||
139 | case ACL_TYPE_DEFAULT: | ||
140 | ea_name = SGI_ACL_DEFAULT; | ||
141 | p_acl = &ip->i_default_acl; | ||
142 | break; | ||
143 | default: | ||
144 | return ERR_PTR(-EINVAL); | ||
145 | } | ||
146 | |||
147 | spin_lock(&inode->i_lock); | ||
148 | if (*p_acl != XFS_ACL_NOT_CACHED) | ||
149 | acl = posix_acl_dup(*p_acl); | ||
150 | spin_unlock(&inode->i_lock); | ||
151 | |||
152 | /* | ||
153 | * If we have a cached ACLs value just return it, not need to | ||
154 | * go out to the disk. | ||
155 | */ | ||
156 | if (acl) | ||
157 | return acl; | ||
158 | |||
159 | xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); | ||
160 | if (!xfs_acl) | ||
161 | return ERR_PTR(-ENOMEM); | ||
162 | |||
163 | error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT); | ||
164 | if (error) { | ||
165 | /* | ||
166 | * If the attribute doesn't exist make sure we have a negative | ||
167 | * cache entry, for any other error assume it is transient and | ||
168 | * leave the cache entry as XFS_ACL_NOT_CACHED. | ||
169 | */ | ||
170 | if (error == -ENOATTR) { | ||
171 | acl = NULL; | ||
172 | goto out_update_cache; | ||
173 | } | ||
174 | goto out; | ||
175 | } | ||
176 | |||
177 | acl = xfs_acl_from_disk(xfs_acl); | ||
178 | if (IS_ERR(acl)) | ||
179 | goto out; | ||
180 | |||
181 | out_update_cache: | ||
182 | xfs_update_cached_acl(inode, p_acl, acl); | ||
183 | out: | ||
184 | kfree(xfs_acl); | ||
185 | return acl; | ||
186 | } | ||
187 | |||
188 | STATIC int | ||
189 | xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) | ||
190 | { | ||
191 | struct xfs_inode *ip = XFS_I(inode); | ||
192 | struct posix_acl **p_acl; | ||
193 | char *ea_name; | ||
194 | int error; | ||
195 | |||
196 | if (S_ISLNK(inode->i_mode)) | ||
197 | return -EOPNOTSUPP; | ||
198 | |||
199 | switch (type) { | ||
200 | case ACL_TYPE_ACCESS: | ||
201 | ea_name = SGI_ACL_FILE; | ||
202 | p_acl = &ip->i_acl; | ||
203 | break; | ||
204 | case ACL_TYPE_DEFAULT: | ||
205 | if (!S_ISDIR(inode->i_mode)) | ||
206 | return acl ? -EACCES : 0; | ||
207 | ea_name = SGI_ACL_DEFAULT; | ||
208 | p_acl = &ip->i_default_acl; | ||
209 | break; | ||
210 | default: | ||
211 | return -EINVAL; | ||
212 | } | ||
213 | |||
214 | if (acl) { | ||
215 | struct xfs_acl *xfs_acl; | ||
216 | int len; | ||
217 | |||
218 | xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); | ||
219 | if (!xfs_acl) | ||
220 | return -ENOMEM; | ||
221 | |||
222 | xfs_acl_to_disk(xfs_acl, acl); | ||
223 | len = sizeof(struct xfs_acl) - | ||
224 | (sizeof(struct xfs_acl_entry) * | ||
225 | (XFS_ACL_MAX_ENTRIES - acl->a_count)); | ||
226 | |||
227 | error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl, | ||
228 | len, ATTR_ROOT); | ||
229 | |||
230 | kfree(xfs_acl); | ||
231 | } else { | ||
232 | /* | ||
233 | * A NULL ACL argument means we want to remove the ACL. | ||
234 | */ | ||
235 | error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); | ||
236 | |||
237 | /* | ||
238 | * If the attribute didn't exist to start with that's fine. | ||
239 | */ | ||
240 | if (error == -ENOATTR) | ||
241 | error = 0; | ||
242 | } | ||
243 | |||
244 | if (!error) | ||
245 | xfs_update_cached_acl(inode, p_acl, acl); | ||
246 | return error; | ||
247 | } | ||
248 | |||
249 | int | ||
250 | xfs_check_acl(struct inode *inode, int mask) | ||
251 | { | ||
252 | struct xfs_inode *ip = XFS_I(inode); | ||
253 | struct posix_acl *acl; | ||
254 | int error = -EAGAIN; | ||
255 | |||
256 | xfs_itrace_entry(ip); | ||
257 | |||
258 | /* | ||
259 | * If there is no attribute fork no ACL exists on this inode and | ||
260 | * we can skip the whole exercise. | ||
261 | */ | ||
262 | if (!XFS_IFORK_Q(ip)) | ||
263 | return -EAGAIN; | ||
264 | |||
265 | acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); | ||
266 | if (IS_ERR(acl)) | ||
267 | return PTR_ERR(acl); | ||
268 | if (acl) { | ||
269 | error = posix_acl_permission(inode, acl, mask); | ||
270 | posix_acl_release(acl); | ||
271 | } | ||
272 | |||
273 | return error; | ||
274 | } | ||
275 | |||
276 | static int | ||
277 | xfs_set_mode(struct inode *inode, mode_t mode) | ||
278 | { | ||
279 | int error = 0; | ||
280 | |||
281 | if (mode != inode->i_mode) { | ||
282 | struct iattr iattr; | ||
283 | |||
284 | iattr.ia_valid = ATTR_MODE; | ||
285 | iattr.ia_mode = mode; | ||
286 | |||
287 | error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL); | ||
288 | } | ||
289 | |||
290 | return error; | ||
291 | } | ||
292 | |||
293 | static int | ||
294 | xfs_acl_exists(struct inode *inode, char *name) | ||
295 | { | ||
296 | int len = sizeof(struct xfs_acl); | ||
297 | |||
298 | return (xfs_attr_get(XFS_I(inode), name, NULL, &len, | ||
299 | ATTR_ROOT|ATTR_KERNOVAL) == 0); | ||
300 | } | ||
301 | |||
302 | int | ||
303 | posix_acl_access_exists(struct inode *inode) | ||
304 | { | ||
305 | return xfs_acl_exists(inode, SGI_ACL_FILE); | ||
306 | } | ||
307 | |||
308 | int | ||
309 | posix_acl_default_exists(struct inode *inode) | ||
310 | { | ||
311 | if (!S_ISDIR(inode->i_mode)) | ||
312 | return 0; | ||
313 | return xfs_acl_exists(inode, SGI_ACL_DEFAULT); | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * No need for i_mutex because the inode is not yet exposed to the VFS. | ||
318 | */ | ||
319 | int | ||
320 | xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl) | ||
321 | { | ||
322 | struct posix_acl *clone; | ||
323 | mode_t mode; | ||
324 | int error = 0, inherit = 0; | ||
325 | |||
326 | if (S_ISDIR(inode->i_mode)) { | ||
327 | error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl); | ||
328 | if (error) | ||
329 | return error; | ||
330 | } | ||
331 | |||
332 | clone = posix_acl_clone(default_acl, GFP_KERNEL); | ||
333 | if (!clone) | ||
334 | return -ENOMEM; | ||
335 | |||
336 | mode = inode->i_mode; | ||
337 | error = posix_acl_create_masq(clone, &mode); | ||
338 | if (error < 0) | ||
339 | goto out_release_clone; | ||
340 | |||
341 | /* | ||
342 | * If posix_acl_create_masq returns a positive value we need to | ||
343 | * inherit a permission that can't be represented using the Unix | ||
344 | * mode bits and we actually need to set an ACL. | ||
345 | */ | ||
346 | if (error > 0) | ||
347 | inherit = 1; | ||
348 | |||
349 | error = xfs_set_mode(inode, mode); | ||
350 | if (error) | ||
351 | goto out_release_clone; | ||
352 | |||
353 | if (inherit) | ||
354 | error = xfs_set_acl(inode, ACL_TYPE_ACCESS, clone); | ||
355 | |||
356 | out_release_clone: | ||
357 | posix_acl_release(clone); | ||
358 | return error; | ||
359 | } | ||
360 | |||
361 | int | ||
362 | xfs_acl_chmod(struct inode *inode) | ||
363 | { | ||
364 | struct posix_acl *acl, *clone; | ||
365 | int error; | ||
366 | |||
367 | if (S_ISLNK(inode->i_mode)) | ||
368 | return -EOPNOTSUPP; | ||
369 | |||
370 | acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); | ||
371 | if (IS_ERR(acl) || !acl) | ||
372 | return PTR_ERR(acl); | ||
373 | |||
374 | clone = posix_acl_clone(acl, GFP_KERNEL); | ||
375 | posix_acl_release(acl); | ||
376 | if (!clone) | ||
377 | return -ENOMEM; | ||
378 | |||
379 | error = posix_acl_chmod_masq(clone, inode->i_mode); | ||
380 | if (!error) | ||
381 | error = xfs_set_acl(inode, ACL_TYPE_ACCESS, clone); | ||
382 | |||
383 | posix_acl_release(clone); | ||
384 | return error; | ||
385 | } | ||
386 | |||
387 | void | ||
388 | xfs_inode_init_acls(struct xfs_inode *ip) | ||
389 | { | ||
390 | /* | ||
391 | * No need for locking, inode is not live yet. | ||
392 | */ | ||
393 | ip->i_acl = XFS_ACL_NOT_CACHED; | ||
394 | ip->i_default_acl = XFS_ACL_NOT_CACHED; | ||
395 | } | ||
396 | |||
397 | void | ||
398 | xfs_inode_clear_acls(struct xfs_inode *ip) | ||
399 | { | ||
400 | /* | ||
401 | * No need for locking here, the inode is not live anymore | ||
402 | * and just about to be freed. | ||
403 | */ | ||
404 | if (ip->i_acl != XFS_ACL_NOT_CACHED) | ||
405 | posix_acl_release(ip->i_acl); | ||
406 | if (ip->i_default_acl != XFS_ACL_NOT_CACHED) | ||
407 | posix_acl_release(ip->i_default_acl); | ||
408 | } | ||
409 | |||
410 | |||
411 | /* | ||
412 | * System xattr handlers. | ||
413 | * | ||
414 | * Currently Posix ACLs are the only system namespace extended attribute | ||
415 | * handlers supported by XFS, so we just implement the handlers here. | ||
416 | * If we ever support other system extended attributes this will need | ||
417 | * some refactoring. | ||
418 | */ | ||
419 | |||
420 | static int | ||
421 | xfs_decode_acl(const char *name) | ||
422 | { | ||
423 | if (strcmp(name, "posix_acl_access") == 0) | ||
424 | return ACL_TYPE_ACCESS; | ||
425 | else if (strcmp(name, "posix_acl_default") == 0) | ||
426 | return ACL_TYPE_DEFAULT; | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | |||
430 | static int | ||
431 | xfs_xattr_system_get(struct inode *inode, const char *name, | ||
432 | void *value, size_t size) | ||
433 | { | ||
434 | struct posix_acl *acl; | ||
435 | int type, error; | ||
436 | |||
437 | type = xfs_decode_acl(name); | ||
438 | if (type < 0) | ||
439 | return type; | ||
440 | |||
441 | acl = xfs_get_acl(inode, type); | ||
442 | if (IS_ERR(acl)) | ||
443 | return PTR_ERR(acl); | ||
444 | if (acl == NULL) | ||
445 | return -ENODATA; | ||
446 | |||
447 | error = posix_acl_to_xattr(acl, value, size); | ||
448 | posix_acl_release(acl); | ||
449 | |||
450 | return error; | ||
451 | } | ||
452 | |||
453 | static int | ||
454 | xfs_xattr_system_set(struct inode *inode, const char *name, | ||
455 | const void *value, size_t size, int flags) | ||
456 | { | ||
457 | struct posix_acl *acl = NULL; | ||
458 | int error = 0, type; | ||
459 | |||
460 | type = xfs_decode_acl(name); | ||
461 | if (type < 0) | ||
462 | return type; | ||
463 | if (flags & XATTR_CREATE) | ||
464 | return -EINVAL; | ||
465 | if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) | ||
466 | return value ? -EACCES : 0; | ||
467 | if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER)) | ||
468 | return -EPERM; | ||
469 | |||
470 | if (!value) | ||
471 | goto set_acl; | ||
472 | |||
473 | acl = posix_acl_from_xattr(value, size); | ||
474 | if (!acl) { | ||
475 | /* | ||
476 | * acl_set_file(3) may request that we set default ACLs with | ||
477 | * zero length -- defend (gracefully) against that here. | ||
478 | */ | ||
479 | goto out; | ||
480 | } | ||
481 | if (IS_ERR(acl)) { | ||
482 | error = PTR_ERR(acl); | ||
483 | goto out; | ||
484 | } | ||
485 | |||
486 | error = posix_acl_valid(acl); | ||
487 | if (error) | ||
488 | goto out_release; | ||
489 | |||
490 | error = -EINVAL; | ||
491 | if (acl->a_count > XFS_ACL_MAX_ENTRIES) | ||
492 | goto out_release; | ||
493 | |||
494 | if (type == ACL_TYPE_ACCESS) { | ||
495 | mode_t mode = inode->i_mode; | ||
496 | error = posix_acl_equiv_mode(acl, &mode); | ||
497 | |||
498 | if (error <= 0) { | ||
499 | posix_acl_release(acl); | ||
500 | acl = NULL; | ||
501 | |||
502 | if (error < 0) | ||
503 | return error; | ||
504 | } | ||
505 | |||
506 | error = xfs_set_mode(inode, mode); | ||
507 | if (error) | ||
508 | goto out_release; | ||
509 | } | ||
510 | |||
511 | set_acl: | ||
512 | error = xfs_set_acl(inode, type, acl); | ||
513 | out_release: | ||
514 | posix_acl_release(acl); | ||
515 | out: | ||
516 | return error; | ||
517 | } | ||
518 | |||
519 | struct xattr_handler xfs_xattr_system_handler = { | ||
520 | .prefix = XATTR_SYSTEM_PREFIX, | ||
521 | .get = xfs_xattr_system_get, | ||
522 | .set = xfs_xattr_system_set, | ||
523 | }; | ||
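xfs_xattr_system_handler at the end of the new file is what exposes these ACLs as the system.posix_acl_access and system.posix_acl_default extended attributes. A sketch of how such a handler is wired up in this kernel generation, assuming the xfs_xattr_handlers[] table in xfs_xattr.c (not part of this hunk) that gets assigned to sb->s_xattr at mount time:

    /* Sketch only: entry added to the existing handler table in xfs_xattr.c. */
    struct xattr_handler *xfs_xattr_handlers[] = {
            &xfs_xattr_user_handler,
            &xfs_xattr_trusted_handler,
            &xfs_xattr_security_handler,
            &xfs_xattr_system_handler,      /* system.posix_acl_* -> xfs_{get,set}_acl */
            NULL
    };

From user space, getfacl/setfacl then reach xfs_xattr_system_get()/xfs_xattr_system_set() through the generic system-namespace xattr code.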
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 34eaab608e6e..5bb523d7f37e 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include "xfs_itable.h" | 41 | #include "xfs_itable.h" |
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_rw.h" | 43 | #include "xfs_rw.h" |
44 | #include "xfs_acl.h" | ||
45 | #include "xfs_attr.h" | 44 | #include "xfs_attr.h" |
46 | #include "xfs_bmap.h" | 45 | #include "xfs_bmap.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
@@ -899,7 +898,8 @@ xfs_ioctl_setattr( | |||
899 | struct xfs_mount *mp = ip->i_mount; | 898 | struct xfs_mount *mp = ip->i_mount; |
900 | struct xfs_trans *tp; | 899 | struct xfs_trans *tp; |
901 | unsigned int lock_flags = 0; | 900 | unsigned int lock_flags = 0; |
902 | struct xfs_dquot *udqp = NULL, *gdqp = NULL; | 901 | struct xfs_dquot *udqp = NULL; |
902 | struct xfs_dquot *gdqp = NULL; | ||
903 | struct xfs_dquot *olddquot = NULL; | 903 | struct xfs_dquot *olddquot = NULL; |
904 | int code; | 904 | int code; |
905 | 905 | ||
@@ -919,7 +919,7 @@ xfs_ioctl_setattr( | |||
919 | * because the i_*dquot fields will get updated anyway. | 919 | * because the i_*dquot fields will get updated anyway. |
920 | */ | 920 | */ |
921 | if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { | 921 | if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { |
922 | code = XFS_QM_DQVOPALLOC(mp, ip, ip->i_d.di_uid, | 922 | code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid, |
923 | ip->i_d.di_gid, fa->fsx_projid, | 923 | ip->i_d.di_gid, fa->fsx_projid, |
924 | XFS_QMOPT_PQUOTA, &udqp, &gdqp); | 924 | XFS_QMOPT_PQUOTA, &udqp, &gdqp); |
925 | if (code) | 925 | if (code) |
@@ -954,10 +954,11 @@ xfs_ioctl_setattr( | |||
954 | * Do a quota reservation only if projid is actually going to change. | 954 | * Do a quota reservation only if projid is actually going to change. |
955 | */ | 955 | */ |
956 | if (mask & FSX_PROJID) { | 956 | if (mask & FSX_PROJID) { |
957 | if (XFS_IS_PQUOTA_ON(mp) && | 957 | if (XFS_IS_QUOTA_RUNNING(mp) && |
958 | XFS_IS_PQUOTA_ON(mp) && | ||
958 | ip->i_d.di_projid != fa->fsx_projid) { | 959 | ip->i_d.di_projid != fa->fsx_projid) { |
959 | ASSERT(tp); | 960 | ASSERT(tp); |
960 | code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, | 961 | code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, |
961 | capable(CAP_FOWNER) ? | 962 | capable(CAP_FOWNER) ? |
962 | XFS_QMOPT_FORCE_RES : 0); | 963 | XFS_QMOPT_FORCE_RES : 0); |
963 | if (code) /* out of quota */ | 964 | if (code) /* out of quota */ |
@@ -1059,8 +1060,8 @@ xfs_ioctl_setattr( | |||
1059 | * in the transaction. | 1060 | * in the transaction. |
1060 | */ | 1061 | */ |
1061 | if (ip->i_d.di_projid != fa->fsx_projid) { | 1062 | if (ip->i_d.di_projid != fa->fsx_projid) { |
1062 | if (XFS_IS_PQUOTA_ON(mp)) { | 1063 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { |
1063 | olddquot = XFS_QM_DQVOPCHOWN(mp, tp, ip, | 1064 | olddquot = xfs_qm_vop_chown(tp, ip, |
1064 | &ip->i_gdquot, gdqp); | 1065 | &ip->i_gdquot, gdqp); |
1065 | } | 1066 | } |
1066 | ip->i_d.di_projid = fa->fsx_projid; | 1067 | ip->i_d.di_projid = fa->fsx_projid; |
@@ -1106,9 +1107,9 @@ xfs_ioctl_setattr( | |||
1106 | /* | 1107 | /* |
1107 | * Release any dquot(s) the inode had kept before chown. | 1108 | * Release any dquot(s) the inode had kept before chown. |
1108 | */ | 1109 | */ |
1109 | XFS_QM_DQRELE(mp, olddquot); | 1110 | xfs_qm_dqrele(olddquot); |
1110 | XFS_QM_DQRELE(mp, udqp); | 1111 | xfs_qm_dqrele(udqp); |
1111 | XFS_QM_DQRELE(mp, gdqp); | 1112 | xfs_qm_dqrele(gdqp); |
1112 | 1113 | ||
1113 | if (code) | 1114 | if (code) |
1114 | return code; | 1115 | return code; |
@@ -1122,8 +1123,8 @@ xfs_ioctl_setattr( | |||
1122 | return 0; | 1123 | return 0; |
1123 | 1124 | ||
1124 | error_return: | 1125 | error_return: |
1125 | XFS_QM_DQRELE(mp, udqp); | 1126 | xfs_qm_dqrele(udqp); |
1126 | XFS_QM_DQRELE(mp, gdqp); | 1127 | xfs_qm_dqrele(gdqp); |
1127 | xfs_trans_cancel(tp, 0); | 1128 | xfs_trans_cancel(tp, 0); |
1128 | if (lock_flags) | 1129 | if (lock_flags) |
1129 | xfs_iunlock(ip, lock_flags); | 1130 | xfs_iunlock(ip, lock_flags); |
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 6075382336d7..58973bb46038 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_fs.h" | 19 | #include "xfs_fs.h" |
20 | #include "xfs_acl.h" | ||
20 | #include "xfs_bit.h" | 21 | #include "xfs_bit.h" |
21 | #include "xfs_log.h" | 22 | #include "xfs_log.h" |
22 | #include "xfs_inum.h" | 23 | #include "xfs_inum.h" |
@@ -51,6 +52,7 @@ | |||
51 | #include <linux/capability.h> | 52 | #include <linux/capability.h> |
52 | #include <linux/xattr.h> | 53 | #include <linux/xattr.h> |
53 | #include <linux/namei.h> | 54 | #include <linux/namei.h> |
55 | #include <linux/posix_acl.h> | ||
54 | #include <linux/security.h> | 56 | #include <linux/security.h> |
55 | #include <linux/falloc.h> | 57 | #include <linux/falloc.h> |
56 | #include <linux/fiemap.h> | 58 | #include <linux/fiemap.h> |
@@ -202,9 +204,8 @@ xfs_vn_mknod( | |||
202 | { | 204 | { |
203 | struct inode *inode; | 205 | struct inode *inode; |
204 | struct xfs_inode *ip = NULL; | 206 | struct xfs_inode *ip = NULL; |
205 | xfs_acl_t *default_acl = NULL; | 207 | struct posix_acl *default_acl = NULL; |
206 | struct xfs_name name; | 208 | struct xfs_name name; |
207 | int (*test_default_acl)(struct inode *) = _ACL_DEFAULT_EXISTS; | ||
208 | int error; | 209 | int error; |
209 | 210 | ||
210 | /* | 211 | /* |
@@ -219,18 +220,14 @@ xfs_vn_mknod( | |||
219 | rdev = 0; | 220 | rdev = 0; |
220 | } | 221 | } |
221 | 222 | ||
222 | if (test_default_acl && test_default_acl(dir)) { | 223 | if (IS_POSIXACL(dir)) { |
223 | if (!_ACL_ALLOC(default_acl)) { | 224 | default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); |
224 | return -ENOMEM; | 225 | if (IS_ERR(default_acl)) |
225 | } | 226 | return -PTR_ERR(default_acl); |
226 | if (!_ACL_GET_DEFAULT(dir, default_acl)) { | ||
227 | _ACL_FREE(default_acl); | ||
228 | default_acl = NULL; | ||
229 | } | ||
230 | } | ||
231 | 227 | ||
232 | if (IS_POSIXACL(dir) && !default_acl) | 228 | if (!default_acl) |
233 | mode &= ~current_umask(); | 229 | mode &= ~current_umask(); |
230 | } | ||
234 | 231 | ||
235 | xfs_dentry_to_name(&name, dentry); | 232 | xfs_dentry_to_name(&name, dentry); |
236 | error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL); | 233 | error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL); |
@@ -244,10 +241,10 @@ xfs_vn_mknod( | |||
244 | goto out_cleanup_inode; | 241 | goto out_cleanup_inode; |
245 | 242 | ||
246 | if (default_acl) { | 243 | if (default_acl) { |
247 | error = _ACL_INHERIT(inode, mode, default_acl); | 244 | error = -xfs_inherit_acl(inode, default_acl); |
248 | if (unlikely(error)) | 245 | if (unlikely(error)) |
249 | goto out_cleanup_inode; | 246 | goto out_cleanup_inode; |
250 | _ACL_FREE(default_acl); | 247 | posix_acl_release(default_acl); |
251 | } | 248 | } |
252 | 249 | ||
253 | 250 | ||
@@ -257,8 +254,7 @@ xfs_vn_mknod( | |||
257 | out_cleanup_inode: | 254 | out_cleanup_inode: |
258 | xfs_cleanup_inode(dir, inode, dentry); | 255 | xfs_cleanup_inode(dir, inode, dentry); |
259 | out_free_acl: | 256 | out_free_acl: |
260 | if (default_acl) | 257 | posix_acl_release(default_acl); |
261 | _ACL_FREE(default_acl); | ||
262 | return -error; | 258 | return -error; |
263 | } | 259 | } |
264 | 260 | ||
@@ -488,26 +484,6 @@ xfs_vn_put_link( | |||
488 | kfree(s); | 484 | kfree(s); |
489 | } | 485 | } |
490 | 486 | ||
491 | #ifdef CONFIG_XFS_POSIX_ACL | ||
492 | STATIC int | ||
493 | xfs_check_acl( | ||
494 | struct inode *inode, | ||
495 | int mask) | ||
496 | { | ||
497 | struct xfs_inode *ip = XFS_I(inode); | ||
498 | int error; | ||
499 | |||
500 | xfs_itrace_entry(ip); | ||
501 | |||
502 | if (XFS_IFORK_Q(ip)) { | ||
503 | error = xfs_acl_iaccess(ip, mask, NULL); | ||
504 | if (error != -1) | ||
505 | return -error; | ||
506 | } | ||
507 | |||
508 | return -EAGAIN; | ||
509 | } | ||
510 | |||
511 | STATIC int | 487 | STATIC int |
512 | xfs_vn_permission( | 488 | xfs_vn_permission( |
513 | struct inode *inode, | 489 | struct inode *inode, |
@@ -515,9 +491,6 @@ xfs_vn_permission( | |||
515 | { | 491 | { |
516 | return generic_permission(inode, mask, xfs_check_acl); | 492 | return generic_permission(inode, mask, xfs_check_acl); |
517 | } | 493 | } |
518 | #else | ||
519 | #define xfs_vn_permission NULL | ||
520 | #endif | ||
521 | 494 | ||
522 | STATIC int | 495 | STATIC int |
523 | xfs_vn_getattr( | 496 | xfs_vn_getattr( |
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 9142192ccbe6..7078974a6eee 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 43 | #include "xfs_itable.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_inode_item.h" | 46 | #include "xfs_inode_item.h" |
48 | #include "xfs_buf_item.h" | 47 | #include "xfs_buf_item.h" |
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index 94d9a633d3d9..cb6e2cca214f 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c | |||
@@ -50,9 +50,11 @@ xfs_fs_quota_sync( | |||
50 | { | 50 | { |
51 | struct xfs_mount *mp = XFS_M(sb); | 51 | struct xfs_mount *mp = XFS_M(sb); |
52 | 52 | ||
53 | if (sb->s_flags & MS_RDONLY) | ||
54 | return -EROFS; | ||
53 | if (!XFS_IS_QUOTA_RUNNING(mp)) | 55 | if (!XFS_IS_QUOTA_RUNNING(mp)) |
54 | return -ENOSYS; | 56 | return -ENOSYS; |
55 | return -xfs_sync_inodes(mp, SYNC_DELWRI); | 57 | return -xfs_sync_data(mp, 0); |
56 | } | 58 | } |
57 | 59 | ||
58 | STATIC int | 60 | STATIC int |
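
One small convention worth noting in the xfs_quotaops.c hunk: core XFS helpers such as xfs_sync_data() return positive errno values, while Linux-facing entry points must return negative ones, hence the leading minus on the final return (the same convention explains return -error and error = -xfs_inherit_acl(...) in the hunks above). Annotated copy of the new body:

	if (sb->s_flags & MS_RDONLY)
		return -EROFS;			/* VFS-facing: negative errno */
	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ENOSYS;
	return -xfs_sync_data(mp, 0);		/* core helper returns a positive errno, negate it */
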
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 08d6bd9a3947..2e09efbca8db 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include "xfs_itable.h" | 43 | #include "xfs_itable.h" |
44 | #include "xfs_fsops.h" | 44 | #include "xfs_fsops.h" |
45 | #include "xfs_rw.h" | 45 | #include "xfs_rw.h" |
46 | #include "xfs_acl.h" | ||
47 | #include "xfs_attr.h" | 46 | #include "xfs_attr.h" |
48 | #include "xfs_buf_item.h" | 47 | #include "xfs_buf_item.h" |
49 | #include "xfs_utils.h" | 48 | #include "xfs_utils.h" |
@@ -405,6 +404,14 @@ xfs_parseargs( | |||
405 | return EINVAL; | 404 | return EINVAL; |
406 | } | 405 | } |
407 | 406 | ||
407 | #ifndef CONFIG_XFS_QUOTA | ||
408 | if (XFS_IS_QUOTA_RUNNING(mp)) { | ||
409 | cmn_err(CE_WARN, | ||
410 | "XFS: quota support not available in this kernel."); | ||
411 | return EINVAL; | ||
412 | } | ||
413 | #endif | ||
414 | |||
408 | if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && | 415 | if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && |
409 | (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { | 416 | (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { |
410 | cmn_err(CE_WARN, | 417 | cmn_err(CE_WARN, |
@@ -1063,7 +1070,18 @@ xfs_fs_put_super( | |||
1063 | int unmount_event_flags = 0; | 1070 | int unmount_event_flags = 0; |
1064 | 1071 | ||
1065 | xfs_syncd_stop(mp); | 1072 | xfs_syncd_stop(mp); |
1066 | xfs_sync_inodes(mp, SYNC_ATTR|SYNC_DELWRI); | 1073 | |
1074 | if (!(sb->s_flags & MS_RDONLY)) { | ||
1075 | /* | ||
1076 | * XXX(hch): this should be SYNC_WAIT. | ||
1077 | * | ||
1078 | * Or more likely not needed at all because the VFS is already | ||
1079 | * calling ->sync_fs after shutting down all filesystem | ||
1080 | * operations and just before calling ->put_super. | ||
1081 | */ | ||
1082 | xfs_sync_data(mp, 0); | ||
1083 | xfs_sync_attr(mp, 0); | ||
1084 | } | ||
1067 | 1085 | ||
1068 | #ifdef HAVE_DMAPI | 1086 | #ifdef HAVE_DMAPI |
1069 | if (mp->m_flags & XFS_MOUNT_DMAPI) { | 1087 | if (mp->m_flags & XFS_MOUNT_DMAPI) { |
@@ -1098,7 +1116,6 @@ xfs_fs_put_super( | |||
1098 | xfs_freesb(mp); | 1116 | xfs_freesb(mp); |
1099 | xfs_icsb_destroy_counters(mp); | 1117 | xfs_icsb_destroy_counters(mp); |
1100 | xfs_close_devices(mp); | 1118 | xfs_close_devices(mp); |
1101 | xfs_qmops_put(mp); | ||
1102 | xfs_dmops_put(mp); | 1119 | xfs_dmops_put(mp); |
1103 | xfs_free_fsname(mp); | 1120 | xfs_free_fsname(mp); |
1104 | kfree(mp); | 1121 | kfree(mp); |
@@ -1158,6 +1175,7 @@ xfs_fs_statfs( | |||
1158 | { | 1175 | { |
1159 | struct xfs_mount *mp = XFS_M(dentry->d_sb); | 1176 | struct xfs_mount *mp = XFS_M(dentry->d_sb); |
1160 | xfs_sb_t *sbp = &mp->m_sb; | 1177 | xfs_sb_t *sbp = &mp->m_sb; |
1178 | struct xfs_inode *ip = XFS_I(dentry->d_inode); | ||
1161 | __uint64_t fakeinos, id; | 1179 | __uint64_t fakeinos, id; |
1162 | xfs_extlen_t lsize; | 1180 | xfs_extlen_t lsize; |
1163 | 1181 | ||
@@ -1186,7 +1204,10 @@ xfs_fs_statfs( | |||
1186 | statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); | 1204 | statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); |
1187 | spin_unlock(&mp->m_sb_lock); | 1205 | spin_unlock(&mp->m_sb_lock); |
1188 | 1206 | ||
1189 | XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp); | 1207 | if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || |
1208 | ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == | ||
1209 | (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) | ||
1210 | xfs_qm_statvfs(ip, statp); | ||
1190 | return 0; | 1211 | return 0; |
1191 | } | 1212 | } |
1192 | 1213 | ||
@@ -1394,16 +1415,13 @@ xfs_fs_fill_super( | |||
1394 | error = xfs_dmops_get(mp); | 1415 | error = xfs_dmops_get(mp); |
1395 | if (error) | 1416 | if (error) |
1396 | goto out_free_fsname; | 1417 | goto out_free_fsname; |
1397 | error = xfs_qmops_get(mp); | ||
1398 | if (error) | ||
1399 | goto out_put_dmops; | ||
1400 | 1418 | ||
1401 | if (silent) | 1419 | if (silent) |
1402 | flags |= XFS_MFSI_QUIET; | 1420 | flags |= XFS_MFSI_QUIET; |
1403 | 1421 | ||
1404 | error = xfs_open_devices(mp); | 1422 | error = xfs_open_devices(mp); |
1405 | if (error) | 1423 | if (error) |
1406 | goto out_put_qmops; | 1424 | goto out_put_dmops; |
1407 | 1425 | ||
1408 | if (xfs_icsb_init_counters(mp)) | 1426 | if (xfs_icsb_init_counters(mp)) |
1409 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; | 1427 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; |
@@ -1471,8 +1489,6 @@ xfs_fs_fill_super( | |||
1471 | out_destroy_counters: | 1489 | out_destroy_counters: |
1472 | xfs_icsb_destroy_counters(mp); | 1490 | xfs_icsb_destroy_counters(mp); |
1473 | xfs_close_devices(mp); | 1491 | xfs_close_devices(mp); |
1474 | out_put_qmops: | ||
1475 | xfs_qmops_put(mp); | ||
1476 | out_put_dmops: | 1492 | out_put_dmops: |
1477 | xfs_dmops_put(mp); | 1493 | xfs_dmops_put(mp); |
1478 | out_free_fsname: | 1494 | out_free_fsname: |
@@ -1706,18 +1722,8 @@ xfs_init_zones(void) | |||
1706 | if (!xfs_ili_zone) | 1722 | if (!xfs_ili_zone) |
1707 | goto out_destroy_inode_zone; | 1723 | goto out_destroy_inode_zone; |
1708 | 1724 | ||
1709 | #ifdef CONFIG_XFS_POSIX_ACL | ||
1710 | xfs_acl_zone = kmem_zone_init(sizeof(xfs_acl_t), "xfs_acl"); | ||
1711 | if (!xfs_acl_zone) | ||
1712 | goto out_destroy_ili_zone; | ||
1713 | #endif | ||
1714 | |||
1715 | return 0; | 1725 | return 0; |
1716 | 1726 | ||
1717 | #ifdef CONFIG_XFS_POSIX_ACL | ||
1718 | out_destroy_ili_zone: | ||
1719 | #endif | ||
1720 | kmem_zone_destroy(xfs_ili_zone); | ||
1721 | out_destroy_inode_zone: | 1727 | out_destroy_inode_zone: |
1722 | kmem_zone_destroy(xfs_inode_zone); | 1728 | kmem_zone_destroy(xfs_inode_zone); |
1723 | out_destroy_efi_zone: | 1729 | out_destroy_efi_zone: |
@@ -1751,9 +1757,6 @@ xfs_init_zones(void) | |||
1751 | STATIC void | 1757 | STATIC void |
1752 | xfs_destroy_zones(void) | 1758 | xfs_destroy_zones(void) |
1753 | { | 1759 | { |
1754 | #ifdef CONFIG_XFS_POSIX_ACL | ||
1755 | kmem_zone_destroy(xfs_acl_zone); | ||
1756 | #endif | ||
1757 | kmem_zone_destroy(xfs_ili_zone); | 1760 | kmem_zone_destroy(xfs_ili_zone); |
1758 | kmem_zone_destroy(xfs_inode_zone); | 1761 | kmem_zone_destroy(xfs_inode_zone); |
1759 | kmem_zone_destroy(xfs_efi_zone); | 1762 | kmem_zone_destroy(xfs_efi_zone); |
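
For the xfs_super.c changes above, the unmount path is the easiest place to see the new split sync interface in use: writeback in ->put_super is skipped for read-only mounts and is expressed as one pagecache pass plus one inode-metadata pass. A condensed sketch, with the surrounding DMAPI and log-unmount code elided (kernel context assumed):

	xfs_syncd_stop(mp);			/* stop the periodic sync thread first */

	if (!(sb->s_flags & MS_RDONLY)) {
		xfs_sync_data(mp, 0);		/* write out dirty pagecache data */
		xfs_sync_attr(mp, 0);		/* write out dirty inode cores (attributes) */
	}
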
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index f7ba76633c29..b619d6b8ca43 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -43,166 +43,267 @@ | |||
43 | #include "xfs_buf_item.h" | 43 | #include "xfs_buf_item.h" |
44 | #include "xfs_inode_item.h" | 44 | #include "xfs_inode_item.h" |
45 | #include "xfs_rw.h" | 45 | #include "xfs_rw.h" |
46 | #include "xfs_quota.h" | ||
46 | 47 | ||
47 | #include <linux/kthread.h> | 48 | #include <linux/kthread.h> |
48 | #include <linux/freezer.h> | 49 | #include <linux/freezer.h> |
49 | 50 | ||
50 | /* | ||
51 | * Sync all the inodes in the given AG according to the | ||
52 | * direction given by the flags. | ||
53 | */ | ||
54 | STATIC int | ||
55 | xfs_sync_inodes_ag( | ||
56 | xfs_mount_t *mp, | ||
57 | int ag, | ||
58 | int flags) | ||
59 | { | ||
60 | xfs_perag_t *pag = &mp->m_perag[ag]; | ||
61 | int nr_found; | ||
62 | uint32_t first_index = 0; | ||
63 | int error = 0; | ||
64 | int last_error = 0; | ||
65 | 51 | ||
66 | do { | 52 | STATIC xfs_inode_t * |
67 | struct inode *inode; | 53 | xfs_inode_ag_lookup( |
68 | xfs_inode_t *ip = NULL; | 54 | struct xfs_mount *mp, |
69 | int lock_flags = XFS_ILOCK_SHARED; | 55 | struct xfs_perag *pag, |
56 | uint32_t *first_index, | ||
57 | int tag) | ||
58 | { | ||
59 | int nr_found; | ||
60 | struct xfs_inode *ip; | ||
70 | 61 | ||
71 | /* | 62 | /* |
72 | * use a gang lookup to find the next inode in the tree | 63 | * use a gang lookup to find the next inode in the tree |
73 | * as the tree is sparse and a gang lookup walks to find | 64 | * as the tree is sparse and a gang lookup walks to find |
74 | * the number of objects requested. | 65 | * the number of objects requested. |
75 | */ | 66 | */ |
76 | read_lock(&pag->pag_ici_lock); | 67 | read_lock(&pag->pag_ici_lock); |
68 | if (tag == XFS_ICI_NO_TAG) { | ||
77 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, | 69 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, |
78 | (void**)&ip, first_index, 1); | 70 | (void **)&ip, *first_index, 1); |
71 | } else { | ||
72 | nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, | ||
73 | (void **)&ip, *first_index, 1, tag); | ||
74 | } | ||
75 | if (!nr_found) | ||
76 | goto unlock; | ||
79 | 77 | ||
80 | if (!nr_found) { | 78 | /* |
81 | read_unlock(&pag->pag_ici_lock); | 79 | * Update the index for the next lookup. Catch overflows |
82 | break; | 80 | * into the next AG range which can occur if we have inodes |
83 | } | 81 | * in the last block of the AG and we are currently |
82 | * pointing to the last inode. | ||
83 | */ | ||
84 | *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
85 | if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | ||
86 | goto unlock; | ||
84 | 87 | ||
85 | /* | 88 | return ip; |
86 | * Update the index for the next lookup. Catch overflows | ||
87 | * into the next AG range which can occur if we have inodes | ||
88 | * in the last block of the AG and we are currently | ||
89 | * pointing to the last inode. | ||
90 | */ | ||
91 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
92 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { | ||
93 | read_unlock(&pag->pag_ici_lock); | ||
94 | break; | ||
95 | } | ||
96 | 89 | ||
97 | /* nothing to sync during shutdown */ | 90 | unlock: |
98 | if (XFS_FORCED_SHUTDOWN(mp)) { | 91 | read_unlock(&pag->pag_ici_lock); |
99 | read_unlock(&pag->pag_ici_lock); | 92 | return NULL; |
100 | return 0; | 93 | } |
101 | } | ||
102 | 94 | ||
103 | /* | 95 | STATIC int |
104 | * If we can't get a reference on the inode, it must be | 96 | xfs_inode_ag_walk( |
105 | * in reclaim. Leave it for the reclaim code to flush. | 97 | struct xfs_mount *mp, |
106 | */ | 98 | xfs_agnumber_t ag, |
107 | inode = VFS_I(ip); | 99 | int (*execute)(struct xfs_inode *ip, |
108 | if (!igrab(inode)) { | 100 | struct xfs_perag *pag, int flags), |
109 | read_unlock(&pag->pag_ici_lock); | 101 | int flags, |
110 | continue; | 102 | int tag) |
111 | } | 103 | { |
112 | read_unlock(&pag->pag_ici_lock); | 104 | struct xfs_perag *pag = &mp->m_perag[ag]; |
105 | uint32_t first_index; | ||
106 | int last_error = 0; | ||
107 | int skipped; | ||
113 | 108 | ||
114 | /* avoid new or bad inodes */ | 109 | restart: |
115 | if (is_bad_inode(inode) || | 110 | skipped = 0; |
116 | xfs_iflags_test(ip, XFS_INEW)) { | 111 | first_index = 0; |
117 | IRELE(ip); | 112 | do { |
118 | continue; | 113 | int error = 0; |
119 | } | 114 | xfs_inode_t *ip; |
120 | 115 | ||
121 | /* | 116 | ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag); |
122 | * If we have to flush data or wait for I/O completion | 117 | if (!ip) |
123 | * we need to hold the iolock. | 118 | break; |
124 | */ | ||
125 | if (flags & SYNC_DELWRI) { | ||
126 | if (VN_DIRTY(inode)) { | ||
127 | if (flags & SYNC_TRYLOCK) { | ||
128 | if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) | ||
129 | lock_flags |= XFS_IOLOCK_SHARED; | ||
130 | } else { | ||
131 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | ||
132 | lock_flags |= XFS_IOLOCK_SHARED; | ||
133 | } | ||
134 | if (lock_flags & XFS_IOLOCK_SHARED) { | ||
135 | error = xfs_flush_pages(ip, 0, -1, | ||
136 | (flags & SYNC_WAIT) ? 0 | ||
137 | : XFS_B_ASYNC, | ||
138 | FI_NONE); | ||
139 | } | ||
140 | } | ||
141 | if (VN_CACHED(inode) && (flags & SYNC_IOWAIT)) | ||
142 | xfs_ioend_wait(ip); | ||
143 | } | ||
144 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
145 | |||
146 | if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) { | ||
147 | if (flags & SYNC_WAIT) { | ||
148 | xfs_iflock(ip); | ||
149 | if (!xfs_inode_clean(ip)) | ||
150 | error = xfs_iflush(ip, XFS_IFLUSH_SYNC); | ||
151 | else | ||
152 | xfs_ifunlock(ip); | ||
153 | } else if (xfs_iflock_nowait(ip)) { | ||
154 | if (!xfs_inode_clean(ip)) | ||
155 | error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); | ||
156 | else | ||
157 | xfs_ifunlock(ip); | ||
158 | } | ||
159 | } | ||
160 | xfs_iput(ip, lock_flags); | ||
161 | 119 | ||
120 | error = execute(ip, pag, flags); | ||
121 | if (error == EAGAIN) { | ||
122 | skipped++; | ||
123 | continue; | ||
124 | } | ||
162 | if (error) | 125 | if (error) |
163 | last_error = error; | 126 | last_error = error; |
164 | /* | 127 | /* |
165 | * bail out if the filesystem is corrupted. | 128 | * bail out if the filesystem is corrupted. |
166 | */ | 129 | */ |
167 | if (error == EFSCORRUPTED) | 130 | if (error == EFSCORRUPTED) |
168 | return XFS_ERROR(error); | 131 | break; |
169 | 132 | ||
170 | } while (nr_found); | 133 | } while (1); |
134 | |||
135 | if (skipped) { | ||
136 | delay(1); | ||
137 | goto restart; | ||
138 | } | ||
171 | 139 | ||
140 | xfs_put_perag(mp, pag); | ||
172 | return last_error; | 141 | return last_error; |
173 | } | 142 | } |
174 | 143 | ||
175 | int | 144 | int |
176 | xfs_sync_inodes( | 145 | xfs_inode_ag_iterator( |
177 | xfs_mount_t *mp, | 146 | struct xfs_mount *mp, |
178 | int flags) | 147 | int (*execute)(struct xfs_inode *ip, |
148 | struct xfs_perag *pag, int flags), | ||
149 | int flags, | ||
150 | int tag) | ||
179 | { | 151 | { |
180 | int error; | 152 | int error = 0; |
181 | int last_error; | 153 | int last_error = 0; |
182 | int i; | 154 | xfs_agnumber_t ag; |
183 | int lflags = XFS_LOG_FORCE; | ||
184 | 155 | ||
185 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 156 | for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { |
186 | return 0; | 157 | if (!mp->m_perag[ag].pag_ici_init) |
187 | error = 0; | 158 | continue; |
188 | last_error = 0; | 159 | error = xfs_inode_ag_walk(mp, ag, execute, flags, tag); |
160 | if (error) { | ||
161 | last_error = error; | ||
162 | if (error == EFSCORRUPTED) | ||
163 | break; | ||
164 | } | ||
165 | } | ||
166 | return XFS_ERROR(last_error); | ||
167 | } | ||
168 | |||
169 | /* must be called with pag_ici_lock held; releases that lock before returning */ | ||
170 | int | ||
171 | xfs_sync_inode_valid( | ||
172 | struct xfs_inode *ip, | ||
173 | struct xfs_perag *pag) | ||
174 | { | ||
175 | struct inode *inode = VFS_I(ip); | ||
176 | |||
177 | /* nothing to sync during shutdown */ | ||
178 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { | ||
179 | read_unlock(&pag->pag_ici_lock); | ||
180 | return EFSCORRUPTED; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * If we can't get a reference on the inode, it must be in reclaim. | ||
185 | * Leave it for the reclaim code to flush. Also avoid inodes that | ||
186 | * haven't been fully initialised. | ||
187 | */ | ||
188 | if (!igrab(inode)) { | ||
189 | read_unlock(&pag->pag_ici_lock); | ||
190 | return ENOENT; | ||
191 | } | ||
192 | read_unlock(&pag->pag_ici_lock); | ||
193 | |||
194 | if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) { | ||
195 | IRELE(ip); | ||
196 | return ENOENT; | ||
197 | } | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | STATIC int | ||
203 | xfs_sync_inode_data( | ||
204 | struct xfs_inode *ip, | ||
205 | struct xfs_perag *pag, | ||
206 | int flags) | ||
207 | { | ||
208 | struct inode *inode = VFS_I(ip); | ||
209 | struct address_space *mapping = inode->i_mapping; | ||
210 | int error = 0; | ||
211 | |||
212 | error = xfs_sync_inode_valid(ip, pag); | ||
213 | if (error) | ||
214 | return error; | ||
215 | |||
216 | if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) | ||
217 | goto out_wait; | ||
218 | |||
219 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { | ||
220 | if (flags & SYNC_TRYLOCK) | ||
221 | goto out_wait; | ||
222 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | ||
223 | } | ||
224 | |||
225 | error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ? | ||
226 | 0 : XFS_B_ASYNC, FI_NONE); | ||
227 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||
189 | 228 | ||
229 | out_wait: | ||
190 | if (flags & SYNC_WAIT) | 230 | if (flags & SYNC_WAIT) |
191 | lflags |= XFS_LOG_SYNC; | 231 | xfs_ioend_wait(ip); |
232 | IRELE(ip); | ||
233 | return error; | ||
234 | } | ||
192 | 235 | ||
193 | for (i = 0; i < mp->m_sb.sb_agcount; i++) { | 236 | STATIC int |
194 | if (!mp->m_perag[i].pag_ici_init) | 237 | xfs_sync_inode_attr( |
195 | continue; | 238 | struct xfs_inode *ip, |
196 | error = xfs_sync_inodes_ag(mp, i, flags); | 239 | struct xfs_perag *pag, |
197 | if (error) | 240 | int flags) |
198 | last_error = error; | 241 | { |
199 | if (error == EFSCORRUPTED) | 242 | int error = 0; |
200 | break; | 243 | |
244 | error = xfs_sync_inode_valid(ip, pag); | ||
245 | if (error) | ||
246 | return error; | ||
247 | |||
248 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
249 | if (xfs_inode_clean(ip)) | ||
250 | goto out_unlock; | ||
251 | if (!xfs_iflock_nowait(ip)) { | ||
252 | if (!(flags & SYNC_WAIT)) | ||
253 | goto out_unlock; | ||
254 | xfs_iflock(ip); | ||
201 | } | 255 | } |
202 | if (flags & SYNC_DELWRI) | ||
203 | xfs_log_force(mp, 0, lflags); | ||
204 | 256 | ||
205 | return XFS_ERROR(last_error); | 257 | if (xfs_inode_clean(ip)) { |
258 | xfs_ifunlock(ip); | ||
259 | goto out_unlock; | ||
260 | } | ||
261 | |||
262 | error = xfs_iflush(ip, (flags & SYNC_WAIT) ? | ||
263 | XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI); | ||
264 | |||
265 | out_unlock: | ||
266 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
267 | IRELE(ip); | ||
268 | return error; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Write out pagecache data for the whole filesystem. | ||
273 | */ | ||
274 | int | ||
275 | xfs_sync_data( | ||
276 | struct xfs_mount *mp, | ||
277 | int flags) | ||
278 | { | ||
279 | int error; | ||
280 | |||
281 | ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); | ||
282 | |||
283 | error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, | ||
284 | XFS_ICI_NO_TAG); | ||
285 | if (error) | ||
286 | return XFS_ERROR(error); | ||
287 | |||
288 | xfs_log_force(mp, 0, | ||
289 | (flags & SYNC_WAIT) ? | ||
290 | XFS_LOG_FORCE | XFS_LOG_SYNC : | ||
291 | XFS_LOG_FORCE); | ||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Write out inode metadata (attributes) for the whole filesystem. | ||
297 | */ | ||
298 | int | ||
299 | xfs_sync_attr( | ||
300 | struct xfs_mount *mp, | ||
301 | int flags) | ||
302 | { | ||
303 | ASSERT((flags & ~SYNC_WAIT) == 0); | ||
304 | |||
305 | return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, | ||
306 | XFS_ICI_NO_TAG); | ||
206 | } | 307 | } |
207 | 308 | ||
208 | STATIC int | 309 | STATIC int |
@@ -252,7 +353,7 @@ xfs_sync_fsdata( | |||
252 | * If this is xfssyncd() then only sync the superblock if we can | 353 | * If this is xfssyncd() then only sync the superblock if we can |
253 | * lock it without sleeping and it is not pinned. | 354 | * lock it without sleeping and it is not pinned. |
254 | */ | 355 | */ |
255 | if (flags & SYNC_BDFLUSH) { | 356 | if (flags & SYNC_TRYLOCK) { |
256 | ASSERT(!(flags & SYNC_WAIT)); | 357 | ASSERT(!(flags & SYNC_WAIT)); |
257 | 358 | ||
258 | bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); | 359 | bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); |
@@ -316,13 +417,13 @@ xfs_quiesce_data( | |||
316 | int error; | 417 | int error; |
317 | 418 | ||
318 | /* push non-blocking */ | 419 | /* push non-blocking */ |
319 | xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH); | 420 | xfs_sync_data(mp, 0); |
320 | XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); | 421 | xfs_qm_sync(mp, SYNC_TRYLOCK); |
321 | xfs_filestream_flush(mp); | 422 | xfs_filestream_flush(mp); |
322 | 423 | ||
323 | /* push and block */ | 424 | /* push and block */ |
324 | xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT); | 425 | xfs_sync_data(mp, SYNC_WAIT); |
325 | XFS_QM_DQSYNC(mp, SYNC_WAIT); | 426 | xfs_qm_sync(mp, SYNC_WAIT); |
326 | 427 | ||
327 | /* write superblock and hoover up shutdown errors */ | 428 | /* write superblock and hoover up shutdown errors */ |
328 | error = xfs_sync_fsdata(mp, 0); | 429 | error = xfs_sync_fsdata(mp, 0); |
@@ -341,7 +442,7 @@ xfs_quiesce_fs( | |||
341 | int count = 0, pincount; | 442 | int count = 0, pincount; |
342 | 443 | ||
343 | xfs_flush_buftarg(mp->m_ddev_targp, 0); | 444 | xfs_flush_buftarg(mp->m_ddev_targp, 0); |
344 | xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); | 445 | xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC); |
345 | 446 | ||
346 | /* | 447 | /* |
347 | * This loop must run at least twice. The first instance of the loop | 448 | * This loop must run at least twice. The first instance of the loop |
@@ -350,7 +451,7 @@ xfs_quiesce_fs( | |||
350 | * logged before we can write the unmount record. | 451 | * logged before we can write the unmount record. |
351 | */ | 452 | */ |
352 | do { | 453 | do { |
353 | xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); | 454 | xfs_sync_attr(mp, SYNC_WAIT); |
354 | pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); | 455 | pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); |
355 | if (!pincount) { | 456 | if (!pincount) { |
356 | delay(50); | 457 | delay(50); |
@@ -433,8 +534,8 @@ xfs_flush_inodes_work( | |||
433 | void *arg) | 534 | void *arg) |
434 | { | 535 | { |
435 | struct inode *inode = arg; | 536 | struct inode *inode = arg; |
436 | xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK); | 537 | xfs_sync_data(mp, SYNC_TRYLOCK); |
437 | xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT); | 538 | xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT); |
438 | iput(inode); | 539 | iput(inode); |
439 | } | 540 | } |
440 | 541 | ||
@@ -465,10 +566,10 @@ xfs_sync_worker( | |||
465 | 566 | ||
466 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { | 567 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { |
467 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); | 568 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); |
468 | xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); | 569 | xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC); |
469 | /* dgc: errors ignored here */ | 570 | /* dgc: errors ignored here */ |
470 | error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); | 571 | error = xfs_qm_sync(mp, SYNC_TRYLOCK); |
471 | error = xfs_sync_fsdata(mp, SYNC_BDFLUSH); | 572 | error = xfs_sync_fsdata(mp, SYNC_TRYLOCK); |
472 | if (xfs_log_need_covered(mp)) | 573 | if (xfs_log_need_covered(mp)) |
473 | error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE); | 574 | error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE); |
474 | } | 575 | } |
@@ -569,7 +670,7 @@ xfs_reclaim_inode( | |||
569 | xfs_ifunlock(ip); | 670 | xfs_ifunlock(ip); |
570 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 671 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
571 | } | 672 | } |
572 | return 1; | 673 | return -EAGAIN; |
573 | } | 674 | } |
574 | __xfs_iflags_set(ip, XFS_IRECLAIM); | 675 | __xfs_iflags_set(ip, XFS_IRECLAIM); |
575 | spin_unlock(&ip->i_flags_lock); | 676 | spin_unlock(&ip->i_flags_lock); |
@@ -654,101 +755,27 @@ xfs_inode_clear_reclaim_tag( | |||
654 | xfs_put_perag(mp, pag); | 755 | xfs_put_perag(mp, pag); |
655 | } | 756 | } |
656 | 757 | ||
657 | 758 | STATIC int | |
658 | STATIC void | 759 | xfs_reclaim_inode_now( |
659 | xfs_reclaim_inodes_ag( | 760 | struct xfs_inode *ip, |
660 | xfs_mount_t *mp, | 761 | struct xfs_perag *pag, |
661 | int ag, | 762 | int flags) |
662 | int noblock, | ||
663 | int mode) | ||
664 | { | 763 | { |
665 | xfs_inode_t *ip = NULL; | 764 | /* ignore if already under reclaim */ |
666 | xfs_perag_t *pag = &mp->m_perag[ag]; | 765 | if (xfs_iflags_test(ip, XFS_IRECLAIM)) { |
667 | int nr_found; | ||
668 | uint32_t first_index; | ||
669 | int skipped; | ||
670 | |||
671 | restart: | ||
672 | first_index = 0; | ||
673 | skipped = 0; | ||
674 | do { | ||
675 | /* | ||
676 | * use a gang lookup to find the next inode in the tree | ||
677 | * as the tree is sparse and a gang lookup walks to find | ||
678 | * the number of objects requested. | ||
679 | */ | ||
680 | read_lock(&pag->pag_ici_lock); | ||
681 | nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, | ||
682 | (void**)&ip, first_index, 1, | ||
683 | XFS_ICI_RECLAIM_TAG); | ||
684 | |||
685 | if (!nr_found) { | ||
686 | read_unlock(&pag->pag_ici_lock); | ||
687 | break; | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Update the index for the next lookup. Catch overflows | ||
692 | * into the next AG range which can occur if we have inodes | ||
693 | * in the last block of the AG and we are currently | ||
694 | * pointing to the last inode. | ||
695 | */ | ||
696 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
697 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { | ||
698 | read_unlock(&pag->pag_ici_lock); | ||
699 | break; | ||
700 | } | ||
701 | |||
702 | /* ignore if already under reclaim */ | ||
703 | if (xfs_iflags_test(ip, XFS_IRECLAIM)) { | ||
704 | read_unlock(&pag->pag_ici_lock); | ||
705 | continue; | ||
706 | } | ||
707 | |||
708 | if (noblock) { | ||
709 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | ||
710 | read_unlock(&pag->pag_ici_lock); | ||
711 | continue; | ||
712 | } | ||
713 | if (xfs_ipincount(ip) || | ||
714 | !xfs_iflock_nowait(ip)) { | ||
715 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
716 | read_unlock(&pag->pag_ici_lock); | ||
717 | continue; | ||
718 | } | ||
719 | } | ||
720 | read_unlock(&pag->pag_ici_lock); | 766 | read_unlock(&pag->pag_ici_lock); |
721 | 767 | return 0; | |
722 | /* | ||
723 | * hmmm - this is an inode already in reclaim. Do | ||
724 | * we even bother catching it here? | ||
725 | */ | ||
726 | if (xfs_reclaim_inode(ip, noblock, mode)) | ||
727 | skipped++; | ||
728 | } while (nr_found); | ||
729 | |||
730 | if (skipped) { | ||
731 | delay(1); | ||
732 | goto restart; | ||
733 | } | 768 | } |
734 | return; | 769 | read_unlock(&pag->pag_ici_lock); |
735 | 770 | ||
771 | return xfs_reclaim_inode(ip, 0, flags); | ||
736 | } | 772 | } |
737 | 773 | ||
738 | int | 774 | int |
739 | xfs_reclaim_inodes( | 775 | xfs_reclaim_inodes( |
740 | xfs_mount_t *mp, | 776 | xfs_mount_t *mp, |
741 | int noblock, | ||
742 | int mode) | 777 | int mode) |
743 | { | 778 | { |
744 | int i; | 779 | return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode, |
745 | 780 | XFS_ICI_RECLAIM_TAG); | |
746 | for (i = 0; i < mp->m_sb.sb_agcount; i++) { | ||
747 | if (!mp->m_perag[i].pag_ici_init) | ||
748 | continue; | ||
749 | xfs_reclaim_inodes_ag(mp, i, noblock, mode); | ||
750 | } | ||
751 | return 0; | ||
752 | } | 781 | } |
753 | |||
754 | |||
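
The xfs_sync.c rewrite above factors the old open-coded radix-tree walks into one generic helper, xfs_inode_ag_iterator(), which invokes a per-inode callback for every cached inode in every initialised AG. A minimal sketch of how a callback plugs in, using only the interfaces shown in the hunks (the callback name xfs_example_inode_cb is hypothetical):

/*
 * The iterator hands the callback each inode with pag_ici_lock still held;
 * xfs_sync_inode_valid() grabs a reference (igrab) and drops that lock, or
 * returns a positive errno telling us to skip the inode.  Returning EAGAIN
 * makes the walk retry the AG after a short delay, EFSCORRUPTED aborts it.
 */
STATIC int
xfs_example_inode_cb(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	/* ... work on the referenced, now unlocked inode here ... */

	IRELE(ip);	/* drop the reference taken by xfs_sync_inode_valid() */
	return 0;
}

	/* walk every cached inode, untagged lookup: */
	error = xfs_inode_ag_iterator(mp, xfs_example_inode_cb, flags, XFS_ICI_NO_TAG);
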
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 308d5bf6dfbd..2a10301c99c7 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h | |||
@@ -29,17 +29,14 @@ typedef struct xfs_sync_work { | |||
29 | struct completion *w_completion; | 29 | struct completion *w_completion; |
30 | } xfs_sync_work_t; | 30 | } xfs_sync_work_t; |
31 | 31 | ||
32 | #define SYNC_ATTR 0x0001 /* sync attributes */ | 32 | #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ |
33 | #define SYNC_DELWRI 0x0002 /* look at delayed writes */ | 33 | #define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ |
34 | #define SYNC_WAIT 0x0004 /* wait for i/o to complete */ | ||
35 | #define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */ | ||
36 | #define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */ | ||
37 | #define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */ | ||
38 | 34 | ||
39 | int xfs_syncd_init(struct xfs_mount *mp); | 35 | int xfs_syncd_init(struct xfs_mount *mp); |
40 | void xfs_syncd_stop(struct xfs_mount *mp); | 36 | void xfs_syncd_stop(struct xfs_mount *mp); |
41 | 37 | ||
42 | int xfs_sync_inodes(struct xfs_mount *mp, int flags); | 38 | int xfs_sync_attr(struct xfs_mount *mp, int flags); |
39 | int xfs_sync_data(struct xfs_mount *mp, int flags); | ||
43 | int xfs_sync_fsdata(struct xfs_mount *mp, int flags); | 40 | int xfs_sync_fsdata(struct xfs_mount *mp, int flags); |
44 | 41 | ||
45 | int xfs_quiesce_data(struct xfs_mount *mp); | 42 | int xfs_quiesce_data(struct xfs_mount *mp); |
@@ -48,10 +45,16 @@ void xfs_quiesce_attr(struct xfs_mount *mp); | |||
48 | void xfs_flush_inodes(struct xfs_inode *ip); | 45 | void xfs_flush_inodes(struct xfs_inode *ip); |
49 | 46 | ||
50 | int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); | 47 | int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); |
51 | int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode); | 48 | int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); |
52 | 49 | ||
53 | void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); | 50 | void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); |
54 | void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip); | 51 | void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip); |
55 | void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, | 52 | void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, |
56 | struct xfs_inode *ip); | 53 | struct xfs_inode *ip); |
54 | |||
55 | int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag); | ||
56 | int xfs_inode_ag_iterator(struct xfs_mount *mp, | ||
57 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), | ||
58 | int flags, int tag); | ||
59 | |||
57 | #endif | 60 | #endif |
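
Since the header above drops four of the six old SYNC_* flags, it may help to see how the old flag combinations map onto the new interface. The mapping below is inferred from the call-site conversions in this patch, so treat it as a reading aid rather than an authoritative list:

/*
 * Old call                                                New call
 * xfs_sync_inodes(mp, SYNC_DELWRI)                        xfs_sync_data(mp, 0)
 * xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH)           xfs_sync_data(mp, 0)
 * xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT)  xfs_sync_data(mp, SYNC_WAIT)
 * xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT)                xfs_sync_attr(mp, SYNC_WAIT)
 * SYNC_BDFLUSH (never block) in fsdata/quota sync         SYNC_TRYLOCK
 */
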
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c index 964621fde6ed..497c7fb75cc1 100644 --- a/fs/xfs/linux-2.6/xfs_xattr.c +++ b/fs/xfs/linux-2.6/xfs_xattr.c | |||
@@ -29,67 +29,6 @@ | |||
29 | #include <linux/xattr.h> | 29 | #include <linux/xattr.h> |
30 | 30 | ||
31 | 31 | ||
32 | /* | ||
33 | * ACL handling. Should eventually be moved into xfs_acl.c | ||
34 | */ | ||
35 | |||
36 | static int | ||
37 | xfs_decode_acl(const char *name) | ||
38 | { | ||
39 | if (strcmp(name, "posix_acl_access") == 0) | ||
40 | return _ACL_TYPE_ACCESS; | ||
41 | else if (strcmp(name, "posix_acl_default") == 0) | ||
42 | return _ACL_TYPE_DEFAULT; | ||
43 | return -EINVAL; | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Get system extended attributes which at the moment only | ||
48 | * includes Posix ACLs. | ||
49 | */ | ||
50 | static int | ||
51 | xfs_xattr_system_get(struct inode *inode, const char *name, | ||
52 | void *buffer, size_t size) | ||
53 | { | ||
54 | int acl; | ||
55 | |||
56 | acl = xfs_decode_acl(name); | ||
57 | if (acl < 0) | ||
58 | return acl; | ||
59 | |||
60 | return xfs_acl_vget(inode, buffer, size, acl); | ||
61 | } | ||
62 | |||
63 | static int | ||
64 | xfs_xattr_system_set(struct inode *inode, const char *name, | ||
65 | const void *value, size_t size, int flags) | ||
66 | { | ||
67 | int acl; | ||
68 | |||
69 | acl = xfs_decode_acl(name); | ||
70 | if (acl < 0) | ||
71 | return acl; | ||
72 | if (flags & XATTR_CREATE) | ||
73 | return -EINVAL; | ||
74 | |||
75 | if (!value) | ||
76 | return xfs_acl_vremove(inode, acl); | ||
77 | |||
78 | return xfs_acl_vset(inode, (void *)value, size, acl); | ||
79 | } | ||
80 | |||
81 | static struct xattr_handler xfs_xattr_system_handler = { | ||
82 | .prefix = XATTR_SYSTEM_PREFIX, | ||
83 | .get = xfs_xattr_system_get, | ||
84 | .set = xfs_xattr_system_set, | ||
85 | }; | ||
86 | |||
87 | |||
88 | /* | ||
89 | * Real xattr handling. The only difference between the namespaces is | ||
90 | * a flag passed to the low-level attr code. | ||
91 | */ | ||
92 | |||
93 | static int | 32 | static int |
94 | __xfs_xattr_get(struct inode *inode, const char *name, | 33 | __xfs_xattr_get(struct inode *inode, const char *name, |
95 | void *value, size_t size, int xflags) | 34 | void *value, size_t size, int xflags) |
@@ -199,7 +138,9 @@ struct xattr_handler *xfs_xattr_handlers[] = { | |||
199 | &xfs_xattr_user_handler, | 138 | &xfs_xattr_user_handler, |
200 | &xfs_xattr_trusted_handler, | 139 | &xfs_xattr_trusted_handler, |
201 | &xfs_xattr_security_handler, | 140 | &xfs_xattr_security_handler, |
141 | #ifdef CONFIG_XFS_POSIX_ACL | ||
202 | &xfs_xattr_system_handler, | 142 | &xfs_xattr_system_handler, |
143 | #endif | ||
203 | NULL | 144 | NULL |
204 | }; | 145 | }; |
205 | 146 | ||
@@ -310,7 +251,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) | |||
310 | /* | 251 | /* |
311 | * Then add the two synthetic ACL attributes. | 252 | * Then add the two synthetic ACL attributes. |
312 | */ | 253 | */ |
313 | if (xfs_acl_vhasacl_access(inode)) { | 254 | if (posix_acl_access_exists(inode)) { |
314 | error = list_one_attr(POSIX_ACL_XATTR_ACCESS, | 255 | error = list_one_attr(POSIX_ACL_XATTR_ACCESS, |
315 | strlen(POSIX_ACL_XATTR_ACCESS) + 1, | 256 | strlen(POSIX_ACL_XATTR_ACCESS) + 1, |
316 | data, size, &context.count); | 257 | data, size, &context.count); |
@@ -318,7 +259,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) | |||
318 | return error; | 259 | return error; |
319 | } | 260 | } |
320 | 261 | ||
321 | if (xfs_acl_vhasacl_default(inode)) { | 262 | if (posix_acl_default_exists(inode)) { |
322 | error = list_one_attr(POSIX_ACL_XATTR_DEFAULT, | 263 | error = list_one_attr(POSIX_ACL_XATTR_DEFAULT, |
323 | strlen(POSIX_ACL_XATTR_DEFAULT) + 1, | 264 | strlen(POSIX_ACL_XATTR_DEFAULT) + 1, |
324 | data, size, &context.count); | 265 | data, size, &context.count); |
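
With the private ACL xattr code removed from xfs_xattr.c above, the system-namespace handler is referenced from the handler table only when CONFIG_XFS_POSIX_ACL is set (its new definition lives outside this file and is not part of this hunk). Annotated copy of the table, showing that ACL-less builds simply never register a handler for system.* attributes:

struct xattr_handler *xfs_xattr_handlers[] = {
	&xfs_xattr_user_handler,
	&xfs_xattr_trusted_handler,
	&xfs_xattr_security_handler,
#ifdef CONFIG_XFS_POSIX_ACL
	&xfs_xattr_system_handler,	/* system.posix_acl_{access,default} */
#endif
	NULL
};
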
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index e4babcc63423..2f3f2229eaaf 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 43 | #include "xfs_itable.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_trans_space.h" | 47 | #include "xfs_trans_space.h" |
@@ -1194,7 +1193,9 @@ void | |||
1194 | xfs_qm_dqrele( | 1193 | xfs_qm_dqrele( |
1195 | xfs_dquot_t *dqp) | 1194 | xfs_dquot_t *dqp) |
1196 | { | 1195 | { |
1197 | ASSERT(dqp); | 1196 | if (!dqp) |
1197 | return; | ||
1198 | |||
1198 | xfs_dqtrace_entry(dqp, "DQRELE"); | 1199 | xfs_dqtrace_entry(dqp, "DQRELE"); |
1199 | 1200 | ||
1200 | xfs_dqlock(dqp); | 1201 | xfs_dqlock(dqp); |
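
The xfs_dquot.c change above turns xfs_qm_dqrele() from asserting on a NULL dquot into silently accepting one. That lets callers release whatever is attached without guarding every call, and is why the xfs_qm_dqrele_null() wrapper disappears further down. A hypothetical caller, kernel context assumed:

	/* safe even when no dquot of that type is attached to the inode */
	xfs_qm_dqrele(ip->i_udquot);
	xfs_qm_dqrele(ip->i_gdquot);
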
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index de0f402ddb4c..6533ead9b889 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h | |||
@@ -181,7 +181,6 @@ extern void xfs_qm_adjust_dqlimits(xfs_mount_t *, | |||
181 | extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, | 181 | extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, |
182 | xfs_dqid_t, uint, uint, xfs_dquot_t **); | 182 | xfs_dqid_t, uint, uint, xfs_dquot_t **); |
183 | extern void xfs_qm_dqput(xfs_dquot_t *); | 183 | extern void xfs_qm_dqput(xfs_dquot_t *); |
184 | extern void xfs_qm_dqrele(xfs_dquot_t *); | ||
185 | extern void xfs_dqlock(xfs_dquot_t *); | 184 | extern void xfs_dqlock(xfs_dquot_t *); |
186 | extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); | 185 | extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); |
187 | extern void xfs_dqunlock(xfs_dquot_t *); | 186 | extern void xfs_dqunlock(xfs_dquot_t *); |
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 1728f6a7c4f5..d0d4a9a0bbd7 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 43 | #include "xfs_itable.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_trans_priv.h" | 47 | #include "xfs_trans_priv.h" |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 5b6695049e00..45b1bfef7388 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_bmap.h" | 43 | #include "xfs_bmap.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_trans_space.h" | 47 | #include "xfs_trans_space.h" |
@@ -287,11 +286,13 @@ xfs_qm_rele_quotafs_ref( | |||
287 | * Just destroy the quotainfo structure. | 286 | * Just destroy the quotainfo structure. |
288 | */ | 287 | */ |
289 | void | 288 | void |
290 | xfs_qm_unmount_quotadestroy( | 289 | xfs_qm_unmount( |
291 | xfs_mount_t *mp) | 290 | struct xfs_mount *mp) |
292 | { | 291 | { |
293 | if (mp->m_quotainfo) | 292 | if (mp->m_quotainfo) { |
293 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); | ||
294 | xfs_qm_destroy_quotainfo(mp); | 294 | xfs_qm_destroy_quotainfo(mp); |
295 | } | ||
295 | } | 296 | } |
296 | 297 | ||
297 | 298 | ||
@@ -385,8 +386,13 @@ xfs_qm_mount_quotas( | |||
385 | if (error) { | 386 | if (error) { |
386 | xfs_fs_cmn_err(CE_WARN, mp, | 387 | xfs_fs_cmn_err(CE_WARN, mp, |
387 | "Failed to initialize disk quotas."); | 388 | "Failed to initialize disk quotas."); |
389 | return; | ||
388 | } | 390 | } |
389 | return; | 391 | |
392 | #ifdef QUOTADEBUG | ||
393 | if (XFS_IS_QUOTA_ON(mp)) | ||
394 | xfs_qm_internalqcheck(mp); | ||
395 | #endif | ||
390 | } | 396 | } |
391 | 397 | ||
392 | /* | 398 | /* |
@@ -774,12 +780,11 @@ xfs_qm_dqattach_grouphint( | |||
774 | * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON | 780 | * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON |
775 | * into account. | 781 | * into account. |
776 | * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. | 782 | * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. |
777 | * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL. | ||
778 | * Inode may get unlocked and relocked in here, and the caller must deal with | 783 | * Inode may get unlocked and relocked in here, and the caller must deal with |
779 | * the consequences. | 784 | * the consequences. |
780 | */ | 785 | */ |
781 | int | 786 | int |
782 | xfs_qm_dqattach( | 787 | xfs_qm_dqattach_locked( |
783 | xfs_inode_t *ip, | 788 | xfs_inode_t *ip, |
784 | uint flags) | 789 | uint flags) |
785 | { | 790 | { |
@@ -787,17 +792,14 @@ xfs_qm_dqattach( | |||
787 | uint nquotas = 0; | 792 | uint nquotas = 0; |
788 | int error = 0; | 793 | int error = 0; |
789 | 794 | ||
790 | if ((! XFS_IS_QUOTA_ON(mp)) || | 795 | if (!XFS_IS_QUOTA_RUNNING(mp) || |
791 | (! XFS_NOT_DQATTACHED(mp, ip)) || | 796 | !XFS_IS_QUOTA_ON(mp) || |
792 | (ip->i_ino == mp->m_sb.sb_uquotino) || | 797 | !XFS_NOT_DQATTACHED(mp, ip) || |
793 | (ip->i_ino == mp->m_sb.sb_gquotino)) | 798 | ip->i_ino == mp->m_sb.sb_uquotino || |
799 | ip->i_ino == mp->m_sb.sb_gquotino) | ||
794 | return 0; | 800 | return 0; |
795 | 801 | ||
796 | ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 || | 802 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
797 | xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
798 | |||
799 | if (! (flags & XFS_QMOPT_ILOCKED)) | ||
800 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
801 | 803 | ||
802 | if (XFS_IS_UQUOTA_ON(mp)) { | 804 | if (XFS_IS_UQUOTA_ON(mp)) { |
803 | error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, | 805 | error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, |
@@ -849,8 +851,7 @@ xfs_qm_dqattach( | |||
849 | xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot); | 851 | xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot); |
850 | } | 852 | } |
851 | 853 | ||
852 | done: | 854 | done: |
853 | |||
854 | #ifdef QUOTADEBUG | 855 | #ifdef QUOTADEBUG |
855 | if (! error) { | 856 | if (! error) { |
856 | if (XFS_IS_UQUOTA_ON(mp)) | 857 | if (XFS_IS_UQUOTA_ON(mp)) |
@@ -858,15 +859,22 @@ xfs_qm_dqattach( | |||
858 | if (XFS_IS_OQUOTA_ON(mp)) | 859 | if (XFS_IS_OQUOTA_ON(mp)) |
859 | ASSERT(ip->i_gdquot); | 860 | ASSERT(ip->i_gdquot); |
860 | } | 861 | } |
862 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
861 | #endif | 863 | #endif |
864 | return error; | ||
865 | } | ||
862 | 866 | ||
863 | if (! (flags & XFS_QMOPT_ILOCKED)) | 867 | int |
864 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 868 | xfs_qm_dqattach( |
869 | struct xfs_inode *ip, | ||
870 | uint flags) | ||
871 | { | ||
872 | int error; | ||
873 | |||
874 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
875 | error = xfs_qm_dqattach_locked(ip, flags); | ||
876 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
865 | 877 | ||
866 | #ifdef QUOTADEBUG | ||
867 | else | ||
868 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
869 | #endif | ||
870 | return error; | 878 | return error; |
871 | } | 879 | } |
872 | 880 | ||
@@ -896,11 +904,6 @@ xfs_qm_dqdetach( | |||
896 | } | 904 | } |
897 | } | 905 | } |
898 | 906 | ||
899 | /* | ||
900 | * This is called to sync quotas. We can be told to use non-blocking | ||
901 | * semantics by either the SYNC_BDFLUSH flag or the absence of the | ||
902 | * SYNC_WAIT flag. | ||
903 | */ | ||
904 | int | 907 | int |
905 | xfs_qm_sync( | 908 | xfs_qm_sync( |
906 | xfs_mount_t *mp, | 909 | xfs_mount_t *mp, |
@@ -909,17 +912,13 @@ xfs_qm_sync( | |||
909 | int recl, restarts; | 912 | int recl, restarts; |
910 | xfs_dquot_t *dqp; | 913 | xfs_dquot_t *dqp; |
911 | uint flush_flags; | 914 | uint flush_flags; |
912 | boolean_t nowait; | ||
913 | int error; | 915 | int error; |
914 | 916 | ||
915 | if (! XFS_IS_QUOTA_ON(mp)) | 917 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
916 | return 0; | 918 | return 0; |
917 | 919 | ||
920 | flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI; | ||
918 | restarts = 0; | 921 | restarts = 0; |
919 | /* | ||
920 | * We won't block unless we are asked to. | ||
921 | */ | ||
922 | nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0); | ||
923 | 922 | ||
924 | again: | 923 | again: |
925 | xfs_qm_mplist_lock(mp); | 924 | xfs_qm_mplist_lock(mp); |
@@ -939,18 +938,10 @@ xfs_qm_sync( | |||
939 | * don't 'seem' to be dirty. ie. don't acquire dqlock. | 938 | * don't 'seem' to be dirty. ie. don't acquire dqlock. |
940 | * This is very similar to what xfs_sync does with inodes. | 939 | * This is very similar to what xfs_sync does with inodes. |
941 | */ | 940 | */ |
942 | if (flags & SYNC_BDFLUSH) { | 941 | if (flags & SYNC_TRYLOCK) { |
943 | if (! XFS_DQ_IS_DIRTY(dqp)) | 942 | if (!XFS_DQ_IS_DIRTY(dqp)) |
944 | continue; | 943 | continue; |
945 | } | 944 | if (!xfs_qm_dqlock_nowait(dqp)) |
946 | |||
947 | if (nowait) { | ||
948 | /* | ||
949 | * Try to acquire the dquot lock. We are NOT out of | ||
950 | * lock order, but we just don't want to wait for this | ||
951 | * lock, unless somebody wanted us to. | ||
952 | */ | ||
953 | if (! xfs_qm_dqlock_nowait(dqp)) | ||
954 | continue; | 945 | continue; |
955 | } else { | 946 | } else { |
956 | xfs_dqlock(dqp); | 947 | xfs_dqlock(dqp); |
@@ -967,7 +958,7 @@ xfs_qm_sync( | |||
967 | /* XXX a sentinel would be better */ | 958 | /* XXX a sentinel would be better */ |
968 | recl = XFS_QI_MPLRECLAIMS(mp); | 959 | recl = XFS_QI_MPLRECLAIMS(mp); |
969 | if (!xfs_dqflock_nowait(dqp)) { | 960 | if (!xfs_dqflock_nowait(dqp)) { |
970 | if (nowait) { | 961 | if (flags & SYNC_TRYLOCK) { |
971 | xfs_dqunlock(dqp); | 962 | xfs_dqunlock(dqp); |
972 | continue; | 963 | continue; |
973 | } | 964 | } |
@@ -985,7 +976,6 @@ xfs_qm_sync( | |||
985 | * Let go of the mplist lock. We don't want to hold it | 976 | * Let go of the mplist lock. We don't want to hold it |
986 | * across a disk write | 977 | * across a disk write |
987 | */ | 978 | */ |
988 | flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC; | ||
989 | xfs_qm_mplist_unlock(mp); | 979 | xfs_qm_mplist_unlock(mp); |
990 | xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH"); | 980 | xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH"); |
991 | error = xfs_qm_dqflush(dqp, flush_flags); | 981 | error = xfs_qm_dqflush(dqp, flush_flags); |
@@ -2319,20 +2309,20 @@ xfs_qm_write_sb_changes( | |||
2319 | */ | 2309 | */ |
2320 | int | 2310 | int |
2321 | xfs_qm_vop_dqalloc( | 2311 | xfs_qm_vop_dqalloc( |
2322 | xfs_mount_t *mp, | 2312 | struct xfs_inode *ip, |
2323 | xfs_inode_t *ip, | 2313 | uid_t uid, |
2324 | uid_t uid, | 2314 | gid_t gid, |
2325 | gid_t gid, | 2315 | prid_t prid, |
2326 | prid_t prid, | 2316 | uint flags, |
2327 | uint flags, | 2317 | struct xfs_dquot **O_udqpp, |
2328 | xfs_dquot_t **O_udqpp, | 2318 | struct xfs_dquot **O_gdqpp) |
2329 | xfs_dquot_t **O_gdqpp) | ||
2330 | { | 2319 | { |
2331 | int error; | 2320 | struct xfs_mount *mp = ip->i_mount; |
2332 | xfs_dquot_t *uq, *gq; | 2321 | struct xfs_dquot *uq, *gq; |
2333 | uint lockflags; | 2322 | int error; |
2323 | uint lockflags; | ||
2334 | 2324 | ||
2335 | if (!XFS_IS_QUOTA_ON(mp)) | 2325 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
2336 | return 0; | 2326 | return 0; |
2337 | 2327 | ||
2338 | lockflags = XFS_ILOCK_EXCL; | 2328 | lockflags = XFS_ILOCK_EXCL; |
@@ -2346,8 +2336,8 @@ xfs_qm_vop_dqalloc( | |||
2346 | * if necessary. The dquot(s) will not be locked. | 2336 | * if necessary. The dquot(s) will not be locked. |
2347 | */ | 2337 | */ |
2348 | if (XFS_NOT_DQATTACHED(mp, ip)) { | 2338 | if (XFS_NOT_DQATTACHED(mp, ip)) { |
2349 | if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC | | 2339 | error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); |
2350 | XFS_QMOPT_ILOCKED))) { | 2340 | if (error) { |
2351 | xfs_iunlock(ip, lockflags); | 2341 | xfs_iunlock(ip, lockflags); |
2352 | return error; | 2342 | return error; |
2353 | } | 2343 | } |
@@ -2469,6 +2459,7 @@ xfs_qm_vop_chown( | |||
2469 | uint bfield = XFS_IS_REALTIME_INODE(ip) ? | 2459 | uint bfield = XFS_IS_REALTIME_INODE(ip) ? |
2470 | XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; | 2460 | XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; |
2471 | 2461 | ||
2462 | |||
2472 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2463 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
2473 | ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); | 2464 | ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); |
2474 | 2465 | ||
@@ -2508,13 +2499,13 @@ xfs_qm_vop_chown_reserve( | |||
2508 | xfs_dquot_t *gdqp, | 2499 | xfs_dquot_t *gdqp, |
2509 | uint flags) | 2500 | uint flags) |
2510 | { | 2501 | { |
2511 | int error; | 2502 | xfs_mount_t *mp = ip->i_mount; |
2512 | xfs_mount_t *mp; | ||
2513 | uint delblks, blkflags, prjflags = 0; | 2503 | uint delblks, blkflags, prjflags = 0; |
2514 | xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; | 2504 | xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; |
2505 | int error; | ||
2506 | |||
2515 | 2507 | ||
2516 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | 2508 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
2517 | mp = ip->i_mount; | ||
2518 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 2509 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
2519 | 2510 | ||
2520 | delblks = ip->i_delayed_blks; | 2511 | delblks = ip->i_delayed_blks; |
@@ -2582,28 +2573,23 @@ xfs_qm_vop_chown_reserve( | |||
2582 | 2573 | ||
2583 | int | 2574 | int |
2584 | xfs_qm_vop_rename_dqattach( | 2575 | xfs_qm_vop_rename_dqattach( |
2585 | xfs_inode_t **i_tab) | 2576 | struct xfs_inode **i_tab) |
2586 | { | 2577 | { |
2587 | xfs_inode_t *ip; | 2578 | struct xfs_mount *mp = i_tab[0]->i_mount; |
2588 | int i; | 2579 | int i; |
2589 | int error; | ||
2590 | 2580 | ||
2591 | ip = i_tab[0]; | 2581 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
2592 | |||
2593 | if (! XFS_IS_QUOTA_ON(ip->i_mount)) | ||
2594 | return 0; | 2582 | return 0; |
2595 | 2583 | ||
2596 | if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { | 2584 | for (i = 0; (i < 4 && i_tab[i]); i++) { |
2597 | error = xfs_qm_dqattach(ip, 0); | 2585 | struct xfs_inode *ip = i_tab[i]; |
2598 | if (error) | 2586 | int error; |
2599 | return error; | 2587 | |
2600 | } | ||
2601 | for (i = 1; (i < 4 && i_tab[i]); i++) { | ||
2602 | /* | 2588 | /* |
2603 | * Watch out for duplicate entries in the table. | 2589 | * Watch out for duplicate entries in the table. |
2604 | */ | 2590 | */ |
2605 | if ((ip = i_tab[i]) != i_tab[i-1]) { | 2591 | if (i == 0 || ip != i_tab[i-1]) { |
2606 | if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { | 2592 | if (XFS_NOT_DQATTACHED(mp, ip)) { |
2607 | error = xfs_qm_dqattach(ip, 0); | 2593 | error = xfs_qm_dqattach(ip, 0); |
2608 | if (error) | 2594 | if (error) |
2609 | return error; | 2595 | return error; |
@@ -2614,17 +2600,19 @@ xfs_qm_vop_rename_dqattach( | |||
2614 | } | 2600 | } |
2615 | 2601 | ||
2616 | void | 2602 | void |
2617 | xfs_qm_vop_dqattach_and_dqmod_newinode( | 2603 | xfs_qm_vop_create_dqattach( |
2618 | xfs_trans_t *tp, | 2604 | struct xfs_trans *tp, |
2619 | xfs_inode_t *ip, | 2605 | struct xfs_inode *ip, |
2620 | xfs_dquot_t *udqp, | 2606 | struct xfs_dquot *udqp, |
2621 | xfs_dquot_t *gdqp) | 2607 | struct xfs_dquot *gdqp) |
2622 | { | 2608 | { |
2623 | if (!XFS_IS_QUOTA_ON(tp->t_mountp)) | 2609 | struct xfs_mount *mp = tp->t_mountp; |
2610 | |||
2611 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) | ||
2624 | return; | 2612 | return; |
2625 | 2613 | ||
2626 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2614 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
2627 | ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); | 2615 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
2628 | 2616 | ||
2629 | if (udqp) { | 2617 | if (udqp) { |
2630 | xfs_dqlock(udqp); | 2618 | xfs_dqlock(udqp); |
@@ -2632,7 +2620,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode( | |||
2632 | xfs_dqunlock(udqp); | 2620 | xfs_dqunlock(udqp); |
2633 | ASSERT(ip->i_udquot == NULL); | 2621 | ASSERT(ip->i_udquot == NULL); |
2634 | ip->i_udquot = udqp; | 2622 | ip->i_udquot = udqp; |
2635 | ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp)); | 2623 | ASSERT(XFS_IS_UQUOTA_ON(mp)); |
2636 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); | 2624 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); |
2637 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); | 2625 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); |
2638 | } | 2626 | } |
@@ -2642,8 +2630,8 @@ xfs_qm_vop_dqattach_and_dqmod_newinode( | |||
2642 | xfs_dqunlock(gdqp); | 2630 | xfs_dqunlock(gdqp); |
2643 | ASSERT(ip->i_gdquot == NULL); | 2631 | ASSERT(ip->i_gdquot == NULL); |
2644 | ip->i_gdquot = gdqp; | 2632 | ip->i_gdquot = gdqp; |
2645 | ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp)); | 2633 | ASSERT(XFS_IS_OQUOTA_ON(mp)); |
2646 | ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ? | 2634 | ASSERT((XFS_IS_GQUOTA_ON(mp) ? |
2647 | ip->i_d.di_gid : ip->i_d.di_projid) == | 2635 | ip->i_d.di_gid : ip->i_d.di_projid) == |
2648 | be32_to_cpu(gdqp->q_core.d_id)); | 2636 | be32_to_cpu(gdqp->q_core.d_id)); |
2649 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); | 2637 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); |
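
The quota-manager changes above split dquot attachment into xfs_qm_dqattach_locked(), which requires the caller to hold XFS_ILOCK_EXCL, and a thin xfs_qm_dqattach() wrapper that takes and drops the lock itself, replacing the old XFS_QMOPT_ILOCKED flag. A short sketch of the two call patterns, using only calls that appear in the hunks:

	/* caller does not hold the inode lock: */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/* caller already holds XFS_ILOCK_EXCL, e.g. in the dquot allocation path: */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
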
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index a371954cae1b..495564b8af38 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h | |||
@@ -127,8 +127,6 @@ typedef struct xfs_quotainfo { | |||
127 | } xfs_quotainfo_t; | 127 | } xfs_quotainfo_t; |
128 | 128 | ||
129 | 129 | ||
130 | extern xfs_dqtrxops_t xfs_trans_dquot_ops; | ||
131 | |||
132 | extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); | 130 | extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); |
133 | extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, | 131 | extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, |
134 | xfs_dquot_t *, xfs_dquot_t *, long, long, uint); | 132 | xfs_dquot_t *, xfs_dquot_t *, long, long, uint); |
@@ -159,17 +157,11 @@ typedef struct xfs_dquot_acct { | |||
159 | #define XFS_QM_RTBWARNLIMIT 5 | 157 | #define XFS_QM_RTBWARNLIMIT 5 |
160 | 158 | ||
161 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); | 159 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); |
162 | extern void xfs_qm_mount_quotas(xfs_mount_t *); | ||
163 | extern int xfs_qm_quotacheck(xfs_mount_t *); | 160 | extern int xfs_qm_quotacheck(xfs_mount_t *); |
164 | extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); | ||
165 | extern void xfs_qm_unmount_quotas(xfs_mount_t *); | ||
166 | extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); | 161 | extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); |
167 | extern int xfs_qm_sync(xfs_mount_t *, int); | ||
168 | 162 | ||
169 | /* dquot stuff */ | 163 | /* dquot stuff */ |
170 | extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); | 164 | extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); |
171 | extern int xfs_qm_dqattach(xfs_inode_t *, uint); | ||
172 | extern void xfs_qm_dqdetach(xfs_inode_t *); | ||
173 | extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); | 165 | extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); |
174 | extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); | 166 | extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); |
175 | 167 | ||
@@ -183,19 +175,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); | |||
183 | extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); | 175 | extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); |
184 | extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); | 176 | extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); |
185 | 177 | ||
186 | /* vop stuff */ | ||
187 | extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *, | ||
188 | uid_t, gid_t, prid_t, uint, | ||
189 | xfs_dquot_t **, xfs_dquot_t **); | ||
190 | extern void xfs_qm_vop_dqattach_and_dqmod_newinode( | ||
191 | xfs_trans_t *, xfs_inode_t *, | ||
192 | xfs_dquot_t *, xfs_dquot_t *); | ||
193 | extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **); | ||
194 | extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *, | ||
195 | xfs_dquot_t **, xfs_dquot_t *); | ||
196 | extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *, | ||
197 | xfs_dquot_t *, xfs_dquot_t *, uint); | ||
198 | |||
199 | /* list stuff */ | 178 | /* list stuff */ |
200 | extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); | 179 | extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); |
201 | extern void xfs_qm_freelist_unlink(xfs_dquot_t *); | 180 | extern void xfs_qm_freelist_unlink(xfs_dquot_t *); |
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index 63037c689a4b..a5346630dfae 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_rtalloc.h" | 42 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 43 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_qm.h" | 47 | #include "xfs_qm.h" |
@@ -84,7 +83,7 @@ xfs_fill_statvfs_from_dquot( | |||
84 | * return a statvfs of the project, not the entire filesystem. | 83 | * return a statvfs of the project, not the entire filesystem. |
85 | * This makes such trees appear as if they are filesystems in themselves. | 84 | * This makes such trees appear as if they are filesystems in themselves. |
86 | */ | 85 | */ |
87 | STATIC void | 86 | void |
88 | xfs_qm_statvfs( | 87 | xfs_qm_statvfs( |
89 | xfs_inode_t *ip, | 88 | xfs_inode_t *ip, |
90 | struct kstatfs *statp) | 89 | struct kstatfs *statp) |
@@ -92,20 +91,13 @@ xfs_qm_statvfs( | |||
92 | xfs_mount_t *mp = ip->i_mount; | 91 | xfs_mount_t *mp = ip->i_mount; |
93 | xfs_dquot_t *dqp; | 92 | xfs_dquot_t *dqp; |
94 | 93 | ||
95 | if (!(ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || | ||
96 | !((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == | ||
97 | (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) | ||
98 | return; | ||
99 | |||
100 | if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) { | 94 | if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) { |
101 | xfs_disk_dquot_t *dp = &dqp->q_core; | 95 | xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); |
102 | |||
103 | xfs_fill_statvfs_from_dquot(statp, dp); | ||
104 | xfs_qm_dqput(dqp); | 96 | xfs_qm_dqput(dqp); |
105 | } | 97 | } |
106 | } | 98 | } |
107 | 99 | ||
108 | STATIC int | 100 | int |
109 | xfs_qm_newmount( | 101 | xfs_qm_newmount( |
110 | xfs_mount_t *mp, | 102 | xfs_mount_t *mp, |
111 | uint *needquotamount, | 103 | uint *needquotamount, |
@@ -114,9 +106,6 @@ xfs_qm_newmount( | |||
114 | uint quotaondisk; | 106 | uint quotaondisk; |
115 | uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; | 107 | uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; |
116 | 108 | ||
117 | *quotaflags = 0; | ||
118 | *needquotamount = B_FALSE; | ||
119 | |||
120 | quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && | 109 | quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && |
121 | (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); | 110 | (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); |
122 | 111 | ||
@@ -179,66 +168,6 @@ xfs_qm_newmount( | |||
179 | return 0; | 168 | return 0; |
180 | } | 169 | } |
181 | 170 | ||
182 | STATIC int | ||
183 | xfs_qm_endmount( | ||
184 | xfs_mount_t *mp, | ||
185 | uint needquotamount, | ||
186 | uint quotaflags) | ||
187 | { | ||
188 | if (needquotamount) { | ||
189 | ASSERT(mp->m_qflags == 0); | ||
190 | mp->m_qflags = quotaflags; | ||
191 | xfs_qm_mount_quotas(mp); | ||
192 | } | ||
193 | |||
194 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) | ||
195 | if (! (XFS_IS_QUOTA_ON(mp))) | ||
196 | xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on"); | ||
197 | else | ||
198 | xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on"); | ||
199 | #endif | ||
200 | |||
201 | #ifdef QUOTADEBUG | ||
202 | if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp)) | ||
203 | cmn_err(CE_WARN, "XFS: mount internalqcheck failed"); | ||
204 | #endif | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | STATIC void | ||
210 | xfs_qm_dqrele_null( | ||
211 | xfs_dquot_t *dq) | ||
212 | { | ||
213 | /* | ||
214 | * Called from XFS, where we always check first for a NULL dquot. | ||
215 | */ | ||
216 | if (!dq) | ||
217 | return; | ||
218 | xfs_qm_dqrele(dq); | ||
219 | } | ||
220 | |||
221 | |||
222 | struct xfs_qmops xfs_qmcore_xfs = { | ||
223 | .xfs_qminit = xfs_qm_newmount, | ||
224 | .xfs_qmdone = xfs_qm_unmount_quotadestroy, | ||
225 | .xfs_qmmount = xfs_qm_endmount, | ||
226 | .xfs_qmunmount = xfs_qm_unmount_quotas, | ||
227 | .xfs_dqrele = xfs_qm_dqrele_null, | ||
228 | .xfs_dqattach = xfs_qm_dqattach, | ||
229 | .xfs_dqdetach = xfs_qm_dqdetach, | ||
230 | .xfs_dqpurgeall = xfs_qm_dqpurge_all, | ||
231 | .xfs_dqvopalloc = xfs_qm_vop_dqalloc, | ||
232 | .xfs_dqvopcreate = xfs_qm_vop_dqattach_and_dqmod_newinode, | ||
233 | .xfs_dqvoprename = xfs_qm_vop_rename_dqattach, | ||
234 | .xfs_dqvopchown = xfs_qm_vop_chown, | ||
235 | .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve, | ||
236 | .xfs_dqstatvfs = xfs_qm_statvfs, | ||
237 | .xfs_dqsync = xfs_qm_sync, | ||
238 | .xfs_dqtrxops = &xfs_trans_dquot_ops, | ||
239 | }; | ||
240 | EXPORT_SYMBOL(xfs_qmcore_xfs); | ||
241 | |||
242 | void __init | 171 | void __init |
243 | xfs_qm_init(void) | 172 | xfs_qm_init(void) |
244 | { | 173 | { |
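The xfs_qm_statvfs() hunk above drops the open-coded flag check (the caller now decides when to invoke it) and simply fills the statfs data from the project dquot. As a rough, user-space illustration of the idea in the comment — reporting a project-quota-limited directory tree as if it were a filesystem of its own — the sketch below clamps the reported size to a hypothetical block hard limit. The structures and field names are invented for the example; they are not the kernel's.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins only; not the kernel's kstatfs or xfs_disk_dquot. */
struct statfs_like { uint64_t f_blocks, f_bfree, f_bavail; };
struct dquot_like  { uint64_t blk_hardlimit, blk_count; };

/*
 * Illustrative only: report a project-quota-limited tree as if the quota
 * hard limit were the filesystem size, and derive "free" from current usage.
 */
static void fill_from_dquot(struct statfs_like *st, const struct dquot_like *dq)
{
    if (dq->blk_hardlimit && dq->blk_hardlimit < st->f_blocks) {
        st->f_blocks = dq->blk_hardlimit;
        st->f_bfree = st->f_bavail =
            dq->blk_count > dq->blk_hardlimit ?
                0 : dq->blk_hardlimit - dq->blk_count;
    }
}

int main(void)
{
    struct statfs_like st = { .f_blocks = 1000000, .f_bfree = 800000, .f_bavail = 800000 };
    struct dquot_like  dq = { .blk_hardlimit = 5000, .blk_count = 1200 };

    fill_from_dquot(&st, &dq);
    printf("blocks=%llu free=%llu\n",
           (unsigned long long)st.f_blocks, (unsigned long long)st.f_bfree);
    return 0;
}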
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c index 709f5f545cf5..21b08c0396a1 100644 --- a/fs/xfs/quota/xfs_qm_stats.c +++ b/fs/xfs/quota/xfs_qm_stats.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_rtalloc.h" | 42 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 43 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_qm.h" | 47 | #include "xfs_qm.h" |
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index c7b66f6506ce..4e4276b956e8 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include "xfs_rtalloc.h" | 45 | #include "xfs_rtalloc.h" |
46 | #include "xfs_error.h" | 46 | #include "xfs_error.h" |
47 | #include "xfs_rw.h" | 47 | #include "xfs_rw.h" |
48 | #include "xfs_acl.h" | ||
49 | #include "xfs_attr.h" | 48 | #include "xfs_attr.h" |
50 | #include "xfs_buf_item.h" | 49 | #include "xfs_buf_item.h" |
51 | #include "xfs_utils.h" | 50 | #include "xfs_utils.h" |
@@ -847,105 +846,55 @@ xfs_qm_export_flags( | |||
847 | } | 846 | } |
848 | 847 | ||
849 | 848 | ||
850 | /* | 849 | STATIC int |
851 | * Release all the dquots on the inodes in an AG. | 850 | xfs_dqrele_inode( |
852 | */ | 851 | struct xfs_inode *ip, |
853 | STATIC void | 852 | struct xfs_perag *pag, |
854 | xfs_qm_dqrele_inodes_ag( | 853 | int flags) |
855 | xfs_mount_t *mp, | ||
856 | int ag, | ||
857 | uint flags) | ||
858 | { | 854 | { |
859 | xfs_inode_t *ip = NULL; | 855 | int error; |
860 | xfs_perag_t *pag = &mp->m_perag[ag]; | ||
861 | int first_index = 0; | ||
862 | int nr_found; | ||
863 | |||
864 | do { | ||
865 | /* | ||
866 | * use a gang lookup to find the next inode in the tree | ||
867 | * as the tree is sparse and a gang lookup walks to find | ||
868 | * the number of objects requested. | ||
869 | */ | ||
870 | read_lock(&pag->pag_ici_lock); | ||
871 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, | ||
872 | (void**)&ip, first_index, 1); | ||
873 | |||
874 | if (!nr_found) { | ||
875 | read_unlock(&pag->pag_ici_lock); | ||
876 | break; | ||
877 | } | ||
878 | |||
879 | /* | ||
880 | * Update the index for the next lookup. Catch overflows | ||
881 | * into the next AG range which can occur if we have inodes | ||
882 | * in the last block of the AG and we are currently | ||
883 | * pointing to the last inode. | ||
884 | */ | ||
885 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
886 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { | ||
887 | read_unlock(&pag->pag_ici_lock); | ||
888 | break; | ||
889 | } | ||
890 | |||
891 | /* skip quota inodes */ | ||
892 | if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { | ||
893 | ASSERT(ip->i_udquot == NULL); | ||
894 | ASSERT(ip->i_gdquot == NULL); | ||
895 | read_unlock(&pag->pag_ici_lock); | ||
896 | continue; | ||
897 | } | ||
898 | 856 | ||
899 | /* | 857 | /* skip quota inodes */ |
900 | * If we can't get a reference on the inode, it must be | 858 | if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) { |
901 | * in reclaim. Leave it for the reclaim code to flush. | 859 | ASSERT(ip->i_udquot == NULL); |
902 | */ | 860 | ASSERT(ip->i_gdquot == NULL); |
903 | if (!igrab(VFS_I(ip))) { | ||
904 | read_unlock(&pag->pag_ici_lock); | ||
905 | continue; | ||
906 | } | ||
907 | read_unlock(&pag->pag_ici_lock); | 861 | read_unlock(&pag->pag_ici_lock); |
862 | return 0; | ||
863 | } | ||
908 | 864 | ||
909 | /* avoid new inodes though we shouldn't find any here */ | 865 | error = xfs_sync_inode_valid(ip, pag); |
910 | if (xfs_iflags_test(ip, XFS_INEW)) { | 866 | if (error) |
911 | IRELE(ip); | 867 | return error; |
912 | continue; | ||
913 | } | ||
914 | 868 | ||
915 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 869 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
916 | if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { | 870 | if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { |
917 | xfs_qm_dqrele(ip->i_udquot); | 871 | xfs_qm_dqrele(ip->i_udquot); |
918 | ip->i_udquot = NULL; | 872 | ip->i_udquot = NULL; |
919 | } | 873 | } |
920 | if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && | 874 | if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) { |
921 | ip->i_gdquot) { | 875 | xfs_qm_dqrele(ip->i_gdquot); |
922 | xfs_qm_dqrele(ip->i_gdquot); | 876 | ip->i_gdquot = NULL; |
923 | ip->i_gdquot = NULL; | 877 | } |
924 | } | 878 | xfs_iput(ip, XFS_ILOCK_EXCL); |
925 | xfs_iput(ip, XFS_ILOCK_EXCL); | 879 | IRELE(ip); |
926 | 880 | ||
927 | } while (nr_found); | 881 | return 0; |
928 | } | 882 | } |
929 | 883 | ||
884 | |||
930 | /* | 885 | /* |
931 | * Go thru all the inodes in the file system, releasing their dquots. | 886 | * Go thru all the inodes in the file system, releasing their dquots. |
887 | * | ||
932 | * Note that the mount structure gets modified to indicate that quotas are off | 888 | * Note that the mount structure gets modified to indicate that quotas are off |
933 | * AFTER this, in the case of quotaoff. This also gets called from | 889 | * AFTER this, in the case of quotaoff. |
934 | * xfs_rootumount. | ||
935 | */ | 890 | */ |
936 | void | 891 | void |
937 | xfs_qm_dqrele_all_inodes( | 892 | xfs_qm_dqrele_all_inodes( |
938 | struct xfs_mount *mp, | 893 | struct xfs_mount *mp, |
939 | uint flags) | 894 | uint flags) |
940 | { | 895 | { |
941 | int i; | ||
942 | |||
943 | ASSERT(mp->m_quotainfo); | 896 | ASSERT(mp->m_quotainfo); |
944 | for (i = 0; i < mp->m_sb.sb_agcount; i++) { | 897 | xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG); |
945 | if (!mp->m_perag[i].pag_ici_init) | ||
946 | continue; | ||
947 | xfs_qm_dqrele_inodes_ag(mp, i, flags); | ||
948 | } | ||
949 | } | 898 | } |
950 | 899 | ||
951 | /*------------------------------------------------------------------------*/ | 900 | /*------------------------------------------------------------------------*/ |
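The xfs_qm_syscalls.c hunk above replaces the open-coded per-AG radix-tree gang lookup with a per-inode callback (xfs_dqrele_inode) driven by the generic xfs_inode_ag_iterator(). A minimal user-space sketch of that iterator-plus-callback split is shown below; the names item, walk_all and release_cb are invented for illustration and are not XFS symbols.

#include <stdio.h>

struct item {
    int id;
    int resource;              /* stands in for a held dquot reference */
};

/* Per-item callback; returns 0 on success, in the spirit of xfs_dqrele_inode. */
static int release_cb(struct item *it, void *arg)
{
    int flags = *(int *)arg;

    if (flags && it->resource) {
        it->resource = 0;      /* "release" the per-item resource */
        printf("released item %d\n", it->id);
    }
    return 0;
}

/* Generic walker: the moral equivalent of an AG-wide inode iterator. */
static int walk_all(struct item *items, int nr,
                    int (*execute)(struct item *, void *), void *arg)
{
    int i, error;

    for (i = 0; i < nr; i++) {
        error = execute(&items[i], arg);
        if (error)
            return error;      /* stop on the first failure */
    }
    return 0;
}

int main(void)
{
    struct item items[] = { {1, 1}, {2, 1}, {3, 0} };
    int flags = 1;

    return walk_all(items, 3, release_cb, &flags);
}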
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 447173bcf96d..97ac9640be98 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_rtalloc.h" | 42 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 43 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_trans_priv.h" | 47 | #include "xfs_trans_priv.h" |
@@ -111,7 +110,7 @@ xfs_trans_log_dquot( | |||
111 | * Carry forward whatever is left of the quota blk reservation to | 110 | * Carry forward whatever is left of the quota blk reservation to |
112 | * the spanky new transaction | 111 | * the spanky new transaction |
113 | */ | 112 | */ |
114 | STATIC void | 113 | void |
115 | xfs_trans_dup_dqinfo( | 114 | xfs_trans_dup_dqinfo( |
116 | xfs_trans_t *otp, | 115 | xfs_trans_t *otp, |
117 | xfs_trans_t *ntp) | 116 | xfs_trans_t *ntp) |
@@ -167,19 +166,17 @@ xfs_trans_dup_dqinfo( | |||
167 | /* | 166 | /* |
168 | * Wrap around mod_dquot to account for both user and group quotas. | 167 | * Wrap around mod_dquot to account for both user and group quotas. |
169 | */ | 168 | */ |
170 | STATIC void | 169 | void |
171 | xfs_trans_mod_dquot_byino( | 170 | xfs_trans_mod_dquot_byino( |
172 | xfs_trans_t *tp, | 171 | xfs_trans_t *tp, |
173 | xfs_inode_t *ip, | 172 | xfs_inode_t *ip, |
174 | uint field, | 173 | uint field, |
175 | long delta) | 174 | long delta) |
176 | { | 175 | { |
177 | xfs_mount_t *mp; | 176 | xfs_mount_t *mp = tp->t_mountp; |
178 | |||
179 | ASSERT(tp); | ||
180 | mp = tp->t_mountp; | ||
181 | 177 | ||
182 | if (!XFS_IS_QUOTA_ON(mp) || | 178 | if (!XFS_IS_QUOTA_RUNNING(mp) || |
179 | !XFS_IS_QUOTA_ON(mp) || | ||
183 | ip->i_ino == mp->m_sb.sb_uquotino || | 180 | ip->i_ino == mp->m_sb.sb_uquotino || |
184 | ip->i_ino == mp->m_sb.sb_gquotino) | 181 | ip->i_ino == mp->m_sb.sb_gquotino) |
185 | return; | 182 | return; |
@@ -229,6 +226,7 @@ xfs_trans_mod_dquot( | |||
229 | xfs_dqtrx_t *qtrx; | 226 | xfs_dqtrx_t *qtrx; |
230 | 227 | ||
231 | ASSERT(tp); | 228 | ASSERT(tp); |
229 | ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); | ||
232 | qtrx = NULL; | 230 | qtrx = NULL; |
233 | 231 | ||
234 | if (tp->t_dqinfo == NULL) | 232 | if (tp->t_dqinfo == NULL) |
@@ -346,7 +344,7 @@ xfs_trans_dqlockedjoin( | |||
346 | * Unreserve just the reservations done by this transaction. | 344 | * Unreserve just the reservations done by this transaction. |
347 | * dquot is still left locked at exit. | 345 | * dquot is still left locked at exit. |
348 | */ | 346 | */ |
349 | STATIC void | 347 | void |
350 | xfs_trans_apply_dquot_deltas( | 348 | xfs_trans_apply_dquot_deltas( |
351 | xfs_trans_t *tp) | 349 | xfs_trans_t *tp) |
352 | { | 350 | { |
@@ -357,7 +355,7 @@ xfs_trans_apply_dquot_deltas( | |||
357 | long totalbdelta; | 355 | long totalbdelta; |
358 | long totalrtbdelta; | 356 | long totalrtbdelta; |
359 | 357 | ||
360 | if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY)) | 358 | if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY)) |
361 | return; | 359 | return; |
362 | 360 | ||
363 | ASSERT(tp->t_dqinfo); | 361 | ASSERT(tp->t_dqinfo); |
@@ -531,7 +529,7 @@ xfs_trans_apply_dquot_deltas( | |||
531 | * we simply throw those away, since that's the expected behavior | 529 | * we simply throw those away, since that's the expected behavior |
532 | * when a transaction is curtailed without a commit. | 530 | * when a transaction is curtailed without a commit. |
533 | */ | 531 | */ |
534 | STATIC void | 532 | void |
535 | xfs_trans_unreserve_and_mod_dquots( | 533 | xfs_trans_unreserve_and_mod_dquots( |
536 | xfs_trans_t *tp) | 534 | xfs_trans_t *tp) |
537 | { | 535 | { |
@@ -768,7 +766,7 @@ xfs_trans_reserve_quota_bydquots( | |||
768 | { | 766 | { |
769 | int resvd = 0, error; | 767 | int resvd = 0, error; |
770 | 768 | ||
771 | if (!XFS_IS_QUOTA_ON(mp)) | 769 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
772 | return 0; | 770 | return 0; |
773 | 771 | ||
774 | if (tp && tp->t_dqinfo == NULL) | 772 | if (tp && tp->t_dqinfo == NULL) |
@@ -811,18 +809,17 @@ xfs_trans_reserve_quota_bydquots( | |||
811 | * This doesn't change the actual usage, just the reservation. | 809 | * This doesn't change the actual usage, just the reservation. |
812 | * The inode sent in is locked. | 810 | * The inode sent in is locked. |
813 | */ | 811 | */ |
814 | STATIC int | 812 | int |
815 | xfs_trans_reserve_quota_nblks( | 813 | xfs_trans_reserve_quota_nblks( |
816 | xfs_trans_t *tp, | 814 | struct xfs_trans *tp, |
817 | xfs_mount_t *mp, | 815 | struct xfs_inode *ip, |
818 | xfs_inode_t *ip, | 816 | long nblks, |
819 | long nblks, | 817 | long ninos, |
820 | long ninos, | 818 | uint flags) |
821 | uint flags) | ||
822 | { | 819 | { |
823 | int error; | 820 | struct xfs_mount *mp = ip->i_mount; |
824 | 821 | ||
825 | if (!XFS_IS_QUOTA_ON(mp)) | 822 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
826 | return 0; | 823 | return 0; |
827 | if (XFS_IS_PQUOTA_ON(mp)) | 824 | if (XFS_IS_PQUOTA_ON(mp)) |
828 | flags |= XFS_QMOPT_ENOSPC; | 825 | flags |= XFS_QMOPT_ENOSPC; |
@@ -831,7 +828,6 @@ xfs_trans_reserve_quota_nblks( | |||
831 | ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); | 828 | ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); |
832 | 829 | ||
833 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 830 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
834 | ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); | ||
835 | ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == | 831 | ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == |
836 | XFS_TRANS_DQ_RES_RTBLKS || | 832 | XFS_TRANS_DQ_RES_RTBLKS || |
837 | (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == | 833 | (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == |
@@ -840,11 +836,9 @@ xfs_trans_reserve_quota_nblks( | |||
840 | /* | 836 | /* |
841 | * Reserve nblks against these dquots, with trans as the mediator. | 837 | * Reserve nblks against these dquots, with trans as the mediator. |
842 | */ | 838 | */ |
843 | error = xfs_trans_reserve_quota_bydquots(tp, mp, | 839 | return xfs_trans_reserve_quota_bydquots(tp, mp, |
844 | ip->i_udquot, ip->i_gdquot, | 840 | ip->i_udquot, ip->i_gdquot, |
845 | nblks, ninos, | 841 | nblks, ninos, flags); |
846 | flags); | ||
847 | return error; | ||
848 | } | 842 | } |
849 | 843 | ||
850 | /* | 844 | /* |
@@ -895,25 +889,15 @@ STATIC void | |||
895 | xfs_trans_alloc_dqinfo( | 889 | xfs_trans_alloc_dqinfo( |
896 | xfs_trans_t *tp) | 890 | xfs_trans_t *tp) |
897 | { | 891 | { |
898 | (tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); | 892 | tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); |
899 | } | 893 | } |
900 | 894 | ||
901 | STATIC void | 895 | void |
902 | xfs_trans_free_dqinfo( | 896 | xfs_trans_free_dqinfo( |
903 | xfs_trans_t *tp) | 897 | xfs_trans_t *tp) |
904 | { | 898 | { |
905 | if (!tp->t_dqinfo) | 899 | if (!tp->t_dqinfo) |
906 | return; | 900 | return; |
907 | kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo); | 901 | kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo); |
908 | (tp)->t_dqinfo = NULL; | 902 | tp->t_dqinfo = NULL; |
909 | } | 903 | } |
910 | |||
911 | xfs_dqtrxops_t xfs_trans_dquot_ops = { | ||
912 | .qo_dup_dqinfo = xfs_trans_dup_dqinfo, | ||
913 | .qo_free_dqinfo = xfs_trans_free_dqinfo, | ||
914 | .qo_mod_dquot_byino = xfs_trans_mod_dquot_byino, | ||
915 | .qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas, | ||
916 | .qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks, | ||
917 | .qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots, | ||
918 | .qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots, | ||
919 | }; | ||
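With the xfs_dqtrxops_t indirection table removed, the transaction-dquot entry points above become extern and each one now guards itself with XFS_IS_QUOTA_RUNNING()/XFS_IS_QUOTA_ON() before doing any work. The short sketch below shows that guard-early pattern in isolation, under invented names (struct mount, reserve_quota_blocks); it is not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the quota state flags; names are illustrative only. */
struct mount { bool quota_running; bool quota_on; };

/*
 * With no ops table filtering callers, each entry point checks the feature
 * state itself and returns success as a no-op when quota is inactive.
 */
static int reserve_quota_blocks(struct mount *mp, long nblks)
{
    if (!mp->quota_running || !mp->quota_on)
        return 0;               /* quota inactive: nothing to reserve */

    printf("reserving %ld blocks against quota\n", nblks);
    return 0;
}

int main(void)
{
    struct mount m = { .quota_running = false, .quota_on = false };

    /* Safe to call unconditionally; the guard turns it into a no-op here. */
    return reserve_quota_blocks(&m, 8);
}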
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c deleted file mode 100644 index a8cdd73999a4..000000000000 --- a/fs/xfs/xfs_acl.c +++ /dev/null | |||
@@ -1,874 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_types.h" | ||
21 | #include "xfs_bit.h" | ||
22 | #include "xfs_inum.h" | ||
23 | #include "xfs_ag.h" | ||
24 | #include "xfs_dir2.h" | ||
25 | #include "xfs_bmap_btree.h" | ||
26 | #include "xfs_alloc_btree.h" | ||
27 | #include "xfs_ialloc_btree.h" | ||
28 | #include "xfs_dir2_sf.h" | ||
29 | #include "xfs_attr_sf.h" | ||
30 | #include "xfs_dinode.h" | ||
31 | #include "xfs_inode.h" | ||
32 | #include "xfs_btree.h" | ||
33 | #include "xfs_acl.h" | ||
34 | #include "xfs_attr.h" | ||
35 | #include "xfs_vnodeops.h" | ||
36 | |||
37 | #include <linux/capability.h> | ||
38 | #include <linux/posix_acl_xattr.h> | ||
39 | |||
40 | STATIC int xfs_acl_setmode(struct inode *, xfs_acl_t *, int *); | ||
41 | STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *); | ||
42 | STATIC void xfs_acl_get_endian(xfs_acl_t *); | ||
43 | STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *); | ||
44 | STATIC int xfs_acl_invalid(xfs_acl_t *); | ||
45 | STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *); | ||
46 | STATIC void xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *); | ||
47 | STATIC void xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *); | ||
48 | STATIC int xfs_acl_allow_set(struct inode *, int); | ||
49 | |||
50 | kmem_zone_t *xfs_acl_zone; | ||
51 | |||
52 | |||
53 | /* | ||
54 | * Test for existence of access ACL attribute as efficiently as possible. | ||
55 | */ | ||
56 | int | ||
57 | xfs_acl_vhasacl_access( | ||
58 | struct inode *vp) | ||
59 | { | ||
60 | int error; | ||
61 | |||
62 | xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error); | ||
63 | return (error == 0); | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Test for existence of default ACL attribute as efficiently as possible. | ||
68 | */ | ||
69 | int | ||
70 | xfs_acl_vhasacl_default( | ||
71 | struct inode *vp) | ||
72 | { | ||
73 | int error; | ||
74 | |||
75 | if (!S_ISDIR(vp->i_mode)) | ||
76 | return 0; | ||
77 | xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error); | ||
78 | return (error == 0); | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * Convert from extended attribute representation to in-memory for XFS. | ||
83 | */ | ||
84 | STATIC int | ||
85 | posix_acl_xattr_to_xfs( | ||
86 | posix_acl_xattr_header *src, | ||
87 | size_t size, | ||
88 | xfs_acl_t *dest) | ||
89 | { | ||
90 | posix_acl_xattr_entry *src_entry; | ||
91 | xfs_acl_entry_t *dest_entry; | ||
92 | int n; | ||
93 | |||
94 | if (!src || !dest) | ||
95 | return EINVAL; | ||
96 | |||
97 | if (size < sizeof(posix_acl_xattr_header)) | ||
98 | return EINVAL; | ||
99 | |||
100 | if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION)) | ||
101 | return EOPNOTSUPP; | ||
102 | |||
103 | memset(dest, 0, sizeof(xfs_acl_t)); | ||
104 | dest->acl_cnt = posix_acl_xattr_count(size); | ||
105 | if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES) | ||
106 | return EINVAL; | ||
107 | |||
108 | /* | ||
109 | * acl_set_file(3) may request that we set default ACLs with | ||
110 | * zero length -- defend (gracefully) against that here. | ||
111 | */ | ||
112 | if (!dest->acl_cnt) | ||
113 | return 0; | ||
114 | |||
115 | src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src)); | ||
116 | dest_entry = &dest->acl_entry[0]; | ||
117 | |||
118 | for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) { | ||
119 | dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm); | ||
120 | if (_ACL_PERM_INVALID(dest_entry->ae_perm)) | ||
121 | return EINVAL; | ||
122 | dest_entry->ae_tag = le16_to_cpu(src_entry->e_tag); | ||
123 | switch(dest_entry->ae_tag) { | ||
124 | case ACL_USER: | ||
125 | case ACL_GROUP: | ||
126 | dest_entry->ae_id = le32_to_cpu(src_entry->e_id); | ||
127 | break; | ||
128 | case ACL_USER_OBJ: | ||
129 | case ACL_GROUP_OBJ: | ||
130 | case ACL_MASK: | ||
131 | case ACL_OTHER: | ||
132 | dest_entry->ae_id = ACL_UNDEFINED_ID; | ||
133 | break; | ||
134 | default: | ||
135 | return EINVAL; | ||
136 | } | ||
137 | } | ||
138 | if (xfs_acl_invalid(dest)) | ||
139 | return EINVAL; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Comparison function called from xfs_sort(). | ||
146 | * Primary key is ae_tag, secondary key is ae_id. | ||
147 | */ | ||
148 | STATIC int | ||
149 | xfs_acl_entry_compare( | ||
150 | const void *va, | ||
151 | const void *vb) | ||
152 | { | ||
153 | xfs_acl_entry_t *a = (xfs_acl_entry_t *)va, | ||
154 | *b = (xfs_acl_entry_t *)vb; | ||
155 | |||
156 | if (a->ae_tag == b->ae_tag) | ||
157 | return (a->ae_id - b->ae_id); | ||
158 | return (a->ae_tag - b->ae_tag); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Convert from in-memory XFS to extended attribute representation. | ||
163 | */ | ||
164 | STATIC int | ||
165 | posix_acl_xfs_to_xattr( | ||
166 | xfs_acl_t *src, | ||
167 | posix_acl_xattr_header *dest, | ||
168 | size_t size) | ||
169 | { | ||
170 | int n; | ||
171 | size_t new_size = posix_acl_xattr_size(src->acl_cnt); | ||
172 | posix_acl_xattr_entry *dest_entry; | ||
173 | xfs_acl_entry_t *src_entry; | ||
174 | |||
175 | if (size < new_size) | ||
176 | return -ERANGE; | ||
177 | |||
178 | /* Need to sort src XFS ACL by <ae_tag,ae_id> */ | ||
179 | xfs_sort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]), | ||
180 | xfs_acl_entry_compare); | ||
181 | |||
182 | dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION); | ||
183 | dest_entry = &dest->a_entries[0]; | ||
184 | src_entry = &src->acl_entry[0]; | ||
185 | for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) { | ||
186 | dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm); | ||
187 | if (_ACL_PERM_INVALID(src_entry->ae_perm)) | ||
188 | return -EINVAL; | ||
189 | dest_entry->e_tag = cpu_to_le16(src_entry->ae_tag); | ||
190 | switch (src_entry->ae_tag) { | ||
191 | case ACL_USER: | ||
192 | case ACL_GROUP: | ||
193 | dest_entry->e_id = cpu_to_le32(src_entry->ae_id); | ||
194 | break; | ||
195 | case ACL_USER_OBJ: | ||
196 | case ACL_GROUP_OBJ: | ||
197 | case ACL_MASK: | ||
198 | case ACL_OTHER: | ||
199 | dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID); | ||
200 | break; | ||
201 | default: | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | } | ||
205 | return new_size; | ||
206 | } | ||
207 | |||
208 | int | ||
209 | xfs_acl_vget( | ||
210 | struct inode *vp, | ||
211 | void *acl, | ||
212 | size_t size, | ||
213 | int kind) | ||
214 | { | ||
215 | int error; | ||
216 | xfs_acl_t *xfs_acl = NULL; | ||
217 | posix_acl_xattr_header *ext_acl = acl; | ||
218 | int flags = 0; | ||
219 | |||
220 | if(size) { | ||
221 | if (!(_ACL_ALLOC(xfs_acl))) { | ||
222 | error = ENOMEM; | ||
223 | goto out; | ||
224 | } | ||
225 | memset(xfs_acl, 0, sizeof(xfs_acl_t)); | ||
226 | } else | ||
227 | flags = ATTR_KERNOVAL; | ||
228 | |||
229 | xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error); | ||
230 | if (error) | ||
231 | goto out; | ||
232 | |||
233 | if (!size) { | ||
234 | error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES); | ||
235 | } else { | ||
236 | if (xfs_acl_invalid(xfs_acl)) { | ||
237 | error = EINVAL; | ||
238 | goto out; | ||
239 | } | ||
240 | if (kind == _ACL_TYPE_ACCESS) | ||
241 | xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl); | ||
242 | error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); | ||
243 | } | ||
244 | out: | ||
245 | if(xfs_acl) | ||
246 | _ACL_FREE(xfs_acl); | ||
247 | return -error; | ||
248 | } | ||
249 | |||
250 | int | ||
251 | xfs_acl_vremove( | ||
252 | struct inode *vp, | ||
253 | int kind) | ||
254 | { | ||
255 | int error; | ||
256 | |||
257 | error = xfs_acl_allow_set(vp, kind); | ||
258 | if (!error) { | ||
259 | error = xfs_attr_remove(XFS_I(vp), | ||
260 | kind == _ACL_TYPE_DEFAULT? | ||
261 | SGI_ACL_DEFAULT: SGI_ACL_FILE, | ||
262 | ATTR_ROOT); | ||
263 | if (error == ENOATTR) | ||
264 | error = 0; /* 'scool */ | ||
265 | } | ||
266 | return -error; | ||
267 | } | ||
268 | |||
269 | int | ||
270 | xfs_acl_vset( | ||
271 | struct inode *vp, | ||
272 | void *acl, | ||
273 | size_t size, | ||
274 | int kind) | ||
275 | { | ||
276 | posix_acl_xattr_header *ext_acl = acl; | ||
277 | xfs_acl_t *xfs_acl; | ||
278 | int error; | ||
279 | int basicperms = 0; /* more than std unix perms? */ | ||
280 | |||
281 | if (!acl) | ||
282 | return -EINVAL; | ||
283 | |||
284 | if (!(_ACL_ALLOC(xfs_acl))) | ||
285 | return -ENOMEM; | ||
286 | |||
287 | error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl); | ||
288 | if (error) { | ||
289 | _ACL_FREE(xfs_acl); | ||
290 | return -error; | ||
291 | } | ||
292 | if (!xfs_acl->acl_cnt) { | ||
293 | _ACL_FREE(xfs_acl); | ||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | error = xfs_acl_allow_set(vp, kind); | ||
298 | |||
299 | /* Incoming ACL exists, set file mode based on its value */ | ||
300 | if (!error && kind == _ACL_TYPE_ACCESS) | ||
301 | error = xfs_acl_setmode(vp, xfs_acl, &basicperms); | ||
302 | |||
303 | if (error) | ||
304 | goto out; | ||
305 | |||
306 | /* | ||
307 | * If we have more than std unix permissions, set up the actual attr. | ||
308 | * Otherwise, delete any existing attr. This prevents us from | ||
309 | * having actual attrs for permissions that can be stored in the | ||
310 | * standard permission bits. | ||
311 | */ | ||
312 | if (!basicperms) { | ||
313 | xfs_acl_set_attr(vp, xfs_acl, kind, &error); | ||
314 | } else { | ||
315 | error = -xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); | ||
316 | } | ||
317 | |||
318 | out: | ||
319 | _ACL_FREE(xfs_acl); | ||
320 | return -error; | ||
321 | } | ||
322 | |||
323 | int | ||
324 | xfs_acl_iaccess( | ||
325 | xfs_inode_t *ip, | ||
326 | mode_t mode, | ||
327 | cred_t *cr) | ||
328 | { | ||
329 | xfs_acl_t *acl; | ||
330 | int rval; | ||
331 | struct xfs_name acl_name = {SGI_ACL_FILE, SGI_ACL_FILE_SIZE}; | ||
332 | |||
333 | if (!(_ACL_ALLOC(acl))) | ||
334 | return -1; | ||
335 | |||
336 | /* If the file has no ACL return -1. */ | ||
337 | rval = sizeof(xfs_acl_t); | ||
338 | if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval, ATTR_ROOT)) { | ||
339 | _ACL_FREE(acl); | ||
340 | return -1; | ||
341 | } | ||
342 | xfs_acl_get_endian(acl); | ||
343 | |||
344 | /* If the file has an empty ACL return -1. */ | ||
345 | if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) { | ||
346 | _ACL_FREE(acl); | ||
347 | return -1; | ||
348 | } | ||
349 | |||
350 | /* Synchronize ACL with mode bits */ | ||
351 | xfs_acl_sync_mode(ip->i_d.di_mode, acl); | ||
352 | |||
353 | rval = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr); | ||
354 | _ACL_FREE(acl); | ||
355 | return rval; | ||
356 | } | ||
357 | |||
358 | STATIC int | ||
359 | xfs_acl_allow_set( | ||
360 | struct inode *vp, | ||
361 | int kind) | ||
362 | { | ||
363 | if (vp->i_flags & (S_IMMUTABLE|S_APPEND)) | ||
364 | return EPERM; | ||
365 | if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode)) | ||
366 | return ENOTDIR; | ||
367 | if (vp->i_sb->s_flags & MS_RDONLY) | ||
368 | return EROFS; | ||
369 | if (XFS_I(vp)->i_d.di_uid != current_fsuid() && !capable(CAP_FOWNER)) | ||
370 | return EPERM; | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * Note: cr is only used here for the capability check if the ACL test fails. | ||
376 | * It is not used to find out the credentials uid or groups etc, as was | ||
377 | * done in IRIX. It is assumed that the uid and groups for the current | ||
378 | * thread are taken from "current" instead of the cr parameter. | ||
379 | */ | ||
380 | STATIC int | ||
381 | xfs_acl_access( | ||
382 | uid_t fuid, | ||
383 | gid_t fgid, | ||
384 | xfs_acl_t *fap, | ||
385 | mode_t md, | ||
386 | cred_t *cr) | ||
387 | { | ||
388 | xfs_acl_entry_t matched; | ||
389 | int i, allows; | ||
390 | int maskallows = -1; /* true, but not 1, either */ | ||
391 | int seen_userobj = 0; | ||
392 | |||
393 | matched.ae_tag = 0; /* Invalid type */ | ||
394 | matched.ae_perm = 0; | ||
395 | |||
396 | for (i = 0; i < fap->acl_cnt; i++) { | ||
397 | /* | ||
398 | * Break out if we've got a user_obj entry or | ||
399 | * a user entry and the mask (and have processed USER_OBJ) | ||
400 | */ | ||
401 | if (matched.ae_tag == ACL_USER_OBJ) | ||
402 | break; | ||
403 | if (matched.ae_tag == ACL_USER) { | ||
404 | if (maskallows != -1 && seen_userobj) | ||
405 | break; | ||
406 | if (fap->acl_entry[i].ae_tag != ACL_MASK && | ||
407 | fap->acl_entry[i].ae_tag != ACL_USER_OBJ) | ||
408 | continue; | ||
409 | } | ||
410 | /* True if this entry allows the requested access */ | ||
411 | allows = ((fap->acl_entry[i].ae_perm & md) == md); | ||
412 | |||
413 | switch (fap->acl_entry[i].ae_tag) { | ||
414 | case ACL_USER_OBJ: | ||
415 | seen_userobj = 1; | ||
416 | if (fuid != current_fsuid()) | ||
417 | continue; | ||
418 | matched.ae_tag = ACL_USER_OBJ; | ||
419 | matched.ae_perm = allows; | ||
420 | break; | ||
421 | case ACL_USER: | ||
422 | if (fap->acl_entry[i].ae_id != current_fsuid()) | ||
423 | continue; | ||
424 | matched.ae_tag = ACL_USER; | ||
425 | matched.ae_perm = allows; | ||
426 | break; | ||
427 | case ACL_GROUP_OBJ: | ||
428 | if ((matched.ae_tag == ACL_GROUP_OBJ || | ||
429 | matched.ae_tag == ACL_GROUP) && !allows) | ||
430 | continue; | ||
431 | if (!in_group_p(fgid)) | ||
432 | continue; | ||
433 | matched.ae_tag = ACL_GROUP_OBJ; | ||
434 | matched.ae_perm = allows; | ||
435 | break; | ||
436 | case ACL_GROUP: | ||
437 | if ((matched.ae_tag == ACL_GROUP_OBJ || | ||
438 | matched.ae_tag == ACL_GROUP) && !allows) | ||
439 | continue; | ||
440 | if (!in_group_p(fap->acl_entry[i].ae_id)) | ||
441 | continue; | ||
442 | matched.ae_tag = ACL_GROUP; | ||
443 | matched.ae_perm = allows; | ||
444 | break; | ||
445 | case ACL_MASK: | ||
446 | maskallows = allows; | ||
447 | break; | ||
448 | case ACL_OTHER: | ||
449 | if (matched.ae_tag != 0) | ||
450 | continue; | ||
451 | matched.ae_tag = ACL_OTHER; | ||
452 | matched.ae_perm = allows; | ||
453 | break; | ||
454 | } | ||
455 | } | ||
456 | /* | ||
457 | * First possibility is that no matched entry allows access. | ||
458 | * The capability to override DAC may exist, so check for it. | ||
459 | */ | ||
460 | switch (matched.ae_tag) { | ||
461 | case ACL_OTHER: | ||
462 | case ACL_USER_OBJ: | ||
463 | if (matched.ae_perm) | ||
464 | return 0; | ||
465 | break; | ||
466 | case ACL_USER: | ||
467 | case ACL_GROUP_OBJ: | ||
468 | case ACL_GROUP: | ||
469 | if (maskallows && matched.ae_perm) | ||
470 | return 0; | ||
471 | break; | ||
472 | case 0: | ||
473 | break; | ||
474 | } | ||
475 | |||
476 | /* EACCES tells generic_permission to check for capability overrides */ | ||
477 | return EACCES; | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | * ACL validity checker. | ||
482 | * This acl validation routine checks each ACL entry read in makes sense. | ||
483 | */ | ||
484 | STATIC int | ||
485 | xfs_acl_invalid( | ||
486 | xfs_acl_t *aclp) | ||
487 | { | ||
488 | xfs_acl_entry_t *entry, *e; | ||
489 | int user = 0, group = 0, other = 0, mask = 0; | ||
490 | int mask_required = 0; | ||
491 | int i, j; | ||
492 | |||
493 | if (!aclp) | ||
494 | goto acl_invalid; | ||
495 | |||
496 | if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES) | ||
497 | goto acl_invalid; | ||
498 | |||
499 | for (i = 0; i < aclp->acl_cnt; i++) { | ||
500 | entry = &aclp->acl_entry[i]; | ||
501 | switch (entry->ae_tag) { | ||
502 | case ACL_USER_OBJ: | ||
503 | if (user++) | ||
504 | goto acl_invalid; | ||
505 | break; | ||
506 | case ACL_GROUP_OBJ: | ||
507 | if (group++) | ||
508 | goto acl_invalid; | ||
509 | break; | ||
510 | case ACL_OTHER: | ||
511 | if (other++) | ||
512 | goto acl_invalid; | ||
513 | break; | ||
514 | case ACL_USER: | ||
515 | case ACL_GROUP: | ||
516 | for (j = i + 1; j < aclp->acl_cnt; j++) { | ||
517 | e = &aclp->acl_entry[j]; | ||
518 | if (e->ae_id == entry->ae_id && | ||
519 | e->ae_tag == entry->ae_tag) | ||
520 | goto acl_invalid; | ||
521 | } | ||
522 | mask_required++; | ||
523 | break; | ||
524 | case ACL_MASK: | ||
525 | if (mask++) | ||
526 | goto acl_invalid; | ||
527 | break; | ||
528 | default: | ||
529 | goto acl_invalid; | ||
530 | } | ||
531 | } | ||
532 | if (!user || !group || !other || (mask_required && !mask)) | ||
533 | goto acl_invalid; | ||
534 | else | ||
535 | return 0; | ||
536 | acl_invalid: | ||
537 | return EINVAL; | ||
538 | } | ||
539 | |||
540 | /* | ||
541 | * Do ACL endian conversion. | ||
542 | */ | ||
543 | STATIC void | ||
544 | xfs_acl_get_endian( | ||
545 | xfs_acl_t *aclp) | ||
546 | { | ||
547 | xfs_acl_entry_t *ace, *end; | ||
548 | |||
549 | INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); | ||
550 | end = &aclp->acl_entry[0]+aclp->acl_cnt; | ||
551 | for (ace = &aclp->acl_entry[0]; ace < end; ace++) { | ||
552 | INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag); | ||
553 | INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id); | ||
554 | INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm); | ||
555 | } | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * Get the ACL from the EA and do endian conversion. | ||
560 | */ | ||
561 | STATIC void | ||
562 | xfs_acl_get_attr( | ||
563 | struct inode *vp, | ||
564 | xfs_acl_t *aclp, | ||
565 | int kind, | ||
566 | int flags, | ||
567 | int *error) | ||
568 | { | ||
569 | int len = sizeof(xfs_acl_t); | ||
570 | |||
571 | ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1); | ||
572 | flags |= ATTR_ROOT; | ||
573 | *error = xfs_attr_get(XFS_I(vp), | ||
574 | kind == _ACL_TYPE_ACCESS ? | ||
575 | SGI_ACL_FILE : SGI_ACL_DEFAULT, | ||
576 | (char *)aclp, &len, flags); | ||
577 | if (*error || (flags & ATTR_KERNOVAL)) | ||
578 | return; | ||
579 | xfs_acl_get_endian(aclp); | ||
580 | } | ||
581 | |||
582 | /* | ||
583 | * Set the EA with the ACL and do endian conversion. | ||
584 | */ | ||
585 | STATIC void | ||
586 | xfs_acl_set_attr( | ||
587 | struct inode *vp, | ||
588 | xfs_acl_t *aclp, | ||
589 | int kind, | ||
590 | int *error) | ||
591 | { | ||
592 | xfs_acl_entry_t *ace, *newace, *end; | ||
593 | xfs_acl_t *newacl; | ||
594 | int len; | ||
595 | |||
596 | if (!(_ACL_ALLOC(newacl))) { | ||
597 | *error = ENOMEM; | ||
598 | return; | ||
599 | } | ||
600 | |||
601 | len = sizeof(xfs_acl_t) - | ||
602 | (sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt)); | ||
603 | end = &aclp->acl_entry[0]+aclp->acl_cnt; | ||
604 | for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0]; | ||
605 | ace < end; | ||
606 | ace++, newace++) { | ||
607 | INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag); | ||
608 | INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id); | ||
609 | INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm); | ||
610 | } | ||
611 | INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); | ||
612 | *error = xfs_attr_set(XFS_I(vp), | ||
613 | kind == _ACL_TYPE_ACCESS ? | ||
614 | SGI_ACL_FILE: SGI_ACL_DEFAULT, | ||
615 | (char *)newacl, len, ATTR_ROOT); | ||
616 | _ACL_FREE(newacl); | ||
617 | } | ||
618 | |||
619 | int | ||
620 | xfs_acl_vtoacl( | ||
621 | struct inode *vp, | ||
622 | xfs_acl_t *access_acl, | ||
623 | xfs_acl_t *default_acl) | ||
624 | { | ||
625 | int error = 0; | ||
626 | |||
627 | if (access_acl) { | ||
628 | /* | ||
629 | * Get the Access ACL and the mode. If either cannot | ||
630 | * be obtained for some reason, invalidate the access ACL. | ||
631 | */ | ||
632 | xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error); | ||
633 | if (error) | ||
634 | access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; | ||
635 | else /* We have a good ACL and the file mode, synchronize. */ | ||
636 | xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl); | ||
637 | } | ||
638 | |||
639 | if (default_acl) { | ||
640 | xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error); | ||
641 | if (error) | ||
642 | default_acl->acl_cnt = XFS_ACL_NOT_PRESENT; | ||
643 | } | ||
644 | return error; | ||
645 | } | ||
646 | |||
647 | /* | ||
648 | * This function retrieves the parent directory's acl, processes it | ||
649 | * and lets the child inherit the acl(s) that it should. | ||
650 | */ | ||
651 | int | ||
652 | xfs_acl_inherit( | ||
653 | struct inode *vp, | ||
654 | mode_t mode, | ||
655 | xfs_acl_t *pdaclp) | ||
656 | { | ||
657 | xfs_acl_t *cacl; | ||
658 | int error = 0; | ||
659 | int basicperms = 0; | ||
660 | |||
661 | /* | ||
662 | * If the parent does not have a default ACL, or it's an | ||
663 | * invalid ACL, we're done. | ||
664 | */ | ||
665 | if (!vp) | ||
666 | return 0; | ||
667 | if (!pdaclp || xfs_acl_invalid(pdaclp)) | ||
668 | return 0; | ||
669 | |||
670 | /* | ||
671 | * Copy the default ACL of the containing directory to | ||
672 | * the access ACL of the new file and use the mode that | ||
673 | * was passed in to set up the correct initial values for | ||
674 | * the u::,g::[m::], and o:: entries. This is what makes | ||
675 | * umask() "work" with ACLs. | ||
676 | */ | ||
677 | |||
678 | if (!(_ACL_ALLOC(cacl))) | ||
679 | return ENOMEM; | ||
680 | |||
681 | memcpy(cacl, pdaclp, sizeof(xfs_acl_t)); | ||
682 | xfs_acl_filter_mode(mode, cacl); | ||
683 | error = xfs_acl_setmode(vp, cacl, &basicperms); | ||
684 | if (error) | ||
685 | goto out_error; | ||
686 | |||
687 | /* | ||
688 | * Set the Default and Access ACL on the file. The mode is already | ||
689 | * set on the file, so we don't need to worry about that. | ||
690 | * | ||
691 | * If the new file is a directory, its default ACL is a copy of | ||
692 | * the containing directory's default ACL. | ||
693 | */ | ||
694 | if (S_ISDIR(vp->i_mode)) | ||
695 | xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); | ||
696 | if (!error && !basicperms) | ||
697 | xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); | ||
698 | out_error: | ||
699 | _ACL_FREE(cacl); | ||
700 | return error; | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * Set up the correct mode on the file based on the supplied ACL. This | ||
705 | * makes sure that the mode on the file reflects the state of the | ||
706 | * u::,g::[m::], and o:: entries in the ACL. Since the mode is where | ||
707 | * the ACL is going to get the permissions for these entries, we must | ||
708 | * synchronize the mode whenever we set the ACL on a file. | ||
709 | */ | ||
710 | STATIC int | ||
711 | xfs_acl_setmode( | ||
712 | struct inode *vp, | ||
713 | xfs_acl_t *acl, | ||
714 | int *basicperms) | ||
715 | { | ||
716 | struct iattr iattr; | ||
717 | xfs_acl_entry_t *ap; | ||
718 | xfs_acl_entry_t *gap = NULL; | ||
719 | int i, nomask = 1; | ||
720 | |||
721 | *basicperms = 1; | ||
722 | |||
723 | if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) | ||
724 | return 0; | ||
725 | |||
726 | /* | ||
727 | * Copy the u::, g::, o::, and m:: bits from the ACL into the | ||
728 | * mode. The m:: bits take precedence over the g:: bits. | ||
729 | */ | ||
730 | iattr.ia_valid = ATTR_MODE; | ||
731 | iattr.ia_mode = XFS_I(vp)->i_d.di_mode; | ||
732 | iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); | ||
733 | ap = acl->acl_entry; | ||
734 | for (i = 0; i < acl->acl_cnt; ++i) { | ||
735 | switch (ap->ae_tag) { | ||
736 | case ACL_USER_OBJ: | ||
737 | iattr.ia_mode |= ap->ae_perm << 6; | ||
738 | break; | ||
739 | case ACL_GROUP_OBJ: | ||
740 | gap = ap; | ||
741 | break; | ||
742 | case ACL_MASK: /* more than just standard modes */ | ||
743 | nomask = 0; | ||
744 | iattr.ia_mode |= ap->ae_perm << 3; | ||
745 | *basicperms = 0; | ||
746 | break; | ||
747 | case ACL_OTHER: | ||
748 | iattr.ia_mode |= ap->ae_perm; | ||
749 | break; | ||
750 | default: /* more than just standard modes */ | ||
751 | *basicperms = 0; | ||
752 | break; | ||
753 | } | ||
754 | ap++; | ||
755 | } | ||
756 | |||
757 | /* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */ | ||
758 | if (gap && nomask) | ||
759 | iattr.ia_mode |= gap->ae_perm << 3; | ||
760 | |||
761 | return xfs_setattr(XFS_I(vp), &iattr, 0); | ||
762 | } | ||
763 | |||
764 | /* | ||
765 | * The permissions for the special ACL entries (u::, g::[m::], o::) are | ||
766 | * actually stored in the file mode (if there is both a group and a mask, | ||
767 | * the group is stored in the ACL entry and the mask is stored on the file). | ||
768 | * This allows the mode to remain automatically in sync with the ACL without | ||
769 | * the need for a call-back to the ACL system at every point where the mode | ||
770 | * could change. This function takes the permissions from the specified mode | ||
771 | * and places it in the supplied ACL. | ||
772 | * | ||
773 | * This implementation draws its validity from the fact that, when the ACL | ||
774 | * was assigned, the mode was copied from the ACL. | ||
775 | * If the mode did not change, therefore, the mode remains exactly what was | ||
776 | * taken from the special ACL entries at assignment. | ||
777 | * If a subsequent chmod() was done, the POSIX spec says that the change in | ||
778 | * mode must cause an update to the ACL seen at user level and used for | ||
779 | * access checks. Before and after a mode change, therefore, the file mode | ||
780 | * most accurately reflects what the special ACL entries should permit/deny. | ||
781 | * | ||
782 | * CAVEAT: If someone sets the SGI_ACL_FILE attribute directly, | ||
783 | * the existing mode bits will override whatever is in the | ||
784 | * ACL. Similarly, if there is a pre-existing ACL that was | ||
785 | * never in sync with its mode (owing to a bug in 6.5 and | ||
786 | * before), it will now magically (or mystically) be | ||
787 | * synchronized. This could cause slight astonishment, but | ||
788 | * it is better than inconsistent permissions. | ||
789 | * | ||
790 | * The supplied ACL is a template that may contain any combination | ||
791 | * of special entries. These are treated as place holders when we fill | ||
792 | * out the ACL. This routine does not add or remove special entries, it | ||
793 | * simply unites each special entry with its associated set of permissions. | ||
794 | */ | ||
795 | STATIC void | ||
796 | xfs_acl_sync_mode( | ||
797 | mode_t mode, | ||
798 | xfs_acl_t *acl) | ||
799 | { | ||
800 | int i, nomask = 1; | ||
801 | xfs_acl_entry_t *ap; | ||
802 | xfs_acl_entry_t *gap = NULL; | ||
803 | |||
804 | /* | ||
805 | * Set ACL entries. POSIX1003.1eD16 requires that the MASK | ||
806 | * be set instead of the GROUP entry, if there is a MASK. | ||
807 | */ | ||
808 | for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) { | ||
809 | switch (ap->ae_tag) { | ||
810 | case ACL_USER_OBJ: | ||
811 | ap->ae_perm = (mode >> 6) & 0x7; | ||
812 | break; | ||
813 | case ACL_GROUP_OBJ: | ||
814 | gap = ap; | ||
815 | break; | ||
816 | case ACL_MASK: | ||
817 | nomask = 0; | ||
818 | ap->ae_perm = (mode >> 3) & 0x7; | ||
819 | break; | ||
820 | case ACL_OTHER: | ||
821 | ap->ae_perm = mode & 0x7; | ||
822 | break; | ||
823 | default: | ||
824 | break; | ||
825 | } | ||
826 | } | ||
827 | /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */ | ||
828 | if (gap && nomask) | ||
829 | gap->ae_perm = (mode >> 3) & 0x7; | ||
830 | } | ||
831 | |||
832 | /* | ||
833 | * When inheriting an Access ACL from a directory Default ACL, | ||
834 | * the ACL bits are set to the intersection of the ACL default | ||
835 | * permission bits and the file permission bits in mode. If there | ||
836 | * are no permission bits on the file then we must not give them | ||
837 | * the ACL. This is what makes umask() work with ACLs. | ||
838 | */ | ||
839 | STATIC void | ||
840 | xfs_acl_filter_mode( | ||
841 | mode_t mode, | ||
842 | xfs_acl_t *acl) | ||
843 | { | ||
844 | int i, nomask = 1; | ||
845 | xfs_acl_entry_t *ap; | ||
846 | xfs_acl_entry_t *gap = NULL; | ||
847 | |||
848 | /* | ||
849 | * Set ACL entries. POSIX1003.1eD16 requires that the MASK | ||
850 | * be merged with GROUP entry, if there is a MASK. | ||
851 | */ | ||
852 | for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) { | ||
853 | switch (ap->ae_tag) { | ||
854 | case ACL_USER_OBJ: | ||
855 | ap->ae_perm &= (mode >> 6) & 0x7; | ||
856 | break; | ||
857 | case ACL_GROUP_OBJ: | ||
858 | gap = ap; | ||
859 | break; | ||
860 | case ACL_MASK: | ||
861 | nomask = 0; | ||
862 | ap->ae_perm &= (mode >> 3) & 0x7; | ||
863 | break; | ||
864 | case ACL_OTHER: | ||
865 | ap->ae_perm &= mode & 0x7; | ||
866 | break; | ||
867 | default: | ||
868 | break; | ||
869 | } | ||
870 | } | ||
871 | /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */ | ||
872 | if (gap && nomask) | ||
873 | gap->ae_perm &= (mode >> 3) & 0x7; | ||
874 | } | ||
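The deleted xfs_acl_sync_mode() and xfs_acl_filter_mode() above keep the special ACL entries (u::, g::/m::, o::) in step with the file mode using plain shift-and-mask arithmetic. The sketch below reproduces only that arithmetic as a standalone user-space program; the variable names are invented and nothing here is XFS code.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    mode_t mode = 0640;                  /* rw-r----- */

    /* The triplets that sync_mode copies into the special ACL entries. */
    unsigned user  = (mode >> 6) & 0x7;  /* u:: entry                        */
    unsigned group = (mode >> 3) & 0x7;  /* g:: entry (or m:: if a mask exists) */
    unsigned other =  mode       & 0x7;  /* o:: entry                        */

    printf("u::%o g::%o o::%o\n", user, group, other);

    /*
     * filter_mode does the converse intersection when inheriting a default
     * ACL: each inherited permission is ANDed with the mode bits, which is
     * how a umask-restricted mode also restricts the inherited ACL.
     */
    unsigned inherited_group_perm = 07;  /* rwx from the parent's default ACL */
    unsigned filtered = inherited_group_perm & group;
    printf("inherited g:: filtered by mode: %o\n", filtered);

    return 0;
}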
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index 642f1db4def4..63dc1f2efad5 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h | |||
@@ -18,81 +18,48 @@ | |||
18 | #ifndef __XFS_ACL_H__ | 18 | #ifndef __XFS_ACL_H__ |
19 | #define __XFS_ACL_H__ | 19 | #define __XFS_ACL_H__ |
20 | 20 | ||
21 | /* | 21 | struct inode; |
22 | * Access Control Lists | 22 | struct posix_acl; |
23 | */ | 23 | struct xfs_inode; |
24 | typedef __uint16_t xfs_acl_perm_t; | ||
25 | typedef __int32_t xfs_acl_tag_t; | ||
26 | typedef __int32_t xfs_acl_id_t; | ||
27 | 24 | ||
28 | #define XFS_ACL_MAX_ENTRIES 25 | 25 | #define XFS_ACL_MAX_ENTRIES 25 |
29 | #define XFS_ACL_NOT_PRESENT (-1) | 26 | #define XFS_ACL_NOT_PRESENT (-1) |
30 | 27 | ||
31 | typedef struct xfs_acl_entry { | 28 | /* On-disk XFS access control list structure */ |
32 | xfs_acl_tag_t ae_tag; | 29 | struct xfs_acl { |
33 | xfs_acl_id_t ae_id; | 30 | __be32 acl_cnt; |
34 | xfs_acl_perm_t ae_perm; | 31 | struct xfs_acl_entry { |
35 | } xfs_acl_entry_t; | 32 | __be32 ae_tag; |
36 | 33 | __be32 ae_id; | |
37 | typedef struct xfs_acl { | 34 | __be16 ae_perm; |
38 | __int32_t acl_cnt; | 35 | } acl_entry[XFS_ACL_MAX_ENTRIES]; |
39 | xfs_acl_entry_t acl_entry[XFS_ACL_MAX_ENTRIES]; | 36 | }; |
40 | } xfs_acl_t; | ||
41 | 37 | ||
42 | /* On-disk XFS extended attribute names */ | 38 | /* On-disk XFS extended attribute names */ |
43 | #define SGI_ACL_FILE "SGI_ACL_FILE" | 39 | #define SGI_ACL_FILE "SGI_ACL_FILE" |
44 | #define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" | 40 | #define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" |
45 | #define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) | 41 | #define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) |
46 | #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) | 42 | #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) |
47 | 43 | ||
48 | #define _ACL_TYPE_ACCESS 1 | ||
49 | #define _ACL_TYPE_DEFAULT 2 | ||
50 | |||
51 | #ifdef CONFIG_XFS_POSIX_ACL | 44 | #ifdef CONFIG_XFS_POSIX_ACL |
45 | extern int xfs_check_acl(struct inode *inode, int mask); | ||
46 | extern struct posix_acl *xfs_get_acl(struct inode *inode, int type); | ||
47 | extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl); | ||
48 | extern int xfs_acl_chmod(struct inode *inode); | ||
49 | extern void xfs_inode_init_acls(struct xfs_inode *ip); | ||
50 | extern void xfs_inode_clear_acls(struct xfs_inode *ip); | ||
51 | extern int posix_acl_access_exists(struct inode *inode); | ||
52 | extern int posix_acl_default_exists(struct inode *inode); | ||
52 | 53 | ||
53 | struct vattr; | 54 | extern struct xattr_handler xfs_xattr_system_handler; |
54 | struct xfs_inode; | ||
55 | |||
56 | extern struct kmem_zone *xfs_acl_zone; | ||
57 | #define xfs_acl_zone_init(zone, name) \ | ||
58 | (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) | ||
59 | #define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) | ||
60 | |||
61 | extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *); | ||
62 | extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); | ||
63 | extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *); | ||
64 | extern int xfs_acl_vhasacl_access(struct inode *); | ||
65 | extern int xfs_acl_vhasacl_default(struct inode *); | ||
66 | extern int xfs_acl_vset(struct inode *, void *, size_t, int); | ||
67 | extern int xfs_acl_vget(struct inode *, void *, size_t, int); | ||
68 | extern int xfs_acl_vremove(struct inode *, int); | ||
69 | |||
70 | #define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) | ||
71 | |||
72 | #define _ACL_INHERIT(c,m,d) (xfs_acl_inherit(c,m,d)) | ||
73 | #define _ACL_GET_ACCESS(pv,pa) (xfs_acl_vtoacl(pv,pa,NULL) == 0) | ||
74 | #define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0) | ||
75 | #define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access | ||
76 | #define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default | ||
77 | |||
78 | #define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP)) | ||
79 | #define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0) | ||
80 | |||
81 | #else | 55 | #else |
82 | #define xfs_acl_zone_init(zone,name) | 56 | # define xfs_check_acl NULL |
83 | #define xfs_acl_zone_destroy(zone) | 57 | # define xfs_get_acl(inode, type) NULL |
84 | #define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP) | 58 | # define xfs_inherit_acl(inode, default_acl) 0 |
85 | #define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP) | 59 | # define xfs_acl_chmod(inode) 0 |
86 | #define xfs_acl_vremove(v,t) (-EOPNOTSUPP) | 60 | # define xfs_inode_init_acls(ip) |
87 | #define xfs_acl_vhasacl_access(v) (0) | 61 | # define xfs_inode_clear_acls(ip) |
88 | #define xfs_acl_vhasacl_default(v) (0) | 62 | # define posix_acl_access_exists(inode) 0 |
89 | #define _ACL_ALLOC(a) (1) /* successfully allocate nothing */ | 63 | # define posix_acl_default_exists(inode) 0 |
90 | #define _ACL_FREE(a) ((void)0) | 64 | #endif /* CONFIG_XFS_POSIX_ACL */ |
91 | #define _ACL_INHERIT(c,m,d) (0) | ||
92 | #define _ACL_GET_ACCESS(pv,pa) (0) | ||
93 | #define _ACL_GET_DEFAULT(pv,pd) (0) | ||
94 | #define _ACL_ACCESS_EXISTS (NULL) | ||
95 | #define _ACL_DEFAULT_EXISTS (NULL) | ||
96 | #endif | ||
97 | |||
98 | #endif /* __XFS_ACL_H__ */ | 65 | #endif /* __XFS_ACL_H__ */ |
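The replacement xfs_acl.h declares the on-disk ACL with fixed big-endian fields (__be32/__be16), so conversions become explicit at the point of use rather than hidden behind the old INT_SET/ARCH_CONVERT macros. A rough user-space analogue is sketched below, using ntohl()/ntohs() where kernel code would use be32_to_cpu()/be16_to_cpu(); struct disk_acl_entry is an illustrative stand-in, not the kernel structure.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* User-space mirror of the on-disk idea: every field stored big-endian. */
struct disk_acl_entry {
    uint32_t ae_tag;    /* __be32 on disk */
    uint32_t ae_id;     /* __be32 on disk */
    uint16_t ae_perm;   /* __be16 on disk */
};

int main(void)
{
    /* Pretend this came off disk: tag=2, id=1000, perm=6 (rw-). */
    struct disk_acl_entry e = {
        .ae_tag  = htonl(2),
        .ae_id   = htonl(1000),
        .ae_perm = htons(6),
    };

    /* ntohl/ntohs play the role of be32_to_cpu/be16_to_cpu in this sketch. */
    printf("tag=%u id=%u perm=%o\n",
           (unsigned)ntohl(e.ae_tag),
           (unsigned)ntohl(e.ae_id),
           (unsigned)ntohs(e.ae_perm));
    return 0;
}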
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index c8641f713caa..f24b50b68d03 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h | |||
@@ -212,6 +212,8 @@ typedef struct xfs_perag | |||
212 | /* | 212 | /* |
213 | * tags for inode radix tree | 213 | * tags for inode radix tree |
214 | */ | 214 | */ |
215 | #define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup | ||
216 | in xfs_inode_ag_iterator */ | ||
215 | #define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */ | 217 | #define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */ |
216 | 218 | ||
217 | #define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) | 219 | #define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) |
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h index 53d5e70d1360..0902249354a0 100644 --- a/fs/xfs/xfs_arch.h +++ b/fs/xfs/xfs_arch.h | |||
@@ -73,28 +73,6 @@ static inline void be64_add_cpu(__be64 *a, __s64 b) | |||
73 | 73 | ||
74 | #endif /* __KERNEL__ */ | 74 | #endif /* __KERNEL__ */ |
75 | 75 | ||
76 | /* do we need conversion? */ | ||
77 | #define ARCH_NOCONVERT 1 | ||
78 | #ifdef XFS_NATIVE_HOST | ||
79 | # define ARCH_CONVERT ARCH_NOCONVERT | ||
80 | #else | ||
81 | # define ARCH_CONVERT 0 | ||
82 | #endif | ||
83 | |||
84 | /* generic swapping macros */ | ||
85 | |||
86 | #ifndef HAVE_SWABMACROS | ||
87 | #define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var)))) | ||
88 | #define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var)))) | ||
89 | #define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var)))) | ||
90 | #endif | ||
91 | |||
92 | #define INT_SWAP(type, var) \ | ||
93 | ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \ | ||
94 | ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \ | ||
95 | ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \ | ||
96 | (var)))) | ||
97 | |||
98 | /* | 76 | /* |
99 | * get and set integers from potentially unaligned locations | 77 | * get and set integers from potentially unaligned locations |
100 | */ | 78 | */ |
@@ -107,16 +85,6 @@ static inline void be64_add_cpu(__be64 *a, __s64 b) | |||
107 | ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ | 85 | ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ |
108 | } | 86 | } |
109 | 87 | ||
110 | /* does not return a value */ | ||
111 | #define INT_SET(reference,arch,valueref) \ | ||
112 | (__builtin_constant_p(valueref) ? \ | ||
113 | (void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \ | ||
114 | (void)( \ | ||
115 | ((reference) = (valueref)), \ | ||
116 | ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \ | ||
117 | ) \ | ||
118 | ) | ||
119 | |||
120 | /* | 88 | /* |
121 | * In directories inode numbers are stored as unaligned arrays of unsigned | 89 | * In directories inode numbers are stored as unaligned arrays of unsigned |
122 | * 8bit integers on disk. | 90 | * 8bit integers on disk. |
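The xfs_arch.h hunk drops the generic ARCH_CONVERT/INT_SWAP/INT_SET machinery and keeps only byte-wise helpers for potentially unaligned on-disk integers. The snippet below shows that byte-wise big-endian style in isolation as a user-space sketch; get_unaligned_be16/put_unaligned_be16 here are local illustrative functions written for this example, not the kernel's helpers.

#include <stdint.h>
#include <stdio.h>

/* Byte-wise big-endian 16-bit accessors that work at any alignment. */
static uint16_t get_unaligned_be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

static void put_unaligned_be16(uint8_t *p, uint16_t v)
{
    p[0] = (uint8_t)(v >> 8);
    p[1] = (uint8_t)(v & 0xff);
}

int main(void)
{
    uint8_t buf[3] = { 0 };

    /* Store at an odd offset to show that alignment does not matter. */
    put_unaligned_be16(buf + 1, 0xBEEF);
    printf("0x%04x\n", get_unaligned_be16(buf + 1));
    return 0;
}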
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 5fde1654b430..db15feb906ff 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include "xfs_error.h" | 45 | #include "xfs_error.h" |
46 | #include "xfs_quota.h" | 46 | #include "xfs_quota.h" |
47 | #include "xfs_trans_space.h" | 47 | #include "xfs_trans_space.h" |
48 | #include "xfs_acl.h" | ||
49 | #include "xfs_rw.h" | 48 | #include "xfs_rw.h" |
50 | #include "xfs_vnodeops.h" | 49 | #include "xfs_vnodeops.h" |
51 | 50 | ||
@@ -249,8 +248,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
249 | /* | 248 | /* |
250 | * Attach the dquots to the inode. | 249 | * Attach the dquots to the inode. |
251 | */ | 250 | */ |
252 | if ((error = XFS_QM_DQATTACH(mp, dp, 0))) | 251 | error = xfs_qm_dqattach(dp, 0); |
253 | return (error); | 252 | if (error) |
253 | return error; | ||
254 | 254 | ||
255 | /* | 255 | /* |
256 | * If the inode doesn't have an attribute fork, add one. | 256 | * If the inode doesn't have an attribute fork, add one. |
@@ -311,7 +311,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
311 | } | 311 | } |
312 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 312 | xfs_ilock(dp, XFS_ILOCK_EXCL); |
313 | 313 | ||
314 | error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0, | 314 | error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0, |
315 | rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : | 315 | rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : |
316 | XFS_QMOPT_RES_REGBLKS); | 316 | XFS_QMOPT_RES_REGBLKS); |
317 | if (error) { | 317 | if (error) { |
@@ -501,8 +501,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) | |||
501 | /* | 501 | /* |
502 | * Attach the dquots to the inode. | 502 | * Attach the dquots to the inode. |
503 | */ | 503 | */ |
504 | if ((error = XFS_QM_DQATTACH(mp, dp, 0))) | 504 | error = xfs_qm_dqattach(dp, 0); |
505 | return (error); | 505 | if (error) |
506 | return error; | ||
506 | 507 | ||
507 | /* | 508 | /* |
508 | * Start our first transaction of the day. | 509 | * Start our first transaction of the day. |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index ca7c6005a487..7928b9983c1d 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -2691,7 +2691,7 @@ xfs_bmap_rtalloc( | |||
2691 | * Adjust the disk quota also. This was reserved | 2691 | * Adjust the disk quota also. This was reserved |
2692 | * earlier. | 2692 | * earlier. |
2693 | */ | 2693 | */ |
2694 | XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, | 2694 | xfs_trans_mod_dquot_byino(ap->tp, ap->ip, |
2695 | ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : | 2695 | ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : |
2696 | XFS_TRANS_DQ_RTBCOUNT, (long) ralen); | 2696 | XFS_TRANS_DQ_RTBCOUNT, (long) ralen); |
2697 | } else { | 2697 | } else { |
@@ -2995,7 +2995,7 @@ xfs_bmap_btalloc( | |||
2995 | * Adjust the disk quota also. This was reserved | 2995 | * Adjust the disk quota also. This was reserved |
2996 | * earlier. | 2996 | * earlier. |
2997 | */ | 2997 | */ |
2998 | XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, | 2998 | xfs_trans_mod_dquot_byino(ap->tp, ap->ip, |
2999 | ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : | 2999 | ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : |
3000 | XFS_TRANS_DQ_BCOUNT, | 3000 | XFS_TRANS_DQ_BCOUNT, |
3001 | (long) args.len); | 3001 | (long) args.len); |
@@ -3066,7 +3066,7 @@ xfs_bmap_btree_to_extents( | |||
3066 | return error; | 3066 | return error; |
3067 | xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); | 3067 | xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); |
3068 | ip->i_d.di_nblocks--; | 3068 | ip->i_d.di_nblocks--; |
3069 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); | 3069 | xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); |
3070 | xfs_trans_binval(tp, cbp); | 3070 | xfs_trans_binval(tp, cbp); |
3071 | if (cur->bc_bufs[0] == cbp) | 3071 | if (cur->bc_bufs[0] == cbp) |
3072 | cur->bc_bufs[0] = NULL; | 3072 | cur->bc_bufs[0] = NULL; |
@@ -3386,7 +3386,7 @@ xfs_bmap_del_extent( | |||
3386 | * Adjust quota data. | 3386 | * Adjust quota data. |
3387 | */ | 3387 | */ |
3388 | if (qfield) | 3388 | if (qfield) |
3389 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks); | 3389 | xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); |
3390 | 3390 | ||
3391 | /* | 3391 | /* |
3392 | * Account for change in delayed indirect blocks. | 3392 | * Account for change in delayed indirect blocks. |
@@ -3523,7 +3523,7 @@ xfs_bmap_extents_to_btree( | |||
3523 | *firstblock = cur->bc_private.b.firstblock = args.fsbno; | 3523 | *firstblock = cur->bc_private.b.firstblock = args.fsbno; |
3524 | cur->bc_private.b.allocated++; | 3524 | cur->bc_private.b.allocated++; |
3525 | ip->i_d.di_nblocks++; | 3525 | ip->i_d.di_nblocks++; |
3526 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); | 3526 | xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); |
3527 | abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); | 3527 | abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); |
3528 | /* | 3528 | /* |
3529 | * Fill in the child block. | 3529 | * Fill in the child block. |
@@ -3690,7 +3690,7 @@ xfs_bmap_local_to_extents( | |||
3690 | XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); | 3690 | XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); |
3691 | XFS_IFORK_NEXT_SET(ip, whichfork, 1); | 3691 | XFS_IFORK_NEXT_SET(ip, whichfork, 1); |
3692 | ip->i_d.di_nblocks = 1; | 3692 | ip->i_d.di_nblocks = 1; |
3693 | XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, | 3693 | xfs_trans_mod_dquot_byino(tp, ip, |
3694 | XFS_TRANS_DQ_BCOUNT, 1L); | 3694 | XFS_TRANS_DQ_BCOUNT, 1L); |
3695 | flags |= xfs_ilog_fext(whichfork); | 3695 | flags |= xfs_ilog_fext(whichfork); |
3696 | } else { | 3696 | } else { |
@@ -4048,7 +4048,7 @@ xfs_bmap_add_attrfork( | |||
4048 | XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT))) | 4048 | XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT))) |
4049 | goto error0; | 4049 | goto error0; |
4050 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 4050 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
4051 | error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ? | 4051 | error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? |
4052 | XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : | 4052 | XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : |
4053 | XFS_QMOPT_RES_REGBLKS); | 4053 | XFS_QMOPT_RES_REGBLKS); |
4054 | if (error) { | 4054 | if (error) { |
@@ -4983,10 +4983,11 @@ xfs_bmapi( | |||
4983 | * adjusted later. We return if we haven't | 4983 | * adjusted later. We return if we haven't |
4984 | * allocated blocks already inside this loop. | 4984 | * allocated blocks already inside this loop. |
4985 | */ | 4985 | */ |
4986 | if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS( | 4986 | error = xfs_trans_reserve_quota_nblks( |
4987 | mp, NULL, ip, (long)alen, 0, | 4987 | NULL, ip, (long)alen, 0, |
4988 | rt ? XFS_QMOPT_RES_RTBLKS : | 4988 | rt ? XFS_QMOPT_RES_RTBLKS : |
4989 | XFS_QMOPT_RES_REGBLKS))) { | 4989 | XFS_QMOPT_RES_REGBLKS); |
4990 | if (error) { | ||
4990 | if (n == 0) { | 4991 | if (n == 0) { |
4991 | *nmap = 0; | 4992 | *nmap = 0; |
4992 | ASSERT(cur == NULL); | 4993 | ASSERT(cur == NULL); |
@@ -5035,8 +5036,8 @@ xfs_bmapi( | |||
5035 | if (XFS_IS_QUOTA_ON(mp)) | 5036 | if (XFS_IS_QUOTA_ON(mp)) |
5036 | /* unreserve the blocks now */ | 5037 | /* unreserve the blocks now */ |
5037 | (void) | 5038 | (void) |
5038 | XFS_TRANS_UNRESERVE_QUOTA_NBLKS( | 5039 | xfs_trans_unreserve_quota_nblks( |
5039 | mp, NULL, ip, | 5040 | NULL, ip, |
5040 | (long)alen, 0, rt ? | 5041 | (long)alen, 0, rt ? |
5041 | XFS_QMOPT_RES_RTBLKS : | 5042 | XFS_QMOPT_RES_RTBLKS : |
5042 | XFS_QMOPT_RES_REGBLKS); | 5043 | XFS_QMOPT_RES_REGBLKS); |
@@ -5691,14 +5692,14 @@ xfs_bunmapi( | |||
5691 | do_div(rtexts, mp->m_sb.sb_rextsize); | 5692 | do_div(rtexts, mp->m_sb.sb_rextsize); |
5692 | xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, | 5693 | xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, |
5693 | (int64_t)rtexts, rsvd); | 5694 | (int64_t)rtexts, rsvd); |
5694 | (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, | 5695 | (void)xfs_trans_reserve_quota_nblks(NULL, |
5695 | NULL, ip, -((long)del.br_blockcount), 0, | 5696 | ip, -((long)del.br_blockcount), 0, |
5696 | XFS_QMOPT_RES_RTBLKS); | 5697 | XFS_QMOPT_RES_RTBLKS); |
5697 | } else { | 5698 | } else { |
5698 | xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, | 5699 | xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, |
5699 | (int64_t)del.br_blockcount, rsvd); | 5700 | (int64_t)del.br_blockcount, rsvd); |
5700 | (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, | 5701 | (void)xfs_trans_reserve_quota_nblks(NULL, |
5701 | NULL, ip, -((long)del.br_blockcount), 0, | 5702 | ip, -((long)del.br_blockcount), 0, |
5702 | XFS_QMOPT_RES_REGBLKS); | 5703 | XFS_QMOPT_RES_REGBLKS); |
5703 | } | 5704 | } |
5704 | ip->i_delayed_blks -= del.br_blockcount; | 5705 | ip->i_delayed_blks -= del.br_blockcount; |
@@ -6085,6 +6086,7 @@ xfs_getbmap( | |||
6085 | break; | 6086 | break; |
6086 | } | 6087 | } |
6087 | 6088 | ||
6089 | kmem_free(out); | ||
6088 | return error; | 6090 | return error; |
6089 | } | 6091 | } |
6090 | 6092 | ||
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 0760d352586f..5c1ade06578e 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c | |||
@@ -590,7 +590,7 @@ xfs_bmbt_alloc_block( | |||
590 | cur->bc_private.b.allocated++; | 590 | cur->bc_private.b.allocated++; |
591 | cur->bc_private.b.ip->i_d.di_nblocks++; | 591 | cur->bc_private.b.ip->i_d.di_nblocks++; |
592 | xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); | 592 | xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); |
593 | XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, | 593 | xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip, |
594 | XFS_TRANS_DQ_BCOUNT, 1L); | 594 | XFS_TRANS_DQ_BCOUNT, 1L); |
595 | 595 | ||
596 | new->l = cpu_to_be64(args.fsbno); | 596 | new->l = cpu_to_be64(args.fsbno); |
@@ -618,7 +618,7 @@ xfs_bmbt_free_block( | |||
618 | ip->i_d.di_nblocks--; | 618 | ip->i_d.di_nblocks--; |
619 | 619 | ||
620 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 620 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
621 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); | 621 | xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); |
622 | xfs_trans_binval(tp, bp); | 622 | xfs_trans_binval(tp, bp); |
623 | return 0; | 623 | return 0; |
624 | } | 624 | } |
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index 6c87c8f304ef..edf8bdf4141f 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -542,10 +542,8 @@ xfs_filestream_associate( | |||
542 | * waiting for the lock because someone else is waiting on the lock we | 542 | * waiting for the lock because someone else is waiting on the lock we |
543 | * hold and we cannot drop that as we are in a transaction here. | 543 | * hold and we cannot drop that as we are in a transaction here. |
544 | * | 544 | * |
545 | * Lucky for us, this inversion is rarely a problem because it's a | 545 | * Lucky for us, this inversion is not a problem because it's a |
546 | * directory inode that we are trying to lock here and that means the | 546 | * directory inode that we are trying to lock here. |
547 | * only place that matters is xfs_sync_inodes() and SYNC_DELWRI is | ||
548 | * used. i.e. freeze, remount-ro, quotasync or unmount. | ||
549 | * | 547 | * |
550 | * So, if we can't get the iolock without sleeping then just give up | 548 | * So, if we can't get the iolock without sleeping then just give up |
551 | */ | 549 | */ |
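The shortened comment in xfs_filestream_associate() keeps the same strategy: taking the directory iolock while already holding another lock would invert the usual ordering, so the code only trylocks and gives up rather than sleep. The same trylock-or-bail pattern, sketched with POSIX mutexes rather than the XFS locking API:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The caller already holds a lock that dir_lock normally nests outside of,
 * so blocking here could deadlock.  Try once and give up instead of sleeping.
 */
static bool do_work_if_uncontended(void)
{
        if (pthread_mutex_trylock(&dir_lock) != 0)
                return false;           /* would have to sleep; give up */

        /* ... work that needs dir_lock ... */

        pthread_mutex_unlock(&dir_lock);
        return true;
}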
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index f7c06fac8229..c4ea51b55dce 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h | |||
@@ -239,10 +239,13 @@ typedef struct xfs_fsop_resblks { | |||
239 | * Minimum and maximum sizes needed for growth checks | 239 | * Minimum and maximum sizes needed for growth checks |
240 | */ | 240 | */ |
241 | #define XFS_MIN_AG_BLOCKS 64 | 241 | #define XFS_MIN_AG_BLOCKS 64 |
242 | #define XFS_MIN_LOG_BLOCKS 512 | 242 | #define XFS_MIN_LOG_BLOCKS 512ULL |
243 | #define XFS_MAX_LOG_BLOCKS (64 * 1024) | 243 | #define XFS_MAX_LOG_BLOCKS (1024 * 1024ULL) |
244 | #define XFS_MIN_LOG_BYTES (256 * 1024) | 244 | #define XFS_MIN_LOG_BYTES (10 * 1024 * 1024ULL) |
245 | #define XFS_MAX_LOG_BYTES (128 * 1024 * 1024) | 245 | |
246 | /* keep the maximum size under 2^31 by a small amount */ | ||
247 | #define XFS_MAX_LOG_BYTES \ | ||
248 | ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES) | ||
246 | 249 | ||
247 | /* | 250 | /* |
248 | * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT | 251 | * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT |
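The new limits in xfs_fs.h are easiest to read as arithmetic: the minimum log grows to 10 MiB, the block-count ceiling to 1,048,576 blocks, and the byte ceiling becomes 2 GiB minus that 10 MiB minimum, i.e. 2,136,997,888 bytes, which keeps the maximum a little under 2^31 as the comment says. A quick stand-alone sanity check of those expressions:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long long min_log_bytes = 10 * 1024 * 1024ULL;
        unsigned long long max_log_bytes =
                (2 * 1024 * 1024 * 1024ULL) - min_log_bytes;

        assert(max_log_bytes == 2136997888ULL);
        assert(max_log_bytes < (1ULL << 31));   /* stays under 2^31 */

        printf("min=%llu bytes, max=%llu bytes\n", min_log_bytes, max_log_bytes);
        return 0;
}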
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 89b81eedce6a..76c540f719e4 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_fs.h" | 19 | #include "xfs_fs.h" |
20 | #include "xfs_types.h" | 20 | #include "xfs_types.h" |
21 | #include "xfs_acl.h" | ||
21 | #include "xfs_bit.h" | 22 | #include "xfs_bit.h" |
22 | #include "xfs_log.h" | 23 | #include "xfs_log.h" |
23 | #include "xfs_inum.h" | 24 | #include "xfs_inum.h" |
@@ -82,6 +83,7 @@ xfs_inode_alloc( | |||
82 | memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); | 83 | memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); |
83 | ip->i_size = 0; | 84 | ip->i_size = 0; |
84 | ip->i_new_size = 0; | 85 | ip->i_new_size = 0; |
86 | xfs_inode_init_acls(ip); | ||
85 | 87 | ||
86 | /* | 88 | /* |
87 | * Initialize inode's trace buffers. | 89 | * Initialize inode's trace buffers. |
@@ -500,10 +502,7 @@ xfs_ireclaim( | |||
500 | * ilock one but will still hold the iolock. | 502 | * ilock one but will still hold the iolock. |
501 | */ | 503 | */ |
502 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 504 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
503 | /* | 505 | xfs_qm_dqdetach(ip); |
504 | * Release dquots (and their references) if any. | ||
505 | */ | ||
506 | XFS_QM_DQDETACH(ip->i_mount, ip); | ||
507 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 506 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
508 | 507 | ||
509 | switch (ip->i_d.di_mode & S_IFMT) { | 508 | switch (ip->i_d.di_mode & S_IFMT) { |
@@ -561,6 +560,7 @@ xfs_ireclaim( | |||
561 | ASSERT(atomic_read(&ip->i_pincount) == 0); | 560 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
562 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); | 561 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); |
563 | ASSERT(completion_done(&ip->i_flush)); | 562 | ASSERT(completion_done(&ip->i_flush)); |
563 | xfs_inode_clear_acls(ip); | ||
564 | kmem_zone_free(xfs_inode_zone, ip); | 564 | kmem_zone_free(xfs_inode_zone, ip); |
565 | } | 565 | } |
566 | 566 | ||
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 123b20c8cbf2..1f22d65fed0a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include "xfs_utils.h" | 49 | #include "xfs_utils.h" |
50 | #include "xfs_dir2_trace.h" | 50 | #include "xfs_dir2_trace.h" |
51 | #include "xfs_quota.h" | 51 | #include "xfs_quota.h" |
52 | #include "xfs_acl.h" | ||
53 | #include "xfs_filestream.h" | 52 | #include "xfs_filestream.h" |
54 | #include "xfs_vnodeops.h" | 53 | #include "xfs_vnodeops.h" |
55 | 54 | ||
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index f879c1bc4b96..77016702938b 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #ifndef __XFS_INODE_H__ | 18 | #ifndef __XFS_INODE_H__ |
19 | #define __XFS_INODE_H__ | 19 | #define __XFS_INODE_H__ |
20 | 20 | ||
21 | struct posix_acl; | ||
21 | struct xfs_dinode; | 22 | struct xfs_dinode; |
22 | struct xfs_inode; | 23 | struct xfs_inode; |
23 | 24 | ||
@@ -272,6 +273,11 @@ typedef struct xfs_inode { | |||
272 | /* VFS inode */ | 273 | /* VFS inode */ |
273 | struct inode i_vnode; /* embedded VFS inode */ | 274 | struct inode i_vnode; /* embedded VFS inode */ |
274 | 275 | ||
276 | #ifdef CONFIG_XFS_POSIX_ACL | ||
277 | struct posix_acl *i_acl; | ||
278 | struct posix_acl *i_default_acl; | ||
279 | #endif | ||
280 | |||
275 | /* Trace buffers per inode. */ | 281 | /* Trace buffers per inode. */ |
276 | #ifdef XFS_INODE_TRACE | 282 | #ifdef XFS_INODE_TRACE |
277 | struct ktrace *i_trace; /* general inode trace */ | 283 | struct ktrace *i_trace; /* general inode trace */ |
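With CONFIG_XFS_POSIX_ACL enabled the XFS inode now embeds two cached struct posix_acl pointers, and xfs_iget.c initialises them in xfs_inode_alloc() and drops them in xfs_ireclaim(); with the option off the fields do not exist at all. A tiny sketch of that conditional-field pattern under a made-up config symbol:

#include <stddef.h>

struct acl;                              /* opaque stand-in for posix_acl */

struct my_inode {
#ifdef CONFIG_MY_ACL
        struct acl *i_acl;               /* cached access ACL */
        struct acl *i_default_acl;       /* cached default ACL */
#endif
        int i_other_state;               /* ... rest of the inode ... */
};

static inline void my_inode_init_acls(struct my_inode *ip)
{
#ifdef CONFIG_MY_ACL
        ip->i_acl = NULL;
        ip->i_default_acl = NULL;
#endif
        (void)ip;                        /* keep the no-ACL build warning-free */
}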
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 5aaa2d7ec155..67ae5555a30a 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 43 | #include "xfs_itable.h" |
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | ||
46 | #include "xfs_attr.h" | 45 | #include "xfs_attr.h" |
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_trans_space.h" | 47 | #include "xfs_trans_space.h" |
@@ -385,7 +384,7 @@ xfs_iomap_write_direct( | |||
385 | * Make sure that the dquots are there. This doesn't hold | 384 | * Make sure that the dquots are there. This doesn't hold |
386 | * the ilock across a disk read. | 385 | * the ilock across a disk read. |
387 | */ | 386 | */ |
388 | error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED); | 387 | error = xfs_qm_dqattach_locked(ip, 0); |
389 | if (error) | 388 | if (error) |
390 | return XFS_ERROR(error); | 389 | return XFS_ERROR(error); |
391 | 390 | ||
@@ -444,8 +443,7 @@ xfs_iomap_write_direct( | |||
444 | if (error) | 443 | if (error) |
445 | goto error_out; | 444 | goto error_out; |
446 | 445 | ||
447 | error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, | 446 | error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); |
448 | qblocks, 0, quota_flag); | ||
449 | if (error) | 447 | if (error) |
450 | goto error1; | 448 | goto error1; |
451 | 449 | ||
@@ -495,7 +493,7 @@ xfs_iomap_write_direct( | |||
495 | 493 | ||
496 | error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ | 494 | error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ |
497 | xfs_bmap_cancel(&free_list); | 495 | xfs_bmap_cancel(&free_list); |
498 | XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag); | 496 | xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); |
499 | 497 | ||
500 | error1: /* Just cancel transaction */ | 498 | error1: /* Just cancel transaction */ |
501 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 499 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); |
@@ -582,7 +580,7 @@ xfs_iomap_write_delay( | |||
582 | * Make sure that the dquots are there. This doesn't hold | 580 | * Make sure that the dquots are there. This doesn't hold |
583 | * the ilock across a disk read. | 581 | * the ilock across a disk read. |
584 | */ | 582 | */ |
585 | error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); | 583 | error = xfs_qm_dqattach_locked(ip, 0); |
586 | if (error) | 584 | if (error) |
587 | return XFS_ERROR(error); | 585 | return XFS_ERROR(error); |
588 | 586 | ||
@@ -684,7 +682,8 @@ xfs_iomap_write_allocate( | |||
684 | /* | 682 | /* |
685 | * Make sure that the dquots are there. | 683 | * Make sure that the dquots are there. |
686 | */ | 684 | */ |
687 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 685 | error = xfs_qm_dqattach(ip, 0); |
686 | if (error) | ||
688 | return XFS_ERROR(error); | 687 | return XFS_ERROR(error); |
689 | 688 | ||
690 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 689 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 7ba450116d4f..47da2fb45377 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1975,16 +1975,30 @@ xlog_recover_do_reg_buffer( | |||
1975 | error = 0; | 1975 | error = 0; |
1976 | if (buf_f->blf_flags & | 1976 | if (buf_f->blf_flags & |
1977 | (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { | 1977 | (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { |
1978 | if (item->ri_buf[i].i_addr == NULL) { | ||
1979 | cmn_err(CE_ALERT, | ||
1980 | "XFS: NULL dquot in %s.", __func__); | ||
1981 | goto next; | ||
1982 | } | ||
1983 | if (item->ri_buf[i].i_len < sizeof(xfs_dqblk_t)) { | ||
1984 | cmn_err(CE_ALERT, | ||
1985 | "XFS: dquot too small (%d) in %s.", | ||
1986 | item->ri_buf[i].i_len, __func__); | ||
1987 | goto next; | ||
1988 | } | ||
1978 | error = xfs_qm_dqcheck((xfs_disk_dquot_t *) | 1989 | error = xfs_qm_dqcheck((xfs_disk_dquot_t *) |
1979 | item->ri_buf[i].i_addr, | 1990 | item->ri_buf[i].i_addr, |
1980 | -1, 0, XFS_QMOPT_DOWARN, | 1991 | -1, 0, XFS_QMOPT_DOWARN, |
1981 | "dquot_buf_recover"); | 1992 | "dquot_buf_recover"); |
1993 | if (error) | ||
1994 | goto next; | ||
1982 | } | 1995 | } |
1983 | if (!error) | 1996 | |
1984 | memcpy(xfs_buf_offset(bp, | 1997 | memcpy(xfs_buf_offset(bp, |
1985 | (uint)bit << XFS_BLI_SHIFT), /* dest */ | 1998 | (uint)bit << XFS_BLI_SHIFT), /* dest */ |
1986 | item->ri_buf[i].i_addr, /* source */ | 1999 | item->ri_buf[i].i_addr, /* source */ |
1987 | nbits<<XFS_BLI_SHIFT); /* length */ | 2000 | nbits<<XFS_BLI_SHIFT); /* length */ |
2001 | next: | ||
1988 | i++; | 2002 | i++; |
1989 | bit += nbits; | 2003 | bit += nbits; |
1990 | } | 2004 | } |
@@ -2615,7 +2629,19 @@ xlog_recover_do_dquot_trans( | |||
2615 | return (0); | 2629 | return (0); |
2616 | 2630 | ||
2617 | recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; | 2631 | recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; |
2618 | ASSERT(recddq); | 2632 | |
2633 | if (item->ri_buf[1].i_addr == NULL) { | ||
2634 | cmn_err(CE_ALERT, | ||
2635 | "XFS: NULL dquot in %s.", __func__); | ||
2636 | return XFS_ERROR(EIO); | ||
2637 | } | ||
2638 | if (item->ri_buf[1].i_len < sizeof(xfs_dqblk_t)) { | ||
2639 | cmn_err(CE_ALERT, | ||
2640 | "XFS: dquot too small (%d) in %s.", | ||
2641 | item->ri_buf[1].i_len, __func__); | ||
2642 | return XFS_ERROR(EIO); | ||
2643 | } | ||
2644 | |||
2619 | /* | 2645 | /* |
2620 | * This type of quotas was turned off, so ignore this record. | 2646 | * This type of quotas was turned off, so ignore this record. |
2621 | */ | 2647 | */ |
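Both recovery paths above now refuse to use a logged dquot region unless the pointer is non-NULL and the region is at least a full xfs_dqblk_t long, so a corrupt log can no longer feed an undersized buffer into the later memcpy(). The shape of that defensive check as a stand-alone sketch, with illustrative types rather than the XFS log-item structures:

#include <stdio.h>
#include <string.h>

struct region {
        void   *addr;   /* logged data; may be NULL in a corrupt log */
        size_t  len;    /* logged length; may be short in a corrupt log */
};

/* Copy a recovered record only after validating the source region. */
static int recover_record(void *dst, size_t record_size, const struct region *r)
{
        if (r->addr == NULL) {
                fprintf(stderr, "recovery: NULL record, skipping\n");
                return -1;
        }
        if (r->len < record_size) {
                fprintf(stderr, "recovery: record too small (%zu), skipping\n",
                        r->len);
                return -1;
        }
        memcpy(dst, r->addr, record_size);
        return 0;
}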
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 65a99725d0cc..5c6f092659c1 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -960,6 +960,53 @@ xfs_check_sizes(xfs_mount_t *mp) | |||
960 | } | 960 | } |
961 | 961 | ||
962 | /* | 962 | /* |
963 | * Clear the quotaflags in memory and in the superblock. | ||
964 | */ | ||
965 | int | ||
966 | xfs_mount_reset_sbqflags( | ||
967 | struct xfs_mount *mp) | ||
968 | { | ||
969 | int error; | ||
970 | struct xfs_trans *tp; | ||
971 | |||
972 | mp->m_qflags = 0; | ||
973 | |||
974 | /* | ||
975 | * It is OK to look at sb_qflags here in mount path, | ||
976 | * without m_sb_lock. | ||
977 | */ | ||
978 | if (mp->m_sb.sb_qflags == 0) | ||
979 | return 0; | ||
980 | spin_lock(&mp->m_sb_lock); | ||
981 | mp->m_sb.sb_qflags = 0; | ||
982 | spin_unlock(&mp->m_sb_lock); | ||
983 | |||
984 | /* | ||
985 | * If the fs is readonly, let the incore superblock run | ||
986 | * with quotas off but don't flush the update out to disk | ||
987 | */ | ||
988 | if (mp->m_flags & XFS_MOUNT_RDONLY) | ||
989 | return 0; | ||
990 | |||
991 | #ifdef QUOTADEBUG | ||
992 | xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes"); | ||
993 | #endif | ||
994 | |||
995 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); | ||
996 | error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, | ||
997 | XFS_DEFAULT_LOG_COUNT); | ||
998 | if (error) { | ||
999 | xfs_trans_cancel(tp, 0); | ||
1000 | xfs_fs_cmn_err(CE_ALERT, mp, | ||
1001 | "xfs_mount_reset_sbqflags: Superblock update failed!"); | ||
1002 | return error; | ||
1003 | } | ||
1004 | |||
1005 | xfs_mod_sb(tp, XFS_SB_QFLAGS); | ||
1006 | return xfs_trans_commit(tp, 0); | ||
1007 | } | ||
1008 | |||
1009 | /* | ||
963 | * This function does the following on an initial mount of a file system: | 1010 | * This function does the following on an initial mount of a file system: |
964 | * - reads the superblock from disk and init the mount struct | 1011 | * - reads the superblock from disk and init the mount struct |
965 | * - if we're a 32-bit kernel, do a size check on the superblock | 1012 | * - if we're a 32-bit kernel, do a size check on the superblock |
@@ -976,7 +1023,8 @@ xfs_mountfs( | |||
976 | xfs_sb_t *sbp = &(mp->m_sb); | 1023 | xfs_sb_t *sbp = &(mp->m_sb); |
977 | xfs_inode_t *rip; | 1024 | xfs_inode_t *rip; |
978 | __uint64_t resblks; | 1025 | __uint64_t resblks; |
979 | uint quotamount, quotaflags; | 1026 | uint quotamount = 0; |
1027 | uint quotaflags = 0; | ||
980 | int error = 0; | 1028 | int error = 0; |
981 | 1029 | ||
982 | xfs_mount_common(mp, sbp); | 1030 | xfs_mount_common(mp, sbp); |
@@ -1210,9 +1258,28 @@ xfs_mountfs( | |||
1210 | /* | 1258 | /* |
1211 | * Initialise the XFS quota management subsystem for this mount | 1259 | * Initialise the XFS quota management subsystem for this mount |
1212 | */ | 1260 | */ |
1213 | error = XFS_QM_INIT(mp, &quotamount, &quotaflags); | 1261 | if (XFS_IS_QUOTA_RUNNING(mp)) {
1214 | if (error) | 1262 | error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
1215 | goto out_rtunmount; | 1263 | if (error) |
1264 | goto out_rtunmount; | ||
1265 | } else { | ||
1266 | ASSERT(!XFS_IS_QUOTA_ON(mp)); | ||
1267 | |||
1268 | /* | ||
1269 | * If a file system had quotas running earlier, but decided to | ||
1270 | * mount without -o uquota/pquota/gquota options, revoke the | ||
1271 | * quotachecked license. | ||
1272 | */ | ||
1273 | if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { | ||
1274 | cmn_err(CE_NOTE, | ||
1275 | "XFS: resetting qflags for filesystem %s", | ||
1276 | mp->m_fsname); | ||
1277 | |||
1278 | error = xfs_mount_reset_sbqflags(mp); | ||
1279 | if (error) | ||
1280 | return error; | ||
1281 | } | ||
1282 | } | ||
1216 | 1283 | ||
1217 | /* | 1284 | /* |
1218 | * Finish recovering the file system. This part needed to be | 1285 | * Finish recovering the file system. This part needed to be |
@@ -1228,9 +1295,19 @@ xfs_mountfs( | |||
1228 | /* | 1295 | /* |
1229 | * Complete the quota initialisation, post-log-replay component. | 1296 | * Complete the quota initialisation, post-log-replay component. |
1230 | */ | 1297 | */ |
1231 | error = XFS_QM_MOUNT(mp, quotamount, quotaflags); | 1298 | if (quotamount) { |
1232 | if (error) | 1299 | ASSERT(mp->m_qflags == 0); |
1233 | goto out_rtunmount; | 1300 | mp->m_qflags = quotaflags; |
1301 | |||
1302 | xfs_qm_mount_quotas(mp); | ||
1303 | } | ||
1304 | |||
1305 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) | ||
1306 | if (XFS_IS_QUOTA_ON(mp)) | ||
1307 | xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on"); | ||
1308 | else | ||
1309 | xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on"); | ||
1310 | #endif | ||
1234 | 1311 | ||
1235 | /* | 1312 | /* |
1236 | * Now we are mounted, reserve a small amount of unused space for | 1313 | * Now we are mounted, reserve a small amount of unused space for |
@@ -1279,12 +1356,7 @@ xfs_unmountfs( | |||
1279 | __uint64_t resblks; | 1356 | __uint64_t resblks; |
1280 | int error; | 1357 | int error; |
1281 | 1358 | ||
1282 | /* | 1359 | xfs_qm_unmount_quotas(mp); |
1283 | * Release dquot that rootinode, rbmino and rsumino might be holding, | ||
1284 | * and release the quota inodes. | ||
1285 | */ | ||
1286 | XFS_QM_UNMOUNT(mp); | ||
1287 | |||
1288 | xfs_rtunmount_inodes(mp); | 1360 | xfs_rtunmount_inodes(mp); |
1289 | IRELE(mp->m_rootip); | 1361 | IRELE(mp->m_rootip); |
1290 | 1362 | ||
@@ -1299,12 +1371,9 @@ xfs_unmountfs( | |||
1299 | * need to force the log first. | 1371 | * need to force the log first. |
1300 | */ | 1372 | */ |
1301 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); | 1373 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); |
1302 | xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_ASYNC); | 1374 | xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC); |
1303 | |||
1304 | XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); | ||
1305 | 1375 | ||
1306 | if (mp->m_quotainfo) | 1376 | xfs_qm_unmount(mp); |
1307 | XFS_QM_DONE(mp); | ||
1308 | 1377 | ||
1309 | /* | 1378 | /* |
1310 | * Flush out the log synchronously so that we know for sure | 1379 | * Flush out the log synchronously so that we know for sure |
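Quota setup in xfs_mountfs() now happens in two explicit phases: before log recovery, xfs_qm_newmount() records in quotamount/quotaflags (both initialised to zero) whether quotas still have to be brought up, or, if quota is not running but the superblock still carries accounting flags, xfs_mount_reset_sbqflags() clears them; only after recovery does xfs_qm_mount_quotas() actually turn quotas on. A condensed, hypothetical mirror of that flow, with stub helpers standing in for the real calls:

#include <stdbool.h>

struct mnt { bool quota_running; unsigned sb_qflags; unsigned qflags; };

static int reset_sb_qflags(struct mnt *m) { m->sb_qflags = 0; return 0; }
static int quota_newmount(struct mnt *m, unsigned *qmount, unsigned *qflags)
{ (void)m; *qmount = 1; *qflags = 0x1; return 0; }
static void mount_quotas(struct mnt *m, unsigned qflags) { m->qflags = qflags; }

static int mount_quota_phases(struct mnt *m)
{
        unsigned quotamount = 0, quotaflags = 0;
        int error;

        if (m->quota_running) {
                /* Phase 1: before log recovery, see what still needs doing. */
                error = quota_newmount(m, &quotamount, &quotaflags);
                if (error)
                        return error;
        } else if (m->sb_qflags) {
                /* Mounted without quota options: clear the stale flags. */
                error = reset_sb_qflags(m);
                if (error)
                        return error;
        }

        /* ... log recovery would run here ... */

        /* Phase 2: after recovery, actually turn quotas on if needed. */
        if (quotamount)
                mount_quotas(m, quotaflags);
        return 0;
}

int main(void)
{
        struct mnt m = { .quota_running = true };
        return mount_quota_phases(&m);
}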
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index d6a64392f983..a5122382afde 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -64,6 +64,8 @@ struct xfs_swapext; | |||
64 | struct xfs_mru_cache; | 64 | struct xfs_mru_cache; |
65 | struct xfs_nameops; | 65 | struct xfs_nameops; |
66 | struct xfs_ail; | 66 | struct xfs_ail; |
67 | struct xfs_quotainfo; | ||
68 | |||
67 | 69 | ||
68 | /* | 70 | /* |
69 | * Prototypes and functions for the Data Migration subsystem. | 71 | * Prototypes and functions for the Data Migration subsystem. |
@@ -107,86 +109,6 @@ typedef struct xfs_dmops { | |||
107 | (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl) | 109 | (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl) |
108 | 110 | ||
109 | 111 | ||
110 | /* | ||
111 | * Prototypes and functions for the Quota Management subsystem. | ||
112 | */ | ||
113 | |||
114 | struct xfs_dquot; | ||
115 | struct xfs_dqtrxops; | ||
116 | struct xfs_quotainfo; | ||
117 | |||
118 | typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); | ||
119 | typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint); | ||
120 | typedef void (*xfs_qmunmount_t)(struct xfs_mount *); | ||
121 | typedef void (*xfs_qmdone_t)(struct xfs_mount *); | ||
122 | typedef void (*xfs_dqrele_t)(struct xfs_dquot *); | ||
123 | typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint); | ||
124 | typedef void (*xfs_dqdetach_t)(struct xfs_inode *); | ||
125 | typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint); | ||
126 | typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *, | ||
127 | struct xfs_inode *, uid_t, gid_t, prid_t, uint, | ||
128 | struct xfs_dquot **, struct xfs_dquot **); | ||
129 | typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *, | ||
130 | struct xfs_dquot *, struct xfs_dquot *); | ||
131 | typedef int (*xfs_dqvoprename_t)(struct xfs_inode **); | ||
132 | typedef struct xfs_dquot * (*xfs_dqvopchown_t)( | ||
133 | struct xfs_trans *, struct xfs_inode *, | ||
134 | struct xfs_dquot **, struct xfs_dquot *); | ||
135 | typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *, | ||
136 | struct xfs_dquot *, struct xfs_dquot *, uint); | ||
137 | typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *); | ||
138 | typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags); | ||
139 | |||
140 | typedef struct xfs_qmops { | ||
141 | xfs_qminit_t xfs_qminit; | ||
142 | xfs_qmdone_t xfs_qmdone; | ||
143 | xfs_qmmount_t xfs_qmmount; | ||
144 | xfs_qmunmount_t xfs_qmunmount; | ||
145 | xfs_dqrele_t xfs_dqrele; | ||
146 | xfs_dqattach_t xfs_dqattach; | ||
147 | xfs_dqdetach_t xfs_dqdetach; | ||
148 | xfs_dqpurgeall_t xfs_dqpurgeall; | ||
149 | xfs_dqvopalloc_t xfs_dqvopalloc; | ||
150 | xfs_dqvopcreate_t xfs_dqvopcreate; | ||
151 | xfs_dqvoprename_t xfs_dqvoprename; | ||
152 | xfs_dqvopchown_t xfs_dqvopchown; | ||
153 | xfs_dqvopchownresv_t xfs_dqvopchownresv; | ||
154 | xfs_dqstatvfs_t xfs_dqstatvfs; | ||
155 | xfs_dqsync_t xfs_dqsync; | ||
156 | struct xfs_dqtrxops *xfs_dqtrxops; | ||
157 | } xfs_qmops_t; | ||
158 | |||
159 | #define XFS_QM_INIT(mp, mnt, fl) \ | ||
160 | (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl) | ||
161 | #define XFS_QM_MOUNT(mp, mnt, fl) \ | ||
162 | (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl) | ||
163 | #define XFS_QM_UNMOUNT(mp) \ | ||
164 | (*(mp)->m_qm_ops->xfs_qmunmount)(mp) | ||
165 | #define XFS_QM_DONE(mp) \ | ||
166 | (*(mp)->m_qm_ops->xfs_qmdone)(mp) | ||
167 | #define XFS_QM_DQRELE(mp, dq) \ | ||
168 | (*(mp)->m_qm_ops->xfs_dqrele)(dq) | ||
169 | #define XFS_QM_DQATTACH(mp, ip, fl) \ | ||
170 | (*(mp)->m_qm_ops->xfs_dqattach)(ip, fl) | ||
171 | #define XFS_QM_DQDETACH(mp, ip) \ | ||
172 | (*(mp)->m_qm_ops->xfs_dqdetach)(ip) | ||
173 | #define XFS_QM_DQPURGEALL(mp, fl) \ | ||
174 | (*(mp)->m_qm_ops->xfs_dqpurgeall)(mp, fl) | ||
175 | #define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, prid, fl, dq1, dq2) \ | ||
176 | (*(mp)->m_qm_ops->xfs_dqvopalloc)(mp, ip, uid, gid, prid, fl, dq1, dq2) | ||
177 | #define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \ | ||
178 | (*(mp)->m_qm_ops->xfs_dqvopcreate)(tp, ip, dq1, dq2) | ||
179 | #define XFS_QM_DQVOPRENAME(mp, ip) \ | ||
180 | (*(mp)->m_qm_ops->xfs_dqvoprename)(ip) | ||
181 | #define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \ | ||
182 | (*(mp)->m_qm_ops->xfs_dqvopchown)(tp, ip, dqp, dq) | ||
183 | #define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \ | ||
184 | (*(mp)->m_qm_ops->xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl) | ||
185 | #define XFS_QM_DQSTATVFS(ip, statp) \ | ||
186 | (*(ip)->i_mount->m_qm_ops->xfs_dqstatvfs)(ip, statp) | ||
187 | #define XFS_QM_DQSYNC(mp, flags) \ | ||
188 | (*(mp)->m_qm_ops->xfs_dqsync)(mp, flags) | ||
189 | |||
190 | #ifdef HAVE_PERCPU_SB | 112 | #ifdef HAVE_PERCPU_SB |
191 | 113 | ||
192 | /* | 114 | /* |
@@ -510,8 +432,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); | |||
510 | 432 | ||
511 | extern int xfs_dmops_get(struct xfs_mount *); | 433 | extern int xfs_dmops_get(struct xfs_mount *); |
512 | extern void xfs_dmops_put(struct xfs_mount *); | 434 | extern void xfs_dmops_put(struct xfs_mount *); |
513 | extern int xfs_qmops_get(struct xfs_mount *); | ||
514 | extern void xfs_qmops_put(struct xfs_mount *); | ||
515 | 435 | ||
516 | extern struct xfs_dmops xfs_dmcore_xfs; | 436 | extern struct xfs_dmops xfs_dmcore_xfs; |
517 | 437 | ||
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c deleted file mode 100644 index e101790ea8e7..000000000000 --- a/fs/xfs/xfs_qmops.c +++ /dev/null | |||
@@ -1,152 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_types.h" | ||
21 | #include "xfs_log.h" | ||
22 | #include "xfs_inum.h" | ||
23 | #include "xfs_trans.h" | ||
24 | #include "xfs_sb.h" | ||
25 | #include "xfs_ag.h" | ||
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | ||
29 | #include "xfs_quota.h" | ||
30 | #include "xfs_error.h" | ||
31 | |||
32 | |||
33 | STATIC struct xfs_dquot * | ||
34 | xfs_dqvopchown_default( | ||
35 | struct xfs_trans *tp, | ||
36 | struct xfs_inode *ip, | ||
37 | struct xfs_dquot **dqp, | ||
38 | struct xfs_dquot *dq) | ||
39 | { | ||
40 | return NULL; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Clear the quotaflags in memory and in the superblock. | ||
45 | */ | ||
46 | int | ||
47 | xfs_mount_reset_sbqflags(xfs_mount_t *mp) | ||
48 | { | ||
49 | int error; | ||
50 | xfs_trans_t *tp; | ||
51 | |||
52 | mp->m_qflags = 0; | ||
53 | /* | ||
54 | * It is OK to look at sb_qflags here in mount path, | ||
55 | * without m_sb_lock. | ||
56 | */ | ||
57 | if (mp->m_sb.sb_qflags == 0) | ||
58 | return 0; | ||
59 | spin_lock(&mp->m_sb_lock); | ||
60 | mp->m_sb.sb_qflags = 0; | ||
61 | spin_unlock(&mp->m_sb_lock); | ||
62 | |||
63 | /* | ||
64 | * if the fs is readonly, let the incore superblock run | ||
65 | * with quotas off but don't flush the update out to disk | ||
66 | */ | ||
67 | if (mp->m_flags & XFS_MOUNT_RDONLY) | ||
68 | return 0; | ||
69 | #ifdef QUOTADEBUG | ||
70 | xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes"); | ||
71 | #endif | ||
72 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); | ||
73 | if ((error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, | ||
74 | XFS_DEFAULT_LOG_COUNT))) { | ||
75 | xfs_trans_cancel(tp, 0); | ||
76 | xfs_fs_cmn_err(CE_ALERT, mp, | ||
77 | "xfs_mount_reset_sbqflags: Superblock update failed!"); | ||
78 | return error; | ||
79 | } | ||
80 | xfs_mod_sb(tp, XFS_SB_QFLAGS); | ||
81 | error = xfs_trans_commit(tp, 0); | ||
82 | return error; | ||
83 | } | ||
84 | |||
85 | STATIC int | ||
86 | xfs_noquota_init( | ||
87 | xfs_mount_t *mp, | ||
88 | uint *needquotamount, | ||
89 | uint *quotaflags) | ||
90 | { | ||
91 | int error = 0; | ||
92 | |||
93 | *quotaflags = 0; | ||
94 | *needquotamount = B_FALSE; | ||
95 | |||
96 | ASSERT(!XFS_IS_QUOTA_ON(mp)); | ||
97 | |||
98 | /* | ||
99 | * If a file system had quotas running earlier, but decided to | ||
100 | * mount without -o uquota/pquota/gquota options, revoke the | ||
101 | * quotachecked license. | ||
102 | */ | ||
103 | if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { | ||
104 | cmn_err(CE_NOTE, | ||
105 | "XFS resetting qflags for filesystem %s", | ||
106 | mp->m_fsname); | ||
107 | |||
108 | error = xfs_mount_reset_sbqflags(mp); | ||
109 | } | ||
110 | return error; | ||
111 | } | ||
112 | |||
113 | static struct xfs_qmops xfs_qmcore_stub = { | ||
114 | .xfs_qminit = (xfs_qminit_t) xfs_noquota_init, | ||
115 | .xfs_qmdone = (xfs_qmdone_t) fs_noerr, | ||
116 | .xfs_qmmount = (xfs_qmmount_t) fs_noerr, | ||
117 | .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr, | ||
118 | .xfs_dqrele = (xfs_dqrele_t) fs_noerr, | ||
119 | .xfs_dqattach = (xfs_dqattach_t) fs_noerr, | ||
120 | .xfs_dqdetach = (xfs_dqdetach_t) fs_noerr, | ||
121 | .xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr, | ||
122 | .xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr, | ||
123 | .xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr, | ||
124 | .xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr, | ||
125 | .xfs_dqvopchown = xfs_dqvopchown_default, | ||
126 | .xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr, | ||
127 | .xfs_dqstatvfs = (xfs_dqstatvfs_t) fs_noval, | ||
128 | .xfs_dqsync = (xfs_dqsync_t) fs_noerr, | ||
129 | }; | ||
130 | |||
131 | int | ||
132 | xfs_qmops_get(struct xfs_mount *mp) | ||
133 | { | ||
134 | if (XFS_IS_QUOTA_RUNNING(mp)) { | ||
135 | #ifdef CONFIG_XFS_QUOTA | ||
136 | mp->m_qm_ops = &xfs_qmcore_xfs; | ||
137 | #else | ||
138 | cmn_err(CE_WARN, | ||
139 | "XFS: quota support not available in this kernel."); | ||
140 | return EINVAL; | ||
141 | #endif | ||
142 | } else { | ||
143 | mp->m_qm_ops = &xfs_qmcore_stub; | ||
144 | } | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | void | ||
150 | xfs_qmops_put(struct xfs_mount *mp) | ||
151 | { | ||
152 | } | ||
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index f5d1202dde25..3ec91ac74c2a 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h | |||
@@ -197,7 +197,6 @@ typedef struct xfs_qoff_logformat { | |||
197 | #define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */ | 197 | #define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */ |
198 | #define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */ | 198 | #define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */ |
199 | #define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */ | 199 | #define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */ |
200 | #define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */ | ||
201 | #define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */ | 200 | #define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */ |
202 | #define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ | 201 | #define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ |
203 | #define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */ | 202 | #define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */ |
@@ -302,69 +301,79 @@ typedef struct xfs_dqtrx { | |||
302 | long qt_delrtb_delta; /* delayed RT blk count changes */ | 301 | long qt_delrtb_delta; /* delayed RT blk count changes */ |
303 | } xfs_dqtrx_t; | 302 | } xfs_dqtrx_t; |
304 | 303 | ||
305 | /* | 304 | #ifdef CONFIG_XFS_QUOTA |
306 | * Dquot transaction functions, used if quota is enabled. | 305 | extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *); |
307 | */ | 306 | extern void xfs_trans_free_dqinfo(struct xfs_trans *); |
308 | typedef void (*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *); | 307 | extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *, |
309 | typedef void (*qo_mod_dquot_byino_t)(struct xfs_trans *, | 308 | uint, long); |
310 | struct xfs_inode *, uint, long); | 309 | extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *); |
311 | typedef void (*qo_free_dqinfo_t)(struct xfs_trans *); | 310 | extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *); |
312 | typedef void (*qo_apply_dquot_deltas_t)(struct xfs_trans *); | 311 | extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *, |
313 | typedef void (*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *); | 312 | struct xfs_inode *, long, long, uint); |
314 | typedef int (*qo_reserve_quota_nblks_t)( | 313 | extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *, |
315 | struct xfs_trans *, struct xfs_mount *, | 314 | struct xfs_mount *, struct xfs_dquot *, |
316 | struct xfs_inode *, long, long, uint); | 315 | struct xfs_dquot *, long, long, uint); |
317 | typedef int (*qo_reserve_quota_bydquots_t)( | 316 | |
318 | struct xfs_trans *, struct xfs_mount *, | 317 | extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint, |
319 | struct xfs_dquot *, struct xfs_dquot *, | 318 | struct xfs_dquot **, struct xfs_dquot **); |
320 | long, long, uint); | 319 | extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *, |
321 | typedef struct xfs_dqtrxops { | 320 | struct xfs_dquot *, struct xfs_dquot *); |
322 | qo_dup_dqinfo_t qo_dup_dqinfo; | 321 | extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **); |
323 | qo_free_dqinfo_t qo_free_dqinfo; | 322 | extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *, |
324 | qo_mod_dquot_byino_t qo_mod_dquot_byino; | 323 | struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *); |
325 | qo_apply_dquot_deltas_t qo_apply_dquot_deltas; | 324 | extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *, |
326 | qo_reserve_quota_nblks_t qo_reserve_quota_nblks; | 325 | struct xfs_dquot *, struct xfs_dquot *, uint); |
327 | qo_reserve_quota_bydquots_t qo_reserve_quota_bydquots; | 326 | extern int xfs_qm_dqattach(struct xfs_inode *, uint); |
328 | qo_unreserve_and_mod_dquots_t qo_unreserve_and_mod_dquots; | 327 | extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint); |
329 | } xfs_dqtrxops_t; | 328 | extern void xfs_qm_dqdetach(struct xfs_inode *); |
330 | 329 | extern void xfs_qm_dqrele(struct xfs_dquot *); | |
331 | #define XFS_DQTRXOP(mp, tp, op, args...) \ | 330 | extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *); |
332 | ((mp)->m_qm_ops->xfs_dqtrxops ? \ | 331 | extern int xfs_qm_sync(struct xfs_mount *, int); |
333 | ((mp)->m_qm_ops->xfs_dqtrxops->op)(tp, ## args) : 0) | 332 | extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *); |
334 | 333 | extern void xfs_qm_mount_quotas(struct xfs_mount *); | |
335 | #define XFS_DQTRXOP_VOID(mp, tp, op, args...) \ | 334 | extern void xfs_qm_unmount(struct xfs_mount *); |
336 | ((mp)->m_qm_ops->xfs_dqtrxops ? \ | 335 | extern void xfs_qm_unmount_quotas(struct xfs_mount *); |
337 | ((mp)->m_qm_ops->xfs_dqtrxops->op)(tp, ## args) : (void)0) | 336 | |
338 | 337 | #else | |
339 | #define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \ | 338 | static inline int |
340 | XFS_DQTRXOP_VOID(mp, otp, qo_dup_dqinfo, ntp) | 339 | xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid, |
341 | #define XFS_TRANS_FREE_DQINFO(mp, tp) \ | 340 | uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp) |
342 | XFS_DQTRXOP_VOID(mp, tp, qo_free_dqinfo) | 341 | { |
343 | #define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \ | 342 | *udqp = NULL; |
344 | XFS_DQTRXOP_VOID(mp, tp, qo_mod_dquot_byino, ip, field, delta) | 343 | *gdqp = NULL; |
345 | #define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \ | 344 | return 0; |
346 | XFS_DQTRXOP_VOID(mp, tp, qo_apply_dquot_deltas) | 345 | } |
347 | #define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \ | 346 | #define xfs_trans_dup_dqinfo(tp, tp2) |
348 | XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl) | 347 | #define xfs_trans_free_dqinfo(tp) |
349 | #define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \ | 348 | #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) |
350 | XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl) | 349 | #define xfs_trans_apply_dquot_deltas(tp) |
351 | #define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \ | 350 | #define xfs_trans_unreserve_and_mod_dquots(tp) |
352 | XFS_DQTRXOP_VOID(mp, tp, qo_unreserve_and_mod_dquots) | 351 | #define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0) |
353 | 352 | #define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0) | |
354 | #define XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, flags) \ | 353 | #define xfs_qm_vop_create_dqattach(tp, ip, u, g) |
355 | XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), -(ninos), flags) | 354 | #define xfs_qm_vop_rename_dqattach(it) (0) |
356 | #define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ | 355 | #define xfs_qm_vop_chown(tp, ip, old, new) (NULL) |
357 | XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \ | 356 | #define xfs_qm_vop_chown_reserve(tp, ip, u, g, fl) (0) |
358 | f | XFS_QMOPT_RES_REGBLKS) | 357 | #define xfs_qm_dqattach(ip, fl) (0) |
359 | #define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \ | 358 | #define xfs_qm_dqattach_locked(ip, fl) (0) |
360 | XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \ | 359 | #define xfs_qm_dqdetach(ip) |
360 | #define xfs_qm_dqrele(d) | ||
361 | #define xfs_qm_statvfs(ip, s) | ||
362 | #define xfs_qm_sync(mp, fl) (0) | ||
363 | #define xfs_qm_newmount(mp, a, b) (0) | ||
364 | #define xfs_qm_mount_quotas(mp) | ||
365 | #define xfs_qm_unmount(mp) | ||
366 | #define xfs_qm_unmount_quotas(mp) (0) | ||
367 | #endif /* CONFIG_XFS_QUOTA */ | ||
368 | |||
369 | #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ | ||
370 | xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags) | ||
371 | #define xfs_trans_reserve_quota(tp, mp, ud, gd, nb, ni, f) \ | ||
372 | xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \ | ||
361 | f | XFS_QMOPT_RES_REGBLKS) | 373 | f | XFS_QMOPT_RES_REGBLKS) |
362 | 374 | ||
363 | extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); | 375 | extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); |
364 | extern int xfs_mount_reset_sbqflags(struct xfs_mount *); | 376 | extern int xfs_mount_reset_sbqflags(struct xfs_mount *); |
365 | 377 | ||
366 | extern struct xfs_qmops xfs_qmcore_xfs; | ||
367 | |||
368 | #endif /* __KERNEL__ */ | 378 | #endif /* __KERNEL__ */ |
369 | |||
370 | #endif /* __XFS_QUOTA_H__ */ | 379 | #endif /* __XFS_QUOTA_H__ */ |
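The rewritten xfs_quota.h exposes real function declarations when CONFIG_XFS_QUOTA is set and no-op macros when it is not (empty expansions for the void calls, a constant (0) for the int-returning ones), so callers such as xfs_trans.c and xfs_vnodeops.c need no #ifdefs of their own. The same compile-time stubbing pattern under a made-up option, as a sketch:

#include <stdio.h>

#ifdef CONFIG_FEATURE
extern int  feature_reserve(int nblks);   /* real implementation elsewhere */
extern void feature_release(int nblks);
#else
/* Feature compiled out: the calls vanish and "success" is a constant 0. */
#define feature_reserve(nblks)  (0)
#define feature_release(nblks)  ((void)0)
#endif

int main(void)
{
        int error = feature_reserve(16);  /* always 0 when compiled out */

        if (!error)
                printf("reserved\n");
        feature_release(16);
        return error;
}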
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 58f85e9cd11d..b81deea0ce19 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
@@ -166,7 +166,8 @@ xfs_rename( | |||
166 | /* | 166 | /* |
167 | * Attach the dquots to the inodes | 167 | * Attach the dquots to the inodes |
168 | */ | 168 | */ |
169 | if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) { | 169 | error = xfs_qm_vop_rename_dqattach(inodes); |
170 | if (error) { | ||
170 | xfs_trans_cancel(tp, cancel_flags); | 171 | xfs_trans_cancel(tp, cancel_flags); |
171 | goto std_return; | 172 | goto std_return; |
172 | } | 173 | } |
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index 36f3a21c54d2..fea68615ed23 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include "xfs_ialloc.h" | 41 | #include "xfs_ialloc.h" |
42 | #include "xfs_attr.h" | 42 | #include "xfs_attr.h" |
43 | #include "xfs_bmap.h" | 43 | #include "xfs_bmap.h" |
44 | #include "xfs_acl.h" | ||
45 | #include "xfs_error.h" | 44 | #include "xfs_error.h" |
46 | #include "xfs_buf_item.h" | 45 | #include "xfs_buf_item.h" |
47 | #include "xfs_rw.h" | 46 | #include "xfs_rw.h" |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index bcc39d358ad3..66b849358e62 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -297,7 +297,7 @@ xfs_trans_dup( | |||
297 | tp->t_rtx_res = tp->t_rtx_res_used; | 297 | tp->t_rtx_res = tp->t_rtx_res_used; |
298 | ntp->t_pflags = tp->t_pflags; | 298 | ntp->t_pflags = tp->t_pflags; |
299 | 299 | ||
300 | XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp); | 300 | xfs_trans_dup_dqinfo(tp, ntp); |
301 | 301 | ||
302 | atomic_inc(&tp->t_mountp->m_active_trans); | 302 | atomic_inc(&tp->t_mountp->m_active_trans); |
303 | return ntp; | 303 | return ntp; |
@@ -829,7 +829,7 @@ shut_us_down: | |||
829 | * means is that we have some (non-persistent) quota | 829 | * means is that we have some (non-persistent) quota |
830 | * reservations that need to be unreserved. | 830 | * reservations that need to be unreserved. |
831 | */ | 831 | */ |
832 | XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); | 832 | xfs_trans_unreserve_and_mod_dquots(tp); |
833 | if (tp->t_ticket) { | 833 | if (tp->t_ticket) { |
834 | commit_lsn = xfs_log_done(mp, tp->t_ticket, | 834 | commit_lsn = xfs_log_done(mp, tp->t_ticket, |
835 | NULL, log_flags); | 835 | NULL, log_flags); |
@@ -848,10 +848,9 @@ shut_us_down: | |||
848 | /* | 848 | /* |
849 | * If we need to update the superblock, then do it now. | 849 | * If we need to update the superblock, then do it now. |
850 | */ | 850 | */ |
851 | if (tp->t_flags & XFS_TRANS_SB_DIRTY) { | 851 | if (tp->t_flags & XFS_TRANS_SB_DIRTY) |
852 | xfs_trans_apply_sb_deltas(tp); | 852 | xfs_trans_apply_sb_deltas(tp); |
853 | } | 853 | xfs_trans_apply_dquot_deltas(tp); |
854 | XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp); | ||
855 | 854 | ||
856 | /* | 855 | /* |
857 | * Ask each log item how many log_vector entries it will | 856 | * Ask each log item how many log_vector entries it will |
@@ -1056,7 +1055,7 @@ xfs_trans_uncommit( | |||
1056 | } | 1055 | } |
1057 | 1056 | ||
1058 | xfs_trans_unreserve_and_mod_sb(tp); | 1057 | xfs_trans_unreserve_and_mod_sb(tp); |
1059 | XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); | 1058 | xfs_trans_unreserve_and_mod_dquots(tp); |
1060 | 1059 | ||
1061 | xfs_trans_free_items(tp, flags); | 1060 | xfs_trans_free_items(tp, flags); |
1062 | xfs_trans_free_busy(tp); | 1061 | xfs_trans_free_busy(tp); |
@@ -1181,7 +1180,7 @@ xfs_trans_cancel( | |||
1181 | } | 1180 | } |
1182 | #endif | 1181 | #endif |
1183 | xfs_trans_unreserve_and_mod_sb(tp); | 1182 | xfs_trans_unreserve_and_mod_sb(tp); |
1184 | XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); | 1183 | xfs_trans_unreserve_and_mod_dquots(tp); |
1185 | 1184 | ||
1186 | if (tp->t_ticket) { | 1185 | if (tp->t_ticket) { |
1187 | if (flags & XFS_TRANS_RELEASE_LOG_RES) { | 1186 | if (flags & XFS_TRANS_RELEASE_LOG_RES) { |
@@ -1211,7 +1210,7 @@ xfs_trans_free( | |||
1211 | xfs_trans_t *tp) | 1210 | xfs_trans_t *tp) |
1212 | { | 1211 | { |
1213 | atomic_dec(&tp->t_mountp->m_active_trans); | 1212 | atomic_dec(&tp->t_mountp->m_active_trans); |
1214 | XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); | 1213 | xfs_trans_free_dqinfo(tp); |
1215 | kmem_zone_free(xfs_trans_zone, tp); | 1214 | kmem_zone_free(xfs_trans_zone, tp); |
1216 | } | 1215 | } |
1217 | 1216 | ||
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 79b9e5ea5359..4d88616bde91 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c | |||
@@ -166,7 +166,7 @@ xfs_dir_ialloc( | |||
166 | xfs_buf_relse(ialloc_context); | 166 | xfs_buf_relse(ialloc_context); |
167 | if (dqinfo) { | 167 | if (dqinfo) { |
168 | tp->t_dqinfo = dqinfo; | 168 | tp->t_dqinfo = dqinfo; |
169 | XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp); | 169 | xfs_trans_free_dqinfo(tp); |
170 | } | 170 | } |
171 | *tpp = ntp; | 171 | *tpp = ntp; |
172 | *ipp = NULL; | 172 | *ipp = NULL; |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 19cf90a9c762..c4eca5ed5dab 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include "xfs_ialloc.h" | 42 | #include "xfs_ialloc.h" |
43 | #include "xfs_alloc.h" | 43 | #include "xfs_alloc.h" |
44 | #include "xfs_bmap.h" | 44 | #include "xfs_bmap.h" |
45 | #include "xfs_acl.h" | ||
45 | #include "xfs_attr.h" | 46 | #include "xfs_attr.h" |
46 | #include "xfs_rw.h" | 47 | #include "xfs_rw.h" |
47 | #include "xfs_error.h" | 48 | #include "xfs_error.h" |
@@ -118,7 +119,7 @@ xfs_setattr( | |||
118 | */ | 119 | */ |
119 | ASSERT(udqp == NULL); | 120 | ASSERT(udqp == NULL); |
120 | ASSERT(gdqp == NULL); | 121 | ASSERT(gdqp == NULL); |
121 | code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, ip->i_d.di_projid, | 122 | code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid, |
122 | qflags, &udqp, &gdqp); | 123 | qflags, &udqp, &gdqp); |
123 | if (code) | 124 | if (code) |
124 | return code; | 125 | return code; |
@@ -180,10 +181,11 @@ xfs_setattr( | |||
180 | * Do a quota reservation only if uid/gid is actually | 181 | * Do a quota reservation only if uid/gid is actually |
181 | * going to change. | 182 | * going to change. |
182 | */ | 183 | */ |
183 | if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || | 184 | if (XFS_IS_QUOTA_RUNNING(mp) && |
184 | (XFS_IS_GQUOTA_ON(mp) && igid != gid)) { | 185 | ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || |
186 | (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { | ||
185 | ASSERT(tp); | 187 | ASSERT(tp); |
186 | code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, | 188 | code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, |
187 | capable(CAP_FOWNER) ? | 189 | capable(CAP_FOWNER) ? |
188 | XFS_QMOPT_FORCE_RES : 0); | 190 | XFS_QMOPT_FORCE_RES : 0); |
189 | if (code) /* out of quota */ | 191 | if (code) /* out of quota */ |
@@ -217,7 +219,7 @@ xfs_setattr( | |||
217 | /* | 219 | /* |
218 | * Make sure that the dquots are attached to the inode. | 220 | * Make sure that the dquots are attached to the inode. |
219 | */ | 221 | */ |
220 | code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); | 222 | code = xfs_qm_dqattach_locked(ip, 0); |
221 | if (code) | 223 | if (code) |
222 | goto error_return; | 224 | goto error_return; |
223 | 225 | ||
@@ -351,21 +353,21 @@ xfs_setattr( | |||
351 | * in the transaction. | 353 | * in the transaction. |
352 | */ | 354 | */ |
353 | if (iuid != uid) { | 355 | if (iuid != uid) { |
354 | if (XFS_IS_UQUOTA_ON(mp)) { | 356 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { |
355 | ASSERT(mask & ATTR_UID); | 357 | ASSERT(mask & ATTR_UID); |
356 | ASSERT(udqp); | 358 | ASSERT(udqp); |
357 | olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip, | 359 | olddquot1 = xfs_qm_vop_chown(tp, ip, |
358 | &ip->i_udquot, udqp); | 360 | &ip->i_udquot, udqp); |
359 | } | 361 | } |
360 | ip->i_d.di_uid = uid; | 362 | ip->i_d.di_uid = uid; |
361 | inode->i_uid = uid; | 363 | inode->i_uid = uid; |
362 | } | 364 | } |
363 | if (igid != gid) { | 365 | if (igid != gid) { |
364 | if (XFS_IS_GQUOTA_ON(mp)) { | 366 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { |
365 | ASSERT(!XFS_IS_PQUOTA_ON(mp)); | 367 | ASSERT(!XFS_IS_PQUOTA_ON(mp)); |
366 | ASSERT(mask & ATTR_GID); | 368 | ASSERT(mask & ATTR_GID); |
367 | ASSERT(gdqp); | 369 | ASSERT(gdqp); |
368 | olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip, | 370 | olddquot2 = xfs_qm_vop_chown(tp, ip, |
369 | &ip->i_gdquot, gdqp); | 371 | &ip->i_gdquot, gdqp); |
370 | } | 372 | } |
371 | ip->i_d.di_gid = gid; | 373 | ip->i_d.di_gid = gid; |
@@ -461,13 +463,25 @@ xfs_setattr( | |||
461 | /* | 463 | /* |
462 | * Release any dquot(s) the inode had kept before chown. | 464 | * Release any dquot(s) the inode had kept before chown. |
463 | */ | 465 | */ |
464 | XFS_QM_DQRELE(mp, olddquot1); | 466 | xfs_qm_dqrele(olddquot1); |
465 | XFS_QM_DQRELE(mp, olddquot2); | 467 | xfs_qm_dqrele(olddquot2); |
466 | XFS_QM_DQRELE(mp, udqp); | 468 | xfs_qm_dqrele(udqp); |
467 | XFS_QM_DQRELE(mp, gdqp); | 469 | xfs_qm_dqrele(gdqp); |
468 | 470 | ||
469 | if (code) { | 471 | if (code) |
470 | return code; | 472 | return code; |
473 | |||
474 | /* | ||
475 | * XXX(hch): Updating the ACL entries is not atomic vs the i_mode | ||
476 | * update. We could avoid this with linked transactions | ||
477 | * and passing down the transaction pointer all the way | ||
478 | * to attr_set. No previous user of the generic | ||
479 | * Posix ACL code seems to care about this issue either. | ||
480 | */ | ||
481 | if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { | ||
482 | code = -xfs_acl_chmod(inode); | ||
483 | if (code) | ||
484 | return XFS_ERROR(code); | ||
471 | } | 485 | } |
472 | 486 | ||
473 | if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && | 487 | if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && |
@@ -482,8 +496,8 @@ xfs_setattr( | |||
482 | commit_flags |= XFS_TRANS_ABORT; | 496 | commit_flags |= XFS_TRANS_ABORT; |
483 | /* FALLTHROUGH */ | 497 | /* FALLTHROUGH */ |
484 | error_return: | 498 | error_return: |
485 | XFS_QM_DQRELE(mp, udqp); | 499 | xfs_qm_dqrele(udqp); |
486 | XFS_QM_DQRELE(mp, gdqp); | 500 | xfs_qm_dqrele(gdqp); |
487 | if (tp) { | 501 | if (tp) { |
488 | xfs_trans_cancel(tp, commit_flags); | 502 | xfs_trans_cancel(tp, commit_flags); |
489 | } | 503 | } |
@@ -739,7 +753,8 @@ xfs_free_eofblocks( | |||
739 | /* | 753 | /* |
740 | * Attach the dquots to the inode up front. | 754 | * Attach the dquots to the inode up front. |
741 | */ | 755 | */ |
742 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 756 | error = xfs_qm_dqattach(ip, 0); |
757 | if (error) | ||
743 | return error; | 758 | return error; |
744 | 759 | ||
745 | /* | 760 | /* |
@@ -1181,7 +1196,8 @@ xfs_inactive( | |||
1181 | 1196 | ||
1182 | ASSERT(ip->i_d.di_nlink == 0); | 1197 | ASSERT(ip->i_d.di_nlink == 0); |
1183 | 1198 | ||
1184 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 1199 | error = xfs_qm_dqattach(ip, 0); |
1200 | if (error) | ||
1185 | return VN_INACTIVE_CACHE; | 1201 | return VN_INACTIVE_CACHE; |
1186 | 1202 | ||
1187 | tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); | 1203 | tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); |
@@ -1307,7 +1323,7 @@ xfs_inactive( | |||
1307 | /* | 1323 | /* |
1308 | * Credit the quota account(s). The inode is gone. | 1324 | * Credit the quota account(s). The inode is gone. |
1309 | */ | 1325 | */ |
1310 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); | 1326 | xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); |
1311 | 1327 | ||
1312 | /* | 1328 | /* |
1313 | * Just ignore errors at this point. There is nothing we can | 1329 | * Just ignore errors at this point. There is nothing we can |
@@ -1323,11 +1339,11 @@ xfs_inactive( | |||
1323 | xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " | 1339 | xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " |
1324 | "xfs_trans_commit() returned error %d", error); | 1340 | "xfs_trans_commit() returned error %d", error); |
1325 | } | 1341 | } |
1342 | |||
1326 | /* | 1343 | /* |
1327 | * Release the dquots held by inode, if any. | 1344 | * Release the dquots held by inode, if any. |
1328 | */ | 1345 | */ |
1329 | XFS_QM_DQDETACH(mp, ip); | 1346 | xfs_qm_dqdetach(ip); |
1330 | |||
1331 | xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 1347 | xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); |
1332 | 1348 | ||
1333 | out: | 1349 | out: |
@@ -1427,8 +1443,7 @@ xfs_create( | |||
1427 | /* | 1443 | /* |
1428 | * Make sure that we have allocated dquot(s) on disk. | 1444 | * Make sure that we have allocated dquot(s) on disk. |
1429 | */ | 1445 | */ |
1430 | error = XFS_QM_DQVOPALLOC(mp, dp, | 1446 | error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, |
1431 | current_fsuid(), current_fsgid(), prid, | ||
1432 | XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); | 1447 | XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); |
1433 | if (error) | 1448 | if (error) |
1434 | goto std_return; | 1449 | goto std_return; |
@@ -1489,7 +1504,7 @@ xfs_create( | |||
1489 | /* | 1504 | /* |
1490 | * Reserve disk quota and the inode. | 1505 | * Reserve disk quota and the inode. |
1491 | */ | 1506 | */ |
1492 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); | 1507 | error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0); |
1493 | if (error) | 1508 | if (error) |
1494 | goto out_trans_cancel; | 1509 | goto out_trans_cancel; |
1495 | 1510 | ||
@@ -1561,7 +1576,7 @@ xfs_create( | |||
1561 | * These ids of the inode couldn't have changed since the new | 1576 | * These ids of the inode couldn't have changed since the new |
1562 | * inode has been locked ever since it was created. | 1577 | * inode has been locked ever since it was created. |
1563 | */ | 1578 | */ |
1564 | XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); | 1579 | xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp); |
1565 | 1580 | ||
1566 | /* | 1581 | /* |
1567 | * xfs_trans_commit normally decrements the vnode ref count | 1582 | * xfs_trans_commit normally decrements the vnode ref count |
@@ -1580,8 +1595,8 @@ xfs_create( | |||
1580 | goto out_dqrele; | 1595 | goto out_dqrele; |
1581 | } | 1596 | } |
1582 | 1597 | ||
1583 | XFS_QM_DQRELE(mp, udqp); | 1598 | xfs_qm_dqrele(udqp); |
1584 | XFS_QM_DQRELE(mp, gdqp); | 1599 | xfs_qm_dqrele(gdqp); |
1585 | 1600 | ||
1586 | *ipp = ip; | 1601 | *ipp = ip; |
1587 | 1602 | ||
@@ -1602,8 +1617,8 @@ xfs_create( | |||
1602 | out_trans_cancel: | 1617 | out_trans_cancel: |
1603 | xfs_trans_cancel(tp, cancel_flags); | 1618 | xfs_trans_cancel(tp, cancel_flags); |
1604 | out_dqrele: | 1619 | out_dqrele: |
1605 | XFS_QM_DQRELE(mp, udqp); | 1620 | xfs_qm_dqrele(udqp); |
1606 | XFS_QM_DQRELE(mp, gdqp); | 1621 | xfs_qm_dqrele(gdqp); |
1607 | 1622 | ||
1608 | if (unlock_dp_on_error) | 1623 | if (unlock_dp_on_error) |
1609 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 1624 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
@@ -1837,11 +1852,11 @@ xfs_remove( | |||
1837 | return error; | 1852 | return error; |
1838 | } | 1853 | } |
1839 | 1854 | ||
1840 | error = XFS_QM_DQATTACH(mp, dp, 0); | 1855 | error = xfs_qm_dqattach(dp, 0); |
1841 | if (error) | 1856 | if (error) |
1842 | goto std_return; | 1857 | goto std_return; |
1843 | 1858 | ||
1844 | error = XFS_QM_DQATTACH(mp, ip, 0); | 1859 | error = xfs_qm_dqattach(ip, 0); |
1845 | if (error) | 1860 | if (error) |
1846 | goto std_return; | 1861 | goto std_return; |
1847 | 1862 | ||
@@ -2028,11 +2043,11 @@ xfs_link( | |||
2028 | 2043 | ||
2029 | /* Return through std_return after this point. */ | 2044 | /* Return through std_return after this point. */ |
2030 | 2045 | ||
2031 | error = XFS_QM_DQATTACH(mp, sip, 0); | 2046 | error = xfs_qm_dqattach(sip, 0); |
2032 | if (error) | 2047 | if (error) |
2033 | goto std_return; | 2048 | goto std_return; |
2034 | 2049 | ||
2035 | error = XFS_QM_DQATTACH(mp, tdp, 0); | 2050 | error = xfs_qm_dqattach(tdp, 0); |
2036 | if (error) | 2051 | if (error) |
2037 | goto std_return; | 2052 | goto std_return; |
2038 | 2053 | ||
@@ -2205,8 +2220,7 @@ xfs_symlink( | |||
2205 | /* | 2220 | /* |
2206 | * Make sure that we have allocated dquot(s) on disk. | 2221 | * Make sure that we have allocated dquot(s) on disk. |
2207 | */ | 2222 | */ |
2208 | error = XFS_QM_DQVOPALLOC(mp, dp, | 2223 | error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, |
2209 | current_fsuid(), current_fsgid(), prid, | ||
2210 | XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); | 2224 | XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); |
2211 | if (error) | 2225 | if (error) |
2212 | goto std_return; | 2226 | goto std_return; |
@@ -2248,7 +2262,7 @@ xfs_symlink( | |||
2248 | /* | 2262 | /* |
2249 | * Reserve disk quota : blocks and inode. | 2263 | * Reserve disk quota : blocks and inode. |
2250 | */ | 2264 | */ |
2251 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0); | 2265 | error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0); |
2252 | if (error) | 2266 | if (error) |
2253 | goto error_return; | 2267 | goto error_return; |
2254 | 2268 | ||
@@ -2288,7 +2302,7 @@ xfs_symlink( | |||
2288 | /* | 2302 | /* |
2289 | * Also attach the dquot(s) to it, if applicable. | 2303 | * Also attach the dquot(s) to it, if applicable. |
2290 | */ | 2304 | */ |
2291 | XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp); | 2305 | xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp); |
2292 | 2306 | ||
2293 | if (resblks) | 2307 | if (resblks) |
2294 | resblks -= XFS_IALLOC_SPACE_RES(mp); | 2308 | resblks -= XFS_IALLOC_SPACE_RES(mp); |
@@ -2376,8 +2390,8 @@ xfs_symlink( | |||
2376 | goto error2; | 2390 | goto error2; |
2377 | } | 2391 | } |
2378 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 2392 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
2379 | XFS_QM_DQRELE(mp, udqp); | 2393 | xfs_qm_dqrele(udqp); |
2380 | XFS_QM_DQRELE(mp, gdqp); | 2394 | xfs_qm_dqrele(gdqp); |
2381 | 2395 | ||
2382 | /* Fall through to std_return with error = 0 or errno from | 2396 | /* Fall through to std_return with error = 0 or errno from |
2383 | * xfs_trans_commit */ | 2397 | * xfs_trans_commit */ |
@@ -2401,8 +2415,8 @@ std_return: | |||
2401 | cancel_flags |= XFS_TRANS_ABORT; | 2415 | cancel_flags |= XFS_TRANS_ABORT; |
2402 | error_return: | 2416 | error_return: |
2403 | xfs_trans_cancel(tp, cancel_flags); | 2417 | xfs_trans_cancel(tp, cancel_flags); |
2404 | XFS_QM_DQRELE(mp, udqp); | 2418 | xfs_qm_dqrele(udqp); |
2405 | XFS_QM_DQRELE(mp, gdqp); | 2419 | xfs_qm_dqrele(gdqp); |
2406 | 2420 | ||
2407 | if (unlock_dp_on_error) | 2421 | if (unlock_dp_on_error) |
2408 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 2422 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
@@ -2541,7 +2555,8 @@ xfs_alloc_file_space( | |||
2541 | if (XFS_FORCED_SHUTDOWN(mp)) | 2555 | if (XFS_FORCED_SHUTDOWN(mp)) |
2542 | return XFS_ERROR(EIO); | 2556 | return XFS_ERROR(EIO); |
2543 | 2557 | ||
2544 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 2558 | error = xfs_qm_dqattach(ip, 0); |
2559 | if (error) | ||
2545 | return error; | 2560 | return error; |
2546 | 2561 | ||
2547 | if (len <= 0) | 2562 | if (len <= 0) |
@@ -2628,8 +2643,8 @@ retry: | |||
2628 | break; | 2643 | break; |
2629 | } | 2644 | } |
2630 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 2645 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2631 | error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, | 2646 | error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, |
2632 | qblocks, 0, quota_flag); | 2647 | 0, quota_flag); |
2633 | if (error) | 2648 | if (error) |
2634 | goto error1; | 2649 | goto error1; |
2635 | 2650 | ||
@@ -2688,7 +2703,7 @@ dmapi_enospc_check: | |||
2688 | 2703 | ||
2689 | error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ | 2704 | error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ |
2690 | xfs_bmap_cancel(&free_list); | 2705 | xfs_bmap_cancel(&free_list); |
2691 | XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag); | 2706 | xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); |
2692 | 2707 | ||
2693 | error1: /* Just cancel transaction */ | 2708 | error1: /* Just cancel transaction */ |
2694 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 2709 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); |
@@ -2827,7 +2842,8 @@ xfs_free_file_space( | |||
2827 | 2842 | ||
2828 | xfs_itrace_entry(ip); | 2843 | xfs_itrace_entry(ip); |
2829 | 2844 | ||
2830 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 2845 | error = xfs_qm_dqattach(ip, 0); |
2846 | if (error) | ||
2831 | return error; | 2847 | return error; |
2832 | 2848 | ||
2833 | error = 0; | 2849 | error = 0; |
@@ -2953,9 +2969,9 @@ xfs_free_file_space( | |||
2953 | break; | 2969 | break; |
2954 | } | 2970 | } |
2955 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 2971 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2956 | error = XFS_TRANS_RESERVE_QUOTA(mp, tp, | 2972 | error = xfs_trans_reserve_quota(tp, mp, |
2957 | ip->i_udquot, ip->i_gdquot, resblks, 0, | 2973 | ip->i_udquot, ip->i_gdquot, |
2958 | XFS_QMOPT_RES_REGBLKS); | 2974 | resblks, 0, XFS_QMOPT_RES_REGBLKS); |
2959 | if (error) | 2975 | if (error) |
2960 | goto error1; | 2976 | goto error1; |
2961 | 2977 | ||
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 04373c6c61ff..a9e102de71a1 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h | |||
@@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags); | |||
18 | #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ | 18 | #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ |
19 | #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ | 19 | #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ |
20 | #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ | 20 | #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ |
21 | #define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */ | ||
21 | 22 | ||
22 | int xfs_readlink(struct xfs_inode *ip, char *link); | 23 | int xfs_readlink(struct xfs_inode *ip, char *link); |
23 | int xfs_fsync(struct xfs_inode *ip); | 24 | int xfs_fsync(struct xfs_inode *ip); |
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 2f1f95737acb..57b1846a3c87 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h | |||
@@ -10,6 +10,7 @@ | |||
10 | * @bus_width: Number of data lines wired up the slot | 10 | * @bus_width: Number of data lines wired up the slot |
11 | * @detect_pin: GPIO pin wired to the card detect switch | 11 | * @detect_pin: GPIO pin wired to the card detect switch |
12 | * @wp_pin: GPIO pin wired to the write protect sensor | 12 | * @wp_pin: GPIO pin wired to the write protect sensor |
13 | * @detect_is_active_high: The state of the detect pin when it is active | ||
13 | * | 14 | * |
14 | * If a given slot is not present on the board, @bus_width should be | 15 | * If a given slot is not present on the board, @bus_width should be |
15 | * set to 0. The other fields are ignored in this case. | 16 | * set to 0. The other fields are ignored in this case. |
@@ -24,6 +25,7 @@ struct mci_slot_pdata { | |||
24 | unsigned int bus_width; | 25 | unsigned int bus_width; |
25 | int detect_pin; | 26 | int detect_pin; |
26 | int wp_pin; | 27 | int wp_pin; |
28 | bool detect_is_active_high; | ||
27 | }; | 29 | }; |
28 | 30 | ||
29 | /** | 31 | /** |
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h index 8ed591b0887e..4d5e57ff6614 100644 --- a/include/linux/i2c-ocores.h +++ b/include/linux/i2c-ocores.h | |||
@@ -14,6 +14,8 @@ | |||
14 | struct ocores_i2c_platform_data { | 14 | struct ocores_i2c_platform_data { |
15 | u32 regstep; /* distance between registers */ | 15 | u32 regstep; /* distance between registers */ |
16 | u32 clock_khz; /* input clock in kHz */ | 16 | u32 clock_khz; /* input clock in kHz */ |
17 | u8 num_devices; /* number of devices in the devices list */ | ||
18 | struct i2c_board_info const *devices; /* devices connected to the bus */ | ||
17 | }; | 19 | }; |
18 | 20 | ||
19 | #endif /* _LINUX_I2C_OCORES_H */ | 21 | #endif /* _LINUX_I2C_OCORES_H */ |
diff --git a/kernel/timer.c b/kernel/timer.c index c01e568935ea..faf2db897de4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -757,6 +757,7 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
757 | wake_up_idle_cpu(cpu); | 757 | wake_up_idle_cpu(cpu); |
758 | spin_unlock_irqrestore(&base->lock, flags); | 758 | spin_unlock_irqrestore(&base->lock, flags); |
759 | } | 759 | } |
760 | EXPORT_SYMBOL_GPL(add_timer_on); | ||
760 | 761 | ||
761 | /** | 762 | /** |
762 | * del_timer - deactivate a timer. | 763 | * del_timer - deactivate a timer. |