diff options
276 files changed, 5246 insertions, 3074 deletions
| @@ -2564,6 +2564,10 @@ N: Wolfgang Muees | |||
| 2564 | E: wolfgang@iksw-muees.de | 2564 | E: wolfgang@iksw-muees.de |
| 2565 | D: Auerswald USB driver | 2565 | D: Auerswald USB driver |
| 2566 | 2566 | ||
| 2567 | N: Paul Mundt | ||
| 2568 | E: paul.mundt@gmail.com | ||
| 2569 | D: SuperH maintainer | ||
| 2570 | |||
| 2567 | N: Ian A. Murdock | 2571 | N: Ian A. Murdock |
| 2568 | E: imurdock@gnu.ai.mit.edu | 2572 | E: imurdock@gnu.ai.mit.edu |
| 2569 | D: Creator of Debian distribution | 2573 | D: Creator of Debian distribution |
| @@ -2707,6 +2711,9 @@ N: Greg Page | |||
| 2707 | E: gpage@sovereign.org | 2711 | E: gpage@sovereign.org |
| 2708 | D: IPX development and support | 2712 | D: IPX development and support |
| 2709 | 2713 | ||
| 2714 | N: Venkatesh Pallipadi (Venki) | ||
| 2715 | D: x86/HPET | ||
| 2716 | |||
| 2710 | N: David Parsons | 2717 | N: David Parsons |
| 2711 | E: orc@pell.chi.il.us | 2718 | E: orc@pell.chi.il.us |
| 2712 | D: improved memory detection code. | 2719 | D: improved memory detection code. |
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches index 26b1e31d5a13..2a8e89e13e45 100644 --- a/Documentation/SubmittingPatches +++ b/Documentation/SubmittingPatches | |||
| @@ -14,7 +14,10 @@ Read Documentation/SubmitChecklist for a list of items to check | |||
| 14 | before submitting code. If you are submitting a driver, also read | 14 | before submitting code. If you are submitting a driver, also read |
| 15 | Documentation/SubmittingDrivers. | 15 | Documentation/SubmittingDrivers. |
| 16 | 16 | ||
| 17 | 17 | Many of these steps describe the default behavior of the git version | |
| 18 | control system; if you use git to prepare your patches, you'll find much | ||
| 19 | of the mechanical work done for you, though you'll still need to prepare | ||
| 20 | and document a sensible set of patches. | ||
| 18 | 21 | ||
| 19 | -------------------------------------------- | 22 | -------------------------------------------- |
| 20 | SECTION 1 - CREATING AND SENDING YOUR CHANGE | 23 | SECTION 1 - CREATING AND SENDING YOUR CHANGE |
| @@ -25,7 +28,9 @@ SECTION 1 - CREATING AND SENDING YOUR CHANGE | |||
| 25 | 1) "diff -up" | 28 | 1) "diff -up" |
| 26 | ------------ | 29 | ------------ |
| 27 | 30 | ||
| 28 | Use "diff -up" or "diff -uprN" to create patches. | 31 | Use "diff -up" or "diff -uprN" to create patches. git generates patches |
| 32 | in this form by default; if you're using git, you can skip this section | ||
| 33 | entirely. | ||
| 29 | 34 | ||
| 30 | All changes to the Linux kernel occur in the form of patches, as | 35 | All changes to the Linux kernel occur in the form of patches, as |
| 31 | generated by diff(1). When creating your patch, make sure to create it | 36 | generated by diff(1). When creating your patch, make sure to create it |
| @@ -66,19 +71,14 @@ Make sure your patch does not include any extra files which do not | |||
| 66 | belong in a patch submission. Make sure to review your patch -after- | 71 | belong in a patch submission. Make sure to review your patch -after- |
| 67 | generated it with diff(1), to ensure accuracy. | 72 | generated it with diff(1), to ensure accuracy. |
| 68 | 73 | ||
| 69 | If your changes produce a lot of deltas, you may want to look into | 74 | If your changes produce a lot of deltas, you need to split them into |
| 70 | splitting them into individual patches which modify things in | 75 | individual patches which modify things in logical stages; see section |
| 71 | logical stages. This will facilitate easier reviewing by other | 76 | #3. This will facilitate easier reviewing by other kernel developers, |
| 72 | kernel developers, very important if you want your patch accepted. | 77 | very important if you want your patch accepted. |
| 73 | There are a number of scripts which can aid in this: | ||
| 74 | |||
| 75 | Quilt: | ||
| 76 | http://savannah.nongnu.org/projects/quilt | ||
| 77 | 78 | ||
| 78 | Andrew Morton's patch scripts: | 79 | If you're using git, "git rebase -i" can help you with this process. If |
| 79 | http://userweb.kernel.org/~akpm/stuff/patch-scripts.tar.gz | 80 | you're not using git, quilt <http://savannah.nongnu.org/projects/quilt> |
| 80 | Instead of these scripts, quilt is the recommended patch management | 81 | is another popular alternative. |
| 81 | tool (see above). | ||
| 82 | 82 | ||
| 83 | 83 | ||
| 84 | 84 | ||
| @@ -106,8 +106,21 @@ I.e., the patch (series) and its description should be self-contained. | |||
| 106 | This benefits both the patch merger(s) and reviewers. Some reviewers | 106 | This benefits both the patch merger(s) and reviewers. Some reviewers |
| 107 | probably didn't even receive earlier versions of the patch. | 107 | probably didn't even receive earlier versions of the patch. |
| 108 | 108 | ||
| 109 | Describe your changes in imperative mood, e.g. "make xyzzy do frotz" | ||
| 110 | instead of "[This patch] makes xyzzy do frotz" or "[I] changed xyzzy | ||
| 111 | to do frotz", as if you are giving orders to the codebase to change | ||
| 112 | its behaviour. | ||
| 113 | |||
| 109 | If the patch fixes a logged bug entry, refer to that bug entry by | 114 | If the patch fixes a logged bug entry, refer to that bug entry by |
| 110 | number and URL. | 115 | number and URL. If the patch follows from a mailing list discussion, |
| 116 | give a URL to the mailing list archive; use the https://lkml.kernel.org/ | ||
| 117 | redirector with a Message-Id, to ensure that the links cannot become | ||
| 118 | stale. | ||
| 119 | |||
| 120 | However, try to make your explanation understandable without external | ||
| 121 | resources. In addition to giving a URL to a mailing list archive or | ||
| 122 | bug, summarize the relevant points of the discussion that led to the | ||
| 123 | patch as submitted. | ||
| 111 | 124 | ||
| 112 | If you want to refer to a specific commit, don't just refer to the | 125 | If you want to refer to a specific commit, don't just refer to the |
| 113 | SHA-1 ID of the commit. Please also include the oneline summary of | 126 | SHA-1 ID of the commit. Please also include the oneline summary of |
| @@ -594,7 +607,8 @@ patch. | |||
| 594 | If you are going to include a diffstat after the "---" marker, please | 607 | If you are going to include a diffstat after the "---" marker, please |
| 595 | use diffstat options "-p 1 -w 70" so that filenames are listed from | 608 | use diffstat options "-p 1 -w 70" so that filenames are listed from |
| 596 | the top of the kernel source tree and don't use too much horizontal | 609 | the top of the kernel source tree and don't use too much horizontal |
| 597 | space (easily fit in 80 columns, maybe with some indentation). | 610 | space (easily fit in 80 columns, maybe with some indentation). (git |
| 611 | generates appropriate diffstats by default.) | ||
| 598 | 612 | ||
| 599 | See more details on the proper patch format in the following | 613 | See more details on the proper patch format in the following |
| 600 | references. | 614 | references. |
| @@ -725,7 +739,7 @@ SECTION 3 - REFERENCES | |||
| 725 | ---------------------- | 739 | ---------------------- |
| 726 | 740 | ||
| 727 | Andrew Morton, "The perfect patch" (tpp). | 741 | Andrew Morton, "The perfect patch" (tpp). |
| 728 | <http://userweb.kernel.org/~akpm/stuff/tpp.txt> | 742 | <http://www.ozlabs.org/~akpm/stuff/tpp.txt> |
| 729 | 743 | ||
| 730 | Jeff Garzik, "Linux kernel patch submission format". | 744 | Jeff Garzik, "Linux kernel patch submission format". |
| 731 | <http://linux.yyz.us/patch-format.html> | 745 | <http://linux.yyz.us/patch-format.html> |
| @@ -738,7 +752,7 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer". | |||
| 738 | <http://www.kroah.com/log/linux/maintainer-05.html> | 752 | <http://www.kroah.com/log/linux/maintainer-05.html> |
| 739 | 753 | ||
| 740 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! | 754 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! |
| 741 | <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2> | 755 | <https://lkml.org/lkml/2005/7/11/336> |
| 742 | 756 | ||
| 743 | Kernel Documentation/CodingStyle: | 757 | Kernel Documentation/CodingStyle: |
| 744 | <http://users.sosdg.org/~qiyong/lxr/source/Documentation/CodingStyle> | 758 | <http://users.sosdg.org/~qiyong/lxr/source/Documentation/CodingStyle> |
diff --git a/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt b/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt index 7cb9dbf34878..6983aad376c3 100644 --- a/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | RTC controller for the Allwinner A10/A20 | 3 | RTC controller for the Allwinner A10/A20 |
| 4 | 4 | ||
| 5 | Required properties: | 5 | Required properties: |
| 6 | - compatible : Should be "allwinner,sun4i-rtc" or "allwinner,sun7i-a20-rtc" | 6 | - compatible : Should be "allwinner,sun4i-a10-rtc" or "allwinner,sun7i-a20-rtc" |
| 7 | - reg: physical base address of the controller and length of memory mapped | 7 | - reg: physical base address of the controller and length of memory mapped |
| 8 | region. | 8 | region. |
| 9 | - interrupts: IRQ line for the RTC. | 9 | - interrupts: IRQ line for the RTC. |
| @@ -11,7 +11,7 @@ Required properties: | |||
| 11 | Example: | 11 | Example: |
| 12 | 12 | ||
| 13 | rtc: rtc@01c20d00 { | 13 | rtc: rtc@01c20d00 { |
| 14 | compatible = "allwinner,sun4i-rtc"; | 14 | compatible = "allwinner,sun4i-a10-rtc"; |
| 15 | reg = <0x01c20d00 0x20>; | 15 | reg = <0x01c20d00 0x20>; |
| 16 | interrupts = <24>; | 16 | interrupts = <24>; |
| 17 | }; | 17 | }; |
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index 06887d46ccf2..41c3d332acc9 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt | |||
| @@ -25,9 +25,8 @@ available from the following download page. At least "mkfs.nilfs2", | |||
| 25 | cleaner or garbage collector) are required. Details on the tools are | 25 | cleaner or garbage collector) are required. Details on the tools are |
| 26 | described in the man pages included in the package. | 26 | described in the man pages included in the package. |
| 27 | 27 | ||
| 28 | Project web page: http://www.nilfs.org/en/ | 28 | Project web page: http://nilfs.sourceforge.net/ |
| 29 | Download page: http://www.nilfs.org/en/download.html | 29 | Download page: http://nilfs.sourceforge.net/en/download.html |
| 30 | Git tree web page: http://www.nilfs.org/git/ | ||
| 31 | List info: http://vger.kernel.org/vger-lists.html#linux-nilfs | 30 | List info: http://vger.kernel.org/vger-lists.html#linux-nilfs |
| 32 | 31 | ||
| 33 | Caveats | 32 | Caveats |
| @@ -111,6 +110,13 @@ Table of NILFS2 specific ioctls | |||
| 111 | nilfs_resize utilities and by nilfs_cleanerd | 110 | nilfs_resize utilities and by nilfs_cleanerd |
| 112 | daemon. | 111 | daemon. |
| 113 | 112 | ||
| 113 | NILFS_IOCTL_SET_SUINFO Modify segment usage info of requested | ||
| 114 | segments. This ioctl is used by | ||
| 115 | nilfs_cleanerd daemon to skip unnecessary | ||
| 116 | cleaning operation of segments and reduce | ||
| 117 | performance penalty or wear of flash device | ||
| 118 | due to redundant move of in-use blocks. | ||
| 119 | |||
| 114 | NILFS_IOCTL_GET_SUSTAT Return segment usage statistics. This ioctl | 120 | NILFS_IOCTL_GET_SUSTAT Return segment usage statistics. This ioctl |
| 115 | is used in lssu, nilfs_resize utilities and | 121 | is used in lssu, nilfs_resize utilities and |
| 116 | by nilfs_cleanerd daemon. | 122 | by nilfs_cleanerd daemon. |
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt index 791af8dac065..61947facfc07 100644 --- a/Documentation/filesystems/ntfs.txt +++ b/Documentation/filesystems/ntfs.txt | |||
| @@ -455,8 +455,6 @@ not have this problem with odd numbers of sectors. | |||
| 455 | ChangeLog | 455 | ChangeLog |
| 456 | ========= | 456 | ========= |
| 457 | 457 | ||
| 458 | Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog. | ||
| 459 | |||
| 460 | 2.1.30: | 458 | 2.1.30: |
| 461 | - Fix writev() (it kept writing the first segment over and over again | 459 | - Fix writev() (it kept writing the first segment over and over again |
| 462 | instead of moving onto subsequent segments). | 460 | instead of moving onto subsequent segments). |
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index fe2b7ae6f962..0f3a1390bf00 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting | |||
| @@ -295,9 +295,9 @@ in the beginning of ->setattr unconditionally. | |||
| 295 | ->clear_inode() and ->delete_inode() are gone; ->evict_inode() should | 295 | ->clear_inode() and ->delete_inode() are gone; ->evict_inode() should |
| 296 | be used instead. It gets called whenever the inode is evicted, whether it has | 296 | be used instead. It gets called whenever the inode is evicted, whether it has |
| 297 | remaining links or not. Caller does *not* evict the pagecache or inode-associated | 297 | remaining links or not. Caller does *not* evict the pagecache or inode-associated |
| 298 | metadata buffers; getting rid of those is responsibility of method, as it had | 298 | metadata buffers; the method has to use truncate_inode_pages_final() to get rid |
| 299 | been for ->delete_inode(). Caller makes sure async writeback cannot be running | 299 | of those. Caller makes sure async writeback cannot be running for the inode while |
| 300 | for the inode while (or after) ->evict_inode() is called. | 300 | (or after) ->evict_inode() is called. |
| 301 | 301 | ||
| 302 | ->drop_inode() returns int now; it's called on final iput() with | 302 | ->drop_inode() returns int now; it's called on final iput() with |
| 303 | inode->i_lock held and it returns true if filesystems wants the inode to be | 303 | inode->i_lock held and it returns true if filesystems wants the inode to be |
diff --git a/Documentation/ja_JP/SubmittingPatches b/Documentation/ja_JP/SubmittingPatches index 97f78dd0c085..5d6ae639bfa0 100644 --- a/Documentation/ja_JP/SubmittingPatches +++ b/Documentation/ja_JP/SubmittingPatches | |||
| @@ -98,11 +98,6 @@ dontdiff ファイルã«ã¯ Linux カーãƒãƒ«ã®ãƒ“ルドプãƒã‚»ã‚¹ã®éŽç¨‹ã | |||
| 98 | Quilt: | 98 | Quilt: |
| 99 | http://savannah.nongnu.org/projects/quilt | 99 | http://savannah.nongnu.org/projects/quilt |
| 100 | 100 | ||
| 101 | Andrew Morton's patch scripts: | ||
| 102 | http://userweb.kernel.org/~akpm/stuff/patch-scripts.tar.gz | ||
| 103 | ã“ã®ãƒªãƒ³ã‚¯ã®å…ˆã®ã‚¹ã‚¯ãƒªãƒ—トã®ä»£ã‚りã¨ã—ã¦ã€quilt ãŒãƒ‘ッãƒãƒžãƒã‚¸ãƒ¡ãƒ³ãƒˆ | ||
| 104 | ツールã¨ã—ã¦æŽ¨å¥¨ã•れã¦ã„ã¾ã™(上ã®ãƒªãƒ³ã‚¯ã‚’見ã¦ãã ã•ã„)。 | ||
| 105 | |||
| 106 | 2) パッãƒã«å¯¾ã™ã‚‹èª¬æ˜Ž | 101 | 2) パッãƒã«å¯¾ã™ã‚‹èª¬æ˜Ž |
| 107 | 102 | ||
| 108 | パッãƒã®ä¸ã®å¤‰æ›´ç‚¹ã«å¯¾ã™ã‚‹æŠ€è¡“çš„ãªè©³ç´°ã«ã¤ã„ã¦èª¬æ˜Žã—ã¦ãã ã•ã„。 | 103 | パッãƒã®ä¸ã®å¤‰æ›´ç‚¹ã«å¯¾ã™ã‚‹æŠ€è¡“çš„ãªè©³ç´°ã«ã¤ã„ã¦èª¬æ˜Žã—ã¦ãã ã•ã„。 |
| @@ -695,7 +690,7 @@ gcc ã«ãŠã„ã¦ã¯ã€ãƒžã‚¯ãƒã¨åŒã˜ãらã„軽ã„ã§ã™ã€‚ | |||
| 695 | ---------------------- | 690 | ---------------------- |
| 696 | 691 | ||
| 697 | Andrew Morton, "The perfect patch" (tpp). | 692 | Andrew Morton, "The perfect patch" (tpp). |
| 698 | <http://userweb.kernel.org/~akpm/stuff/tpp.txt> | 693 | <http://www.ozlabs.org/~akpm/stuff/tpp.txt> |
| 699 | 694 | ||
| 700 | Jeff Garzik, "Linux kernel patch submission format". | 695 | Jeff Garzik, "Linux kernel patch submission format". |
| 701 | <http://linux.yyz.us/patch-format.html> | 696 | <http://linux.yyz.us/patch-format.html> |
| @@ -707,7 +702,7 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer". | |||
| 707 | <http://www.kroah.com/log/2006/01/11/> | 702 | <http://www.kroah.com/log/2006/01/11/> |
| 708 | 703 | ||
| 709 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! | 704 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! |
| 710 | <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2> | 705 | <https://lkml.org/lkml/2005/7/11/336> |
| 711 | 706 | ||
| 712 | Kernel Documentation/CodingStyle: | 707 | Kernel Documentation/CodingStyle: |
| 713 | <http://users.sosdg.org/~qiyong/lxr/source/Documentation/CodingStyle> | 708 | <http://users.sosdg.org/~qiyong/lxr/source/Documentation/CodingStyle> |
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt index b6e39739a36d..a7563ec4ea7b 100644 --- a/Documentation/kmemleak.txt +++ b/Documentation/kmemleak.txt | |||
| @@ -11,9 +11,7 @@ with the difference that the orphan objects are not freed but only | |||
| 11 | reported via /sys/kernel/debug/kmemleak. A similar method is used by the | 11 | reported via /sys/kernel/debug/kmemleak. A similar method is used by the |
| 12 | Valgrind tool (memcheck --leak-check) to detect the memory leaks in | 12 | Valgrind tool (memcheck --leak-check) to detect the memory leaks in |
| 13 | user-space applications. | 13 | user-space applications. |
| 14 | 14 | Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze, ppc, mips, s390, metag and tile. | |
| 15 | Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported | ||
| 16 | architectures. | ||
| 17 | 15 | ||
| 18 | Usage | 16 | Usage |
| 19 | ----- | 17 | ----- |
| @@ -53,7 +51,8 @@ Memory scanning parameters can be modified at run-time by writing to the | |||
| 53 | (default 600, 0 to stop the automatic scanning) | 51 | (default 600, 0 to stop the automatic scanning) |
| 54 | scan - trigger a memory scan | 52 | scan - trigger a memory scan |
| 55 | clear - clear list of current memory leak suspects, done by | 53 | clear - clear list of current memory leak suspects, done by |
| 56 | marking all current reported unreferenced objects grey | 54 | marking all current reported unreferenced objects grey, |
| 55 | or free all kmemleak objects if kmemleak has been disabled. | ||
| 57 | dump=<addr> - dump information about the object found at <addr> | 56 | dump=<addr> - dump information about the object found at <addr> |
| 58 | 57 | ||
| 59 | Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on | 58 | Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on |
| @@ -68,7 +67,7 @@ Basic Algorithm | |||
| 68 | 67 | ||
| 69 | The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and | 68 | The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and |
| 70 | friends are traced and the pointers, together with additional | 69 | friends are traced and the pointers, together with additional |
| 71 | information like size and stack trace, are stored in a prio search tree. | 70 | information like size and stack trace, are stored in a rbtree. |
| 72 | The corresponding freeing function calls are tracked and the pointers | 71 | The corresponding freeing function calls are tracked and the pointers |
| 73 | removed from the kmemleak data structures. | 72 | removed from the kmemleak data structures. |
| 74 | 73 | ||
| @@ -84,7 +83,7 @@ The scanning algorithm steps: | |||
| 84 | 1. mark all objects as white (remaining white objects will later be | 83 | 1. mark all objects as white (remaining white objects will later be |
| 85 | considered orphan) | 84 | considered orphan) |
| 86 | 2. scan the memory starting with the data section and stacks, checking | 85 | 2. scan the memory starting with the data section and stacks, checking |
| 87 | the values against the addresses stored in the prio search tree. If | 86 | the values against the addresses stored in the rbtree. If |
| 88 | a pointer to a white object is found, the object is added to the | 87 | a pointer to a white object is found, the object is added to the |
| 89 | gray list | 88 | gray list |
| 90 | 3. scan the gray objects for matching addresses (some white objects | 89 | 3. scan the gray objects for matching addresses (some white objects |
| @@ -120,6 +119,18 @@ Then as usual to get your report with: | |||
| 120 | 119 | ||
| 121 | # cat /sys/kernel/debug/kmemleak | 120 | # cat /sys/kernel/debug/kmemleak |
| 122 | 121 | ||
| 122 | Freeing kmemleak internal objects | ||
| 123 | --------------------------------- | ||
| 124 | |||
| 125 | To allow access to previosuly found memory leaks after kmemleak has been | ||
| 126 | disabled by the user or due to an fatal error, internal kmemleak objects | ||
| 127 | won't be freed when kmemleak is disabled, and those objects may occupy | ||
| 128 | a large part of physical memory. | ||
| 129 | |||
| 130 | In this situation, you may reclaim memory with: | ||
| 131 | |||
| 132 | # echo clear > /sys/kernel/debug/kmemleak | ||
| 133 | |||
| 123 | Kmemleak API | 134 | Kmemleak API |
| 124 | ------------ | 135 | ------------ |
| 125 | 136 | ||
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index d614a9b6a280..dd9d0e33b443 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt | |||
| @@ -175,18 +175,39 @@ Setting this to zero disables periodic writeback altogether. | |||
| 175 | 175 | ||
| 176 | drop_caches | 176 | drop_caches |
| 177 | 177 | ||
| 178 | Writing to this will cause the kernel to drop clean caches, dentries and | 178 | Writing to this will cause the kernel to drop clean caches, as well as |
| 179 | inodes from memory, causing that memory to become free. | 179 | reclaimable slab objects like dentries and inodes. Once dropped, their |
| 180 | memory becomes free. | ||
| 180 | 181 | ||
| 181 | To free pagecache: | 182 | To free pagecache: |
| 182 | echo 1 > /proc/sys/vm/drop_caches | 183 | echo 1 > /proc/sys/vm/drop_caches |
| 183 | To free dentries and inodes: | 184 | To free reclaimable slab objects (includes dentries and inodes): |
| 184 | echo 2 > /proc/sys/vm/drop_caches | 185 | echo 2 > /proc/sys/vm/drop_caches |
| 185 | To free pagecache, dentries and inodes: | 186 | To free slab objects and pagecache: |
| 186 | echo 3 > /proc/sys/vm/drop_caches | 187 | echo 3 > /proc/sys/vm/drop_caches |
| 187 | 188 | ||
| 188 | As this is a non-destructive operation and dirty objects are not freeable, the | 189 | This is a non-destructive operation and will not free any dirty objects. |
| 189 | user should run `sync' first. | 190 | To increase the number of objects freed by this operation, the user may run |
| 191 | `sync' prior to writing to /proc/sys/vm/drop_caches. This will minimize the | ||
| 192 | number of dirty objects on the system and create more candidates to be | ||
| 193 | dropped. | ||
| 194 | |||
| 195 | This file is not a means to control the growth of the various kernel caches | ||
| 196 | (inodes, dentries, pagecache, etc...) These objects are automatically | ||
| 197 | reclaimed by the kernel when memory is needed elsewhere on the system. | ||
| 198 | |||
| 199 | Use of this file can cause performance problems. Since it discards cached | ||
| 200 | objects, it may cost a significant amount of I/O and CPU to recreate the | ||
| 201 | dropped objects, especially if they were under heavy use. Because of this, | ||
| 202 | use outside of a testing or debugging environment is not recommended. | ||
| 203 | |||
| 204 | You may see informational messages in your kernel log when this file is | ||
| 205 | used: | ||
| 206 | |||
| 207 | cat (1234): drop_caches: 3 | ||
| 208 | |||
| 209 | These are informational only. They do not mean that anything is wrong | ||
| 210 | with your system. To disable them, echo 4 (bit 3) into drop_caches. | ||
| 190 | 211 | ||
| 191 | ============================================================== | 212 | ============================================================== |
| 192 | 213 | ||
diff --git a/Documentation/zh_CN/SubmittingPatches b/Documentation/zh_CN/SubmittingPatches index be0bd4725062..1d3a10f8746b 100644 --- a/Documentation/zh_CN/SubmittingPatches +++ b/Documentation/zh_CN/SubmittingPatches | |||
| @@ -82,10 +82,6 @@ Documentation/SubmittingDrivers 。 | |||
| 82 | Quilt: | 82 | Quilt: |
| 83 | http://savannah.nongnu.org/projects/quilt | 83 | http://savannah.nongnu.org/projects/quilt |
| 84 | 84 | ||
| 85 | Andrew Morton 的补ä¸è„šæœ¬: | ||
| 86 | http://userweb.kernel.org/~akpm/stuff/patch-scripts.tar.gz | ||
| 87 | 作为这些脚本的替代,quilt 是值得推è的补ä¸ç®¡ç†å·¥å…·(看上é¢çš„链接)。 | ||
| 88 | |||
| 89 | 2)æè¿°ä½ 的改动。 | 85 | 2)æè¿°ä½ 的改动。 |
| 90 | æè¿°ä½ 的改动包å«çš„æŠ€æœ¯ç»†èŠ‚ã€‚ | 86 | æè¿°ä½ 的改动包å«çš„æŠ€æœ¯ç»†èŠ‚ã€‚ |
| 91 | 87 | ||
| @@ -394,7 +390,7 @@ Static inline å‡½æ•°ç›¸æ¯”å®æ¥è¯´ï¼Œæ˜¯å¥½å¾—多的选择。Static inline 函æ | |||
| 394 | ---------------- | 390 | ---------------- |
| 395 | 391 | ||
| 396 | Andrew Morton, "The perfect patch" (tpp). | 392 | Andrew Morton, "The perfect patch" (tpp). |
| 397 | <http://userweb.kernel.org/~akpm/stuff/tpp.txt> | 393 | <http://www.ozlabs.org/~akpm/stuff/tpp.txt> |
| 398 | 394 | ||
| 399 | Jeff Garzik, "Linux kernel patch submission format". | 395 | Jeff Garzik, "Linux kernel patch submission format". |
| 400 | <http://linux.yyz.us/patch-format.html> | 396 | <http://linux.yyz.us/patch-format.html> |
| @@ -406,7 +402,7 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer". | |||
| 406 | <http://www.kroah.com/log/2006/01/11/> | 402 | <http://www.kroah.com/log/2006/01/11/> |
| 407 | 403 | ||
| 408 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! | 404 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! |
| 409 | <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2> | 405 | <https://lkml.org/lkml/2005/7/11/336> |
| 410 | 406 | ||
| 411 | Kernel Documentation/CodingStyle: | 407 | Kernel Documentation/CodingStyle: |
| 412 | <http://sosdg.org/~coywolf/lxr/source/Documentation/CodingStyle> | 408 | <http://sosdg.org/~coywolf/lxr/source/Documentation/CodingStyle> |
diff --git a/MAINTAINERS b/MAINTAINERS index 346744599b4f..e1af73272fd7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1704,6 +1704,8 @@ F: drivers/net/wireless/b43legacy/ | |||
| 1704 | 1704 | ||
| 1705 | BACKLIGHT CLASS/SUBSYSTEM | 1705 | BACKLIGHT CLASS/SUBSYSTEM |
| 1706 | M: Jingoo Han <jg1.han@samsung.com> | 1706 | M: Jingoo Han <jg1.han@samsung.com> |
| 1707 | M: Bryan Wu <cooloney@gmail.com> | ||
| 1708 | M: Lee Jones <lee.jones@linaro.org> | ||
| 1707 | S: Maintained | 1709 | S: Maintained |
| 1708 | F: drivers/video/backlight/ | 1710 | F: drivers/video/backlight/ |
| 1709 | F: include/linux/backlight.h | 1711 | F: include/linux/backlight.h |
| @@ -2728,6 +2730,31 @@ F: include/linux/device-mapper.h | |||
| 2728 | F: include/linux/dm-*.h | 2730 | F: include/linux/dm-*.h |
| 2729 | F: include/uapi/linux/dm-*.h | 2731 | F: include/uapi/linux/dm-*.h |
| 2730 | 2732 | ||
| 2733 | DIALOG SEMICONDUCTOR DRIVERS | ||
| 2734 | M: Support Opensource <support.opensource@diasemi.com> | ||
| 2735 | W: http://www.dialog-semiconductor.com/products | ||
| 2736 | S: Supported | ||
| 2737 | F: Documentation/hwmon/da90?? | ||
| 2738 | F: drivers/gpio/gpio-da90??.c | ||
| 2739 | F: drivers/hwmon/da90??-hwmon.c | ||
| 2740 | F: drivers/input/misc/da90??_onkey.c | ||
| 2741 | F: drivers/input/touchscreen/da9052_tsi.c | ||
| 2742 | F: drivers/leds/leds-da90??.c | ||
| 2743 | F: drivers/mfd/da903x.c | ||
| 2744 | F: drivers/mfd/da90??-*.c | ||
| 2745 | F: drivers/power/da9052-battery.c | ||
| 2746 | F: drivers/regulator/da903x.c | ||
| 2747 | F: drivers/regulator/da9???-regulator.[ch] | ||
| 2748 | F: drivers/rtc/rtc-da90??.c | ||
| 2749 | F: drivers/video/backlight/da90??_bl.c | ||
| 2750 | F: drivers/watchdog/da90??_wdt.c | ||
| 2751 | F: include/linux/mfd/da903x.h | ||
| 2752 | F: include/linux/mfd/da9052/ | ||
| 2753 | F: include/linux/mfd/da9055/ | ||
| 2754 | F: include/linux/mfd/da9063/ | ||
| 2755 | F: include/sound/da[79]*.h | ||
| 2756 | F: sound/soc/codecs/da[79]*.[ch] | ||
| 2757 | |||
| 2731 | DIGI NEO AND CLASSIC PCI PRODUCTS | 2758 | DIGI NEO AND CLASSIC PCI PRODUCTS |
| 2732 | M: Lidza Louina <lidza.louina@gmail.com> | 2759 | M: Lidza Louina <lidza.louina@gmail.com> |
| 2733 | L: driverdev-devel@linuxdriverproject.org | 2760 | L: driverdev-devel@linuxdriverproject.org |
| @@ -4128,8 +4155,7 @@ F: include/linux/hpet.h | |||
| 4128 | F: include/uapi/linux/hpet.h | 4155 | F: include/uapi/linux/hpet.h |
| 4129 | 4156 | ||
| 4130 | HPET: x86 | 4157 | HPET: x86 |
| 4131 | M: "Venkatesh Pallipadi (Venki)" <venki@google.com> | 4158 | S: Orphan |
| 4132 | S: Maintained | ||
| 4133 | F: arch/x86/kernel/hpet.c | 4159 | F: arch/x86/kernel/hpet.c |
| 4134 | F: arch/x86/include/asm/hpet.h | 4160 | F: arch/x86/include/asm/hpet.h |
| 4135 | 4161 | ||
| @@ -4620,7 +4646,7 @@ F: arch/x86/kernel/tboot.c | |||
| 4620 | INTEL WIRELESS WIMAX CONNECTION 2400 | 4646 | INTEL WIRELESS WIMAX CONNECTION 2400 |
| 4621 | M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | 4647 | M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> |
| 4622 | M: linux-wimax@intel.com | 4648 | M: linux-wimax@intel.com |
| 4623 | L: wimax@linuxwimax.org | 4649 | L: wimax@linuxwimax.org (subscribers-only) |
| 4624 | S: Supported | 4650 | S: Supported |
| 4625 | W: http://linuxwimax.org | 4651 | W: http://linuxwimax.org |
| 4626 | F: Documentation/wimax/README.i2400m | 4652 | F: Documentation/wimax/README.i2400m |
| @@ -5734,7 +5760,6 @@ F: fs/imgdafs/ | |||
| 5734 | 5760 | ||
| 5735 | MICROBLAZE ARCHITECTURE | 5761 | MICROBLAZE ARCHITECTURE |
| 5736 | M: Michal Simek <monstr@monstr.eu> | 5762 | M: Michal Simek <monstr@monstr.eu> |
| 5737 | L: microblaze-uclinux@itee.uq.edu.au (moderated for non-subscribers) | ||
| 5738 | W: http://www.monstr.eu/fdt/ | 5763 | W: http://www.monstr.eu/fdt/ |
| 5739 | T: git git://git.monstr.eu/linux-2.6-microblaze.git | 5764 | T: git git://git.monstr.eu/linux-2.6-microblaze.git |
| 5740 | S: Supported | 5765 | S: Supported |
| @@ -6156,10 +6181,10 @@ F: include/uapi/linux/nfs* | |||
| 6156 | F: include/uapi/linux/sunrpc/ | 6181 | F: include/uapi/linux/sunrpc/ |
| 6157 | 6182 | ||
| 6158 | NILFS2 FILESYSTEM | 6183 | NILFS2 FILESYSTEM |
| 6159 | M: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp> | 6184 | M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> |
| 6160 | L: linux-nilfs@vger.kernel.org | 6185 | L: linux-nilfs@vger.kernel.org |
| 6161 | W: http://www.nilfs.org/en/ | 6186 | W: http://nilfs.sourceforge.net/ |
| 6162 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2.git | 6187 | T: git git://github.com/konis/nilfs2.git |
| 6163 | S: Supported | 6188 | S: Supported |
| 6164 | F: Documentation/filesystems/nilfs2.txt | 6189 | F: Documentation/filesystems/nilfs2.txt |
| 6165 | F: fs/nilfs2/ | 6190 | F: fs/nilfs2/ |
| @@ -8476,12 +8501,10 @@ S: Maintained | |||
| 8476 | F: drivers/net/ethernet/dlink/sundance.c | 8501 | F: drivers/net/ethernet/dlink/sundance.c |
| 8477 | 8502 | ||
| 8478 | SUPERH | 8503 | SUPERH |
| 8479 | M: Paul Mundt <lethal@linux-sh.org> | ||
| 8480 | L: linux-sh@vger.kernel.org | 8504 | L: linux-sh@vger.kernel.org |
| 8481 | W: http://www.linux-sh.org | 8505 | W: http://www.linux-sh.org |
| 8482 | Q: http://patchwork.kernel.org/project/linux-sh/list/ | 8506 | Q: http://patchwork.kernel.org/project/linux-sh/list/ |
| 8483 | T: git git://github.com/pmundt/linux-sh.git sh-latest | 8507 | S: Orphan |
| 8484 | S: Supported | ||
| 8485 | F: Documentation/sh/ | 8508 | F: Documentation/sh/ |
| 8486 | F: arch/sh/ | 8509 | F: arch/sh/ |
| 8487 | F: drivers/sh/ | 8510 | F: drivers/sh/ |
| @@ -8765,6 +8788,7 @@ M: Max Filippov <jcmvbkbc@gmail.com> | |||
| 8765 | L: linux-xtensa@linux-xtensa.org | 8788 | L: linux-xtensa@linux-xtensa.org |
| 8766 | S: Maintained | 8789 | S: Maintained |
| 8767 | F: arch/xtensa/ | 8790 | F: arch/xtensa/ |
| 8791 | F: drivers/irqchip/irq-xtensa-* | ||
| 8768 | 8792 | ||
| 8769 | THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER | 8793 | THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER |
| 8770 | M: Hans Verkuil <hverkuil@xs4all.nl> | 8794 | M: Hans Verkuil <hverkuil@xs4all.nl> |
| @@ -9656,7 +9680,7 @@ F: drivers/media/rc/winbond-cir.c | |||
| 9656 | WIMAX STACK | 9680 | WIMAX STACK |
| 9657 | M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | 9681 | M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> |
| 9658 | M: linux-wimax@intel.com | 9682 | M: linux-wimax@intel.com |
| 9659 | L: wimax@linuxwimax.org | 9683 | L: wimax@linuxwimax.org (subscribers-only) |
| 9660 | S: Supported | 9684 | S: Supported |
| 9661 | W: http://linuxwimax.org | 9685 | W: http://linuxwimax.org |
| 9662 | F: Documentation/wimax/README.wimax | 9686 | F: Documentation/wimax/README.wimax |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 7753be0c86d7..9321681cc45a 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
| @@ -415,7 +415,7 @@ | |||
| 415 | }; | 415 | }; |
| 416 | 416 | ||
| 417 | rtc: rtc@01c20d00 { | 417 | rtc: rtc@01c20d00 { |
| 418 | compatible = "allwinner,sun4i-rtc"; | 418 | compatible = "allwinner,sun4i-a10-rtc"; |
| 419 | reg = <0x01c20d00 0x20>; | 419 | reg = <0x01c20d00 0x20>; |
| 420 | interrupts = <24>; | 420 | interrupts = <24>; |
| 421 | }; | 421 | }; |
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c75d06aa27c3..4ac8cae5727c 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
| @@ -22,27 +22,21 @@ choice | |||
| 22 | config ARCH_SCORE7 | 22 | config ARCH_SCORE7 |
| 23 | bool "SCORE7 processor" | 23 | bool "SCORE7 processor" |
| 24 | select SYS_SUPPORTS_32BIT_KERNEL | 24 | select SYS_SUPPORTS_32BIT_KERNEL |
| 25 | select CPU_SCORE7 | ||
| 26 | select GENERIC_HAS_IOMAP | 25 | select GENERIC_HAS_IOMAP |
| 27 | 26 | ||
| 28 | config MACH_SPCT6600 | 27 | config MACH_SPCT6600 |
| 29 | bool "SPCT6600 series based machines" | 28 | bool "SPCT6600 series based machines" |
| 30 | select SYS_SUPPORTS_32BIT_KERNEL | 29 | select SYS_SUPPORTS_32BIT_KERNEL |
| 31 | select CPU_SCORE7 | ||
| 32 | select GENERIC_HAS_IOMAP | 30 | select GENERIC_HAS_IOMAP |
| 33 | 31 | ||
| 34 | config SCORE_SIM | 32 | config SCORE_SIM |
| 35 | bool "Score simulator" | 33 | bool "Score simulator" |
| 36 | select SYS_SUPPORTS_32BIT_KERNEL | 34 | select SYS_SUPPORTS_32BIT_KERNEL |
| 37 | select CPU_SCORE7 | ||
| 38 | select GENERIC_HAS_IOMAP | 35 | select GENERIC_HAS_IOMAP |
| 39 | endchoice | 36 | endchoice |
| 40 | 37 | ||
| 41 | endmenu | 38 | endmenu |
| 42 | 39 | ||
| 43 | config CPU_SCORE7 | ||
| 44 | bool | ||
| 45 | |||
| 46 | config NO_DMA | 40 | config NO_DMA |
| 47 | bool | 41 | bool |
| 48 | default y | 42 | default y |
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 25c5a932f9fe..669df51a82e3 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c | |||
| @@ -252,7 +252,7 @@ static struct sh_mobile_sdhi_info sdhi_info = { | |||
| 252 | static struct resource sdhi_resources[] = { | 252 | static struct resource sdhi_resources[] = { |
| 253 | [0] = { | 253 | [0] = { |
| 254 | .start = 0xffe50000, | 254 | .start = 0xffe50000, |
| 255 | .end = 0xffe501ff, | 255 | .end = 0xffe500ff, |
| 256 | .flags = IORESOURCE_MEM, | 256 | .flags = IORESOURCE_MEM, |
| 257 | }, | 257 | }, |
| 258 | [1] = { | 258 | [1] = { |
diff --git a/arch/sh/drivers/pci/pcie-sh7786.h b/arch/sh/drivers/pci/pcie-sh7786.h index 1ee054e47eae..4a6ff55f759b 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.h +++ b/arch/sh/drivers/pci/pcie-sh7786.h | |||
| @@ -145,9 +145,6 @@ | |||
| 145 | /* PCIERMSGIER */ | 145 | /* PCIERMSGIER */ |
| 146 | #define SH4A_PCIERMSGIER (0x004040) /* R/W - 0x0000 0000 32 */ | 146 | #define SH4A_PCIERMSGIER (0x004040) /* R/W - 0x0000 0000 32 */ |
| 147 | 147 | ||
| 148 | /* PCIEPHYCTLR */ | ||
| 149 | #define SH4A_PCIEPHYCTLR (0x010000) /* R/W - 0x0000 0000 32 */ | ||
| 150 | |||
| 151 | /* PCIEPHYADRR */ | 148 | /* PCIEPHYADRR */ |
| 152 | #define SH4A_PCIEPHYADRR (0x010004) /* R/W - 0x0000 0000 32 */ | 149 | #define SH4A_PCIEPHYADRR (0x010004) /* R/W - 0x0000 0000 32 */ |
| 153 | #define BITS_ACK (24) // Rev1.171 | 150 | #define BITS_ACK (24) // Rev1.171 |
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h index 4f97df87d7d5..4f643aa718e3 100644 --- a/arch/sh/include/asm/syscalls_32.h +++ b/arch/sh/include/asm/syscalls_32.h | |||
| @@ -9,15 +9,9 @@ | |||
| 9 | 9 | ||
| 10 | struct pt_regs; | 10 | struct pt_regs; |
| 11 | 11 | ||
| 12 | asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, | 12 | asmlinkage int sys_sigreturn(void); |
| 13 | unsigned long r6, unsigned long r7, | 13 | asmlinkage int sys_rt_sigreturn(void); |
| 14 | struct pt_regs __regs); | 14 | asmlinkage int sys_sh_pipe(void); |
| 15 | asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, | ||
| 16 | unsigned long r6, unsigned long r7, | ||
| 17 | struct pt_regs __regs); | ||
| 18 | asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5, | ||
| 19 | unsigned long r6, unsigned long r7, | ||
| 20 | struct pt_regs __regs); | ||
| 21 | asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf, | 15 | asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf, |
| 22 | size_t count, long dummy, loff_t pos); | 16 | size_t count, long dummy, loff_t pos); |
| 23 | asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf, | 17 | asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf, |
diff --git a/arch/sh/include/asm/traps_32.h b/arch/sh/include/asm/traps_32.h index cfd55ff9dff2..17e129fe459c 100644 --- a/arch/sh/include/asm/traps_32.h +++ b/arch/sh/include/asm/traps_32.h | |||
| @@ -42,18 +42,10 @@ static inline void trigger_address_error(void) | |||
| 42 | asmlinkage void do_address_error(struct pt_regs *regs, | 42 | asmlinkage void do_address_error(struct pt_regs *regs, |
| 43 | unsigned long writeaccess, | 43 | unsigned long writeaccess, |
| 44 | unsigned long address); | 44 | unsigned long address); |
| 45 | asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, | 45 | asmlinkage void do_divide_error(unsigned long r4); |
| 46 | unsigned long r6, unsigned long r7, | 46 | asmlinkage void do_reserved_inst(void); |
| 47 | struct pt_regs __regs); | 47 | asmlinkage void do_illegal_slot_inst(void); |
| 48 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | 48 | asmlinkage void do_exception_error(void); |
| 49 | unsigned long r6, unsigned long r7, | ||
| 50 | struct pt_regs __regs); | ||
| 51 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, | ||
| 52 | unsigned long r6, unsigned long r7, | ||
| 53 | struct pt_regs __regs); | ||
| 54 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | ||
| 55 | unsigned long r6, unsigned long r7, | ||
| 56 | struct pt_regs __regs); | ||
| 57 | 49 | ||
| 58 | #define BUILD_TRAP_HANDLER(name) \ | 50 | #define BUILD_TRAP_HANDLER(name) \ |
| 59 | asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \ | 51 | asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \ |
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c index e84a43229b9c..5c0e3c335161 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c | |||
| @@ -132,7 +132,7 @@ static struct clk_lookup lookups[] = { | |||
| 132 | CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]), | 132 | CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]), |
| 133 | CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]), | 133 | CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]), |
| 134 | CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]), | 134 | CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]), |
| 135 | CLKDEV_CON_ID("rspi2", &mstp_clks[MSTP127]), | 135 | CLKDEV_DEV_ID("rspi.2", &mstp_clks[MSTP127]), |
| 136 | }; | 136 | }; |
| 137 | 137 | ||
| 138 | int __init arch_clk_init(void) | 138 | int __init arch_clk_init(void) |
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index b959f5592604..8dfe645bcc4b 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c | |||
| @@ -115,7 +115,7 @@ static int print_trace_stack(void *data, char *name) | |||
| 115 | */ | 115 | */ |
| 116 | static void print_trace_address(void *data, unsigned long addr, int reliable) | 116 | static void print_trace_address(void *data, unsigned long addr, int reliable) |
| 117 | { | 117 | { |
| 118 | printk(data); | 118 | printk("%s", (char *)data); |
| 119 | printk_address(addr, reliable); | 119 | printk_address(addr, reliable); |
| 120 | } | 120 | } |
| 121 | 121 | ||
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index ca46834294b7..13047a4facd2 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S | |||
| @@ -193,10 +193,10 @@ syscall_trace_entry: | |||
| 193 | ! Reload R0-R4 from kernel stack, where the | 193 | ! Reload R0-R4 from kernel stack, where the |
| 194 | ! parent may have modified them using | 194 | ! parent may have modified them using |
| 195 | ! ptrace(POKEUSR). (Note that R0-R2 are | 195 | ! ptrace(POKEUSR). (Note that R0-R2 are |
| 196 | ! used by the system call handler directly | 196 | ! reloaded from the kernel stack by syscall_call |
| 197 | ! from the kernel stack anyway, so don't need | 197 | ! below, so don't need to be reloaded here.) |
| 198 | ! to be reloaded here.) This allows the parent | 198 | ! This allows the parent to rewrite system calls |
| 199 | ! to rewrite system calls and args on the fly. | 199 | ! and args on the fly. |
| 200 | mov.l @(OFF_R4,r15), r4 ! arg0 | 200 | mov.l @(OFF_R4,r15), r4 ! arg0 |
| 201 | mov.l @(OFF_R5,r15), r5 | 201 | mov.l @(OFF_R5,r15), r5 |
| 202 | mov.l @(OFF_R6,r15), r6 | 202 | mov.l @(OFF_R6,r15), r6 |
| @@ -357,8 +357,15 @@ syscall_call: | |||
| 357 | mov.l 3f, r8 ! Load the address of sys_call_table | 357 | mov.l 3f, r8 ! Load the address of sys_call_table |
| 358 | add r8, r3 | 358 | add r8, r3 |
| 359 | mov.l @r3, r8 | 359 | mov.l @r3, r8 |
| 360 | mov.l @(OFF_R2,r15), r2 | ||
| 361 | mov.l @(OFF_R1,r15), r1 | ||
| 362 | mov.l @(OFF_R0,r15), r0 | ||
| 363 | mov.l r2, @-r15 | ||
| 364 | mov.l r1, @-r15 | ||
| 365 | mov.l r0, @-r15 | ||
| 360 | jsr @r8 ! jump to specific syscall handler | 366 | jsr @r8 ! jump to specific syscall handler |
| 361 | nop | 367 | nop |
| 368 | add #12, r15 | ||
| 362 | mov.l @(OFF_R0,r15), r12 ! save r0 | 369 | mov.l @(OFF_R0,r15), r12 ! save r0 |
| 363 | mov.l r0, @(OFF_R0,r15) ! save the return value | 370 | mov.l r0, @(OFF_R0,r15) ! save the return value |
| 364 | ! | 371 | ! |
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 6af6e7c5cac8..594cd371aa28 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
| @@ -148,11 +148,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p | |||
| 148 | return err; | 148 | return err; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, | 151 | asmlinkage int sys_sigreturn(void) |
| 152 | unsigned long r6, unsigned long r7, | ||
| 153 | struct pt_regs __regs) | ||
| 154 | { | 152 | { |
| 155 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | 153 | struct pt_regs *regs = current_pt_regs(); |
| 156 | struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; | 154 | struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; |
| 157 | sigset_t set; | 155 | sigset_t set; |
| 158 | int r0; | 156 | int r0; |
| @@ -180,11 +178,9 @@ badframe: | |||
| 180 | return 0; | 178 | return 0; |
| 181 | } | 179 | } |
| 182 | 180 | ||
| 183 | asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, | 181 | asmlinkage int sys_rt_sigreturn(void) |
| 184 | unsigned long r6, unsigned long r7, | ||
| 185 | struct pt_regs __regs) | ||
| 186 | { | 182 | { |
| 187 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | 183 | struct pt_regs *regs = current_pt_regs(); |
| 188 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; | 184 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; |
| 189 | sigset_t set; | 185 | sigset_t set; |
| 190 | int r0; | 186 | int r0; |
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c index 497bab3a0401..b66d1c62eb19 100644 --- a/arch/sh/kernel/sys_sh32.c +++ b/arch/sh/kernel/sys_sh32.c | |||
| @@ -21,17 +21,14 @@ | |||
| 21 | * sys_pipe() is the normal C calling standard for creating | 21 | * sys_pipe() is the normal C calling standard for creating |
| 22 | * a pipe. It's not the way Unix traditionally does this, though. | 22 | * a pipe. It's not the way Unix traditionally does this, though. |
| 23 | */ | 23 | */ |
| 24 | asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5, | 24 | asmlinkage int sys_sh_pipe(void) |
| 25 | unsigned long r6, unsigned long r7, | ||
| 26 | struct pt_regs __regs) | ||
| 27 | { | 25 | { |
| 28 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
| 29 | int fd[2]; | 26 | int fd[2]; |
| 30 | int error; | 27 | int error; |
| 31 | 28 | ||
| 32 | error = do_pipe_flags(fd, 0); | 29 | error = do_pipe_flags(fd, 0); |
| 33 | if (!error) { | 30 | if (!error) { |
| 34 | regs->regs[1] = fd[1]; | 31 | current_pt_regs()->regs[1] = fd[1]; |
| 35 | return fd[0]; | 32 | return fd[0]; |
| 36 | } | 33 | } |
| 37 | return error; | 34 | return error; |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 68e99f09171d..ff639342a8be 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
| @@ -594,9 +594,7 @@ int is_dsp_inst(struct pt_regs *regs) | |||
| 594 | #endif /* CONFIG_SH_DSP */ | 594 | #endif /* CONFIG_SH_DSP */ |
| 595 | 595 | ||
| 596 | #ifdef CONFIG_CPU_SH2A | 596 | #ifdef CONFIG_CPU_SH2A |
| 597 | asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, | 597 | asmlinkage void do_divide_error(unsigned long r4) |
| 598 | unsigned long r6, unsigned long r7, | ||
| 599 | struct pt_regs __regs) | ||
| 600 | { | 598 | { |
| 601 | siginfo_t info; | 599 | siginfo_t info; |
| 602 | 600 | ||
| @@ -613,11 +611,9 @@ asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, | |||
| 613 | } | 611 | } |
| 614 | #endif | 612 | #endif |
| 615 | 613 | ||
| 616 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | 614 | asmlinkage void do_reserved_inst(void) |
| 617 | unsigned long r6, unsigned long r7, | ||
| 618 | struct pt_regs __regs) | ||
| 619 | { | 615 | { |
| 620 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | 616 | struct pt_regs *regs = current_pt_regs(); |
| 621 | unsigned long error_code; | 617 | unsigned long error_code; |
| 622 | struct task_struct *tsk = current; | 618 | struct task_struct *tsk = current; |
| 623 | 619 | ||
| @@ -701,11 +697,9 @@ static int emulate_branch(unsigned short inst, struct pt_regs *regs) | |||
| 701 | } | 697 | } |
| 702 | #endif | 698 | #endif |
| 703 | 699 | ||
| 704 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, | 700 | asmlinkage void do_illegal_slot_inst(void) |
| 705 | unsigned long r6, unsigned long r7, | ||
| 706 | struct pt_regs __regs) | ||
| 707 | { | 701 | { |
| 708 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | 702 | struct pt_regs *regs = current_pt_regs(); |
| 709 | unsigned long inst; | 703 | unsigned long inst; |
| 710 | struct task_struct *tsk = current; | 704 | struct task_struct *tsk = current; |
| 711 | 705 | ||
| @@ -730,15 +724,12 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, | |||
| 730 | die_if_no_fixup("illegal slot instruction", regs, inst); | 724 | die_if_no_fixup("illegal slot instruction", regs, inst); |
| 731 | } | 725 | } |
| 732 | 726 | ||
| 733 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | 727 | asmlinkage void do_exception_error(void) |
| 734 | unsigned long r6, unsigned long r7, | ||
| 735 | struct pt_regs __regs) | ||
| 736 | { | 728 | { |
| 737 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
| 738 | long ex; | 729 | long ex; |
| 739 | 730 | ||
| 740 | ex = lookup_exception_vector(); | 731 | ex = lookup_exception_vector(); |
| 741 | die_if_kernel("exception", regs, ex); | 732 | die_if_kernel("exception", current_pt_regs(), ex); |
| 742 | } | 733 | } |
| 743 | 734 | ||
| 744 | void per_cpu_trap_init(void) | 735 | void per_cpu_trap_init(void) |
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index b876780c1e1c..04aa55fa8c75 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c | |||
| @@ -574,24 +574,6 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
| 574 | return 0; | 574 | return 0; |
| 575 | } | 575 | } |
| 576 | 576 | ||
| 577 | asmlinkage void do_fpu_error(unsigned long r4, unsigned long r5, | ||
| 578 | unsigned long r6, unsigned long r7, | ||
| 579 | struct pt_regs regs) | ||
| 580 | { | ||
| 581 | struct task_struct *tsk = current; | ||
| 582 | siginfo_t info; | ||
| 583 | |||
| 584 | if (ieee_fpe_handler (®s)) | ||
| 585 | return; | ||
| 586 | |||
| 587 | regs.pc += 2; | ||
| 588 | info.si_signo = SIGFPE; | ||
| 589 | info.si_errno = 0; | ||
| 590 | info.si_code = FPE_FLTINV; | ||
| 591 | info.si_addr = (void __user *)regs.pc; | ||
| 592 | force_sig_info(SIGFPE, &info, tsk); | ||
| 593 | } | ||
| 594 | |||
| 595 | /** | 577 | /** |
| 596 | * fpu_init - Initialize FPU registers | 578 | * fpu_init - Initialize FPU registers |
| 597 | * @fpu: Pointer to software emulated FPU registers. | 579 | * @fpu: Pointer to software emulated FPU registers. |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index db933decc39c..2e565f8e5165 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
| @@ -573,6 +573,18 @@ config RTC_DRV_DS1305 | |||
| 573 | This driver can also be built as a module. If so, the module | 573 | This driver can also be built as a module. If so, the module |
| 574 | will be called rtc-ds1305. | 574 | will be called rtc-ds1305. |
| 575 | 575 | ||
| 576 | config RTC_DRV_DS1347 | ||
| 577 | tristate "Dallas/Maxim DS1347" | ||
| 578 | help | ||
| 579 | If you say yes here you get support for the | ||
| 580 | Dallas/Maxim DS1347 chips. | ||
| 581 | |||
| 582 | This driver only supports the RTC feature, and not other chip | ||
| 583 | features such as alarms. | ||
| 584 | |||
| 585 | This driver can also be built as a module. If so, the module | ||
| 586 | will be called rtc-ds1347. | ||
| 587 | |||
| 576 | config RTC_DRV_DS1390 | 588 | config RTC_DRV_DS1390 |
| 577 | tristate "Dallas/Maxim DS1390/93/94" | 589 | tristate "Dallas/Maxim DS1390/93/94" |
| 578 | help | 590 | help |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index b427bf7dd20d..40a09915c8f6 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
| @@ -40,6 +40,7 @@ obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o | |||
| 40 | obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o | 40 | obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o |
| 41 | obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o | 41 | obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o |
| 42 | obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o | 42 | obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o |
| 43 | obj-$(CONFIG_RTC_DRV_DS1347) += rtc-ds1347.o | ||
| 43 | obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o | 44 | obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o |
| 44 | obj-$(CONFIG_RTC_DRV_DS1390) += rtc-ds1390.o | 45 | obj-$(CONFIG_RTC_DRV_DS1390) += rtc-ds1390.o |
| 45 | obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o | 46 | obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o |
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 544be722937c..c2eff6082363 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
| @@ -584,6 +584,9 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) | |||
| 584 | void rtc_update_irq(struct rtc_device *rtc, | 584 | void rtc_update_irq(struct rtc_device *rtc, |
| 585 | unsigned long num, unsigned long events) | 585 | unsigned long num, unsigned long events) |
| 586 | { | 586 | { |
| 587 | if (unlikely(IS_ERR_OR_NULL(rtc))) | ||
| 588 | return; | ||
| 589 | |||
| 587 | pm_stay_awake(rtc->dev.parent); | 590 | pm_stay_awake(rtc->dev.parent); |
| 588 | schedule_work(&rtc->irqwork); | 591 | schedule_work(&rtc->irqwork); |
| 589 | } | 592 | } |
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c index 4af016985890..9f38eda69154 100644 --- a/drivers/rtc/rtc-as3722.c +++ b/drivers/rtc/rtc-as3722.c | |||
| @@ -242,9 +242,8 @@ static int as3722_rtc_resume(struct device *dev) | |||
| 242 | } | 242 | } |
| 243 | #endif | 243 | #endif |
| 244 | 244 | ||
| 245 | static const struct dev_pm_ops as3722_rtc_pm_ops = { | 245 | static SIMPLE_DEV_PM_OPS(as3722_rtc_pm_ops, as3722_rtc_suspend, |
| 246 | SET_SYSTEM_SLEEP_PM_OPS(as3722_rtc_suspend, as3722_rtc_resume) | 246 | as3722_rtc_resume); |
| 247 | }; | ||
| 248 | 247 | ||
| 249 | static struct platform_driver as3722_rtc_driver = { | 248 | static struct platform_driver as3722_rtc_driver = { |
| 250 | .probe = as3722_rtc_probe, | 249 | .probe = as3722_rtc_probe, |
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index 3161ab5263ed..aee3387fb099 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c | |||
| @@ -204,10 +204,8 @@ static int __init at32_rtc_probe(struct platform_device *pdev) | |||
| 204 | 204 | ||
| 205 | rtc = devm_kzalloc(&pdev->dev, sizeof(struct rtc_at32ap700x), | 205 | rtc = devm_kzalloc(&pdev->dev, sizeof(struct rtc_at32ap700x), |
| 206 | GFP_KERNEL); | 206 | GFP_KERNEL); |
| 207 | if (!rtc) { | 207 | if (!rtc) |
| 208 | dev_dbg(&pdev->dev, "out of memory\n"); | ||
| 209 | return -ENOMEM; | 208 | return -ENOMEM; |
| 210 | } | ||
| 211 | 209 | ||
| 212 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 210 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 213 | if (!regs) { | 211 | if (!regs) { |
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index cae212f30d65..0963c9309c74 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
| @@ -837,7 +837,7 @@ static void __exit cmos_do_remove(struct device *dev) | |||
| 837 | cmos->dev = NULL; | 837 | cmos->dev = NULL; |
| 838 | } | 838 | } |
| 839 | 839 | ||
| 840 | #ifdef CONFIG_PM | 840 | #ifdef CONFIG_PM_SLEEP |
| 841 | 841 | ||
| 842 | static int cmos_suspend(struct device *dev) | 842 | static int cmos_suspend(struct device *dev) |
| 843 | { | 843 | { |
| @@ -935,8 +935,6 @@ static int cmos_resume(struct device *dev) | |||
| 935 | return 0; | 935 | return 0; |
| 936 | } | 936 | } |
| 937 | 937 | ||
| 938 | static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); | ||
| 939 | |||
| 940 | #else | 938 | #else |
| 941 | 939 | ||
| 942 | static inline int cmos_poweroff(struct device *dev) | 940 | static inline int cmos_poweroff(struct device *dev) |
| @@ -946,6 +944,8 @@ static inline int cmos_poweroff(struct device *dev) | |||
| 946 | 944 | ||
| 947 | #endif | 945 | #endif |
| 948 | 946 | ||
| 947 | static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); | ||
| 948 | |||
| 949 | /*----------------------------------------------------------------*/ | 949 | /*----------------------------------------------------------------*/ |
| 950 | 950 | ||
| 951 | /* On non-x86 systems, a "CMOS" RTC lives most naturally on platform_bus. | 951 | /* On non-x86 systems, a "CMOS" RTC lives most naturally on platform_bus. |
| @@ -1088,11 +1088,9 @@ static struct pnp_driver cmos_pnp_driver = { | |||
| 1088 | 1088 | ||
| 1089 | /* flag ensures resume() gets called, and stops syslog spam */ | 1089 | /* flag ensures resume() gets called, and stops syslog spam */ |
| 1090 | .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, | 1090 | .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, |
| 1091 | #ifdef CONFIG_PM_SLEEP | ||
| 1092 | .driver = { | 1091 | .driver = { |
| 1093 | .pm = &cmos_pm_ops, | 1092 | .pm = &cmos_pm_ops, |
| 1094 | }, | 1093 | }, |
| 1095 | #endif | ||
| 1096 | }; | 1094 | }; |
| 1097 | 1095 | ||
| 1098 | #endif /* CONFIG_PNP */ | 1096 | #endif /* CONFIG_PNP */ |
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c index 73f157519dff..869cae273799 100644 --- a/drivers/rtc/rtc-coh901331.c +++ b/drivers/rtc/rtc-coh901331.c | |||
| @@ -43,8 +43,6 @@ | |||
| 43 | struct coh901331_port { | 43 | struct coh901331_port { |
| 44 | struct rtc_device *rtc; | 44 | struct rtc_device *rtc; |
| 45 | struct clk *clk; | 45 | struct clk *clk; |
| 46 | u32 phybase; | ||
| 47 | u32 physize; | ||
| 48 | void __iomem *virtbase; | 46 | void __iomem *virtbase; |
| 49 | int irq; | 47 | int irq; |
| 50 | #ifdef CONFIG_PM_SLEEP | 48 | #ifdef CONFIG_PM_SLEEP |
| @@ -173,19 +171,9 @@ static int __init coh901331_probe(struct platform_device *pdev) | |||
| 173 | return -ENOMEM; | 171 | return -ENOMEM; |
| 174 | 172 | ||
| 175 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 176 | if (!res) | 174 | rtap->virtbase = devm_ioremap_resource(&pdev->dev, res); |
| 177 | return -ENOENT; | 175 | if (IS_ERR(rtap->virtbase)) |
| 178 | 176 | return PTR_ERR(rtap->virtbase); | |
| 179 | rtap->phybase = res->start; | ||
| 180 | rtap->physize = resource_size(res); | ||
| 181 | |||
| 182 | if (devm_request_mem_region(&pdev->dev, rtap->phybase, rtap->physize, | ||
| 183 | "rtc-coh901331") == NULL) | ||
| 184 | return -EBUSY; | ||
| 185 | |||
| 186 | rtap->virtbase = devm_ioremap(&pdev->dev, rtap->phybase, rtap->physize); | ||
| 187 | if (!rtap->virtbase) | ||
| 188 | return -ENOMEM; | ||
| 189 | 177 | ||
| 190 | rtap->irq = platform_get_irq(pdev, 0); | 178 | rtap->irq = platform_get_irq(pdev, 0); |
| 191 | if (devm_request_irq(&pdev->dev, rtap->irq, coh901331_interrupt, 0, | 179 | if (devm_request_irq(&pdev->dev, rtap->irq, coh901331_interrupt, 0, |
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c index 4385ca4503da..a1cbf64242a5 100644 --- a/drivers/rtc/rtc-da9052.c +++ b/drivers/rtc/rtc-da9052.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | struct da9052_rtc { | 26 | struct da9052_rtc { |
| 27 | struct rtc_device *rtc; | 27 | struct rtc_device *rtc; |
| 28 | struct da9052 *da9052; | 28 | struct da9052 *da9052; |
| 29 | int irq; | ||
| 30 | }; | 29 | }; |
| 31 | 30 | ||
| 32 | static int da9052_rtc_enable_alarm(struct da9052 *da9052, bool enable) | 31 | static int da9052_rtc_enable_alarm(struct da9052 *da9052, bool enable) |
| @@ -240,8 +239,7 @@ static int da9052_rtc_probe(struct platform_device *pdev) | |||
| 240 | 239 | ||
| 241 | rtc->da9052 = dev_get_drvdata(pdev->dev.parent); | 240 | rtc->da9052 = dev_get_drvdata(pdev->dev.parent); |
| 242 | platform_set_drvdata(pdev, rtc); | 241 | platform_set_drvdata(pdev, rtc); |
| 243 | rtc->irq = DA9052_IRQ_ALARM; | 242 | ret = da9052_request_irq(rtc->da9052, DA9052_IRQ_ALARM, "ALM", |
| 244 | ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM", | ||
| 245 | da9052_rtc_irq, rtc); | 243 | da9052_rtc_irq, rtc); |
| 246 | if (ret != 0) { | 244 | if (ret != 0) { |
| 247 | rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); | 245 | rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); |
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c index 48cb2ac3bd3e..a825491331c8 100644 --- a/drivers/rtc/rtc-da9055.c +++ b/drivers/rtc/rtc-da9055.c | |||
| @@ -302,7 +302,9 @@ static int da9055_rtc_probe(struct platform_device *pdev) | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | alm_irq = platform_get_irq_byname(pdev, "ALM"); | 304 | alm_irq = platform_get_irq_byname(pdev, "ALM"); |
| 305 | alm_irq = regmap_irq_get_virq(rtc->da9055->irq_data, alm_irq); | 305 | if (alm_irq < 0) |
| 306 | return alm_irq; | ||
| 307 | |||
| 306 | ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL, | 308 | ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL, |
| 307 | da9055_rtc_alm_irq, | 309 | da9055_rtc_alm_irq, |
| 308 | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, | 310 | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, |
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index 24677ef8c39a..c0a3b59f65a2 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c | |||
| @@ -119,8 +119,6 @@ static DEFINE_SPINLOCK(davinci_rtc_lock); | |||
| 119 | struct davinci_rtc { | 119 | struct davinci_rtc { |
| 120 | struct rtc_device *rtc; | 120 | struct rtc_device *rtc; |
| 121 | void __iomem *base; | 121 | void __iomem *base; |
| 122 | resource_size_t pbase; | ||
| 123 | size_t base_size; | ||
| 124 | int irq; | 122 | int irq; |
| 125 | }; | 123 | }; |
| 126 | 124 | ||
| @@ -482,14 +480,12 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
| 482 | { | 480 | { |
| 483 | struct device *dev = &pdev->dev; | 481 | struct device *dev = &pdev->dev; |
| 484 | struct davinci_rtc *davinci_rtc; | 482 | struct davinci_rtc *davinci_rtc; |
| 485 | struct resource *res, *mem; | 483 | struct resource *res; |
| 486 | int ret = 0; | 484 | int ret = 0; |
| 487 | 485 | ||
| 488 | davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL); | 486 | davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL); |
| 489 | if (!davinci_rtc) { | 487 | if (!davinci_rtc) |
| 490 | dev_dbg(dev, "could not allocate memory for private data\n"); | ||
| 491 | return -ENOMEM; | 488 | return -ENOMEM; |
| 492 | } | ||
| 493 | 489 | ||
| 494 | davinci_rtc->irq = platform_get_irq(pdev, 0); | 490 | davinci_rtc->irq = platform_get_irq(pdev, 0); |
| 495 | if (davinci_rtc->irq < 0) { | 491 | if (davinci_rtc->irq < 0) { |
| @@ -498,28 +494,9 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
| 498 | } | 494 | } |
| 499 | 495 | ||
| 500 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 496 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 501 | if (!res) { | 497 | davinci_rtc->base = devm_ioremap_resource(dev, res); |
| 502 | dev_err(dev, "no mem resource\n"); | 498 | if (IS_ERR(davinci_rtc->base)) |
| 503 | return -EINVAL; | 499 | return PTR_ERR(davinci_rtc->base); |
| 504 | } | ||
| 505 | |||
| 506 | davinci_rtc->pbase = res->start; | ||
| 507 | davinci_rtc->base_size = resource_size(res); | ||
| 508 | |||
| 509 | mem = devm_request_mem_region(dev, davinci_rtc->pbase, | ||
| 510 | davinci_rtc->base_size, pdev->name); | ||
| 511 | if (!mem) { | ||
| 512 | dev_err(dev, "RTC registers at %08x are not free\n", | ||
| 513 | davinci_rtc->pbase); | ||
| 514 | return -EBUSY; | ||
| 515 | } | ||
| 516 | |||
| 517 | davinci_rtc->base = devm_ioremap(dev, davinci_rtc->pbase, | ||
| 518 | davinci_rtc->base_size); | ||
| 519 | if (!davinci_rtc->base) { | ||
| 520 | dev_err(dev, "unable to ioremap MEM resource\n"); | ||
| 521 | return -ENOMEM; | ||
| 522 | } | ||
| 523 | 500 | ||
| 524 | platform_set_drvdata(pdev, davinci_rtc); | 501 | platform_set_drvdata(pdev, davinci_rtc); |
| 525 | 502 | ||
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 2dd586a19b59..129add77065d 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c | |||
| @@ -756,19 +756,17 @@ static int ds1305_probe(struct spi_device *spi) | |||
| 756 | status = devm_request_irq(&spi->dev, spi->irq, ds1305_irq, | 756 | status = devm_request_irq(&spi->dev, spi->irq, ds1305_irq, |
| 757 | 0, dev_name(&ds1305->rtc->dev), ds1305); | 757 | 0, dev_name(&ds1305->rtc->dev), ds1305); |
| 758 | if (status < 0) { | 758 | if (status < 0) { |
| 759 | dev_dbg(&spi->dev, "request_irq %d --> %d\n", | 759 | dev_err(&spi->dev, "request_irq %d --> %d\n", |
| 760 | spi->irq, status); | 760 | spi->irq, status); |
| 761 | return status; | 761 | } else { |
| 762 | device_set_wakeup_capable(&spi->dev, 1); | ||
| 762 | } | 763 | } |
| 763 | |||
| 764 | device_set_wakeup_capable(&spi->dev, 1); | ||
| 765 | } | 764 | } |
| 766 | 765 | ||
| 767 | /* export NVRAM */ | 766 | /* export NVRAM */ |
| 768 | status = sysfs_create_bin_file(&spi->dev.kobj, &nvram); | 767 | status = sysfs_create_bin_file(&spi->dev.kobj, &nvram); |
| 769 | if (status < 0) { | 768 | if (status < 0) { |
| 770 | dev_dbg(&spi->dev, "register nvram --> %d\n", status); | 769 | dev_err(&spi->dev, "register nvram --> %d\n", status); |
| 771 | return status; | ||
| 772 | } | 770 | } |
| 773 | 771 | ||
| 774 | return 0; | 772 | return 0; |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4e75345a559a..f03d5ba96db1 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
| @@ -154,6 +154,7 @@ static const struct chip_desc chips[last_ds_type] = { | |||
| 154 | .alarm = 1, | 154 | .alarm = 1, |
| 155 | }, | 155 | }, |
| 156 | [mcp7941x] = { | 156 | [mcp7941x] = { |
| 157 | .alarm = 1, | ||
| 157 | /* this is battery backed SRAM */ | 158 | /* this is battery backed SRAM */ |
| 158 | .nvram_offset = 0x20, | 159 | .nvram_offset = 0x20, |
| 159 | .nvram_size = 0x40, | 160 | .nvram_size = 0x40, |
| @@ -606,6 +607,178 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { | |||
| 606 | 607 | ||
| 607 | /*----------------------------------------------------------------------*/ | 608 | /*----------------------------------------------------------------------*/ |
| 608 | 609 | ||
| 610 | /* | ||
| 611 | * Alarm support for mcp7941x devices. | ||
| 612 | */ | ||
| 613 | |||
| 614 | #define MCP7941X_REG_CONTROL 0x07 | ||
| 615 | # define MCP7941X_BIT_ALM0_EN 0x10 | ||
| 616 | # define MCP7941X_BIT_ALM1_EN 0x20 | ||
| 617 | #define MCP7941X_REG_ALARM0_BASE 0x0a | ||
| 618 | #define MCP7941X_REG_ALARM0_CTRL 0x0d | ||
| 619 | #define MCP7941X_REG_ALARM1_BASE 0x11 | ||
| 620 | #define MCP7941X_REG_ALARM1_CTRL 0x14 | ||
| 621 | # define MCP7941X_BIT_ALMX_IF (1 << 3) | ||
| 622 | # define MCP7941X_BIT_ALMX_C0 (1 << 4) | ||
| 623 | # define MCP7941X_BIT_ALMX_C1 (1 << 5) | ||
| 624 | # define MCP7941X_BIT_ALMX_C2 (1 << 6) | ||
| 625 | # define MCP7941X_BIT_ALMX_POL (1 << 7) | ||
| 626 | # define MCP7941X_MSK_ALMX_MATCH (MCP7941X_BIT_ALMX_C0 | \ | ||
| 627 | MCP7941X_BIT_ALMX_C1 | \ | ||
| 628 | MCP7941X_BIT_ALMX_C2) | ||
| 629 | |||
| 630 | static void mcp7941x_work(struct work_struct *work) | ||
| 631 | { | ||
| 632 | struct ds1307 *ds1307 = container_of(work, struct ds1307, work); | ||
| 633 | struct i2c_client *client = ds1307->client; | ||
| 634 | int reg, ret; | ||
| 635 | |||
| 636 | mutex_lock(&ds1307->rtc->ops_lock); | ||
| 637 | |||
| 638 | /* Check and clear alarm 0 interrupt flag. */ | ||
| 639 | reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_ALARM0_CTRL); | ||
| 640 | if (reg < 0) | ||
| 641 | goto out; | ||
| 642 | if (!(reg & MCP7941X_BIT_ALMX_IF)) | ||
| 643 | goto out; | ||
| 644 | reg &= ~MCP7941X_BIT_ALMX_IF; | ||
| 645 | ret = i2c_smbus_write_byte_data(client, MCP7941X_REG_ALARM0_CTRL, reg); | ||
| 646 | if (ret < 0) | ||
| 647 | goto out; | ||
| 648 | |||
| 649 | /* Disable alarm 0. */ | ||
| 650 | reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_CONTROL); | ||
| 651 | if (reg < 0) | ||
| 652 | goto out; | ||
| 653 | reg &= ~MCP7941X_BIT_ALM0_EN; | ||
| 654 | ret = i2c_smbus_write_byte_data(client, MCP7941X_REG_CONTROL, reg); | ||
| 655 | if (ret < 0) | ||
| 656 | goto out; | ||
| 657 | |||
| 658 | rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); | ||
| 659 | |||
| 660 | out: | ||
| 661 | if (test_bit(HAS_ALARM, &ds1307->flags)) | ||
| 662 | enable_irq(client->irq); | ||
| 663 | mutex_unlock(&ds1307->rtc->ops_lock); | ||
| 664 | } | ||
| 665 | |||
| 666 | static int mcp7941x_read_alarm(struct device *dev, struct rtc_wkalrm *t) | ||
| 667 | { | ||
| 668 | struct i2c_client *client = to_i2c_client(dev); | ||
| 669 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | ||
| 670 | u8 *regs = ds1307->regs; | ||
| 671 | int ret; | ||
| 672 | |||
| 673 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
| 674 | return -EINVAL; | ||
| 675 | |||
| 676 | /* Read control and alarm 0 registers. */ | ||
| 677 | ret = ds1307->read_block_data(client, MCP7941X_REG_CONTROL, 10, regs); | ||
| 678 | if (ret < 0) | ||
| 679 | return ret; | ||
| 680 | |||
| 681 | t->enabled = !!(regs[0] & MCP7941X_BIT_ALM0_EN); | ||
| 682 | |||
| 683 | /* Report alarm 0 time assuming 24-hour and day-of-month modes. */ | ||
| 684 | t->time.tm_sec = bcd2bin(ds1307->regs[3] & 0x7f); | ||
| 685 | t->time.tm_min = bcd2bin(ds1307->regs[4] & 0x7f); | ||
| 686 | t->time.tm_hour = bcd2bin(ds1307->regs[5] & 0x3f); | ||
| 687 | t->time.tm_wday = bcd2bin(ds1307->regs[6] & 0x7) - 1; | ||
| 688 | t->time.tm_mday = bcd2bin(ds1307->regs[7] & 0x3f); | ||
| 689 | t->time.tm_mon = bcd2bin(ds1307->regs[8] & 0x1f) - 1; | ||
| 690 | t->time.tm_year = -1; | ||
| 691 | t->time.tm_yday = -1; | ||
| 692 | t->time.tm_isdst = -1; | ||
| 693 | |||
| 694 | dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " | ||
| 695 | "enabled=%d polarity=%d irq=%d match=%d\n", __func__, | ||
| 696 | t->time.tm_sec, t->time.tm_min, t->time.tm_hour, | ||
| 697 | t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled, | ||
| 698 | !!(ds1307->regs[6] & MCP7941X_BIT_ALMX_POL), | ||
| 699 | !!(ds1307->regs[6] & MCP7941X_BIT_ALMX_IF), | ||
| 700 | (ds1307->regs[6] & MCP7941X_MSK_ALMX_MATCH) >> 4); | ||
| 701 | |||
| 702 | return 0; | ||
| 703 | } | ||
| 704 | |||
| 705 | static int mcp7941x_set_alarm(struct device *dev, struct rtc_wkalrm *t) | ||
| 706 | { | ||
| 707 | struct i2c_client *client = to_i2c_client(dev); | ||
| 708 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | ||
| 709 | unsigned char *regs = ds1307->regs; | ||
| 710 | int ret; | ||
| 711 | |||
| 712 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
| 713 | return -EINVAL; | ||
| 714 | |||
| 715 | dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " | ||
| 716 | "enabled=%d pending=%d\n", __func__, | ||
| 717 | t->time.tm_sec, t->time.tm_min, t->time.tm_hour, | ||
| 718 | t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, | ||
| 719 | t->enabled, t->pending); | ||
| 720 | |||
| 721 | /* Read control and alarm 0 registers. */ | ||
| 722 | ret = ds1307->read_block_data(client, MCP7941X_REG_CONTROL, 10, regs); | ||
| 723 | if (ret < 0) | ||
| 724 | return ret; | ||
| 725 | |||
| 726 | /* Set alarm 0, using 24-hour and day-of-month modes. */ | ||
| 727 | regs[3] = bin2bcd(t->time.tm_sec); | ||
| 728 | regs[4] = bin2bcd(t->time.tm_min); | ||
| 729 | regs[5] = bin2bcd(t->time.tm_hour); | ||
| 730 | regs[6] = bin2bcd(t->time.tm_wday) + 1; | ||
| 731 | regs[7] = bin2bcd(t->time.tm_mday); | ||
| 732 | regs[8] = bin2bcd(t->time.tm_mon) + 1; | ||
| 733 | |||
| 734 | /* Clear the alarm 0 interrupt flag. */ | ||
| 735 | regs[6] &= ~MCP7941X_BIT_ALMX_IF; | ||
| 736 | /* Set alarm match: second, minute, hour, day, date, month. */ | ||
| 737 | regs[6] |= MCP7941X_MSK_ALMX_MATCH; | ||
| 738 | |||
| 739 | if (t->enabled) | ||
| 740 | regs[0] |= MCP7941X_BIT_ALM0_EN; | ||
| 741 | else | ||
| 742 | regs[0] &= ~MCP7941X_BIT_ALM0_EN; | ||
| 743 | |||
| 744 | ret = ds1307->write_block_data(client, MCP7941X_REG_CONTROL, 10, regs); | ||
| 745 | if (ret < 0) | ||
| 746 | return ret; | ||
| 747 | |||
| 748 | return 0; | ||
| 749 | } | ||
| 750 | |||
| 751 | static int mcp7941x_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
| 752 | { | ||
| 753 | struct i2c_client *client = to_i2c_client(dev); | ||
| 754 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | ||
| 755 | int reg; | ||
| 756 | |||
| 757 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
| 758 | return -EINVAL; | ||
| 759 | |||
| 760 | reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_CONTROL); | ||
| 761 | if (reg < 0) | ||
| 762 | return reg; | ||
| 763 | |||
| 764 | if (enabled) | ||
| 765 | reg |= MCP7941X_BIT_ALM0_EN; | ||
| 766 | else | ||
| 767 | reg &= ~MCP7941X_BIT_ALM0_EN; | ||
| 768 | |||
| 769 | return i2c_smbus_write_byte_data(client, MCP7941X_REG_CONTROL, reg); | ||
| 770 | } | ||
| 771 | |||
| 772 | static const struct rtc_class_ops mcp7941x_rtc_ops = { | ||
| 773 | .read_time = ds1307_get_time, | ||
| 774 | .set_time = ds1307_set_time, | ||
| 775 | .read_alarm = mcp7941x_read_alarm, | ||
| 776 | .set_alarm = mcp7941x_set_alarm, | ||
| 777 | .alarm_irq_enable = mcp7941x_alarm_irq_enable, | ||
| 778 | }; | ||
| 779 | |||
| 780 | /*----------------------------------------------------------------------*/ | ||
| 781 | |||
| 609 | static ssize_t | 782 | static ssize_t |
| 610 | ds1307_nvram_read(struct file *filp, struct kobject *kobj, | 783 | ds1307_nvram_read(struct file *filp, struct kobject *kobj, |
| 611 | struct bin_attribute *attr, | 784 | struct bin_attribute *attr, |
| @@ -678,6 +851,7 @@ static int ds1307_probe(struct i2c_client *client, | |||
| 678 | [ds_1339] = DS1339_BIT_BBSQI, | 851 | [ds_1339] = DS1339_BIT_BBSQI, |
| 679 | [ds_3231] = DS3231_BIT_BBSQW, | 852 | [ds_3231] = DS3231_BIT_BBSQW, |
| 680 | }; | 853 | }; |
| 854 | const struct rtc_class_ops *rtc_ops = &ds13xx_rtc_ops; | ||
| 681 | 855 | ||
| 682 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA) | 856 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA) |
| 683 | && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) | 857 | && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) |
| @@ -816,6 +990,13 @@ static int ds1307_probe(struct i2c_client *client, | |||
| 816 | case ds_1388: | 990 | case ds_1388: |
| 817 | ds1307->offset = 1; /* Seconds starts at 1 */ | 991 | ds1307->offset = 1; /* Seconds starts at 1 */ |
| 818 | break; | 992 | break; |
| 993 | case mcp7941x: | ||
| 994 | rtc_ops = &mcp7941x_rtc_ops; | ||
| 995 | if (ds1307->client->irq > 0 && chip->alarm) { | ||
| 996 | INIT_WORK(&ds1307->work, mcp7941x_work); | ||
| 997 | want_irq = true; | ||
| 998 | } | ||
| 999 | break; | ||
| 819 | default: | 1000 | default: |
| 820 | break; | 1001 | break; |
| 821 | } | 1002 | } |
| @@ -927,55 +1108,61 @@ read_rtc: | |||
| 927 | bin2bcd(tmp)); | 1108 | bin2bcd(tmp)); |
| 928 | } | 1109 | } |
| 929 | 1110 | ||
| 1111 | device_set_wakeup_capable(&client->dev, want_irq); | ||
| 930 | ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, | 1112 | ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, |
| 931 | &ds13xx_rtc_ops, THIS_MODULE); | 1113 | rtc_ops, THIS_MODULE); |
| 932 | if (IS_ERR(ds1307->rtc)) { | 1114 | if (IS_ERR(ds1307->rtc)) { |
| 933 | err = PTR_ERR(ds1307->rtc); | 1115 | return PTR_ERR(ds1307->rtc); |
| 934 | dev_err(&client->dev, | ||
| 935 | "unable to register the class device\n"); | ||
| 936 | goto exit; | ||
| 937 | } | 1116 | } |
| 938 | 1117 | ||
| 939 | if (want_irq) { | 1118 | if (want_irq) { |
| 940 | err = request_irq(client->irq, ds1307_irq, IRQF_SHARED, | 1119 | err = request_irq(client->irq, ds1307_irq, IRQF_SHARED, |
| 941 | ds1307->rtc->name, client); | 1120 | ds1307->rtc->name, client); |
| 942 | if (err) { | 1121 | if (err) { |
| 943 | dev_err(&client->dev, | 1122 | client->irq = 0; |
| 944 | "unable to request IRQ!\n"); | 1123 | dev_err(&client->dev, "unable to request IRQ!\n"); |
| 945 | goto exit; | 1124 | } else { |
| 946 | } | ||
| 947 | 1125 | ||
| 948 | device_set_wakeup_capable(&client->dev, 1); | 1126 | set_bit(HAS_ALARM, &ds1307->flags); |
| 949 | set_bit(HAS_ALARM, &ds1307->flags); | 1127 | dev_dbg(&client->dev, "got IRQ %d\n", client->irq); |
| 950 | dev_dbg(&client->dev, "got IRQ %d\n", client->irq); | 1128 | } |
| 951 | } | 1129 | } |
| 952 | 1130 | ||
| 953 | if (chip->nvram_size) { | 1131 | if (chip->nvram_size) { |
| 1132 | |||
| 954 | ds1307->nvram = devm_kzalloc(&client->dev, | 1133 | ds1307->nvram = devm_kzalloc(&client->dev, |
| 955 | sizeof(struct bin_attribute), | 1134 | sizeof(struct bin_attribute), |
| 956 | GFP_KERNEL); | 1135 | GFP_KERNEL); |
| 957 | if (!ds1307->nvram) { | 1136 | if (!ds1307->nvram) { |
| 958 | err = -ENOMEM; | 1137 | dev_err(&client->dev, "cannot allocate memory for nvram sysfs\n"); |
| 959 | goto err_irq; | 1138 | } else { |
| 1139 | |||
| 1140 | ds1307->nvram->attr.name = "nvram"; | ||
| 1141 | ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; | ||
| 1142 | |||
| 1143 | sysfs_bin_attr_init(ds1307->nvram); | ||
| 1144 | |||
| 1145 | ds1307->nvram->read = ds1307_nvram_read; | ||
| 1146 | ds1307->nvram->write = ds1307_nvram_write; | ||
| 1147 | ds1307->nvram->size = chip->nvram_size; | ||
| 1148 | ds1307->nvram_offset = chip->nvram_offset; | ||
| 1149 | |||
| 1150 | err = sysfs_create_bin_file(&client->dev.kobj, | ||
| 1151 | ds1307->nvram); | ||
| 1152 | if (err) { | ||
| 1153 | dev_err(&client->dev, | ||
| 1154 | "unable to create sysfs file: %s\n", | ||
| 1155 | ds1307->nvram->attr.name); | ||
| 1156 | } else { | ||
| 1157 | set_bit(HAS_NVRAM, &ds1307->flags); | ||
| 1158 | dev_info(&client->dev, "%zu bytes nvram\n", | ||
| 1159 | ds1307->nvram->size); | ||
| 1160 | } | ||
| 960 | } | 1161 | } |
| 961 | ds1307->nvram->attr.name = "nvram"; | ||
| 962 | ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; | ||
| 963 | sysfs_bin_attr_init(ds1307->nvram); | ||
| 964 | ds1307->nvram->read = ds1307_nvram_read; | ||
| 965 | ds1307->nvram->write = ds1307_nvram_write; | ||
| 966 | ds1307->nvram->size = chip->nvram_size; | ||
| 967 | ds1307->nvram_offset = chip->nvram_offset; | ||
| 968 | err = sysfs_create_bin_file(&client->dev.kobj, ds1307->nvram); | ||
| 969 | if (err) | ||
| 970 | goto err_irq; | ||
| 971 | set_bit(HAS_NVRAM, &ds1307->flags); | ||
| 972 | dev_info(&client->dev, "%zu bytes nvram\n", ds1307->nvram->size); | ||
| 973 | } | 1162 | } |
| 974 | 1163 | ||
| 975 | return 0; | 1164 | return 0; |
| 976 | 1165 | ||
| 977 | err_irq: | ||
| 978 | free_irq(client->irq, client); | ||
| 979 | exit: | 1166 | exit: |
| 980 | return err; | 1167 | return err; |
| 981 | } | 1168 | } |
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c new file mode 100644 index 000000000000..c82b4c050326 --- /dev/null +++ b/drivers/rtc/rtc-ds1347.c | |||
| @@ -0,0 +1,166 @@ | |||
| 1 | /* rtc-ds1347.c | ||
| 2 | * | ||
| 3 | * Driver for Dallas Semiconductor DS1347 Low Current, SPI Compatible | ||
| 4 | * Real Time Clock | ||
| 5 | * | ||
| 6 | * Author : Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/device.h> | ||
| 17 | #include <linux/platform_device.h> | ||
| 18 | #include <linux/rtc.h> | ||
| 19 | #include <linux/spi/spi.h> | ||
| 20 | #include <linux/bcd.h> | ||
| 21 | |||
| 22 | /* Registers in ds1347 rtc */ | ||
| 23 | |||
| 24 | #define DS1347_SECONDS_REG 0x01 | ||
| 25 | #define DS1347_MINUTES_REG 0x03 | ||
| 26 | #define DS1347_HOURS_REG 0x05 | ||
| 27 | #define DS1347_DATE_REG 0x07 | ||
| 28 | #define DS1347_MONTH_REG 0x09 | ||
| 29 | #define DS1347_DAY_REG 0x0B | ||
| 30 | #define DS1347_YEAR_REG 0x0D | ||
| 31 | #define DS1347_CONTROL_REG 0x0F | ||
| 32 | #define DS1347_STATUS_REG 0x17 | ||
| 33 | #define DS1347_CLOCK_BURST 0x3F | ||
| 34 | |||
| 35 | static int ds1347_read_reg(struct device *dev, unsigned char address, | ||
| 36 | unsigned char *data) | ||
| 37 | { | ||
| 38 | struct spi_device *spi = to_spi_device(dev); | ||
| 39 | |||
| 40 | *data = address | 0x80; | ||
| 41 | |||
| 42 | return spi_write_then_read(spi, data, 1, data, 1); | ||
| 43 | } | ||
| 44 | |||
| 45 | static int ds1347_write_reg(struct device *dev, unsigned char address, | ||
| 46 | unsigned char data) | ||
| 47 | { | ||
| 48 | struct spi_device *spi = to_spi_device(dev); | ||
| 49 | unsigned char buf[2]; | ||
| 50 | |||
| 51 | buf[0] = address & 0x7F; | ||
| 52 | buf[1] = data; | ||
| 53 | |||
| 54 | return spi_write_then_read(spi, buf, 2, NULL, 0); | ||
| 55 | } | ||
| 56 | |||
| 57 | static int ds1347_read_time(struct device *dev, struct rtc_time *dt) | ||
| 58 | { | ||
| 59 | struct spi_device *spi = to_spi_device(dev); | ||
| 60 | int err; | ||
| 61 | unsigned char buf[8]; | ||
| 62 | |||
| 63 | buf[0] = DS1347_CLOCK_BURST | 0x80; | ||
| 64 | |||
| 65 | err = spi_write_then_read(spi, buf, 1, buf, 8); | ||
| 66 | if (err) | ||
| 67 | return err; | ||
| 68 | |||
| 69 | dt->tm_sec = bcd2bin(buf[0]); | ||
| 70 | dt->tm_min = bcd2bin(buf[1]); | ||
| 71 | dt->tm_hour = bcd2bin(buf[2] & 0x3F); | ||
| 72 | dt->tm_mday = bcd2bin(buf[3]); | ||
| 73 | dt->tm_mon = bcd2bin(buf[4]) - 1; | ||
| 74 | dt->tm_wday = bcd2bin(buf[5]) - 1; | ||
| 75 | dt->tm_year = bcd2bin(buf[6]) + 100; | ||
| 76 | |||
| 77 | return rtc_valid_tm(dt); | ||
| 78 | } | ||
| 79 | |||
| 80 | static int ds1347_set_time(struct device *dev, struct rtc_time *dt) | ||
| 81 | { | ||
| 82 | struct spi_device *spi = to_spi_device(dev); | ||
| 83 | unsigned char buf[9]; | ||
| 84 | |||
| 85 | buf[0] = DS1347_CLOCK_BURST & 0x7F; | ||
| 86 | buf[1] = bin2bcd(dt->tm_sec); | ||
| 87 | buf[2] = bin2bcd(dt->tm_min); | ||
| 88 | buf[3] = (bin2bcd(dt->tm_hour) & 0x3F); | ||
| 89 | buf[4] = bin2bcd(dt->tm_mday); | ||
| 90 | buf[5] = bin2bcd(dt->tm_mon + 1); | ||
| 91 | buf[6] = bin2bcd(dt->tm_wday + 1); | ||
| 92 | |||
| 93 | /* year in linux is from 1900 i.e in range of 100 | ||
| 94 | in rtc it is from 00 to 99 */ | ||
| 95 | dt->tm_year = dt->tm_year % 100; | ||
| 96 | |||
| 97 | buf[7] = bin2bcd(dt->tm_year); | ||
| 98 | buf[8] = bin2bcd(0x00); | ||
| 99 | |||
| 100 | /* write the rtc settings */ | ||
| 101 | return spi_write_then_read(spi, buf, 9, NULL, 0); | ||
| 102 | } | ||
| 103 | |||
| 104 | static const struct rtc_class_ops ds1347_rtc_ops = { | ||
| 105 | .read_time = ds1347_read_time, | ||
| 106 | .set_time = ds1347_set_time, | ||
| 107 | }; | ||
| 108 | |||
| 109 | static int ds1347_probe(struct spi_device *spi) | ||
| 110 | { | ||
| 111 | struct rtc_device *rtc; | ||
| 112 | unsigned char data; | ||
| 113 | int res; | ||
| 114 | |||
| 115 | /* spi setup with ds1347 in mode 3 and bits per word as 8 */ | ||
| 116 | spi->mode = SPI_MODE_3; | ||
| 117 | spi->bits_per_word = 8; | ||
| 118 | spi_setup(spi); | ||
| 119 | |||
| 120 | /* RTC Settings */ | ||
| 121 | res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data); | ||
| 122 | if (res) | ||
| 123 | return res; | ||
| 124 | |||
| 125 | /* Disable the write protect of rtc */ | ||
| 126 | ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data); | ||
| 127 | data = data & ~(1<<7); | ||
| 128 | ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data); | ||
| 129 | |||
| 130 | /* Enable the oscillator , disable the oscillator stop flag, | ||
| 131 | and glitch filter to reduce current consumption */ | ||
| 132 | ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data); | ||
| 133 | data = data & 0x1B; | ||
| 134 | ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data); | ||
| 135 | |||
| 136 | /* display the settings */ | ||
| 137 | ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data); | ||
| 138 | dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data); | ||
| 139 | |||
| 140 | ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data); | ||
| 141 | dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data); | ||
| 142 | |||
| 143 | rtc = devm_rtc_device_register(&spi->dev, "ds1347", | ||
| 144 | &ds1347_rtc_ops, THIS_MODULE); | ||
| 145 | |||
| 146 | if (IS_ERR(rtc)) | ||
| 147 | return PTR_ERR(rtc); | ||
| 148 | |||
| 149 | spi_set_drvdata(spi, rtc); | ||
| 150 | |||
| 151 | return 0; | ||
| 152 | } | ||
| 153 | |||
| 154 | static struct spi_driver ds1347_driver = { | ||
| 155 | .driver = { | ||
| 156 | .name = "ds1347", | ||
| 157 | .owner = THIS_MODULE, | ||
| 158 | }, | ||
| 159 | .probe = ds1347_probe, | ||
| 160 | }; | ||
| 161 | |||
| 162 | module_spi_driver(ds1347_driver); | ||
| 163 | |||
| 164 | MODULE_DESCRIPTION("DS1347 SPI RTC DRIVER"); | ||
| 165 | MODULE_AUTHOR("Raghavendra C Ganiga <ravi23ganiga@gmail.com>"); | ||
| 166 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c index be9d8c0a7e3a..e67bfcb3a1aa 100644 --- a/drivers/rtc/rtc-ds1390.c +++ b/drivers/rtc/rtc-ds1390.c | |||
| @@ -132,10 +132,9 @@ static int ds1390_probe(struct spi_device *spi) | |||
| 132 | spi_setup(spi); | 132 | spi_setup(spi); |
| 133 | 133 | ||
| 134 | chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL); | 134 | chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL); |
| 135 | if (!chip) { | 135 | if (!chip) |
| 136 | dev_err(&spi->dev, "unable to allocate device memory\n"); | ||
| 137 | return -ENOMEM; | 136 | return -ENOMEM; |
| 138 | } | 137 | |
| 139 | spi_set_drvdata(spi, chip); | 138 | spi_set_drvdata(spi, chip); |
| 140 | 139 | ||
| 141 | res = ds1390_get_reg(&spi->dev, DS1390_REG_SECONDS, &tmp); | 140 | res = ds1390_get_reg(&spi->dev, DS1390_REG_SECONDS, &tmp); |
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c index bc7b4fcf603c..b13d1399b81a 100644 --- a/drivers/rtc/rtc-ds1511.c +++ b/drivers/rtc/rtc-ds1511.c | |||
| @@ -371,8 +371,7 @@ ds1511_interrupt(int irq, void *dev_id) | |||
| 371 | events |= RTC_UF; | 371 | events |= RTC_UF; |
| 372 | else | 372 | else |
| 373 | events |= RTC_AF; | 373 | events |= RTC_AF; |
| 374 | if (likely(pdata->rtc)) | 374 | rtc_update_irq(pdata->rtc, 1, events); |
| 375 | rtc_update_irq(pdata->rtc, 1, events); | ||
| 376 | } | 375 | } |
| 377 | spin_unlock(&pdata->lock); | 376 | spin_unlock(&pdata->lock); |
| 378 | return events ? IRQ_HANDLED : IRQ_NONE; | 377 | return events ? IRQ_HANDLED : IRQ_NONE; |
| @@ -473,7 +472,6 @@ static struct bin_attribute ds1511_nvram_attr = { | |||
| 473 | 472 | ||
| 474 | static int ds1511_rtc_probe(struct platform_device *pdev) | 473 | static int ds1511_rtc_probe(struct platform_device *pdev) |
| 475 | { | 474 | { |
| 476 | struct rtc_device *rtc; | ||
| 477 | struct resource *res; | 475 | struct resource *res; |
| 478 | struct rtc_plat_data *pdata; | 476 | struct rtc_plat_data *pdata; |
| 479 | int ret = 0; | 477 | int ret = 0; |
| @@ -512,6 +510,12 @@ static int ds1511_rtc_probe(struct platform_device *pdev) | |||
| 512 | 510 | ||
| 513 | spin_lock_init(&pdata->lock); | 511 | spin_lock_init(&pdata->lock); |
| 514 | platform_set_drvdata(pdev, pdata); | 512 | platform_set_drvdata(pdev, pdata); |
| 513 | |||
| 514 | pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | ||
| 515 | &ds1511_rtc_ops, THIS_MODULE); | ||
| 516 | if (IS_ERR(pdata->rtc)) | ||
| 517 | return PTR_ERR(pdata->rtc); | ||
| 518 | |||
| 515 | /* | 519 | /* |
| 516 | * if the platform has an interrupt in mind for this device, | 520 | * if the platform has an interrupt in mind for this device, |
| 517 | * then by all means, set it | 521 | * then by all means, set it |
| @@ -526,15 +530,12 @@ static int ds1511_rtc_probe(struct platform_device *pdev) | |||
| 526 | } | 530 | } |
| 527 | } | 531 | } |
| 528 | 532 | ||
| 529 | rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &ds1511_rtc_ops, | ||
| 530 | THIS_MODULE); | ||
| 531 | if (IS_ERR(rtc)) | ||
| 532 | return PTR_ERR(rtc); | ||
| 533 | pdata->rtc = rtc; | ||
| 534 | |||
| 535 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); | 533 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); |
| 534 | if (ret) | ||
| 535 | dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n", | ||
| 536 | ds1511_nvram_attr.attr.name); | ||
| 536 | 537 | ||
| 537 | return ret; | 538 | return 0; |
| 538 | } | 539 | } |
| 539 | 540 | ||
| 540 | static int ds1511_rtc_remove(struct platform_device *pdev) | 541 | static int ds1511_rtc_remove(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c index fd31571941f5..ab56893aac73 100644 --- a/drivers/rtc/rtc-ds1553.c +++ b/drivers/rtc/rtc-ds1553.c | |||
| @@ -206,8 +206,7 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id) | |||
| 206 | events |= RTC_UF; | 206 | events |= RTC_UF; |
| 207 | else | 207 | else |
| 208 | events |= RTC_AF; | 208 | events |= RTC_AF; |
| 209 | if (likely(pdata->rtc)) | 209 | rtc_update_irq(pdata->rtc, 1, events); |
| 210 | rtc_update_irq(pdata->rtc, 1, events); | ||
| 211 | } | 210 | } |
| 212 | spin_unlock(&pdata->lock); | 211 | spin_unlock(&pdata->lock); |
| 213 | return events ? IRQ_HANDLED : IRQ_NONE; | 212 | return events ? IRQ_HANDLED : IRQ_NONE; |
| @@ -278,7 +277,6 @@ static struct bin_attribute ds1553_nvram_attr = { | |||
| 278 | 277 | ||
| 279 | static int ds1553_rtc_probe(struct platform_device *pdev) | 278 | static int ds1553_rtc_probe(struct platform_device *pdev) |
| 280 | { | 279 | { |
| 281 | struct rtc_device *rtc; | ||
| 282 | struct resource *res; | 280 | struct resource *res; |
| 283 | unsigned int cen, sec; | 281 | unsigned int cen, sec; |
| 284 | struct rtc_plat_data *pdata; | 282 | struct rtc_plat_data *pdata; |
| @@ -311,6 +309,12 @@ static int ds1553_rtc_probe(struct platform_device *pdev) | |||
| 311 | spin_lock_init(&pdata->lock); | 309 | spin_lock_init(&pdata->lock); |
| 312 | pdata->last_jiffies = jiffies; | 310 | pdata->last_jiffies = jiffies; |
| 313 | platform_set_drvdata(pdev, pdata); | 311 | platform_set_drvdata(pdev, pdata); |
| 312 | |||
| 313 | pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | ||
| 314 | &ds1553_rtc_ops, THIS_MODULE); | ||
| 315 | if (IS_ERR(pdata->rtc)) | ||
| 316 | return PTR_ERR(pdata->rtc); | ||
| 317 | |||
| 314 | if (pdata->irq > 0) { | 318 | if (pdata->irq > 0) { |
| 315 | writeb(0, ioaddr + RTC_INTERRUPTS); | 319 | writeb(0, ioaddr + RTC_INTERRUPTS); |
| 316 | if (devm_request_irq(&pdev->dev, pdata->irq, | 320 | if (devm_request_irq(&pdev->dev, pdata->irq, |
| @@ -321,15 +325,12 @@ static int ds1553_rtc_probe(struct platform_device *pdev) | |||
| 321 | } | 325 | } |
| 322 | } | 326 | } |
| 323 | 327 | ||
| 324 | rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | ||
| 325 | &ds1553_rtc_ops, THIS_MODULE); | ||
| 326 | if (IS_ERR(rtc)) | ||
| 327 | return PTR_ERR(rtc); | ||
| 328 | pdata->rtc = rtc; | ||
| 329 | |||
| 330 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr); | 328 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr); |
| 329 | if (ret) | ||
| 330 | dev_err(&pdev->dev, "unable to create sysfs file: %s\n", | ||
| 331 | ds1553_nvram_attr.attr.name); | ||
| 331 | 332 | ||
| 332 | return ret; | 333 | return 0; |
| 333 | } | 334 | } |
| 334 | 335 | ||
| 335 | static int ds1553_rtc_remove(struct platform_device *pdev) | 336 | static int ds1553_rtc_remove(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c index 18e2d8471472..a4888dbca2e1 100644 --- a/drivers/rtc/rtc-ds1672.c +++ b/drivers/rtc/rtc-ds1672.c | |||
| @@ -177,8 +177,9 @@ static int ds1672_probe(struct i2c_client *client, | |||
| 177 | 177 | ||
| 178 | /* read control register */ | 178 | /* read control register */ |
| 179 | err = ds1672_get_control(client, &control); | 179 | err = ds1672_get_control(client, &control); |
| 180 | if (err) | 180 | if (err) { |
| 181 | goto exit_devreg; | 181 | dev_warn(&client->dev, "Unable to read the control register\n"); |
| 182 | } | ||
| 182 | 183 | ||
| 183 | if (control & DS1672_REG_CONTROL_EOSC) | 184 | if (control & DS1672_REG_CONTROL_EOSC) |
| 184 | dev_warn(&client->dev, "Oscillator not enabled. " | 185 | dev_warn(&client->dev, "Oscillator not enabled. " |
| @@ -187,12 +188,10 @@ static int ds1672_probe(struct i2c_client *client, | |||
| 187 | /* Register sysfs hooks */ | 188 | /* Register sysfs hooks */ |
| 188 | err = device_create_file(&client->dev, &dev_attr_control); | 189 | err = device_create_file(&client->dev, &dev_attr_control); |
| 189 | if (err) | 190 | if (err) |
| 190 | goto exit_devreg; | 191 | dev_err(&client->dev, "Unable to create sysfs entry: %s\n", |
| 192 | dev_attr_control.attr.name); | ||
| 191 | 193 | ||
| 192 | return 0; | 194 | return 0; |
| 193 | |||
| 194 | exit_devreg: | ||
| 195 | return err; | ||
| 196 | } | 195 | } |
| 197 | 196 | ||
| 198 | static struct i2c_device_id ds1672_id[] = { | 197 | static struct i2c_device_id ds1672_id[] = { |
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c index 5a1f3b2a8f1e..942103dac30f 100644 --- a/drivers/rtc/rtc-ds1742.c +++ b/drivers/rtc/rtc-ds1742.c | |||
| @@ -204,8 +204,11 @@ static int ds1742_rtc_probe(struct platform_device *pdev) | |||
| 204 | return PTR_ERR(rtc); | 204 | return PTR_ERR(rtc); |
| 205 | 205 | ||
| 206 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); | 206 | ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); |
| 207 | if (ret) | ||
| 208 | dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n", | ||
| 209 | pdata->nvram_attr.attr.name); | ||
| 207 | 210 | ||
| 208 | return ret; | 211 | return 0; |
| 209 | } | 212 | } |
| 210 | 213 | ||
| 211 | static int ds1742_rtc_remove(struct platform_device *pdev) | 214 | static int ds1742_rtc_remove(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index b83bb5a527f8..adaf06c41479 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c | |||
| @@ -57,6 +57,7 @@ struct ds3232 { | |||
| 57 | * in the remove function. | 57 | * in the remove function. |
| 58 | */ | 58 | */ |
| 59 | struct mutex mutex; | 59 | struct mutex mutex; |
| 60 | bool suspended; | ||
| 60 | int exiting; | 61 | int exiting; |
| 61 | }; | 62 | }; |
| 62 | 63 | ||
| @@ -345,7 +346,15 @@ static irqreturn_t ds3232_irq(int irq, void *dev_id) | |||
| 345 | struct ds3232 *ds3232 = i2c_get_clientdata(client); | 346 | struct ds3232 *ds3232 = i2c_get_clientdata(client); |
| 346 | 347 | ||
| 347 | disable_irq_nosync(irq); | 348 | disable_irq_nosync(irq); |
| 348 | schedule_work(&ds3232->work); | 349 | |
| 350 | /* | ||
| 351 | * If rtc as a wakeup source, can't schedule the work | ||
| 352 | * at system resume flow, because at this time the i2c bus | ||
| 353 | * has not been resumed. | ||
| 354 | */ | ||
| 355 | if (!ds3232->suspended) | ||
| 356 | schedule_work(&ds3232->work); | ||
| 357 | |||
| 349 | return IRQ_HANDLED; | 358 | return IRQ_HANDLED; |
| 350 | } | 359 | } |
| 351 | 360 | ||
| @@ -363,22 +372,26 @@ static void ds3232_work(struct work_struct *work) | |||
| 363 | 372 | ||
| 364 | if (stat & DS3232_REG_SR_A1F) { | 373 | if (stat & DS3232_REG_SR_A1F) { |
| 365 | control = i2c_smbus_read_byte_data(client, DS3232_REG_CR); | 374 | control = i2c_smbus_read_byte_data(client, DS3232_REG_CR); |
| 366 | if (control < 0) | 375 | if (control < 0) { |
| 367 | goto out; | 376 | pr_warn("Read DS3232 Control Register error." |
| 368 | /* disable alarm1 interrupt */ | 377 | "Disable IRQ%d.\n", client->irq); |
| 369 | control &= ~(DS3232_REG_CR_A1IE); | 378 | } else { |
| 370 | i2c_smbus_write_byte_data(client, DS3232_REG_CR, control); | 379 | /* disable alarm1 interrupt */ |
| 371 | 380 | control &= ~(DS3232_REG_CR_A1IE); | |
| 372 | /* clear the alarm pend flag */ | 381 | i2c_smbus_write_byte_data(client, DS3232_REG_CR, |
| 373 | stat &= ~DS3232_REG_SR_A1F; | 382 | control); |
| 374 | i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat); | 383 | |
| 375 | 384 | /* clear the alarm pend flag */ | |
| 376 | rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF); | 385 | stat &= ~DS3232_REG_SR_A1F; |
| 386 | i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat); | ||
| 387 | |||
| 388 | rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF); | ||
| 389 | |||
| 390 | if (!ds3232->exiting) | ||
| 391 | enable_irq(client->irq); | ||
| 392 | } | ||
| 377 | } | 393 | } |
| 378 | 394 | ||
| 379 | out: | ||
| 380 | if (!ds3232->exiting) | ||
| 381 | enable_irq(client->irq); | ||
| 382 | unlock: | 395 | unlock: |
| 383 | mutex_unlock(&ds3232->mutex); | 396 | mutex_unlock(&ds3232->mutex); |
| 384 | } | 397 | } |
| @@ -411,23 +424,17 @@ static int ds3232_probe(struct i2c_client *client, | |||
| 411 | if (ret) | 424 | if (ret) |
| 412 | return ret; | 425 | return ret; |
| 413 | 426 | ||
| 414 | ds3232->rtc = devm_rtc_device_register(&client->dev, client->name, | 427 | if (client->irq > 0) { |
| 415 | &ds3232_rtc_ops, THIS_MODULE); | 428 | ret = devm_request_irq(&client->dev, client->irq, ds3232_irq, |
| 416 | if (IS_ERR(ds3232->rtc)) { | 429 | IRQF_SHARED, "ds3232", client); |
| 417 | dev_err(&client->dev, "unable to register the class device\n"); | ||
| 418 | return PTR_ERR(ds3232->rtc); | ||
| 419 | } | ||
| 420 | |||
| 421 | if (client->irq >= 0) { | ||
| 422 | ret = devm_request_irq(&client->dev, client->irq, ds3232_irq, 0, | ||
| 423 | "ds3232", client); | ||
| 424 | if (ret) { | 430 | if (ret) { |
| 425 | dev_err(&client->dev, "unable to request IRQ\n"); | 431 | dev_err(&client->dev, "unable to request IRQ\n"); |
| 426 | return ret; | ||
| 427 | } | 432 | } |
| 433 | device_init_wakeup(&client->dev, 1); | ||
| 428 | } | 434 | } |
| 429 | 435 | ds3232->rtc = devm_rtc_device_register(&client->dev, client->name, | |
| 430 | return 0; | 436 | &ds3232_rtc_ops, THIS_MODULE); |
| 437 | return PTR_ERR_OR_ZERO(ds3232->rtc); | ||
| 431 | } | 438 | } |
| 432 | 439 | ||
| 433 | static int ds3232_remove(struct i2c_client *client) | 440 | static int ds3232_remove(struct i2c_client *client) |
| @@ -446,6 +453,42 @@ static int ds3232_remove(struct i2c_client *client) | |||
| 446 | return 0; | 453 | return 0; |
| 447 | } | 454 | } |
| 448 | 455 | ||
| 456 | #ifdef CONFIG_PM_SLEEP | ||
| 457 | static int ds3232_suspend(struct device *dev) | ||
| 458 | { | ||
| 459 | struct ds3232 *ds3232 = dev_get_drvdata(dev); | ||
| 460 | struct i2c_client *client = to_i2c_client(dev); | ||
| 461 | |||
| 462 | if (device_can_wakeup(dev)) { | ||
| 463 | ds3232->suspended = true; | ||
| 464 | irq_set_irq_wake(client->irq, 1); | ||
| 465 | } | ||
| 466 | |||
| 467 | return 0; | ||
| 468 | } | ||
| 469 | |||
| 470 | static int ds3232_resume(struct device *dev) | ||
| 471 | { | ||
| 472 | struct ds3232 *ds3232 = dev_get_drvdata(dev); | ||
| 473 | struct i2c_client *client = to_i2c_client(dev); | ||
| 474 | |||
| 475 | if (ds3232->suspended) { | ||
| 476 | ds3232->suspended = false; | ||
| 477 | |||
| 478 | /* Clear the hardware alarm pend flag */ | ||
| 479 | schedule_work(&ds3232->work); | ||
| 480 | |||
| 481 | irq_set_irq_wake(client->irq, 0); | ||
| 482 | } | ||
| 483 | |||
| 484 | return 0; | ||
| 485 | } | ||
| 486 | #endif | ||
| 487 | |||
| 488 | static const struct dev_pm_ops ds3232_pm_ops = { | ||
| 489 | SET_SYSTEM_SLEEP_PM_OPS(ds3232_suspend, ds3232_resume) | ||
| 490 | }; | ||
| 491 | |||
| 449 | static const struct i2c_device_id ds3232_id[] = { | 492 | static const struct i2c_device_id ds3232_id[] = { |
| 450 | { "ds3232", 0 }, | 493 | { "ds3232", 0 }, |
| 451 | { } | 494 | { } |
| @@ -456,6 +499,7 @@ static struct i2c_driver ds3232_driver = { | |||
| 456 | .driver = { | 499 | .driver = { |
| 457 | .name = "rtc-ds3232", | 500 | .name = "rtc-ds3232", |
| 458 | .owner = THIS_MODULE, | 501 | .owner = THIS_MODULE, |
| 502 | .pm = &ds3232_pm_ops, | ||
| 459 | }, | 503 | }, |
| 460 | .probe = ds3232_probe, | 504 | .probe = ds3232_probe, |
| 461 | .remove = ds3232_remove, | 505 | .remove = ds3232_remove, |
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index abd7f9091f34..cd741c77e085 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c | |||
| @@ -401,7 +401,9 @@ static int __init dryice_rtc_probe(struct platform_device *pdev) | |||
| 401 | imxdi->clk = devm_clk_get(&pdev->dev, NULL); | 401 | imxdi->clk = devm_clk_get(&pdev->dev, NULL); |
| 402 | if (IS_ERR(imxdi->clk)) | 402 | if (IS_ERR(imxdi->clk)) |
| 403 | return PTR_ERR(imxdi->clk); | 403 | return PTR_ERR(imxdi->clk); |
| 404 | clk_prepare_enable(imxdi->clk); | 404 | rc = clk_prepare_enable(imxdi->clk); |
| 405 | if (rc) | ||
| 406 | return rc; | ||
| 405 | 407 | ||
| 406 | /* | 408 | /* |
| 407 | * Initialize dryice hardware | 409 | * Initialize dryice hardware |
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c index 7854a656628f..7e5ead936a04 100644 --- a/drivers/rtc/rtc-isl12057.c +++ b/drivers/rtc/rtc-isl12057.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/rtc.h> | 26 | #include <linux/rtc.h> |
| 27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
| 28 | #include <linux/bcd.h> | 28 | #include <linux/bcd.h> |
| 29 | #include <linux/rtc.h> | ||
| 30 | #include <linux/of.h> | 29 | #include <linux/of.h> |
| 31 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
| 32 | #include <linux/regmap.h> | 31 | #include <linux/regmap.h> |
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 1b126d2513de..08f5160fb6d4 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #define JZ_RTC_CTRL_ENABLE BIT(0) | 38 | #define JZ_RTC_CTRL_ENABLE BIT(0) |
| 39 | 39 | ||
| 40 | struct jz4740_rtc { | 40 | struct jz4740_rtc { |
| 41 | struct resource *mem; | ||
| 42 | void __iomem *base; | 41 | void __iomem *base; |
| 43 | 42 | ||
| 44 | struct rtc_device *rtc; | 43 | struct rtc_device *rtc; |
| @@ -216,6 +215,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev) | |||
| 216 | int ret; | 215 | int ret; |
| 217 | struct jz4740_rtc *rtc; | 216 | struct jz4740_rtc *rtc; |
| 218 | uint32_t scratchpad; | 217 | uint32_t scratchpad; |
| 218 | struct resource *mem; | ||
| 219 | 219 | ||
| 220 | rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); | 220 | rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); |
| 221 | if (!rtc) | 221 | if (!rtc) |
| @@ -227,25 +227,10 @@ static int jz4740_rtc_probe(struct platform_device *pdev) | |||
| 227 | return -ENOENT; | 227 | return -ENOENT; |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 230 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 231 | if (!rtc->mem) { | 231 | rtc->base = devm_ioremap_resource(&pdev->dev, mem); |
| 232 | dev_err(&pdev->dev, "Failed to get platform mmio memory\n"); | 232 | if (IS_ERR(rtc->base)) |
| 233 | return -ENOENT; | 233 | return PTR_ERR(rtc->base); |
| 234 | } | ||
| 235 | |||
| 236 | rtc->mem = devm_request_mem_region(&pdev->dev, rtc->mem->start, | ||
| 237 | resource_size(rtc->mem), pdev->name); | ||
| 238 | if (!rtc->mem) { | ||
| 239 | dev_err(&pdev->dev, "Failed to request mmio memory region\n"); | ||
| 240 | return -EBUSY; | ||
| 241 | } | ||
| 242 | |||
| 243 | rtc->base = devm_ioremap_nocache(&pdev->dev, rtc->mem->start, | ||
| 244 | resource_size(rtc->mem)); | ||
| 245 | if (!rtc->base) { | ||
| 246 | dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); | ||
| 247 | return -EBUSY; | ||
| 248 | } | ||
| 249 | 234 | ||
| 250 | spin_lock_init(&rtc->lock); | 235 | spin_lock_init(&rtc->lock); |
| 251 | 236 | ||
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c index bfdbcb82d069..f130c08c98f8 100644 --- a/drivers/rtc/rtc-lpc32xx.c +++ b/drivers/rtc/rtc-lpc32xx.c | |||
| @@ -211,10 +211,9 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev) | |||
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); | 213 | rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); |
| 214 | if (unlikely(!rtc)) { | 214 | if (unlikely(!rtc)) |
| 215 | dev_err(&pdev->dev, "Can't allocate memory\n"); | ||
| 216 | return -ENOMEM; | 215 | return -ENOMEM; |
| 217 | } | 216 | |
| 218 | rtc->irq = rtcirq; | 217 | rtc->irq = rtcirq; |
| 219 | 218 | ||
| 220 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 219 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index 77ea9896b5ba..0765606a2d14 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | #define MC13XXX_RTCDAY 22 | 23 | #define MC13XXX_RTCDAY 22 |
| 24 | #define MC13XXX_RTCDAYA 23 | 24 | #define MC13XXX_RTCDAYA 23 |
| 25 | 25 | ||
| 26 | #define SEC_PER_DAY (24 * 60 * 60) | ||
| 27 | |||
| 26 | struct mc13xxx_rtc { | 28 | struct mc13xxx_rtc { |
| 27 | struct rtc_device *rtc; | 29 | struct rtc_device *rtc; |
| 28 | struct mc13xxx *mc13xxx; | 30 | struct mc13xxx *mc13xxx; |
| @@ -42,15 +44,15 @@ static int mc13xxx_rtc_irq_enable_unlocked(struct device *dev, | |||
| 42 | return func(priv->mc13xxx, irq); | 44 | return func(priv->mc13xxx, irq); |
| 43 | } | 45 | } |
| 44 | 46 | ||
| 45 | static int mc13xxx_rtc_irq_enable(struct device *dev, | 47 | static int mc13xxx_rtc_alarm_irq_enable(struct device *dev, |
| 46 | unsigned int enabled, int irq) | 48 | unsigned int enabled) |
| 47 | { | 49 | { |
| 48 | struct mc13xxx_rtc *priv = dev_get_drvdata(dev); | 50 | struct mc13xxx_rtc *priv = dev_get_drvdata(dev); |
| 49 | int ret; | 51 | int ret; |
| 50 | 52 | ||
| 51 | mc13xxx_lock(priv->mc13xxx); | 53 | mc13xxx_lock(priv->mc13xxx); |
| 52 | 54 | ||
| 53 | ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, irq); | 55 | ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, MC13XXX_IRQ_TODA); |
| 54 | 56 | ||
| 55 | mc13xxx_unlock(priv->mc13xxx); | 57 | mc13xxx_unlock(priv->mc13xxx); |
| 56 | 58 | ||
| @@ -61,44 +63,27 @@ static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
| 61 | { | 63 | { |
| 62 | struct mc13xxx_rtc *priv = dev_get_drvdata(dev); | 64 | struct mc13xxx_rtc *priv = dev_get_drvdata(dev); |
| 63 | unsigned int seconds, days1, days2; | 65 | unsigned int seconds, days1, days2; |
| 64 | unsigned long s1970; | ||
| 65 | int ret; | ||
| 66 | |||
| 67 | mc13xxx_lock(priv->mc13xxx); | ||
| 68 | |||
| 69 | if (!priv->valid) { | ||
| 70 | ret = -ENODATA; | ||
| 71 | goto out; | ||
| 72 | } | ||
| 73 | 66 | ||
| 74 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1); | 67 | if (!priv->valid) |
| 75 | if (unlikely(ret)) | 68 | return -ENODATA; |
| 76 | goto out; | ||
| 77 | |||
| 78 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds); | ||
| 79 | if (unlikely(ret)) | ||
| 80 | goto out; | ||
| 81 | |||
| 82 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2); | ||
| 83 | out: | ||
| 84 | mc13xxx_unlock(priv->mc13xxx); | ||
| 85 | 69 | ||
| 86 | if (ret) | 70 | do { |
| 87 | return ret; | 71 | int ret; |
| 88 | 72 | ||
| 89 | if (days2 == days1 + 1) { | 73 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1); |
| 90 | if (seconds >= 86400 / 2) | 74 | if (ret) |
| 91 | days2 = days1; | 75 | return ret; |
| 92 | else | ||
| 93 | days1 = days2; | ||
| 94 | } | ||
| 95 | 76 | ||
| 96 | if (days1 != days2) | 77 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds); |
| 97 | return -EIO; | 78 | if (ret) |
| 79 | return ret; | ||
| 98 | 80 | ||
| 99 | s1970 = days1 * 86400 + seconds; | 81 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2); |
| 82 | if (ret) | ||
| 83 | return ret; | ||
| 84 | } while (days1 != days2); | ||
| 100 | 85 | ||
| 101 | rtc_time_to_tm(s1970, tm); | 86 | rtc_time_to_tm(days1 * SEC_PER_DAY + seconds, tm); |
| 102 | 87 | ||
| 103 | return rtc_valid_tm(tm); | 88 | return rtc_valid_tm(tm); |
| 104 | } | 89 | } |
| @@ -110,8 +95,8 @@ static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs) | |||
| 110 | unsigned int alarmseconds; | 95 | unsigned int alarmseconds; |
| 111 | int ret; | 96 | int ret; |
| 112 | 97 | ||
| 113 | seconds = secs % 86400; | 98 | seconds = secs % SEC_PER_DAY; |
| 114 | days = secs / 86400; | 99 | days = secs / SEC_PER_DAY; |
| 115 | 100 | ||
| 116 | mc13xxx_lock(priv->mc13xxx); | 101 | mc13xxx_lock(priv->mc13xxx); |
| 117 | 102 | ||
| @@ -123,7 +108,7 @@ static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs) | |||
| 123 | if (unlikely(ret)) | 108 | if (unlikely(ret)) |
| 124 | goto out; | 109 | goto out; |
| 125 | 110 | ||
| 126 | if (alarmseconds < 86400) { | 111 | if (alarmseconds < SEC_PER_DAY) { |
| 127 | ret = mc13xxx_reg_write(priv->mc13xxx, | 112 | ret = mc13xxx_reg_write(priv->mc13xxx, |
| 128 | MC13XXX_RTCTODA, 0x1ffff); | 113 | MC13XXX_RTCTODA, 0x1ffff); |
| 129 | if (unlikely(ret)) | 114 | if (unlikely(ret)) |
| @@ -147,18 +132,21 @@ static int mc13xxx_rtc_set_mmss(struct device *dev, unsigned long secs) | |||
| 147 | goto out; | 132 | goto out; |
| 148 | 133 | ||
| 149 | /* restore alarm */ | 134 | /* restore alarm */ |
| 150 | if (alarmseconds < 86400) { | 135 | if (alarmseconds < SEC_PER_DAY) { |
| 151 | ret = mc13xxx_reg_write(priv->mc13xxx, | 136 | ret = mc13xxx_reg_write(priv->mc13xxx, |
| 152 | MC13XXX_RTCTODA, alarmseconds); | 137 | MC13XXX_RTCTODA, alarmseconds); |
| 153 | if (unlikely(ret)) | 138 | if (unlikely(ret)) |
| 154 | goto out; | 139 | goto out; |
| 155 | } | 140 | } |
| 156 | 141 | ||
| 157 | ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST); | 142 | if (!priv->valid) { |
| 158 | if (unlikely(ret)) | 143 | ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST); |
| 159 | goto out; | 144 | if (unlikely(ret)) |
| 145 | goto out; | ||
| 146 | |||
| 147 | ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST); | ||
| 148 | } | ||
| 160 | 149 | ||
| 161 | ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST); | ||
| 162 | out: | 150 | out: |
| 163 | priv->valid = !ret; | 151 | priv->valid = !ret; |
| 164 | 152 | ||
| @@ -180,7 +168,7 @@ static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
| 180 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &seconds); | 168 | ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &seconds); |
| 181 | if (unlikely(ret)) | 169 | if (unlikely(ret)) |
| 182 | goto out; | 170 | goto out; |
| 183 | if (seconds >= 86400) { | 171 | if (seconds >= SEC_PER_DAY) { |
| 184 | ret = -ENODATA; | 172 | ret = -ENODATA; |
| 185 | goto out; | 173 | goto out; |
| 186 | } | 174 | } |
| @@ -201,7 +189,7 @@ out: | |||
| 201 | alarm->enabled = enabled; | 189 | alarm->enabled = enabled; |
| 202 | alarm->pending = pending; | 190 | alarm->pending = pending; |
| 203 | 191 | ||
| 204 | s1970 = days * 86400 + seconds; | 192 | s1970 = days * SEC_PER_DAY + seconds; |
| 205 | 193 | ||
| 206 | rtc_time_to_tm(s1970, &alarm->time); | 194 | rtc_time_to_tm(s1970, &alarm->time); |
| 207 | dev_dbg(dev, "%s: %lu\n", __func__, s1970); | 195 | dev_dbg(dev, "%s: %lu\n", __func__, s1970); |
| @@ -239,8 +227,8 @@ static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
| 239 | if (unlikely(ret)) | 227 | if (unlikely(ret)) |
| 240 | goto out; | 228 | goto out; |
| 241 | 229 | ||
| 242 | seconds = s1970 % 86400; | 230 | seconds = s1970 % SEC_PER_DAY; |
| 243 | days = s1970 / 86400; | 231 | days = s1970 / SEC_PER_DAY; |
| 244 | 232 | ||
| 245 | ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days); | 233 | ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days); |
| 246 | if (unlikely(ret)) | 234 | if (unlikely(ret)) |
| @@ -259,8 +247,6 @@ static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev) | |||
| 259 | struct mc13xxx_rtc *priv = dev; | 247 | struct mc13xxx_rtc *priv = dev; |
| 260 | struct mc13xxx *mc13xxx = priv->mc13xxx; | 248 | struct mc13xxx *mc13xxx = priv->mc13xxx; |
| 261 | 249 | ||
| 262 | dev_dbg(&priv->rtc->dev, "Alarm\n"); | ||
| 263 | |||
| 264 | rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF); | 250 | rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF); |
| 265 | 251 | ||
| 266 | mc13xxx_irq_ack(mc13xxx, irq); | 252 | mc13xxx_irq_ack(mc13xxx, irq); |
| @@ -273,8 +259,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev) | |||
| 273 | struct mc13xxx_rtc *priv = dev; | 259 | struct mc13xxx_rtc *priv = dev; |
| 274 | struct mc13xxx *mc13xxx = priv->mc13xxx; | 260 | struct mc13xxx *mc13xxx = priv->mc13xxx; |
| 275 | 261 | ||
| 276 | dev_dbg(&priv->rtc->dev, "1HZ\n"); | ||
| 277 | |||
| 278 | rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF); | 262 | rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF); |
| 279 | 263 | ||
| 280 | mc13xxx_irq_ack(mc13xxx, irq); | 264 | mc13xxx_irq_ack(mc13xxx, irq); |
| @@ -282,12 +266,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev) | |||
| 282 | return IRQ_HANDLED; | 266 | return IRQ_HANDLED; |
| 283 | } | 267 | } |
| 284 | 268 | ||
| 285 | static int mc13xxx_rtc_alarm_irq_enable(struct device *dev, | ||
| 286 | unsigned int enabled) | ||
| 287 | { | ||
| 288 | return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_TODA); | ||
| 289 | } | ||
| 290 | |||
| 291 | static const struct rtc_class_ops mc13xxx_rtc_ops = { | 269 | static const struct rtc_class_ops mc13xxx_rtc_ops = { |
| 292 | .read_time = mc13xxx_rtc_read_time, | 270 | .read_time = mc13xxx_rtc_read_time, |
| 293 | .set_mmss = mc13xxx_rtc_set_mmss, | 271 | .set_mmss = mc13xxx_rtc_set_mmss, |
| @@ -301,7 +279,6 @@ static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev) | |||
| 301 | struct mc13xxx_rtc *priv = dev; | 279 | struct mc13xxx_rtc *priv = dev; |
| 302 | struct mc13xxx *mc13xxx = priv->mc13xxx; | 280 | struct mc13xxx *mc13xxx = priv->mc13xxx; |
| 303 | 281 | ||
| 304 | dev_dbg(&priv->rtc->dev, "RTCRST\n"); | ||
| 305 | priv->valid = 0; | 282 | priv->valid = 0; |
| 306 | 283 | ||
| 307 | mc13xxx_irq_mask(mc13xxx, irq); | 284 | mc13xxx_irq_mask(mc13xxx, irq); |
| @@ -314,7 +291,6 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev) | |||
| 314 | int ret; | 291 | int ret; |
| 315 | struct mc13xxx_rtc *priv; | 292 | struct mc13xxx_rtc *priv; |
| 316 | struct mc13xxx *mc13xxx; | 293 | struct mc13xxx *mc13xxx; |
| 317 | int rtcrst_pending; | ||
| 318 | 294 | ||
| 319 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | 295 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
| 320 | if (!priv) | 296 | if (!priv) |
| @@ -322,60 +298,47 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev) | |||
| 322 | 298 | ||
| 323 | mc13xxx = dev_get_drvdata(pdev->dev.parent); | 299 | mc13xxx = dev_get_drvdata(pdev->dev.parent); |
| 324 | priv->mc13xxx = mc13xxx; | 300 | priv->mc13xxx = mc13xxx; |
| 301 | priv->valid = 1; | ||
| 325 | 302 | ||
| 326 | platform_set_drvdata(pdev, priv); | 303 | platform_set_drvdata(pdev, priv); |
| 327 | 304 | ||
| 328 | mc13xxx_lock(mc13xxx); | 305 | mc13xxx_lock(mc13xxx); |
| 329 | 306 | ||
| 307 | mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_RTCRST); | ||
| 308 | |||
| 330 | ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST, | 309 | ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST, |
| 331 | mc13xxx_rtc_reset_handler, DRIVER_NAME, priv); | 310 | mc13xxx_rtc_reset_handler, DRIVER_NAME, priv); |
| 332 | if (ret) | 311 | if (ret) |
| 333 | goto err_reset_irq_request; | 312 | goto err_irq_request; |
| 334 | |||
| 335 | ret = mc13xxx_irq_status(mc13xxx, MC13XXX_IRQ_RTCRST, | ||
| 336 | NULL, &rtcrst_pending); | ||
| 337 | if (ret) | ||
| 338 | goto err_reset_irq_status; | ||
| 339 | |||
| 340 | priv->valid = !rtcrst_pending; | ||
| 341 | 313 | ||
| 342 | ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_1HZ, | 314 | ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_1HZ, |
| 343 | mc13xxx_rtc_update_handler, DRIVER_NAME, priv); | 315 | mc13xxx_rtc_update_handler, DRIVER_NAME, priv); |
| 344 | if (ret) | 316 | if (ret) |
| 345 | goto err_update_irq_request; | 317 | goto err_irq_request; |
| 346 | 318 | ||
| 347 | ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA, | 319 | ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA, |
| 348 | mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv); | 320 | mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv); |
| 349 | if (ret) | 321 | if (ret) |
| 350 | goto err_alarm_irq_request; | 322 | goto err_irq_request; |
| 351 | 323 | ||
| 352 | mc13xxx_unlock(mc13xxx); | 324 | mc13xxx_unlock(mc13xxx); |
| 353 | 325 | ||
| 354 | priv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, | 326 | priv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
| 355 | &mc13xxx_rtc_ops, THIS_MODULE); | 327 | &mc13xxx_rtc_ops, THIS_MODULE); |
| 356 | if (IS_ERR(priv->rtc)) { | ||
| 357 | ret = PTR_ERR(priv->rtc); | ||
| 358 | 328 | ||
| 359 | mc13xxx_lock(mc13xxx); | 329 | return 0; |
| 360 | |||
| 361 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); | ||
| 362 | err_alarm_irq_request: | ||
| 363 | |||
| 364 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv); | ||
| 365 | err_update_irq_request: | ||
| 366 | |||
| 367 | err_reset_irq_status: | ||
| 368 | 330 | ||
| 369 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); | 331 | err_irq_request: |
| 370 | err_reset_irq_request: | 332 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); |
| 333 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv); | ||
| 334 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); | ||
| 371 | 335 | ||
| 372 | mc13xxx_unlock(mc13xxx); | 336 | mc13xxx_unlock(mc13xxx); |
| 373 | } | ||
| 374 | 337 | ||
| 375 | return ret; | 338 | return ret; |
| 376 | } | 339 | } |
| 377 | 340 | ||
| 378 | static int __exit mc13xxx_rtc_remove(struct platform_device *pdev) | 341 | static int mc13xxx_rtc_remove(struct platform_device *pdev) |
| 379 | { | 342 | { |
| 380 | struct mc13xxx_rtc *priv = platform_get_drvdata(pdev); | 343 | struct mc13xxx_rtc *priv = platform_get_drvdata(pdev); |
| 381 | 344 | ||
| @@ -404,7 +367,7 @@ MODULE_DEVICE_TABLE(platform, mc13xxx_rtc_idtable); | |||
| 404 | 367 | ||
| 405 | static struct platform_driver mc13xxx_rtc_driver = { | 368 | static struct platform_driver mc13xxx_rtc_driver = { |
| 406 | .id_table = mc13xxx_rtc_idtable, | 369 | .id_table = mc13xxx_rtc_idtable, |
| 407 | .remove = __exit_p(mc13xxx_rtc_remove), | 370 | .remove = mc13xxx_rtc_remove, |
| 408 | .driver = { | 371 | .driver = { |
| 409 | .name = DRIVER_NAME, | 372 | .name = DRIVER_NAME, |
| 410 | .owner = THIS_MODULE, | 373 | .owner = THIS_MODULE, |
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c index c29dee0946e6..c31846238871 100644 --- a/drivers/rtc/rtc-moxart.c +++ b/drivers/rtc/rtc-moxart.c | |||
| @@ -247,10 +247,8 @@ static int moxart_rtc_probe(struct platform_device *pdev) | |||
| 247 | int ret = 0; | 247 | int ret = 0; |
| 248 | 248 | ||
| 249 | moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL); | 249 | moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL); |
| 250 | if (!moxart_rtc) { | 250 | if (!moxart_rtc) |
| 251 | dev_err(&pdev->dev, "devm_kzalloc failed\n"); | ||
| 252 | return -ENOMEM; | 251 | return -ENOMEM; |
| 253 | } | ||
| 254 | 252 | ||
| 255 | moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node, | 253 | moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node, |
| 256 | "gpio-rtc-data", 0); | 254 | "gpio-rtc-data", 0); |
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index 248653c74b80..a53da0958e95 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c | |||
| @@ -229,10 +229,9 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev) | |||
| 229 | 229 | ||
| 230 | nuc900_rtc = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_rtc), | 230 | nuc900_rtc = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_rtc), |
| 231 | GFP_KERNEL); | 231 | GFP_KERNEL); |
| 232 | if (!nuc900_rtc) { | 232 | if (!nuc900_rtc) |
| 233 | dev_err(&pdev->dev, "kzalloc nuc900_rtc failed\n"); | ||
| 234 | return -ENOMEM; | 233 | return -ENOMEM; |
| 235 | } | 234 | |
| 236 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 235 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 237 | nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); | 236 | nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); |
| 238 | if (IS_ERR(nuc900_rtc->rtc_reg)) | 237 | if (IS_ERR(nuc900_rtc->rtc_reg)) |
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c index fffb7d3449d7..c360d62fb3f6 100644 --- a/drivers/rtc/rtc-palmas.c +++ b/drivers/rtc/rtc-palmas.c | |||
| @@ -348,9 +348,8 @@ static int palmas_rtc_resume(struct device *dev) | |||
| 348 | } | 348 | } |
| 349 | #endif | 349 | #endif |
| 350 | 350 | ||
| 351 | static const struct dev_pm_ops palmas_rtc_pm_ops = { | 351 | static SIMPLE_DEV_PM_OPS(palmas_rtc_pm_ops, palmas_rtc_suspend, |
| 352 | SET_SYSTEM_SLEEP_PM_OPS(palmas_rtc_suspend, palmas_rtc_resume) | 352 | palmas_rtc_resume); |
| 353 | }; | ||
| 354 | 353 | ||
| 355 | #ifdef CONFIG_OF | 354 | #ifdef CONFIG_OF |
| 356 | static struct of_device_id of_palmas_rtc_match[] = { | 355 | static struct of_device_id of_palmas_rtc_match[] = { |
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index 03f8f75d5af2..197699f358c7 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c | |||
| @@ -9,18 +9,16 @@ | |||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. | 10 | * GNU General Public License for more details. |
| 11 | */ | 11 | */ |
| 12 | 12 | #include <linux/of.h> | |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/rtc.h> | 15 | #include <linux/rtc.h> |
| 16 | #include <linux/platform_device.h> | ||
| 16 | #include <linux/pm.h> | 17 | #include <linux/pm.h> |
| 18 | #include <linux/regmap.h> | ||
| 17 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 18 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
| 19 | 21 | ||
| 20 | #include <linux/mfd/pm8xxx/core.h> | ||
| 21 | #include <linux/mfd/pm8xxx/rtc.h> | ||
| 22 | |||
| 23 | |||
| 24 | /* RTC Register offsets from RTC CTRL REG */ | 22 | /* RTC Register offsets from RTC CTRL REG */ |
| 25 | #define PM8XXX_ALARM_CTRL_OFFSET 0x01 | 23 | #define PM8XXX_ALARM_CTRL_OFFSET 0x01 |
| 26 | #define PM8XXX_RTC_WRITE_OFFSET 0x02 | 24 | #define PM8XXX_RTC_WRITE_OFFSET 0x02 |
| @@ -37,6 +35,8 @@ | |||
| 37 | /** | 35 | /** |
| 38 | * struct pm8xxx_rtc - rtc driver internal structure | 36 | * struct pm8xxx_rtc - rtc driver internal structure |
| 39 | * @rtc: rtc device for this driver. | 37 | * @rtc: rtc device for this driver. |
| 38 | * @regmap: regmap used to access RTC registers | ||
| 39 | * @allow_set_time: indicates whether writing to the RTC is allowed | ||
| 40 | * @rtc_alarm_irq: rtc alarm irq number. | 40 | * @rtc_alarm_irq: rtc alarm irq number. |
| 41 | * @rtc_base: address of rtc control register. | 41 | * @rtc_base: address of rtc control register. |
| 42 | * @rtc_read_base: base address of read registers. | 42 | * @rtc_read_base: base address of read registers. |
| @@ -48,55 +48,19 @@ | |||
| 48 | */ | 48 | */ |
| 49 | struct pm8xxx_rtc { | 49 | struct pm8xxx_rtc { |
| 50 | struct rtc_device *rtc; | 50 | struct rtc_device *rtc; |
| 51 | struct regmap *regmap; | ||
| 52 | bool allow_set_time; | ||
| 51 | int rtc_alarm_irq; | 53 | int rtc_alarm_irq; |
| 52 | int rtc_base; | 54 | int rtc_base; |
| 53 | int rtc_read_base; | 55 | int rtc_read_base; |
| 54 | int rtc_write_base; | 56 | int rtc_write_base; |
| 55 | int alarm_rw_base; | 57 | int alarm_rw_base; |
| 56 | u8 ctrl_reg; | 58 | u8 ctrl_reg; |
| 57 | struct device *rtc_dev; | 59 | struct device *rtc_dev; |
| 58 | spinlock_t ctrl_reg_lock; | 60 | spinlock_t ctrl_reg_lock; |
| 59 | }; | 61 | }; |
| 60 | 62 | ||
| 61 | /* | 63 | /* |
| 62 | * The RTC registers need to be read/written one byte at a time. This is a | ||
| 63 | * hardware limitation. | ||
| 64 | */ | ||
| 65 | static int pm8xxx_read_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val, | ||
| 66 | int base, int count) | ||
| 67 | { | ||
| 68 | int i, rc; | ||
| 69 | struct device *parent = rtc_dd->rtc_dev->parent; | ||
| 70 | |||
| 71 | for (i = 0; i < count; i++) { | ||
| 72 | rc = pm8xxx_readb(parent, base + i, &rtc_val[i]); | ||
| 73 | if (rc < 0) { | ||
| 74 | dev_err(rtc_dd->rtc_dev, "PMIC read failed\n"); | ||
| 75 | return rc; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | static int pm8xxx_write_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val, | ||
| 83 | int base, int count) | ||
| 84 | { | ||
| 85 | int i, rc; | ||
| 86 | struct device *parent = rtc_dd->rtc_dev->parent; | ||
| 87 | |||
| 88 | for (i = 0; i < count; i++) { | ||
| 89 | rc = pm8xxx_writeb(parent, base + i, rtc_val[i]); | ||
| 90 | if (rc < 0) { | ||
| 91 | dev_err(rtc_dd->rtc_dev, "PMIC write failed\n"); | ||
| 92 | return rc; | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | return 0; | ||
| 97 | } | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Steps to write the RTC registers. | 64 | * Steps to write the RTC registers. |
| 101 | * 1. Disable alarm if enabled. | 65 | * 1. Disable alarm if enabled. |
| 102 | * 2. Write 0x00 to LSB. | 66 | * 2. Write 0x00 to LSB. |
| @@ -107,9 +71,12 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
| 107 | { | 71 | { |
| 108 | int rc, i; | 72 | int rc, i; |
| 109 | unsigned long secs, irq_flags; | 73 | unsigned long secs, irq_flags; |
| 110 | u8 value[NUM_8_BIT_RTC_REGS], reg = 0, alarm_enabled = 0, ctrl_reg; | 74 | u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg; |
| 111 | struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); | 75 | struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); |
| 112 | 76 | ||
| 77 | if (!rtc_dd->allow_set_time) | ||
| 78 | return -EACCES; | ||
| 79 | |||
| 113 | rtc_tm_to_time(tm, &secs); | 80 | rtc_tm_to_time(tm, &secs); |
| 114 | 81 | ||
| 115 | for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) { | 82 | for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) { |
| @@ -125,47 +92,43 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
| 125 | if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) { | 92 | if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) { |
| 126 | alarm_enabled = 1; | 93 | alarm_enabled = 1; |
| 127 | ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; | 94 | ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; |
| 128 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, | 95 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); |
| 129 | 1); | 96 | if (rc) { |
| 130 | if (rc < 0) { | 97 | dev_err(dev, "Write to RTC control register failed\n"); |
| 131 | dev_err(dev, "Write to RTC control register " | ||
| 132 | "failed\n"); | ||
| 133 | goto rtc_rw_fail; | 98 | goto rtc_rw_fail; |
| 134 | } | 99 | } |
| 135 | rtc_dd->ctrl_reg = ctrl_reg; | 100 | rtc_dd->ctrl_reg = ctrl_reg; |
| 136 | } else | 101 | } else { |
| 137 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); | 102 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); |
| 103 | } | ||
| 138 | 104 | ||
| 139 | /* Write 0 to Byte[0] */ | 105 | /* Write 0 to Byte[0] */ |
| 140 | reg = 0; | 106 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0); |
| 141 | rc = pm8xxx_write_wrapper(rtc_dd, ®, rtc_dd->rtc_write_base, 1); | 107 | if (rc) { |
| 142 | if (rc < 0) { | ||
| 143 | dev_err(dev, "Write to RTC write data register failed\n"); | 108 | dev_err(dev, "Write to RTC write data register failed\n"); |
| 144 | goto rtc_rw_fail; | 109 | goto rtc_rw_fail; |
| 145 | } | 110 | } |
| 146 | 111 | ||
| 147 | /* Write Byte[1], Byte[2], Byte[3] */ | 112 | /* Write Byte[1], Byte[2], Byte[3] */ |
| 148 | rc = pm8xxx_write_wrapper(rtc_dd, value + 1, | 113 | rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1, |
| 149 | rtc_dd->rtc_write_base + 1, 3); | 114 | &value[1], sizeof(value) - 1); |
| 150 | if (rc < 0) { | 115 | if (rc) { |
| 151 | dev_err(dev, "Write to RTC write data register failed\n"); | 116 | dev_err(dev, "Write to RTC write data register failed\n"); |
| 152 | goto rtc_rw_fail; | 117 | goto rtc_rw_fail; |
| 153 | } | 118 | } |
| 154 | 119 | ||
| 155 | /* Write Byte[0] */ | 120 | /* Write Byte[0] */ |
| 156 | rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->rtc_write_base, 1); | 121 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]); |
| 157 | if (rc < 0) { | 122 | if (rc) { |
| 158 | dev_err(dev, "Write to RTC write data register failed\n"); | 123 | dev_err(dev, "Write to RTC write data register failed\n"); |
| 159 | goto rtc_rw_fail; | 124 | goto rtc_rw_fail; |
| 160 | } | 125 | } |
| 161 | 126 | ||
| 162 | if (alarm_enabled) { | 127 | if (alarm_enabled) { |
| 163 | ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; | 128 | ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; |
| 164 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, | 129 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); |
| 165 | 1); | 130 | if (rc) { |
| 166 | if (rc < 0) { | 131 | dev_err(dev, "Write to RTC control register failed\n"); |
| 167 | dev_err(dev, "Write to RTC control register " | ||
| 168 | "failed\n"); | ||
| 169 | goto rtc_rw_fail; | 132 | goto rtc_rw_fail; |
| 170 | } | 133 | } |
| 171 | rtc_dd->ctrl_reg = ctrl_reg; | 134 | rtc_dd->ctrl_reg = ctrl_reg; |
| @@ -181,13 +144,14 @@ rtc_rw_fail: | |||
| 181 | static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) | 144 | static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) |
| 182 | { | 145 | { |
| 183 | int rc; | 146 | int rc; |
| 184 | u8 value[NUM_8_BIT_RTC_REGS], reg; | 147 | u8 value[NUM_8_BIT_RTC_REGS]; |
| 185 | unsigned long secs; | 148 | unsigned long secs; |
| 149 | unsigned int reg; | ||
| 186 | struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); | 150 | struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); |
| 187 | 151 | ||
| 188 | rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->rtc_read_base, | 152 | rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, |
| 189 | NUM_8_BIT_RTC_REGS); | 153 | value, sizeof(value)); |
| 190 | if (rc < 0) { | 154 | if (rc) { |
| 191 | dev_err(dev, "RTC read data register failed\n"); | 155 | dev_err(dev, "RTC read data register failed\n"); |
| 192 | return rc; | 156 | return rc; |
| 193 | } | 157 | } |
| @@ -196,16 +160,16 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
| 196 | * Read the LSB again and check if there has been a carry over. | 160 | * Read the LSB again and check if there has been a carry over. |
| 197 | * If there is, redo the read operation. | 161 | * If there is, redo the read operation. |
| 198 | */ | 162 | */ |
| 199 | rc = pm8xxx_read_wrapper(rtc_dd, ®, rtc_dd->rtc_read_base, 1); | 163 | rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, ®); |
| 200 | if (rc < 0) { | 164 | if (rc < 0) { |
| 201 | dev_err(dev, "RTC read data register failed\n"); | 165 | dev_err(dev, "RTC read data register failed\n"); |
| 202 | return rc; | 166 | return rc; |
| 203 | } | 167 | } |
| 204 | 168 | ||
| 205 | if (unlikely(reg < value[0])) { | 169 | if (unlikely(reg < value[0])) { |
| 206 | rc = pm8xxx_read_wrapper(rtc_dd, value, | 170 | rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, |
| 207 | rtc_dd->rtc_read_base, NUM_8_BIT_RTC_REGS); | 171 | value, sizeof(value)); |
| 208 | if (rc < 0) { | 172 | if (rc) { |
| 209 | dev_err(dev, "RTC read data register failed\n"); | 173 | dev_err(dev, "RTC read data register failed\n"); |
| 210 | return rc; | 174 | return rc; |
| 211 | } | 175 | } |
| @@ -222,8 +186,8 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
| 222 | } | 186 | } |
| 223 | 187 | ||
| 224 | dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n", | 188 | dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n", |
| 225 | secs, tm->tm_hour, tm->tm_min, tm->tm_sec, | 189 | secs, tm->tm_hour, tm->tm_min, tm->tm_sec, |
| 226 | tm->tm_mday, tm->tm_mon, tm->tm_year); | 190 | tm->tm_mday, tm->tm_mon, tm->tm_year); |
| 227 | 191 | ||
| 228 | return 0; | 192 | return 0; |
| 229 | } | 193 | } |
| @@ -244,19 +208,22 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
| 244 | 208 | ||
| 245 | spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); | 209 | spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); |
| 246 | 210 | ||
| 247 | rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base, | 211 | rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, |
| 248 | NUM_8_BIT_RTC_REGS); | 212 | sizeof(value)); |
| 249 | if (rc < 0) { | 213 | if (rc) { |
| 250 | dev_err(dev, "Write to RTC ALARM register failed\n"); | 214 | dev_err(dev, "Write to RTC ALARM register failed\n"); |
| 251 | goto rtc_rw_fail; | 215 | goto rtc_rw_fail; |
| 252 | } | 216 | } |
| 253 | 217 | ||
| 254 | ctrl_reg = rtc_dd->ctrl_reg; | 218 | ctrl_reg = rtc_dd->ctrl_reg; |
| 255 | ctrl_reg = alarm->enabled ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) : | ||
| 256 | (ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE); | ||
| 257 | 219 | ||
| 258 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1); | 220 | if (alarm->enabled) |
| 259 | if (rc < 0) { | 221 | ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; |
| 222 | else | ||
| 223 | ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; | ||
| 224 | |||
| 225 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); | ||
| 226 | if (rc) { | ||
| 260 | dev_err(dev, "Write to RTC control register failed\n"); | 227 | dev_err(dev, "Write to RTC control register failed\n"); |
| 261 | goto rtc_rw_fail; | 228 | goto rtc_rw_fail; |
| 262 | } | 229 | } |
| @@ -264,9 +231,9 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
| 264 | rtc_dd->ctrl_reg = ctrl_reg; | 231 | rtc_dd->ctrl_reg = ctrl_reg; |
| 265 | 232 | ||
| 266 | dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", | 233 | dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", |
| 267 | alarm->time.tm_hour, alarm->time.tm_min, | 234 | alarm->time.tm_hour, alarm->time.tm_min, |
| 268 | alarm->time.tm_sec, alarm->time.tm_mday, | 235 | alarm->time.tm_sec, alarm->time.tm_mday, |
| 269 | alarm->time.tm_mon, alarm->time.tm_year); | 236 | alarm->time.tm_mon, alarm->time.tm_year); |
| 270 | rtc_rw_fail: | 237 | rtc_rw_fail: |
| 271 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); | 238 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); |
| 272 | return rc; | 239 | return rc; |
| @@ -279,9 +246,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
| 279 | unsigned long secs; | 246 | unsigned long secs; |
| 280 | struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); | 247 | struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); |
| 281 | 248 | ||
| 282 | rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base, | 249 | rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, |
| 283 | NUM_8_BIT_RTC_REGS); | 250 | sizeof(value)); |
| 284 | if (rc < 0) { | 251 | if (rc) { |
| 285 | dev_err(dev, "RTC alarm time read failed\n"); | 252 | dev_err(dev, "RTC alarm time read failed\n"); |
| 286 | return rc; | 253 | return rc; |
| 287 | } | 254 | } |
| @@ -297,9 +264,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) | |||
| 297 | } | 264 | } |
| 298 | 265 | ||
| 299 | dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", | 266 | dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", |
| 300 | alarm->time.tm_hour, alarm->time.tm_min, | 267 | alarm->time.tm_hour, alarm->time.tm_min, |
| 301 | alarm->time.tm_sec, alarm->time.tm_mday, | 268 | alarm->time.tm_sec, alarm->time.tm_mday, |
| 302 | alarm->time.tm_mon, alarm->time.tm_year); | 269 | alarm->time.tm_mon, alarm->time.tm_year); |
| 303 | 270 | ||
| 304 | return 0; | 271 | return 0; |
| 305 | } | 272 | } |
| @@ -312,12 +279,16 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) | |||
| 312 | u8 ctrl_reg; | 279 | u8 ctrl_reg; |
| 313 | 280 | ||
| 314 | spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); | 281 | spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); |
| 282 | |||
| 315 | ctrl_reg = rtc_dd->ctrl_reg; | 283 | ctrl_reg = rtc_dd->ctrl_reg; |
| 316 | ctrl_reg = (enable) ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) : | ||
| 317 | (ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE); | ||
| 318 | 284 | ||
| 319 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1); | 285 | if (enable) |
| 320 | if (rc < 0) { | 286 | ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; |
| 287 | else | ||
| 288 | ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; | ||
| 289 | |||
| 290 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); | ||
| 291 | if (rc) { | ||
| 321 | dev_err(dev, "Write to RTC control register failed\n"); | 292 | dev_err(dev, "Write to RTC control register failed\n"); |
| 322 | goto rtc_rw_fail; | 293 | goto rtc_rw_fail; |
| 323 | } | 294 | } |
| @@ -329,8 +300,9 @@ rtc_rw_fail: | |||
| 329 | return rc; | 300 | return rc; |
| 330 | } | 301 | } |
| 331 | 302 | ||
| 332 | static struct rtc_class_ops pm8xxx_rtc_ops = { | 303 | static const struct rtc_class_ops pm8xxx_rtc_ops = { |
| 333 | .read_time = pm8xxx_rtc_read_time, | 304 | .read_time = pm8xxx_rtc_read_time, |
| 305 | .set_time = pm8xxx_rtc_set_time, | ||
| 334 | .set_alarm = pm8xxx_rtc_set_alarm, | 306 | .set_alarm = pm8xxx_rtc_set_alarm, |
| 335 | .read_alarm = pm8xxx_rtc_read_alarm, | 307 | .read_alarm = pm8xxx_rtc_read_alarm, |
| 336 | .alarm_irq_enable = pm8xxx_rtc_alarm_irq_enable, | 308 | .alarm_irq_enable = pm8xxx_rtc_alarm_irq_enable, |
| @@ -339,7 +311,7 @@ static struct rtc_class_ops pm8xxx_rtc_ops = { | |||
| 339 | static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) | 311 | static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) |
| 340 | { | 312 | { |
| 341 | struct pm8xxx_rtc *rtc_dd = dev_id; | 313 | struct pm8xxx_rtc *rtc_dd = dev_id; |
| 342 | u8 ctrl_reg; | 314 | unsigned int ctrl_reg; |
| 343 | int rc; | 315 | int rc; |
| 344 | unsigned long irq_flags; | 316 | unsigned long irq_flags; |
| 345 | 317 | ||
| @@ -351,11 +323,11 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) | |||
| 351 | ctrl_reg = rtc_dd->ctrl_reg; | 323 | ctrl_reg = rtc_dd->ctrl_reg; |
| 352 | ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; | 324 | ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; |
| 353 | 325 | ||
| 354 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1); | 326 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); |
| 355 | if (rc < 0) { | 327 | if (rc) { |
| 356 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); | 328 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); |
| 357 | dev_err(rtc_dd->rtc_dev, "Write to RTC control register " | 329 | dev_err(rtc_dd->rtc_dev, |
| 358 | "failed\n"); | 330 | "Write to RTC control register failed\n"); |
| 359 | goto rtc_alarm_handled; | 331 | goto rtc_alarm_handled; |
| 360 | } | 332 | } |
| 361 | 333 | ||
| @@ -363,61 +335,71 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) | |||
| 363 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); | 335 | spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); |
| 364 | 336 | ||
| 365 | /* Clear RTC alarm register */ | 337 | /* Clear RTC alarm register */ |
| 366 | rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base + | 338 | rc = regmap_read(rtc_dd->regmap, |
| 367 | PM8XXX_ALARM_CTRL_OFFSET, 1); | 339 | rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET, |
| 368 | if (rc < 0) { | 340 | &ctrl_reg); |
| 369 | dev_err(rtc_dd->rtc_dev, "RTC Alarm control register read " | 341 | if (rc) { |
| 370 | "failed\n"); | 342 | dev_err(rtc_dd->rtc_dev, |
| 343 | "RTC Alarm control register read failed\n"); | ||
| 371 | goto rtc_alarm_handled; | 344 | goto rtc_alarm_handled; |
| 372 | } | 345 | } |
| 373 | 346 | ||
| 374 | ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR; | 347 | ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR; |
| 375 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base + | 348 | rc = regmap_write(rtc_dd->regmap, |
| 376 | PM8XXX_ALARM_CTRL_OFFSET, 1); | 349 | rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET, |
| 377 | if (rc < 0) | 350 | ctrl_reg); |
| 378 | dev_err(rtc_dd->rtc_dev, "Write to RTC Alarm control register" | 351 | if (rc) |
| 379 | " failed\n"); | 352 | dev_err(rtc_dd->rtc_dev, |
| 353 | "Write to RTC Alarm control register failed\n"); | ||
| 380 | 354 | ||
| 381 | rtc_alarm_handled: | 355 | rtc_alarm_handled: |
| 382 | return IRQ_HANDLED; | 356 | return IRQ_HANDLED; |
| 383 | } | 357 | } |
| 384 | 358 | ||
| 359 | /* | ||
| 360 | * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out | ||
| 361 | */ | ||
| 362 | static const struct of_device_id pm8xxx_id_table[] = { | ||
| 363 | { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D }, | ||
| 364 | { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 }, | ||
| 365 | { }, | ||
| 366 | }; | ||
| 367 | MODULE_DEVICE_TABLE(of, pm8xxx_id_table); | ||
| 368 | |||
| 385 | static int pm8xxx_rtc_probe(struct platform_device *pdev) | 369 | static int pm8xxx_rtc_probe(struct platform_device *pdev) |
| 386 | { | 370 | { |
| 387 | int rc; | 371 | int rc; |
| 388 | u8 ctrl_reg; | 372 | unsigned int ctrl_reg; |
| 389 | bool rtc_write_enable = false; | ||
| 390 | struct pm8xxx_rtc *rtc_dd; | 373 | struct pm8xxx_rtc *rtc_dd; |
| 391 | struct resource *rtc_resource; | 374 | const struct of_device_id *match; |
| 392 | const struct pm8xxx_rtc_platform_data *pdata = | ||
| 393 | dev_get_platdata(&pdev->dev); | ||
| 394 | 375 | ||
| 395 | if (pdata != NULL) | 376 | match = of_match_node(pm8xxx_id_table, pdev->dev.of_node); |
| 396 | rtc_write_enable = pdata->rtc_write_enable; | 377 | if (!match) |
| 378 | return -ENXIO; | ||
| 397 | 379 | ||
| 398 | rtc_dd = devm_kzalloc(&pdev->dev, sizeof(*rtc_dd), GFP_KERNEL); | 380 | rtc_dd = devm_kzalloc(&pdev->dev, sizeof(*rtc_dd), GFP_KERNEL); |
| 399 | if (rtc_dd == NULL) { | 381 | if (rtc_dd == NULL) |
| 400 | dev_err(&pdev->dev, "Unable to allocate memory!\n"); | ||
| 401 | return -ENOMEM; | 382 | return -ENOMEM; |
| 402 | } | ||
| 403 | 383 | ||
| 404 | /* Initialise spinlock to protect RTC control register */ | 384 | /* Initialise spinlock to protect RTC control register */ |
| 405 | spin_lock_init(&rtc_dd->ctrl_reg_lock); | 385 | spin_lock_init(&rtc_dd->ctrl_reg_lock); |
| 406 | 386 | ||
| 387 | rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL); | ||
| 388 | if (!rtc_dd->regmap) { | ||
| 389 | dev_err(&pdev->dev, "Parent regmap unavailable.\n"); | ||
| 390 | return -ENXIO; | ||
| 391 | } | ||
| 392 | |||
| 407 | rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0); | 393 | rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0); |
| 408 | if (rtc_dd->rtc_alarm_irq < 0) { | 394 | if (rtc_dd->rtc_alarm_irq < 0) { |
| 409 | dev_err(&pdev->dev, "Alarm IRQ resource absent!\n"); | 395 | dev_err(&pdev->dev, "Alarm IRQ resource absent!\n"); |
| 410 | return -ENXIO; | 396 | return -ENXIO; |
| 411 | } | 397 | } |
| 412 | 398 | ||
| 413 | rtc_resource = platform_get_resource_byname(pdev, IORESOURCE_IO, | 399 | rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node, |
| 414 | "pmic_rtc_base"); | 400 | "allow-set-time"); |
| 415 | if (!(rtc_resource && rtc_resource->start)) { | ||
| 416 | dev_err(&pdev->dev, "RTC IO resource absent!\n"); | ||
| 417 | return -ENXIO; | ||
| 418 | } | ||
| 419 | 401 | ||
| 420 | rtc_dd->rtc_base = rtc_resource->start; | 402 | rtc_dd->rtc_base = (long) match->data; |
| 421 | 403 | ||
| 422 | /* Setup RTC register addresses */ | 404 | /* Setup RTC register addresses */ |
| 423 | rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET; | 405 | rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET; |
| @@ -427,64 +409,52 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) | |||
| 427 | rtc_dd->rtc_dev = &pdev->dev; | 409 | rtc_dd->rtc_dev = &pdev->dev; |
| 428 | 410 | ||
| 429 | /* Check if the RTC is on, else turn it on */ | 411 | /* Check if the RTC is on, else turn it on */ |
| 430 | rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1); | 412 | rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg); |
| 431 | if (rc < 0) { | 413 | if (rc) { |
| 432 | dev_err(&pdev->dev, "RTC control register read failed!\n"); | 414 | dev_err(&pdev->dev, "RTC control register read failed!\n"); |
| 433 | return rc; | 415 | return rc; |
| 434 | } | 416 | } |
| 435 | 417 | ||
| 436 | if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) { | 418 | if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) { |
| 437 | ctrl_reg |= PM8xxx_RTC_ENABLE; | 419 | ctrl_reg |= PM8xxx_RTC_ENABLE; |
| 438 | rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, | 420 | rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); |
| 439 | 1); | 421 | if (rc) { |
| 440 | if (rc < 0) { | 422 | dev_err(&pdev->dev, |
| 441 | dev_err(&pdev->dev, "Write to RTC control register " | 423 | "Write to RTC control register failed\n"); |
| 442 | "failed\n"); | ||
| 443 | return rc; | 424 | return rc; |
| 444 | } | 425 | } |
| 445 | } | 426 | } |
| 446 | 427 | ||
| 447 | rtc_dd->ctrl_reg = ctrl_reg; | 428 | rtc_dd->ctrl_reg = ctrl_reg; |
| 448 | if (rtc_write_enable == true) | ||
| 449 | pm8xxx_rtc_ops.set_time = pm8xxx_rtc_set_time; | ||
| 450 | 429 | ||
| 451 | platform_set_drvdata(pdev, rtc_dd); | 430 | platform_set_drvdata(pdev, rtc_dd); |
| 452 | 431 | ||
| 432 | device_init_wakeup(&pdev->dev, 1); | ||
| 433 | |||
| 453 | /* Register the RTC device */ | 434 | /* Register the RTC device */ |
| 454 | rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc", | 435 | rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc", |
| 455 | &pm8xxx_rtc_ops, THIS_MODULE); | 436 | &pm8xxx_rtc_ops, THIS_MODULE); |
| 456 | if (IS_ERR(rtc_dd->rtc)) { | 437 | if (IS_ERR(rtc_dd->rtc)) { |
| 457 | dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n", | 438 | dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n", |
| 458 | __func__, PTR_ERR(rtc_dd->rtc)); | 439 | __func__, PTR_ERR(rtc_dd->rtc)); |
| 459 | return PTR_ERR(rtc_dd->rtc); | 440 | return PTR_ERR(rtc_dd->rtc); |
| 460 | } | 441 | } |
| 461 | 442 | ||
| 462 | /* Request the alarm IRQ */ | 443 | /* Request the alarm IRQ */ |
| 463 | rc = request_any_context_irq(rtc_dd->rtc_alarm_irq, | 444 | rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->rtc_alarm_irq, |
| 464 | pm8xxx_alarm_trigger, IRQF_TRIGGER_RISING, | 445 | pm8xxx_alarm_trigger, |
| 465 | "pm8xxx_rtc_alarm", rtc_dd); | 446 | IRQF_TRIGGER_RISING, |
| 447 | "pm8xxx_rtc_alarm", rtc_dd); | ||
| 466 | if (rc < 0) { | 448 | if (rc < 0) { |
| 467 | dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc); | 449 | dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc); |
| 468 | return rc; | 450 | return rc; |
| 469 | } | 451 | } |
| 470 | 452 | ||
| 471 | device_init_wakeup(&pdev->dev, 1); | ||
| 472 | |||
| 473 | dev_dbg(&pdev->dev, "Probe success !!\n"); | 453 | dev_dbg(&pdev->dev, "Probe success !!\n"); |
| 474 | 454 | ||
| 475 | return 0; | 455 | return 0; |
| 476 | } | 456 | } |
| 477 | 457 | ||
| 478 | static int pm8xxx_rtc_remove(struct platform_device *pdev) | ||
| 479 | { | ||
| 480 | struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev); | ||
| 481 | |||
| 482 | device_init_wakeup(&pdev->dev, 0); | ||
| 483 | free_irq(rtc_dd->rtc_alarm_irq, rtc_dd); | ||
| 484 | |||
| 485 | return 0; | ||
| 486 | } | ||
| 487 | |||
| 488 | #ifdef CONFIG_PM_SLEEP | 458 | #ifdef CONFIG_PM_SLEEP |
| 489 | static int pm8xxx_rtc_resume(struct device *dev) | 459 | static int pm8xxx_rtc_resume(struct device *dev) |
| 490 | { | 460 | { |
| @@ -507,15 +477,17 @@ static int pm8xxx_rtc_suspend(struct device *dev) | |||
| 507 | } | 477 | } |
| 508 | #endif | 478 | #endif |
| 509 | 479 | ||
| 510 | static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops, pm8xxx_rtc_suspend, pm8xxx_rtc_resume); | 480 | static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops, |
| 481 | pm8xxx_rtc_suspend, | ||
| 482 | pm8xxx_rtc_resume); | ||
| 511 | 483 | ||
| 512 | static struct platform_driver pm8xxx_rtc_driver = { | 484 | static struct platform_driver pm8xxx_rtc_driver = { |
| 513 | .probe = pm8xxx_rtc_probe, | 485 | .probe = pm8xxx_rtc_probe, |
| 514 | .remove = pm8xxx_rtc_remove, | ||
| 515 | .driver = { | 486 | .driver = { |
| 516 | .name = PM8XXX_RTC_DEV_NAME, | 487 | .name = "rtc-pm8xxx", |
| 517 | .owner = THIS_MODULE, | 488 | .owner = THIS_MODULE, |
| 518 | .pm = &pm8xxx_rtc_pm_ops, | 489 | .pm = &pm8xxx_rtc_pm_ops, |
| 490 | .of_match_table = pm8xxx_id_table, | ||
| 519 | }, | 491 | }, |
| 520 | }; | 492 | }; |
| 521 | 493 | ||
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index 1a779a67ff66..e9ac5a43be1a 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c | |||
| @@ -395,6 +395,12 @@ static int rv3029c2_probe(struct i2c_client *client, | |||
| 395 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL)) | 395 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL)) |
| 396 | return -ENODEV; | 396 | return -ENODEV; |
| 397 | 397 | ||
| 398 | rc = rv3029c2_i2c_get_sr(client, buf); | ||
| 399 | if (rc < 0) { | ||
| 400 | dev_err(&client->dev, "reading status failed\n"); | ||
| 401 | return rc; | ||
| 402 | } | ||
| 403 | |||
| 398 | rtc = devm_rtc_device_register(&client->dev, client->name, | 404 | rtc = devm_rtc_device_register(&client->dev, client->name, |
| 399 | &rv3029c2_rtc_ops, THIS_MODULE); | 405 | &rv3029c2_rtc_ops, THIS_MODULE); |
| 400 | 406 | ||
| @@ -403,12 +409,6 @@ static int rv3029c2_probe(struct i2c_client *client, | |||
| 403 | 409 | ||
| 404 | i2c_set_clientdata(client, rtc); | 410 | i2c_set_clientdata(client, rtc); |
| 405 | 411 | ||
| 406 | rc = rv3029c2_i2c_get_sr(client, buf); | ||
| 407 | if (rc < 0) { | ||
| 408 | dev_err(&client->dev, "reading status failed\n"); | ||
| 409 | return rc; | ||
| 410 | } | ||
| 411 | |||
| 412 | return 0; | 412 | return 0; |
| 413 | } | 413 | } |
| 414 | 414 | ||
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index 8fa23eabcb68..e6298e02b400 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c | |||
| @@ -551,7 +551,6 @@ static int rx8025_probe(struct i2c_client *client, | |||
| 551 | 551 | ||
| 552 | rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL); | 552 | rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL); |
| 553 | if (!rx8025) { | 553 | if (!rx8025) { |
| 554 | dev_err(&adapter->dev, "failed to alloc memory\n"); | ||
| 555 | err = -ENOMEM; | 554 | err = -ENOMEM; |
| 556 | goto errout; | 555 | goto errout; |
| 557 | } | 556 | } |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index c4cde9c08f1f..4958a363b2c7 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -48,8 +48,8 @@ struct s3c_rtc_drv_data { | |||
| 48 | 48 | ||
| 49 | static struct clk *rtc_clk; | 49 | static struct clk *rtc_clk; |
| 50 | static void __iomem *s3c_rtc_base; | 50 | static void __iomem *s3c_rtc_base; |
| 51 | static int s3c_rtc_alarmno = NO_IRQ; | 51 | static int s3c_rtc_alarmno; |
| 52 | static int s3c_rtc_tickno = NO_IRQ; | 52 | static int s3c_rtc_tickno; |
| 53 | static enum s3c_cpu_type s3c_rtc_cpu_type; | 53 | static enum s3c_cpu_type s3c_rtc_cpu_type; |
| 54 | 54 | ||
| 55 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); | 55 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); |
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c index 3eb3642ae299..76e38007ba90 100644 --- a/drivers/rtc/rtc-sirfsoc.c +++ b/drivers/rtc/rtc-sirfsoc.c | |||
| @@ -264,12 +264,8 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev) | |||
| 264 | 264 | ||
| 265 | rtcdrv = devm_kzalloc(&pdev->dev, | 265 | rtcdrv = devm_kzalloc(&pdev->dev, |
| 266 | sizeof(struct sirfsoc_rtc_drv), GFP_KERNEL); | 266 | sizeof(struct sirfsoc_rtc_drv), GFP_KERNEL); |
| 267 | if (rtcdrv == NULL) { | 267 | if (rtcdrv == NULL) |
| 268 | dev_err(&pdev->dev, | ||
| 269 | "%s: can't alloc mem for drv struct\n", | ||
| 270 | pdev->name); | ||
| 271 | return -ENOMEM; | 268 | return -ENOMEM; |
| 272 | } | ||
| 273 | 269 | ||
| 274 | err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base); | 270 | err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base); |
| 275 | if (err) { | 271 | if (err) { |
| @@ -335,39 +331,29 @@ static int sirfsoc_rtc_remove(struct platform_device *pdev) | |||
| 335 | return 0; | 331 | return 0; |
| 336 | } | 332 | } |
| 337 | 333 | ||
| 338 | #ifdef CONFIG_PM | 334 | #ifdef CONFIG_PM_SLEEP |
| 339 | |||
| 340 | static int sirfsoc_rtc_suspend(struct device *dev) | 335 | static int sirfsoc_rtc_suspend(struct device *dev) |
| 341 | { | 336 | { |
| 342 | struct platform_device *pdev = to_platform_device(dev); | 337 | struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev); |
| 343 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
| 344 | rtcdrv->overflow_rtc = | 338 | rtcdrv->overflow_rtc = |
| 345 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); | 339 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); |
| 346 | 340 | ||
| 347 | rtcdrv->saved_counter = | 341 | rtcdrv->saved_counter = |
| 348 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); | 342 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); |
| 349 | rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc; | 343 | rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc; |
| 350 | if (device_may_wakeup(&pdev->dev) && !enable_irq_wake(rtcdrv->irq)) | 344 | if (device_may_wakeup(dev) && !enable_irq_wake(rtcdrv->irq)) |
| 351 | rtcdrv->irq_wake = 1; | 345 | rtcdrv->irq_wake = 1; |
| 352 | 346 | ||
| 353 | return 0; | 347 | return 0; |
| 354 | } | 348 | } |
| 355 | 349 | ||
| 356 | static int sirfsoc_rtc_freeze(struct device *dev) | 350 | static int sirfsoc_rtc_resume(struct device *dev) |
| 357 | { | ||
| 358 | sirfsoc_rtc_suspend(dev); | ||
| 359 | |||
| 360 | return 0; | ||
| 361 | } | ||
| 362 | |||
| 363 | static int sirfsoc_rtc_thaw(struct device *dev) | ||
| 364 | { | 351 | { |
| 365 | u32 tmp; | 352 | u32 tmp; |
| 366 | struct sirfsoc_rtc_drv *rtcdrv; | 353 | struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev); |
| 367 | rtcdrv = dev_get_drvdata(dev); | ||
| 368 | 354 | ||
| 369 | /* | 355 | /* |
| 370 | * if resume from snapshot and the rtc power is losed, | 356 | * if resume from snapshot and the rtc power is lost, |
| 371 | * restroe the rtc settings | 357 | * restroe the rtc settings |
| 372 | */ | 358 | */ |
| 373 | if (SIRFSOC_RTC_CLK != sirfsoc_rtc_iobrg_readl( | 359 | if (SIRFSOC_RTC_CLK != sirfsoc_rtc_iobrg_readl( |
| @@ -407,57 +393,23 @@ static int sirfsoc_rtc_thaw(struct device *dev) | |||
| 407 | sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc, | 393 | sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc, |
| 408 | rtcdrv->rtc_base + RTC_SW_VALUE); | 394 | rtcdrv->rtc_base + RTC_SW_VALUE); |
| 409 | 395 | ||
| 410 | return 0; | 396 | if (device_may_wakeup(dev) && rtcdrv->irq_wake) { |
| 411 | } | ||
| 412 | |||
| 413 | static int sirfsoc_rtc_resume(struct device *dev) | ||
| 414 | { | ||
| 415 | struct platform_device *pdev = to_platform_device(dev); | ||
| 416 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
| 417 | sirfsoc_rtc_thaw(dev); | ||
| 418 | if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) { | ||
| 419 | disable_irq_wake(rtcdrv->irq); | 397 | disable_irq_wake(rtcdrv->irq); |
| 420 | rtcdrv->irq_wake = 0; | 398 | rtcdrv->irq_wake = 0; |
| 421 | } | 399 | } |
| 422 | 400 | ||
| 423 | return 0; | 401 | return 0; |
| 424 | } | 402 | } |
| 425 | |||
| 426 | static int sirfsoc_rtc_restore(struct device *dev) | ||
| 427 | { | ||
| 428 | struct platform_device *pdev = to_platform_device(dev); | ||
| 429 | struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev); | ||
| 430 | |||
| 431 | if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) { | ||
| 432 | disable_irq_wake(rtcdrv->irq); | ||
| 433 | rtcdrv->irq_wake = 0; | ||
| 434 | } | ||
| 435 | return 0; | ||
| 436 | } | ||
| 437 | |||
| 438 | #else | ||
| 439 | #define sirfsoc_rtc_suspend NULL | ||
| 440 | #define sirfsoc_rtc_resume NULL | ||
| 441 | #define sirfsoc_rtc_freeze NULL | ||
| 442 | #define sirfsoc_rtc_thaw NULL | ||
| 443 | #define sirfsoc_rtc_restore NULL | ||
| 444 | #endif | 403 | #endif |
| 445 | 404 | ||
| 446 | static const struct dev_pm_ops sirfsoc_rtc_pm_ops = { | 405 | static SIMPLE_DEV_PM_OPS(sirfsoc_rtc_pm_ops, |
| 447 | .suspend = sirfsoc_rtc_suspend, | 406 | sirfsoc_rtc_suspend, sirfsoc_rtc_resume); |
| 448 | .resume = sirfsoc_rtc_resume, | ||
| 449 | .freeze = sirfsoc_rtc_freeze, | ||
| 450 | .thaw = sirfsoc_rtc_thaw, | ||
| 451 | .restore = sirfsoc_rtc_restore, | ||
| 452 | }; | ||
| 453 | 407 | ||
| 454 | static struct platform_driver sirfsoc_rtc_driver = { | 408 | static struct platform_driver sirfsoc_rtc_driver = { |
| 455 | .driver = { | 409 | .driver = { |
| 456 | .name = "sirfsoc-rtc", | 410 | .name = "sirfsoc-rtc", |
| 457 | .owner = THIS_MODULE, | 411 | .owner = THIS_MODULE, |
| 458 | #ifdef CONFIG_PM | ||
| 459 | .pm = &sirfsoc_rtc_pm_ops, | 412 | .pm = &sirfsoc_rtc_pm_ops, |
| 460 | #endif | ||
| 461 | .of_match_table = sirfsoc_rtc_of_match, | 413 | .of_match_table = sirfsoc_rtc_of_match, |
| 462 | }, | 414 | }, |
| 463 | .probe = sirfsoc_rtc_probe, | 415 | .probe = sirfsoc_rtc_probe, |
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c index c492cf0ab8cd..d2cdb9823a15 100644 --- a/drivers/rtc/rtc-spear.c +++ b/drivers/rtc/rtc-spear.c | |||
| @@ -365,10 +365,8 @@ static int spear_rtc_probe(struct platform_device *pdev) | |||
| 365 | } | 365 | } |
| 366 | 366 | ||
| 367 | config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); | 367 | config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); |
| 368 | if (!config) { | 368 | if (!config) |
| 369 | dev_err(&pdev->dev, "out of memory\n"); | ||
| 370 | return -ENOMEM; | 369 | return -ENOMEM; |
| 371 | } | ||
| 372 | 370 | ||
| 373 | /* alarm irqs */ | 371 | /* alarm irqs */ |
| 374 | irq = platform_get_irq(pdev, 0); | 372 | irq = platform_get_irq(pdev, 0); |
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c index a176ba614683..35ed49ea1f81 100644 --- a/drivers/rtc/rtc-stk17ta8.c +++ b/drivers/rtc/rtc-stk17ta8.c | |||
| @@ -214,8 +214,7 @@ static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id) | |||
| 214 | events |= RTC_UF; | 214 | events |= RTC_UF; |
| 215 | else | 215 | else |
| 216 | events |= RTC_AF; | 216 | events |= RTC_AF; |
| 217 | if (likely(pdata->rtc)) | 217 | rtc_update_irq(pdata->rtc, 1, events); |
| 218 | rtc_update_irq(pdata->rtc, 1, events); | ||
| 219 | } | 218 | } |
| 220 | spin_unlock(&pdata->lock); | 219 | spin_unlock(&pdata->lock); |
| 221 | return events ? IRQ_HANDLED : IRQ_NONE; | 220 | return events ? IRQ_HANDLED : IRQ_NONE; |
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c index 68a35284e5ad..b6f21f73d508 100644 --- a/drivers/rtc/rtc-sunxi.c +++ b/drivers/rtc/rtc-sunxi.c | |||
| @@ -428,7 +428,7 @@ static const struct rtc_class_ops sunxi_rtc_ops = { | |||
| 428 | }; | 428 | }; |
| 429 | 429 | ||
| 430 | static const struct of_device_id sunxi_rtc_dt_ids[] = { | 430 | static const struct of_device_id sunxi_rtc_dt_ids[] = { |
| 431 | { .compatible = "allwinner,sun4i-rtc", .data = &data_year_param[0] }, | 431 | { .compatible = "allwinner,sun4i-a10-rtc", .data = &data_year_param[0] }, |
| 432 | { .compatible = "allwinner,sun7i-a20-rtc", .data = &data_year_param[1] }, | 432 | { .compatible = "allwinner,sun7i-a20-rtc", .data = &data_year_param[1] }, |
| 433 | { /* sentinel */ }, | 433 | { /* sentinel */ }, |
| 434 | }; | 434 | }; |
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index 7746e65b93f2..6599c20bc454 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c | |||
| @@ -104,20 +104,17 @@ static int test_probe(struct platform_device *plat_dev) | |||
| 104 | rtc = devm_rtc_device_register(&plat_dev->dev, "test", | 104 | rtc = devm_rtc_device_register(&plat_dev->dev, "test", |
| 105 | &test_rtc_ops, THIS_MODULE); | 105 | &test_rtc_ops, THIS_MODULE); |
| 106 | if (IS_ERR(rtc)) { | 106 | if (IS_ERR(rtc)) { |
| 107 | err = PTR_ERR(rtc); | 107 | return PTR_ERR(rtc); |
| 108 | return err; | ||
| 109 | } | 108 | } |
| 110 | 109 | ||
| 111 | err = device_create_file(&plat_dev->dev, &dev_attr_irq); | 110 | err = device_create_file(&plat_dev->dev, &dev_attr_irq); |
| 112 | if (err) | 111 | if (err) |
| 113 | goto err; | 112 | dev_err(&plat_dev->dev, "Unable to create sysfs entry: %s\n", |
| 113 | dev_attr_irq.attr.name); | ||
| 114 | 114 | ||
| 115 | platform_set_drvdata(plat_dev, rtc); | 115 | platform_set_drvdata(plat_dev, rtc); |
| 116 | 116 | ||
| 117 | return 0; | 117 | return 0; |
| 118 | |||
| 119 | err: | ||
| 120 | return err; | ||
| 121 | } | 118 | } |
| 122 | 119 | ||
| 123 | static int test_remove(struct platform_device *plat_dev) | 120 | static int test_remove(struct platform_device *plat_dev) |
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c index 4f87234e0dee..2e678c681b13 100644 --- a/drivers/rtc/rtc-tx4939.c +++ b/drivers/rtc/rtc-tx4939.c | |||
| @@ -176,8 +176,8 @@ static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id) | |||
| 176 | tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP); | 176 | tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP); |
| 177 | } | 177 | } |
| 178 | spin_unlock(&pdata->lock); | 178 | spin_unlock(&pdata->lock); |
| 179 | if (likely(pdata->rtc)) | 179 | rtc_update_irq(pdata->rtc, 1, events); |
| 180 | rtc_update_irq(pdata->rtc, 1, events); | 180 | |
| 181 | return IRQ_HANDLED; | 181 | return IRQ_HANDLED; |
| 182 | } | 182 | } |
| 183 | 183 | ||
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index df2ef3eba7cd..051da968da6d 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c | |||
| @@ -79,7 +79,6 @@ | |||
| 79 | 79 | ||
| 80 | struct vt8500_rtc { | 80 | struct vt8500_rtc { |
| 81 | void __iomem *regbase; | 81 | void __iomem *regbase; |
| 82 | struct resource *res; | ||
| 83 | int irq_alarm; | 82 | int irq_alarm; |
| 84 | struct rtc_device *rtc; | 83 | struct rtc_device *rtc; |
| 85 | spinlock_t lock; /* Protects this structure */ | 84 | spinlock_t lock; /* Protects this structure */ |
| @@ -209,6 +208,7 @@ static const struct rtc_class_ops vt8500_rtc_ops = { | |||
| 209 | static int vt8500_rtc_probe(struct platform_device *pdev) | 208 | static int vt8500_rtc_probe(struct platform_device *pdev) |
| 210 | { | 209 | { |
| 211 | struct vt8500_rtc *vt8500_rtc; | 210 | struct vt8500_rtc *vt8500_rtc; |
| 211 | struct resource *res; | ||
| 212 | int ret; | 212 | int ret; |
| 213 | 213 | ||
| 214 | vt8500_rtc = devm_kzalloc(&pdev->dev, | 214 | vt8500_rtc = devm_kzalloc(&pdev->dev, |
| @@ -219,34 +219,16 @@ static int vt8500_rtc_probe(struct platform_device *pdev) | |||
| 219 | spin_lock_init(&vt8500_rtc->lock); | 219 | spin_lock_init(&vt8500_rtc->lock); |
| 220 | platform_set_drvdata(pdev, vt8500_rtc); | 220 | platform_set_drvdata(pdev, vt8500_rtc); |
| 221 | 221 | ||
| 222 | vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 223 | if (!vt8500_rtc->res) { | ||
| 224 | dev_err(&pdev->dev, "No I/O memory resource defined\n"); | ||
| 225 | return -ENXIO; | ||
| 226 | } | ||
| 227 | |||
| 228 | vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0); | 222 | vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0); |
| 229 | if (vt8500_rtc->irq_alarm < 0) { | 223 | if (vt8500_rtc->irq_alarm < 0) { |
| 230 | dev_err(&pdev->dev, "No alarm IRQ resource defined\n"); | 224 | dev_err(&pdev->dev, "No alarm IRQ resource defined\n"); |
| 231 | return vt8500_rtc->irq_alarm; | 225 | return vt8500_rtc->irq_alarm; |
| 232 | } | 226 | } |
| 233 | 227 | ||
| 234 | vt8500_rtc->res = devm_request_mem_region(&pdev->dev, | 228 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 235 | vt8500_rtc->res->start, | 229 | vt8500_rtc->regbase = devm_ioremap_resource(&pdev->dev, res); |
| 236 | resource_size(vt8500_rtc->res), | 230 | if (IS_ERR(vt8500_rtc->regbase)) |
| 237 | "vt8500-rtc"); | 231 | return PTR_ERR(vt8500_rtc->regbase); |
| 238 | if (vt8500_rtc->res == NULL) { | ||
| 239 | dev_err(&pdev->dev, "failed to request I/O memory\n"); | ||
| 240 | return -EBUSY; | ||
| 241 | } | ||
| 242 | |||
| 243 | vt8500_rtc->regbase = devm_ioremap(&pdev->dev, vt8500_rtc->res->start, | ||
| 244 | resource_size(vt8500_rtc->res)); | ||
| 245 | if (!vt8500_rtc->regbase) { | ||
| 246 | dev_err(&pdev->dev, "Unable to map RTC I/O memory\n"); | ||
| 247 | ret = -EBUSY; | ||
| 248 | goto err_return; | ||
| 249 | } | ||
| 250 | 232 | ||
| 251 | /* Enable RTC and set it to 24-hour mode */ | 233 | /* Enable RTC and set it to 24-hour mode */ |
| 252 | writel(VT8500_RTC_CR_ENABLE, | 234 | writel(VT8500_RTC_CR_ENABLE, |
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index 365dc6505148..b1de58e0b3d0 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c | |||
| @@ -660,7 +660,7 @@ static int x1205_probe(struct i2c_client *client, | |||
| 660 | 660 | ||
| 661 | err = x1205_sysfs_register(&client->dev); | 661 | err = x1205_sysfs_register(&client->dev); |
| 662 | if (err) | 662 | if (err) |
| 663 | return err; | 663 | dev_err(&client->dev, "Unable to create sysfs entries\n"); |
| 664 | 664 | ||
| 665 | return 0; | 665 | return 0; |
| 666 | } | 666 | } |
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index 26003d3c1be7..7c4fd97a7fa0 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c | |||
| @@ -1877,7 +1877,7 @@ void ll_delete_inode(struct inode *inode) | |||
| 1877 | cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, | 1877 | cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, |
| 1878 | CL_FSYNC_DISCARD, 1); | 1878 | CL_FSYNC_DISCARD, 1); |
| 1879 | 1879 | ||
| 1880 | truncate_inode_pages(&inode->i_data, 0); | 1880 | truncate_inode_pages_final(&inode->i_data); |
| 1881 | 1881 | ||
| 1882 | /* Workaround for LU-118 */ | 1882 | /* Workaround for LU-118 */ |
| 1883 | if (inode->i_data.nrpages) { | 1883 | if (inode->i_data.nrpages) { |
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c index ee0c0a982e4e..ec5350f2c28a 100644 --- a/drivers/video/backlight/aat2870_bl.c +++ b/drivers/video/backlight/aat2870_bl.c | |||
| @@ -149,8 +149,6 @@ static int aat2870_bl_probe(struct platform_device *pdev) | |||
| 149 | sizeof(struct aat2870_bl_driver_data), | 149 | sizeof(struct aat2870_bl_driver_data), |
| 150 | GFP_KERNEL); | 150 | GFP_KERNEL); |
| 151 | if (!aat2870_bl) { | 151 | if (!aat2870_bl) { |
| 152 | dev_err(&pdev->dev, | ||
| 153 | "Failed to allocate memory for aat2870 backlight\n"); | ||
| 154 | ret = -ENOMEM; | 152 | ret = -ENOMEM; |
| 155 | goto out; | 153 | goto out; |
| 156 | } | 154 | } |
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index 9d656717d0f7..be8d83deca7d 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c | |||
| @@ -224,10 +224,8 @@ static int adp8860_led_probe(struct i2c_client *client) | |||
| 224 | 224 | ||
| 225 | led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds, | 225 | led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds, |
| 226 | GFP_KERNEL); | 226 | GFP_KERNEL); |
| 227 | if (led == NULL) { | 227 | if (led == NULL) |
| 228 | dev_err(&client->dev, "failed to alloc memory\n"); | ||
| 229 | return -ENOMEM; | 228 | return -ENOMEM; |
| 230 | } | ||
| 231 | 229 | ||
| 232 | ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law); | 230 | ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law); |
| 233 | ret = adp8860_write(client, ADP8860_ISCT1, | 231 | ret = adp8860_write(client, ADP8860_ISCT1, |
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 63707205326b..251af4d38d86 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c | |||
| @@ -246,10 +246,8 @@ static int adp8870_led_probe(struct i2c_client *client) | |||
| 246 | 246 | ||
| 247 | led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led), | 247 | led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led), |
| 248 | GFP_KERNEL); | 248 | GFP_KERNEL); |
| 249 | if (led == NULL) { | 249 | if (led == NULL) |
| 250 | dev_err(&client->dev, "failed to alloc memory\n"); | ||
| 251 | return -ENOMEM; | 250 | return -ENOMEM; |
| 252 | } | ||
| 253 | 251 | ||
| 254 | ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law); | 252 | ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law); |
| 255 | if (ret) | 253 | if (ret) |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 5d05555fe841..27d3cf255e78 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
| @@ -34,13 +34,15 @@ static const char *const backlight_types[] = { | |||
| 34 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) | 34 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) |
| 35 | /* This callback gets called when something important happens inside a | 35 | /* This callback gets called when something important happens inside a |
| 36 | * framebuffer driver. We're looking if that important event is blanking, | 36 | * framebuffer driver. We're looking if that important event is blanking, |
| 37 | * and if it is, we're switching backlight power as well ... | 37 | * and if it is and necessary, we're switching backlight power as well ... |
| 38 | */ | 38 | */ |
| 39 | static int fb_notifier_callback(struct notifier_block *self, | 39 | static int fb_notifier_callback(struct notifier_block *self, |
| 40 | unsigned long event, void *data) | 40 | unsigned long event, void *data) |
| 41 | { | 41 | { |
| 42 | struct backlight_device *bd; | 42 | struct backlight_device *bd; |
| 43 | struct fb_event *evdata = data; | 43 | struct fb_event *evdata = data; |
| 44 | int node = evdata->info->node; | ||
| 45 | int fb_blank = 0; | ||
| 44 | 46 | ||
| 45 | /* If we aren't interested in this event, skip it immediately ... */ | 47 | /* If we aren't interested in this event, skip it immediately ... */ |
| 46 | if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK) | 48 | if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK) |
| @@ -51,12 +53,24 @@ static int fb_notifier_callback(struct notifier_block *self, | |||
| 51 | if (bd->ops) | 53 | if (bd->ops) |
| 52 | if (!bd->ops->check_fb || | 54 | if (!bd->ops->check_fb || |
| 53 | bd->ops->check_fb(bd, evdata->info)) { | 55 | bd->ops->check_fb(bd, evdata->info)) { |
| 54 | bd->props.fb_blank = *(int *)evdata->data; | 56 | fb_blank = *(int *)evdata->data; |
| 55 | if (bd->props.fb_blank == FB_BLANK_UNBLANK) | 57 | if (fb_blank == FB_BLANK_UNBLANK && |
| 56 | bd->props.state &= ~BL_CORE_FBBLANK; | 58 | !bd->fb_bl_on[node]) { |
| 57 | else | 59 | bd->fb_bl_on[node] = true; |
| 58 | bd->props.state |= BL_CORE_FBBLANK; | 60 | if (!bd->use_count++) { |
| 59 | backlight_update_status(bd); | 61 | bd->props.state &= ~BL_CORE_FBBLANK; |
| 62 | bd->props.fb_blank = FB_BLANK_UNBLANK; | ||
| 63 | backlight_update_status(bd); | ||
| 64 | } | ||
| 65 | } else if (fb_blank != FB_BLANK_UNBLANK && | ||
| 66 | bd->fb_bl_on[node]) { | ||
| 67 | bd->fb_bl_on[node] = false; | ||
| 68 | if (!(--bd->use_count)) { | ||
| 69 | bd->props.state |= BL_CORE_FBBLANK; | ||
| 70 | bd->props.fb_blank = fb_blank; | ||
| 71 | backlight_update_status(bd); | ||
| 72 | } | ||
| 73 | } | ||
| 60 | } | 74 | } |
| 61 | mutex_unlock(&bd->ops_lock); | 75 | mutex_unlock(&bd->ops_lock); |
| 62 | return 0; | 76 | return 0; |
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index db8db5fa6583..51d18d637e2b 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c | |||
| @@ -543,10 +543,8 @@ static int corgi_lcd_probe(struct spi_device *spi) | |||
| 543 | } | 543 | } |
| 544 | 544 | ||
| 545 | lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL); | 545 | lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL); |
| 546 | if (!lcd) { | 546 | if (!lcd) |
| 547 | dev_err(&spi->dev, "failed to allocate memory\n"); | ||
| 548 | return -ENOMEM; | 547 | return -ENOMEM; |
| 549 | } | ||
| 550 | 548 | ||
| 551 | lcd->spi_dev = spi; | 549 | lcd->spi_dev = spi; |
| 552 | 550 | ||
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c index 985e854e244b..23f50b92a930 100644 --- a/drivers/video/backlight/hx8357.c +++ b/drivers/video/backlight/hx8357.c | |||
| @@ -587,10 +587,8 @@ static int hx8357_probe(struct spi_device *spi) | |||
| 587 | int i, ret; | 587 | int i, ret; |
| 588 | 588 | ||
| 589 | lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); | 589 | lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); |
| 590 | if (!lcd) { | 590 | if (!lcd) |
| 591 | dev_err(&spi->dev, "Couldn't allocate lcd internal structure!\n"); | ||
| 592 | return -ENOMEM; | 591 | return -ENOMEM; |
| 593 | } | ||
| 594 | 592 | ||
| 595 | ret = spi_setup(spi); | 593 | ret = spi_setup(spi); |
| 596 | if (ret < 0) { | 594 | if (ret < 0) { |
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c index 73464e4b4c74..ea67fe199e34 100644 --- a/drivers/video/backlight/ili922x.c +++ b/drivers/video/backlight/ili922x.c | |||
| @@ -482,10 +482,8 @@ static int ili922x_probe(struct spi_device *spi) | |||
| 482 | u16 reg = 0; | 482 | u16 reg = 0; |
| 483 | 483 | ||
| 484 | ili = devm_kzalloc(&spi->dev, sizeof(*ili), GFP_KERNEL); | 484 | ili = devm_kzalloc(&spi->dev, sizeof(*ili), GFP_KERNEL); |
| 485 | if (!ili) { | 485 | if (!ili) |
| 486 | dev_err(&spi->dev, "cannot alloc priv data\n"); | ||
| 487 | return -ENOMEM; | 486 | return -ENOMEM; |
| 488 | } | ||
| 489 | 487 | ||
| 490 | ili->spi = spi; | 488 | ili->spi = spi; |
| 491 | spi_set_drvdata(spi, ili); | 489 | spi_set_drvdata(spi, ili); |
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c index e2b8b40a9bd9..2cf39e6d519d 100644 --- a/drivers/video/backlight/ili9320.c +++ b/drivers/video/backlight/ili9320.c | |||
| @@ -219,10 +219,8 @@ int ili9320_probe_spi(struct spi_device *spi, | |||
| 219 | /* allocate and initialse our state */ | 219 | /* allocate and initialse our state */ |
| 220 | 220 | ||
| 221 | ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL); | 221 | ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL); |
| 222 | if (ili == NULL) { | 222 | if (ili == NULL) |
| 223 | dev_err(dev, "no memory for device\n"); | ||
| 224 | return -ENOMEM; | 223 | return -ENOMEM; |
| 225 | } | ||
| 226 | 224 | ||
| 227 | ili->access.spi.id = ILI9320_SPI_IDCODE | ILI9320_SPI_ID(1); | 225 | ili->access.spi.id = ILI9320_SPI_IDCODE | ILI9320_SPI_ID(1); |
| 228 | 226 | ||
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 63e763828e0e..5fa2649c9631 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c | |||
| @@ -181,11 +181,8 @@ static int l4f00242t03_probe(struct spi_device *spi) | |||
| 181 | 181 | ||
| 182 | priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv), | 182 | priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv), |
| 183 | GFP_KERNEL); | 183 | GFP_KERNEL); |
| 184 | 184 | if (priv == NULL) | |
| 185 | if (priv == NULL) { | ||
| 186 | dev_err(&spi->dev, "No memory for this device.\n"); | ||
| 187 | return -ENOMEM; | 185 | return -ENOMEM; |
| 188 | } | ||
| 189 | 186 | ||
| 190 | spi_set_drvdata(spi, priv); | 187 | spi_set_drvdata(spi, priv); |
| 191 | spi->bits_per_word = 9; | 188 | spi->bits_per_word = 9; |
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c index 187d1c283c1d..cff1fbe89a1b 100644 --- a/drivers/video/backlight/lm3533_bl.c +++ b/drivers/video/backlight/lm3533_bl.c | |||
| @@ -296,11 +296,8 @@ static int lm3533_bl_probe(struct platform_device *pdev) | |||
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL); | 298 | bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL); |
| 299 | if (!bl) { | 299 | if (!bl) |
| 300 | dev_err(&pdev->dev, | ||
| 301 | "failed to allocate memory for backlight\n"); | ||
| 302 | return -ENOMEM; | 300 | return -ENOMEM; |
| 303 | } | ||
| 304 | 301 | ||
| 305 | bl->lm3533 = lm3533; | 302 | bl->lm3533 = lm3533; |
| 306 | bl->id = pdev->id; | 303 | bl->id = pdev->id; |
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c index de8832504f68..14590c54aedf 100644 --- a/drivers/video/backlight/lms283gf05.c +++ b/drivers/video/backlight/lms283gf05.c | |||
| @@ -168,10 +168,8 @@ static int lms283gf05_probe(struct spi_device *spi) | |||
| 168 | 168 | ||
| 169 | st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state), | 169 | st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state), |
| 170 | GFP_KERNEL); | 170 | GFP_KERNEL); |
| 171 | if (st == NULL) { | 171 | if (st == NULL) |
| 172 | dev_err(&spi->dev, "No memory for device state\n"); | ||
| 173 | return -ENOMEM; | 172 | return -ENOMEM; |
| 174 | } | ||
| 175 | 173 | ||
| 176 | ld = devm_lcd_device_register(&spi->dev, "lms283gf05", &spi->dev, st, | 174 | ld = devm_lcd_device_register(&spi->dev, "lms283gf05", &spi->dev, st, |
| 177 | &lms_ops); | 175 | &lms_ops); |
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c index d01884d4f1bf..c3d2e209fc8f 100644 --- a/drivers/video/backlight/platform_lcd.c +++ b/drivers/video/backlight/platform_lcd.c | |||
| @@ -94,10 +94,8 @@ static int platform_lcd_probe(struct platform_device *pdev) | |||
| 94 | 94 | ||
| 95 | plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd), | 95 | plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd), |
| 96 | GFP_KERNEL); | 96 | GFP_KERNEL); |
| 97 | if (!plcd) { | 97 | if (!plcd) |
| 98 | dev_err(dev, "no memory for state\n"); | ||
| 99 | return -ENOMEM; | 98 | return -ENOMEM; |
| 100 | } | ||
| 101 | 99 | ||
| 102 | plcd->us = dev; | 100 | plcd->us = dev; |
| 103 | plcd->pdata = pdata; | 101 | plcd->pdata = pdata; |
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c index cbba37e6836e..595dcf561020 100644 --- a/drivers/video/backlight/tps65217_bl.c +++ b/drivers/video/backlight/tps65217_bl.c | |||
| @@ -200,7 +200,6 @@ tps65217_bl_parse_dt(struct platform_device *pdev) | |||
| 200 | 200 | ||
| 201 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 201 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
| 202 | if (!pdata) { | 202 | if (!pdata) { |
| 203 | dev_err(&pdev->dev, "failed to allocate platform data\n"); | ||
| 204 | err = ERR_PTR(-ENOMEM); | 203 | err = ERR_PTR(-ENOMEM); |
| 205 | goto err; | 204 | goto err; |
| 206 | } | 205 | } |
| @@ -296,10 +295,8 @@ static int tps65217_bl_probe(struct platform_device *pdev) | |||
| 296 | 295 | ||
| 297 | tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl), | 296 | tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl), |
| 298 | GFP_KERNEL); | 297 | GFP_KERNEL); |
| 299 | if (tps65217_bl == NULL) { | 298 | if (tps65217_bl == NULL) |
| 300 | dev_err(&pdev->dev, "allocation of struct tps65217_bl failed\n"); | ||
| 301 | return -ENOMEM; | 299 | return -ENOMEM; |
| 302 | } | ||
| 303 | 300 | ||
| 304 | tps65217_bl->tps = tps; | 301 | tps65217_bl->tps = tps; |
| 305 | tps65217_bl->dev = &pdev->dev; | 302 | tps65217_bl->dev = &pdev->dev; |
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index bb7991c7e5c7..53161ec058a7 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
| @@ -451,7 +451,7 @@ void v9fs_evict_inode(struct inode *inode) | |||
| 451 | { | 451 | { |
| 452 | struct v9fs_inode *v9inode = V9FS_I(inode); | 452 | struct v9fs_inode *v9inode = V9FS_I(inode); |
| 453 | 453 | ||
| 454 | truncate_inode_pages(inode->i_mapping, 0); | 454 | truncate_inode_pages_final(inode->i_mapping); |
| 455 | clear_inode(inode); | 455 | clear_inode(inode); |
| 456 | filemap_fdatawrite(inode->i_mapping); | 456 | filemap_fdatawrite(inode->i_mapping); |
| 457 | 457 | ||
diff --git a/fs/affs/inode.c b/fs/affs/inode.c index 0e092d08680e..96df91e8c334 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c | |||
| @@ -259,7 +259,7 @@ affs_evict_inode(struct inode *inode) | |||
| 259 | { | 259 | { |
| 260 | unsigned long cache_page; | 260 | unsigned long cache_page; |
| 261 | pr_debug("AFFS: evict_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); | 261 | pr_debug("AFFS: evict_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); |
| 262 | truncate_inode_pages(&inode->i_data, 0); | 262 | truncate_inode_pages_final(&inode->i_data); |
| 263 | 263 | ||
| 264 | if (!inode->i_nlink) { | 264 | if (!inode->i_nlink) { |
| 265 | inode->i_size = 0; | 265 | inode->i_size = 0; |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index ce25d755b7aa..294671288449 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
| @@ -422,7 +422,7 @@ void afs_evict_inode(struct inode *inode) | |||
| 422 | 422 | ||
| 423 | ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); | 423 | ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); |
| 424 | 424 | ||
| 425 | truncate_inode_pages(&inode->i_data, 0); | 425 | truncate_inode_pages_final(&inode->i_data); |
| 426 | clear_inode(inode); | 426 | clear_inode(inode); |
| 427 | 427 | ||
| 428 | afs_give_up_callback(vnode); | 428 | afs_give_up_callback(vnode); |
diff --git a/fs/befs/Makefile b/fs/befs/Makefile index 2f370bd7a50d..8b9f66642a83 100644 --- a/fs/befs/Makefile +++ b/fs/befs/Makefile | |||
| @@ -3,5 +3,5 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_BEFS_FS) += befs.o | 5 | obj-$(CONFIG_BEFS_FS) += befs.o |
| 6 | 6 | ccflags-$(CONFIG_BEFS_DEBUG) += -DDEBUG | |
| 7 | befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o | 7 | befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o |
diff --git a/fs/befs/befs.h b/fs/befs/befs.h index b26642839156..3a7813ab8c95 100644 --- a/fs/befs/befs.h +++ b/fs/befs/befs.h | |||
| @@ -88,8 +88,11 @@ enum befs_err { | |||
| 88 | 88 | ||
| 89 | /****************************/ | 89 | /****************************/ |
| 90 | /* debug.c */ | 90 | /* debug.c */ |
| 91 | __printf(2, 3) | ||
| 91 | void befs_error(const struct super_block *sb, const char *fmt, ...); | 92 | void befs_error(const struct super_block *sb, const char *fmt, ...); |
| 93 | __printf(2, 3) | ||
| 92 | void befs_warning(const struct super_block *sb, const char *fmt, ...); | 94 | void befs_warning(const struct super_block *sb, const char *fmt, ...); |
| 95 | __printf(2, 3) | ||
| 93 | void befs_debug(const struct super_block *sb, const char *fmt, ...); | 96 | void befs_debug(const struct super_block *sb, const char *fmt, ...); |
| 94 | 97 | ||
| 95 | void befs_dump_super_block(const struct super_block *sb, befs_super_block *); | 98 | void befs_dump_super_block(const struct super_block *sb, befs_super_block *); |
diff --git a/fs/befs/btree.c b/fs/befs/btree.c index 74e397db0b8b..a2cd305a993a 100644 --- a/fs/befs/btree.c +++ b/fs/befs/btree.c | |||
| @@ -137,7 +137,7 @@ befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, | |||
| 137 | struct buffer_head *bh = NULL; | 137 | struct buffer_head *bh = NULL; |
| 138 | befs_disk_btree_super *od_sup = NULL; | 138 | befs_disk_btree_super *od_sup = NULL; |
| 139 | 139 | ||
| 140 | befs_debug(sb, "---> befs_btree_read_super()"); | 140 | befs_debug(sb, "---> %s", __func__); |
| 141 | 141 | ||
| 142 | bh = befs_read_datastream(sb, ds, 0, NULL); | 142 | bh = befs_read_datastream(sb, ds, 0, NULL); |
| 143 | 143 | ||
| @@ -162,11 +162,11 @@ befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, | |||
| 162 | goto error; | 162 | goto error; |
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | befs_debug(sb, "<--- befs_btree_read_super()"); | 165 | befs_debug(sb, "<--- %s", __func__); |
| 166 | return BEFS_OK; | 166 | return BEFS_OK; |
| 167 | 167 | ||
| 168 | error: | 168 | error: |
| 169 | befs_debug(sb, "<--- befs_btree_read_super() ERROR"); | 169 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 170 | return BEFS_ERR; | 170 | return BEFS_ERR; |
| 171 | } | 171 | } |
| 172 | 172 | ||
| @@ -195,16 +195,16 @@ befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, | |||
| 195 | { | 195 | { |
| 196 | uint off = 0; | 196 | uint off = 0; |
| 197 | 197 | ||
| 198 | befs_debug(sb, "---> befs_bt_read_node()"); | 198 | befs_debug(sb, "---> %s", __func__); |
| 199 | 199 | ||
| 200 | if (node->bh) | 200 | if (node->bh) |
| 201 | brelse(node->bh); | 201 | brelse(node->bh); |
| 202 | 202 | ||
| 203 | node->bh = befs_read_datastream(sb, ds, node_off, &off); | 203 | node->bh = befs_read_datastream(sb, ds, node_off, &off); |
| 204 | if (!node->bh) { | 204 | if (!node->bh) { |
| 205 | befs_error(sb, "befs_bt_read_node() failed to read " | 205 | befs_error(sb, "%s failed to read " |
| 206 | "node at %Lu", node_off); | 206 | "node at %llu", __func__, node_off); |
| 207 | befs_debug(sb, "<--- befs_bt_read_node() ERROR"); | 207 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 208 | 208 | ||
| 209 | return BEFS_ERR; | 209 | return BEFS_ERR; |
| 210 | } | 210 | } |
| @@ -221,7 +221,7 @@ befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, | |||
| 221 | node->head.all_key_length = | 221 | node->head.all_key_length = |
| 222 | fs16_to_cpu(sb, node->od_node->all_key_length); | 222 | fs16_to_cpu(sb, node->od_node->all_key_length); |
| 223 | 223 | ||
| 224 | befs_debug(sb, "<--- befs_btree_read_node()"); | 224 | befs_debug(sb, "<--- %s", __func__); |
| 225 | return BEFS_OK; | 225 | return BEFS_OK; |
| 226 | } | 226 | } |
| 227 | 227 | ||
| @@ -252,7 +252,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds, | |||
| 252 | befs_off_t node_off; | 252 | befs_off_t node_off; |
| 253 | int res; | 253 | int res; |
| 254 | 254 | ||
| 255 | befs_debug(sb, "---> befs_btree_find() Key: %s", key); | 255 | befs_debug(sb, "---> %s Key: %s", __func__, key); |
| 256 | 256 | ||
| 257 | if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) { | 257 | if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) { |
| 258 | befs_error(sb, | 258 | befs_error(sb, |
| @@ -263,7 +263,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds, | |||
| 263 | this_node = kmalloc(sizeof (befs_btree_node), | 263 | this_node = kmalloc(sizeof (befs_btree_node), |
| 264 | GFP_NOFS); | 264 | GFP_NOFS); |
| 265 | if (!this_node) { | 265 | if (!this_node) { |
| 266 | befs_error(sb, "befs_btree_find() failed to allocate %u " | 266 | befs_error(sb, "befs_btree_find() failed to allocate %zu " |
| 267 | "bytes of memory", sizeof (befs_btree_node)); | 267 | "bytes of memory", sizeof (befs_btree_node)); |
| 268 | goto error; | 268 | goto error; |
| 269 | } | 269 | } |
| @@ -274,7 +274,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds, | |||
| 274 | node_off = bt_super.root_node_ptr; | 274 | node_off = bt_super.root_node_ptr; |
| 275 | if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { | 275 | if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { |
| 276 | befs_error(sb, "befs_btree_find() failed to read " | 276 | befs_error(sb, "befs_btree_find() failed to read " |
| 277 | "node at %Lu", node_off); | 277 | "node at %llu", node_off); |
| 278 | goto error_alloc; | 278 | goto error_alloc; |
| 279 | } | 279 | } |
| 280 | 280 | ||
| @@ -285,7 +285,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds, | |||
| 285 | /* if no match, go to overflow node */ | 285 | /* if no match, go to overflow node */ |
| 286 | if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { | 286 | if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { |
| 287 | befs_error(sb, "befs_btree_find() failed to read " | 287 | befs_error(sb, "befs_btree_find() failed to read " |
| 288 | "node at %Lu", node_off); | 288 | "node at %llu", node_off); |
| 289 | goto error_alloc; | 289 | goto error_alloc; |
| 290 | } | 290 | } |
| 291 | } | 291 | } |
| @@ -298,11 +298,11 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds, | |||
| 298 | kfree(this_node); | 298 | kfree(this_node); |
| 299 | 299 | ||
| 300 | if (res != BEFS_BT_MATCH) { | 300 | if (res != BEFS_BT_MATCH) { |
| 301 | befs_debug(sb, "<--- befs_btree_find() Key %s not found", key); | 301 | befs_debug(sb, "<--- %s Key %s not found", __func__, key); |
| 302 | *value = 0; | 302 | *value = 0; |
| 303 | return BEFS_BT_NOT_FOUND; | 303 | return BEFS_BT_NOT_FOUND; |
| 304 | } | 304 | } |
| 305 | befs_debug(sb, "<--- befs_btree_find() Found key %s, value %Lu", | 305 | befs_debug(sb, "<--- %s Found key %s, value %llu", __func__, |
| 306 | key, *value); | 306 | key, *value); |
| 307 | return BEFS_OK; | 307 | return BEFS_OK; |
| 308 | 308 | ||
| @@ -310,7 +310,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds, | |||
| 310 | kfree(this_node); | 310 | kfree(this_node); |
| 311 | error: | 311 | error: |
| 312 | *value = 0; | 312 | *value = 0; |
| 313 | befs_debug(sb, "<--- befs_btree_find() ERROR"); | 313 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 314 | return BEFS_ERR; | 314 | return BEFS_ERR; |
| 315 | } | 315 | } |
| 316 | 316 | ||
| @@ -343,7 +343,7 @@ befs_find_key(struct super_block *sb, befs_btree_node * node, | |||
| 343 | char *thiskey; | 343 | char *thiskey; |
| 344 | fs64 *valarray; | 344 | fs64 *valarray; |
| 345 | 345 | ||
| 346 | befs_debug(sb, "---> befs_find_key() %s", findkey); | 346 | befs_debug(sb, "---> %s %s", __func__, findkey); |
| 347 | 347 | ||
| 348 | *value = 0; | 348 | *value = 0; |
| 349 | 349 | ||
| @@ -355,7 +355,7 @@ befs_find_key(struct super_block *sb, befs_btree_node * node, | |||
| 355 | 355 | ||
| 356 | eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len); | 356 | eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len); |
| 357 | if (eq < 0) { | 357 | if (eq < 0) { |
| 358 | befs_debug(sb, "<--- befs_find_key() %s not found", findkey); | 358 | befs_debug(sb, "<--- %s %s not found", __func__, findkey); |
| 359 | return BEFS_BT_NOT_FOUND; | 359 | return BEFS_BT_NOT_FOUND; |
| 360 | } | 360 | } |
| 361 | 361 | ||
| @@ -373,8 +373,8 @@ befs_find_key(struct super_block *sb, befs_btree_node * node, | |||
| 373 | findkey_len); | 373 | findkey_len); |
| 374 | 374 | ||
| 375 | if (eq == 0) { | 375 | if (eq == 0) { |
| 376 | befs_debug(sb, "<--- befs_find_key() found %s at %d", | 376 | befs_debug(sb, "<--- %s found %s at %d", |
| 377 | thiskey, mid); | 377 | __func__, thiskey, mid); |
| 378 | 378 | ||
| 379 | *value = fs64_to_cpu(sb, valarray[mid]); | 379 | *value = fs64_to_cpu(sb, valarray[mid]); |
| 380 | return BEFS_BT_MATCH; | 380 | return BEFS_BT_MATCH; |
| @@ -388,7 +388,7 @@ befs_find_key(struct super_block *sb, befs_btree_node * node, | |||
| 388 | *value = fs64_to_cpu(sb, valarray[mid + 1]); | 388 | *value = fs64_to_cpu(sb, valarray[mid + 1]); |
| 389 | else | 389 | else |
| 390 | *value = fs64_to_cpu(sb, valarray[mid]); | 390 | *value = fs64_to_cpu(sb, valarray[mid]); |
| 391 | befs_debug(sb, "<--- befs_find_key() found %s at %d", thiskey, mid); | 391 | befs_debug(sb, "<--- %s found %s at %d", __func__, thiskey, mid); |
| 392 | return BEFS_BT_PARMATCH; | 392 | return BEFS_BT_PARMATCH; |
| 393 | } | 393 | } |
| 394 | 394 | ||
| @@ -428,7 +428,7 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 428 | 428 | ||
| 429 | uint key_sum = 0; | 429 | uint key_sum = 0; |
| 430 | 430 | ||
| 431 | befs_debug(sb, "---> befs_btree_read()"); | 431 | befs_debug(sb, "---> %s", __func__); |
| 432 | 432 | ||
| 433 | if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) { | 433 | if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) { |
| 434 | befs_error(sb, | 434 | befs_error(sb, |
| @@ -437,7 +437,7 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 437 | } | 437 | } |
| 438 | 438 | ||
| 439 | if ((this_node = kmalloc(sizeof (befs_btree_node), GFP_NOFS)) == NULL) { | 439 | if ((this_node = kmalloc(sizeof (befs_btree_node), GFP_NOFS)) == NULL) { |
| 440 | befs_error(sb, "befs_btree_read() failed to allocate %u " | 440 | befs_error(sb, "befs_btree_read() failed to allocate %zu " |
| 441 | "bytes of memory", sizeof (befs_btree_node)); | 441 | "bytes of memory", sizeof (befs_btree_node)); |
| 442 | goto error; | 442 | goto error; |
| 443 | } | 443 | } |
| @@ -452,7 +452,7 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 452 | kfree(this_node); | 452 | kfree(this_node); |
| 453 | *value = 0; | 453 | *value = 0; |
| 454 | *keysize = 0; | 454 | *keysize = 0; |
| 455 | befs_debug(sb, "<--- befs_btree_read() Tree is EMPTY"); | 455 | befs_debug(sb, "<--- %s Tree is EMPTY", __func__); |
| 456 | return BEFS_BT_EMPTY; | 456 | return BEFS_BT_EMPTY; |
| 457 | } else if (res == BEFS_ERR) { | 457 | } else if (res == BEFS_ERR) { |
| 458 | goto error_alloc; | 458 | goto error_alloc; |
| @@ -467,7 +467,8 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 467 | *keysize = 0; | 467 | *keysize = 0; |
| 468 | *value = 0; | 468 | *value = 0; |
| 469 | befs_debug(sb, | 469 | befs_debug(sb, |
| 470 | "<--- befs_btree_read() END of keys at %Lu", | 470 | "<--- %s END of keys at %llu", __func__, |
| 471 | (unsigned long long) | ||
| 471 | key_sum + this_node->head.all_key_count); | 472 | key_sum + this_node->head.all_key_count); |
| 472 | brelse(this_node->bh); | 473 | brelse(this_node->bh); |
| 473 | kfree(this_node); | 474 | kfree(this_node); |
| @@ -478,8 +479,8 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 478 | node_off = this_node->head.right; | 479 | node_off = this_node->head.right; |
| 479 | 480 | ||
| 480 | if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { | 481 | if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { |
| 481 | befs_error(sb, "befs_btree_read() failed to read " | 482 | befs_error(sb, "%s failed to read node at %llu", |
| 482 | "node at %Lu", node_off); | 483 | __func__, (unsigned long long)node_off); |
| 483 | goto error_alloc; | 484 | goto error_alloc; |
| 484 | } | 485 | } |
| 485 | } | 486 | } |
| @@ -492,11 +493,13 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 492 | 493 | ||
| 493 | keystart = befs_bt_get_key(sb, this_node, cur_key, &keylen); | 494 | keystart = befs_bt_get_key(sb, this_node, cur_key, &keylen); |
| 494 | 495 | ||
| 495 | befs_debug(sb, "Read [%Lu,%d]: keysize %d", node_off, cur_key, keylen); | 496 | befs_debug(sb, "Read [%llu,%d]: keysize %d", |
| 497 | (long long unsigned int)node_off, (int)cur_key, | ||
| 498 | (int)keylen); | ||
| 496 | 499 | ||
| 497 | if (bufsize < keylen + 1) { | 500 | if (bufsize < keylen + 1) { |
| 498 | befs_error(sb, "befs_btree_read() keybuf too small (%u) " | 501 | befs_error(sb, "%s keybuf too small (%zu) " |
| 499 | "for key of size %d", bufsize, keylen); | 502 | "for key of size %d", __func__, bufsize, keylen); |
| 500 | brelse(this_node->bh); | 503 | brelse(this_node->bh); |
| 501 | goto error_alloc; | 504 | goto error_alloc; |
| 502 | }; | 505 | }; |
| @@ -506,13 +509,13 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 506 | *keysize = keylen; | 509 | *keysize = keylen; |
| 507 | keybuf[keylen] = '\0'; | 510 | keybuf[keylen] = '\0'; |
| 508 | 511 | ||
| 509 | befs_debug(sb, "Read [%Lu,%d]: Key \"%.*s\", Value %Lu", node_off, | 512 | befs_debug(sb, "Read [%llu,%d]: Key \"%.*s\", Value %llu", node_off, |
| 510 | cur_key, keylen, keybuf, *value); | 513 | cur_key, keylen, keybuf, *value); |
| 511 | 514 | ||
| 512 | brelse(this_node->bh); | 515 | brelse(this_node->bh); |
| 513 | kfree(this_node); | 516 | kfree(this_node); |
| 514 | 517 | ||
| 515 | befs_debug(sb, "<--- befs_btree_read()"); | 518 | befs_debug(sb, "<--- %s", __func__); |
| 516 | 519 | ||
| 517 | return BEFS_OK; | 520 | return BEFS_OK; |
| 518 | 521 | ||
| @@ -522,7 +525,7 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, | |||
| 522 | error: | 525 | error: |
| 523 | *keysize = 0; | 526 | *keysize = 0; |
| 524 | *value = 0; | 527 | *value = 0; |
| 525 | befs_debug(sb, "<--- befs_btree_read() ERROR"); | 528 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 526 | return BEFS_ERR; | 529 | return BEFS_ERR; |
| 527 | } | 530 | } |
| 528 | 531 | ||
| @@ -547,26 +550,26 @@ befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds, | |||
| 547 | befs_off_t * node_off) | 550 | befs_off_t * node_off) |
| 548 | { | 551 | { |
| 549 | 552 | ||
| 550 | befs_debug(sb, "---> befs_btree_seekleaf()"); | 553 | befs_debug(sb, "---> %s", __func__); |
| 551 | 554 | ||
| 552 | if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) { | 555 | if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) { |
| 553 | befs_error(sb, "befs_btree_seekleaf() failed to read " | 556 | befs_error(sb, "%s failed to read " |
| 554 | "node at %Lu", *node_off); | 557 | "node at %llu", __func__, *node_off); |
| 555 | goto error; | 558 | goto error; |
| 556 | } | 559 | } |
| 557 | befs_debug(sb, "Seekleaf to root node %Lu", *node_off); | 560 | befs_debug(sb, "Seekleaf to root node %llu", *node_off); |
| 558 | 561 | ||
| 559 | if (this_node->head.all_key_count == 0 && befs_leafnode(this_node)) { | 562 | if (this_node->head.all_key_count == 0 && befs_leafnode(this_node)) { |
| 560 | befs_debug(sb, "<--- befs_btree_seekleaf() Tree is EMPTY"); | 563 | befs_debug(sb, "<--- %s Tree is EMPTY", __func__); |
| 561 | return BEFS_BT_EMPTY; | 564 | return BEFS_BT_EMPTY; |
| 562 | } | 565 | } |
| 563 | 566 | ||
| 564 | while (!befs_leafnode(this_node)) { | 567 | while (!befs_leafnode(this_node)) { |
| 565 | 568 | ||
| 566 | if (this_node->head.all_key_count == 0) { | 569 | if (this_node->head.all_key_count == 0) { |
| 567 | befs_debug(sb, "befs_btree_seekleaf() encountered " | 570 | befs_debug(sb, "%s encountered " |
| 568 | "an empty interior node: %Lu. Using Overflow " | 571 | "an empty interior node: %llu. Using Overflow " |
| 569 | "node: %Lu", *node_off, | 572 | "node: %llu", __func__, *node_off, |
| 570 | this_node->head.overflow); | 573 | this_node->head.overflow); |
| 571 | *node_off = this_node->head.overflow; | 574 | *node_off = this_node->head.overflow; |
| 572 | } else { | 575 | } else { |
| @@ -574,19 +577,19 @@ befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds, | |||
| 574 | *node_off = fs64_to_cpu(sb, valarray[0]); | 577 | *node_off = fs64_to_cpu(sb, valarray[0]); |
| 575 | } | 578 | } |
| 576 | if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) { | 579 | if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) { |
| 577 | befs_error(sb, "befs_btree_seekleaf() failed to read " | 580 | befs_error(sb, "%s failed to read " |
| 578 | "node at %Lu", *node_off); | 581 | "node at %llu", __func__, *node_off); |
| 579 | goto error; | 582 | goto error; |
| 580 | } | 583 | } |
| 581 | 584 | ||
| 582 | befs_debug(sb, "Seekleaf to child node %Lu", *node_off); | 585 | befs_debug(sb, "Seekleaf to child node %llu", *node_off); |
| 583 | } | 586 | } |
| 584 | befs_debug(sb, "Node %Lu is a leaf node", *node_off); | 587 | befs_debug(sb, "Node %llu is a leaf node", *node_off); |
| 585 | 588 | ||
| 586 | return BEFS_OK; | 589 | return BEFS_OK; |
| 587 | 590 | ||
| 588 | error: | 591 | error: |
| 589 | befs_debug(sb, "<--- befs_btree_seekleaf() ERROR"); | 592 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 590 | return BEFS_ERR; | 593 | return BEFS_ERR; |
| 591 | } | 594 | } |
| 592 | 595 | ||
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c index 59096b5e0fc7..c467bebd50af 100644 --- a/fs/befs/datastream.c +++ b/fs/befs/datastream.c | |||
| @@ -52,26 +52,25 @@ befs_read_datastream(struct super_block *sb, befs_data_stream * ds, | |||
| 52 | befs_block_run run; | 52 | befs_block_run run; |
| 53 | befs_blocknr_t block; /* block coresponding to pos */ | 53 | befs_blocknr_t block; /* block coresponding to pos */ |
| 54 | 54 | ||
| 55 | befs_debug(sb, "---> befs_read_datastream() %Lu", pos); | 55 | befs_debug(sb, "---> %s %llu", __func__, pos); |
| 56 | block = pos >> BEFS_SB(sb)->block_shift; | 56 | block = pos >> BEFS_SB(sb)->block_shift; |
| 57 | if (off) | 57 | if (off) |
| 58 | *off = pos - (block << BEFS_SB(sb)->block_shift); | 58 | *off = pos - (block << BEFS_SB(sb)->block_shift); |
| 59 | 59 | ||
| 60 | if (befs_fblock2brun(sb, ds, block, &run) != BEFS_OK) { | 60 | if (befs_fblock2brun(sb, ds, block, &run) != BEFS_OK) { |
| 61 | befs_error(sb, "BeFS: Error finding disk addr of block %lu", | 61 | befs_error(sb, "BeFS: Error finding disk addr of block %lu", |
| 62 | block); | 62 | (unsigned long)block); |
| 63 | befs_debug(sb, "<--- befs_read_datastream() ERROR"); | 63 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 64 | return NULL; | 64 | return NULL; |
| 65 | } | 65 | } |
| 66 | bh = befs_bread_iaddr(sb, run); | 66 | bh = befs_bread_iaddr(sb, run); |
| 67 | if (!bh) { | 67 | if (!bh) { |
| 68 | befs_error(sb, "BeFS: Error reading block %lu from datastream", | 68 | befs_error(sb, "BeFS: Error reading block %lu from datastream", |
| 69 | block); | 69 | (unsigned long)block); |
| 70 | return NULL; | 70 | return NULL; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | befs_debug(sb, "<--- befs_read_datastream() read data, starting at %Lu", | 73 | befs_debug(sb, "<--- %s read data, starting at %llu", __func__, pos); |
| 74 | pos); | ||
| 75 | 74 | ||
| 76 | return bh; | 75 | return bh; |
| 77 | } | 76 | } |
| @@ -106,7 +105,8 @@ befs_fblock2brun(struct super_block *sb, befs_data_stream * data, | |||
| 106 | } else { | 105 | } else { |
| 107 | befs_error(sb, | 106 | befs_error(sb, |
| 108 | "befs_fblock2brun() was asked to find block %lu, " | 107 | "befs_fblock2brun() was asked to find block %lu, " |
| 109 | "which is not mapped by the datastream\n", fblock); | 108 | "which is not mapped by the datastream\n", |
| 109 | (unsigned long)fblock); | ||
| 110 | err = BEFS_ERR; | 110 | err = BEFS_ERR; |
| 111 | } | 111 | } |
| 112 | return err; | 112 | return err; |
| @@ -128,14 +128,14 @@ befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff, | |||
| 128 | befs_off_t bytes_read = 0; /* bytes readed */ | 128 | befs_off_t bytes_read = 0; /* bytes readed */ |
| 129 | u16 plen; | 129 | u16 plen; |
| 130 | struct buffer_head *bh = NULL; | 130 | struct buffer_head *bh = NULL; |
| 131 | befs_debug(sb, "---> befs_read_lsymlink() length: %Lu", len); | 131 | befs_debug(sb, "---> %s length: %llu", __func__, len); |
| 132 | 132 | ||
| 133 | while (bytes_read < len) { | 133 | while (bytes_read < len) { |
| 134 | bh = befs_read_datastream(sb, ds, bytes_read, NULL); | 134 | bh = befs_read_datastream(sb, ds, bytes_read, NULL); |
| 135 | if (!bh) { | 135 | if (!bh) { |
| 136 | befs_error(sb, "BeFS: Error reading datastream block " | 136 | befs_error(sb, "BeFS: Error reading datastream block " |
| 137 | "starting from %Lu", bytes_read); | 137 | "starting from %llu", bytes_read); |
| 138 | befs_debug(sb, "<--- befs_read_lsymlink() ERROR"); | 138 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 139 | return bytes_read; | 139 | return bytes_read; |
| 140 | 140 | ||
| 141 | } | 141 | } |
| @@ -146,7 +146,8 @@ befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff, | |||
| 146 | bytes_read += plen; | 146 | bytes_read += plen; |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | befs_debug(sb, "<--- befs_read_lsymlink() read %u bytes", bytes_read); | 149 | befs_debug(sb, "<--- %s read %u bytes", __func__, (unsigned int) |
| 150 | bytes_read); | ||
| 150 | return bytes_read; | 151 | return bytes_read; |
| 151 | } | 152 | } |
| 152 | 153 | ||
| @@ -169,7 +170,7 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds) | |||
| 169 | befs_blocknr_t metablocks; /* FS metadata blocks */ | 170 | befs_blocknr_t metablocks; /* FS metadata blocks */ |
| 170 | befs_sb_info *befs_sb = BEFS_SB(sb); | 171 | befs_sb_info *befs_sb = BEFS_SB(sb); |
| 171 | 172 | ||
| 172 | befs_debug(sb, "---> befs_count_blocks()"); | 173 | befs_debug(sb, "---> %s", __func__); |
| 173 | 174 | ||
| 174 | datablocks = ds->size >> befs_sb->block_shift; | 175 | datablocks = ds->size >> befs_sb->block_shift; |
| 175 | if (ds->size & (befs_sb->block_size - 1)) | 176 | if (ds->size & (befs_sb->block_size - 1)) |
| @@ -206,7 +207,7 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds) | |||
| 206 | } | 207 | } |
| 207 | 208 | ||
| 208 | blocks = datablocks + metablocks; | 209 | blocks = datablocks + metablocks; |
| 209 | befs_debug(sb, "<--- befs_count_blocks() %u blocks", blocks); | 210 | befs_debug(sb, "<--- %s %u blocks", __func__, (unsigned int)blocks); |
| 210 | 211 | ||
| 211 | return blocks; | 212 | return blocks; |
| 212 | } | 213 | } |
| @@ -251,11 +252,11 @@ befs_find_brun_direct(struct super_block *sb, befs_data_stream * data, | |||
| 251 | befs_blocknr_t max_block = | 252 | befs_blocknr_t max_block = |
| 252 | data->max_direct_range >> BEFS_SB(sb)->block_shift; | 253 | data->max_direct_range >> BEFS_SB(sb)->block_shift; |
| 253 | 254 | ||
| 254 | befs_debug(sb, "---> befs_find_brun_direct(), find %lu", blockno); | 255 | befs_debug(sb, "---> %s, find %lu", __func__, (unsigned long)blockno); |
| 255 | 256 | ||
| 256 | if (blockno > max_block) { | 257 | if (blockno > max_block) { |
| 257 | befs_error(sb, "befs_find_brun_direct() passed block outside of" | 258 | befs_error(sb, "%s passed block outside of direct region", |
| 258 | "direct region"); | 259 | __func__); |
| 259 | return BEFS_ERR; | 260 | return BEFS_ERR; |
| 260 | } | 261 | } |
| 261 | 262 | ||
| @@ -267,13 +268,14 @@ befs_find_brun_direct(struct super_block *sb, befs_data_stream * data, | |||
| 267 | run->start = array[i].start + offset; | 268 | run->start = array[i].start + offset; |
| 268 | run->len = array[i].len - offset; | 269 | run->len = array[i].len - offset; |
| 269 | 270 | ||
| 270 | befs_debug(sb, "---> befs_find_brun_direct(), " | 271 | befs_debug(sb, "---> %s, " |
| 271 | "found %lu at direct[%d]", blockno, i); | 272 | "found %lu at direct[%d]", __func__, |
| 273 | (unsigned long)blockno, i); | ||
| 272 | return BEFS_OK; | 274 | return BEFS_OK; |
| 273 | } | 275 | } |
| 274 | } | 276 | } |
| 275 | 277 | ||
| 276 | befs_debug(sb, "---> befs_find_brun_direct() ERROR"); | 278 | befs_debug(sb, "---> %s ERROR", __func__); |
| 277 | return BEFS_ERR; | 279 | return BEFS_ERR; |
| 278 | } | 280 | } |
| 279 | 281 | ||
| @@ -316,7 +318,7 @@ befs_find_brun_indirect(struct super_block *sb, | |||
| 316 | befs_blocknr_t indirblockno = iaddr2blockno(sb, &indirect); | 318 | befs_blocknr_t indirblockno = iaddr2blockno(sb, &indirect); |
| 317 | int arraylen = befs_iaddrs_per_block(sb); | 319 | int arraylen = befs_iaddrs_per_block(sb); |
| 318 | 320 | ||
| 319 | befs_debug(sb, "---> befs_find_brun_indirect(), find %lu", blockno); | 321 | befs_debug(sb, "---> %s, find %lu", __func__, (unsigned long)blockno); |
| 320 | 322 | ||
| 321 | indir_start_blk = data->max_direct_range >> BEFS_SB(sb)->block_shift; | 323 | indir_start_blk = data->max_direct_range >> BEFS_SB(sb)->block_shift; |
| 322 | search_blk = blockno - indir_start_blk; | 324 | search_blk = blockno - indir_start_blk; |
| @@ -325,10 +327,9 @@ befs_find_brun_indirect(struct super_block *sb, | |||
| 325 | for (i = 0; i < indirect.len; i++) { | 327 | for (i = 0; i < indirect.len; i++) { |
| 326 | indirblock = befs_bread(sb, indirblockno + i); | 328 | indirblock = befs_bread(sb, indirblockno + i); |
| 327 | if (indirblock == NULL) { | 329 | if (indirblock == NULL) { |
| 328 | befs_debug(sb, | 330 | befs_debug(sb, "---> %s failed to read " |
| 329 | "---> befs_find_brun_indirect() failed to " | 331 | "disk block %lu from the indirect brun", |
| 330 | "read disk block %lu from the indirect brun", | 332 | __func__, (unsigned long)indirblockno + i); |
| 331 | indirblockno + i); | ||
| 332 | return BEFS_ERR; | 333 | return BEFS_ERR; |
| 333 | } | 334 | } |
| 334 | 335 | ||
| @@ -348,9 +349,10 @@ befs_find_brun_indirect(struct super_block *sb, | |||
| 348 | 349 | ||
| 349 | brelse(indirblock); | 350 | brelse(indirblock); |
| 350 | befs_debug(sb, | 351 | befs_debug(sb, |
| 351 | "<--- befs_find_brun_indirect() found " | 352 | "<--- %s found file block " |
| 352 | "file block %lu at indirect[%d]", | 353 | "%lu at indirect[%d]", __func__, |
| 353 | blockno, j + (i * arraylen)); | 354 | (unsigned long)blockno, |
| 355 | j + (i * arraylen)); | ||
| 354 | return BEFS_OK; | 356 | return BEFS_OK; |
| 355 | } | 357 | } |
| 356 | sum += len; | 358 | sum += len; |
| @@ -360,10 +362,10 @@ befs_find_brun_indirect(struct super_block *sb, | |||
| 360 | } | 362 | } |
| 361 | 363 | ||
| 362 | /* Only fallthrough is an error */ | 364 | /* Only fallthrough is an error */ |
| 363 | befs_error(sb, "BeFS: befs_find_brun_indirect() failed to find " | 365 | befs_error(sb, "BeFS: %s failed to find " |
| 364 | "file block %lu", blockno); | 366 | "file block %lu", __func__, (unsigned long)blockno); |
| 365 | 367 | ||
| 366 | befs_debug(sb, "<--- befs_find_brun_indirect() ERROR"); | 368 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 367 | return BEFS_ERR; | 369 | return BEFS_ERR; |
| 368 | } | 370 | } |
| 369 | 371 | ||
| @@ -444,7 +446,7 @@ befs_find_brun_dblindirect(struct super_block *sb, | |||
| 444 | size_t diblklen = iblklen * befs_iaddrs_per_block(sb) | 446 | size_t diblklen = iblklen * befs_iaddrs_per_block(sb) |
| 445 | * BEFS_DBLINDIR_BRUN_LEN; | 447 | * BEFS_DBLINDIR_BRUN_LEN; |
| 446 | 448 | ||
| 447 | befs_debug(sb, "---> befs_find_brun_dblindirect() find %lu", blockno); | 449 | befs_debug(sb, "---> %s find %lu", __func__, (unsigned long)blockno); |
| 448 | 450 | ||
| 449 | /* First, discover which of the double_indir->indir blocks | 451 | /* First, discover which of the double_indir->indir blocks |
| 450 | * contains pos. Then figure out how much of pos that | 452 | * contains pos. Then figure out how much of pos that |
| @@ -460,8 +462,9 @@ befs_find_brun_dblindirect(struct super_block *sb, | |||
| 460 | dbl_which_block = dblindir_indx / befs_iaddrs_per_block(sb); | 462 | dbl_which_block = dblindir_indx / befs_iaddrs_per_block(sb); |
| 461 | if (dbl_which_block > data->double_indirect.len) { | 463 | if (dbl_which_block > data->double_indirect.len) { |
| 462 | befs_error(sb, "The double-indirect index calculated by " | 464 | befs_error(sb, "The double-indirect index calculated by " |
| 463 | "befs_read_brun_dblindirect(), %d, is outside the range " | 465 | "%s, %d, is outside the range " |
| 464 | "of the double-indirect block", dblindir_indx); | 466 | "of the double-indirect block", __func__, |
| 467 | dblindir_indx); | ||
| 465 | return BEFS_ERR; | 468 | return BEFS_ERR; |
| 466 | } | 469 | } |
| 467 | 470 | ||
| @@ -469,10 +472,10 @@ befs_find_brun_dblindirect(struct super_block *sb, | |||
| 469 | befs_bread(sb, iaddr2blockno(sb, &data->double_indirect) + | 472 | befs_bread(sb, iaddr2blockno(sb, &data->double_indirect) + |
| 470 | dbl_which_block); | 473 | dbl_which_block); |
| 471 | if (dbl_indir_block == NULL) { | 474 | if (dbl_indir_block == NULL) { |
| 472 | befs_error(sb, "befs_read_brun_dblindirect() couldn't read the " | 475 | befs_error(sb, "%s couldn't read the " |
| 473 | "double-indirect block at blockno %lu", | 476 | "double-indirect block at blockno %lu", __func__, |
| 474 | iaddr2blockno(sb, | 477 | (unsigned long) |
| 475 | &data->double_indirect) + | 478 | iaddr2blockno(sb, &data->double_indirect) + |
| 476 | dbl_which_block); | 479 | dbl_which_block); |
| 477 | brelse(dbl_indir_block); | 480 | brelse(dbl_indir_block); |
| 478 | return BEFS_ERR; | 481 | return BEFS_ERR; |
| @@ -489,16 +492,16 @@ befs_find_brun_dblindirect(struct super_block *sb, | |||
| 489 | which_block = indir_indx / befs_iaddrs_per_block(sb); | 492 | which_block = indir_indx / befs_iaddrs_per_block(sb); |
| 490 | if (which_block > indir_run.len) { | 493 | if (which_block > indir_run.len) { |
| 491 | befs_error(sb, "The indirect index calculated by " | 494 | befs_error(sb, "The indirect index calculated by " |
| 492 | "befs_read_brun_dblindirect(), %d, is outside the range " | 495 | "%s, %d, is outside the range " |
| 493 | "of the indirect block", indir_indx); | 496 | "of the indirect block", __func__, indir_indx); |
| 494 | return BEFS_ERR; | 497 | return BEFS_ERR; |
| 495 | } | 498 | } |
| 496 | 499 | ||
| 497 | indir_block = | 500 | indir_block = |
| 498 | befs_bread(sb, iaddr2blockno(sb, &indir_run) + which_block); | 501 | befs_bread(sb, iaddr2blockno(sb, &indir_run) + which_block); |
| 499 | if (indir_block == NULL) { | 502 | if (indir_block == NULL) { |
| 500 | befs_error(sb, "befs_read_brun_dblindirect() couldn't read the " | 503 | befs_error(sb, "%s couldn't read the indirect block " |
| 501 | "indirect block at blockno %lu", | 504 | "at blockno %lu", __func__, (unsigned long) |
| 502 | iaddr2blockno(sb, &indir_run) + which_block); | 505 | iaddr2blockno(sb, &indir_run) + which_block); |
| 503 | brelse(indir_block); | 506 | brelse(indir_block); |
| 504 | return BEFS_ERR; | 507 | return BEFS_ERR; |
| @@ -519,7 +522,7 @@ befs_find_brun_dblindirect(struct super_block *sb, | |||
| 519 | run->len -= offset; | 522 | run->len -= offset; |
| 520 | 523 | ||
| 521 | befs_debug(sb, "Found file block %lu in double_indirect[%d][%d]," | 524 | befs_debug(sb, "Found file block %lu in double_indirect[%d][%d]," |
| 522 | " double_indirect_leftover = %lu", | 525 | " double_indirect_leftover = %lu", (unsigned long) |
| 523 | blockno, dblindir_indx, indir_indx, dblindir_leftover); | 526 | blockno, dblindir_indx, indir_indx, dblindir_leftover); |
| 524 | 527 | ||
| 525 | return BEFS_OK; | 528 | return BEFS_OK; |
diff --git a/fs/befs/debug.c b/fs/befs/debug.c index 622e73775c83..4de7cffcd662 100644 --- a/fs/befs/debug.c +++ b/fs/befs/debug.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | * debug functions | 10 | * debug functions |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 13 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
| 14 | 15 | ||
| 15 | #include <stdarg.h> | 16 | #include <stdarg.h> |
| @@ -23,43 +24,30 @@ | |||
| 23 | 24 | ||
| 24 | #include "befs.h" | 25 | #include "befs.h" |
| 25 | 26 | ||
| 26 | #define ERRBUFSIZE 1024 | ||
| 27 | |||
| 28 | void | 27 | void |
| 29 | befs_error(const struct super_block *sb, const char *fmt, ...) | 28 | befs_error(const struct super_block *sb, const char *fmt, ...) |
| 30 | { | 29 | { |
| 30 | struct va_format vaf; | ||
| 31 | va_list args; | 31 | va_list args; |
| 32 | char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL); | ||
| 33 | if (err_buf == NULL) { | ||
| 34 | printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); | ||
| 35 | return; | ||
| 36 | } | ||
| 37 | 32 | ||
| 38 | va_start(args, fmt); | 33 | va_start(args, fmt); |
| 39 | vsnprintf(err_buf, ERRBUFSIZE, fmt, args); | 34 | vaf.fmt = fmt; |
| 35 | vaf.va = &args; | ||
| 36 | pr_err("(%s): %pV\n", sb->s_id, &vaf); | ||
| 40 | va_end(args); | 37 | va_end(args); |
| 41 | |||
| 42 | printk(KERN_ERR "BeFS(%s): %s\n", sb->s_id, err_buf); | ||
| 43 | kfree(err_buf); | ||
| 44 | } | 38 | } |
| 45 | 39 | ||
| 46 | void | 40 | void |
| 47 | befs_warning(const struct super_block *sb, const char *fmt, ...) | 41 | befs_warning(const struct super_block *sb, const char *fmt, ...) |
| 48 | { | 42 | { |
| 43 | struct va_format vaf; | ||
| 49 | va_list args; | 44 | va_list args; |
| 50 | char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL); | ||
| 51 | if (err_buf == NULL) { | ||
| 52 | printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); | ||
| 53 | return; | ||
| 54 | } | ||
| 55 | 45 | ||
| 56 | va_start(args, fmt); | 46 | va_start(args, fmt); |
| 57 | vsnprintf(err_buf, ERRBUFSIZE, fmt, args); | 47 | vaf.fmt = fmt; |
| 48 | vaf.va = &args; | ||
| 49 | pr_warn("(%s): %pV\n", sb->s_id, &vaf); | ||
| 58 | va_end(args); | 50 | va_end(args); |
| 59 | |||
| 60 | printk(KERN_WARNING "BeFS(%s): %s\n", sb->s_id, err_buf); | ||
| 61 | |||
| 62 | kfree(err_buf); | ||
| 63 | } | 51 | } |
| 64 | 52 | ||
| 65 | void | 53 | void |
| @@ -67,25 +55,13 @@ befs_debug(const struct super_block *sb, const char *fmt, ...) | |||
| 67 | { | 55 | { |
| 68 | #ifdef CONFIG_BEFS_DEBUG | 56 | #ifdef CONFIG_BEFS_DEBUG |
| 69 | 57 | ||
| 58 | struct va_format vaf; | ||
| 70 | va_list args; | 59 | va_list args; |
| 71 | char *err_buf = NULL; | 60 | va_start(args, fmt); |
| 72 | 61 | vaf.fmt = fmt; | |
| 73 | if (BEFS_SB(sb)->mount_opts.debug) { | 62 | vaf.va = &args; |
| 74 | err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL); | 63 | pr_debug("(%s): %pV\n", sb->s_id, &vaf); |
| 75 | if (err_buf == NULL) { | 64 | va_end(args); |
| 76 | printk(KERN_ERR "could not allocate %d bytes\n", | ||
| 77 | ERRBUFSIZE); | ||
| 78 | return; | ||
| 79 | } | ||
| 80 | |||
| 81 | va_start(args, fmt); | ||
| 82 | vsnprintf(err_buf, ERRBUFSIZE, fmt, args); | ||
| 83 | va_end(args); | ||
| 84 | |||
| 85 | printk(KERN_DEBUG "BeFS(%s): %s\n", sb->s_id, err_buf); | ||
| 86 | |||
| 87 | kfree(err_buf); | ||
| 88 | } | ||
| 89 | 65 | ||
| 90 | #endif //CONFIG_BEFS_DEBUG | 66 | #endif //CONFIG_BEFS_DEBUG |
| 91 | } | 67 | } |
| @@ -109,9 +85,9 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode) | |||
| 109 | befs_debug(sb, " gid %u", fs32_to_cpu(sb, inode->gid)); | 85 | befs_debug(sb, " gid %u", fs32_to_cpu(sb, inode->gid)); |
| 110 | befs_debug(sb, " mode %08x", fs32_to_cpu(sb, inode->mode)); | 86 | befs_debug(sb, " mode %08x", fs32_to_cpu(sb, inode->mode)); |
| 111 | befs_debug(sb, " flags %08x", fs32_to_cpu(sb, inode->flags)); | 87 | befs_debug(sb, " flags %08x", fs32_to_cpu(sb, inode->flags)); |
| 112 | befs_debug(sb, " create_time %Lu", | 88 | befs_debug(sb, " create_time %llu", |
| 113 | fs64_to_cpu(sb, inode->create_time)); | 89 | fs64_to_cpu(sb, inode->create_time)); |
| 114 | befs_debug(sb, " last_modified_time %Lu", | 90 | befs_debug(sb, " last_modified_time %llu", |
| 115 | fs64_to_cpu(sb, inode->last_modified_time)); | 91 | fs64_to_cpu(sb, inode->last_modified_time)); |
| 116 | 92 | ||
| 117 | tmp_run = fsrun_to_cpu(sb, inode->parent); | 93 | tmp_run = fsrun_to_cpu(sb, inode->parent); |
| @@ -137,7 +113,7 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode) | |||
| 137 | tmp_run.allocation_group, tmp_run.start, | 113 | tmp_run.allocation_group, tmp_run.start, |
| 138 | tmp_run.len); | 114 | tmp_run.len); |
| 139 | } | 115 | } |
| 140 | befs_debug(sb, " max_direct_range %Lu", | 116 | befs_debug(sb, " max_direct_range %llu", |
| 141 | fs64_to_cpu(sb, | 117 | fs64_to_cpu(sb, |
| 142 | inode->data.datastream. | 118 | inode->data.datastream. |
| 143 | max_direct_range)); | 119 | max_direct_range)); |
| @@ -147,7 +123,7 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode) | |||
| 147 | tmp_run.allocation_group, | 123 | tmp_run.allocation_group, |
| 148 | tmp_run.start, tmp_run.len); | 124 | tmp_run.start, tmp_run.len); |
| 149 | 125 | ||
| 150 | befs_debug(sb, " max_indirect_range %Lu", | 126 | befs_debug(sb, " max_indirect_range %llu", |
| 151 | fs64_to_cpu(sb, | 127 | fs64_to_cpu(sb, |
| 152 | inode->data.datastream. | 128 | inode->data.datastream. |
| 153 | max_indirect_range)); | 129 | max_indirect_range)); |
| @@ -158,12 +134,12 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode) | |||
| 158 | tmp_run.allocation_group, tmp_run.start, | 134 | tmp_run.allocation_group, tmp_run.start, |
| 159 | tmp_run.len); | 135 | tmp_run.len); |
| 160 | 136 | ||
| 161 | befs_debug(sb, " max_double_indirect_range %Lu", | 137 | befs_debug(sb, " max_double_indirect_range %llu", |
| 162 | fs64_to_cpu(sb, | 138 | fs64_to_cpu(sb, |
| 163 | inode->data.datastream. | 139 | inode->data.datastream. |
| 164 | max_double_indirect_range)); | 140 | max_double_indirect_range)); |
| 165 | 141 | ||
| 166 | befs_debug(sb, " size %Lu", | 142 | befs_debug(sb, " size %llu", |
| 167 | fs64_to_cpu(sb, inode->data.datastream.size)); | 143 | fs64_to_cpu(sb, inode->data.datastream.size)); |
| 168 | } | 144 | } |
| 169 | 145 | ||
| @@ -191,8 +167,8 @@ befs_dump_super_block(const struct super_block *sb, befs_super_block * sup) | |||
| 191 | befs_debug(sb, " block_size %u", fs32_to_cpu(sb, sup->block_size)); | 167 | befs_debug(sb, " block_size %u", fs32_to_cpu(sb, sup->block_size)); |
| 192 | befs_debug(sb, " block_shift %u", fs32_to_cpu(sb, sup->block_shift)); | 168 | befs_debug(sb, " block_shift %u", fs32_to_cpu(sb, sup->block_shift)); |
| 193 | 169 | ||
| 194 | befs_debug(sb, " num_blocks %Lu", fs64_to_cpu(sb, sup->num_blocks)); | 170 | befs_debug(sb, " num_blocks %llu", fs64_to_cpu(sb, sup->num_blocks)); |
| 195 | befs_debug(sb, " used_blocks %Lu", fs64_to_cpu(sb, sup->used_blocks)); | 171 | befs_debug(sb, " used_blocks %llu", fs64_to_cpu(sb, sup->used_blocks)); |
| 196 | 172 | ||
| 197 | befs_debug(sb, " magic2 %08x", fs32_to_cpu(sb, sup->magic2)); | 173 | befs_debug(sb, " magic2 %08x", fs32_to_cpu(sb, sup->magic2)); |
| 198 | befs_debug(sb, " blocks_per_ag %u", | 174 | befs_debug(sb, " blocks_per_ag %u", |
| @@ -206,8 +182,8 @@ befs_dump_super_block(const struct super_block *sb, befs_super_block * sup) | |||
| 206 | befs_debug(sb, " log_blocks %u, %hu, %hu", | 182 | befs_debug(sb, " log_blocks %u, %hu, %hu", |
| 207 | tmp_run.allocation_group, tmp_run.start, tmp_run.len); | 183 | tmp_run.allocation_group, tmp_run.start, tmp_run.len); |
| 208 | 184 | ||
| 209 | befs_debug(sb, " log_start %Ld", fs64_to_cpu(sb, sup->log_start)); | 185 | befs_debug(sb, " log_start %lld", fs64_to_cpu(sb, sup->log_start)); |
| 210 | befs_debug(sb, " log_end %Ld", fs64_to_cpu(sb, sup->log_end)); | 186 | befs_debug(sb, " log_end %lld", fs64_to_cpu(sb, sup->log_end)); |
| 211 | 187 | ||
| 212 | befs_debug(sb, " magic3 %08x", fs32_to_cpu(sb, sup->magic3)); | 188 | befs_debug(sb, " magic3 %08x", fs32_to_cpu(sb, sup->magic3)); |
| 213 | 189 | ||
diff --git a/fs/befs/inode.c b/fs/befs/inode.c index 94c17f9a9576..fa4b718de597 100644 --- a/fs/befs/inode.c +++ b/fs/befs/inode.c | |||
| @@ -25,7 +25,8 @@ befs_check_inode(struct super_block *sb, befs_inode * raw_inode, | |||
| 25 | /* check magic header. */ | 25 | /* check magic header. */ |
| 26 | if (magic1 != BEFS_INODE_MAGIC1) { | 26 | if (magic1 != BEFS_INODE_MAGIC1) { |
| 27 | befs_error(sb, | 27 | befs_error(sb, |
| 28 | "Inode has a bad magic header - inode = %lu", inode); | 28 | "Inode has a bad magic header - inode = %lu", |
| 29 | (unsigned long)inode); | ||
| 29 | return BEFS_BAD_INODE; | 30 | return BEFS_BAD_INODE; |
| 30 | } | 31 | } |
| 31 | 32 | ||
| @@ -34,8 +35,8 @@ befs_check_inode(struct super_block *sb, befs_inode * raw_inode, | |||
| 34 | */ | 35 | */ |
| 35 | if (inode != iaddr2blockno(sb, &ino_num)) { | 36 | if (inode != iaddr2blockno(sb, &ino_num)) { |
| 36 | befs_error(sb, "inode blocknr field disagrees with vfs " | 37 | befs_error(sb, "inode blocknr field disagrees with vfs " |
| 37 | "VFS: %lu, Inode %lu", | 38 | "VFS: %lu, Inode %lu", (unsigned long) |
| 38 | inode, iaddr2blockno(sb, &ino_num)); | 39 | inode, (unsigned long)iaddr2blockno(sb, &ino_num)); |
| 39 | return BEFS_BAD_INODE; | 40 | return BEFS_BAD_INODE; |
| 40 | } | 41 | } |
| 41 | 42 | ||
| @@ -44,7 +45,8 @@ befs_check_inode(struct super_block *sb, befs_inode * raw_inode, | |||
| 44 | */ | 45 | */ |
| 45 | 46 | ||
| 46 | if (!(flags & BEFS_INODE_IN_USE)) { | 47 | if (!(flags & BEFS_INODE_IN_USE)) { |
| 47 | befs_error(sb, "inode is not used - inode = %lu", inode); | 48 | befs_error(sb, "inode is not used - inode = %lu", |
| 49 | (unsigned long)inode); | ||
| 48 | return BEFS_BAD_INODE; | 50 | return BEFS_BAD_INODE; |
| 49 | } | 51 | } |
| 50 | 52 | ||
diff --git a/fs/befs/io.c b/fs/befs/io.c index ddef98aa255d..0408a3d601d0 100644 --- a/fs/befs/io.c +++ b/fs/befs/io.c | |||
| @@ -30,9 +30,9 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr) | |||
| 30 | befs_blocknr_t block = 0; | 30 | befs_blocknr_t block = 0; |
| 31 | befs_sb_info *befs_sb = BEFS_SB(sb); | 31 | befs_sb_info *befs_sb = BEFS_SB(sb); |
| 32 | 32 | ||
| 33 | befs_debug(sb, "---> Enter befs_read_iaddr() " | 33 | befs_debug(sb, "---> Enter %s " |
| 34 | "[%u, %hu, %hu]", | 34 | "[%u, %hu, %hu]", __func__, iaddr.allocation_group, |
| 35 | iaddr.allocation_group, iaddr.start, iaddr.len); | 35 | iaddr.start, iaddr.len); |
| 36 | 36 | ||
| 37 | if (iaddr.allocation_group > befs_sb->num_ags) { | 37 | if (iaddr.allocation_group > befs_sb->num_ags) { |
| 38 | befs_error(sb, "BEFS: Invalid allocation group %u, max is %u", | 38 | befs_error(sb, "BEFS: Invalid allocation group %u, max is %u", |
| @@ -42,20 +42,21 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr) | |||
| 42 | 42 | ||
| 43 | block = iaddr2blockno(sb, &iaddr); | 43 | block = iaddr2blockno(sb, &iaddr); |
| 44 | 44 | ||
| 45 | befs_debug(sb, "befs_read_iaddr: offset = %lu", block); | 45 | befs_debug(sb, "%s: offset = %lu", __func__, (unsigned long)block); |
| 46 | 46 | ||
| 47 | bh = sb_bread(sb, block); | 47 | bh = sb_bread(sb, block); |
| 48 | 48 | ||
| 49 | if (bh == NULL) { | 49 | if (bh == NULL) { |
| 50 | befs_error(sb, "Failed to read block %lu", block); | 50 | befs_error(sb, "Failed to read block %lu", |
| 51 | (unsigned long)block); | ||
| 51 | goto error; | 52 | goto error; |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | befs_debug(sb, "<--- befs_read_iaddr()"); | 55 | befs_debug(sb, "<--- %s", __func__); |
| 55 | return bh; | 56 | return bh; |
| 56 | 57 | ||
| 57 | error: | 58 | error: |
| 58 | befs_debug(sb, "<--- befs_read_iaddr() ERROR"); | 59 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 59 | return NULL; | 60 | return NULL; |
| 60 | } | 61 | } |
| 61 | 62 | ||
| @@ -64,20 +65,21 @@ befs_bread(struct super_block *sb, befs_blocknr_t block) | |||
| 64 | { | 65 | { |
| 65 | struct buffer_head *bh = NULL; | 66 | struct buffer_head *bh = NULL; |
| 66 | 67 | ||
| 67 | befs_debug(sb, "---> Enter befs_read() %Lu", block); | 68 | befs_debug(sb, "---> Enter %s %lu", __func__, (unsigned long)block); |
| 68 | 69 | ||
| 69 | bh = sb_bread(sb, block); | 70 | bh = sb_bread(sb, block); |
| 70 | 71 | ||
| 71 | if (bh == NULL) { | 72 | if (bh == NULL) { |
| 72 | befs_error(sb, "Failed to read block %lu", block); | 73 | befs_error(sb, "Failed to read block %lu", |
| 74 | (unsigned long)block); | ||
| 73 | goto error; | 75 | goto error; |
| 74 | } | 76 | } |
| 75 | 77 | ||
| 76 | befs_debug(sb, "<--- befs_read()"); | 78 | befs_debug(sb, "<--- %s", __func__); |
| 77 | 79 | ||
| 78 | return bh; | 80 | return bh; |
| 79 | 81 | ||
| 80 | error: | 82 | error: |
| 81 | befs_debug(sb, "<--- befs_read() ERROR"); | 83 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 82 | return NULL; | 84 | return NULL; |
| 83 | } | 85 | } |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 845d2d690ce2..5188f1222987 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
| @@ -5,6 +5,8 @@ | |||
| 5 | * | 5 | * |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 9 | |||
| 8 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 9 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
| 10 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
| @@ -39,7 +41,6 @@ static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int) | |||
| 39 | static struct inode *befs_iget(struct super_block *, unsigned long); | 41 | static struct inode *befs_iget(struct super_block *, unsigned long); |
| 40 | static struct inode *befs_alloc_inode(struct super_block *sb); | 42 | static struct inode *befs_alloc_inode(struct super_block *sb); |
| 41 | static void befs_destroy_inode(struct inode *inode); | 43 | static void befs_destroy_inode(struct inode *inode); |
| 42 | static int befs_init_inodecache(void); | ||
| 43 | static void befs_destroy_inodecache(void); | 44 | static void befs_destroy_inodecache(void); |
| 44 | static void *befs_follow_link(struct dentry *, struct nameidata *); | 45 | static void *befs_follow_link(struct dentry *, struct nameidata *); |
| 45 | static void *befs_fast_follow_link(struct dentry *, struct nameidata *); | 46 | static void *befs_fast_follow_link(struct dentry *, struct nameidata *); |
| @@ -131,26 +132,28 @@ befs_get_block(struct inode *inode, sector_t block, | |||
| 131 | ulong disk_off; | 132 | ulong disk_off; |
| 132 | 133 | ||
| 133 | befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld", | 134 | befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld", |
| 134 | inode->i_ino, block); | 135 | (unsigned long)inode->i_ino, (long)block); |
| 135 | 136 | ||
| 136 | if (block < 0) { | 137 | if (block < 0) { |
| 137 | befs_error(sb, "befs_get_block() was asked for a block " | 138 | befs_error(sb, "befs_get_block() was asked for a block " |
| 138 | "number less than zero: block %ld in inode %lu", | 139 | "number less than zero: block %ld in inode %lu", |
| 139 | block, inode->i_ino); | 140 | (long)block, (unsigned long)inode->i_ino); |
| 140 | return -EIO; | 141 | return -EIO; |
| 141 | } | 142 | } |
| 142 | 143 | ||
| 143 | if (create) { | 144 | if (create) { |
| 144 | befs_error(sb, "befs_get_block() was asked to write to " | 145 | befs_error(sb, "befs_get_block() was asked to write to " |
| 145 | "block %ld in inode %lu", block, inode->i_ino); | 146 | "block %ld in inode %lu", (long)block, |
| 147 | (unsigned long)inode->i_ino); | ||
| 146 | return -EPERM; | 148 | return -EPERM; |
| 147 | } | 149 | } |
| 148 | 150 | ||
| 149 | res = befs_fblock2brun(sb, ds, block, &run); | 151 | res = befs_fblock2brun(sb, ds, block, &run); |
| 150 | if (res != BEFS_OK) { | 152 | if (res != BEFS_OK) { |
| 151 | befs_error(sb, | 153 | befs_error(sb, |
| 152 | "<--- befs_get_block() for inode %lu, block " | 154 | "<--- %s for inode %lu, block %ld ERROR", |
| 153 | "%ld ERROR", inode->i_ino, block); | 155 | __func__, (unsigned long)inode->i_ino, |
| 156 | (long)block); | ||
| 154 | return -EFBIG; | 157 | return -EFBIG; |
| 155 | } | 158 | } |
| 156 | 159 | ||
| @@ -158,8 +161,9 @@ befs_get_block(struct inode *inode, sector_t block, | |||
| 158 | 161 | ||
| 159 | map_bh(bh_result, inode->i_sb, disk_off); | 162 | map_bh(bh_result, inode->i_sb, disk_off); |
| 160 | 163 | ||
| 161 | befs_debug(sb, "<--- befs_get_block() for inode %lu, block %ld, " | 164 | befs_debug(sb, "<--- %s for inode %lu, block %ld, disk address %lu", |
| 162 | "disk address %lu", inode->i_ino, block, disk_off); | 165 | __func__, (unsigned long)inode->i_ino, (long)block, |
| 166 | (unsigned long)disk_off); | ||
| 163 | 167 | ||
| 164 | return 0; | 168 | return 0; |
| 165 | } | 169 | } |
| @@ -176,15 +180,15 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) | |||
| 176 | char *utfname; | 180 | char *utfname; |
| 177 | const char *name = dentry->d_name.name; | 181 | const char *name = dentry->d_name.name; |
| 178 | 182 | ||
| 179 | befs_debug(sb, "---> befs_lookup() " | 183 | befs_debug(sb, "---> %s name %s inode %ld", __func__, |
| 180 | "name %s inode %ld", dentry->d_name.name, dir->i_ino); | 184 | dentry->d_name.name, dir->i_ino); |
| 181 | 185 | ||
| 182 | /* Convert to UTF-8 */ | 186 | /* Convert to UTF-8 */ |
| 183 | if (BEFS_SB(sb)->nls) { | 187 | if (BEFS_SB(sb)->nls) { |
| 184 | ret = | 188 | ret = |
| 185 | befs_nls2utf(sb, name, strlen(name), &utfname, &utfnamelen); | 189 | befs_nls2utf(sb, name, strlen(name), &utfname, &utfnamelen); |
| 186 | if (ret < 0) { | 190 | if (ret < 0) { |
| 187 | befs_debug(sb, "<--- befs_lookup() ERROR"); | 191 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 188 | return ERR_PTR(ret); | 192 | return ERR_PTR(ret); |
| 189 | } | 193 | } |
| 190 | ret = befs_btree_find(sb, ds, utfname, &offset); | 194 | ret = befs_btree_find(sb, ds, utfname, &offset); |
| @@ -195,12 +199,12 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) | |||
| 195 | } | 199 | } |
| 196 | 200 | ||
| 197 | if (ret == BEFS_BT_NOT_FOUND) { | 201 | if (ret == BEFS_BT_NOT_FOUND) { |
| 198 | befs_debug(sb, "<--- befs_lookup() %s not found", | 202 | befs_debug(sb, "<--- %s %s not found", __func__, |
| 199 | dentry->d_name.name); | 203 | dentry->d_name.name); |
| 200 | return ERR_PTR(-ENOENT); | 204 | return ERR_PTR(-ENOENT); |
| 201 | 205 | ||
| 202 | } else if (ret != BEFS_OK || offset == 0) { | 206 | } else if (ret != BEFS_OK || offset == 0) { |
| 203 | befs_warning(sb, "<--- befs_lookup() Error"); | 207 | befs_warning(sb, "<--- %s Error", __func__); |
| 204 | return ERR_PTR(-ENODATA); | 208 | return ERR_PTR(-ENODATA); |
| 205 | } | 209 | } |
| 206 | 210 | ||
| @@ -210,7 +214,7 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) | |||
| 210 | 214 | ||
| 211 | d_add(dentry, inode); | 215 | d_add(dentry, inode); |
| 212 | 216 | ||
| 213 | befs_debug(sb, "<--- befs_lookup()"); | 217 | befs_debug(sb, "<--- %s", __func__); |
| 214 | 218 | ||
| 215 | return NULL; | 219 | return NULL; |
| 216 | } | 220 | } |
| @@ -228,26 +232,25 @@ befs_readdir(struct file *file, struct dir_context *ctx) | |||
| 228 | char keybuf[BEFS_NAME_LEN + 1]; | 232 | char keybuf[BEFS_NAME_LEN + 1]; |
| 229 | const char *dirname = file->f_path.dentry->d_name.name; | 233 | const char *dirname = file->f_path.dentry->d_name.name; |
| 230 | 234 | ||
| 231 | befs_debug(sb, "---> befs_readdir() " | 235 | befs_debug(sb, "---> %s name %s, inode %ld, ctx->pos %lld", |
| 232 | "name %s, inode %ld, ctx->pos %Ld", | 236 | __func__, dirname, inode->i_ino, ctx->pos); |
| 233 | dirname, inode->i_ino, ctx->pos); | ||
| 234 | 237 | ||
| 235 | more: | 238 | more: |
| 236 | result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1, | 239 | result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1, |
| 237 | keybuf, &keysize, &value); | 240 | keybuf, &keysize, &value); |
| 238 | 241 | ||
| 239 | if (result == BEFS_ERR) { | 242 | if (result == BEFS_ERR) { |
| 240 | befs_debug(sb, "<--- befs_readdir() ERROR"); | 243 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 241 | befs_error(sb, "IO error reading %s (inode %lu)", | 244 | befs_error(sb, "IO error reading %s (inode %lu)", |
| 242 | dirname, inode->i_ino); | 245 | dirname, inode->i_ino); |
| 243 | return -EIO; | 246 | return -EIO; |
| 244 | 247 | ||
| 245 | } else if (result == BEFS_BT_END) { | 248 | } else if (result == BEFS_BT_END) { |
| 246 | befs_debug(sb, "<--- befs_readdir() END"); | 249 | befs_debug(sb, "<--- %s END", __func__); |
| 247 | return 0; | 250 | return 0; |
| 248 | 251 | ||
| 249 | } else if (result == BEFS_BT_EMPTY) { | 252 | } else if (result == BEFS_BT_EMPTY) { |
| 250 | befs_debug(sb, "<--- befs_readdir() Empty directory"); | 253 | befs_debug(sb, "<--- %s Empty directory", __func__); |
| 251 | return 0; | 254 | return 0; |
| 252 | } | 255 | } |
| 253 | 256 | ||
| @@ -260,7 +263,7 @@ more: | |||
| 260 | result = | 263 | result = |
| 261 | befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen); | 264 | befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen); |
| 262 | if (result < 0) { | 265 | if (result < 0) { |
| 263 | befs_debug(sb, "<--- befs_readdir() ERROR"); | 266 | befs_debug(sb, "<--- %s ERROR", __func__); |
| 264 | return result; | 267 | return result; |
| 265 | } | 268 | } |
| 266 | if (!dir_emit(ctx, nlsname, nlsnamelen, | 269 | if (!dir_emit(ctx, nlsname, nlsnamelen, |
| @@ -277,7 +280,7 @@ more: | |||
| 277 | ctx->pos++; | 280 | ctx->pos++; |
| 278 | goto more; | 281 | goto more; |
| 279 | 282 | ||
| 280 | befs_debug(sb, "<--- befs_readdir() pos %Ld", ctx->pos); | 283 | befs_debug(sb, "<--- %s pos %lld", __func__, ctx->pos); |
| 281 | 284 | ||
| 282 | return 0; | 285 | return 0; |
| 283 | } | 286 | } |
| @@ -321,7 +324,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) | |||
| 321 | struct inode *inode; | 324 | struct inode *inode; |
| 322 | long ret = -EIO; | 325 | long ret = -EIO; |
| 323 | 326 | ||
| 324 | befs_debug(sb, "---> befs_read_inode() " "inode = %lu", ino); | 327 | befs_debug(sb, "---> %s inode = %lu", __func__, ino); |
| 325 | 328 | ||
| 326 | inode = iget_locked(sb, ino); | 329 | inode = iget_locked(sb, ino); |
| 327 | if (!inode) | 330 | if (!inode) |
| @@ -428,7 +431,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) | |||
| 428 | } | 431 | } |
| 429 | 432 | ||
| 430 | brelse(bh); | 433 | brelse(bh); |
| 431 | befs_debug(sb, "<--- befs_read_inode()"); | 434 | befs_debug(sb, "<--- %s", __func__); |
| 432 | unlock_new_inode(inode); | 435 | unlock_new_inode(inode); |
| 433 | return inode; | 436 | return inode; |
| 434 | 437 | ||
| @@ -437,7 +440,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) | |||
| 437 | 440 | ||
| 438 | unacquire_none: | 441 | unacquire_none: |
| 439 | iget_failed(inode); | 442 | iget_failed(inode); |
| 440 | befs_debug(sb, "<--- befs_read_inode() - Bad inode"); | 443 | befs_debug(sb, "<--- %s - Bad inode", __func__); |
| 441 | return ERR_PTR(ret); | 444 | return ERR_PTR(ret); |
| 442 | } | 445 | } |
| 443 | 446 | ||
| @@ -445,7 +448,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) | |||
| 445 | * | 448 | * |
| 446 | * Taken from NFS implementation by Al Viro. | 449 | * Taken from NFS implementation by Al Viro. |
| 447 | */ | 450 | */ |
| 448 | static int | 451 | static int __init |
| 449 | befs_init_inodecache(void) | 452 | befs_init_inodecache(void) |
| 450 | { | 453 | { |
| 451 | befs_inode_cachep = kmem_cache_create("befs_inode_cache", | 454 | befs_inode_cachep = kmem_cache_create("befs_inode_cache", |
| @@ -454,11 +457,9 @@ befs_init_inodecache(void) | |||
| 454 | SLAB_MEM_SPREAD), | 457 | SLAB_MEM_SPREAD), |
| 455 | init_once); | 458 | init_once); |
| 456 | if (befs_inode_cachep == NULL) { | 459 | if (befs_inode_cachep == NULL) { |
| 457 | printk(KERN_ERR "befs_init_inodecache: " | 460 | pr_err("%s: Couldn't initialize inode slabcache\n", __func__); |
| 458 | "Couldn't initialize inode slabcache\n"); | ||
| 459 | return -ENOMEM; | 461 | return -ENOMEM; |
| 460 | } | 462 | } |
| 461 | |||
| 462 | return 0; | 463 | return 0; |
| 463 | } | 464 | } |
| 464 | 465 | ||
| @@ -544,16 +545,16 @@ befs_utf2nls(struct super_block *sb, const char *in, | |||
| 544 | */ | 545 | */ |
| 545 | int maxlen = in_len + 1; | 546 | int maxlen = in_len + 1; |
| 546 | 547 | ||
| 547 | befs_debug(sb, "---> utf2nls()"); | 548 | befs_debug(sb, "---> %s", __func__); |
| 548 | 549 | ||
| 549 | if (!nls) { | 550 | if (!nls) { |
| 550 | befs_error(sb, "befs_utf2nls called with no NLS table loaded"); | 551 | befs_error(sb, "%s called with no NLS table loaded", __func__); |
| 551 | return -EINVAL; | 552 | return -EINVAL; |
| 552 | } | 553 | } |
| 553 | 554 | ||
| 554 | *out = result = kmalloc(maxlen, GFP_NOFS); | 555 | *out = result = kmalloc(maxlen, GFP_NOFS); |
| 555 | if (!*out) { | 556 | if (!*out) { |
| 556 | befs_error(sb, "befs_utf2nls() cannot allocate memory"); | 557 | befs_error(sb, "%s cannot allocate memory", __func__); |
| 557 | *out_len = 0; | 558 | *out_len = 0; |
| 558 | return -ENOMEM; | 559 | return -ENOMEM; |
| 559 | } | 560 | } |
| @@ -575,14 +576,14 @@ befs_utf2nls(struct super_block *sb, const char *in, | |||
| 575 | result[o] = '\0'; | 576 | result[o] = '\0'; |
| 576 | *out_len = o; | 577 | *out_len = o; |
| 577 | 578 | ||
| 578 | befs_debug(sb, "<--- utf2nls()"); | 579 | befs_debug(sb, "<--- %s", __func__); |
| 579 | 580 | ||
| 580 | return o; | 581 | return o; |
| 581 | 582 | ||
| 582 | conv_err: | 583 | conv_err: |
| 583 | befs_error(sb, "Name using character set %s contains a character that " | 584 | befs_error(sb, "Name using character set %s contains a character that " |
| 584 | "cannot be converted to unicode.", nls->charset); | 585 | "cannot be converted to unicode.", nls->charset); |
| 585 | befs_debug(sb, "<--- utf2nls()"); | 586 | befs_debug(sb, "<--- %s", __func__); |
| 586 | kfree(result); | 587 | kfree(result); |
| 587 | return -EILSEQ; | 588 | return -EILSEQ; |
| 588 | } | 589 | } |
| @@ -623,16 +624,17 @@ befs_nls2utf(struct super_block *sb, const char *in, | |||
| 623 | * in special cases */ | 624 | * in special cases */ |
| 624 | int maxlen = (3 * in_len) + 1; | 625 | int maxlen = (3 * in_len) + 1; |
| 625 | 626 | ||
| 626 | befs_debug(sb, "---> nls2utf()\n"); | 627 | befs_debug(sb, "---> %s\n", __func__); |
| 627 | 628 | ||
| 628 | if (!nls) { | 629 | if (!nls) { |
| 629 | befs_error(sb, "befs_nls2utf called with no NLS table loaded."); | 630 | befs_error(sb, "%s called with no NLS table loaded.", |
| 631 | __func__); | ||
| 630 | return -EINVAL; | 632 | return -EINVAL; |
| 631 | } | 633 | } |
| 632 | 634 | ||
| 633 | *out = result = kmalloc(maxlen, GFP_NOFS); | 635 | *out = result = kmalloc(maxlen, GFP_NOFS); |
| 634 | if (!*out) { | 636 | if (!*out) { |
| 635 | befs_error(sb, "befs_nls2utf() cannot allocate memory"); | 637 | befs_error(sb, "%s cannot allocate memory", __func__); |
| 636 | *out_len = 0; | 638 | *out_len = 0; |
| 637 | return -ENOMEM; | 639 | return -ENOMEM; |
| 638 | } | 640 | } |
| @@ -653,14 +655,14 @@ befs_nls2utf(struct super_block *sb, const char *in, | |||
| 653 | result[o] = '\0'; | 655 | result[o] = '\0'; |
| 654 | *out_len = o; | 656 | *out_len = o; |
| 655 | 657 | ||
| 656 | befs_debug(sb, "<--- nls2utf()"); | 658 | befs_debug(sb, "<--- %s", __func__); |
| 657 | 659 | ||
| 658 | return i; | 660 | return i; |
| 659 | 661 | ||
| 660 | conv_err: | 662 | conv_err: |
| 661 | befs_error(sb, "Name using charecter set %s contains a charecter that " | 663 | befs_error(sb, "Name using charecter set %s contains a charecter that " |
| 662 | "cannot be converted to unicode.", nls->charset); | 664 | "cannot be converted to unicode.", nls->charset); |
| 663 | befs_debug(sb, "<--- nls2utf()"); | 665 | befs_debug(sb, "<--- %s", __func__); |
| 664 | kfree(result); | 666 | kfree(result); |
| 665 | return -EILSEQ; | 667 | return -EILSEQ; |
| 666 | } | 668 | } |
| @@ -715,8 +717,8 @@ parse_options(char *options, befs_mount_options * opts) | |||
| 715 | if (option >= 0) | 717 | if (option >= 0) |
| 716 | uid = make_kuid(current_user_ns(), option); | 718 | uid = make_kuid(current_user_ns(), option); |
| 717 | if (!uid_valid(uid)) { | 719 | if (!uid_valid(uid)) { |
| 718 | printk(KERN_ERR "BeFS: Invalid uid %d, " | 720 | pr_err("Invalid uid %d, " |
| 719 | "using default\n", option); | 721 | "using default\n", option); |
| 720 | break; | 722 | break; |
| 721 | } | 723 | } |
| 722 | opts->uid = uid; | 724 | opts->uid = uid; |
| @@ -729,8 +731,8 @@ parse_options(char *options, befs_mount_options * opts) | |||
| 729 | if (option >= 0) | 731 | if (option >= 0) |
| 730 | gid = make_kgid(current_user_ns(), option); | 732 | gid = make_kgid(current_user_ns(), option); |
| 731 | if (!gid_valid(gid)) { | 733 | if (!gid_valid(gid)) { |
| 732 | printk(KERN_ERR "BeFS: Invalid gid %d, " | 734 | pr_err("Invalid gid %d, " |
| 733 | "using default\n", option); | 735 | "using default\n", option); |
| 734 | break; | 736 | break; |
| 735 | } | 737 | } |
| 736 | opts->gid = gid; | 738 | opts->gid = gid; |
| @@ -740,8 +742,8 @@ parse_options(char *options, befs_mount_options * opts) | |||
| 740 | kfree(opts->iocharset); | 742 | kfree(opts->iocharset); |
| 741 | opts->iocharset = match_strdup(&args[0]); | 743 | opts->iocharset = match_strdup(&args[0]); |
| 742 | if (!opts->iocharset) { | 744 | if (!opts->iocharset) { |
| 743 | printk(KERN_ERR "BeFS: allocation failure for " | 745 | pr_err("allocation failure for " |
| 744 | "iocharset string\n"); | 746 | "iocharset string\n"); |
| 745 | return 0; | 747 | return 0; |
| 746 | } | 748 | } |
| 747 | break; | 749 | break; |
| @@ -749,8 +751,8 @@ parse_options(char *options, befs_mount_options * opts) | |||
| 749 | opts->debug = 1; | 751 | opts->debug = 1; |
| 750 | break; | 752 | break; |
| 751 | default: | 753 | default: |
| 752 | printk(KERN_ERR "BeFS: Unrecognized mount option \"%s\" " | 754 | pr_err("Unrecognized mount option \"%s\" " |
| 753 | "or missing value\n", p); | 755 | "or missing value\n", p); |
| 754 | return 0; | 756 | return 0; |
| 755 | } | 757 | } |
| 756 | } | 758 | } |
| @@ -791,22 +793,20 @@ befs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 791 | 793 | ||
| 792 | save_mount_options(sb, data); | 794 | save_mount_options(sb, data); |
| 793 | 795 | ||
| 794 | sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL); | 796 | sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL); |
| 795 | if (sb->s_fs_info == NULL) { | 797 | if (sb->s_fs_info == NULL) { |
| 796 | printk(KERN_ERR | 798 | pr_err("(%s): Unable to allocate memory for private " |
| 797 | "BeFS(%s): Unable to allocate memory for private " | ||
| 798 | "portion of superblock. Bailing.\n", sb->s_id); | 799 | "portion of superblock. Bailing.\n", sb->s_id); |
| 799 | goto unacquire_none; | 800 | goto unacquire_none; |
| 800 | } | 801 | } |
| 801 | befs_sb = BEFS_SB(sb); | 802 | befs_sb = BEFS_SB(sb); |
| 802 | memset(befs_sb, 0, sizeof(befs_sb_info)); | ||
| 803 | 803 | ||
| 804 | if (!parse_options((char *) data, &befs_sb->mount_opts)) { | 804 | if (!parse_options((char *) data, &befs_sb->mount_opts)) { |
| 805 | befs_error(sb, "cannot parse mount options"); | 805 | befs_error(sb, "cannot parse mount options"); |
| 806 | goto unacquire_priv_sbp; | 806 | goto unacquire_priv_sbp; |
| 807 | } | 807 | } |
| 808 | 808 | ||
| 809 | befs_debug(sb, "---> befs_fill_super()"); | 809 | befs_debug(sb, "---> %s", __func__); |
| 810 | 810 | ||
| 811 | #ifndef CONFIG_BEFS_RW | 811 | #ifndef CONFIG_BEFS_RW |
| 812 | if (!(sb->s_flags & MS_RDONLY)) { | 812 | if (!(sb->s_flags & MS_RDONLY)) { |
| @@ -854,7 +854,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 854 | goto unacquire_priv_sbp; | 854 | goto unacquire_priv_sbp; |
| 855 | 855 | ||
| 856 | if( befs_sb->num_blocks > ~((sector_t)0) ) { | 856 | if( befs_sb->num_blocks > ~((sector_t)0) ) { |
| 857 | befs_error(sb, "blocks count: %Lu " | 857 | befs_error(sb, "blocks count: %llu " |
| 858 | "is larger than the host can use", | 858 | "is larger than the host can use", |
| 859 | befs_sb->num_blocks); | 859 | befs_sb->num_blocks); |
| 860 | goto unacquire_priv_sbp; | 860 | goto unacquire_priv_sbp; |
| @@ -924,7 +924,7 @@ befs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 924 | struct super_block *sb = dentry->d_sb; | 924 | struct super_block *sb = dentry->d_sb; |
| 925 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); | 925 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); |
| 926 | 926 | ||
| 927 | befs_debug(sb, "---> befs_statfs()"); | 927 | befs_debug(sb, "---> %s", __func__); |
| 928 | 928 | ||
| 929 | buf->f_type = BEFS_SUPER_MAGIC; | 929 | buf->f_type = BEFS_SUPER_MAGIC; |
| 930 | buf->f_bsize = sb->s_blocksize; | 930 | buf->f_bsize = sb->s_blocksize; |
| @@ -937,7 +937,7 @@ befs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 937 | buf->f_fsid.val[1] = (u32)(id >> 32); | 937 | buf->f_fsid.val[1] = (u32)(id >> 32); |
| 938 | buf->f_namelen = BEFS_NAME_LEN; | 938 | buf->f_namelen = BEFS_NAME_LEN; |
| 939 | 939 | ||
| 940 | befs_debug(sb, "<--- befs_statfs()"); | 940 | befs_debug(sb, "<--- %s", __func__); |
| 941 | 941 | ||
| 942 | return 0; | 942 | return 0; |
| 943 | } | 943 | } |
| @@ -963,7 +963,7 @@ init_befs_fs(void) | |||
| 963 | { | 963 | { |
| 964 | int err; | 964 | int err; |
| 965 | 965 | ||
| 966 | printk(KERN_INFO "BeFS version: %s\n", BEFS_VERSION); | 966 | pr_info("version: %s\n", BEFS_VERSION); |
| 967 | 967 | ||
| 968 | err = befs_init_inodecache(); | 968 | err = befs_init_inodecache(); |
| 969 | if (err) | 969 | if (err) |
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 8defc6b3f9a2..29aa5cf6639b 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c | |||
| @@ -172,7 +172,7 @@ static void bfs_evict_inode(struct inode *inode) | |||
| 172 | 172 | ||
| 173 | dprintf("ino=%08lx\n", ino); | 173 | dprintf("ino=%08lx\n", ino); |
| 174 | 174 | ||
| 175 | truncate_inode_pages(&inode->i_data, 0); | 175 | truncate_inode_pages_final(&inode->i_data); |
| 176 | invalidate_inode_buffers(inode); | 176 | invalidate_inode_buffers(inode); |
| 177 | clear_inode(inode); | 177 | clear_inode(inode); |
| 178 | 178 | ||
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 67be2951b98a..0f59799fa105 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -46,10 +46,15 @@ | |||
| 46 | #endif | 46 | #endif |
| 47 | 47 | ||
| 48 | static int load_elf_binary(struct linux_binprm *bprm); | 48 | static int load_elf_binary(struct linux_binprm *bprm); |
| 49 | static int load_elf_library(struct file *); | ||
| 50 | static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, | 49 | static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, |
| 51 | int, int, unsigned long); | 50 | int, int, unsigned long); |
| 52 | 51 | ||
| 52 | #ifdef CONFIG_USELIB | ||
| 53 | static int load_elf_library(struct file *); | ||
| 54 | #else | ||
| 55 | #define load_elf_library NULL | ||
| 56 | #endif | ||
| 57 | |||
| 53 | /* | 58 | /* |
| 54 | * If we don't support core dumping, then supply a NULL so we | 59 | * If we don't support core dumping, then supply a NULL so we |
| 55 | * don't even try. | 60 | * don't even try. |
| @@ -1005,6 +1010,7 @@ out_free_ph: | |||
| 1005 | goto out; | 1010 | goto out; |
| 1006 | } | 1011 | } |
| 1007 | 1012 | ||
| 1013 | #ifdef CONFIG_USELIB | ||
| 1008 | /* This is really simpleminded and specialized - we are loading an | 1014 | /* This is really simpleminded and specialized - we are loading an |
| 1009 | a.out library that is given an ELF header. */ | 1015 | a.out library that is given an ELF header. */ |
| 1010 | static int load_elf_library(struct file *file) | 1016 | static int load_elf_library(struct file *file) |
| @@ -1083,6 +1089,7 @@ out_free_ph: | |||
| 1083 | out: | 1089 | out: |
| 1084 | return error; | 1090 | return error; |
| 1085 | } | 1091 | } |
| 1092 | #endif /* #ifdef CONFIG_USELIB */ | ||
| 1086 | 1093 | ||
| 1087 | #ifdef CONFIG_ELF_CORE | 1094 | #ifdef CONFIG_ELF_CORE |
| 1088 | /* | 1095 | /* |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 1c740e152f38..b60500300dd7 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
| @@ -656,6 +656,7 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer, | |||
| 656 | 656 | ||
| 657 | mutex_unlock(&root->d_inode->i_mutex); | 657 | mutex_unlock(&root->d_inode->i_mutex); |
| 658 | dput(root); | 658 | dput(root); |
| 659 | break; | ||
| 659 | default: return res; | 660 | default: return res; |
| 660 | } | 661 | } |
| 661 | return count; | 662 | return count; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 1e86823a9cbd..ba0d2b05bb78 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -83,7 +83,7 @@ void kill_bdev(struct block_device *bdev) | |||
| 83 | { | 83 | { |
| 84 | struct address_space *mapping = bdev->bd_inode->i_mapping; | 84 | struct address_space *mapping = bdev->bd_inode->i_mapping; |
| 85 | 85 | ||
| 86 | if (mapping->nrpages == 0) | 86 | if (mapping->nrpages == 0 && mapping->nrshadows == 0) |
| 87 | return; | 87 | return; |
| 88 | 88 | ||
| 89 | invalidate_bh_lrus(); | 89 | invalidate_bh_lrus(); |
| @@ -419,7 +419,7 @@ static void bdev_evict_inode(struct inode *inode) | |||
| 419 | { | 419 | { |
| 420 | struct block_device *bdev = &BDEV_I(inode)->bdev; | 420 | struct block_device *bdev = &BDEV_I(inode)->bdev; |
| 421 | struct list_head *p; | 421 | struct list_head *p; |
| 422 | truncate_inode_pages(&inode->i_data, 0); | 422 | truncate_inode_pages_final(&inode->i_data); |
| 423 | invalidate_inode_buffers(inode); /* is it needed here? */ | 423 | invalidate_inode_buffers(inode); /* is it needed here? */ |
| 424 | clear_inode(inode); | 424 | clear_inode(inode); |
| 425 | spin_lock(&bdev_lock); | 425 | spin_lock(&bdev_lock); |
| @@ -1523,7 +1523,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 1523 | ssize_t err; | 1523 | ssize_t err; |
| 1524 | 1524 | ||
| 1525 | err = generic_write_sync(file, pos, ret); | 1525 | err = generic_write_sync(file, pos, ret); |
| 1526 | if (err < 0 && ret > 0) | 1526 | if (err < 0) |
| 1527 | ret = err; | 1527 | ret = err; |
| 1528 | } | 1528 | } |
| 1529 | blk_finish_plug(&plug); | 1529 | blk_finish_plug(&plug); |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b01fb6c527e3..d43c544d3b68 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
| @@ -472,7 +472,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, | |||
| 472 | rcu_read_lock(); | 472 | rcu_read_lock(); |
| 473 | page = radix_tree_lookup(&mapping->page_tree, pg_index); | 473 | page = radix_tree_lookup(&mapping->page_tree, pg_index); |
| 474 | rcu_read_unlock(); | 474 | rcu_read_unlock(); |
| 475 | if (page) { | 475 | if (page && !radix_tree_exceptional_entry(page)) { |
| 476 | misses++; | 476 | misses++; |
| 477 | if (misses > 4) | 477 | if (misses > 4) |
| 478 | break; | 478 | break; |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0165b8672f09..7331a230e30b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -1797,7 +1797,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
| 1797 | BTRFS_I(inode)->last_sub_trans = root->log_transid; | 1797 | BTRFS_I(inode)->last_sub_trans = root->log_transid; |
| 1798 | if (num_written > 0) { | 1798 | if (num_written > 0) { |
| 1799 | err = generic_write_sync(file, pos, num_written); | 1799 | err = generic_write_sync(file, pos, num_written); |
| 1800 | if (err < 0 && num_written > 0) | 1800 | if (err < 0) |
| 1801 | num_written = err; | 1801 | num_written = err; |
| 1802 | } | 1802 | } |
| 1803 | 1803 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d3d44486290b..49ec1398879f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -4593,7 +4593,7 @@ static void evict_inode_truncate_pages(struct inode *inode) | |||
| 4593 | struct rb_node *node; | 4593 | struct rb_node *node; |
| 4594 | 4594 | ||
| 4595 | ASSERT(inode->i_state & I_FREEING); | 4595 | ASSERT(inode->i_state & I_FREEING); |
| 4596 | truncate_inode_pages(&inode->i_data, 0); | 4596 | truncate_inode_pages_final(&inode->i_data); |
| 4597 | 4597 | ||
| 4598 | write_lock(&map_tree->lock); | 4598 | write_lock(&map_tree->lock); |
| 4599 | while (!RB_EMPTY_ROOT(&map_tree->map)) { | 4599 | while (!RB_EMPTY_ROOT(&map_tree->map)) { |
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index ebaff368120d..4b1fb5ca65b8 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
| @@ -265,24 +265,22 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object, | |||
| 265 | goto nomem_monitor; | 265 | goto nomem_monitor; |
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | ret = add_to_page_cache(newpage, bmapping, | 268 | ret = add_to_page_cache_lru(newpage, bmapping, |
| 269 | netpage->index, cachefiles_gfp); | 269 | netpage->index, cachefiles_gfp); |
| 270 | if (ret == 0) | 270 | if (ret == 0) |
| 271 | goto installed_new_backing_page; | 271 | goto installed_new_backing_page; |
| 272 | if (ret != -EEXIST) | 272 | if (ret != -EEXIST) |
| 273 | goto nomem_page; | 273 | goto nomem_page; |
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | /* we've installed a new backing page, so now we need to add it | 276 | /* we've installed a new backing page, so now we need to start |
| 277 | * to the LRU list and start it reading */ | 277 | * it reading */ |
| 278 | installed_new_backing_page: | 278 | installed_new_backing_page: |
| 279 | _debug("- new %p", newpage); | 279 | _debug("- new %p", newpage); |
| 280 | 280 | ||
| 281 | backpage = newpage; | 281 | backpage = newpage; |
| 282 | newpage = NULL; | 282 | newpage = NULL; |
| 283 | 283 | ||
| 284 | lru_cache_add_file(backpage); | ||
| 285 | |||
| 286 | read_backing_page: | 284 | read_backing_page: |
| 287 | ret = bmapping->a_ops->readpage(NULL, backpage); | 285 | ret = bmapping->a_ops->readpage(NULL, backpage); |
| 288 | if (ret < 0) | 286 | if (ret < 0) |
| @@ -510,24 +508,23 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
| 510 | goto nomem; | 508 | goto nomem; |
| 511 | } | 509 | } |
| 512 | 510 | ||
| 513 | ret = add_to_page_cache(newpage, bmapping, | 511 | ret = add_to_page_cache_lru(newpage, bmapping, |
| 514 | netpage->index, cachefiles_gfp); | 512 | netpage->index, |
| 513 | cachefiles_gfp); | ||
| 515 | if (ret == 0) | 514 | if (ret == 0) |
| 516 | goto installed_new_backing_page; | 515 | goto installed_new_backing_page; |
| 517 | if (ret != -EEXIST) | 516 | if (ret != -EEXIST) |
| 518 | goto nomem; | 517 | goto nomem; |
| 519 | } | 518 | } |
| 520 | 519 | ||
| 521 | /* we've installed a new backing page, so now we need to add it | 520 | /* we've installed a new backing page, so now we need |
| 522 | * to the LRU list and start it reading */ | 521 | * to start it reading */ |
| 523 | installed_new_backing_page: | 522 | installed_new_backing_page: |
| 524 | _debug("- new %p", newpage); | 523 | _debug("- new %p", newpage); |
| 525 | 524 | ||
| 526 | backpage = newpage; | 525 | backpage = newpage; |
| 527 | newpage = NULL; | 526 | newpage = NULL; |
| 528 | 527 | ||
| 529 | lru_cache_add_file(backpage); | ||
| 530 | |||
| 531 | reread_backing_page: | 528 | reread_backing_page: |
| 532 | ret = bmapping->a_ops->readpage(NULL, backpage); | 529 | ret = bmapping->a_ops->readpage(NULL, backpage); |
| 533 | if (ret < 0) | 530 | if (ret < 0) |
| @@ -538,8 +535,8 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
| 538 | monitor_backing_page: | 535 | monitor_backing_page: |
| 539 | _debug("- monitor add"); | 536 | _debug("- monitor add"); |
| 540 | 537 | ||
| 541 | ret = add_to_page_cache(netpage, op->mapping, netpage->index, | 538 | ret = add_to_page_cache_lru(netpage, op->mapping, |
| 542 | cachefiles_gfp); | 539 | netpage->index, cachefiles_gfp); |
| 543 | if (ret < 0) { | 540 | if (ret < 0) { |
| 544 | if (ret == -EEXIST) { | 541 | if (ret == -EEXIST) { |
| 545 | page_cache_release(netpage); | 542 | page_cache_release(netpage); |
| @@ -549,8 +546,6 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
| 549 | goto nomem; | 546 | goto nomem; |
| 550 | } | 547 | } |
| 551 | 548 | ||
| 552 | lru_cache_add_file(netpage); | ||
| 553 | |||
| 554 | /* install a monitor */ | 549 | /* install a monitor */ |
| 555 | page_cache_get(netpage); | 550 | page_cache_get(netpage); |
| 556 | monitor->netfs_page = netpage; | 551 | monitor->netfs_page = netpage; |
| @@ -613,8 +608,8 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
| 613 | backing_page_already_uptodate: | 608 | backing_page_already_uptodate: |
| 614 | _debug("- uptodate"); | 609 | _debug("- uptodate"); |
| 615 | 610 | ||
| 616 | ret = add_to_page_cache(netpage, op->mapping, netpage->index, | 611 | ret = add_to_page_cache_lru(netpage, op->mapping, |
| 617 | cachefiles_gfp); | 612 | netpage->index, cachefiles_gfp); |
| 618 | if (ret < 0) { | 613 | if (ret < 0) { |
| 619 | if (ret == -EEXIST) { | 614 | if (ret == -EEXIST) { |
| 620 | page_cache_release(netpage); | 615 | page_cache_release(netpage); |
| @@ -631,8 +626,6 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, | |||
| 631 | 626 | ||
| 632 | fscache_mark_page_cached(op, netpage); | 627 | fscache_mark_page_cached(op, netpage); |
| 633 | 628 | ||
| 634 | lru_cache_add_file(netpage); | ||
| 635 | |||
| 636 | /* the netpage is unlocked and marked up to date here */ | 629 | /* the netpage is unlocked and marked up to date here */ |
| 637 | fscache_end_io(op, netpage, 0); | 630 | fscache_end_io(op, netpage, 0); |
| 638 | page_cache_release(netpage); | 631 | page_cache_release(netpage); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 849f6132b327..ab8ad2546c3e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -286,7 +286,7 @@ cifs_destroy_inode(struct inode *inode) | |||
| 286 | static void | 286 | static void |
| 287 | cifs_evict_inode(struct inode *inode) | 287 | cifs_evict_inode(struct inode *inode) |
| 288 | { | 288 | { |
| 289 | truncate_inode_pages(&inode->i_data, 0); | 289 | truncate_inode_pages_final(&inode->i_data); |
| 290 | clear_inode(inode); | 290 | clear_inode(inode); |
| 291 | cifs_fscache_release_inode_cookie(inode); | 291 | cifs_fscache_release_inode_cookie(inode); |
| 292 | } | 292 | } |
| @@ -1005,7 +1005,7 @@ cifs_init_once(void *inode) | |||
| 1005 | init_rwsem(&cifsi->lock_sem); | 1005 | init_rwsem(&cifsi->lock_sem); |
| 1006 | } | 1006 | } |
| 1007 | 1007 | ||
| 1008 | static int | 1008 | static int __init |
| 1009 | cifs_init_inodecache(void) | 1009 | cifs_init_inodecache(void) |
| 1010 | { | 1010 | { |
| 1011 | cifs_inode_cachep = kmem_cache_create("cifs_inode_cache", | 1011 | cifs_inode_cachep = kmem_cache_create("cifs_inode_cache", |
diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h index b7143cf783ac..381c993b1427 100644 --- a/fs/coda/coda_int.h +++ b/fs/coda/coda_int.h | |||
| @@ -10,7 +10,7 @@ extern int coda_hard; | |||
| 10 | extern int coda_fake_statfs; | 10 | extern int coda_fake_statfs; |
| 11 | 11 | ||
| 12 | void coda_destroy_inodecache(void); | 12 | void coda_destroy_inodecache(void); |
| 13 | int coda_init_inodecache(void); | 13 | int __init coda_init_inodecache(void); |
| 14 | int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); | 14 | int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); |
| 15 | void coda_sysctl_init(void); | 15 | void coda_sysctl_init(void); |
| 16 | void coda_sysctl_clean(void); | 16 | void coda_sysctl_clean(void); |
diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 506de34a4ef3..626abc02b694 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c | |||
| @@ -73,7 +73,7 @@ static void init_once(void *foo) | |||
| 73 | inode_init_once(&ei->vfs_inode); | 73 | inode_init_once(&ei->vfs_inode); |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | int coda_init_inodecache(void) | 76 | int __init coda_init_inodecache(void) |
| 77 | { | 77 | { |
| 78 | coda_inode_cachep = kmem_cache_create("coda_inode_cache", | 78 | coda_inode_cachep = kmem_cache_create("coda_inode_cache", |
| 79 | sizeof(struct coda_inode_info), | 79 | sizeof(struct coda_inode_info), |
| @@ -250,7 +250,7 @@ static void coda_put_super(struct super_block *sb) | |||
| 250 | 250 | ||
| 251 | static void coda_evict_inode(struct inode *inode) | 251 | static void coda_evict_inode(struct inode *inode) |
| 252 | { | 252 | { |
| 253 | truncate_inode_pages(&inode->i_data, 0); | 253 | truncate_inode_pages_final(&inode->i_data); |
| 254 | clear_inode(inode); | 254 | clear_inode(inode); |
| 255 | coda_cache_clear_inode(inode); | 255 | coda_cache_clear_inode(inode); |
| 256 | } | 256 | } |
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 06610cf94d57..a1f801c14fbc 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c | |||
| @@ -195,8 +195,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i | |||
| 195 | struct page *page = NULL; | 195 | struct page *page = NULL; |
| 196 | 196 | ||
| 197 | if (blocknr + i < devsize) { | 197 | if (blocknr + i < devsize) { |
| 198 | page = read_mapping_page_async(mapping, blocknr + i, | 198 | page = read_mapping_page(mapping, blocknr + i, NULL); |
| 199 | NULL); | ||
| 200 | /* synchronous error? */ | 199 | /* synchronous error? */ |
| 201 | if (IS_ERR(page)) | 200 | if (IS_ERR(page)) |
| 202 | page = NULL; | 201 | page = NULL; |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 160a5489a939..6e6bff375244 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -664,7 +664,6 @@ static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, | |||
| 664 | goto out; | 664 | goto out; |
| 665 | sector = start_sector << (sdio->blkbits - 9); | 665 | sector = start_sector << (sdio->blkbits - 9); |
| 666 | nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev)); | 666 | nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev)); |
| 667 | nr_pages = min(nr_pages, BIO_MAX_PAGES); | ||
| 668 | BUG_ON(nr_pages <= 0); | 667 | BUG_ON(nr_pages <= 0); |
| 669 | dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); | 668 | dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); |
| 670 | sdio->boundary = 0; | 669 | sdio->boundary = 0; |
diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 9fd702f5bfb2..9280202e488c 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c | |||
| @@ -59,10 +59,22 @@ int drop_caches_sysctl_handler(ctl_table *table, int write, | |||
| 59 | if (ret) | 59 | if (ret) |
| 60 | return ret; | 60 | return ret; |
| 61 | if (write) { | 61 | if (write) { |
| 62 | if (sysctl_drop_caches & 1) | 62 | static int stfu; |
| 63 | |||
| 64 | if (sysctl_drop_caches & 1) { | ||
| 63 | iterate_supers(drop_pagecache_sb, NULL); | 65 | iterate_supers(drop_pagecache_sb, NULL); |
| 64 | if (sysctl_drop_caches & 2) | 66 | count_vm_event(DROP_PAGECACHE); |
| 67 | } | ||
| 68 | if (sysctl_drop_caches & 2) { | ||
| 65 | drop_slab(); | 69 | drop_slab(); |
| 70 | count_vm_event(DROP_SLAB); | ||
| 71 | } | ||
| 72 | if (!stfu) { | ||
| 73 | pr_info("%s (%d): drop_caches: %d\n", | ||
| 74 | current->comm, task_pid_nr(current), | ||
| 75 | sysctl_drop_caches); | ||
| 76 | } | ||
| 77 | stfu |= sysctl_drop_caches & 4; | ||
| 66 | } | 78 | } |
| 67 | return 0; | 79 | return 0; |
| 68 | } | 80 | } |
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index e879cf8ff0b1..afa1b81c3418 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
| @@ -132,7 +132,7 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 132 | */ | 132 | */ |
| 133 | static void ecryptfs_evict_inode(struct inode *inode) | 133 | static void ecryptfs_evict_inode(struct inode *inode) |
| 134 | { | 134 | { |
| 135 | truncate_inode_pages(&inode->i_data, 0); | 135 | truncate_inode_pages_final(&inode->i_data); |
| 136 | clear_inode(inode); | 136 | clear_inode(inode); |
| 137 | iput(ecryptfs_inode_to_lower(inode)); | 137 | iput(ecryptfs_inode_to_lower(inode)); |
| 138 | } | 138 | } |
diff --git a/fs/efs/super.c b/fs/efs/super.c index 50215bbd6463..f8def1acf08c 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c | |||
| @@ -91,7 +91,7 @@ static void init_once(void *foo) | |||
| 91 | inode_init_once(&ei->vfs_inode); | 91 | inode_init_once(&ei->vfs_inode); |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | static int init_inodecache(void) | 94 | static int __init init_inodecache(void) |
| 95 | { | 95 | { |
| 96 | efs_inode_cachep = kmem_cache_create("efs_inode_cache", | 96 | efs_inode_cachep = kmem_cache_create("efs_inode_cache", |
| 97 | sizeof(struct efs_inode_info), | 97 | sizeof(struct efs_inode_info), |
| @@ -97,6 +97,7 @@ static inline void put_binfmt(struct linux_binfmt * fmt) | |||
| 97 | module_put(fmt->module); | 97 | module_put(fmt->module); |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | #ifdef CONFIG_USELIB | ||
| 100 | /* | 101 | /* |
| 101 | * Note that a shared library must be both readable and executable due to | 102 | * Note that a shared library must be both readable and executable due to |
| 102 | * security reasons. | 103 | * security reasons. |
| @@ -156,6 +157,7 @@ exit: | |||
| 156 | out: | 157 | out: |
| 157 | return error; | 158 | return error; |
| 158 | } | 159 | } |
| 160 | #endif /* #ifdef CONFIG_USELIB */ | ||
| 159 | 161 | ||
| 160 | #ifdef CONFIG_MMU | 162 | #ifdef CONFIG_MMU |
| 161 | /* | 163 | /* |
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index ee4317faccb1..d1c244d67667 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
| @@ -1486,7 +1486,7 @@ void exofs_evict_inode(struct inode *inode) | |||
| 1486 | struct ore_io_state *ios; | 1486 | struct ore_io_state *ios; |
| 1487 | int ret; | 1487 | int ret; |
| 1488 | 1488 | ||
| 1489 | truncate_inode_pages(&inode->i_data, 0); | 1489 | truncate_inode_pages_final(&inode->i_data); |
| 1490 | 1490 | ||
| 1491 | /* TODO: should do better here */ | 1491 | /* TODO: should do better here */ |
| 1492 | if (inode->i_nlink || is_bad_inode(inode)) | 1492 | if (inode->i_nlink || is_bad_inode(inode)) |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 94ed36849b71..b1d2a4675d42 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
| @@ -78,7 +78,7 @@ void ext2_evict_inode(struct inode * inode) | |||
| 78 | dquot_drop(inode); | 78 | dquot_drop(inode); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | truncate_inode_pages(&inode->i_data, 0); | 81 | truncate_inode_pages_final(&inode->i_data); |
| 82 | 82 | ||
| 83 | if (want_delete) { | 83 | if (want_delete) { |
| 84 | sb_start_intwrite(inode->i_sb); | 84 | sb_start_intwrite(inode->i_sb); |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 384b6ebb655f..efce2bbfb5e5 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
| @@ -228,7 +228,7 @@ void ext3_evict_inode (struct inode *inode) | |||
| 228 | log_wait_commit(journal, commit_tid); | 228 | log_wait_commit(journal, commit_tid); |
| 229 | filemap_write_and_wait(&inode->i_data); | 229 | filemap_write_and_wait(&inode->i_data); |
| 230 | } | 230 | } |
| 231 | truncate_inode_pages(&inode->i_data, 0); | 231 | truncate_inode_pages_final(&inode->i_data); |
| 232 | 232 | ||
| 233 | ext3_discard_reservation(inode); | 233 | ext3_discard_reservation(inode); |
| 234 | rsv = ei->i_block_alloc_info; | 234 | rsv = ei->i_block_alloc_info; |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 1a5073959f32..6db7f7db7777 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
| @@ -153,7 +153,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 153 | ssize_t err; | 153 | ssize_t err; |
| 154 | 154 | ||
| 155 | err = generic_write_sync(file, iocb->ki_pos - ret, ret); | 155 | err = generic_write_sync(file, iocb->ki_pos - ret, ret); |
| 156 | if (err < 0 && ret > 0) | 156 | if (err < 0) |
| 157 | ret = err; | 157 | ret = err; |
| 158 | } | 158 | } |
| 159 | blk_finish_plug(&plug); | 159 | blk_finish_plug(&plug); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 24bfd7ff3049..175c3f933816 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -215,7 +215,7 @@ void ext4_evict_inode(struct inode *inode) | |||
| 215 | jbd2_complete_transaction(journal, commit_tid); | 215 | jbd2_complete_transaction(journal, commit_tid); |
| 216 | filemap_write_and_wait(&inode->i_data); | 216 | filemap_write_and_wait(&inode->i_data); |
| 217 | } | 217 | } |
| 218 | truncate_inode_pages(&inode->i_data, 0); | 218 | truncate_inode_pages_final(&inode->i_data); |
| 219 | 219 | ||
| 220 | WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); | 220 | WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); |
| 221 | goto no_delete; | 221 | goto no_delete; |
| @@ -226,7 +226,7 @@ void ext4_evict_inode(struct inode *inode) | |||
| 226 | 226 | ||
| 227 | if (ext4_should_order_data(inode)) | 227 | if (ext4_should_order_data(inode)) |
| 228 | ext4_begin_ordered_truncate(inode, 0); | 228 | ext4_begin_ordered_truncate(inode, 0); |
| 229 | truncate_inode_pages(&inode->i_data, 0); | 229 | truncate_inode_pages_final(&inode->i_data); |
| 230 | 230 | ||
| 231 | WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); | 231 | WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); |
| 232 | if (is_bad_inode(inode)) | 232 | if (is_bad_inode(inode)) |
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 4d67ed736dca..28cea76d78c6 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c | |||
| @@ -260,7 +260,7 @@ void f2fs_evict_inode(struct inode *inode) | |||
| 260 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 260 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
| 261 | 261 | ||
| 262 | trace_f2fs_evict_inode(inode); | 262 | trace_f2fs_evict_inode(inode); |
| 263 | truncate_inode_pages(&inode->i_data, 0); | 263 | truncate_inode_pages_final(&inode->i_data); |
| 264 | 264 | ||
| 265 | if (inode->i_ino == F2FS_NODE_INO(sbi) || | 265 | if (inode->i_ino == F2FS_NODE_INO(sbi) || |
| 266 | inode->i_ino == F2FS_META_INO(sbi)) | 266 | inode->i_ino == F2FS_META_INO(sbi)) |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 854b578f6695..c68d9f27135e 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
| @@ -490,7 +490,7 @@ EXPORT_SYMBOL_GPL(fat_build_inode); | |||
| 490 | 490 | ||
| 491 | static void fat_evict_inode(struct inode *inode) | 491 | static void fat_evict_inode(struct inode *inode) |
| 492 | { | 492 | { |
| 493 | truncate_inode_pages(&inode->i_data, 0); | 493 | truncate_inode_pages_final(&inode->i_data); |
| 494 | if (!inode->i_nlink) { | 494 | if (!inode->i_nlink) { |
| 495 | inode->i_size = 0; | 495 | inode->i_size = 0; |
| 496 | fat_truncate_blocks(inode, 0); | 496 | fat_truncate_blocks(inode, 0); |
diff --git a/fs/filesystems.c b/fs/filesystems.c index 92567d95ba6a..5797d45a78cb 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c | |||
| @@ -121,6 +121,7 @@ int unregister_filesystem(struct file_system_type * fs) | |||
| 121 | 121 | ||
| 122 | EXPORT_SYMBOL(unregister_filesystem); | 122 | EXPORT_SYMBOL(unregister_filesystem); |
| 123 | 123 | ||
| 124 | #ifdef CONFIG_SYSFS_SYSCALL | ||
| 124 | static int fs_index(const char __user * __name) | 125 | static int fs_index(const char __user * __name) |
| 125 | { | 126 | { |
| 126 | struct file_system_type * tmp; | 127 | struct file_system_type * tmp; |
| @@ -199,6 +200,7 @@ SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2) | |||
| 199 | } | 200 | } |
| 200 | return retval; | 201 | return retval; |
| 201 | } | 202 | } |
| 203 | #endif | ||
| 202 | 204 | ||
| 203 | int __init get_filesystem_list(char *buf) | 205 | int __init get_filesystem_list(char *buf) |
| 204 | { | 206 | { |
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index f47df72cef17..363e3ae25f6b 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c | |||
| @@ -354,7 +354,7 @@ static void vxfs_i_callback(struct rcu_head *head) | |||
| 354 | void | 354 | void |
| 355 | vxfs_evict_inode(struct inode *ip) | 355 | vxfs_evict_inode(struct inode *ip) |
| 356 | { | 356 | { |
| 357 | truncate_inode_pages(&ip->i_data, 0); | 357 | truncate_inode_pages_final(&ip->i_data); |
| 358 | clear_inode(ip); | 358 | clear_inode(ip); |
| 359 | call_rcu(&ip->i_rcu, vxfs_i_callback); | 359 | call_rcu(&ip->i_rcu, vxfs_i_callback); |
| 360 | } | 360 | } |
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 25d4099a4aea..99c7f0a37af4 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c | |||
| @@ -192,7 +192,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp) | |||
| 192 | * vxfs_lookup - lookup pathname component | 192 | * vxfs_lookup - lookup pathname component |
| 193 | * @dip: dir in which we lookup | 193 | * @dip: dir in which we lookup |
| 194 | * @dp: dentry we lookup | 194 | * @dp: dentry we lookup |
| 195 | * @nd: lookup nameidata | 195 | * @flags: lookup flags |
| 196 | * | 196 | * |
| 197 | * Description: | 197 | * Description: |
| 198 | * vxfs_lookup tries to lookup the pathname component described | 198 | * vxfs_lookup tries to lookup the pathname component described |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index d754e3cf99a8..a16315957ef3 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -89,16 +89,29 @@ static inline struct inode *wb_inode(struct list_head *head) | |||
| 89 | #define CREATE_TRACE_POINTS | 89 | #define CREATE_TRACE_POINTS |
| 90 | #include <trace/events/writeback.h> | 90 | #include <trace/events/writeback.h> |
| 91 | 91 | ||
| 92 | static void bdi_wakeup_thread(struct backing_dev_info *bdi) | ||
| 93 | { | ||
| 94 | spin_lock_bh(&bdi->wb_lock); | ||
| 95 | if (test_bit(BDI_registered, &bdi->state)) | ||
| 96 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); | ||
| 97 | spin_unlock_bh(&bdi->wb_lock); | ||
| 98 | } | ||
| 99 | |||
| 92 | static void bdi_queue_work(struct backing_dev_info *bdi, | 100 | static void bdi_queue_work(struct backing_dev_info *bdi, |
| 93 | struct wb_writeback_work *work) | 101 | struct wb_writeback_work *work) |
| 94 | { | 102 | { |
| 95 | trace_writeback_queue(bdi, work); | 103 | trace_writeback_queue(bdi, work); |
| 96 | 104 | ||
| 97 | spin_lock_bh(&bdi->wb_lock); | 105 | spin_lock_bh(&bdi->wb_lock); |
| 106 | if (!test_bit(BDI_registered, &bdi->state)) { | ||
| 107 | if (work->done) | ||
| 108 | complete(work->done); | ||
| 109 | goto out_unlock; | ||
| 110 | } | ||
| 98 | list_add_tail(&work->list, &bdi->work_list); | 111 | list_add_tail(&work->list, &bdi->work_list); |
| 99 | spin_unlock_bh(&bdi->wb_lock); | ||
| 100 | |||
| 101 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); | 112 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); |
| 113 | out_unlock: | ||
| 114 | spin_unlock_bh(&bdi->wb_lock); | ||
| 102 | } | 115 | } |
| 103 | 116 | ||
| 104 | static void | 117 | static void |
| @@ -114,7 +127,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, | |||
| 114 | work = kzalloc(sizeof(*work), GFP_ATOMIC); | 127 | work = kzalloc(sizeof(*work), GFP_ATOMIC); |
| 115 | if (!work) { | 128 | if (!work) { |
| 116 | trace_writeback_nowork(bdi); | 129 | trace_writeback_nowork(bdi); |
| 117 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); | 130 | bdi_wakeup_thread(bdi); |
| 118 | return; | 131 | return; |
| 119 | } | 132 | } |
| 120 | 133 | ||
| @@ -161,7 +174,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi) | |||
| 161 | * writeback as soon as there is no other work to do. | 174 | * writeback as soon as there is no other work to do. |
| 162 | */ | 175 | */ |
| 163 | trace_writeback_wake_background(bdi); | 176 | trace_writeback_wake_background(bdi); |
| 164 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); | 177 | bdi_wakeup_thread(bdi); |
| 165 | } | 178 | } |
| 166 | 179 | ||
| 167 | /* | 180 | /* |
| @@ -1017,7 +1030,7 @@ void bdi_writeback_workfn(struct work_struct *work) | |||
| 1017 | current->flags |= PF_SWAPWRITE; | 1030 | current->flags |= PF_SWAPWRITE; |
| 1018 | 1031 | ||
| 1019 | if (likely(!current_is_workqueue_rescuer() || | 1032 | if (likely(!current_is_workqueue_rescuer() || |
| 1020 | list_empty(&bdi->bdi_list))) { | 1033 | !test_bit(BDI_registered, &bdi->state))) { |
| 1021 | /* | 1034 | /* |
| 1022 | * The normal path. Keep writing back @bdi until its | 1035 | * The normal path. Keep writing back @bdi until its |
| 1023 | * work_list is empty. Note that this path is also taken | 1036 | * work_list is empty. Note that this path is also taken |
| @@ -1039,10 +1052,10 @@ void bdi_writeback_workfn(struct work_struct *work) | |||
| 1039 | trace_writeback_pages_written(pages_written); | 1052 | trace_writeback_pages_written(pages_written); |
| 1040 | } | 1053 | } |
| 1041 | 1054 | ||
| 1042 | if (!list_empty(&bdi->work_list) || | 1055 | if (!list_empty(&bdi->work_list)) |
| 1043 | (wb_has_dirty_io(wb) && dirty_writeback_interval)) | 1056 | mod_delayed_work(bdi_wq, &wb->dwork, 0); |
| 1044 | queue_delayed_work(bdi_wq, &wb->dwork, | 1057 | else if (wb_has_dirty_io(wb) && dirty_writeback_interval) |
| 1045 | msecs_to_jiffies(dirty_writeback_interval * 10)); | 1058 | bdi_wakeup_thread_delayed(bdi); |
| 1046 | 1059 | ||
| 1047 | current->flags &= ~PF_SWAPWRITE; | 1060 | current->flags &= ~PF_SWAPWRITE; |
| 1048 | } | 1061 | } |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index d468643a68b2..9c761b611c54 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
| @@ -123,7 +123,7 @@ static void fuse_destroy_inode(struct inode *inode) | |||
| 123 | 123 | ||
| 124 | static void fuse_evict_inode(struct inode *inode) | 124 | static void fuse_evict_inode(struct inode *inode) |
| 125 | { | 125 | { |
| 126 | truncate_inode_pages(&inode->i_data, 0); | 126 | truncate_inode_pages_final(&inode->i_data); |
| 127 | clear_inode(inode); | 127 | clear_inode(inode); |
| 128 | if (inode->i_sb->s_flags & MS_ACTIVE) { | 128 | if (inode->i_sb->s_flags & MS_ACTIVE) { |
| 129 | struct fuse_conn *fc = get_fuse_conn(inode); | 129 | struct fuse_conn *fc = get_fuse_conn(inode); |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 60f60f6181f3..24410cd9a82a 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
| @@ -1558,7 +1558,7 @@ out_unlock: | |||
| 1558 | fs_warn(sdp, "gfs2_evict_inode: %d\n", error); | 1558 | fs_warn(sdp, "gfs2_evict_inode: %d\n", error); |
| 1559 | out: | 1559 | out: |
| 1560 | /* Case 3 starts here */ | 1560 | /* Case 3 starts here */ |
| 1561 | truncate_inode_pages(&inode->i_data, 0); | 1561 | truncate_inode_pages_final(&inode->i_data); |
| 1562 | gfs2_rs_delete(ip, NULL); | 1562 | gfs2_rs_delete(ip, NULL); |
| 1563 | gfs2_ordered_del_inode(ip); | 1563 | gfs2_ordered_del_inode(ip); |
| 1564 | clear_inode(inode); | 1564 | clear_inode(inode); |
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 380ab31b5e0f..9e2fecd62f62 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c | |||
| @@ -547,7 +547,7 @@ out: | |||
| 547 | 547 | ||
| 548 | void hfs_evict_inode(struct inode *inode) | 548 | void hfs_evict_inode(struct inode *inode) |
| 549 | { | 549 | { |
| 550 | truncate_inode_pages(&inode->i_data, 0); | 550 | truncate_inode_pages_final(&inode->i_data); |
| 551 | clear_inode(inode); | 551 | clear_inode(inode); |
| 552 | if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) { | 552 | if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) { |
| 553 | HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL; | 553 | HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL; |
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c index 0f47890299c4..caf89a7be0a1 100644 --- a/fs/hfsplus/attributes.c +++ b/fs/hfsplus/attributes.c | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | static struct kmem_cache *hfsplus_attr_tree_cachep; | 12 | static struct kmem_cache *hfsplus_attr_tree_cachep; |
| 13 | 13 | ||
| 14 | int hfsplus_create_attr_tree_cache(void) | 14 | int __init hfsplus_create_attr_tree_cache(void) |
| 15 | { | 15 | { |
| 16 | if (hfsplus_attr_tree_cachep) | 16 | if (hfsplus_attr_tree_cachep) |
| 17 | return -EEXIST; | 17 | return -EEXIST; |
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index fbb212fbb1ef..a7aafb35b624 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c | |||
| @@ -227,10 +227,8 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock, | |||
| 227 | u32 ablock, dblock, mask; | 227 | u32 ablock, dblock, mask; |
| 228 | sector_t sector; | 228 | sector_t sector; |
| 229 | int was_dirty = 0; | 229 | int was_dirty = 0; |
| 230 | int shift; | ||
| 231 | 230 | ||
| 232 | /* Convert inode block to disk allocation block */ | 231 | /* Convert inode block to disk allocation block */ |
| 233 | shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits; | ||
| 234 | ablock = iblock >> sbi->fs_shift; | 232 | ablock = iblock >> sbi->fs_shift; |
| 235 | 233 | ||
| 236 | if (iblock >= hip->fs_blocks) { | 234 | if (iblock >= hip->fs_blocks) { |
| @@ -498,11 +496,13 @@ int hfsplus_file_extend(struct inode *inode) | |||
| 498 | goto insert_extent; | 496 | goto insert_extent; |
| 499 | } | 497 | } |
| 500 | out: | 498 | out: |
| 501 | mutex_unlock(&hip->extents_lock); | ||
| 502 | if (!res) { | 499 | if (!res) { |
| 503 | hip->alloc_blocks += len; | 500 | hip->alloc_blocks += len; |
| 501 | mutex_unlock(&hip->extents_lock); | ||
| 504 | hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); | 502 | hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); |
| 503 | return 0; | ||
| 505 | } | 504 | } |
| 505 | mutex_unlock(&hip->extents_lock); | ||
| 506 | return res; | 506 | return res; |
| 507 | 507 | ||
| 508 | insert_extent: | 508 | insert_extent: |
| @@ -556,11 +556,13 @@ void hfsplus_file_truncate(struct inode *inode) | |||
| 556 | 556 | ||
| 557 | blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> | 557 | blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> |
| 558 | HFSPLUS_SB(sb)->alloc_blksz_shift; | 558 | HFSPLUS_SB(sb)->alloc_blksz_shift; |
| 559 | |||
| 560 | mutex_lock(&hip->extents_lock); | ||
| 561 | |||
| 559 | alloc_cnt = hip->alloc_blocks; | 562 | alloc_cnt = hip->alloc_blocks; |
| 560 | if (blk_cnt == alloc_cnt) | 563 | if (blk_cnt == alloc_cnt) |
| 561 | goto out; | 564 | goto out_unlock; |
| 562 | 565 | ||
| 563 | mutex_lock(&hip->extents_lock); | ||
| 564 | res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); | 566 | res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); |
| 565 | if (res) { | 567 | if (res) { |
| 566 | mutex_unlock(&hip->extents_lock); | 568 | mutex_unlock(&hip->extents_lock); |
| @@ -592,10 +594,10 @@ void hfsplus_file_truncate(struct inode *inode) | |||
| 592 | hfs_brec_remove(&fd); | 594 | hfs_brec_remove(&fd); |
| 593 | } | 595 | } |
| 594 | hfs_find_exit(&fd); | 596 | hfs_find_exit(&fd); |
| 595 | mutex_unlock(&hip->extents_lock); | ||
| 596 | 597 | ||
| 597 | hip->alloc_blocks = blk_cnt; | 598 | hip->alloc_blocks = blk_cnt; |
| 598 | out: | 599 | out_unlock: |
| 600 | mutex_unlock(&hip->extents_lock); | ||
| 599 | hip->phys_size = inode->i_size; | 601 | hip->phys_size = inode->i_size; |
| 600 | hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> | 602 | hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> |
| 601 | sb->s_blocksize_bits; | 603 | sb->s_blocksize_bits; |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 62d571eb69ba..83dc29286b10 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
| @@ -367,7 +367,7 @@ typedef int (*search_strategy_t)(struct hfs_bnode *, | |||
| 367 | */ | 367 | */ |
| 368 | 368 | ||
| 369 | /* attributes.c */ | 369 | /* attributes.c */ |
| 370 | int hfsplus_create_attr_tree_cache(void); | 370 | int __init hfsplus_create_attr_tree_cache(void); |
| 371 | void hfsplus_destroy_attr_tree_cache(void); | 371 | void hfsplus_destroy_attr_tree_cache(void); |
| 372 | hfsplus_attr_entry *hfsplus_alloc_attr_entry(void); | 372 | hfsplus_attr_entry *hfsplus_alloc_attr_entry(void); |
| 373 | void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry_p); | 373 | void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry_p); |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 80875aa640ef..a6abf87d79d0 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
| @@ -161,7 +161,7 @@ static int hfsplus_write_inode(struct inode *inode, | |||
| 161 | static void hfsplus_evict_inode(struct inode *inode) | 161 | static void hfsplus_evict_inode(struct inode *inode) |
| 162 | { | 162 | { |
| 163 | hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); | 163 | hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); |
| 164 | truncate_inode_pages(&inode->i_data, 0); | 164 | truncate_inode_pages_final(&inode->i_data); |
| 165 | clear_inode(inode); | 165 | clear_inode(inode); |
| 166 | if (HFSPLUS_IS_RSRC(inode)) { | 166 | if (HFSPLUS_IS_RSRC(inode)) { |
| 167 | HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL; | 167 | HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL; |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index fe649d325b1f..9c470fde9878 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
| @@ -230,7 +230,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb) | |||
| 230 | 230 | ||
| 231 | static void hostfs_evict_inode(struct inode *inode) | 231 | static void hostfs_evict_inode(struct inode *inode) |
| 232 | { | 232 | { |
| 233 | truncate_inode_pages(&inode->i_data, 0); | 233 | truncate_inode_pages_final(&inode->i_data); |
| 234 | clear_inode(inode); | 234 | clear_inode(inode); |
| 235 | if (HOSTFS_I(inode)->fd != -1) { | 235 | if (HOSTFS_I(inode)->fd != -1) { |
| 236 | close_file(&HOSTFS_I(inode)->fd); | 236 | close_file(&HOSTFS_I(inode)->fd); |
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 9edeeb0ea97e..50a427313835 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c | |||
| @@ -304,7 +304,7 @@ void hpfs_write_if_changed(struct inode *inode) | |||
| 304 | 304 | ||
| 305 | void hpfs_evict_inode(struct inode *inode) | 305 | void hpfs_evict_inode(struct inode *inode) |
| 306 | { | 306 | { |
| 307 | truncate_inode_pages(&inode->i_data, 0); | 307 | truncate_inode_pages_final(&inode->i_data); |
| 308 | clear_inode(inode); | 308 | clear_inode(inode); |
| 309 | if (!inode->i_nlink) { | 309 | if (!inode->i_nlink) { |
| 310 | hpfs_lock(inode->i_sb); | 310 | hpfs_lock(inode->i_sb); |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index d19b30ababf1..204027520937 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
| @@ -366,7 +366,13 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart) | |||
| 366 | 366 | ||
| 367 | static void hugetlbfs_evict_inode(struct inode *inode) | 367 | static void hugetlbfs_evict_inode(struct inode *inode) |
| 368 | { | 368 | { |
| 369 | struct resv_map *resv_map; | ||
| 370 | |||
| 369 | truncate_hugepages(inode, 0); | 371 | truncate_hugepages(inode, 0); |
| 372 | resv_map = (struct resv_map *)inode->i_mapping->private_data; | ||
| 373 | /* root inode doesn't have the resv_map, so we should check it */ | ||
| 374 | if (resv_map) | ||
| 375 | resv_map_release(&resv_map->refs); | ||
| 370 | clear_inode(inode); | 376 | clear_inode(inode); |
| 371 | } | 377 | } |
| 372 | 378 | ||
| @@ -476,6 +482,11 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 476 | umode_t mode, dev_t dev) | 482 | umode_t mode, dev_t dev) |
| 477 | { | 483 | { |
| 478 | struct inode *inode; | 484 | struct inode *inode; |
| 485 | struct resv_map *resv_map; | ||
| 486 | |||
| 487 | resv_map = resv_map_alloc(); | ||
| 488 | if (!resv_map) | ||
| 489 | return NULL; | ||
| 479 | 490 | ||
| 480 | inode = new_inode(sb); | 491 | inode = new_inode(sb); |
| 481 | if (inode) { | 492 | if (inode) { |
| @@ -487,7 +498,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 487 | inode->i_mapping->a_ops = &hugetlbfs_aops; | 498 | inode->i_mapping->a_ops = &hugetlbfs_aops; |
| 488 | inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; | 499 | inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; |
| 489 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 500 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
| 490 | INIT_LIST_HEAD(&inode->i_mapping->private_list); | 501 | inode->i_mapping->private_data = resv_map; |
| 491 | info = HUGETLBFS_I(inode); | 502 | info = HUGETLBFS_I(inode); |
| 492 | /* | 503 | /* |
| 493 | * The policy is initialized here even if we are creating a | 504 | * The policy is initialized here even if we are creating a |
| @@ -517,7 +528,9 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 517 | break; | 528 | break; |
| 518 | } | 529 | } |
| 519 | lockdep_annotate_inode_mutex_key(inode); | 530 | lockdep_annotate_inode_mutex_key(inode); |
| 520 | } | 531 | } else |
| 532 | kref_put(&resv_map->refs, resv_map_release); | ||
| 533 | |||
| 521 | return inode; | 534 | return inode; |
| 522 | } | 535 | } |
| 523 | 536 | ||
diff --git a/fs/inode.c b/fs/inode.c index 4bcdad3c9361..e6905152c39f 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
| @@ -503,6 +503,7 @@ void clear_inode(struct inode *inode) | |||
| 503 | */ | 503 | */ |
| 504 | spin_lock_irq(&inode->i_data.tree_lock); | 504 | spin_lock_irq(&inode->i_data.tree_lock); |
| 505 | BUG_ON(inode->i_data.nrpages); | 505 | BUG_ON(inode->i_data.nrpages); |
| 506 | BUG_ON(inode->i_data.nrshadows); | ||
| 506 | spin_unlock_irq(&inode->i_data.tree_lock); | 507 | spin_unlock_irq(&inode->i_data.tree_lock); |
| 507 | BUG_ON(!list_empty(&inode->i_data.private_list)); | 508 | BUG_ON(!list_empty(&inode->i_data.private_list)); |
| 508 | BUG_ON(!(inode->i_state & I_FREEING)); | 509 | BUG_ON(!(inode->i_state & I_FREEING)); |
| @@ -548,8 +549,7 @@ static void evict(struct inode *inode) | |||
| 548 | if (op->evict_inode) { | 549 | if (op->evict_inode) { |
| 549 | op->evict_inode(inode); | 550 | op->evict_inode(inode); |
| 550 | } else { | 551 | } else { |
| 551 | if (inode->i_data.nrpages) | 552 | truncate_inode_pages_final(&inode->i_data); |
| 552 | truncate_inode_pages(&inode->i_data, 0); | ||
| 553 | clear_inode(inode); | 553 | clear_inode(inode); |
| 554 | } | 554 | } |
| 555 | if (S_ISBLK(inode->i_mode) && inode->i_bdev) | 555 | if (S_ISBLK(inode->i_mode) && inode->i_bdev) |
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index a69e426435dd..f73991522672 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
| @@ -242,7 +242,7 @@ void jffs2_evict_inode (struct inode *inode) | |||
| 242 | 242 | ||
| 243 | jffs2_dbg(1, "%s(): ino #%lu mode %o\n", | 243 | jffs2_dbg(1, "%s(): ino #%lu mode %o\n", |
| 244 | __func__, inode->i_ino, inode->i_mode); | 244 | __func__, inode->i_ino, inode->i_mode); |
| 245 | truncate_inode_pages(&inode->i_data, 0); | 245 | truncate_inode_pages_final(&inode->i_data); |
| 246 | clear_inode(inode); | 246 | clear_inode(inode); |
| 247 | jffs2_do_clear_inode(c, f); | 247 | jffs2_do_clear_inode(c, f); |
| 248 | } | 248 | } |
| @@ -687,7 +687,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, | |||
| 687 | struct inode *inode = OFNI_EDONI_2SFFJ(f); | 687 | struct inode *inode = OFNI_EDONI_2SFFJ(f); |
| 688 | struct page *pg; | 688 | struct page *pg; |
| 689 | 689 | ||
| 690 | pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, | 690 | pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, |
| 691 | (void *)jffs2_do_readpage_unlock, inode); | 691 | (void *)jffs2_do_readpage_unlock, inode); |
| 692 | if (IS_ERR(pg)) | 692 | if (IS_ERR(pg)) |
| 693 | return (void *)pg; | 693 | return (void *)pg; |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index f4aab719add5..6f8fe72c2a7a 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
| @@ -154,7 +154,7 @@ void jfs_evict_inode(struct inode *inode) | |||
| 154 | dquot_initialize(inode); | 154 | dquot_initialize(inode); |
| 155 | 155 | ||
| 156 | if (JFS_IP(inode)->fileset == FILESYSTEM_I) { | 156 | if (JFS_IP(inode)->fileset == FILESYSTEM_I) { |
| 157 | truncate_inode_pages(&inode->i_data, 0); | 157 | truncate_inode_pages_final(&inode->i_data); |
| 158 | 158 | ||
| 159 | if (test_cflag(COMMIT_Freewmap, inode)) | 159 | if (test_cflag(COMMIT_Freewmap, inode)) |
| 160 | jfs_free_zero_link(inode); | 160 | jfs_free_zero_link(inode); |
| @@ -168,7 +168,7 @@ void jfs_evict_inode(struct inode *inode) | |||
| 168 | dquot_free_inode(inode); | 168 | dquot_free_inode(inode); |
| 169 | } | 169 | } |
| 170 | } else { | 170 | } else { |
| 171 | truncate_inode_pages(&inode->i_data, 0); | 171 | truncate_inode_pages_final(&inode->i_data); |
| 172 | } | 172 | } |
| 173 | clear_inode(inode); | 173 | clear_inode(inode); |
| 174 | dquot_drop(inode); | 174 | dquot_drop(inode); |
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index e55126f85bd2..abb0f1f53d93 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c | |||
| @@ -355,7 +355,7 @@ void kernfs_evict_inode(struct inode *inode) | |||
| 355 | { | 355 | { |
| 356 | struct kernfs_node *kn = inode->i_private; | 356 | struct kernfs_node *kn = inode->i_private; |
| 357 | 357 | ||
| 358 | truncate_inode_pages(&inode->i_data, 0); | 358 | truncate_inode_pages_final(&inode->i_data); |
| 359 | clear_inode(inode); | 359 | clear_inode(inode); |
| 360 | kernfs_put(kn); | 360 | kernfs_put(kn); |
| 361 | } | 361 | } |
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 9a59cbade2fb..48140315f627 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
| @@ -2180,7 +2180,7 @@ void logfs_evict_inode(struct inode *inode) | |||
| 2180 | do_delete_inode(inode); | 2180 | do_delete_inode(inode); |
| 2181 | } | 2181 | } |
| 2182 | } | 2182 | } |
| 2183 | truncate_inode_pages(&inode->i_data, 0); | 2183 | truncate_inode_pages_final(&inode->i_data); |
| 2184 | clear_inode(inode); | 2184 | clear_inode(inode); |
| 2185 | 2185 | ||
| 2186 | /* Cheaper version of write_inode. All changes are concealed in | 2186 | /* Cheaper version of write_inode. All changes are concealed in |
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 0332109162a5..0ad2ec9601de 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
| @@ -26,7 +26,7 @@ static int minix_remount (struct super_block * sb, int * flags, char * data); | |||
| 26 | 26 | ||
| 27 | static void minix_evict_inode(struct inode *inode) | 27 | static void minix_evict_inode(struct inode *inode) |
| 28 | { | 28 | { |
| 29 | truncate_inode_pages(&inode->i_data, 0); | 29 | truncate_inode_pages_final(&inode->i_data); |
| 30 | if (!inode->i_nlink) { | 30 | if (!inode->i_nlink) { |
| 31 | inode->i_size = 0; | 31 | inode->i_size = 0; |
| 32 | minix_truncate(inode); | 32 | minix_truncate(inode); |
| @@ -86,7 +86,7 @@ static void init_once(void *foo) | |||
| 86 | inode_init_once(&ei->vfs_inode); | 86 | inode_init_once(&ei->vfs_inode); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static int init_inodecache(void) | 89 | static int __init init_inodecache(void) |
| 90 | { | 90 | { |
| 91 | minix_inode_cachep = kmem_cache_create("minix_inode_cache", | 91 | minix_inode_cachep = kmem_cache_create("minix_inode_cache", |
| 92 | sizeof(struct minix_inode_info), | 92 | sizeof(struct minix_inode_info), |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 2cf2ebecb55f..ee59d35ff069 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
| @@ -296,7 +296,7 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info) | |||
| 296 | static void | 296 | static void |
| 297 | ncp_evict_inode(struct inode *inode) | 297 | ncp_evict_inode(struct inode *inode) |
| 298 | { | 298 | { |
| 299 | truncate_inode_pages(&inode->i_data, 0); | 299 | truncate_inode_pages_final(&inode->i_data); |
| 300 | clear_inode(inode); | 300 | clear_inode(inode); |
| 301 | 301 | ||
| 302 | if (S_ISDIR(inode->i_mode)) { | 302 | if (S_ISDIR(inode->i_mode)) { |
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 56ff823ca82e..65d849bdf77a 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
| @@ -1213,7 +1213,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) | |||
| 1213 | end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); | 1213 | end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); |
| 1214 | if (end != NFS_I(inode)->npages) { | 1214 | if (end != NFS_I(inode)->npages) { |
| 1215 | rcu_read_lock(); | 1215 | rcu_read_lock(); |
| 1216 | end = radix_tree_next_hole(&mapping->page_tree, idx + 1, ULONG_MAX); | 1216 | end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX); |
| 1217 | rcu_read_unlock(); | 1217 | rcu_read_unlock(); |
| 1218 | } | 1218 | } |
| 1219 | 1219 | ||
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 360114ae8b82..c4702baa22b8 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
| @@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(nfs_clear_inode); | |||
| 128 | 128 | ||
| 129 | void nfs_evict_inode(struct inode *inode) | 129 | void nfs_evict_inode(struct inode *inode) |
| 130 | { | 130 | { |
| 131 | truncate_inode_pages(&inode->i_data, 0); | 131 | truncate_inode_pages_final(&inode->i_data); |
| 132 | clear_inode(inode); | 132 | clear_inode(inode); |
| 133 | nfs_clear_inode(inode); | 133 | nfs_clear_inode(inode); |
| 134 | } | 134 | } |
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 808f29574412..6f340f02f2ba 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c | |||
| @@ -90,7 +90,7 @@ static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 90 | */ | 90 | */ |
| 91 | static void nfs4_evict_inode(struct inode *inode) | 91 | static void nfs4_evict_inode(struct inode *inode) |
| 92 | { | 92 | { |
| 93 | truncate_inode_pages(&inode->i_data, 0); | 93 | truncate_inode_pages_final(&inode->i_data); |
| 94 | clear_inode(inode); | 94 | clear_inode(inode); |
| 95 | pnfs_return_layout(inode); | 95 | pnfs_return_layout(inode); |
| 96 | pnfs_destroy_layout(NFS_I(inode)); | 96 | pnfs_destroy_layout(NFS_I(inode)); |
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 06cddd572264..2645be435e75 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c | |||
| @@ -71,10 +71,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) | |||
| 71 | if (gid_eq(new->fsgid, INVALID_GID)) | 71 | if (gid_eq(new->fsgid, INVALID_GID)) |
| 72 | new->fsgid = exp->ex_anon_gid; | 72 | new->fsgid = exp->ex_anon_gid; |
| 73 | 73 | ||
| 74 | ret = set_groups(new, gi); | 74 | set_groups(new, gi); |
| 75 | put_group_info(gi); | 75 | put_group_info(gi); |
| 76 | if (ret < 0) | ||
| 77 | goto error; | ||
| 78 | 76 | ||
| 79 | if (!uid_eq(new->fsuid, GLOBAL_ROOT_UID)) | 77 | if (!uid_eq(new->fsuid, GLOBAL_ROOT_UID)) |
| 80 | new->cap_effective = cap_drop_nfsd_set(new->cap_effective); | 78 | new->cap_effective = cap_drop_nfsd_set(new->cap_effective); |
| @@ -89,7 +87,6 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) | |||
| 89 | 87 | ||
| 90 | oom: | 88 | oom: |
| 91 | ret = -ENOMEM; | 89 | ret = -ENOMEM; |
| 92 | error: | ||
| 93 | abort_creds(new); | 90 | abort_creds(new); |
| 94 | return ret; | 91 | return ret; |
| 95 | } | 92 | } |
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index deaa3d33a0aa..0d58075f34e2 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c | |||
| @@ -942,6 +942,18 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, | |||
| 942 | struct inode *cpfile; | 942 | struct inode *cpfile; |
| 943 | int err; | 943 | int err; |
| 944 | 944 | ||
| 945 | if (cpsize > sb->s_blocksize) { | ||
| 946 | printk(KERN_ERR | ||
| 947 | "NILFS: too large checkpoint size: %zu bytes.\n", | ||
| 948 | cpsize); | ||
| 949 | return -EINVAL; | ||
| 950 | } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) { | ||
| 951 | printk(KERN_ERR | ||
| 952 | "NILFS: too small checkpoint size: %zu bytes.\n", | ||
| 953 | cpsize); | ||
| 954 | return -EINVAL; | ||
| 955 | } | ||
| 956 | |||
| 945 | cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO); | 957 | cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO); |
| 946 | if (unlikely(!cpfile)) | 958 | if (unlikely(!cpfile)) |
| 947 | return -ENOMEM; | 959 | return -ENOMEM; |
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index fa0f80308c2d..0d5fada91191 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c | |||
| @@ -484,6 +484,18 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size, | |||
| 484 | struct nilfs_dat_info *di; | 484 | struct nilfs_dat_info *di; |
| 485 | int err; | 485 | int err; |
| 486 | 486 | ||
| 487 | if (entry_size > sb->s_blocksize) { | ||
| 488 | printk(KERN_ERR | ||
| 489 | "NILFS: too large DAT entry size: %zu bytes.\n", | ||
| 490 | entry_size); | ||
| 491 | return -EINVAL; | ||
| 492 | } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) { | ||
| 493 | printk(KERN_ERR | ||
| 494 | "NILFS: too small DAT entry size: %zu bytes.\n", | ||
| 495 | entry_size); | ||
| 496 | return -EINVAL; | ||
| 497 | } | ||
| 498 | |||
| 487 | dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO); | 499 | dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO); |
| 488 | if (unlikely(!dat)) | 500 | if (unlikely(!dat)) |
| 489 | return -ENOMEM; | 501 | return -ENOMEM; |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 7e350c562e0e..b9c5726120e3 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
| @@ -783,16 +783,14 @@ void nilfs_evict_inode(struct inode *inode) | |||
| 783 | int ret; | 783 | int ret; |
| 784 | 784 | ||
| 785 | if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) { | 785 | if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) { |
| 786 | if (inode->i_data.nrpages) | 786 | truncate_inode_pages_final(&inode->i_data); |
| 787 | truncate_inode_pages(&inode->i_data, 0); | ||
| 788 | clear_inode(inode); | 787 | clear_inode(inode); |
| 789 | nilfs_clear_inode(inode); | 788 | nilfs_clear_inode(inode); |
| 790 | return; | 789 | return; |
| 791 | } | 790 | } |
| 792 | nilfs_transaction_begin(sb, &ti, 0); /* never fails */ | 791 | nilfs_transaction_begin(sb, &ti, 0); /* never fails */ |
| 793 | 792 | ||
| 794 | if (inode->i_data.nrpages) | 793 | truncate_inode_pages_final(&inode->i_data); |
| 795 | truncate_inode_pages(&inode->i_data, 0); | ||
| 796 | 794 | ||
| 797 | /* TODO: some of the following operations may fail. */ | 795 | /* TODO: some of the following operations may fail. */ |
| 798 | nilfs_truncate_bmap(ii, 0); | 796 | nilfs_truncate_bmap(ii, 0); |
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 2b34021948e4..422fb54b7377 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
| @@ -1072,6 +1072,48 @@ out: | |||
| 1072 | } | 1072 | } |
| 1073 | 1073 | ||
| 1074 | /** | 1074 | /** |
| 1075 | * nilfs_ioctl_trim_fs() - trim ioctl handle function | ||
| 1076 | * @inode: inode object | ||
| 1077 | * @argp: pointer on argument from userspace | ||
| 1078 | * | ||
| 1079 | * Decription: nilfs_ioctl_trim_fs is the FITRIM ioctl handle function. It | ||
| 1080 | * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which | ||
| 1081 | * performs the actual trim operation. | ||
| 1082 | * | ||
| 1083 | * Return Value: On success, 0 is returned or negative error code, otherwise. | ||
| 1084 | */ | ||
| 1085 | static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) | ||
| 1086 | { | ||
| 1087 | struct the_nilfs *nilfs = inode->i_sb->s_fs_info; | ||
| 1088 | struct request_queue *q = bdev_get_queue(nilfs->ns_bdev); | ||
| 1089 | struct fstrim_range range; | ||
| 1090 | int ret; | ||
| 1091 | |||
| 1092 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1093 | return -EPERM; | ||
| 1094 | |||
| 1095 | if (!blk_queue_discard(q)) | ||
| 1096 | return -EOPNOTSUPP; | ||
| 1097 | |||
| 1098 | if (copy_from_user(&range, argp, sizeof(range))) | ||
| 1099 | return -EFAULT; | ||
| 1100 | |||
| 1101 | range.minlen = max_t(u64, range.minlen, q->limits.discard_granularity); | ||
| 1102 | |||
| 1103 | down_read(&nilfs->ns_segctor_sem); | ||
| 1104 | ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range); | ||
| 1105 | up_read(&nilfs->ns_segctor_sem); | ||
| 1106 | |||
| 1107 | if (ret < 0) | ||
| 1108 | return ret; | ||
| 1109 | |||
| 1110 | if (copy_to_user(argp, &range, sizeof(range))) | ||
| 1111 | return -EFAULT; | ||
| 1112 | |||
| 1113 | return 0; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | /** | ||
| 1075 | * nilfs_ioctl_set_alloc_range - limit range of segments to be allocated | 1117 | * nilfs_ioctl_set_alloc_range - limit range of segments to be allocated |
| 1076 | * @inode: inode object | 1118 | * @inode: inode object |
| 1077 | * @argp: pointer on argument from userspace | 1119 | * @argp: pointer on argument from userspace |
| @@ -1163,6 +1205,95 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, | |||
| 1163 | return ret; | 1205 | return ret; |
| 1164 | } | 1206 | } |
| 1165 | 1207 | ||
| 1208 | /** | ||
| 1209 | * nilfs_ioctl_set_suinfo - set segment usage info | ||
| 1210 | * @inode: inode object | ||
| 1211 | * @filp: file object | ||
| 1212 | * @cmd: ioctl's request code | ||
| 1213 | * @argp: pointer on argument from userspace | ||
| 1214 | * | ||
| 1215 | * Description: Expects an array of nilfs_suinfo_update structures | ||
| 1216 | * encapsulated in nilfs_argv and updates the segment usage info | ||
| 1217 | * according to the flags in nilfs_suinfo_update. | ||
| 1218 | * | ||
| 1219 | * Return Value: On success, 0 is returned. On error, one of the | ||
| 1220 | * following negative error codes is returned. | ||
| 1221 | * | ||
| 1222 | * %-EPERM - Not enough permissions | ||
| 1223 | * | ||
| 1224 | * %-EFAULT - Error copying input data | ||
| 1225 | * | ||
| 1226 | * %-EIO - I/O error. | ||
| 1227 | * | ||
| 1228 | * %-ENOMEM - Insufficient amount of memory available. | ||
| 1229 | * | ||
| 1230 | * %-EINVAL - Invalid values in input (segment number, flags or nblocks) | ||
| 1231 | */ | ||
| 1232 | static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp, | ||
| 1233 | unsigned int cmd, void __user *argp) | ||
| 1234 | { | ||
| 1235 | struct the_nilfs *nilfs = inode->i_sb->s_fs_info; | ||
| 1236 | struct nilfs_transaction_info ti; | ||
| 1237 | struct nilfs_argv argv; | ||
| 1238 | size_t len; | ||
| 1239 | void __user *base; | ||
| 1240 | void *kbuf; | ||
| 1241 | int ret; | ||
| 1242 | |||
| 1243 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1244 | return -EPERM; | ||
| 1245 | |||
| 1246 | ret = mnt_want_write_file(filp); | ||
| 1247 | if (ret) | ||
| 1248 | return ret; | ||
| 1249 | |||
| 1250 | ret = -EFAULT; | ||
| 1251 | if (copy_from_user(&argv, argp, sizeof(argv))) | ||
| 1252 | goto out; | ||
| 1253 | |||
| 1254 | ret = -EINVAL; | ||
| 1255 | if (argv.v_size < sizeof(struct nilfs_suinfo_update)) | ||
| 1256 | goto out; | ||
| 1257 | |||
| 1258 | if (argv.v_nmembs > nilfs->ns_nsegments) | ||
| 1259 | goto out; | ||
| 1260 | |||
| 1261 | if (argv.v_nmembs >= UINT_MAX / argv.v_size) | ||
| 1262 | goto out; | ||
| 1263 | |||
| 1264 | len = argv.v_size * argv.v_nmembs; | ||
| 1265 | if (!len) { | ||
| 1266 | ret = 0; | ||
| 1267 | goto out; | ||
| 1268 | } | ||
| 1269 | |||
| 1270 | base = (void __user *)(unsigned long)argv.v_base; | ||
| 1271 | kbuf = vmalloc(len); | ||
| 1272 | if (!kbuf) { | ||
| 1273 | ret = -ENOMEM; | ||
| 1274 | goto out; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | if (copy_from_user(kbuf, base, len)) { | ||
| 1278 | ret = -EFAULT; | ||
| 1279 | goto out_free; | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | nilfs_transaction_begin(inode->i_sb, &ti, 0); | ||
| 1283 | ret = nilfs_sufile_set_suinfo(nilfs->ns_sufile, kbuf, argv.v_size, | ||
| 1284 | argv.v_nmembs); | ||
| 1285 | if (unlikely(ret < 0)) | ||
| 1286 | nilfs_transaction_abort(inode->i_sb); | ||
| 1287 | else | ||
| 1288 | nilfs_transaction_commit(inode->i_sb); /* never fails */ | ||
| 1289 | |||
| 1290 | out_free: | ||
| 1291 | vfree(kbuf); | ||
| 1292 | out: | ||
| 1293 | mnt_drop_write_file(filp); | ||
| 1294 | return ret; | ||
| 1295 | } | ||
| 1296 | |||
| 1166 | long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 1297 | long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 1167 | { | 1298 | { |
| 1168 | struct inode *inode = file_inode(filp); | 1299 | struct inode *inode = file_inode(filp); |
| @@ -1189,6 +1320,8 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 1189 | return nilfs_ioctl_get_info(inode, filp, cmd, argp, | 1320 | return nilfs_ioctl_get_info(inode, filp, cmd, argp, |
| 1190 | sizeof(struct nilfs_suinfo), | 1321 | sizeof(struct nilfs_suinfo), |
| 1191 | nilfs_ioctl_do_get_suinfo); | 1322 | nilfs_ioctl_do_get_suinfo); |
| 1323 | case NILFS_IOCTL_SET_SUINFO: | ||
| 1324 | return nilfs_ioctl_set_suinfo(inode, filp, cmd, argp); | ||
| 1192 | case NILFS_IOCTL_GET_SUSTAT: | 1325 | case NILFS_IOCTL_GET_SUSTAT: |
| 1193 | return nilfs_ioctl_get_sustat(inode, filp, cmd, argp); | 1326 | return nilfs_ioctl_get_sustat(inode, filp, cmd, argp); |
| 1194 | case NILFS_IOCTL_GET_VINFO: | 1327 | case NILFS_IOCTL_GET_VINFO: |
| @@ -1205,6 +1338,8 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 1205 | return nilfs_ioctl_resize(inode, filp, argp); | 1338 | return nilfs_ioctl_resize(inode, filp, argp); |
| 1206 | case NILFS_IOCTL_SET_ALLOC_RANGE: | 1339 | case NILFS_IOCTL_SET_ALLOC_RANGE: |
| 1207 | return nilfs_ioctl_set_alloc_range(inode, argp); | 1340 | return nilfs_ioctl_set_alloc_range(inode, argp); |
| 1341 | case FITRIM: | ||
| 1342 | return nilfs_ioctl_trim_fs(inode, argp); | ||
| 1208 | default: | 1343 | default: |
| 1209 | return -ENOTTY; | 1344 | return -ENOTTY; |
| 1210 | } | 1345 | } |
| @@ -1228,6 +1363,7 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 1228 | case NILFS_IOCTL_GET_CPINFO: | 1363 | case NILFS_IOCTL_GET_CPINFO: |
| 1229 | case NILFS_IOCTL_GET_CPSTAT: | 1364 | case NILFS_IOCTL_GET_CPSTAT: |
| 1230 | case NILFS_IOCTL_GET_SUINFO: | 1365 | case NILFS_IOCTL_GET_SUINFO: |
| 1366 | case NILFS_IOCTL_SET_SUINFO: | ||
| 1231 | case NILFS_IOCTL_GET_SUSTAT: | 1367 | case NILFS_IOCTL_GET_SUSTAT: |
| 1232 | case NILFS_IOCTL_GET_VINFO: | 1368 | case NILFS_IOCTL_GET_VINFO: |
| 1233 | case NILFS_IOCTL_GET_BDESCS: | 1369 | case NILFS_IOCTL_GET_BDESCS: |
| @@ -1235,6 +1371,7 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 1235 | case NILFS_IOCTL_SYNC: | 1371 | case NILFS_IOCTL_SYNC: |
| 1236 | case NILFS_IOCTL_RESIZE: | 1372 | case NILFS_IOCTL_RESIZE: |
| 1237 | case NILFS_IOCTL_SET_ALLOC_RANGE: | 1373 | case NILFS_IOCTL_SET_ALLOC_RANGE: |
| 1374 | case FITRIM: | ||
| 1238 | break; | 1375 | break; |
| 1239 | default: | 1376 | default: |
| 1240 | return -ENOIOCTLCMD; | 1377 | return -ENOIOCTLCMD; |
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 3127e9f438a7..2a869c35c362 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c | |||
| @@ -870,6 +870,289 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, | |||
| 870 | } | 870 | } |
| 871 | 871 | ||
| 872 | /** | 872 | /** |
| 873 | * nilfs_sufile_set_suinfo - sets segment usage info | ||
| 874 | * @sufile: inode of segment usage file | ||
| 875 | * @buf: array of suinfo_update | ||
| 876 | * @supsz: byte size of suinfo_update | ||
| 877 | * @nsup: size of suinfo_update array | ||
| 878 | * | ||
| 879 | * Description: Takes an array of nilfs_suinfo_update structs and updates | ||
| 880 | * segment usage accordingly. Only the fields indicated by the sup_flags | ||
| 881 | * are updated. | ||
| 882 | * | ||
| 883 | * Return Value: On success, 0 is returned. On error, one of the | ||
| 884 | * following negative error codes is returned. | ||
| 885 | * | ||
| 886 | * %-EIO - I/O error. | ||
| 887 | * | ||
| 888 | * %-ENOMEM - Insufficient amount of memory available. | ||
| 889 | * | ||
| 890 | * %-EINVAL - Invalid values in input (segment number, flags or nblocks) | ||
| 891 | */ | ||
| 892 | ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, | ||
| 893 | unsigned supsz, size_t nsup) | ||
| 894 | { | ||
| 895 | struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; | ||
| 896 | struct buffer_head *header_bh, *bh; | ||
| 897 | struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup; | ||
| 898 | struct nilfs_segment_usage *su; | ||
| 899 | void *kaddr; | ||
| 900 | unsigned long blkoff, prev_blkoff; | ||
| 901 | int cleansi, cleansu, dirtysi, dirtysu; | ||
| 902 | long ncleaned = 0, ndirtied = 0; | ||
| 903 | int ret = 0; | ||
| 904 | |||
| 905 | if (unlikely(nsup == 0)) | ||
| 906 | return ret; | ||
| 907 | |||
| 908 | for (sup = buf; sup < supend; sup = (void *)sup + supsz) { | ||
| 909 | if (sup->sup_segnum >= nilfs->ns_nsegments | ||
| 910 | || (sup->sup_flags & | ||
| 911 | (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS)) | ||
| 912 | || (nilfs_suinfo_update_nblocks(sup) && | ||
| 913 | sup->sup_sui.sui_nblocks > | ||
| 914 | nilfs->ns_blocks_per_segment)) | ||
| 915 | return -EINVAL; | ||
| 916 | } | ||
| 917 | |||
| 918 | down_write(&NILFS_MDT(sufile)->mi_sem); | ||
| 919 | |||
| 920 | ret = nilfs_sufile_get_header_block(sufile, &header_bh); | ||
| 921 | if (ret < 0) | ||
| 922 | goto out_sem; | ||
| 923 | |||
| 924 | sup = buf; | ||
| 925 | blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum); | ||
| 926 | ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh); | ||
| 927 | if (ret < 0) | ||
| 928 | goto out_header; | ||
| 929 | |||
| 930 | for (;;) { | ||
| 931 | kaddr = kmap_atomic(bh->b_page); | ||
| 932 | su = nilfs_sufile_block_get_segment_usage( | ||
| 933 | sufile, sup->sup_segnum, bh, kaddr); | ||
| 934 | |||
| 935 | if (nilfs_suinfo_update_lastmod(sup)) | ||
| 936 | su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod); | ||
| 937 | |||
| 938 | if (nilfs_suinfo_update_nblocks(sup)) | ||
| 939 | su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks); | ||
| 940 | |||
| 941 | if (nilfs_suinfo_update_flags(sup)) { | ||
| 942 | /* | ||
| 943 | * Active flag is a virtual flag projected by running | ||
| 944 | * nilfs kernel code - drop it not to write it to | ||
| 945 | * disk. | ||
| 946 | */ | ||
| 947 | sup->sup_sui.sui_flags &= | ||
| 948 | ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); | ||
| 949 | |||
| 950 | cleansi = nilfs_suinfo_clean(&sup->sup_sui); | ||
| 951 | cleansu = nilfs_segment_usage_clean(su); | ||
| 952 | dirtysi = nilfs_suinfo_dirty(&sup->sup_sui); | ||
| 953 | dirtysu = nilfs_segment_usage_dirty(su); | ||
| 954 | |||
| 955 | if (cleansi && !cleansu) | ||
| 956 | ++ncleaned; | ||
| 957 | else if (!cleansi && cleansu) | ||
| 958 | --ncleaned; | ||
| 959 | |||
| 960 | if (dirtysi && !dirtysu) | ||
| 961 | ++ndirtied; | ||
| 962 | else if (!dirtysi && dirtysu) | ||
| 963 | --ndirtied; | ||
| 964 | |||
| 965 | su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags); | ||
| 966 | } | ||
| 967 | |||
| 968 | kunmap_atomic(kaddr); | ||
| 969 | |||
| 970 | sup = (void *)sup + supsz; | ||
| 971 | if (sup >= supend) | ||
| 972 | break; | ||
| 973 | |||
| 974 | prev_blkoff = blkoff; | ||
| 975 | blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum); | ||
| 976 | if (blkoff == prev_blkoff) | ||
| 977 | continue; | ||
| 978 | |||
| 979 | /* get different block */ | ||
| 980 | mark_buffer_dirty(bh); | ||
| 981 | put_bh(bh); | ||
| 982 | ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh); | ||
| 983 | if (unlikely(ret < 0)) | ||
| 984 | goto out_mark; | ||
| 985 | } | ||
| 986 | mark_buffer_dirty(bh); | ||
| 987 | put_bh(bh); | ||
| 988 | |||
| 989 | out_mark: | ||
| 990 | if (ncleaned || ndirtied) { | ||
| 991 | nilfs_sufile_mod_counter(header_bh, (u64)ncleaned, | ||
| 992 | (u64)ndirtied); | ||
| 993 | NILFS_SUI(sufile)->ncleansegs += ncleaned; | ||
| 994 | } | ||
| 995 | nilfs_mdt_mark_dirty(sufile); | ||
| 996 | out_header: | ||
| 997 | put_bh(header_bh); | ||
| 998 | out_sem: | ||
| 999 | up_write(&NILFS_MDT(sufile)->mi_sem); | ||
| 1000 | return ret; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | /** | ||
| 1004 | * nilfs_sufile_trim_fs() - trim ioctl handle function | ||
| 1005 | * @sufile: inode of segment usage file | ||
| 1006 | * @range: fstrim_range structure | ||
| 1007 | * | ||
| 1008 | * start: First Byte to trim | ||
| 1009 | * len: number of Bytes to trim from start | ||
| 1010 | * minlen: minimum extent length in Bytes | ||
| 1011 | * | ||
| 1012 | * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes | ||
| 1013 | * from start to start+len. start is rounded up to the next block boundary | ||
| 1014 | * and start+len is rounded down. For each clean segment blkdev_issue_discard | ||
| 1015 | * function is invoked. | ||
| 1016 | * | ||
| 1017 | * Return Value: On success, 0 is returned or negative error code, otherwise. | ||
| 1018 | */ | ||
| 1019 | int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) | ||
| 1020 | { | ||
| 1021 | struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; | ||
| 1022 | struct buffer_head *su_bh; | ||
| 1023 | struct nilfs_segment_usage *su; | ||
| 1024 | void *kaddr; | ||
| 1025 | size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size; | ||
| 1026 | sector_t seg_start, seg_end, start_block, end_block; | ||
| 1027 | sector_t start = 0, nblocks = 0; | ||
| 1028 | u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0; | ||
| 1029 | int ret = 0; | ||
| 1030 | unsigned int sects_per_block; | ||
| 1031 | |||
| 1032 | sects_per_block = (1 << nilfs->ns_blocksize_bits) / | ||
| 1033 | bdev_logical_block_size(nilfs->ns_bdev); | ||
| 1034 | len = range->len >> nilfs->ns_blocksize_bits; | ||
| 1035 | minlen = range->minlen >> nilfs->ns_blocksize_bits; | ||
| 1036 | max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment); | ||
| 1037 | |||
| 1038 | if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits) | ||
| 1039 | return -EINVAL; | ||
| 1040 | |||
| 1041 | start_block = (range->start + nilfs->ns_blocksize - 1) >> | ||
| 1042 | nilfs->ns_blocksize_bits; | ||
| 1043 | |||
| 1044 | /* | ||
| 1045 | * range->len can be very large (actually, it is set to | ||
| 1046 | * ULLONG_MAX by default) - truncate upper end of the range | ||
| 1047 | * carefully so as not to overflow. | ||
| 1048 | */ | ||
| 1049 | if (max_blocks - start_block < len) | ||
| 1050 | end_block = max_blocks - 1; | ||
| 1051 | else | ||
| 1052 | end_block = start_block + len - 1; | ||
| 1053 | |||
| 1054 | segnum = nilfs_get_segnum_of_block(nilfs, start_block); | ||
| 1055 | segnum_end = nilfs_get_segnum_of_block(nilfs, end_block); | ||
| 1056 | |||
| 1057 | down_read(&NILFS_MDT(sufile)->mi_sem); | ||
| 1058 | |||
| 1059 | while (segnum <= segnum_end) { | ||
| 1060 | n = nilfs_sufile_segment_usages_in_block(sufile, segnum, | ||
| 1061 | segnum_end); | ||
| 1062 | |||
| 1063 | ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, | ||
| 1064 | &su_bh); | ||
| 1065 | if (ret < 0) { | ||
| 1066 | if (ret != -ENOENT) | ||
| 1067 | goto out_sem; | ||
| 1068 | /* hole */ | ||
| 1069 | segnum += n; | ||
| 1070 | continue; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | kaddr = kmap_atomic(su_bh->b_page); | ||
| 1074 | su = nilfs_sufile_block_get_segment_usage(sufile, segnum, | ||
| 1075 | su_bh, kaddr); | ||
| 1076 | for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) { | ||
| 1077 | if (!nilfs_segment_usage_clean(su)) | ||
| 1078 | continue; | ||
| 1079 | |||
| 1080 | nilfs_get_segment_range(nilfs, segnum, &seg_start, | ||
| 1081 | &seg_end); | ||
| 1082 | |||
| 1083 | if (!nblocks) { | ||
| 1084 | /* start new extent */ | ||
| 1085 | start = seg_start; | ||
| 1086 | nblocks = seg_end - seg_start + 1; | ||
| 1087 | continue; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | if (start + nblocks == seg_start) { | ||
| 1091 | /* add to previous extent */ | ||
| 1092 | nblocks += seg_end - seg_start + 1; | ||
| 1093 | continue; | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | /* discard previous extent */ | ||
| 1097 | if (start < start_block) { | ||
| 1098 | nblocks -= start_block - start; | ||
| 1099 | start = start_block; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | if (nblocks >= minlen) { | ||
| 1103 | kunmap_atomic(kaddr); | ||
| 1104 | |||
| 1105 | ret = blkdev_issue_discard(nilfs->ns_bdev, | ||
| 1106 | start * sects_per_block, | ||
| 1107 | nblocks * sects_per_block, | ||
| 1108 | GFP_NOFS, 0); | ||
| 1109 | if (ret < 0) { | ||
| 1110 | put_bh(su_bh); | ||
| 1111 | goto out_sem; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | ndiscarded += nblocks; | ||
| 1115 | kaddr = kmap_atomic(su_bh->b_page); | ||
| 1116 | su = nilfs_sufile_block_get_segment_usage( | ||
| 1117 | sufile, segnum, su_bh, kaddr); | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | /* start new extent */ | ||
| 1121 | start = seg_start; | ||
| 1122 | nblocks = seg_end - seg_start + 1; | ||
| 1123 | } | ||
| 1124 | kunmap_atomic(kaddr); | ||
| 1125 | put_bh(su_bh); | ||
| 1126 | } | ||
| 1127 | |||
| 1128 | |||
| 1129 | if (nblocks) { | ||
| 1130 | /* discard last extent */ | ||
| 1131 | if (start < start_block) { | ||
| 1132 | nblocks -= start_block - start; | ||
| 1133 | start = start_block; | ||
| 1134 | } | ||
| 1135 | if (start + nblocks > end_block + 1) | ||
| 1136 | nblocks = end_block - start + 1; | ||
| 1137 | |||
| 1138 | if (nblocks >= minlen) { | ||
| 1139 | ret = blkdev_issue_discard(nilfs->ns_bdev, | ||
| 1140 | start * sects_per_block, | ||
| 1141 | nblocks * sects_per_block, | ||
| 1142 | GFP_NOFS, 0); | ||
| 1143 | if (!ret) | ||
| 1144 | ndiscarded += nblocks; | ||
| 1145 | } | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | out_sem: | ||
| 1149 | up_read(&NILFS_MDT(sufile)->mi_sem); | ||
| 1150 | |||
| 1151 | range->len = ndiscarded << nilfs->ns_blocksize_bits; | ||
| 1152 | return ret; | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | /** | ||
| 873 | * nilfs_sufile_read - read or get sufile inode | 1156 | * nilfs_sufile_read - read or get sufile inode |
| 874 | * @sb: super block instance | 1157 | * @sb: super block instance |
| 875 | * @susize: size of a segment usage entry | 1158 | * @susize: size of a segment usage entry |
| @@ -886,6 +1169,18 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize, | |||
| 886 | void *kaddr; | 1169 | void *kaddr; |
| 887 | int err; | 1170 | int err; |
| 888 | 1171 | ||
| 1172 | if (susize > sb->s_blocksize) { | ||
| 1173 | printk(KERN_ERR | ||
| 1174 | "NILFS: too large segment usage size: %zu bytes.\n", | ||
| 1175 | susize); | ||
| 1176 | return -EINVAL; | ||
| 1177 | } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) { | ||
| 1178 | printk(KERN_ERR | ||
| 1179 | "NILFS: too small segment usage size: %zu bytes.\n", | ||
| 1180 | susize); | ||
| 1181 | return -EINVAL; | ||
| 1182 | } | ||
| 1183 | |||
| 889 | sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO); | 1184 | sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO); |
| 890 | if (unlikely(!sufile)) | 1185 | if (unlikely(!sufile)) |
| 891 | return -ENOMEM; | 1186 | return -ENOMEM; |
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index e84bc5b51fc1..b8afd72f2379 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h | |||
| @@ -44,6 +44,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, | |||
| 44 | int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); | 44 | int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); |
| 45 | ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned, | 45 | ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned, |
| 46 | size_t); | 46 | size_t); |
| 47 | ssize_t nilfs_sufile_set_suinfo(struct inode *, void *, unsigned , size_t); | ||
| 47 | 48 | ||
| 48 | int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *, | 49 | int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *, |
| 49 | void (*dofunc)(struct inode *, __u64, | 50 | void (*dofunc)(struct inode *, __u64, |
| @@ -65,6 +66,7 @@ void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *, | |||
| 65 | int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs); | 66 | int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs); |
| 66 | int nilfs_sufile_read(struct super_block *sb, size_t susize, | 67 | int nilfs_sufile_read(struct super_block *sb, size_t susize, |
| 67 | struct nilfs_inode *raw_inode, struct inode **inodep); | 68 | struct nilfs_inode *raw_inode, struct inode **inodep); |
| 69 | int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range); | ||
| 68 | 70 | ||
| 69 | /** | 71 | /** |
| 70 | * nilfs_sufile_scrap - make a segment garbage | 72 | * nilfs_sufile_scrap - make a segment garbage |
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 94c451ce6d24..8ba8229ba076 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
| @@ -399,6 +399,16 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, | |||
| 399 | return -EINVAL; | 399 | return -EINVAL; |
| 400 | 400 | ||
| 401 | nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); | 401 | nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); |
| 402 | if (nilfs->ns_inode_size > nilfs->ns_blocksize) { | ||
| 403 | printk(KERN_ERR "NILFS: too large inode size: %d bytes.\n", | ||
| 404 | nilfs->ns_inode_size); | ||
| 405 | return -EINVAL; | ||
| 406 | } else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) { | ||
| 407 | printk(KERN_ERR "NILFS: too small inode size: %d bytes.\n", | ||
| 408 | nilfs->ns_inode_size); | ||
| 409 | return -EINVAL; | ||
| 410 | } | ||
| 411 | |||
| 402 | nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino); | 412 | nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino); |
| 403 | 413 | ||
| 404 | nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); | 414 | nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index dc638f786d5c..ee9cb3795c2b 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
| @@ -60,8 +60,8 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) | |||
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 62 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 63 | static int fanotify_get_response_from_access(struct fsnotify_group *group, | 63 | static int fanotify_get_response(struct fsnotify_group *group, |
| 64 | struct fanotify_event_info *event) | 64 | struct fanotify_perm_event_info *event) |
| 65 | { | 65 | { |
| 66 | int ret; | 66 | int ret; |
| 67 | 67 | ||
| @@ -142,6 +142,40 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
| 142 | return false; | 142 | return false; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, | ||
| 146 | struct path *path) | ||
| 147 | { | ||
| 148 | struct fanotify_event_info *event; | ||
| 149 | |||
| 150 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 151 | if (mask & FAN_ALL_PERM_EVENTS) { | ||
| 152 | struct fanotify_perm_event_info *pevent; | ||
| 153 | |||
| 154 | pevent = kmem_cache_alloc(fanotify_perm_event_cachep, | ||
| 155 | GFP_KERNEL); | ||
| 156 | if (!pevent) | ||
| 157 | return NULL; | ||
| 158 | event = &pevent->fae; | ||
| 159 | pevent->response = 0; | ||
| 160 | goto init; | ||
| 161 | } | ||
| 162 | #endif | ||
| 163 | event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); | ||
| 164 | if (!event) | ||
| 165 | return NULL; | ||
| 166 | init: __maybe_unused | ||
| 167 | fsnotify_init_event(&event->fse, inode, mask); | ||
| 168 | event->tgid = get_pid(task_tgid(current)); | ||
| 169 | if (path) { | ||
| 170 | event->path = *path; | ||
| 171 | path_get(&event->path); | ||
| 172 | } else { | ||
| 173 | event->path.mnt = NULL; | ||
| 174 | event->path.dentry = NULL; | ||
| 175 | } | ||
| 176 | return event; | ||
| 177 | } | ||
| 178 | |||
| 145 | static int fanotify_handle_event(struct fsnotify_group *group, | 179 | static int fanotify_handle_event(struct fsnotify_group *group, |
| 146 | struct inode *inode, | 180 | struct inode *inode, |
| 147 | struct fsnotify_mark *inode_mark, | 181 | struct fsnotify_mark *inode_mark, |
| @@ -171,25 +205,11 @@ static int fanotify_handle_event(struct fsnotify_group *group, | |||
| 171 | pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, | 205 | pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, |
| 172 | mask); | 206 | mask); |
| 173 | 207 | ||
| 174 | event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); | 208 | event = fanotify_alloc_event(inode, mask, data); |
| 175 | if (unlikely(!event)) | 209 | if (unlikely(!event)) |
| 176 | return -ENOMEM; | 210 | return -ENOMEM; |
| 177 | 211 | ||
| 178 | fsn_event = &event->fse; | 212 | fsn_event = &event->fse; |
| 179 | fsnotify_init_event(fsn_event, inode, mask); | ||
| 180 | event->tgid = get_pid(task_tgid(current)); | ||
| 181 | if (data_type == FSNOTIFY_EVENT_PATH) { | ||
| 182 | struct path *path = data; | ||
| 183 | event->path = *path; | ||
| 184 | path_get(&event->path); | ||
| 185 | } else { | ||
| 186 | event->path.mnt = NULL; | ||
| 187 | event->path.dentry = NULL; | ||
| 188 | } | ||
| 189 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 190 | event->response = 0; | ||
| 191 | #endif | ||
| 192 | |||
| 193 | ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge); | 213 | ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge); |
| 194 | if (ret) { | 214 | if (ret) { |
| 195 | /* Permission events shouldn't be merged */ | 215 | /* Permission events shouldn't be merged */ |
| @@ -202,7 +222,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, | |||
| 202 | 222 | ||
| 203 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 223 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 204 | if (mask & FAN_ALL_PERM_EVENTS) { | 224 | if (mask & FAN_ALL_PERM_EVENTS) { |
| 205 | ret = fanotify_get_response_from_access(group, event); | 225 | ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event)); |
| 206 | fsnotify_destroy_event(group, fsn_event); | 226 | fsnotify_destroy_event(group, fsn_event); |
| 207 | } | 227 | } |
| 208 | #endif | 228 | #endif |
| @@ -225,6 +245,13 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event) | |||
| 225 | event = FANOTIFY_E(fsn_event); | 245 | event = FANOTIFY_E(fsn_event); |
| 226 | path_put(&event->path); | 246 | path_put(&event->path); |
| 227 | put_pid(event->tgid); | 247 | put_pid(event->tgid); |
| 248 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 249 | if (fsn_event->mask & FAN_ALL_PERM_EVENTS) { | ||
| 250 | kmem_cache_free(fanotify_perm_event_cachep, | ||
| 251 | FANOTIFY_PE(fsn_event)); | ||
| 252 | return; | ||
| 253 | } | ||
| 254 | #endif | ||
| 228 | kmem_cache_free(fanotify_event_cachep, event); | 255 | kmem_cache_free(fanotify_event_cachep, event); |
| 229 | } | 256 | } |
| 230 | 257 | ||
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h index 32a2f034fb94..2a5fb14115df 100644 --- a/fs/notify/fanotify/fanotify.h +++ b/fs/notify/fanotify/fanotify.h | |||
| @@ -3,13 +3,12 @@ | |||
| 3 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
| 4 | 4 | ||
| 5 | extern struct kmem_cache *fanotify_event_cachep; | 5 | extern struct kmem_cache *fanotify_event_cachep; |
| 6 | extern struct kmem_cache *fanotify_perm_event_cachep; | ||
| 6 | 7 | ||
| 7 | /* | 8 | /* |
| 8 | * Lifetime of the structure differs for normal and permission events. In both | 9 | * Structure for normal fanotify events. It gets allocated in |
| 9 | * cases the structure is allocated in fanotify_handle_event(). For normal | 10 | * fanotify_handle_event() and freed when the information is retrieved by |
| 10 | * events the structure is freed immediately after reporting it to userspace. | 11 | * userspace |
| 11 | * For permission events we free it only after we receive response from | ||
| 12 | * userspace. | ||
| 13 | */ | 12 | */ |
| 14 | struct fanotify_event_info { | 13 | struct fanotify_event_info { |
| 15 | struct fsnotify_event fse; | 14 | struct fsnotify_event fse; |
| @@ -19,12 +18,33 @@ struct fanotify_event_info { | |||
| 19 | */ | 18 | */ |
| 20 | struct path path; | 19 | struct path path; |
| 21 | struct pid *tgid; | 20 | struct pid *tgid; |
| 21 | }; | ||
| 22 | |||
| 22 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 23 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 23 | u32 response; /* userspace answer to question */ | 24 | /* |
| 24 | #endif | 25 | * Structure for permission fanotify events. It gets allocated and freed in |
| 26 | * fanotify_handle_event() since we wait there for user response. When the | ||
| 27 | * information is retrieved by userspace the structure is moved from | ||
| 28 | * group->notification_list to group->fanotify_data.access_list to wait for | ||
| 29 | * user response. | ||
| 30 | */ | ||
| 31 | struct fanotify_perm_event_info { | ||
| 32 | struct fanotify_event_info fae; | ||
| 33 | int response; /* userspace answer to question */ | ||
| 34 | int fd; /* fd we passed to userspace for this event */ | ||
| 25 | }; | 35 | }; |
| 26 | 36 | ||
| 37 | static inline struct fanotify_perm_event_info * | ||
| 38 | FANOTIFY_PE(struct fsnotify_event *fse) | ||
| 39 | { | ||
| 40 | return container_of(fse, struct fanotify_perm_event_info, fae.fse); | ||
| 41 | } | ||
| 42 | #endif | ||
| 43 | |||
| 27 | static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse) | 44 | static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse) |
| 28 | { | 45 | { |
| 29 | return container_of(fse, struct fanotify_event_info, fse); | 46 | return container_of(fse, struct fanotify_event_info, fse); |
| 30 | } | 47 | } |
| 48 | |||
| 49 | struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, | ||
| 50 | struct path *path); | ||
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 287a22c04149..4e565c814309 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c | |||
| @@ -28,14 +28,8 @@ | |||
| 28 | extern const struct fsnotify_ops fanotify_fsnotify_ops; | 28 | extern const struct fsnotify_ops fanotify_fsnotify_ops; |
| 29 | 29 | ||
| 30 | static struct kmem_cache *fanotify_mark_cache __read_mostly; | 30 | static struct kmem_cache *fanotify_mark_cache __read_mostly; |
| 31 | static struct kmem_cache *fanotify_response_event_cache __read_mostly; | ||
| 32 | struct kmem_cache *fanotify_event_cachep __read_mostly; | 31 | struct kmem_cache *fanotify_event_cachep __read_mostly; |
| 33 | 32 | struct kmem_cache *fanotify_perm_event_cachep __read_mostly; | |
| 34 | struct fanotify_response_event { | ||
| 35 | struct list_head list; | ||
| 36 | __s32 fd; | ||
| 37 | struct fanotify_event_info *event; | ||
| 38 | }; | ||
| 39 | 33 | ||
| 40 | /* | 34 | /* |
| 41 | * Get an fsnotify notification event if one exists and is small | 35 | * Get an fsnotify notification event if one exists and is small |
| @@ -135,33 +129,34 @@ static int fill_event_metadata(struct fsnotify_group *group, | |||
| 135 | } | 129 | } |
| 136 | 130 | ||
| 137 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 131 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 138 | static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group, | 132 | static struct fanotify_perm_event_info *dequeue_event( |
| 139 | __s32 fd) | 133 | struct fsnotify_group *group, int fd) |
| 140 | { | 134 | { |
| 141 | struct fanotify_response_event *re, *return_re = NULL; | 135 | struct fanotify_perm_event_info *event, *return_e = NULL; |
| 142 | 136 | ||
| 143 | mutex_lock(&group->fanotify_data.access_mutex); | 137 | spin_lock(&group->fanotify_data.access_lock); |
| 144 | list_for_each_entry(re, &group->fanotify_data.access_list, list) { | 138 | list_for_each_entry(event, &group->fanotify_data.access_list, |
| 145 | if (re->fd != fd) | 139 | fae.fse.list) { |
| 140 | if (event->fd != fd) | ||
| 146 | continue; | 141 | continue; |
| 147 | 142 | ||
| 148 | list_del_init(&re->list); | 143 | list_del_init(&event->fae.fse.list); |
| 149 | return_re = re; | 144 | return_e = event; |
| 150 | break; | 145 | break; |
| 151 | } | 146 | } |
| 152 | mutex_unlock(&group->fanotify_data.access_mutex); | 147 | spin_unlock(&group->fanotify_data.access_lock); |
| 153 | 148 | ||
| 154 | pr_debug("%s: found return_re=%p\n", __func__, return_re); | 149 | pr_debug("%s: found return_re=%p\n", __func__, return_e); |
| 155 | 150 | ||
| 156 | return return_re; | 151 | return return_e; |
| 157 | } | 152 | } |
| 158 | 153 | ||
| 159 | static int process_access_response(struct fsnotify_group *group, | 154 | static int process_access_response(struct fsnotify_group *group, |
| 160 | struct fanotify_response *response_struct) | 155 | struct fanotify_response *response_struct) |
| 161 | { | 156 | { |
| 162 | struct fanotify_response_event *re; | 157 | struct fanotify_perm_event_info *event; |
| 163 | __s32 fd = response_struct->fd; | 158 | int fd = response_struct->fd; |
| 164 | __u32 response = response_struct->response; | 159 | int response = response_struct->response; |
| 165 | 160 | ||
| 166 | pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, | 161 | pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, |
| 167 | fd, response); | 162 | fd, response); |
| @@ -181,58 +176,15 @@ static int process_access_response(struct fsnotify_group *group, | |||
| 181 | if (fd < 0) | 176 | if (fd < 0) |
| 182 | return -EINVAL; | 177 | return -EINVAL; |
| 183 | 178 | ||
| 184 | re = dequeue_re(group, fd); | 179 | event = dequeue_event(group, fd); |
| 185 | if (!re) | 180 | if (!event) |
| 186 | return -ENOENT; | 181 | return -ENOENT; |
| 187 | 182 | ||
| 188 | re->event->response = response; | 183 | event->response = response; |
| 189 | |||
| 190 | wake_up(&group->fanotify_data.access_waitq); | 184 | wake_up(&group->fanotify_data.access_waitq); |
| 191 | 185 | ||
| 192 | kmem_cache_free(fanotify_response_event_cache, re); | ||
| 193 | |||
| 194 | return 0; | ||
| 195 | } | ||
| 196 | |||
| 197 | static int prepare_for_access_response(struct fsnotify_group *group, | ||
| 198 | struct fsnotify_event *event, | ||
| 199 | __s32 fd) | ||
| 200 | { | ||
| 201 | struct fanotify_response_event *re; | ||
| 202 | |||
| 203 | if (!(event->mask & FAN_ALL_PERM_EVENTS)) | ||
| 204 | return 0; | ||
| 205 | |||
| 206 | re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL); | ||
| 207 | if (!re) | ||
| 208 | return -ENOMEM; | ||
| 209 | |||
| 210 | re->event = FANOTIFY_E(event); | ||
| 211 | re->fd = fd; | ||
| 212 | |||
| 213 | mutex_lock(&group->fanotify_data.access_mutex); | ||
| 214 | |||
| 215 | if (atomic_read(&group->fanotify_data.bypass_perm)) { | ||
| 216 | mutex_unlock(&group->fanotify_data.access_mutex); | ||
| 217 | kmem_cache_free(fanotify_response_event_cache, re); | ||
| 218 | FANOTIFY_E(event)->response = FAN_ALLOW; | ||
| 219 | return 0; | ||
| 220 | } | ||
| 221 | |||
| 222 | list_add_tail(&re->list, &group->fanotify_data.access_list); | ||
| 223 | mutex_unlock(&group->fanotify_data.access_mutex); | ||
| 224 | |||
| 225 | return 0; | ||
| 226 | } | ||
| 227 | |||
| 228 | #else | ||
| 229 | static int prepare_for_access_response(struct fsnotify_group *group, | ||
| 230 | struct fsnotify_event *event, | ||
| 231 | __s32 fd) | ||
| 232 | { | ||
| 233 | return 0; | 186 | return 0; |
| 234 | } | 187 | } |
| 235 | |||
| 236 | #endif | 188 | #endif |
| 237 | 189 | ||
| 238 | static ssize_t copy_event_to_user(struct fsnotify_group *group, | 190 | static ssize_t copy_event_to_user(struct fsnotify_group *group, |
| @@ -247,7 +199,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, | |||
| 247 | 199 | ||
| 248 | ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); | 200 | ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); |
| 249 | if (ret < 0) | 201 | if (ret < 0) |
| 250 | goto out; | 202 | return ret; |
| 251 | 203 | ||
| 252 | fd = fanotify_event_metadata.fd; | 204 | fd = fanotify_event_metadata.fd; |
| 253 | ret = -EFAULT; | 205 | ret = -EFAULT; |
| @@ -255,9 +207,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, | |||
| 255 | fanotify_event_metadata.event_len)) | 207 | fanotify_event_metadata.event_len)) |
| 256 | goto out_close_fd; | 208 | goto out_close_fd; |
| 257 | 209 | ||
| 258 | ret = prepare_for_access_response(group, event, fd); | 210 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 259 | if (ret) | 211 | if (event->mask & FAN_ALL_PERM_EVENTS) |
| 260 | goto out_close_fd; | 212 | FANOTIFY_PE(event)->fd = fd; |
| 213 | #endif | ||
| 261 | 214 | ||
| 262 | if (fd != FAN_NOFD) | 215 | if (fd != FAN_NOFD) |
| 263 | fd_install(fd, f); | 216 | fd_install(fd, f); |
| @@ -268,13 +221,6 @@ out_close_fd: | |||
| 268 | put_unused_fd(fd); | 221 | put_unused_fd(fd); |
| 269 | fput(f); | 222 | fput(f); |
| 270 | } | 223 | } |
| 271 | out: | ||
| 272 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 273 | if (event->mask & FAN_ALL_PERM_EVENTS) { | ||
| 274 | FANOTIFY_E(event)->response = FAN_DENY; | ||
| 275 | wake_up(&group->fanotify_data.access_waitq); | ||
| 276 | } | ||
| 277 | #endif | ||
| 278 | return ret; | 224 | return ret; |
| 279 | } | 225 | } |
| 280 | 226 | ||
| @@ -314,35 +260,50 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, | |||
| 314 | kevent = get_one_event(group, count); | 260 | kevent = get_one_event(group, count); |
| 315 | mutex_unlock(&group->notification_mutex); | 261 | mutex_unlock(&group->notification_mutex); |
| 316 | 262 | ||
| 317 | if (kevent) { | 263 | if (IS_ERR(kevent)) { |
| 318 | ret = PTR_ERR(kevent); | 264 | ret = PTR_ERR(kevent); |
| 319 | if (IS_ERR(kevent)) | 265 | break; |
| 266 | } | ||
| 267 | |||
| 268 | if (!kevent) { | ||
| 269 | ret = -EAGAIN; | ||
| 270 | if (file->f_flags & O_NONBLOCK) | ||
| 320 | break; | 271 | break; |
| 321 | ret = copy_event_to_user(group, kevent, buf); | 272 | |
| 322 | /* | 273 | ret = -ERESTARTSYS; |
| 323 | * Permission events get destroyed after we | 274 | if (signal_pending(current)) |
| 324 | * receive response | 275 | break; |
| 325 | */ | 276 | |
| 326 | if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) | 277 | if (start != buf) |
| 327 | fsnotify_destroy_event(group, kevent); | ||
| 328 | if (ret < 0) | ||
| 329 | break; | 278 | break; |
| 330 | buf += ret; | 279 | schedule(); |
| 331 | count -= ret; | ||
| 332 | continue; | 280 | continue; |
| 333 | } | 281 | } |
| 334 | 282 | ||
| 335 | ret = -EAGAIN; | 283 | ret = copy_event_to_user(group, kevent, buf); |
| 336 | if (file->f_flags & O_NONBLOCK) | 284 | /* |
| 337 | break; | 285 | * Permission events get queued to wait for response. Other |
| 338 | ret = -ERESTARTSYS; | 286 | * events can be destroyed now. |
| 339 | if (signal_pending(current)) | 287 | */ |
| 340 | break; | 288 | if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) { |
| 341 | 289 | fsnotify_destroy_event(group, kevent); | |
| 342 | if (start != buf) | 290 | if (ret < 0) |
| 343 | break; | 291 | break; |
| 344 | 292 | } else { | |
| 345 | schedule(); | 293 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 294 | if (ret < 0) { | ||
| 295 | FANOTIFY_PE(kevent)->response = FAN_DENY; | ||
| 296 | wake_up(&group->fanotify_data.access_waitq); | ||
| 297 | break; | ||
| 298 | } | ||
| 299 | spin_lock(&group->fanotify_data.access_lock); | ||
| 300 | list_add_tail(&kevent->list, | ||
| 301 | &group->fanotify_data.access_list); | ||
| 302 | spin_unlock(&group->fanotify_data.access_lock); | ||
| 303 | #endif | ||
| 304 | } | ||
| 305 | buf += ret; | ||
| 306 | count -= ret; | ||
| 346 | } | 307 | } |
| 347 | 308 | ||
| 348 | finish_wait(&group->notification_waitq, &wait); | 309 | finish_wait(&group->notification_waitq, &wait); |
| @@ -383,22 +344,21 @@ static int fanotify_release(struct inode *ignored, struct file *file) | |||
| 383 | struct fsnotify_group *group = file->private_data; | 344 | struct fsnotify_group *group = file->private_data; |
| 384 | 345 | ||
| 385 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 346 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 386 | struct fanotify_response_event *re, *lre; | 347 | struct fanotify_perm_event_info *event, *next; |
| 387 | 348 | ||
| 388 | mutex_lock(&group->fanotify_data.access_mutex); | 349 | spin_lock(&group->fanotify_data.access_lock); |
| 389 | 350 | ||
| 390 | atomic_inc(&group->fanotify_data.bypass_perm); | 351 | atomic_inc(&group->fanotify_data.bypass_perm); |
| 391 | 352 | ||
| 392 | list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { | 353 | list_for_each_entry_safe(event, next, &group->fanotify_data.access_list, |
| 393 | pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, | 354 | fae.fse.list) { |
| 394 | re, re->event); | 355 | pr_debug("%s: found group=%p event=%p\n", __func__, group, |
| 356 | event); | ||
| 395 | 357 | ||
| 396 | list_del_init(&re->list); | 358 | list_del_init(&event->fae.fse.list); |
| 397 | re->event->response = FAN_ALLOW; | 359 | event->response = FAN_ALLOW; |
| 398 | |||
| 399 | kmem_cache_free(fanotify_response_event_cache, re); | ||
| 400 | } | 360 | } |
| 401 | mutex_unlock(&group->fanotify_data.access_mutex); | 361 | spin_unlock(&group->fanotify_data.access_lock); |
| 402 | 362 | ||
| 403 | wake_up(&group->fanotify_data.access_waitq); | 363 | wake_up(&group->fanotify_data.access_waitq); |
| 404 | #endif | 364 | #endif |
| @@ -731,21 +691,16 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) | |||
| 731 | group->fanotify_data.user = user; | 691 | group->fanotify_data.user = user; |
| 732 | atomic_inc(&user->fanotify_listeners); | 692 | atomic_inc(&user->fanotify_listeners); |
| 733 | 693 | ||
| 734 | oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); | 694 | oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL); |
| 735 | if (unlikely(!oevent)) { | 695 | if (unlikely(!oevent)) { |
| 736 | fd = -ENOMEM; | 696 | fd = -ENOMEM; |
| 737 | goto out_destroy_group; | 697 | goto out_destroy_group; |
| 738 | } | 698 | } |
| 739 | group->overflow_event = &oevent->fse; | 699 | group->overflow_event = &oevent->fse; |
| 740 | fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW); | ||
| 741 | oevent->tgid = get_pid(task_tgid(current)); | ||
| 742 | oevent->path.mnt = NULL; | ||
| 743 | oevent->path.dentry = NULL; | ||
| 744 | 700 | ||
| 745 | group->fanotify_data.f_flags = event_f_flags; | 701 | group->fanotify_data.f_flags = event_f_flags; |
| 746 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 702 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 747 | oevent->response = 0; | 703 | spin_lock_init(&group->fanotify_data.access_lock); |
| 748 | mutex_init(&group->fanotify_data.access_mutex); | ||
| 749 | init_waitqueue_head(&group->fanotify_data.access_waitq); | 704 | init_waitqueue_head(&group->fanotify_data.access_waitq); |
| 750 | INIT_LIST_HEAD(&group->fanotify_data.access_list); | 705 | INIT_LIST_HEAD(&group->fanotify_data.access_list); |
| 751 | atomic_set(&group->fanotify_data.bypass_perm, 0); | 706 | atomic_set(&group->fanotify_data.bypass_perm, 0); |
| @@ -920,9 +875,11 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark, | |||
| 920 | static int __init fanotify_user_setup(void) | 875 | static int __init fanotify_user_setup(void) |
| 921 | { | 876 | { |
| 922 | fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); | 877 | fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); |
| 923 | fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event, | ||
| 924 | SLAB_PANIC); | ||
| 925 | fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC); | 878 | fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC); |
| 879 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 880 | fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info, | ||
| 881 | SLAB_PANIC); | ||
| 882 | #endif | ||
| 926 | 883 | ||
| 927 | return 0; | 884 | return 0; |
| 928 | } | 885 | } |
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index ffb9b3675736..9d8153ebacfb 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c | |||
| @@ -2259,7 +2259,7 @@ void ntfs_evict_big_inode(struct inode *vi) | |||
| 2259 | { | 2259 | { |
| 2260 | ntfs_inode *ni = NTFS_I(vi); | 2260 | ntfs_inode *ni = NTFS_I(vi); |
| 2261 | 2261 | ||
| 2262 | truncate_inode_pages(&vi->i_data, 0); | 2262 | truncate_inode_pages_final(&vi->i_data); |
| 2263 | clear_inode(vi); | 2263 | clear_inode(vi); |
| 2264 | 2264 | ||
| 2265 | #ifdef NTFS_RW | 2265 | #ifdef NTFS_RW |
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 555f4cddefe3..7e8282dcea2a 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
| @@ -205,6 +205,7 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh, | |||
| 205 | di->i_mode = cpu_to_le16(inode->i_mode); | 205 | di->i_mode = cpu_to_le16(inode->i_mode); |
| 206 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); | 206 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); |
| 207 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | 207 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); |
| 208 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 208 | 209 | ||
| 209 | ocfs2_journal_dirty(handle, di_bh); | 210 | ocfs2_journal_dirty(handle, di_bh); |
| 210 | 211 | ||
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index e2edff38be52..b4deb5f750d9 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
| @@ -5728,6 +5728,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
| 5728 | } | 5728 | } |
| 5729 | 5729 | ||
| 5730 | ocfs2_et_update_clusters(et, -len); | 5730 | ocfs2_et_update_clusters(et, -len); |
| 5731 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 5731 | 5732 | ||
| 5732 | ocfs2_journal_dirty(handle, et->et_root_bh); | 5733 | ocfs2_journal_dirty(handle, et->et_root_bh); |
| 5733 | 5734 | ||
| @@ -6932,6 +6933,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, | |||
| 6932 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); | 6933 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); |
| 6933 | spin_unlock(&oi->ip_lock); | 6934 | spin_unlock(&oi->ip_lock); |
| 6934 | 6935 | ||
| 6936 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 6935 | ocfs2_dinode_new_extent_list(inode, di); | 6937 | ocfs2_dinode_new_extent_list(inode, di); |
| 6936 | 6938 | ||
| 6937 | ocfs2_journal_dirty(handle, di_bh); | 6939 | ocfs2_journal_dirty(handle, di_bh); |
| @@ -7208,6 +7210,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, | |||
| 7208 | di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); | 7210 | di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); |
| 7209 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | 7211 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); |
| 7210 | 7212 | ||
| 7213 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 7211 | ocfs2_journal_dirty(handle, di_bh); | 7214 | ocfs2_journal_dirty(handle, di_bh); |
| 7212 | 7215 | ||
| 7213 | out_commit: | 7216 | out_commit: |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index aeb44e879c51..d310d12a9adc 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
| @@ -571,7 +571,6 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
| 571 | { | 571 | { |
| 572 | struct inode *inode = file_inode(iocb->ki_filp); | 572 | struct inode *inode = file_inode(iocb->ki_filp); |
| 573 | int level; | 573 | int level; |
| 574 | wait_queue_head_t *wq = ocfs2_ioend_wq(inode); | ||
| 575 | 574 | ||
| 576 | /* this io's submitter should not have unlocked this before we could */ | 575 | /* this io's submitter should not have unlocked this before we could */ |
| 577 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); | 576 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); |
| @@ -582,10 +581,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
| 582 | if (ocfs2_iocb_is_unaligned_aio(iocb)) { | 581 | if (ocfs2_iocb_is_unaligned_aio(iocb)) { |
| 583 | ocfs2_iocb_clear_unaligned_aio(iocb); | 582 | ocfs2_iocb_clear_unaligned_aio(iocb); |
| 584 | 583 | ||
| 585 | if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) && | 584 | mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio); |
| 586 | waitqueue_active(wq)) { | ||
| 587 | wake_up_all(wq); | ||
| 588 | } | ||
| 589 | } | 585 | } |
| 590 | 586 | ||
| 591 | ocfs2_iocb_clear_rw_locked(iocb); | 587 | ocfs2_iocb_clear_rw_locked(iocb); |
| @@ -2043,6 +2039,7 @@ out_write_size: | |||
| 2043 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 2039 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
| 2044 | di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); | 2040 | di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); |
| 2045 | di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); | 2041 | di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); |
| 2042 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 2046 | ocfs2_journal_dirty(handle, wc->w_di_bh); | 2043 | ocfs2_journal_dirty(handle, wc->w_di_bh); |
| 2047 | 2044 | ||
| 2048 | ocfs2_commit_trans(osb, handle); | 2045 | ocfs2_commit_trans(osb, handle); |
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index f671e49beb34..6cae155d54df 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h | |||
| @@ -102,9 +102,4 @@ enum ocfs2_iocb_lock_bits { | |||
| 102 | #define ocfs2_iocb_is_unaligned_aio(iocb) \ | 102 | #define ocfs2_iocb_is_unaligned_aio(iocb) \ |
| 103 | test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | 103 | test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) |
| 104 | 104 | ||
| 105 | #define OCFS2_IOEND_WQ_HASH_SZ 37 | ||
| 106 | #define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\ | ||
| 107 | OCFS2_IOEND_WQ_HASH_SZ]) | ||
| 108 | extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; | ||
| 109 | |||
| 110 | #endif /* OCFS2_FILE_H */ | 105 | #endif /* OCFS2_FILE_H */ |
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 5b704c63a103..1edcb141f639 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c | |||
| @@ -90,7 +90,6 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, | |||
| 90 | * information for this bh as it's not marked locally | 90 | * information for this bh as it's not marked locally |
| 91 | * uptodate. */ | 91 | * uptodate. */ |
| 92 | ret = -EIO; | 92 | ret = -EIO; |
| 93 | put_bh(bh); | ||
| 94 | mlog_errno(ret); | 93 | mlog_errno(ret); |
| 95 | } | 94 | } |
| 96 | 95 | ||
| @@ -420,7 +419,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, | |||
| 420 | 419 | ||
| 421 | if (!buffer_uptodate(bh)) { | 420 | if (!buffer_uptodate(bh)) { |
| 422 | ret = -EIO; | 421 | ret = -EIO; |
| 423 | put_bh(bh); | ||
| 424 | mlog_errno(ret); | 422 | mlog_errno(ret); |
| 425 | } | 423 | } |
| 426 | 424 | ||
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2cd2406b4140..eb649d23a4de 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
| @@ -262,17 +262,17 @@ static void o2net_update_recv_stats(struct o2net_sock_container *sc) | |||
| 262 | 262 | ||
| 263 | #endif /* CONFIG_OCFS2_FS_STATS */ | 263 | #endif /* CONFIG_OCFS2_FS_STATS */ |
| 264 | 264 | ||
| 265 | static inline int o2net_reconnect_delay(void) | 265 | static inline unsigned int o2net_reconnect_delay(void) |
| 266 | { | 266 | { |
| 267 | return o2nm_single_cluster->cl_reconnect_delay_ms; | 267 | return o2nm_single_cluster->cl_reconnect_delay_ms; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | static inline int o2net_keepalive_delay(void) | 270 | static inline unsigned int o2net_keepalive_delay(void) |
| 271 | { | 271 | { |
| 272 | return o2nm_single_cluster->cl_keepalive_delay_ms; | 272 | return o2nm_single_cluster->cl_keepalive_delay_ms; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | static inline int o2net_idle_timeout(void) | 275 | static inline unsigned int o2net_idle_timeout(void) |
| 276 | { | 276 | { |
| 277 | return o2nm_single_cluster->cl_idle_timeout_ms; | 277 | return o2nm_single_cluster->cl_idle_timeout_ms; |
| 278 | } | 278 | } |
| @@ -1964,18 +1964,30 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes) | |||
| 1964 | goto out; | 1964 | goto out; |
| 1965 | } | 1965 | } |
| 1966 | 1966 | ||
| 1967 | /* ->sk_data_ready is also called for a newly established child socket | 1967 | /* This callback may called twice when a new connection |
| 1968 | * before it has been accepted and the acceptor has set up their | 1968 | * is being established as a child socket inherits everything |
| 1969 | * data_ready.. we only want to queue listen work for our listening | 1969 | * from a parent LISTEN socket, including the data_ready cb of |
| 1970 | * socket */ | 1970 | * the parent. This leads to a hazard. In o2net_accept_one() |
| 1971 | * we are still initializing the child socket but have not | ||
| 1972 | * changed the inherited data_ready callback yet when | ||
| 1973 | * data starts arriving. | ||
| 1974 | * We avoid this hazard by checking the state. | ||
| 1975 | * For the listening socket, the state will be TCP_LISTEN; for the new | ||
| 1976 | * socket, will be TCP_ESTABLISHED. Also, in this case, | ||
| 1977 | * sk->sk_user_data is not a valid function pointer. | ||
| 1978 | */ | ||
| 1979 | |||
| 1971 | if (sk->sk_state == TCP_LISTEN) { | 1980 | if (sk->sk_state == TCP_LISTEN) { |
| 1972 | mlog(ML_TCP, "bytes: %d\n", bytes); | 1981 | mlog(ML_TCP, "bytes: %d\n", bytes); |
| 1973 | queue_work(o2net_wq, &o2net_listen_work); | 1982 | queue_work(o2net_wq, &o2net_listen_work); |
| 1983 | } else { | ||
| 1984 | ready = NULL; | ||
| 1974 | } | 1985 | } |
| 1975 | 1986 | ||
| 1976 | out: | 1987 | out: |
| 1977 | read_unlock(&sk->sk_callback_lock); | 1988 | read_unlock(&sk->sk_callback_lock); |
| 1978 | ready(sk, bytes); | 1989 | if (ready != NULL) |
| 1990 | ready(sk, bytes); | ||
| 1979 | } | 1991 | } |
| 1980 | 1992 | ||
| 1981 | static int o2net_open_listening_sock(__be32 addr, __be16 port) | 1993 | static int o2net_open_listening_sock(__be32 addr, __be16 port) |
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 0d3a97d2d5f6..e2e05a106beb 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c | |||
| @@ -37,7 +37,6 @@ | |||
| 37 | #include "dlmglue.h" | 37 | #include "dlmglue.h" |
| 38 | #include "file.h" | 38 | #include "file.h" |
| 39 | #include "inode.h" | 39 | #include "inode.h" |
| 40 | #include "super.h" | ||
| 41 | #include "ocfs2_trace.h" | 40 | #include "ocfs2_trace.h" |
| 42 | 41 | ||
| 43 | void ocfs2_dentry_attach_gen(struct dentry *dentry) | 42 | void ocfs2_dentry_attach_gen(struct dentry *dentry) |
| @@ -346,52 +345,6 @@ out_attach: | |||
| 346 | return ret; | 345 | return ret; |
| 347 | } | 346 | } |
| 348 | 347 | ||
| 349 | DEFINE_SPINLOCK(dentry_list_lock); | ||
| 350 | |||
| 351 | /* We limit the number of dentry locks to drop in one go. We have | ||
| 352 | * this limit so that we don't starve other users of ocfs2_wq. */ | ||
| 353 | #define DL_INODE_DROP_COUNT 64 | ||
| 354 | |||
| 355 | /* Drop inode references from dentry locks */ | ||
| 356 | static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count) | ||
| 357 | { | ||
| 358 | struct ocfs2_dentry_lock *dl; | ||
| 359 | |||
| 360 | spin_lock(&dentry_list_lock); | ||
| 361 | while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) { | ||
| 362 | dl = osb->dentry_lock_list; | ||
| 363 | osb->dentry_lock_list = dl->dl_next; | ||
| 364 | spin_unlock(&dentry_list_lock); | ||
| 365 | iput(dl->dl_inode); | ||
| 366 | kfree(dl); | ||
| 367 | spin_lock(&dentry_list_lock); | ||
| 368 | } | ||
| 369 | spin_unlock(&dentry_list_lock); | ||
| 370 | } | ||
| 371 | |||
| 372 | void ocfs2_drop_dl_inodes(struct work_struct *work) | ||
| 373 | { | ||
| 374 | struct ocfs2_super *osb = container_of(work, struct ocfs2_super, | ||
| 375 | dentry_lock_work); | ||
| 376 | |||
| 377 | __ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT); | ||
| 378 | /* | ||
| 379 | * Don't queue dropping if umount is in progress. We flush the | ||
| 380 | * list in ocfs2_dismount_volume | ||
| 381 | */ | ||
| 382 | spin_lock(&dentry_list_lock); | ||
| 383 | if (osb->dentry_lock_list && | ||
| 384 | !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED)) | ||
| 385 | queue_work(ocfs2_wq, &osb->dentry_lock_work); | ||
| 386 | spin_unlock(&dentry_list_lock); | ||
| 387 | } | ||
| 388 | |||
| 389 | /* Flush the whole work queue */ | ||
| 390 | void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb) | ||
| 391 | { | ||
| 392 | __ocfs2_drop_dl_inodes(osb, -1); | ||
| 393 | } | ||
| 394 | |||
| 395 | /* | 348 | /* |
| 396 | * ocfs2_dentry_iput() and friends. | 349 | * ocfs2_dentry_iput() and friends. |
| 397 | * | 350 | * |
| @@ -416,24 +369,16 @@ void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb) | |||
| 416 | static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, | 369 | static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, |
| 417 | struct ocfs2_dentry_lock *dl) | 370 | struct ocfs2_dentry_lock *dl) |
| 418 | { | 371 | { |
| 372 | iput(dl->dl_inode); | ||
| 419 | ocfs2_simple_drop_lockres(osb, &dl->dl_lockres); | 373 | ocfs2_simple_drop_lockres(osb, &dl->dl_lockres); |
| 420 | ocfs2_lock_res_free(&dl->dl_lockres); | 374 | ocfs2_lock_res_free(&dl->dl_lockres); |
| 421 | 375 | kfree(dl); | |
| 422 | /* We leave dropping of inode reference to ocfs2_wq as that can | ||
| 423 | * possibly lead to inode deletion which gets tricky */ | ||
| 424 | spin_lock(&dentry_list_lock); | ||
| 425 | if (!osb->dentry_lock_list && | ||
| 426 | !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED)) | ||
| 427 | queue_work(ocfs2_wq, &osb->dentry_lock_work); | ||
| 428 | dl->dl_next = osb->dentry_lock_list; | ||
| 429 | osb->dentry_lock_list = dl; | ||
| 430 | spin_unlock(&dentry_list_lock); | ||
| 431 | } | 376 | } |
| 432 | 377 | ||
| 433 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, | 378 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, |
| 434 | struct ocfs2_dentry_lock *dl) | 379 | struct ocfs2_dentry_lock *dl) |
| 435 | { | 380 | { |
| 436 | int unlock; | 381 | int unlock = 0; |
| 437 | 382 | ||
| 438 | BUG_ON(dl->dl_count == 0); | 383 | BUG_ON(dl->dl_count == 0); |
| 439 | 384 | ||
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h index b79eff709958..55f58892b153 100644 --- a/fs/ocfs2/dcache.h +++ b/fs/ocfs2/dcache.h | |||
| @@ -29,13 +29,8 @@ | |||
| 29 | extern const struct dentry_operations ocfs2_dentry_ops; | 29 | extern const struct dentry_operations ocfs2_dentry_ops; |
| 30 | 30 | ||
| 31 | struct ocfs2_dentry_lock { | 31 | struct ocfs2_dentry_lock { |
| 32 | /* Use count of dentry lock */ | ||
| 33 | unsigned int dl_count; | 32 | unsigned int dl_count; |
| 34 | union { | 33 | u64 dl_parent_blkno; |
| 35 | /* Linked list of dentry locks to release */ | ||
| 36 | struct ocfs2_dentry_lock *dl_next; | ||
| 37 | u64 dl_parent_blkno; | ||
| 38 | }; | ||
| 39 | 34 | ||
| 40 | /* | 35 | /* |
| 41 | * The ocfs2_dentry_lock keeps an inode reference until | 36 | * The ocfs2_dentry_lock keeps an inode reference until |
| @@ -49,14 +44,9 @@ struct ocfs2_dentry_lock { | |||
| 49 | int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, | 44 | int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, |
| 50 | u64 parent_blkno); | 45 | u64 parent_blkno); |
| 51 | 46 | ||
| 52 | extern spinlock_t dentry_list_lock; | ||
| 53 | |||
| 54 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, | 47 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, |
| 55 | struct ocfs2_dentry_lock *dl); | 48 | struct ocfs2_dentry_lock *dl); |
| 56 | 49 | ||
| 57 | void ocfs2_drop_dl_inodes(struct work_struct *work); | ||
| 58 | void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb); | ||
| 59 | |||
| 60 | struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, | 50 | struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, |
| 61 | int skip_unhashed); | 51 | int skip_unhashed); |
| 62 | 52 | ||
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 91a7e85ac8fd..0717662b4aef 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
| @@ -2957,6 +2957,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, | |||
| 2957 | ocfs2_init_dir_trailer(dir, dirdata_bh, i); | 2957 | ocfs2_init_dir_trailer(dir, dirdata_bh, i); |
| 2958 | } | 2958 | } |
| 2959 | 2959 | ||
| 2960 | ocfs2_update_inode_fsync_trans(handle, dir, 1); | ||
| 2960 | ocfs2_journal_dirty(handle, dirdata_bh); | 2961 | ocfs2_journal_dirty(handle, dirdata_bh); |
| 2961 | 2962 | ||
| 2962 | if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { | 2963 | if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { |
| @@ -3005,6 +3006,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, | |||
| 3005 | di->i_size = cpu_to_le64(sb->s_blocksize); | 3006 | di->i_size = cpu_to_le64(sb->s_blocksize); |
| 3006 | di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec); | 3007 | di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec); |
| 3007 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec); | 3008 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec); |
| 3009 | ocfs2_update_inode_fsync_trans(handle, dir, 1); | ||
| 3008 | 3010 | ||
| 3009 | /* | 3011 | /* |
| 3010 | * This should never fail as our extent list is empty and all | 3012 | * This should never fail as our extent list is empty and all |
| @@ -3338,6 +3340,7 @@ do_extend: | |||
| 3338 | } else { | 3340 | } else { |
| 3339 | de->rec_len = cpu_to_le16(sb->s_blocksize); | 3341 | de->rec_len = cpu_to_le16(sb->s_blocksize); |
| 3340 | } | 3342 | } |
| 3343 | ocfs2_update_inode_fsync_trans(handle, dir, 1); | ||
| 3341 | ocfs2_journal_dirty(handle, new_bh); | 3344 | ocfs2_journal_dirty(handle, new_bh); |
| 3342 | 3345 | ||
| 3343 | dir_i_size += dir->i_sb->s_blocksize; | 3346 | dir_i_size += dir->i_sb->s_blocksize; |
| @@ -3896,6 +3899,7 @@ out_commit: | |||
| 3896 | dquot_free_space_nodirty(dir, | 3899 | dquot_free_space_nodirty(dir, |
| 3897 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); | 3900 | ocfs2_clusters_to_bytes(dir->i_sb, 1)); |
| 3898 | 3901 | ||
| 3902 | ocfs2_update_inode_fsync_trans(handle, dir, 1); | ||
| 3899 | ocfs2_commit_trans(osb, handle); | 3903 | ocfs2_commit_trans(osb, handle); |
| 3900 | 3904 | ||
| 3901 | out: | 3905 | out: |
| @@ -4134,6 +4138,7 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir, | |||
| 4134 | mlog_errno(ret); | 4138 | mlog_errno(ret); |
| 4135 | did_quota = 0; | 4139 | did_quota = 0; |
| 4136 | 4140 | ||
| 4141 | ocfs2_update_inode_fsync_trans(handle, dir, 1); | ||
| 4137 | ocfs2_journal_dirty(handle, dx_root_bh); | 4142 | ocfs2_journal_dirty(handle, dx_root_bh); |
| 4138 | 4143 | ||
| 4139 | out_commit: | 4144 | out_commit: |
| @@ -4401,6 +4406,7 @@ static int ocfs2_dx_dir_remove_index(struct inode *dir, | |||
| 4401 | di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); | 4406 | di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); |
| 4402 | spin_unlock(&OCFS2_I(dir)->ip_lock); | 4407 | spin_unlock(&OCFS2_I(dir)->ip_lock); |
| 4403 | di->i_dx_root = cpu_to_le64(0ULL); | 4408 | di->i_dx_root = cpu_to_le64(0ULL); |
| 4409 | ocfs2_update_inode_fsync_trans(handle, dir, 1); | ||
| 4404 | 4410 | ||
| 4405 | ocfs2_journal_dirty(handle, di_bh); | 4411 | ocfs2_journal_dirty(handle, di_bh); |
| 4406 | 4412 | ||
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 33660a4a52fa..c973690dc0bc 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
| @@ -1123,7 +1123,6 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
| 1123 | struct dlm_ctxt *dlm = NULL; | 1123 | struct dlm_ctxt *dlm = NULL; |
| 1124 | char *local = NULL; | 1124 | char *local = NULL; |
| 1125 | int status = 0; | 1125 | int status = 0; |
| 1126 | int locked = 0; | ||
| 1127 | 1126 | ||
| 1128 | qr = (struct dlm_query_region *) msg->buf; | 1127 | qr = (struct dlm_query_region *) msg->buf; |
| 1129 | 1128 | ||
| @@ -1132,10 +1131,8 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
| 1132 | 1131 | ||
| 1133 | /* buffer used in dlm_mast_regions() */ | 1132 | /* buffer used in dlm_mast_regions() */ |
| 1134 | local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); | 1133 | local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); |
| 1135 | if (!local) { | 1134 | if (!local) |
| 1136 | status = -ENOMEM; | 1135 | return -ENOMEM; |
| 1137 | goto bail; | ||
| 1138 | } | ||
| 1139 | 1136 | ||
| 1140 | status = -EINVAL; | 1137 | status = -EINVAL; |
| 1141 | 1138 | ||
| @@ -1144,16 +1141,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
| 1144 | if (!dlm) { | 1141 | if (!dlm) { |
| 1145 | mlog(ML_ERROR, "Node %d queried hb regions on domain %s " | 1142 | mlog(ML_ERROR, "Node %d queried hb regions on domain %s " |
| 1146 | "before join domain\n", qr->qr_node, qr->qr_domain); | 1143 | "before join domain\n", qr->qr_node, qr->qr_domain); |
| 1147 | goto bail; | 1144 | goto out_domain_lock; |
| 1148 | } | 1145 | } |
| 1149 | 1146 | ||
| 1150 | spin_lock(&dlm->spinlock); | 1147 | spin_lock(&dlm->spinlock); |
| 1151 | locked = 1; | ||
| 1152 | if (dlm->joining_node != qr->qr_node) { | 1148 | if (dlm->joining_node != qr->qr_node) { |
| 1153 | mlog(ML_ERROR, "Node %d queried hb regions on domain %s " | 1149 | mlog(ML_ERROR, "Node %d queried hb regions on domain %s " |
| 1154 | "but joining node is %d\n", qr->qr_node, qr->qr_domain, | 1150 | "but joining node is %d\n", qr->qr_node, qr->qr_domain, |
| 1155 | dlm->joining_node); | 1151 | dlm->joining_node); |
| 1156 | goto bail; | 1152 | goto out_dlm_lock; |
| 1157 | } | 1153 | } |
| 1158 | 1154 | ||
| 1159 | /* Support for global heartbeat was added in 1.1 */ | 1155 | /* Support for global heartbeat was added in 1.1 */ |
| @@ -1163,14 +1159,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
| 1163 | "but active dlm protocol is %d.%d\n", qr->qr_node, | 1159 | "but active dlm protocol is %d.%d\n", qr->qr_node, |
| 1164 | qr->qr_domain, dlm->dlm_locking_proto.pv_major, | 1160 | qr->qr_domain, dlm->dlm_locking_proto.pv_major, |
| 1165 | dlm->dlm_locking_proto.pv_minor); | 1161 | dlm->dlm_locking_proto.pv_minor); |
| 1166 | goto bail; | 1162 | goto out_dlm_lock; |
| 1167 | } | 1163 | } |
| 1168 | 1164 | ||
| 1169 | status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions)); | 1165 | status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions)); |
| 1170 | 1166 | ||
| 1171 | bail: | 1167 | out_dlm_lock: |
| 1172 | if (locked) | 1168 | spin_unlock(&dlm->spinlock); |
| 1173 | spin_unlock(&dlm->spinlock); | 1169 | |
| 1170 | out_domain_lock: | ||
| 1174 | spin_unlock(&dlm_domain_lock); | 1171 | spin_unlock(&dlm_domain_lock); |
| 1175 | 1172 | ||
| 1176 | kfree(local); | 1173 | kfree(local); |
| @@ -1877,19 +1874,19 @@ static int dlm_join_domain(struct dlm_ctxt *dlm) | |||
| 1877 | goto bail; | 1874 | goto bail; |
| 1878 | } | 1875 | } |
| 1879 | 1876 | ||
| 1880 | status = dlm_debug_init(dlm); | 1877 | status = dlm_launch_thread(dlm); |
| 1881 | if (status < 0) { | 1878 | if (status < 0) { |
| 1882 | mlog_errno(status); | 1879 | mlog_errno(status); |
| 1883 | goto bail; | 1880 | goto bail; |
| 1884 | } | 1881 | } |
| 1885 | 1882 | ||
| 1886 | status = dlm_launch_thread(dlm); | 1883 | status = dlm_launch_recovery_thread(dlm); |
| 1887 | if (status < 0) { | 1884 | if (status < 0) { |
| 1888 | mlog_errno(status); | 1885 | mlog_errno(status); |
| 1889 | goto bail; | 1886 | goto bail; |
| 1890 | } | 1887 | } |
| 1891 | 1888 | ||
| 1892 | status = dlm_launch_recovery_thread(dlm); | 1889 | status = dlm_debug_init(dlm); |
| 1893 | if (status < 0) { | 1890 | if (status < 0) { |
| 1894 | mlog_errno(status); | 1891 | mlog_errno(status); |
| 1895 | goto bail; | 1892 | goto bail; |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 7035af09cc03..fe29f7978f81 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
| @@ -537,7 +537,10 @@ master_here: | |||
| 537 | /* success! see if any other nodes need recovery */ | 537 | /* success! see if any other nodes need recovery */ |
| 538 | mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", | 538 | mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", |
| 539 | dlm->name, dlm->reco.dead_node, dlm->node_num); | 539 | dlm->name, dlm->reco.dead_node, dlm->node_num); |
| 540 | dlm_reset_recovery(dlm); | 540 | spin_lock(&dlm->spinlock); |
| 541 | __dlm_reset_recovery(dlm); | ||
| 542 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; | ||
| 543 | spin_unlock(&dlm->spinlock); | ||
| 541 | } | 544 | } |
| 542 | dlm_end_recovery(dlm); | 545 | dlm_end_recovery(dlm); |
| 543 | 546 | ||
| @@ -695,6 +698,14 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | |||
| 695 | if (all_nodes_done) { | 698 | if (all_nodes_done) { |
| 696 | int ret; | 699 | int ret; |
| 697 | 700 | ||
| 701 | /* Set this flag on recovery master to avoid | ||
| 702 | * a new recovery for another dead node start | ||
| 703 | * before the recovery is not done. That may | ||
| 704 | * cause recovery hung.*/ | ||
| 705 | spin_lock(&dlm->spinlock); | ||
| 706 | dlm->reco.state |= DLM_RECO_STATE_FINALIZE; | ||
| 707 | spin_unlock(&dlm->spinlock); | ||
| 708 | |||
| 698 | /* all nodes are now in DLM_RECO_NODE_DATA_DONE state | 709 | /* all nodes are now in DLM_RECO_NODE_DATA_DONE state |
| 699 | * just send a finalize message to everyone and | 710 | * just send a finalize message to everyone and |
| 700 | * clean up */ | 711 | * clean up */ |
| @@ -1750,13 +1761,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | |||
| 1750 | struct dlm_migratable_lockres *mres) | 1761 | struct dlm_migratable_lockres *mres) |
| 1751 | { | 1762 | { |
| 1752 | struct dlm_migratable_lock *ml; | 1763 | struct dlm_migratable_lock *ml; |
| 1753 | struct list_head *queue; | 1764 | struct list_head *queue, *iter; |
| 1754 | struct list_head *tmpq = NULL; | 1765 | struct list_head *tmpq = NULL; |
| 1755 | struct dlm_lock *newlock = NULL; | 1766 | struct dlm_lock *newlock = NULL; |
| 1756 | struct dlm_lockstatus *lksb = NULL; | 1767 | struct dlm_lockstatus *lksb = NULL; |
| 1757 | int ret = 0; | 1768 | int ret = 0; |
| 1758 | int i, j, bad; | 1769 | int i, j, bad; |
| 1759 | struct dlm_lock *lock = NULL; | 1770 | struct dlm_lock *lock; |
| 1760 | u8 from = O2NM_MAX_NODES; | 1771 | u8 from = O2NM_MAX_NODES; |
| 1761 | unsigned int added = 0; | 1772 | unsigned int added = 0; |
| 1762 | __be64 c; | 1773 | __be64 c; |
| @@ -1791,14 +1802,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | |||
| 1791 | /* MIGRATION ONLY! */ | 1802 | /* MIGRATION ONLY! */ |
| 1792 | BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); | 1803 | BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); |
| 1793 | 1804 | ||
| 1805 | lock = NULL; | ||
| 1794 | spin_lock(&res->spinlock); | 1806 | spin_lock(&res->spinlock); |
| 1795 | for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { | 1807 | for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { |
| 1796 | tmpq = dlm_list_idx_to_ptr(res, j); | 1808 | tmpq = dlm_list_idx_to_ptr(res, j); |
| 1797 | list_for_each_entry(lock, tmpq, list) { | 1809 | list_for_each(iter, tmpq) { |
| 1798 | if (lock->ml.cookie != ml->cookie) | 1810 | lock = list_entry(iter, |
| 1799 | lock = NULL; | 1811 | struct dlm_lock, list); |
| 1800 | else | 1812 | if (lock->ml.cookie == ml->cookie) |
| 1801 | break; | 1813 | break; |
| 1814 | lock = NULL; | ||
| 1802 | } | 1815 | } |
| 1803 | if (lock) | 1816 | if (lock) |
| 1804 | break; | 1817 | break; |
| @@ -2882,8 +2895,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, | |||
| 2882 | BUG(); | 2895 | BUG(); |
| 2883 | } | 2896 | } |
| 2884 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; | 2897 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; |
| 2898 | __dlm_reset_recovery(dlm); | ||
| 2885 | spin_unlock(&dlm->spinlock); | 2899 | spin_unlock(&dlm->spinlock); |
| 2886 | dlm_reset_recovery(dlm); | ||
| 2887 | dlm_kick_recovery_thread(dlm); | 2900 | dlm_kick_recovery_thread(dlm); |
| 2888 | break; | 2901 | break; |
| 2889 | default: | 2902 | default: |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 19986959d149..6bd690b5a061 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
| @@ -3144,22 +3144,60 @@ out: | |||
| 3144 | return 0; | 3144 | return 0; |
| 3145 | } | 3145 | } |
| 3146 | 3146 | ||
| 3147 | static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, | ||
| 3148 | struct ocfs2_lock_res *lockres); | ||
| 3149 | |||
| 3147 | /* Mark the lockres as being dropped. It will no longer be | 3150 | /* Mark the lockres as being dropped. It will no longer be |
| 3148 | * queued if blocking, but we still may have to wait on it | 3151 | * queued if blocking, but we still may have to wait on it |
| 3149 | * being dequeued from the downconvert thread before we can consider | 3152 | * being dequeued from the downconvert thread before we can consider |
| 3150 | * it safe to drop. | 3153 | * it safe to drop. |
| 3151 | * | 3154 | * |
| 3152 | * You can *not* attempt to call cluster_lock on this lockres anymore. */ | 3155 | * You can *not* attempt to call cluster_lock on this lockres anymore. */ |
| 3153 | void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) | 3156 | void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb, |
| 3157 | struct ocfs2_lock_res *lockres) | ||
| 3154 | { | 3158 | { |
| 3155 | int status; | 3159 | int status; |
| 3156 | struct ocfs2_mask_waiter mw; | 3160 | struct ocfs2_mask_waiter mw; |
| 3157 | unsigned long flags; | 3161 | unsigned long flags, flags2; |
| 3158 | 3162 | ||
| 3159 | ocfs2_init_mask_waiter(&mw); | 3163 | ocfs2_init_mask_waiter(&mw); |
| 3160 | 3164 | ||
| 3161 | spin_lock_irqsave(&lockres->l_lock, flags); | 3165 | spin_lock_irqsave(&lockres->l_lock, flags); |
| 3162 | lockres->l_flags |= OCFS2_LOCK_FREEING; | 3166 | lockres->l_flags |= OCFS2_LOCK_FREEING; |
| 3167 | if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) { | ||
| 3168 | /* | ||
| 3169 | * We know the downconvert is queued but not in progress | ||
| 3170 | * because we are the downconvert thread and processing | ||
| 3171 | * different lock. So we can just remove the lock from the | ||
| 3172 | * queue. This is not only an optimization but also a way | ||
| 3173 | * to avoid the following deadlock: | ||
| 3174 | * ocfs2_dentry_post_unlock() | ||
| 3175 | * ocfs2_dentry_lock_put() | ||
| 3176 | * ocfs2_drop_dentry_lock() | ||
| 3177 | * iput() | ||
| 3178 | * ocfs2_evict_inode() | ||
| 3179 | * ocfs2_clear_inode() | ||
| 3180 | * ocfs2_mark_lockres_freeing() | ||
| 3181 | * ... blocks waiting for OCFS2_LOCK_QUEUED | ||
| 3182 | * since we are the downconvert thread which | ||
| 3183 | * should clear the flag. | ||
| 3184 | */ | ||
| 3185 | spin_unlock_irqrestore(&lockres->l_lock, flags); | ||
| 3186 | spin_lock_irqsave(&osb->dc_task_lock, flags2); | ||
| 3187 | list_del_init(&lockres->l_blocked_list); | ||
| 3188 | osb->blocked_lock_count--; | ||
| 3189 | spin_unlock_irqrestore(&osb->dc_task_lock, flags2); | ||
| 3190 | /* | ||
| 3191 | * Warn if we recurse into another post_unlock call. Strictly | ||
| 3192 | * speaking it isn't a problem but we need to be careful if | ||
| 3193 | * that happens (stack overflow, deadlocks, ...) so warn if | ||
| 3194 | * ocfs2 grows a path for which this can happen. | ||
| 3195 | */ | ||
| 3196 | WARN_ON_ONCE(lockres->l_ops->post_unlock); | ||
| 3197 | /* Since the lock is freeing we don't do much in the fn below */ | ||
| 3198 | ocfs2_process_blocked_lock(osb, lockres); | ||
| 3199 | return; | ||
| 3200 | } | ||
| 3163 | while (lockres->l_flags & OCFS2_LOCK_QUEUED) { | 3201 | while (lockres->l_flags & OCFS2_LOCK_QUEUED) { |
| 3164 | lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0); | 3202 | lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0); |
| 3165 | spin_unlock_irqrestore(&lockres->l_lock, flags); | 3203 | spin_unlock_irqrestore(&lockres->l_lock, flags); |
| @@ -3180,7 +3218,7 @@ void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, | |||
| 3180 | { | 3218 | { |
| 3181 | int ret; | 3219 | int ret; |
| 3182 | 3220 | ||
| 3183 | ocfs2_mark_lockres_freeing(lockres); | 3221 | ocfs2_mark_lockres_freeing(osb, lockres); |
| 3184 | ret = ocfs2_drop_lock(osb, lockres); | 3222 | ret = ocfs2_drop_lock(osb, lockres); |
| 3185 | if (ret) | 3223 | if (ret) |
| 3186 | mlog_errno(ret); | 3224 | mlog_errno(ret); |
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 1d596d8c4a4a..d293a22c32c5 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h | |||
| @@ -157,7 +157,8 @@ int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex); | |||
| 157 | void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex); | 157 | void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex); |
| 158 | 158 | ||
| 159 | 159 | ||
| 160 | void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres); | 160 | void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb, |
| 161 | struct ocfs2_lock_res *lockres); | ||
| 161 | void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, | 162 | void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, |
| 162 | struct ocfs2_lock_res *lockres); | 163 | struct ocfs2_lock_res *lockres); |
| 163 | 164 | ||
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 51632c40e896..ff33c5ef87f2 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
| @@ -175,9 +175,13 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, | |||
| 175 | int datasync) | 175 | int datasync) |
| 176 | { | 176 | { |
| 177 | int err = 0; | 177 | int err = 0; |
| 178 | journal_t *journal; | ||
| 179 | struct inode *inode = file->f_mapping->host; | 178 | struct inode *inode = file->f_mapping->host; |
| 180 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 179 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
| 180 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | ||
| 181 | journal_t *journal = osb->journal->j_journal; | ||
| 182 | int ret; | ||
| 183 | tid_t commit_tid; | ||
| 184 | bool needs_barrier = false; | ||
| 181 | 185 | ||
| 182 | trace_ocfs2_sync_file(inode, file, file->f_path.dentry, | 186 | trace_ocfs2_sync_file(inode, file, file->f_path.dentry, |
| 183 | OCFS2_I(inode)->ip_blkno, | 187 | OCFS2_I(inode)->ip_blkno, |
| @@ -192,29 +196,19 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, | |||
| 192 | if (err) | 196 | if (err) |
| 193 | return err; | 197 | return err; |
| 194 | 198 | ||
| 195 | /* | 199 | commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid; |
| 196 | * Probably don't need the i_mutex at all in here, just putting it here | 200 | if (journal->j_flags & JBD2_BARRIER && |
| 197 | * to be consistent with how fsync used to be called, someone more | 201 | !jbd2_trans_will_send_data_barrier(journal, commit_tid)) |
| 198 | * familiar with the fs could possibly remove it. | 202 | needs_barrier = true; |
| 199 | */ | 203 | err = jbd2_complete_transaction(journal, commit_tid); |
| 200 | mutex_lock(&inode->i_mutex); | 204 | if (needs_barrier) { |
| 201 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { | 205 | ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); |
| 202 | /* | 206 | if (!err) |
| 203 | * We still have to flush drive's caches to get data to the | 207 | err = ret; |
| 204 | * platter | ||
| 205 | */ | ||
| 206 | if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) | ||
| 207 | blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); | ||
| 208 | goto bail; | ||
| 209 | } | 208 | } |
| 210 | 209 | ||
| 211 | journal = osb->journal->j_journal; | ||
| 212 | err = jbd2_journal_force_commit(journal); | ||
| 213 | |||
| 214 | bail: | ||
| 215 | if (err) | 210 | if (err) |
| 216 | mlog_errno(err); | 211 | mlog_errno(err); |
| 217 | mutex_unlock(&inode->i_mutex); | ||
| 218 | 212 | ||
| 219 | return (err < 0) ? -EIO : 0; | 213 | return (err < 0) ? -EIO : 0; |
| 220 | } | 214 | } |
| @@ -292,6 +286,7 @@ int ocfs2_update_inode_atime(struct inode *inode, | |||
| 292 | inode->i_atime = CURRENT_TIME; | 286 | inode->i_atime = CURRENT_TIME; |
| 293 | di->i_atime = cpu_to_le64(inode->i_atime.tv_sec); | 287 | di->i_atime = cpu_to_le64(inode->i_atime.tv_sec); |
| 294 | di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec); | 288 | di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec); |
| 289 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 295 | ocfs2_journal_dirty(handle, bh); | 290 | ocfs2_journal_dirty(handle, bh); |
| 296 | 291 | ||
| 297 | out_commit: | 292 | out_commit: |
| @@ -341,6 +336,7 @@ int ocfs2_simple_size_update(struct inode *inode, | |||
| 341 | if (ret < 0) | 336 | if (ret < 0) |
| 342 | mlog_errno(ret); | 337 | mlog_errno(ret); |
| 343 | 338 | ||
| 339 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 344 | ocfs2_commit_trans(osb, handle); | 340 | ocfs2_commit_trans(osb, handle); |
| 345 | out: | 341 | out: |
| 346 | return ret; | 342 | return ret; |
| @@ -435,6 +431,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, | |||
| 435 | di->i_size = cpu_to_le64(new_i_size); | 431 | di->i_size = cpu_to_le64(new_i_size); |
| 436 | di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); | 432 | di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); |
| 437 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | 433 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); |
| 434 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 438 | 435 | ||
| 439 | ocfs2_journal_dirty(handle, fe_bh); | 436 | ocfs2_journal_dirty(handle, fe_bh); |
| 440 | 437 | ||
| @@ -650,7 +647,7 @@ restarted_transaction: | |||
| 650 | mlog_errno(status); | 647 | mlog_errno(status); |
| 651 | goto leave; | 648 | goto leave; |
| 652 | } | 649 | } |
| 653 | 650 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | |
| 654 | ocfs2_journal_dirty(handle, bh); | 651 | ocfs2_journal_dirty(handle, bh); |
| 655 | 652 | ||
| 656 | spin_lock(&OCFS2_I(inode)->ip_lock); | 653 | spin_lock(&OCFS2_I(inode)->ip_lock); |
| @@ -743,6 +740,7 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode, | |||
| 743 | OCFS2_JOURNAL_ACCESS_WRITE); | 740 | OCFS2_JOURNAL_ACCESS_WRITE); |
| 744 | if (ret) | 741 | if (ret) |
| 745 | mlog_errno(ret); | 742 | mlog_errno(ret); |
| 743 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 746 | 744 | ||
| 747 | out: | 745 | out: |
| 748 | if (ret) { | 746 | if (ret) { |
| @@ -840,6 +838,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, | |||
| 840 | di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); | 838 | di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); |
| 841 | di->i_mtime_nsec = di->i_ctime_nsec; | 839 | di->i_mtime_nsec = di->i_ctime_nsec; |
| 842 | ocfs2_journal_dirty(handle, di_bh); | 840 | ocfs2_journal_dirty(handle, di_bh); |
| 841 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 843 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); | 842 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); |
| 844 | } | 843 | } |
| 845 | 844 | ||
| @@ -1344,6 +1343,7 @@ static int __ocfs2_write_remove_suid(struct inode *inode, | |||
| 1344 | 1343 | ||
| 1345 | di = (struct ocfs2_dinode *) bh->b_data; | 1344 | di = (struct ocfs2_dinode *) bh->b_data; |
| 1346 | di->i_mode = cpu_to_le16(inode->i_mode); | 1345 | di->i_mode = cpu_to_le16(inode->i_mode); |
| 1346 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 1347 | 1347 | ||
| 1348 | ocfs2_journal_dirty(handle, bh); | 1348 | ocfs2_journal_dirty(handle, bh); |
| 1349 | 1349 | ||
| @@ -1576,6 +1576,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, | |||
| 1576 | if (ret) | 1576 | if (ret) |
| 1577 | mlog_errno(ret); | 1577 | mlog_errno(ret); |
| 1578 | } | 1578 | } |
| 1579 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 1579 | 1580 | ||
| 1580 | ocfs2_commit_trans(osb, handle); | 1581 | ocfs2_commit_trans(osb, handle); |
| 1581 | out: | 1582 | out: |
| @@ -2061,13 +2062,6 @@ out: | |||
| 2061 | return ret; | 2062 | return ret; |
| 2062 | } | 2063 | } |
| 2063 | 2064 | ||
| 2064 | static void ocfs2_aiodio_wait(struct inode *inode) | ||
| 2065 | { | ||
| 2066 | wait_queue_head_t *wq = ocfs2_ioend_wq(inode); | ||
| 2067 | |||
| 2068 | wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0)); | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) | 2065 | static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) |
| 2072 | { | 2066 | { |
| 2073 | int blockmask = inode->i_sb->s_blocksize - 1; | 2067 | int blockmask = inode->i_sb->s_blocksize - 1; |
| @@ -2345,10 +2339,8 @@ relock: | |||
| 2345 | * Wait on previous unaligned aio to complete before | 2339 | * Wait on previous unaligned aio to complete before |
| 2346 | * proceeding. | 2340 | * proceeding. |
| 2347 | */ | 2341 | */ |
| 2348 | ocfs2_aiodio_wait(inode); | 2342 | mutex_lock(&OCFS2_I(inode)->ip_unaligned_aio); |
| 2349 | 2343 | /* Mark the iocb as needing an unlock in ocfs2_dio_end_io */ | |
| 2350 | /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */ | ||
| 2351 | atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio); | ||
| 2352 | ocfs2_iocb_set_unaligned_aio(iocb); | 2344 | ocfs2_iocb_set_unaligned_aio(iocb); |
| 2353 | } | 2345 | } |
| 2354 | 2346 | ||
| @@ -2428,7 +2420,7 @@ out_dio: | |||
| 2428 | 2420 | ||
| 2429 | if (unaligned_dio) { | 2421 | if (unaligned_dio) { |
| 2430 | ocfs2_iocb_clear_unaligned_aio(iocb); | 2422 | ocfs2_iocb_clear_unaligned_aio(iocb); |
| 2431 | atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio); | 2423 | mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio); |
| 2432 | } | 2424 | } |
| 2433 | 2425 | ||
| 2434 | out: | 2426 | out: |
| @@ -2645,7 +2637,16 @@ static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence) | |||
| 2645 | case SEEK_SET: | 2637 | case SEEK_SET: |
| 2646 | break; | 2638 | break; |
| 2647 | case SEEK_END: | 2639 | case SEEK_END: |
| 2648 | offset += inode->i_size; | 2640 | /* SEEK_END requires the OCFS2 inode lock for the file |
| 2641 | * because it references the file's size. | ||
| 2642 | */ | ||
| 2643 | ret = ocfs2_inode_lock(inode, NULL, 0); | ||
| 2644 | if (ret < 0) { | ||
| 2645 | mlog_errno(ret); | ||
| 2646 | goto out; | ||
| 2647 | } | ||
| 2648 | offset += i_size_read(inode); | ||
| 2649 | ocfs2_inode_unlock(inode, 0); | ||
| 2649 | break; | 2650 | break; |
| 2650 | case SEEK_CUR: | 2651 | case SEEK_CUR: |
| 2651 | if (offset == 0) { | 2652 | if (offset == 0) { |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index f29a90fde619..437de7f768c6 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
| @@ -130,6 +130,7 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, | |||
| 130 | struct inode *inode = NULL; | 130 | struct inode *inode = NULL; |
| 131 | struct super_block *sb = osb->sb; | 131 | struct super_block *sb = osb->sb; |
| 132 | struct ocfs2_find_inode_args args; | 132 | struct ocfs2_find_inode_args args; |
| 133 | journal_t *journal = OCFS2_SB(sb)->journal->j_journal; | ||
| 133 | 134 | ||
| 134 | trace_ocfs2_iget_begin((unsigned long long)blkno, flags, | 135 | trace_ocfs2_iget_begin((unsigned long long)blkno, flags, |
| 135 | sysfile_type); | 136 | sysfile_type); |
| @@ -169,6 +170,32 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, | |||
| 169 | goto bail; | 170 | goto bail; |
| 170 | } | 171 | } |
| 171 | 172 | ||
| 173 | /* | ||
| 174 | * Set transaction id's of transactions that have to be committed | ||
| 175 | * to finish f[data]sync. We set them to currently running transaction | ||
| 176 | * as we cannot be sure that the inode or some of its metadata isn't | ||
| 177 | * part of the transaction - the inode could have been reclaimed and | ||
| 178 | * now it is reread from disk. | ||
| 179 | */ | ||
| 180 | if (journal) { | ||
| 181 | transaction_t *transaction; | ||
| 182 | tid_t tid; | ||
| 183 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | ||
| 184 | |||
| 185 | read_lock(&journal->j_state_lock); | ||
| 186 | if (journal->j_running_transaction) | ||
| 187 | transaction = journal->j_running_transaction; | ||
| 188 | else | ||
| 189 | transaction = journal->j_committing_transaction; | ||
| 190 | if (transaction) | ||
| 191 | tid = transaction->t_tid; | ||
| 192 | else | ||
| 193 | tid = journal->j_commit_sequence; | ||
| 194 | read_unlock(&journal->j_state_lock); | ||
| 195 | oi->i_sync_tid = tid; | ||
| 196 | oi->i_datasync_tid = tid; | ||
| 197 | } | ||
| 198 | |||
| 172 | bail: | 199 | bail: |
| 173 | if (!IS_ERR(inode)) { | 200 | if (!IS_ERR(inode)) { |
| 174 | trace_ocfs2_iget_end(inode, | 201 | trace_ocfs2_iget_end(inode, |
| @@ -804,11 +831,13 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode) | |||
| 804 | goto bail; | 831 | goto bail; |
| 805 | } | 832 | } |
| 806 | 833 | ||
| 807 | /* If we're coming from downconvert_thread we can't go into our own | 834 | /* |
| 808 | * voting [hello, deadlock city!], so unforuntately we just | 835 | * If we're coming from downconvert_thread we can't go into our own |
| 809 | * have to skip deleting this guy. That's OK though because | 836 | * voting [hello, deadlock city!] so we cannot delete the inode. But |
| 810 | * the node who's doing the actual deleting should handle it | 837 | * since we dropped last inode ref when downconverting dentry lock, |
| 811 | * anyway. */ | 838 | * we cannot have the file open and thus the node doing unlink will |
| 839 | * take care of deleting the inode. | ||
| 840 | */ | ||
| 812 | if (current == osb->dc_task) | 841 | if (current == osb->dc_task) |
| 813 | goto bail; | 842 | goto bail; |
| 814 | 843 | ||
| @@ -822,12 +851,6 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode) | |||
| 822 | goto bail_unlock; | 851 | goto bail_unlock; |
| 823 | } | 852 | } |
| 824 | 853 | ||
| 825 | /* If we have allowd wipe of this inode for another node, it | ||
| 826 | * will be marked here so we can safely skip it. Recovery will | ||
| 827 | * cleanup any inodes we might inadvertently skip here. */ | ||
| 828 | if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) | ||
| 829 | goto bail_unlock; | ||
| 830 | |||
| 831 | ret = 1; | 854 | ret = 1; |
| 832 | bail_unlock: | 855 | bail_unlock: |
| 833 | spin_unlock(&oi->ip_lock); | 856 | spin_unlock(&oi->ip_lock); |
| @@ -941,7 +964,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode, | |||
| 941 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); | 964 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); |
| 942 | if (sync_data) | 965 | if (sync_data) |
| 943 | filemap_write_and_wait(inode->i_mapping); | 966 | filemap_write_and_wait(inode->i_mapping); |
| 944 | truncate_inode_pages(&inode->i_data, 0); | 967 | truncate_inode_pages_final(&inode->i_data); |
| 945 | } | 968 | } |
| 946 | 969 | ||
| 947 | static void ocfs2_delete_inode(struct inode *inode) | 970 | static void ocfs2_delete_inode(struct inode *inode) |
| @@ -960,8 +983,6 @@ static void ocfs2_delete_inode(struct inode *inode) | |||
| 960 | if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) | 983 | if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) |
| 961 | goto bail; | 984 | goto bail; |
| 962 | 985 | ||
| 963 | dquot_initialize(inode); | ||
| 964 | |||
| 965 | if (!ocfs2_inode_is_valid_to_delete(inode)) { | 986 | if (!ocfs2_inode_is_valid_to_delete(inode)) { |
| 966 | /* It's probably not necessary to truncate_inode_pages | 987 | /* It's probably not necessary to truncate_inode_pages |
| 967 | * here but we do it for safety anyway (it will most | 988 | * here but we do it for safety anyway (it will most |
| @@ -970,6 +991,8 @@ static void ocfs2_delete_inode(struct inode *inode) | |||
| 970 | goto bail; | 991 | goto bail; |
| 971 | } | 992 | } |
| 972 | 993 | ||
| 994 | dquot_initialize(inode); | ||
| 995 | |||
| 973 | /* We want to block signals in delete_inode as the lock and | 996 | /* We want to block signals in delete_inode as the lock and |
| 974 | * messaging paths may return us -ERESTARTSYS. Which would | 997 | * messaging paths may return us -ERESTARTSYS. Which would |
| 975 | * cause us to exit early, resulting in inodes being orphaned | 998 | * cause us to exit early, resulting in inodes being orphaned |
| @@ -1057,6 +1080,7 @@ static void ocfs2_clear_inode(struct inode *inode) | |||
| 1057 | { | 1080 | { |
| 1058 | int status; | 1081 | int status; |
| 1059 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 1082 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
| 1083 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
| 1060 | 1084 | ||
| 1061 | clear_inode(inode); | 1085 | clear_inode(inode); |
| 1062 | trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno, | 1086 | trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno, |
| @@ -1073,9 +1097,9 @@ static void ocfs2_clear_inode(struct inode *inode) | |||
| 1073 | 1097 | ||
| 1074 | /* Do these before all the other work so that we don't bounce | 1098 | /* Do these before all the other work so that we don't bounce |
| 1075 | * the downconvert thread while waiting to destroy the locks. */ | 1099 | * the downconvert thread while waiting to destroy the locks. */ |
| 1076 | ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres); | 1100 | ocfs2_mark_lockres_freeing(osb, &oi->ip_rw_lockres); |
| 1077 | ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres); | 1101 | ocfs2_mark_lockres_freeing(osb, &oi->ip_inode_lockres); |
| 1078 | ocfs2_mark_lockres_freeing(&oi->ip_open_lockres); | 1102 | ocfs2_mark_lockres_freeing(osb, &oi->ip_open_lockres); |
| 1079 | 1103 | ||
| 1080 | ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap, | 1104 | ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap, |
| 1081 | &oi->ip_la_data_resv); | 1105 | &oi->ip_la_data_resv); |
| @@ -1157,7 +1181,7 @@ void ocfs2_evict_inode(struct inode *inode) | |||
| 1157 | (OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) { | 1181 | (OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) { |
| 1158 | ocfs2_delete_inode(inode); | 1182 | ocfs2_delete_inode(inode); |
| 1159 | } else { | 1183 | } else { |
| 1160 | truncate_inode_pages(&inode->i_data, 0); | 1184 | truncate_inode_pages_final(&inode->i_data); |
| 1161 | } | 1185 | } |
| 1162 | ocfs2_clear_inode(inode); | 1186 | ocfs2_clear_inode(inode); |
| 1163 | } | 1187 | } |
| @@ -1260,6 +1284,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle, | |||
| 1260 | fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); | 1284 | fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); |
| 1261 | 1285 | ||
| 1262 | ocfs2_journal_dirty(handle, bh); | 1286 | ocfs2_journal_dirty(handle, bh); |
| 1287 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 1263 | leave: | 1288 | leave: |
| 1264 | return status; | 1289 | return status; |
| 1265 | } | 1290 | } |
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 621fc73bf23d..a6c991c0fc98 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h | |||
| @@ -44,7 +44,7 @@ struct ocfs2_inode_info | |||
| 44 | struct rw_semaphore ip_xattr_sem; | 44 | struct rw_semaphore ip_xattr_sem; |
| 45 | 45 | ||
| 46 | /* Number of outstanding AIO's which are not page aligned */ | 46 | /* Number of outstanding AIO's which are not page aligned */ |
| 47 | atomic_t ip_unaligned_aio; | 47 | struct mutex ip_unaligned_aio; |
| 48 | 48 | ||
| 49 | /* These fields are protected by ip_lock */ | 49 | /* These fields are protected by ip_lock */ |
| 50 | spinlock_t ip_lock; | 50 | spinlock_t ip_lock; |
| @@ -73,6 +73,13 @@ struct ocfs2_inode_info | |||
| 73 | u32 ip_dir_lock_gen; | 73 | u32 ip_dir_lock_gen; |
| 74 | 74 | ||
| 75 | struct ocfs2_alloc_reservation ip_la_data_resv; | 75 | struct ocfs2_alloc_reservation ip_la_data_resv; |
| 76 | |||
| 77 | /* | ||
| 78 | * Transactions that contain inode's metadata needed to complete | ||
| 79 | * fsync and fdatasync, respectively. | ||
| 80 | */ | ||
| 81 | tid_t i_sync_tid; | ||
| 82 | tid_t i_datasync_tid; | ||
| 76 | }; | 83 | }; |
| 77 | 84 | ||
| 78 | /* | 85 | /* |
| @@ -84,8 +91,6 @@ struct ocfs2_inode_info | |||
| 84 | #define OCFS2_INODE_BITMAP 0x00000004 | 91 | #define OCFS2_INODE_BITMAP 0x00000004 |
| 85 | /* This inode has been wiped from disk */ | 92 | /* This inode has been wiped from disk */ |
| 86 | #define OCFS2_INODE_DELETED 0x00000008 | 93 | #define OCFS2_INODE_DELETED 0x00000008 |
| 87 | /* Another node is deleting, so our delete is a nop */ | ||
| 88 | #define OCFS2_INODE_SKIP_DELETE 0x00000010 | ||
| 89 | /* Has the inode been orphaned on another node? | 94 | /* Has the inode been orphaned on another node? |
| 90 | * | 95 | * |
| 91 | * This hints to ocfs2_drop_inode that it should clear i_nlink before | 96 | * This hints to ocfs2_drop_inode that it should clear i_nlink before |
| @@ -100,11 +105,11 @@ struct ocfs2_inode_info | |||
| 100 | * rely on ocfs2_delete_inode to sort things out under the proper | 105 | * rely on ocfs2_delete_inode to sort things out under the proper |
| 101 | * cluster locks. | 106 | * cluster locks. |
| 102 | */ | 107 | */ |
| 103 | #define OCFS2_INODE_MAYBE_ORPHANED 0x00000020 | 108 | #define OCFS2_INODE_MAYBE_ORPHANED 0x00000010 |
| 104 | /* Does someone have the file open O_DIRECT */ | 109 | /* Does someone have the file open O_DIRECT */ |
| 105 | #define OCFS2_INODE_OPEN_DIRECT 0x00000040 | 110 | #define OCFS2_INODE_OPEN_DIRECT 0x00000020 |
| 106 | /* Tell the inode wipe code it's not in orphan dir */ | 111 | /* Tell the inode wipe code it's not in orphan dir */ |
| 107 | #define OCFS2_INODE_SKIP_ORPHAN_DIR 0x00000080 | 112 | #define OCFS2_INODE_SKIP_ORPHAN_DIR 0x00000040 |
| 108 | 113 | ||
| 109 | static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) | 114 | static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) |
| 110 | { | 115 | { |
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 8ca3c29accbf..490229f43731 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c | |||
| @@ -413,11 +413,12 @@ int ocfs2_info_handle_freeinode(struct inode *inode, | |||
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i); | 415 | status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i); |
| 416 | if (status < 0) | ||
| 417 | goto bail; | ||
| 418 | 416 | ||
| 419 | iput(inode_alloc); | 417 | iput(inode_alloc); |
| 420 | inode_alloc = NULL; | 418 | inode_alloc = NULL; |
| 419 | |||
| 420 | if (status < 0) | ||
| 421 | goto bail; | ||
| 421 | } | 422 | } |
| 422 | 423 | ||
| 423 | o2info_set_request_filled(&oifi->ifi_req); | 424 | o2info_set_request_filled(&oifi->ifi_req); |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 44fc3e530c3d..03ea9314fecd 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
| @@ -2132,12 +2132,6 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, | |||
| 2132 | iter = oi->ip_next_orphan; | 2132 | iter = oi->ip_next_orphan; |
| 2133 | 2133 | ||
| 2134 | spin_lock(&oi->ip_lock); | 2134 | spin_lock(&oi->ip_lock); |
| 2135 | /* The remote delete code may have set these on the | ||
| 2136 | * assumption that the other node would wipe them | ||
| 2137 | * successfully. If they are still in the node's | ||
| 2138 | * orphan dir, we need to reset that state. */ | ||
| 2139 | oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE); | ||
| 2140 | |||
| 2141 | /* Set the proper information to get us going into | 2135 | /* Set the proper information to get us going into |
| 2142 | * ocfs2_delete_inode. */ | 2136 | * ocfs2_delete_inode. */ |
| 2143 | oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; | 2137 | oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 9ff4e8cf9d97..7f8cde94abfe 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
| @@ -626,4 +626,15 @@ static inline int ocfs2_begin_ordered_truncate(struct inode *inode, | |||
| 626 | new_size); | 626 | new_size); |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | static inline void ocfs2_update_inode_fsync_trans(handle_t *handle, | ||
| 630 | struct inode *inode, | ||
| 631 | int datasync) | ||
| 632 | { | ||
| 633 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | ||
| 634 | |||
| 635 | oi->i_sync_tid = handle->h_transaction->t_tid; | ||
| 636 | if (datasync) | ||
| 637 | oi->i_datasync_tid = handle->h_transaction->t_tid; | ||
| 638 | } | ||
| 639 | |||
| 629 | #endif /* OCFS2_JOURNAL_H */ | 640 | #endif /* OCFS2_JOURNAL_H */ |
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index e57c804069ea..6b6d092b0998 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c | |||
| @@ -82,6 +82,8 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode, | |||
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | ret = flock_lock_file_wait(file, fl); | 84 | ret = flock_lock_file_wait(file, fl); |
| 85 | if (ret) | ||
| 86 | ocfs2_file_unlock(file); | ||
| 85 | 87 | ||
| 86 | out: | 88 | out: |
| 87 | mutex_unlock(&fp->fp_mutex); | 89 | mutex_unlock(&fp->fp_mutex); |
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 64c304d668f0..599eb4c4c8be 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c | |||
| @@ -151,6 +151,7 @@ static int __ocfs2_move_extent(handle_t *handle, | |||
| 151 | old_blkno, len); | 151 | old_blkno, len); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 154 | out: | 155 | out: |
| 155 | ocfs2_free_path(path); | 156 | ocfs2_free_path(path); |
| 156 | return ret; | 157 | return ret; |
| @@ -690,8 +691,11 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, | |||
| 690 | 691 | ||
| 691 | ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh, | 692 | ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh, |
| 692 | goal_bit, len); | 693 | goal_bit, len); |
| 693 | if (ret) | 694 | if (ret) { |
| 695 | ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len, | ||
| 696 | le16_to_cpu(gd->bg_chain)); | ||
| 694 | mlog_errno(ret); | 697 | mlog_errno(ret); |
| 698 | } | ||
| 695 | 699 | ||
| 696 | /* | 700 | /* |
| 697 | * Here we should write the new page out first if we are | 701 | * Here we should write the new page out first if we are |
| @@ -957,6 +961,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) | |||
| 957 | inode->i_ctime = CURRENT_TIME; | 961 | inode->i_ctime = CURRENT_TIME; |
| 958 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); | 962 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); |
| 959 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | 963 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); |
| 964 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 960 | 965 | ||
| 961 | ocfs2_journal_dirty(handle, di_bh); | 966 | ocfs2_journal_dirty(handle, di_bh); |
| 962 | 967 | ||
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 3683643f3f0e..2060fc398445 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -450,7 +450,6 @@ leave: | |||
| 450 | 450 | ||
| 451 | brelse(new_fe_bh); | 451 | brelse(new_fe_bh); |
| 452 | brelse(parent_fe_bh); | 452 | brelse(parent_fe_bh); |
| 453 | kfree(si.name); | ||
| 454 | kfree(si.value); | 453 | kfree(si.value); |
| 455 | 454 | ||
| 456 | ocfs2_free_dir_lookup_result(&lookup); | 455 | ocfs2_free_dir_lookup_result(&lookup); |
| @@ -495,6 +494,7 @@ static int __ocfs2_mknod_locked(struct inode *dir, | |||
| 495 | struct ocfs2_dinode *fe = NULL; | 494 | struct ocfs2_dinode *fe = NULL; |
| 496 | struct ocfs2_extent_list *fel; | 495 | struct ocfs2_extent_list *fel; |
| 497 | u16 feat; | 496 | u16 feat; |
| 497 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | ||
| 498 | 498 | ||
| 499 | *new_fe_bh = NULL; | 499 | *new_fe_bh = NULL; |
| 500 | 500 | ||
| @@ -576,8 +576,8 @@ static int __ocfs2_mknod_locked(struct inode *dir, | |||
| 576 | mlog_errno(status); | 576 | mlog_errno(status); |
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | status = 0; /* error in ocfs2_create_new_inode_locks is not | 579 | oi->i_sync_tid = handle->h_transaction->t_tid; |
| 580 | * critical */ | 580 | oi->i_datasync_tid = handle->h_transaction->t_tid; |
| 581 | 581 | ||
| 582 | leave: | 582 | leave: |
| 583 | if (status < 0) { | 583 | if (status < 0) { |
| @@ -1855,7 +1855,6 @@ bail: | |||
| 1855 | 1855 | ||
| 1856 | brelse(new_fe_bh); | 1856 | brelse(new_fe_bh); |
| 1857 | brelse(parent_fe_bh); | 1857 | brelse(parent_fe_bh); |
| 1858 | kfree(si.name); | ||
| 1859 | kfree(si.value); | 1858 | kfree(si.value); |
| 1860 | ocfs2_free_dir_lookup_result(&lookup); | 1859 | ocfs2_free_dir_lookup_result(&lookup); |
| 1861 | if (inode_ac) | 1860 | if (inode_ac) |
| @@ -2481,6 +2480,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
| 2481 | di->i_orphaned_slot = 0; | 2480 | di->i_orphaned_slot = 0; |
| 2482 | set_nlink(inode, 1); | 2481 | set_nlink(inode, 1); |
| 2483 | ocfs2_set_links_count(di, inode->i_nlink); | 2482 | ocfs2_set_links_count(di, inode->i_nlink); |
| 2483 | ocfs2_update_inode_fsync_trans(handle, inode, 1); | ||
| 2484 | ocfs2_journal_dirty(handle, di_bh); | 2484 | ocfs2_journal_dirty(handle, di_bh); |
| 2485 | 2485 | ||
| 2486 | status = ocfs2_add_entry(handle, dentry, inode, | 2486 | status = ocfs2_add_entry(handle, dentry, inode, |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 553f53cc73ae..8d64a97a9d5e 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
| 31 | #include <linux/wait.h> | 31 | #include <linux/wait.h> |
| 32 | #include <linux/list.h> | 32 | #include <linux/list.h> |
| 33 | #include <linux/llist.h> | ||
| 33 | #include <linux/rbtree.h> | 34 | #include <linux/rbtree.h> |
| 34 | #include <linux/workqueue.h> | 35 | #include <linux/workqueue.h> |
| 35 | #include <linux/kref.h> | 36 | #include <linux/kref.h> |
| @@ -274,19 +275,16 @@ enum ocfs2_mount_options | |||
| 274 | OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */ | 275 | OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */ |
| 275 | }; | 276 | }; |
| 276 | 277 | ||
| 277 | #define OCFS2_OSB_SOFT_RO 0x0001 | 278 | #define OCFS2_OSB_SOFT_RO 0x0001 |
| 278 | #define OCFS2_OSB_HARD_RO 0x0002 | 279 | #define OCFS2_OSB_HARD_RO 0x0002 |
| 279 | #define OCFS2_OSB_ERROR_FS 0x0004 | 280 | #define OCFS2_OSB_ERROR_FS 0x0004 |
| 280 | #define OCFS2_OSB_DROP_DENTRY_LOCK_IMMED 0x0008 | 281 | #define OCFS2_DEFAULT_ATIME_QUANTUM 60 |
| 281 | |||
| 282 | #define OCFS2_DEFAULT_ATIME_QUANTUM 60 | ||
| 283 | 282 | ||
| 284 | struct ocfs2_journal; | 283 | struct ocfs2_journal; |
| 285 | struct ocfs2_slot_info; | 284 | struct ocfs2_slot_info; |
| 286 | struct ocfs2_recovery_map; | 285 | struct ocfs2_recovery_map; |
| 287 | struct ocfs2_replay_map; | 286 | struct ocfs2_replay_map; |
| 288 | struct ocfs2_quota_recovery; | 287 | struct ocfs2_quota_recovery; |
| 289 | struct ocfs2_dentry_lock; | ||
| 290 | struct ocfs2_super | 288 | struct ocfs2_super |
| 291 | { | 289 | { |
| 292 | struct task_struct *commit_task; | 290 | struct task_struct *commit_task; |
| @@ -414,10 +412,9 @@ struct ocfs2_super | |||
| 414 | struct list_head blocked_lock_list; | 412 | struct list_head blocked_lock_list; |
| 415 | unsigned long blocked_lock_count; | 413 | unsigned long blocked_lock_count; |
| 416 | 414 | ||
| 417 | /* List of dentry locks to release. Anyone can add locks to | 415 | /* List of dquot structures to drop last reference to */ |
| 418 | * the list, ocfs2_wq processes the list */ | 416 | struct llist_head dquot_drop_list; |
| 419 | struct ocfs2_dentry_lock *dentry_lock_list; | 417 | struct work_struct dquot_drop_work; |
| 420 | struct work_struct dentry_lock_work; | ||
| 421 | 418 | ||
| 422 | wait_queue_head_t osb_mount_event; | 419 | wait_queue_head_t osb_mount_event; |
| 423 | 420 | ||
| @@ -449,6 +446,8 @@ struct ocfs2_super | |||
| 449 | /* rb tree root for refcount lock. */ | 446 | /* rb tree root for refcount lock. */ |
| 450 | struct rb_root osb_rf_lock_tree; | 447 | struct rb_root osb_rf_lock_tree; |
| 451 | struct ocfs2_refcount_tree *osb_ref_tree_lru; | 448 | struct ocfs2_refcount_tree *osb_ref_tree_lru; |
| 449 | |||
| 450 | struct mutex system_file_mutex; | ||
| 452 | }; | 451 | }; |
| 453 | 452 | ||
| 454 | #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) | 453 | #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) |
| @@ -579,18 +578,6 @@ static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb, | |||
| 579 | spin_unlock(&osb->osb_lock); | 578 | spin_unlock(&osb->osb_lock); |
| 580 | } | 579 | } |
| 581 | 580 | ||
| 582 | |||
| 583 | static inline unsigned long ocfs2_test_osb_flag(struct ocfs2_super *osb, | ||
| 584 | unsigned long flag) | ||
| 585 | { | ||
| 586 | unsigned long ret; | ||
| 587 | |||
| 588 | spin_lock(&osb->osb_lock); | ||
| 589 | ret = osb->osb_flags & flag; | ||
| 590 | spin_unlock(&osb->osb_lock); | ||
| 591 | return ret; | ||
| 592 | } | ||
| 593 | |||
| 594 | static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb, | 581 | static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb, |
| 595 | int hard) | 582 | int hard) |
| 596 | { | 583 | { |
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index d5ab56cbe5c5..f266d67df3c6 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h | |||
| @@ -28,6 +28,7 @@ struct ocfs2_dquot { | |||
| 28 | unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */ | 28 | unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */ |
| 29 | s64 dq_origspace; /* Last globally synced space usage */ | 29 | s64 dq_origspace; /* Last globally synced space usage */ |
| 30 | s64 dq_originodes; /* Last globally synced inode usage */ | 30 | s64 dq_originodes; /* Last globally synced inode usage */ |
| 31 | struct llist_node list; /* Member of list of dquots to drop */ | ||
| 31 | }; | 32 | }; |
| 32 | 33 | ||
| 33 | /* Description of one chunk to recover in memory */ | 34 | /* Description of one chunk to recover in memory */ |
| @@ -110,6 +111,7 @@ int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block, | |||
| 110 | int ocfs2_create_local_dquot(struct dquot *dquot); | 111 | int ocfs2_create_local_dquot(struct dquot *dquot); |
| 111 | int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot); | 112 | int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot); |
| 112 | int ocfs2_local_write_dquot(struct dquot *dquot); | 113 | int ocfs2_local_write_dquot(struct dquot *dquot); |
| 114 | void ocfs2_drop_dquot_refs(struct work_struct *work); | ||
| 113 | 115 | ||
| 114 | extern const struct dquot_operations ocfs2_quota_operations; | 116 | extern const struct dquot_operations ocfs2_quota_operations; |
| 115 | extern struct quota_format_type ocfs2_quota_format; | 117 | extern struct quota_format_type ocfs2_quota_format; |
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index d7b5108789e2..b990a62cff50 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/jiffies.h> | 10 | #include <linux/jiffies.h> |
| 11 | #include <linux/writeback.h> | 11 | #include <linux/writeback.h> |
| 12 | #include <linux/workqueue.h> | 12 | #include <linux/workqueue.h> |
| 13 | #include <linux/llist.h> | ||
| 13 | 14 | ||
| 14 | #include <cluster/masklog.h> | 15 | #include <cluster/masklog.h> |
| 15 | 16 | ||
| @@ -679,6 +680,27 @@ static int ocfs2_calc_qdel_credits(struct super_block *sb, int type) | |||
| 679 | OCFS2_INODE_UPDATE_CREDITS; | 680 | OCFS2_INODE_UPDATE_CREDITS; |
| 680 | } | 681 | } |
| 681 | 682 | ||
| 683 | void ocfs2_drop_dquot_refs(struct work_struct *work) | ||
| 684 | { | ||
| 685 | struct ocfs2_super *osb = container_of(work, struct ocfs2_super, | ||
| 686 | dquot_drop_work); | ||
| 687 | struct llist_node *list; | ||
| 688 | struct ocfs2_dquot *odquot, *next_odquot; | ||
| 689 | |||
| 690 | list = llist_del_all(&osb->dquot_drop_list); | ||
| 691 | llist_for_each_entry_safe(odquot, next_odquot, list, list) { | ||
| 692 | /* Drop the reference we acquired in ocfs2_dquot_release() */ | ||
| 693 | dqput(&odquot->dq_dquot); | ||
| 694 | } | ||
| 695 | } | ||
| 696 | |||
| 697 | /* | ||
| 698 | * Called when the last reference to dquot is dropped. If we are called from | ||
| 699 | * downconvert thread, we cannot do all the handling here because grabbing | ||
| 700 | * quota lock could deadlock (the node holding the quota lock could need some | ||
| 701 | * other cluster lock to proceed but with blocked downconvert thread we cannot | ||
| 702 | * release any lock). | ||
| 703 | */ | ||
| 682 | static int ocfs2_release_dquot(struct dquot *dquot) | 704 | static int ocfs2_release_dquot(struct dquot *dquot) |
| 683 | { | 705 | { |
| 684 | handle_t *handle; | 706 | handle_t *handle; |
| @@ -694,6 +716,19 @@ static int ocfs2_release_dquot(struct dquot *dquot) | |||
| 694 | /* Check whether we are not racing with some other dqget() */ | 716 | /* Check whether we are not racing with some other dqget() */ |
| 695 | if (atomic_read(&dquot->dq_count) > 1) | 717 | if (atomic_read(&dquot->dq_count) > 1) |
| 696 | goto out; | 718 | goto out; |
| 719 | /* Running from downconvert thread? Postpone quota processing to wq */ | ||
| 720 | if (current == osb->dc_task) { | ||
| 721 | /* | ||
| 722 | * Grab our own reference to dquot and queue it for delayed | ||
| 723 | * dropping. Quota code rechecks after calling | ||
| 724 | * ->release_dquot() and won't free dquot structure. | ||
| 725 | */ | ||
| 726 | dqgrab(dquot); | ||
| 727 | /* First entry on list -> queue work */ | ||
| 728 | if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list)) | ||
| 729 | queue_work(ocfs2_wq, &osb->dquot_drop_work); | ||
| 730 | goto out; | ||
| 731 | } | ||
| 697 | status = ocfs2_lock_global_qf(oinfo, 1); | 732 | status = ocfs2_lock_global_qf(oinfo, 1); |
| 698 | if (status < 0) | 733 | if (status < 0) |
| 699 | goto out; | 734 | goto out; |
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index ca5ce14cbddc..5c8343fe7438 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c | |||
| @@ -603,11 +603,25 @@ static struct kobj_attribute ocfs2_attr_cluster_stack = | |||
| 603 | ocfs2_cluster_stack_show, | 603 | ocfs2_cluster_stack_show, |
| 604 | ocfs2_cluster_stack_store); | 604 | ocfs2_cluster_stack_store); |
| 605 | 605 | ||
| 606 | |||
| 607 | |||
| 608 | static ssize_t ocfs2_dlm_recover_show(struct kobject *kobj, | ||
| 609 | struct kobj_attribute *attr, | ||
| 610 | char *buf) | ||
| 611 | { | ||
| 612 | return snprintf(buf, PAGE_SIZE, "1\n"); | ||
| 613 | } | ||
| 614 | |||
| 615 | static struct kobj_attribute ocfs2_attr_dlm_recover_support = | ||
| 616 | __ATTR(dlm_recover_callback_support, S_IRUGO, | ||
| 617 | ocfs2_dlm_recover_show, NULL); | ||
| 618 | |||
| 606 | static struct attribute *ocfs2_attrs[] = { | 619 | static struct attribute *ocfs2_attrs[] = { |
| 607 | &ocfs2_attr_max_locking_protocol.attr, | 620 | &ocfs2_attr_max_locking_protocol.attr, |
| 608 | &ocfs2_attr_loaded_cluster_plugins.attr, | 621 | &ocfs2_attr_loaded_cluster_plugins.attr, |
| 609 | &ocfs2_attr_active_cluster_plugin.attr, | 622 | &ocfs2_attr_active_cluster_plugin.attr, |
| 610 | &ocfs2_attr_cluster_stack.attr, | 623 | &ocfs2_attr_cluster_stack.attr, |
| 624 | &ocfs2_attr_dlm_recover_support.attr, | ||
| 611 | NULL, | 625 | NULL, |
| 612 | }; | 626 | }; |
| 613 | 627 | ||
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 47ae2663a6f5..0cb889a17ae1 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
| @@ -771,6 +771,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, | |||
| 771 | spin_unlock(&OCFS2_I(alloc_inode)->ip_lock); | 771 | spin_unlock(&OCFS2_I(alloc_inode)->ip_lock); |
| 772 | i_size_write(alloc_inode, le64_to_cpu(fe->i_size)); | 772 | i_size_write(alloc_inode, le64_to_cpu(fe->i_size)); |
| 773 | alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode); | 773 | alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode); |
| 774 | ocfs2_update_inode_fsync_trans(handle, alloc_inode, 0); | ||
| 774 | 775 | ||
| 775 | status = 0; | 776 | status = 0; |
| 776 | 777 | ||
| @@ -1607,6 +1608,21 @@ out: | |||
| 1607 | return ret; | 1608 | return ret; |
| 1608 | } | 1609 | } |
| 1609 | 1610 | ||
| 1611 | void ocfs2_rollback_alloc_dinode_counts(struct inode *inode, | ||
| 1612 | struct buffer_head *di_bh, | ||
| 1613 | u32 num_bits, | ||
| 1614 | u16 chain) | ||
| 1615 | { | ||
| 1616 | u32 tmp_used; | ||
| 1617 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; | ||
| 1618 | struct ocfs2_chain_list *cl; | ||
| 1619 | |||
| 1620 | cl = (struct ocfs2_chain_list *)&di->id2.i_chain; | ||
| 1621 | tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); | ||
| 1622 | di->id1.bitmap1.i_used = cpu_to_le32(tmp_used - num_bits); | ||
| 1623 | le32_add_cpu(&cl->cl_recs[chain].c_free, num_bits); | ||
| 1624 | } | ||
| 1625 | |||
| 1610 | static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res, | 1626 | static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res, |
| 1611 | struct ocfs2_extent_rec *rec, | 1627 | struct ocfs2_extent_rec *rec, |
| 1612 | struct ocfs2_chain_list *cl) | 1628 | struct ocfs2_chain_list *cl) |
| @@ -1707,8 +1723,12 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, | |||
| 1707 | 1723 | ||
| 1708 | ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, | 1724 | ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, |
| 1709 | res->sr_bit_offset, res->sr_bits); | 1725 | res->sr_bit_offset, res->sr_bits); |
| 1710 | if (ret < 0) | 1726 | if (ret < 0) { |
| 1727 | ocfs2_rollback_alloc_dinode_counts(alloc_inode, ac->ac_bh, | ||
| 1728 | res->sr_bits, | ||
| 1729 | le16_to_cpu(gd->bg_chain)); | ||
| 1711 | mlog_errno(ret); | 1730 | mlog_errno(ret); |
| 1731 | } | ||
| 1712 | 1732 | ||
| 1713 | out_loc_only: | 1733 | out_loc_only: |
| 1714 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); | 1734 | *bits_left = le16_to_cpu(gd->bg_free_bits_count); |
| @@ -1838,6 +1858,8 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
| 1838 | res->sr_bit_offset, | 1858 | res->sr_bit_offset, |
| 1839 | res->sr_bits); | 1859 | res->sr_bits); |
| 1840 | if (status < 0) { | 1860 | if (status < 0) { |
| 1861 | ocfs2_rollback_alloc_dinode_counts(alloc_inode, | ||
| 1862 | ac->ac_bh, res->sr_bits, chain); | ||
| 1841 | mlog_errno(status); | 1863 | mlog_errno(status); |
| 1842 | goto bail; | 1864 | goto bail; |
| 1843 | } | 1865 | } |
| @@ -2091,7 +2113,7 @@ int ocfs2_find_new_inode_loc(struct inode *dir, | |||
| 2091 | 2113 | ||
| 2092 | ac->ac_find_loc_priv = res; | 2114 | ac->ac_find_loc_priv = res; |
| 2093 | *fe_blkno = res->sr_blkno; | 2115 | *fe_blkno = res->sr_blkno; |
| 2094 | 2116 | ocfs2_update_inode_fsync_trans(handle, dir, 0); | |
| 2095 | out: | 2117 | out: |
| 2096 | if (handle) | 2118 | if (handle) |
| 2097 | ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle); | 2119 | ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle); |
| @@ -2149,6 +2171,8 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle, | |||
| 2149 | res->sr_bit_offset, | 2171 | res->sr_bit_offset, |
| 2150 | res->sr_bits); | 2172 | res->sr_bits); |
| 2151 | if (ret < 0) { | 2173 | if (ret < 0) { |
| 2174 | ocfs2_rollback_alloc_dinode_counts(ac->ac_inode, | ||
| 2175 | ac->ac_bh, res->sr_bits, chain); | ||
| 2152 | mlog_errno(ret); | 2176 | mlog_errno(ret); |
| 2153 | goto out; | 2177 | goto out; |
| 2154 | } | 2178 | } |
| @@ -2870,6 +2894,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
| 2870 | status = ocfs2_inode_lock(inode_alloc_inode, &alloc_bh, 0); | 2894 | status = ocfs2_inode_lock(inode_alloc_inode, &alloc_bh, 0); |
| 2871 | if (status < 0) { | 2895 | if (status < 0) { |
| 2872 | mutex_unlock(&inode_alloc_inode->i_mutex); | 2896 | mutex_unlock(&inode_alloc_inode->i_mutex); |
| 2897 | iput(inode_alloc_inode); | ||
| 2873 | mlog(ML_ERROR, "lock on alloc inode on slot %u failed %d\n", | 2898 | mlog(ML_ERROR, "lock on alloc inode on slot %u failed %d\n", |
| 2874 | (u32)suballoc_slot, status); | 2899 | (u32)suballoc_slot, status); |
| 2875 | goto bail; | 2900 | goto bail; |
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 218d8036b3e7..2d2501767c0c 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h | |||
| @@ -91,6 +91,10 @@ int ocfs2_alloc_dinode_update_counts(struct inode *inode, | |||
| 91 | struct buffer_head *di_bh, | 91 | struct buffer_head *di_bh, |
| 92 | u32 num_bits, | 92 | u32 num_bits, |
| 93 | u16 chain); | 93 | u16 chain); |
| 94 | void ocfs2_rollback_alloc_dinode_counts(struct inode *inode, | ||
| 95 | struct buffer_head *di_bh, | ||
| 96 | u32 num_bits, | ||
| 97 | u16 chain); | ||
| 94 | int ocfs2_block_group_set_bits(handle_t *handle, | 98 | int ocfs2_block_group_set_bits(handle_t *handle, |
| 95 | struct inode *alloc_inode, | 99 | struct inode *alloc_inode, |
| 96 | struct ocfs2_group_desc *bg, | 100 | struct ocfs2_group_desc *bg, |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 49d84f80f36c..1aecd626e645 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
| @@ -561,6 +561,9 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb) | |||
| 561 | if (!oi) | 561 | if (!oi) |
| 562 | return NULL; | 562 | return NULL; |
| 563 | 563 | ||
| 564 | oi->i_sync_tid = 0; | ||
| 565 | oi->i_datasync_tid = 0; | ||
| 566 | |||
| 564 | jbd2_journal_init_jbd_inode(&oi->ip_jinode, &oi->vfs_inode); | 567 | jbd2_journal_init_jbd_inode(&oi->ip_jinode, &oi->vfs_inode); |
| 565 | return &oi->vfs_inode; | 568 | return &oi->vfs_inode; |
| 566 | } | 569 | } |
| @@ -1238,30 +1241,11 @@ static struct dentry *ocfs2_mount(struct file_system_type *fs_type, | |||
| 1238 | return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super); | 1241 | return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super); |
| 1239 | } | 1242 | } |
| 1240 | 1243 | ||
| 1241 | static void ocfs2_kill_sb(struct super_block *sb) | ||
| 1242 | { | ||
| 1243 | struct ocfs2_super *osb = OCFS2_SB(sb); | ||
| 1244 | |||
| 1245 | /* Failed mount? */ | ||
| 1246 | if (!osb || atomic_read(&osb->vol_state) == VOLUME_DISABLED) | ||
| 1247 | goto out; | ||
| 1248 | |||
| 1249 | /* Prevent further queueing of inode drop events */ | ||
| 1250 | spin_lock(&dentry_list_lock); | ||
| 1251 | ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED); | ||
| 1252 | spin_unlock(&dentry_list_lock); | ||
| 1253 | /* Wait for work to finish and/or remove it */ | ||
| 1254 | cancel_work_sync(&osb->dentry_lock_work); | ||
| 1255 | out: | ||
| 1256 | kill_block_super(sb); | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | static struct file_system_type ocfs2_fs_type = { | 1244 | static struct file_system_type ocfs2_fs_type = { |
| 1260 | .owner = THIS_MODULE, | 1245 | .owner = THIS_MODULE, |
| 1261 | .name = "ocfs2", | 1246 | .name = "ocfs2", |
| 1262 | .mount = ocfs2_mount, | 1247 | .mount = ocfs2_mount, |
| 1263 | .kill_sb = ocfs2_kill_sb, | 1248 | .kill_sb = kill_block_super, |
| 1264 | |||
| 1265 | .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, | 1249 | .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, |
| 1266 | .next = NULL | 1250 | .next = NULL |
| 1267 | }; | 1251 | }; |
| @@ -1612,14 +1596,9 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root) | |||
| 1612 | return 0; | 1596 | return 0; |
| 1613 | } | 1597 | } |
| 1614 | 1598 | ||
| 1615 | wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; | ||
| 1616 | |||
| 1617 | static int __init ocfs2_init(void) | 1599 | static int __init ocfs2_init(void) |
| 1618 | { | 1600 | { |
| 1619 | int status, i; | 1601 | int status; |
| 1620 | |||
| 1621 | for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++) | ||
| 1622 | init_waitqueue_head(&ocfs2__ioend_wq[i]); | ||
| 1623 | 1602 | ||
| 1624 | status = init_ocfs2_uptodate_cache(); | 1603 | status = init_ocfs2_uptodate_cache(); |
| 1625 | if (status < 0) | 1604 | if (status < 0) |
| @@ -1761,7 +1740,7 @@ static void ocfs2_inode_init_once(void *data) | |||
| 1761 | ocfs2_extent_map_init(&oi->vfs_inode); | 1740 | ocfs2_extent_map_init(&oi->vfs_inode); |
| 1762 | INIT_LIST_HEAD(&oi->ip_io_markers); | 1741 | INIT_LIST_HEAD(&oi->ip_io_markers); |
| 1763 | oi->ip_dir_start_lookup = 0; | 1742 | oi->ip_dir_start_lookup = 0; |
| 1764 | atomic_set(&oi->ip_unaligned_aio, 0); | 1743 | mutex_init(&oi->ip_unaligned_aio); |
| 1765 | init_rwsem(&oi->ip_alloc_sem); | 1744 | init_rwsem(&oi->ip_alloc_sem); |
| 1766 | init_rwsem(&oi->ip_xattr_sem); | 1745 | init_rwsem(&oi->ip_xattr_sem); |
| 1767 | mutex_init(&oi->ip_io_mutex); | 1746 | mutex_init(&oi->ip_io_mutex); |
| @@ -1932,17 +1911,16 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) | |||
| 1932 | 1911 | ||
| 1933 | debugfs_remove(osb->osb_ctxt); | 1912 | debugfs_remove(osb->osb_ctxt); |
| 1934 | 1913 | ||
| 1935 | /* | ||
| 1936 | * Flush inode dropping work queue so that deletes are | ||
| 1937 | * performed while the filesystem is still working | ||
| 1938 | */ | ||
| 1939 | ocfs2_drop_all_dl_inodes(osb); | ||
| 1940 | |||
| 1941 | /* Orphan scan should be stopped as early as possible */ | 1914 | /* Orphan scan should be stopped as early as possible */ |
| 1942 | ocfs2_orphan_scan_stop(osb); | 1915 | ocfs2_orphan_scan_stop(osb); |
| 1943 | 1916 | ||
| 1944 | ocfs2_disable_quotas(osb); | 1917 | ocfs2_disable_quotas(osb); |
| 1945 | 1918 | ||
| 1919 | /* All dquots should be freed by now */ | ||
| 1920 | WARN_ON(!llist_empty(&osb->dquot_drop_list)); | ||
| 1921 | /* Wait for worker to be done with the work structure in osb */ | ||
| 1922 | cancel_work_sync(&osb->dquot_drop_work); | ||
| 1923 | |||
| 1946 | ocfs2_shutdown_local_alloc(osb); | 1924 | ocfs2_shutdown_local_alloc(osb); |
| 1947 | 1925 | ||
| 1948 | /* This will disable recovery and flush any recovery work. */ | 1926 | /* This will disable recovery and flush any recovery work. */ |
| @@ -2077,7 +2055,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
| 2077 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; | 2055 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; |
| 2078 | struct inode *inode = NULL; | 2056 | struct inode *inode = NULL; |
| 2079 | struct ocfs2_journal *journal; | 2057 | struct ocfs2_journal *journal; |
| 2080 | __le32 uuid_net_key; | ||
| 2081 | struct ocfs2_super *osb; | 2058 | struct ocfs2_super *osb; |
| 2082 | u64 total_blocks; | 2059 | u64 total_blocks; |
| 2083 | 2060 | ||
| @@ -2123,6 +2100,8 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
| 2123 | spin_lock_init(&osb->osb_xattr_lock); | 2100 | spin_lock_init(&osb->osb_xattr_lock); |
| 2124 | ocfs2_init_steal_slots(osb); | 2101 | ocfs2_init_steal_slots(osb); |
| 2125 | 2102 | ||
| 2103 | mutex_init(&osb->system_file_mutex); | ||
| 2104 | |||
| 2126 | atomic_set(&osb->alloc_stats.moves, 0); | 2105 | atomic_set(&osb->alloc_stats.moves, 0); |
| 2127 | atomic_set(&osb->alloc_stats.local_data, 0); | 2106 | atomic_set(&osb->alloc_stats.local_data, 0); |
| 2128 | atomic_set(&osb->alloc_stats.bitmap_data, 0); | 2107 | atomic_set(&osb->alloc_stats.bitmap_data, 0); |
| @@ -2276,8 +2255,8 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
| 2276 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); | 2255 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); |
| 2277 | journal->j_state = OCFS2_JOURNAL_FREE; | 2256 | journal->j_state = OCFS2_JOURNAL_FREE; |
| 2278 | 2257 | ||
| 2279 | INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes); | 2258 | INIT_WORK(&osb->dquot_drop_work, ocfs2_drop_dquot_refs); |
| 2280 | osb->dentry_lock_list = NULL; | 2259 | init_llist_head(&osb->dquot_drop_list); |
| 2281 | 2260 | ||
| 2282 | /* get some pseudo constants for clustersize bits */ | 2261 | /* get some pseudo constants for clustersize bits */ |
| 2283 | osb->s_clustersize_bits = | 2262 | osb->s_clustersize_bits = |
| @@ -2311,8 +2290,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
| 2311 | goto bail; | 2290 | goto bail; |
| 2312 | } | 2291 | } |
| 2313 | 2292 | ||
| 2314 | memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key)); | ||
| 2315 | |||
| 2316 | strncpy(osb->vol_label, di->id2.i_super.s_label, 63); | 2293 | strncpy(osb->vol_label, di->id2.i_super.s_label, 63); |
| 2317 | osb->vol_label[63] = '\0'; | 2294 | osb->vol_label[63] = '\0'; |
| 2318 | osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno); | 2295 | osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno); |
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c index f053688d22a3..af155c183123 100644 --- a/fs/ocfs2/sysfile.c +++ b/fs/ocfs2/sysfile.c | |||
| @@ -113,9 +113,11 @@ struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb, | |||
| 113 | } else | 113 | } else |
| 114 | arr = get_local_system_inode(osb, type, slot); | 114 | arr = get_local_system_inode(osb, type, slot); |
| 115 | 115 | ||
| 116 | mutex_lock(&osb->system_file_mutex); | ||
| 116 | if (arr && ((inode = *arr) != NULL)) { | 117 | if (arr && ((inode = *arr) != NULL)) { |
| 117 | /* get a ref in addition to the array ref */ | 118 | /* get a ref in addition to the array ref */ |
| 118 | inode = igrab(inode); | 119 | inode = igrab(inode); |
| 120 | mutex_unlock(&osb->system_file_mutex); | ||
| 119 | BUG_ON(!inode); | 121 | BUG_ON(!inode); |
| 120 | 122 | ||
| 121 | return inode; | 123 | return inode; |
| @@ -129,6 +131,7 @@ struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb, | |||
| 129 | *arr = igrab(inode); | 131 | *arr = igrab(inode); |
| 130 | BUG_ON(!*arr); | 132 | BUG_ON(!*arr); |
| 131 | } | 133 | } |
| 134 | mutex_unlock(&osb->system_file_mutex); | ||
| 132 | return inode; | 135 | return inode; |
| 133 | } | 136 | } |
| 134 | 137 | ||
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 185fa3b7f962..016f01df3825 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
| @@ -369,7 +369,7 @@ static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket) | |||
| 369 | * them fully. | 369 | * them fully. |
| 370 | */ | 370 | */ |
| 371 | static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, | 371 | static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, |
| 372 | u64 xb_blkno) | 372 | u64 xb_blkno, int new) |
| 373 | { | 373 | { |
| 374 | int i, rc = 0; | 374 | int i, rc = 0; |
| 375 | 375 | ||
| @@ -383,9 +383,16 @@ static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, | |||
| 383 | } | 383 | } |
| 384 | 384 | ||
| 385 | if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode), | 385 | if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode), |
| 386 | bucket->bu_bhs[i])) | 386 | bucket->bu_bhs[i])) { |
| 387 | ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode), | 387 | if (new) |
| 388 | bucket->bu_bhs[i]); | 388 | ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode), |
| 389 | bucket->bu_bhs[i]); | ||
| 390 | else { | ||
| 391 | set_buffer_uptodate(bucket->bu_bhs[i]); | ||
| 392 | ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode), | ||
| 393 | bucket->bu_bhs[i]); | ||
| 394 | } | ||
| 395 | } | ||
| 389 | } | 396 | } |
| 390 | 397 | ||
| 391 | if (rc) | 398 | if (rc) |
| @@ -2602,6 +2609,7 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) | |||
| 2602 | oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL); | 2609 | oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL); |
| 2603 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); | 2610 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); |
| 2604 | spin_unlock(&oi->ip_lock); | 2611 | spin_unlock(&oi->ip_lock); |
| 2612 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 2605 | 2613 | ||
| 2606 | ocfs2_journal_dirty(handle, di_bh); | 2614 | ocfs2_journal_dirty(handle, di_bh); |
| 2607 | out_commit: | 2615 | out_commit: |
| @@ -3200,8 +3208,15 @@ meta_guess: | |||
| 3200 | clusters_add += 1; | 3208 | clusters_add += 1; |
| 3201 | } | 3209 | } |
| 3202 | } else { | 3210 | } else { |
| 3203 | meta_add += 1; | ||
| 3204 | credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; | 3211 | credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS; |
| 3212 | if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { | ||
| 3213 | struct ocfs2_extent_list *el = &def_xv.xv.xr_list; | ||
| 3214 | meta_add += ocfs2_extend_meta_needed(el); | ||
| 3215 | credits += ocfs2_calc_extend_credits(inode->i_sb, | ||
| 3216 | el); | ||
| 3217 | } else { | ||
| 3218 | meta_add += 1; | ||
| 3219 | } | ||
| 3205 | } | 3220 | } |
| 3206 | out: | 3221 | out: |
| 3207 | if (clusters_need) | 3222 | if (clusters_need) |
| @@ -3614,6 +3629,7 @@ int ocfs2_xattr_set(struct inode *inode, | |||
| 3614 | } | 3629 | } |
| 3615 | 3630 | ||
| 3616 | ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); | 3631 | ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); |
| 3632 | ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0); | ||
| 3617 | 3633 | ||
| 3618 | ocfs2_commit_trans(osb, ctxt.handle); | 3634 | ocfs2_commit_trans(osb, ctxt.handle); |
| 3619 | 3635 | ||
| @@ -4294,7 +4310,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, | |||
| 4294 | 4310 | ||
| 4295 | trace_ocfs2_xattr_create_index_block((unsigned long long)blkno); | 4311 | trace_ocfs2_xattr_create_index_block((unsigned long long)blkno); |
| 4296 | 4312 | ||
| 4297 | ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); | 4313 | ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1); |
| 4298 | if (ret) { | 4314 | if (ret) { |
| 4299 | mlog_errno(ret); | 4315 | mlog_errno(ret); |
| 4300 | goto out; | 4316 | goto out; |
| @@ -4638,7 +4654,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, | |||
| 4638 | * Even if !new_bucket_head, we're overwriting t_bucket. Thus, | 4654 | * Even if !new_bucket_head, we're overwriting t_bucket. Thus, |
| 4639 | * there's no need to read it. | 4655 | * there's no need to read it. |
| 4640 | */ | 4656 | */ |
| 4641 | ret = ocfs2_init_xattr_bucket(t_bucket, new_blk); | 4657 | ret = ocfs2_init_xattr_bucket(t_bucket, new_blk, new_bucket_head); |
| 4642 | if (ret) { | 4658 | if (ret) { |
| 4643 | mlog_errno(ret); | 4659 | mlog_errno(ret); |
| 4644 | goto out; | 4660 | goto out; |
| @@ -4804,7 +4820,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, | |||
| 4804 | * Even if !t_is_new, we're overwriting t_bucket. Thus, | 4820 | * Even if !t_is_new, we're overwriting t_bucket. Thus, |
| 4805 | * there's no need to read it. | 4821 | * there's no need to read it. |
| 4806 | */ | 4822 | */ |
| 4807 | ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno); | 4823 | ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno, t_is_new); |
| 4808 | if (ret) | 4824 | if (ret) |
| 4809 | goto out; | 4825 | goto out; |
| 4810 | 4826 | ||
| @@ -5476,6 +5492,7 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, | |||
| 5476 | ret = ocfs2_truncate_log_append(osb, handle, blkno, len); | 5492 | ret = ocfs2_truncate_log_append(osb, handle, blkno, len); |
| 5477 | if (ret) | 5493 | if (ret) |
| 5478 | mlog_errno(ret); | 5494 | mlog_errno(ret); |
| 5495 | ocfs2_update_inode_fsync_trans(handle, inode, 0); | ||
| 5479 | 5496 | ||
| 5480 | out_commit: | 5497 | out_commit: |
| 5481 | ocfs2_commit_trans(osb, handle); | 5498 | ocfs2_commit_trans(osb, handle); |
| @@ -6830,7 +6847,7 @@ static int ocfs2_reflink_xattr_bucket(handle_t *handle, | |||
| 6830 | break; | 6847 | break; |
| 6831 | } | 6848 | } |
| 6832 | 6849 | ||
| 6833 | ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno); | 6850 | ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno, 1); |
| 6834 | if (ret) { | 6851 | if (ret) { |
| 6835 | mlog_errno(ret); | 6852 | mlog_errno(ret); |
| 6836 | break; | 6853 | break; |
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index d8b0afde2179..ec58c7659183 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c | |||
| @@ -183,7 +183,7 @@ int omfs_sync_inode(struct inode *inode) | |||
| 183 | */ | 183 | */ |
| 184 | static void omfs_evict_inode(struct inode *inode) | 184 | static void omfs_evict_inode(struct inode *inode) |
| 185 | { | 185 | { |
| 186 | truncate_inode_pages(&inode->i_data, 0); | 186 | truncate_inode_pages_final(&inode->i_data); |
| 187 | clear_inode(inode); | 187 | clear_inode(inode); |
| 188 | 188 | ||
| 189 | if (inode->i_nlink) | 189 | if (inode->i_nlink) |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 124fc43c7090..8f20e3404fd2 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
| @@ -35,7 +35,7 @@ static void proc_evict_inode(struct inode *inode) | |||
| 35 | const struct proc_ns_operations *ns_ops; | 35 | const struct proc_ns_operations *ns_ops; |
| 36 | void *ns; | 36 | void *ns; |
| 37 | 37 | ||
| 38 | truncate_inode_pages(&inode->i_data, 0); | 38 | truncate_inode_pages_final(&inode->i_data); |
| 39 | clear_inode(inode); | 39 | clear_inode(inode); |
| 40 | 40 | ||
| 41 | /* Stop tracking associated processes */ | 41 | /* Stop tracking associated processes */ |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index cfc8dcc16043..9cd5f63715c0 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -528,7 +528,7 @@ restart: | |||
| 528 | if (atomic_read(&dquot->dq_count)) { | 528 | if (atomic_read(&dquot->dq_count)) { |
| 529 | DEFINE_WAIT(wait); | 529 | DEFINE_WAIT(wait); |
| 530 | 530 | ||
| 531 | atomic_inc(&dquot->dq_count); | 531 | dqgrab(dquot); |
| 532 | prepare_to_wait(&dquot->dq_wait_unused, &wait, | 532 | prepare_to_wait(&dquot->dq_wait_unused, &wait, |
| 533 | TASK_UNINTERRUPTIBLE); | 533 | TASK_UNINTERRUPTIBLE); |
| 534 | spin_unlock(&dq_list_lock); | 534 | spin_unlock(&dq_list_lock); |
| @@ -632,7 +632,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type) | |||
| 632 | /* Now we have active dquot from which someone is | 632 | /* Now we have active dquot from which someone is |
| 633 | * holding reference so we can safely just increase | 633 | * holding reference so we can safely just increase |
| 634 | * use count */ | 634 | * use count */ |
| 635 | atomic_inc(&dquot->dq_count); | 635 | dqgrab(dquot); |
| 636 | spin_unlock(&dq_list_lock); | 636 | spin_unlock(&dq_list_lock); |
| 637 | dqstats_inc(DQST_LOOKUPS); | 637 | dqstats_inc(DQST_LOOKUPS); |
| 638 | err = sb->dq_op->write_dquot(dquot); | 638 | err = sb->dq_op->write_dquot(dquot); |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index ad62bdbb451e..bc8b8009897d 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
| @@ -35,7 +35,7 @@ void reiserfs_evict_inode(struct inode *inode) | |||
| 35 | if (!inode->i_nlink && !is_bad_inode(inode)) | 35 | if (!inode->i_nlink && !is_bad_inode(inode)) |
| 36 | dquot_initialize(inode); | 36 | dquot_initialize(inode); |
| 37 | 37 | ||
| 38 | truncate_inode_pages(&inode->i_data, 0); | 38 | truncate_inode_pages_final(&inode->i_data); |
| 39 | if (inode->i_nlink) | 39 | if (inode->i_nlink) |
| 40 | goto no_delete; | 40 | goto no_delete; |
| 41 | 41 | ||
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 8d06adf89948..83d4eac8059a 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h | |||
| @@ -2831,6 +2831,7 @@ void reiserfs_init_alloc_options(struct super_block *s); | |||
| 2831 | */ | 2831 | */ |
| 2832 | __le32 reiserfs_choose_packing(struct inode *dir); | 2832 | __le32 reiserfs_choose_packing(struct inode *dir); |
| 2833 | 2833 | ||
| 2834 | void show_alloc_options(struct seq_file *seq, struct super_block *s); | ||
| 2834 | int reiserfs_init_bitmap_cache(struct super_block *sb); | 2835 | int reiserfs_init_bitmap_cache(struct super_block *sb); |
| 2835 | void reiserfs_free_bitmap_cache(struct super_block *sb); | 2836 | void reiserfs_free_bitmap_cache(struct super_block *sb); |
| 2836 | void reiserfs_cache_bitmap_metadata(struct super_block *sb, struct buffer_head *bh, struct reiserfs_bitmap_info *info); | 2837 | void reiserfs_cache_bitmap_metadata(struct super_block *sb, struct buffer_head *bh, struct reiserfs_bitmap_info *info); |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 2c803353f8ac..ed54a04c33bd 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
| @@ -62,7 +62,6 @@ static int is_any_reiserfs_magic_string(struct reiserfs_super_block *rs) | |||
| 62 | 62 | ||
| 63 | static int reiserfs_remount(struct super_block *s, int *flags, char *data); | 63 | static int reiserfs_remount(struct super_block *s, int *flags, char *data); |
| 64 | static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf); | 64 | static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf); |
| 65 | void show_alloc_options(struct seq_file *seq, struct super_block *s); | ||
| 66 | 65 | ||
| 67 | static int reiserfs_sync_fs(struct super_block *s, int wait) | 66 | static int reiserfs_sync_fs(struct super_block *s, int wait) |
| 68 | { | 67 | { |
| @@ -597,7 +596,7 @@ static void init_once(void *foo) | |||
| 597 | inode_init_once(&ei->vfs_inode); | 596 | inode_init_once(&ei->vfs_inode); |
| 598 | } | 597 | } |
| 599 | 598 | ||
| 600 | static int init_inodecache(void) | 599 | static int __init init_inodecache(void) |
| 601 | { | 600 | { |
| 602 | reiserfs_inode_cachep = kmem_cache_create("reiser_inode_cache", | 601 | reiserfs_inode_cachep = kmem_cache_create("reiser_inode_cache", |
| 603 | sizeof(struct | 602 | sizeof(struct |
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index c327d4ee1235..5625ca920f5e 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c | |||
| @@ -295,7 +295,7 @@ int sysv_sync_inode(struct inode *inode) | |||
| 295 | 295 | ||
| 296 | static void sysv_evict_inode(struct inode *inode) | 296 | static void sysv_evict_inode(struct inode *inode) |
| 297 | { | 297 | { |
| 298 | truncate_inode_pages(&inode->i_data, 0); | 298 | truncate_inode_pages_final(&inode->i_data); |
| 299 | if (!inode->i_nlink) { | 299 | if (!inode->i_nlink) { |
| 300 | inode->i_size = 0; | 300 | inode->i_size = 0; |
| 301 | sysv_truncate(inode); | 301 | sysv_truncate(inode); |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 5ded8490c0c6..48f943f7f5d5 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -351,7 +351,7 @@ static void ubifs_evict_inode(struct inode *inode) | |||
| 351 | dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode); | 351 | dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode); |
| 352 | ubifs_assert(!atomic_read(&inode->i_count)); | 352 | ubifs_assert(!atomic_read(&inode->i_count)); |
| 353 | 353 | ||
| 354 | truncate_inode_pages(&inode->i_data, 0); | 354 | truncate_inode_pages_final(&inode->i_data); |
| 355 | 355 | ||
| 356 | if (inode->i_nlink) | 356 | if (inode->i_nlink) |
| 357 | goto done; | 357 | goto done; |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 982ce05c87ed..5d643706212f 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
| @@ -146,8 +146,8 @@ void udf_evict_inode(struct inode *inode) | |||
| 146 | want_delete = 1; | 146 | want_delete = 1; |
| 147 | udf_setsize(inode, 0); | 147 | udf_setsize(inode, 0); |
| 148 | udf_update_inode(inode, IS_SYNC(inode)); | 148 | udf_update_inode(inode, IS_SYNC(inode)); |
| 149 | } else | 149 | } |
| 150 | truncate_inode_pages(&inode->i_data, 0); | 150 | truncate_inode_pages_final(&inode->i_data); |
| 151 | invalidate_inode_buffers(inode); | 151 | invalidate_inode_buffers(inode); |
| 152 | clear_inode(inode); | 152 | clear_inode(inode); |
| 153 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && | 153 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index c8ca96086784..61e8a9b021dd 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
| @@ -885,7 +885,7 @@ void ufs_evict_inode(struct inode * inode) | |||
| 885 | if (!inode->i_nlink && !is_bad_inode(inode)) | 885 | if (!inode->i_nlink && !is_bad_inode(inode)) |
| 886 | want_delete = 1; | 886 | want_delete = 1; |
| 887 | 887 | ||
| 888 | truncate_inode_pages(&inode->i_data, 0); | 888 | truncate_inode_pages_final(&inode->i_data); |
| 889 | if (want_delete) { | 889 | if (want_delete) { |
| 890 | loff_t old_i_size; | 890 | loff_t old_i_size; |
| 891 | /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ | 891 | /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index d971f4932b5d..0ef599218991 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
| @@ -996,7 +996,7 @@ xfs_fs_evict_inode( | |||
| 996 | 996 | ||
| 997 | trace_xfs_evict_inode(ip); | 997 | trace_xfs_evict_inode(ip); |
| 998 | 998 | ||
| 999 | truncate_inode_pages(&inode->i_data, 0); | 999 | truncate_inode_pages_final(&inode->i_data); |
| 1000 | clear_inode(inode); | 1000 | clear_inode(inode); |
| 1001 | XFS_STATS_INC(vn_rele); | 1001 | XFS_STATS_INC(vn_rele); |
| 1002 | XFS_STATS_INC(vn_remove); | 1002 | XFS_STATS_INC(vn_remove); |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 24819001f5c8..e488e9459a93 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -95,7 +95,7 @@ struct backing_dev_info { | |||
| 95 | unsigned int max_ratio, max_prop_frac; | 95 | unsigned int max_ratio, max_prop_frac; |
| 96 | 96 | ||
| 97 | struct bdi_writeback wb; /* default writeback info for this bdi */ | 97 | struct bdi_writeback wb; /* default writeback info for this bdi */ |
| 98 | spinlock_t wb_lock; /* protects work_list */ | 98 | spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */ |
| 99 | 99 | ||
| 100 | struct list_head work_list; | 100 | struct list_head work_list; |
| 101 | 101 | ||
diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 5f9cd963213d..72647429adf6 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #define _LINUX_BACKLIGHT_H | 9 | #define _LINUX_BACKLIGHT_H |
| 10 | 10 | ||
| 11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
| 12 | #include <linux/fb.h> | ||
| 12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
| 13 | #include <linux/notifier.h> | 14 | #include <linux/notifier.h> |
| 14 | 15 | ||
| @@ -104,6 +105,11 @@ struct backlight_device { | |||
| 104 | struct list_head entry; | 105 | struct list_head entry; |
| 105 | 106 | ||
| 106 | struct device dev; | 107 | struct device dev; |
| 108 | |||
| 109 | /* Multiple framebuffers may share one backlight device */ | ||
| 110 | bool fb_bl_on[FB_MAX]; | ||
| 111 | |||
| 112 | int use_count; | ||
| 107 | }; | 113 | }; |
| 108 | 114 | ||
| 109 | static inline void backlight_update_status(struct backlight_device *bd) | 115 | static inline void backlight_update_status(struct backlight_device *bd) |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 3fe661fe96d1..b19d3dc2e651 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
| @@ -87,25 +87,26 @@ extern void rebuild_sched_domains(void); | |||
| 87 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); | 87 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); |
| 88 | 88 | ||
| 89 | /* | 89 | /* |
| 90 | * get_mems_allowed is required when making decisions involving mems_allowed | 90 | * read_mems_allowed_begin is required when making decisions involving |
| 91 | * such as during page allocation. mems_allowed can be updated in parallel | 91 | * mems_allowed such as during page allocation. mems_allowed can be updated in |
| 92 | * and depending on the new value an operation can fail potentially causing | 92 | * parallel and depending on the new value an operation can fail potentially |
| 93 | * process failure. A retry loop with get_mems_allowed and put_mems_allowed | 93 | * causing process failure. A retry loop with read_mems_allowed_begin and |
| 94 | * prevents these artificial failures. | 94 | * read_mems_allowed_retry prevents these artificial failures. |
| 95 | */ | 95 | */ |
| 96 | static inline unsigned int get_mems_allowed(void) | 96 | static inline unsigned int read_mems_allowed_begin(void) |
| 97 | { | 97 | { |
| 98 | return read_seqcount_begin(¤t->mems_allowed_seq); | 98 | return read_seqcount_begin(¤t->mems_allowed_seq); |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | /* | 101 | /* |
| 102 | * If this returns false, the operation that took place after get_mems_allowed | 102 | * If this returns true, the operation that took place after |
| 103 | * may have failed. It is up to the caller to retry the operation if | 103 | * read_mems_allowed_begin may have failed artificially due to a concurrent |
| 104 | * update of mems_allowed. It is up to the caller to retry the operation if | ||
| 104 | * appropriate. | 105 | * appropriate. |
| 105 | */ | 106 | */ |
| 106 | static inline bool put_mems_allowed(unsigned int seq) | 107 | static inline bool read_mems_allowed_retry(unsigned int seq) |
| 107 | { | 108 | { |
| 108 | return !read_seqcount_retry(¤t->mems_allowed_seq, seq); | 109 | return read_seqcount_retry(¤t->mems_allowed_seq, seq); |
| 109 | } | 110 | } |
| 110 | 111 | ||
| 111 | static inline void set_mems_allowed(nodemask_t nodemask) | 112 | static inline void set_mems_allowed(nodemask_t nodemask) |
| @@ -225,14 +226,14 @@ static inline void set_mems_allowed(nodemask_t nodemask) | |||
| 225 | { | 226 | { |
| 226 | } | 227 | } |
| 227 | 228 | ||
| 228 | static inline unsigned int get_mems_allowed(void) | 229 | static inline unsigned int read_mems_allowed_begin(void) |
| 229 | { | 230 | { |
| 230 | return 0; | 231 | return 0; |
| 231 | } | 232 | } |
| 232 | 233 | ||
| 233 | static inline bool put_mems_allowed(unsigned int seq) | 234 | static inline bool read_mems_allowed_retry(unsigned int seq) |
| 234 | { | 235 | { |
| 235 | return true; | 236 | return false; |
| 236 | } | 237 | } |
| 237 | 238 | ||
| 238 | #endif /* !CONFIG_CPUSETS */ | 239 | #endif /* !CONFIG_CPUSETS */ |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 04421e825365..f61d6c8f5ef3 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
| @@ -66,7 +66,7 @@ extern struct group_info *groups_alloc(int); | |||
| 66 | extern struct group_info init_groups; | 66 | extern struct group_info init_groups; |
| 67 | extern void groups_free(struct group_info *); | 67 | extern void groups_free(struct group_info *); |
| 68 | extern int set_current_groups(struct group_info *); | 68 | extern int set_current_groups(struct group_info *); |
| 69 | extern int set_groups(struct cred *, struct group_info *); | 69 | extern void set_groups(struct cred *, struct group_info *); |
| 70 | extern int groups_search(const struct group_info *, kgid_t); | 70 | extern int groups_search(const struct group_info *, kgid_t); |
| 71 | 71 | ||
| 72 | /* access the groups "array" with this macro */ | 72 | /* access the groups "array" with this macro */ |
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h index 8c0aef1ba5f5..1d0aedef9822 100644 --- a/include/linux/decompress/inflate.h +++ b/include/linux/decompress/inflate.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | #ifndef INFLATE_H | 1 | #ifndef LINUX_DECOMPRESS_INFLATE_H |
| 2 | #define INFLATE_H | 2 | #define LINUX_DECOMPRESS_INFLATE_H |
| 3 | 3 | ||
| 4 | int gunzip(unsigned char *inbuf, int len, | 4 | int gunzip(unsigned char *inbuf, int len, |
| 5 | int(*fill)(void*, unsigned int), | 5 | int(*fill)(void*, unsigned int), |
diff --git a/include/linux/err.h b/include/linux/err.h index 15f92e072450..a729120644d5 100644 --- a/include/linux/err.h +++ b/include/linux/err.h | |||
| @@ -2,12 +2,13 @@ | |||
| 2 | #define _LINUX_ERR_H | 2 | #define _LINUX_ERR_H |
| 3 | 3 | ||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | #include <linux/types.h> | ||
| 5 | 6 | ||
| 6 | #include <asm/errno.h> | 7 | #include <asm/errno.h> |
| 7 | 8 | ||
| 8 | /* | 9 | /* |
| 9 | * Kernel pointers have redundant information, so we can use a | 10 | * Kernel pointers have redundant information, so we can use a |
| 10 | * scheme where we can return either an error code or a dentry | 11 | * scheme where we can return either an error code or a normal |
| 11 | * pointer with the same return value. | 12 | * pointer with the same return value. |
| 12 | * | 13 | * |
| 13 | * This should be a per-architecture thing, to allow different | 14 | * This should be a per-architecture thing, to allow different |
| @@ -29,12 +30,12 @@ static inline long __must_check PTR_ERR(__force const void *ptr) | |||
| 29 | return (long) ptr; | 30 | return (long) ptr; |
| 30 | } | 31 | } |
| 31 | 32 | ||
| 32 | static inline long __must_check IS_ERR(__force const void *ptr) | 33 | static inline bool __must_check IS_ERR(__force const void *ptr) |
| 33 | { | 34 | { |
| 34 | return IS_ERR_VALUE((unsigned long)ptr); | 35 | return IS_ERR_VALUE((unsigned long)ptr); |
| 35 | } | 36 | } |
| 36 | 37 | ||
| 37 | static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr) | 38 | static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) |
| 38 | { | 39 | { |
| 39 | return !ptr || IS_ERR_VALUE((unsigned long)ptr); | 40 | return !ptr || IS_ERR_VALUE((unsigned long)ptr); |
| 40 | } | 41 | } |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 6e765d28841b..3ca9420f627e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -419,6 +419,7 @@ struct address_space { | |||
| 419 | struct mutex i_mmap_mutex; /* protect tree, count, list */ | 419 | struct mutex i_mmap_mutex; /* protect tree, count, list */ |
| 420 | /* Protected by tree_lock together with the radix tree */ | 420 | /* Protected by tree_lock together with the radix tree */ |
| 421 | unsigned long nrpages; /* number of total pages */ | 421 | unsigned long nrpages; /* number of total pages */ |
| 422 | unsigned long nrshadows; /* number of shadow entries */ | ||
| 422 | pgoff_t writeback_index;/* writeback starts here */ | 423 | pgoff_t writeback_index;/* writeback starts here */ |
| 423 | const struct address_space_operations *a_ops; /* methods */ | 424 | const struct address_space_operations *a_ops; /* methods */ |
| 424 | unsigned long flags; /* error bits/gfp mask */ | 425 | unsigned long flags; /* error bits/gfp mask */ |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 64cf3ef50696..fc7718c6bd3e 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -178,7 +178,7 @@ struct fsnotify_group { | |||
| 178 | struct fanotify_group_private_data { | 178 | struct fanotify_group_private_data { |
| 179 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 179 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
| 180 | /* allows a group to block waiting for a userspace response */ | 180 | /* allows a group to block waiting for a userspace response */ |
| 181 | struct mutex access_mutex; | 181 | spinlock_t access_lock; |
| 182 | struct list_head access_list; | 182 | struct list_head access_list; |
| 183 | wait_queue_head_t access_waitq; | 183 | wait_queue_head_t access_waitq; |
| 184 | atomic_t bypass_perm; | 184 | atomic_t bypass_perm; |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 8c43cc469d78..5b337cf8fb86 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | #include <linux/fs.h> | 6 | #include <linux/fs.h> |
| 7 | #include <linux/hugetlb_inline.h> | 7 | #include <linux/hugetlb_inline.h> |
| 8 | #include <linux/cgroup.h> | 8 | #include <linux/cgroup.h> |
| 9 | #include <linux/list.h> | ||
| 10 | #include <linux/kref.h> | ||
| 9 | 11 | ||
| 10 | struct ctl_table; | 12 | struct ctl_table; |
| 11 | struct user_struct; | 13 | struct user_struct; |
| @@ -23,6 +25,14 @@ struct hugepage_subpool { | |||
| 23 | long max_hpages, used_hpages; | 25 | long max_hpages, used_hpages; |
| 24 | }; | 26 | }; |
| 25 | 27 | ||
| 28 | struct resv_map { | ||
| 29 | struct kref refs; | ||
| 30 | spinlock_t lock; | ||
| 31 | struct list_head regions; | ||
| 32 | }; | ||
| 33 | extern struct resv_map *resv_map_alloc(void); | ||
| 34 | void resv_map_release(struct kref *ref); | ||
| 35 | |||
| 26 | extern spinlock_t hugetlb_lock; | 36 | extern spinlock_t hugetlb_lock; |
| 27 | extern int hugetlb_max_hstate __read_mostly; | 37 | extern int hugetlb_max_hstate __read_mostly; |
| 28 | #define for_each_hstate(h) \ | 38 | #define for_each_hstate(h) \ |
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 2a5e5548a1d2..5bb424659c04 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
| @@ -30,8 +30,6 @@ extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; | |||
| 30 | extern void kmemleak_free(const void *ptr) __ref; | 30 | extern void kmemleak_free(const void *ptr) __ref; |
| 31 | extern void kmemleak_free_part(const void *ptr, size_t size) __ref; | 31 | extern void kmemleak_free_part(const void *ptr, size_t size) __ref; |
| 32 | extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; | 32 | extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; |
| 33 | extern void kmemleak_padding(const void *ptr, unsigned long offset, | ||
| 34 | size_t size) __ref; | ||
| 35 | extern void kmemleak_not_leak(const void *ptr) __ref; | 33 | extern void kmemleak_not_leak(const void *ptr) __ref; |
| 36 | extern void kmemleak_ignore(const void *ptr) __ref; | 34 | extern void kmemleak_ignore(const void *ptr) __ref; |
| 37 | extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; | 35 | extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; |
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 926afb6f6b5f..f896a33e8341 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
| @@ -119,6 +119,7 @@ struct kobj_type { | |||
| 119 | }; | 119 | }; |
| 120 | 120 | ||
| 121 | struct kobj_uevent_env { | 121 | struct kobj_uevent_env { |
| 122 | char *argv[3]; | ||
| 122 | char *envp[UEVENT_NUM_ENVP]; | 123 | char *envp[UEVENT_NUM_ENVP]; |
| 123 | int envp_idx; | 124 | int envp_idx; |
| 124 | char buf[UEVENT_BUFFER_SIZE]; | 125 | char buf[UEVENT_BUFFER_SIZE]; |
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 3ce541753c88..f3434533fbf8 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | /* list_lru_walk_cb has to always return one of those */ | 13 | /* list_lru_walk_cb has to always return one of those */ |
| 14 | enum lru_status { | 14 | enum lru_status { |
| 15 | LRU_REMOVED, /* item removed from list */ | 15 | LRU_REMOVED, /* item removed from list */ |
| 16 | LRU_REMOVED_RETRY, /* item removed, but lock has been | ||
| 17 | dropped and reacquired */ | ||
| 16 | LRU_ROTATE, /* item referenced, give another pass */ | 18 | LRU_ROTATE, /* item referenced, give another pass */ |
| 17 | LRU_SKIP, /* item cannot be locked, skip */ | 19 | LRU_SKIP, /* item cannot be locked, skip */ |
| 18 | LRU_RETRY, /* item not freeable. May drop the lock | 20 | LRU_RETRY, /* item not freeable. May drop the lock |
| @@ -32,7 +34,11 @@ struct list_lru { | |||
| 32 | }; | 34 | }; |
| 33 | 35 | ||
| 34 | void list_lru_destroy(struct list_lru *lru); | 36 | void list_lru_destroy(struct list_lru *lru); |
| 35 | int list_lru_init(struct list_lru *lru); | 37 | int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key); |
| 38 | static inline int list_lru_init(struct list_lru *lru) | ||
| 39 | { | ||
| 40 | return list_lru_init_key(lru, NULL); | ||
| 41 | } | ||
| 36 | 42 | ||
| 37 | /** | 43 | /** |
| 38 | * list_lru_add: add an element to the lru list's tail | 44 | * list_lru_add: add an element to the lru list's tail |
diff --git a/include/linux/mfd/pm8xxx/rtc.h b/include/linux/mfd/pm8xxx/rtc.h deleted file mode 100644 index 14f1983eaecc..000000000000 --- a/include/linux/mfd/pm8xxx/rtc.h +++ /dev/null | |||
| @@ -1,25 +0,0 @@ | |||
| 1 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. | ||
| 2 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License version 2 and | ||
| 5 | * only version 2 as published by the Free Software Foundation. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef __RTC_PM8XXX_H__ | ||
| 14 | #define __RTC_PM8XXX_H__ | ||
| 15 | |||
| 16 | #define PM8XXX_RTC_DEV_NAME "rtc-pm8xxx" | ||
| 17 | /** | ||
| 18 | * struct pm8xxx_rtc_pdata - RTC driver platform data | ||
| 19 | * @rtc_write_enable: variable stating RTC write capability | ||
| 20 | */ | ||
| 21 | struct pm8xxx_rtc_platform_data { | ||
| 22 | bool rtc_write_enable; | ||
| 23 | }; | ||
| 24 | |||
| 25 | #endif /* __RTC_PM8XXX_H__ */ | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 2eec61fe75c9..35300f390eb6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1041,6 +1041,14 @@ extern void show_free_areas(unsigned int flags); | |||
| 1041 | extern bool skip_free_areas_node(unsigned int flags, int nid); | 1041 | extern bool skip_free_areas_node(unsigned int flags, int nid); |
| 1042 | 1042 | ||
| 1043 | int shmem_zero_setup(struct vm_area_struct *); | 1043 | int shmem_zero_setup(struct vm_area_struct *); |
| 1044 | #ifdef CONFIG_SHMEM | ||
| 1045 | bool shmem_mapping(struct address_space *mapping); | ||
| 1046 | #else | ||
| 1047 | static inline bool shmem_mapping(struct address_space *mapping) | ||
| 1048 | { | ||
| 1049 | return false; | ||
| 1050 | } | ||
| 1051 | #endif | ||
| 1044 | 1052 | ||
| 1045 | extern int can_do_mlock(void); | 1053 | extern int can_do_mlock(void); |
| 1046 | extern int user_shm_lock(size_t, struct user_struct *); | 1054 | extern int user_shm_lock(size_t, struct user_struct *); |
| @@ -1658,10 +1666,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn) | |||
| 1658 | #else | 1666 | #else |
| 1659 | /* please see mm/page_alloc.c */ | 1667 | /* please see mm/page_alloc.c */ |
| 1660 | extern int __meminit early_pfn_to_nid(unsigned long pfn); | 1668 | extern int __meminit early_pfn_to_nid(unsigned long pfn); |
| 1661 | #ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID | ||
| 1662 | /* there is a per-arch backend function. */ | 1669 | /* there is a per-arch backend function. */ |
| 1663 | extern int __meminit __early_pfn_to_nid(unsigned long pfn); | 1670 | extern int __meminit __early_pfn_to_nid(unsigned long pfn); |
| 1664 | #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ | ||
| 1665 | #endif | 1671 | #endif |
| 1666 | 1672 | ||
| 1667 | extern void set_dma_reserve(unsigned long new_dma_reserve); | 1673 | extern void set_dma_reserve(unsigned long new_dma_reserve); |
| @@ -1826,6 +1832,7 @@ vm_unmapped_area(struct vm_unmapped_area_info *info) | |||
| 1826 | extern void truncate_inode_pages(struct address_space *, loff_t); | 1832 | extern void truncate_inode_pages(struct address_space *, loff_t); |
| 1827 | extern void truncate_inode_pages_range(struct address_space *, | 1833 | extern void truncate_inode_pages_range(struct address_space *, |
| 1828 | loff_t lstart, loff_t lend); | 1834 | loff_t lstart, loff_t lend); |
| 1835 | extern void truncate_inode_pages_final(struct address_space *); | ||
| 1829 | 1836 | ||
| 1830 | /* generic vm_area_ops exported for stackable file systems */ | 1837 | /* generic vm_area_ops exported for stackable file systems */ |
| 1831 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); | 1838 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9b61b9bf81ac..fac5509c18f0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -142,6 +142,9 @@ enum zone_stat_item { | |||
| 142 | NUMA_LOCAL, /* allocation from local node */ | 142 | NUMA_LOCAL, /* allocation from local node */ |
| 143 | NUMA_OTHER, /* allocation from other node */ | 143 | NUMA_OTHER, /* allocation from other node */ |
| 144 | #endif | 144 | #endif |
| 145 | WORKINGSET_REFAULT, | ||
| 146 | WORKINGSET_ACTIVATE, | ||
| 147 | WORKINGSET_NODERECLAIM, | ||
| 145 | NR_ANON_TRANSPARENT_HUGEPAGES, | 148 | NR_ANON_TRANSPARENT_HUGEPAGES, |
| 146 | NR_FREE_CMA_PAGES, | 149 | NR_FREE_CMA_PAGES, |
| 147 | NR_VM_ZONE_STAT_ITEMS }; | 150 | NR_VM_ZONE_STAT_ITEMS }; |
| @@ -392,6 +395,9 @@ struct zone { | |||
| 392 | spinlock_t lru_lock; | 395 | spinlock_t lru_lock; |
| 393 | struct lruvec lruvec; | 396 | struct lruvec lruvec; |
| 394 | 397 | ||
| 398 | /* Evictions & activations on the inactive file list */ | ||
| 399 | atomic_long_t inactive_age; | ||
| 400 | |||
| 395 | unsigned long pages_scanned; /* since last reclaim */ | 401 | unsigned long pages_scanned; /* since last reclaim */ |
| 396 | unsigned long flags; /* zone flags, see below */ | 402 | unsigned long flags; /* zone flags, see below */ |
| 397 | 403 | ||
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 98755767c7b0..ff3fea3194c6 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h | |||
| @@ -82,6 +82,8 @@ struct nilfs_inode { | |||
| 82 | __le32 i_pad; | 82 | __le32 i_pad; |
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | #define NILFS_MIN_INODE_SIZE 128 | ||
| 86 | |||
| 85 | /** | 87 | /** |
| 86 | * struct nilfs_super_root - structure of super root | 88 | * struct nilfs_super_root - structure of super root |
| 87 | * @sr_sum: check sum | 89 | * @sr_sum: check sum |
| @@ -482,6 +484,8 @@ struct nilfs_dat_entry { | |||
| 482 | __le64 de_rsv; | 484 | __le64 de_rsv; |
| 483 | }; | 485 | }; |
| 484 | 486 | ||
| 487 | #define NILFS_MIN_DAT_ENTRY_SIZE 32 | ||
| 488 | |||
| 485 | /** | 489 | /** |
| 486 | * struct nilfs_snapshot_list - snapshot list | 490 | * struct nilfs_snapshot_list - snapshot list |
| 487 | * @ssl_next: next checkpoint number on snapshot list | 491 | * @ssl_next: next checkpoint number on snapshot list |
| @@ -520,6 +524,8 @@ struct nilfs_checkpoint { | |||
| 520 | struct nilfs_inode cp_ifile_inode; | 524 | struct nilfs_inode cp_ifile_inode; |
| 521 | }; | 525 | }; |
| 522 | 526 | ||
| 527 | #define NILFS_MIN_CHECKPOINT_SIZE (64 + NILFS_MIN_INODE_SIZE) | ||
| 528 | |||
| 523 | /* checkpoint flags */ | 529 | /* checkpoint flags */ |
| 524 | enum { | 530 | enum { |
| 525 | NILFS_CHECKPOINT_SNAPSHOT, | 531 | NILFS_CHECKPOINT_SNAPSHOT, |
| @@ -615,6 +621,8 @@ struct nilfs_segment_usage { | |||
| 615 | __le32 su_flags; | 621 | __le32 su_flags; |
| 616 | }; | 622 | }; |
| 617 | 623 | ||
| 624 | #define NILFS_MIN_SEGMENT_USAGE_SIZE 16 | ||
| 625 | |||
| 618 | /* segment usage flag */ | 626 | /* segment usage flag */ |
| 619 | enum { | 627 | enum { |
| 620 | NILFS_SEGMENT_USAGE_ACTIVE, | 628 | NILFS_SEGMENT_USAGE_ACTIVE, |
| @@ -710,6 +718,48 @@ static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) | |||
| 710 | } | 718 | } |
| 711 | 719 | ||
| 712 | /* ioctl */ | 720 | /* ioctl */ |
| 721 | /** | ||
| 722 | * nilfs_suinfo_update - segment usage information update | ||
| 723 | * @sup_segnum: segment number | ||
| 724 | * @sup_flags: flags for which fields are active in sup_sui | ||
| 725 | * @sup_reserved: reserved necessary for alignment | ||
| 726 | * @sup_sui: segment usage information | ||
| 727 | */ | ||
| 728 | struct nilfs_suinfo_update { | ||
| 729 | __u64 sup_segnum; | ||
| 730 | __u32 sup_flags; | ||
| 731 | __u32 sup_reserved; | ||
| 732 | struct nilfs_suinfo sup_sui; | ||
| 733 | }; | ||
| 734 | |||
| 735 | enum { | ||
| 736 | NILFS_SUINFO_UPDATE_LASTMOD, | ||
| 737 | NILFS_SUINFO_UPDATE_NBLOCKS, | ||
| 738 | NILFS_SUINFO_UPDATE_FLAGS, | ||
| 739 | __NR_NILFS_SUINFO_UPDATE_FIELDS, | ||
| 740 | }; | ||
| 741 | |||
| 742 | #define NILFS_SUINFO_UPDATE_FNS(flag, name) \ | ||
| 743 | static inline void \ | ||
| 744 | nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \ | ||
| 745 | { \ | ||
| 746 | sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \ | ||
| 747 | } \ | ||
| 748 | static inline void \ | ||
| 749 | nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \ | ||
| 750 | { \ | ||
| 751 | sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \ | ||
| 752 | } \ | ||
| 753 | static inline int \ | ||
| 754 | nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \ | ||
| 755 | { \ | ||
| 756 | return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\ | ||
| 757 | } | ||
| 758 | |||
| 759 | NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod) | ||
| 760 | NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks) | ||
| 761 | NILFS_SUINFO_UPDATE_FNS(FLAGS, flags) | ||
| 762 | |||
| 713 | enum { | 763 | enum { |
| 714 | NILFS_CHECKPOINT, | 764 | NILFS_CHECKPOINT, |
| 715 | NILFS_SNAPSHOT, | 765 | NILFS_SNAPSHOT, |
| @@ -863,5 +913,7 @@ struct nilfs_bdesc { | |||
| 863 | _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) | 913 | _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) |
| 864 | #define NILFS_IOCTL_SET_ALLOC_RANGE \ | 914 | #define NILFS_IOCTL_SET_ALLOC_RANGE \ |
| 865 | _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2]) | 915 | _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2]) |
| 916 | #define NILFS_IOCTL_SET_SUINFO \ | ||
| 917 | _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv) | ||
| 866 | 918 | ||
| 867 | #endif /* _LINUX_NILFS_FS_H */ | 919 | #endif /* _LINUX_NILFS_FS_H */ |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 1710d1b060ba..45598f1e9aa3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -25,6 +25,7 @@ enum mapping_flags { | |||
| 25 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ | 25 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ |
| 26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ | 26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ |
| 27 | AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ | 27 | AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ |
| 28 | AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */ | ||
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| 30 | static inline void mapping_set_error(struct address_space *mapping, int error) | 31 | static inline void mapping_set_error(struct address_space *mapping, int error) |
| @@ -69,6 +70,16 @@ static inline int mapping_balloon(struct address_space *mapping) | |||
| 69 | return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags); | 70 | return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags); |
| 70 | } | 71 | } |
| 71 | 72 | ||
| 73 | static inline void mapping_set_exiting(struct address_space *mapping) | ||
| 74 | { | ||
| 75 | set_bit(AS_EXITING, &mapping->flags); | ||
| 76 | } | ||
| 77 | |||
| 78 | static inline int mapping_exiting(struct address_space *mapping) | ||
| 79 | { | ||
| 80 | return test_bit(AS_EXITING, &mapping->flags); | ||
| 81 | } | ||
| 82 | |||
| 72 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) | 83 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) |
| 73 | { | 84 | { |
| 74 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; | 85 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; |
| @@ -243,12 +254,20 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x) | |||
| 243 | 254 | ||
| 244 | typedef int filler_t(void *, struct page *); | 255 | typedef int filler_t(void *, struct page *); |
| 245 | 256 | ||
| 246 | extern struct page * find_get_page(struct address_space *mapping, | 257 | pgoff_t page_cache_next_hole(struct address_space *mapping, |
| 247 | pgoff_t index); | 258 | pgoff_t index, unsigned long max_scan); |
| 248 | extern struct page * find_lock_page(struct address_space *mapping, | 259 | pgoff_t page_cache_prev_hole(struct address_space *mapping, |
| 249 | pgoff_t index); | 260 | pgoff_t index, unsigned long max_scan); |
| 250 | extern struct page * find_or_create_page(struct address_space *mapping, | 261 | |
| 251 | pgoff_t index, gfp_t gfp_mask); | 262 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); |
| 263 | struct page *find_get_page(struct address_space *mapping, pgoff_t offset); | ||
| 264 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); | ||
| 265 | struct page *find_lock_page(struct address_space *mapping, pgoff_t offset); | ||
| 266 | struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, | ||
| 267 | gfp_t gfp_mask); | ||
| 268 | unsigned find_get_entries(struct address_space *mapping, pgoff_t start, | ||
| 269 | unsigned int nr_entries, struct page **entries, | ||
| 270 | pgoff_t *indices); | ||
| 252 | unsigned find_get_pages(struct address_space *mapping, pgoff_t start, | 271 | unsigned find_get_pages(struct address_space *mapping, pgoff_t start, |
| 253 | unsigned int nr_pages, struct page **pages); | 272 | unsigned int nr_pages, struct page **pages); |
| 254 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, | 273 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, |
| @@ -270,8 +289,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping, | |||
| 270 | 289 | ||
| 271 | extern struct page * grab_cache_page_nowait(struct address_space *mapping, | 290 | extern struct page * grab_cache_page_nowait(struct address_space *mapping, |
| 272 | pgoff_t index); | 291 | pgoff_t index); |
| 273 | extern struct page * read_cache_page_async(struct address_space *mapping, | ||
| 274 | pgoff_t index, filler_t *filler, void *data); | ||
| 275 | extern struct page * read_cache_page(struct address_space *mapping, | 292 | extern struct page * read_cache_page(struct address_space *mapping, |
| 276 | pgoff_t index, filler_t *filler, void *data); | 293 | pgoff_t index, filler_t *filler, void *data); |
| 277 | extern struct page * read_cache_page_gfp(struct address_space *mapping, | 294 | extern struct page * read_cache_page_gfp(struct address_space *mapping, |
| @@ -279,14 +296,6 @@ extern struct page * read_cache_page_gfp(struct address_space *mapping, | |||
| 279 | extern int read_cache_pages(struct address_space *mapping, | 296 | extern int read_cache_pages(struct address_space *mapping, |
| 280 | struct list_head *pages, filler_t *filler, void *data); | 297 | struct list_head *pages, filler_t *filler, void *data); |
| 281 | 298 | ||
| 282 | static inline struct page *read_mapping_page_async( | ||
| 283 | struct address_space *mapping, | ||
| 284 | pgoff_t index, void *data) | ||
| 285 | { | ||
| 286 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | ||
| 287 | return read_cache_page_async(mapping, index, filler, data); | ||
| 288 | } | ||
| 289 | |||
| 290 | static inline struct page *read_mapping_page(struct address_space *mapping, | 299 | static inline struct page *read_mapping_page(struct address_space *mapping, |
| 291 | pgoff_t index, void *data) | 300 | pgoff_t index, void *data) |
| 292 | { | 301 | { |
| @@ -539,7 +548,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
| 539 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | 548 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, |
| 540 | pgoff_t index, gfp_t gfp_mask); | 549 | pgoff_t index, gfp_t gfp_mask); |
| 541 | extern void delete_from_page_cache(struct page *page); | 550 | extern void delete_from_page_cache(struct page *page); |
| 542 | extern void __delete_from_page_cache(struct page *page); | 551 | extern void __delete_from_page_cache(struct page *page, void *shadow); |
| 543 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); | 552 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); |
| 544 | 553 | ||
| 545 | /* | 554 | /* |
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index e4dbfab37729..b45d391b4540 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h | |||
| @@ -22,6 +22,11 @@ struct pagevec { | |||
| 22 | 22 | ||
| 23 | void __pagevec_release(struct pagevec *pvec); | 23 | void __pagevec_release(struct pagevec *pvec); |
| 24 | void __pagevec_lru_add(struct pagevec *pvec); | 24 | void __pagevec_lru_add(struct pagevec *pvec); |
| 25 | unsigned pagevec_lookup_entries(struct pagevec *pvec, | ||
| 26 | struct address_space *mapping, | ||
| 27 | pgoff_t start, unsigned nr_entries, | ||
| 28 | pgoff_t *indices); | ||
| 29 | void pagevec_remove_exceptionals(struct pagevec *pvec); | ||
| 25 | unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, | 30 | unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, |
| 26 | pgoff_t start, unsigned nr_pages); | 31 | pgoff_t start, unsigned nr_pages); |
| 27 | unsigned pagevec_lookup_tag(struct pagevec *pvec, | 32 | unsigned pagevec_lookup_tag(struct pagevec *pvec, |
diff --git a/include/linux/printk.h b/include/linux/printk.h index fa47e2708c01..8752f7595b27 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
| @@ -24,13 +24,9 @@ static inline int printk_get_level(const char *buffer) | |||
| 24 | 24 | ||
| 25 | static inline const char *printk_skip_level(const char *buffer) | 25 | static inline const char *printk_skip_level(const char *buffer) |
| 26 | { | 26 | { |
| 27 | if (printk_get_level(buffer)) { | 27 | if (printk_get_level(buffer)) |
| 28 | switch (buffer[1]) { | 28 | return buffer + 2; |
| 29 | case '0' ... '7': | 29 | |
| 30 | case 'd': /* KERN_DEFAULT */ | ||
| 31 | return buffer + 2; | ||
| 32 | } | ||
| 33 | } | ||
| 34 | return buffer; | 30 | return buffer; |
| 35 | } | 31 | } |
| 36 | 32 | ||
| @@ -124,9 +120,9 @@ asmlinkage __printf(1, 0) | |||
| 124 | int vprintk(const char *fmt, va_list args); | 120 | int vprintk(const char *fmt, va_list args); |
| 125 | 121 | ||
| 126 | asmlinkage __printf(5, 6) __cold | 122 | asmlinkage __printf(5, 6) __cold |
| 127 | asmlinkage int printk_emit(int facility, int level, | 123 | int printk_emit(int facility, int level, |
| 128 | const char *dict, size_t dictlen, | 124 | const char *dict, size_t dictlen, |
| 129 | const char *fmt, ...); | 125 | const char *fmt, ...); |
| 130 | 126 | ||
| 131 | asmlinkage __printf(1, 2) __cold | 127 | asmlinkage __printf(1, 2) __cold |
| 132 | int printk(const char *fmt, ...); | 128 | int printk(const char *fmt, ...); |
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 6965fe394c3b..1d3eee594cd6 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
| @@ -46,6 +46,14 @@ void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); | |||
| 46 | void dquot_initialize(struct inode *inode); | 46 | void dquot_initialize(struct inode *inode); |
| 47 | void dquot_drop(struct inode *inode); | 47 | void dquot_drop(struct inode *inode); |
| 48 | struct dquot *dqget(struct super_block *sb, struct kqid qid); | 48 | struct dquot *dqget(struct super_block *sb, struct kqid qid); |
| 49 | static inline struct dquot *dqgrab(struct dquot *dquot) | ||
| 50 | { | ||
| 51 | /* Make sure someone else has active reference to dquot */ | ||
| 52 | WARN_ON_ONCE(!atomic_read(&dquot->dq_count)); | ||
| 53 | WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)); | ||
| 54 | atomic_inc(&dquot->dq_count); | ||
| 55 | return dquot; | ||
| 56 | } | ||
| 49 | void dqput(struct dquot *dquot); | 57 | void dqput(struct dquot *dquot); |
| 50 | int dquot_scan_active(struct super_block *sb, | 58 | int dquot_scan_active(struct super_block *sb, |
| 51 | int (*fn)(struct dquot *dquot, unsigned long priv), | 59 | int (*fn)(struct dquot *dquot, unsigned long priv), |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 403940787be1..33170dbd9db4 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -60,6 +60,49 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) | |||
| 60 | 60 | ||
| 61 | #define RADIX_TREE_MAX_TAGS 3 | 61 | #define RADIX_TREE_MAX_TAGS 3 |
| 62 | 62 | ||
| 63 | #ifdef __KERNEL__ | ||
| 64 | #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) | ||
| 65 | #else | ||
| 66 | #define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ | ||
| 67 | #endif | ||
| 68 | |||
| 69 | #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) | ||
| 70 | #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) | ||
| 71 | |||
| 72 | #define RADIX_TREE_TAG_LONGS \ | ||
| 73 | ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) | ||
| 74 | |||
| 75 | #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) | ||
| 76 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ | ||
| 77 | RADIX_TREE_MAP_SHIFT)) | ||
| 78 | |||
| 79 | /* Height component in node->path */ | ||
| 80 | #define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1) | ||
| 81 | #define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1) | ||
| 82 | |||
| 83 | /* Internally used bits of node->count */ | ||
| 84 | #define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) | ||
| 85 | #define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) | ||
| 86 | |||
| 87 | struct radix_tree_node { | ||
| 88 | unsigned int path; /* Offset in parent & height from the bottom */ | ||
| 89 | unsigned int count; | ||
| 90 | union { | ||
| 91 | struct { | ||
| 92 | /* Used when ascending tree */ | ||
| 93 | struct radix_tree_node *parent; | ||
| 94 | /* For tree user */ | ||
| 95 | void *private_data; | ||
| 96 | }; | ||
| 97 | /* Used when freeing node */ | ||
| 98 | struct rcu_head rcu_head; | ||
| 99 | }; | ||
| 100 | /* For tree user */ | ||
| 101 | struct list_head private_list; | ||
| 102 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; | ||
| 103 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; | ||
| 104 | }; | ||
| 105 | |||
| 63 | /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ | 106 | /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ |
| 64 | struct radix_tree_root { | 107 | struct radix_tree_root { |
| 65 | unsigned int height; | 108 | unsigned int height; |
| @@ -101,6 +144,7 @@ do { \ | |||
| 101 | * concurrently with other readers. | 144 | * concurrently with other readers. |
| 102 | * | 145 | * |
| 103 | * The notable exceptions to this rule are the following functions: | 146 | * The notable exceptions to this rule are the following functions: |
| 147 | * __radix_tree_lookup | ||
| 104 | * radix_tree_lookup | 148 | * radix_tree_lookup |
| 105 | * radix_tree_lookup_slot | 149 | * radix_tree_lookup_slot |
| 106 | * radix_tree_tag_get | 150 | * radix_tree_tag_get |
| @@ -216,9 +260,16 @@ static inline void radix_tree_replace_slot(void **pslot, void *item) | |||
| 216 | rcu_assign_pointer(*pslot, item); | 260 | rcu_assign_pointer(*pslot, item); |
| 217 | } | 261 | } |
| 218 | 262 | ||
| 263 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, | ||
| 264 | struct radix_tree_node **nodep, void ***slotp); | ||
| 219 | int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); | 265 | int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); |
| 266 | void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, | ||
| 267 | struct radix_tree_node **nodep, void ***slotp); | ||
| 220 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); | 268 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); |
| 221 | void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); | 269 | void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); |
| 270 | bool __radix_tree_delete_node(struct radix_tree_root *root, | ||
| 271 | struct radix_tree_node *node); | ||
| 272 | void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); | ||
| 222 | void *radix_tree_delete(struct radix_tree_root *, unsigned long); | 273 | void *radix_tree_delete(struct radix_tree_root *, unsigned long); |
| 223 | unsigned int | 274 | unsigned int |
| 224 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | 275 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, |
| @@ -226,10 +277,6 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 226 | unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, | 277 | unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, |
| 227 | void ***results, unsigned long *indices, | 278 | void ***results, unsigned long *indices, |
| 228 | unsigned long first_index, unsigned int max_items); | 279 | unsigned long first_index, unsigned int max_items); |
| 229 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, | ||
| 230 | unsigned long index, unsigned long max_scan); | ||
| 231 | unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | ||
| 232 | unsigned long index, unsigned long max_scan); | ||
| 233 | int radix_tree_preload(gfp_t gfp_mask); | 280 | int radix_tree_preload(gfp_t gfp_mask); |
| 234 | int radix_tree_maybe_preload(gfp_t gfp_mask); | 281 | int radix_tree_maybe_preload(gfp_t gfp_mask); |
| 235 | void radix_tree_init(void); | 282 | void radix_tree_init(void); |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 9d55438bc4ad..4d1771c2d29f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
| @@ -51,6 +51,7 @@ extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, | |||
| 51 | unsigned long flags); | 51 | unsigned long flags); |
| 52 | extern int shmem_zero_setup(struct vm_area_struct *); | 52 | extern int shmem_zero_setup(struct vm_area_struct *); |
| 53 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); | 53 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); |
| 54 | extern bool shmem_mapping(struct address_space *mapping); | ||
| 54 | extern void shmem_unlock_mapping(struct address_space *mapping); | 55 | extern void shmem_unlock_mapping(struct address_space *mapping); |
| 55 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, | 56 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
| 56 | pgoff_t index, gfp_t gfp_mask); | 57 | pgoff_t index, gfp_t gfp_mask); |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 46ba0c6c219f..350711560753 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -260,6 +260,42 @@ struct swap_list_t { | |||
| 260 | int next; /* swapfile to be used next */ | 260 | int next; /* swapfile to be used next */ |
| 261 | }; | 261 | }; |
| 262 | 262 | ||
| 263 | /* linux/mm/workingset.c */ | ||
| 264 | void *workingset_eviction(struct address_space *mapping, struct page *page); | ||
| 265 | bool workingset_refault(void *shadow); | ||
| 266 | void workingset_activation(struct page *page); | ||
| 267 | extern struct list_lru workingset_shadow_nodes; | ||
| 268 | |||
| 269 | static inline unsigned int workingset_node_pages(struct radix_tree_node *node) | ||
| 270 | { | ||
| 271 | return node->count & RADIX_TREE_COUNT_MASK; | ||
| 272 | } | ||
| 273 | |||
| 274 | static inline void workingset_node_pages_inc(struct radix_tree_node *node) | ||
| 275 | { | ||
| 276 | node->count++; | ||
| 277 | } | ||
| 278 | |||
| 279 | static inline void workingset_node_pages_dec(struct radix_tree_node *node) | ||
| 280 | { | ||
| 281 | node->count--; | ||
| 282 | } | ||
| 283 | |||
| 284 | static inline unsigned int workingset_node_shadows(struct radix_tree_node *node) | ||
| 285 | { | ||
| 286 | return node->count >> RADIX_TREE_COUNT_SHIFT; | ||
| 287 | } | ||
| 288 | |||
| 289 | static inline void workingset_node_shadows_inc(struct radix_tree_node *node) | ||
| 290 | { | ||
| 291 | node->count += 1U << RADIX_TREE_COUNT_SHIFT; | ||
| 292 | } | ||
| 293 | |||
| 294 | static inline void workingset_node_shadows_dec(struct radix_tree_node *node) | ||
| 295 | { | ||
| 296 | node->count -= 1U << RADIX_TREE_COUNT_SHIFT; | ||
| 297 | } | ||
| 298 | |||
| 263 | /* linux/mm/page_alloc.c */ | 299 | /* linux/mm/page_alloc.c */ |
| 264 | extern unsigned long totalram_pages; | 300 | extern unsigned long totalram_pages; |
| 265 | extern unsigned long totalreserve_pages; | 301 | extern unsigned long totalreserve_pages; |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 1e67b7a5968c..2aa8b749f13d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -200,6 +200,8 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 200 | } \ | 200 | } \ |
| 201 | static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) | 201 | static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) |
| 202 | 202 | ||
| 203 | asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, | ||
| 204 | qid_t id, void __user *addr); | ||
| 203 | asmlinkage long sys_time(time_t __user *tloc); | 205 | asmlinkage long sys_time(time_t __user *tloc); |
| 204 | asmlinkage long sys_stime(time_t __user *tptr); | 206 | asmlinkage long sys_stime(time_t __user *tptr); |
| 205 | asmlinkage long sys_gettimeofday(struct timeval __user *tv, | 207 | asmlinkage long sys_gettimeofday(struct timeval __user *tv, |
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 3a712e2e7d76..486c3972c0be 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
| @@ -37,6 +37,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 37 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, | 37 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, |
| 38 | KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, | 38 | KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, |
| 39 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, | 39 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, |
| 40 | DROP_PAGECACHE, DROP_SLAB, | ||
| 40 | #ifdef CONFIG_NUMA_BALANCING | 41 | #ifdef CONFIG_NUMA_BALANCING |
| 41 | NUMA_PTE_UPDATES, | 42 | NUMA_PTE_UPDATES, |
| 42 | NUMA_HUGE_PTE_UPDATES, | 43 | NUMA_HUGE_PTE_UPDATES, |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 67ce70c8279b..ea4476157e00 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -187,8 +187,6 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); | |||
| 187 | #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) | 187 | #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) |
| 188 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) | 188 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) |
| 189 | 189 | ||
| 190 | extern void inc_zone_state(struct zone *, enum zone_stat_item); | ||
| 191 | |||
| 192 | #ifdef CONFIG_SMP | 190 | #ifdef CONFIG_SMP |
| 193 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); | 191 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); |
| 194 | void __inc_zone_page_state(struct page *, enum zone_stat_item); | 192 | void __inc_zone_page_state(struct page *, enum zone_stat_item); |
| @@ -230,18 +228,18 @@ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) | |||
| 230 | atomic_long_inc(&vm_stat[item]); | 228 | atomic_long_inc(&vm_stat[item]); |
| 231 | } | 229 | } |
| 232 | 230 | ||
| 233 | static inline void __inc_zone_page_state(struct page *page, | ||
| 234 | enum zone_stat_item item) | ||
| 235 | { | ||
| 236 | __inc_zone_state(page_zone(page), item); | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) | 231 | static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) |
| 240 | { | 232 | { |
| 241 | atomic_long_dec(&zone->vm_stat[item]); | 233 | atomic_long_dec(&zone->vm_stat[item]); |
| 242 | atomic_long_dec(&vm_stat[item]); | 234 | atomic_long_dec(&vm_stat[item]); |
| 243 | } | 235 | } |
| 244 | 236 | ||
| 237 | static inline void __inc_zone_page_state(struct page *page, | ||
| 238 | enum zone_stat_item item) | ||
| 239 | { | ||
| 240 | __inc_zone_state(page_zone(page), item); | ||
| 241 | } | ||
| 242 | |||
| 245 | static inline void __dec_zone_page_state(struct page *page, | 243 | static inline void __dec_zone_page_state(struct page *page, |
| 246 | enum zone_stat_item item) | 244 | enum zone_stat_item item) |
| 247 | { | 245 | { |
| @@ -256,6 +254,9 @@ static inline void __dec_zone_page_state(struct page *page, | |||
| 256 | #define dec_zone_page_state __dec_zone_page_state | 254 | #define dec_zone_page_state __dec_zone_page_state |
| 257 | #define mod_zone_page_state __mod_zone_page_state | 255 | #define mod_zone_page_state __mod_zone_page_state |
| 258 | 256 | ||
| 257 | #define inc_zone_state __inc_zone_state | ||
| 258 | #define dec_zone_state __dec_zone_state | ||
| 259 | |||
| 259 | #define set_pgdat_percpu_threshold(pgdat, callback) { } | 260 | #define set_pgdat_percpu_threshold(pgdat, callback) { } |
| 260 | 261 | ||
| 261 | static inline void refresh_cpu_vm_stats(int cpu) { } | 262 | static inline void refresh_cpu_vm_stats(int cpu) { } |
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index 335e8a7cad39..c140620dad92 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h | |||
| @@ -85,6 +85,12 @@ | |||
| 85 | 85 | ||
| 86 | #endif /* _NETINET_IN_H */ | 86 | #endif /* _NETINET_IN_H */ |
| 87 | 87 | ||
| 88 | /* Definitions for xattr.h */ | ||
| 89 | #if defined(_SYS_XATTR_H) | ||
| 90 | #define __UAPI_DEF_XATTR 0 | ||
| 91 | #else | ||
| 92 | #define __UAPI_DEF_XATTR 1 | ||
| 93 | #endif | ||
| 88 | 94 | ||
| 89 | /* If we did not see any headers from any supported C libraries, | 95 | /* If we did not see any headers from any supported C libraries, |
| 90 | * or we are being included in the kernel, then define everything | 96 | * or we are being included in the kernel, then define everything |
| @@ -98,6 +104,9 @@ | |||
| 98 | #define __UAPI_DEF_IPV6_MREQ 1 | 104 | #define __UAPI_DEF_IPV6_MREQ 1 |
| 99 | #define __UAPI_DEF_IPPROTO_V6 1 | 105 | #define __UAPI_DEF_IPPROTO_V6 1 |
| 100 | 106 | ||
| 107 | /* Definitions for xattr.h */ | ||
| 108 | #define __UAPI_DEF_XATTR 1 | ||
| 109 | |||
| 101 | #endif /* __GLIBC__ */ | 110 | #endif /* __GLIBC__ */ |
| 102 | 111 | ||
| 103 | #endif /* _UAPI_LIBC_COMPAT_H */ | 112 | #endif /* _UAPI_LIBC_COMPAT_H */ |
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h index 40bbc04b6f81..c38355c1f3c9 100644 --- a/include/uapi/linux/xattr.h +++ b/include/uapi/linux/xattr.h | |||
| @@ -7,11 +7,18 @@ | |||
| 7 | Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. | 7 | Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. |
| 8 | Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> | 8 | Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> |
| 9 | */ | 9 | */ |
| 10 | |||
| 11 | #include <linux/libc-compat.h> | ||
| 12 | |||
| 10 | #ifndef _UAPI_LINUX_XATTR_H | 13 | #ifndef _UAPI_LINUX_XATTR_H |
| 11 | #define _UAPI_LINUX_XATTR_H | 14 | #define _UAPI_LINUX_XATTR_H |
| 12 | 15 | ||
| 16 | #ifdef __UAPI_DEF_XATTR | ||
| 17 | #define __USE_KERNEL_XATTR_DEFS | ||
| 18 | |||
| 13 | #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ | 19 | #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ |
| 14 | #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ | 20 | #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ |
| 21 | #endif | ||
| 15 | 22 | ||
| 16 | /* Namespaces */ | 23 | /* Namespaces */ |
| 17 | #define XATTR_OS2_PREFIX "os2." | 24 | #define XATTR_OS2_PREFIX "os2." |
diff --git a/init/Kconfig b/init/Kconfig index 62b66acfdb30..8851c6417880 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -273,6 +273,16 @@ config FHANDLE | |||
| 273 | get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2) | 273 | get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2) |
| 274 | syscalls. | 274 | syscalls. |
| 275 | 275 | ||
| 276 | config USELIB | ||
| 277 | bool "uselib syscall" | ||
| 278 | default y | ||
| 279 | help | ||
| 280 | This option enables the uselib syscall, a system call used in the | ||
| 281 | dynamic linker from libc5 and earlier. glibc does not use this | ||
| 282 | system call. If you intend to run programs built on libc5 or | ||
| 283 | earlier, you may need to enable this syscall. Current systems | ||
| 284 | running glibc can safely disable this. | ||
| 285 | |||
| 276 | config AUDIT | 286 | config AUDIT |
| 277 | bool "Auditing support" | 287 | bool "Auditing support" |
| 278 | depends on NET | 288 | depends on NET |
| @@ -1291,6 +1301,16 @@ config UID16 | |||
| 1291 | help | 1301 | help |
| 1292 | This enables the legacy 16-bit UID syscall wrappers. | 1302 | This enables the legacy 16-bit UID syscall wrappers. |
| 1293 | 1303 | ||
| 1304 | config SYSFS_SYSCALL | ||
| 1305 | bool "Sysfs syscall support" if EXPERT | ||
| 1306 | default y | ||
| 1307 | ---help--- | ||
| 1308 | sys_sysfs is an obsolete system call no longer supported in libc. | ||
| 1309 | Note that disabling this option is more secure but might break | ||
| 1310 | compatibility with some systems. | ||
| 1311 | |||
| 1312 | If unsure say Y here. | ||
| 1313 | |||
| 1294 | config SYSCTL_SYSCALL | 1314 | config SYSCTL_SYSCALL |
| 1295 | bool "Sysctl syscall support" if EXPERT | 1315 | bool "Sysctl syscall support" if EXPERT |
| 1296 | depends on PROC_SYSCTL | 1316 | depends on PROC_SYSCTL |
diff --git a/init/do_mounts.c b/init/do_mounts.c index 8e5addc45874..82f22885c87e 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c | |||
| @@ -102,13 +102,13 @@ no_match: | |||
| 102 | 102 | ||
| 103 | /** | 103 | /** |
| 104 | * devt_from_partuuid - looks up the dev_t of a partition by its UUID | 104 | * devt_from_partuuid - looks up the dev_t of a partition by its UUID |
| 105 | * @uuid: char array containing ascii UUID | 105 | * @uuid_str: char array containing ascii UUID |
| 106 | * | 106 | * |
| 107 | * The function will return the first partition which contains a matching | 107 | * The function will return the first partition which contains a matching |
| 108 | * UUID value in its partition_meta_info struct. This does not search | 108 | * UUID value in its partition_meta_info struct. This does not search |
| 109 | * by filesystem UUIDs. | 109 | * by filesystem UUIDs. |
| 110 | * | 110 | * |
| 111 | * If @uuid is followed by a "/PARTNROFF=%d", then the number will be | 111 | * If @uuid_str is followed by a "/PARTNROFF=%d", then the number will be |
| 112 | * extracted and used as an offset from the partition identified by the UUID. | 112 | * extracted and used as an offset from the partition identified by the UUID. |
| 113 | * | 113 | * |
| 114 | * Returns the matching dev_t on success or 0 on failure. | 114 | * Returns the matching dev_t on success or 0 on failure. |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e2dbb60004d4..3d54c418bd06 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -996,7 +996,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, | |||
| 996 | task_lock(tsk); | 996 | task_lock(tsk); |
| 997 | /* | 997 | /* |
| 998 | * Determine if a loop is necessary if another thread is doing | 998 | * Determine if a loop is necessary if another thread is doing |
| 999 | * get_mems_allowed(). If at least one node remains unchanged and | 999 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
| 1000 | * tsk does not have a mempolicy, then an empty nodemask will not be | 1000 | * tsk does not have a mempolicy, then an empty nodemask will not be |
| 1001 | * possible when mems_allowed is larger than a word. | 1001 | * possible when mems_allowed is larger than a word. |
| 1002 | */ | 1002 | */ |
diff --git a/kernel/groups.c b/kernel/groups.c index 90cf1c38c8ea..451698f86cfa 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
| @@ -157,17 +157,13 @@ int groups_search(const struct group_info *group_info, kgid_t grp) | |||
| 157 | * set_groups - Change a group subscription in a set of credentials | 157 | * set_groups - Change a group subscription in a set of credentials |
| 158 | * @new: The newly prepared set of credentials to alter | 158 | * @new: The newly prepared set of credentials to alter |
| 159 | * @group_info: The group list to install | 159 | * @group_info: The group list to install |
| 160 | * | ||
| 161 | * Validate a group subscription and, if valid, insert it into a set | ||
| 162 | * of credentials. | ||
| 163 | */ | 160 | */ |
| 164 | int set_groups(struct cred *new, struct group_info *group_info) | 161 | void set_groups(struct cred *new, struct group_info *group_info) |
| 165 | { | 162 | { |
| 166 | put_group_info(new->group_info); | 163 | put_group_info(new->group_info); |
| 167 | groups_sort(group_info); | 164 | groups_sort(group_info); |
| 168 | get_group_info(group_info); | 165 | get_group_info(group_info); |
| 169 | new->group_info = group_info; | 166 | new->group_info = group_info; |
| 170 | return 0; | ||
| 171 | } | 167 | } |
| 172 | 168 | ||
| 173 | EXPORT_SYMBOL(set_groups); | 169 | EXPORT_SYMBOL(set_groups); |
| @@ -182,18 +178,12 @@ EXPORT_SYMBOL(set_groups); | |||
| 182 | int set_current_groups(struct group_info *group_info) | 178 | int set_current_groups(struct group_info *group_info) |
| 183 | { | 179 | { |
| 184 | struct cred *new; | 180 | struct cred *new; |
| 185 | int ret; | ||
| 186 | 181 | ||
| 187 | new = prepare_creds(); | 182 | new = prepare_creds(); |
| 188 | if (!new) | 183 | if (!new) |
| 189 | return -ENOMEM; | 184 | return -ENOMEM; |
| 190 | 185 | ||
| 191 | ret = set_groups(new, group_info); | 186 | set_groups(new, group_info); |
| 192 | if (ret < 0) { | ||
| 193 | abort_creds(new); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | return commit_creds(new); | 187 | return commit_creds(new); |
| 198 | } | 188 | } |
| 199 | 189 | ||
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 0b9c169d577f..06bb1417b063 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
| @@ -246,5 +246,4 @@ static int __init hung_task_init(void) | |||
| 246 | 246 | ||
| 247 | return 0; | 247 | return 0; |
| 248 | } | 248 | } |
| 249 | 249 | subsys_initcall(hung_task_init); | |
| 250 | module_init(hung_task_init); | ||
diff --git a/kernel/kexec.c b/kernel/kexec.c index 45601cf41bee..c0d261c7db7b 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
| @@ -1235,7 +1235,7 @@ static int __init crash_notes_memory_init(void) | |||
| 1235 | } | 1235 | } |
| 1236 | return 0; | 1236 | return 0; |
| 1237 | } | 1237 | } |
| 1238 | module_init(crash_notes_memory_init) | 1238 | subsys_initcall(crash_notes_memory_init); |
| 1239 | 1239 | ||
| 1240 | 1240 | ||
| 1241 | /* | 1241 | /* |
| @@ -1629,7 +1629,7 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
| 1629 | return 0; | 1629 | return 0; |
| 1630 | } | 1630 | } |
| 1631 | 1631 | ||
| 1632 | module_init(crash_save_vmcoreinfo_init) | 1632 | subsys_initcall(crash_save_vmcoreinfo_init); |
| 1633 | 1633 | ||
| 1634 | /* | 1634 | /* |
| 1635 | * Move into place and start executing a preloaded standalone | 1635 | * Move into place and start executing a preloaded standalone |
diff --git a/kernel/kthread.c b/kernel/kthread.c index b5ae3ee860a9..9a130ec06f7a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -217,7 +217,7 @@ int tsk_fork_get_node(struct task_struct *tsk) | |||
| 217 | if (tsk == kthreadd_task) | 217 | if (tsk == kthreadd_task) |
| 218 | return tsk->pref_node_fork; | 218 | return tsk->pref_node_fork; |
| 219 | #endif | 219 | #endif |
| 220 | return numa_node_id(); | 220 | return NUMA_NO_NODE; |
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static void create_kthread(struct kthread_create_info *create) | 223 | static void create_kthread(struct kthread_create_info *create) |
| @@ -369,7 +369,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), | |||
| 369 | { | 369 | { |
| 370 | struct task_struct *p; | 370 | struct task_struct *p; |
| 371 | 371 | ||
| 372 | p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, | 372 | p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt, |
| 373 | cpu); | 373 | cpu); |
| 374 | if (IS_ERR(p)) | 374 | if (IS_ERR(p)) |
| 375 | return p; | 375 | return p; |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 4dae9cbe9259..a45b50962295 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
| @@ -319,7 +319,7 @@ static void log_store(int facility, int level, | |||
| 319 | else | 319 | else |
| 320 | free = log_first_idx - log_next_idx; | 320 | free = log_first_idx - log_next_idx; |
| 321 | 321 | ||
| 322 | if (free > size + sizeof(struct printk_log)) | 322 | if (free >= size + sizeof(struct printk_log)) |
| 323 | break; | 323 | break; |
| 324 | 324 | ||
| 325 | /* drop old messages until we have enough contiuous space */ | 325 | /* drop old messages until we have enough contiuous space */ |
| @@ -327,7 +327,7 @@ static void log_store(int facility, int level, | |||
| 327 | log_first_seq++; | 327 | log_first_seq++; |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) { | 330 | if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) { |
| 331 | /* | 331 | /* |
| 332 | * This message + an additional empty header does not fit | 332 | * This message + an additional empty header does not fit |
| 333 | * at the end of the buffer. Add an empty header with len == 0 | 333 | * at the end of the buffer. Add an empty header with len == 0 |
| @@ -351,7 +351,7 @@ static void log_store(int facility, int level, | |||
| 351 | else | 351 | else |
| 352 | msg->ts_nsec = local_clock(); | 352 | msg->ts_nsec = local_clock(); |
| 353 | memset(log_dict(msg) + dict_len, 0, pad_len); | 353 | memset(log_dict(msg) + dict_len, 0, pad_len); |
| 354 | msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len; | 354 | msg->len = size; |
| 355 | 355 | ||
| 356 | /* insert message */ | 356 | /* insert message */ |
| 357 | log_next_idx += msg->len; | 357 | log_next_idx += msg->len; |
| @@ -1560,9 +1560,12 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
| 1560 | level = kern_level - '0'; | 1560 | level = kern_level - '0'; |
| 1561 | case 'd': /* KERN_DEFAULT */ | 1561 | case 'd': /* KERN_DEFAULT */ |
| 1562 | lflags |= LOG_PREFIX; | 1562 | lflags |= LOG_PREFIX; |
| 1563 | case 'c': /* KERN_CONT */ | ||
| 1564 | break; | ||
| 1565 | } | 1563 | } |
| 1564 | /* | ||
| 1565 | * No need to check length here because vscnprintf | ||
| 1566 | * put '\0' at the end of the string. Only valid and | ||
| 1567 | * newly printed level is detected. | ||
| 1568 | */ | ||
| 1566 | text_len -= end_of_header - text; | 1569 | text_len -= end_of_header - text; |
| 1567 | text = (char *)end_of_header; | 1570 | text = (char *)end_of_header; |
| 1568 | } | 1571 | } |
| @@ -1880,6 +1883,7 @@ void suspend_console(void) | |||
| 1880 | console_lock(); | 1883 | console_lock(); |
| 1881 | console_suspended = 1; | 1884 | console_suspended = 1; |
| 1882 | up(&console_sem); | 1885 | up(&console_sem); |
| 1886 | mutex_release(&console_lock_dep_map, 1, _RET_IP_); | ||
| 1883 | } | 1887 | } |
| 1884 | 1888 | ||
| 1885 | void resume_console(void) | 1889 | void resume_console(void) |
| @@ -1887,6 +1891,7 @@ void resume_console(void) | |||
| 1887 | if (!console_suspend_enabled) | 1891 | if (!console_suspend_enabled) |
| 1888 | return; | 1892 | return; |
| 1889 | down(&console_sem); | 1893 | down(&console_sem); |
| 1894 | mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_); | ||
| 1890 | console_suspended = 0; | 1895 | console_suspended = 0; |
| 1891 | console_unlock(); | 1896 | console_unlock(); |
| 1892 | } | 1897 | } |
diff --git a/kernel/profile.c b/kernel/profile.c index ebdd9c1a86b4..1b266dbe755a 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -604,5 +604,5 @@ int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ | |||
| 604 | hotcpu_notifier(profile_cpu_callback, 0); | 604 | hotcpu_notifier(profile_cpu_callback, 0); |
| 605 | return 0; | 605 | return 0; |
| 606 | } | 606 | } |
| 607 | module_init(create_proc_profile); | 607 | subsys_initcall(create_proc_profile); |
| 608 | #endif /* CONFIG_PROC_FS */ | 608 | #endif /* CONFIG_PROC_FS */ |
diff --git a/kernel/resource.c b/kernel/resource.c index 673061c06da1..8957d686e29b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -511,7 +511,7 @@ static int find_resource(struct resource *root, struct resource *new, | |||
| 511 | * @newsize: new size of the resource descriptor | 511 | * @newsize: new size of the resource descriptor |
| 512 | * @constraint: the size and alignment constraints to be met. | 512 | * @constraint: the size and alignment constraints to be met. |
| 513 | */ | 513 | */ |
| 514 | int reallocate_resource(struct resource *root, struct resource *old, | 514 | static int reallocate_resource(struct resource *root, struct resource *old, |
| 515 | resource_size_t newsize, | 515 | resource_size_t newsize, |
| 516 | struct resource_constraint *constraint) | 516 | struct resource_constraint *constraint) |
| 517 | { | 517 | { |
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index da98af347e8b..a476bea17fbc 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c | |||
| @@ -142,4 +142,4 @@ static int __init proc_schedstat_init(void) | |||
| 142 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); | 142 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); |
| 143 | return 0; | 143 | return 0; |
| 144 | } | 144 | } |
| 145 | module_init(proc_schedstat_init); | 145 | subsys_initcall(proc_schedstat_init); |
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 7078052284fd..bc8d1b74a6b9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
| @@ -146,11 +146,13 @@ cond_syscall(sys_io_destroy); | |||
| 146 | cond_syscall(sys_io_submit); | 146 | cond_syscall(sys_io_submit); |
| 147 | cond_syscall(sys_io_cancel); | 147 | cond_syscall(sys_io_cancel); |
| 148 | cond_syscall(sys_io_getevents); | 148 | cond_syscall(sys_io_getevents); |
| 149 | cond_syscall(sys_sysfs); | ||
| 149 | cond_syscall(sys_syslog); | 150 | cond_syscall(sys_syslog); |
| 150 | cond_syscall(sys_process_vm_readv); | 151 | cond_syscall(sys_process_vm_readv); |
| 151 | cond_syscall(sys_process_vm_writev); | 152 | cond_syscall(sys_process_vm_writev); |
| 152 | cond_syscall(compat_sys_process_vm_readv); | 153 | cond_syscall(compat_sys_process_vm_readv); |
| 153 | cond_syscall(compat_sys_process_vm_writev); | 154 | cond_syscall(compat_sys_process_vm_writev); |
| 155 | cond_syscall(sys_uselib); | ||
| 154 | 156 | ||
| 155 | /* arch-specific weak syscall entries */ | 157 | /* arch-specific weak syscall entries */ |
| 156 | cond_syscall(sys_pciconfig_read); | 158 | cond_syscall(sys_pciconfig_read); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 09d2e2413605..5c14b547882e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -123,7 +123,7 @@ static int __maybe_unused neg_one = -1; | |||
| 123 | static int zero; | 123 | static int zero; |
| 124 | static int __maybe_unused one = 1; | 124 | static int __maybe_unused one = 1; |
| 125 | static int __maybe_unused two = 2; | 125 | static int __maybe_unused two = 2; |
| 126 | static int __maybe_unused three = 3; | 126 | static int __maybe_unused four = 4; |
| 127 | static unsigned long one_ul = 1; | 127 | static unsigned long one_ul = 1; |
| 128 | static int one_hundred = 100; | 128 | static int one_hundred = 100; |
| 129 | #ifdef CONFIG_PRINTK | 129 | #ifdef CONFIG_PRINTK |
| @@ -1264,7 +1264,7 @@ static struct ctl_table vm_table[] = { | |||
| 1264 | .mode = 0644, | 1264 | .mode = 0644, |
| 1265 | .proc_handler = drop_caches_sysctl_handler, | 1265 | .proc_handler = drop_caches_sysctl_handler, |
| 1266 | .extra1 = &one, | 1266 | .extra1 = &one, |
| 1267 | .extra2 = &three, | 1267 | .extra2 = &four, |
| 1268 | }, | 1268 | }, |
| 1269 | #ifdef CONFIG_COMPACTION | 1269 | #ifdef CONFIG_COMPACTION |
| 1270 | { | 1270 | { |
diff --git a/kernel/user.c b/kernel/user.c index c006131beb77..294fc6a94168 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -222,5 +222,4 @@ static int __init uid_cache_init(void) | |||
| 222 | 222 | ||
| 223 | return 0; | 223 | return 0; |
| 224 | } | 224 | } |
| 225 | 225 | subsys_initcall(uid_cache_init); | |
| 226 | module_init(uid_cache_init); | ||
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index dd06439b9c84..0d8f6023fd8d 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -902,4 +902,4 @@ static __init int user_namespaces_init(void) | |||
| 902 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); | 902 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); |
| 903 | return 0; | 903 | return 0; |
| 904 | } | 904 | } |
| 905 | module_init(user_namespaces_init); | 905 | subsys_initcall(user_namespaces_init); |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 01c6f979486f..e90089fd78e0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -158,14 +158,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 159 | void touch_nmi_watchdog(void) | 159 | void touch_nmi_watchdog(void) |
| 160 | { | 160 | { |
| 161 | if (watchdog_user_enabled) { | 161 | /* |
| 162 | unsigned cpu; | 162 | * Using __raw here because some code paths have |
| 163 | 163 | * preemption enabled. If preemption is enabled | |
| 164 | for_each_present_cpu(cpu) { | 164 | * then interrupts should be enabled too, in which |
| 165 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | 165 | * case we shouldn't have to worry about the watchdog |
| 166 | per_cpu(watchdog_nmi_touch, cpu) = true; | 166 | * going off. |
| 167 | } | 167 | */ |
| 168 | } | 168 | __raw_get_cpu_var(watchdog_nmi_touch) = true; |
| 169 | touch_softlockup_watchdog(); | 169 | touch_softlockup_watchdog(); |
| 170 | } | 170 | } |
| 171 | EXPORT_SYMBOL(touch_nmi_watchdog); | 171 | EXPORT_SYMBOL(touch_nmi_watchdog); |
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c index a8f8379eb49f..2e11e48446ab 100644 --- a/lib/clz_ctz.c +++ b/lib/clz_ctz.c | |||
| @@ -6,6 +6,9 @@ | |||
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
| 9 | * The functions in this file aren't called directly, but are required by | ||
| 10 | * GCC builtins such as __builtin_ctz, and therefore they can't be removed | ||
| 11 | * despite appearing unreferenced in kernel source. | ||
| 9 | * | 12 | * |
| 10 | * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions. | 13 | * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions. |
| 11 | */ | 14 | */ |
| @@ -13,18 +16,22 @@ | |||
| 13 | #include <linux/export.h> | 16 | #include <linux/export.h> |
| 14 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 15 | 18 | ||
| 19 | int __weak __ctzsi2(int val); | ||
| 16 | int __weak __ctzsi2(int val) | 20 | int __weak __ctzsi2(int val) |
| 17 | { | 21 | { |
| 18 | return __ffs(val); | 22 | return __ffs(val); |
| 19 | } | 23 | } |
| 20 | EXPORT_SYMBOL(__ctzsi2); | 24 | EXPORT_SYMBOL(__ctzsi2); |
| 21 | 25 | ||
| 26 | int __weak __clzsi2(int val); | ||
| 22 | int __weak __clzsi2(int val) | 27 | int __weak __clzsi2(int val) |
| 23 | { | 28 | { |
| 24 | return 32 - fls(val); | 29 | return 32 - fls(val); |
| 25 | } | 30 | } |
| 26 | EXPORT_SYMBOL(__clzsi2); | 31 | EXPORT_SYMBOL(__clzsi2); |
| 27 | 32 | ||
| 33 | int __weak __clzdi2(long val); | ||
| 34 | int __weak __ctzdi2(long val); | ||
| 28 | #if BITS_PER_LONG == 32 | 35 | #if BITS_PER_LONG == 32 |
| 29 | 36 | ||
| 30 | int __weak __clzdi2(long val) | 37 | int __weak __clzdi2(long val) |
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index d619b28c456f..0edfd742a154 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include "zlib_inflate/inflate.h" | 19 | #include "zlib_inflate/inflate.h" |
| 20 | 20 | ||
| 21 | #include "zlib_inflate/infutil.h" | 21 | #include "zlib_inflate/infutil.h" |
| 22 | #include <linux/decompress/inflate.h> | ||
| 22 | 23 | ||
| 23 | #endif /* STATIC */ | 24 | #endif /* STATIC */ |
| 24 | 25 | ||
diff --git a/lib/devres.c b/lib/devres.c index 823533138fa0..48cb3c7bd7de 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
| @@ -81,11 +81,13 @@ EXPORT_SYMBOL(devm_ioremap_nocache); | |||
| 81 | void devm_iounmap(struct device *dev, void __iomem *addr) | 81 | void devm_iounmap(struct device *dev, void __iomem *addr) |
| 82 | { | 82 | { |
| 83 | WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, | 83 | WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, |
| 84 | (void *)addr)); | 84 | (__force void *)addr)); |
| 85 | iounmap(addr); | 85 | iounmap(addr); |
| 86 | } | 86 | } |
| 87 | EXPORT_SYMBOL(devm_iounmap); | 87 | EXPORT_SYMBOL(devm_iounmap); |
| 88 | 88 | ||
| 89 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) | ||
| 90 | |||
| 89 | /** | 91 | /** |
| 90 | * devm_ioremap_resource() - check, request region, and ioremap resource | 92 | * devm_ioremap_resource() - check, request region, and ioremap resource |
| 91 | * @dev: generic device to handle the resource for | 93 | * @dev: generic device to handle the resource for |
| @@ -114,7 +116,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) | |||
| 114 | 116 | ||
| 115 | if (!res || resource_type(res) != IORESOURCE_MEM) { | 117 | if (!res || resource_type(res) != IORESOURCE_MEM) { |
| 116 | dev_err(dev, "invalid resource\n"); | 118 | dev_err(dev, "invalid resource\n"); |
| 117 | return ERR_PTR(-EINVAL); | 119 | return IOMEM_ERR_PTR(-EINVAL); |
| 118 | } | 120 | } |
| 119 | 121 | ||
| 120 | size = resource_size(res); | 122 | size = resource_size(res); |
| @@ -122,7 +124,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) | |||
| 122 | 124 | ||
| 123 | if (!devm_request_mem_region(dev, res->start, size, name)) { | 125 | if (!devm_request_mem_region(dev, res->start, size, name)) { |
| 124 | dev_err(dev, "can't request region for resource %pR\n", res); | 126 | dev_err(dev, "can't request region for resource %pR\n", res); |
| 125 | return ERR_PTR(-EBUSY); | 127 | return IOMEM_ERR_PTR(-EBUSY); |
| 126 | } | 128 | } |
| 127 | 129 | ||
| 128 | if (res->flags & IORESOURCE_CACHEABLE) | 130 | if (res->flags & IORESOURCE_CACHEABLE) |
| @@ -133,7 +135,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) | |||
| 133 | if (!dest_ptr) { | 135 | if (!dest_ptr) { |
| 134 | dev_err(dev, "ioremap failed for resource %pR\n", res); | 136 | dev_err(dev, "ioremap failed for resource %pR\n", res); |
| 135 | devm_release_mem_region(dev, res->start, size); | 137 | devm_release_mem_region(dev, res->start, size); |
| 136 | dest_ptr = ERR_PTR(-ENOMEM); | 138 | dest_ptr = IOMEM_ERR_PTR(-ENOMEM); |
| 137 | } | 139 | } |
| 138 | 140 | ||
| 139 | return dest_ptr; | 141 | return dest_ptr; |
| @@ -224,7 +226,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) | |||
| 224 | { | 226 | { |
| 225 | ioport_unmap(addr); | 227 | ioport_unmap(addr); |
| 226 | WARN_ON(devres_destroy(dev, devm_ioport_map_release, | 228 | WARN_ON(devres_destroy(dev, devm_ioport_map_release, |
| 227 | devm_ioport_map_match, (void *)addr)); | 229 | devm_ioport_map_match, (__force void *)addr)); |
| 228 | } | 230 | } |
| 229 | EXPORT_SYMBOL(devm_ioport_unmap); | 231 | EXPORT_SYMBOL(devm_ioport_unmap); |
| 230 | #endif /* CONFIG_HAS_IOPORT */ | 232 | #endif /* CONFIG_HAS_IOPORT */ |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 5f72767ddd9b..4e3bd71bd949 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -124,6 +124,30 @@ static int kobj_usermode_filter(struct kobject *kobj) | |||
| 124 | return 0; | 124 | return 0; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) | ||
| 128 | { | ||
| 129 | int len; | ||
| 130 | |||
| 131 | len = strlcpy(&env->buf[env->buflen], subsystem, | ||
| 132 | sizeof(env->buf) - env->buflen); | ||
| 133 | if (len >= (sizeof(env->buf) - env->buflen)) { | ||
| 134 | WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n"); | ||
| 135 | return -ENOMEM; | ||
| 136 | } | ||
| 137 | |||
| 138 | env->argv[0] = uevent_helper; | ||
| 139 | env->argv[1] = &env->buf[env->buflen]; | ||
| 140 | env->argv[2] = NULL; | ||
| 141 | |||
| 142 | env->buflen += len + 1; | ||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static void cleanup_uevent_env(struct subprocess_info *info) | ||
| 147 | { | ||
| 148 | kfree(info->data); | ||
| 149 | } | ||
| 150 | |||
| 127 | /** | 151 | /** |
| 128 | * kobject_uevent_env - send an uevent with environmental data | 152 | * kobject_uevent_env - send an uevent with environmental data |
| 129 | * | 153 | * |
| @@ -301,11 +325,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 301 | 325 | ||
| 302 | /* call uevent_helper, usually only enabled during early boot */ | 326 | /* call uevent_helper, usually only enabled during early boot */ |
| 303 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { | 327 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { |
| 304 | char *argv [3]; | 328 | struct subprocess_info *info; |
| 305 | 329 | ||
| 306 | argv [0] = uevent_helper; | ||
| 307 | argv [1] = (char *)subsystem; | ||
| 308 | argv [2] = NULL; | ||
| 309 | retval = add_uevent_var(env, "HOME=/"); | 330 | retval = add_uevent_var(env, "HOME=/"); |
| 310 | if (retval) | 331 | if (retval) |
| 311 | goto exit; | 332 | goto exit; |
| @@ -313,9 +334,18 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 313 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); | 334 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); |
| 314 | if (retval) | 335 | if (retval) |
| 315 | goto exit; | 336 | goto exit; |
| 337 | retval = init_uevent_argv(env, subsystem); | ||
| 338 | if (retval) | ||
| 339 | goto exit; | ||
| 316 | 340 | ||
| 317 | retval = call_usermodehelper(argv[0], argv, | 341 | retval = -ENOMEM; |
| 318 | env->envp, UMH_WAIT_EXEC); | 342 | info = call_usermodehelper_setup(env->argv[0], env->argv, |
| 343 | env->envp, GFP_KERNEL, | ||
| 344 | NULL, cleanup_uevent_env, env); | ||
| 345 | if (info) { | ||
| 346 | retval = call_usermodehelper_exec(info, UMH_NO_WAIT); | ||
| 347 | env = NULL; /* freed by cleanup_uevent_env */ | ||
| 348 | } | ||
| 319 | } | 349 | } |
| 320 | 350 | ||
| 321 | exit: | 351 | exit: |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index bd4a8dfdf0b8..9599aa72d7a0 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -35,33 +35,6 @@ | |||
| 35 | #include <linux/hardirq.h> /* in_interrupt() */ | 35 | #include <linux/hardirq.h> /* in_interrupt() */ |
| 36 | 36 | ||
| 37 | 37 | ||
| 38 | #ifdef __KERNEL__ | ||
| 39 | #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) | ||
| 40 | #else | ||
| 41 | #define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ | ||
| 42 | #endif | ||
| 43 | |||
| 44 | #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) | ||
| 45 | #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) | ||
| 46 | |||
| 47 | #define RADIX_TREE_TAG_LONGS \ | ||
| 48 | ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) | ||
| 49 | |||
| 50 | struct radix_tree_node { | ||
| 51 | unsigned int height; /* Height from the bottom */ | ||
| 52 | unsigned int count; | ||
| 53 | union { | ||
| 54 | struct radix_tree_node *parent; /* Used when ascending tree */ | ||
| 55 | struct rcu_head rcu_head; /* Used when freeing node */ | ||
| 56 | }; | ||
| 57 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; | ||
| 58 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; | ||
| 59 | }; | ||
| 60 | |||
| 61 | #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) | ||
| 62 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ | ||
| 63 | RADIX_TREE_MAP_SHIFT)) | ||
| 64 | |||
| 65 | /* | 38 | /* |
| 66 | * The height_to_maxindex array needs to be one deeper than the maximum | 39 | * The height_to_maxindex array needs to be one deeper than the maximum |
| 67 | * path as height 0 holds only 1 entry. | 40 | * path as height 0 holds only 1 entry. |
| @@ -369,7 +342,8 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | |||
| 369 | 342 | ||
| 370 | /* Increase the height. */ | 343 | /* Increase the height. */ |
| 371 | newheight = root->height+1; | 344 | newheight = root->height+1; |
| 372 | node->height = newheight; | 345 | BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK); |
| 346 | node->path = newheight; | ||
| 373 | node->count = 1; | 347 | node->count = 1; |
| 374 | node->parent = NULL; | 348 | node->parent = NULL; |
| 375 | slot = root->rnode; | 349 | slot = root->rnode; |
| @@ -387,23 +361,28 @@ out: | |||
| 387 | } | 361 | } |
| 388 | 362 | ||
| 389 | /** | 363 | /** |
| 390 | * radix_tree_insert - insert into a radix tree | 364 | * __radix_tree_create - create a slot in a radix tree |
| 391 | * @root: radix tree root | 365 | * @root: radix tree root |
| 392 | * @index: index key | 366 | * @index: index key |
| 393 | * @item: item to insert | 367 | * @nodep: returns node |
| 368 | * @slotp: returns slot | ||
| 394 | * | 369 | * |
| 395 | * Insert an item into the radix tree at position @index. | 370 | * Create, if necessary, and return the node and slot for an item |
| 371 | * at position @index in the radix tree @root. | ||
| 372 | * | ||
| 373 | * Until there is more than one item in the tree, no nodes are | ||
| 374 | * allocated and @root->rnode is used as a direct slot instead of | ||
| 375 | * pointing to a node, in which case *@nodep will be NULL. | ||
| 376 | * | ||
| 377 | * Returns -ENOMEM, or 0 for success. | ||
| 396 | */ | 378 | */ |
| 397 | int radix_tree_insert(struct radix_tree_root *root, | 379 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, |
| 398 | unsigned long index, void *item) | 380 | struct radix_tree_node **nodep, void ***slotp) |
| 399 | { | 381 | { |
| 400 | struct radix_tree_node *node = NULL, *slot; | 382 | struct radix_tree_node *node = NULL, *slot; |
| 401 | unsigned int height, shift; | 383 | unsigned int height, shift, offset; |
| 402 | int offset; | ||
| 403 | int error; | 384 | int error; |
| 404 | 385 | ||
| 405 | BUG_ON(radix_tree_is_indirect_ptr(item)); | ||
| 406 | |||
| 407 | /* Make sure the tree is high enough. */ | 386 | /* Make sure the tree is high enough. */ |
| 408 | if (index > radix_tree_maxindex(root->height)) { | 387 | if (index > radix_tree_maxindex(root->height)) { |
| 409 | error = radix_tree_extend(root, index); | 388 | error = radix_tree_extend(root, index); |
| @@ -422,11 +401,12 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
| 422 | /* Have to add a child node. */ | 401 | /* Have to add a child node. */ |
| 423 | if (!(slot = radix_tree_node_alloc(root))) | 402 | if (!(slot = radix_tree_node_alloc(root))) |
| 424 | return -ENOMEM; | 403 | return -ENOMEM; |
| 425 | slot->height = height; | 404 | slot->path = height; |
| 426 | slot->parent = node; | 405 | slot->parent = node; |
| 427 | if (node) { | 406 | if (node) { |
| 428 | rcu_assign_pointer(node->slots[offset], slot); | 407 | rcu_assign_pointer(node->slots[offset], slot); |
| 429 | node->count++; | 408 | node->count++; |
| 409 | slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT; | ||
| 430 | } else | 410 | } else |
| 431 | rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); | 411 | rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); |
| 432 | } | 412 | } |
| @@ -439,16 +419,42 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
| 439 | height--; | 419 | height--; |
| 440 | } | 420 | } |
| 441 | 421 | ||
| 442 | if (slot != NULL) | 422 | if (nodep) |
| 423 | *nodep = node; | ||
| 424 | if (slotp) | ||
| 425 | *slotp = node ? node->slots + offset : (void **)&root->rnode; | ||
| 426 | return 0; | ||
| 427 | } | ||
| 428 | |||
| 429 | /** | ||
| 430 | * radix_tree_insert - insert into a radix tree | ||
| 431 | * @root: radix tree root | ||
| 432 | * @index: index key | ||
| 433 | * @item: item to insert | ||
| 434 | * | ||
| 435 | * Insert an item into the radix tree at position @index. | ||
| 436 | */ | ||
| 437 | int radix_tree_insert(struct radix_tree_root *root, | ||
| 438 | unsigned long index, void *item) | ||
| 439 | { | ||
| 440 | struct radix_tree_node *node; | ||
| 441 | void **slot; | ||
| 442 | int error; | ||
| 443 | |||
| 444 | BUG_ON(radix_tree_is_indirect_ptr(item)); | ||
| 445 | |||
| 446 | error = __radix_tree_create(root, index, &node, &slot); | ||
| 447 | if (error) | ||
| 448 | return error; | ||
| 449 | if (*slot != NULL) | ||
| 443 | return -EEXIST; | 450 | return -EEXIST; |
| 451 | rcu_assign_pointer(*slot, item); | ||
| 444 | 452 | ||
| 445 | if (node) { | 453 | if (node) { |
| 446 | node->count++; | 454 | node->count++; |
| 447 | rcu_assign_pointer(node->slots[offset], item); | 455 | BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK)); |
| 448 | BUG_ON(tag_get(node, 0, offset)); | 456 | BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK)); |
| 449 | BUG_ON(tag_get(node, 1, offset)); | ||
| 450 | } else { | 457 | } else { |
| 451 | rcu_assign_pointer(root->rnode, item); | ||
| 452 | BUG_ON(root_tag_get(root, 0)); | 458 | BUG_ON(root_tag_get(root, 0)); |
| 453 | BUG_ON(root_tag_get(root, 1)); | 459 | BUG_ON(root_tag_get(root, 1)); |
| 454 | } | 460 | } |
| @@ -457,15 +463,26 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
| 457 | } | 463 | } |
| 458 | EXPORT_SYMBOL(radix_tree_insert); | 464 | EXPORT_SYMBOL(radix_tree_insert); |
| 459 | 465 | ||
| 460 | /* | 466 | /** |
| 461 | * is_slot == 1 : search for the slot. | 467 | * __radix_tree_lookup - lookup an item in a radix tree |
| 462 | * is_slot == 0 : search for the node. | 468 | * @root: radix tree root |
| 469 | * @index: index key | ||
| 470 | * @nodep: returns node | ||
| 471 | * @slotp: returns slot | ||
| 472 | * | ||
| 473 | * Lookup and return the item at position @index in the radix | ||
| 474 | * tree @root. | ||
| 475 | * | ||
| 476 | * Until there is more than one item in the tree, no nodes are | ||
| 477 | * allocated and @root->rnode is used as a direct slot instead of | ||
| 478 | * pointing to a node, in which case *@nodep will be NULL. | ||
| 463 | */ | 479 | */ |
| 464 | static void *radix_tree_lookup_element(struct radix_tree_root *root, | 480 | void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, |
| 465 | unsigned long index, int is_slot) | 481 | struct radix_tree_node **nodep, void ***slotp) |
| 466 | { | 482 | { |
| 483 | struct radix_tree_node *node, *parent; | ||
| 467 | unsigned int height, shift; | 484 | unsigned int height, shift; |
| 468 | struct radix_tree_node *node, **slot; | 485 | void **slot; |
| 469 | 486 | ||
| 470 | node = rcu_dereference_raw(root->rnode); | 487 | node = rcu_dereference_raw(root->rnode); |
| 471 | if (node == NULL) | 488 | if (node == NULL) |
| @@ -474,19 +491,24 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 474 | if (!radix_tree_is_indirect_ptr(node)) { | 491 | if (!radix_tree_is_indirect_ptr(node)) { |
| 475 | if (index > 0) | 492 | if (index > 0) |
| 476 | return NULL; | 493 | return NULL; |
| 477 | return is_slot ? (void *)&root->rnode : node; | 494 | |
| 495 | if (nodep) | ||
| 496 | *nodep = NULL; | ||
| 497 | if (slotp) | ||
| 498 | *slotp = (void **)&root->rnode; | ||
| 499 | return node; | ||
| 478 | } | 500 | } |
| 479 | node = indirect_to_ptr(node); | 501 | node = indirect_to_ptr(node); |
| 480 | 502 | ||
| 481 | height = node->height; | 503 | height = node->path & RADIX_TREE_HEIGHT_MASK; |
| 482 | if (index > radix_tree_maxindex(height)) | 504 | if (index > radix_tree_maxindex(height)) |
| 483 | return NULL; | 505 | return NULL; |
| 484 | 506 | ||
| 485 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | 507 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; |
| 486 | 508 | ||
| 487 | do { | 509 | do { |
| 488 | slot = (struct radix_tree_node **) | 510 | parent = node; |
| 489 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 511 | slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK); |
| 490 | node = rcu_dereference_raw(*slot); | 512 | node = rcu_dereference_raw(*slot); |
| 491 | if (node == NULL) | 513 | if (node == NULL) |
| 492 | return NULL; | 514 | return NULL; |
| @@ -495,7 +517,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 495 | height--; | 517 | height--; |
| 496 | } while (height > 0); | 518 | } while (height > 0); |
| 497 | 519 | ||
| 498 | return is_slot ? (void *)slot : indirect_to_ptr(node); | 520 | if (nodep) |
| 521 | *nodep = parent; | ||
| 522 | if (slotp) | ||
| 523 | *slotp = slot; | ||
| 524 | return node; | ||
| 499 | } | 525 | } |
| 500 | 526 | ||
| 501 | /** | 527 | /** |
| @@ -513,7 +539,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 513 | */ | 539 | */ |
| 514 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | 540 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) |
| 515 | { | 541 | { |
| 516 | return (void **)radix_tree_lookup_element(root, index, 1); | 542 | void **slot; |
| 543 | |||
| 544 | if (!__radix_tree_lookup(root, index, NULL, &slot)) | ||
| 545 | return NULL; | ||
| 546 | return slot; | ||
| 517 | } | 547 | } |
| 518 | EXPORT_SYMBOL(radix_tree_lookup_slot); | 548 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
| 519 | 549 | ||
| @@ -531,7 +561,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); | |||
| 531 | */ | 561 | */ |
| 532 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) | 562 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) |
| 533 | { | 563 | { |
| 534 | return radix_tree_lookup_element(root, index, 0); | 564 | return __radix_tree_lookup(root, index, NULL, NULL); |
| 535 | } | 565 | } |
| 536 | EXPORT_SYMBOL(radix_tree_lookup); | 566 | EXPORT_SYMBOL(radix_tree_lookup); |
| 537 | 567 | ||
| @@ -676,7 +706,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
| 676 | return (index == 0); | 706 | return (index == 0); |
| 677 | node = indirect_to_ptr(node); | 707 | node = indirect_to_ptr(node); |
| 678 | 708 | ||
| 679 | height = node->height; | 709 | height = node->path & RADIX_TREE_HEIGHT_MASK; |
| 680 | if (index > radix_tree_maxindex(height)) | 710 | if (index > radix_tree_maxindex(height)) |
| 681 | return 0; | 711 | return 0; |
| 682 | 712 | ||
| @@ -713,7 +743,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, | |||
| 713 | { | 743 | { |
| 714 | unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; | 744 | unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; |
| 715 | struct radix_tree_node *rnode, *node; | 745 | struct radix_tree_node *rnode, *node; |
| 716 | unsigned long index, offset; | 746 | unsigned long index, offset, height; |
| 717 | 747 | ||
| 718 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) | 748 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) |
| 719 | return NULL; | 749 | return NULL; |
| @@ -744,7 +774,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, | |||
| 744 | return NULL; | 774 | return NULL; |
| 745 | 775 | ||
| 746 | restart: | 776 | restart: |
| 747 | shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT; | 777 | height = rnode->path & RADIX_TREE_HEIGHT_MASK; |
| 778 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
| 748 | offset = index >> shift; | 779 | offset = index >> shift; |
| 749 | 780 | ||
| 750 | /* Index outside of the tree */ | 781 | /* Index outside of the tree */ |
| @@ -946,81 +977,6 @@ next: | |||
| 946 | } | 977 | } |
| 947 | EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); | 978 | EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); |
| 948 | 979 | ||
| 949 | |||
| 950 | /** | ||
| 951 | * radix_tree_next_hole - find the next hole (not-present entry) | ||
| 952 | * @root: tree root | ||
| 953 | * @index: index key | ||
| 954 | * @max_scan: maximum range to search | ||
| 955 | * | ||
| 956 | * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest | ||
| 957 | * indexed hole. | ||
| 958 | * | ||
| 959 | * Returns: the index of the hole if found, otherwise returns an index | ||
| 960 | * outside of the set specified (in which case 'return - index >= max_scan' | ||
| 961 | * will be true). In rare cases of index wrap-around, 0 will be returned. | ||
| 962 | * | ||
| 963 | * radix_tree_next_hole may be called under rcu_read_lock. However, like | ||
| 964 | * radix_tree_gang_lookup, this will not atomically search a snapshot of | ||
| 965 | * the tree at a single point in time. For example, if a hole is created | ||
| 966 | * at index 5, then subsequently a hole is created at index 10, | ||
| 967 | * radix_tree_next_hole covering both indexes may return 10 if called | ||
| 968 | * under rcu_read_lock. | ||
| 969 | */ | ||
| 970 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, | ||
| 971 | unsigned long index, unsigned long max_scan) | ||
| 972 | { | ||
| 973 | unsigned long i; | ||
| 974 | |||
| 975 | for (i = 0; i < max_scan; i++) { | ||
| 976 | if (!radix_tree_lookup(root, index)) | ||
| 977 | break; | ||
| 978 | index++; | ||
| 979 | if (index == 0) | ||
| 980 | break; | ||
| 981 | } | ||
| 982 | |||
| 983 | return index; | ||
| 984 | } | ||
| 985 | EXPORT_SYMBOL(radix_tree_next_hole); | ||
| 986 | |||
| 987 | /** | ||
| 988 | * radix_tree_prev_hole - find the prev hole (not-present entry) | ||
| 989 | * @root: tree root | ||
| 990 | * @index: index key | ||
| 991 | * @max_scan: maximum range to search | ||
| 992 | * | ||
| 993 | * Search backwards in the range [max(index-max_scan+1, 0), index] | ||
| 994 | * for the first hole. | ||
| 995 | * | ||
| 996 | * Returns: the index of the hole if found, otherwise returns an index | ||
| 997 | * outside of the set specified (in which case 'index - return >= max_scan' | ||
| 998 | * will be true). In rare cases of wrap-around, ULONG_MAX will be returned. | ||
| 999 | * | ||
| 1000 | * radix_tree_next_hole may be called under rcu_read_lock. However, like | ||
| 1001 | * radix_tree_gang_lookup, this will not atomically search a snapshot of | ||
| 1002 | * the tree at a single point in time. For example, if a hole is created | ||
| 1003 | * at index 10, then subsequently a hole is created at index 5, | ||
| 1004 | * radix_tree_prev_hole covering both indexes may return 5 if called under | ||
| 1005 | * rcu_read_lock. | ||
| 1006 | */ | ||
| 1007 | unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | ||
| 1008 | unsigned long index, unsigned long max_scan) | ||
| 1009 | { | ||
| 1010 | unsigned long i; | ||
| 1011 | |||
| 1012 | for (i = 0; i < max_scan; i++) { | ||
| 1013 | if (!radix_tree_lookup(root, index)) | ||
| 1014 | break; | ||
| 1015 | index--; | ||
| 1016 | if (index == ULONG_MAX) | ||
| 1017 | break; | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | return index; | ||
| 1021 | } | ||
| 1022 | EXPORT_SYMBOL(radix_tree_prev_hole); | ||
| 1023 | |||
| 1024 | /** | 980 | /** |
| 1025 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree | 981 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree |
| 1026 | * @root: radix tree root | 982 | * @root: radix tree root |
| @@ -1189,7 +1145,7 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item, | |||
| 1189 | unsigned int shift, height; | 1145 | unsigned int shift, height; |
| 1190 | unsigned long i; | 1146 | unsigned long i; |
| 1191 | 1147 | ||
| 1192 | height = slot->height; | 1148 | height = slot->path & RADIX_TREE_HEIGHT_MASK; |
| 1193 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | 1149 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; |
| 1194 | 1150 | ||
| 1195 | for ( ; height > 1; height--) { | 1151 | for ( ; height > 1; height--) { |
| @@ -1252,7 +1208,8 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |||
| 1252 | } | 1208 | } |
| 1253 | 1209 | ||
| 1254 | node = indirect_to_ptr(node); | 1210 | node = indirect_to_ptr(node); |
| 1255 | max_index = radix_tree_maxindex(node->height); | 1211 | max_index = radix_tree_maxindex(node->path & |
| 1212 | RADIX_TREE_HEIGHT_MASK); | ||
| 1256 | if (cur_index > max_index) { | 1213 | if (cur_index > max_index) { |
| 1257 | rcu_read_unlock(); | 1214 | rcu_read_unlock(); |
| 1258 | break; | 1215 | break; |
| @@ -1337,48 +1294,90 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
| 1337 | } | 1294 | } |
| 1338 | 1295 | ||
| 1339 | /** | 1296 | /** |
| 1340 | * radix_tree_delete - delete an item from a radix tree | 1297 | * __radix_tree_delete_node - try to free node after clearing a slot |
| 1341 | * @root: radix tree root | 1298 | * @root: radix tree root |
| 1342 | * @index: index key | 1299 | * @index: index key |
| 1300 | * @node: node containing @index | ||
| 1343 | * | 1301 | * |
| 1344 | * Remove the item at @index from the radix tree rooted at @root. | 1302 | * After clearing the slot at @index in @node from radix tree |
| 1303 | * rooted at @root, call this function to attempt freeing the | ||
| 1304 | * node and shrinking the tree. | ||
| 1345 | * | 1305 | * |
| 1346 | * Returns the address of the deleted item, or NULL if it was not present. | 1306 | * Returns %true if @node was freed, %false otherwise. |
| 1347 | */ | 1307 | */ |
| 1348 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | 1308 | bool __radix_tree_delete_node(struct radix_tree_root *root, |
| 1309 | struct radix_tree_node *node) | ||
| 1349 | { | 1310 | { |
| 1350 | struct radix_tree_node *node = NULL; | 1311 | bool deleted = false; |
| 1351 | struct radix_tree_node *slot = NULL; | 1312 | |
| 1352 | struct radix_tree_node *to_free; | 1313 | do { |
| 1353 | unsigned int height, shift; | 1314 | struct radix_tree_node *parent; |
| 1315 | |||
| 1316 | if (node->count) { | ||
| 1317 | if (node == indirect_to_ptr(root->rnode)) { | ||
| 1318 | radix_tree_shrink(root); | ||
| 1319 | if (root->height == 0) | ||
| 1320 | deleted = true; | ||
| 1321 | } | ||
| 1322 | return deleted; | ||
| 1323 | } | ||
| 1324 | |||
| 1325 | parent = node->parent; | ||
| 1326 | if (parent) { | ||
| 1327 | unsigned int offset; | ||
| 1328 | |||
| 1329 | offset = node->path >> RADIX_TREE_HEIGHT_SHIFT; | ||
| 1330 | parent->slots[offset] = NULL; | ||
| 1331 | parent->count--; | ||
| 1332 | } else { | ||
| 1333 | root_tag_clear_all(root); | ||
| 1334 | root->height = 0; | ||
| 1335 | root->rnode = NULL; | ||
| 1336 | } | ||
| 1337 | |||
| 1338 | radix_tree_node_free(node); | ||
| 1339 | deleted = true; | ||
| 1340 | |||
| 1341 | node = parent; | ||
| 1342 | } while (node); | ||
| 1343 | |||
| 1344 | return deleted; | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | /** | ||
| 1348 | * radix_tree_delete_item - delete an item from a radix tree | ||
| 1349 | * @root: radix tree root | ||
| 1350 | * @index: index key | ||
| 1351 | * @item: expected item | ||
| 1352 | * | ||
| 1353 | * Remove @item at @index from the radix tree rooted at @root. | ||
| 1354 | * | ||
| 1355 | * Returns the address of the deleted item, or NULL if it was not present | ||
| 1356 | * or the entry at the given @index was not @item. | ||
| 1357 | */ | ||
| 1358 | void *radix_tree_delete_item(struct radix_tree_root *root, | ||
| 1359 | unsigned long index, void *item) | ||
| 1360 | { | ||
| 1361 | struct radix_tree_node *node; | ||
| 1362 | unsigned int offset; | ||
| 1363 | void **slot; | ||
| 1364 | void *entry; | ||
| 1354 | int tag; | 1365 | int tag; |
| 1355 | int uninitialized_var(offset); | ||
| 1356 | 1366 | ||
| 1357 | height = root->height; | 1367 | entry = __radix_tree_lookup(root, index, &node, &slot); |
| 1358 | if (index > radix_tree_maxindex(height)) | 1368 | if (!entry) |
| 1359 | goto out; | 1369 | return NULL; |
| 1360 | 1370 | ||
| 1361 | slot = root->rnode; | 1371 | if (item && entry != item) |
| 1362 | if (height == 0) { | 1372 | return NULL; |
| 1373 | |||
| 1374 | if (!node) { | ||
| 1363 | root_tag_clear_all(root); | 1375 | root_tag_clear_all(root); |
| 1364 | root->rnode = NULL; | 1376 | root->rnode = NULL; |
| 1365 | goto out; | 1377 | return entry; |
| 1366 | } | 1378 | } |
| 1367 | slot = indirect_to_ptr(slot); | ||
| 1368 | shift = height * RADIX_TREE_MAP_SHIFT; | ||
| 1369 | 1379 | ||
| 1370 | do { | 1380 | offset = index & RADIX_TREE_MAP_MASK; |
| 1371 | if (slot == NULL) | ||
| 1372 | goto out; | ||
| 1373 | |||
| 1374 | shift -= RADIX_TREE_MAP_SHIFT; | ||
| 1375 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
| 1376 | node = slot; | ||
| 1377 | slot = slot->slots[offset]; | ||
| 1378 | } while (shift); | ||
| 1379 | |||
| 1380 | if (slot == NULL) | ||
| 1381 | goto out; | ||
| 1382 | 1381 | ||
| 1383 | /* | 1382 | /* |
| 1384 | * Clear all tags associated with the item to be deleted. | 1383 | * Clear all tags associated with the item to be deleted. |
| @@ -1389,40 +1388,27 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
| 1389 | radix_tree_tag_clear(root, index, tag); | 1388 | radix_tree_tag_clear(root, index, tag); |
| 1390 | } | 1389 | } |
| 1391 | 1390 | ||
| 1392 | to_free = NULL; | 1391 | node->slots[offset] = NULL; |
| 1393 | /* Now free the nodes we do not need anymore */ | 1392 | node->count--; |
| 1394 | while (node) { | ||
| 1395 | node->slots[offset] = NULL; | ||
| 1396 | node->count--; | ||
| 1397 | /* | ||
| 1398 | * Queue the node for deferred freeing after the | ||
| 1399 | * last reference to it disappears (set NULL, above). | ||
| 1400 | */ | ||
| 1401 | if (to_free) | ||
| 1402 | radix_tree_node_free(to_free); | ||
| 1403 | |||
| 1404 | if (node->count) { | ||
| 1405 | if (node == indirect_to_ptr(root->rnode)) | ||
| 1406 | radix_tree_shrink(root); | ||
| 1407 | goto out; | ||
| 1408 | } | ||
| 1409 | 1393 | ||
| 1410 | /* Node with zero slots in use so free it */ | 1394 | __radix_tree_delete_node(root, node); |
| 1411 | to_free = node; | ||
| 1412 | 1395 | ||
| 1413 | index >>= RADIX_TREE_MAP_SHIFT; | 1396 | return entry; |
| 1414 | offset = index & RADIX_TREE_MAP_MASK; | 1397 | } |
| 1415 | node = node->parent; | 1398 | EXPORT_SYMBOL(radix_tree_delete_item); |
| 1416 | } | ||
| 1417 | |||
| 1418 | root_tag_clear_all(root); | ||
| 1419 | root->height = 0; | ||
| 1420 | root->rnode = NULL; | ||
| 1421 | if (to_free) | ||
| 1422 | radix_tree_node_free(to_free); | ||
| 1423 | 1399 | ||
| 1424 | out: | 1400 | /** |
| 1425 | return slot; | 1401 | * radix_tree_delete - delete an item from a radix tree |
| 1402 | * @root: radix tree root | ||
| 1403 | * @index: index key | ||
| 1404 | * | ||
| 1405 | * Remove the item at @index from the radix tree rooted at @root. | ||
| 1406 | * | ||
| 1407 | * Returns the address of the deleted item, or NULL if it was not present. | ||
| 1408 | */ | ||
| 1409 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | ||
| 1410 | { | ||
| 1411 | return radix_tree_delete_item(root, index, NULL); | ||
| 1426 | } | 1412 | } |
| 1427 | EXPORT_SYMBOL(radix_tree_delete); | 1413 | EXPORT_SYMBOL(radix_tree_delete); |
| 1428 | 1414 | ||
| @@ -1438,9 +1424,12 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) | |||
| 1438 | EXPORT_SYMBOL(radix_tree_tagged); | 1424 | EXPORT_SYMBOL(radix_tree_tagged); |
| 1439 | 1425 | ||
| 1440 | static void | 1426 | static void |
| 1441 | radix_tree_node_ctor(void *node) | 1427 | radix_tree_node_ctor(void *arg) |
| 1442 | { | 1428 | { |
| 1443 | memset(node, 0, sizeof(struct radix_tree_node)); | 1429 | struct radix_tree_node *node = arg; |
| 1430 | |||
| 1431 | memset(node, 0, sizeof(*node)); | ||
| 1432 | INIT_LIST_HEAD(&node->private_list); | ||
| 1444 | } | 1433 | } |
| 1445 | 1434 | ||
| 1446 | static __init unsigned long __maxindex(unsigned int height) | 1435 | static __init unsigned long __maxindex(unsigned int height) |
diff --git a/lib/random32.c b/lib/random32.c index 614896778700..fa5da61ce7ad 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
| @@ -1,37 +1,35 @@ | |||
| 1 | /* | 1 | /* |
| 2 | This is a maximally equidistributed combined Tausworthe generator | 2 | * This is a maximally equidistributed combined Tausworthe generator |
| 3 | based on code from GNU Scientific Library 1.5 (30 Jun 2004) | 3 | * based on code from GNU Scientific Library 1.5 (30 Jun 2004) |
| 4 | 4 | * | |
| 5 | lfsr113 version: | 5 | * lfsr113 version: |
| 6 | 6 | * | |
| 7 | x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n) | 7 | * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n) |
| 8 | 8 | * | |
| 9 | s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13)) | 9 | * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13)) |
| 10 | s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27)) | 10 | * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27)) |
| 11 | s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21)) | 11 | * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21)) |
| 12 | s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12)) | 12 | * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12)) |
| 13 | 13 | * | |
| 14 | The period of this generator is about 2^113 (see erratum paper). | 14 | * The period of this generator is about 2^113 (see erratum paper). |
| 15 | 15 | * | |
| 16 | From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe | 16 | * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe |
| 17 | Generators", Mathematics of Computation, 65, 213 (1996), 203--213: | 17 | * Generators", Mathematics of Computation, 65, 213 (1996), 203--213: |
| 18 | http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps | 18 | * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps |
| 19 | ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps | 19 | * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps |
| 20 | 20 | * | |
| 21 | There is an erratum in the paper "Tables of Maximally | 21 | * There is an erratum in the paper "Tables of Maximally Equidistributed |
| 22 | Equidistributed Combined LFSR Generators", Mathematics of | 22 | * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999), |
| 23 | Computation, 68, 225 (1999), 261--269: | 23 | * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps |
| 24 | http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps | 24 | * |
| 25 | 25 | * ... the k_j most significant bits of z_j must be non-zero, | |
| 26 | ... the k_j most significant bits of z_j must be non- | 26 | * for each j. (Note: this restriction also applies to the |
| 27 | zero, for each j. (Note: this restriction also applies to the | 27 | * computer code given in [4], but was mistakenly not mentioned |
| 28 | computer code given in [4], but was mistakenly not mentioned in | 28 | * in that paper.) |
| 29 | that paper.) | 29 | * |
| 30 | 30 | * This affects the seeding procedure by imposing the requirement | |
| 31 | This affects the seeding procedure by imposing the requirement | 31 | * s1 > 1, s2 > 7, s3 > 15, s4 > 127. |
| 32 | s1 > 1, s2 > 7, s3 > 15, s4 > 127. | 32 | */ |
| 33 | |||
| 34 | */ | ||
| 35 | 33 | ||
| 36 | #include <linux/types.h> | 34 | #include <linux/types.h> |
| 37 | #include <linux/percpu.h> | 35 | #include <linux/percpu.h> |
| @@ -75,15 +73,17 @@ EXPORT_SYMBOL(prandom_u32_state); | |||
| 75 | */ | 73 | */ |
| 76 | u32 prandom_u32(void) | 74 | u32 prandom_u32(void) |
| 77 | { | 75 | { |
| 78 | unsigned long r; | ||
| 79 | struct rnd_state *state = &get_cpu_var(net_rand_state); | 76 | struct rnd_state *state = &get_cpu_var(net_rand_state); |
| 80 | r = prandom_u32_state(state); | 77 | u32 res; |
| 78 | |||
| 79 | res = prandom_u32_state(state); | ||
| 81 | put_cpu_var(state); | 80 | put_cpu_var(state); |
| 82 | return r; | 81 | |
| 82 | return res; | ||
| 83 | } | 83 | } |
| 84 | EXPORT_SYMBOL(prandom_u32); | 84 | EXPORT_SYMBOL(prandom_u32); |
| 85 | 85 | ||
| 86 | /* | 86 | /** |
| 87 | * prandom_bytes_state - get the requested number of pseudo-random bytes | 87 | * prandom_bytes_state - get the requested number of pseudo-random bytes |
| 88 | * | 88 | * |
| 89 | * @state: pointer to state structure holding seeded state. | 89 | * @state: pointer to state structure holding seeded state. |
| @@ -204,6 +204,7 @@ static int __init prandom_init(void) | |||
| 204 | prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); | 204 | prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); |
| 205 | prandom_warmup(state); | 205 | prandom_warmup(state); |
| 206 | } | 206 | } |
| 207 | |||
| 207 | return 0; | 208 | return 0; |
| 208 | } | 209 | } |
| 209 | core_initcall(prandom_init); | 210 | core_initcall(prandom_init); |
| @@ -259,6 +260,7 @@ static void __prandom_reseed(bool late) | |||
| 259 | 260 | ||
| 260 | if (latch && !late) | 261 | if (latch && !late) |
| 261 | goto out; | 262 | goto out; |
| 263 | |||
| 262 | latch = true; | 264 | latch = true; |
| 263 | 265 | ||
| 264 | for_each_possible_cpu(i) { | 266 | for_each_possible_cpu(i) { |
diff --git a/lib/syscall.c b/lib/syscall.c index 58710eefeac8..e30e03932480 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
| @@ -72,4 +72,3 @@ int task_current_syscall(struct task_struct *target, long *callno, | |||
| 72 | 72 | ||
| 73 | return 0; | 73 | return 0; |
| 74 | } | 74 | } |
| 75 | EXPORT_SYMBOL_GPL(task_current_syscall); | ||
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 5e2cf6f342f8..0648291cdafe 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -364,7 +364,6 @@ enum format_type { | |||
| 364 | FORMAT_TYPE_SHORT, | 364 | FORMAT_TYPE_SHORT, |
| 365 | FORMAT_TYPE_UINT, | 365 | FORMAT_TYPE_UINT, |
| 366 | FORMAT_TYPE_INT, | 366 | FORMAT_TYPE_INT, |
| 367 | FORMAT_TYPE_NRCHARS, | ||
| 368 | FORMAT_TYPE_SIZE_T, | 367 | FORMAT_TYPE_SIZE_T, |
| 369 | FORMAT_TYPE_PTRDIFF | 368 | FORMAT_TYPE_PTRDIFF |
| 370 | }; | 369 | }; |
| @@ -1538,10 +1537,6 @@ qualifier: | |||
| 1538 | return fmt - start; | 1537 | return fmt - start; |
| 1539 | /* skip alnum */ | 1538 | /* skip alnum */ |
| 1540 | 1539 | ||
| 1541 | case 'n': | ||
| 1542 | spec->type = FORMAT_TYPE_NRCHARS; | ||
| 1543 | return ++fmt - start; | ||
| 1544 | |||
| 1545 | case '%': | 1540 | case '%': |
| 1546 | spec->type = FORMAT_TYPE_PERCENT_CHAR; | 1541 | spec->type = FORMAT_TYPE_PERCENT_CHAR; |
| 1547 | return ++fmt - start; | 1542 | return ++fmt - start; |
| @@ -1564,6 +1559,15 @@ qualifier: | |||
| 1564 | case 'u': | 1559 | case 'u': |
| 1565 | break; | 1560 | break; |
| 1566 | 1561 | ||
| 1562 | case 'n': | ||
| 1563 | /* | ||
| 1564 | * Since %n poses a greater security risk than utility, treat | ||
| 1565 | * it as an invalid format specifier. Warn about its use so | ||
| 1566 | * that new instances don't get added. | ||
| 1567 | */ | ||
| 1568 | WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt); | ||
| 1569 | /* Fall-through */ | ||
| 1570 | |||
| 1567 | default: | 1571 | default: |
| 1568 | spec->type = FORMAT_TYPE_INVALID; | 1572 | spec->type = FORMAT_TYPE_INVALID; |
| 1569 | return fmt - start; | 1573 | return fmt - start; |
| @@ -1737,20 +1741,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1737 | ++str; | 1741 | ++str; |
| 1738 | break; | 1742 | break; |
| 1739 | 1743 | ||
| 1740 | case FORMAT_TYPE_NRCHARS: { | ||
| 1741 | /* | ||
| 1742 | * Since %n poses a greater security risk than | ||
| 1743 | * utility, ignore %n and skip its argument. | ||
| 1744 | */ | ||
| 1745 | void *skip_arg; | ||
| 1746 | |||
| 1747 | WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", | ||
| 1748 | old_fmt); | ||
| 1749 | |||
| 1750 | skip_arg = va_arg(args, void *); | ||
| 1751 | break; | ||
| 1752 | } | ||
| 1753 | |||
| 1754 | default: | 1744 | default: |
| 1755 | switch (spec.type) { | 1745 | switch (spec.type) { |
| 1756 | case FORMAT_TYPE_LONG_LONG: | 1746 | case FORMAT_TYPE_LONG_LONG: |
| @@ -2025,19 +2015,6 @@ do { \ | |||
| 2025 | fmt++; | 2015 | fmt++; |
| 2026 | break; | 2016 | break; |
| 2027 | 2017 | ||
| 2028 | case FORMAT_TYPE_NRCHARS: { | ||
| 2029 | /* skip %n 's argument */ | ||
| 2030 | u8 qualifier = spec.qualifier; | ||
| 2031 | void *skip_arg; | ||
| 2032 | if (qualifier == 'l') | ||
| 2033 | skip_arg = va_arg(args, long *); | ||
| 2034 | else if (_tolower(qualifier) == 'z') | ||
| 2035 | skip_arg = va_arg(args, size_t *); | ||
| 2036 | else | ||
| 2037 | skip_arg = va_arg(args, int *); | ||
| 2038 | break; | ||
| 2039 | } | ||
| 2040 | |||
| 2041 | default: | 2018 | default: |
| 2042 | switch (spec.type) { | 2019 | switch (spec.type) { |
| 2043 | 2020 | ||
| @@ -2196,10 +2173,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 2196 | ++str; | 2173 | ++str; |
| 2197 | break; | 2174 | break; |
| 2198 | 2175 | ||
| 2199 | case FORMAT_TYPE_NRCHARS: | ||
| 2200 | /* skip */ | ||
| 2201 | break; | ||
| 2202 | |||
| 2203 | default: { | 2176 | default: { |
| 2204 | unsigned long long num; | 2177 | unsigned long long num; |
| 2205 | 2178 | ||
diff --git a/mm/Makefile b/mm/Makefile index 310c90a09264..cdd741519ee0 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
| @@ -17,7 +17,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ | |||
| 17 | util.o mmzone.o vmstat.o backing-dev.o \ | 17 | util.o mmzone.o vmstat.o backing-dev.o \ |
| 18 | mm_init.o mmu_context.o percpu.o slab_common.o \ | 18 | mm_init.o mmu_context.o percpu.o slab_common.o \ |
| 19 | compaction.o balloon_compaction.o \ | 19 | compaction.o balloon_compaction.o \ |
| 20 | interval_tree.o list_lru.o $(mmu-y) | 20 | interval_tree.o list_lru.o workingset.o $(mmu-y) |
| 21 | 21 | ||
| 22 | obj-y += init-mm.o | 22 | obj-y += init-mm.o |
| 23 | 23 | ||
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index ce682f7a4f29..09d9591b7708 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -288,13 +288,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi) | |||
| 288 | * Note, we wouldn't bother setting up the timer, but this function is on the | 288 | * Note, we wouldn't bother setting up the timer, but this function is on the |
| 289 | * fast-path (used by '__mark_inode_dirty()'), so we save few context switches | 289 | * fast-path (used by '__mark_inode_dirty()'), so we save few context switches |
| 290 | * by delaying the wake-up. | 290 | * by delaying the wake-up. |
| 291 | * | ||
| 292 | * We have to be careful not to postpone flush work if it is scheduled for | ||
| 293 | * earlier. Thus we use queue_delayed_work(). | ||
| 291 | */ | 294 | */ |
| 292 | void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi) | 295 | void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi) |
| 293 | { | 296 | { |
| 294 | unsigned long timeout; | 297 | unsigned long timeout; |
| 295 | 298 | ||
| 296 | timeout = msecs_to_jiffies(dirty_writeback_interval * 10); | 299 | timeout = msecs_to_jiffies(dirty_writeback_interval * 10); |
| 297 | mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout); | 300 | spin_lock_bh(&bdi->wb_lock); |
| 301 | if (test_bit(BDI_registered, &bdi->state)) | ||
| 302 | queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout); | ||
| 303 | spin_unlock_bh(&bdi->wb_lock); | ||
| 298 | } | 304 | } |
| 299 | 305 | ||
| 300 | /* | 306 | /* |
| @@ -307,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi) | |||
| 307 | spin_unlock_bh(&bdi_lock); | 313 | spin_unlock_bh(&bdi_lock); |
| 308 | 314 | ||
| 309 | synchronize_rcu_expedited(); | 315 | synchronize_rcu_expedited(); |
| 310 | |||
| 311 | /* bdi_list is now unused, clear it to mark @bdi dying */ | ||
| 312 | INIT_LIST_HEAD(&bdi->bdi_list); | ||
| 313 | } | 316 | } |
| 314 | 317 | ||
| 315 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, | 318 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, |
| @@ -360,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) | |||
| 360 | */ | 363 | */ |
| 361 | bdi_remove_from_list(bdi); | 364 | bdi_remove_from_list(bdi); |
| 362 | 365 | ||
| 366 | /* Make sure nobody queues further work */ | ||
| 367 | spin_lock_bh(&bdi->wb_lock); | ||
| 368 | clear_bit(BDI_registered, &bdi->state); | ||
| 369 | spin_unlock_bh(&bdi->wb_lock); | ||
| 370 | |||
| 363 | /* | 371 | /* |
| 364 | * Drain work list and shutdown the delayed_work. At this point, | 372 | * Drain work list and shutdown the delayed_work. At this point, |
| 365 | * @bdi->bdi_list is empty telling bdi_Writeback_workfn() that @bdi | 373 | * @bdi->bdi_list is empty telling bdi_Writeback_workfn() that @bdi |
diff --git a/mm/compaction.c b/mm/compaction.c index 918577595ea8..b6ab77160068 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -584,6 +584,15 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
| 584 | continue; | 584 | continue; |
| 585 | } | 585 | } |
| 586 | 586 | ||
| 587 | /* | ||
| 588 | * Migration will fail if an anonymous page is pinned in memory, | ||
| 589 | * so avoid taking lru_lock and isolating it unnecessarily in an | ||
| 590 | * admittedly racy check. | ||
| 591 | */ | ||
| 592 | if (!page_mapping(page) && | ||
| 593 | page_count(page) > page_mapcount(page)) | ||
| 594 | continue; | ||
| 595 | |||
| 587 | /* Check if it is ok to still hold the lock */ | 596 | /* Check if it is ok to still hold the lock */ |
| 588 | locked = compact_checklock_irqsave(&zone->lru_lock, &flags, | 597 | locked = compact_checklock_irqsave(&zone->lru_lock, &flags, |
| 589 | locked, cc); | 598 | locked, cc); |
| @@ -1186,6 +1195,7 @@ static void compact_node(int nid) | |||
| 1186 | struct compact_control cc = { | 1195 | struct compact_control cc = { |
| 1187 | .order = -1, | 1196 | .order = -1, |
| 1188 | .sync = true, | 1197 | .sync = true, |
| 1198 | .ignore_skip_hint = true, | ||
| 1189 | }; | 1199 | }; |
| 1190 | 1200 | ||
| 1191 | __compact_pgdat(NODE_DATA(nid), &cc); | 1201 | __compact_pgdat(NODE_DATA(nid), &cc); |
| @@ -1225,7 +1235,7 @@ int sysctl_extfrag_handler(struct ctl_table *table, int write, | |||
| 1225 | } | 1235 | } |
| 1226 | 1236 | ||
| 1227 | #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | 1237 | #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) |
| 1228 | ssize_t sysfs_compact_node(struct device *dev, | 1238 | static ssize_t sysfs_compact_node(struct device *dev, |
| 1229 | struct device_attribute *attr, | 1239 | struct device_attribute *attr, |
| 1230 | const char *buf, size_t count) | 1240 | const char *buf, size_t count) |
| 1231 | { | 1241 | { |
diff --git a/mm/filemap.c b/mm/filemap.c index 7a13f6ac5421..21781f1fe52b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -107,12 +107,75 @@ | |||
| 107 | * ->tasklist_lock (memory_failure, collect_procs_ao) | 107 | * ->tasklist_lock (memory_failure, collect_procs_ao) |
| 108 | */ | 108 | */ |
| 109 | 109 | ||
| 110 | static void page_cache_tree_delete(struct address_space *mapping, | ||
| 111 | struct page *page, void *shadow) | ||
| 112 | { | ||
| 113 | struct radix_tree_node *node; | ||
| 114 | unsigned long index; | ||
| 115 | unsigned int offset; | ||
| 116 | unsigned int tag; | ||
| 117 | void **slot; | ||
| 118 | |||
| 119 | VM_BUG_ON(!PageLocked(page)); | ||
| 120 | |||
| 121 | __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); | ||
| 122 | |||
| 123 | if (shadow) { | ||
| 124 | mapping->nrshadows++; | ||
| 125 | /* | ||
| 126 | * Make sure the nrshadows update is committed before | ||
| 127 | * the nrpages update so that final truncate racing | ||
| 128 | * with reclaim does not see both counters 0 at the | ||
| 129 | * same time and miss a shadow entry. | ||
| 130 | */ | ||
| 131 | smp_wmb(); | ||
| 132 | } | ||
| 133 | mapping->nrpages--; | ||
| 134 | |||
| 135 | if (!node) { | ||
| 136 | /* Clear direct pointer tags in root node */ | ||
| 137 | mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; | ||
| 138 | radix_tree_replace_slot(slot, shadow); | ||
| 139 | return; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* Clear tree tags for the removed page */ | ||
| 143 | index = page->index; | ||
| 144 | offset = index & RADIX_TREE_MAP_MASK; | ||
| 145 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { | ||
| 146 | if (test_bit(offset, node->tags[tag])) | ||
| 147 | radix_tree_tag_clear(&mapping->page_tree, index, tag); | ||
| 148 | } | ||
| 149 | |||
| 150 | /* Delete page, swap shadow entry */ | ||
| 151 | radix_tree_replace_slot(slot, shadow); | ||
| 152 | workingset_node_pages_dec(node); | ||
| 153 | if (shadow) | ||
| 154 | workingset_node_shadows_inc(node); | ||
| 155 | else | ||
| 156 | if (__radix_tree_delete_node(&mapping->page_tree, node)) | ||
| 157 | return; | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Track node that only contains shadow entries. | ||
| 161 | * | ||
| 162 | * Avoid acquiring the list_lru lock if already tracked. The | ||
| 163 | * list_empty() test is safe as node->private_list is | ||
| 164 | * protected by mapping->tree_lock. | ||
| 165 | */ | ||
| 166 | if (!workingset_node_pages(node) && | ||
| 167 | list_empty(&node->private_list)) { | ||
| 168 | node->private_data = mapping; | ||
| 169 | list_lru_add(&workingset_shadow_nodes, &node->private_list); | ||
| 170 | } | ||
| 171 | } | ||
| 172 | |||
| 110 | /* | 173 | /* |
| 111 | * Delete a page from the page cache and free it. Caller has to make | 174 | * Delete a page from the page cache and free it. Caller has to make |
| 112 | * sure the page is locked and that nobody else uses it - or that usage | 175 | * sure the page is locked and that nobody else uses it - or that usage |
| 113 | * is safe. The caller must hold the mapping's tree_lock. | 176 | * is safe. The caller must hold the mapping's tree_lock. |
| 114 | */ | 177 | */ |
| 115 | void __delete_from_page_cache(struct page *page) | 178 | void __delete_from_page_cache(struct page *page, void *shadow) |
| 116 | { | 179 | { |
| 117 | struct address_space *mapping = page->mapping; | 180 | struct address_space *mapping = page->mapping; |
| 118 | 181 | ||
| @@ -127,10 +190,11 @@ void __delete_from_page_cache(struct page *page) | |||
| 127 | else | 190 | else |
| 128 | cleancache_invalidate_page(mapping, page); | 191 | cleancache_invalidate_page(mapping, page); |
| 129 | 192 | ||
| 130 | radix_tree_delete(&mapping->page_tree, page->index); | 193 | page_cache_tree_delete(mapping, page, shadow); |
| 194 | |||
| 131 | page->mapping = NULL; | 195 | page->mapping = NULL; |
| 132 | /* Leave page->index set: truncation lookup relies upon it */ | 196 | /* Leave page->index set: truncation lookup relies upon it */ |
| 133 | mapping->nrpages--; | 197 | |
| 134 | __dec_zone_page_state(page, NR_FILE_PAGES); | 198 | __dec_zone_page_state(page, NR_FILE_PAGES); |
| 135 | if (PageSwapBacked(page)) | 199 | if (PageSwapBacked(page)) |
| 136 | __dec_zone_page_state(page, NR_SHMEM); | 200 | __dec_zone_page_state(page, NR_SHMEM); |
| @@ -166,7 +230,7 @@ void delete_from_page_cache(struct page *page) | |||
| 166 | 230 | ||
| 167 | freepage = mapping->a_ops->freepage; | 231 | freepage = mapping->a_ops->freepage; |
| 168 | spin_lock_irq(&mapping->tree_lock); | 232 | spin_lock_irq(&mapping->tree_lock); |
| 169 | __delete_from_page_cache(page); | 233 | __delete_from_page_cache(page, NULL); |
| 170 | spin_unlock_irq(&mapping->tree_lock); | 234 | spin_unlock_irq(&mapping->tree_lock); |
| 171 | mem_cgroup_uncharge_cache_page(page); | 235 | mem_cgroup_uncharge_cache_page(page); |
| 172 | 236 | ||
| @@ -426,7 +490,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) | |||
| 426 | new->index = offset; | 490 | new->index = offset; |
| 427 | 491 | ||
| 428 | spin_lock_irq(&mapping->tree_lock); | 492 | spin_lock_irq(&mapping->tree_lock); |
| 429 | __delete_from_page_cache(old); | 493 | __delete_from_page_cache(old, NULL); |
| 430 | error = radix_tree_insert(&mapping->page_tree, offset, new); | 494 | error = radix_tree_insert(&mapping->page_tree, offset, new); |
| 431 | BUG_ON(error); | 495 | BUG_ON(error); |
| 432 | mapping->nrpages++; | 496 | mapping->nrpages++; |
| @@ -446,18 +510,52 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) | |||
| 446 | } | 510 | } |
| 447 | EXPORT_SYMBOL_GPL(replace_page_cache_page); | 511 | EXPORT_SYMBOL_GPL(replace_page_cache_page); |
| 448 | 512 | ||
| 449 | /** | 513 | static int page_cache_tree_insert(struct address_space *mapping, |
| 450 | * add_to_page_cache_locked - add a locked page to the pagecache | 514 | struct page *page, void **shadowp) |
| 451 | * @page: page to add | 515 | { |
| 452 | * @mapping: the page's address_space | 516 | struct radix_tree_node *node; |
| 453 | * @offset: page index | 517 | void **slot; |
| 454 | * @gfp_mask: page allocation mode | 518 | int error; |
| 455 | * | 519 | |
| 456 | * This function is used to add a page to the pagecache. It must be locked. | 520 | error = __radix_tree_create(&mapping->page_tree, page->index, |
| 457 | * This function does not add the page to the LRU. The caller must do that. | 521 | &node, &slot); |
| 458 | */ | 522 | if (error) |
| 459 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | 523 | return error; |
| 460 | pgoff_t offset, gfp_t gfp_mask) | 524 | if (*slot) { |
| 525 | void *p; | ||
| 526 | |||
| 527 | p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); | ||
| 528 | if (!radix_tree_exceptional_entry(p)) | ||
| 529 | return -EEXIST; | ||
| 530 | if (shadowp) | ||
| 531 | *shadowp = p; | ||
| 532 | mapping->nrshadows--; | ||
| 533 | if (node) | ||
| 534 | workingset_node_shadows_dec(node); | ||
| 535 | } | ||
| 536 | radix_tree_replace_slot(slot, page); | ||
| 537 | mapping->nrpages++; | ||
| 538 | if (node) { | ||
| 539 | workingset_node_pages_inc(node); | ||
| 540 | /* | ||
| 541 | * Don't track node that contains actual pages. | ||
| 542 | * | ||
| 543 | * Avoid acquiring the list_lru lock if already | ||
| 544 | * untracked. The list_empty() test is safe as | ||
| 545 | * node->private_list is protected by | ||
| 546 | * mapping->tree_lock. | ||
| 547 | */ | ||
| 548 | if (!list_empty(&node->private_list)) | ||
| 549 | list_lru_del(&workingset_shadow_nodes, | ||
| 550 | &node->private_list); | ||
| 551 | } | ||
| 552 | return 0; | ||
| 553 | } | ||
| 554 | |||
| 555 | static int __add_to_page_cache_locked(struct page *page, | ||
| 556 | struct address_space *mapping, | ||
| 557 | pgoff_t offset, gfp_t gfp_mask, | ||
| 558 | void **shadowp) | ||
| 461 | { | 559 | { |
| 462 | int error; | 560 | int error; |
| 463 | 561 | ||
| @@ -480,11 +578,10 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
| 480 | page->index = offset; | 578 | page->index = offset; |
| 481 | 579 | ||
| 482 | spin_lock_irq(&mapping->tree_lock); | 580 | spin_lock_irq(&mapping->tree_lock); |
| 483 | error = radix_tree_insert(&mapping->page_tree, offset, page); | 581 | error = page_cache_tree_insert(mapping, page, shadowp); |
| 484 | radix_tree_preload_end(); | 582 | radix_tree_preload_end(); |
| 485 | if (unlikely(error)) | 583 | if (unlikely(error)) |
| 486 | goto err_insert; | 584 | goto err_insert; |
| 487 | mapping->nrpages++; | ||
| 488 | __inc_zone_page_state(page, NR_FILE_PAGES); | 585 | __inc_zone_page_state(page, NR_FILE_PAGES); |
| 489 | spin_unlock_irq(&mapping->tree_lock); | 586 | spin_unlock_irq(&mapping->tree_lock); |
| 490 | trace_mm_filemap_add_to_page_cache(page); | 587 | trace_mm_filemap_add_to_page_cache(page); |
| @@ -497,16 +594,49 @@ err_insert: | |||
| 497 | page_cache_release(page); | 594 | page_cache_release(page); |
| 498 | return error; | 595 | return error; |
| 499 | } | 596 | } |
| 597 | |||
| 598 | /** | ||
| 599 | * add_to_page_cache_locked - add a locked page to the pagecache | ||
| 600 | * @page: page to add | ||
| 601 | * @mapping: the page's address_space | ||
| 602 | * @offset: page index | ||
| 603 | * @gfp_mask: page allocation mode | ||
| 604 | * | ||
| 605 | * This function is used to add a page to the pagecache. It must be locked. | ||
| 606 | * This function does not add the page to the LRU. The caller must do that. | ||
| 607 | */ | ||
| 608 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | ||
| 609 | pgoff_t offset, gfp_t gfp_mask) | ||
| 610 | { | ||
| 611 | return __add_to_page_cache_locked(page, mapping, offset, | ||
| 612 | gfp_mask, NULL); | ||
| 613 | } | ||
| 500 | EXPORT_SYMBOL(add_to_page_cache_locked); | 614 | EXPORT_SYMBOL(add_to_page_cache_locked); |
| 501 | 615 | ||
| 502 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | 616 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, |
| 503 | pgoff_t offset, gfp_t gfp_mask) | 617 | pgoff_t offset, gfp_t gfp_mask) |
| 504 | { | 618 | { |
| 619 | void *shadow = NULL; | ||
| 505 | int ret; | 620 | int ret; |
| 506 | 621 | ||
| 507 | ret = add_to_page_cache(page, mapping, offset, gfp_mask); | 622 | __set_page_locked(page); |
| 508 | if (ret == 0) | 623 | ret = __add_to_page_cache_locked(page, mapping, offset, |
| 509 | lru_cache_add_file(page); | 624 | gfp_mask, &shadow); |
| 625 | if (unlikely(ret)) | ||
| 626 | __clear_page_locked(page); | ||
| 627 | else { | ||
| 628 | /* | ||
| 629 | * The page might have been evicted from cache only | ||
| 630 | * recently, in which case it should be activated like | ||
| 631 | * any other repeatedly accessed page. | ||
| 632 | */ | ||
| 633 | if (shadow && workingset_refault(shadow)) { | ||
| 634 | SetPageActive(page); | ||
| 635 | workingset_activation(page); | ||
| 636 | } else | ||
| 637 | ClearPageActive(page); | ||
| 638 | lru_cache_add(page); | ||
| 639 | } | ||
| 510 | return ret; | 640 | return ret; |
| 511 | } | 641 | } |
| 512 | EXPORT_SYMBOL_GPL(add_to_page_cache_lru); | 642 | EXPORT_SYMBOL_GPL(add_to_page_cache_lru); |
| @@ -520,10 +650,10 @@ struct page *__page_cache_alloc(gfp_t gfp) | |||
| 520 | if (cpuset_do_page_mem_spread()) { | 650 | if (cpuset_do_page_mem_spread()) { |
| 521 | unsigned int cpuset_mems_cookie; | 651 | unsigned int cpuset_mems_cookie; |
| 522 | do { | 652 | do { |
| 523 | cpuset_mems_cookie = get_mems_allowed(); | 653 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 524 | n = cpuset_mem_spread_node(); | 654 | n = cpuset_mem_spread_node(); |
| 525 | page = alloc_pages_exact_node(n, gfp, 0); | 655 | page = alloc_pages_exact_node(n, gfp, 0); |
| 526 | } while (!put_mems_allowed(cpuset_mems_cookie) && !page); | 656 | } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); |
| 527 | 657 | ||
| 528 | return page; | 658 | return page; |
| 529 | } | 659 | } |
| @@ -686,14 +816,101 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm, | |||
| 686 | } | 816 | } |
| 687 | 817 | ||
| 688 | /** | 818 | /** |
| 689 | * find_get_page - find and get a page reference | 819 | * page_cache_next_hole - find the next hole (not-present entry) |
| 820 | * @mapping: mapping | ||
| 821 | * @index: index | ||
| 822 | * @max_scan: maximum range to search | ||
| 823 | * | ||
| 824 | * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the | ||
| 825 | * lowest indexed hole. | ||
| 826 | * | ||
| 827 | * Returns: the index of the hole if found, otherwise returns an index | ||
| 828 | * outside of the set specified (in which case 'return - index >= | ||
| 829 | * max_scan' will be true). In rare cases of index wrap-around, 0 will | ||
| 830 | * be returned. | ||
| 831 | * | ||
| 832 | * page_cache_next_hole may be called under rcu_read_lock. However, | ||
| 833 | * like radix_tree_gang_lookup, this will not atomically search a | ||
| 834 | * snapshot of the tree at a single point in time. For example, if a | ||
| 835 | * hole is created at index 5, then subsequently a hole is created at | ||
| 836 | * index 10, page_cache_next_hole covering both indexes may return 10 | ||
| 837 | * if called under rcu_read_lock. | ||
| 838 | */ | ||
| 839 | pgoff_t page_cache_next_hole(struct address_space *mapping, | ||
| 840 | pgoff_t index, unsigned long max_scan) | ||
| 841 | { | ||
| 842 | unsigned long i; | ||
| 843 | |||
| 844 | for (i = 0; i < max_scan; i++) { | ||
| 845 | struct page *page; | ||
| 846 | |||
| 847 | page = radix_tree_lookup(&mapping->page_tree, index); | ||
| 848 | if (!page || radix_tree_exceptional_entry(page)) | ||
| 849 | break; | ||
| 850 | index++; | ||
| 851 | if (index == 0) | ||
| 852 | break; | ||
| 853 | } | ||
| 854 | |||
| 855 | return index; | ||
| 856 | } | ||
| 857 | EXPORT_SYMBOL(page_cache_next_hole); | ||
| 858 | |||
| 859 | /** | ||
| 860 | * page_cache_prev_hole - find the prev hole (not-present entry) | ||
| 861 | * @mapping: mapping | ||
| 862 | * @index: index | ||
| 863 | * @max_scan: maximum range to search | ||
| 864 | * | ||
| 865 | * Search backwards in the range [max(index-max_scan+1, 0), index] for | ||
| 866 | * the first hole. | ||
| 867 | * | ||
| 868 | * Returns: the index of the hole if found, otherwise returns an index | ||
| 869 | * outside of the set specified (in which case 'index - return >= | ||
| 870 | * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX | ||
| 871 | * will be returned. | ||
| 872 | * | ||
| 873 | * page_cache_prev_hole may be called under rcu_read_lock. However, | ||
| 874 | * like radix_tree_gang_lookup, this will not atomically search a | ||
| 875 | * snapshot of the tree at a single point in time. For example, if a | ||
| 876 | * hole is created at index 10, then subsequently a hole is created at | ||
| 877 | * index 5, page_cache_prev_hole covering both indexes may return 5 if | ||
| 878 | * called under rcu_read_lock. | ||
| 879 | */ | ||
| 880 | pgoff_t page_cache_prev_hole(struct address_space *mapping, | ||
| 881 | pgoff_t index, unsigned long max_scan) | ||
| 882 | { | ||
| 883 | unsigned long i; | ||
| 884 | |||
| 885 | for (i = 0; i < max_scan; i++) { | ||
| 886 | struct page *page; | ||
| 887 | |||
| 888 | page = radix_tree_lookup(&mapping->page_tree, index); | ||
| 889 | if (!page || radix_tree_exceptional_entry(page)) | ||
| 890 | break; | ||
| 891 | index--; | ||
| 892 | if (index == ULONG_MAX) | ||
| 893 | break; | ||
| 894 | } | ||
| 895 | |||
| 896 | return index; | ||
| 897 | } | ||
| 898 | EXPORT_SYMBOL(page_cache_prev_hole); | ||
| 899 | |||
| 900 | /** | ||
| 901 | * find_get_entry - find and get a page cache entry | ||
| 690 | * @mapping: the address_space to search | 902 | * @mapping: the address_space to search |
| 691 | * @offset: the page index | 903 | * @offset: the page cache index |
| 904 | * | ||
| 905 | * Looks up the page cache slot at @mapping & @offset. If there is a | ||
| 906 | * page cache page, it is returned with an increased refcount. | ||
| 907 | * | ||
| 908 | * If the slot holds a shadow entry of a previously evicted page, it | ||
| 909 | * is returned. | ||
| 692 | * | 910 | * |
| 693 | * Is there a pagecache struct page at the given (mapping, offset) tuple? | 911 | * Otherwise, %NULL is returned. |
| 694 | * If yes, increment its refcount and return it; if no, return NULL. | ||
| 695 | */ | 912 | */ |
| 696 | struct page *find_get_page(struct address_space *mapping, pgoff_t offset) | 913 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) |
| 697 | { | 914 | { |
| 698 | void **pagep; | 915 | void **pagep; |
| 699 | struct page *page; | 916 | struct page *page; |
| @@ -734,24 +951,50 @@ out: | |||
| 734 | 951 | ||
| 735 | return page; | 952 | return page; |
| 736 | } | 953 | } |
| 737 | EXPORT_SYMBOL(find_get_page); | 954 | EXPORT_SYMBOL(find_get_entry); |
| 738 | 955 | ||
| 739 | /** | 956 | /** |
| 740 | * find_lock_page - locate, pin and lock a pagecache page | 957 | * find_get_page - find and get a page reference |
| 741 | * @mapping: the address_space to search | 958 | * @mapping: the address_space to search |
| 742 | * @offset: the page index | 959 | * @offset: the page index |
| 743 | * | 960 | * |
| 744 | * Locates the desired pagecache page, locks it, increments its reference | 961 | * Looks up the page cache slot at @mapping & @offset. If there is a |
| 745 | * count and returns its address. | 962 | * page cache page, it is returned with an increased refcount. |
| 746 | * | 963 | * |
| 747 | * Returns zero if the page was not present. find_lock_page() may sleep. | 964 | * Otherwise, %NULL is returned. |
| 748 | */ | 965 | */ |
| 749 | struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) | 966 | struct page *find_get_page(struct address_space *mapping, pgoff_t offset) |
| 967 | { | ||
| 968 | struct page *page = find_get_entry(mapping, offset); | ||
| 969 | |||
| 970 | if (radix_tree_exceptional_entry(page)) | ||
| 971 | page = NULL; | ||
| 972 | return page; | ||
| 973 | } | ||
| 974 | EXPORT_SYMBOL(find_get_page); | ||
| 975 | |||
| 976 | /** | ||
| 977 | * find_lock_entry - locate, pin and lock a page cache entry | ||
| 978 | * @mapping: the address_space to search | ||
| 979 | * @offset: the page cache index | ||
| 980 | * | ||
| 981 | * Looks up the page cache slot at @mapping & @offset. If there is a | ||
| 982 | * page cache page, it is returned locked and with an increased | ||
| 983 | * refcount. | ||
| 984 | * | ||
| 985 | * If the slot holds a shadow entry of a previously evicted page, it | ||
| 986 | * is returned. | ||
| 987 | * | ||
| 988 | * Otherwise, %NULL is returned. | ||
| 989 | * | ||
| 990 | * find_lock_entry() may sleep. | ||
| 991 | */ | ||
| 992 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) | ||
| 750 | { | 993 | { |
| 751 | struct page *page; | 994 | struct page *page; |
| 752 | 995 | ||
| 753 | repeat: | 996 | repeat: |
| 754 | page = find_get_page(mapping, offset); | 997 | page = find_get_entry(mapping, offset); |
| 755 | if (page && !radix_tree_exception(page)) { | 998 | if (page && !radix_tree_exception(page)) { |
| 756 | lock_page(page); | 999 | lock_page(page); |
| 757 | /* Has the page been truncated? */ | 1000 | /* Has the page been truncated? */ |
| @@ -764,6 +1007,29 @@ repeat: | |||
| 764 | } | 1007 | } |
| 765 | return page; | 1008 | return page; |
| 766 | } | 1009 | } |
| 1010 | EXPORT_SYMBOL(find_lock_entry); | ||
| 1011 | |||
| 1012 | /** | ||
| 1013 | * find_lock_page - locate, pin and lock a pagecache page | ||
| 1014 | * @mapping: the address_space to search | ||
| 1015 | * @offset: the page index | ||
| 1016 | * | ||
| 1017 | * Looks up the page cache slot at @mapping & @offset. If there is a | ||
| 1018 | * page cache page, it is returned locked and with an increased | ||
| 1019 | * refcount. | ||
| 1020 | * | ||
| 1021 | * Otherwise, %NULL is returned. | ||
| 1022 | * | ||
| 1023 | * find_lock_page() may sleep. | ||
| 1024 | */ | ||
| 1025 | struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) | ||
| 1026 | { | ||
| 1027 | struct page *page = find_lock_entry(mapping, offset); | ||
| 1028 | |||
| 1029 | if (radix_tree_exceptional_entry(page)) | ||
| 1030 | page = NULL; | ||
| 1031 | return page; | ||
| 1032 | } | ||
| 767 | EXPORT_SYMBOL(find_lock_page); | 1033 | EXPORT_SYMBOL(find_lock_page); |
| 768 | 1034 | ||
| 769 | /** | 1035 | /** |
| @@ -772,16 +1038,18 @@ EXPORT_SYMBOL(find_lock_page); | |||
| 772 | * @index: the page's index into the mapping | 1038 | * @index: the page's index into the mapping |
| 773 | * @gfp_mask: page allocation mode | 1039 | * @gfp_mask: page allocation mode |
| 774 | * | 1040 | * |
| 775 | * Locates a page in the pagecache. If the page is not present, a new page | 1041 | * Looks up the page cache slot at @mapping & @offset. If there is a |
| 776 | * is allocated using @gfp_mask and is added to the pagecache and to the VM's | 1042 | * page cache page, it is returned locked and with an increased |
| 777 | * LRU list. The returned page is locked and has its reference count | 1043 | * refcount. |
| 778 | * incremented. | ||
| 779 | * | 1044 | * |
| 780 | * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic | 1045 | * If the page is not present, a new page is allocated using @gfp_mask |
| 781 | * allocation! | 1046 | * and added to the page cache and the VM's LRU list. The page is |
| 1047 | * returned locked and with an increased refcount. | ||
| 782 | * | 1048 | * |
| 783 | * find_or_create_page() returns the desired page's address, or zero on | 1049 | * On memory exhaustion, %NULL is returned. |
| 784 | * memory exhaustion. | 1050 | * |
| 1051 | * find_or_create_page() may sleep, even if @gfp_flags specifies an | ||
| 1052 | * atomic allocation! | ||
| 785 | */ | 1053 | */ |
| 786 | struct page *find_or_create_page(struct address_space *mapping, | 1054 | struct page *find_or_create_page(struct address_space *mapping, |
| 787 | pgoff_t index, gfp_t gfp_mask) | 1055 | pgoff_t index, gfp_t gfp_mask) |
| @@ -814,6 +1082,76 @@ repeat: | |||
| 814 | EXPORT_SYMBOL(find_or_create_page); | 1082 | EXPORT_SYMBOL(find_or_create_page); |
| 815 | 1083 | ||
| 816 | /** | 1084 | /** |
| 1085 | * find_get_entries - gang pagecache lookup | ||
| 1086 | * @mapping: The address_space to search | ||
| 1087 | * @start: The starting page cache index | ||
| 1088 | * @nr_entries: The maximum number of entries | ||
| 1089 | * @entries: Where the resulting entries are placed | ||
| 1090 | * @indices: The cache indices corresponding to the entries in @entries | ||
| 1091 | * | ||
| 1092 | * find_get_entries() will search for and return a group of up to | ||
| 1093 | * @nr_entries entries in the mapping. The entries are placed at | ||
| 1094 | * @entries. find_get_entries() takes a reference against any actual | ||
| 1095 | * pages it returns. | ||
| 1096 | * | ||
| 1097 | * The search returns a group of mapping-contiguous page cache entries | ||
| 1098 | * with ascending indexes. There may be holes in the indices due to | ||
| 1099 | * not-present pages. | ||
| 1100 | * | ||
| 1101 | * Any shadow entries of evicted pages are included in the returned | ||
| 1102 | * array. | ||
| 1103 | * | ||
| 1104 | * find_get_entries() returns the number of pages and shadow entries | ||
| 1105 | * which were found. | ||
| 1106 | */ | ||
| 1107 | unsigned find_get_entries(struct address_space *mapping, | ||
| 1108 | pgoff_t start, unsigned int nr_entries, | ||
| 1109 | struct page **entries, pgoff_t *indices) | ||
| 1110 | { | ||
| 1111 | void **slot; | ||
| 1112 | unsigned int ret = 0; | ||
| 1113 | struct radix_tree_iter iter; | ||
| 1114 | |||
| 1115 | if (!nr_entries) | ||
| 1116 | return 0; | ||
| 1117 | |||
| 1118 | rcu_read_lock(); | ||
| 1119 | restart: | ||
| 1120 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { | ||
| 1121 | struct page *page; | ||
| 1122 | repeat: | ||
| 1123 | page = radix_tree_deref_slot(slot); | ||
| 1124 | if (unlikely(!page)) | ||
| 1125 | continue; | ||
| 1126 | if (radix_tree_exception(page)) { | ||
| 1127 | if (radix_tree_deref_retry(page)) | ||
| 1128 | goto restart; | ||
| 1129 | /* | ||
| 1130 | * Otherwise, we must be storing a swap entry | ||
| 1131 | * here as an exceptional entry: so return it | ||
| 1132 | * without attempting to raise page count. | ||
| 1133 | */ | ||
| 1134 | goto export; | ||
| 1135 | } | ||
| 1136 | if (!page_cache_get_speculative(page)) | ||
| 1137 | goto repeat; | ||
| 1138 | |||
| 1139 | /* Has the page moved? */ | ||
| 1140 | if (unlikely(page != *slot)) { | ||
| 1141 | page_cache_release(page); | ||
| 1142 | goto repeat; | ||
| 1143 | } | ||
| 1144 | export: | ||
| 1145 | indices[ret] = iter.index; | ||
| 1146 | entries[ret] = page; | ||
| 1147 | if (++ret == nr_entries) | ||
| 1148 | break; | ||
| 1149 | } | ||
| 1150 | rcu_read_unlock(); | ||
| 1151 | return ret; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | /** | ||
| 817 | * find_get_pages - gang pagecache lookup | 1155 | * find_get_pages - gang pagecache lookup |
| 818 | * @mapping: The address_space to search | 1156 | * @mapping: The address_space to search |
| 819 | * @start: The starting page index | 1157 | * @start: The starting page index |
| @@ -1795,6 +2133,18 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) | |||
| 1795 | EXPORT_SYMBOL(generic_file_mmap); | 2133 | EXPORT_SYMBOL(generic_file_mmap); |
| 1796 | EXPORT_SYMBOL(generic_file_readonly_mmap); | 2134 | EXPORT_SYMBOL(generic_file_readonly_mmap); |
| 1797 | 2135 | ||
| 2136 | static struct page *wait_on_page_read(struct page *page) | ||
| 2137 | { | ||
| 2138 | if (!IS_ERR(page)) { | ||
| 2139 | wait_on_page_locked(page); | ||
| 2140 | if (!PageUptodate(page)) { | ||
| 2141 | page_cache_release(page); | ||
| 2142 | page = ERR_PTR(-EIO); | ||
| 2143 | } | ||
| 2144 | } | ||
| 2145 | return page; | ||
| 2146 | } | ||
| 2147 | |||
| 1798 | static struct page *__read_cache_page(struct address_space *mapping, | 2148 | static struct page *__read_cache_page(struct address_space *mapping, |
| 1799 | pgoff_t index, | 2149 | pgoff_t index, |
| 1800 | int (*filler)(void *, struct page *), | 2150 | int (*filler)(void *, struct page *), |
| @@ -1821,6 +2171,8 @@ repeat: | |||
| 1821 | if (err < 0) { | 2171 | if (err < 0) { |
| 1822 | page_cache_release(page); | 2172 | page_cache_release(page); |
| 1823 | page = ERR_PTR(err); | 2173 | page = ERR_PTR(err); |
| 2174 | } else { | ||
| 2175 | page = wait_on_page_read(page); | ||
| 1824 | } | 2176 | } |
| 1825 | } | 2177 | } |
| 1826 | return page; | 2178 | return page; |
| @@ -1857,6 +2209,10 @@ retry: | |||
| 1857 | if (err < 0) { | 2209 | if (err < 0) { |
| 1858 | page_cache_release(page); | 2210 | page_cache_release(page); |
| 1859 | return ERR_PTR(err); | 2211 | return ERR_PTR(err); |
| 2212 | } else { | ||
| 2213 | page = wait_on_page_read(page); | ||
| 2214 | if (IS_ERR(page)) | ||
| 2215 | return page; | ||
| 1860 | } | 2216 | } |
| 1861 | out: | 2217 | out: |
| 1862 | mark_page_accessed(page); | 2218 | mark_page_accessed(page); |
| @@ -1864,40 +2220,25 @@ out: | |||
| 1864 | } | 2220 | } |
| 1865 | 2221 | ||
| 1866 | /** | 2222 | /** |
| 1867 | * read_cache_page_async - read into page cache, fill it if needed | 2223 | * read_cache_page - read into page cache, fill it if needed |
| 1868 | * @mapping: the page's address_space | 2224 | * @mapping: the page's address_space |
| 1869 | * @index: the page index | 2225 | * @index: the page index |
| 1870 | * @filler: function to perform the read | 2226 | * @filler: function to perform the read |
| 1871 | * @data: first arg to filler(data, page) function, often left as NULL | 2227 | * @data: first arg to filler(data, page) function, often left as NULL |
| 1872 | * | 2228 | * |
| 1873 | * Same as read_cache_page, but don't wait for page to become unlocked | ||
| 1874 | * after submitting it to the filler. | ||
| 1875 | * | ||
| 1876 | * Read into the page cache. If a page already exists, and PageUptodate() is | 2229 | * Read into the page cache. If a page already exists, and PageUptodate() is |
| 1877 | * not set, try to fill the page but don't wait for it to become unlocked. | 2230 | * not set, try to fill the page and wait for it to become unlocked. |
| 1878 | * | 2231 | * |
| 1879 | * If the page does not get brought uptodate, return -EIO. | 2232 | * If the page does not get brought uptodate, return -EIO. |
| 1880 | */ | 2233 | */ |
| 1881 | struct page *read_cache_page_async(struct address_space *mapping, | 2234 | struct page *read_cache_page(struct address_space *mapping, |
| 1882 | pgoff_t index, | 2235 | pgoff_t index, |
| 1883 | int (*filler)(void *, struct page *), | 2236 | int (*filler)(void *, struct page *), |
| 1884 | void *data) | 2237 | void *data) |
| 1885 | { | 2238 | { |
| 1886 | return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); | 2239 | return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); |
| 1887 | } | 2240 | } |
| 1888 | EXPORT_SYMBOL(read_cache_page_async); | 2241 | EXPORT_SYMBOL(read_cache_page); |
| 1889 | |||
| 1890 | static struct page *wait_on_page_read(struct page *page) | ||
| 1891 | { | ||
| 1892 | if (!IS_ERR(page)) { | ||
| 1893 | wait_on_page_locked(page); | ||
| 1894 | if (!PageUptodate(page)) { | ||
| 1895 | page_cache_release(page); | ||
| 1896 | page = ERR_PTR(-EIO); | ||
| 1897 | } | ||
| 1898 | } | ||
| 1899 | return page; | ||
| 1900 | } | ||
| 1901 | 2242 | ||
| 1902 | /** | 2243 | /** |
| 1903 | * read_cache_page_gfp - read into page cache, using specified page allocation flags. | 2244 | * read_cache_page_gfp - read into page cache, using specified page allocation flags. |
| @@ -1916,31 +2257,10 @@ struct page *read_cache_page_gfp(struct address_space *mapping, | |||
| 1916 | { | 2257 | { |
| 1917 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | 2258 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; |
| 1918 | 2259 | ||
| 1919 | return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp)); | 2260 | return do_read_cache_page(mapping, index, filler, NULL, gfp); |
| 1920 | } | 2261 | } |
| 1921 | EXPORT_SYMBOL(read_cache_page_gfp); | 2262 | EXPORT_SYMBOL(read_cache_page_gfp); |
| 1922 | 2263 | ||
| 1923 | /** | ||
| 1924 | * read_cache_page - read into page cache, fill it if needed | ||
| 1925 | * @mapping: the page's address_space | ||
| 1926 | * @index: the page index | ||
| 1927 | * @filler: function to perform the read | ||
| 1928 | * @data: first arg to filler(data, page) function, often left as NULL | ||
| 1929 | * | ||
| 1930 | * Read into the page cache. If a page already exists, and PageUptodate() is | ||
| 1931 | * not set, try to fill the page then wait for it to become unlocked. | ||
| 1932 | * | ||
| 1933 | * If the page does not get brought uptodate, return -EIO. | ||
| 1934 | */ | ||
| 1935 | struct page *read_cache_page(struct address_space *mapping, | ||
| 1936 | pgoff_t index, | ||
| 1937 | int (*filler)(void *, struct page *), | ||
| 1938 | void *data) | ||
| 1939 | { | ||
| 1940 | return wait_on_page_read(read_cache_page_async(mapping, index, filler, data)); | ||
| 1941 | } | ||
| 1942 | EXPORT_SYMBOL(read_cache_page); | ||
| 1943 | |||
| 1944 | static size_t __iovec_copy_from_user_inatomic(char *vaddr, | 2264 | static size_t __iovec_copy_from_user_inatomic(char *vaddr, |
| 1945 | const struct iovec *iov, size_t base, size_t bytes) | 2265 | const struct iovec *iov, size_t base, size_t bytes) |
| 1946 | { | 2266 | { |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1546655a2d78..6ac89e9f82ef 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -941,81 +941,6 @@ unlock: | |||
| 941 | spin_unlock(ptl); | 941 | spin_unlock(ptl); |
| 942 | } | 942 | } |
| 943 | 943 | ||
| 944 | static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm, | ||
| 945 | struct vm_area_struct *vma, unsigned long address, | ||
| 946 | pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr) | ||
| 947 | { | ||
| 948 | spinlock_t *ptl; | ||
| 949 | pgtable_t pgtable; | ||
| 950 | pmd_t _pmd; | ||
| 951 | struct page *page; | ||
| 952 | int i, ret = 0; | ||
| 953 | unsigned long mmun_start; /* For mmu_notifiers */ | ||
| 954 | unsigned long mmun_end; /* For mmu_notifiers */ | ||
| 955 | |||
| 956 | page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | ||
| 957 | if (!page) { | ||
| 958 | ret |= VM_FAULT_OOM; | ||
| 959 | goto out; | ||
| 960 | } | ||
| 961 | |||
| 962 | if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { | ||
| 963 | put_page(page); | ||
| 964 | ret |= VM_FAULT_OOM; | ||
| 965 | goto out; | ||
| 966 | } | ||
| 967 | |||
| 968 | clear_user_highpage(page, address); | ||
| 969 | __SetPageUptodate(page); | ||
| 970 | |||
| 971 | mmun_start = haddr; | ||
| 972 | mmun_end = haddr + HPAGE_PMD_SIZE; | ||
| 973 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | ||
| 974 | |||
| 975 | ptl = pmd_lock(mm, pmd); | ||
| 976 | if (unlikely(!pmd_same(*pmd, orig_pmd))) | ||
| 977 | goto out_free_page; | ||
| 978 | |||
| 979 | pmdp_clear_flush(vma, haddr, pmd); | ||
| 980 | /* leave pmd empty until pte is filled */ | ||
| 981 | |||
| 982 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); | ||
| 983 | pmd_populate(mm, &_pmd, pgtable); | ||
| 984 | |||
| 985 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { | ||
| 986 | pte_t *pte, entry; | ||
| 987 | if (haddr == (address & PAGE_MASK)) { | ||
| 988 | entry = mk_pte(page, vma->vm_page_prot); | ||
| 989 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | ||
| 990 | page_add_new_anon_rmap(page, vma, haddr); | ||
| 991 | } else { | ||
| 992 | entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); | ||
| 993 | entry = pte_mkspecial(entry); | ||
| 994 | } | ||
| 995 | pte = pte_offset_map(&_pmd, haddr); | ||
| 996 | VM_BUG_ON(!pte_none(*pte)); | ||
| 997 | set_pte_at(mm, haddr, pte, entry); | ||
| 998 | pte_unmap(pte); | ||
| 999 | } | ||
| 1000 | smp_wmb(); /* make pte visible before pmd */ | ||
| 1001 | pmd_populate(mm, pmd, pgtable); | ||
| 1002 | spin_unlock(ptl); | ||
| 1003 | put_huge_zero_page(); | ||
| 1004 | inc_mm_counter(mm, MM_ANONPAGES); | ||
| 1005 | |||
| 1006 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | ||
| 1007 | |||
| 1008 | ret |= VM_FAULT_WRITE; | ||
| 1009 | out: | ||
| 1010 | return ret; | ||
| 1011 | out_free_page: | ||
| 1012 | spin_unlock(ptl); | ||
| 1013 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | ||
| 1014 | mem_cgroup_uncharge_page(page); | ||
| 1015 | put_page(page); | ||
| 1016 | goto out; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, | 944 | static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, |
| 1020 | struct vm_area_struct *vma, | 945 | struct vm_area_struct *vma, |
| 1021 | unsigned long address, | 946 | unsigned long address, |
| @@ -1161,8 +1086,8 @@ alloc: | |||
| 1161 | 1086 | ||
| 1162 | if (unlikely(!new_page)) { | 1087 | if (unlikely(!new_page)) { |
| 1163 | if (!page) { | 1088 | if (!page) { |
| 1164 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, | 1089 | split_huge_page_pmd(vma, address, pmd); |
| 1165 | address, pmd, orig_pmd, haddr); | 1090 | ret |= VM_FAULT_FALLBACK; |
| 1166 | } else { | 1091 | } else { |
| 1167 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, | 1092 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, |
| 1168 | pmd, orig_pmd, page, haddr); | 1093 | pmd, orig_pmd, page, haddr); |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c01cb9fedb18..7c02b9dadfb0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/swap.h> | 22 | #include <linux/swap.h> |
| 23 | #include <linux/swapops.h> | 23 | #include <linux/swapops.h> |
| 24 | #include <linux/page-isolation.h> | 24 | #include <linux/page-isolation.h> |
| 25 | #include <linux/jhash.h> | ||
| 25 | 26 | ||
| 26 | #include <asm/page.h> | 27 | #include <asm/page.h> |
| 27 | #include <asm/pgtable.h> | 28 | #include <asm/pgtable.h> |
| @@ -53,6 +54,13 @@ static unsigned long __initdata default_hstate_size; | |||
| 53 | */ | 54 | */ |
| 54 | DEFINE_SPINLOCK(hugetlb_lock); | 55 | DEFINE_SPINLOCK(hugetlb_lock); |
| 55 | 56 | ||
| 57 | /* | ||
| 58 | * Serializes faults on the same logical page. This is used to | ||
| 59 | * prevent spurious OOMs when the hugepage pool is fully utilized. | ||
| 60 | */ | ||
| 61 | static int num_fault_mutexes; | ||
| 62 | static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp; | ||
| 63 | |||
| 56 | static inline void unlock_or_release_subpool(struct hugepage_subpool *spool) | 64 | static inline void unlock_or_release_subpool(struct hugepage_subpool *spool) |
| 57 | { | 65 | { |
| 58 | bool free = (spool->count == 0) && (spool->used_hpages == 0); | 66 | bool free = (spool->count == 0) && (spool->used_hpages == 0); |
| @@ -135,15 +143,8 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) | |||
| 135 | * Region tracking -- allows tracking of reservations and instantiated pages | 143 | * Region tracking -- allows tracking of reservations and instantiated pages |
| 136 | * across the pages in a mapping. | 144 | * across the pages in a mapping. |
| 137 | * | 145 | * |
| 138 | * The region data structures are protected by a combination of the mmap_sem | 146 | * The region data structures are embedded into a resv_map and |
| 139 | * and the hugetlb_instantiation_mutex. To access or modify a region the caller | 147 | * protected by a resv_map's lock |
| 140 | * must either hold the mmap_sem for write, or the mmap_sem for read and | ||
| 141 | * the hugetlb_instantiation_mutex: | ||
| 142 | * | ||
| 143 | * down_write(&mm->mmap_sem); | ||
| 144 | * or | ||
| 145 | * down_read(&mm->mmap_sem); | ||
| 146 | * mutex_lock(&hugetlb_instantiation_mutex); | ||
| 147 | */ | 148 | */ |
| 148 | struct file_region { | 149 | struct file_region { |
| 149 | struct list_head link; | 150 | struct list_head link; |
| @@ -151,10 +152,12 @@ struct file_region { | |||
| 151 | long to; | 152 | long to; |
| 152 | }; | 153 | }; |
| 153 | 154 | ||
| 154 | static long region_add(struct list_head *head, long f, long t) | 155 | static long region_add(struct resv_map *resv, long f, long t) |
| 155 | { | 156 | { |
| 157 | struct list_head *head = &resv->regions; | ||
| 156 | struct file_region *rg, *nrg, *trg; | 158 | struct file_region *rg, *nrg, *trg; |
| 157 | 159 | ||
| 160 | spin_lock(&resv->lock); | ||
| 158 | /* Locate the region we are either in or before. */ | 161 | /* Locate the region we are either in or before. */ |
| 159 | list_for_each_entry(rg, head, link) | 162 | list_for_each_entry(rg, head, link) |
| 160 | if (f <= rg->to) | 163 | if (f <= rg->to) |
| @@ -184,14 +187,18 @@ static long region_add(struct list_head *head, long f, long t) | |||
| 184 | } | 187 | } |
| 185 | nrg->from = f; | 188 | nrg->from = f; |
| 186 | nrg->to = t; | 189 | nrg->to = t; |
| 190 | spin_unlock(&resv->lock); | ||
| 187 | return 0; | 191 | return 0; |
| 188 | } | 192 | } |
| 189 | 193 | ||
| 190 | static long region_chg(struct list_head *head, long f, long t) | 194 | static long region_chg(struct resv_map *resv, long f, long t) |
| 191 | { | 195 | { |
| 192 | struct file_region *rg, *nrg; | 196 | struct list_head *head = &resv->regions; |
| 197 | struct file_region *rg, *nrg = NULL; | ||
| 193 | long chg = 0; | 198 | long chg = 0; |
| 194 | 199 | ||
| 200 | retry: | ||
| 201 | spin_lock(&resv->lock); | ||
| 195 | /* Locate the region we are before or in. */ | 202 | /* Locate the region we are before or in. */ |
| 196 | list_for_each_entry(rg, head, link) | 203 | list_for_each_entry(rg, head, link) |
| 197 | if (f <= rg->to) | 204 | if (f <= rg->to) |
| @@ -201,15 +208,21 @@ static long region_chg(struct list_head *head, long f, long t) | |||
| 201 | * Subtle, allocate a new region at the position but make it zero | 208 | * Subtle, allocate a new region at the position but make it zero |
| 202 | * size such that we can guarantee to record the reservation. */ | 209 | * size such that we can guarantee to record the reservation. */ |
| 203 | if (&rg->link == head || t < rg->from) { | 210 | if (&rg->link == head || t < rg->from) { |
| 204 | nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); | 211 | if (!nrg) { |
| 205 | if (!nrg) | 212 | spin_unlock(&resv->lock); |
| 206 | return -ENOMEM; | 213 | nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); |
| 207 | nrg->from = f; | 214 | if (!nrg) |
| 208 | nrg->to = f; | 215 | return -ENOMEM; |
| 209 | INIT_LIST_HEAD(&nrg->link); | 216 | |
| 210 | list_add(&nrg->link, rg->link.prev); | 217 | nrg->from = f; |
| 218 | nrg->to = f; | ||
| 219 | INIT_LIST_HEAD(&nrg->link); | ||
| 220 | goto retry; | ||
| 221 | } | ||
| 211 | 222 | ||
| 212 | return t - f; | 223 | list_add(&nrg->link, rg->link.prev); |
| 224 | chg = t - f; | ||
| 225 | goto out_nrg; | ||
| 213 | } | 226 | } |
| 214 | 227 | ||
| 215 | /* Round our left edge to the current segment if it encloses us. */ | 228 | /* Round our left edge to the current segment if it encloses us. */ |
| @@ -222,7 +235,7 @@ static long region_chg(struct list_head *head, long f, long t) | |||
| 222 | if (&rg->link == head) | 235 | if (&rg->link == head) |
| 223 | break; | 236 | break; |
| 224 | if (rg->from > t) | 237 | if (rg->from > t) |
| 225 | return chg; | 238 | goto out; |
| 226 | 239 | ||
| 227 | /* We overlap with this area, if it extends further than | 240 | /* We overlap with this area, if it extends further than |
| 228 | * us then we must extend ourselves. Account for its | 241 | * us then we must extend ourselves. Account for its |
| @@ -233,20 +246,30 @@ static long region_chg(struct list_head *head, long f, long t) | |||
| 233 | } | 246 | } |
| 234 | chg -= rg->to - rg->from; | 247 | chg -= rg->to - rg->from; |
| 235 | } | 248 | } |
| 249 | |||
| 250 | out: | ||
| 251 | spin_unlock(&resv->lock); | ||
| 252 | /* We already know we raced and no longer need the new region */ | ||
| 253 | kfree(nrg); | ||
| 254 | return chg; | ||
| 255 | out_nrg: | ||
| 256 | spin_unlock(&resv->lock); | ||
| 236 | return chg; | 257 | return chg; |
| 237 | } | 258 | } |
| 238 | 259 | ||
| 239 | static long region_truncate(struct list_head *head, long end) | 260 | static long region_truncate(struct resv_map *resv, long end) |
| 240 | { | 261 | { |
| 262 | struct list_head *head = &resv->regions; | ||
| 241 | struct file_region *rg, *trg; | 263 | struct file_region *rg, *trg; |
| 242 | long chg = 0; | 264 | long chg = 0; |
| 243 | 265 | ||
| 266 | spin_lock(&resv->lock); | ||
| 244 | /* Locate the region we are either in or before. */ | 267 | /* Locate the region we are either in or before. */ |
| 245 | list_for_each_entry(rg, head, link) | 268 | list_for_each_entry(rg, head, link) |
| 246 | if (end <= rg->to) | 269 | if (end <= rg->to) |
| 247 | break; | 270 | break; |
| 248 | if (&rg->link == head) | 271 | if (&rg->link == head) |
| 249 | return 0; | 272 | goto out; |
| 250 | 273 | ||
| 251 | /* If we are in the middle of a region then adjust it. */ | 274 | /* If we are in the middle of a region then adjust it. */ |
| 252 | if (end > rg->from) { | 275 | if (end > rg->from) { |
| @@ -263,14 +286,19 @@ static long region_truncate(struct list_head *head, long end) | |||
| 263 | list_del(&rg->link); | 286 | list_del(&rg->link); |
| 264 | kfree(rg); | 287 | kfree(rg); |
| 265 | } | 288 | } |
| 289 | |||
| 290 | out: | ||
| 291 | spin_unlock(&resv->lock); | ||
| 266 | return chg; | 292 | return chg; |
| 267 | } | 293 | } |
| 268 | 294 | ||
| 269 | static long region_count(struct list_head *head, long f, long t) | 295 | static long region_count(struct resv_map *resv, long f, long t) |
| 270 | { | 296 | { |
| 297 | struct list_head *head = &resv->regions; | ||
| 271 | struct file_region *rg; | 298 | struct file_region *rg; |
| 272 | long chg = 0; | 299 | long chg = 0; |
| 273 | 300 | ||
| 301 | spin_lock(&resv->lock); | ||
| 274 | /* Locate each segment we overlap with, and count that overlap. */ | 302 | /* Locate each segment we overlap with, and count that overlap. */ |
| 275 | list_for_each_entry(rg, head, link) { | 303 | list_for_each_entry(rg, head, link) { |
| 276 | long seg_from; | 304 | long seg_from; |
| @@ -286,6 +314,7 @@ static long region_count(struct list_head *head, long f, long t) | |||
| 286 | 314 | ||
| 287 | chg += seg_to - seg_from; | 315 | chg += seg_to - seg_from; |
| 288 | } | 316 | } |
| 317 | spin_unlock(&resv->lock); | ||
| 289 | 318 | ||
| 290 | return chg; | 319 | return chg; |
| 291 | } | 320 | } |
| @@ -376,39 +405,46 @@ static void set_vma_private_data(struct vm_area_struct *vma, | |||
| 376 | vma->vm_private_data = (void *)value; | 405 | vma->vm_private_data = (void *)value; |
| 377 | } | 406 | } |
| 378 | 407 | ||
| 379 | struct resv_map { | 408 | struct resv_map *resv_map_alloc(void) |
| 380 | struct kref refs; | ||
| 381 | struct list_head regions; | ||
| 382 | }; | ||
| 383 | |||
| 384 | static struct resv_map *resv_map_alloc(void) | ||
| 385 | { | 409 | { |
| 386 | struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); | 410 | struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); |
| 387 | if (!resv_map) | 411 | if (!resv_map) |
| 388 | return NULL; | 412 | return NULL; |
| 389 | 413 | ||
| 390 | kref_init(&resv_map->refs); | 414 | kref_init(&resv_map->refs); |
| 415 | spin_lock_init(&resv_map->lock); | ||
| 391 | INIT_LIST_HEAD(&resv_map->regions); | 416 | INIT_LIST_HEAD(&resv_map->regions); |
| 392 | 417 | ||
| 393 | return resv_map; | 418 | return resv_map; |
| 394 | } | 419 | } |
| 395 | 420 | ||
| 396 | static void resv_map_release(struct kref *ref) | 421 | void resv_map_release(struct kref *ref) |
| 397 | { | 422 | { |
| 398 | struct resv_map *resv_map = container_of(ref, struct resv_map, refs); | 423 | struct resv_map *resv_map = container_of(ref, struct resv_map, refs); |
| 399 | 424 | ||
| 400 | /* Clear out any active regions before we release the map. */ | 425 | /* Clear out any active regions before we release the map. */ |
| 401 | region_truncate(&resv_map->regions, 0); | 426 | region_truncate(resv_map, 0); |
| 402 | kfree(resv_map); | 427 | kfree(resv_map); |
| 403 | } | 428 | } |
| 404 | 429 | ||
| 430 | static inline struct resv_map *inode_resv_map(struct inode *inode) | ||
| 431 | { | ||
| 432 | return inode->i_mapping->private_data; | ||
| 433 | } | ||
| 434 | |||
| 405 | static struct resv_map *vma_resv_map(struct vm_area_struct *vma) | 435 | static struct resv_map *vma_resv_map(struct vm_area_struct *vma) |
| 406 | { | 436 | { |
| 407 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); | 437 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); |
| 408 | if (!(vma->vm_flags & VM_MAYSHARE)) | 438 | if (vma->vm_flags & VM_MAYSHARE) { |
| 439 | struct address_space *mapping = vma->vm_file->f_mapping; | ||
| 440 | struct inode *inode = mapping->host; | ||
| 441 | |||
| 442 | return inode_resv_map(inode); | ||
| 443 | |||
| 444 | } else { | ||
| 409 | return (struct resv_map *)(get_vma_private_data(vma) & | 445 | return (struct resv_map *)(get_vma_private_data(vma) & |
| 410 | ~HPAGE_RESV_MASK); | 446 | ~HPAGE_RESV_MASK); |
| 411 | return NULL; | 447 | } |
| 412 | } | 448 | } |
| 413 | 449 | ||
| 414 | static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) | 450 | static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) |
| @@ -540,7 +576,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h, | |||
| 540 | goto err; | 576 | goto err; |
| 541 | 577 | ||
| 542 | retry_cpuset: | 578 | retry_cpuset: |
| 543 | cpuset_mems_cookie = get_mems_allowed(); | 579 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 544 | zonelist = huge_zonelist(vma, address, | 580 | zonelist = huge_zonelist(vma, address, |
| 545 | htlb_alloc_mask(h), &mpol, &nodemask); | 581 | htlb_alloc_mask(h), &mpol, &nodemask); |
| 546 | 582 | ||
| @@ -562,7 +598,7 @@ retry_cpuset: | |||
| 562 | } | 598 | } |
| 563 | 599 | ||
| 564 | mpol_cond_put(mpol); | 600 | mpol_cond_put(mpol); |
| 565 | if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) | 601 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) |
| 566 | goto retry_cpuset; | 602 | goto retry_cpuset; |
| 567 | return page; | 603 | return page; |
| 568 | 604 | ||
| @@ -653,7 +689,8 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) | |||
| 653 | put_page(page); /* free it into the hugepage allocator */ | 689 | put_page(page); /* free it into the hugepage allocator */ |
| 654 | } | 690 | } |
| 655 | 691 | ||
| 656 | static void prep_compound_gigantic_page(struct page *page, unsigned long order) | 692 | static void __init prep_compound_gigantic_page(struct page *page, |
| 693 | unsigned long order) | ||
| 657 | { | 694 | { |
| 658 | int i; | 695 | int i; |
| 659 | int nr_pages = 1 << order; | 696 | int nr_pages = 1 << order; |
| @@ -1150,45 +1187,34 @@ static void return_unused_surplus_pages(struct hstate *h, | |||
| 1150 | static long vma_needs_reservation(struct hstate *h, | 1187 | static long vma_needs_reservation(struct hstate *h, |
| 1151 | struct vm_area_struct *vma, unsigned long addr) | 1188 | struct vm_area_struct *vma, unsigned long addr) |
| 1152 | { | 1189 | { |
| 1153 | struct address_space *mapping = vma->vm_file->f_mapping; | 1190 | struct resv_map *resv; |
| 1154 | struct inode *inode = mapping->host; | 1191 | pgoff_t idx; |
| 1155 | 1192 | long chg; | |
| 1156 | if (vma->vm_flags & VM_MAYSHARE) { | ||
| 1157 | pgoff_t idx = vma_hugecache_offset(h, vma, addr); | ||
| 1158 | return region_chg(&inode->i_mapping->private_list, | ||
| 1159 | idx, idx + 1); | ||
| 1160 | 1193 | ||
| 1161 | } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { | 1194 | resv = vma_resv_map(vma); |
| 1195 | if (!resv) | ||
| 1162 | return 1; | 1196 | return 1; |
| 1163 | 1197 | ||
| 1164 | } else { | 1198 | idx = vma_hugecache_offset(h, vma, addr); |
| 1165 | long err; | 1199 | chg = region_chg(resv, idx, idx + 1); |
| 1166 | pgoff_t idx = vma_hugecache_offset(h, vma, addr); | ||
| 1167 | struct resv_map *resv = vma_resv_map(vma); | ||
| 1168 | 1200 | ||
| 1169 | err = region_chg(&resv->regions, idx, idx + 1); | 1201 | if (vma->vm_flags & VM_MAYSHARE) |
| 1170 | if (err < 0) | 1202 | return chg; |
| 1171 | return err; | 1203 | else |
| 1172 | return 0; | 1204 | return chg < 0 ? chg : 0; |
| 1173 | } | ||
| 1174 | } | 1205 | } |
| 1175 | static void vma_commit_reservation(struct hstate *h, | 1206 | static void vma_commit_reservation(struct hstate *h, |
| 1176 | struct vm_area_struct *vma, unsigned long addr) | 1207 | struct vm_area_struct *vma, unsigned long addr) |
| 1177 | { | 1208 | { |
| 1178 | struct address_space *mapping = vma->vm_file->f_mapping; | 1209 | struct resv_map *resv; |
| 1179 | struct inode *inode = mapping->host; | 1210 | pgoff_t idx; |
| 1180 | |||
| 1181 | if (vma->vm_flags & VM_MAYSHARE) { | ||
| 1182 | pgoff_t idx = vma_hugecache_offset(h, vma, addr); | ||
| 1183 | region_add(&inode->i_mapping->private_list, idx, idx + 1); | ||
| 1184 | 1211 | ||
| 1185 | } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { | 1212 | resv = vma_resv_map(vma); |
| 1186 | pgoff_t idx = vma_hugecache_offset(h, vma, addr); | 1213 | if (!resv) |
| 1187 | struct resv_map *resv = vma_resv_map(vma); | 1214 | return; |
| 1188 | 1215 | ||
| 1189 | /* Mark this page used in the map. */ | 1216 | idx = vma_hugecache_offset(h, vma, addr); |
| 1190 | region_add(&resv->regions, idx, idx + 1); | 1217 | region_add(resv, idx, idx + 1); |
| 1191 | } | ||
| 1192 | } | 1218 | } |
| 1193 | 1219 | ||
| 1194 | static struct page *alloc_huge_page(struct vm_area_struct *vma, | 1220 | static struct page *alloc_huge_page(struct vm_area_struct *vma, |
| @@ -1294,7 +1320,7 @@ found: | |||
| 1294 | return 1; | 1320 | return 1; |
| 1295 | } | 1321 | } |
| 1296 | 1322 | ||
| 1297 | static void prep_compound_huge_page(struct page *page, int order) | 1323 | static void __init prep_compound_huge_page(struct page *page, int order) |
| 1298 | { | 1324 | { |
| 1299 | if (unlikely(order > (MAX_ORDER - 1))) | 1325 | if (unlikely(order > (MAX_ORDER - 1))) |
| 1300 | prep_compound_gigantic_page(page, order); | 1326 | prep_compound_gigantic_page(page, order); |
| @@ -1944,11 +1970,14 @@ static void __exit hugetlb_exit(void) | |||
| 1944 | } | 1970 | } |
| 1945 | 1971 | ||
| 1946 | kobject_put(hugepages_kobj); | 1972 | kobject_put(hugepages_kobj); |
| 1973 | kfree(htlb_fault_mutex_table); | ||
| 1947 | } | 1974 | } |
| 1948 | module_exit(hugetlb_exit); | 1975 | module_exit(hugetlb_exit); |
| 1949 | 1976 | ||
| 1950 | static int __init hugetlb_init(void) | 1977 | static int __init hugetlb_init(void) |
| 1951 | { | 1978 | { |
| 1979 | int i; | ||
| 1980 | |||
| 1952 | /* Some platform decide whether they support huge pages at boot | 1981 | /* Some platform decide whether they support huge pages at boot |
| 1953 | * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when | 1982 | * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when |
| 1954 | * there is no such support | 1983 | * there is no such support |
| @@ -1973,6 +2002,17 @@ static int __init hugetlb_init(void) | |||
| 1973 | hugetlb_register_all_nodes(); | 2002 | hugetlb_register_all_nodes(); |
| 1974 | hugetlb_cgroup_file_init(); | 2003 | hugetlb_cgroup_file_init(); |
| 1975 | 2004 | ||
| 2005 | #ifdef CONFIG_SMP | ||
| 2006 | num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); | ||
| 2007 | #else | ||
| 2008 | num_fault_mutexes = 1; | ||
| 2009 | #endif | ||
| 2010 | htlb_fault_mutex_table = | ||
| 2011 | kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL); | ||
| 2012 | BUG_ON(!htlb_fault_mutex_table); | ||
| 2013 | |||
| 2014 | for (i = 0; i < num_fault_mutexes; i++) | ||
| 2015 | mutex_init(&htlb_fault_mutex_table[i]); | ||
| 1976 | return 0; | 2016 | return 0; |
| 1977 | } | 2017 | } |
| 1978 | module_init(hugetlb_init); | 2018 | module_init(hugetlb_init); |
| @@ -2251,41 +2291,30 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) | |||
| 2251 | * after this open call completes. It is therefore safe to take a | 2291 | * after this open call completes. It is therefore safe to take a |
| 2252 | * new reference here without additional locking. | 2292 | * new reference here without additional locking. |
| 2253 | */ | 2293 | */ |
| 2254 | if (resv) | 2294 | if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) |
| 2255 | kref_get(&resv->refs); | 2295 | kref_get(&resv->refs); |
| 2256 | } | 2296 | } |
| 2257 | 2297 | ||
| 2258 | static void resv_map_put(struct vm_area_struct *vma) | ||
| 2259 | { | ||
| 2260 | struct resv_map *resv = vma_resv_map(vma); | ||
| 2261 | |||
| 2262 | if (!resv) | ||
| 2263 | return; | ||
| 2264 | kref_put(&resv->refs, resv_map_release); | ||
| 2265 | } | ||
| 2266 | |||
| 2267 | static void hugetlb_vm_op_close(struct vm_area_struct *vma) | 2298 | static void hugetlb_vm_op_close(struct vm_area_struct *vma) |
| 2268 | { | 2299 | { |
| 2269 | struct hstate *h = hstate_vma(vma); | 2300 | struct hstate *h = hstate_vma(vma); |
| 2270 | struct resv_map *resv = vma_resv_map(vma); | 2301 | struct resv_map *resv = vma_resv_map(vma); |
| 2271 | struct hugepage_subpool *spool = subpool_vma(vma); | 2302 | struct hugepage_subpool *spool = subpool_vma(vma); |
| 2272 | unsigned long reserve; | 2303 | unsigned long reserve, start, end; |
| 2273 | unsigned long start; | ||
| 2274 | unsigned long end; | ||
| 2275 | 2304 | ||
| 2276 | if (resv) { | 2305 | if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) |
| 2277 | start = vma_hugecache_offset(h, vma, vma->vm_start); | 2306 | return; |
| 2278 | end = vma_hugecache_offset(h, vma, vma->vm_end); | ||
| 2279 | 2307 | ||
| 2280 | reserve = (end - start) - | 2308 | start = vma_hugecache_offset(h, vma, vma->vm_start); |
| 2281 | region_count(&resv->regions, start, end); | 2309 | end = vma_hugecache_offset(h, vma, vma->vm_end); |
| 2282 | 2310 | ||
| 2283 | resv_map_put(vma); | 2311 | reserve = (end - start) - region_count(resv, start, end); |
| 2284 | 2312 | ||
| 2285 | if (reserve) { | 2313 | kref_put(&resv->refs, resv_map_release); |
| 2286 | hugetlb_acct_memory(h, -reserve); | 2314 | |
| 2287 | hugepage_subpool_put_pages(spool, reserve); | 2315 | if (reserve) { |
| 2288 | } | 2316 | hugetlb_acct_memory(h, -reserve); |
| 2317 | hugepage_subpool_put_pages(spool, reserve); | ||
| 2289 | } | 2318 | } |
| 2290 | } | 2319 | } |
| 2291 | 2320 | ||
| @@ -2761,15 +2790,14 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, | |||
| 2761 | } | 2790 | } |
| 2762 | 2791 | ||
| 2763 | static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, | 2792 | static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2764 | unsigned long address, pte_t *ptep, unsigned int flags) | 2793 | struct address_space *mapping, pgoff_t idx, |
| 2794 | unsigned long address, pte_t *ptep, unsigned int flags) | ||
| 2765 | { | 2795 | { |
| 2766 | struct hstate *h = hstate_vma(vma); | 2796 | struct hstate *h = hstate_vma(vma); |
| 2767 | int ret = VM_FAULT_SIGBUS; | 2797 | int ret = VM_FAULT_SIGBUS; |
| 2768 | int anon_rmap = 0; | 2798 | int anon_rmap = 0; |
| 2769 | pgoff_t idx; | ||
| 2770 | unsigned long size; | 2799 | unsigned long size; |
| 2771 | struct page *page; | 2800 | struct page *page; |
| 2772 | struct address_space *mapping; | ||
| 2773 | pte_t new_pte; | 2801 | pte_t new_pte; |
| 2774 | spinlock_t *ptl; | 2802 | spinlock_t *ptl; |
| 2775 | 2803 | ||
| @@ -2784,9 +2812,6 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2784 | return ret; | 2812 | return ret; |
| 2785 | } | 2813 | } |
| 2786 | 2814 | ||
| 2787 | mapping = vma->vm_file->f_mapping; | ||
| 2788 | idx = vma_hugecache_offset(h, vma, address); | ||
| 2789 | |||
| 2790 | /* | 2815 | /* |
| 2791 | * Use page lock to guard against racing truncation | 2816 | * Use page lock to guard against racing truncation |
| 2792 | * before we get page_table_lock. | 2817 | * before we get page_table_lock. |
| @@ -2896,17 +2921,53 @@ backout_unlocked: | |||
| 2896 | goto out; | 2921 | goto out; |
| 2897 | } | 2922 | } |
| 2898 | 2923 | ||
| 2924 | #ifdef CONFIG_SMP | ||
| 2925 | static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, | ||
| 2926 | struct vm_area_struct *vma, | ||
| 2927 | struct address_space *mapping, | ||
| 2928 | pgoff_t idx, unsigned long address) | ||
| 2929 | { | ||
| 2930 | unsigned long key[2]; | ||
| 2931 | u32 hash; | ||
| 2932 | |||
| 2933 | if (vma->vm_flags & VM_SHARED) { | ||
| 2934 | key[0] = (unsigned long) mapping; | ||
| 2935 | key[1] = idx; | ||
| 2936 | } else { | ||
| 2937 | key[0] = (unsigned long) mm; | ||
| 2938 | key[1] = address >> huge_page_shift(h); | ||
| 2939 | } | ||
| 2940 | |||
| 2941 | hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); | ||
| 2942 | |||
| 2943 | return hash & (num_fault_mutexes - 1); | ||
| 2944 | } | ||
| 2945 | #else | ||
| 2946 | /* | ||
| 2947 | * For uniprocesor systems we always use a single mutex, so just | ||
| 2948 | * return 0 and avoid the hashing overhead. | ||
| 2949 | */ | ||
| 2950 | static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, | ||
| 2951 | struct vm_area_struct *vma, | ||
| 2952 | struct address_space *mapping, | ||
| 2953 | pgoff_t idx, unsigned long address) | ||
| 2954 | { | ||
| 2955 | return 0; | ||
| 2956 | } | ||
| 2957 | #endif | ||
| 2958 | |||
| 2899 | int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | 2959 | int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2900 | unsigned long address, unsigned int flags) | 2960 | unsigned long address, unsigned int flags) |
| 2901 | { | 2961 | { |
| 2902 | pte_t *ptep; | 2962 | pte_t *ptep, entry; |
| 2903 | pte_t entry; | ||
| 2904 | spinlock_t *ptl; | 2963 | spinlock_t *ptl; |
| 2905 | int ret; | 2964 | int ret; |
| 2965 | u32 hash; | ||
| 2966 | pgoff_t idx; | ||
| 2906 | struct page *page = NULL; | 2967 | struct page *page = NULL; |
| 2907 | struct page *pagecache_page = NULL; | 2968 | struct page *pagecache_page = NULL; |
| 2908 | static DEFINE_MUTEX(hugetlb_instantiation_mutex); | ||
| 2909 | struct hstate *h = hstate_vma(vma); | 2969 | struct hstate *h = hstate_vma(vma); |
| 2970 | struct address_space *mapping; | ||
| 2910 | 2971 | ||
| 2911 | address &= huge_page_mask(h); | 2972 | address &= huge_page_mask(h); |
| 2912 | 2973 | ||
| @@ -2925,15 +2986,20 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2925 | if (!ptep) | 2986 | if (!ptep) |
| 2926 | return VM_FAULT_OOM; | 2987 | return VM_FAULT_OOM; |
| 2927 | 2988 | ||
| 2989 | mapping = vma->vm_file->f_mapping; | ||
| 2990 | idx = vma_hugecache_offset(h, vma, address); | ||
| 2991 | |||
| 2928 | /* | 2992 | /* |
| 2929 | * Serialize hugepage allocation and instantiation, so that we don't | 2993 | * Serialize hugepage allocation and instantiation, so that we don't |
| 2930 | * get spurious allocation failures if two CPUs race to instantiate | 2994 | * get spurious allocation failures if two CPUs race to instantiate |
| 2931 | * the same page in the page cache. | 2995 | * the same page in the page cache. |
| 2932 | */ | 2996 | */ |
| 2933 | mutex_lock(&hugetlb_instantiation_mutex); | 2997 | hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); |
| 2998 | mutex_lock(&htlb_fault_mutex_table[hash]); | ||
| 2999 | |||
| 2934 | entry = huge_ptep_get(ptep); | 3000 | entry = huge_ptep_get(ptep); |
| 2935 | if (huge_pte_none(entry)) { | 3001 | if (huge_pte_none(entry)) { |
| 2936 | ret = hugetlb_no_page(mm, vma, address, ptep, flags); | 3002 | ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); |
| 2937 | goto out_mutex; | 3003 | goto out_mutex; |
| 2938 | } | 3004 | } |
| 2939 | 3005 | ||
| @@ -3002,8 +3068,7 @@ out_ptl: | |||
| 3002 | put_page(page); | 3068 | put_page(page); |
| 3003 | 3069 | ||
| 3004 | out_mutex: | 3070 | out_mutex: |
| 3005 | mutex_unlock(&hugetlb_instantiation_mutex); | 3071 | mutex_unlock(&htlb_fault_mutex_table[hash]); |
| 3006 | |||
| 3007 | return ret; | 3072 | return ret; |
| 3008 | } | 3073 | } |
| 3009 | 3074 | ||
| @@ -3161,6 +3226,7 @@ int hugetlb_reserve_pages(struct inode *inode, | |||
| 3161 | long ret, chg; | 3226 | long ret, chg; |
| 3162 | struct hstate *h = hstate_inode(inode); | 3227 | struct hstate *h = hstate_inode(inode); |
| 3163 | struct hugepage_subpool *spool = subpool_inode(inode); | 3228 | struct hugepage_subpool *spool = subpool_inode(inode); |
| 3229 | struct resv_map *resv_map; | ||
| 3164 | 3230 | ||
| 3165 | /* | 3231 | /* |
| 3166 | * Only apply hugepage reservation if asked. At fault time, an | 3232 | * Only apply hugepage reservation if asked. At fault time, an |
| @@ -3176,10 +3242,13 @@ int hugetlb_reserve_pages(struct inode *inode, | |||
| 3176 | * to reserve the full area even if read-only as mprotect() may be | 3242 | * to reserve the full area even if read-only as mprotect() may be |
| 3177 | * called to make the mapping read-write. Assume !vma is a shm mapping | 3243 | * called to make the mapping read-write. Assume !vma is a shm mapping |
| 3178 | */ | 3244 | */ |
| 3179 | if (!vma || vma->vm_flags & VM_MAYSHARE) | 3245 | if (!vma || vma->vm_flags & VM_MAYSHARE) { |
| 3180 | chg = region_chg(&inode->i_mapping->private_list, from, to); | 3246 | resv_map = inode_resv_map(inode); |
| 3181 | else { | 3247 | |
| 3182 | struct resv_map *resv_map = resv_map_alloc(); | 3248 | chg = region_chg(resv_map, from, to); |
| 3249 | |||
| 3250 | } else { | ||
| 3251 | resv_map = resv_map_alloc(); | ||
| 3183 | if (!resv_map) | 3252 | if (!resv_map) |
| 3184 | return -ENOMEM; | 3253 | return -ENOMEM; |
| 3185 | 3254 | ||
| @@ -3222,20 +3291,23 @@ int hugetlb_reserve_pages(struct inode *inode, | |||
| 3222 | * else has to be done for private mappings here | 3291 | * else has to be done for private mappings here |
| 3223 | */ | 3292 | */ |
| 3224 | if (!vma || vma->vm_flags & VM_MAYSHARE) | 3293 | if (!vma || vma->vm_flags & VM_MAYSHARE) |
| 3225 | region_add(&inode->i_mapping->private_list, from, to); | 3294 | region_add(resv_map, from, to); |
| 3226 | return 0; | 3295 | return 0; |
| 3227 | out_err: | 3296 | out_err: |
| 3228 | if (vma) | 3297 | if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) |
| 3229 | resv_map_put(vma); | 3298 | kref_put(&resv_map->refs, resv_map_release); |
| 3230 | return ret; | 3299 | return ret; |
| 3231 | } | 3300 | } |
| 3232 | 3301 | ||
| 3233 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) | 3302 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) |
| 3234 | { | 3303 | { |
| 3235 | struct hstate *h = hstate_inode(inode); | 3304 | struct hstate *h = hstate_inode(inode); |
| 3236 | long chg = region_truncate(&inode->i_mapping->private_list, offset); | 3305 | struct resv_map *resv_map = inode_resv_map(inode); |
| 3306 | long chg = 0; | ||
| 3237 | struct hugepage_subpool *spool = subpool_inode(inode); | 3307 | struct hugepage_subpool *spool = subpool_inode(inode); |
| 3238 | 3308 | ||
| 3309 | if (resv_map) | ||
| 3310 | chg = region_truncate(resv_map, offset); | ||
| 3239 | spin_lock(&inode->i_lock); | 3311 | spin_lock(&inode->i_lock); |
| 3240 | inode->i_blocks -= (blocks_per_huge_page(h) * freed); | 3312 | inode->i_blocks -= (blocks_per_huge_page(h) * freed); |
| 3241 | spin_unlock(&inode->i_lock); | 3313 | spin_unlock(&inode->i_lock); |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 31f01c5011e5..91d67eaee050 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
| @@ -192,15 +192,15 @@ static struct kmem_cache *object_cache; | |||
| 192 | static struct kmem_cache *scan_area_cache; | 192 | static struct kmem_cache *scan_area_cache; |
| 193 | 193 | ||
| 194 | /* set if tracing memory operations is enabled */ | 194 | /* set if tracing memory operations is enabled */ |
| 195 | static atomic_t kmemleak_enabled = ATOMIC_INIT(0); | 195 | static int kmemleak_enabled; |
| 196 | /* set in the late_initcall if there were no errors */ | 196 | /* set in the late_initcall if there were no errors */ |
| 197 | static atomic_t kmemleak_initialized = ATOMIC_INIT(0); | 197 | static int kmemleak_initialized; |
| 198 | /* enables or disables early logging of the memory operations */ | 198 | /* enables or disables early logging of the memory operations */ |
| 199 | static atomic_t kmemleak_early_log = ATOMIC_INIT(1); | 199 | static int kmemleak_early_log = 1; |
| 200 | /* set if a kmemleak warning was issued */ | 200 | /* set if a kmemleak warning was issued */ |
| 201 | static atomic_t kmemleak_warning = ATOMIC_INIT(0); | 201 | static int kmemleak_warning; |
| 202 | /* set if a fatal kmemleak error has occurred */ | 202 | /* set if a fatal kmemleak error has occurred */ |
| 203 | static atomic_t kmemleak_error = ATOMIC_INIT(0); | 203 | static int kmemleak_error; |
| 204 | 204 | ||
| 205 | /* minimum and maximum address that may be valid pointers */ | 205 | /* minimum and maximum address that may be valid pointers */ |
| 206 | static unsigned long min_addr = ULONG_MAX; | 206 | static unsigned long min_addr = ULONG_MAX; |
| @@ -218,7 +218,8 @@ static int kmemleak_stack_scan = 1; | |||
| 218 | static DEFINE_MUTEX(scan_mutex); | 218 | static DEFINE_MUTEX(scan_mutex); |
| 219 | /* setting kmemleak=on, will set this var, skipping the disable */ | 219 | /* setting kmemleak=on, will set this var, skipping the disable */ |
| 220 | static int kmemleak_skip_disable; | 220 | static int kmemleak_skip_disable; |
| 221 | 221 | /* If there are leaks that can be reported */ | |
| 222 | static bool kmemleak_found_leaks; | ||
| 222 | 223 | ||
| 223 | /* | 224 | /* |
| 224 | * Early object allocation/freeing logging. Kmemleak is initialized after the | 225 | * Early object allocation/freeing logging. Kmemleak is initialized after the |
| @@ -267,7 +268,7 @@ static void kmemleak_disable(void); | |||
| 267 | #define kmemleak_warn(x...) do { \ | 268 | #define kmemleak_warn(x...) do { \ |
| 268 | pr_warning(x); \ | 269 | pr_warning(x); \ |
| 269 | dump_stack(); \ | 270 | dump_stack(); \ |
| 270 | atomic_set(&kmemleak_warning, 1); \ | 271 | kmemleak_warning = 1; \ |
| 271 | } while (0) | 272 | } while (0) |
| 272 | 273 | ||
| 273 | /* | 274 | /* |
| @@ -805,7 +806,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size, | |||
| 805 | unsigned long flags; | 806 | unsigned long flags; |
| 806 | struct early_log *log; | 807 | struct early_log *log; |
| 807 | 808 | ||
| 808 | if (atomic_read(&kmemleak_error)) { | 809 | if (kmemleak_error) { |
| 809 | /* kmemleak stopped recording, just count the requests */ | 810 | /* kmemleak stopped recording, just count the requests */ |
| 810 | crt_early_log++; | 811 | crt_early_log++; |
| 811 | return; | 812 | return; |
| @@ -840,7 +841,7 @@ static void early_alloc(struct early_log *log) | |||
| 840 | unsigned long flags; | 841 | unsigned long flags; |
| 841 | int i; | 842 | int i; |
| 842 | 843 | ||
| 843 | if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr)) | 844 | if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr)) |
| 844 | return; | 845 | return; |
| 845 | 846 | ||
| 846 | /* | 847 | /* |
| @@ -893,9 +894,9 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count, | |||
| 893 | { | 894 | { |
| 894 | pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count); | 895 | pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count); |
| 895 | 896 | ||
| 896 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 897 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 897 | create_object((unsigned long)ptr, size, min_count, gfp); | 898 | create_object((unsigned long)ptr, size, min_count, gfp); |
| 898 | else if (atomic_read(&kmemleak_early_log)) | 899 | else if (kmemleak_early_log) |
| 899 | log_early(KMEMLEAK_ALLOC, ptr, size, min_count); | 900 | log_early(KMEMLEAK_ALLOC, ptr, size, min_count); |
| 900 | } | 901 | } |
| 901 | EXPORT_SYMBOL_GPL(kmemleak_alloc); | 902 | EXPORT_SYMBOL_GPL(kmemleak_alloc); |
| @@ -919,11 +920,11 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) | |||
| 919 | * Percpu allocations are only scanned and not reported as leaks | 920 | * Percpu allocations are only scanned and not reported as leaks |
| 920 | * (min_count is set to 0). | 921 | * (min_count is set to 0). |
| 921 | */ | 922 | */ |
| 922 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 923 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 923 | for_each_possible_cpu(cpu) | 924 | for_each_possible_cpu(cpu) |
| 924 | create_object((unsigned long)per_cpu_ptr(ptr, cpu), | 925 | create_object((unsigned long)per_cpu_ptr(ptr, cpu), |
| 925 | size, 0, GFP_KERNEL); | 926 | size, 0, GFP_KERNEL); |
| 926 | else if (atomic_read(&kmemleak_early_log)) | 927 | else if (kmemleak_early_log) |
| 927 | log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0); | 928 | log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0); |
| 928 | } | 929 | } |
| 929 | EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu); | 930 | EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu); |
| @@ -939,9 +940,9 @@ void __ref kmemleak_free(const void *ptr) | |||
| 939 | { | 940 | { |
| 940 | pr_debug("%s(0x%p)\n", __func__, ptr); | 941 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 941 | 942 | ||
| 942 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 943 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 943 | delete_object_full((unsigned long)ptr); | 944 | delete_object_full((unsigned long)ptr); |
| 944 | else if (atomic_read(&kmemleak_early_log)) | 945 | else if (kmemleak_early_log) |
| 945 | log_early(KMEMLEAK_FREE, ptr, 0, 0); | 946 | log_early(KMEMLEAK_FREE, ptr, 0, 0); |
| 946 | } | 947 | } |
| 947 | EXPORT_SYMBOL_GPL(kmemleak_free); | 948 | EXPORT_SYMBOL_GPL(kmemleak_free); |
| @@ -959,9 +960,9 @@ void __ref kmemleak_free_part(const void *ptr, size_t size) | |||
| 959 | { | 960 | { |
| 960 | pr_debug("%s(0x%p)\n", __func__, ptr); | 961 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 961 | 962 | ||
| 962 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 963 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 963 | delete_object_part((unsigned long)ptr, size); | 964 | delete_object_part((unsigned long)ptr, size); |
| 964 | else if (atomic_read(&kmemleak_early_log)) | 965 | else if (kmemleak_early_log) |
| 965 | log_early(KMEMLEAK_FREE_PART, ptr, size, 0); | 966 | log_early(KMEMLEAK_FREE_PART, ptr, size, 0); |
| 966 | } | 967 | } |
| 967 | EXPORT_SYMBOL_GPL(kmemleak_free_part); | 968 | EXPORT_SYMBOL_GPL(kmemleak_free_part); |
| @@ -979,11 +980,11 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr) | |||
| 979 | 980 | ||
| 980 | pr_debug("%s(0x%p)\n", __func__, ptr); | 981 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 981 | 982 | ||
| 982 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 983 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 983 | for_each_possible_cpu(cpu) | 984 | for_each_possible_cpu(cpu) |
| 984 | delete_object_full((unsigned long)per_cpu_ptr(ptr, | 985 | delete_object_full((unsigned long)per_cpu_ptr(ptr, |
| 985 | cpu)); | 986 | cpu)); |
| 986 | else if (atomic_read(&kmemleak_early_log)) | 987 | else if (kmemleak_early_log) |
| 987 | log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0); | 988 | log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0); |
| 988 | } | 989 | } |
| 989 | EXPORT_SYMBOL_GPL(kmemleak_free_percpu); | 990 | EXPORT_SYMBOL_GPL(kmemleak_free_percpu); |
| @@ -999,9 +1000,9 @@ void __ref kmemleak_not_leak(const void *ptr) | |||
| 999 | { | 1000 | { |
| 1000 | pr_debug("%s(0x%p)\n", __func__, ptr); | 1001 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 1001 | 1002 | ||
| 1002 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 1003 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 1003 | make_gray_object((unsigned long)ptr); | 1004 | make_gray_object((unsigned long)ptr); |
| 1004 | else if (atomic_read(&kmemleak_early_log)) | 1005 | else if (kmemleak_early_log) |
| 1005 | log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); | 1006 | log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); |
| 1006 | } | 1007 | } |
| 1007 | EXPORT_SYMBOL(kmemleak_not_leak); | 1008 | EXPORT_SYMBOL(kmemleak_not_leak); |
| @@ -1019,9 +1020,9 @@ void __ref kmemleak_ignore(const void *ptr) | |||
| 1019 | { | 1020 | { |
| 1020 | pr_debug("%s(0x%p)\n", __func__, ptr); | 1021 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 1021 | 1022 | ||
| 1022 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 1023 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 1023 | make_black_object((unsigned long)ptr); | 1024 | make_black_object((unsigned long)ptr); |
| 1024 | else if (atomic_read(&kmemleak_early_log)) | 1025 | else if (kmemleak_early_log) |
| 1025 | log_early(KMEMLEAK_IGNORE, ptr, 0, 0); | 1026 | log_early(KMEMLEAK_IGNORE, ptr, 0, 0); |
| 1026 | } | 1027 | } |
| 1027 | EXPORT_SYMBOL(kmemleak_ignore); | 1028 | EXPORT_SYMBOL(kmemleak_ignore); |
| @@ -1041,9 +1042,9 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) | |||
| 1041 | { | 1042 | { |
| 1042 | pr_debug("%s(0x%p)\n", __func__, ptr); | 1043 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 1043 | 1044 | ||
| 1044 | if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr)) | 1045 | if (kmemleak_enabled && ptr && size && !IS_ERR(ptr)) |
| 1045 | add_scan_area((unsigned long)ptr, size, gfp); | 1046 | add_scan_area((unsigned long)ptr, size, gfp); |
| 1046 | else if (atomic_read(&kmemleak_early_log)) | 1047 | else if (kmemleak_early_log) |
| 1047 | log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); | 1048 | log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); |
| 1048 | } | 1049 | } |
| 1049 | EXPORT_SYMBOL(kmemleak_scan_area); | 1050 | EXPORT_SYMBOL(kmemleak_scan_area); |
| @@ -1061,9 +1062,9 @@ void __ref kmemleak_no_scan(const void *ptr) | |||
| 1061 | { | 1062 | { |
| 1062 | pr_debug("%s(0x%p)\n", __func__, ptr); | 1063 | pr_debug("%s(0x%p)\n", __func__, ptr); |
| 1063 | 1064 | ||
| 1064 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | 1065 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) |
| 1065 | object_no_scan((unsigned long)ptr); | 1066 | object_no_scan((unsigned long)ptr); |
| 1066 | else if (atomic_read(&kmemleak_early_log)) | 1067 | else if (kmemleak_early_log) |
| 1067 | log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); | 1068 | log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); |
| 1068 | } | 1069 | } |
| 1069 | EXPORT_SYMBOL(kmemleak_no_scan); | 1070 | EXPORT_SYMBOL(kmemleak_no_scan); |
| @@ -1088,7 +1089,7 @@ static bool update_checksum(struct kmemleak_object *object) | |||
| 1088 | */ | 1089 | */ |
| 1089 | static int scan_should_stop(void) | 1090 | static int scan_should_stop(void) |
| 1090 | { | 1091 | { |
| 1091 | if (!atomic_read(&kmemleak_enabled)) | 1092 | if (!kmemleak_enabled) |
| 1092 | return 1; | 1093 | return 1; |
| 1093 | 1094 | ||
| 1094 | /* | 1095 | /* |
| @@ -1382,9 +1383,12 @@ static void kmemleak_scan(void) | |||
| 1382 | } | 1383 | } |
| 1383 | rcu_read_unlock(); | 1384 | rcu_read_unlock(); |
| 1384 | 1385 | ||
| 1385 | if (new_leaks) | 1386 | if (new_leaks) { |
| 1387 | kmemleak_found_leaks = true; | ||
| 1388 | |||
| 1386 | pr_info("%d new suspected memory leaks (see " | 1389 | pr_info("%d new suspected memory leaks (see " |
| 1387 | "/sys/kernel/debug/kmemleak)\n", new_leaks); | 1390 | "/sys/kernel/debug/kmemleak)\n", new_leaks); |
| 1391 | } | ||
| 1388 | 1392 | ||
| 1389 | } | 1393 | } |
| 1390 | 1394 | ||
| @@ -1545,11 +1549,6 @@ static int kmemleak_open(struct inode *inode, struct file *file) | |||
| 1545 | return seq_open(file, &kmemleak_seq_ops); | 1549 | return seq_open(file, &kmemleak_seq_ops); |
| 1546 | } | 1550 | } |
| 1547 | 1551 | ||
| 1548 | static int kmemleak_release(struct inode *inode, struct file *file) | ||
| 1549 | { | ||
| 1550 | return seq_release(inode, file); | ||
| 1551 | } | ||
| 1552 | |||
| 1553 | static int dump_str_object_info(const char *str) | 1552 | static int dump_str_object_info(const char *str) |
| 1554 | { | 1553 | { |
| 1555 | unsigned long flags; | 1554 | unsigned long flags; |
| @@ -1592,8 +1591,12 @@ static void kmemleak_clear(void) | |||
| 1592 | spin_unlock_irqrestore(&object->lock, flags); | 1591 | spin_unlock_irqrestore(&object->lock, flags); |
| 1593 | } | 1592 | } |
| 1594 | rcu_read_unlock(); | 1593 | rcu_read_unlock(); |
| 1594 | |||
| 1595 | kmemleak_found_leaks = false; | ||
| 1595 | } | 1596 | } |
| 1596 | 1597 | ||
| 1598 | static void __kmemleak_do_cleanup(void); | ||
| 1599 | |||
| 1597 | /* | 1600 | /* |
| 1598 | * File write operation to configure kmemleak at run-time. The following | 1601 | * File write operation to configure kmemleak at run-time. The following |
| 1599 | * commands can be written to the /sys/kernel/debug/kmemleak file: | 1602 | * commands can be written to the /sys/kernel/debug/kmemleak file: |
| @@ -1606,7 +1609,8 @@ static void kmemleak_clear(void) | |||
| 1606 | * disable it) | 1609 | * disable it) |
| 1607 | * scan - trigger a memory scan | 1610 | * scan - trigger a memory scan |
| 1608 | * clear - mark all current reported unreferenced kmemleak objects as | 1611 | * clear - mark all current reported unreferenced kmemleak objects as |
| 1609 | * grey to ignore printing them | 1612 | * grey to ignore printing them, or free all kmemleak objects |
| 1613 | * if kmemleak has been disabled. | ||
| 1610 | * dump=... - dump information about the object found at the given address | 1614 | * dump=... - dump information about the object found at the given address |
| 1611 | */ | 1615 | */ |
| 1612 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | 1616 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, |
| @@ -1616,9 +1620,6 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | |||
| 1616 | int buf_size; | 1620 | int buf_size; |
| 1617 | int ret; | 1621 | int ret; |
| 1618 | 1622 | ||
| 1619 | if (!atomic_read(&kmemleak_enabled)) | ||
| 1620 | return -EBUSY; | ||
| 1621 | |||
| 1622 | buf_size = min(size, (sizeof(buf) - 1)); | 1623 | buf_size = min(size, (sizeof(buf) - 1)); |
| 1623 | if (strncpy_from_user(buf, user_buf, buf_size) < 0) | 1624 | if (strncpy_from_user(buf, user_buf, buf_size) < 0) |
| 1624 | return -EFAULT; | 1625 | return -EFAULT; |
| @@ -1628,6 +1629,19 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | |||
| 1628 | if (ret < 0) | 1629 | if (ret < 0) |
| 1629 | return ret; | 1630 | return ret; |
| 1630 | 1631 | ||
| 1632 | if (strncmp(buf, "clear", 5) == 0) { | ||
| 1633 | if (kmemleak_enabled) | ||
| 1634 | kmemleak_clear(); | ||
| 1635 | else | ||
| 1636 | __kmemleak_do_cleanup(); | ||
| 1637 | goto out; | ||
| 1638 | } | ||
| 1639 | |||
| 1640 | if (!kmemleak_enabled) { | ||
| 1641 | ret = -EBUSY; | ||
| 1642 | goto out; | ||
| 1643 | } | ||
| 1644 | |||
| 1631 | if (strncmp(buf, "off", 3) == 0) | 1645 | if (strncmp(buf, "off", 3) == 0) |
| 1632 | kmemleak_disable(); | 1646 | kmemleak_disable(); |
| 1633 | else if (strncmp(buf, "stack=on", 8) == 0) | 1647 | else if (strncmp(buf, "stack=on", 8) == 0) |
| @@ -1651,8 +1665,6 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | |||
| 1651 | } | 1665 | } |
| 1652 | } else if (strncmp(buf, "scan", 4) == 0) | 1666 | } else if (strncmp(buf, "scan", 4) == 0) |
| 1653 | kmemleak_scan(); | 1667 | kmemleak_scan(); |
| 1654 | else if (strncmp(buf, "clear", 5) == 0) | ||
| 1655 | kmemleak_clear(); | ||
| 1656 | else if (strncmp(buf, "dump=", 5) == 0) | 1668 | else if (strncmp(buf, "dump=", 5) == 0) |
| 1657 | ret = dump_str_object_info(buf + 5); | 1669 | ret = dump_str_object_info(buf + 5); |
| 1658 | else | 1670 | else |
| @@ -1674,9 +1686,19 @@ static const struct file_operations kmemleak_fops = { | |||
| 1674 | .read = seq_read, | 1686 | .read = seq_read, |
| 1675 | .write = kmemleak_write, | 1687 | .write = kmemleak_write, |
| 1676 | .llseek = seq_lseek, | 1688 | .llseek = seq_lseek, |
| 1677 | .release = kmemleak_release, | 1689 | .release = seq_release, |
| 1678 | }; | 1690 | }; |
| 1679 | 1691 | ||
| 1692 | static void __kmemleak_do_cleanup(void) | ||
| 1693 | { | ||
| 1694 | struct kmemleak_object *object; | ||
| 1695 | |||
| 1696 | rcu_read_lock(); | ||
| 1697 | list_for_each_entry_rcu(object, &object_list, object_list) | ||
| 1698 | delete_object_full(object->pointer); | ||
| 1699 | rcu_read_unlock(); | ||
| 1700 | } | ||
| 1701 | |||
| 1680 | /* | 1702 | /* |
| 1681 | * Stop the memory scanning thread and free the kmemleak internal objects if | 1703 | * Stop the memory scanning thread and free the kmemleak internal objects if |
| 1682 | * no previous scan thread (otherwise, kmemleak may still have some useful | 1704 | * no previous scan thread (otherwise, kmemleak may still have some useful |
| @@ -1684,18 +1706,14 @@ static const struct file_operations kmemleak_fops = { | |||
| 1684 | */ | 1706 | */ |
| 1685 | static void kmemleak_do_cleanup(struct work_struct *work) | 1707 | static void kmemleak_do_cleanup(struct work_struct *work) |
| 1686 | { | 1708 | { |
| 1687 | struct kmemleak_object *object; | ||
| 1688 | bool cleanup = scan_thread == NULL; | ||
| 1689 | |||
| 1690 | mutex_lock(&scan_mutex); | 1709 | mutex_lock(&scan_mutex); |
| 1691 | stop_scan_thread(); | 1710 | stop_scan_thread(); |
| 1692 | 1711 | ||
| 1693 | if (cleanup) { | 1712 | if (!kmemleak_found_leaks) |
| 1694 | rcu_read_lock(); | 1713 | __kmemleak_do_cleanup(); |
| 1695 | list_for_each_entry_rcu(object, &object_list, object_list) | 1714 | else |
| 1696 | delete_object_full(object->pointer); | 1715 | pr_info("Kmemleak disabled without freeing internal data. " |
| 1697 | rcu_read_unlock(); | 1716 | "Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n"); |
| 1698 | } | ||
| 1699 | mutex_unlock(&scan_mutex); | 1717 | mutex_unlock(&scan_mutex); |
| 1700 | } | 1718 | } |
| 1701 | 1719 | ||
| @@ -1708,14 +1726,14 @@ static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup); | |||
| 1708 | static void kmemleak_disable(void) | 1726 | static void kmemleak_disable(void) |
| 1709 | { | 1727 | { |
| 1710 | /* atomically check whether it was already invoked */ | 1728 | /* atomically check whether it was already invoked */ |
| 1711 | if (atomic_cmpxchg(&kmemleak_error, 0, 1)) | 1729 | if (cmpxchg(&kmemleak_error, 0, 1)) |
| 1712 | return; | 1730 | return; |
| 1713 | 1731 | ||
| 1714 | /* stop any memory operation tracing */ | 1732 | /* stop any memory operation tracing */ |
| 1715 | atomic_set(&kmemleak_enabled, 0); | 1733 | kmemleak_enabled = 0; |
| 1716 | 1734 | ||
| 1717 | /* check whether it is too early for a kernel thread */ | 1735 | /* check whether it is too early for a kernel thread */ |
| 1718 | if (atomic_read(&kmemleak_initialized)) | 1736 | if (kmemleak_initialized) |
| 1719 | schedule_work(&cleanup_work); | 1737 | schedule_work(&cleanup_work); |
| 1720 | 1738 | ||
| 1721 | pr_info("Kernel memory leak detector disabled\n"); | 1739 | pr_info("Kernel memory leak detector disabled\n"); |
| @@ -1757,9 +1775,10 @@ void __init kmemleak_init(void) | |||
| 1757 | int i; | 1775 | int i; |
| 1758 | unsigned long flags; | 1776 | unsigned long flags; |
| 1759 | 1777 | ||
| 1778 | kmemleak_early_log = 0; | ||
| 1779 | |||
| 1760 | #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF | 1780 | #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF |
| 1761 | if (!kmemleak_skip_disable) { | 1781 | if (!kmemleak_skip_disable) { |
| 1762 | atomic_set(&kmemleak_early_log, 0); | ||
| 1763 | kmemleak_disable(); | 1782 | kmemleak_disable(); |
| 1764 | return; | 1783 | return; |
| 1765 | } | 1784 | } |
| @@ -1777,12 +1796,11 @@ void __init kmemleak_init(void) | |||
| 1777 | 1796 | ||
| 1778 | /* the kernel is still in UP mode, so disabling the IRQs is enough */ | 1797 | /* the kernel is still in UP mode, so disabling the IRQs is enough */ |
| 1779 | local_irq_save(flags); | 1798 | local_irq_save(flags); |
| 1780 | atomic_set(&kmemleak_early_log, 0); | 1799 | if (kmemleak_error) { |
| 1781 | if (atomic_read(&kmemleak_error)) { | ||
| 1782 | local_irq_restore(flags); | 1800 | local_irq_restore(flags); |
| 1783 | return; | 1801 | return; |
| 1784 | } else | 1802 | } else |
| 1785 | atomic_set(&kmemleak_enabled, 1); | 1803 | kmemleak_enabled = 1; |
| 1786 | local_irq_restore(flags); | 1804 | local_irq_restore(flags); |
| 1787 | 1805 | ||
| 1788 | /* | 1806 | /* |
| @@ -1826,9 +1844,9 @@ void __init kmemleak_init(void) | |||
| 1826 | log->op_type); | 1844 | log->op_type); |
| 1827 | } | 1845 | } |
| 1828 | 1846 | ||
| 1829 | if (atomic_read(&kmemleak_warning)) { | 1847 | if (kmemleak_warning) { |
| 1830 | print_log_trace(log); | 1848 | print_log_trace(log); |
| 1831 | atomic_set(&kmemleak_warning, 0); | 1849 | kmemleak_warning = 0; |
| 1832 | } | 1850 | } |
| 1833 | } | 1851 | } |
| 1834 | } | 1852 | } |
| @@ -1840,9 +1858,9 @@ static int __init kmemleak_late_init(void) | |||
| 1840 | { | 1858 | { |
| 1841 | struct dentry *dentry; | 1859 | struct dentry *dentry; |
| 1842 | 1860 | ||
| 1843 | atomic_set(&kmemleak_initialized, 1); | 1861 | kmemleak_initialized = 1; |
| 1844 | 1862 | ||
| 1845 | if (atomic_read(&kmemleak_error)) { | 1863 | if (kmemleak_error) { |
| 1846 | /* | 1864 | /* |
| 1847 | * Some error occurred and kmemleak was disabled. There is a | 1865 | * Some error occurred and kmemleak was disabled. There is a |
| 1848 | * small chance that kmemleak_disable() was called immediately | 1866 | * small chance that kmemleak_disable() was called immediately |
diff --git a/mm/list_lru.c b/mm/list_lru.c index 72f9decb0104..f1a0db194173 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
| @@ -87,11 +87,20 @@ restart: | |||
| 87 | 87 | ||
| 88 | ret = isolate(item, &nlru->lock, cb_arg); | 88 | ret = isolate(item, &nlru->lock, cb_arg); |
| 89 | switch (ret) { | 89 | switch (ret) { |
| 90 | case LRU_REMOVED_RETRY: | ||
| 91 | assert_spin_locked(&nlru->lock); | ||
| 90 | case LRU_REMOVED: | 92 | case LRU_REMOVED: |
| 91 | if (--nlru->nr_items == 0) | 93 | if (--nlru->nr_items == 0) |
| 92 | node_clear(nid, lru->active_nodes); | 94 | node_clear(nid, lru->active_nodes); |
| 93 | WARN_ON_ONCE(nlru->nr_items < 0); | 95 | WARN_ON_ONCE(nlru->nr_items < 0); |
| 94 | isolated++; | 96 | isolated++; |
| 97 | /* | ||
| 98 | * If the lru lock has been dropped, our list | ||
| 99 | * traversal is now invalid and so we have to | ||
| 100 | * restart from scratch. | ||
| 101 | */ | ||
| 102 | if (ret == LRU_REMOVED_RETRY) | ||
| 103 | goto restart; | ||
| 95 | break; | 104 | break; |
| 96 | case LRU_ROTATE: | 105 | case LRU_ROTATE: |
| 97 | list_move_tail(item, &nlru->list); | 106 | list_move_tail(item, &nlru->list); |
| @@ -103,6 +112,7 @@ restart: | |||
| 103 | * The lru lock has been dropped, our list traversal is | 112 | * The lru lock has been dropped, our list traversal is |
| 104 | * now invalid and so we have to restart from scratch. | 113 | * now invalid and so we have to restart from scratch. |
| 105 | */ | 114 | */ |
| 115 | assert_spin_locked(&nlru->lock); | ||
| 106 | goto restart; | 116 | goto restart; |
| 107 | default: | 117 | default: |
| 108 | BUG(); | 118 | BUG(); |
| @@ -114,7 +124,7 @@ restart: | |||
| 114 | } | 124 | } |
| 115 | EXPORT_SYMBOL_GPL(list_lru_walk_node); | 125 | EXPORT_SYMBOL_GPL(list_lru_walk_node); |
| 116 | 126 | ||
| 117 | int list_lru_init(struct list_lru *lru) | 127 | int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key) |
| 118 | { | 128 | { |
| 119 | int i; | 129 | int i; |
| 120 | size_t size = sizeof(*lru->node) * nr_node_ids; | 130 | size_t size = sizeof(*lru->node) * nr_node_ids; |
| @@ -126,12 +136,14 @@ int list_lru_init(struct list_lru *lru) | |||
| 126 | nodes_clear(lru->active_nodes); | 136 | nodes_clear(lru->active_nodes); |
| 127 | for (i = 0; i < nr_node_ids; i++) { | 137 | for (i = 0; i < nr_node_ids; i++) { |
| 128 | spin_lock_init(&lru->node[i].lock); | 138 | spin_lock_init(&lru->node[i].lock); |
| 139 | if (key) | ||
| 140 | lockdep_set_class(&lru->node[i].lock, key); | ||
| 129 | INIT_LIST_HEAD(&lru->node[i].list); | 141 | INIT_LIST_HEAD(&lru->node[i].list); |
| 130 | lru->node[i].nr_items = 0; | 142 | lru->node[i].nr_items = 0; |
| 131 | } | 143 | } |
| 132 | return 0; | 144 | return 0; |
| 133 | } | 145 | } |
| 134 | EXPORT_SYMBOL_GPL(list_lru_init); | 146 | EXPORT_SYMBOL_GPL(list_lru_init_key); |
| 135 | 147 | ||
| 136 | void list_lru_destroy(struct list_lru *lru) | 148 | void list_lru_destroy(struct list_lru *lru) |
| 137 | { | 149 | { |
diff --git a/mm/memory.c b/mm/memory.c index 22dfa617bddb..90cea22001ef 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2587,6 +2587,38 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo | |||
| 2587 | } | 2587 | } |
| 2588 | 2588 | ||
| 2589 | /* | 2589 | /* |
| 2590 | * Notify the address space that the page is about to become writable so that | ||
| 2591 | * it can prohibit this or wait for the page to get into an appropriate state. | ||
| 2592 | * | ||
| 2593 | * We do this without the lock held, so that it can sleep if it needs to. | ||
| 2594 | */ | ||
| 2595 | static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, | ||
| 2596 | unsigned long address) | ||
| 2597 | { | ||
| 2598 | struct vm_fault vmf; | ||
| 2599 | int ret; | ||
| 2600 | |||
| 2601 | vmf.virtual_address = (void __user *)(address & PAGE_MASK); | ||
| 2602 | vmf.pgoff = page->index; | ||
| 2603 | vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; | ||
| 2604 | vmf.page = page; | ||
| 2605 | |||
| 2606 | ret = vma->vm_ops->page_mkwrite(vma, &vmf); | ||
| 2607 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) | ||
| 2608 | return ret; | ||
| 2609 | if (unlikely(!(ret & VM_FAULT_LOCKED))) { | ||
| 2610 | lock_page(page); | ||
| 2611 | if (!page->mapping) { | ||
| 2612 | unlock_page(page); | ||
| 2613 | return 0; /* retry */ | ||
| 2614 | } | ||
| 2615 | ret |= VM_FAULT_LOCKED; | ||
| 2616 | } else | ||
| 2617 | VM_BUG_ON_PAGE(!PageLocked(page), page); | ||
| 2618 | return ret; | ||
| 2619 | } | ||
| 2620 | |||
| 2621 | /* | ||
| 2590 | * This routine handles present pages, when users try to write | 2622 | * This routine handles present pages, when users try to write |
| 2591 | * to a shared page. It is done by copying the page to a new address | 2623 | * to a shared page. It is done by copying the page to a new address |
| 2592 | * and decrementing the shared-page counter for the old page. | 2624 | * and decrementing the shared-page counter for the old page. |
| @@ -2668,42 +2700,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2668 | * get_user_pages(.write=1, .force=1). | 2700 | * get_user_pages(.write=1, .force=1). |
| 2669 | */ | 2701 | */ |
| 2670 | if (vma->vm_ops && vma->vm_ops->page_mkwrite) { | 2702 | if (vma->vm_ops && vma->vm_ops->page_mkwrite) { |
| 2671 | struct vm_fault vmf; | ||
| 2672 | int tmp; | 2703 | int tmp; |
| 2673 | |||
| 2674 | vmf.virtual_address = (void __user *)(address & | ||
| 2675 | PAGE_MASK); | ||
| 2676 | vmf.pgoff = old_page->index; | ||
| 2677 | vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; | ||
| 2678 | vmf.page = old_page; | ||
| 2679 | |||
| 2680 | /* | ||
| 2681 | * Notify the address space that the page is about to | ||
| 2682 | * become writable so that it can prohibit this or wait | ||
| 2683 | * for the page to get into an appropriate state. | ||
| 2684 | * | ||
| 2685 | * We do this without the lock held, so that it can | ||
| 2686 | * sleep if it needs to. | ||
| 2687 | */ | ||
| 2688 | page_cache_get(old_page); | 2704 | page_cache_get(old_page); |
| 2689 | pte_unmap_unlock(page_table, ptl); | 2705 | pte_unmap_unlock(page_table, ptl); |
| 2690 | 2706 | tmp = do_page_mkwrite(vma, old_page, address); | |
| 2691 | tmp = vma->vm_ops->page_mkwrite(vma, &vmf); | 2707 | if (unlikely(!tmp || (tmp & |
| 2692 | if (unlikely(tmp & | 2708 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { |
| 2693 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { | 2709 | page_cache_release(old_page); |
| 2694 | ret = tmp; | 2710 | return tmp; |
| 2695 | goto unwritable_page; | ||
| 2696 | } | 2711 | } |
| 2697 | if (unlikely(!(tmp & VM_FAULT_LOCKED))) { | ||
| 2698 | lock_page(old_page); | ||
| 2699 | if (!old_page->mapping) { | ||
| 2700 | ret = 0; /* retry the fault */ | ||
| 2701 | unlock_page(old_page); | ||
| 2702 | goto unwritable_page; | ||
| 2703 | } | ||
| 2704 | } else | ||
| 2705 | VM_BUG_ON_PAGE(!PageLocked(old_page), old_page); | ||
| 2706 | |||
| 2707 | /* | 2712 | /* |
| 2708 | * Since we dropped the lock we need to revalidate | 2713 | * Since we dropped the lock we need to revalidate |
| 2709 | * the PTE as someone else may have changed it. If | 2714 | * the PTE as someone else may have changed it. If |
| @@ -2748,7 +2753,7 @@ reuse: | |||
| 2748 | * bit after it clear all dirty ptes, but before a racing | 2753 | * bit after it clear all dirty ptes, but before a racing |
| 2749 | * do_wp_page installs a dirty pte. | 2754 | * do_wp_page installs a dirty pte. |
| 2750 | * | 2755 | * |
| 2751 | * __do_fault is protected similarly. | 2756 | * do_shared_fault is protected similarly. |
| 2752 | */ | 2757 | */ |
| 2753 | if (!page_mkwrite) { | 2758 | if (!page_mkwrite) { |
| 2754 | wait_on_page_locked(dirty_page); | 2759 | wait_on_page_locked(dirty_page); |
| @@ -2892,10 +2897,6 @@ oom: | |||
| 2892 | if (old_page) | 2897 | if (old_page) |
| 2893 | page_cache_release(old_page); | 2898 | page_cache_release(old_page); |
| 2894 | return VM_FAULT_OOM; | 2899 | return VM_FAULT_OOM; |
| 2895 | |||
| 2896 | unwritable_page: | ||
| 2897 | page_cache_release(old_page); | ||
| 2898 | return ret; | ||
| 2899 | } | 2900 | } |
| 2900 | 2901 | ||
| 2901 | static void unmap_mapping_range_vma(struct vm_area_struct *vma, | 2902 | static void unmap_mapping_range_vma(struct vm_area_struct *vma, |
| @@ -3286,53 +3287,11 @@ oom: | |||
| 3286 | return VM_FAULT_OOM; | 3287 | return VM_FAULT_OOM; |
| 3287 | } | 3288 | } |
| 3288 | 3289 | ||
| 3289 | /* | 3290 | static int __do_fault(struct vm_area_struct *vma, unsigned long address, |
| 3290 | * __do_fault() tries to create a new page mapping. It aggressively | 3291 | pgoff_t pgoff, unsigned int flags, struct page **page) |
| 3291 | * tries to share with existing pages, but makes a separate copy if | ||
| 3292 | * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid | ||
| 3293 | * the next page fault. | ||
| 3294 | * | ||
| 3295 | * As this is called only for pages that do not currently exist, we | ||
| 3296 | * do not need to flush old virtual caches or the TLB. | ||
| 3297 | * | ||
| 3298 | * We enter with non-exclusive mmap_sem (to exclude vma changes, | ||
| 3299 | * but allow concurrent faults), and pte neither mapped nor locked. | ||
| 3300 | * We return with mmap_sem still held, but pte unmapped and unlocked. | ||
| 3301 | */ | ||
| 3302 | static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | ||
| 3303 | unsigned long address, pmd_t *pmd, | ||
| 3304 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) | ||
| 3305 | { | 3292 | { |
| 3306 | pte_t *page_table; | ||
| 3307 | spinlock_t *ptl; | ||
| 3308 | struct page *page; | ||
| 3309 | struct page *cow_page; | ||
| 3310 | pte_t entry; | ||
| 3311 | int anon = 0; | ||
| 3312 | struct page *dirty_page = NULL; | ||
| 3313 | struct vm_fault vmf; | 3293 | struct vm_fault vmf; |
| 3314 | int ret; | 3294 | int ret; |
| 3315 | int page_mkwrite = 0; | ||
| 3316 | |||
| 3317 | /* | ||
| 3318 | * If we do COW later, allocate page befor taking lock_page() | ||
| 3319 | * on the file cache page. This will reduce lock holding time. | ||
| 3320 | */ | ||
| 3321 | if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { | ||
| 3322 | |||
| 3323 | if (unlikely(anon_vma_prepare(vma))) | ||
| 3324 | return VM_FAULT_OOM; | ||
| 3325 | |||
| 3326 | cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | ||
| 3327 | if (!cow_page) | ||
| 3328 | return VM_FAULT_OOM; | ||
| 3329 | |||
| 3330 | if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) { | ||
| 3331 | page_cache_release(cow_page); | ||
| 3332 | return VM_FAULT_OOM; | ||
| 3333 | } | ||
| 3334 | } else | ||
| 3335 | cow_page = NULL; | ||
| 3336 | 3295 | ||
| 3337 | vmf.virtual_address = (void __user *)(address & PAGE_MASK); | 3296 | vmf.virtual_address = (void __user *)(address & PAGE_MASK); |
| 3338 | vmf.pgoff = pgoff; | 3297 | vmf.pgoff = pgoff; |
| @@ -3340,151 +3299,176 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3340 | vmf.page = NULL; | 3299 | vmf.page = NULL; |
| 3341 | 3300 | ||
| 3342 | ret = vma->vm_ops->fault(vma, &vmf); | 3301 | ret = vma->vm_ops->fault(vma, &vmf); |
| 3343 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | | 3302 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) |
| 3344 | VM_FAULT_RETRY))) | 3303 | return ret; |
| 3345 | goto uncharge_out; | ||
| 3346 | 3304 | ||
| 3347 | if (unlikely(PageHWPoison(vmf.page))) { | 3305 | if (unlikely(PageHWPoison(vmf.page))) { |
| 3348 | if (ret & VM_FAULT_LOCKED) | 3306 | if (ret & VM_FAULT_LOCKED) |
| 3349 | unlock_page(vmf.page); | 3307 | unlock_page(vmf.page); |
| 3350 | ret = VM_FAULT_HWPOISON; | ||
| 3351 | page_cache_release(vmf.page); | 3308 | page_cache_release(vmf.page); |
| 3352 | goto uncharge_out; | 3309 | return VM_FAULT_HWPOISON; |
| 3353 | } | 3310 | } |
| 3354 | 3311 | ||
| 3355 | /* | ||
| 3356 | * For consistency in subsequent calls, make the faulted page always | ||
| 3357 | * locked. | ||
| 3358 | */ | ||
| 3359 | if (unlikely(!(ret & VM_FAULT_LOCKED))) | 3312 | if (unlikely(!(ret & VM_FAULT_LOCKED))) |
| 3360 | lock_page(vmf.page); | 3313 | lock_page(vmf.page); |
| 3361 | else | 3314 | else |
| 3362 | VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); | 3315 | VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); |
| 3363 | 3316 | ||
| 3364 | /* | 3317 | *page = vmf.page; |
| 3365 | * Should we do an early C-O-W break? | 3318 | return ret; |
| 3366 | */ | 3319 | } |
| 3367 | page = vmf.page; | ||
| 3368 | if (flags & FAULT_FLAG_WRITE) { | ||
| 3369 | if (!(vma->vm_flags & VM_SHARED)) { | ||
| 3370 | page = cow_page; | ||
| 3371 | anon = 1; | ||
| 3372 | copy_user_highpage(page, vmf.page, address, vma); | ||
| 3373 | __SetPageUptodate(page); | ||
| 3374 | } else { | ||
| 3375 | /* | ||
| 3376 | * If the page will be shareable, see if the backing | ||
| 3377 | * address space wants to know that the page is about | ||
| 3378 | * to become writable | ||
| 3379 | */ | ||
| 3380 | if (vma->vm_ops->page_mkwrite) { | ||
| 3381 | int tmp; | ||
| 3382 | |||
| 3383 | unlock_page(page); | ||
| 3384 | vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; | ||
| 3385 | tmp = vma->vm_ops->page_mkwrite(vma, &vmf); | ||
| 3386 | if (unlikely(tmp & | ||
| 3387 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { | ||
| 3388 | ret = tmp; | ||
| 3389 | goto unwritable_page; | ||
| 3390 | } | ||
| 3391 | if (unlikely(!(tmp & VM_FAULT_LOCKED))) { | ||
| 3392 | lock_page(page); | ||
| 3393 | if (!page->mapping) { | ||
| 3394 | ret = 0; /* retry the fault */ | ||
| 3395 | unlock_page(page); | ||
| 3396 | goto unwritable_page; | ||
| 3397 | } | ||
| 3398 | } else | ||
| 3399 | VM_BUG_ON_PAGE(!PageLocked(page), page); | ||
| 3400 | page_mkwrite = 1; | ||
| 3401 | } | ||
| 3402 | } | ||
| 3403 | 3320 | ||
| 3321 | static void do_set_pte(struct vm_area_struct *vma, unsigned long address, | ||
| 3322 | struct page *page, pte_t *pte, bool write, bool anon) | ||
| 3323 | { | ||
| 3324 | pte_t entry; | ||
| 3325 | |||
| 3326 | flush_icache_page(vma, page); | ||
| 3327 | entry = mk_pte(page, vma->vm_page_prot); | ||
| 3328 | if (write) | ||
| 3329 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | ||
| 3330 | else if (pte_file(*pte) && pte_file_soft_dirty(*pte)) | ||
| 3331 | pte_mksoft_dirty(entry); | ||
| 3332 | if (anon) { | ||
| 3333 | inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); | ||
| 3334 | page_add_new_anon_rmap(page, vma, address); | ||
| 3335 | } else { | ||
| 3336 | inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES); | ||
| 3337 | page_add_file_rmap(page); | ||
| 3404 | } | 3338 | } |
| 3339 | set_pte_at(vma->vm_mm, address, pte, entry); | ||
| 3405 | 3340 | ||
| 3406 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); | 3341 | /* no need to invalidate: a not-present page won't be cached */ |
| 3342 | update_mmu_cache(vma, address, pte); | ||
| 3343 | } | ||
| 3407 | 3344 | ||
| 3408 | /* | 3345 | static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3409 | * This silly early PAGE_DIRTY setting removes a race | 3346 | unsigned long address, pmd_t *pmd, |
| 3410 | * due to the bad i386 page protection. But it's valid | 3347 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) |
| 3411 | * for other architectures too. | 3348 | { |
| 3412 | * | 3349 | struct page *fault_page; |
| 3413 | * Note that if FAULT_FLAG_WRITE is set, we either now have | 3350 | spinlock_t *ptl; |
| 3414 | * an exclusive copy of the page, or this is a shared mapping, | 3351 | pte_t *pte; |
| 3415 | * so we can make it writable and dirty to avoid having to | 3352 | int ret; |
| 3416 | * handle that later. | ||
| 3417 | */ | ||
| 3418 | /* Only go through if we didn't race with anybody else... */ | ||
| 3419 | if (likely(pte_same(*page_table, orig_pte))) { | ||
| 3420 | flush_icache_page(vma, page); | ||
| 3421 | entry = mk_pte(page, vma->vm_page_prot); | ||
| 3422 | if (flags & FAULT_FLAG_WRITE) | ||
| 3423 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | ||
| 3424 | else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte)) | ||
| 3425 | pte_mksoft_dirty(entry); | ||
| 3426 | if (anon) { | ||
| 3427 | inc_mm_counter_fast(mm, MM_ANONPAGES); | ||
| 3428 | page_add_new_anon_rmap(page, vma, address); | ||
| 3429 | } else { | ||
| 3430 | inc_mm_counter_fast(mm, MM_FILEPAGES); | ||
| 3431 | page_add_file_rmap(page); | ||
| 3432 | if (flags & FAULT_FLAG_WRITE) { | ||
| 3433 | dirty_page = page; | ||
| 3434 | get_page(dirty_page); | ||
| 3435 | } | ||
| 3436 | } | ||
| 3437 | set_pte_at(mm, address, page_table, entry); | ||
| 3438 | 3353 | ||
| 3439 | /* no need to invalidate: a not-present page won't be cached */ | 3354 | ret = __do_fault(vma, address, pgoff, flags, &fault_page); |
| 3440 | update_mmu_cache(vma, address, page_table); | 3355 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) |
| 3441 | } else { | 3356 | return ret; |
| 3442 | if (cow_page) | 3357 | |
| 3443 | mem_cgroup_uncharge_page(cow_page); | 3358 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 3444 | if (anon) | 3359 | if (unlikely(!pte_same(*pte, orig_pte))) { |
| 3445 | page_cache_release(page); | 3360 | pte_unmap_unlock(pte, ptl); |
| 3446 | else | 3361 | unlock_page(fault_page); |
| 3447 | anon = 1; /* no anon but release faulted_page */ | 3362 | page_cache_release(fault_page); |
| 3363 | return ret; | ||
| 3448 | } | 3364 | } |
| 3365 | do_set_pte(vma, address, fault_page, pte, false, false); | ||
| 3366 | pte_unmap_unlock(pte, ptl); | ||
| 3367 | unlock_page(fault_page); | ||
| 3368 | return ret; | ||
| 3369 | } | ||
| 3449 | 3370 | ||
| 3450 | pte_unmap_unlock(page_table, ptl); | 3371 | static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3372 | unsigned long address, pmd_t *pmd, | ||
| 3373 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) | ||
| 3374 | { | ||
| 3375 | struct page *fault_page, *new_page; | ||
| 3376 | spinlock_t *ptl; | ||
| 3377 | pte_t *pte; | ||
| 3378 | int ret; | ||
| 3451 | 3379 | ||
| 3452 | if (dirty_page) { | 3380 | if (unlikely(anon_vma_prepare(vma))) |
| 3453 | struct address_space *mapping = page->mapping; | 3381 | return VM_FAULT_OOM; |
| 3454 | int dirtied = 0; | ||
| 3455 | 3382 | ||
| 3456 | if (set_page_dirty(dirty_page)) | 3383 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
| 3457 | dirtied = 1; | 3384 | if (!new_page) |
| 3458 | unlock_page(dirty_page); | 3385 | return VM_FAULT_OOM; |
| 3459 | put_page(dirty_page); | ||
| 3460 | if ((dirtied || page_mkwrite) && mapping) { | ||
| 3461 | /* | ||
| 3462 | * Some device drivers do not set page.mapping but still | ||
| 3463 | * dirty their pages | ||
| 3464 | */ | ||
| 3465 | balance_dirty_pages_ratelimited(mapping); | ||
| 3466 | } | ||
| 3467 | 3386 | ||
| 3468 | /* file_update_time outside page_lock */ | 3387 | if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) { |
| 3469 | if (vma->vm_file && !page_mkwrite) | 3388 | page_cache_release(new_page); |
| 3470 | file_update_time(vma->vm_file); | 3389 | return VM_FAULT_OOM; |
| 3471 | } else { | ||
| 3472 | unlock_page(vmf.page); | ||
| 3473 | if (anon) | ||
| 3474 | page_cache_release(vmf.page); | ||
| 3475 | } | 3390 | } |
| 3476 | 3391 | ||
| 3477 | return ret; | 3392 | ret = __do_fault(vma, address, pgoff, flags, &fault_page); |
| 3393 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) | ||
| 3394 | goto uncharge_out; | ||
| 3478 | 3395 | ||
| 3479 | unwritable_page: | 3396 | copy_user_highpage(new_page, fault_page, address, vma); |
| 3480 | page_cache_release(page); | 3397 | __SetPageUptodate(new_page); |
| 3398 | |||
| 3399 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); | ||
| 3400 | if (unlikely(!pte_same(*pte, orig_pte))) { | ||
| 3401 | pte_unmap_unlock(pte, ptl); | ||
| 3402 | unlock_page(fault_page); | ||
| 3403 | page_cache_release(fault_page); | ||
| 3404 | goto uncharge_out; | ||
| 3405 | } | ||
| 3406 | do_set_pte(vma, address, new_page, pte, true, true); | ||
| 3407 | pte_unmap_unlock(pte, ptl); | ||
| 3408 | unlock_page(fault_page); | ||
| 3409 | page_cache_release(fault_page); | ||
| 3481 | return ret; | 3410 | return ret; |
| 3482 | uncharge_out: | 3411 | uncharge_out: |
| 3483 | /* fs's fault handler get error */ | 3412 | mem_cgroup_uncharge_page(new_page); |
| 3484 | if (cow_page) { | 3413 | page_cache_release(new_page); |
| 3485 | mem_cgroup_uncharge_page(cow_page); | 3414 | return ret; |
| 3486 | page_cache_release(cow_page); | 3415 | } |
| 3416 | |||
| 3417 | static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, | ||
| 3418 | unsigned long address, pmd_t *pmd, | ||
| 3419 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) | ||
| 3420 | { | ||
| 3421 | struct page *fault_page; | ||
| 3422 | struct address_space *mapping; | ||
| 3423 | spinlock_t *ptl; | ||
| 3424 | pte_t *pte; | ||
| 3425 | int dirtied = 0; | ||
| 3426 | int ret, tmp; | ||
| 3427 | |||
| 3428 | ret = __do_fault(vma, address, pgoff, flags, &fault_page); | ||
| 3429 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) | ||
| 3430 | return ret; | ||
| 3431 | |||
| 3432 | /* | ||
| 3433 | * Check if the backing address space wants to know that the page is | ||
| 3434 | * about to become writable | ||
| 3435 | */ | ||
| 3436 | if (vma->vm_ops->page_mkwrite) { | ||
| 3437 | unlock_page(fault_page); | ||
| 3438 | tmp = do_page_mkwrite(vma, fault_page, address); | ||
| 3439 | if (unlikely(!tmp || | ||
| 3440 | (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { | ||
| 3441 | page_cache_release(fault_page); | ||
| 3442 | return tmp; | ||
| 3443 | } | ||
| 3487 | } | 3444 | } |
| 3445 | |||
| 3446 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); | ||
| 3447 | if (unlikely(!pte_same(*pte, orig_pte))) { | ||
| 3448 | pte_unmap_unlock(pte, ptl); | ||
| 3449 | unlock_page(fault_page); | ||
| 3450 | page_cache_release(fault_page); | ||
| 3451 | return ret; | ||
| 3452 | } | ||
| 3453 | do_set_pte(vma, address, fault_page, pte, true, false); | ||
| 3454 | pte_unmap_unlock(pte, ptl); | ||
| 3455 | |||
| 3456 | if (set_page_dirty(fault_page)) | ||
| 3457 | dirtied = 1; | ||
| 3458 | mapping = fault_page->mapping; | ||
| 3459 | unlock_page(fault_page); | ||
| 3460 | if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { | ||
| 3461 | /* | ||
| 3462 | * Some device drivers do not set page.mapping but still | ||
| 3463 | * dirty their pages | ||
| 3464 | */ | ||
| 3465 | balance_dirty_pages_ratelimited(mapping); | ||
| 3466 | } | ||
| 3467 | |||
| 3468 | /* file_update_time outside page_lock */ | ||
| 3469 | if (vma->vm_file && !vma->vm_ops->page_mkwrite) | ||
| 3470 | file_update_time(vma->vm_file); | ||
| 3471 | |||
| 3488 | return ret; | 3472 | return ret; |
| 3489 | } | 3473 | } |
| 3490 | 3474 | ||
| @@ -3496,7 +3480,13 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3496 | - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 3480 | - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
| 3497 | 3481 | ||
| 3498 | pte_unmap(page_table); | 3482 | pte_unmap(page_table); |
| 3499 | return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); | 3483 | if (!(flags & FAULT_FLAG_WRITE)) |
| 3484 | return do_read_fault(mm, vma, address, pmd, pgoff, flags, | ||
| 3485 | orig_pte); | ||
| 3486 | if (!(vma->vm_flags & VM_SHARED)) | ||
| 3487 | return do_cow_fault(mm, vma, address, pmd, pgoff, flags, | ||
| 3488 | orig_pte); | ||
| 3489 | return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); | ||
| 3500 | } | 3490 | } |
| 3501 | 3491 | ||
| 3502 | /* | 3492 | /* |
| @@ -3528,10 +3518,16 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3528 | } | 3518 | } |
| 3529 | 3519 | ||
| 3530 | pgoff = pte_to_pgoff(orig_pte); | 3520 | pgoff = pte_to_pgoff(orig_pte); |
| 3531 | return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); | 3521 | if (!(flags & FAULT_FLAG_WRITE)) |
| 3522 | return do_read_fault(mm, vma, address, pmd, pgoff, flags, | ||
| 3523 | orig_pte); | ||
| 3524 | if (!(vma->vm_flags & VM_SHARED)) | ||
| 3525 | return do_cow_fault(mm, vma, address, pmd, pgoff, flags, | ||
| 3526 | orig_pte); | ||
| 3527 | return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); | ||
| 3532 | } | 3528 | } |
| 3533 | 3529 | ||
| 3534 | int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, | 3530 | static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, |
| 3535 | unsigned long addr, int page_nid, | 3531 | unsigned long addr, int page_nid, |
| 3536 | int *flags) | 3532 | int *flags) |
| 3537 | { | 3533 | { |
| @@ -3546,7 +3542,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, | |||
| 3546 | return mpol_misplaced(page, vma, addr); | 3542 | return mpol_misplaced(page, vma, addr); |
| 3547 | } | 3543 | } |
| 3548 | 3544 | ||
| 3549 | int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | 3545 | static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3550 | unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) | 3546 | unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) |
| 3551 | { | 3547 | { |
| 3552 | struct page *page = NULL; | 3548 | struct page *page = NULL; |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 4755c8576942..e3ab02822799 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -1899,7 +1899,7 @@ int node_random(const nodemask_t *maskp) | |||
| 1899 | * If the effective policy is 'BIND, returns a pointer to the mempolicy's | 1899 | * If the effective policy is 'BIND, returns a pointer to the mempolicy's |
| 1900 | * @nodemask for filtering the zonelist. | 1900 | * @nodemask for filtering the zonelist. |
| 1901 | * | 1901 | * |
| 1902 | * Must be protected by get_mems_allowed() | 1902 | * Must be protected by read_mems_allowed_begin() |
| 1903 | */ | 1903 | */ |
| 1904 | struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, | 1904 | struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, |
| 1905 | gfp_t gfp_flags, struct mempolicy **mpol, | 1905 | gfp_t gfp_flags, struct mempolicy **mpol, |
| @@ -2063,7 +2063,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | |||
| 2063 | 2063 | ||
| 2064 | retry_cpuset: | 2064 | retry_cpuset: |
| 2065 | pol = get_vma_policy(current, vma, addr); | 2065 | pol = get_vma_policy(current, vma, addr); |
| 2066 | cpuset_mems_cookie = get_mems_allowed(); | 2066 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 2067 | 2067 | ||
| 2068 | if (unlikely(pol->mode == MPOL_INTERLEAVE)) { | 2068 | if (unlikely(pol->mode == MPOL_INTERLEAVE)) { |
| 2069 | unsigned nid; | 2069 | unsigned nid; |
| @@ -2071,7 +2071,7 @@ retry_cpuset: | |||
| 2071 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); | 2071 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); |
| 2072 | mpol_cond_put(pol); | 2072 | mpol_cond_put(pol); |
| 2073 | page = alloc_page_interleave(gfp, order, nid); | 2073 | page = alloc_page_interleave(gfp, order, nid); |
| 2074 | if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) | 2074 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) |
| 2075 | goto retry_cpuset; | 2075 | goto retry_cpuset; |
| 2076 | 2076 | ||
| 2077 | return page; | 2077 | return page; |
| @@ -2081,7 +2081,7 @@ retry_cpuset: | |||
| 2081 | policy_nodemask(gfp, pol)); | 2081 | policy_nodemask(gfp, pol)); |
| 2082 | if (unlikely(mpol_needs_cond_ref(pol))) | 2082 | if (unlikely(mpol_needs_cond_ref(pol))) |
| 2083 | __mpol_put(pol); | 2083 | __mpol_put(pol); |
| 2084 | if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) | 2084 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) |
| 2085 | goto retry_cpuset; | 2085 | goto retry_cpuset; |
| 2086 | return page; | 2086 | return page; |
| 2087 | } | 2087 | } |
| @@ -2115,7 +2115,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) | |||
| 2115 | pol = &default_policy; | 2115 | pol = &default_policy; |
| 2116 | 2116 | ||
| 2117 | retry_cpuset: | 2117 | retry_cpuset: |
| 2118 | cpuset_mems_cookie = get_mems_allowed(); | 2118 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 2119 | 2119 | ||
| 2120 | /* | 2120 | /* |
| 2121 | * No reference counting needed for current->mempolicy | 2121 | * No reference counting needed for current->mempolicy |
| @@ -2128,7 +2128,7 @@ retry_cpuset: | |||
| 2128 | policy_zonelist(gfp, pol, numa_node_id()), | 2128 | policy_zonelist(gfp, pol, numa_node_id()), |
| 2129 | policy_nodemask(gfp, pol)); | 2129 | policy_nodemask(gfp, pol)); |
| 2130 | 2130 | ||
| 2131 | if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) | 2131 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) |
| 2132 | goto retry_cpuset; | 2132 | goto retry_cpuset; |
| 2133 | 2133 | ||
| 2134 | return page; | 2134 | return page; |
diff --git a/mm/mincore.c b/mm/mincore.c index 101623378fbf..725c80961048 100644 --- a/mm/mincore.c +++ b/mm/mincore.c | |||
| @@ -70,13 +70,21 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) | |||
| 70 | * any other file mapping (ie. marked !present and faulted in with | 70 | * any other file mapping (ie. marked !present and faulted in with |
| 71 | * tmpfs's .fault). So swapped out tmpfs mappings are tested here. | 71 | * tmpfs's .fault). So swapped out tmpfs mappings are tested here. |
| 72 | */ | 72 | */ |
| 73 | page = find_get_page(mapping, pgoff); | ||
| 74 | #ifdef CONFIG_SWAP | 73 | #ifdef CONFIG_SWAP |
| 75 | /* shmem/tmpfs may return swap: account for swapcache page too. */ | 74 | if (shmem_mapping(mapping)) { |
| 76 | if (radix_tree_exceptional_entry(page)) { | 75 | page = find_get_entry(mapping, pgoff); |
| 77 | swp_entry_t swap = radix_to_swp_entry(page); | 76 | /* |
| 78 | page = find_get_page(swap_address_space(swap), swap.val); | 77 | * shmem/tmpfs may return swap: account for swapcache |
| 79 | } | 78 | * page too. |
| 79 | */ | ||
| 80 | if (radix_tree_exceptional_entry(page)) { | ||
| 81 | swp_entry_t swp = radix_to_swp_entry(page); | ||
| 82 | page = find_get_page(swap_address_space(swp), swp.val); | ||
| 83 | } | ||
| 84 | } else | ||
| 85 | page = find_get_page(mapping, pgoff); | ||
| 86 | #else | ||
| 87 | page = find_get_page(mapping, pgoff); | ||
| 80 | #endif | 88 | #endif |
| 81 | if (page) { | 89 | if (page) { |
| 82 | present = PageUptodate(page); | 90 | present = PageUptodate(page); |
| @@ -405,7 +405,7 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) | |||
| 405 | } | 405 | } |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | void validate_mm(struct mm_struct *mm) | 408 | static void validate_mm(struct mm_struct *mm) |
| 409 | { | 409 | { |
| 410 | int bug = 0; | 410 | int bug = 0; |
| 411 | int i = 0; | 411 | int i = 0; |
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index f73f2987a852..04a9d94333a5 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
| @@ -334,7 +334,7 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, | |||
| 334 | return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0); | 334 | return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0); |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, | 337 | static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, |
| 338 | unsigned long align, unsigned long goal, | 338 | unsigned long align, unsigned long goal, |
| 339 | unsigned long limit) | 339 | unsigned long limit) |
| 340 | { | 340 | { |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3bac76ae4b30..979378deccbf 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -2739,7 +2739,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |||
| 2739 | return NULL; | 2739 | return NULL; |
| 2740 | 2740 | ||
| 2741 | retry_cpuset: | 2741 | retry_cpuset: |
| 2742 | cpuset_mems_cookie = get_mems_allowed(); | 2742 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 2743 | 2743 | ||
| 2744 | /* The preferred zone is used for statistics later */ | 2744 | /* The preferred zone is used for statistics later */ |
| 2745 | first_zones_zonelist(zonelist, high_zoneidx, | 2745 | first_zones_zonelist(zonelist, high_zoneidx, |
| @@ -2777,7 +2777,7 @@ out: | |||
| 2777 | * the mask is being updated. If a page allocation is about to fail, | 2777 | * the mask is being updated. If a page allocation is about to fail, |
| 2778 | * check if the cpuset changed during allocation and if so, retry. | 2778 | * check if the cpuset changed during allocation and if so, retry. |
| 2779 | */ | 2779 | */ |
| 2780 | if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) | 2780 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) |
| 2781 | goto retry_cpuset; | 2781 | goto retry_cpuset; |
| 2782 | 2782 | ||
| 2783 | memcg_kmem_commit_charge(page, memcg, order); | 2783 | memcg_kmem_commit_charge(page, memcg, order); |
| @@ -3045,9 +3045,9 @@ bool skip_free_areas_node(unsigned int flags, int nid) | |||
| 3045 | goto out; | 3045 | goto out; |
| 3046 | 3046 | ||
| 3047 | do { | 3047 | do { |
| 3048 | cpuset_mems_cookie = get_mems_allowed(); | 3048 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 3049 | ret = !node_isset(nid, cpuset_current_mems_allowed); | 3049 | ret = !node_isset(nid, cpuset_current_mems_allowed); |
| 3050 | } while (!put_mems_allowed(cpuset_mems_cookie)); | 3050 | } while (read_mems_allowed_retry(cpuset_mems_cookie)); |
| 3051 | out: | 3051 | out: |
| 3052 | return ret; | 3052 | return ret; |
| 3053 | } | 3053 | } |
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index cfd162882c00..3708264d2833 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c | |||
| @@ -175,7 +175,7 @@ static void free_page_cgroup(void *addr) | |||
| 175 | } | 175 | } |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | void __free_page_cgroup(unsigned long pfn) | 178 | static void __free_page_cgroup(unsigned long pfn) |
| 179 | { | 179 | { |
| 180 | struct mem_section *ms; | 180 | struct mem_section *ms; |
| 181 | struct page_cgroup *base; | 181 | struct page_cgroup *base; |
| @@ -188,9 +188,9 @@ void __free_page_cgroup(unsigned long pfn) | |||
| 188 | ms->page_cgroup = NULL; | 188 | ms->page_cgroup = NULL; |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | int __meminit online_page_cgroup(unsigned long start_pfn, | 191 | static int __meminit online_page_cgroup(unsigned long start_pfn, |
| 192 | unsigned long nr_pages, | 192 | unsigned long nr_pages, |
| 193 | int nid) | 193 | int nid) |
| 194 | { | 194 | { |
| 195 | unsigned long start, end, pfn; | 195 | unsigned long start, end, pfn; |
| 196 | int fail = 0; | 196 | int fail = 0; |
| @@ -223,8 +223,8 @@ int __meminit online_page_cgroup(unsigned long start_pfn, | |||
| 223 | return -ENOMEM; | 223 | return -ENOMEM; |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | int __meminit offline_page_cgroup(unsigned long start_pfn, | 226 | static int __meminit offline_page_cgroup(unsigned long start_pfn, |
| 227 | unsigned long nr_pages, int nid) | 227 | unsigned long nr_pages, int nid) |
| 228 | { | 228 | { |
| 229 | unsigned long start, end, pfn; | 229 | unsigned long start, end, pfn; |
| 230 | 230 | ||
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 3c5cf68566ec..cb79065c19e5 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c | |||
| @@ -412,7 +412,7 @@ SYSCALL_DEFINE6(process_vm_writev, pid_t, pid, | |||
| 412 | 412 | ||
| 413 | #ifdef CONFIG_COMPAT | 413 | #ifdef CONFIG_COMPAT |
| 414 | 414 | ||
| 415 | asmlinkage ssize_t | 415 | static ssize_t |
| 416 | compat_process_vm_rw(compat_pid_t pid, | 416 | compat_process_vm_rw(compat_pid_t pid, |
| 417 | const struct compat_iovec __user *lvec, | 417 | const struct compat_iovec __user *lvec, |
| 418 | unsigned long liovcnt, | 418 | unsigned long liovcnt, |
diff --git a/mm/readahead.c b/mm/readahead.c index 0de2360d65f3..29c5e1af5a0c 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
| @@ -179,7 +179,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
| 179 | rcu_read_lock(); | 179 | rcu_read_lock(); |
| 180 | page = radix_tree_lookup(&mapping->page_tree, page_offset); | 180 | page = radix_tree_lookup(&mapping->page_tree, page_offset); |
| 181 | rcu_read_unlock(); | 181 | rcu_read_unlock(); |
| 182 | if (page) | 182 | if (page && !radix_tree_exceptional_entry(page)) |
| 183 | continue; | 183 | continue; |
| 184 | 184 | ||
| 185 | page = page_cache_alloc_readahead(mapping); | 185 | page = page_cache_alloc_readahead(mapping); |
| @@ -233,14 +233,14 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
| 233 | return 0; | 233 | return 0; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | #define MAX_READAHEAD ((512*4096)/PAGE_CACHE_SIZE) | ||
| 236 | /* | 237 | /* |
| 237 | * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a | 238 | * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a |
| 238 | * sensible upper limit. | 239 | * sensible upper limit. |
| 239 | */ | 240 | */ |
| 240 | unsigned long max_sane_readahead(unsigned long nr) | 241 | unsigned long max_sane_readahead(unsigned long nr) |
| 241 | { | 242 | { |
| 242 | return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE) | 243 | return min(nr, MAX_READAHEAD); |
| 243 | + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2); | ||
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | /* | 246 | /* |
| @@ -347,7 +347,7 @@ static pgoff_t count_history_pages(struct address_space *mapping, | |||
| 347 | pgoff_t head; | 347 | pgoff_t head; |
| 348 | 348 | ||
| 349 | rcu_read_lock(); | 349 | rcu_read_lock(); |
| 350 | head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max); | 350 | head = page_cache_prev_hole(mapping, offset - 1, max); |
| 351 | rcu_read_unlock(); | 351 | rcu_read_unlock(); |
| 352 | 352 | ||
| 353 | return offset - 1 - head; | 353 | return offset - 1 - head; |
| @@ -427,7 +427,7 @@ ondemand_readahead(struct address_space *mapping, | |||
| 427 | pgoff_t start; | 427 | pgoff_t start; |
| 428 | 428 | ||
| 429 | rcu_read_lock(); | 429 | rcu_read_lock(); |
| 430 | start = radix_tree_next_hole(&mapping->page_tree, offset+1,max); | 430 | start = page_cache_next_hole(mapping, offset + 1, max); |
| 431 | rcu_read_unlock(); | 431 | rcu_read_unlock(); |
| 432 | 432 | ||
| 433 | if (!start || start - offset > max) | 433 | if (!start || start - offset > max) |
diff --git a/mm/shmem.c b/mm/shmem.c index 1f18c9d0d93e..a3ba988ec946 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -242,19 +242,17 @@ static int shmem_radix_tree_replace(struct address_space *mapping, | |||
| 242 | pgoff_t index, void *expected, void *replacement) | 242 | pgoff_t index, void *expected, void *replacement) |
| 243 | { | 243 | { |
| 244 | void **pslot; | 244 | void **pslot; |
| 245 | void *item = NULL; | 245 | void *item; |
| 246 | 246 | ||
| 247 | VM_BUG_ON(!expected); | 247 | VM_BUG_ON(!expected); |
| 248 | VM_BUG_ON(!replacement); | ||
| 248 | pslot = radix_tree_lookup_slot(&mapping->page_tree, index); | 249 | pslot = radix_tree_lookup_slot(&mapping->page_tree, index); |
| 249 | if (pslot) | 250 | if (!pslot) |
| 250 | item = radix_tree_deref_slot_protected(pslot, | 251 | return -ENOENT; |
| 251 | &mapping->tree_lock); | 252 | item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock); |
| 252 | if (item != expected) | 253 | if (item != expected) |
| 253 | return -ENOENT; | 254 | return -ENOENT; |
| 254 | if (replacement) | 255 | radix_tree_replace_slot(pslot, replacement); |
| 255 | radix_tree_replace_slot(pslot, replacement); | ||
| 256 | else | ||
| 257 | radix_tree_delete(&mapping->page_tree, index); | ||
| 258 | return 0; | 256 | return 0; |
| 259 | } | 257 | } |
| 260 | 258 | ||
| @@ -331,84 +329,20 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap) | |||
| 331 | } | 329 | } |
| 332 | 330 | ||
| 333 | /* | 331 | /* |
| 334 | * Like find_get_pages, but collecting swap entries as well as pages. | ||
| 335 | */ | ||
| 336 | static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping, | ||
| 337 | pgoff_t start, unsigned int nr_pages, | ||
| 338 | struct page **pages, pgoff_t *indices) | ||
| 339 | { | ||
| 340 | void **slot; | ||
| 341 | unsigned int ret = 0; | ||
| 342 | struct radix_tree_iter iter; | ||
| 343 | |||
| 344 | if (!nr_pages) | ||
| 345 | return 0; | ||
| 346 | |||
| 347 | rcu_read_lock(); | ||
| 348 | restart: | ||
| 349 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { | ||
| 350 | struct page *page; | ||
| 351 | repeat: | ||
| 352 | page = radix_tree_deref_slot(slot); | ||
| 353 | if (unlikely(!page)) | ||
| 354 | continue; | ||
| 355 | if (radix_tree_exception(page)) { | ||
| 356 | if (radix_tree_deref_retry(page)) | ||
| 357 | goto restart; | ||
| 358 | /* | ||
| 359 | * Otherwise, we must be storing a swap entry | ||
| 360 | * here as an exceptional entry: so return it | ||
| 361 | * without attempting to raise page count. | ||
| 362 | */ | ||
| 363 | goto export; | ||
| 364 | } | ||
| 365 | if (!page_cache_get_speculative(page)) | ||
| 366 | goto repeat; | ||
| 367 | |||
| 368 | /* Has the page moved? */ | ||
| 369 | if (unlikely(page != *slot)) { | ||
| 370 | page_cache_release(page); | ||
| 371 | goto repeat; | ||
| 372 | } | ||
| 373 | export: | ||
| 374 | indices[ret] = iter.index; | ||
| 375 | pages[ret] = page; | ||
| 376 | if (++ret == nr_pages) | ||
| 377 | break; | ||
| 378 | } | ||
| 379 | rcu_read_unlock(); | ||
| 380 | return ret; | ||
| 381 | } | ||
| 382 | |||
| 383 | /* | ||
| 384 | * Remove swap entry from radix tree, free the swap and its page cache. | 332 | * Remove swap entry from radix tree, free the swap and its page cache. |
| 385 | */ | 333 | */ |
| 386 | static int shmem_free_swap(struct address_space *mapping, | 334 | static int shmem_free_swap(struct address_space *mapping, |
| 387 | pgoff_t index, void *radswap) | 335 | pgoff_t index, void *radswap) |
| 388 | { | 336 | { |
| 389 | int error; | 337 | void *old; |
| 390 | 338 | ||
| 391 | spin_lock_irq(&mapping->tree_lock); | 339 | spin_lock_irq(&mapping->tree_lock); |
| 392 | error = shmem_radix_tree_replace(mapping, index, radswap, NULL); | 340 | old = radix_tree_delete_item(&mapping->page_tree, index, radswap); |
| 393 | spin_unlock_irq(&mapping->tree_lock); | 341 | spin_unlock_irq(&mapping->tree_lock); |
| 394 | if (!error) | 342 | if (old != radswap) |
| 395 | free_swap_and_cache(radix_to_swp_entry(radswap)); | 343 | return -ENOENT; |
| 396 | return error; | 344 | free_swap_and_cache(radix_to_swp_entry(radswap)); |
| 397 | } | 345 | return 0; |
| 398 | |||
| 399 | /* | ||
| 400 | * Pagevec may contain swap entries, so shuffle up pages before releasing. | ||
| 401 | */ | ||
| 402 | static void shmem_deswap_pagevec(struct pagevec *pvec) | ||
| 403 | { | ||
| 404 | int i, j; | ||
| 405 | |||
| 406 | for (i = 0, j = 0; i < pagevec_count(pvec); i++) { | ||
| 407 | struct page *page = pvec->pages[i]; | ||
| 408 | if (!radix_tree_exceptional_entry(page)) | ||
| 409 | pvec->pages[j++] = page; | ||
| 410 | } | ||
| 411 | pvec->nr = j; | ||
| 412 | } | 346 | } |
| 413 | 347 | ||
| 414 | /* | 348 | /* |
| @@ -429,12 +363,12 @@ void shmem_unlock_mapping(struct address_space *mapping) | |||
| 429 | * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it | 363 | * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it |
| 430 | * has finished, if it hits a row of PAGEVEC_SIZE swap entries. | 364 | * has finished, if it hits a row of PAGEVEC_SIZE swap entries. |
| 431 | */ | 365 | */ |
| 432 | pvec.nr = shmem_find_get_pages_and_swap(mapping, index, | 366 | pvec.nr = find_get_entries(mapping, index, |
| 433 | PAGEVEC_SIZE, pvec.pages, indices); | 367 | PAGEVEC_SIZE, pvec.pages, indices); |
| 434 | if (!pvec.nr) | 368 | if (!pvec.nr) |
| 435 | break; | 369 | break; |
| 436 | index = indices[pvec.nr - 1] + 1; | 370 | index = indices[pvec.nr - 1] + 1; |
| 437 | shmem_deswap_pagevec(&pvec); | 371 | pagevec_remove_exceptionals(&pvec); |
| 438 | check_move_unevictable_pages(pvec.pages, pvec.nr); | 372 | check_move_unevictable_pages(pvec.pages, pvec.nr); |
| 439 | pagevec_release(&pvec); | 373 | pagevec_release(&pvec); |
| 440 | cond_resched(); | 374 | cond_resched(); |
| @@ -466,9 +400,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
| 466 | pagevec_init(&pvec, 0); | 400 | pagevec_init(&pvec, 0); |
| 467 | index = start; | 401 | index = start; |
| 468 | while (index < end) { | 402 | while (index < end) { |
| 469 | pvec.nr = shmem_find_get_pages_and_swap(mapping, index, | 403 | pvec.nr = find_get_entries(mapping, index, |
| 470 | min(end - index, (pgoff_t)PAGEVEC_SIZE), | 404 | min(end - index, (pgoff_t)PAGEVEC_SIZE), |
| 471 | pvec.pages, indices); | 405 | pvec.pages, indices); |
| 472 | if (!pvec.nr) | 406 | if (!pvec.nr) |
| 473 | break; | 407 | break; |
| 474 | mem_cgroup_uncharge_start(); | 408 | mem_cgroup_uncharge_start(); |
| @@ -497,7 +431,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
| 497 | } | 431 | } |
| 498 | unlock_page(page); | 432 | unlock_page(page); |
| 499 | } | 433 | } |
| 500 | shmem_deswap_pagevec(&pvec); | 434 | pagevec_remove_exceptionals(&pvec); |
| 501 | pagevec_release(&pvec); | 435 | pagevec_release(&pvec); |
| 502 | mem_cgroup_uncharge_end(); | 436 | mem_cgroup_uncharge_end(); |
| 503 | cond_resched(); | 437 | cond_resched(); |
| @@ -535,9 +469,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
| 535 | index = start; | 469 | index = start; |
| 536 | for ( ; ; ) { | 470 | for ( ; ; ) { |
| 537 | cond_resched(); | 471 | cond_resched(); |
| 538 | pvec.nr = shmem_find_get_pages_and_swap(mapping, index, | 472 | |
| 473 | pvec.nr = find_get_entries(mapping, index, | ||
| 539 | min(end - index, (pgoff_t)PAGEVEC_SIZE), | 474 | min(end - index, (pgoff_t)PAGEVEC_SIZE), |
| 540 | pvec.pages, indices); | 475 | pvec.pages, indices); |
| 541 | if (!pvec.nr) { | 476 | if (!pvec.nr) { |
| 542 | if (index == start || unfalloc) | 477 | if (index == start || unfalloc) |
| 543 | break; | 478 | break; |
| @@ -545,7 +480,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
| 545 | continue; | 480 | continue; |
| 546 | } | 481 | } |
| 547 | if ((index == start || unfalloc) && indices[0] >= end) { | 482 | if ((index == start || unfalloc) && indices[0] >= end) { |
| 548 | shmem_deswap_pagevec(&pvec); | 483 | pagevec_remove_exceptionals(&pvec); |
| 549 | pagevec_release(&pvec); | 484 | pagevec_release(&pvec); |
| 550 | break; | 485 | break; |
| 551 | } | 486 | } |
| @@ -574,7 +509,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, | |||
| 574 | } | 509 | } |
| 575 | unlock_page(page); | 510 | unlock_page(page); |
| 576 | } | 511 | } |
| 577 | shmem_deswap_pagevec(&pvec); | 512 | pagevec_remove_exceptionals(&pvec); |
| 578 | pagevec_release(&pvec); | 513 | pagevec_release(&pvec); |
| 579 | mem_cgroup_uncharge_end(); | 514 | mem_cgroup_uncharge_end(); |
| 580 | index++; | 515 | index++; |
| @@ -1080,7 +1015,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, | |||
| 1080 | return -EFBIG; | 1015 | return -EFBIG; |
| 1081 | repeat: | 1016 | repeat: |
| 1082 | swap.val = 0; | 1017 | swap.val = 0; |
| 1083 | page = find_lock_page(mapping, index); | 1018 | page = find_lock_entry(mapping, index); |
| 1084 | if (radix_tree_exceptional_entry(page)) { | 1019 | if (radix_tree_exceptional_entry(page)) { |
| 1085 | swap = radix_to_swp_entry(page); | 1020 | swap = radix_to_swp_entry(page); |
| 1086 | page = NULL; | 1021 | page = NULL; |
| @@ -1417,6 +1352,11 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode | |||
| 1417 | return inode; | 1352 | return inode; |
| 1418 | } | 1353 | } |
| 1419 | 1354 | ||
| 1355 | bool shmem_mapping(struct address_space *mapping) | ||
| 1356 | { | ||
| 1357 | return mapping->backing_dev_info == &shmem_backing_dev_info; | ||
| 1358 | } | ||
| 1359 | |||
| 1420 | #ifdef CONFIG_TMPFS | 1360 | #ifdef CONFIG_TMPFS |
| 1421 | static const struct inode_operations shmem_symlink_inode_operations; | 1361 | static const struct inode_operations shmem_symlink_inode_operations; |
| 1422 | static const struct inode_operations shmem_short_symlink_operations; | 1362 | static const struct inode_operations shmem_short_symlink_operations; |
| @@ -1729,7 +1669,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping, | |||
| 1729 | pagevec_init(&pvec, 0); | 1669 | pagevec_init(&pvec, 0); |
| 1730 | pvec.nr = 1; /* start small: we may be there already */ | 1670 | pvec.nr = 1; /* start small: we may be there already */ |
| 1731 | while (!done) { | 1671 | while (!done) { |
| 1732 | pvec.nr = shmem_find_get_pages_and_swap(mapping, index, | 1672 | pvec.nr = find_get_entries(mapping, index, |
| 1733 | pvec.nr, pvec.pages, indices); | 1673 | pvec.nr, pvec.pages, indices); |
| 1734 | if (!pvec.nr) { | 1674 | if (!pvec.nr) { |
| 1735 | if (whence == SEEK_DATA) | 1675 | if (whence == SEEK_DATA) |
| @@ -1756,7 +1696,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping, | |||
| 1756 | break; | 1696 | break; |
| 1757 | } | 1697 | } |
| 1758 | } | 1698 | } |
| 1759 | shmem_deswap_pagevec(&pvec); | 1699 | pagevec_remove_exceptionals(&pvec); |
| 1760 | pagevec_release(&pvec); | 1700 | pagevec_release(&pvec); |
| 1761 | pvec.nr = PAGEVEC_SIZE; | 1701 | pvec.nr = PAGEVEC_SIZE; |
| 1762 | cond_resched(); | 1702 | cond_resched(); |
| @@ -3073,7 +3073,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) | |||
| 3073 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); | 3073 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); |
| 3074 | 3074 | ||
| 3075 | retry_cpuset: | 3075 | retry_cpuset: |
| 3076 | cpuset_mems_cookie = get_mems_allowed(); | 3076 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 3077 | zonelist = node_zonelist(slab_node(), flags); | 3077 | zonelist = node_zonelist(slab_node(), flags); |
| 3078 | 3078 | ||
| 3079 | retry: | 3079 | retry: |
| @@ -3131,7 +3131,7 @@ retry: | |||
| 3131 | } | 3131 | } |
| 3132 | } | 3132 | } |
| 3133 | 3133 | ||
| 3134 | if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj)) | 3134 | if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) |
| 3135 | goto retry_cpuset; | 3135 | goto retry_cpuset; |
| 3136 | return obj; | 3136 | return obj; |
| 3137 | } | 3137 | } |
| @@ -1684,7 +1684,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags, | |||
| 1684 | return NULL; | 1684 | return NULL; |
| 1685 | 1685 | ||
| 1686 | do { | 1686 | do { |
| 1687 | cpuset_mems_cookie = get_mems_allowed(); | 1687 | cpuset_mems_cookie = read_mems_allowed_begin(); |
| 1688 | zonelist = node_zonelist(slab_node(), flags); | 1688 | zonelist = node_zonelist(slab_node(), flags); |
| 1689 | for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { | 1689 | for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { |
| 1690 | struct kmem_cache_node *n; | 1690 | struct kmem_cache_node *n; |
| @@ -1696,19 +1696,17 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags, | |||
| 1696 | object = get_partial_node(s, n, c, flags); | 1696 | object = get_partial_node(s, n, c, flags); |
| 1697 | if (object) { | 1697 | if (object) { |
| 1698 | /* | 1698 | /* |
| 1699 | * Return the object even if | 1699 | * Don't check read_mems_allowed_retry() |
| 1700 | * put_mems_allowed indicated that | 1700 | * here - if mems_allowed was updated in |
| 1701 | * the cpuset mems_allowed was | 1701 | * parallel, that was a harmless race |
| 1702 | * updated in parallel. It's a | 1702 | * between allocation and the cpuset |
| 1703 | * harmless race between the alloc | 1703 | * update |
| 1704 | * and the cpuset update. | ||
| 1705 | */ | 1704 | */ |
| 1706 | put_mems_allowed(cpuset_mems_cookie); | ||
| 1707 | return object; | 1705 | return object; |
| 1708 | } | 1706 | } |
| 1709 | } | 1707 | } |
| 1710 | } | 1708 | } |
| 1711 | } while (!put_mems_allowed(cpuset_mems_cookie)); | 1709 | } while (read_mems_allowed_retry(cpuset_mems_cookie)); |
| 1712 | #endif | 1710 | #endif |
| 1713 | return NULL; | 1711 | return NULL; |
| 1714 | } | 1712 | } |
| @@ -3239,8 +3237,9 @@ int __kmem_cache_shutdown(struct kmem_cache *s) | |||
| 3239 | 3237 | ||
| 3240 | if (!rc) { | 3238 | if (!rc) { |
| 3241 | /* | 3239 | /* |
| 3242 | * We do the same lock strategy around sysfs_slab_add, see | 3240 | * Since slab_attr_store may take the slab_mutex, we should |
| 3243 | * __kmem_cache_create. Because this is pretty much the last | 3241 | * release the lock while removing the sysfs entry in order to |
| 3242 | * avoid a deadlock. Because this is pretty much the last | ||
| 3244 | * operation we do and the lock will be released shortly after | 3243 | * operation we do and the lock will be released shortly after |
| 3245 | * that in slab_common.c, we could just move sysfs_slab_remove | 3244 | * that in slab_common.c, we could just move sysfs_slab_remove |
| 3246 | * to a later point in common code. We should do that when we | 3245 | * to a later point in common code. We should do that when we |
| @@ -3780,10 +3779,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) | |||
| 3780 | return 0; | 3779 | return 0; |
| 3781 | 3780 | ||
| 3782 | memcg_propagate_slab_attrs(s); | 3781 | memcg_propagate_slab_attrs(s); |
| 3783 | mutex_unlock(&slab_mutex); | ||
| 3784 | err = sysfs_slab_add(s); | 3782 | err = sysfs_slab_add(s); |
| 3785 | mutex_lock(&slab_mutex); | ||
| 3786 | |||
| 3787 | if (err) | 3783 | if (err) |
| 3788 | kmem_cache_close(s); | 3784 | kmem_cache_close(s); |
| 3789 | 3785 | ||
| @@ -574,6 +574,8 @@ void mark_page_accessed(struct page *page) | |||
| 574 | else | 574 | else |
| 575 | __lru_cache_activate_page(page); | 575 | __lru_cache_activate_page(page); |
| 576 | ClearPageReferenced(page); | 576 | ClearPageReferenced(page); |
| 577 | if (page_is_file_cache(page)) | ||
| 578 | workingset_activation(page); | ||
| 577 | } else if (!PageReferenced(page)) { | 579 | } else if (!PageReferenced(page)) { |
| 578 | SetPageReferenced(page); | 580 | SetPageReferenced(page); |
| 579 | } | 581 | } |
| @@ -948,6 +950,57 @@ void __pagevec_lru_add(struct pagevec *pvec) | |||
| 948 | EXPORT_SYMBOL(__pagevec_lru_add); | 950 | EXPORT_SYMBOL(__pagevec_lru_add); |
| 949 | 951 | ||
| 950 | /** | 952 | /** |
| 953 | * pagevec_lookup_entries - gang pagecache lookup | ||
| 954 | * @pvec: Where the resulting entries are placed | ||
| 955 | * @mapping: The address_space to search | ||
| 956 | * @start: The starting entry index | ||
| 957 | * @nr_entries: The maximum number of entries | ||
| 958 | * @indices: The cache indices corresponding to the entries in @pvec | ||
| 959 | * | ||
| 960 | * pagevec_lookup_entries() will search for and return a group of up | ||
| 961 | * to @nr_entries pages and shadow entries in the mapping. All | ||
| 962 | * entries are placed in @pvec. pagevec_lookup_entries() takes a | ||
| 963 | * reference against actual pages in @pvec. | ||
| 964 | * | ||
| 965 | * The search returns a group of mapping-contiguous entries with | ||
| 966 | * ascending indexes. There may be holes in the indices due to | ||
| 967 | * not-present entries. | ||
| 968 | * | ||
| 969 | * pagevec_lookup_entries() returns the number of entries which were | ||
| 970 | * found. | ||
| 971 | */ | ||
| 972 | unsigned pagevec_lookup_entries(struct pagevec *pvec, | ||
| 973 | struct address_space *mapping, | ||
| 974 | pgoff_t start, unsigned nr_pages, | ||
| 975 | pgoff_t *indices) | ||
| 976 | { | ||
| 977 | pvec->nr = find_get_entries(mapping, start, nr_pages, | ||
| 978 | pvec->pages, indices); | ||
| 979 | return pagevec_count(pvec); | ||
| 980 | } | ||
| 981 | |||
| 982 | /** | ||
| 983 | * pagevec_remove_exceptionals - pagevec exceptionals pruning | ||
| 984 | * @pvec: The pagevec to prune | ||
| 985 | * | ||
| 986 | * pagevec_lookup_entries() fills both pages and exceptional radix | ||
| 987 | * tree entries into the pagevec. This function prunes all | ||
| 988 | * exceptionals from @pvec without leaving holes, so that it can be | ||
| 989 | * passed on to page-only pagevec operations. | ||
| 990 | */ | ||
| 991 | void pagevec_remove_exceptionals(struct pagevec *pvec) | ||
| 992 | { | ||
| 993 | int i, j; | ||
| 994 | |||
| 995 | for (i = 0, j = 0; i < pagevec_count(pvec); i++) { | ||
| 996 | struct page *page = pvec->pages[i]; | ||
| 997 | if (!radix_tree_exceptional_entry(page)) | ||
| 998 | pvec->pages[j++] = page; | ||
| 999 | } | ||
| 1000 | pvec->nr = j; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | /** | ||
| 951 | * pagevec_lookup - gang pagecache lookup | 1004 | * pagevec_lookup - gang pagecache lookup |
| 952 | * @pvec: Where the resulting pages are placed | 1005 | * @pvec: Where the resulting pages are placed |
| 953 | * @mapping: The address_space to search | 1006 | * @mapping: The address_space to search |
diff --git a/mm/truncate.c b/mm/truncate.c index 353b683afd6e..e5cc39ab0751 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
| @@ -22,6 +22,45 @@ | |||
| 22 | #include <linux/cleancache.h> | 22 | #include <linux/cleancache.h> |
| 23 | #include "internal.h" | 23 | #include "internal.h" |
| 24 | 24 | ||
| 25 | static void clear_exceptional_entry(struct address_space *mapping, | ||
| 26 | pgoff_t index, void *entry) | ||
| 27 | { | ||
| 28 | struct radix_tree_node *node; | ||
| 29 | void **slot; | ||
| 30 | |||
| 31 | /* Handled by shmem itself */ | ||
| 32 | if (shmem_mapping(mapping)) | ||
| 33 | return; | ||
| 34 | |||
| 35 | spin_lock_irq(&mapping->tree_lock); | ||
| 36 | /* | ||
| 37 | * Regular page slots are stabilized by the page lock even | ||
| 38 | * without the tree itself locked. These unlocked entries | ||
| 39 | * need verification under the tree lock. | ||
| 40 | */ | ||
| 41 | if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) | ||
| 42 | goto unlock; | ||
| 43 | if (*slot != entry) | ||
| 44 | goto unlock; | ||
| 45 | radix_tree_replace_slot(slot, NULL); | ||
| 46 | mapping->nrshadows--; | ||
| 47 | if (!node) | ||
| 48 | goto unlock; | ||
| 49 | workingset_node_shadows_dec(node); | ||
| 50 | /* | ||
| 51 | * Don't track node without shadow entries. | ||
| 52 | * | ||
| 53 | * Avoid acquiring the list_lru lock if already untracked. | ||
| 54 | * The list_empty() test is safe as node->private_list is | ||
| 55 | * protected by mapping->tree_lock. | ||
| 56 | */ | ||
| 57 | if (!workingset_node_shadows(node) && | ||
| 58 | !list_empty(&node->private_list)) | ||
| 59 | list_lru_del(&workingset_shadow_nodes, &node->private_list); | ||
| 60 | __radix_tree_delete_node(&mapping->page_tree, node); | ||
| 61 | unlock: | ||
| 62 | spin_unlock_irq(&mapping->tree_lock); | ||
| 63 | } | ||
| 25 | 64 | ||
| 26 | /** | 65 | /** |
| 27 | * do_invalidatepage - invalidate part or all of a page | 66 | * do_invalidatepage - invalidate part or all of a page |
| @@ -208,11 +247,12 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
| 208 | unsigned int partial_start; /* inclusive */ | 247 | unsigned int partial_start; /* inclusive */ |
| 209 | unsigned int partial_end; /* exclusive */ | 248 | unsigned int partial_end; /* exclusive */ |
| 210 | struct pagevec pvec; | 249 | struct pagevec pvec; |
| 250 | pgoff_t indices[PAGEVEC_SIZE]; | ||
| 211 | pgoff_t index; | 251 | pgoff_t index; |
| 212 | int i; | 252 | int i; |
| 213 | 253 | ||
| 214 | cleancache_invalidate_inode(mapping); | 254 | cleancache_invalidate_inode(mapping); |
| 215 | if (mapping->nrpages == 0) | 255 | if (mapping->nrpages == 0 && mapping->nrshadows == 0) |
| 216 | return; | 256 | return; |
| 217 | 257 | ||
| 218 | /* Offsets within partial pages */ | 258 | /* Offsets within partial pages */ |
| @@ -238,17 +278,23 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
| 238 | 278 | ||
| 239 | pagevec_init(&pvec, 0); | 279 | pagevec_init(&pvec, 0); |
| 240 | index = start; | 280 | index = start; |
| 241 | while (index < end && pagevec_lookup(&pvec, mapping, index, | 281 | while (index < end && pagevec_lookup_entries(&pvec, mapping, index, |
| 242 | min(end - index, (pgoff_t)PAGEVEC_SIZE))) { | 282 | min(end - index, (pgoff_t)PAGEVEC_SIZE), |
| 283 | indices)) { | ||
| 243 | mem_cgroup_uncharge_start(); | 284 | mem_cgroup_uncharge_start(); |
| 244 | for (i = 0; i < pagevec_count(&pvec); i++) { | 285 | for (i = 0; i < pagevec_count(&pvec); i++) { |
| 245 | struct page *page = pvec.pages[i]; | 286 | struct page *page = pvec.pages[i]; |
| 246 | 287 | ||
| 247 | /* We rely upon deletion not changing page->index */ | 288 | /* We rely upon deletion not changing page->index */ |
| 248 | index = page->index; | 289 | index = indices[i]; |
| 249 | if (index >= end) | 290 | if (index >= end) |
| 250 | break; | 291 | break; |
| 251 | 292 | ||
| 293 | if (radix_tree_exceptional_entry(page)) { | ||
| 294 | clear_exceptional_entry(mapping, index, page); | ||
| 295 | continue; | ||
| 296 | } | ||
| 297 | |||
| 252 | if (!trylock_page(page)) | 298 | if (!trylock_page(page)) |
| 253 | continue; | 299 | continue; |
| 254 | WARN_ON(page->index != index); | 300 | WARN_ON(page->index != index); |
| @@ -259,6 +305,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
| 259 | truncate_inode_page(mapping, page); | 305 | truncate_inode_page(mapping, page); |
| 260 | unlock_page(page); | 306 | unlock_page(page); |
| 261 | } | 307 | } |
| 308 | pagevec_remove_exceptionals(&pvec); | ||
| 262 | pagevec_release(&pvec); | 309 | pagevec_release(&pvec); |
| 263 | mem_cgroup_uncharge_end(); | 310 | mem_cgroup_uncharge_end(); |
| 264 | cond_resched(); | 311 | cond_resched(); |
| @@ -307,14 +354,16 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
| 307 | index = start; | 354 | index = start; |
| 308 | for ( ; ; ) { | 355 | for ( ; ; ) { |
| 309 | cond_resched(); | 356 | cond_resched(); |
| 310 | if (!pagevec_lookup(&pvec, mapping, index, | 357 | if (!pagevec_lookup_entries(&pvec, mapping, index, |
| 311 | min(end - index, (pgoff_t)PAGEVEC_SIZE))) { | 358 | min(end - index, (pgoff_t)PAGEVEC_SIZE), |
| 359 | indices)) { | ||
| 312 | if (index == start) | 360 | if (index == start) |
| 313 | break; | 361 | break; |
| 314 | index = start; | 362 | index = start; |
| 315 | continue; | 363 | continue; |
| 316 | } | 364 | } |
| 317 | if (index == start && pvec.pages[0]->index >= end) { | 365 | if (index == start && indices[0] >= end) { |
| 366 | pagevec_remove_exceptionals(&pvec); | ||
| 318 | pagevec_release(&pvec); | 367 | pagevec_release(&pvec); |
| 319 | break; | 368 | break; |
| 320 | } | 369 | } |
| @@ -323,16 +372,22 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
| 323 | struct page *page = pvec.pages[i]; | 372 | struct page *page = pvec.pages[i]; |
| 324 | 373 | ||
| 325 | /* We rely upon deletion not changing page->index */ | 374 | /* We rely upon deletion not changing page->index */ |
| 326 | index = page->index; | 375 | index = indices[i]; |
| 327 | if (index >= end) | 376 | if (index >= end) |
| 328 | break; | 377 | break; |
| 329 | 378 | ||
| 379 | if (radix_tree_exceptional_entry(page)) { | ||
| 380 | clear_exceptional_entry(mapping, index, page); | ||
| 381 | continue; | ||
| 382 | } | ||
| 383 | |||
| 330 | lock_page(page); | 384 | lock_page(page); |
| 331 | WARN_ON(page->index != index); | 385 | WARN_ON(page->index != index); |
| 332 | wait_on_page_writeback(page); | 386 | wait_on_page_writeback(page); |
| 333 | truncate_inode_page(mapping, page); | 387 | truncate_inode_page(mapping, page); |
| 334 | unlock_page(page); | 388 | unlock_page(page); |
| 335 | } | 389 | } |
| 390 | pagevec_remove_exceptionals(&pvec); | ||
| 336 | pagevec_release(&pvec); | 391 | pagevec_release(&pvec); |
| 337 | mem_cgroup_uncharge_end(); | 392 | mem_cgroup_uncharge_end(); |
| 338 | index++; | 393 | index++; |
| @@ -360,6 +415,53 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) | |||
| 360 | EXPORT_SYMBOL(truncate_inode_pages); | 415 | EXPORT_SYMBOL(truncate_inode_pages); |
| 361 | 416 | ||
| 362 | /** | 417 | /** |
| 418 | * truncate_inode_pages_final - truncate *all* pages before inode dies | ||
| 419 | * @mapping: mapping to truncate | ||
| 420 | * | ||
| 421 | * Called under (and serialized by) inode->i_mutex. | ||
| 422 | * | ||
| 423 | * Filesystems have to use this in the .evict_inode path to inform the | ||
| 424 | * VM that this is the final truncate and the inode is going away. | ||
| 425 | */ | ||
| 426 | void truncate_inode_pages_final(struct address_space *mapping) | ||
| 427 | { | ||
| 428 | unsigned long nrshadows; | ||
| 429 | unsigned long nrpages; | ||
| 430 | |||
| 431 | /* | ||
| 432 | * Page reclaim can not participate in regular inode lifetime | ||
| 433 | * management (can't call iput()) and thus can race with the | ||
| 434 | * inode teardown. Tell it when the address space is exiting, | ||
| 435 | * so that it does not install eviction information after the | ||
| 436 | * final truncate has begun. | ||
| 437 | */ | ||
| 438 | mapping_set_exiting(mapping); | ||
| 439 | |||
| 440 | /* | ||
| 441 | * When reclaim installs eviction entries, it increases | ||
| 442 | * nrshadows first, then decreases nrpages. Make sure we see | ||
| 443 | * this in the right order or we might miss an entry. | ||
| 444 | */ | ||
| 445 | nrpages = mapping->nrpages; | ||
| 446 | smp_rmb(); | ||
| 447 | nrshadows = mapping->nrshadows; | ||
| 448 | |||
| 449 | if (nrpages || nrshadows) { | ||
| 450 | /* | ||
| 451 | * As truncation uses a lockless tree lookup, cycle | ||
| 452 | * the tree lock to make sure any ongoing tree | ||
| 453 | * modification that does not see AS_EXITING is | ||
| 454 | * completed before starting the final truncate. | ||
| 455 | */ | ||
| 456 | spin_lock_irq(&mapping->tree_lock); | ||
| 457 | spin_unlock_irq(&mapping->tree_lock); | ||
| 458 | |||
| 459 | truncate_inode_pages(mapping, 0); | ||
| 460 | } | ||
| 461 | } | ||
| 462 | EXPORT_SYMBOL(truncate_inode_pages_final); | ||
| 463 | |||
| 464 | /** | ||
| 363 | * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode | 465 | * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode |
| 364 | * @mapping: the address_space which holds the pages to invalidate | 466 | * @mapping: the address_space which holds the pages to invalidate |
| 365 | * @start: the offset 'from' which to invalidate | 467 | * @start: the offset 'from' which to invalidate |
| @@ -375,6 +477,7 @@ EXPORT_SYMBOL(truncate_inode_pages); | |||
| 375 | unsigned long invalidate_mapping_pages(struct address_space *mapping, | 477 | unsigned long invalidate_mapping_pages(struct address_space *mapping, |
| 376 | pgoff_t start, pgoff_t end) | 478 | pgoff_t start, pgoff_t end) |
| 377 | { | 479 | { |
| 480 | pgoff_t indices[PAGEVEC_SIZE]; | ||
| 378 | struct pagevec pvec; | 481 | struct pagevec pvec; |
| 379 | pgoff_t index = start; | 482 | pgoff_t index = start; |
| 380 | unsigned long ret; | 483 | unsigned long ret; |
| @@ -390,17 +493,23 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, | |||
| 390 | */ | 493 | */ |
| 391 | 494 | ||
| 392 | pagevec_init(&pvec, 0); | 495 | pagevec_init(&pvec, 0); |
| 393 | while (index <= end && pagevec_lookup(&pvec, mapping, index, | 496 | while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, |
| 394 | min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { | 497 | min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, |
| 498 | indices)) { | ||
| 395 | mem_cgroup_uncharge_start(); | 499 | mem_cgroup_uncharge_start(); |
| 396 | for (i = 0; i < pagevec_count(&pvec); i++) { | 500 | for (i = 0; i < pagevec_count(&pvec); i++) { |
| 397 | struct page *page = pvec.pages[i]; | 501 | struct page *page = pvec.pages[i]; |
| 398 | 502 | ||
| 399 | /* We rely upon deletion not changing page->index */ | 503 | /* We rely upon deletion not changing page->index */ |
| 400 | index = page->index; | 504 | index = indices[i]; |
| 401 | if (index > end) | 505 | if (index > end) |
| 402 | break; | 506 | break; |
| 403 | 507 | ||
| 508 | if (radix_tree_exceptional_entry(page)) { | ||
| 509 | clear_exceptional_entry(mapping, index, page); | ||
| 510 | continue; | ||
| 511 | } | ||
| 512 | |||
| 404 | if (!trylock_page(page)) | 513 | if (!trylock_page(page)) |
| 405 | continue; | 514 | continue; |
| 406 | WARN_ON(page->index != index); | 515 | WARN_ON(page->index != index); |
| @@ -414,6 +523,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, | |||
| 414 | deactivate_page(page); | 523 | deactivate_page(page); |
| 415 | count += ret; | 524 | count += ret; |
| 416 | } | 525 | } |
| 526 | pagevec_remove_exceptionals(&pvec); | ||
| 417 | pagevec_release(&pvec); | 527 | pagevec_release(&pvec); |
| 418 | mem_cgroup_uncharge_end(); | 528 | mem_cgroup_uncharge_end(); |
| 419 | cond_resched(); | 529 | cond_resched(); |
| @@ -444,7 +554,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page) | |||
| 444 | goto failed; | 554 | goto failed; |
| 445 | 555 | ||
| 446 | BUG_ON(page_has_private(page)); | 556 | BUG_ON(page_has_private(page)); |
| 447 | __delete_from_page_cache(page); | 557 | __delete_from_page_cache(page, NULL); |
| 448 | spin_unlock_irq(&mapping->tree_lock); | 558 | spin_unlock_irq(&mapping->tree_lock); |
| 449 | mem_cgroup_uncharge_cache_page(page); | 559 | mem_cgroup_uncharge_cache_page(page); |
| 450 | 560 | ||
| @@ -481,6 +591,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page) | |||
| 481 | int invalidate_inode_pages2_range(struct address_space *mapping, | 591 | int invalidate_inode_pages2_range(struct address_space *mapping, |
| 482 | pgoff_t start, pgoff_t end) | 592 | pgoff_t start, pgoff_t end) |
| 483 | { | 593 | { |
| 594 | pgoff_t indices[PAGEVEC_SIZE]; | ||
| 484 | struct pagevec pvec; | 595 | struct pagevec pvec; |
| 485 | pgoff_t index; | 596 | pgoff_t index; |
| 486 | int i; | 597 | int i; |
| @@ -491,17 +602,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping, | |||
| 491 | cleancache_invalidate_inode(mapping); | 602 | cleancache_invalidate_inode(mapping); |
| 492 | pagevec_init(&pvec, 0); | 603 | pagevec_init(&pvec, 0); |
| 493 | index = start; | 604 | index = start; |
| 494 | while (index <= end && pagevec_lookup(&pvec, mapping, index, | 605 | while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, |
| 495 | min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { | 606 | min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, |
| 607 | indices)) { | ||
| 496 | mem_cgroup_uncharge_start(); | 608 | mem_cgroup_uncharge_start(); |
| 497 | for (i = 0; i < pagevec_count(&pvec); i++) { | 609 | for (i = 0; i < pagevec_count(&pvec); i++) { |
| 498 | struct page *page = pvec.pages[i]; | 610 | struct page *page = pvec.pages[i]; |
| 499 | 611 | ||
| 500 | /* We rely upon deletion not changing page->index */ | 612 | /* We rely upon deletion not changing page->index */ |
| 501 | index = page->index; | 613 | index = indices[i]; |
| 502 | if (index > end) | 614 | if (index > end) |
| 503 | break; | 615 | break; |
| 504 | 616 | ||
| 617 | if (radix_tree_exceptional_entry(page)) { | ||
| 618 | clear_exceptional_entry(mapping, index, page); | ||
| 619 | continue; | ||
| 620 | } | ||
| 621 | |||
| 505 | lock_page(page); | 622 | lock_page(page); |
| 506 | WARN_ON(page->index != index); | 623 | WARN_ON(page->index != index); |
| 507 | if (page->mapping != mapping) { | 624 | if (page->mapping != mapping) { |
| @@ -539,6 +656,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, | |||
| 539 | ret = ret2; | 656 | ret = ret2; |
| 540 | unlock_page(page); | 657 | unlock_page(page); |
| 541 | } | 658 | } |
| 659 | pagevec_remove_exceptionals(&pvec); | ||
| 542 | pagevec_release(&pvec); | 660 | pagevec_release(&pvec); |
| 543 | mem_cgroup_uncharge_end(); | 661 | mem_cgroup_uncharge_end(); |
| 544 | cond_resched(); | 662 | cond_resched(); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index a9c74b409681..1f56a80a7c41 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -224,15 +224,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, | |||
| 224 | unsigned long freed = 0; | 224 | unsigned long freed = 0; |
| 225 | unsigned long long delta; | 225 | unsigned long long delta; |
| 226 | long total_scan; | 226 | long total_scan; |
| 227 | long max_pass; | 227 | long freeable; |
| 228 | long nr; | 228 | long nr; |
| 229 | long new_nr; | 229 | long new_nr; |
| 230 | int nid = shrinkctl->nid; | 230 | int nid = shrinkctl->nid; |
| 231 | long batch_size = shrinker->batch ? shrinker->batch | 231 | long batch_size = shrinker->batch ? shrinker->batch |
| 232 | : SHRINK_BATCH; | 232 | : SHRINK_BATCH; |
| 233 | 233 | ||
| 234 | max_pass = shrinker->count_objects(shrinker, shrinkctl); | 234 | freeable = shrinker->count_objects(shrinker, shrinkctl); |
| 235 | if (max_pass == 0) | 235 | if (freeable == 0) |
| 236 | return 0; | 236 | return 0; |
| 237 | 237 | ||
| 238 | /* | 238 | /* |
| @@ -244,14 +244,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, | |||
| 244 | 244 | ||
| 245 | total_scan = nr; | 245 | total_scan = nr; |
| 246 | delta = (4 * nr_pages_scanned) / shrinker->seeks; | 246 | delta = (4 * nr_pages_scanned) / shrinker->seeks; |
| 247 | delta *= max_pass; | 247 | delta *= freeable; |
| 248 | do_div(delta, lru_pages + 1); | 248 | do_div(delta, lru_pages + 1); |
| 249 | total_scan += delta; | 249 | total_scan += delta; |
| 250 | if (total_scan < 0) { | 250 | if (total_scan < 0) { |
| 251 | printk(KERN_ERR | 251 | printk(KERN_ERR |
| 252 | "shrink_slab: %pF negative objects to delete nr=%ld\n", | 252 | "shrink_slab: %pF negative objects to delete nr=%ld\n", |
| 253 | shrinker->scan_objects, total_scan); | 253 | shrinker->scan_objects, total_scan); |
| 254 | total_scan = max_pass; | 254 | total_scan = freeable; |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | /* | 257 | /* |
| @@ -260,26 +260,26 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, | |||
| 260 | * shrinkers to return -1 all the time. This results in a large | 260 | * shrinkers to return -1 all the time. This results in a large |
| 261 | * nr being built up so when a shrink that can do some work | 261 | * nr being built up so when a shrink that can do some work |
| 262 | * comes along it empties the entire cache due to nr >>> | 262 | * comes along it empties the entire cache due to nr >>> |
| 263 | * max_pass. This is bad for sustaining a working set in | 263 | * freeable. This is bad for sustaining a working set in |
| 264 | * memory. | 264 | * memory. |
| 265 | * | 265 | * |
| 266 | * Hence only allow the shrinker to scan the entire cache when | 266 | * Hence only allow the shrinker to scan the entire cache when |
| 267 | * a large delta change is calculated directly. | 267 | * a large delta change is calculated directly. |
| 268 | */ | 268 | */ |
| 269 | if (delta < max_pass / 4) | 269 | if (delta < freeable / 4) |
| 270 | total_scan = min(total_scan, max_pass / 2); | 270 | total_scan = min(total_scan, freeable / 2); |
| 271 | 271 | ||
| 272 | /* | 272 | /* |
| 273 | * Avoid risking looping forever due to too large nr value: | 273 | * Avoid risking looping forever due to too large nr value: |
| 274 | * never try to free more than twice the estimate number of | 274 | * never try to free more than twice the estimate number of |
| 275 | * freeable entries. | 275 | * freeable entries. |
| 276 | */ | 276 | */ |
| 277 | if (total_scan > max_pass * 2) | 277 | if (total_scan > freeable * 2) |
| 278 | total_scan = max_pass * 2; | 278 | total_scan = freeable * 2; |
| 279 | 279 | ||
| 280 | trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, | 280 | trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, |
| 281 | nr_pages_scanned, lru_pages, | 281 | nr_pages_scanned, lru_pages, |
| 282 | max_pass, delta, total_scan); | 282 | freeable, delta, total_scan); |
| 283 | 283 | ||
| 284 | /* | 284 | /* |
| 285 | * Normally, we should not scan less than batch_size objects in one | 285 | * Normally, we should not scan less than batch_size objects in one |
| @@ -292,12 +292,12 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, | |||
| 292 | * | 292 | * |
| 293 | * We detect the "tight on memory" situations by looking at the total | 293 | * We detect the "tight on memory" situations by looking at the total |
| 294 | * number of objects we want to scan (total_scan). If it is greater | 294 | * number of objects we want to scan (total_scan). If it is greater |
| 295 | * than the total number of objects on slab (max_pass), we must be | 295 | * than the total number of objects on slab (freeable), we must be |
| 296 | * scanning at high prio and therefore should try to reclaim as much as | 296 | * scanning at high prio and therefore should try to reclaim as much as |
| 297 | * possible. | 297 | * possible. |
| 298 | */ | 298 | */ |
| 299 | while (total_scan >= batch_size || | 299 | while (total_scan >= batch_size || |
| 300 | total_scan >= max_pass) { | 300 | total_scan >= freeable) { |
| 301 | unsigned long ret; | 301 | unsigned long ret; |
| 302 | unsigned long nr_to_scan = min(batch_size, total_scan); | 302 | unsigned long nr_to_scan = min(batch_size, total_scan); |
| 303 | 303 | ||
| @@ -523,7 +523,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 523 | * Same as remove_mapping, but if the page is removed from the mapping, it | 523 | * Same as remove_mapping, but if the page is removed from the mapping, it |
| 524 | * gets returned with a refcount of 0. | 524 | * gets returned with a refcount of 0. |
| 525 | */ | 525 | */ |
| 526 | static int __remove_mapping(struct address_space *mapping, struct page *page) | 526 | static int __remove_mapping(struct address_space *mapping, struct page *page, |
| 527 | bool reclaimed) | ||
| 527 | { | 528 | { |
| 528 | BUG_ON(!PageLocked(page)); | 529 | BUG_ON(!PageLocked(page)); |
| 529 | BUG_ON(mapping != page_mapping(page)); | 530 | BUG_ON(mapping != page_mapping(page)); |
| @@ -569,10 +570,23 @@ static int __remove_mapping(struct address_space *mapping, struct page *page) | |||
| 569 | swapcache_free(swap, page); | 570 | swapcache_free(swap, page); |
| 570 | } else { | 571 | } else { |
| 571 | void (*freepage)(struct page *); | 572 | void (*freepage)(struct page *); |
| 573 | void *shadow = NULL; | ||
| 572 | 574 | ||
| 573 | freepage = mapping->a_ops->freepage; | 575 | freepage = mapping->a_ops->freepage; |
| 574 | 576 | /* | |
| 575 | __delete_from_page_cache(page); | 577 | * Remember a shadow entry for reclaimed file cache in |
| 578 | * order to detect refaults, thus thrashing, later on. | ||
| 579 | * | ||
| 580 | * But don't store shadows in an address space that is | ||
| 581 | * already exiting. This is not just an optimization, | ||
| 582 | * inode reclaim needs to empty out the radix tree or | ||
| 583 | * the nodes are lost. Don't plant shadows behind its | ||
| 584 | * back. | ||
| 585 | */ | ||
| 586 | if (reclaimed && page_is_file_cache(page) && | ||
| 587 | !mapping_exiting(mapping)) | ||
| 588 | shadow = workingset_eviction(mapping, page); | ||
| 589 | __delete_from_page_cache(page, shadow); | ||
| 576 | spin_unlock_irq(&mapping->tree_lock); | 590 | spin_unlock_irq(&mapping->tree_lock); |
| 577 | mem_cgroup_uncharge_cache_page(page); | 591 | mem_cgroup_uncharge_cache_page(page); |
| 578 | 592 | ||
| @@ -595,7 +609,7 @@ cannot_free: | |||
| 595 | */ | 609 | */ |
| 596 | int remove_mapping(struct address_space *mapping, struct page *page) | 610 | int remove_mapping(struct address_space *mapping, struct page *page) |
| 597 | { | 611 | { |
| 598 | if (__remove_mapping(mapping, page)) { | 612 | if (__remove_mapping(mapping, page, false)) { |
| 599 | /* | 613 | /* |
| 600 | * Unfreezing the refcount with 1 rather than 2 effectively | 614 | * Unfreezing the refcount with 1 rather than 2 effectively |
| 601 | * drops the pagecache ref for us without requiring another | 615 | * drops the pagecache ref for us without requiring another |
| @@ -1065,7 +1079,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 1065 | } | 1079 | } |
| 1066 | } | 1080 | } |
| 1067 | 1081 | ||
| 1068 | if (!mapping || !__remove_mapping(mapping, page)) | 1082 | if (!mapping || !__remove_mapping(mapping, page, true)) |
| 1069 | goto keep_locked; | 1083 | goto keep_locked; |
| 1070 | 1084 | ||
| 1071 | /* | 1085 | /* |
| @@ -2297,7 +2311,12 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) | |||
| 2297 | struct zone *zone; | 2311 | struct zone *zone; |
| 2298 | unsigned long nr_soft_reclaimed; | 2312 | unsigned long nr_soft_reclaimed; |
| 2299 | unsigned long nr_soft_scanned; | 2313 | unsigned long nr_soft_scanned; |
| 2314 | unsigned long lru_pages = 0; | ||
| 2300 | bool aborted_reclaim = false; | 2315 | bool aborted_reclaim = false; |
| 2316 | struct reclaim_state *reclaim_state = current->reclaim_state; | ||
| 2317 | struct shrink_control shrink = { | ||
| 2318 | .gfp_mask = sc->gfp_mask, | ||
| 2319 | }; | ||
| 2301 | 2320 | ||
| 2302 | /* | 2321 | /* |
| 2303 | * If the number of buffer_heads in the machine exceeds the maximum | 2322 | * If the number of buffer_heads in the machine exceeds the maximum |
| @@ -2307,6 +2326,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) | |||
| 2307 | if (buffer_heads_over_limit) | 2326 | if (buffer_heads_over_limit) |
| 2308 | sc->gfp_mask |= __GFP_HIGHMEM; | 2327 | sc->gfp_mask |= __GFP_HIGHMEM; |
| 2309 | 2328 | ||
| 2329 | nodes_clear(shrink.nodes_to_scan); | ||
| 2330 | |||
| 2310 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 2331 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
| 2311 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 2332 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
| 2312 | if (!populated_zone(zone)) | 2333 | if (!populated_zone(zone)) |
| @@ -2318,6 +2339,10 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) | |||
| 2318 | if (global_reclaim(sc)) { | 2339 | if (global_reclaim(sc)) { |
| 2319 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 2340 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) |
| 2320 | continue; | 2341 | continue; |
| 2342 | |||
| 2343 | lru_pages += zone_reclaimable_pages(zone); | ||
| 2344 | node_set(zone_to_nid(zone), shrink.nodes_to_scan); | ||
| 2345 | |||
| 2321 | if (sc->priority != DEF_PRIORITY && | 2346 | if (sc->priority != DEF_PRIORITY && |
| 2322 | !zone_reclaimable(zone)) | 2347 | !zone_reclaimable(zone)) |
| 2323 | continue; /* Let kswapd poll it */ | 2348 | continue; /* Let kswapd poll it */ |
| @@ -2354,6 +2379,20 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) | |||
| 2354 | shrink_zone(zone, sc); | 2379 | shrink_zone(zone, sc); |
| 2355 | } | 2380 | } |
| 2356 | 2381 | ||
| 2382 | /* | ||
| 2383 | * Don't shrink slabs when reclaiming memory from over limit cgroups | ||
| 2384 | * but do shrink slab at least once when aborting reclaim for | ||
| 2385 | * compaction to avoid unevenly scanning file/anon LRU pages over slab | ||
| 2386 | * pages. | ||
| 2387 | */ | ||
| 2388 | if (global_reclaim(sc)) { | ||
| 2389 | shrink_slab(&shrink, sc->nr_scanned, lru_pages); | ||
| 2390 | if (reclaim_state) { | ||
| 2391 | sc->nr_reclaimed += reclaim_state->reclaimed_slab; | ||
| 2392 | reclaim_state->reclaimed_slab = 0; | ||
| 2393 | } | ||
| 2394 | } | ||
| 2395 | |||
| 2357 | return aborted_reclaim; | 2396 | return aborted_reclaim; |
| 2358 | } | 2397 | } |
| 2359 | 2398 | ||
| @@ -2394,13 +2433,9 @@ static bool all_unreclaimable(struct zonelist *zonelist, | |||
| 2394 | * else, the number of pages reclaimed | 2433 | * else, the number of pages reclaimed |
| 2395 | */ | 2434 | */ |
| 2396 | static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | 2435 | static unsigned long do_try_to_free_pages(struct zonelist *zonelist, |
| 2397 | struct scan_control *sc, | 2436 | struct scan_control *sc) |
| 2398 | struct shrink_control *shrink) | ||
| 2399 | { | 2437 | { |
| 2400 | unsigned long total_scanned = 0; | 2438 | unsigned long total_scanned = 0; |
| 2401 | struct reclaim_state *reclaim_state = current->reclaim_state; | ||
| 2402 | struct zoneref *z; | ||
| 2403 | struct zone *zone; | ||
| 2404 | unsigned long writeback_threshold; | 2439 | unsigned long writeback_threshold; |
| 2405 | bool aborted_reclaim; | 2440 | bool aborted_reclaim; |
| 2406 | 2441 | ||
| @@ -2415,32 +2450,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
| 2415 | sc->nr_scanned = 0; | 2450 | sc->nr_scanned = 0; |
| 2416 | aborted_reclaim = shrink_zones(zonelist, sc); | 2451 | aborted_reclaim = shrink_zones(zonelist, sc); |
| 2417 | 2452 | ||
| 2418 | /* | ||
| 2419 | * Don't shrink slabs when reclaiming memory from over limit | ||
| 2420 | * cgroups but do shrink slab at least once when aborting | ||
| 2421 | * reclaim for compaction to avoid unevenly scanning file/anon | ||
| 2422 | * LRU pages over slab pages. | ||
| 2423 | */ | ||
| 2424 | if (global_reclaim(sc)) { | ||
| 2425 | unsigned long lru_pages = 0; | ||
| 2426 | |||
| 2427 | nodes_clear(shrink->nodes_to_scan); | ||
| 2428 | for_each_zone_zonelist(zone, z, zonelist, | ||
| 2429 | gfp_zone(sc->gfp_mask)) { | ||
| 2430 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | ||
| 2431 | continue; | ||
| 2432 | |||
| 2433 | lru_pages += zone_reclaimable_pages(zone); | ||
| 2434 | node_set(zone_to_nid(zone), | ||
| 2435 | shrink->nodes_to_scan); | ||
| 2436 | } | ||
| 2437 | |||
| 2438 | shrink_slab(shrink, sc->nr_scanned, lru_pages); | ||
| 2439 | if (reclaim_state) { | ||
| 2440 | sc->nr_reclaimed += reclaim_state->reclaimed_slab; | ||
| 2441 | reclaim_state->reclaimed_slab = 0; | ||
| 2442 | } | ||
| 2443 | } | ||
| 2444 | total_scanned += sc->nr_scanned; | 2453 | total_scanned += sc->nr_scanned; |
| 2445 | if (sc->nr_reclaimed >= sc->nr_to_reclaim) | 2454 | if (sc->nr_reclaimed >= sc->nr_to_reclaim) |
| 2446 | goto out; | 2455 | goto out; |
| @@ -2602,9 +2611,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
| 2602 | .target_mem_cgroup = NULL, | 2611 | .target_mem_cgroup = NULL, |
| 2603 | .nodemask = nodemask, | 2612 | .nodemask = nodemask, |
| 2604 | }; | 2613 | }; |
| 2605 | struct shrink_control shrink = { | ||
| 2606 | .gfp_mask = sc.gfp_mask, | ||
| 2607 | }; | ||
| 2608 | 2614 | ||
| 2609 | /* | 2615 | /* |
| 2610 | * Do not enter reclaim if fatal signal was delivered while throttled. | 2616 | * Do not enter reclaim if fatal signal was delivered while throttled. |
| @@ -2618,7 +2624,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
| 2618 | sc.may_writepage, | 2624 | sc.may_writepage, |
| 2619 | gfp_mask); | 2625 | gfp_mask); |
| 2620 | 2626 | ||
| 2621 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); | 2627 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); |
| 2622 | 2628 | ||
| 2623 | trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); | 2629 | trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); |
| 2624 | 2630 | ||
| @@ -2685,9 +2691,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, | |||
| 2685 | .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 2691 | .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
| 2686 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), | 2692 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), |
| 2687 | }; | 2693 | }; |
| 2688 | struct shrink_control shrink = { | ||
| 2689 | .gfp_mask = sc.gfp_mask, | ||
| 2690 | }; | ||
| 2691 | 2694 | ||
| 2692 | /* | 2695 | /* |
| 2693 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't | 2696 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't |
| @@ -2702,7 +2705,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, | |||
| 2702 | sc.may_writepage, | 2705 | sc.may_writepage, |
| 2703 | sc.gfp_mask); | 2706 | sc.gfp_mask); |
| 2704 | 2707 | ||
| 2705 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); | 2708 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); |
| 2706 | 2709 | ||
| 2707 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); | 2710 | trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); |
| 2708 | 2711 | ||
| @@ -3337,9 +3340,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) | |||
| 3337 | .order = 0, | 3340 | .order = 0, |
| 3338 | .priority = DEF_PRIORITY, | 3341 | .priority = DEF_PRIORITY, |
| 3339 | }; | 3342 | }; |
| 3340 | struct shrink_control shrink = { | ||
| 3341 | .gfp_mask = sc.gfp_mask, | ||
| 3342 | }; | ||
| 3343 | struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); | 3343 | struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); |
| 3344 | struct task_struct *p = current; | 3344 | struct task_struct *p = current; |
| 3345 | unsigned long nr_reclaimed; | 3345 | unsigned long nr_reclaimed; |
| @@ -3349,7 +3349,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) | |||
| 3349 | reclaim_state.reclaimed_slab = 0; | 3349 | reclaim_state.reclaimed_slab = 0; |
| 3350 | p->reclaim_state = &reclaim_state; | 3350 | p->reclaim_state = &reclaim_state; |
| 3351 | 3351 | ||
| 3352 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); | 3352 | nr_reclaimed = do_try_to_free_pages(zonelist, &sc); |
| 3353 | 3353 | ||
| 3354 | p->reclaim_state = NULL; | 3354 | p->reclaim_state = NULL; |
| 3355 | lockdep_clear_current_reclaim_state(); | 3355 | lockdep_clear_current_reclaim_state(); |
diff --git a/mm/vmstat.c b/mm/vmstat.c index def5dd2fbe61..197b4c4a9587 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -770,6 +770,9 @@ const char * const vmstat_text[] = { | |||
| 770 | "numa_local", | 770 | "numa_local", |
| 771 | "numa_other", | 771 | "numa_other", |
| 772 | #endif | 772 | #endif |
| 773 | "workingset_refault", | ||
| 774 | "workingset_activate", | ||
| 775 | "workingset_nodereclaim", | ||
| 773 | "nr_anon_transparent_hugepages", | 776 | "nr_anon_transparent_hugepages", |
| 774 | "nr_free_cma", | 777 | "nr_free_cma", |
| 775 | "nr_dirty_threshold", | 778 | "nr_dirty_threshold", |
| @@ -810,6 +813,9 @@ const char * const vmstat_text[] = { | |||
| 810 | 813 | ||
| 811 | "pgrotated", | 814 | "pgrotated", |
| 812 | 815 | ||
| 816 | "drop_pagecache", | ||
| 817 | "drop_slab", | ||
| 818 | |||
| 813 | #ifdef CONFIG_NUMA_BALANCING | 819 | #ifdef CONFIG_NUMA_BALANCING |
| 814 | "numa_pte_updates", | 820 | "numa_pte_updates", |
| 815 | "numa_huge_pte_updates", | 821 | "numa_huge_pte_updates", |
diff --git a/mm/workingset.c b/mm/workingset.c new file mode 100644 index 000000000000..f7216fa7da27 --- /dev/null +++ b/mm/workingset.c | |||
| @@ -0,0 +1,414 @@ | |||
| 1 | /* | ||
| 2 | * Workingset detection | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/memcontrol.h> | ||
| 8 | #include <linux/writeback.h> | ||
| 9 | #include <linux/pagemap.h> | ||
| 10 | #include <linux/atomic.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/swap.h> | ||
| 13 | #include <linux/fs.h> | ||
| 14 | #include <linux/mm.h> | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Double CLOCK lists | ||
| 18 | * | ||
| 19 | * Per zone, two clock lists are maintained for file pages: the | ||
| 20 | * inactive and the active list. Freshly faulted pages start out at | ||
| 21 | * the head of the inactive list and page reclaim scans pages from the | ||
| 22 | * tail. Pages that are accessed multiple times on the inactive list | ||
| 23 | * are promoted to the active list, to protect them from reclaim, | ||
| 24 | * whereas active pages are demoted to the inactive list when the | ||
| 25 | * active list grows too big. | ||
| 26 | * | ||
| 27 | * fault ------------------------+ | ||
| 28 | * | | ||
| 29 | * +--------------+ | +-------------+ | ||
| 30 | * reclaim <- | inactive | <-+-- demotion | active | <--+ | ||
| 31 | * +--------------+ +-------------+ | | ||
| 32 | * | | | ||
| 33 | * +-------------- promotion ------------------+ | ||
| 34 | * | ||
| 35 | * | ||
| 36 | * Access frequency and refault distance | ||
| 37 | * | ||
| 38 | * A workload is thrashing when its pages are frequently used but they | ||
| 39 | * are evicted from the inactive list every time before another access | ||
| 40 | * would have promoted them to the active list. | ||
| 41 | * | ||
| 42 | * In cases where the average access distance between thrashing pages | ||
| 43 | * is bigger than the size of memory there is nothing that can be | ||
| 44 | * done - the thrashing set could never fit into memory under any | ||
| 45 | * circumstance. | ||
| 46 | * | ||
| 47 | * However, the average access distance could be bigger than the | ||
| 48 | * inactive list, yet smaller than the size of memory. In this case, | ||
| 49 | * the set could fit into memory if it weren't for the currently | ||
| 50 | * active pages - which may be used more, hopefully less frequently: | ||
| 51 | * | ||
| 52 | * +-memory available to cache-+ | ||
| 53 | * | | | ||
| 54 | * +-inactive------+-active----+ | ||
| 55 | * a b | c d e f g h i | J K L M N | | ||
| 56 | * +---------------+-----------+ | ||
| 57 | * | ||
| 58 | * It is prohibitively expensive to accurately track access frequency | ||
| 59 | * of pages. But a reasonable approximation can be made to measure | ||
| 60 | * thrashing on the inactive list, after which refaulting pages can be | ||
| 61 | * activated optimistically to compete with the existing active pages. | ||
| 62 | * | ||
| 63 | * Approximating inactive page access frequency - Observations: | ||
| 64 | * | ||
| 65 | * 1. When a page is accessed for the first time, it is added to the | ||
| 66 | * head of the inactive list, slides every existing inactive page | ||
| 67 | * towards the tail by one slot, and pushes the current tail page | ||
| 68 | * out of memory. | ||
| 69 | * | ||
| 70 | * 2. When a page is accessed for the second time, it is promoted to | ||
| 71 | * the active list, shrinking the inactive list by one slot. This | ||
| 72 | * also slides all inactive pages that were faulted into the cache | ||
| 73 | * more recently than the activated page towards the tail of the | ||
| 74 | * inactive list. | ||
| 75 | * | ||
| 76 | * Thus: | ||
| 77 | * | ||
| 78 | * 1. The sum of evictions and activations between any two points in | ||
| 79 | * time indicate the minimum number of inactive pages accessed in | ||
| 80 | * between. | ||
| 81 | * | ||
| 82 | * 2. Moving one inactive page N page slots towards the tail of the | ||
| 83 | * list requires at least N inactive page accesses. | ||
| 84 | * | ||
| 85 | * Combining these: | ||
| 86 | * | ||
| 87 | * 1. When a page is finally evicted from memory, the number of | ||
| 88 | * inactive pages accessed while the page was in cache is at least | ||
| 89 | * the number of page slots on the inactive list. | ||
| 90 | * | ||
| 91 | * 2. In addition, measuring the sum of evictions and activations (E) | ||
| 92 | * at the time of a page's eviction, and comparing it to another | ||
| 93 | * reading (R) at the time the page faults back into memory tells | ||
| 94 | * the minimum number of accesses while the page was not cached. | ||
| 95 | * This is called the refault distance. | ||
| 96 | * | ||
| 97 | * Because the first access of the page was the fault and the second | ||
| 98 | * access the refault, we combine the in-cache distance with the | ||
| 99 | * out-of-cache distance to get the complete minimum access distance | ||
| 100 | * of this page: | ||
| 101 | * | ||
| 102 | * NR_inactive + (R - E) | ||
| 103 | * | ||
| 104 | * And knowing the minimum access distance of a page, we can easily | ||
| 105 | * tell if the page would be able to stay in cache assuming all page | ||
| 106 | * slots in the cache were available: | ||
| 107 | * | ||
| 108 | * NR_inactive + (R - E) <= NR_inactive + NR_active | ||
| 109 | * | ||
| 110 | * which can be further simplified to | ||
| 111 | * | ||
| 112 | * (R - E) <= NR_active | ||
| 113 | * | ||
| 114 | * Put into words, the refault distance (out-of-cache) can be seen as | ||
| 115 | * a deficit in inactive list space (in-cache). If the inactive list | ||
| 116 | * had (R - E) more page slots, the page would not have been evicted | ||
| 117 | * in between accesses, but activated instead. And on a full system, | ||
| 118 | * the only thing eating into inactive list space is active pages. | ||
| 119 | * | ||
| 120 | * | ||
| 121 | * Activating refaulting pages | ||
| 122 | * | ||
| 123 | * All that is known about the active list is that the pages have been | ||
| 124 | * accessed more than once in the past. This means that at any given | ||
| 125 | * time there is actually a good chance that pages on the active list | ||
| 126 | * are no longer in active use. | ||
| 127 | * | ||
| 128 | * So when a refault distance of (R - E) is observed and there are at | ||
| 129 | * least (R - E) active pages, the refaulting page is activated | ||
| 130 | * optimistically in the hope that (R - E) active pages are actually | ||
| 131 | * used less frequently than the refaulting page - or even not used at | ||
| 132 | * all anymore. | ||
| 133 | * | ||
| 134 | * If this is wrong and demotion kicks in, the pages which are truly | ||
| 135 | * used more frequently will be reactivated while the less frequently | ||
| 136 | * used ones will be evicted from memory. | ||
| 137 | * | ||
| 138 | * But if this is right, the stale pages will be pushed out of memory | ||
| 139 | * and the used pages get to stay in cache. | ||
| 140 | * | ||
| 141 | * | ||
| 142 | * Implementation | ||
| 143 | * | ||
| 144 | * For each zone's file LRU lists, a counter for inactive evictions | ||
| 145 | * and activations is maintained (zone->inactive_age). | ||
| 146 | * | ||
| 147 | * On eviction, a snapshot of this counter (along with some bits to | ||
| 148 | * identify the zone) is stored in the now empty page cache radix tree | ||
| 149 | * slot of the evicted page. This is called a shadow entry. | ||
| 150 | * | ||
| 151 | * On cache misses for which there are shadow entries, an eligible | ||
| 152 | * refault distance will immediately activate the refaulting page. | ||
| 153 | */ | ||
| 154 | |||
| 155 | static void *pack_shadow(unsigned long eviction, struct zone *zone) | ||
| 156 | { | ||
| 157 | eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone); | ||
| 158 | eviction = (eviction << ZONES_SHIFT) | zone_idx(zone); | ||
| 159 | eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT); | ||
| 160 | |||
| 161 | return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY); | ||
| 162 | } | ||
| 163 | |||
| 164 | static void unpack_shadow(void *shadow, | ||
| 165 | struct zone **zone, | ||
| 166 | unsigned long *distance) | ||
| 167 | { | ||
| 168 | unsigned long entry = (unsigned long)shadow; | ||
| 169 | unsigned long eviction; | ||
| 170 | unsigned long refault; | ||
| 171 | unsigned long mask; | ||
| 172 | int zid, nid; | ||
| 173 | |||
| 174 | entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT; | ||
| 175 | zid = entry & ((1UL << ZONES_SHIFT) - 1); | ||
| 176 | entry >>= ZONES_SHIFT; | ||
| 177 | nid = entry & ((1UL << NODES_SHIFT) - 1); | ||
| 178 | entry >>= NODES_SHIFT; | ||
| 179 | eviction = entry; | ||
| 180 | |||
| 181 | *zone = NODE_DATA(nid)->node_zones + zid; | ||
| 182 | |||
| 183 | refault = atomic_long_read(&(*zone)->inactive_age); | ||
| 184 | mask = ~0UL >> (NODES_SHIFT + ZONES_SHIFT + | ||
| 185 | RADIX_TREE_EXCEPTIONAL_SHIFT); | ||
| 186 | /* | ||
| 187 | * The unsigned subtraction here gives an accurate distance | ||
| 188 | * across inactive_age overflows in most cases. | ||
| 189 | * | ||
| 190 | * There is a special case: usually, shadow entries have a | ||
| 191 | * short lifetime and are either refaulted or reclaimed along | ||
| 192 | * with the inode before they get too old. But it is not | ||
| 193 | * impossible for the inactive_age to lap a shadow entry in | ||
| 194 | * the field, which can then can result in a false small | ||
| 195 | * refault distance, leading to a false activation should this | ||
| 196 | * old entry actually refault again. However, earlier kernels | ||
| 197 | * used to deactivate unconditionally with *every* reclaim | ||
| 198 | * invocation for the longest time, so the occasional | ||
| 199 | * inappropriate activation leading to pressure on the active | ||
| 200 | * list is not a problem. | ||
| 201 | */ | ||
| 202 | *distance = (refault - eviction) & mask; | ||
| 203 | } | ||
| 204 | |||
| 205 | /** | ||
| 206 | * workingset_eviction - note the eviction of a page from memory | ||
| 207 | * @mapping: address space the page was backing | ||
| 208 | * @page: the page being evicted | ||
| 209 | * | ||
| 210 | * Returns a shadow entry to be stored in @mapping->page_tree in place | ||
| 211 | * of the evicted @page so that a later refault can be detected. | ||
| 212 | */ | ||
| 213 | void *workingset_eviction(struct address_space *mapping, struct page *page) | ||
| 214 | { | ||
| 215 | struct zone *zone = page_zone(page); | ||
| 216 | unsigned long eviction; | ||
| 217 | |||
| 218 | eviction = atomic_long_inc_return(&zone->inactive_age); | ||
| 219 | return pack_shadow(eviction, zone); | ||
| 220 | } | ||
| 221 | |||
| 222 | /** | ||
| 223 | * workingset_refault - evaluate the refault of a previously evicted page | ||
| 224 | * @shadow: shadow entry of the evicted page | ||
| 225 | * | ||
| 226 | * Calculates and evaluates the refault distance of the previously | ||
| 227 | * evicted page in the context of the zone it was allocated in. | ||
| 228 | * | ||
| 229 | * Returns %true if the page should be activated, %false otherwise. | ||
| 230 | */ | ||
| 231 | bool workingset_refault(void *shadow) | ||
| 232 | { | ||
| 233 | unsigned long refault_distance; | ||
| 234 | struct zone *zone; | ||
| 235 | |||
| 236 | unpack_shadow(shadow, &zone, &refault_distance); | ||
| 237 | inc_zone_state(zone, WORKINGSET_REFAULT); | ||
| 238 | |||
| 239 | if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) { | ||
| 240 | inc_zone_state(zone, WORKINGSET_ACTIVATE); | ||
| 241 | return true; | ||
| 242 | } | ||
| 243 | return false; | ||
| 244 | } | ||
| 245 | |||
| 246 | /** | ||
| 247 | * workingset_activation - note a page activation | ||
| 248 | * @page: page that is being activated | ||
| 249 | */ | ||
| 250 | void workingset_activation(struct page *page) | ||
| 251 | { | ||
| 252 | atomic_long_inc(&page_zone(page)->inactive_age); | ||
| 253 | } | ||
| 254 | |||
| 255 | /* | ||
| 256 | * Shadow entries reflect the share of the working set that does not | ||
| 257 | * fit into memory, so their number depends on the access pattern of | ||
| 258 | * the workload. In most cases, they will refault or get reclaimed | ||
| 259 | * along with the inode, but a (malicious) workload that streams | ||
| 260 | * through files with a total size several times that of available | ||
| 261 | * memory, while preventing the inodes from being reclaimed, can | ||
| 262 | * create excessive amounts of shadow nodes. To keep a lid on this, | ||
| 263 | * track shadow nodes and reclaim them when they grow way past the | ||
| 264 | * point where they would still be useful. | ||
| 265 | */ | ||
| 266 | |||
| 267 | struct list_lru workingset_shadow_nodes; | ||
| 268 | |||
| 269 | static unsigned long count_shadow_nodes(struct shrinker *shrinker, | ||
| 270 | struct shrink_control *sc) | ||
| 271 | { | ||
| 272 | unsigned long shadow_nodes; | ||
| 273 | unsigned long max_nodes; | ||
| 274 | unsigned long pages; | ||
| 275 | |||
| 276 | /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ | ||
| 277 | local_irq_disable(); | ||
| 278 | shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid); | ||
| 279 | local_irq_enable(); | ||
| 280 | |||
| 281 | pages = node_present_pages(sc->nid); | ||
| 282 | /* | ||
| 283 | * Active cache pages are limited to 50% of memory, and shadow | ||
| 284 | * entries that represent a refault distance bigger than that | ||
| 285 | * do not have any effect. Limit the number of shadow nodes | ||
| 286 | * such that shadow entries do not exceed the number of active | ||
| 287 | * cache pages, assuming a worst-case node population density | ||
| 288 | * of 1/8th on average. | ||
| 289 | * | ||
| 290 | * On 64-bit with 7 radix_tree_nodes per page and 64 slots | ||
| 291 | * each, this will reclaim shadow entries when they consume | ||
| 292 | * ~2% of available memory: | ||
| 293 | * | ||
| 294 | * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE | ||
| 295 | */ | ||
| 296 | max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3); | ||
| 297 | |||
| 298 | if (shadow_nodes <= max_nodes) | ||
| 299 | return 0; | ||
| 300 | |||
| 301 | return shadow_nodes - max_nodes; | ||
| 302 | } | ||
| 303 | |||
| 304 | static enum lru_status shadow_lru_isolate(struct list_head *item, | ||
| 305 | spinlock_t *lru_lock, | ||
| 306 | void *arg) | ||
| 307 | { | ||
| 308 | struct address_space *mapping; | ||
| 309 | struct radix_tree_node *node; | ||
| 310 | unsigned int i; | ||
| 311 | int ret; | ||
| 312 | |||
| 313 | /* | ||
| 314 | * Page cache insertions and deletions synchronously maintain | ||
| 315 | * the shadow node LRU under the mapping->tree_lock and the | ||
| 316 | * lru_lock. Because the page cache tree is emptied before | ||
| 317 | * the inode can be destroyed, holding the lru_lock pins any | ||
| 318 | * address_space that has radix tree nodes on the LRU. | ||
| 319 | * | ||
| 320 | * We can then safely transition to the mapping->tree_lock to | ||
| 321 | * pin only the address_space of the particular node we want | ||
| 322 | * to reclaim, take the node off-LRU, and drop the lru_lock. | ||
| 323 | */ | ||
| 324 | |||
| 325 | node = container_of(item, struct radix_tree_node, private_list); | ||
| 326 | mapping = node->private_data; | ||
| 327 | |||
| 328 | /* Coming from the list, invert the lock order */ | ||
| 329 | if (!spin_trylock(&mapping->tree_lock)) { | ||
| 330 | spin_unlock(lru_lock); | ||
| 331 | ret = LRU_RETRY; | ||
| 332 | goto out; | ||
| 333 | } | ||
| 334 | |||
| 335 | list_del_init(item); | ||
| 336 | spin_unlock(lru_lock); | ||
| 337 | |||
| 338 | /* | ||
| 339 | * The nodes should only contain one or more shadow entries, | ||
| 340 | * no pages, so we expect to be able to remove them all and | ||
| 341 | * delete and free the empty node afterwards. | ||
| 342 | */ | ||
| 343 | |||
| 344 | BUG_ON(!node->count); | ||
| 345 | BUG_ON(node->count & RADIX_TREE_COUNT_MASK); | ||
| 346 | |||
| 347 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { | ||
| 348 | if (node->slots[i]) { | ||
| 349 | BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); | ||
| 350 | node->slots[i] = NULL; | ||
| 351 | BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT)); | ||
| 352 | node->count -= 1U << RADIX_TREE_COUNT_SHIFT; | ||
| 353 | BUG_ON(!mapping->nrshadows); | ||
| 354 | mapping->nrshadows--; | ||
| 355 | } | ||
| 356 | } | ||
| 357 | BUG_ON(node->count); | ||
| 358 | inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM); | ||
| 359 | if (!__radix_tree_delete_node(&mapping->page_tree, node)) | ||
| 360 | BUG(); | ||
| 361 | |||
| 362 | spin_unlock(&mapping->tree_lock); | ||
| 363 | ret = LRU_REMOVED_RETRY; | ||
| 364 | out: | ||
| 365 | local_irq_enable(); | ||
| 366 | cond_resched(); | ||
| 367 | local_irq_disable(); | ||
| 368 | spin_lock(lru_lock); | ||
| 369 | return ret; | ||
| 370 | } | ||
| 371 | |||
| 372 | static unsigned long scan_shadow_nodes(struct shrinker *shrinker, | ||
| 373 | struct shrink_control *sc) | ||
| 374 | { | ||
| 375 | unsigned long ret; | ||
| 376 | |||
| 377 | /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ | ||
| 378 | local_irq_disable(); | ||
| 379 | ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid, | ||
| 380 | shadow_lru_isolate, NULL, &sc->nr_to_scan); | ||
| 381 | local_irq_enable(); | ||
| 382 | return ret; | ||
| 383 | } | ||
| 384 | |||
| 385 | static struct shrinker workingset_shadow_shrinker = { | ||
| 386 | .count_objects = count_shadow_nodes, | ||
| 387 | .scan_objects = scan_shadow_nodes, | ||
| 388 | .seeks = DEFAULT_SEEKS, | ||
| 389 | .flags = SHRINKER_NUMA_AWARE, | ||
| 390 | }; | ||
| 391 | |||
| 392 | /* | ||
| 393 | * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe | ||
| 394 | * mapping->tree_lock. | ||
| 395 | */ | ||
| 396 | static struct lock_class_key shadow_nodes_key; | ||
| 397 | |||
| 398 | static int __init workingset_init(void) | ||
| 399 | { | ||
| 400 | int ret; | ||
| 401 | |||
| 402 | ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key); | ||
| 403 | if (ret) | ||
| 404 | goto err; | ||
| 405 | ret = register_shrinker(&workingset_shadow_shrinker); | ||
| 406 | if (ret) | ||
| 407 | goto err_list_lru; | ||
| 408 | return 0; | ||
| 409 | err_list_lru: | ||
| 410 | list_lru_destroy(&workingset_shadow_nodes); | ||
| 411 | err: | ||
| 412 | return ret; | ||
| 413 | } | ||
| 414 | module_init(workingset_init); | ||
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index 7203e66dcd6f..1b4e4b8f5e47 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile | |||
| @@ -18,8 +18,8 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include | |||
| 18 | bpf-direct-objs := bpf-direct.o | 18 | bpf-direct-objs := bpf-direct.o |
| 19 | 19 | ||
| 20 | # Try to match the kernel target. | 20 | # Try to match the kernel target. |
| 21 | ifndef CONFIG_64BIT | ||
| 22 | ifndef CROSS_COMPILE | 21 | ifndef CROSS_COMPILE |
| 22 | ifndef CONFIG_64BIT | ||
| 23 | 23 | ||
| 24 | # s390 has -m31 flag to build 31 bit binaries | 24 | # s390 has -m31 flag to build 31 bit binaries |
| 25 | ifndef CONFIG_S390 | 25 | ifndef CONFIG_S390 |
| @@ -36,7 +36,13 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG) | |||
| 36 | HOSTLOADLIBES_bpf-fancy += $(MFLAG) | 36 | HOSTLOADLIBES_bpf-fancy += $(MFLAG) |
| 37 | HOSTLOADLIBES_dropper += $(MFLAG) | 37 | HOSTLOADLIBES_dropper += $(MFLAG) |
| 38 | endif | 38 | endif |
| 39 | endif | ||
| 40 | |||
| 41 | # Tell kbuild to always build the programs | ||
| 42 | always := $(hostprogs-y) | 39 | always := $(hostprogs-y) |
| 40 | else | ||
| 41 | # MIPS system calls are defined based on the -mabi that is passed | ||
| 42 | # to the toolchain which may or may not be a valid option | ||
| 43 | # for the host toolchain. So disable tests if target architecture | ||
| 44 | # is MIPS but the host isn't. | ||
| 45 | ifndef CONFIG_MIPS | ||
| 46 | always := $(hostprogs-y) | ||
| 47 | endif | ||
| 48 | endif | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 464dcef79b35..34eb2160489d 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -281,7 +281,7 @@ our $Attribute = qr{ | |||
| 281 | __weak | 281 | __weak |
| 282 | }x; | 282 | }x; |
| 283 | our $Modifier; | 283 | our $Modifier; |
| 284 | our $Inline = qr{inline|__always_inline|noinline}; | 284 | our $Inline = qr{inline|__always_inline|noinline|__inline|__inline__}; |
| 285 | our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; | 285 | our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; |
| 286 | our $Lval = qr{$Ident(?:$Member)*}; | 286 | our $Lval = qr{$Ident(?:$Member)*}; |
| 287 | 287 | ||
| @@ -289,13 +289,14 @@ our $Int_type = qr{(?i)llu|ull|ll|lu|ul|l|u}; | |||
| 289 | our $Binary = qr{(?i)0b[01]+$Int_type?}; | 289 | our $Binary = qr{(?i)0b[01]+$Int_type?}; |
| 290 | our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?}; | 290 | our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?}; |
| 291 | our $Int = qr{[0-9]+$Int_type?}; | 291 | our $Int = qr{[0-9]+$Int_type?}; |
| 292 | our $Octal = qr{0[0-7]+$Int_type?}; | ||
| 292 | our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; | 293 | our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; |
| 293 | our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; | 294 | our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; |
| 294 | our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; | 295 | our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; |
| 295 | our $Float = qr{$Float_hex|$Float_dec|$Float_int}; | 296 | our $Float = qr{$Float_hex|$Float_dec|$Float_int}; |
| 296 | our $Constant = qr{$Float|$Binary|$Hex|$Int}; | 297 | our $Constant = qr{$Float|$Binary|$Octal|$Hex|$Int}; |
| 297 | our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; | 298 | our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; |
| 298 | our $Compare = qr{<=|>=|==|!=|<|>}; | 299 | our $Compare = qr{<=|>=|==|!=|<|(?<!-)>}; |
| 299 | our $Arithmetic = qr{\+|-|\*|\/|%}; | 300 | our $Arithmetic = qr{\+|-|\*|\/|%}; |
| 300 | our $Operators = qr{ | 301 | our $Operators = qr{ |
| 301 | <=|>=|==|!=| | 302 | <=|>=|==|!=| |
| @@ -303,6 +304,8 @@ our $Operators = qr{ | |||
| 303 | &&|\|\||,|\^|\+\+|--|&|\||$Arithmetic | 304 | &&|\|\||,|\^|\+\+|--|&|\||$Arithmetic |
| 304 | }x; | 305 | }x; |
| 305 | 306 | ||
| 307 | our $c90_Keywords = qr{do|for|while|if|else|return|goto|continue|switch|default|case|break}x; | ||
| 308 | |||
| 306 | our $NonptrType; | 309 | our $NonptrType; |
| 307 | our $NonptrTypeWithAttr; | 310 | our $NonptrTypeWithAttr; |
| 308 | our $Type; | 311 | our $Type; |
| @@ -378,6 +381,22 @@ our @modifierList = ( | |||
| 378 | qr{fastcall}, | 381 | qr{fastcall}, |
| 379 | ); | 382 | ); |
| 380 | 383 | ||
| 384 | our @mode_permission_funcs = ( | ||
| 385 | ["module_param", 3], | ||
| 386 | ["module_param_(?:array|named|string)", 4], | ||
| 387 | ["module_param_array_named", 5], | ||
| 388 | ["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2], | ||
| 389 | ["proc_create(?:_data|)", 2], | ||
| 390 | ["(?:CLASS|DEVICE|SENSOR)_ATTR", 2], | ||
| 391 | ); | ||
| 392 | |||
| 393 | #Create a search pattern for all these functions to speed up a loop below | ||
| 394 | our $mode_perms_search = ""; | ||
| 395 | foreach my $entry (@mode_permission_funcs) { | ||
| 396 | $mode_perms_search .= '|' if ($mode_perms_search ne ""); | ||
| 397 | $mode_perms_search .= $entry->[0]; | ||
| 398 | } | ||
| 399 | |||
| 381 | our $allowed_asm_includes = qr{(?x: | 400 | our $allowed_asm_includes = qr{(?x: |
| 382 | irq| | 401 | irq| |
| 383 | memory | 402 | memory |
| @@ -412,7 +431,7 @@ sub build_types { | |||
| 412 | (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*|\[\])+|(?:\s*\[\s*\])+)? | 431 | (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*|\[\])+|(?:\s*\[\s*\])+)? |
| 413 | (?:\s+$Inline|\s+$Modifier)* | 432 | (?:\s+$Inline|\s+$Modifier)* |
| 414 | }x; | 433 | }x; |
| 415 | $Declare = qr{(?:$Storage\s+)?$Type}; | 434 | $Declare = qr{(?:$Storage\s+(?:$Inline\s+)?)?$Type}; |
| 416 | } | 435 | } |
| 417 | build_types(); | 436 | build_types(); |
| 418 | 437 | ||
| @@ -423,15 +442,20 @@ our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*}; | |||
| 423 | # Any use must be runtime checked with $^V | 442 | # Any use must be runtime checked with $^V |
| 424 | 443 | ||
| 425 | our $balanced_parens = qr/(\((?:[^\(\)]++|(?-1))*\))/; | 444 | our $balanced_parens = qr/(\((?:[^\(\)]++|(?-1))*\))/; |
| 426 | our $LvalOrFunc = qr{($Lval)\s*($balanced_parens{0,1})\s*}; | 445 | our $LvalOrFunc = qr{((?:[\&\*]\s*)?$Lval)\s*($balanced_parens{0,1})\s*}; |
| 427 | our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)}; | 446 | our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)}; |
| 428 | 447 | ||
| 429 | sub deparenthesize { | 448 | sub deparenthesize { |
| 430 | my ($string) = @_; | 449 | my ($string) = @_; |
| 431 | return "" if (!defined($string)); | 450 | return "" if (!defined($string)); |
| 432 | $string =~ s@^\s*\(\s*@@g; | 451 | |
| 433 | $string =~ s@\s*\)\s*$@@g; | 452 | while ($string =~ /^\s*\(.*\)\s*$/) { |
| 453 | $string =~ s@^\s*\(\s*@@; | ||
| 454 | $string =~ s@\s*\)\s*$@@; | ||
| 455 | } | ||
| 456 | |||
| 434 | $string =~ s@\s+@ @g; | 457 | $string =~ s@\s+@ @g; |
| 458 | |||
| 435 | return $string; | 459 | return $string; |
| 436 | } | 460 | } |
| 437 | 461 | ||
| @@ -1421,21 +1445,25 @@ sub possible { | |||
| 1421 | my $prefix = ''; | 1445 | my $prefix = ''; |
| 1422 | 1446 | ||
| 1423 | sub show_type { | 1447 | sub show_type { |
| 1424 | return defined $use_type{$_[0]} if (scalar keys %use_type > 0); | 1448 | my ($type) = @_; |
| 1449 | |||
| 1450 | return defined $use_type{$type} if (scalar keys %use_type > 0); | ||
| 1425 | 1451 | ||
| 1426 | return !defined $ignore_type{$_[0]}; | 1452 | return !defined $ignore_type{$type}; |
| 1427 | } | 1453 | } |
| 1428 | 1454 | ||
| 1429 | sub report { | 1455 | sub report { |
| 1430 | if (!show_type($_[1]) || | 1456 | my ($level, $type, $msg) = @_; |
| 1431 | (defined $tst_only && $_[2] !~ /\Q$tst_only\E/)) { | 1457 | |
| 1458 | if (!show_type($type) || | ||
| 1459 | (defined $tst_only && $msg !~ /\Q$tst_only\E/)) { | ||
| 1432 | return 0; | 1460 | return 0; |
| 1433 | } | 1461 | } |
| 1434 | my $line; | 1462 | my $line; |
| 1435 | if ($show_types) { | 1463 | if ($show_types) { |
| 1436 | $line = "$prefix$_[0]:$_[1]: $_[2]\n"; | 1464 | $line = "$prefix$level:$type: $msg\n"; |
| 1437 | } else { | 1465 | } else { |
| 1438 | $line = "$prefix$_[0]: $_[2]\n"; | 1466 | $line = "$prefix$level: $msg\n"; |
| 1439 | } | 1467 | } |
| 1440 | $line = (split('\n', $line))[0] . "\n" if ($terse); | 1468 | $line = (split('\n', $line))[0] . "\n" if ($terse); |
| 1441 | 1469 | ||
| @@ -1443,12 +1471,15 @@ sub report { | |||
| 1443 | 1471 | ||
| 1444 | return 1; | 1472 | return 1; |
| 1445 | } | 1473 | } |
| 1474 | |||
| 1446 | sub report_dump { | 1475 | sub report_dump { |
| 1447 | our @report; | 1476 | our @report; |
| 1448 | } | 1477 | } |
| 1449 | 1478 | ||
| 1450 | sub ERROR { | 1479 | sub ERROR { |
| 1451 | if (report("ERROR", $_[0], $_[1])) { | 1480 | my ($type, $msg) = @_; |
| 1481 | |||
| 1482 | if (report("ERROR", $type, $msg)) { | ||
| 1452 | our $clean = 0; | 1483 | our $clean = 0; |
| 1453 | our $cnt_error++; | 1484 | our $cnt_error++; |
| 1454 | return 1; | 1485 | return 1; |
| @@ -1456,7 +1487,9 @@ sub ERROR { | |||
| 1456 | return 0; | 1487 | return 0; |
| 1457 | } | 1488 | } |
| 1458 | sub WARN { | 1489 | sub WARN { |
| 1459 | if (report("WARNING", $_[0], $_[1])) { | 1490 | my ($type, $msg) = @_; |
| 1491 | |||
| 1492 | if (report("WARNING", $type, $msg)) { | ||
| 1460 | our $clean = 0; | 1493 | our $clean = 0; |
| 1461 | our $cnt_warn++; | 1494 | our $cnt_warn++; |
| 1462 | return 1; | 1495 | return 1; |
| @@ -1464,7 +1497,9 @@ sub WARN { | |||
| 1464 | return 0; | 1497 | return 0; |
| 1465 | } | 1498 | } |
| 1466 | sub CHK { | 1499 | sub CHK { |
| 1467 | if ($check && report("CHECK", $_[0], $_[1])) { | 1500 | my ($type, $msg) = @_; |
| 1501 | |||
| 1502 | if ($check && report("CHECK", $type, $msg)) { | ||
| 1468 | our $clean = 0; | 1503 | our $clean = 0; |
| 1469 | our $cnt_chk++; | 1504 | our $cnt_chk++; |
| 1470 | return 1; | 1505 | return 1; |
| @@ -1574,7 +1609,7 @@ sub pos_last_openparen { | |||
| 1574 | } | 1609 | } |
| 1575 | } | 1610 | } |
| 1576 | 1611 | ||
| 1577 | return $last_openparen + 1; | 1612 | return length(expand_tabs(substr($line, 0, $last_openparen))) + 1; |
| 1578 | } | 1613 | } |
| 1579 | 1614 | ||
| 1580 | sub process { | 1615 | sub process { |
| @@ -1891,6 +1926,12 @@ sub process { | |||
| 1891 | } | 1926 | } |
| 1892 | } | 1927 | } |
| 1893 | 1928 | ||
| 1929 | # Check for unwanted Gerrit info | ||
| 1930 | if ($in_commit_log && $line =~ /^\s*change-id:/i) { | ||
| 1931 | ERROR("GERRIT_CHANGE_ID", | ||
| 1932 | "Remove Gerrit Change-Id's before submitting upstream.\n" . $herecurr); | ||
| 1933 | } | ||
| 1934 | |||
| 1894 | # Check for wrappage within a valid hunk of the file | 1935 | # Check for wrappage within a valid hunk of the file |
| 1895 | if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) { | 1936 | if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) { |
| 1896 | ERROR("CORRUPTED_PATCH", | 1937 | ERROR("CORRUPTED_PATCH", |
| @@ -2041,13 +2082,17 @@ sub process { | |||
| 2041 | } | 2082 | } |
| 2042 | 2083 | ||
| 2043 | # check for DT compatible documentation | 2084 | # check for DT compatible documentation |
| 2044 | if (defined $root && $realfile =~ /\.dts/ && | 2085 | if (defined $root && |
| 2045 | $rawline =~ /^\+\s*compatible\s*=/) { | 2086 | (($realfile =~ /\.dtsi?$/ && $line =~ /^\+\s*compatible\s*=\s*\"/) || |
| 2087 | ($realfile =~ /\.[ch]$/ && $line =~ /^\+.*\.compatible\s*=\s*\"/))) { | ||
| 2088 | |||
| 2046 | my @compats = $rawline =~ /\"([a-zA-Z0-9\-\,\.\+_]+)\"/g; | 2089 | my @compats = $rawline =~ /\"([a-zA-Z0-9\-\,\.\+_]+)\"/g; |
| 2047 | 2090 | ||
| 2091 | my $dt_path = $root . "/Documentation/devicetree/bindings/"; | ||
| 2092 | my $vp_file = $dt_path . "vendor-prefixes.txt"; | ||
| 2093 | |||
| 2048 | foreach my $compat (@compats) { | 2094 | foreach my $compat (@compats) { |
| 2049 | my $compat2 = $compat; | 2095 | my $compat2 = $compat; |
| 2050 | my $dt_path = $root . "/Documentation/devicetree/bindings/"; | ||
| 2051 | $compat2 =~ s/\,[a-z]*\-/\,<\.\*>\-/; | 2096 | $compat2 =~ s/\,[a-z]*\-/\,<\.\*>\-/; |
| 2052 | `grep -Erq "$compat|$compat2" $dt_path`; | 2097 | `grep -Erq "$compat|$compat2" $dt_path`; |
| 2053 | if ( $? >> 8 ) { | 2098 | if ( $? >> 8 ) { |
| @@ -2055,14 +2100,12 @@ sub process { | |||
| 2055 | "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr); | 2100 | "DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr); |
| 2056 | } | 2101 | } |
| 2057 | 2102 | ||
| 2058 | my $vendor = $compat; | 2103 | next if $compat !~ /^([a-zA-Z0-9\-]+)\,/; |
| 2059 | my $vendor_path = $dt_path . "vendor-prefixes.txt"; | 2104 | my $vendor = $1; |
| 2060 | next if (! -f $vendor_path); | 2105 | `grep -Eq "^$vendor\\b" $vp_file`; |
| 2061 | $vendor =~ s/^([a-zA-Z0-9]+)\,.*/$1/; | ||
| 2062 | `grep -Eq "$vendor" $vendor_path`; | ||
| 2063 | if ( $? >> 8 ) { | 2106 | if ( $? >> 8 ) { |
| 2064 | WARN("UNDOCUMENTED_DT_STRING", | 2107 | WARN("UNDOCUMENTED_DT_STRING", |
| 2065 | "DT compatible string vendor \"$vendor\" appears un-documented -- check $vendor_path\n" . $herecurr); | 2108 | "DT compatible string vendor \"$vendor\" appears un-documented -- check $vp_file\n" . $herecurr); |
| 2066 | } | 2109 | } |
| 2067 | } | 2110 | } |
| 2068 | } | 2111 | } |
| @@ -2159,7 +2202,7 @@ sub process { | |||
| 2159 | 2202 | ||
| 2160 | # check multi-line statement indentation matches previous line | 2203 | # check multi-line statement indentation matches previous line |
| 2161 | if ($^V && $^V ge 5.10.0 && | 2204 | if ($^V && $^V ge 5.10.0 && |
| 2162 | $prevline =~ /^\+(\t*)(if \(|$Ident\().*(\&\&|\|\||,)\s*$/) { | 2205 | $prevline =~ /^\+([ \t]*)((?:$c90_Keywords(?:\s+if)\s*)|(?:$Declare\s*)?(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*|$Ident\s*=\s*$Ident\s*)\(.*(\&\&|\|\||,)\s*$/) { |
| 2163 | $prevline =~ /^\+(\t*)(.*)$/; | 2206 | $prevline =~ /^\+(\t*)(.*)$/; |
| 2164 | my $oldindent = $1; | 2207 | my $oldindent = $1; |
| 2165 | my $rest = $2; | 2208 | my $rest = $2; |
| @@ -2198,7 +2241,8 @@ sub process { | |||
| 2198 | 2241 | ||
| 2199 | if ($realfile =~ m@^(drivers/net/|net/)@ && | 2242 | if ($realfile =~ m@^(drivers/net/|net/)@ && |
| 2200 | $prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ && | 2243 | $prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ && |
| 2201 | $rawline =~ /^\+[ \t]*\*/) { | 2244 | $rawline =~ /^\+[ \t]*\*/ && |
| 2245 | $realline > 2) { | ||
| 2202 | WARN("NETWORKING_BLOCK_COMMENT_STYLE", | 2246 | WARN("NETWORKING_BLOCK_COMMENT_STYLE", |
| 2203 | "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev); | 2247 | "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev); |
| 2204 | } | 2248 | } |
| @@ -2221,6 +2265,21 @@ sub process { | |||
| 2221 | "networking block comments put the trailing */ on a separate line\n" . $herecurr); | 2265 | "networking block comments put the trailing */ on a separate line\n" . $herecurr); |
| 2222 | } | 2266 | } |
| 2223 | 2267 | ||
| 2268 | # check for missing blank lines after declarations | ||
| 2269 | if ($realfile =~ m@^(drivers/net/|net/)@ && | ||
| 2270 | $prevline =~ /^\+\s+$Declare\s+$Ident/ && | ||
| 2271 | !($prevline =~ /(?:$Compare|$Assignment|$Operators)\s*$/ || | ||
| 2272 | $prevline =~ /(?:\{\s*|\\)$/) && #extended lines | ||
| 2273 | $sline =~ /^\+\s+/ && #Not at char 1 | ||
| 2274 | !($sline =~ /^\+\s+$Declare/ || | ||
| 2275 | $sline =~ /^\+\s+$Ident\s+$Ident/ || #eg: typedef foo | ||
| 2276 | $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ || | ||
| 2277 | $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(])/ || | ||
| 2278 | $sline =~ /^\+\s+\(?\s*(?:$Compare|$Assignment|$Operators)/)) { | ||
| 2279 | WARN("SPACING", | ||
| 2280 | "networking uses a blank line after declarations\n" . $hereprev); | ||
| 2281 | } | ||
| 2282 | |||
| 2224 | # check for spaces at the beginning of a line. | 2283 | # check for spaces at the beginning of a line. |
| 2225 | # Exceptions: | 2284 | # Exceptions: |
| 2226 | # 1) within comments | 2285 | # 1) within comments |
| @@ -2665,6 +2724,13 @@ sub process { | |||
| 2665 | $herecurr); | 2724 | $herecurr); |
| 2666 | } | 2725 | } |
| 2667 | 2726 | ||
| 2727 | # check for non-global char *foo[] = {"bar", ...} declarations. | ||
| 2728 | if ($line =~ /^.\s+(?:static\s+|const\s+)?char\s+\*\s*\w+\s*\[\s*\]\s*=\s*\{/) { | ||
| 2729 | WARN("STATIC_CONST_CHAR_ARRAY", | ||
| 2730 | "char * array declaration might be better as static const\n" . | ||
| 2731 | $herecurr); | ||
| 2732 | } | ||
| 2733 | |||
| 2668 | # check for function declarations without arguments like "int foo()" | 2734 | # check for function declarations without arguments like "int foo()" |
| 2669 | if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) { | 2735 | if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) { |
| 2670 | if (ERROR("FUNCTION_WITHOUT_ARGS", | 2736 | if (ERROR("FUNCTION_WITHOUT_ARGS", |
| @@ -2799,7 +2865,7 @@ sub process { | |||
| 2799 | my $level2 = $level; | 2865 | my $level2 = $level; |
| 2800 | $level2 = "dbg" if ($level eq "debug"); | 2866 | $level2 = "dbg" if ($level eq "debug"); |
| 2801 | WARN("PREFER_PR_LEVEL", | 2867 | WARN("PREFER_PR_LEVEL", |
| 2802 | "Prefer netdev_$level2(netdev, ... then dev_$level2(dev, ... then pr_$level(... to printk(KERN_$orig ...\n" . $herecurr); | 2868 | "Prefer [subsystem eg: netdev]_$level2([subsystem]dev, ... then dev_$level2(dev, ... then pr_$level(... to printk(KERN_$orig ...\n" . $herecurr); |
| 2803 | } | 2869 | } |
| 2804 | 2870 | ||
| 2805 | if ($line =~ /\bpr_warning\s*\(/) { | 2871 | if ($line =~ /\bpr_warning\s*\(/) { |
| @@ -2848,10 +2914,7 @@ sub process { | |||
| 2848 | # Function pointer declarations | 2914 | # Function pointer declarations |
| 2849 | # check spacing between type, funcptr, and args | 2915 | # check spacing between type, funcptr, and args |
| 2850 | # canonical declaration is "type (*funcptr)(args...)" | 2916 | # canonical declaration is "type (*funcptr)(args...)" |
| 2851 | # | 2917 | if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)($Ident)(\s*)\)(\s*)\(/) { |
| 2852 | # the $Declare variable will capture all spaces after the type | ||
| 2853 | # so check it for trailing missing spaces or multiple spaces | ||
| 2854 | if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)$Ident(\s*)\)(\s*)\(/) { | ||
| 2855 | my $declare = $1; | 2918 | my $declare = $1; |
| 2856 | my $pre_pointer_space = $2; | 2919 | my $pre_pointer_space = $2; |
| 2857 | my $post_pointer_space = $3; | 2920 | my $post_pointer_space = $3; |
| @@ -2859,16 +2922,30 @@ sub process { | |||
| 2859 | my $post_funcname_space = $5; | 2922 | my $post_funcname_space = $5; |
| 2860 | my $pre_args_space = $6; | 2923 | my $pre_args_space = $6; |
| 2861 | 2924 | ||
| 2862 | if ($declare !~ /\s$/) { | 2925 | # the $Declare variable will capture all spaces after the type |
| 2926 | # so check it for a missing trailing missing space but pointer return types | ||
| 2927 | # don't need a space so don't warn for those. | ||
| 2928 | my $post_declare_space = ""; | ||
| 2929 | if ($declare =~ /(\s+)$/) { | ||
| 2930 | $post_declare_space = $1; | ||
| 2931 | $declare = rtrim($declare); | ||
| 2932 | } | ||
| 2933 | if ($declare !~ /\*$/ && $post_declare_space =~ /^$/) { | ||
| 2863 | WARN("SPACING", | 2934 | WARN("SPACING", |
| 2864 | "missing space after return type\n" . $herecurr); | 2935 | "missing space after return type\n" . $herecurr); |
| 2936 | $post_declare_space = " "; | ||
| 2865 | } | 2937 | } |
| 2866 | 2938 | ||
| 2867 | # unnecessary space "type (*funcptr)(args...)" | 2939 | # unnecessary space "type (*funcptr)(args...)" |
| 2868 | elsif ($declare =~ /\s{2,}$/) { | 2940 | # This test is not currently implemented because these declarations are |
| 2869 | WARN("SPACING", | 2941 | # equivalent to |
| 2870 | "Multiple spaces after return type\n" . $herecurr); | 2942 | # int foo(int bar, ...) |
| 2871 | } | 2943 | # and this is form shouldn't/doesn't generate a checkpatch warning. |
| 2944 | # | ||
| 2945 | # elsif ($declare =~ /\s{2,}$/) { | ||
| 2946 | # WARN("SPACING", | ||
| 2947 | # "Multiple spaces after return type\n" . $herecurr); | ||
| 2948 | # } | ||
| 2872 | 2949 | ||
| 2873 | # unnecessary space "type ( *funcptr)(args...)" | 2950 | # unnecessary space "type ( *funcptr)(args...)" |
| 2874 | if (defined $pre_pointer_space && | 2951 | if (defined $pre_pointer_space && |
| @@ -2900,7 +2977,7 @@ sub process { | |||
| 2900 | 2977 | ||
| 2901 | if (show_type("SPACING") && $fix) { | 2978 | if (show_type("SPACING") && $fix) { |
| 2902 | $fixed[$linenr - 1] =~ | 2979 | $fixed[$linenr - 1] =~ |
| 2903 | s/^(.\s*$Declare)\(\s*\*\s*($Ident)\s*\)\s*\(/rtrim($1) . " " . "\(\*$2\)\("/ex; | 2980 | s/^(.\s*)$Declare\s*\(\s*\*\s*$Ident\s*\)\s*\(/$1 . $declare . $post_declare_space . '(*' . $funcname . ')('/ex; |
| 2904 | } | 2981 | } |
| 2905 | } | 2982 | } |
| 2906 | 2983 | ||
| @@ -3061,10 +3138,13 @@ sub process { | |||
| 3061 | # // is a comment | 3138 | # // is a comment |
| 3062 | } elsif ($op eq '//') { | 3139 | } elsif ($op eq '//') { |
| 3063 | 3140 | ||
| 3141 | # : when part of a bitfield | ||
| 3142 | } elsif ($opv eq ':B') { | ||
| 3143 | # skip the bitfield test for now | ||
| 3144 | |||
| 3064 | # No spaces for: | 3145 | # No spaces for: |
| 3065 | # -> | 3146 | # -> |
| 3066 | # : when part of a bitfield | 3147 | } elsif ($op eq '->') { |
| 3067 | } elsif ($op eq '->' || $opv eq ':B') { | ||
| 3068 | if ($ctx =~ /Wx.|.xW/) { | 3148 | if ($ctx =~ /Wx.|.xW/) { |
| 3069 | if (ERROR("SPACING", | 3149 | if (ERROR("SPACING", |
| 3070 | "spaces prohibited around that '$op' $at\n" . $hereptr)) { | 3150 | "spaces prohibited around that '$op' $at\n" . $hereptr)) { |
| @@ -3334,14 +3414,17 @@ sub process { | |||
| 3334 | } | 3414 | } |
| 3335 | } | 3415 | } |
| 3336 | 3416 | ||
| 3337 | # Return is not a function. | 3417 | # return is not a function |
| 3338 | if (defined($stat) && $stat =~ /^.\s*return(\s*)\(/s) { | 3418 | if (defined($stat) && $stat =~ /^.\s*return(\s*)\(/s) { |
| 3339 | my $spacing = $1; | 3419 | my $spacing = $1; |
| 3340 | if ($^V && $^V ge 5.10.0 && | 3420 | if ($^V && $^V ge 5.10.0 && |
| 3341 | $stat =~ /^.\s*return\s*$balanced_parens\s*;\s*$/) { | 3421 | $stat =~ /^.\s*return\s*($balanced_parens)\s*;\s*$/) { |
| 3342 | ERROR("RETURN_PARENTHESES", | 3422 | my $value = $1; |
| 3343 | "return is not a function, parentheses are not required\n" . $herecurr); | 3423 | $value = deparenthesize($value); |
| 3344 | 3424 | if ($value =~ m/^\s*$FuncArg\s*(?:\?|$)/) { | |
| 3425 | ERROR("RETURN_PARENTHESES", | ||
| 3426 | "return is not a function, parentheses are not required\n" . $herecurr); | ||
| 3427 | } | ||
| 3345 | } elsif ($spacing !~ /\s+/) { | 3428 | } elsif ($spacing !~ /\s+/) { |
| 3346 | ERROR("SPACING", | 3429 | ERROR("SPACING", |
| 3347 | "space required before the open parenthesis '('\n" . $herecurr); | 3430 | "space required before the open parenthesis '('\n" . $herecurr); |
| @@ -3910,12 +3993,30 @@ sub process { | |||
| 3910 | } | 3993 | } |
| 3911 | } | 3994 | } |
| 3912 | 3995 | ||
| 3996 | # don't use __constant_<foo> functions outside of include/uapi/ | ||
| 3997 | if ($realfile !~ m@^include/uapi/@ && | ||
| 3998 | $line =~ /(__constant_(?:htons|ntohs|[bl]e(?:16|32|64)_to_cpu|cpu_to_[bl]e(?:16|32|64)))\s*\(/) { | ||
| 3999 | my $constant_func = $1; | ||
| 4000 | my $func = $constant_func; | ||
| 4001 | $func =~ s/^__constant_//; | ||
| 4002 | if (WARN("CONSTANT_CONVERSION", | ||
| 4003 | "$constant_func should be $func\n" . $herecurr) && | ||
| 4004 | $fix) { | ||
| 4005 | $fixed[$linenr - 1] =~ s/\b$constant_func\b/$func/g; | ||
| 4006 | } | ||
| 4007 | } | ||
| 4008 | |||
| 3913 | # prefer usleep_range over udelay | 4009 | # prefer usleep_range over udelay |
| 3914 | if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) { | 4010 | if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) { |
| 4011 | my $delay = $1; | ||
| 3915 | # ignore udelay's < 10, however | 4012 | # ignore udelay's < 10, however |
| 3916 | if (! ($1 < 10) ) { | 4013 | if (! ($delay < 10) ) { |
| 3917 | CHK("USLEEP_RANGE", | 4014 | CHK("USLEEP_RANGE", |
| 3918 | "usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\n" . $line); | 4015 | "usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\n" . $herecurr); |
| 4016 | } | ||
| 4017 | if ($delay > 2000) { | ||
| 4018 | WARN("LONG_UDELAY", | ||
| 4019 | "long udelay - prefer mdelay; see arch/arm/include/asm/delay.h\n" . $herecurr); | ||
| 3919 | } | 4020 | } |
| 3920 | } | 4021 | } |
| 3921 | 4022 | ||
| @@ -3923,7 +4024,7 @@ sub process { | |||
| 3923 | if ($line =~ /\bmsleep\s*\((\d+)\);/) { | 4024 | if ($line =~ /\bmsleep\s*\((\d+)\);/) { |
| 3924 | if ($1 < 20) { | 4025 | if ($1 < 20) { |
| 3925 | WARN("MSLEEP", | 4026 | WARN("MSLEEP", |
| 3926 | "msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\n" . $line); | 4027 | "msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\n" . $herecurr); |
| 3927 | } | 4028 | } |
| 3928 | } | 4029 | } |
| 3929 | 4030 | ||
| @@ -4149,7 +4250,7 @@ sub process { | |||
| 4149 | # check for naked sscanf | 4250 | # check for naked sscanf |
| 4150 | if ($^V && $^V ge 5.10.0 && | 4251 | if ($^V && $^V ge 5.10.0 && |
| 4151 | defined $stat && | 4252 | defined $stat && |
| 4152 | $stat =~ /\bsscanf\b/ && | 4253 | $line =~ /\bsscanf\b/ && |
| 4153 | ($stat !~ /$Ident\s*=\s*sscanf\s*$balanced_parens/ && | 4254 | ($stat !~ /$Ident\s*=\s*sscanf\s*$balanced_parens/ && |
| 4154 | $stat !~ /\bsscanf\s*$balanced_parens\s*(?:$Compare)/ && | 4255 | $stat !~ /\bsscanf\s*$balanced_parens\s*(?:$Compare)/ && |
| 4155 | $stat !~ /(?:$Compare)\s*\bsscanf\s*$balanced_parens/)) { | 4256 | $stat !~ /(?:$Compare)\s*\bsscanf\s*$balanced_parens/)) { |
| @@ -4240,12 +4341,6 @@ sub process { | |||
| 4240 | "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr); | 4341 | "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr); |
| 4241 | } | 4342 | } |
| 4242 | 4343 | ||
| 4243 | # check for GFP_NOWAIT use | ||
| 4244 | if ($line =~ /\b__GFP_NOFAIL\b/) { | ||
| 4245 | WARN("__GFP_NOFAIL", | ||
| 4246 | "Use of __GFP_NOFAIL is deprecated, no new users should be added\n" . $herecurr); | ||
| 4247 | } | ||
| 4248 | |||
| 4249 | # check for multiple semicolons | 4344 | # check for multiple semicolons |
| 4250 | if ($line =~ /;\s*;\s*$/) { | 4345 | if ($line =~ /;\s*;\s*$/) { |
| 4251 | if (WARN("ONE_SEMICOLON", | 4346 | if (WARN("ONE_SEMICOLON", |
| @@ -4457,6 +4552,34 @@ sub process { | |||
| 4457 | WARN("EXPORTED_WORLD_WRITABLE", | 4552 | WARN("EXPORTED_WORLD_WRITABLE", |
| 4458 | "Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr); | 4553 | "Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr); |
| 4459 | } | 4554 | } |
| 4555 | |||
| 4556 | # Mode permission misuses where it seems decimal should be octal | ||
| 4557 | # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop | ||
| 4558 | if ($^V && $^V ge 5.10.0 && | ||
| 4559 | $line =~ /$mode_perms_search/) { | ||
| 4560 | foreach my $entry (@mode_permission_funcs) { | ||
| 4561 | my $func = $entry->[0]; | ||
| 4562 | my $arg_pos = $entry->[1]; | ||
| 4563 | |||
| 4564 | my $skip_args = ""; | ||
| 4565 | if ($arg_pos > 1) { | ||
| 4566 | $arg_pos--; | ||
| 4567 | $skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}"; | ||
| 4568 | } | ||
| 4569 | my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]"; | ||
| 4570 | if ($line =~ /$test/) { | ||
| 4571 | my $val = $1; | ||
| 4572 | $val = $6 if ($skip_args ne ""); | ||
| 4573 | |||
| 4574 | if ($val !~ /^0$/ && | ||
| 4575 | (($val =~ /^$Int$/ && $val !~ /^$Octal$/) || | ||
| 4576 | length($val) ne 4)) { | ||
| 4577 | ERROR("NON_OCTAL_PERMISSIONS", | ||
| 4578 | "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr); | ||
| 4579 | } | ||
| 4580 | } | ||
| 4581 | } | ||
| 4582 | } | ||
| 4460 | } | 4583 | } |
| 4461 | 4584 | ||
| 4462 | # If we have no input at all, then there is nothing to report on | 4585 | # If we have no input at all, then there is nothing to report on |
diff --git a/scripts/genksyms/keywords.gperf b/scripts/genksyms/keywords.gperf index 3e77a943e7b7..a9096d993172 100644 --- a/scripts/genksyms/keywords.gperf +++ b/scripts/genksyms/keywords.gperf | |||
| @@ -23,6 +23,8 @@ __inline, INLINE_KEYW | |||
| 23 | __inline__, INLINE_KEYW | 23 | __inline__, INLINE_KEYW |
| 24 | __signed, SIGNED_KEYW | 24 | __signed, SIGNED_KEYW |
| 25 | __signed__, SIGNED_KEYW | 25 | __signed__, SIGNED_KEYW |
| 26 | __typeof, TYPEOF_KEYW | ||
| 27 | __typeof__, TYPEOF_KEYW | ||
| 26 | __volatile, VOLATILE_KEYW | 28 | __volatile, VOLATILE_KEYW |
| 27 | __volatile__, VOLATILE_KEYW | 29 | __volatile__, VOLATILE_KEYW |
| 28 | # According to rth, c99 defines _Bool, __restrict, __restrict__, restrict. KAO | 30 | # According to rth, c99 defines _Bool, __restrict, __restrict__, restrict. KAO |
| @@ -51,9 +53,8 @@ signed, SIGNED_KEYW | |||
| 51 | static, STATIC_KEYW | 53 | static, STATIC_KEYW |
| 52 | struct, STRUCT_KEYW | 54 | struct, STRUCT_KEYW |
| 53 | typedef, TYPEDEF_KEYW | 55 | typedef, TYPEDEF_KEYW |
| 56 | typeof, TYPEOF_KEYW | ||
| 54 | union, UNION_KEYW | 57 | union, UNION_KEYW |
| 55 | unsigned, UNSIGNED_KEYW | 58 | unsigned, UNSIGNED_KEYW |
| 56 | void, VOID_KEYW | 59 | void, VOID_KEYW |
| 57 | volatile, VOLATILE_KEYW | 60 | volatile, VOLATILE_KEYW |
| 58 | typeof, TYPEOF_KEYW | ||
| 59 | __typeof__, TYPEOF_KEYW | ||
diff --git a/scripts/genksyms/keywords.hash.c_shipped b/scripts/genksyms/keywords.hash.c_shipped index 82062607e8c0..e9452482e198 100644 --- a/scripts/genksyms/keywords.hash.c_shipped +++ b/scripts/genksyms/keywords.hash.c_shipped | |||
| @@ -34,7 +34,7 @@ struct resword; | |||
| 34 | static const struct resword *is_reserved_word(register const char *str, register unsigned int len); | 34 | static const struct resword *is_reserved_word(register const char *str, register unsigned int len); |
| 35 | #line 8 "scripts/genksyms/keywords.gperf" | 35 | #line 8 "scripts/genksyms/keywords.gperf" |
| 36 | struct resword { const char *name; int token; }; | 36 | struct resword { const char *name; int token; }; |
| 37 | /* maximum key range = 64, duplicates = 0 */ | 37 | /* maximum key range = 98, duplicates = 0 */ |
| 38 | 38 | ||
| 39 | #ifdef __GNUC__ | 39 | #ifdef __GNUC__ |
| 40 | __inline | 40 | __inline |
| @@ -48,32 +48,32 @@ is_reserved_hash (register const char *str, register unsigned int len) | |||
| 48 | { | 48 | { |
| 49 | static const unsigned char asso_values[] = | 49 | static const unsigned char asso_values[] = |
| 50 | { | 50 | { |
| 51 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 51 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 52 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 52 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 53 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 53 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 54 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 54 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 55 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 55 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 56 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 56 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 57 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 0, | 57 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 0, |
| 58 | 67, 67, 67, 67, 67, 67, 15, 67, 67, 67, | 58 | 101, 101, 101, 101, 101, 101, 15, 101, 101, 101, |
| 59 | 0, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 59 | 0, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 60 | 67, 67, 67, 67, 67, 0, 67, 0, 67, 5, | 60 | 101, 101, 101, 101, 101, 0, 101, 0, 101, 5, |
| 61 | 25, 20, 15, 30, 67, 15, 67, 67, 10, 0, | 61 | 25, 20, 55, 30, 101, 15, 101, 101, 10, 0, |
| 62 | 10, 40, 20, 67, 10, 5, 0, 10, 15, 67, | 62 | 10, 40, 10, 101, 10, 5, 0, 10, 15, 101, |
| 63 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 63 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 64 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 64 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 65 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 65 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 66 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 66 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 67 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 67 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 68 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 68 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 69 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 69 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 70 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 70 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 71 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 71 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 72 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 72 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 73 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 73 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 74 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 74 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 75 | 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, | 75 | 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, |
| 76 | 67, 67, 67, 67, 67, 67 | 76 | 101, 101, 101, 101, 101, 101 |
| 77 | }; | 77 | }; |
| 78 | return len + asso_values[(unsigned char)str[2]] + asso_values[(unsigned char)str[0]] + asso_values[(unsigned char)str[len - 1]]; | 78 | return len + asso_values[(unsigned char)str[2]] + asso_values[(unsigned char)str[0]] + asso_values[(unsigned char)str[len - 1]]; |
| 79 | } | 79 | } |
| @@ -89,17 +89,17 @@ is_reserved_word (register const char *str, register unsigned int len) | |||
| 89 | { | 89 | { |
| 90 | enum | 90 | enum |
| 91 | { | 91 | { |
| 92 | TOTAL_KEYWORDS = 45, | 92 | TOTAL_KEYWORDS = 46, |
| 93 | MIN_WORD_LENGTH = 3, | 93 | MIN_WORD_LENGTH = 3, |
| 94 | MAX_WORD_LENGTH = 24, | 94 | MAX_WORD_LENGTH = 24, |
| 95 | MIN_HASH_VALUE = 3, | 95 | MIN_HASH_VALUE = 3, |
| 96 | MAX_HASH_VALUE = 66 | 96 | MAX_HASH_VALUE = 100 |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | static const struct resword wordlist[] = | 99 | static const struct resword wordlist[] = |
| 100 | { | 100 | { |
| 101 | {""}, {""}, {""}, | 101 | {""}, {""}, {""}, |
| 102 | #line 33 "scripts/genksyms/keywords.gperf" | 102 | #line 35 "scripts/genksyms/keywords.gperf" |
| 103 | {"asm", ASM_KEYW}, | 103 | {"asm", ASM_KEYW}, |
| 104 | {""}, | 104 | {""}, |
| 105 | #line 15 "scripts/genksyms/keywords.gperf" | 105 | #line 15 "scripts/genksyms/keywords.gperf" |
| @@ -108,7 +108,7 @@ is_reserved_word (register const char *str, register unsigned int len) | |||
| 108 | #line 16 "scripts/genksyms/keywords.gperf" | 108 | #line 16 "scripts/genksyms/keywords.gperf" |
| 109 | {"__asm__", ASM_KEYW}, | 109 | {"__asm__", ASM_KEYW}, |
| 110 | {""}, {""}, | 110 | {""}, {""}, |
| 111 | #line 59 "scripts/genksyms/keywords.gperf" | 111 | #line 27 "scripts/genksyms/keywords.gperf" |
| 112 | {"__typeof__", TYPEOF_KEYW}, | 112 | {"__typeof__", TYPEOF_KEYW}, |
| 113 | {""}, | 113 | {""}, |
| 114 | #line 19 "scripts/genksyms/keywords.gperf" | 114 | #line 19 "scripts/genksyms/keywords.gperf" |
| @@ -119,31 +119,31 @@ is_reserved_word (register const char *str, register unsigned int len) | |||
| 119 | {"__const__", CONST_KEYW}, | 119 | {"__const__", CONST_KEYW}, |
| 120 | #line 25 "scripts/genksyms/keywords.gperf" | 120 | #line 25 "scripts/genksyms/keywords.gperf" |
| 121 | {"__signed__", SIGNED_KEYW}, | 121 | {"__signed__", SIGNED_KEYW}, |
| 122 | #line 51 "scripts/genksyms/keywords.gperf" | 122 | #line 53 "scripts/genksyms/keywords.gperf" |
| 123 | {"static", STATIC_KEYW}, | 123 | {"static", STATIC_KEYW}, |
| 124 | {""}, | 124 | {""}, |
| 125 | #line 46 "scripts/genksyms/keywords.gperf" | 125 | #line 48 "scripts/genksyms/keywords.gperf" |
| 126 | {"int", INT_KEYW}, | 126 | {"int", INT_KEYW}, |
| 127 | #line 39 "scripts/genksyms/keywords.gperf" | 127 | #line 41 "scripts/genksyms/keywords.gperf" |
| 128 | {"char", CHAR_KEYW}, | 128 | {"char", CHAR_KEYW}, |
| 129 | #line 40 "scripts/genksyms/keywords.gperf" | 129 | #line 42 "scripts/genksyms/keywords.gperf" |
| 130 | {"const", CONST_KEYW}, | 130 | {"const", CONST_KEYW}, |
| 131 | #line 52 "scripts/genksyms/keywords.gperf" | 131 | #line 54 "scripts/genksyms/keywords.gperf" |
| 132 | {"struct", STRUCT_KEYW}, | 132 | {"struct", STRUCT_KEYW}, |
| 133 | #line 31 "scripts/genksyms/keywords.gperf" | 133 | #line 33 "scripts/genksyms/keywords.gperf" |
| 134 | {"__restrict__", RESTRICT_KEYW}, | 134 | {"__restrict__", RESTRICT_KEYW}, |
| 135 | #line 32 "scripts/genksyms/keywords.gperf" | 135 | #line 34 "scripts/genksyms/keywords.gperf" |
| 136 | {"restrict", RESTRICT_KEYW}, | 136 | {"restrict", RESTRICT_KEYW}, |
| 137 | #line 12 "scripts/genksyms/keywords.gperf" | 137 | #line 12 "scripts/genksyms/keywords.gperf" |
| 138 | {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, | 138 | {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, |
| 139 | #line 23 "scripts/genksyms/keywords.gperf" | 139 | #line 23 "scripts/genksyms/keywords.gperf" |
| 140 | {"__inline__", INLINE_KEYW}, | 140 | {"__inline__", INLINE_KEYW}, |
| 141 | {""}, | 141 | {""}, |
| 142 | #line 27 "scripts/genksyms/keywords.gperf" | 142 | #line 29 "scripts/genksyms/keywords.gperf" |
| 143 | {"__volatile__", VOLATILE_KEYW}, | 143 | {"__volatile__", VOLATILE_KEYW}, |
| 144 | #line 10 "scripts/genksyms/keywords.gperf" | 144 | #line 10 "scripts/genksyms/keywords.gperf" |
| 145 | {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, | 145 | {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, |
| 146 | #line 30 "scripts/genksyms/keywords.gperf" | 146 | #line 32 "scripts/genksyms/keywords.gperf" |
| 147 | {"_restrict", RESTRICT_KEYW}, | 147 | {"_restrict", RESTRICT_KEYW}, |
| 148 | {""}, | 148 | {""}, |
| 149 | #line 17 "scripts/genksyms/keywords.gperf" | 149 | #line 17 "scripts/genksyms/keywords.gperf" |
| @@ -152,56 +152,65 @@ is_reserved_word (register const char *str, register unsigned int len) | |||
| 152 | {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, | 152 | {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, |
| 153 | #line 21 "scripts/genksyms/keywords.gperf" | 153 | #line 21 "scripts/genksyms/keywords.gperf" |
| 154 | {"__extension__", EXTENSION_KEYW}, | 154 | {"__extension__", EXTENSION_KEYW}, |
| 155 | #line 42 "scripts/genksyms/keywords.gperf" | 155 | #line 44 "scripts/genksyms/keywords.gperf" |
| 156 | {"enum", ENUM_KEYW}, | 156 | {"enum", ENUM_KEYW}, |
| 157 | #line 13 "scripts/genksyms/keywords.gperf" | 157 | #line 13 "scripts/genksyms/keywords.gperf" |
| 158 | {"EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW}, | 158 | {"EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW}, |
| 159 | #line 43 "scripts/genksyms/keywords.gperf" | 159 | #line 45 "scripts/genksyms/keywords.gperf" |
| 160 | {"extern", EXTERN_KEYW}, | 160 | {"extern", EXTERN_KEYW}, |
| 161 | {""}, | 161 | {""}, |
| 162 | #line 24 "scripts/genksyms/keywords.gperf" | 162 | #line 24 "scripts/genksyms/keywords.gperf" |
| 163 | {"__signed", SIGNED_KEYW}, | 163 | {"__signed", SIGNED_KEYW}, |
| 164 | #line 14 "scripts/genksyms/keywords.gperf" | 164 | #line 14 "scripts/genksyms/keywords.gperf" |
| 165 | {"EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, | 165 | {"EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, |
| 166 | #line 54 "scripts/genksyms/keywords.gperf" | 166 | #line 57 "scripts/genksyms/keywords.gperf" |
| 167 | {"union", UNION_KEYW}, | 167 | {"union", UNION_KEYW}, |
| 168 | #line 58 "scripts/genksyms/keywords.gperf" | 168 | {""}, {""}, |
| 169 | {"typeof", TYPEOF_KEYW}, | ||
| 170 | #line 53 "scripts/genksyms/keywords.gperf" | ||
| 171 | {"typedef", TYPEDEF_KEYW}, | ||
| 172 | #line 22 "scripts/genksyms/keywords.gperf" | 169 | #line 22 "scripts/genksyms/keywords.gperf" |
| 173 | {"__inline", INLINE_KEYW}, | 170 | {"__inline", INLINE_KEYW}, |
| 174 | #line 38 "scripts/genksyms/keywords.gperf" | 171 | #line 40 "scripts/genksyms/keywords.gperf" |
| 175 | {"auto", AUTO_KEYW}, | 172 | {"auto", AUTO_KEYW}, |
| 176 | #line 26 "scripts/genksyms/keywords.gperf" | 173 | #line 28 "scripts/genksyms/keywords.gperf" |
| 177 | {"__volatile", VOLATILE_KEYW}, | 174 | {"__volatile", VOLATILE_KEYW}, |
| 178 | {""}, {""}, | 175 | {""}, {""}, |
| 179 | #line 55 "scripts/genksyms/keywords.gperf" | 176 | #line 58 "scripts/genksyms/keywords.gperf" |
| 180 | {"unsigned", UNSIGNED_KEYW}, | 177 | {"unsigned", UNSIGNED_KEYW}, |
| 181 | {""}, | 178 | {""}, |
| 182 | #line 49 "scripts/genksyms/keywords.gperf" | 179 | #line 51 "scripts/genksyms/keywords.gperf" |
| 183 | {"short", SHORT_KEYW}, | 180 | {"short", SHORT_KEYW}, |
| 184 | #line 45 "scripts/genksyms/keywords.gperf" | 181 | #line 47 "scripts/genksyms/keywords.gperf" |
| 185 | {"inline", INLINE_KEYW}, | 182 | {"inline", INLINE_KEYW}, |
| 186 | {""}, | 183 | {""}, |
| 187 | #line 57 "scripts/genksyms/keywords.gperf" | 184 | #line 60 "scripts/genksyms/keywords.gperf" |
| 188 | {"volatile", VOLATILE_KEYW}, | 185 | {"volatile", VOLATILE_KEYW}, |
| 189 | #line 47 "scripts/genksyms/keywords.gperf" | 186 | #line 49 "scripts/genksyms/keywords.gperf" |
| 190 | {"long", LONG_KEYW}, | 187 | {"long", LONG_KEYW}, |
| 191 | #line 29 "scripts/genksyms/keywords.gperf" | 188 | #line 31 "scripts/genksyms/keywords.gperf" |
| 192 | {"_Bool", BOOL_KEYW}, | 189 | {"_Bool", BOOL_KEYW}, |
| 193 | {""}, {""}, | 190 | {""}, {""}, |
| 194 | #line 48 "scripts/genksyms/keywords.gperf" | 191 | #line 50 "scripts/genksyms/keywords.gperf" |
| 195 | {"register", REGISTER_KEYW}, | 192 | {"register", REGISTER_KEYW}, |
| 196 | #line 56 "scripts/genksyms/keywords.gperf" | 193 | #line 59 "scripts/genksyms/keywords.gperf" |
| 197 | {"void", VOID_KEYW}, | 194 | {"void", VOID_KEYW}, |
| 198 | #line 44 "scripts/genksyms/keywords.gperf" | 195 | {""}, |
| 199 | {"float", FLOAT_KEYW}, | 196 | #line 43 "scripts/genksyms/keywords.gperf" |
| 200 | #line 41 "scripts/genksyms/keywords.gperf" | ||
| 201 | {"double", DOUBLE_KEYW}, | 197 | {"double", DOUBLE_KEYW}, |
| 198 | {""}, | ||
| 199 | #line 26 "scripts/genksyms/keywords.gperf" | ||
| 200 | {"__typeof", TYPEOF_KEYW}, | ||
| 201 | {""}, {""}, | ||
| 202 | #line 52 "scripts/genksyms/keywords.gperf" | ||
| 203 | {"signed", SIGNED_KEYW}, | ||
| 202 | {""}, {""}, {""}, {""}, | 204 | {""}, {""}, {""}, {""}, |
| 203 | #line 50 "scripts/genksyms/keywords.gperf" | 205 | #line 56 "scripts/genksyms/keywords.gperf" |
| 204 | {"signed", SIGNED_KEYW} | 206 | {"typeof", TYPEOF_KEYW}, |
| 207 | #line 55 "scripts/genksyms/keywords.gperf" | ||
| 208 | {"typedef", TYPEDEF_KEYW}, | ||
| 209 | {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, | ||
| 210 | {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, | ||
| 211 | {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, | ||
| 212 | #line 46 "scripts/genksyms/keywords.gperf" | ||
| 213 | {"float", FLOAT_KEYW} | ||
| 205 | }; | 214 | }; |
| 206 | 215 | ||
| 207 | if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) | 216 | if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) |
diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l index f770071719cb..e583565f2011 100644 --- a/scripts/genksyms/lex.l +++ b/scripts/genksyms/lex.l | |||
| @@ -129,8 +129,9 @@ int | |||
| 129 | yylex(void) | 129 | yylex(void) |
| 130 | { | 130 | { |
| 131 | static enum { | 131 | static enum { |
| 132 | ST_NOTSTARTED, ST_NORMAL, ST_ATTRIBUTE, ST_ASM, ST_BRACKET, ST_BRACE, | 132 | ST_NOTSTARTED, ST_NORMAL, ST_ATTRIBUTE, ST_ASM, ST_TYPEOF, ST_TYPEOF_1, |
| 133 | ST_EXPRESSION, ST_TABLE_1, ST_TABLE_2, ST_TABLE_3, ST_TABLE_4, | 133 | ST_BRACKET, ST_BRACE, ST_EXPRESSION, |
| 134 | ST_TABLE_1, ST_TABLE_2, ST_TABLE_3, ST_TABLE_4, | ||
| 134 | ST_TABLE_5, ST_TABLE_6 | 135 | ST_TABLE_5, ST_TABLE_6 |
| 135 | } lexstate = ST_NOTSTARTED; | 136 | } lexstate = ST_NOTSTARTED; |
| 136 | 137 | ||
| @@ -198,6 +199,10 @@ repeat: | |||
| 198 | lexstate = ST_ASM; | 199 | lexstate = ST_ASM; |
| 199 | count = 0; | 200 | count = 0; |
| 200 | goto repeat; | 201 | goto repeat; |
| 202 | case TYPEOF_KEYW: | ||
| 203 | lexstate = ST_TYPEOF; | ||
| 204 | count = 0; | ||
| 205 | goto repeat; | ||
| 201 | 206 | ||
| 202 | case STRUCT_KEYW: | 207 | case STRUCT_KEYW: |
| 203 | case UNION_KEYW: | 208 | case UNION_KEYW: |
| @@ -284,6 +289,48 @@ repeat: | |||
| 284 | } | 289 | } |
| 285 | break; | 290 | break; |
| 286 | 291 | ||
| 292 | case ST_TYPEOF: | ||
| 293 | switch (token) | ||
| 294 | { | ||
| 295 | case '(': | ||
| 296 | if ( ++count == 1 ) | ||
| 297 | lexstate = ST_TYPEOF_1; | ||
| 298 | else | ||
| 299 | APP; | ||
| 300 | goto repeat; | ||
| 301 | case ')': | ||
| 302 | APP; | ||
| 303 | if (--count == 0) | ||
| 304 | { | ||
| 305 | lexstate = ST_NORMAL; | ||
| 306 | token = TYPEOF_PHRASE; | ||
| 307 | break; | ||
| 308 | } | ||
| 309 | goto repeat; | ||
| 310 | default: | ||
| 311 | APP; | ||
| 312 | goto repeat; | ||
| 313 | } | ||
| 314 | break; | ||
| 315 | |||
| 316 | case ST_TYPEOF_1: | ||
| 317 | if (token == IDENT) | ||
| 318 | { | ||
| 319 | if (is_reserved_word(yytext, yyleng) | ||
| 320 | || find_symbol(yytext, SYM_TYPEDEF, 1)) | ||
| 321 | { | ||
| 322 | yyless(0); | ||
| 323 | unput('('); | ||
| 324 | lexstate = ST_NORMAL; | ||
| 325 | token = TYPEOF_KEYW; | ||
| 326 | break; | ||
| 327 | } | ||
| 328 | _APP("(", 1); | ||
| 329 | } | ||
| 330 | APP; | ||
| 331 | lexstate = ST_TYPEOF; | ||
| 332 | goto repeat; | ||
| 333 | |||
| 287 | case ST_BRACKET: | 334 | case ST_BRACKET: |
| 288 | APP; | 335 | APP; |
| 289 | switch (token) | 336 | switch (token) |
diff --git a/scripts/genksyms/lex.lex.c_shipped b/scripts/genksyms/lex.lex.c_shipped index 0bf4157e6161..f82740a69b85 100644 --- a/scripts/genksyms/lex.lex.c_shipped +++ b/scripts/genksyms/lex.lex.c_shipped | |||
| @@ -1938,8 +1938,9 @@ int | |||
| 1938 | yylex(void) | 1938 | yylex(void) |
| 1939 | { | 1939 | { |
| 1940 | static enum { | 1940 | static enum { |
| 1941 | ST_NOTSTARTED, ST_NORMAL, ST_ATTRIBUTE, ST_ASM, ST_BRACKET, ST_BRACE, | 1941 | ST_NOTSTARTED, ST_NORMAL, ST_ATTRIBUTE, ST_ASM, ST_TYPEOF, ST_TYPEOF_1, |
| 1942 | ST_EXPRESSION, ST_TABLE_1, ST_TABLE_2, ST_TABLE_3, ST_TABLE_4, | 1942 | ST_BRACKET, ST_BRACE, ST_EXPRESSION, |
| 1943 | ST_TABLE_1, ST_TABLE_2, ST_TABLE_3, ST_TABLE_4, | ||
| 1943 | ST_TABLE_5, ST_TABLE_6 | 1944 | ST_TABLE_5, ST_TABLE_6 |
| 1944 | } lexstate = ST_NOTSTARTED; | 1945 | } lexstate = ST_NOTSTARTED; |
| 1945 | 1946 | ||
| @@ -2007,6 +2008,10 @@ repeat: | |||
| 2007 | lexstate = ST_ASM; | 2008 | lexstate = ST_ASM; |
| 2008 | count = 0; | 2009 | count = 0; |
| 2009 | goto repeat; | 2010 | goto repeat; |
| 2011 | case TYPEOF_KEYW: | ||
| 2012 | lexstate = ST_TYPEOF; | ||
| 2013 | count = 0; | ||
| 2014 | goto repeat; | ||
| 2010 | 2015 | ||
| 2011 | case STRUCT_KEYW: | 2016 | case STRUCT_KEYW: |
| 2012 | case UNION_KEYW: | 2017 | case UNION_KEYW: |
| @@ -2093,6 +2098,48 @@ repeat: | |||
| 2093 | } | 2098 | } |
| 2094 | break; | 2099 | break; |
| 2095 | 2100 | ||
| 2101 | case ST_TYPEOF: | ||
| 2102 | switch (token) | ||
| 2103 | { | ||
| 2104 | case '(': | ||
| 2105 | if ( ++count == 1 ) | ||
| 2106 | lexstate = ST_TYPEOF_1; | ||
| 2107 | else | ||
| 2108 | APP; | ||
| 2109 | goto repeat; | ||
| 2110 | case ')': | ||
| 2111 | APP; | ||
| 2112 | if (--count == 0) | ||
| 2113 | { | ||
| 2114 | lexstate = ST_NORMAL; | ||
| 2115 | token = TYPEOF_PHRASE; | ||
| 2116 | break; | ||
| 2117 | } | ||
| 2118 | goto repeat; | ||
| 2119 | default: | ||
| 2120 | APP; | ||
| 2121 | goto repeat; | ||
| 2122 | } | ||
| 2123 | break; | ||
| 2124 | |||
| 2125 | case ST_TYPEOF_1: | ||
| 2126 | if (token == IDENT) | ||
| 2127 | { | ||
| 2128 | if (is_reserved_word(yytext, yyleng) | ||
| 2129 | || find_symbol(yytext, SYM_TYPEDEF, 1)) | ||
| 2130 | { | ||
| 2131 | yyless(0); | ||
| 2132 | unput('('); | ||
| 2133 | lexstate = ST_NORMAL; | ||
| 2134 | token = TYPEOF_KEYW; | ||
| 2135 | break; | ||
| 2136 | } | ||
| 2137 | _APP("(", 1); | ||
| 2138 | } | ||
| 2139 | APP; | ||
| 2140 | lexstate = ST_TYPEOF; | ||
| 2141 | goto repeat; | ||
| 2142 | |||
| 2096 | case ST_BRACKET: | 2143 | case ST_BRACKET: |
| 2097 | APP; | 2144 | APP; |
| 2098 | switch (token) | 2145 | switch (token) |
diff --git a/scripts/genksyms/parse.tab.c_shipped b/scripts/genksyms/parse.tab.c_shipped index ece53c79bb59..c9f0f0ce82ff 100644 --- a/scripts/genksyms/parse.tab.c_shipped +++ b/scripts/genksyms/parse.tab.c_shipped | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | /* A Bison parser, made by GNU Bison 2.5. */ | 1 | /* A Bison parser, made by GNU Bison 2.5.1. */ |
| 2 | 2 | ||
| 3 | /* Bison implementation for Yacc-like parsers in C | 3 | /* Bison implementation for Yacc-like parsers in C |
| 4 | 4 | ||
| 5 | Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc. | 5 | Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. |
| 6 | 6 | ||
| 7 | This program is free software: you can redistribute it and/or modify | 7 | This program is free software: you can redistribute it and/or modify |
| 8 | it under the terms of the GNU General Public License as published by | 8 | it under the terms of the GNU General Public License as published by |
| @@ -44,7 +44,7 @@ | |||
| 44 | #define YYBISON 1 | 44 | #define YYBISON 1 |
| 45 | 45 | ||
| 46 | /* Bison version. */ | 46 | /* Bison version. */ |
| 47 | #define YYBISON_VERSION "2.5" | 47 | #define YYBISON_VERSION "2.5.1" |
| 48 | 48 | ||
| 49 | /* Skeleton name. */ | 49 | /* Skeleton name. */ |
| 50 | #define YYSKELETON_NAME "yacc.c" | 50 | #define YYSKELETON_NAME "yacc.c" |
| @@ -117,6 +117,14 @@ static void record_compound(struct string_list **keyw, | |||
| 117 | 117 | ||
| 118 | 118 | ||
| 119 | 119 | ||
| 120 | # ifndef YY_NULL | ||
| 121 | # if defined __cplusplus && 201103L <= __cplusplus | ||
| 122 | # define YY_NULL nullptr | ||
| 123 | # else | ||
| 124 | # define YY_NULL 0 | ||
| 125 | # endif | ||
| 126 | # endif | ||
| 127 | |||
| 120 | /* Enabling traces. */ | 128 | /* Enabling traces. */ |
| 121 | #ifndef YYDEBUG | 129 | #ifndef YYDEBUG |
| 122 | # define YYDEBUG 1 | 130 | # define YYDEBUG 1 |
| @@ -171,18 +179,19 @@ static void record_compound(struct string_list **keyw, | |||
| 171 | EXPORT_SYMBOL_KEYW = 284, | 179 | EXPORT_SYMBOL_KEYW = 284, |
| 172 | ASM_PHRASE = 285, | 180 | ASM_PHRASE = 285, |
| 173 | ATTRIBUTE_PHRASE = 286, | 181 | ATTRIBUTE_PHRASE = 286, |
| 174 | BRACE_PHRASE = 287, | 182 | TYPEOF_PHRASE = 287, |
| 175 | BRACKET_PHRASE = 288, | 183 | BRACE_PHRASE = 288, |
| 176 | EXPRESSION_PHRASE = 289, | 184 | BRACKET_PHRASE = 289, |
| 177 | CHAR = 290, | 185 | EXPRESSION_PHRASE = 290, |
| 178 | DOTS = 291, | 186 | CHAR = 291, |
| 179 | IDENT = 292, | 187 | DOTS = 292, |
| 180 | INT = 293, | 188 | IDENT = 293, |
| 181 | REAL = 294, | 189 | INT = 294, |
| 182 | STRING = 295, | 190 | REAL = 295, |
| 183 | TYPE = 296, | 191 | STRING = 296, |
| 184 | OTHER = 297, | 192 | TYPE = 297, |
| 185 | FILENAME = 298 | 193 | OTHER = 298, |
| 194 | FILENAME = 299 | ||
| 186 | }; | 195 | }; |
| 187 | #endif | 196 | #endif |
| 188 | 197 | ||
| @@ -304,6 +313,7 @@ YYID (yyi) | |||
| 304 | # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ | 313 | # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ |
| 305 | || defined __cplusplus || defined _MSC_VER) | 314 | || defined __cplusplus || defined _MSC_VER) |
| 306 | # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ | 315 | # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ |
| 316 | /* Use EXIT_SUCCESS as a witness for stdlib.h. */ | ||
| 307 | # ifndef EXIT_SUCCESS | 317 | # ifndef EXIT_SUCCESS |
| 308 | # define EXIT_SUCCESS 0 | 318 | # define EXIT_SUCCESS 0 |
| 309 | # endif | 319 | # endif |
| @@ -395,20 +405,20 @@ union yyalloc | |||
| 395 | #endif | 405 | #endif |
| 396 | 406 | ||
| 397 | #if defined YYCOPY_NEEDED && YYCOPY_NEEDED | 407 | #if defined YYCOPY_NEEDED && YYCOPY_NEEDED |
| 398 | /* Copy COUNT objects from FROM to TO. The source and destination do | 408 | /* Copy COUNT objects from SRC to DST. The source and destination do |
| 399 | not overlap. */ | 409 | not overlap. */ |
| 400 | # ifndef YYCOPY | 410 | # ifndef YYCOPY |
| 401 | # if defined __GNUC__ && 1 < __GNUC__ | 411 | # if defined __GNUC__ && 1 < __GNUC__ |
| 402 | # define YYCOPY(To, From, Count) \ | 412 | # define YYCOPY(Dst, Src, Count) \ |
| 403 | __builtin_memcpy (To, From, (Count) * sizeof (*(From))) | 413 | __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) |
| 404 | # else | 414 | # else |
| 405 | # define YYCOPY(To, From, Count) \ | 415 | # define YYCOPY(Dst, Src, Count) \ |
| 406 | do \ | 416 | do \ |
| 407 | { \ | 417 | { \ |
| 408 | YYSIZE_T yyi; \ | 418 | YYSIZE_T yyi; \ |
| 409 | for (yyi = 0; yyi < (Count); yyi++) \ | 419 | for (yyi = 0; yyi < (Count); yyi++) \ |
| 410 | (To)[yyi] = (From)[yyi]; \ | 420 | (Dst)[yyi] = (Src)[yyi]; \ |
| 411 | } \ | 421 | } \ |
| 412 | while (YYID (0)) | 422 | while (YYID (0)) |
| 413 | # endif | 423 | # endif |
| 414 | # endif | 424 | # endif |
| @@ -417,20 +427,20 @@ union yyalloc | |||
| 417 | /* YYFINAL -- State number of the termination state. */ | 427 | /* YYFINAL -- State number of the termination state. */ |
| 418 | #define YYFINAL 4 | 428 | #define YYFINAL 4 |
| 419 | /* YYLAST -- Last index in YYTABLE. */ | 429 | /* YYLAST -- Last index in YYTABLE. */ |
| 420 | #define YYLAST 532 | 430 | #define YYLAST 514 |
| 421 | 431 | ||
| 422 | /* YYNTOKENS -- Number of terminals. */ | 432 | /* YYNTOKENS -- Number of terminals. */ |
| 423 | #define YYNTOKENS 53 | 433 | #define YYNTOKENS 54 |
| 424 | /* YYNNTS -- Number of nonterminals. */ | 434 | /* YYNNTS -- Number of nonterminals. */ |
| 425 | #define YYNNTS 49 | 435 | #define YYNNTS 49 |
| 426 | /* YYNRULES -- Number of rules. */ | 436 | /* YYNRULES -- Number of rules. */ |
| 427 | #define YYNRULES 132 | 437 | #define YYNRULES 132 |
| 428 | /* YYNRULES -- Number of states. */ | 438 | /* YYNRULES -- Number of states. */ |
| 429 | #define YYNSTATES 188 | 439 | #define YYNSTATES 187 |
| 430 | 440 | ||
| 431 | /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ | 441 | /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ |
| 432 | #define YYUNDEFTOK 2 | 442 | #define YYUNDEFTOK 2 |
| 433 | #define YYMAXUTOK 298 | 443 | #define YYMAXUTOK 299 |
| 434 | 444 | ||
| 435 | #define YYTRANSLATE(YYX) \ | 445 | #define YYTRANSLATE(YYX) \ |
| 436 | ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) | 446 | ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) |
| @@ -442,15 +452,15 @@ static const yytype_uint8 yytranslate[] = | |||
| 442 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 452 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 443 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 453 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 444 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 454 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 445 | 47, 49, 48, 2, 46, 2, 2, 2, 2, 2, | 455 | 48, 49, 50, 2, 47, 2, 2, 2, 2, 2, |
| 446 | 2, 2, 2, 2, 2, 2, 2, 2, 52, 44, | 456 | 2, 2, 2, 2, 2, 2, 2, 2, 53, 45, |
| 447 | 2, 50, 2, 2, 2, 2, 2, 2, 2, 2, | 457 | 2, 51, 2, 2, 2, 2, 2, 2, 2, 2, |
| 448 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 458 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 449 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 459 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 450 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 460 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 451 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 461 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 452 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 462 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 453 | 2, 2, 2, 51, 2, 45, 2, 2, 2, 2, | 463 | 2, 2, 2, 52, 2, 46, 2, 2, 2, 2, |
| 454 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 464 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 455 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 465 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 456 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | 466 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| @@ -467,7 +477,7 @@ static const yytype_uint8 yytranslate[] = | |||
| 467 | 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, | 477 | 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, |
| 468 | 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, | 478 | 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, |
| 469 | 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, | 479 | 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, |
| 470 | 35, 36, 37, 38, 39, 40, 41, 42, 43 | 480 | 35, 36, 37, 38, 39, 40, 41, 42, 43, 44 |
| 471 | }; | 481 | }; |
| 472 | 482 | ||
| 473 | #if YYDEBUG | 483 | #if YYDEBUG |
| @@ -478,78 +488,77 @@ static const yytype_uint16 yyprhs[] = | |||
| 478 | 0, 0, 3, 5, 8, 9, 12, 13, 18, 19, | 488 | 0, 0, 3, 5, 8, 9, 12, 13, 18, 19, |
| 479 | 23, 25, 27, 29, 31, 34, 37, 41, 42, 44, | 489 | 23, 25, 27, 29, 31, 34, 37, 41, 42, 44, |
| 480 | 46, 50, 55, 56, 58, 60, 63, 65, 67, 69, | 490 | 46, 50, 55, 56, 58, 60, 63, 65, 67, 69, |
| 481 | 71, 73, 75, 77, 79, 81, 87, 92, 95, 98, | 491 | 71, 73, 75, 77, 79, 81, 86, 88, 91, 94, |
| 482 | 101, 105, 109, 113, 116, 119, 122, 124, 126, 128, | 492 | 97, 101, 105, 109, 112, 115, 118, 120, 122, 124, |
| 483 | 130, 132, 134, 136, 138, 140, 142, 144, 147, 148, | 493 | 126, 128, 130, 132, 134, 136, 138, 140, 143, 144, |
| 484 | 150, 152, 155, 157, 159, 161, 163, 166, 168, 170, | 494 | 146, 148, 151, 153, 155, 157, 159, 162, 164, 166, |
| 485 | 175, 180, 183, 187, 191, 194, 196, 198, 200, 205, | 495 | 171, 176, 179, 183, 187, 190, 192, 194, 196, 201, |
| 486 | 210, 213, 217, 221, 224, 226, 230, 231, 233, 235, | 496 | 206, 209, 213, 217, 220, 222, 226, 227, 229, 231, |
| 487 | 239, 242, 245, 247, 248, 250, 252, 257, 262, 265, | 497 | 235, 238, 241, 243, 244, 246, 248, 253, 258, 261, |
| 488 | 269, 273, 277, 278, 280, 283, 287, 291, 292, 294, | 498 | 265, 269, 273, 274, 276, 279, 283, 287, 288, 290, |
| 489 | 296, 299, 303, 306, 307, 309, 311, 315, 318, 321, | 499 | 292, 295, 299, 302, 303, 305, 307, 311, 314, 317, |
| 490 | 323, 326, 327, 330, 334, 339, 341, 345, 347, 351, | 500 | 319, 322, 323, 326, 330, 335, 337, 341, 343, 347, |
| 491 | 354, 355, 357 | 501 | 350, 351, 353 |
| 492 | }; | 502 | }; |
| 493 | 503 | ||
| 494 | /* YYRHS -- A `-1'-separated list of the rules' RHS. */ | 504 | /* YYRHS -- A `-1'-separated list of the rules' RHS. */ |
| 495 | static const yytype_int8 yyrhs[] = | 505 | static const yytype_int8 yyrhs[] = |
| 496 | { | 506 | { |
| 497 | 54, 0, -1, 55, -1, 54, 55, -1, -1, 56, | 507 | 55, 0, -1, 56, -1, 55, 56, -1, -1, 57, |
| 498 | 57, -1, -1, 12, 23, 58, 60, -1, -1, 23, | 508 | 58, -1, -1, 12, 23, 59, 61, -1, -1, 23, |
| 499 | 59, 60, -1, 60, -1, 84, -1, 99, -1, 101, | 509 | 60, 61, -1, 61, -1, 85, -1, 100, -1, 102, |
| 500 | -1, 1, 44, -1, 1, 45, -1, 64, 61, 44, | 510 | -1, 1, 45, -1, 1, 46, -1, 65, 62, 45, |
| 501 | -1, -1, 62, -1, 63, -1, 62, 46, 63, -1, | 511 | -1, -1, 63, -1, 64, -1, 63, 47, 64, -1, |
| 502 | 74, 100, 95, 85, -1, -1, 65, -1, 66, -1, | 512 | 75, 101, 96, 86, -1, -1, 66, -1, 67, -1, |
| 503 | 65, 66, -1, 67, -1, 68, -1, 5, -1, 17, | 513 | 66, 67, -1, 68, -1, 69, -1, 5, -1, 17, |
| 504 | -1, 21, -1, 11, -1, 14, -1, 69, -1, 73, | 514 | -1, 21, -1, 11, -1, 14, -1, 70, -1, 74, |
| 505 | -1, 28, 47, 65, 48, 49, -1, 28, 47, 65, | 515 | -1, 28, 48, 82, 49, -1, 32, -1, 22, 38, |
| 506 | 49, -1, 22, 37, -1, 24, 37, -1, 10, 37, | 516 | -1, 24, 38, -1, 10, 38, -1, 22, 38, 88, |
| 507 | -1, 22, 37, 87, -1, 24, 37, 87, -1, 10, | 517 | -1, 24, 38, 88, -1, 10, 38, 97, -1, 10, |
| 508 | 37, 96, -1, 10, 96, -1, 22, 87, -1, 24, | 518 | 97, -1, 22, 88, -1, 24, 88, -1, 7, -1, |
| 509 | 87, -1, 7, -1, 19, -1, 15, -1, 16, -1, | 519 | 19, -1, 15, -1, 16, -1, 20, -1, 25, -1, |
| 510 | 20, -1, 25, -1, 13, -1, 9, -1, 26, -1, | 520 | 13, -1, 9, -1, 26, -1, 6, -1, 42, -1, |
| 511 | 6, -1, 41, -1, 48, 71, -1, -1, 72, -1, | 521 | 50, 72, -1, -1, 73, -1, 74, -1, 73, 74, |
| 512 | 73, -1, 72, 73, -1, 8, -1, 27, -1, 31, | 522 | -1, 8, -1, 27, -1, 31, -1, 18, -1, 71, |
| 513 | -1, 18, -1, 70, 74, -1, 75, -1, 37, -1, | 523 | 75, -1, 76, -1, 38, -1, 76, 48, 79, 49, |
| 514 | 75, 47, 78, 49, -1, 75, 47, 1, 49, -1, | 524 | -1, 76, 48, 1, 49, -1, 76, 34, -1, 48, |
| 515 | 75, 33, -1, 47, 74, 49, -1, 47, 1, 49, | 525 | 75, 49, -1, 48, 1, 49, -1, 71, 77, -1, |
| 516 | -1, 70, 76, -1, 77, -1, 37, -1, 41, -1, | 526 | 78, -1, 38, -1, 42, -1, 78, 48, 79, 49, |
| 517 | 77, 47, 78, 49, -1, 77, 47, 1, 49, -1, | 527 | -1, 78, 48, 1, 49, -1, 78, 34, -1, 48, |
| 518 | 77, 33, -1, 47, 76, 49, -1, 47, 1, 49, | 528 | 77, 49, -1, 48, 1, 49, -1, 80, 37, -1, |
| 519 | -1, 79, 36, -1, 79, -1, 80, 46, 36, -1, | 529 | 80, -1, 81, 47, 37, -1, -1, 81, -1, 82, |
| 520 | -1, 80, -1, 81, -1, 80, 46, 81, -1, 65, | 530 | -1, 81, 47, 82, -1, 66, 83, -1, 71, 83, |
| 521 | 82, -1, 70, 82, -1, 83, -1, -1, 37, -1, | 531 | -1, 84, -1, -1, 38, -1, 42, -1, 84, 48, |
| 522 | 41, -1, 83, 47, 78, 49, -1, 83, 47, 1, | 532 | 79, 49, -1, 84, 48, 1, 49, -1, 84, 34, |
| 523 | 49, -1, 83, 33, -1, 47, 82, 49, -1, 47, | 533 | -1, 48, 83, 49, -1, 48, 1, 49, -1, 65, |
| 524 | 1, 49, -1, 64, 74, 32, -1, -1, 86, -1, | 534 | 75, 33, -1, -1, 87, -1, 51, 35, -1, 52, |
| 525 | 50, 34, -1, 51, 88, 45, -1, 51, 1, 45, | 535 | 89, 46, -1, 52, 1, 46, -1, -1, 90, -1, |
| 526 | -1, -1, 89, -1, 90, -1, 89, 90, -1, 64, | 536 | 91, -1, 90, 91, -1, 65, 92, 45, -1, 1, |
| 527 | 91, 44, -1, 1, 44, -1, -1, 92, -1, 93, | 537 | 45, -1, -1, 93, -1, 94, -1, 93, 47, 94, |
| 528 | -1, 92, 46, 93, -1, 76, 95, -1, 37, 94, | 538 | -1, 77, 96, -1, 38, 95, -1, 95, -1, 53, |
| 529 | -1, 94, -1, 52, 34, -1, -1, 95, 31, -1, | 539 | 35, -1, -1, 96, 31, -1, 52, 98, 46, -1, |
| 530 | 51, 97, 45, -1, 51, 97, 46, 45, -1, 98, | 540 | 52, 98, 47, 46, -1, 99, -1, 98, 47, 99, |
| 531 | -1, 97, 46, 98, -1, 37, -1, 37, 50, 34, | 541 | -1, 38, -1, 38, 51, 35, -1, 30, 45, -1, |
| 532 | -1, 30, 44, -1, -1, 30, -1, 29, 47, 37, | 542 | -1, 30, -1, 29, 48, 38, 49, 45, -1 |
| 533 | 49, 44, -1 | ||
| 534 | }; | 543 | }; |
| 535 | 544 | ||
| 536 | /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ | 545 | /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ |
| 537 | static const yytype_uint16 yyrline[] = | 546 | static const yytype_uint16 yyrline[] = |
| 538 | { | 547 | { |
| 539 | 0, 123, 123, 124, 128, 128, 134, 134, 136, 136, | 548 | 0, 124, 124, 125, 129, 129, 135, 135, 137, 137, |
| 540 | 138, 139, 140, 141, 142, 143, 147, 161, 162, 166, | 549 | 139, 140, 141, 142, 143, 144, 148, 162, 163, 167, |
| 541 | 174, 187, 193, 194, 198, 199, 203, 209, 213, 214, | 550 | 175, 188, 194, 195, 199, 200, 204, 210, 214, 215, |
| 542 | 215, 216, 217, 221, 222, 223, 224, 228, 230, 232, | 551 | 216, 217, 218, 222, 223, 224, 225, 229, 231, 233, |
| 543 | 236, 238, 240, 245, 248, 249, 253, 254, 255, 256, | 552 | 237, 239, 241, 246, 249, 250, 254, 255, 256, 257, |
| 544 | 257, 258, 259, 260, 261, 262, 263, 267, 272, 273, | 553 | 258, 259, 260, 261, 262, 263, 264, 268, 273, 274, |
| 545 | 277, 278, 282, 282, 282, 283, 291, 292, 296, 305, | 554 | 278, 279, 283, 283, 283, 284, 292, 293, 297, 306, |
| 546 | 307, 309, 311, 313, 320, 321, 325, 326, 327, 329, | 555 | 308, 310, 312, 314, 321, 322, 326, 327, 328, 330, |
| 547 | 331, 333, 335, 340, 341, 342, 346, 347, 351, 352, | 556 | 332, 334, 336, 341, 342, 343, 347, 348, 352, 353, |
| 548 | 357, 362, 364, 368, 369, 377, 381, 383, 385, 387, | 557 | 358, 363, 365, 369, 370, 378, 382, 384, 386, 388, |
| 549 | 389, 394, 403, 404, 409, 414, 415, 419, 420, 424, | 558 | 390, 395, 404, 405, 410, 415, 416, 420, 421, 425, |
| 550 | 425, 429, 431, 436, 437, 441, 442, 446, 447, 448, | 559 | 426, 430, 432, 437, 438, 442, 443, 447, 448, 449, |
| 551 | 452, 456, 457, 461, 462, 466, 467, 470, 475, 483, | 560 | 453, 457, 458, 462, 463, 467, 468, 471, 476, 484, |
| 552 | 487, 488, 492 | 561 | 488, 489, 493 |
| 553 | }; | 562 | }; |
| 554 | #endif | 563 | #endif |
| 555 | 564 | ||
| @@ -565,9 +574,9 @@ static const char *const yytname[] = | |||
| 565 | "SHORT_KEYW", "SIGNED_KEYW", "STATIC_KEYW", "STRUCT_KEYW", | 574 | "SHORT_KEYW", "SIGNED_KEYW", "STATIC_KEYW", "STRUCT_KEYW", |
| 566 | "TYPEDEF_KEYW", "UNION_KEYW", "UNSIGNED_KEYW", "VOID_KEYW", | 575 | "TYPEDEF_KEYW", "UNION_KEYW", "UNSIGNED_KEYW", "VOID_KEYW", |
| 567 | "VOLATILE_KEYW", "TYPEOF_KEYW", "EXPORT_SYMBOL_KEYW", "ASM_PHRASE", | 576 | "VOLATILE_KEYW", "TYPEOF_KEYW", "EXPORT_SYMBOL_KEYW", "ASM_PHRASE", |
| 568 | "ATTRIBUTE_PHRASE", "BRACE_PHRASE", "BRACKET_PHRASE", | 577 | "ATTRIBUTE_PHRASE", "TYPEOF_PHRASE", "BRACE_PHRASE", "BRACKET_PHRASE", |
| 569 | "EXPRESSION_PHRASE", "CHAR", "DOTS", "IDENT", "INT", "REAL", "STRING", | 578 | "EXPRESSION_PHRASE", "CHAR", "DOTS", "IDENT", "INT", "REAL", "STRING", |
| 570 | "TYPE", "OTHER", "FILENAME", "';'", "'}'", "','", "'('", "'*'", "')'", | 579 | "TYPE", "OTHER", "FILENAME", "';'", "'}'", "','", "'('", "')'", "'*'", |
| 571 | "'='", "'{'", "':'", "$accept", "declaration_seq", "declaration", "$@1", | 580 | "'='", "'{'", "':'", "$accept", "declaration_seq", "declaration", "$@1", |
| 572 | "declaration1", "$@2", "$@3", "simple_declaration", | 581 | "declaration1", "$@2", "$@3", "simple_declaration", |
| 573 | "init_declarator_list_opt", "init_declarator_list", "init_declarator", | 582 | "init_declarator_list_opt", "init_declarator_list", "init_declarator", |
| @@ -584,7 +593,7 @@ static const char *const yytname[] = | |||
| 584 | "member_declarator_list_opt", "member_declarator_list", | 593 | "member_declarator_list_opt", "member_declarator_list", |
| 585 | "member_declarator", "member_bitfield_declarator", "attribute_opt", | 594 | "member_declarator", "member_bitfield_declarator", "attribute_opt", |
| 586 | "enum_body", "enumerator_list", "enumerator", "asm_definition", | 595 | "enum_body", "enumerator_list", "enumerator", "asm_definition", |
| 587 | "asm_phrase_opt", "export_definition", 0 | 596 | "asm_phrase_opt", "export_definition", YY_NULL |
| 588 | }; | 597 | }; |
| 589 | #endif | 598 | #endif |
| 590 | 599 | ||
| @@ -597,28 +606,28 @@ static const yytype_uint16 yytoknum[] = | |||
| 597 | 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, | 606 | 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, |
| 598 | 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, | 607 | 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, |
| 599 | 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, | 608 | 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, |
| 600 | 295, 296, 297, 298, 59, 125, 44, 40, 42, 41, | 609 | 295, 296, 297, 298, 299, 59, 125, 44, 40, 41, |
| 601 | 61, 123, 58 | 610 | 42, 61, 123, 58 |
| 602 | }; | 611 | }; |
| 603 | # endif | 612 | # endif |
| 604 | 613 | ||
| 605 | /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ | 614 | /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ |
| 606 | static const yytype_uint8 yyr1[] = | 615 | static const yytype_uint8 yyr1[] = |
| 607 | { | 616 | { |
| 608 | 0, 53, 54, 54, 56, 55, 58, 57, 59, 57, | 617 | 0, 54, 55, 55, 57, 56, 59, 58, 60, 58, |
| 609 | 57, 57, 57, 57, 57, 57, 60, 61, 61, 62, | 618 | 58, 58, 58, 58, 58, 58, 61, 62, 62, 63, |
| 610 | 62, 63, 64, 64, 65, 65, 66, 66, 67, 67, | 619 | 63, 64, 65, 65, 66, 66, 67, 67, 68, 68, |
| 611 | 67, 67, 67, 68, 68, 68, 68, 68, 68, 68, | 620 | 68, 68, 68, 69, 69, 69, 69, 69, 69, 69, |
| 612 | 68, 68, 68, 68, 68, 68, 69, 69, 69, 69, | 621 | 69, 69, 69, 69, 69, 69, 70, 70, 70, 70, |
| 613 | 69, 69, 69, 69, 69, 69, 69, 70, 71, 71, | 622 | 70, 70, 70, 70, 70, 70, 70, 71, 72, 72, |
| 614 | 72, 72, 73, 73, 73, 73, 74, 74, 75, 75, | 623 | 73, 73, 74, 74, 74, 74, 75, 75, 76, 76, |
| 615 | 75, 75, 75, 75, 76, 76, 77, 77, 77, 77, | 624 | 76, 76, 76, 76, 77, 77, 78, 78, 78, 78, |
| 616 | 77, 77, 77, 78, 78, 78, 79, 79, 80, 80, | 625 | 78, 78, 78, 79, 79, 79, 80, 80, 81, 81, |
| 617 | 81, 82, 82, 83, 83, 83, 83, 83, 83, 83, | 626 | 82, 83, 83, 84, 84, 84, 84, 84, 84, 84, |
| 618 | 83, 84, 85, 85, 86, 87, 87, 88, 88, 89, | 627 | 84, 85, 86, 86, 87, 88, 88, 89, 89, 90, |
| 619 | 89, 90, 90, 91, 91, 92, 92, 93, 93, 93, | 628 | 90, 91, 91, 92, 92, 93, 93, 94, 94, 94, |
| 620 | 94, 95, 95, 96, 96, 97, 97, 98, 98, 99, | 629 | 95, 96, 96, 97, 97, 98, 98, 99, 99, 100, |
| 621 | 100, 100, 101 | 630 | 101, 101, 102 |
| 622 | }; | 631 | }; |
| 623 | 632 | ||
| 624 | /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ | 633 | /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ |
| @@ -627,7 +636,7 @@ static const yytype_uint8 yyr2[] = | |||
| 627 | 0, 2, 1, 2, 0, 2, 0, 4, 0, 3, | 636 | 0, 2, 1, 2, 0, 2, 0, 4, 0, 3, |
| 628 | 1, 1, 1, 1, 2, 2, 3, 0, 1, 1, | 637 | 1, 1, 1, 1, 2, 2, 3, 0, 1, 1, |
| 629 | 3, 4, 0, 1, 1, 2, 1, 1, 1, 1, | 638 | 3, 4, 0, 1, 1, 2, 1, 1, 1, 1, |
| 630 | 1, 1, 1, 1, 1, 5, 4, 2, 2, 2, | 639 | 1, 1, 1, 1, 1, 4, 1, 2, 2, 2, |
| 631 | 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, | 640 | 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, |
| 632 | 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, | 641 | 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, |
| 633 | 1, 2, 1, 1, 1, 1, 2, 1, 1, 4, | 642 | 1, 2, 1, 1, 1, 1, 2, 1, 1, 4, |
| @@ -648,68 +657,68 @@ static const yytype_uint8 yydefact[] = | |||
| 648 | 4, 4, 2, 0, 1, 3, 0, 28, 55, 46, | 657 | 4, 4, 2, 0, 1, 3, 0, 28, 55, 46, |
| 649 | 62, 53, 0, 31, 0, 52, 32, 48, 49, 29, | 658 | 62, 53, 0, 31, 0, 52, 32, 48, 49, 29, |
| 650 | 65, 47, 50, 30, 0, 8, 0, 51, 54, 63, | 659 | 65, 47, 50, 30, 0, 8, 0, 51, 54, 63, |
| 651 | 0, 0, 0, 64, 56, 5, 10, 17, 23, 24, | 660 | 0, 0, 0, 64, 36, 56, 5, 10, 17, 23, |
| 652 | 26, 27, 33, 34, 11, 12, 13, 14, 15, 39, | 661 | 24, 26, 27, 33, 34, 11, 12, 13, 14, 15, |
| 653 | 0, 43, 6, 37, 0, 44, 22, 38, 45, 0, | 662 | 39, 0, 43, 6, 37, 0, 44, 22, 38, 45, |
| 654 | 0, 129, 68, 0, 58, 0, 18, 19, 0, 130, | 663 | 0, 0, 129, 68, 0, 58, 0, 18, 19, 0, |
| 655 | 67, 25, 42, 127, 0, 125, 22, 40, 0, 113, | 664 | 130, 67, 25, 42, 127, 0, 125, 22, 40, 0, |
| 656 | 0, 0, 109, 9, 17, 41, 0, 0, 0, 0, | 665 | 113, 0, 0, 109, 9, 17, 41, 93, 0, 0, |
| 657 | 57, 59, 60, 16, 0, 66, 131, 101, 121, 71, | 666 | 0, 0, 57, 59, 60, 16, 0, 66, 131, 101, |
| 658 | 0, 0, 123, 0, 7, 112, 106, 76, 77, 0, | 667 | 121, 71, 0, 0, 123, 0, 7, 112, 106, 76, |
| 659 | 0, 0, 121, 75, 0, 114, 115, 119, 105, 0, | 668 | 77, 0, 0, 0, 121, 75, 0, 114, 115, 119, |
| 660 | 110, 130, 0, 36, 0, 73, 72, 61, 20, 102, | 669 | 105, 0, 110, 130, 94, 56, 0, 93, 90, 92, |
| 661 | 0, 93, 0, 84, 87, 88, 128, 124, 126, 118, | 670 | 35, 0, 73, 72, 61, 20, 102, 0, 0, 84, |
| 662 | 0, 76, 0, 120, 74, 117, 80, 0, 111, 0, | 671 | 87, 88, 128, 124, 126, 118, 0, 76, 0, 120, |
| 663 | 35, 132, 122, 0, 21, 103, 70, 94, 56, 0, | 672 | 74, 117, 80, 0, 111, 0, 0, 95, 0, 91, |
| 664 | 93, 90, 92, 69, 83, 0, 82, 81, 0, 0, | 673 | 98, 0, 132, 122, 0, 21, 103, 70, 69, 83, |
| 665 | 116, 104, 0, 95, 0, 91, 98, 0, 85, 89, | 674 | 0, 82, 81, 0, 0, 116, 100, 99, 0, 0, |
| 666 | 79, 78, 100, 99, 0, 0, 97, 96 | 675 | 104, 85, 89, 79, 78, 97, 96 |
| 667 | }; | 676 | }; |
| 668 | 677 | ||
| 669 | /* YYDEFGOTO[NTERM-NUM]. */ | 678 | /* YYDEFGOTO[NTERM-NUM]. */ |
| 670 | static const yytype_int16 yydefgoto[] = | 679 | static const yytype_int16 yydefgoto[] = |
| 671 | { | 680 | { |
| 672 | -1, 1, 2, 3, 35, 76, 56, 36, 65, 66, | 681 | -1, 1, 2, 3, 36, 77, 57, 37, 66, 67, |
| 673 | 67, 79, 38, 39, 40, 41, 42, 68, 90, 91, | 682 | 68, 80, 39, 40, 41, 42, 43, 69, 92, 93, |
| 674 | 43, 121, 70, 112, 113, 132, 133, 134, 135, 161, | 683 | 44, 123, 71, 114, 115, 138, 139, 140, 141, 128, |
| 675 | 162, 44, 154, 155, 55, 80, 81, 82, 114, 115, | 684 | 129, 45, 165, 166, 56, 81, 82, 83, 116, 117, |
| 676 | 116, 117, 129, 51, 74, 75, 45, 98, 46 | 685 | 118, 119, 136, 52, 75, 76, 46, 100, 47 |
| 677 | }; | 686 | }; |
| 678 | 687 | ||
| 679 | /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing | 688 | /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing |
| 680 | STATE-NUM. */ | 689 | STATE-NUM. */ |
| 681 | #define YYPACT_NINF -135 | 690 | #define YYPACT_NINF -140 |
| 682 | static const yytype_int16 yypact[] = | 691 | static const yytype_int16 yypact[] = |
| 683 | { | 692 | { |
| 684 | -135, 20, -135, 321, -135, -135, 30, -135, -135, -135, | 693 | -140, 29, -140, 207, -140, -140, 40, -140, -140, -140, |
| 685 | -135, -135, -28, -135, 2, -135, -135, -135, -135, -135, | 694 | -140, -140, -27, -140, 44, -140, -140, -140, -140, -140, |
| 686 | -135, -135, -135, -135, -6, -135, 9, -135, -135, -135, | 695 | -140, -140, -140, -140, -22, -140, -18, -140, -140, -140, |
| 687 | -5, 15, -17, -135, -135, -135, -135, 18, 491, -135, | 696 | -9, 22, 28, -140, -140, -140, -140, -140, 42, 472, |
| 688 | -135, -135, -135, -135, -135, -135, -135, -135, -135, -22, | 697 | -140, -140, -140, -140, -140, -140, -140, -140, -140, -140, |
| 689 | 31, -135, -135, 19, 106, -135, 491, 19, -135, 491, | 698 | 46, 43, -140, -140, 47, 107, -140, 472, 47, -140, |
| 690 | 50, -135, -135, 11, -3, 51, 57, -135, 18, -14, | 699 | 472, 62, -140, -140, 16, -3, 57, 56, -140, 42, |
| 691 | 14, -135, -135, 48, 46, -135, 491, -135, 33, 32, | 700 | 35, -11, -140, -140, 53, 48, -140, 472, -140, 51, |
| 692 | 59, 154, -135, -135, 18, -135, 365, 56, 60, 61, | 701 | 21, 59, 157, -140, -140, 42, -140, 388, 58, 60, |
| 693 | -135, -3, -135, -135, 18, -135, -135, -135, -135, -135, | 702 | 70, 81, -140, -3, -140, -140, 42, -140, -140, -140, |
| 694 | 202, 74, -135, -23, -135, -135, -135, 77, -135, 16, | 703 | -140, -140, 253, 71, -140, -20, -140, -140, -140, 83, |
| 695 | 101, 49, -135, 34, 92, 93, -135, -135, -135, 94, | 704 | -140, 5, 102, 34, -140, 12, 95, 94, -140, -140, |
| 696 | -135, 110, 95, -135, 97, -135, -135, -135, -135, -20, | 705 | -140, 97, -140, 113, -140, -140, 2, 41, -140, 27, |
| 697 | 96, 410, 99, 113, 100, -135, -135, -135, -135, -135, | 706 | -140, 99, -140, -140, -140, -140, -24, 98, 101, 109, |
| 698 | 103, -135, 107, -135, -135, 111, -135, 239, -135, 32, | 707 | 104, -140, -140, -140, -140, -140, 105, -140, 110, -140, |
| 699 | -135, -135, -135, 123, -135, -135, -135, -135, -135, 3, | 708 | -140, 117, -140, 298, -140, 21, 112, -140, 120, -140, |
| 700 | 52, -135, 38, -135, -135, 454, -135, -135, 117, 128, | 709 | -140, 343, -140, -140, 121, -140, -140, -140, -140, -140, |
| 701 | -135, -135, 134, -135, 135, -135, -135, 276, -135, -135, | 710 | 434, -140, -140, 131, 137, -140, -140, -140, 138, 141, |
| 702 | -135, -135, -135, -135, 137, 138, -135, -135 | 711 | -140, -140, -140, -140, -140, -140, -140 |
| 703 | }; | 712 | }; |
| 704 | 713 | ||
| 705 | /* YYPGOTO[NTERM-NUM]. */ | 714 | /* YYPGOTO[NTERM-NUM]. */ |
| 706 | static const yytype_int16 yypgoto[] = | 715 | static const yytype_int16 yypgoto[] = |
| 707 | { | 716 | { |
| 708 | -135, -135, 187, -135, -135, -135, -135, -50, -135, -135, | 717 | -140, -140, 190, -140, -140, -140, -140, -45, -140, -140, |
| 709 | 98, 0, -59, -37, -135, -135, -135, -77, -135, -135, | 718 | 96, 1, -60, -31, -140, -140, -140, -78, -140, -140, |
| 710 | -54, -30, -135, -90, -135, -134, -135, -135, 24, -58, | 719 | -55, -7, -140, -92, -140, -139, -140, -140, -59, -39, |
| 711 | -135, -135, -135, -135, -18, -135, -135, 109, -135, -135, | 720 | -140, -140, -140, -140, -13, -140, -140, 111, -140, -140, |
| 712 | 44, 87, 84, 148, -135, 102, -135, -135, -135 | 721 | 39, 87, 84, 147, -140, 106, -140, -140, -140 |
| 713 | }; | 722 | }; |
| 714 | 723 | ||
| 715 | /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If | 724 | /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If |
| @@ -718,149 +727,145 @@ static const yytype_int16 yypgoto[] = | |||
| 718 | #define YYTABLE_NINF -109 | 727 | #define YYTABLE_NINF -109 |
| 719 | static const yytype_int16 yytable[] = | 728 | static const yytype_int16 yytable[] = |
| 720 | { | 729 | { |
| 721 | 86, 71, 111, 37, 172, 10, 83, 69, 58, 49, | 730 | 87, 88, 113, 156, 38, 10, 146, 163, 72, 127, |
| 722 | 92, 152, 88, 169, 73, 20, 96, 140, 97, 142, | 731 | 94, 50, 84, 59, 174, 20, 54, 90, 74, 148, |
| 723 | 4, 144, 137, 50, 29, 52, 104, 61, 33, 50, | 732 | 58, 150, 179, 101, 29, 51, 143, 164, 33, 4, |
| 724 | 153, 53, 111, 89, 111, 77, -93, 127, 95, 85, | 733 | 55, 70, 106, 113, 55, 113, -93, 102, 134, 60, |
| 725 | 157, 131, 59, 185, 173, 54, 57, 99, 62, 71, | 734 | 124, 78, 87, 147, 157, 86, 152, 110, 127, 127, |
| 726 | 159, 64, -93, 141, 160, 62, 84, 108, 63, 64, | 735 | 126, -93, 65, 111, 63, 65, 72, 91, 85, 109, |
| 727 | 54, 100, 60, 109, 64, 63, 64, 146, 73, 107, | 736 | 153, 160, 97, 110, 64, 98, 65, 53, 99, 111, |
| 728 | 54, 176, 111, 108, 47, 48, 84, 105, 106, 109, | 737 | 61, 65, 147, 62, 112, 161, 110, 113, 85, 124, |
| 729 | 64, 147, 160, 160, 110, 177, 141, 87, 131, 157, | 738 | 63, 74, 111, 157, 65, 48, 49, 158, 159, 126, |
| 730 | 108, 102, 103, 173, 71, 93, 109, 64, 101, 159, | 739 | 64, 65, 65, 87, 104, 105, 107, 108, 51, 55, |
| 731 | 64, 174, 175, 94, 118, 124, 131, 78, 136, 125, | 740 | 89, 87, 95, 96, 103, 120, 142, 130, 79, 131, |
| 732 | 126, 7, 8, 9, 10, 11, 12, 13, 131, 15, | 741 | 87, 182, 7, 8, 9, 10, 11, 12, 13, 132, |
| 733 | 16, 17, 18, 19, 20, 21, 22, 23, 24, 110, | 742 | 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, |
| 734 | 26, 27, 28, 29, 30, 143, 148, 33, 105, 149, | 743 | 133, 26, 27, 28, 29, 30, 112, 149, 33, 34, |
| 735 | 96, 151, 152, -22, 150, 156, 165, 34, 163, 164, | 744 | 154, 155, 107, 98, 162, -22, 169, 167, 163, 35, |
| 736 | -22, -107, 166, -22, -22, 119, 167, 171, -22, 7, | 745 | 168, 170, -22, -107, 171, -22, 180, -22, 121, 172, |
| 737 | 8, 9, 10, 11, 12, 13, 180, 15, 16, 17, | 746 | -22, 176, 7, 8, 9, 10, 11, 12, 13, 177, |
| 738 | 18, 19, 20, 21, 22, 23, 24, 181, 26, 27, | 747 | 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, |
| 739 | 28, 29, 30, 182, 183, 33, 186, 187, 5, 179, | 748 | 183, 26, 27, 28, 29, 30, 184, 185, 33, 34, |
| 740 | 120, -22, 128, 170, 139, 34, 145, 72, -22, -108, | 749 | 186, 5, 135, 122, 175, -22, 145, 73, 151, 35, |
| 741 | 0, -22, -22, 130, 0, 138, -22, 7, 8, 9, | 750 | 0, 0, -22, -108, 0, -22, 0, -22, 6, 0, |
| 742 | 10, 11, 12, 13, 0, 15, 16, 17, 18, 19, | 751 | -22, 144, 7, 8, 9, 10, 11, 12, 13, 14, |
| 743 | 20, 21, 22, 23, 24, 0, 26, 27, 28, 29, | 752 | 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, |
| 744 | 30, 0, 0, 33, 0, 0, 0, 0, -86, 0, | 753 | 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, |
| 745 | 168, 0, 0, 34, 7, 8, 9, 10, 11, 12, | 754 | 0, 0, 0, 0, 0, -22, 0, 0, 0, 35, |
| 746 | 13, -86, 15, 16, 17, 18, 19, 20, 21, 22, | 755 | 0, 0, -22, 0, 137, -22, 0, -22, 7, 8, |
| 747 | 23, 24, 0, 26, 27, 28, 29, 30, 0, 0, | 756 | 9, 10, 11, 12, 13, 0, 15, 16, 17, 18, |
| 748 | 33, 0, 0, 0, 0, -86, 0, 184, 0, 0, | 757 | 19, 20, 21, 22, 23, 24, 0, 26, 27, 28, |
| 749 | 34, 7, 8, 9, 10, 11, 12, 13, -86, 15, | 758 | 29, 30, 0, 0, 33, 34, 0, 0, 0, 0, |
| 750 | 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, | 759 | -86, 0, 0, 0, 0, 35, 0, 0, 0, 173, |
| 751 | 26, 27, 28, 29, 30, 0, 0, 33, 0, 0, | 760 | 0, 0, -86, 7, 8, 9, 10, 11, 12, 13, |
| 752 | 0, 0, -86, 0, 0, 0, 0, 34, 0, 0, | 761 | 0, 15, 16, 17, 18, 19, 20, 21, 22, 23, |
| 753 | 0, 0, 6, 0, 0, -86, 7, 8, 9, 10, | 762 | 24, 0, 26, 27, 28, 29, 30, 0, 0, 33, |
| 754 | 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, | 763 | 34, 0, 0, 0, 0, -86, 0, 0, 0, 0, |
| 755 | 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, | 764 | 35, 0, 0, 0, 178, 0, 0, -86, 7, 8, |
| 756 | 31, 32, 33, 0, 0, 0, 0, 0, -22, 0, | 765 | 9, 10, 11, 12, 13, 0, 15, 16, 17, 18, |
| 757 | 0, 0, 34, 0, 0, -22, 0, 0, -22, -22, | 766 | 19, 20, 21, 22, 23, 24, 0, 26, 27, 28, |
| 758 | 7, 8, 9, 10, 11, 12, 13, 0, 15, 16, | 767 | 29, 30, 0, 0, 33, 34, 0, 0, 0, 0, |
| 759 | 17, 18, 19, 20, 21, 22, 23, 24, 0, 26, | 768 | -86, 0, 0, 0, 0, 35, 0, 0, 0, 0, |
| 760 | 27, 28, 29, 30, 0, 0, 33, 0, 0, 0, | 769 | 0, 0, -86, 7, 8, 9, 10, 11, 12, 13, |
| 761 | 0, 0, 0, 0, 0, 0, 34, 0, 0, 0, | 770 | 0, 15, 16, 17, 18, 19, 20, 21, 22, 23, |
| 762 | 0, 0, 0, 122, 123, 7, 8, 9, 10, 11, | 771 | 24, 0, 26, 27, 28, 29, 30, 0, 0, 33, |
| 763 | 12, 13, 0, 15, 16, 17, 18, 19, 20, 21, | 772 | 34, 0, 0, 0, 0, 0, 124, 0, 0, 0, |
| 764 | 22, 23, 24, 0, 26, 27, 28, 29, 30, 0, | 773 | 125, 0, 0, 0, 0, 0, 126, 0, 65, 7, |
| 765 | 0, 33, 0, 0, 0, 0, 0, 157, 0, 0, | ||
| 766 | 0, 158, 0, 0, 0, 0, 0, 159, 64, 7, | ||
| 767 | 8, 9, 10, 11, 12, 13, 0, 15, 16, 17, | 774 | 8, 9, 10, 11, 12, 13, 0, 15, 16, 17, |
| 768 | 18, 19, 20, 21, 22, 23, 24, 0, 26, 27, | 775 | 18, 19, 20, 21, 22, 23, 24, 0, 26, 27, |
| 769 | 28, 29, 30, 0, 0, 33, 0, 0, 0, 0, | 776 | 28, 29, 30, 0, 0, 33, 34, 0, 0, 0, |
| 770 | 178, 0, 0, 0, 0, 34, 7, 8, 9, 10, | 777 | 0, 181, 0, 0, 0, 0, 35, 7, 8, 9, |
| 771 | 11, 12, 13, 0, 15, 16, 17, 18, 19, 20, | 778 | 10, 11, 12, 13, 0, 15, 16, 17, 18, 19, |
| 772 | 21, 22, 23, 24, 0, 26, 27, 28, 29, 30, | 779 | 20, 21, 22, 23, 24, 0, 26, 27, 28, 29, |
| 773 | 0, 0, 33, 0, 0, 0, 0, 0, 0, 0, | 780 | 30, 0, 0, 33, 34, 0, 0, 0, 0, 0, |
| 774 | 0, 0, 34 | 781 | 0, 0, 0, 0, 35 |
| 775 | }; | 782 | }; |
| 776 | 783 | ||
| 777 | #define yypact_value_is_default(yystate) \ | 784 | #define yypact_value_is_default(yystate) \ |
| 778 | ((yystate) == (-135)) | 785 | ((yystate) == (-140)) |
| 779 | 786 | ||
| 780 | #define yytable_value_is_error(yytable_value) \ | 787 | #define yytable_value_is_error(yytable_value) \ |
| 781 | YYID (0) | 788 | YYID (0) |
| 782 | 789 | ||
| 783 | static const yytype_int16 yycheck[] = | 790 | static const yytype_int16 yycheck[] = |
| 784 | { | 791 | { |
| 785 | 59, 38, 79, 3, 1, 8, 56, 37, 26, 37, | 792 | 60, 60, 80, 1, 3, 8, 1, 31, 39, 87, |
| 786 | 64, 31, 1, 147, 37, 18, 30, 1, 32, 109, | 793 | 65, 38, 57, 26, 153, 18, 38, 1, 38, 111, |
| 787 | 0, 111, 45, 51, 27, 23, 76, 44, 31, 51, | 794 | 38, 113, 161, 34, 27, 52, 46, 51, 31, 0, |
| 788 | 50, 37, 109, 63, 111, 53, 33, 91, 68, 57, | 795 | 52, 38, 77, 111, 52, 113, 34, 48, 93, 48, |
| 789 | 37, 100, 47, 177, 41, 51, 37, 33, 37, 86, | 796 | 38, 54, 102, 38, 42, 58, 34, 42, 126, 127, |
| 790 | 47, 48, 49, 37, 131, 37, 56, 41, 47, 48, | 797 | 48, 49, 50, 48, 38, 50, 87, 64, 57, 38, |
| 791 | 51, 47, 47, 47, 48, 47, 48, 33, 37, 37, | 798 | 48, 34, 69, 42, 48, 30, 50, 23, 33, 48, |
| 792 | 51, 33, 149, 41, 44, 45, 76, 44, 45, 47, | 799 | 48, 50, 38, 45, 53, 48, 42, 155, 77, 38, |
| 793 | 48, 47, 159, 160, 52, 47, 37, 37, 147, 37, | 800 | 38, 38, 48, 42, 50, 45, 46, 126, 127, 48, |
| 794 | 41, 45, 46, 41, 131, 44, 47, 48, 50, 47, | 801 | 48, 50, 50, 153, 46, 47, 45, 46, 52, 52, |
| 795 | 48, 159, 160, 46, 45, 49, 165, 1, 34, 49, | 802 | 38, 161, 45, 47, 51, 46, 35, 49, 1, 49, |
| 796 | 49, 5, 6, 7, 8, 9, 10, 11, 177, 13, | 803 | 170, 170, 5, 6, 7, 8, 9, 10, 11, 49, |
| 797 | 14, 15, 16, 17, 18, 19, 20, 21, 22, 52, | 804 | 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, |
| 798 | 24, 25, 26, 27, 28, 34, 44, 31, 44, 46, | 805 | 49, 24, 25, 26, 27, 28, 53, 35, 31, 32, |
| 799 | 30, 44, 31, 37, 49, 49, 46, 41, 49, 36, | 806 | 45, 47, 45, 30, 45, 38, 37, 49, 31, 42, |
| 800 | 44, 45, 49, 47, 48, 1, 49, 34, 52, 5, | 807 | 49, 47, 45, 46, 49, 48, 35, 50, 1, 49, |
| 801 | 6, 7, 8, 9, 10, 11, 49, 13, 14, 15, | 808 | 53, 49, 5, 6, 7, 8, 9, 10, 11, 49, |
| 802 | 16, 17, 18, 19, 20, 21, 22, 49, 24, 25, | 809 | 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, |
| 803 | 26, 27, 28, 49, 49, 31, 49, 49, 1, 165, | 810 | 49, 24, 25, 26, 27, 28, 49, 49, 31, 32, |
| 804 | 81, 37, 94, 149, 107, 41, 112, 49, 44, 45, | 811 | 49, 1, 96, 82, 155, 38, 109, 50, 114, 42, |
| 805 | -1, 47, 48, 1, -1, 103, 52, 5, 6, 7, | 812 | -1, -1, 45, 46, -1, 48, -1, 50, 1, -1, |
| 806 | 8, 9, 10, 11, -1, 13, 14, 15, 16, 17, | 813 | 53, 105, 5, 6, 7, 8, 9, 10, 11, 12, |
| 807 | 18, 19, 20, 21, 22, -1, 24, 25, 26, 27, | 814 | 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, |
| 808 | 28, -1, -1, 31, -1, -1, -1, -1, 36, -1, | 815 | 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, |
| 809 | 1, -1, -1, 41, 5, 6, 7, 8, 9, 10, | 816 | -1, -1, -1, -1, -1, 38, -1, -1, -1, 42, |
| 810 | 11, 49, 13, 14, 15, 16, 17, 18, 19, 20, | 817 | -1, -1, 45, -1, 1, 48, -1, 50, 5, 6, |
| 811 | 21, 22, -1, 24, 25, 26, 27, 28, -1, -1, | 818 | 7, 8, 9, 10, 11, -1, 13, 14, 15, 16, |
| 812 | 31, -1, -1, -1, -1, 36, -1, 1, -1, -1, | 819 | 17, 18, 19, 20, 21, 22, -1, 24, 25, 26, |
| 813 | 41, 5, 6, 7, 8, 9, 10, 11, 49, 13, | 820 | 27, 28, -1, -1, 31, 32, -1, -1, -1, -1, |
| 814 | 14, 15, 16, 17, 18, 19, 20, 21, 22, -1, | 821 | 37, -1, -1, -1, -1, 42, -1, -1, -1, 1, |
| 815 | 24, 25, 26, 27, 28, -1, -1, 31, -1, -1, | 822 | -1, -1, 49, 5, 6, 7, 8, 9, 10, 11, |
| 816 | -1, -1, 36, -1, -1, -1, -1, 41, -1, -1, | 823 | -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, |
| 817 | -1, -1, 1, -1, -1, 49, 5, 6, 7, 8, | 824 | 22, -1, 24, 25, 26, 27, 28, -1, -1, 31, |
| 818 | 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, | 825 | 32, -1, -1, -1, -1, 37, -1, -1, -1, -1, |
| 819 | 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, | 826 | 42, -1, -1, -1, 1, -1, -1, 49, 5, 6, |
| 820 | 29, 30, 31, -1, -1, -1, -1, -1, 37, -1, | 827 | 7, 8, 9, 10, 11, -1, 13, 14, 15, 16, |
| 821 | -1, -1, 41, -1, -1, 44, -1, -1, 47, 48, | 828 | 17, 18, 19, 20, 21, 22, -1, 24, 25, 26, |
| 822 | 5, 6, 7, 8, 9, 10, 11, -1, 13, 14, | 829 | 27, 28, -1, -1, 31, 32, -1, -1, -1, -1, |
| 823 | 15, 16, 17, 18, 19, 20, 21, 22, -1, 24, | 830 | 37, -1, -1, -1, -1, 42, -1, -1, -1, -1, |
| 824 | 25, 26, 27, 28, -1, -1, 31, -1, -1, -1, | 831 | -1, -1, 49, 5, 6, 7, 8, 9, 10, 11, |
| 825 | -1, -1, -1, -1, -1, -1, 41, -1, -1, -1, | 832 | -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, |
| 826 | -1, -1, -1, 48, 49, 5, 6, 7, 8, 9, | 833 | 22, -1, 24, 25, 26, 27, 28, -1, -1, 31, |
| 827 | 10, 11, -1, 13, 14, 15, 16, 17, 18, 19, | 834 | 32, -1, -1, -1, -1, -1, 38, -1, -1, -1, |
| 828 | 20, 21, 22, -1, 24, 25, 26, 27, 28, -1, | 835 | 42, -1, -1, -1, -1, -1, 48, -1, 50, 5, |
| 829 | -1, 31, -1, -1, -1, -1, -1, 37, -1, -1, | ||
| 830 | -1, 41, -1, -1, -1, -1, -1, 47, 48, 5, | ||
| 831 | 6, 7, 8, 9, 10, 11, -1, 13, 14, 15, | 836 | 6, 7, 8, 9, 10, 11, -1, 13, 14, 15, |
| 832 | 16, 17, 18, 19, 20, 21, 22, -1, 24, 25, | 837 | 16, 17, 18, 19, 20, 21, 22, -1, 24, 25, |
| 833 | 26, 27, 28, -1, -1, 31, -1, -1, -1, -1, | 838 | 26, 27, 28, -1, -1, 31, 32, -1, -1, -1, |
| 834 | 36, -1, -1, -1, -1, 41, 5, 6, 7, 8, | 839 | -1, 37, -1, -1, -1, -1, 42, 5, 6, 7, |
| 835 | 9, 10, 11, -1, 13, 14, 15, 16, 17, 18, | 840 | 8, 9, 10, 11, -1, 13, 14, 15, 16, 17, |
| 836 | 19, 20, 21, 22, -1, 24, 25, 26, 27, 28, | 841 | 18, 19, 20, 21, 22, -1, 24, 25, 26, 27, |
| 837 | -1, -1, 31, -1, -1, -1, -1, -1, -1, -1, | 842 | 28, -1, -1, 31, 32, -1, -1, -1, -1, -1, |
| 838 | -1, -1, 41 | 843 | -1, -1, -1, -1, 42 |
| 839 | }; | 844 | }; |
| 840 | 845 | ||
| 841 | /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing | 846 | /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing |
| 842 | symbol of state STATE-NUM. */ | 847 | symbol of state STATE-NUM. */ |
| 843 | static const yytype_uint8 yystos[] = | 848 | static const yytype_uint8 yystos[] = |
| 844 | { | 849 | { |
| 845 | 0, 54, 55, 56, 0, 55, 1, 5, 6, 7, | 850 | 0, 55, 56, 57, 0, 56, 1, 5, 6, 7, |
| 846 | 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, | 851 | 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, |
| 847 | 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, | 852 | 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, |
| 848 | 28, 29, 30, 31, 41, 57, 60, 64, 65, 66, | 853 | 28, 29, 30, 31, 32, 42, 58, 61, 65, 66, |
| 849 | 67, 68, 69, 73, 84, 99, 101, 44, 45, 37, | 854 | 67, 68, 69, 70, 74, 85, 100, 102, 45, 46, |
| 850 | 51, 96, 23, 37, 51, 87, 59, 37, 87, 47, | 855 | 38, 52, 97, 23, 38, 52, 88, 60, 38, 88, |
| 851 | 47, 44, 37, 47, 48, 61, 62, 63, 70, 74, | 856 | 48, 48, 45, 38, 48, 50, 62, 63, 64, 71, |
| 852 | 75, 66, 96, 37, 97, 98, 58, 87, 1, 64, | 857 | 75, 76, 67, 97, 38, 98, 99, 59, 88, 1, |
| 853 | 88, 89, 90, 60, 64, 87, 65, 37, 1, 74, | 858 | 65, 89, 90, 91, 61, 65, 88, 66, 82, 38, |
| 854 | 71, 72, 73, 44, 46, 74, 30, 32, 100, 33, | 859 | 1, 75, 72, 73, 74, 45, 47, 75, 30, 33, |
| 855 | 47, 50, 45, 46, 60, 44, 45, 37, 41, 47, | 860 | 101, 34, 48, 51, 46, 47, 61, 45, 46, 38, |
| 856 | 52, 70, 76, 77, 91, 92, 93, 94, 45, 1, | 861 | 42, 48, 53, 71, 77, 78, 92, 93, 94, 95, |
| 857 | 90, 74, 48, 49, 49, 49, 49, 73, 63, 95, | 862 | 46, 1, 91, 75, 38, 42, 48, 71, 83, 84, |
| 858 | 1, 65, 78, 79, 80, 81, 34, 45, 98, 94, | 863 | 49, 49, 49, 49, 74, 64, 96, 1, 79, 80, |
| 859 | 1, 37, 76, 34, 76, 95, 33, 47, 44, 46, | 864 | 81, 82, 35, 46, 99, 95, 1, 38, 77, 35, |
| 860 | 49, 44, 31, 50, 85, 86, 49, 37, 41, 47, | 865 | 77, 96, 34, 48, 45, 47, 1, 42, 83, 83, |
| 861 | 70, 82, 83, 49, 36, 46, 49, 49, 1, 78, | 866 | 34, 48, 45, 31, 51, 86, 87, 49, 49, 37, |
| 862 | 93, 34, 1, 41, 82, 82, 33, 47, 36, 81, | 867 | 47, 49, 49, 1, 79, 94, 49, 49, 1, 79, |
| 863 | 49, 49, 49, 49, 1, 78, 49, 49 | 868 | 35, 37, 82, 49, 49, 49, 49 |
| 864 | }; | 869 | }; |
| 865 | 870 | ||
| 866 | #define yyerrok (yyerrstatus = 0) | 871 | #define yyerrok (yyerrstatus = 0) |
| @@ -890,17 +895,18 @@ static const yytype_uint8 yystos[] = | |||
| 890 | 895 | ||
| 891 | #define YYRECOVERING() (!!yyerrstatus) | 896 | #define YYRECOVERING() (!!yyerrstatus) |
| 892 | 897 | ||
| 893 | #define YYBACKUP(Token, Value) \ | 898 | #define YYBACKUP(Token, Value) \ |
| 894 | do \ | 899 | do \ |
| 895 | if (yychar == YYEMPTY && yylen == 1) \ | 900 | if (yychar == YYEMPTY) \ |
| 896 | { \ | 901 | { \ |
| 897 | yychar = (Token); \ | 902 | yychar = (Token); \ |
| 898 | yylval = (Value); \ | 903 | yylval = (Value); \ |
| 899 | YYPOPSTACK (1); \ | 904 | YYPOPSTACK (yylen); \ |
| 900 | goto yybackup; \ | 905 | yystate = *yyssp; \ |
| 901 | } \ | 906 | goto yybackup; \ |
| 902 | else \ | 907 | } \ |
| 903 | { \ | 908 | else \ |
| 909 | { \ | ||
| 904 | yyerror (YY_("syntax error: cannot back up")); \ | 910 | yyerror (YY_("syntax error: cannot back up")); \ |
| 905 | YYERROR; \ | 911 | YYERROR; \ |
| 906 | } \ | 912 | } \ |
| @@ -995,6 +1001,8 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep) | |||
| 995 | YYSTYPE const * const yyvaluep; | 1001 | YYSTYPE const * const yyvaluep; |
| 996 | #endif | 1002 | #endif |
| 997 | { | 1003 | { |
| 1004 | FILE *yyo = yyoutput; | ||
| 1005 | YYUSE (yyo); | ||
| 998 | if (!yyvaluep) | 1006 | if (!yyvaluep) |
| 999 | return; | 1007 | return; |
| 1000 | # ifdef YYPRINT | 1008 | # ifdef YYPRINT |
| @@ -1246,12 +1254,12 @@ static int | |||
| 1246 | yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, | 1254 | yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, |
| 1247 | yytype_int16 *yyssp, int yytoken) | 1255 | yytype_int16 *yyssp, int yytoken) |
| 1248 | { | 1256 | { |
| 1249 | YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]); | 1257 | YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]); |
| 1250 | YYSIZE_T yysize = yysize0; | 1258 | YYSIZE_T yysize = yysize0; |
| 1251 | YYSIZE_T yysize1; | 1259 | YYSIZE_T yysize1; |
| 1252 | enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; | 1260 | enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; |
| 1253 | /* Internationalized format string. */ | 1261 | /* Internationalized format string. */ |
| 1254 | const char *yyformat = 0; | 1262 | const char *yyformat = YY_NULL; |
| 1255 | /* Arguments of yyformat. */ | 1263 | /* Arguments of yyformat. */ |
| 1256 | char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; | 1264 | char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; |
| 1257 | /* Number of reported tokens (one for the "unexpected", one per | 1265 | /* Number of reported tokens (one for the "unexpected", one per |
| @@ -1311,7 +1319,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, | |||
| 1311 | break; | 1319 | break; |
| 1312 | } | 1320 | } |
| 1313 | yyarg[yycount++] = yytname[yyx]; | 1321 | yyarg[yycount++] = yytname[yyx]; |
| 1314 | yysize1 = yysize + yytnamerr (0, yytname[yyx]); | 1322 | yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]); |
| 1315 | if (! (yysize <= yysize1 | 1323 | if (! (yysize <= yysize1 |
| 1316 | && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) | 1324 | && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) |
| 1317 | return 2; | 1325 | return 2; |
| @@ -1463,7 +1471,7 @@ yyparse () | |||
| 1463 | `yyss': related to states. | 1471 | `yyss': related to states. |
| 1464 | `yyvs': related to semantic values. | 1472 | `yyvs': related to semantic values. |
| 1465 | 1473 | ||
| 1466 | Refer to the stacks thru separate pointers, to allow yyoverflow | 1474 | Refer to the stacks through separate pointers, to allow yyoverflow |
| 1467 | to reallocate them elsewhere. */ | 1475 | to reallocate them elsewhere. */ |
| 1468 | 1476 | ||
| 1469 | /* The state stack. */ | 1477 | /* The state stack. */ |
| @@ -2346,7 +2354,7 @@ yyabortlab: | |||
| 2346 | yyresult = 1; | 2354 | yyresult = 1; |
| 2347 | goto yyreturn; | 2355 | goto yyreturn; |
| 2348 | 2356 | ||
| 2349 | #if !defined(yyoverflow) || YYERROR_VERBOSE | 2357 | #if !defined yyoverflow || YYERROR_VERBOSE |
| 2350 | /*-------------------------------------------------. | 2358 | /*-------------------------------------------------. |
| 2351 | | yyexhaustedlab -- memory exhaustion comes here. | | 2359 | | yyexhaustedlab -- memory exhaustion comes here. | |
| 2352 | `-------------------------------------------------*/ | 2360 | `-------------------------------------------------*/ |
diff --git a/scripts/genksyms/parse.tab.h_shipped b/scripts/genksyms/parse.tab.h_shipped index 93240a3cdecc..a4737dec4532 100644 --- a/scripts/genksyms/parse.tab.h_shipped +++ b/scripts/genksyms/parse.tab.h_shipped | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | /* A Bison parser, made by GNU Bison 2.5. */ | 1 | /* A Bison parser, made by GNU Bison 2.5.1. */ |
| 2 | 2 | ||
| 3 | /* Bison interface for Yacc-like parsers in C | 3 | /* Bison interface for Yacc-like parsers in C |
| 4 | 4 | ||
| 5 | Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc. | 5 | Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. |
| 6 | 6 | ||
| 7 | This program is free software: you can redistribute it and/or modify | 7 | This program is free software: you can redistribute it and/or modify |
| 8 | it under the terms of the GNU General Public License as published by | 8 | it under the terms of the GNU General Public License as published by |
| @@ -66,18 +66,19 @@ | |||
| 66 | EXPORT_SYMBOL_KEYW = 284, | 66 | EXPORT_SYMBOL_KEYW = 284, |
| 67 | ASM_PHRASE = 285, | 67 | ASM_PHRASE = 285, |
| 68 | ATTRIBUTE_PHRASE = 286, | 68 | ATTRIBUTE_PHRASE = 286, |
| 69 | BRACE_PHRASE = 287, | 69 | TYPEOF_PHRASE = 287, |
| 70 | BRACKET_PHRASE = 288, | 70 | BRACE_PHRASE = 288, |
| 71 | EXPRESSION_PHRASE = 289, | 71 | BRACKET_PHRASE = 289, |
| 72 | CHAR = 290, | 72 | EXPRESSION_PHRASE = 290, |
| 73 | DOTS = 291, | 73 | CHAR = 291, |
| 74 | IDENT = 292, | 74 | DOTS = 292, |
| 75 | INT = 293, | 75 | IDENT = 293, |
| 76 | REAL = 294, | 76 | INT = 294, |
| 77 | STRING = 295, | 77 | REAL = 295, |
| 78 | TYPE = 296, | 78 | STRING = 296, |
| 79 | OTHER = 297, | 79 | TYPE = 297, |
| 80 | FILENAME = 298 | 80 | OTHER = 298, |
| 81 | FILENAME = 299 | ||
| 81 | }; | 82 | }; |
| 82 | #endif | 83 | #endif |
| 83 | 84 | ||
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y index 23c39998ad86..b9f4cf202302 100644 --- a/scripts/genksyms/parse.y +++ b/scripts/genksyms/parse.y | |||
| @@ -103,6 +103,7 @@ static void record_compound(struct string_list **keyw, | |||
| 103 | 103 | ||
| 104 | %token ASM_PHRASE | 104 | %token ASM_PHRASE |
| 105 | %token ATTRIBUTE_PHRASE | 105 | %token ATTRIBUTE_PHRASE |
| 106 | %token TYPEOF_PHRASE | ||
| 106 | %token BRACE_PHRASE | 107 | %token BRACE_PHRASE |
| 107 | %token BRACKET_PHRASE | 108 | %token BRACKET_PHRASE |
| 108 | %token EXPRESSION_PHRASE | 109 | %token EXPRESSION_PHRASE |
| @@ -220,8 +221,8 @@ storage_class_specifier: | |||
| 220 | type_specifier: | 221 | type_specifier: |
| 221 | simple_type_specifier | 222 | simple_type_specifier |
| 222 | | cvar_qualifier | 223 | | cvar_qualifier |
| 223 | | TYPEOF_KEYW '(' decl_specifier_seq '*' ')' | 224 | | TYPEOF_KEYW '(' parameter_declaration ')' |
| 224 | | TYPEOF_KEYW '(' decl_specifier_seq ')' | 225 | | TYPEOF_PHRASE |
| 225 | 226 | ||
| 226 | /* References to s/u/e's defined elsewhere. Rearrange things | 227 | /* References to s/u/e's defined elsewhere. Rearrange things |
| 227 | so that it is easier to expand the definition fully later. */ | 228 | so that it is easier to expand the definition fully later. */ |
